├── .comate
└── memory.json
├── rpc
├── help_doc
├── grpcx
│ ├── etcdx
│ │ ├── etcd_test.go_help
│ │ ├── balancer_test.go_help
│ │ ├── etcd_ lease_test.go_help
│ │ ├── service.go
│ │ └── etcd_test.go
│ ├── balancer
│ │ └── wrr
│ │ │ ├── wrr.go_help
│ │ │ └── wrr.go
│ ├── limiter
│ │ ├── fixedWindow
│ │ │ ├── fixedWindow.go_help
│ │ │ └── fixedWindow.go
│ │ ├── leakyBucket
│ │ │ ├── leakyBucket_help
│ │ │ └── leakyBucket.go
│ │ ├── slidingWindow
│ │ │ ├── slidingWindow.go_help
│ │ │ ├── testPkg
│ │ │ │ └── service.go
│ │ │ └── slidingWindow.go
│ │ ├── tokenBucket
│ │ │ ├── tokenBocket_help
│ │ │ └── tokenBocket.go
│ │ ├── counterLiniter
│ │ │ ├── counterLimiter.go_help
│ │ │ └── counterLimiter.go
│ │ └── limiter_help
│ ├── failover
│ │ ├── failover.json_help
│ │ ├── failover.json
│ │ ├── failover.go_help
│ │ └── service.go
│ ├── observationX
│ │ ├── observationX_help
│ │ ├── builder.go
│ │ ├── grpcLogX
│ │ │ ├── grpcLogX.go
│ │ │ └── grpcLogX_test.go
│ │ └── prometeusX
│ │ │ └── builder.go
│ └── circuitbreaker
│ │ ├── circuitbreaker_help
│ │ └── interceptor.go
├── kratosx
│ ├── kratos_example_test.go_help
│ ├── service.go
│ └── kratos_example_test.go
└── go_zero
│ ├── go_zero_example_test.go_help
│ ├── service.go
│ └── go_zero_example_test.go
├── DBx
├── DBX_help
├── mysqlX
│ └── gormx
│ │ ├── dbMovex
│ │ └── myMovex
│ │ │ ├── doubleWritePoolx
│ │ │ └── help_doc
│ │ │ ├── migrator.go
│ │ │ ├── events
│ │ │ ├── inconsistent.go
│ │ │ └── structTest.go
│ │ │ ├── help_doc.txt
│ │ │ └── messageQueue
│ │ │ ├── consumerx
│ │ │ ├── fixer.go
│ │ │ ├── consumer.go
│ │ │ └── consumer_test.go
│ │ │ └── producer
│ │ │ └── producer_test.go
│ │ ├── dbLogx
│ │ ├── types.go
│ │ └── zeroLogx.go
│ │ ├── dbPrometheusx
│ │ ├── help_docx
│ │ └── prometheusGormBuild.go
│ │ └── gorm_help
├── redisX
│ ├── cacheCountServiceX
│ │ ├── cnt_help
│ │ ├── lua
│ │ │ ├── cnt.lua
│ │ │ ├── get_rank.lua
│ │ │ └── cntRank.lua
│ │ ├── types.go
│ │ └── setConfig.go
│ └── redisMonitorX
│ │ └── redisPrometheusx
│ │ ├── help_docx
│ │ ├── redisPrometheusKeyx.go
│ │ └── redosPrometheusTimex.go
├── localCahceX
│ ├── types.go
│ └── cacheLocalRistrettox
│ │ ├── ristretto.go
│ │ └── ristretto_test.go
└── sqlX
│ └── json.go
├── registry
├── help_doc
└── etcdx
│ ├── etcd_test.go_help
│ ├── balancer_test.go_help
│ ├── etcd_ lease_test.go_help
│ ├── service.go
│ └── etcd_test.go
├── channelx
├── messageQueuex
│ ├── help
│ ├── saramax
│ │ ├── saramaProducerx
│ │ │ └── testType.go
│ │ └── saramaConsumerx
│ │ │ ├── consumer.go
│ │ │ └── serviceLogic
│ │ │ └── serviceLogic.go
│ └── types.go
└── mqX
│ ├── mqX_help
│ ├── kafkaX
│ └── saramaX
│ │ ├── consumerX
│ │ ├── cfg.go
│ │ ├── offsetConsumer_test.go
│ │ └── consumer_test.go
│ │ └── producerX
│ │ ├── cfg.go
│ │ └── pro_test.go
│ └── types.go
├── configx
├── viperX
│ ├── redis.yaml
│ ├── db.yaml
│ └── viper_test.go
└── types.go
├── sliceX
├── queueX
│ └── queue_help
├── types.go
├── slicex_test.go
├── diff.go
├── slicex.go
├── contains.go
├── diff_test.go
└── map.go
├── serviceLogicX
├── cronX
│ ├── cron_help
│ └── cron_test.go.bak
└── rankingListX
│ ├── rankingServiceCronX
│ ├── cronRankingService_test.go
│ └── cron.go
│ ├── types.go
│ ├── rankingServiceRdbZsetX
│ ├── types
│ │ └── types.go
│ └── rankingServiceRdbZset_test.go
│ ├── rankingServiceX
│ ├── types
│ │ └── types.go
│ ├── buildGormX
│ │ └── BuildDataSource.go
│ └── rankingService.go
│ └── ranking_help
├── syncX
├── lock
│ └── redisLock
│ │ └── redsyncx
│ │ ├── help_doc.txt
│ │ ├── types.go
│ │ ├── lock_cron_mysql
│ │ ├── lock_cron_mysql_help
│ │ ├── db_test.go
│ │ ├── cron.go
│ │ ├── domain
│ │ │ └── db.go
│ │ ├── repository
│ │ │ ├── dao
│ │ │ │ ├── db.go
│ │ │ │ └── cron.go
│ │ │ └── cron.go
│ │ └── service
│ │ │ └── cron.go
│ │ ├── redsync1_test.go
│ │ └── redsync_test.go
├── pool.go
├── limitPool.go
├── atomicx
│ ├── example_test.go
│ └── atomic.go
├── rwMutexMap_test.go
└── map.go
├── .vscode
└── settings.json
├── observationX
├── observationX_help
├── opentelemetryX
│ ├── otelTracer.go
│ ├── otel_test.go
│ ├── otel.go
│ └── help_docx.txt
└── prometheusX
│ └── prometheus.go
├── webx
└── ginx
│ ├── middleware
│ ├── jwtx
│ │ ├── jwtx_help
│ │ ├── types.go
│ │ └── jwtx_test.go
│ ├── limitX
│ │ ├── limitX_help
│ │ ├── counterLimiter.go
│ │ ├── fixedWindow.go
│ │ ├── ratelimitGinBuild.go
│ │ ├── tokenBocket.go
│ │ └── slidingWindow.go
│ ├── ginPrometheusx
│ │ ├── types.go
│ │ └── help_docx
│ ├── jwtX2
│ │ ├── claims.go
│ │ ├── jwt_test.go
│ │ ├── types.go
│ │ └── jwtX2_help
│ └── sessionx
│ │ └── sessionGinBuild.go
│ └── result.go
├── .idea
├── .gitignore
├── vcs.xml
├── modules.xml
└── pkg_tool.iml
├── limiter
├── types.go
├── slide_window.lua
└── mocks
│ └── limiter.mock.go
├── hashx
└── passwdX
│ ├── passwordx_test.go
│ └── passwordx.go
├── netX
└── ip.go
├── convertx
├── convertx.go
└── toanyx
│ └── toany_test.go
├── logx
├── types.go
├── zerologx
│ └── logx_test.go
├── zaplogx
│ ├── zapLogger.go
│ └── zapLogger_test.go
└── fields.go
├── compare.go
├── systemLoad
└── gopsutilx
│ └── gopsutil_test.go
└── README.md
/.comate/memory.json:
--------------------------------------------------------------------------------
1 | []
--------------------------------------------------------------------------------
/rpc/help_doc:
--------------------------------------------------------------------------------
1 | package grpc相关
--------------------------------------------------------------------------------
/DBx/DBX_help:
--------------------------------------------------------------------------------
1 | /*
2 | DB的一些封装、插件等
3 | */
--------------------------------------------------------------------------------
/registry/help_doc:
--------------------------------------------------------------------------------
1 | package etcdx 服务注册与发现相关
--------------------------------------------------------------------------------
/channelx/messageQueuex/help:
--------------------------------------------------------------------------------
1 | /*
2 | 优先使用mqX包
3 | */
--------------------------------------------------------------------------------
/configx/viperX/redis.yaml:
--------------------------------------------------------------------------------
1 | redis:
2 | addr: "test2"
--------------------------------------------------------------------------------
/sliceX/queueX/queue_help:
--------------------------------------------------------------------------------
1 | /*
2 | 泛型实现优先级队列
3 | */
--------------------------------------------------------------------------------
/registry/etcdx/etcd_test.go_help:
--------------------------------------------------------------------------------
1 | /*
2 | grpc中接入etcd
3 | */
--------------------------------------------------------------------------------
/rpc/grpcx/etcdx/etcd_test.go_help:
--------------------------------------------------------------------------------
1 | /*
2 | grpc中接入etcd
3 | */
--------------------------------------------------------------------------------
/serviceLogicX/cronX/cron_help:
--------------------------------------------------------------------------------
1 | /*
2 | 封装的通用定时任务的调度机制
3 | */
--------------------------------------------------------------------------------
/DBx/mysqlX/gormx/dbMovex/myMovex/doubleWritePoolx/help_doc:
--------------------------------------------------------------------------------
1 | 此包为DB双写池部分
--------------------------------------------------------------------------------
/registry/etcdx/balancer_test.go_help:
--------------------------------------------------------------------------------
1 | /*
2 | grpc中自带的轮询算法和加权轮询算法
3 | */
--------------------------------------------------------------------------------
/syncX/lock/redisLock/redsyncx/help_doc.txt:
--------------------------------------------------------------------------------
1 | /*
2 | 基于redis实现分布式锁
3 | */
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "editor.maxTokenizationLineLength": 20000
3 | }
--------------------------------------------------------------------------------
/rpc/grpcx/etcdx/balancer_test.go_help:
--------------------------------------------------------------------------------
1 | /*
2 | grpc中自带的轮询算法和加权轮询算法
3 | */
--------------------------------------------------------------------------------
/registry/etcdx/etcd_ lease_test.go_help:
--------------------------------------------------------------------------------
1 | /*
2 | 在grpc中使用etcd注册中心启用续约、租约
3 | */
4 |
--------------------------------------------------------------------------------
/rpc/grpcx/etcdx/etcd_ lease_test.go_help:
--------------------------------------------------------------------------------
1 | /*
2 | 在grpc中使用etcd注册中心启用续约、租约
3 | */
4 |
--------------------------------------------------------------------------------
/configx/viperX/db.yaml:
--------------------------------------------------------------------------------
1 | mysql:
2 | dsn: "test1"
3 |
4 | testKey:
5 | val: 6666
6 |
--------------------------------------------------------------------------------
/DBx/redisX/cacheCountServiceX/cnt_help:
--------------------------------------------------------------------------------
1 | /*
2 | 此包cacheCountService暂时先不用,有一部分小问题异常
3 | */
--------------------------------------------------------------------------------
/observationX/observationX_help:
--------------------------------------------------------------------------------
1 | /*
2 | 可观测性相关
3 | Prometheus、opentelemetry等
4 | */
--------------------------------------------------------------------------------
/webx/ginx/middleware/jwtx/jwtx_help:
--------------------------------------------------------------------------------
1 | /*
2 | 注意:【jwtx此包弃用,优先使用jwtX2】
3 | 此包暂时弃用
4 | */
--------------------------------------------------------------------------------
/rpc/kratosx/kratos_example_test.go_help:
--------------------------------------------------------------------------------
1 | // kratos中的负载均衡:默认算法
2 | // 默认使用的是平滑的加权轮询算法【即不指定任何算法时候,用的就是这个算法】
--------------------------------------------------------------------------------
/serviceLogicX/rankingListX/rankingServiceCronX/cronRankingService_test.go:
--------------------------------------------------------------------------------
1 | package rankingServiceCronX
2 |
--------------------------------------------------------------------------------
/webx/ginx/middleware/limitX/limitX_help:
--------------------------------------------------------------------------------
1 | /*
2 | 优先使用基于redis限流,ratelimitGinBuild.go【NewRedisBuilder】
3 | */
--------------------------------------------------------------------------------
/rpc/go_zero/go_zero_example_test.go_help:
--------------------------------------------------------------------------------
1 | // go-zero中的负载均衡算法【默认是p2c算法,这是框架内部写死的,需要修改的话,就要传入一个原生的grpc.WithDefaultServiceConfig】
--------------------------------------------------------------------------------
/serviceLogicX/rankingListX/types.go:
--------------------------------------------------------------------------------
1 | package rankingListX
2 |
3 | type RankingTopN[T any] interface {
4 | GetTopN() []T
5 | }
6 |
--------------------------------------------------------------------------------
/sliceX/types.go:
--------------------------------------------------------------------------------
1 | package sliceX
2 |
3 | // equalFunc 比较两个元素是否相等
4 | type equalFunc[T any] func(src, dst T) bool
5 |
6 | type matchFunc[T any] func(src T) bool
7 |
--------------------------------------------------------------------------------
/syncX/lock/redisLock/redsyncx/types.go:
--------------------------------------------------------------------------------
1 | package redsyncx
2 |
3 | type RedSyncIn interface {
4 | Start() <-chan LockResult
5 | Stop()
6 | IsLocked() bool
7 | }
8 |
--------------------------------------------------------------------------------
/webx/ginx/result.go:
--------------------------------------------------------------------------------
1 | package ginx
2 |
3 | type Result struct {
4 | Code int `json:"code"`
5 | Msg string `json:"msg"`
6 | Data any `json:"data"`
7 | }
8 |
--------------------------------------------------------------------------------
/channelx/mqX/mqX_help:
--------------------------------------------------------------------------------
1 | /*
2 | mqX是消息队列的顶级封装,后可装饰器模式等,来进行其他实现,优先使用mqX,而非messageQueuex
3 |
4 | 注意:
5 | 1、消费者的业务逻辑handler传入,需实现 ConsumerHandlerType 接口,然后handler里传入实现的结构体/接口
6 | */
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # 默认忽略的文件
2 | /shelf/
3 | /workspace.xml
4 | # 基于编辑器的 HTTP 客户端请求
5 | /httpRequests/
6 | # Datasource local storage ignored files
7 | /dataSources/
8 | /dataSources.local.xml
9 |
--------------------------------------------------------------------------------
/syncX/lock/redisLock/redsyncx/lock_cron_mysql/lock_cron_mysql_help:
--------------------------------------------------------------------------------
1 | /*
2 | 基于mysql定时任务、redis分布式锁,分布式任务调度平台
3 | 加入部门管理和权限控制功能、接入http任务和grpc任务(调用一个http或者grpc接口)、加入任务执行历史功能(有记录历史任务执行情况)
4 | */
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/channelx/messageQueuex/saramax/saramaProducerx/testType.go:
--------------------------------------------------------------------------------
1 | package saramaProducerx
2 |
3 | // Deprecated: messageQueuex此包弃用,此方法将在未来版本中删除,请使用mqX包
4 | type ValTest struct {
5 | Name string
6 | Age int
7 | }
8 |
--------------------------------------------------------------------------------
/limiter/types.go:
--------------------------------------------------------------------------------
1 | package limiter
2 |
3 | import "context"
4 |
5 | type Limiter interface {
6 | // Limit 限制、限流,是否触发限流。
7 | // 返回True就是触发限流。
8 | Limit(ctx context.Context, key string) (bool, error)
9 | }
10 |
--------------------------------------------------------------------------------
/rpc/grpcx/balancer/wrr/wrr.go_help:
--------------------------------------------------------------------------------
1 | // 在gRPC中接入自己的负载均衡算法
2 | // 需在项目中匿名导入,就是为了触发wrr包中的init()方法,初始化好算法,注册实现
3 | _ "gitee.com/hgg_test/pkg_tool/v2/rpc/grpcx/balancer/wrr"
4 | _ "google.golang.org/grpc/balancer/weightedroundrobin"
--------------------------------------------------------------------------------
/rpc/grpcx/limiter/fixedWindow/ fixedWindow.go_help:
--------------------------------------------------------------------------------
1 | /*
2 | 限流算法:固定窗口
3 | 将时间切成一个个窗口,确保每个窗口的请求数量没有超过阈值
4 |
5 |
6 | 返回的UnaryServerInterceptor方法是,提供了一个钩子来拦截服务器上一元RPC的执行。
7 | info包含拦截器可以操作的RPC的所有信息。而handler是服务方法实现的包装器。 拦截器负责调用处理程序来完成RPC。
8 | */
--------------------------------------------------------------------------------
/DBx/mysqlX/gormx/dbMovex/myMovex/migrator.go:
--------------------------------------------------------------------------------
1 | package myMovex
2 |
3 | type Entity interface {
4 | // ID 要求返回 ID
5 | ID() int64
6 | // CompareTo dst 必然也是 Entity,正常来说类型是一样的,怎么比较两张表数据
7 | CompareTo(dst Entity) bool
8 |
9 | // Types 返回表名
10 | Types() string
11 | }
12 |
--------------------------------------------------------------------------------
/rpc/grpcx/limiter/leakyBucket/leakyBucket_help:
--------------------------------------------------------------------------------
1 | /*
2 | rpc限流算法:漏桶算法
3 | 漏桶算法要点:
4 | ·有一个人按一定的速率发令牌
5 | ·每一个请求拿一个令牌
6 | ·拿到令牌的请求就会被处理
7 | ·没有拿到令牌的请求就会:
8 | ·1、直接被拒绝
9 | ·2、阻塞直到拿到令牌或者超时
10 | */
--------------------------------------------------------------------------------
/rpc/grpcx/limiter/slidingWindow/slidingWindow.go_help:
--------------------------------------------------------------------------------
1 | /*
2 | 限流算法:滑动窗口
3 | 可以任务自始至终只有一个窗口,这个窗口就是从当前时间往前回溯窗口大小的一段时间内,在这个窗口内,只能处理固定数量的请求
4 |
5 | 返回的UnaryServerInterceptor方法是,提供了一个钩子来拦截服务器上一元RPC的执行。
6 | info包含拦截器可以操作的RPC的所有信息。而handler是服务方法实现的包装器。 拦截器负责调用处理程序来完成RPC。
7 | */
--------------------------------------------------------------------------------
/rpc/grpcx/limiter/tokenBucket/tokenBocket_help:
--------------------------------------------------------------------------------
1 | /*
2 | rpc限流算法:令牌桶算法
3 | 令牌桶算法要点:
4 | 有一个人按一定的速率发令牌
5 | ·令牌会被放到一个桶里
6 | ·每一个请求从桶里面拿一个令牌
7 | ·拿到令牌的请求就会被处理
8 | ·没有拿到令牌的请求就会:
9 | ·直接被拒绝
10 | ·阻塞直到拿到令牌或者超时
11 | */
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/rpc/grpcx/limiter/counterLiniter/counterLimiter.go_help:
--------------------------------------------------------------------------------
1 | /*
2 | 计数限流算法:
3 | BuildServerInterceptor是在服务端拦截器时,进行rpc限流的算法,计数限流算法【任何时候系统中只有固定请求】
4 | 请求过来+1,返回相应-1。达到阈值限流。
5 |
6 | 返回的UnaryServerInterceptor方法是,提供了一个钩子来拦截服务器上一元RPC的执行。
7 | info包含拦截器可以操作的RPC的所有信息。而handler是服务方法实现的包装器。 拦截器负责调用处理程序来完成RPC。
8 | */
--------------------------------------------------------------------------------
/DBx/redisX/cacheCountServiceX/lua/cnt.lua:
--------------------------------------------------------------------------------
1 | -- 具体业务,文章、帖子等
2 | local key = KEYS[1]
3 | -- 阅读数,点赞数还是收藏数
4 | local cntKey = ARGV[1]
5 | -- 增量
6 | local delta = tonumber(ARGV[2])
7 | -- key是否存在
8 | local exist = redis.call('EXISTS', key)
9 | if exist == 1 then
10 | redis.call('HINCRBY',key,cntKey,delta)
11 | return 1
12 | else
13 | return 0
14 | end
--------------------------------------------------------------------------------
/webx/ginx/middleware/ginPrometheusx/types.go:
--------------------------------------------------------------------------------
1 | package ginPrometheusx
2 |
3 | import "github.com/gin-gonic/gin"
4 |
5 | // PrometheusGinBuilder 接口
6 | type PrometheusGinBuilder interface {
7 | // BuildResponseTime 构建响应时间
8 | BuildResponseTime() gin.HandlerFunc
9 |
10 | // BuildActiveRequest 构建活跃请求
11 | BuildActiveRequest() gin.HandlerFunc
12 | }
13 |
--------------------------------------------------------------------------------
/.idea/pkg_tool.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/hashx/passwdX/passwordx_test.go:
--------------------------------------------------------------------------------
1 | package passwdX
2 |
3 | import (
4 | "testing"
5 | )
6 |
7 | func TestPasswordx(t *testing.T) {
8 | p, err := PasswdJiaMi("123123", 10)
9 | if err != nil {
10 | t.Error(err)
11 | }
12 | t.Log(p)
13 |
14 | err = PasswdJieMi(string(p), "123123")
15 | if err != nil {
16 | t.Error(err)
17 | }
18 | t.Log("密码验证成功")
19 | }
20 |
--------------------------------------------------------------------------------
/DBx/redisX/cacheCountServiceX/lua/get_rank.lua:
--------------------------------------------------------------------------------
1 | -- 接收 KEYS[1] 为排行榜键,ARGV[1] 和 ARGV[2] 为排名范围。返回一个数组。
2 | local rank_key = KEYS[1]
3 | local start_idx = tonumber(ARGV[1])
4 | local end_idx = tonumber(ARGV[2])
5 |
6 | if not start_idx or not end_idx then
7 | return {}
8 | end
9 | -- 获取排行榜
10 | local items = redis.call('ZREVRANGE', rank_key, start_idx, end_idx, 'WITHSCORES')
11 | return items
--------------------------------------------------------------------------------
/netX/ip.go:
--------------------------------------------------------------------------------
1 | package netX
2 |
3 | import "net"
4 |
5 | // GetOutboundIP 获得对外发送消息的 IP 地址
6 | func GetOutboundIP() string {
7 | conn, err := net.Dial("udp", "8.8.8.8:80") // 8.8.8.8 is google's DNS
8 | if err != nil {
9 | return ""
10 | }
11 | defer conn.Close()
12 |
13 | localAddr := conn.LocalAddr().(*net.UDPAddr) // *net.UDPAddr is a struct
14 | return localAddr.IP.String()
15 | }
16 |
--------------------------------------------------------------------------------
/rpc/grpcx/failover/failover.json_help:
--------------------------------------------------------------------------------
1 | /*
2 | retry policy 详解
3 |
4 | 首先重试我们一般通过methodConfig来控制:
5 |
6 | name:指定哪些方法。
7 | 这里我们指定的是UserService 上所有方法。这个 service 对应的值,要去 proto 编译后的文件里面找。
8 | retryPolicy:重试的策略。
9 | maxAttempts:最大重试次数。
10 | initBackoff:重试间隔的初始值。
11 | maxBackoff:重试间隔的最大值。
12 | backoffMultiplier:重试间隔的增长系数。
13 |
14 | 手写JSON很容易出错,千万仔细检查。
15 | */
--------------------------------------------------------------------------------
/rpc/grpcx/failover/failover.json:
--------------------------------------------------------------------------------
1 | {
2 | "loadBalancingConfig": [{"round_robin": {}}],
3 | "methodConfig": [
4 | {
5 | "name": [{"service": "UserService"}],
6 | "retryPolicy": {
7 | "maxAttempts": 4,
8 | "initialBackoff": "0.01s",
9 | "maxBackoff": "0.1s",
10 | "backoffMultiplier": 2.0,
11 | "retryableStatusCodes": ["UNAVAILABLE"]
12 | }
13 | }
14 | ]
15 | }
--------------------------------------------------------------------------------
/observationX/opentelemetryX/otelTracer.go:
--------------------------------------------------------------------------------
1 | package opentelemetryX
2 |
3 | import (
4 | "go.opentelemetry.io/otel"
5 | "go.opentelemetry.io/otel/trace"
6 | )
7 |
8 | type OtelTracerStr struct {
9 | }
10 |
11 | func NewOtelTracerStr() *OtelTracerStr {
12 | return &OtelTracerStr{}
13 | }
14 |
15 | func (o *OtelTracerStr) NewTracer(name string, opts ...trace.TracerOption) trace.Tracer {
16 | return otel.Tracer(name, opts...)
17 | }
18 |
--------------------------------------------------------------------------------
/webx/ginx/middleware/jwtX2/claims.go:
--------------------------------------------------------------------------------
1 | package jwtX2
2 |
3 | import "github.com/golang-jwt/jwt/v5"
4 |
5 | // UserClaims 用于 Access 和 Refresh Token
6 | type UserClaims struct {
7 | jwt.RegisteredClaims
8 | Uid int64 `json:"uid"`
9 | Name string `json:"name"`
10 | Ssid string `json:"ssid"` // 会话唯一 ID
11 | UserAgent string `json:"user_agent"`
12 | TokenType string `json:"token_type"` // "access" 或 "refresh"
13 | }
14 |
--------------------------------------------------------------------------------
/serviceLogicX/rankingListX/rankingServiceRdbZsetX/types/types.go:
--------------------------------------------------------------------------------
1 | package types
2 |
3 | // ScoreProvider 分数提供器(用于排序)
4 | type ScoreProvider interface {
5 | Score(item HotScore) float64
6 | }
7 |
8 | type HotScore struct {
9 | Biz string
10 | BizID string
11 | Score float64
12 | Title string
13 | }
14 |
15 | type HotScoreProvider struct{}
16 |
17 | func (p HotScoreProvider) Score(item HotScore) float64 {
18 | return item.Score
19 | }
20 |
--------------------------------------------------------------------------------
/serviceLogicX/rankingListX/rankingServiceX/types/types.go:
--------------------------------------------------------------------------------
1 | package types
2 |
3 | // ScoreProvider 定义如何从任意类型 T 中提取得分
4 | type ScoreProvider[T any] interface {
5 | Score(item T) float64
6 | }
7 |
8 | type HotScore struct {
9 | Biz string
10 | BizID string
11 | Score float64
12 | Title string
13 | }
14 |
15 | type HotScoreProvider struct{}
16 |
17 | func (p HotScoreProvider) Score(item HotScore) float64 {
18 | return item.Score
19 | }
20 |
--------------------------------------------------------------------------------
/syncX/lock/redisLock/redsyncx/lock_cron_mysql/db_test.go:
--------------------------------------------------------------------------------
1 | package lock_cron_mysql
2 |
3 | import (
4 | db2 "gitee.com/hgg_test/pkg_tool/v2/syncX/lock/redisLock/redsyncx/lock_cron_mysql/repository/dao"
5 | "github.com/stretchr/testify/assert"
6 | "gorm.io/driver/mysql"
7 | "gorm.io/gorm"
8 | "testing"
9 | )
10 |
11 | func TestDb(t *testing.T) {
12 | db, err := gorm.Open(mysql.Open("root:root@(localhost:13306)/src_db"))
13 | assert.NoError(t, err)
14 | err = db.AutoMigrate(&db2.CronJob{})
15 | assert.NoError(t, err)
16 | }
17 |
--------------------------------------------------------------------------------
/serviceLogicX/rankingListX/ranking_help:
--------------------------------------------------------------------------------
1 | /*
2 | 榜单服务:
3 | rankingServiceX: ===> RankingServiceBatch适合、小时榜单、日榜、历史总榜等(凌晨跑批) → 结果存 本地缓存/Redis
4 | 1、NewRankingServiceBatch // 创建一个排行榜服务
5 | 2、SetBatchSize // 设置批量数据源的批量大小
6 | 3、SetSource // 设置批量数据源逻辑
7 | 4、GetTopN // 获取榜单结果
8 |
9 |
10 | rankingServiceRdbZsetX: ===> ZSET 分 Key + 本地缓存(秒级更新),适合【实时】热榜(Top 100)等
11 | 1、 NewRankingService // 创建全局服务
12 | 2、WithBizType // 获取 article 榜单
13 | 3、StartRefresh // 自动缓存刷新(可选)
14 | 4、GetTopN // 获取榜单
15 | */
--------------------------------------------------------------------------------
/convertx/convertx.go:
--------------------------------------------------------------------------------
1 | package convertx
2 |
3 | // ToPtr 将任意值转换为指针
4 | func ToPtr[T any](t T) *T {
5 | return &t
6 | }
7 |
8 | // ToPtrs 将任意值转换为指针数组
9 | func ToPtrs[T any](ts ...T) []*T {
10 | var ptrs []*T
11 | for _, t := range ts {
12 | ptrs = append(ptrs, ToPtr(t))
13 | }
14 | return ptrs
15 | }
16 |
17 | // Deref 通用解引用
18 | func Deref[T any](t *T) T {
19 | if t != nil {
20 | return *t
21 | }
22 | return *new(T)
23 | }
24 |
25 | // DerefOr 安全解引用指针,如果为nil时返回指定默认值or
26 | func DerefOr[T any](ptr *T, or T) T {
27 | if ptr != nil {
28 | return *ptr
29 | }
30 | return or
31 | }
32 |
--------------------------------------------------------------------------------
/rpc/grpcx/limiter/limiter_help:
--------------------------------------------------------------------------------
1 | /*
2 | 一些其它的负载均衡限流算法
3 |
4 | 以滑动窗口限流器为例:使用
5 | // 创建滑动窗口限流器, 最多2/s个请求,多了就触发限流
6 | limit := NewSlidingWindowLimiter(time.Second, 2)
7 | // 创建grpc服务,注册限流拦截器
8 | gs := grpc.NewServer(
9 | grpc.UnaryInterceptor(limit.BuildServerInterceptor()),
10 | )
11 |
12 | 【需要有多个限流器的话】
13 | grpc.UnaryInterceptor
14 | 替换
15 | grpc.ChainUnaryInterceptor(
16 | limit.BuildServerInterceptor(),
17 | 日志、
18 | 监控、
19 | )
20 | */
--------------------------------------------------------------------------------
/syncX/pool.go:
--------------------------------------------------------------------------------
1 | package syncX
2 |
3 | import "sync"
4 |
5 | // Pool 是对 sync.Pool 的简单封装
6 | // 会有一些性能损耗,但是基本可以忽略不计。担忧性能问题的可以参考
7 | type Pool[T any] struct {
8 | p sync.Pool
9 | }
10 |
11 | // NewPool 创建一个 Pool 实例
12 | // factory 必须返回 T 类型的值,并且不能返回 nil
13 | func NewPool[T any](factory func() T) *Pool[T] {
14 | return &Pool[T]{
15 | p: sync.Pool{
16 | New: func() any {
17 | return factory()
18 | },
19 | },
20 | }
21 | }
22 |
23 | // Get 取出一个元素
24 | func (p *Pool[T]) Get() T {
25 | return p.p.Get().(T)
26 | }
27 |
28 | // Put 放回去一个元素
29 | func (p *Pool[T]) Put(t T) {
30 | p.p.Put(t)
31 | }
32 |
--------------------------------------------------------------------------------
/DBx/mysqlX/gormx/dbLogx/types.go:
--------------------------------------------------------------------------------
1 | package dbLogx
2 |
3 | import (
4 | "context"
5 | glogger "gorm.io/gorm/logger"
6 | "time"
7 | )
8 |
9 | //go:generate mockgen -source=./types.go -package=dbLogxmocks -destination=mocks/dbLogx.mock.go GormLogIn
10 | type GormLogIn interface {
11 | LogMode(level glogger.LogLevel) glogger.Interface
12 | Info(ctx context.Context, msg string, data ...interface{})
13 | Warn(ctx context.Context, msg string, data ...interface{})
14 | Error(ctx context.Context, msg string, data ...interface{})
15 | Trace(ctx context.Context, begin time.Time, fc func() (string, int64), err error)
16 | }
17 |
--------------------------------------------------------------------------------
/DBx/mysqlX/gormx/dbMovex/myMovex/events/inconsistent.go:
--------------------------------------------------------------------------------
1 | package events
2 |
3 | type InconsistentEvent struct {
4 | ID int64
5 | // 用什么来修,取值为 SRC,意味着,以源表为准,取值为 DST,以目标表为准
6 | Direction string
7 | // 有些时候,一些观测,或者一些第三方,需要知道,是什么引起的不一致
8 | // 因为他要去 DEBUG
9 | // 这个是可选的
10 | Type string
11 | }
12 |
13 | const (
14 | // InconsistentEventTypeTargetMissing 校验的目标数据,缺了这一条
15 | InconsistentEventTypeTargetMissing = "target_missing"
16 | // InconsistentEventTypeNEQ 事件类型不一致
17 | InconsistentEventTypeNEQ = "neq"
18 | // InconsistentEventTypeBaseMissing 缺少不一致的事件类型基础
19 | InconsistentEventTypeBaseMissing = "base_missing"
20 | )
21 |
--------------------------------------------------------------------------------
/syncX/lock/redisLock/redsyncx/lock_cron_mysql/cron.go:
--------------------------------------------------------------------------------
1 | package lock_cron_mysql
2 |
3 | import (
4 | "gitee.com/hgg_test/pkg_tool/v2/logx"
5 | "gitee.com/hgg_test/pkg_tool/v2/syncX/lock/redisLock/redsyncx"
6 | "github.com/gin-gonic/gin"
7 | "gorm.io/gorm"
8 | )
9 |
10 | type CronMysql struct {
11 | web *gin.Engine
12 | db *gorm.DB
13 | redSync redsyncx.RedSyncIn
14 |
15 | l logx.Loggerx
16 | }
17 |
18 | func NewCronMysql(web *gin.Engine, db *gorm.DB, redSync redsyncx.RedSyncIn, l logx.Loggerx) *CronMysql {
19 | return &CronMysql{
20 | web: web,
21 | db: db,
22 | redSync: redSync,
23 | l: l,
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/logx/types.go:
--------------------------------------------------------------------------------
1 | package logx
2 |
3 | // Loggerx 接口(依赖抽象)
4 | //
5 | //go:generate mockgen -source=./types.go -package=logxmocks -destination=mocks/logx.mock.go Loggerx
6 | type Loggerx interface {
7 | Debug(msg string, fields ...Field)
8 | Info(msg string, fields ...Field)
9 | Warn(msg string, fields ...Field)
10 | Error(msg string, fields ...Field)
11 | }
12 | type Field struct {
13 | Key string
14 | Value any
15 | }
16 |
17 | //type Zlogger interface {
18 | // Info() *zerolog.Event
19 | // Error() *zerolog.Event
20 | // Debug() *zerolog.Event
21 | // Warn() *zerolog.Event
22 | // With() zerolog.Context
23 | // GetZerolog() *zerolog.Logger
24 | //}
25 |
--------------------------------------------------------------------------------
/sliceX/slicex_test.go:
--------------------------------------------------------------------------------
1 | package sliceX
2 |
3 | import (
4 | "testing"
5 | )
6 |
7 | func TestSlicex(t *testing.T) {
8 | s := make([]int, 5, 5)
9 | s = []int{1, 3, 5, 6, 7, 4, 2}
10 | ma := Max[int](s)
11 | mi := Min[int](s)
12 | t.Log("max: ", ma)
13 | t.Log("min: ", mi)
14 |
15 | s = make([]int, 0)
16 | ma = Max[int](s)
17 | mi = Min[int](s)
18 | t.Log("max: ", ma)
19 | t.Log("min: ", mi)
20 |
21 | numbers := []int{10, 20, 30, 40}
22 | // 使用 Map:将每个数字转为字符串,格式为 "索引: 数字"
23 | result := Map[int, float64](numbers, func(idx int, num int) float64 {
24 | return float64(num) * 1.1
25 | })
26 | t.Log(result)
27 |
28 | t.Log(Sum(numbers))
29 | }
30 |
--------------------------------------------------------------------------------
/webx/ginx/middleware/jwtX2/jwt_test.go:
--------------------------------------------------------------------------------
1 | package jwtX2
2 |
3 | import (
4 | "github.com/redis/go-redis/v9"
5 | "testing"
6 | )
7 |
8 | func TestNewJwtxMiddlewareGinx(t *testing.T) {
9 | NewJwtxMiddlewareGinx(InitCache(), &JwtxMiddlewareGinxConfig{
10 | SigningMethod: nil,
11 | DurationExpiresIn: 0,
12 | LongDurationExpiresIn: 0,
13 | JwtKey: []byte("qwerrewq"),
14 | LongJwtKey: []byte("qwerrewq123"),
15 | HeaderJwtTokenKey: "",
16 | LongHeaderJwtTokenKey: "",
17 | })
18 | }
19 |
20 | func InitCache() redis.Cmdable {
21 | return redis.NewClient(&redis.Options{
22 | Addr: "127.0.0.1:6379",
23 | })
24 | }
25 |
--------------------------------------------------------------------------------
/rpc/grpcx/failover/failover.go_help:
--------------------------------------------------------------------------------
1 | /*
2 | 在grpc中实现failover熔断降级等思路
3 |
4 | 在 gRPC 中实现 failover 的基本思路简单来说就是:重试+负载均衡。【默认的failover.json】
5 | · 重试:当调用不通的时候,gRPC会重试,再次经过负载均衡。
6 | · 负载均衡:要确保在某个节点不可用之后,负载均衡的策略不会再次挑选中它。
7 |
8 |
9 | 复杂的 failover 策略:
10 | 正常来说,轮询的那个【默认的】 failover 策略就可以了。
11 | 但有些情况下,你需要一些更加复杂的 failover 策略,那么就需要自己手动实现负载均衡策略。
12 | 并且在负载均衡算法里面嵌入自己的 failover 的逻辑。
13 | 例如,在我们手写基于权重的轮询负载均衡策略时,
14 | 你就可以考虑在 Done 里面直接把 currentWeight 调整到一 个极低的值,或者直接把这个节点从可用节点列表里面挪走。
15 | 【也就是wrr包下的,Done: func(info balancer.DoneInfo){}部分,weight/currentWeight调整极低】
16 | 【实际不常用这一类】
17 | */
--------------------------------------------------------------------------------
/DBx/redisX/cacheCountServiceX/types.go:
--------------------------------------------------------------------------------
1 | package cacheCountServiceX
2 |
3 | import (
4 | "context"
5 | "gitee.com/hgg_test/pkg_tool/v2/DBx/localCahceX"
6 | )
7 |
8 | // CntServiceIn 抽象计数服务接口【只用计数服务的话,此方法即可】
9 | type CntServiceIn[K localCahceX.Key, V any] interface {
10 | // Key(biz string, bizId int64) string
11 | // RankKey(biz string) string
12 |
13 | SetCnt(ctx context.Context, biz string, bizId int64, num ...int64) *Count[K, V]
14 | DelCnt(ctx context.Context, biz string, bizId int64) error
15 | GetCnt(ctx context.Context, biz string, bizId int64) ([]RankItem, error)
16 | GetCntRank(ctx context.Context, biz string, opt ...GetCntType) ([]RankItem, error)
17 | }
18 |
--------------------------------------------------------------------------------
/convertx/toanyx/toany_test.go:
--------------------------------------------------------------------------------
1 | package toanyx
2 |
3 | import (
4 | "github.com/stretchr/testify/assert"
5 | "testing"
6 | )
7 |
8 | func TestToAny(t *testing.T) {
9 | testCases := []struct {
10 | name string
11 | src any
12 |
13 | wanRes any
14 | }{
15 | {
16 | name: "IntToString ok",
17 | src: map[string]string{"a": "1", "b": "2"},
18 | wanRes: map[string]string{"a": "1", "b": "2"},
19 | },
20 | // ...............
21 | }
22 |
23 | for _, tc := range testCases {
24 | t.Run(tc.name, func(t *testing.T) {
25 | s, ok := ToAny[[]map[string]any](tc.src)
26 | assert.True(t, ok, "转换失败")
27 | assert.Equal(t, tc.wanRes, s)
28 | t.Log(s)
29 | })
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/channelx/mqX/kafkaX/saramaX/consumerX/cfg.go:
--------------------------------------------------------------------------------
1 | package consumerX
2 |
3 | import "time"
4 |
5 | type ConsumerConfig struct {
6 | // BatchSize 批量消费的最大消息数(仅 BatchConsumerHandler 生效)
7 | // 默认:100
8 | BatchSize int
9 |
10 | // BatchTimeout 批量消费的超时时间(从第一条消息进入缓冲区开始计时)
11 | // 默认:5秒
12 | // 若为 0,则禁用超时,仅按数量触发
13 | BatchTimeout time.Duration
14 | }
15 |
16 | func DefaultConsumerConfig() *ConsumerConfig {
17 | return &ConsumerConfig{
18 | BatchSize: 100,
19 | BatchTimeout: 5 * time.Second,
20 | }
21 | }
22 |
23 | func (c *ConsumerConfig) Validate() {
24 | if c.BatchSize <= 0 {
25 | c.BatchSize = 100
26 | }
27 | if c.BatchTimeout < 0 {
28 | c.BatchTimeout = 5 * time.Second
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/channelx/mqX/kafkaX/saramaX/producerX/cfg.go:
--------------------------------------------------------------------------------
1 | package producerX
2 |
3 | import "time"
4 |
5 | type ProducerConfig struct {
6 | // BatchSize 最大批量大小(达到即发送)
7 | BatchSize int
8 |
9 | // BatchTimeout 超时自动 flush(从第一条消息进入缓冲区开始计时)
10 | // 0 表示禁用超时,仅按数量触发
11 | BatchTimeout time.Duration
12 |
13 | // Async 是否使用异步生产者(推荐 true)
14 | Async bool
15 | }
16 |
17 | func DefaultProducerConfig() *ProducerConfig {
18 | return &ProducerConfig{
19 | BatchSize: 100,
20 | BatchTimeout: 5 * time.Second,
21 | Async: true, // 默认异步
22 | }
23 | }
24 |
25 | func (c *ProducerConfig) Validate() {
26 | if c.BatchSize <= 0 {
27 | c.BatchSize = 100
28 | }
29 | if c.BatchTimeout < 0 {
30 | c.BatchTimeout = 5 * time.Second
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/hashx/passwdX/passwordx.go:
--------------------------------------------------------------------------------
1 | package passwdX
2 |
3 | import "golang.org/x/crypto/bcrypt"
4 |
5 | // PasswdJiaMi cost 的取值范围是 4 到 31【默认10】,表示 2^cost 次加密循环【注意取值莫要太大影响cpu计算】。
6 | // 原始明文密码(byte 形式)不应超过 72 字节,否则多余部分被忽略。校验时要校验这个长度
7 | // Cost 值 迭代次数(约) 安全性 速度
8 | // 4 16 ❌ 太弱 ⚡ 很快
9 | // 10 1,024 ✅ 推荐默认 🟡 适中
10 | // 12 4,096 🔐 更安全 🔽 较慢
11 | // 14 16,384 🔐🔐 高安全 🔻 很慢
12 | // 31 ~20亿 🤯 不现实 🐢 极慢
13 | func PasswdJiaMi(password string, cost int) (string, error) {
14 | p, err := bcrypt.GenerateFromPassword([]byte(password), cost)
15 | return string(p), err
16 | }
17 |
18 | // PasswdJieMi 验证密码,srcHashedPwd为密文,dstPwd为明文【err == nil,验证完成】
19 | func PasswdJieMi(srcHashedPwd, dstPwd string) error {
20 | return bcrypt.CompareHashAndPassword([]byte(srcHashedPwd), []byte(dstPwd))
21 | }
22 |
--------------------------------------------------------------------------------
/sliceX/diff.go:
--------------------------------------------------------------------------------
1 | package sliceX
2 |
3 | // DiffSet 差集,只支持 comparable 类型
4 | // 已去重
5 | // 并且返回值的顺序是不确定的
6 | func DiffSet[T comparable](src, dst []T) []T {
7 | srcMap := toMap[T](src)
8 | for _, val := range dst {
9 | delete(srcMap, val)
10 | }
11 |
12 | var ret = make([]T, 0, len(srcMap))
13 | for key := range srcMap {
14 | ret = append(ret, key)
15 | }
16 |
17 | return ret
18 | }
19 |
20 | // DiffSetFunc 差集,已去重
21 | // 你应该优先使用 DiffSet
22 | func DiffSetFunc[T any](src, dst []T, equal equalFunc[T]) []T {
23 | var ret = make([]T, 0, len(src))
24 | for _, val := range src {
25 | if !ContainsFunc[T](dst, func(src T) bool {
26 | return equal(src, val)
27 | }) {
28 | ret = append(ret, val)
29 | }
30 | }
31 | return deduplicateFunc[T](ret, equal)
32 | }
33 |
--------------------------------------------------------------------------------
/webx/ginx/middleware/limitX/counterLimiter.go:
--------------------------------------------------------------------------------
1 | package limitX
2 |
3 | import (
4 | "github.com/gin-gonic/gin"
5 | "net/http"
6 | "sync/atomic"
7 | )
8 |
9 | type CounterLimiter struct {
10 | cnt atomic.Int32 // 计数器
11 | threshold int32 // 阈值
12 | }
13 |
14 | // NewCounterBuilder 创建计数器限流算法
15 | func NewCounterBuilder(threshold int32) *CounterLimiter {
16 | return &CounterLimiter{threshold: threshold}
17 | }
18 |
19 | // Build 计数器限流算法
20 | func (c *CounterLimiter) Build() gin.HandlerFunc {
21 | return func(ctx *gin.Context) {
22 | // 请求进来,先占坑
23 | cnt := c.cnt.Add(1)
24 | defer func() {
25 | c.cnt.Add(-1)
26 | }()
27 | if cnt >= c.threshold {
28 | ctx.AbortWithStatus(http.StatusTooManyRequests)
29 | return
30 | }
31 | ctx.Next()
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/compare.go:
--------------------------------------------------------------------------------
1 | package hggTool
2 |
3 | // Comparator 用于比较两个对象的大小
4 | // - src < dst, 返回-1
5 | // - src = dst, 返回0
6 | // - src > dst, 返回1
7 | /*
8 | 不要返回任何其它值!
9 | */
10 | type Comparator[T any] func(src T, dst T) int
11 |
12 | // ComparatorRealNumber 用于比较两个对象的大小
13 | // - src < dst, 返回-1
14 | // - src = dst, 返回0
15 | // - src > dst, 返回1
16 | /*
17 | 不要返回任何其它值!
18 | */
19 | func ComparatorRealNumber[T RealNumber](src T, dst T) int {
20 | if src < dst {
21 | return -1
22 | } else if src == dst {
23 | return 0
24 | } else {
25 | return 1
26 | }
27 | }
28 |
29 | // RealNumber 实数
30 | // 绝大多数情况下,你都应该用这个来表达数字的含义
31 | type RealNumber interface {
32 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 |
33 | ~int | ~int8 | ~int16 | ~int32 | ~int64 |
34 | ~float32 | ~float64
35 | }
36 |
--------------------------------------------------------------------------------
/sliceX/slicex.go:
--------------------------------------------------------------------------------
1 | package sliceX
2 |
3 | type NumberOrString interface {
4 | int | int8 | int16 | int32 | int64 |
5 | uint | uint8 | uint16 | uint32 | uint64 | uintptr |
6 | float32 | float64 |
7 | string
8 | }
9 |
10 | // Max 切片里面最大值
11 | func Max[T NumberOrString](a []T) T {
12 | if len(a) < 1 {
13 | var s T
14 | return s
15 | }
16 | m := a[0]
17 | for _, v := range a {
18 | m = max(m, v)
19 | }
20 | return m
21 | }
22 |
23 | // Min 切片里面最小值
24 | func Min[T NumberOrString](a []T) T {
25 | if len(a) < 1 {
26 | var s T
27 | return s
28 | }
29 | m := a[0]
30 | for _, v := range a {
31 | m = min(m, v)
32 | }
33 | return m
34 | }
35 |
36 | // Sum 切片求和
37 | func Sum[T NumberOrString](s []T) T {
38 | var a T
39 | for _, v := range s {
40 | a += v
41 | }
42 | return a
43 | }
44 |
--------------------------------------------------------------------------------
/rpc/grpcx/observationX/observationX_help:
--------------------------------------------------------------------------------
1 | /*
2 | rpc可观测性相关
3 | 类似middleware中接入日志、监控等等
4 |
5 | 以滑动窗口限流器为例:使用
6 | // 创建滑动窗口限流器, 最多2/s个请求,多了就触发限流
7 | limit := NewSlidingWindowLimiter(time.Second, 2)
8 |
9 | =============================================
10 | // 创建grpc服务,注册限流拦截器
11 | gs := grpc.NewServer(
12 | grpc.UnaryInterceptor(limit.BuildServerInterceptor()),
13 | )
14 |
15 | 【需要有多个限流器的话】
16 | grpc.UnaryInterceptor
17 | 替换
18 | grpc.ChainUnaryInterceptor(
19 | limit.BuildServerInterceptor(),
20 | 日志、
21 | 监控、
22 | )
23 | =============================================
24 | */
--------------------------------------------------------------------------------
/DBx/mysqlX/gormx/dbMovex/myMovex/events/structTest.go:
--------------------------------------------------------------------------------
1 | package events
2 |
3 | import (
4 | "gitee.com/hgg_test/pkg_tool/v2/DBx/mysqlX/gormx/dbMovex/myMovex"
5 | )
6 |
7 | /*
8 | =======================
9 | 仅测试用的结构体
10 | =======================
11 | */
12 |
13 | type TestUser struct {
14 | Id int64 `gorm:"primaryKey, autoIncrement"`
15 | Name string `gorm:"column:nick_name;type:varchar(128);size:128"`
16 | Email string `gorm:"unique"`
17 | UpdatedAt int64
18 | Ctime int64
19 | Utime int64
20 | }
21 |
22 | func (i TestUser) ID() int64 {
23 | return i.Id
24 | }
25 |
26 | func (i TestUser) CompareTo(dst myMovex.Entity) bool {
27 | val, ok := dst.(TestUser)
28 | if !ok {
29 | return false
30 | }
31 | return i == val
32 | }
33 | func (i TestUser) Types() string {
34 | return "TestUser"
35 | }
36 |
--------------------------------------------------------------------------------
/DBx/mysqlX/gormx/dbMovex/myMovex/help_doc.txt:
--------------------------------------------------------------------------------
1 | 基于gorm数据迁移方案
2 |
3 | 更方便处理,调用者可直接使用scheduler包下,启用http接口控制迁移过程
4 | 包中的ginx.Wrap和ginx.WrapBody
5 | 需启动ginx.InitCounter()和ginx.NewLogMdlHandlerFunc()依赖,注入进去gin框架【即启动gin服务时,初始化gin.default即可启动两个方法即可】
6 |
7 | 1、都需要先替换默认的*gorm.db,初始化doubleWritePoolx.NewDoubleWritePool()【替换业务代码中的gorm.db注入, scheduler包下的NewScheduler进行双写具体逻辑】
8 | db, err := gorm.Open(mysql.New(mysql.Config{Conn: doubleWritePoolx.NewDoubleWritePool(),}))
9 | 2、双写逻辑scheduler.NewScheduler()
10 | 1、此部分可由scheduler.NewScheduler(),可集成代码中
11 | 2、也可直接集成gin,进行调用切换双写方式逻辑,scheduler.NewScheduler()初始化后--》scheduler.RegisterRoutes()
12 | 3、validator.NewValidator上报不一致数据至kafka消息队列,削峰、解耦。
13 | 1、 2部分双写逻辑scheduler.NewScheduler()全量/增量校验, 有自动上报部分调用validator.NewValidator
14 |
15 | 正常1、---》2、即可【2内部调用3】
16 |
17 |
18 | 亦或者某些自定义跳转,可参考测试用例
--------------------------------------------------------------------------------
/DBx/localCahceX/types.go:
--------------------------------------------------------------------------------
1 | package localCahceX
2 |
3 | import (
4 | "time"
5 | )
6 |
7 | type Key interface {
8 | uint64 | string | []byte | byte | int | uint | int32 | uint32 | int64
9 | }
10 |
11 | /*
12 | 本地缓存:
13 | cost权重建议:【Set时的cost权重配置建议】【一般用于缓存过期时间没到,但是内存阈值到了,根据weight权重释放一些本地缓存】
14 | 【场景:】 【推荐 cost 设置:】
15 | 缓存 []byte、string cost = len(value)
16 | 缓存结构体 估算大小,或统一设为 1(按条数)
17 | 缓存不同重要性数据 按业务权重设置 cost(VIP=10, 普通=1)
18 | 不确定大小 固定 cost=1,靠 MaxCost 控制总条数
19 | */
20 |
21 | // CacheLocalIn 抽象缓存接口
22 | type CacheLocalIn[K Key, V any] interface {
23 | Set(key K, value V, ttl time.Duration, weight int64) error
24 | Get(key K) (V, error)
25 | Del(key K) error
26 |
27 | // WaitSet 等待值通过缓冲区【除非有重要的缓存,实时性要求特别较高,要堵塞直至等待缓冲写入通过,否则不用管 也就毫秒纳秒级甚至还不到】
28 | WaitSet()
29 | // Close 关闭会停止所有goroutines并关闭所有频道。【ristretto实现一定记着】 defer cache.Close()
30 | Close()
31 | }
32 |
--------------------------------------------------------------------------------
/logx/zerologx/logx_test.go:
--------------------------------------------------------------------------------
1 | package zerologx
2 |
3 | import (
4 | "gitee.com/hgg_test/pkg_tool/v2/logx"
5 | "github.com/rs/zerolog"
6 | "os"
7 | "testing"
8 | "time"
9 | )
10 |
11 | func TestNewZeroLogger(t *testing.T) {
12 | // InitLog 初始化zerolog日志模块
13 | zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
14 | // Level日志级别【可以考虑作为参数传】,测试传zerolog.InfoLevel/NoLevel不打印
15 | // 模块化: Str("module", "userService模块")
16 | logger := zerolog.New(os.Stderr).Level(zerolog.DebugLevel).With().Timestamp().Logger()
17 |
18 | l := NewZeroLogger(&logger)
19 | t.Log(time.Now().UnixMilli())
20 | // 当日志级别为,警告war和错误err 级别时,调用堆栈
21 |
22 | l.Info("初始化zerolog日志模块", logx.Int64("id", 1), logx.String("name", "hgg"))
23 | l.Warn("初始化zerolog日志模块", logx.Int64("id", 1), logx.String("name", "hgg"))
24 | l.Error("初始化zerolog日志模块", logx.Int64("id", 1), logx.String("name", "hgg"))
25 | t.Log(time.Now().UnixMilli())
26 | }
27 |
--------------------------------------------------------------------------------
/webx/ginx/middleware/jwtX2/types.go:
--------------------------------------------------------------------------------
1 | package jwtX2
2 |
3 | import "github.com/gin-gonic/gin"
4 |
5 | //go:generate mockgen -source=./types.go -package=jwtxmocks -destination=mocks/jwtHdl.mock.go JwtHandlerx
6 | type JwtHandlerx interface {
7 | // SetToken 登录时设置长短 Token
8 | SetToken(ctx *gin.Context, userId int64, name string, ssid string) (*UserClaims, error)
9 | // ExtractToken 从 Authorization: Bearer 中提取 token
10 | ExtractToken(ctx *gin.Context) string
11 | // VerifyToken 验证 Access Token(短 Token)
12 | VerifyToken(ctx *gin.Context) (*UserClaims, error)
13 | // LongVerifyToken 验证 Refresh Token(长 Token)
14 | LongVerifyToken(ctx *gin.Context) (*UserClaims, error)
15 | // RefreshToken 刷新 Token,可选新 ssid(若为空则复用原 ssid)
16 | RefreshToken(ctx *gin.Context, newSsid string) (*UserClaims, error)
17 | // DeleteToken 退出登录:仅删除当前会话 Token
18 | DeleteToken(ctx *gin.Context) (*UserClaims, error)
19 | }
20 |
--------------------------------------------------------------------------------
/rpc/grpcx/circuitbreaker/circuitbreaker_help:
--------------------------------------------------------------------------------
1 | /*
2 | 熔断拦截器:
3 | 使用的是kratos的一个子项目https://github.com/go-kratos/aegis
4 |
5 | 以滑动窗口限流器为例:使用
6 | // 创建滑动窗口限流器, 最多2/s个请求,多了就触发限流
7 | limit := NewSlidingWindowLimiter(time.Second, 2)
8 | =============================================
9 | // 创建grpc服务,注册限流拦截器
10 | gs := grpc.NewServer(
11 | grpc.UnaryInterceptor(limit.BuildServerInterceptor()),
12 | )
13 |
14 | 【需要有多个限流器的话】
15 | grpc.UnaryInterceptor
16 | 替换
17 | grpc.ChainUnaryInterceptor(
18 | limit.BuildServerInterceptor(),
19 | 日志、
20 | 监控、
21 | )
22 | =============================================
23 | */
--------------------------------------------------------------------------------
/webx/ginx/middleware/jwtx/types.go:
--------------------------------------------------------------------------------
1 | package jwtx
2 |
3 | import "github.com/gin-gonic/gin"
4 |
5 | // Deprecated: jwtx此包弃用,此方法将在未来版本中删除,请使用jwtX2包【可无缝替换jwtX2包实现】
6 | // JwtHandlerx 方法
7 | // - 一般情况下,只用登录、登出、验证、刷新四个token方法
8 | //
9 | //go:generate mockgen -source=./types.go -package=jwtxmocks -destination=mocks/jwtHdl.mock.go JwtHandlerx
10 | type JwtHandlerx interface {
11 | // SetToken 生成 JwtToken
12 | SetToken(ctx *gin.Context, userId int64, name string, ssid string) (*UserClaims, error)
13 | // ExtractToken 获取 JwtToken
14 | ExtractToken(ctx *gin.Context) string
15 | // VerifyToken 验证 JwtToken
16 | VerifyToken(ctx *gin.Context) (*UserClaims, error)
17 | LongVerifyToken(ctx *gin.Context) (*RefreshUserClaims, error)
18 | // RefreshToken 刷新 JwtToken 过期时间
19 | RefreshToken(ctx *gin.Context, ssid string) (*UserClaims, error)
20 | // DeleteToken 删除 JwtToken
21 | DeleteToken(ctx *gin.Context) (*UserClaims, error)
22 | }
23 |
--------------------------------------------------------------------------------
/webx/ginx/middleware/jwtx/jwtx_test.go:
--------------------------------------------------------------------------------
1 | package jwtx
2 |
3 | import (
4 | "testing"
5 | )
6 |
7 | func TestJwtxToken(t *testing.T) {
8 | //conf := JwtxMiddlewareGinxConfig{
9 | // SigningMethod: jwt.SigningMethodHS512,
10 | // ExpiresIn: time.Second * 60,
11 | // LongExpiresIn: time.Minute * 24,
12 | // JwtKey: []byte("123123123qwe"),
13 | // LongJwtKey: []byte("qweqwewqdsads21"),
14 | // HeaderJwtTokenKey: "",
15 | // LongHeaderJwtTokenKey: "",
16 | //}
17 |
18 | //j := NewJwtxMiddlewareGinx(&JwtxMiddlewareGinxConfig{
19 | // SigningMethod: jwt.SigningMethodHS512,
20 | // DurationExpiresIn: time.Second * 60,
21 | // LongDurationExpiresIn: time.Second * 60 * 2,
22 | // JwtKey: []byte("123123123qwe"),
23 | // LongJwtKey: []byte("qweqwewqdsads21"),
24 | // HeaderJwtTokenKey: "duan-jwt-token",
25 | // LongHeaderJwtTokenKey: "",
26 | //})
27 | //t.Log(j)
28 |
29 | }
30 |
--------------------------------------------------------------------------------
/webx/ginx/middleware/ginPrometheusx/help_docx:
--------------------------------------------------------------------------------
1 | // 业务中需有专门给 prometheus 用的端口,用于Prometheus向业务中拉去数据用的端口
2 | // Prometheus知道去8081拉去端口,是因为在Prometheus服务中的Prometheus.yaml配置文件中标注,去哪拉取数据
3 | // 【不要使用gin和业务端口混合】
4 | func initPrometheus() {
5 | go func() {
6 | http.Handle("/metrics", promhttp.Handler())
7 | err := http.ListenAndServe(":8081", nil)
8 | if err != nil {
9 | log.Println("【error】prometheus启动失败", err)
10 | }
11 | }()
12 | }
13 |
14 |
15 | /**/
16 | func NewPrometheusHttpIntrMiddleware() ginPrometheusx.PrometheusGinBuilder {
17 | budConf := ginPrometheusx.BuilderConfig{
18 | Namespace: "hgg",
19 | Subsystem: "hgg_Subsystem",
20 | Name: "hgg_Name",
21 | InstanceId: "123123",
22 | Help: "hgg_HTTP接口统计",
23 | }
24 | return ginPrometheusx.NewBuilder(budConf)
25 | }
26 |
27 | // middleware中间件可以集成
28 | ph := middleware2.NewPrometheusHttpIntrMiddleware() // Prometheus-SummaryVec统计HTTP接口响应时间
29 | // 测试Prometheus响应时间
30 | ph.BuildActiveRequest(),
31 | ph.BuildResponseTime(),
--------------------------------------------------------------------------------
/channelx/messageQueuex/types.go:
--------------------------------------------------------------------------------
1 | // Package messageQueuex 消息队列抽象接口
2 | package messageQueuex
3 |
4 | import "context"
5 |
6 | // Deprecated: messageQueuex此包弃用,此方法将在未来版本中删除,请使用mqX包
7 | type KeyOrTopic struct {
8 | Key []byte
9 | Topic string
10 | // .......
11 | }
12 |
13 | // Deprecated: messageQueuex此包弃用,此方法将在未来版本中删除,请使用mqX包
14 | type Tp KeyOrTopic
15 |
16 | // Deprecated: messageQueuex此包弃用,此方法将在未来版本中删除,请使用mqX包
17 | // ProducerIn 生产者抽象接口
18 | // - 当使用 sarama.NewSyncProducer() 创建生产者时,请使用 NewSaramaProducerStr()
19 | // - 请在main函数最顶层defer住生产者的Producer.Close(),优雅关闭防止goroutine泄露
20 | type ProducerIn[ProducerTyp any] interface {
21 | SendMessage(ctx context.Context, keyOrTopic Tp, value []byte) error
22 | // CloseProducer 关闭生产者Producer,请在main函数最顶层defer住生产者的Producer.Close(),优雅关闭防止goroutine泄露
23 | CloseProducer() error
24 | }
25 |
26 | // Deprecated: messageQueuex此包弃用,此方法将在未来版本中删除,请使用mqX包
27 | // ConsumerIn 消费者抽象接口
28 | type ConsumerIn interface {
29 | ReceiveMessage(ctx context.Context, keyOrTopic []Tp) error
30 | }
31 |
--------------------------------------------------------------------------------
/rpc/grpcx/circuitbreaker/interceptor.go:
--------------------------------------------------------------------------------
1 | package circuitbreaker
2 |
3 | import (
4 | "context"
5 | "github.com/go-kratos/aegis/circuitbreaker"
6 | "google.golang.org/grpc"
7 | "google.golang.org/grpc/codes"
8 | "google.golang.org/grpc/status"
9 | )
10 |
11 | type InterceptorBuilder struct {
12 | breaker circuitbreaker.CircuitBreaker
13 | }
14 |
15 | func (b *InterceptorBuilder) BuildServerUnaryInterceptor() grpc.UnaryServerInterceptor {
16 | return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
17 | err = b.breaker.Allow() // 熔断器允许通过
18 | if err == nil {
19 | resp, err = handler(ctx, req)
20 | if err == nil {
21 | b.breaker.MarkSuccess()
22 | } else {
23 | // circuitbreaker.CircuitBreaker,需要标记成功或者失败
24 | // 更加仔细检测,只有真实代表服务端出现故障的,才 mark failed
25 | b.breaker.MarkFailed()
26 | }
27 | return
28 | } else {
29 | b.breaker.MarkFailed()
30 | return nil, status.Errorf(codes.Unavailable, "熔断")
31 | }
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/webx/ginx/middleware/sessionx/sessionGinBuild.go:
--------------------------------------------------------------------------------
1 | package sessionx
2 |
3 | import (
4 | "github.com/gin-contrib/sessions"
5 | "github.com/gin-gonic/gin"
6 | )
7 |
8 | type SessionMiddlewareGinx interface {
9 | Sessionx(name string) gin.HandlerFunc
10 | }
11 |
12 | type middlewareGinx struct {
13 | Store sessions.Store
14 | }
15 |
16 | // NewMiddlewareGinx 【注册到gin中间件, server.use()】【store】需要【NewStore】一个【cookie实现就cookie.NewStore,redis实现就Redis.NewStore。。。】
17 | //
18 | // 【业务逻辑一般设置】
19 | // sess := sessions.Default(ctx)
20 | // sess.Set("userId", u.Id) // 设置用户id,把用户Id放入session中
21 | // sess.Options(sessions.Options{
22 | // MaxAge: 600, // 设置session过期时间,单位为秒
23 | // })
24 | // err := sess.Save() // 保存session,save才会以上sess设置才生效
25 | func NewSessionMiddlewareGinx(store sessions.Store) SessionMiddlewareGinx {
26 | return &middlewareGinx{Store: store}
27 | }
28 |
29 | // SessionRedis 基于redis实现【注册到gin中间件, server.use()】
30 | func (m *middlewareGinx) Sessionx(name string) gin.HandlerFunc {
31 | return sessions.Sessions(name, m.Store)
32 | }
33 |
--------------------------------------------------------------------------------
/syncX/limitPool.go:
--------------------------------------------------------------------------------
1 | package syncX
2 |
3 | import (
4 | "sync/atomic"
5 | )
6 |
7 | // LimitPool 是对 Pool 的简单封装允许用户通过控制一段时间内对Pool的令牌申请次数来间接控制Pool中对象的内存总占用量
8 | type LimitPool[T any] struct {
9 | pool *Pool[T]
10 | tokens *atomic.Int32
11 | }
12 |
13 | // NewLimitPool 创建一个 LimitPool 实例
14 | // maxTokens 表示一段时间内的允许发放的最大令牌数
15 | // factory 必须返回 T 类型的值,并且不能返回 nil
16 | func NewLimitPool[T any](maxTokens int, factory func() T) *LimitPool[T] {
17 | var tokens atomic.Int32
18 | tokens.Add(int32(maxTokens))
19 | return &LimitPool[T]{
20 | pool: NewPool[T](factory),
21 | tokens: &tokens,
22 | }
23 | }
24 |
25 | // Get 取出一个元素
26 | // 如果返回值是 true,则代表确实从 Pool 里面取出来了一个
27 | // 否则是新建了一个
28 | func (l *LimitPool[T]) Get() (T, bool) {
29 | // 使用循环来避免竞态条件
30 | for {
31 | current := l.tokens.Load()
32 | if current <= 0 {
33 | var zero T
34 | return zero, false
35 | }
36 | // 尝试原子地减少令牌计数
37 | if l.tokens.CompareAndSwap(current, current-1) {
38 | // 成功获取令牌,从池中获取对象
39 | return l.pool.Get(), true
40 | }
41 | // 如果CAS失败,说明有其他goroutine修改了令牌计数,重试
42 | }
43 | }
44 |
45 | // Put 放回去一个元素
46 | func (l *LimitPool[T]) Put(t T) {
47 | l.pool.Put(t)
48 | l.tokens.Add(1)
49 | }
50 |
--------------------------------------------------------------------------------
/logx/zaplogx/zapLogger.go:
--------------------------------------------------------------------------------
1 | package zaplogx
2 |
3 | import (
4 | "gitee.com/hgg_test/pkg_tool/v2/logx"
5 | "go.uber.org/zap"
6 | )
7 |
8 | type ZapLogger struct {
9 | l *zap.Logger
10 | }
11 |
12 | // func NewZapLogger(l *zap.Logger) *ZapLogger {
13 | // return &ZapLogger{
14 | // l: l,
15 | // }
16 | // }
17 | func NewZapLogger(l *zap.Logger) logx.Loggerx {
18 | return &ZapLogger{
19 | l: l,
20 | }
21 | }
22 |
23 | func (z *ZapLogger) Debug(msg string, fields ...logx.Field) {
24 | z.l.Debug(msg, z.toArgs(fields)...)
25 | }
26 |
27 | func (z *ZapLogger) Info(msg string, fields ...logx.Field) {
28 | z.l.Info(msg, z.toArgs(fields)...)
29 | }
30 |
31 | func (z *ZapLogger) Warn(msg string, fields ...logx.Field) {
32 | z.l.Warn(msg, z.toArgs(fields)...)
33 | }
34 |
35 | func (z *ZapLogger) Error(msg string, fields ...logx.Field) {
36 | z.l.Error(msg, z.toArgs(fields)...)
37 | }
38 |
39 | // 转换参数
40 | func (z *ZapLogger) toArgs(args []logx.Field) []zap.Field { // 适配
41 | res := make([]zap.Field, 0, len(args)) // 创建一个空切片,预分配内存,创建一个切片[]zap.Field,第一个参数是切片的容量,第二个参数是切片的长度
42 | for _, arg := range args {
43 | res = append(res, zap.Any(arg.Key, arg.Value))
44 | }
45 | return res
46 | }
47 |
--------------------------------------------------------------------------------
/syncX/lock/redisLock/redsyncx/lock_cron_mysql/domain/db.go:
--------------------------------------------------------------------------------
1 | package domain
2 |
3 | // TaskType 任务类型
4 | type TaskType string
5 |
6 | const (
7 | TaskTypeFunction TaskType = "function"
8 | TaskTypeHTTP TaskType = "http"
9 | TaskTypeGRPC TaskType = "grpc"
10 | )
11 |
12 | // JobStatus 任务状态
13 | type JobStatus string
14 |
15 | const (
16 | // JobStatusActive 启用
17 | JobStatusActive JobStatus = "active"
18 | // JobStatusRunning 运行中
19 | JobStatusRunning JobStatus = "running"
20 | // JobStatusPaused 暂停
21 | JobStatusPaused JobStatus = "paused"
22 | // JobStatusDeleted 删除
23 | JobStatusDeleted JobStatus = "deleted"
24 | )
25 |
26 | // CronJob 定时任务
27 | type CronJob struct {
28 | ID int64 `json:"id"`
29 | CronId int64 `json:"cronId"`
30 | // 任务名
31 | Name string `json:"name"`
32 | // 任务描述
33 | Description string `json:"description"`
34 | // 任务执行表达式
35 | CronExpr string `json:"cronExpr"`
36 | // 任务类型
37 | TaskType TaskType `json:"taskType"`
38 |
39 | // 任务状态
40 | Status JobStatus `json:"status"`
41 | // 任务最大重试次数
42 | MaxRetry int `json:"maxRetry"`
43 | // 任务超时时间(秒)
44 | Timeout int `json:"timeout"`
45 |
46 | Ctime float64 `json:"ctime"`
47 | Utime float64 `json:"utime"`
48 | }
49 |
--------------------------------------------------------------------------------
/rpc/grpcx/limiter/counterLiniter/counterLimiter.go:
--------------------------------------------------------------------------------
1 | package counterLiniter
2 |
3 | import (
4 | "context"
5 | "sync/atomic"
6 |
7 | "google.golang.org/grpc"
8 | "google.golang.org/grpc/codes"
9 | "google.golang.org/grpc/status"
10 | )
11 |
12 | type CounterLimiter struct {
13 | cnt atomic.Int32 // 当前正在处理的请求计数器
14 | threshold int32 // 阈值
15 | }
16 |
17 | // NewCounterLimiter 创建计数器限流算法
18 | func NewCounterLimiter(threshold int32) *CounterLimiter {
19 | return &CounterLimiter{threshold: threshold}
20 | }
21 |
22 | // BuildServerInterceptor 计数器限流算法
23 | func (c *CounterLimiter) BuildServerInterceptor() grpc.UnaryServerInterceptor {
24 | return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
25 | // 请求进来,先增加计数器
26 | cnt := c.cnt.Add(1)
27 |
28 | // 检查是否超过阈值
29 | if cnt > c.threshold {
30 | // 超过阈值,回滚计数器增加(减少1)
31 | c.cnt.Add(-1)
32 | // 触发限流
33 | return nil, status.Errorf(codes.ResourceExhausted, "限流:当前并发数 %d,阈值 %d", cnt-1, c.threshold)
34 | }
35 |
36 | // 请求被接受处理,在处理完成后减少计数器
37 | defer func() {
38 | c.cnt.Add(-1)
39 | }()
40 |
41 | // 处理请求
42 | resp, err = handler(ctx, req)
43 | return
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/rpc/grpcx/observationX/builder.go:
--------------------------------------------------------------------------------
1 | package observationX
2 |
3 | import (
4 | "context"
5 | "google.golang.org/grpc/metadata"
6 | "google.golang.org/grpc/peer"
7 | "net"
8 | "strings"
9 | )
10 |
11 | type Builder struct {
12 | }
13 |
14 | // PeerName 获取对端应用名称
15 | func (b *Builder) PeerName(ctx context.Context) string {
16 | return b.grpcHeaderValue(ctx, "app")
17 | }
18 |
19 | // PeerIP 获取对端ip
20 | func (b *Builder) PeerIP(ctx context.Context) string {
21 | // 如果在 ctx 里面传入。或者说客户端里面设置了,就直接用它设置的
22 | // 有些时候你经过网关之类的东西,就需要客户端主动设置,防止后面拿到网关的 IP
23 | clientIP := b.grpcHeaderValue(ctx, "client-ip")
24 | if clientIP != "" {
25 | return clientIP
26 | }
27 |
28 | // 从grpc里取对端ip
29 | pr, ok2 := peer.FromContext(ctx)
30 | if !ok2 {
31 | return ""
32 | }
33 | if pr.Addr == net.Addr(nil) {
34 | return ""
35 | }
36 | addSlice := strings.Split(pr.Addr.String(), ":")
37 | if len(addSlice) > 1 {
38 | return addSlice[0]
39 | }
40 | return ""
41 | }
42 |
43 | func (b *Builder) grpcHeaderValue(ctx context.Context, key string) string {
44 | if key == "" {
45 | return ""
46 | }
47 | md, ok := metadata.FromIncomingContext(ctx)
48 | if !ok {
49 | return ""
50 | }
51 | return strings.Join(md.Get(key), ";")
52 | }
53 |
--------------------------------------------------------------------------------
/DBx/mysqlX/gormx/dbMovex/myMovex/messageQueue/consumerx/fixer.go:
--------------------------------------------------------------------------------
1 | package consumerx
2 |
3 | import (
4 | "context"
5 | "gitee.com/hgg_test/pkg_tool/v2/DBx/mysqlX/gormx/dbMovex/myMovex"
6 | "gorm.io/gorm"
7 | "gorm.io/gorm/clause"
8 | )
9 |
10 | type OverrideFixer[T myMovex.Entity] struct {
11 | base *gorm.DB
12 | target *gorm.DB
13 |
14 | columns []string
15 | }
16 |
17 | func NewOverrideFixer[T myMovex.Entity](base *gorm.DB, target *gorm.DB) (*OverrideFixer[T], error) {
18 | rows, err := base.Model(new(T)).Order("id").Rows()
19 | if err != nil {
20 | return nil, err
21 | }
22 | columns, err := rows.Columns()
23 | return &OverrideFixer[T]{base: base, target: target, columns: columns}, err
24 | }
25 | func (f *OverrideFixer[T]) Fix(ctx context.Context, id int64) error {
26 | // 最最粗暴的
27 | var t T
28 | err := f.base.WithContext(ctx).Where("id=?", id).First(&t).Error
29 | switch err {
30 | case gorm.ErrRecordNotFound:
31 | return f.target.WithContext(ctx).Model(&t).Delete("id = ?", id).Error
32 | case nil:
33 | // upsert
34 | return f.target.WithContext(ctx).Clauses(clause.OnConflict{
35 | DoUpdates: clause.AssignmentColumns(f.columns),
36 | }).Create(&t).Error
37 | default:
38 | return err
39 | }
40 | }
41 |
--------------------------------------------------------------------------------
/serviceLogicX/rankingListX/rankingServiceCronX/cron.go:
--------------------------------------------------------------------------------
1 | package rankingServiceCronX
2 |
3 | import (
4 | "gitee.com/hgg_test/pkg_tool/v2/logx"
5 | "gitee.com/hgg_test/pkg_tool/v2/serviceLogicX/cronX"
6 | "gitee.com/hgg_test/pkg_tool/v2/serviceLogicX/rankingListX"
7 | "gitee.com/hgg_test/pkg_tool/v2/syncX/lock/redisLock/redsyncx"
8 | )
9 |
10 | // RankingServiceCron 分布式锁+定时任务服务封装
11 | type RankingServiceCron[T any] struct {
12 | redSync *redsyncx.LockRedsync // 分布式锁
13 | cron *cronX.CronX // 定时任务服务
14 | rankingListX rankingListX.RankingTopN[T] // 榜单服务
15 | logx logx.Loggerx
16 | }
17 |
18 | // NewRankingServiceCron 创建定时任务服务
19 | // - 需优先调用函数设置任务表达式 SetExpr
20 | // - 需优先调用函数设置任务执行业务逻辑 SetCmd
21 | func NewRankingServiceCron[T any](redSync *redsyncx.LockRedsync, cron *cronX.CronX, logx logx.Loggerx) *RankingServiceCron[T] {
22 | return &RankingServiceCron[T]{
23 | redSync: redSync,
24 | cron: cron,
25 | logx: logx,
26 | }
27 | }
28 |
29 | func (r *RankingServiceCron[T]) Start() error {
30 | // 启动分布式锁
31 | r.redSync.Start()
32 | // 启动定时任务服务
33 | return r.cron.Start()
34 | }
35 |
36 | func (r *RankingServiceCron[T]) Stop() {
37 | r.cron.StopCron()
38 | r.redSync.Stop()
39 | }
40 |
--------------------------------------------------------------------------------
/webx/ginx/middleware/limitX/fixedWindow.go:
--------------------------------------------------------------------------------
1 | package limitX
2 |
3 | import (
4 | "github.com/gin-gonic/gin"
5 | "net/http"
6 | "sync"
7 | "time"
8 | )
9 |
10 | // FixedWindowLimiter 固定窗口限流算法
11 | type FixedWindowLimiter struct {
12 | window time.Duration // 窗口大小
13 | lastWindowStart time.Time // 窗口开始时间
14 | cnt int // 窗口内请求数
15 | threshold int // 阈值
16 | lock sync.Mutex // 保护临界资源
17 | }
18 |
19 | // NewFixedWindowBuilder 创建固定窗口限流算法
20 | // - window 窗口大小
21 | // - threshold 阈值
22 | func NewFixedWindowBuilder(window time.Duration, threshold int) *FixedWindowLimiter {
23 | return &FixedWindowLimiter{window: window, lastWindowStart: time.Now(), cnt: 0, threshold: threshold}
24 | }
25 |
26 | // Build 固定窗口限流算法
27 | func (c *FixedWindowLimiter) Build() gin.HandlerFunc {
28 | return func(ctx *gin.Context) {
29 | c.lock.Lock() // 加锁
30 | now := time.Now()
31 | // 判断是否到了新的窗口
32 | if now.After(c.lastWindowStart.Add(c.window)) {
33 | c.cnt = 0
34 | c.lastWindowStart = now
35 | }
36 | cnt := c.cnt + 1
37 | c.lock.Unlock() // 解锁
38 | if cnt >= c.threshold {
39 | ctx.AbortWithStatus(http.StatusTooManyRequests)
40 | return
41 | }
42 | ctx.Next()
43 | }
44 | }
45 |
--------------------------------------------------------------------------------
/syncX/atomicx/example_test.go:
--------------------------------------------------------------------------------
1 | package atomicx
2 |
3 | import (
4 | "fmt"
5 | )
6 |
7 | func ExampleNewValue() {
8 | val := NewValue[int]()
9 | data := val.Load()
10 | fmt.Println(data)
11 | // Output:
12 | // 0
13 | }
14 |
15 | func ExampleNewValueOf() {
16 | val := NewValueOf[int](123)
17 | data := val.Load()
18 | fmt.Println(data)
19 | // Output:
20 | // 123
21 | }
22 |
23 | func ExampleValue_Load() {
24 | val := NewValueOf[int](123)
25 | data := val.Load()
26 | fmt.Println(data)
27 | // Output:
28 | // 123
29 | }
30 |
31 | func ExampleValue_Store() {
32 | val := NewValueOf[int](123)
33 | data := val.Load()
34 | fmt.Println(data)
35 | val.Store(456)
36 | data = val.Load()
37 | fmt.Println(data)
38 | // Output:
39 | // 123
40 | // 456
41 | }
42 |
43 | func ExampleValue_Swap() {
44 | val := NewValueOf[int](123)
45 | oldVal := val.Swap(456)
46 | newVal := val.Load()
47 | fmt.Printf("old: %d, new: %d", oldVal, newVal)
48 | // Output:
49 | // old: 123, new: 456
50 | }
51 |
52 | func ExampleValue_CompareAndSwap() {
53 | val := NewValueOf[int](123)
54 | swapped := val.CompareAndSwap(123, 456)
55 | fmt.Println(swapped)
56 |
57 | swapped = val.CompareAndSwap(455, 459)
58 | fmt.Println(swapped)
59 | // Output:
60 | // true
61 | // false
62 | }
63 |
--------------------------------------------------------------------------------
/syncX/rwMutexMap_test.go:
--------------------------------------------------------------------------------
1 | package syncX
2 |
3 | import (
4 | "github.com/stretchr/testify/assert"
5 | "strconv"
6 | "testing"
7 | )
8 |
9 | // TestWriteRWMutexMap 测试写入并发安全的Map
10 | func TestWriteRWMutexMap(t *testing.T) {
11 | testCases := []struct {
12 | name string
13 | capacity, maxCapacity int
14 |
15 | key string
16 | value int
17 |
18 | wantBool bool
19 | }{
20 | {
21 | name: "无容量限制添加成功",
22 | capacity: 0,
23 | maxCapacity: 0,
24 | key: "key1",
25 | value: 1,
26 |
27 | wantBool: true,
28 | },
29 | {
30 | name: "有容量限制添加成功",
31 | capacity: 0,
32 | maxCapacity: 2,
33 | key: "key1",
34 | value: 1,
35 |
36 | wantBool: true,
37 | },
38 | {
39 | name: "有容量限制,添加失败,超出最大容量",
40 | capacity: 0,
41 | maxCapacity: 1,
42 | key: "key1",
43 | value: 1,
44 |
45 | wantBool: false,
46 | },
47 | }
48 | for _, tc := range testCases {
49 | t.Run(tc.name, func(t *testing.T) {
50 | m := NewRWMutexMap[string, int](tc.capacity, tc.maxCapacity)
51 | ok := m.Set(tc.key, tc.value)
52 | assert.True(t, ok)
53 | ok = m.Set(tc.key+strconv.Itoa(1), tc.value)
54 | assert.Equal(t, tc.wantBool, ok)
55 | })
56 | }
57 | }
58 |
--------------------------------------------------------------------------------
/channelx/messageQueuex/saramax/saramaConsumerx/consumer.go:
--------------------------------------------------------------------------------
1 | package saramaConsumerx
2 |
3 | import (
4 | "context"
5 | "gitee.com/hgg_test/pkg_tool/v2/channelx/messageQueuex"
6 | "github.com/IBM/sarama"
7 | )
8 |
9 | // Deprecated: messageQueuex此包弃用,此方法将在未来版本中删除,请使用mqX包
10 | type ConsumerGroup struct {
11 | // ConsumerGroup sararma的消费者组接口逻辑
12 | ConsumerGroup sarama.ConsumerGroup
13 | // ConsumerGroupHandlers 消费者组处理逻辑
14 | ConsumerGroupHandlers sarama.ConsumerGroupHandler
15 | }
16 |
17 | // Deprecated: messageQueuex此包弃用,此方法将在未来版本中删除,请使用mqX包
18 | // NewConsumerIn
19 | // - consumerGroup New一个sarama.NewConsumerGroup
20 | // - ConsumerGroupHandlers 可自信封装消费者组处理逻辑【也可使用hgg的ConsumerGroupHandlerx包下默认封装的逻辑】
21 | func NewConsumerIn(consumerGroup sarama.ConsumerGroup, ConsumerGroupHandlers sarama.ConsumerGroupHandler) messageQueuex.ConsumerIn {
22 | c := &ConsumerGroup{
23 | ConsumerGroup: consumerGroup,
24 | ConsumerGroupHandlers: ConsumerGroupHandlers,
25 | }
26 | return c
27 | }
28 |
29 | func (c *ConsumerGroup) ReceiveMessage(ctx context.Context, keyOrTopic []messageQueuex.Tp) error {
30 | var topic []string
31 | for _, v := range keyOrTopic {
32 | topic = append(topic, v.Topic)
33 | }
34 | return c.ConsumerGroup.Consume(ctx, topic, c.ConsumerGroupHandlers)
35 | }
36 |
--------------------------------------------------------------------------------
/syncX/atomicx/atomic.go:
--------------------------------------------------------------------------------
1 | package atomicx
2 |
3 | import "sync/atomic"
4 |
5 | // Value 是对 atomic.Value 的泛型封装
6 | // 相比直接使用 atomic.Value,
7 | // - Load 方法大概开销多了 0.5 ns
8 | // - Store 方法多了不到 2 ns
9 | // - Swap 方法多了 14 ns
10 | // - CompareAndSwap 在失败的情况下,会多 2 ns,成功的时候多了 0.3 ns
11 | // 使用 NewValue 或者 NewValueOf 来创建实例
12 | type Value[T any] struct {
13 | val atomic.Value
14 | }
15 |
16 | // NewValue 会创建一个 Value 对象,里面存放着 T 的零值
17 | // 注意,这个零值是带了类型的零值
18 | func NewValue[T any]() *Value[T] {
19 | var t T
20 | return NewValueOf[T](t)
21 | }
22 |
23 | // NewValueOf 会使用传入的值来创建一个 Value 对象
24 | func NewValueOf[T any](t T) *Value[T] {
25 | val := atomic.Value{}
26 | val.Store(t)
27 | return &Value[T]{
28 | val: val,
29 | }
30 | }
31 |
32 | // Load 会返回当前 Value 存放的值
33 | func (v *Value[T]) Load() (val T) {
34 | data := v.val.Load()
35 | val = data.(T)
36 | return
37 | }
38 |
39 | // Store 会将传入的值保存到 Value 中
40 | func (v *Value[T]) Store(val T) {
41 | v.val.Store(val)
42 | }
43 |
44 | // Swap 会将传入的值替换当前 Value 存放的值,并返回替换前的值
45 | func (v *Value[T]) Swap(new T) (old T) {
46 | data := v.val.Swap(new)
47 | old = data.(T)
48 | return
49 | }
50 |
51 | // CompareAndSwap 会将传入的值和当前 Value 存放的值进行比较,如果相同,替换当前 Value 存放的值,并返回替换结果
52 | func (v *Value[T]) CompareAndSwap(old, new T) (swapped bool) {
53 | return v.val.CompareAndSwap(old, new)
54 | }
55 |
--------------------------------------------------------------------------------
/registry/etcdx/service.go:
--------------------------------------------------------------------------------
1 | package etcdx
2 |
3 | import "context"
4 |
5 | // Service 实现 service
6 | type Service struct {
7 | UnimplementedUserServiceServer // 继承,实现service的时候【必须强制组合mented】,也是为了向后兼容
8 | Name string
9 | }
10 |
11 | func (s *Service) GetById(ctx context.Context, request *GetByIdRequest) (*GetByIdResponse, error) {
12 | //ctx, span := otel.Tracer("server_biz123").Start(ctx, "get_by_id123")
13 | //defer span.End()
14 | //ddl, ok := ctx.Deadline() // Deadline是一个时间点,表示某个时间点deadline,过了这个时间点,context就被取消
15 | //if ok {
16 | // res := ddl.Sub(time.Now()) // 计算时间差
17 | // log.Println(res.String())
18 | //}
19 | //time.Sleep(time.Millisecond * 100)
20 | return &GetByIdResponse{
21 | User: &User{
22 | Id: 123,
23 | Name: "hgg+" + s.Name,
24 | },
25 | }, nil
26 | }
27 |
28 | func (s *Service) GetById1(ctx context.Context, request *GetByIdRequest) (*GetByIdResponse, error) {
29 | //ctx, span := otel.Tracer("server_biz456").Start(ctx, "get_by_id456")
30 | //defer span.End()
31 | //ddl, ok := ctx.Deadline() // Deadline是一个时间点,表示某个时间点deadline,过了这个时间点,context就被取消
32 | //if ok {
33 | // res := ddl.Sub(time.Now()) // 计算时间差
34 | // log.Println(res.String())
35 | //}
36 | //time.Sleep(time.Millisecond * 100)
37 | return &GetByIdResponse{
38 | User: &User{
39 | Id: 456,
40 | Name: "hgg+" + s.Name,
41 | },
42 | }, nil
43 | }
44 |
--------------------------------------------------------------------------------
/rpc/go_zero/service.go:
--------------------------------------------------------------------------------
1 | package go_zero
2 |
3 | import "context"
4 |
5 | // Service 实现 service
6 | type Service struct {
7 | UnimplementedUserServiceServer // 继承,实现service的时候【必须强制组合mented】,也是为了向后兼容
8 | Name string
9 | }
10 |
11 | func (s *Service) GetById(ctx context.Context, request *GetByIdRequest) (*GetByIdResponse, error) {
12 | //ctx, span := otel.Tracer("server_biz123").Start(ctx, "get_by_id123")
13 | //defer span.End()
14 | //ddl, ok := ctx.Deadline() // Deadline是一个时间点,表示某个时间点deadline,过了这个时间点,context就被取消
15 | //if ok {
16 | // res := ddl.Sub(time.Now()) // 计算时间差
17 | // log.Println(res.String())
18 | //}
19 | //time.Sleep(time.Millisecond * 100)
20 | return &GetByIdResponse{
21 | User: &User{
22 | Id: 123,
23 | Name: "hgg+" + s.Name,
24 | },
25 | }, nil
26 | }
27 |
28 | func (s *Service) GetById1(ctx context.Context, request *GetByIdRequest) (*GetByIdResponse, error) {
29 | //ctx, span := otel.Tracer("server_biz456").Start(ctx, "get_by_id456")
30 | //defer span.End()
31 | //ddl, ok := ctx.Deadline() // Deadline是一个时间点,表示某个时间点deadline,过了这个时间点,context就被取消
32 | //if ok {
33 | // res := ddl.Sub(time.Now()) // 计算时间差
34 | // log.Println(res.String())
35 | //}
36 | //time.Sleep(time.Millisecond * 100)
37 | return &GetByIdResponse{
38 | User: &User{
39 | Id: 456,
40 | Name: "hgg+" + s.Name,
41 | },
42 | }, nil
43 | }
44 |
--------------------------------------------------------------------------------
/rpc/grpcx/etcdx/service.go:
--------------------------------------------------------------------------------
1 | package etcdx
2 |
3 | import "context"
4 |
5 | // Service 实现 service
6 | type Service struct {
7 | UnimplementedUserServiceServer // 继承,实现service的时候【必须强制组合mented】,也是为了向后兼容
8 | Name string
9 | }
10 |
11 | func (s *Service) GetById(ctx context.Context, request *GetByIdRequest) (*GetByIdResponse, error) {
12 | //ctx, span := otel.Tracer("server_biz123").Start(ctx, "get_by_id123")
13 | //defer span.End()
14 | //ddl, ok := ctx.Deadline() // Deadline是一个时间点,表示某个时间点deadline,过了这个时间点,context就被取消
15 | //if ok {
16 | // res := ddl.Sub(time.Now()) // 计算时间差
17 | // log.Println(res.String())
18 | //}
19 | //time.Sleep(time.Millisecond * 100)
20 | return &GetByIdResponse{
21 | User: &User{
22 | Id: 123,
23 | Name: "hgg+" + s.Name,
24 | },
25 | }, nil
26 | }
27 |
28 | func (s *Service) GetById1(ctx context.Context, request *GetByIdRequest) (*GetByIdResponse, error) {
29 | //ctx, span := otel.Tracer("server_biz456").Start(ctx, "get_by_id456")
30 | //defer span.End()
31 | //ddl, ok := ctx.Deadline() // Deadline是一个时间点,表示某个时间点deadline,过了这个时间点,context就被取消
32 | //if ok {
33 | // res := ddl.Sub(time.Now()) // 计算时间差
34 | // log.Println(res.String())
35 | //}
36 | //time.Sleep(time.Millisecond * 100)
37 | return &GetByIdResponse{
38 | User: &User{
39 | Id: 456,
40 | Name: "hgg+" + s.Name,
41 | },
42 | }, nil
43 | }
44 |
--------------------------------------------------------------------------------
/rpc/kratosx/service.go:
--------------------------------------------------------------------------------
1 | package kratosx
2 |
3 | import "context"
4 |
5 | // Service 实现 service
6 | type Service struct {
7 | UnimplementedUserServiceServer // 继承,实现service的时候【必须强制组合mented】,也是为了向后兼容
8 | Name string
9 | }
10 |
11 | func (s *Service) GetById(ctx context.Context, request *GetByIdRequest) (*GetByIdResponse, error) {
12 | //ctx, span := otel.Tracer("server_biz123").Start(ctx, "get_by_id123")
13 | //defer span.End()
14 | //ddl, ok := ctx.Deadline() // Deadline是一个时间点,表示某个时间点deadline,过了这个时间点,context就被取消
15 | //if ok {
16 | // res := ddl.Sub(time.Now()) // 计算时间差
17 | // log.Println(res.String())
18 | //}
19 | //time.Sleep(time.Millisecond * 100)
20 | return &GetByIdResponse{
21 | User: &User{
22 | Id: 123,
23 | Name: "hgg+" + s.Name,
24 | },
25 | }, nil
26 | }
27 |
28 | func (s *Service) GetById1(ctx context.Context, request *GetByIdRequest) (*GetByIdResponse, error) {
29 | //ctx, span := otel.Tracer("server_biz456").Start(ctx, "get_by_id456")
30 | //defer span.End()
31 | //ddl, ok := ctx.Deadline() // Deadline是一个时间点,表示某个时间点deadline,过了这个时间点,context就被取消
32 | //if ok {
33 | // res := ddl.Sub(time.Now()) // 计算时间差
34 | // log.Println(res.String())
35 | //}
36 | //time.Sleep(time.Millisecond * 100)
37 | return &GetByIdResponse{
38 | User: &User{
39 | Id: 456,
40 | Name: "hgg+" + s.Name,
41 | },
42 | }, nil
43 | }
44 |
--------------------------------------------------------------------------------
/rpc/grpcx/failover/service.go:
--------------------------------------------------------------------------------
1 | package failover
2 |
3 | import "context"
4 |
5 | // Service 实现 service
6 | type Service struct {
7 | UnimplementedUserServiceServer // 继承,实现service的时候【必须强制组合mented】,也是为了向后兼容
8 | Name string
9 | }
10 |
11 | func (s *Service) GetById(ctx context.Context, request *GetByIdRequest) (*GetByIdResponse, error) {
12 | //ctx, span := otel.Tracer("server_biz123").Start(ctx, "get_by_id123")
13 | //defer span.End()
14 | //ddl, ok := ctx.Deadline() // Deadline是一个时间点,表示某个时间点deadline,过了这个时间点,context就被取消
15 | //if ok {
16 | // res := ddl.Sub(time.Now()) // 计算时间差
17 | // log.Println(res.String())
18 | //}
19 | //time.Sleep(time.Millisecond * 100)
20 | return &GetByIdResponse{
21 | User: &User{
22 | Id: 123,
23 | Name: "hgg+" + s.Name,
24 | },
25 | }, nil
26 | }
27 |
28 | func (s *Service) GetById1(ctx context.Context, request *GetByIdRequest) (*GetByIdResponse, error) {
29 | //ctx, span := otel.Tracer("server_biz456").Start(ctx, "get_by_id456")
30 | //defer span.End()
31 | //ddl, ok := ctx.Deadline() // Deadline是一个时间点,表示某个时间点deadline,过了这个时间点,context就被取消
32 | //if ok {
33 | // res := ddl.Sub(time.Now()) // 计算时间差
34 | // log.Println(res.String())
35 | //}
36 | //time.Sleep(time.Millisecond * 100)
37 | return &GetByIdResponse{
38 | User: &User{
39 | Id: 456,
40 | Name: "hgg+" + s.Name,
41 | },
42 | }, nil
43 | }
44 |
--------------------------------------------------------------------------------
/rpc/grpcx/limiter/slidingWindow/testPkg/service.go:
--------------------------------------------------------------------------------
1 | package testPkg
2 |
3 | import "context"
4 |
5 | // Service 实现 service
6 | type Service struct {
7 | UnimplementedUserServiceServer // 继承,实现service的时候【必须强制组合mented】,也是为了向后兼容
8 | Name string
9 | }
10 |
11 | func (s *Service) GetById(ctx context.Context, request *GetByIdRequest) (*GetByIdResponse, error) {
12 | //ctx, span := otel.Tracer("server_biz123").Start(ctx, "get_by_id123")
13 | //defer span.End()
14 | //ddl, ok := ctx.Deadline() // Deadline是一个时间点,表示某个时间点deadline,过了这个时间点,context就被取消
15 | //if ok {
16 | // res := ddl.Sub(time.Now()) // 计算时间差
17 | // log.Println(res.String())
18 | //}
19 | //time.Sleep(time.Millisecond * 100)
20 | return &GetByIdResponse{
21 | User: &User{
22 | Id: 123,
23 | Name: "hgg+" + s.Name,
24 | },
25 | }, nil
26 | }
27 |
28 | func (s *Service) GetById1(ctx context.Context, request *GetByIdRequest) (*GetByIdResponse, error) {
29 | //ctx, span := otel.Tracer("server_biz456").Start(ctx, "get_by_id456")
30 | //defer span.End()
31 | //ddl, ok := ctx.Deadline() // Deadline是一个时间点,表示某个时间点deadline,过了这个时间点,context就被取消
32 | //if ok {
33 | // res := ddl.Sub(time.Now()) // 计算时间差
34 | // log.Println(res.String())
35 | //}
36 | //time.Sleep(time.Millisecond * 100)
37 | return &GetByIdResponse{
38 | User: &User{
39 | Id: 456,
40 | Name: "hgg+" + s.Name,
41 | },
42 | }, nil
43 | }
44 |
--------------------------------------------------------------------------------
/syncX/lock/redisLock/redsyncx/lock_cron_mysql/repository/dao/db.go:
--------------------------------------------------------------------------------
1 | package dao
2 |
3 | import "database/sql"
4 |
5 | // TaskType 任务类型
6 | type TaskType string
7 |
8 | const (
9 | TaskTypeFunction TaskType = "function"
10 | TaskTypeHTTP TaskType = "http"
11 | TaskTypeGRPC TaskType = "grpc"
12 | )
13 |
14 | // JobStatus 任务状态
15 | type JobStatus string
16 |
17 | const (
18 | // JobStatusActive 启用
19 | JobStatusActive JobStatus = "active"
20 | // JobStatusRunning 运行中
21 | JobStatusRunning JobStatus = "running"
22 | // JobStatusPaused 暂停
23 | JobStatusPaused JobStatus = "paused"
24 | // JobStatusDeleted 删除
25 | JobStatusDeleted JobStatus = "deleted"
26 | )
27 |
28 | // CronJob 定时任务
29 | type CronJob struct {
30 | ID int64 `json:"id" gorm:"primaryKey, autoIncrement"`
31 | CronId int64 `json:"cron_id" gorm:"unique"`
32 | // 任务名
33 | Name string `gorm:"column:cron_name;type:varchar(128);size:128"`
34 | // 任务描述
35 | Description sql.NullString `gorm:"column:description;type=varchar(4096);size:4096"`
36 | // 任务执行表达式
37 | CronExpr string `gorm:"column:cron_expr"`
38 | // 任务类型
39 | TaskType TaskType `gorm:"column:task_type"`
40 |
41 | // 任务状态
42 | Status JobStatus `gorm:"column:status;type:varchar(128);size:128"`
43 | // 任务最大重试次数
44 | MaxRetry int `gorm:"column:max_retry"`
45 | // 任务超时时间(秒)
46 | Timeout int `gorm:"column:timeout"`
47 |
48 | Ctime float64
49 | Utime float64
50 | }
51 |
--------------------------------------------------------------------------------
/channelx/mqX/kafkaX/saramaX/consumerX/offsetConsumer_test.go:
--------------------------------------------------------------------------------
1 | package consumerX
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "gitee.com/hgg_test/pkg_tool/v2/channelx/mqX"
7 | "github.com/stretchr/testify/assert"
8 | "log"
9 | "testing"
10 | "time"
11 | )
12 |
13 | type MyHandlers struct{}
14 |
15 | func (h *MyHandlers) HandleBatch(ctx context.Context, msgs []*mqX.Message) (success bool, err error) {
16 | var events []mqX.UserEventTest
17 | for _, v := range msgs {
18 | var event mqX.UserEventTest
19 | if err := json.Unmarshal(v.Value, &event); err != nil {
20 | return false, err
21 | }
22 | events = append(events, event)
23 | }
24 | // 处理业务逻辑
25 | log.Println("Received event:", events)
26 | return true, nil
27 | }
28 |
29 | func TestExampleOffsetConsumer(t *testing.T) {
30 | consumer, err := NewOffsetConsumer(addr, &OffsetConsumerConfig{
31 | BatchSize: 50,
32 | BatchTimeout: 2 * time.Second,
33 | AutoCommit: false, // 由 handler 控制
34 | })
35 | if err != nil {
36 | t.Skipf("跳过测试:无法连接 Kafka: %v", err)
37 | return
38 | }
39 | defer consumer.Close()
40 |
41 | handlers := &MyHandlers{}
42 |
43 | // 生产可在方法传一个ctx,但是具体ctx返回至main函数defer住
44 | ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
45 | defer cancel()
46 |
47 | // 从 offset=1000 开始消费 partition 0
48 | err = consumer.ConsumeFrom(ctx, "user-events", 0, 10, handlers)
49 | assert.NoError(t, err)
50 | }
51 |
--------------------------------------------------------------------------------
/DBx/redisX/redisMonitorX/redisPrometheusx/help_docx:
--------------------------------------------------------------------------------
1 | // eg: 装饰器模式,基于Prometheus监控统计redis缓存
2 | // 【监控redis命令耗时】、【监控redis缓存命中率】,使用哪个,初始化redis时,利用redis的Hook能力加入Prometheus
3 |
4 | // 初始化redis客户端
5 | client := redis.NewClient(&redis.Options{
6 | Addr: "localhost:6379",
7 | })
8 |
9 | // 基于prometheus,监控redis命令耗时【利用redis的Hook能力】
10 | clientHookTime := redisx.NewPrometheusRedisHookTime(prometheus.HistogramOpts{
11 | Namespace: "hgg",
12 | Subsystem: "hggXiaoWeiShu",
13 | Name: "redisCommandTime",
14 | Help: "监控redis缓存命令耗时",
15 | Buckets: []float64{ // 各个分桶值单位为秒【1ms一个桶,1ms以内的响应时间都在一个桶内】
16 | 0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0,
17 | },
18 | })
19 |
20 | // 监控redis缓存命中率【利用redis的Hook能力】
21 | clientHookKeyRate := redisx.NewPrometheusHookKeyRate(prometheus.SummaryOpts{
22 | Namespace: "hgg",
23 | Subsystem: "hggXiaoWeiShu",
24 | Name: "redisKeyRate",
25 | Help: "监控redis缓存命中率",
26 | Objectives: map[float64]float64{
27 | 0.5: 0.01, // 以响应时间为例:50%的观测值响应时间,在0.01的百分比内【误差在 %1】
28 | 0.75: 0.01, // 以响应时间为例:75%的观测值响应时间,在0.01的百分比内【误差在 %1】
29 | 0.90: 0.005, // 以响应时间为例:90%的观测值响应时间,在0.005的百分比内【误差在 %0.5】
30 | 0.98: 0.002, // 以响应时间为例:98%的观测值响应时间,在0.002的百分比内【误差在 %0.2】
31 | 0.99: 0.001, // 以响应时间为例:99%的观测值响应时间,在0.001的百分比内【误差在 %0.1】
32 | 0.999: 0.0001, // 以响应时间为例:99.9%的观测值响应时间,在0.0001的百分比内【误差在 %0.01】
33 | },
34 | })
35 |
36 | client.AddHook(clientHookKeyRate)
37 | client.AddHook(clientHookTime)
38 |
39 |
40 |
41 |
--------------------------------------------------------------------------------
/DBx/redisX/cacheCountServiceX/lua/cntRank.lua:
--------------------------------------------------------------------------------
1 | ---- 接收 KEYS[1] 为计数器键,KEYS[2] 为排行榜键,ARGV[1] 为变化量。返回新的计数值。
2 | --local cnt_key = KEYS[1]
3 | --local rank_key = KEYS[2]
4 | --local delta = ARGV[1]
5 | --
6 | ---- 更新计数
7 | --local new_cnt = redis.call('INCRBY', cnt_key, delta)
8 | --if new_cnt < 0 then
9 | -- new_cnt = 0
10 | -- redis.call('SET', cnt_key, 0)
11 | --end
12 | --
13 | ---- 更新排行榜
14 | --if delta ~= '0' then
15 | -- redis.call('ZADD', rank_key, new_cnt, string.sub(cnt_key, -string.find(string.reverse(cnt_key), ':') + 1))
16 | --end
17 | --
18 | ---- 设置过期时间(如果需要)
19 | ---- 计数过期时间短,5分钟
20 | --redis.call('EXPIRE', cnt_key, 300)
21 | ---- 排行榜过期时间更长,1小时,避免缓存击穿
22 | --redis.call('EXPIRE', rank_key, 3600)
23 | --
24 | --return new_cnt
25 |
26 |
27 |
28 |
29 |
30 | -- KEYS[1] = 计数器 key (e.g., "user:123:cnt")
31 | -- KEYS[2] = 排行榜 key (e.g., "rank:daily")
32 | -- ARGV[1] = delta (string, e.g., "1")
33 | -- ARGV[2] = member (e.g., "123") ← 更安全高效!
34 |
35 | local cnt_key = KEYS[1]
36 | local rank_key = KEYS[2]
37 | local delta = tonumber(ARGV[1])
38 | local member = ARGV[2]
39 |
40 | -- 更新计数
41 | local new_cnt = redis.call('INCRBY', cnt_key, delta)
42 | if new_cnt < 0 then
43 | new_cnt = 0
44 | redis.call('SET', cnt_key, 0)
45 | end
46 |
47 | -- 更新排行榜(仅当变化量非零)
48 | if delta ~= 0 then
49 | redis.call('ZADD', rank_key, new_cnt, member)
50 | end
51 |
52 | -- 设置过期时间(滑动窗口场景下合理)
53 | redis.call('EXPIRE', cnt_key, 660) -- 11分钟
54 | redis.call('EXPIRE', rank_key, 86460) -- 1天1分
55 |
56 | return new_cnt
--------------------------------------------------------------------------------
/DBx/mysqlX/gormx/dbPrometheusx/help_docx:
--------------------------------------------------------------------------------
1 | // 自定义监控指标,统计sql执行时间
2 | db, err := gorm.Open(mysql.Open(cfg.DSN), &gorm.Config{Logger: l})
3 | prometheusConf := dbPrometheusx.PrometheusSummaryOpts{
4 | Namespace: "hgg_XiaoWeiShu",
5 | Subsystem: "webook",
6 | Name: "gorm_db",
7 | Help: "统计 GORM 的数据库查询",
8 | ConstLabels: map[string]string{
9 | "instance_id": "my_instance",
10 | },
11 | Objectives: map[float64]float64{
12 | 0.5: 0.01,
13 | 0.75: 0.01,
14 | 0.9: 0.01,
15 | 0.99: 0.001,
16 | 0.999: 0.0001,
17 | },
18 | }
19 | cb := dbPrometheusx.NewCallbacks(prometheusConf)
20 | err = db.Use(cb)
21 | if err != nil {
22 | panic(err)
23 | }
24 |
25 |
26 |
27 | // GORM自己也有一个prometheus,启用监控
28 | err = db.Use(prometheus.New(prometheus.Config{
29 | DBName: "webook", // 数据库名称
30 | RefreshInterval: 15, // 刷新间隔,多久向连接池发送一次监控数据
31 | //PushAddr: "", // 推送地址
32 | //PushUser: "", // 推送用户名
33 | //PushPassword: "",
34 | //StartServer: false,
35 | //HTTPServerPort: 0,
36 | MetricsCollector: []prometheus.MetricsCollector{ // 自定义监控指标
37 | &prometheus.MySQL{
38 | VariableNames: []string{"thread_running"}, // 监控的变量名
39 | },
40 | },
41 | //Labels: nil,
42 | }))
43 | if err != nil {
44 | panic(err)
45 | }
--------------------------------------------------------------------------------
/serviceLogicX/rankingListX/rankingServiceX/buildGormX/BuildDataSource.go:
--------------------------------------------------------------------------------
1 | package buildGormX
2 |
3 | import (
4 | "context"
5 | "gitee.com/hgg_test/pkg_tool/v2/logx"
6 | "gitee.com/hgg_test/pkg_tool/v2/serviceLogicX/rankingListX/rankingServiceX/types"
7 | "gorm.io/gorm"
8 | )
9 |
10 | // BuildDataSource 通用数据源构建器
11 | // - baseQuery: 查询时有可能会有status等字段需要过滤,自定义构建数据源的status等,
12 | // 所以暴漏外部来控制数据源查询条件【构造Select/Where/Order,注意不要加分页!】,
13 | // - baseQuery: 一、是为了灵活构造查询。二、如果where无法命中索引,就不要加where了,性能影响较大
14 | // - mapper: 映射数据源结构体到分数结构体,返回分数结构体
15 | func BuildDataSource[T any](
16 | ctx context.Context,
17 | db *gorm.DB, // 数据库连接
18 | baseQuery func(*gorm.DB) *gorm.DB, // 构造Select/Where/Order,注意不要加分页!
19 | mapper func(T) types.HotScore, // 映射数据源结构体到业务结构体
20 | logger logx.Loggerx, // 日志
21 | ) func(offset, limit int) ([]types.HotScore, error) {
22 |
23 | return func(offset, limit int) ([]types.HotScore, error) {
24 | query := db.Model(new(T)).WithContext(ctx)
25 | if baseQuery != nil {
26 | query = baseQuery(query)
27 | }
28 | // 内部统一加 Offset/Limit,确保分页正确
29 | query = query.Offset(offset).Limit(limit)
30 |
31 | var items []T
32 | if err := query.Find(&items).Error; err != nil {
33 | logger.Error("ranking: DB query failed, 计算榜单时查询数据库数据失败",
34 | logx.Int("offset", offset),
35 | logx.Int("limit", limit),
36 | logx.Error(err))
37 | return nil, err
38 | }
39 |
40 | result := make([]types.HotScore, len(items))
41 | for i, item := range items {
42 | result[i] = mapper(item)
43 | }
44 | return result, nil
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
/limiter/slide_window.lua:
--------------------------------------------------------------------------------
1 | -- Redis滑动窗口限流Lua脚本
2 | -- 修复版本:解决member唯一性和统计逻辑问题
3 |
4 | -- KEYS[1]: 限流key
5 | -- ARGV[1]: 窗口大小(毫秒)
6 | -- ARGV[2]: 阈值(窗口内允许的最大请求数)
7 | -- ARGV[3]: 当前时间戳(毫秒)
8 | -- ARGV[4]: 唯一请求ID(可选,用于生成唯一member)
9 |
10 | local key = KEYS[1]
11 | local window = tonumber(ARGV[1]) -- 窗口大小(毫秒)
12 | local threshold = tonumber(ARGV[2]) -- 阈值
13 | local now = tonumber(ARGV[3]) -- 当前时间戳(毫秒)
14 | local requestId = ARGV[4] -- 唯一请求ID(可选)
15 |
16 | -- 计算窗口开始时间戳
17 | local windowStart = now - window
18 |
19 | -- 1. 清理窗口外的过期数据
20 | -- 删除所有score小于窗口开始时间的数据
21 | redis.call('ZREMRANGEBYSCORE', key, '-inf', windowStart)
22 |
23 | -- 2. 统计当前窗口内的请求数量
24 | -- 注意:使用windowStart到'+inf'的范围,只统计窗口内的数据
25 | local cnt = redis.call('ZCOUNT', key, windowStart, '+inf')
26 |
27 | -- 3. 检查是否超过阈值
28 | if cnt >= threshold then
29 | -- 触发限流,返回1
30 | return 1
31 | end
32 |
33 | -- 4. 生成唯一的member标识
34 | -- 如果提供了requestId,则使用它;否则生成一个基于时间的唯一标识
35 | local member
36 | if requestId and requestId ~= '' then
37 | member = requestId
38 | else
39 | -- 使用时间戳+微秒时间生成相对唯一的member
40 | -- 这里使用redis的TIME命令获取当前时间的秒和微秒部分
41 | local timeInfo = redis.call('TIME')
42 | local seconds = tonumber(timeInfo[1])
43 | local microseconds = tonumber(timeInfo[2])
44 | member = string.format("%s:%s:%s", now, seconds, microseconds)
45 | end
46 |
47 | -- 5. 添加当前请求到滑动窗口
48 | -- score使用当前时间戳,member使用唯一标识
49 | redis.call('ZADD', key, now, member)
50 |
51 | -- 6. 设置key的过期时间,避免内存泄漏
52 | -- 过期时间设置为窗口大小的2倍,确保即使没有新请求也能自动清理
53 | redis.call('PEXPIRE', key, window * 2)
54 |
55 | -- 7. 返回0表示通过限流
56 | return 0
57 |
--------------------------------------------------------------------------------
/rpc/grpcx/limiter/fixedWindow/fixedWindow.go:
--------------------------------------------------------------------------------
1 | package fixedWindow
2 |
3 | import (
4 | "context"
5 | "sync"
6 | "time"
7 |
8 | "google.golang.org/grpc"
9 | "google.golang.org/grpc/codes"
10 | "google.golang.org/grpc/status"
11 | )
12 |
13 | // FixedWindowLimiter 固定窗口限流算法
14 | type FixedWindowLimiter struct {
15 | window time.Duration // 窗口大小
16 | lastWindowStart time.Time // 窗口开始时间
17 | cnt int // 窗口内请求数
18 | threshold int // 阈值
19 | lock sync.Mutex // 保护临界资源
20 | }
21 |
22 | // NewFixedWindowLimiter 创建固定窗口限流器
23 | func NewFixedWindowLimiter(window time.Duration, threshold int) *FixedWindowLimiter {
24 | return &FixedWindowLimiter{
25 | window: window,
26 | lastWindowStart: time.Now(),
27 | cnt: 0,
28 | threshold: threshold,
29 | }
30 | }
31 |
32 | // BuildServerInterceptor 构建gRPC服务端拦截器
33 | func (c *FixedWindowLimiter) BuildServerInterceptor() grpc.UnaryServerInterceptor {
34 | return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
35 | // 加锁保护共享状态
36 | c.lock.Lock()
37 |
38 | now := time.Now()
39 | // 判断是否进入新的窗口
40 | if now.Sub(c.lastWindowStart) > c.window {
41 | // 重置窗口
42 | c.cnt = 0
43 | c.lastWindowStart = now
44 | }
45 |
46 | // 检查是否超过阈值
47 | if c.cnt >= c.threshold {
48 | c.lock.Unlock()
49 | return nil, status.Errorf(codes.ResourceExhausted,
50 | "固定窗口限流:当前窗口请求数 %d,阈值 %d", c.cnt, c.threshold)
51 | }
52 |
53 | // 增加计数器并允许请求
54 | c.cnt++
55 | c.lock.Unlock()
56 |
57 | // 处理请求
58 | return handler(ctx, req)
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/sliceX/contains.go:
--------------------------------------------------------------------------------
1 | package sliceX
2 |
3 | // Contains 判断 src 里面是否存在 dst
4 | func Contains[T comparable](src []T, dst T) bool {
5 | return ContainsFunc[T](src, func(src T) bool {
6 | return src == dst
7 | })
8 | }
9 |
10 | // ContainsFunc 判断 src 里面是否存在 dst
11 | // 你应该优先使用 Contains
12 | func ContainsFunc[T any](src []T, equal func(src T) bool) bool {
13 | // 遍历调用equal函数进行判断
14 | for _, v := range src {
15 | if equal(v) {
16 | return true
17 | }
18 | }
19 | return false
20 | }
21 |
22 | // ContainsAny 判断 src 里面是否存在 dst 中的任何一个元素
23 | func ContainsAny[T comparable](src, dst []T) bool {
24 | srcMap := toMap[T](src)
25 | for _, v := range dst {
26 | if _, exist := srcMap[v]; exist {
27 | return true
28 | }
29 | }
30 | return false
31 | }
32 |
33 | // ContainsAnyFunc 判断 src 里面是否存在 dst 中的任何一个元素
34 | // 你应该优先使用 ContainsAny
35 | func ContainsAnyFunc[T any](src, dst []T, equal equalFunc[T]) bool {
36 | for _, valDst := range dst {
37 | for _, valSrc := range src {
38 | if equal(valSrc, valDst) {
39 | return true
40 | }
41 | }
42 | }
43 | return false
44 | }
45 |
46 | // ContainsAll 判断 src 里面是否存在 dst 中的所有元素
47 | func ContainsAll[T comparable](src, dst []T) bool {
48 | srcMap := toMap[T](src)
49 | for _, v := range dst {
50 | if _, exist := srcMap[v]; !exist {
51 | return false
52 | }
53 | }
54 | return true
55 | }
56 |
57 | // ContainsAllFunc 判断 src 里面是否存在 dst 中的所有元素
58 | // 你应该优先使用 ContainsAll
59 | func ContainsAllFunc[T any](src, dst []T, equal equalFunc[T]) bool {
60 | for _, valDst := range dst {
61 | if !ContainsFunc[T](src, func(src T) bool {
62 | return equal(src, valDst)
63 | }) {
64 | return false
65 | }
66 | }
67 | return true
68 | }
69 |
--------------------------------------------------------------------------------
/DBx/localCahceX/cacheLocalRistrettox/ristretto.go:
--------------------------------------------------------------------------------
1 | package cacheLocalRistrettox
2 |
3 | import (
4 | "errors"
5 | "gitee.com/hgg_test/pkg_tool/v2/DBx/localCahceX"
6 | "github.com/dgraph-io/ristretto/v2"
7 | "time"
8 | )
9 |
10 | type CacheLocalRistrettoStr[K localCahceX.Key, V any] struct {
11 | cache *ristretto.Cache[K, V]
12 | }
13 |
14 | // NewCacheLocalRistrettoStr 是高性能、并发安全、带准入策略的内存缓存库【初始化参考测试用例 V1 版本】
15 | func NewCacheLocalRistrettoStr[K localCahceX.Key, V any](cache *ristretto.Cache[K, V]) localCahceX.CacheLocalIn[K, V] {
16 | return &CacheLocalRistrettoStr[K, V]{
17 | cache: cache,
18 | }
19 | }
20 |
21 | // Set 设置本地缓存
22 | // - weight: 缓存权重
23 | func (c *CacheLocalRistrettoStr[K, V]) Set(key K, value V, ttl time.Duration, weight int64) error {
24 | ok := c.cache.SetWithTTL(key, value, weight, ttl)
25 | if ok {
26 | return nil
27 | }
28 | return errors.New("set localCache fail error")
29 | }
30 |
31 | func (c *CacheLocalRistrettoStr[K, V]) Get(key K) (V, error) {
32 | val, ok := c.cache.GetTTL(key)
33 | if !ok || val <= time.Duration(0) {
34 | var v V
35 | return v, errors.New("查询缓存失败")
36 | }
37 | if value, isok := c.cache.Get(key); isok {
38 | return value, nil
39 | }
40 | var v V
41 | return v, errors.New("get localCache error, no key --> value, 查询缓存失败, Key不存在")
42 | }
43 |
44 | func (c *CacheLocalRistrettoStr[K, V]) Del(key K) error {
45 | c.cache.Del(key)
46 | return nil
47 | }
48 |
49 | func (c *CacheLocalRistrettoStr[K, V]) Close() {
50 | c.cache.Close()
51 | }
52 |
53 | func (c *CacheLocalRistrettoStr[K, V]) WaitSet() {
54 | c.cache.Wait()
55 | }
56 |
57 | // init方法会被自动调用
58 | func (c *CacheLocalRistrettoStr[K, V]) initClose() {
59 | defer c.cache.Close()
60 | }
61 |
--------------------------------------------------------------------------------
/syncX/lock/redisLock/redsyncx/lock_cron_mysql/service/cron.go:
--------------------------------------------------------------------------------
1 | package service
2 |
3 | import (
4 | "context"
5 | "gitee.com/hgg_test/pkg_tool/v2/syncX/lock/redisLock/redsyncx/lock_cron_mysql/domain"
6 | "gitee.com/hgg_test/pkg_tool/v2/syncX/lock/redisLock/redsyncx/lock_cron_mysql/repository"
7 | )
8 |
9 | var (
10 | ErrDataRecordNotFound error = repository.ErrDataRecordNotFound
11 | ErrDuplicateData error = repository.ErrDuplicateData
12 | )
13 |
14 | type CronService interface {
15 | GetCronJob(ctx context.Context, id int64) (domain.CronJob, error)
16 | GetCronJobs(ctx context.Context) ([]domain.CronJob, error)
17 | AddCronJob(ctx context.Context, job domain.CronJob) error
18 | AddCronJobs(ctx context.Context, jobs []domain.CronJob) error
19 | DelCronJob(ctx context.Context, id int64) error
20 | DelCronJobs(ctx context.Context, ids []int64) error
21 | }
22 |
23 | type cronService struct {
24 | cronRepo repository.CronRepository
25 | }
26 |
27 | func (c *cronService) GetCronJob(ctx context.Context, id int64) (domain.CronJob, error) {
28 | return c.cronRepo.FindById(ctx, id)
29 | }
30 |
31 | func (c *cronService) GetCronJobs(ctx context.Context) ([]domain.CronJob, error) {
32 | return c.cronRepo.FindAll(ctx)
33 | }
34 |
35 | func (c *cronService) AddCronJob(ctx context.Context, job domain.CronJob) error {
36 | return c.cronRepo.CreateCron(ctx, job)
37 | }
38 | func (c *cronService) AddCronJobs(ctx context.Context, jobs []domain.CronJob) error {
39 | return c.cronRepo.CreateCrons(ctx, jobs)
40 | }
41 |
42 | func (c *cronService) DelCronJob(ctx context.Context, id int64) error {
43 | return c.cronRepo.DelCron(ctx, id)
44 | }
45 | func (c *cronService) DelCronJobs(ctx context.Context, ids []int64) error {
46 | return c.cronRepo.DelCrons(ctx, ids)
47 | }
48 |
--------------------------------------------------------------------------------
/DBx/redisX/redisMonitorX/redisPrometheusx/redisPrometheusKeyx.go:
--------------------------------------------------------------------------------
1 | package redisPrometheusx
2 |
3 | /*
4 | 监控缓存的命中率
5 | */
6 |
7 | import (
8 | "context"
9 | "github.com/prometheus/client_golang/prometheus"
10 | "github.com/redis/go-redis/v9"
11 | "strings"
12 | "time"
13 | )
14 |
15 | type PrometheusHookKeyRate struct {
16 | vector *prometheus.SummaryVec
17 | }
18 |
19 | // NewPrometheusHookKeyRate 监控缓存get的命中率
20 | func NewPrometheusHookKeyRate(opts prometheus.SummaryOpts) *PrometheusHookKeyRate {
21 | h := &PrometheusHookKeyRate{
22 | vector: prometheus.NewSummaryVec(opts, []string{"cmd", "key_exist"}),
23 | }
24 | prometheus.MustRegister(h.vector)
25 | return h
26 | }
27 |
28 | func (p *PrometheusHookKeyRate) DialHook(next redis.DialHook) redis.DialHook {
29 | return next // 透传,不监控
30 | }
31 |
32 | func (p *PrometheusHookKeyRate) ProcessHook(next redis.ProcessHook) redis.ProcessHook {
33 | return func(ctx context.Context, cmd redis.Cmder) error {
34 | start := time.Now()
35 | err := next(ctx, cmd) // 先执行命令,拿到 err
36 |
37 | // 再记录指标(不要 defer,避免 panic 影响 err 捕获)
38 | duration := time.Since(start).Seconds()
39 | cmdName := strings.ToLower(cmd.Name())
40 | keyExist := getLabelKeyExist(cmd)
41 |
42 | p.vector.WithLabelValues(cmdName, keyExist).Observe(duration)
43 |
44 | return err
45 | }
46 | }
47 |
48 | func (p *PrometheusHookKeyRate) ProcessPipelineHook(next redis.ProcessPipelineHook) redis.ProcessPipelineHook {
49 | return next
50 | }
51 |
52 | func getLabelKeyExist(cmd redis.Cmder) string {
53 | switch strings.ToLower(cmd.Name()) {
54 | case "get", "hget", "lindex", "zscore", "exists":
55 | if cmd.Err() == redis.Nil {
56 | return "false"
57 | } else if cmd.Err() == nil {
58 | return "true"
59 | }
60 | }
61 | return "n/a"
62 | }
63 |
--------------------------------------------------------------------------------
/limiter/mocks/limiter.mock.go:
--------------------------------------------------------------------------------
1 | // Code generated by MockGen. DO NOT EDIT.
2 | // Source: ./pkg/limiter/types.go
3 | //
4 | // Generated by this command:
5 | //
6 | // mockgen -source=./pkg/limiter/types.go -package=limitermocks -destination=./pkg/limiter/mocks/limiter.mock.go
7 | //
8 |
9 | // Package limitermocks is a generated GoMock package.
10 | package limitermocks
11 |
12 | import (
13 | context "context"
14 | reflect "reflect"
15 |
16 | gomock "go.uber.org/mock/gomock"
17 | )
18 |
19 | // MockLimiter is a mock of Limiter interface.
20 | type MockLimiter struct {
21 | ctrl *gomock.Controller
22 | recorder *MockLimiterMockRecorder
23 | isgomock struct{}
24 | }
25 |
26 | // MockLimiterMockRecorder is the mock recorder for MockLimiter.
27 | type MockLimiterMockRecorder struct {
28 | mock *MockLimiter
29 | }
30 |
31 | // NewMockLimiter creates a new mock instance.
32 | func NewMockLimiter(ctrl *gomock.Controller) *MockLimiter {
33 | mock := &MockLimiter{ctrl: ctrl}
34 | mock.recorder = &MockLimiterMockRecorder{mock}
35 | return mock
36 | }
37 |
38 | // EXPECT returns an object that allows the caller to indicate expected use.
39 | func (m *MockLimiter) EXPECT() *MockLimiterMockRecorder {
40 | return m.recorder
41 | }
42 |
43 | // Limit mocks base method.
44 | func (m *MockLimiter) Limit(ctx context.Context, key string) (bool, error) {
45 | m.ctrl.T.Helper()
46 | ret := m.ctrl.Call(m, "Limit", ctx, key)
47 | ret0, _ := ret[0].(bool)
48 | ret1, _ := ret[1].(error)
49 | return ret0, ret1
50 | }
51 |
52 | // Limit indicates an expected call of Limit.
53 | func (mr *MockLimiterMockRecorder) Limit(ctx, key any) *gomock.Call {
54 | mr.mock.ctrl.T.Helper()
55 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Limit", reflect.TypeOf((*MockLimiter)(nil).Limit), ctx, key)
56 | }
57 |
--------------------------------------------------------------------------------
/DBx/mysqlX/gormx/dbMovex/myMovex/messageQueue/producer/producer_test.go:
--------------------------------------------------------------------------------
1 | package producer
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "gitee.com/hgg_test/pkg_tool/v2/DBx/mysqlX/gormx/dbMovex/myMovex/doubleWritePoolx"
7 | "gitee.com/hgg_test/pkg_tool/v2/DBx/mysqlX/gormx/dbMovex/myMovex/events"
8 | "gitee.com/hgg_test/pkg_tool/v2/channelx/mqX"
9 | "gitee.com/hgg_test/pkg_tool/v2/channelx/mqX/kafkaX/saramaX/producerX"
10 | "github.com/stretchr/testify/assert"
11 | "testing"
12 | "time"
13 | )
14 |
15 | var addr []string = []string{"localhost:9094"}
16 |
17 | // 测试同步发送
18 | func TestNewSaramaProducerStrSync(t *testing.T) {
19 | //cfg := saramaX.NewConfig()
20 | ////========同步发送==========
21 | //cfg.Producer.Return.Successes = true
22 | //
23 | //syncPro, err := saramaX.NewSyncProducer(addr, cfg)
24 | //assert.NoError(t, err)
25 | //pro := saramaProducerx.NewSaramaProducerStr[saramaX.SyncProducer](syncPro, cfg)
26 | pro, err := producerX.NewKafkaProducer(addr, &producerX.ProducerConfig{
27 | BatchSize: 0,
28 | BatchTimeout: 0,
29 | Async: true,
30 | })
31 | if err != nil {
32 | t.Skipf("跳过测试:无法连接 Kafka: %v", err)
33 | return
34 | }
35 | // CloseProducer 关闭生产者Producer,请在main函数最顶层defer住生产者的Producer.Close(),优雅关闭防止goroutine泄露
36 | //defer pro.CloseProducer()
37 | defer pro.Close()
38 |
39 | value, err := json.Marshal(events.InconsistentEvent{
40 | ID: 10,
41 | Direction: "SRC",
42 | Type: doubleWritePoolx.PatternSrcFirst,
43 | })
44 | assert.NoError(t, err)
45 |
46 | ctx, cancel := context.WithTimeout(context.Background(), time.Second)
47 | // 同步发送
48 | //err = pro.SendMessage(ctx, messageQueuex.Tp{Topic: "dbMove"}, value)
49 | err = pro.Send(ctx, &mqX.Message{Topic: "dbMove", Value: value})
50 | assert.NoError(t, err)
51 | cancel()
52 | }
53 |
--------------------------------------------------------------------------------
/webx/ginx/middleware/limitX/ratelimitGinBuild.go:
--------------------------------------------------------------------------------
1 | package limitX
2 |
3 | import (
4 | _ "embed"
5 | "fmt"
6 | "log"
7 | "net/http"
8 |
9 | "gitee.com/hgg_test/pkg_tool/limiter"
10 | "gitee.com/hgg_test/pkg_tool/v2/logx"
11 | "github.com/gin-gonic/gin"
12 | )
13 |
14 | type Builder struct {
15 | prefix string
16 | //cmd redis.Cmdable
17 | //interval time.Duration
18 | //// 阈值
19 | //rate int
20 | limiter limiter.Limiter
21 | log logx.Loggerx
22 | }
23 |
24 | // NewRedisBuilder 【注册到gin中间件, server.use()】
25 | // ratelimitx.NewBuilder(limiter.NewRedisSlideWindowKLimiter(redisClient, time.Second, 1000)).Build(),
26 | // 限流中间件,注册到 gin框架,使用 redis,100次请求/秒。传三个参数,第一个为redis客户端,第二个为限流时间,第三个为限流次数。
27 | func NewRedisBuilder(l limiter.Limiter, logh logx.Loggerx) *Builder {
28 | return &Builder{
29 | prefix: "ip-limiter",
30 | limiter: l,
31 | log: logh,
32 | }
33 | }
34 |
35 | func (b *Builder) Prefix(prefix string) *Builder {
36 | b.prefix = prefix
37 | return b
38 | }
39 |
40 | func (b *Builder) Build() gin.HandlerFunc {
41 | return func(ctx *gin.Context) {
42 | limited, err := b.limiter.Limit(ctx, fmt.Sprintf("%s:%s", b.prefix, ctx.ClientIP()))
43 | if err != nil {
44 | log.Println(err)
45 | b.log.Error("限流出错,redis有误", logx.Error(err))
46 | // 这一步很有意思,就是如果这边出错了
47 | // 要怎么办?
48 | // 保守做法:因为借助于 Redis 来做限流,那么 Redis 崩溃了,为了防止系统崩溃,直接限流
49 | ctx.AbortWithStatus(http.StatusInternalServerError)
50 | // 激进做法:虽然 Redis 崩溃了,但是这个时候还是要尽量服务正常的用户,所以不限流
51 | // ctx.Next()
52 | return
53 | }
54 | if limited {
55 | if b.log != nil {
56 | b.log.Info("请求被限流",
57 | logx.String("client_ip", ctx.ClientIP()),
58 | logx.String("path", ctx.Request.URL.Path))
59 | }
60 | ctx.AbortWithStatus(http.StatusTooManyRequests)
61 | return
62 | }
63 | ctx.Next()
64 | }
65 | }
66 |
--------------------------------------------------------------------------------
/logx/fields.go:
--------------------------------------------------------------------------------
1 | package logx
2 |
3 | import (
4 | "time"
5 | )
6 |
7 | func Error(err error) Field {
8 | return Field{Key: "error", Value: err}
9 | }
10 |
11 | func Bool(key string, val bool) Field {
12 | return Field{Key: key, Value: val}
13 | }
14 |
15 | func Int(key string, val int) Field {
16 | return Field{Key: key, Value: val}
17 | }
18 |
19 | func Int8(key string, val int8) Field {
20 | return Field{Key: key, Value: val}
21 | }
22 |
23 | func Int16(key string, val int16) Field {
24 | return Field{Key: key, Value: val}
25 | }
26 | func Int32(key string, val int32) Field {
27 | return Field{Key: key, Value: val}
28 | }
29 |
30 | func Int64(key string, val int64) Field {
31 | return Field{Key: key, Value: val}
32 | }
33 |
34 | func Uint(key string, val uint) Field {
35 | return Field{Key: key, Value: val}
36 | }
37 |
38 | func Uint8(key string, val uint8) Field {
39 | return Field{Key: key, Value: val}
40 | }
41 |
42 | func Uint16(key string, val uint16) Field {
43 | return Field{Key: key, Value: val}
44 | }
45 | func Uint32(key string, val uint32) Field {
46 | return Field{Key: key, Value: val}
47 | }
48 |
49 | func Uint64(key string, val uint64) Field {
50 | return Field{Key: key, Value: val}
51 | }
52 |
53 | func Float32(key string, val float32) Field {
54 | return Field{Key: key, Value: val}
55 | }
56 |
57 | func Float64(key string, val float64) Field {
58 | return Field{Key: key, Value: val}
59 | }
60 | func String(key string, val string) Field {
61 | return Field{Key: key, Value: val}
62 | }
63 |
64 | func TimeTime(key string, val time.Time) Field {
65 | return Field{Key: key, Value: val}
66 | }
67 |
68 | func TimeDuration(key string, val time.Duration) Field {
69 | return Field{Key: key, Value: val}
70 | }
71 |
72 | func Any(key string, val any) Field {
73 | return Field{Key: key, Value: val}
74 | }
75 |
--------------------------------------------------------------------------------
/DBx/sqlX/json.go:
--------------------------------------------------------------------------------
1 | package sqlX
2 |
3 | import (
4 | "database/sql/driver"
5 | "encoding/json"
6 | "fmt"
7 | )
8 |
9 | // JsonColumn 是一个支持 NULL 的泛型 JSON 列包装器
10 | // 类似 sql.NullString,但用于任意可 JSON 序列化的类型
11 | type JsonColumn[T any] struct {
12 | Val T // 存储实际数据
13 | Valid bool // 表示该字段在数据库中是否为非 NULL
14 | }
15 |
16 | // Value 实现 driver.Valuer 接口
17 | // 将 Go 值转换为数据库存储值([]byte 或 nil)
18 | func (j JsonColumn[T]) Value() (driver.Value, error) {
19 | if !j.Valid {
20 | return nil, nil
21 | }
22 | return json.Marshal(j.Val)
23 | }
24 |
25 | // Scan 实现 sql.Scanner 接口
26 | // 从数据库读取值([]byte, string, nil)并反序列化到 j.Val
27 | func (j *JsonColumn[T]) Scan(src any) error {
28 | if src == nil {
29 | // 数据库值为 NULL:重置为零值,Valid = false
30 | var zero T
31 | j.Val = zero
32 | j.Valid = false
33 | return nil
34 | }
35 |
36 | var bs []byte
37 | switch v := src.(type) {
38 | case []byte:
39 | bs = v
40 | case string:
41 | bs = []byte(v)
42 | default:
43 | return fmt.Errorf("JsonColumn.Scan: unsupported src type %T", src)
44 | }
45 |
46 | if err := json.Unmarshal(bs, &j.Val); err != nil {
47 | return fmt.Errorf("JsonColumn.Scan: failed to unmarshal JSON: %w", err)
48 | }
49 | j.Valid = true
50 | return nil
51 | }
52 |
53 | // MarshalJSON 实现 json.Marshaler 接口
54 | // 当 Valid 为 true 时,直接序列化 Val 的值;否则序列化为 null
55 | func (j JsonColumn[T]) MarshalJSON() ([]byte, error) {
56 | if !j.Valid {
57 | return []byte("null"), nil
58 | }
59 | return json.Marshal(j.Val)
60 | }
61 |
62 | // UnmarshalJSON 实现 json.Unmarshaler 接口
63 | // 当输入为 null 时,设置 Valid 为 false;否则反序列化到 Val
64 | func (j *JsonColumn[T]) UnmarshalJSON(data []byte) error {
65 | if string(data) == "null" {
66 | var zero T
67 | j.Val = zero
68 | j.Valid = false
69 | return nil
70 | }
71 | if err := json.Unmarshal(data, &j.Val); err != nil {
72 | return err
73 | }
74 | j.Valid = true
75 | return nil
76 | }
77 |
--------------------------------------------------------------------------------
/observationX/opentelemetryX/otel_test.go:
--------------------------------------------------------------------------------
1 | package opentelemetryX
2 |
3 | import (
4 | "context"
5 | "github.com/gin-gonic/gin"
6 | "github.com/stretchr/testify/assert"
7 | "go.opentelemetry.io/otel/attribute"
8 | "go.opentelemetry.io/otel/exporters/zipkin"
9 | "go.opentelemetry.io/otel/trace"
10 | "testing"
11 | "time"
12 | )
13 |
14 | func TestNewOtelStr(t *testing.T) {
15 | // 使用zipkin的实现trace.SpanExporter
16 | exporter, err := zipkin.New("http://localhost:9411/api/v2/spans") // zipkin exporter
17 | assert.NoError(t, err)
18 | // 初始化全局链路追踪
19 | ct, err := NewOtelStr(SvcInfo{ServiceName: "hgg", ServiceVersion: "v0.0.1"}, exporter)
20 | assert.NoError(t, err)
21 |
22 | // main方法里defer住
23 | defer func() {
24 | ctx, cancel := context.WithTimeout(context.Background(), time.Second)
25 | defer cancel()
26 | ct(ctx)
27 | }()
28 |
29 | // 可以在其他业务中,结构体,依赖注入方式,注入 trace.Tracer ,链路追踪方法使用【提前初始化好-全局链路追踪】
30 | otr := NewOtelTracerStr()
31 | tracer := otr.NewTracer("gitee.com/hgg_test/jksj-study/opentelemetry")
32 |
33 | server(tracer)
34 | }
35 |
36 | // ===============================
37 | // ===============================
38 |
39 | func server(tracer trace.Tracer) {
40 | server := gin.Default()
41 | server.GET("/test", func(ginCtx *gin.Context) {
42 | // 名字唯一
43 | //tracer := otel.Tracer("gitee.com/hgg_test/jksj-study/opentelemetry")
44 | var ctx context.Context = ginCtx
45 | // 创建 span
46 | ctx, span := tracer.Start(ctx, "top_span")
47 | defer span.End()
48 |
49 | time.Sleep(time.Second)
50 | span.AddEvent("发生了什么事情") // 添加事件,强调在某个时间点/某个时间发生了什么
51 |
52 | ctx, subSpan := tracer.Start(ctx, "sub_span")
53 | defer subSpan.End()
54 |
55 | subSpan.SetAttributes(attribute.String("attr1", "value1")) // 添加属性, 强调在上下文里面有什么数据
56 | time.Sleep(time.Millisecond * 300)
57 |
58 | ginCtx.String(200, "hello world, 测试span")
59 | })
60 | server.Run(":8082")
61 | }
62 |
--------------------------------------------------------------------------------
/channelx/mqX/kafkaX/saramaX/consumerX/consumer_test.go:
--------------------------------------------------------------------------------
1 | package consumerX
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "log"
7 | "testing"
8 | "time"
9 |
10 | "gitee.com/hgg_test/pkg_tool/v2/channelx/mqX"
11 | "github.com/IBM/sarama"
12 | "github.com/stretchr/testify/assert"
13 | )
14 |
15 | var addr []string = []string{"localhost:9094"}
16 |
17 | // // ============消费数据后业务逻辑=================
18 | type MyHandler struct{}
19 |
20 | // IsBatch 业务处理逻辑是否批量
21 | func (h *MyHandler) IsBatch() bool {
22 | return false
23 | }
24 |
25 | func (h *MyHandler) HandleBatch(ctx context.Context, msgs []*mqX.Message) (success bool, err error) {
26 | //var events []mqX.UserEventTest
27 | //for _, v := range msgs {
28 | // var event mqX.UserEventTest
29 | // if er := json.Unmarshal(v.Value, &event); er != nil {
30 | // return false, er
31 | // }
32 | // events = append(events, event)
33 | //}
34 | //log.Println("Received events:", events)
35 | return true, nil
36 | }
37 |
38 | func (h *MyHandler) Handle(ctx context.Context, msg *mqX.Message) error {
39 | var event mqX.UserEventTest
40 | if err := json.Unmarshal(msg.Value, &event); err != nil {
41 | return err
42 | }
43 | // 处理业务逻辑
44 | log.Println("Received event:", event)
45 | return nil
46 | }
47 |
48 | // ============消费者=================
49 | // 测试的消费者
50 | func TestNewKafkaConsumer(t *testing.T) {
51 | cfg := sarama.NewConfig()
52 | saramaCG, err := sarama.NewConsumerGroup(addr, "test_group", cfg)
53 | if err != nil {
54 | t.Skipf("无法连接 Kafka: %v", err)
55 | return
56 | }
57 | defer saramaCG.Close()
58 |
59 | // 创建你的封装消费者
60 | kafkaConsumer := NewKafkaConsumer(saramaCG, &ConsumerConfig{
61 | BatchSize: 20, // 批量大小
62 | BatchTimeout: 3 * time.Second, // 批量超时时间
63 | })
64 |
65 | // 调用你的通用接口方法,消费消息
66 | err = kafkaConsumer.Subscribe(context.Background(), []string{"user-events"}, &MyHandler{})
67 | assert.NoError(t, err)
68 |
69 | }
70 |
--------------------------------------------------------------------------------
/rpc/go_zero/go_zero_example_test.go:
--------------------------------------------------------------------------------
1 | package go_zero
2 |
3 | import (
4 | "context"
5 | "github.com/stretchr/testify/require"
6 | "github.com/stretchr/testify/suite"
7 | "github.com/zeromicro/go-zero/core/discov"
8 | "github.com/zeromicro/go-zero/zrpc"
9 | "google.golang.org/grpc"
10 | "testing"
11 | "time"
12 | )
13 |
14 | type GoZeroTestSuite struct {
15 | suite.Suite
16 | }
17 |
18 | // TestGoZeroClient 启动 grpc 客户端
19 | func (s *GoZeroTestSuite) TestGoZeroClient() {
20 | zClient := zrpc.MustNewClient(zrpc.RpcClientConf{
21 | Etcd: discov.EtcdConf{
22 | Hosts: []string{"localhost:12379"},
23 | Key: "user",
24 | },
25 | },
26 | zrpc.WithDialOption(
27 | grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
28 | ))
29 | client := NewUserServiceClient(zClient.Conn())
30 | for i := 0; i < 10; i++ {
31 | ctx, cancel := context.WithTimeout(context.Background(), time.Second)
32 | resp, err := client.GetById(ctx, &GetByIdRequest{
33 | Id: 123,
34 | })
35 | cancel()
36 | require.NoError(s.T(), err)
37 | s.T().Log(resp.User)
38 | }
39 | }
40 |
41 | // TestGoZeroServer 启动 grpc 服务端
42 | func (s *GoZeroTestSuite) TestGoZeroServer() {
43 | go func() {
44 | s.startServer(":8090")
45 | }()
46 | s.startServer(":8091")
47 | }
48 |
49 | func (s *GoZeroTestSuite) startServer(addr string) {
50 | // 正常来说,这个都是从配置文件中读取的
51 | //var c zrpc.RpcServerConf
52 | // 类似与 main 函数那样,从命令行接收配置文件的路径
53 | //conf.MustLoad(*configFile, &c)
54 | c := zrpc.RpcServerConf{
55 | ListenOn: addr,
56 | Etcd: discov.EtcdConf{
57 | Hosts: []string{"localhost:12379"},
58 | Key: "user",
59 | },
60 | }
61 | // 创建一个服务器,并且注册服务实例
62 | server := zrpc.MustNewServer(c, func(grpcServer *grpc.Server) {
63 | RegisterUserServiceServer(grpcServer, &Service{
64 | Name: addr,
65 | })
66 | })
67 |
68 | // 这个是往 gRPC 里面增加拦截器(也可以叫做插件)
69 | // server.AddUnaryInterceptors(interceptor)
70 | server.Start()
71 | }
72 |
73 | func TestGoZero(t *testing.T) {
74 | suite.Run(t, new(GoZeroTestSuite))
75 | }
76 |
--------------------------------------------------------------------------------
/DBx/mysqlX/gormx/gorm_help:
--------------------------------------------------------------------------------
1 | /*
2 | gorm模型定义:
3 | https://gorm.io/zh_CN/docs/models.html
4 | https://gorm.io/zh_CN/docs/associations.html#tags
5 |
6 | eg:
7 | type User struct {
8 | Id int64 `gorm:"primaryKey, autoIncrement"` // 主键和自增
9 | Email sql.NullString `gorm:"unique"` // 唯一索引,代表这是一个可以为 NULL 的列,也可以写成 Email *string
10 | Password string
11 | Phone sql.NullString `gorm:"unique"` // 唯一索引,代表这是一个可以为 NULL 的列,也可以写成 Phone *string
12 |
13 | NickName string `gorm:"column:nick_name;type:varchar(128);size:128"` // 用户名
14 | Birthday string // 生日
15 | AboutMe string `gorm:"column:about_me;type=varchar(4096);size:4096"` // 个人简介
16 |
17 | // 创建时间,时间关系到时区问题,服务器、数据库、程序等不在同一地方,尽量同一用UTC 0 的毫秒数,【做时区转换时候,在返回数据给前端时候做或者让前端处理】
18 | Ctime int64
19 | // 更新时间
20 | Utime int64
21 |
22 | ////Json存储,存在领域层对不上的问题,domain层的Address结构体过来建表,可能是一个字段/一张表
23 | //Addr string
24 |
25 | // 1、如果查询要求同时使用 openid 和 unionid,那么可以使用联合索引
26 | // 2、如果查询只用openid,那么只在openid上面创建唯一索引,或者联合索引,注意前后
27 | // 3、如果查询只用unionid,那么只在unionid上面创建唯一索引,或者联合索引,注意前后
28 | WechatOpenId sql.NullString `gorm:"unique"`
29 | WechatUnionId sql.NullString
30 | }
31 |
32 | type Interactive struct {
33 | Id int64 `gorm:"primaryKey, autoIncrement"` //主键
34 |
35 | // ,联合索引,bizId和id建立一个联合索引biz_type_id 【前后顺序影响查询速度】
36 | BizId int64 `gorm:"uniqueIndex:biz_type_id"` //业务id
37 | Biz string `gorm:"type:varchar(128);uniqueIndex:biz_type_id"` //业务类型
38 |
39 | ReadCnt int64 //阅读次数
40 | LikeCnt int64 //点赞次数
41 | CollectCnt int64 //收藏次数
42 | Utime int64 //更新时间
43 | Ctime int64 //创建时间
44 | }
45 | */
--------------------------------------------------------------------------------
/rpc/grpcx/observationX/grpcLogX/grpcLogX.go:
--------------------------------------------------------------------------------
1 | package grpcLogX
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "gitee.com/hgg_test/pkg_tool/v2/logx"
7 | "gitee.com/hgg_test/pkg_tool/v2/rpc/grpcx/observationX"
8 | "google.golang.org/grpc"
9 | "google.golang.org/grpc/codes"
10 | "google.golang.org/grpc/status"
11 | "runtime"
12 | "time"
13 | )
14 |
15 | type InterceptorBuilder struct {
16 | l logx.Loggerx
17 | observationX.Builder
18 | }
19 |
20 | func NewInterceptorBuilder(l logx.Loggerx) *InterceptorBuilder {
21 | return &InterceptorBuilder{l: l}
22 | }
23 |
24 | func (b *InterceptorBuilder) BuildServerUnaryInterceptor() grpc.UnaryServerInterceptor {
25 | return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
26 | start := time.Now()
27 | event := "normal"
28 | defer func() {
29 | // 最终输出日志
30 | cost := time.Since(start)
31 |
32 | // 发生了 panic
33 | if rec := recover(); rec != nil {
34 | switch re := rec.(type) {
35 | case error:
36 | err = re
37 | default:
38 | err = fmt.Errorf("%v", rec)
39 | }
40 | event = "recover"
41 | stack := make([]byte, 4096)
42 | stack = stack[:runtime.Stack(stack, true)]
43 | err = status.New(codes.Internal, "panic, err "+err.Error()).Err()
44 | }
45 |
46 | fields := []logx.Field{
47 | // unary stream 是 grpc 的两种调用形态
48 | logx.String("type", "unary"),
49 | logx.Int64("cost", cost.Milliseconds()),
50 | logx.String("event", event),
51 | logx.String("method", info.FullMethod),
52 | // 客户端的信息
53 | logx.String("peer", b.PeerName(ctx)),
54 | logx.String("peer_ip", b.PeerIP(ctx)),
55 | }
56 | st, _ := status.FromError(err)
57 | if st != nil {
58 | // 错误码
59 | fields = append(fields, logx.String("code", st.Code().String()))
60 | fields = append(fields, logx.String("code_msg", st.Message()))
61 | b.l.Error("RPC调用", fields...)
62 | } else {
63 | b.l.Info("RPC调用", fields...)
64 | }
65 | }()
66 | resp, err = handler(ctx, req)
67 | return
68 | }
69 | }
70 |
--------------------------------------------------------------------------------
/channelx/messageQueuex/saramax/saramaConsumerx/serviceLogic/serviceLogic.go:
--------------------------------------------------------------------------------
1 | package serviceLogic
2 |
3 | import (
4 | "gitee.com/hgg_test/pkg_tool/v2/logx"
5 | "github.com/IBM/sarama"
6 | )
7 |
8 | /*
9 | ====================================
10 | 普通的利用context超时控制的消费者处理逻辑
11 | ====================================
12 | */
13 |
14 | // SvcLogicFn 消费消息后的,自身业务逻辑处理eg:入库、缓存...
15 | type SvcLogicFn[EvenT any] func(msg *sarama.ConsumerMessage, event EvenT) error
16 |
17 | // SvcLogicFns 批量消费后的,自身业务逻辑处理eg:入库、缓存...
18 | type SvcLogicFns[EvenT any] func(msgs []*sarama.ConsumerMessage, event []EvenT) error
19 |
20 | // SaramaConsumerGroupMessage 消费者组消息处理【消费消息后的,自身业务逻辑处理eg:入库、缓存...】
21 | type SaramaConsumerGroupMessage[EvenT any] struct {
22 | // 日志注入
23 | L logx.Loggerx
24 | // 消费消息后的,自身业务逻辑处理eg:入库、缓存...
25 | SvcLogicFn[EvenT]
26 | // 【批量】消费后的,自身业务逻辑处理eg:入库、缓存...
27 | SvcLogicFns[EvenT]
28 |
29 | // Offset配置指定偏移量消费【消费历史消息、或者从某个消息开始消费】
30 | IsOffset bool
31 | OffsetTopic string
32 | Offset int64
33 |
34 | // 批量消费配置
35 | IsBatch bool
36 | BatchSize int64
37 | }
38 |
39 | // NewSaramaConsumerGroupMessage 创建消费者组消息处理【消费消息后的,自身业务逻辑处理eg:入库、缓存...】
40 | // - fn 消费消息后的,自身业务逻辑处理eg:入库、缓存...
41 | // - SvcLogicFns 批量消费后的,自身业务逻辑处理eg:入库、缓存...
42 | // - 使用fn的话,fns传nil即可,同理反之
43 | func NewSaramaConsumerGroupMessage[EvenT any](l logx.Loggerx, fn SvcLogicFn[EvenT], fns SvcLogicFns[EvenT]) *SaramaConsumerGroupMessage[EvenT] {
44 | return &SaramaConsumerGroupMessage[EvenT]{
45 | L: l,
46 | SvcLogicFn: fn,
47 | SvcLogicFns: fns,
48 | IsOffset: false,
49 | Offset: 0,
50 | IsBatch: false,
51 | BatchSize: 0,
52 | }
53 | }
54 |
55 | // SetOffset 设置偏移量消费【消费历史消息、或者从某个消息开始消费】
56 | func (s *SaramaConsumerGroupMessage[EvenT]) SetOffset(IsOffset bool, OffsetTopic string, Offset int64) {
57 | s.IsOffset = IsOffset
58 | s.OffsetTopic = OffsetTopic
59 | s.Offset = Offset
60 | }
61 |
62 | // SetBatch 批量消费配置
63 | func (s *SaramaConsumerGroupMessage[EvenT]) SetBatch(IsBatch bool, BatchSize int64) {
64 | s.IsBatch = IsBatch
65 | s.BatchSize = BatchSize
66 | }
67 |
--------------------------------------------------------------------------------
/channelx/mqX/kafkaX/saramaX/producerX/pro_test.go:
--------------------------------------------------------------------------------
1 | package producerX
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "strconv"
7 | "testing"
8 | "time"
9 |
10 | "gitee.com/hgg_test/pkg_tool/v2/channelx/mqX"
11 | "github.com/stretchr/testify/assert"
12 | "golang.org/x/sync/errgroup"
13 | )
14 |
15 | var addr []string = []string{"localhost:9094"}
16 |
17 | func TestNewKafkaProducer(t *testing.T) {
18 |
19 | // =========创建异步批量生产者=========
20 | pro, err := NewKafkaProducer(addr, &ProducerConfig{
21 | Async: true, // true默认为异步批量发送
22 | BatchSize: 200, // 批量发送消息数量
23 | BatchTimeout: 3 * time.Second, // 批量发送消息时间间隔
24 | })
25 | if err != nil {
26 | t.Skipf("无法连接 Kafka (异步生产者): %v", err)
27 | return
28 | }
29 |
30 | // =========创建同步生产者=========
31 | pros, err := NewKafkaProducer(addr, &ProducerConfig{
32 | Async: false, // false为同步
33 | })
34 | if err != nil {
35 | pro.Close()
36 | t.Skipf("无法连接 Kafka (同步生产者): %v", err)
37 | return
38 | }
39 |
40 | defer func() {
41 | pro.Close()
42 | pros.Close()
43 | }()
44 |
45 | user := mqX.UserEventTest{
46 | UserId: 1,
47 | Name: "hggTest",
48 | }
49 | val, err := json.Marshal(&user)
50 | assert.NoError(t, err)
51 |
52 | // 并发执行按单个发送和批量发送生产者
53 | var eg errgroup.Group
54 | for i := 0; i < 5; i++ {
55 | eg.Go(func() error {
56 | return pro.Send(context.Background(), &mqX.Message{
57 | Topic: "user-events",
58 | Key: []byte("user-123"),
59 | Value: val,
60 | })
61 | })
62 | }
63 | time.Sleep(time.Second)
64 | eg.Go(func() error {
65 | var ms []*mqX.Message
66 | var er error
67 | for i := 0; i < 20; i++ {
68 | use := mqX.UserEventTest{
69 | UserId: int64(i + 1),
70 | Name: "hggTest" + strconv.Itoa(i+1),
71 | }
72 | va, e := json.Marshal(&use)
73 | assert.NoError(t, e)
74 | ms = append(ms, &mqX.Message{Topic: "user-events", Value: va})
75 | }
76 | err = pros.SendBatch(context.Background(), ms)
77 | assert.NoError(t, err)
78 | return er
79 | })
80 | err = eg.Wait()
81 | assert.NoError(t, err)
82 |
83 | }
84 |
--------------------------------------------------------------------------------
/DBx/redisX/redisMonitorX/redisPrometheusx/redosPrometheusTimex.go:
--------------------------------------------------------------------------------
1 | package redisPrometheusx
2 |
3 | /*
4 | 监控redis命令耗时
5 | */
6 |
7 | import (
8 | "context"
9 | "github.com/prometheus/client_golang/prometheus"
10 | "github.com/redis/go-redis/v9"
11 | "strings"
12 | "time"
13 | )
14 |
15 | type PrometheusHookTime struct {
16 | histogram *prometheus.HistogramVec
17 | }
18 |
19 | // NewPrometheusRedisHookTime 监控命令耗时
20 | func NewPrometheusRedisHookTime(opts prometheus.HistogramOpts) *PrometheusHookTime {
21 | // 标签:命令名、是否成功、错误类型、业务标识
22 | h := &PrometheusHookTime{
23 | histogram: prometheus.NewHistogramVec(
24 | opts,
25 | []string{"cmd", "success", "error_type", "biz"},
26 | ),
27 | }
28 | prometheus.MustRegister(h.histogram)
29 | return h
30 | }
31 |
32 | func (p *PrometheusHookTime) DialHook(next redis.DialHook) redis.DialHook {
33 | return next // 连接阶段不监控(或可单独加监控)
34 | }
35 |
36 | func (p *PrometheusHookTime) ProcessHook(next redis.ProcessHook) redis.ProcessHook {
37 | return func(ctx context.Context, cmd redis.Cmder) error {
38 | start := time.Now()
39 | err := next(ctx, cmd) // 执行命令,获取真实错误
40 |
41 | duration := time.Since(start).Seconds() // Prometheus 标准单位:秒
42 | cmdName := strings.ToLower(cmd.Name()) // 统一小写,避免 cardinality 爆炸
43 |
44 | // 提取业务标识 biz
45 | // 官方推荐context.WithValue中key用结构体
46 | type key struct {
47 | key1 string
48 | }
49 | biz, ok := ctx.Value(key{key1: "biz"}).(string)
50 | if biz == "" || !ok {
51 | biz = "unknown"
52 | }
53 |
54 | // 分析错误类型
55 | success := "true"
56 | errorType := "none"
57 | if err != nil {
58 | success = "false"
59 | if err == redis.Nil {
60 | errorType = "key_not_found"
61 | } else {
62 | errorType = "other" // 可扩展为 timeout, connection_error 等
63 | }
64 | }
65 |
66 | // 记录指标
67 | p.histogram.WithLabelValues(cmdName, success, errorType, biz).Observe(duration)
68 |
69 | return err
70 | }
71 | }
72 |
73 | func (p *PrometheusHookTime) ProcessPipelineHook(next redis.ProcessPipelineHook) redis.ProcessPipelineHook {
74 | return next // 如需监控 pipeline,可类似 ProcessHook 实现
75 | }
76 |
--------------------------------------------------------------------------------
/serviceLogicX/cronX/cron_test.go.bak:
--------------------------------------------------------------------------------
1 | package cronX
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "github.com/robfig/cron/v3"
7 | "github.com/stretchr/testify/assert"
8 | "log"
9 | "testing"
10 | "time"
11 | )
12 |
13 | func TestCron(t *testing.T) {
14 | expr := cron.New(cron.WithSeconds()) // 秒级
15 | // https://help.aliyun.com/document_detail/133509.html可以参考这个,任务调度cron表达式
16 | id, err := expr.AddFunc("@every 5s", func() { // 5秒一次定时任务
17 | t.Log("5秒一次定时任务")
18 | })
19 | assert.NoError(t, err)
20 | t.Log("任务id: ", id)
21 |
22 | // 获取所有任务条目
23 | entries := expr.Entries()
24 | fmt.Printf("当前有 %d 个任务\n", len(entries))
25 | for k, v := range entries {
26 | t.Logf("第%d个任务,任务详情:%v", k+1, v)
27 | }
28 |
29 | expr.Start() // 启动定时器
30 |
31 | // 运行1分钟,5秒一次,任务需持续运行的话实际也可在main控制stop退出
32 | time.Sleep(time.Minute)
33 |
34 | ctx := expr.Stop() // 暂停定时器,不调度新任务执行了,正在执行的继续执行
35 | t.Log("发出停止信号")
36 | <-ctx.Done() // 彻底停止定时器
37 | t.Log("彻底停止,没有任务执行了")
38 | }
39 |
40 | func TestCronTicker(t *testing.T) {
41 | ctx, cancelc := context.WithTimeout(context.Background(), time.Minute)
42 | defer cancelc()
43 |
44 | // 每10分钟执行一次入库
45 | ticker := time.NewTicker(time.Second * 5)
46 | defer ticker.Stop()
47 |
48 | loop:
49 | for {
50 | select {
51 | case <-ticker.C:
52 | // 时间到了,可以执行任务了
53 | // 【限制任务总时间的话,eg: 5秒运行一次任务,总计1分钟,运行12次,那么for外部创建1分钟的context.WithTimeout】
54 | log.Println("每10秒执行一次任务")
55 | case <-ctx.Done():
56 | log.Println("任务持续总时长一分钟,任务结束")
57 | //return
58 | break loop // break不能中断当前for循环,【但是可以使用golang中标签,break loop就能跳出指定循环位置】
59 | }
60 | }
61 |
62 | t.Log("跳出循环,任务结束")
63 | }
64 |
65 | func TestCronTickerV1(t *testing.T) {
66 | ctx, cancelc := context.WithTimeout(context.Background(), time.Minute)
67 | defer cancelc()
68 |
69 | // 每10分钟执行一次入库
70 | ticker := time.NewTicker(time.Second * 5)
71 | defer ticker.Stop()
72 |
73 | for {
74 | select {
75 | case <-ticker.C:
76 | // 时间到了,可以执行任务了
77 | // 【限制任务总时间的话,eg: 5秒运行一次任务,总计1分钟,运行12次,那么for外部创建1分钟的context.WithTimeout】
78 | log.Println("每10秒执行一次任务")
79 | case <-ctx.Done():
80 | log.Println("任务持续总时长一分钟,任务结束")
81 | goto end // 跳出当前for循环【最好不用goto ,可读性差】
82 | }
83 | }
84 |
85 | end:
86 | t.Log("跳出循环,任务结束")
87 | }
88 |
--------------------------------------------------------------------------------
/webx/ginx/middleware/limitX/tokenBocket.go:
--------------------------------------------------------------------------------
1 | package limitX
2 |
3 | import (
4 | "net/http"
5 | "sync"
6 | "time"
7 |
8 | "github.com/gin-gonic/gin"
9 | )
10 |
11 | // TokenBucketLimiter 令牌桶限流算法
12 | type TokenBucketLimiter struct {
13 | // 隔多久产生一个令牌
14 | interval time.Duration // 令牌产生的时间间隔
15 | buckets chan struct{} // 令牌桶
16 | closeCh chan struct{} // 关闭信号
17 | closeOnce sync.Once // 关闭信号只关闭一次
18 | started bool // 标记是否已启动goroutine
19 | mu sync.Mutex // 保护started字段
20 | }
21 |
22 | // NewTokenBucketBuilder 创建令牌桶限流算法
23 | // - interval 令牌产生时间间隔
24 | // - capacity 令牌桶容量
25 | func NewTokenBucketBuilder(interval time.Duration, capacity int) *TokenBucketLimiter {
26 | bucket := make(chan struct{}, capacity)
27 | closec := make(chan struct{})
28 | limiter := &TokenBucketLimiter{
29 | interval: interval,
30 | buckets: bucket,
31 | closeCh: closec,
32 | started: false,
33 | }
34 | // 在构造函数中启动goroutine
35 | limiter.startTokenGenerator()
36 | return limiter
37 | }
38 |
39 | // startTokenGenerator 启动令牌生成goroutine(只启动一次)
40 | func (c *TokenBucketLimiter) startTokenGenerator() {
41 | c.mu.Lock()
42 | defer c.mu.Unlock()
43 |
44 | if c.started {
45 | return // 已启动,避免重复
46 | }
47 | c.started = true
48 |
49 | // 发令牌
50 | ticker := time.NewTicker(c.interval)
51 | go func() {
52 | defer ticker.Stop() // 确保 ticker 被正确停止,防止资源泄漏
53 | for {
54 | select {
55 | case <-ticker.C:
56 | select {
57 | case c.buckets <- struct{}{}:
58 | default:
59 | // bucket 满了
60 | }
61 | case <-c.closeCh:
62 | return
63 | }
64 | }
65 | }()
66 | }
67 |
68 | // Build 令牌桶限流算法
69 | func (c *TokenBucketLimiter) Build() gin.HandlerFunc {
70 | // 取令牌
71 | return func(ctx *gin.Context) {
72 | select {
73 | case <-c.buckets: // 从桶中取一个令牌
74 | ctx.Next() // 处理请求,可以执行业务代码
75 | // 限流
76 | default:
77 | // 做法1, 直接返回
78 | ctx.AbortWithStatus(http.StatusTooManyRequests)
79 | return
80 | // 做法2,等待超时了再退出【根据context】
81 | //case <-ctx.Done():
82 | // return nil, ctx.Err()
83 | }
84 | }
85 | }
86 |
87 | // Close 关闭限流器,释放资源
88 | func (c *TokenBucketLimiter) Close() error {
89 | c.closeOnce.Do(func() {
90 | close(c.closeCh)
91 | })
92 | return nil
93 | }
94 |
--------------------------------------------------------------------------------
/logx/zaplogx/zapLogger_test.go:
--------------------------------------------------------------------------------
1 | package zaplogx
2 |
3 | import (
4 | "gitee.com/hgg_test/pkg_tool/v2/logx"
5 | "github.com/spf13/viper"
6 | "go.uber.org/zap"
7 | "testing"
8 | )
9 |
10 | func TestNewZapLogger(t *testing.T) {
11 | cfg := zap.NewDevelopmentConfig() // 配置
12 | err := viper.UnmarshalKey("logger", &cfg)
13 | if err != nil {
14 | panic(err)
15 | }
16 |
17 | // 创建一个日志实例
18 | //l, err := zap.NewDevelopment()
19 | l, err := cfg.Build()
20 | if err != nil {
21 | panic(err)
22 | }
23 | log := NewZapLogger(l)
24 | log.Error("测试", logx.String("key", "value"), logx.Int("key", 1))
25 | }
26 |
27 | //func InitLogger() logx.Loggerx {
28 | // cfg := zap.NewDevelopmentConfig() // 配置
29 | // err := viper.UnmarshalKey("logger", &cfg)
30 | // if err != nil {
31 | // panic(err)
32 | // }
33 | //
34 | // // 创建一个日志实例
35 | // //l, err := zap.NewDevelopment()
36 | // l, err := cfg.Build()
37 | // if err != nil {
38 | // panic(err)
39 | // }
40 | // return logger.NewZapLogger(l)
41 | //}
42 | //
43 |
44 | //// InitLogger 使用文件来记录日志
45 | //func InitLogger() logx.Loggerx {
46 | // // 这里我们用一个小技巧,
47 | // // 就是直接使用 zap 本身的配置结构体来处理
48 | // // 配置Lumberjack以支持日志文件的滚动
49 | // lumberjackLogger := &lumberjack.Logger{
50 | // // 注意有没有权限
51 | // //Filename: "/var/log/user.log", // 指定日志文件路径
52 | // Filename: "D:/soft_az/docker/hggPkg/ELK/log/user.log", // 指定日志文件路径
53 | // MaxSize: 50, // 每个日志文件的最大大小,单位:MB
54 | // MaxBackups: 3, // 保留旧日志文件的最大个数
55 | // MaxAge: 28, // 保留旧日志文件的最大天数
56 | // Compress: true, // 是否压缩旧的日志文件
57 | // }
58 | //
59 | // // 创建zap日志核心
60 | // core := zapcore.NewCore(
61 | // zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
62 | // zapcore.AddSync(lumberjackLogger),
63 | // zapcore.DebugLevel, // 设置日志级别
64 | // )
65 | //
66 | // l := zap.New(core, zap.AddCaller())
67 | // res := logger.NewZapLogger(l)
68 | // go func() {
69 | // // 为了演示 ELK,我直接输出日志
70 | // ticker := time.NewTicker(time.Millisecond * 1000)
71 | // for t := range ticker.C {
72 | // res.Info("模拟输出日志", logger.String("time", t.String()))
73 | // }
74 | // }()
75 | // return res
76 | //}
77 |
--------------------------------------------------------------------------------
/rpc/kratosx/kratos_example_test.go:
--------------------------------------------------------------------------------
1 | package kratosx
2 |
3 | import (
4 | "context"
5 | etcd "github.com/go-kratos/kratos/contrib/registry/etcd/v2"
6 | "github.com/go-kratos/kratos/v2"
7 | "github.com/go-kratos/kratos/v2/middleware/recovery"
8 | "github.com/go-kratos/kratos/v2/transport/grpc"
9 | "github.com/stretchr/testify/require"
10 | "github.com/stretchr/testify/suite"
11 | etcdv3 "go.etcd.io/etcd/client/v3"
12 | "testing"
13 | "time"
14 | )
15 |
16 | type KratosTestSuite struct {
17 | suite.Suite
18 | etcdClient *etcdv3.Client
19 | }
20 |
21 | func (s *KratosTestSuite) SetupSuite() {
22 | cli, err := etcdv3.New(etcdv3.Config{
23 | Endpoints: []string{"localhost:12379"},
24 | })
25 | require.NoError(s.T(), err)
26 | s.etcdClient = cli
27 | }
28 |
29 | func (s *KratosTestSuite) TestClient() {
30 |
31 | r := etcd.New(s.etcdClient)
32 | //selector.SetGlobalSelector(random.NewBuilder()) // 随机负载均衡
33 | cc, err := grpc.DialInsecure(context.Background(),
34 | grpc.WithEndpoint("discovery:///user"),
35 | grpc.WithDiscovery(r),
36 | //grpc.WithNodeFilter(func(ctx context.Context, nodes []selector.Node) []selector.Node {
37 | // // 你可以在这里过滤一些东西
38 | // res := make([]selector.Node, 0, len(nodes))
39 | // for _, n := range nodes {
40 | // if n.Metadata()["vip"] == "true" {
41 | // res = append(res, n)
42 | // }
43 | // }
44 | // return res
45 | //}),
46 | )
47 | require.NoError(s.T(), err)
48 | defer cc.Close()
49 |
50 | client := NewUserServiceClient(cc)
51 | ctx, cancel := context.WithTimeout(context.Background(), time.Second)
52 | defer cancel()
53 | resp, err := client.GetById(ctx, &GetByIdRequest{
54 | Id: 123,
55 | })
56 | require.NoError(s.T(), err)
57 | s.T().Log(resp.User)
58 | }
59 |
60 | // TestServer 启动服务器
61 | func (s *KratosTestSuite) TestServer() {
62 | grpcSrv := grpc.NewServer(
63 | grpc.Address(":8090"),
64 | grpc.Middleware(recovery.Recovery()),
65 | )
66 | RegisterUserServiceServer(grpcSrv, &Service{})
67 | // etcd 注册中心
68 | r := etcd.New(s.etcdClient)
69 | app := kratos.New(
70 | kratos.Name("user"),
71 | kratos.Server(
72 | grpcSrv,
73 | ),
74 | kratos.Registrar(r),
75 | )
76 | app.Run()
77 | }
78 |
79 | func TestKratos(t *testing.T) {
80 | suite.Run(t, new(KratosTestSuite))
81 | }
82 |
--------------------------------------------------------------------------------
/DBx/redisX/cacheCountServiceX/setConfig.go:
--------------------------------------------------------------------------------
1 | package cacheCountServiceX
2 |
3 | import (
4 | "fmt"
5 | "time"
6 | )
7 |
8 | // Key 生成缓存键
9 | func (i *Count[K, V]) Key(biz string, bizId int64) string {
10 | //return fmt.Sprintf("%s:%s:%d", i.ServiceTypeName, biz, bizId)
11 | return fmt.Sprintf("cnt:%s:%s:%d", i.ServiceTypeName, biz, bizId)
12 | }
13 |
14 | // RankKey 生成排行榜键
15 | func (i *Count[K, V]) RankKey(biz string) string {
16 | //return fmt.Sprintf("%s:%s:rank", i.ServiceTypeName, biz)
17 | return fmt.Sprintf("rank_cnt:%s:%s:rank", i.ServiceTypeName, biz)
18 | }
19 |
20 | // SetCntOpt : true为增加,false为减少【默认为增加】
21 | func (i *Count[K, V]) SetCntOpt(ctp bool) *Count[K, V] {
22 | i.CntOpt = ctp
23 | return i
24 | }
25 |
26 | // SetExpiration : 设置缓存过期时间【默认为5分钟】
27 | func (i *Count[K, V]) SetExpiration(expiration time.Duration) *Count[K, V] {
28 | i.Expiration = expiration
29 | return i
30 | }
31 |
32 | // SetServiceTypeName : 设置服务名eg: like_cnt 【默认为count_service】
33 | func (i *Count[K, V]) SetServiceTypeName(ServiceTypeName string) *Count[K, V] {
34 | i.ServiceTypeName = ServiceTypeName
35 | return i
36 | }
37 |
38 | // SetWeight : 设置本地缓存中数据权重【多用于缓存时间未过期,但是分配内存满了,需释放部分】
39 | func (i *Count[K, V]) SetWeight(weight int64) *Count[K, V] {
40 | i.Weight = weight
41 | return i
42 | }
43 |
44 | // SetRankCacheExpiration 设置排行榜缓存过期时间
45 | func (i *Count[K, V]) SetRankCacheExpiration(expiration time.Duration) *Count[K, V] {
46 | i.RankCacheExpiration = expiration
47 | return i
48 | }
49 |
50 | // SetLuaCnt : 设置Lua脚本,用于增减计数的 Lua 脚本
51 | func (i *Count[K, V]) SetLuaCnt(LuaCnt string) *Count[K, V] {
52 | i.LuaCnt = LuaCnt
53 | // 如果需要,这里可以自动调用 SCRIPT LOAD 并设置 incrScriptSha
54 | return i
55 | }
56 |
57 | // SetGetLuaGetRank : 设置排行榜的 Lua 脚本, 用于获取排行榜的 Lua 脚本
58 | func (i *Count[K, V]) SetGetLuaGetRank(LuaGetRank string) *Count[K, V] {
59 | i.LuaGetRank = LuaGetRank
60 | // 如果需要,这里可以自动调用 SCRIPT LOAD 并设置 getRankScriptSha
61 | return i
62 | }
63 |
64 | // SetRankCount : 设置是否统计排行榜数据
65 | func (i *Count[K, V]) SetRankCount(rankCount bool) *Count[K, V] {
66 | i.RankCount = rankCount
67 | return i
68 | }
69 |
70 | // SetCntTypeConf : 设置获取排行榜数据时的参数
71 | func (i *Count[K, V]) SetCntTypeConf(setCntTypeConf GetCntType) *Count[K, V] {
72 | i.CntTypeConf = setCntTypeConf
73 | return i
74 | }
75 |
--------------------------------------------------------------------------------
/rpc/grpcx/balancer/wrr/wrr.go:
--------------------------------------------------------------------------------
1 | package wrr
2 |
3 | // 在gRPC中接入自己的负载均衡算法
4 | import (
5 | "google.golang.org/grpc/balancer"
6 | "google.golang.org/grpc/balancer/base"
7 | "sync"
8 | )
9 |
10 | // Name 自定义的负载均衡算法的名称
11 | const Name = "custom_weighted_round_robin"
12 |
13 | func newBuilder() balancer.Builder {
14 | return base.NewBalancerBuilder(Name, &PickerBuilder{}, base.Config{HealthCheck: true})
15 | }
16 |
17 | func init() {
18 | balancer.Register(newBuilder())
19 | }
20 |
21 | type PickerBuilder struct {
22 | }
23 |
24 | func (p *PickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
25 | conns := make([]*weightConn, 0, len(info.ReadySCs))
26 | for sc, sci := range info.ReadySCs {
27 | md, _ := sci.Address.Metadata.(map[string]any)
28 | weightVal, _ := md["weight"]
29 | weight, _ := weightVal.(float64)
30 | //if weight == 0 {
31 | //
32 | //}
33 | conns = append(conns, &weightConn{
34 | SubConn: sc,
35 | weight: int(weight),
36 | currentWeight: int(weight),
37 | })
38 | }
39 |
40 | return &Picker{
41 | conns: conns,
42 | }
43 | }
44 |
45 | type Picker struct {
46 | conns []*weightConn
47 | lock sync.Mutex
48 | }
49 |
50 | func (p *Picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
51 | p.lock.Lock()
52 | defer p.lock.Unlock()
53 | if len(p.conns) == 0 {
54 | return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
55 | }
56 | // 总权重
57 | var total int
58 | var maxCC *weightConn
59 | for _, c := range p.conns {
60 | total += c.weight
61 | c.currentWeight = c.currentWeight + c.weight
62 | if maxCC == nil || maxCC.currentWeight < c.currentWeight {
63 | maxCC = c
64 | }
65 | }
66 |
67 | maxCC.currentWeight = maxCC.currentWeight - total
68 |
69 | return balancer.PickResult{
70 | SubConn: maxCC.SubConn,
71 | Done: func(info balancer.DoneInfo) {
72 | // 要在这里进一步调整weight/currentWeight
73 | // failover 要在这里做文章
74 | // 根据调用结果的具体错误信息进行容错
75 | // 1. 如果要是触发了限流了,
76 | // 1.1 你可以考虑直接挪走这个节点,后面再挪回来
77 | // 1.2 你可以考虑直接将 weight/currentWeight 调整到极低
78 | // 2. 触发了熔断呢?
79 | // 3. 降级呢?
80 | },
81 | }, nil
82 |
83 | }
84 |
85 | type weightConn struct {
86 | balancer.SubConn
87 | // 权重
88 | weight int
89 | // 当前权重
90 | currentWeight int
91 |
92 | // 可以用来标记不可用
93 | available bool
94 | }
95 |
--------------------------------------------------------------------------------
/rpc/grpcx/limiter/tokenBucket/tokenBocket.go:
--------------------------------------------------------------------------------
1 | package tokenBucket
2 |
3 | import (
4 | "context"
5 | "sync"
6 | "time"
7 |
8 | "google.golang.org/grpc"
9 | "google.golang.org/grpc/codes"
10 | "google.golang.org/grpc/status"
11 | )
12 |
13 | // TokenBucketLimiter 令牌桶限流算法
14 | type TokenBucketLimiter struct {
15 | // 隔多久产生一个令牌
16 | interval time.Duration // 令牌产生的时间间隔
17 | buckets chan struct{} // 令牌桶
18 | closeCh chan struct{} // 关闭信号
19 | closeOnce sync.Once // 关闭信号只关闭一次
20 | started bool // 标记是否已启动goroutine
21 | mu sync.Mutex // 保护started字段
22 | }
23 |
24 | // NewTokenBucketLimiter 创建令牌桶限流算法
25 | // - interval 令牌产生时间间隔
26 | // - capacity 令牌桶容量
27 | func NewTokenBucketLimiter(interval time.Duration, capacity int) *TokenBucketLimiter {
28 | bucket := make(chan struct{}, capacity)
29 | closec := make(chan struct{})
30 | limiter := &TokenBucketLimiter{
31 | interval: interval,
32 | buckets: bucket,
33 | closeCh: closec,
34 | started: false,
35 | }
36 | // 在构造函数中启动goroutine
37 | limiter.startTokenGenerator()
38 | return limiter
39 | }
40 |
41 | // startTokenGenerator 启动令牌生成goroutine(只启动一次)
42 | func (c *TokenBucketLimiter) startTokenGenerator() {
43 | c.mu.Lock()
44 | defer c.mu.Unlock()
45 |
46 | if c.started {
47 | return // 已启动,避免重复
48 | }
49 | c.started = true
50 |
51 | // 发令牌
52 | ticker := time.NewTicker(c.interval)
53 | go func() {
54 | defer ticker.Stop() // 确保 ticker 被正确停止,防止资源泄漏
55 | for {
56 | select {
57 | case <-ticker.C:
58 | select {
59 | case c.buckets <- struct{}{}:
60 | default:
61 | // bucket 满了
62 | }
63 | case <-c.closeCh:
64 | return
65 | }
66 | }
67 | }()
68 | }
69 |
70 | // BuildServerInterceptor 令牌桶限流算法
71 | func (c *TokenBucketLimiter) BuildServerInterceptor() grpc.UnaryServerInterceptor {
72 | // 取令牌
73 | return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
74 | select {
75 | case <-c.buckets: // 从桶中取一个令牌
76 | return handler(ctx, req) // 处理请求,可以执行业务代码
77 | //做法1
78 | default:
79 | return nil, status.Errorf(codes.ResourceExhausted, "限流")
80 | // 做法2,等待超时了再退出【根据context】
81 | //case <-ctx.Done():
82 | // return nil, ctx.Err()
83 | }
84 | }
85 | }
86 |
87 | // Close 关闭限流器,释放资源
88 | func (c *TokenBucketLimiter) Close() error {
89 | c.closeOnce.Do(func() {
90 | close(c.closeCh)
91 | })
92 | return nil
93 | }
94 |
--------------------------------------------------------------------------------
/rpc/grpcx/observationX/prometeusX/builder.go:
--------------------------------------------------------------------------------
1 | package prometeusX
2 |
3 | import (
4 | "context"
5 | "gitee.com/hgg_test/pkg_tool/v2/rpc/grpcx/observationX"
6 | "github.com/prometheus/client_golang/prometheus"
7 | "google.golang.org/grpc"
8 | "google.golang.org/grpc/status"
9 | "strings"
10 | "time"
11 | )
12 |
13 | type InterceptorBuilder struct {
14 | Namespace string
15 | Subsystem string
16 | Name string
17 | InstanceId string
18 | Help string
19 | observationX.Builder
20 | }
21 |
22 | // NewInterceptorBuilder 创建 prometheus 拦截器
23 | func NewInterceptorBuilder(namespace string, subsystem string, name string, instanceId string, help string) *InterceptorBuilder {
24 | return &InterceptorBuilder{Namespace: namespace, Subsystem: subsystem, Name: name, InstanceId: instanceId, Help: help}
25 | }
26 |
27 | func (b *InterceptorBuilder) BuildServerUnaryInterceptor() grpc.UnaryServerInterceptor {
28 | // pattern 是指命中的路由
29 | labels := []string{"type", "service", "method", "peer", "code"}
30 | vector := prometheus.NewSummaryVec(prometheus.SummaryOpts{
31 | Namespace: b.Namespace,
32 | Subsystem: b.Subsystem,
33 | Help: b.Help,
34 | // Namespace 和 Subsystem 和 Name 都不能有 _ 以外的其它符号
35 | Name: b.Name + "_resp_time",
36 | ConstLabels: map[string]string{
37 | "instance_id": b.InstanceId,
38 | },
39 | Objectives: map[float64]float64{
40 | 0.5: 0.01,
41 | 0.75: 0.01,
42 | 0.9: 0.01,
43 | 0.99: 0.001,
44 | 0.999: 0.0001,
45 | },
46 | }, labels)
47 | prometheus.MustRegister(vector)
48 | return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
49 | start := time.Now()
50 | defer func() {
51 | sn, method := b.splitMethodName(info.FullMethod)
52 | code := "OK"
53 | if err != nil {
54 | st, _ := status.FromError(err)
55 | code = st.Code().String()
56 | }
57 | cost := float64(time.Since(start).Milliseconds())
58 | vector.WithLabelValues("unary", sn, method, b.PeerName(ctx), code).Observe(cost)
59 | }()
60 | resp, err = handler(ctx, req)
61 | return
62 | }
63 | }
64 |
65 | func (b *InterceptorBuilder) splitMethodName(fullMethodName string) (string, string) {
66 | fullMethodName = strings.TrimPrefix(fullMethodName, "/") // remove leading slash
67 | if i := strings.Index(fullMethodName, "/"); i >= 0 {
68 | return fullMethodName[:i], fullMethodName[i+1:]
69 | }
70 | return "unknown", "unknown"
71 | }
72 |
--------------------------------------------------------------------------------
/sliceX/diff_test.go:
--------------------------------------------------------------------------------
1 | package sliceX
2 |
3 | import (
4 | "fmt"
5 | "sort"
6 | "testing"
7 |
8 | "github.com/stretchr/testify/assert"
9 | )
10 |
11 | func TestDiffSet(t *testing.T) {
12 | tests := []struct {
13 | name string
14 | src []int
15 | dst []int
16 | want []int
17 | }{
18 | {
19 | want: []int{7},
20 | src: []int{1, 3, 5, 7},
21 | dst: []int{1, 3, 5},
22 | name: "diff 1",
23 | },
24 | {
25 | src: []int{1, 3, 5},
26 | dst: []int{1, 3, 5, 7},
27 | want: []int{},
28 | name: "src less than dst",
29 | },
30 | {
31 | src: []int{1, 3, 5, 7, 7},
32 | dst: []int{1, 3, 5},
33 | want: []int{7},
34 | name: "diff deduplicate",
35 | },
36 | {
37 | src: []int{1, 1, 3, 5, 7},
38 | dst: []int{1, 3, 5, 5},
39 | want: []int{7},
40 | name: "dst duplicate ele",
41 | },
42 | }
43 | for _, tt := range tests {
44 | t.Run(tt.name, func(t *testing.T) {
45 | res := DiffSet[int](tt.src, tt.dst)
46 | assert.ElementsMatch(t, tt.want, res)
47 | })
48 | }
49 | }
50 |
51 | func TestDiffSetFunc(t *testing.T) {
52 | tests := []struct {
53 | name string
54 | src []int
55 | dst []int
56 | want []int
57 | }{
58 | {
59 | want: []int{7},
60 | src: []int{1, 3, 5, 7},
61 | dst: []int{1, 3, 5},
62 | name: "diff 1",
63 | },
64 | {
65 | src: []int{1, 3, 5},
66 | dst: []int{1, 3, 5, 7},
67 | want: []int{},
68 | name: "src less than dst",
69 | },
70 | {
71 | src: []int{1, 3, 5, 7, 7},
72 | dst: []int{1, 3, 5},
73 | want: []int{7},
74 | name: "diff deduplicate",
75 | },
76 | {
77 | src: []int{1, 1, 3, 5, 7},
78 | dst: []int{1, 3, 5, 5},
79 | want: []int{7},
80 | name: "dst duplicate ele",
81 | },
82 | }
83 | for _, tt := range tests {
84 | t.Run(tt.name, func(t *testing.T) {
85 | res := DiffSetFunc[int](tt.src, tt.dst, func(src, dst int) bool {
86 | return src == dst
87 | })
88 | assert.ElementsMatch(t, tt.want, res)
89 | })
90 | }
91 | }
92 |
93 | func ExampleDiffSet() {
94 | res := DiffSet[int]([]int{1, 3, 2, 2, 4}, []int{3, 4, 5, 6})
95 | sort.Ints(res)
96 | fmt.Println(res)
97 | // Output:
98 | // [1 2]
99 | }
100 |
101 | func ExampleDiffSetFunc() {
102 | res := DiffSetFunc[int]([]int{1, 3, 2, 2, 4}, []int{3, 4, 5, 6}, func(src, dst int) bool {
103 | return src == dst
104 | })
105 | fmt.Println(res)
106 | // Output:
107 | // [1 2]
108 | }
109 |
--------------------------------------------------------------------------------
/channelx/mqX/types.go:
--------------------------------------------------------------------------------
1 | package mqX
2 |
3 | // messageQueuex/mq.go
4 |
5 | import "context"
6 |
7 | // Message 通用消息结构,如果需要在处理程序调用之后保留它们,【请复制】。
8 | // - represents a Kafka message.
9 | // - Note: Key and Value may share memory with internal buffers.
10 | // - Make a copy if you need to retain them beyond the handler call.
11 | // - 表示Kafka消息。
12 | // - 注意:Key和Value可以与内部缓冲区共享内存。
13 | // - 如果需要在处理程序调用之后保留它们,【请复制】。
14 | // - 例如,可能与主 goroutine 冲突(如果主 goroutine 复用 buffer)
15 | // - go func() {
16 | // log.Println(string(msg.Value))
17 | // }()
18 | // - 更危险:修改 msg
19 | //
20 | // msg.Value[0] = 'X' // 破坏原始数据,且可能影响底层 buffer(见下文)
21 | type Message struct {
22 | Topic string
23 | Key []byte // read-only in handlers; copy if retained, 在处理程序中只读;如果保留则复制
24 | Value []byte // read-only in handlers; copy if retained, 在处理程序中只读;如果保留则复制
25 | }
26 |
27 | // Producer 生产者抽象接口
28 | //
29 | //go:generate mockgen -source=./types.go -package=Producermocks -destination=mocks/Producermocks/mqX.Producermock.go mqX
30 | type Producer interface {
31 | Send(ctx context.Context, msg *Message) error
32 | SendBatch(ctx context.Context, msgs []*Message) error
33 | Close() error
34 | }
35 |
36 | // ConsumerHandlerType 消费者处理接口类型
37 | // - IsBatch() bool: 是否批量处理, true: 批量处理需实现BatchConsumerHandler, false: 单条处理需实现ConsumerHandler
38 | //
39 | //go:generate mockgen -source=./types.go -package=ConsumerHandlerTypemocks -destination=mocks/ConsumerHandlerType/mqX.ConsumerHandlerTypemock.go mqX
40 | type ConsumerHandlerType interface {
41 | IsBatch() bool // 是否批量处理, true: 批量处理需实现BatchConsumerHandler, false: 单条处理需实现ConsumerHandler
42 | ConsumerHandler
43 | BatchConsumerHandler
44 | }
45 |
46 | // ConsumerHandler 单条消费者处理接口
47 | //
48 | //go:generate mockgen -source=./types.go -package=ConsumerHandlermocks -destination=mocks/ConsumerHandler/mqX.ConsumerHandlermock.go mqX
49 | type ConsumerHandler interface {
50 | Handle(ctx context.Context, msg *Message) error
51 | }
52 |
53 | // BatchConsumerHandler 批量消费者处理接口
54 | //
55 | //go:generate mockgen -source=./types.go -package=BatchConsumerHandlermocks -destination=mocks/BatchConsumerHandler/mqX.BatchConsumerHandlermock.go mqX
56 | type BatchConsumerHandler interface {
57 | HandleBatch(ctx context.Context, msgs []*Message) (success bool, err error)
58 | }
59 |
60 | // Consumer 消费者抽象接口
61 | //
62 | //go:generate mockgen -source=./types.go -package=Consumermocks -destination=mocks/Consumer/mqX.Consumerrmock.go mqX
63 | type Consumer interface {
64 | Subscribe(ctx context.Context, topics []string, handler ConsumerHandlerType) error
65 | }
66 |
67 | type UserEventTest struct {
68 | UserId int64
69 | Name string
70 | }
71 |
--------------------------------------------------------------------------------
/syncX/map.go:
--------------------------------------------------------------------------------
1 | package syncX
2 |
3 | import "sync"
4 |
5 | // Map 是对 sync.Map 的一个泛型封装
6 | // 要注意,K 必须是 comparable 的,并且谨慎使用指针作为 K。
7 | // 使用指针的情况下,两个 key 是否相等,仅仅取决于它们的地址
8 | // 而不是地址指向的值。可以参考 Load 测试。
9 | // 注意,key 不存在和 key 存在但是值恰好为零值(如 nil),是两码事
10 | type Map[K comparable, V any] struct {
11 | m sync.Map
12 | }
13 |
14 | // NewMap 为了防止有时使用时忘记&取地址,所以又加了New构造
15 | func NewMap[K comparable, V any]() *Map[K, V] {
16 | return &Map[K, V]{}
17 | }
18 |
19 | // Load 加载键值对
20 | func (m *Map[K, V]) Load(key K) (value V, ok bool) {
21 | var anyVal any
22 | anyVal, ok = m.m.Load(key)
23 | if anyVal != nil {
24 | value = anyVal.(V)
25 | }
26 | return
27 | }
28 |
29 | // Store 存储键值对
30 | func (m *Map[K, V]) Store(key K, value V) {
31 | m.m.Store(key, value)
32 | }
33 |
34 | // LoadOrStore 加载或者存储一个键值对
35 | // true 代表是加载的,false 代表执行了 store
36 | func (m *Map[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) {
37 | var anyVal any
38 | anyVal, loaded = m.m.LoadOrStore(key, value)
39 | if anyVal != nil {
40 | actual = anyVal.(V)
41 | }
42 | return
43 | }
44 |
45 | // LoadOrStoreFunc 是一个优化,也就是使用该方法能够避免无意义的创建实例。
46 | // 如果你的初始化过程非常消耗资源,那么使用这个方法是有价值的。
47 | // 它的代价就是 Key 不存在的时候会多一次 Load 调用。
48 | // 当 fn 返回 error 的时候,LoadOrStoreFunc 也会返回 error。
49 | func (m *Map[K, V]) LoadOrStoreFunc(key K, fn func() (V, error)) (actual V, loaded bool, err error) {
50 | val, ok := m.Load(key)
51 | if ok {
52 | return val, true, nil
53 | }
54 | val, err = fn()
55 | if err != nil {
56 | return
57 | }
58 | actual, loaded = m.LoadOrStore(key, val)
59 | return
60 | }
61 |
62 | // LoadAndDelete 加载并且删除一个键值对
63 | func (m *Map[K, V]) LoadAndDelete(key K) (value V, loaded bool) {
64 | var anyVal any
65 | anyVal, loaded = m.m.LoadAndDelete(key)
66 | if anyVal != nil {
67 | value = anyVal.(V)
68 | }
69 | return
70 | }
71 |
72 | // Delete 删除键值对
73 | func (m *Map[K, V]) Delete(key K) {
74 | m.m.Delete(key)
75 | }
76 |
77 | // Range 遍历, f 不能为 nil
78 | // 传入 f 的时候,K 和 V 直接使用对应的类型,如果 f 返回 false,那么就会中断遍历
79 | func (m *Map[K, V]) Range(f func(key K, value V) bool) {
80 | m.m.Range(func(key, value any) bool {
81 | var (
82 | k K
83 | v V
84 | )
85 | if value != nil {
86 | v = value.(V)
87 | }
88 | if key != nil {
89 | k = key.(K)
90 | }
91 | return f(k, v)
92 | })
93 | }
94 |
95 | // IsEmpty 判断 Map 是否为空
96 | // - true : Map 为空
97 | // - false : Map 不为空
98 | func (m *Map[K, V]) IsEmpty() bool {
99 | empty := true
100 | m.Range(func(key K, value V) bool {
101 | empty = false
102 | return false // 立即停止遍历
103 | })
104 | return empty
105 | }
106 |
--------------------------------------------------------------------------------
/rpc/grpcx/observationX/grpcLogX/grpcLogX_test.go:
--------------------------------------------------------------------------------
1 | package grpcLogX
2 |
3 | import (
4 | "context"
5 | "gitee.com/hgg_test/pkg_tool/v2/logx"
6 | "gitee.com/hgg_test/pkg_tool/v2/logx/zerologx"
7 | "gitee.com/hgg_test/pkg_tool/v2/rpc/grpcx/limiter/slidingWindow/testPkg"
8 | "github.com/rs/zerolog"
9 | "github.com/stretchr/testify/require"
10 | "golang.org/x/sync/errgroup"
11 | "google.golang.org/grpc"
12 | "google.golang.org/grpc/credentials/insecure"
13 | "google.golang.org/grpc/keepalive"
14 | "log"
15 | "net"
16 | "os"
17 | "testing"
18 | "time"
19 | )
20 |
21 | func TestNewSlidingWindowLimiter(t *testing.T) {
22 | // 创建grpc服务,注册限流拦截器
23 | gs := grpc.NewServer(
24 | // 创建日志拦截器
25 | grpc.ChainUnaryInterceptor(NewInterceptorBuilder(initLog()).BuildServerUnaryInterceptor()),
26 | )
27 |
28 | us := &testPkg.Service{} // 实例化一个服务
29 | testPkg.RegisterUserServiceServer(gs, us) // 注册服务
30 |
31 | l, err := net.Listen("tcp", ":8090")
32 | require.NoError(t, err)
33 |
34 | err = gs.Serve(l)
35 | require.NoError(t, err)
36 |
37 | t.Log("服务启动成功")
38 | }
39 |
40 | // GRPC客户端发起调用
41 | func TestClient(t *testing.T) {
42 | // insecure.NewCredentials是创建一个不安全的凭证,不启用https
43 | cl, err := grpc.NewClient( // 创建 grpc 客户端
44 | "127.0.0.1:8090", // 本机测试域名localhost有点慢。
45 | grpc.WithTransportCredentials(insecure.NewCredentials()),
46 | grpc.WithConnectParams(grpc.ConnectParams{
47 | MinConnectTimeout: 5 * time.Second, // 最小连接超时(默认20秒)
48 | }),
49 | grpc.WithKeepaliveParams(keepalive.ClientParameters{
50 | Time: 5 * time.Second, // 发送 keepalive 探测的时间间隔
51 | Timeout: 5 * time.Second, // 等待确认的超时时间
52 | PermitWithoutStream: true, // 即使没有活跃流也发送 keepalive
53 | }),
54 | )
55 | require.NoError(t, err)
56 | defer cl.Close()
57 |
58 | ucClient := testPkg.NewUserServiceClient(cl)
59 |
60 | log.Println("开始调用服务")
61 |
62 | // 并发调用服务
63 | var eg errgroup.Group
64 | // 模拟3个客户端并发调用服务
65 | for i := 0; i < 3; i++ {
66 | eg.Go(func() error {
67 | res, er := ucClient.GetById(context.Background(), &testPkg.GetByIdRequest{Id: 123})
68 | if er != nil {
69 | return er
70 | }
71 | log.Println("resp.user: ", res.User)
72 | return nil
73 | })
74 | }
75 | err = eg.Wait()
76 | t.Log(err)
77 | }
78 |
79 | func initLog() logx.Loggerx {
80 | zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
81 | // Level日志级别【可以考虑作为参数传】,测试传zerolog.InfoLevel/NoLevel不打印
82 | // 模块化: Str("module", "userService模块")
83 | logger := zerolog.New(os.Stderr).Level(zerolog.DebugLevel).With().CallerWithSkipFrameCount(4).Timestamp().Logger()
84 |
85 | l := zerologx.NewZeroLogger(&logger)
86 | return l
87 | }
88 |
--------------------------------------------------------------------------------
/DBx/localCahceX/cacheLocalRistrettox/ristretto_test.go:
--------------------------------------------------------------------------------
1 | package cacheLocalRistrettox
2 |
3 | import (
4 | "github.com/dgraph-io/ristretto/v2"
5 | "github.com/stretchr/testify/assert"
6 | "testing"
7 | "time"
8 | )
9 |
10 | /*
11 | ✅ 最佳实践建议:【Set时的cost配置建议】
12 | 【场景:】 【推荐 cost 设置:】
13 | 缓存 []byte、string cost = len(value)
14 | 缓存结构体 估算大小,或统一设为 1(按条数)
15 | 缓存不同重要性数据 按业务权重设置 cost(VIP=10, 普通=1)
16 | 不确定大小 固定 cost=1,靠 MaxCost 控制总条数
17 | */
18 |
19 | const (
20 | VipUserCost = 10
21 | UserCost = 1
22 | )
23 |
24 | /*
25 | “位左移运算符”
26 | 1. 表示 2 的幂,语义清晰
27 | 1 << 10 → 1024 → 1KB
28 | 1 << 20 → 1,048,576 → 1MB
29 | 1 << 30 → 1,073,741,824 → 1GB
30 | 1 << 40 → 1TB
31 | */
32 |
33 | func TestRistretto(t *testing.T) {
34 | cache, err := ristretto.NewCache(&ristretto.Config[string, string]{
35 | NumCounters: 1e7, // 按键跟踪次数为(10M)。
36 | MaxCost: 1 << 30, // 最大缓存成本(1GB)“位左移运算符”。
37 | BufferItems: 64, // 每个Get缓冲区的键数。
38 | })
39 | assert.NoError(t, err)
40 | defer cache.Close()
41 |
42 | // 设置一个成本为1的值
43 | //cache.Set("key", "value", 1)
44 | cache.SetWithTTL("key", "value", 1, time.Second*5)
45 |
46 | // 等待值通过缓冲区【除非有重要的缓存,实时性要求较高,要堵塞直至等待缓冲通过】
47 | cache.Wait()
48 |
49 | // 从缓存中获取值
50 | value, ok := cache.Get("key")
51 | if !ok {
52 | t.Log("missing value")
53 | }
54 | t.Log("local cache: ", value)
55 |
56 | time.Sleep(time.Second * 1)
57 | v, ok := cache.GetTTL("key1")
58 | if !ok {
59 | t.Log("missing value")
60 | }
61 | t.Log("local cache v: ", v)
62 | if v <= 0 {
63 | t.Log("value TTL is no duration")
64 | } else {
65 | value, _ = cache.Get("key")
66 | t.Log("local cache: ", value)
67 | }
68 |
69 | // 缓存中的del值
70 | cache.Del("key")
71 | }
72 |
73 | func TestRistrettoV1(t *testing.T) {
74 | cache, err := ristretto.NewCache(&ristretto.Config[string, any]{
75 | NumCounters: 1e7, // 按键跟踪次数为(10M)。
76 | MaxCost: 1 << 30, // 最大缓存成本(1GB)“位左移运算符”。
77 | BufferItems: 64, // 每个Get缓冲区的键数。
78 | })
79 | //defer cache.Close()
80 |
81 | assert.NoError(t, err)
82 | ca := NewCacheLocalRistrettoStr[string, any](cache)
83 | defer ca.Close()
84 |
85 | // 打印时间单位为微秒
86 | t.Log("set cache: ", time.Now().UnixMicro())
87 | err = ca.Set("key", "value", time.Second*5, VipUserCost)
88 | // 等待值通过缓冲区
89 | ca.WaitSet()
90 | //time.Sleep(time.Second * 1)
91 |
92 | assert.NoError(t, err)
93 | t.Log("set cache ok: ", time.Now().UnixMicro())
94 | t.Log("get cache: ", time.Now().UnixMicro())
95 | val, err := ca.Get("key")
96 | assert.NoError(t, err)
97 | t.Log("val: ", val)
98 | t.Log("get cache ok: ", time.Now().UnixMicro())
99 | ca.Del("key")
100 | }
101 |
--------------------------------------------------------------------------------
/webx/ginx/middleware/jwtX2/jwtX2_help:
--------------------------------------------------------------------------------
1 | /*
2 | jwtX2为jwtx升级版,此实现:
3 | · 完全兼容原有接口
4 | · 解决多设备互踢问题
5 | · 支持“退出即失效”
6 | · 可 mock、可测试
7 |
8 |
9 | 1. 前端需支持:携带X-Device-ID
10 | 首次访问生成 deviceId 并存 localStorage
11 | 所有请求携带:
12 | X-Device-ID: abc123def456...
13 | Authorization: Bearer
14 | 2. 也可以,若不传 X-Device-ID:
15 | 自动使用 User-Agent 哈希作为设备 ID
16 | 同一浏览器/Postman 视为同一设备,多次登录自动踢旧
17 | */
18 |
19 |
20 | /* eg:例如
21 | // main.go
22 | package main
23 |
24 | import (
25 | "github.com/gin-gonic/gin"
26 | "github.com/redis/go-redis/v9"
27 | "your-project/jwtx"
28 | )
29 |
30 | func main() {
31 | r := gin.Default()
32 |
33 | redisClient := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
34 | jwtHandler := jwtx.NewJwtxMiddlewareGinx(redisClient, &jwtx.JwtxMiddlewareGinxConfig{
35 | JwtKey: []byte("your-access-secret-32-bytes!!"),
36 | LongJwtKey: []byte("your-refresh-secret-32-bytes!!"),
37 | DurationExpiresIn: 30 * time.Minute,
38 | LongDurationExpiresIn: 7 * 24 * time.Hour,
39 | })
40 |
41 | // 登录
42 | r.POST("/login", func(c *gin.Context) {
43 | // 验证用户名密码...
44 | userID := int64(123)
45 | name := "alice"
46 | ssid := uuid.New().String() // 需引入 github.com/google/uuid
47 |
48 | _, err := jwtHandler.SetToken(c, userID, name, ssid)
49 | if err != nil {
50 | c.JSON(500, gin.H{"error": err.Error()})
51 | return
52 | }
53 | c.JSON(200, gin.H{"msg": "login success"})
54 | })
55 |
56 | // 刷新
57 | r.POST("/refresh", func(c *gin.Context) {
58 | // 可传 newSsid 实现“续会话”,或留空复用
59 | _, err := jwtHandler.RefreshToken(c, "")
60 | if err != nil {
61 | c.JSON(401, gin.H{"error": err.Error()})
62 | return
63 | }
64 | c.JSON(200, gin.H{"msg": "refresh success"})
65 | })
66 |
67 | // 退出
68 | r.POST("/logout", func(c *gin.Context) {
69 | _, err := jwtHandler.DeleteToken(c)
70 | if err != nil {
71 | c.JSON(401, gin.H{"error": err.Error()})
72 | return
73 | }
74 | c.JSON(200, gin.H{"msg": "logout success"})
75 | })
76 |
77 | // 受保护路由
78 | api := r.Group("/api")
79 | api.Use(func(c *gin.Context) {
80 | _, err := jwtHandler.VerifyToken(c)
81 | if err != nil {
82 | c.AbortWithStatusJSON(401, gin.H{"error": "unauthorized"})
83 | return
84 | }
85 | c.Next()
86 | })
87 | api.GET("/profile", func(c *gin.Context) {
88 | c.JSON(200, gin.H{"msg": "protected"})
89 | })
90 |
91 | r.Run(":8080")
92 | }
93 | */
--------------------------------------------------------------------------------
/syncX/lock/redisLock/redsyncx/lock_cron_mysql/repository/dao/cron.go:
--------------------------------------------------------------------------------
1 | package dao
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "gitee.com/hgg_test/pkg_tool/v2/syncX/lock/redisLock/redsyncx/lock_cron_mysql/domain"
7 | "github.com/go-sql-driver/mysql"
8 | "gorm.io/gorm"
9 | )
10 |
11 | var (
12 | ErrDataRecordNotFound error = errors.New("数据不存在, 查询为空")
13 | ErrDuplicateData error = errors.New("数据已存在, 重复添加")
14 | )
15 |
16 | type CronDb interface {
17 | FindById(ctx context.Context, id int64) (CronJob, error)
18 | FindAll(ctx context.Context) ([]CronJob, error)
19 | Insert(ctx context.Context, job CronJob) error
20 | Inserts(ctx context.Context, jobs []domain.CronJob) error
21 | Delete(ctx context.Context, id int64) error
22 | Deletes(ctx context.Context, ids []int64) error
23 | }
24 |
25 | type cronCronDb struct {
26 | db *gorm.DB
27 | }
28 |
29 | func (c *cronCronDb) FindById(ctx context.Context, id int64) (CronJob, error) {
30 | var cronJob CronJob
31 | err := c.db.Model(&cronJob).WithContext(ctx).Where("cron_id = ?", id).First(&cronJob).Error
32 | switch err {
33 | case gorm.ErrRecordNotFound:
34 | return CronJob{}, ErrDataRecordNotFound
35 | default:
36 | return cronJob, err
37 | }
38 | }
39 | func (c *cronCronDb) FindAll(ctx context.Context) ([]CronJob, error) {
40 | var cronJobs []CronJob
41 | err := c.db.Model(&cronJobs).WithContext(ctx).Where("cron_id >= ", 0).Find(&cronJobs).Error
42 | switch err {
43 | case gorm.ErrRecordNotFound:
44 | return []CronJob{}, ErrDataRecordNotFound
45 | default:
46 | return cronJobs, err
47 | }
48 | }
49 |
50 | func (c *cronCronDb) Insert(ctx context.Context, job CronJob) error {
51 | var cron CronJob
52 | err := c.db.Model(&cron).WithContext(ctx).Where("cron_id = ?", job.ID).First(&cron).Error
53 | if e, ok := err.(*mysql.MySQLError); ok {
54 | const duplicateError uint16 = 1062
55 | if e.Number == duplicateError {
56 | return ErrDuplicateData
57 | }
58 | }
59 | return c.db.Model(&job).WithContext(ctx).Create(&job).Error
60 | }
61 |
62 | func (c *cronCronDb) Inserts(ctx context.Context, jobs []domain.CronJob) error {
63 | err := c.db.Model(&CronJob{}).WithContext(ctx).Create(&jobs).Error
64 | if e, ok := err.(*mysql.MySQLError); ok {
65 | const duplicateError uint16 = 1062
66 | if e.Number == duplicateError {
67 | return ErrDuplicateData
68 | }
69 | }
70 | return c.db.Model(&jobs).WithContext(ctx).Create(&jobs).Error
71 | }
72 |
73 | func (c *cronCronDb) Delete(ctx context.Context, id int64) error {
74 | return c.db.Model(&CronJob{}).WithContext(ctx).Where("cron_id = ?", id).Delete(&CronJob{}).Error
75 | }
76 | func (c *cronCronDb) Deletes(ctx context.Context, ids []int64) error {
77 | return c.db.Model(&CronJob{}).WithContext(ctx).Where("cron_id in ?", ids).Delete(&CronJob{}).Error
78 | }
79 |
--------------------------------------------------------------------------------
/sliceX/map.go:
--------------------------------------------------------------------------------
1 | package sliceX
2 |
3 | // FilterMap 执行过滤并且转化
4 | // 如果 m 的第二个返回值是 false,那么我们会忽略第一个返回值
5 | // 即便第二个返回值是 false,后续的元素依旧会被遍历
6 | func FilterMap[Src any, Dst any](src []Src, m func(idx int, src Src) (Dst, bool)) []Dst {
7 | res := make([]Dst, 0, len(src))
8 | for i, s := range src {
9 | dst, ok := m(i, s)
10 | if ok {
11 | res = append(res, dst)
12 | }
13 | }
14 | return res
15 | }
16 |
17 | // Map 将一个 []Src 类型的切片,通过一个映射函数 m,转换为一个新的 []Dst 类型的切片。
18 | func Map[Src any, Dst any](src []Src, m func(idx int, src Src) Dst) []Dst {
19 | dst := make([]Dst, len(src))
20 | for i, s := range src {
21 | dst[i] = m(i, s)
22 | }
23 | return dst
24 | }
25 |
26 | // ToMap 将[]Ele映射到map[Key]Ele
27 | // 从Ele中提取Key的函数fn由使用者提供
28 | //
29 | // 注意:
30 | // 如果出现 i < j
31 | // 设:
32 | //
33 | // key_i := fn(elements[i])
34 | // key_j := fn(elements[j])
35 | //
36 | // 满足key_i == key_j 的情况,则在返回结果的resultMap中
37 | // resultMap[key_i] = val_j
38 | //
39 | // 即使传入的字符串为nil,也保证返回的map是一个空map而不是nil
40 | func ToMap[Ele any, Key comparable](
41 | elements []Ele,
42 | fn func(element Ele) Key,
43 | ) map[Key]Ele {
44 | return ToMapV(
45 | elements,
46 | func(element Ele) (Key, Ele) {
47 | return fn(element), element
48 | })
49 | }
50 |
51 | // ToMapV 将[]Ele映射到map[Key]Val
52 | // 从Ele中提取Key和Val的函数fn由使用者提供
53 | //
54 | // 注意:
55 | // 如果出现 i < j
56 | // 设:
57 | //
58 | // key_i, val_i := fn(elements[i])
59 | // key_j, val_j := fn(elements[j])
60 | //
61 | // 满足key_i == key_j 的情况,则在返回结果的resultMap中
62 | // resultMap[key_i] = val_j
63 | //
64 | // 即使传入的字符串为nil,也保证返回的map是一个空map而不是nil
65 | func ToMapV[Ele any, Key comparable, Val any](
66 | elements []Ele,
67 | fn func(element Ele) (Key, Val),
68 | ) (resultMap map[Key]Val) {
69 | resultMap = make(map[Key]Val, len(elements))
70 | for _, element := range elements {
71 | k, v := fn(element)
72 | resultMap[k] = v
73 | }
74 | return
75 | }
76 |
77 | // 构造map
78 | func toMap[T comparable](src []T) map[T]struct{} {
79 | var dataMap = make(map[T]struct{}, len(src))
80 | for _, v := range src {
81 | // 使用空结构体,减少内存消耗
82 | dataMap[v] = struct{}{}
83 | }
84 | return dataMap
85 | }
86 |
87 | func deduplicateFunc[T any](data []T, equal equalFunc[T]) []T {
88 | var newData = make([]T, 0, len(data))
89 | for k, v := range data {
90 | if !ContainsFunc[T](data[k+1:], func(src T) bool {
91 | return equal(src, v)
92 | }) {
93 | newData = append(newData, v)
94 | }
95 | }
96 | return newData
97 | }
98 |
99 | func deduplicate[T comparable](data []T) []T {
100 | dataMap := toMap[T](data)
101 | var newData = make([]T, 0, len(dataMap))
102 | for key := range dataMap {
103 | newData = append(newData, key)
104 | }
105 | return newData
106 | }
107 |
--------------------------------------------------------------------------------
/DBx/mysqlX/gormx/dbLogx/zeroLogx.go:
--------------------------------------------------------------------------------
1 | package dbLogx
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "gitee.com/hgg_test/pkg_tool/v2/logx"
7 | "gitee.com/hgg_test/pkg_tool/v2/sliceX"
8 | glogger "gorm.io/gorm/logger"
9 | "time"
10 | )
11 |
12 | // GormLogStrx 适配器,将GORM日志转换为Zerolog
13 | type GormLogStrx struct {
14 | // SlowThreshold
15 | // - 慢查询阈值,单位为秒
16 | // - 0 表示不启用慢查询
17 | SlowThreshold time.Duration
18 | logx logx.Loggerx
19 | }
20 |
21 | // NewGormLogStrx 初始化GORM日志适配器
22 | // - 需先初始化日志模块,参考测试用例InitLog()方法
23 | // - showThreshold: 慢查询阈值,单位为秒
24 | // - gorm.Open(mysql.Open(dsn), &gorm.Config{Logger: NewGormLogStrx(time.Second, InitLog())})
25 | func NewGormLogStrx(slowThreshold time.Duration, logx logx.Loggerx) GormLogIn {
26 | return &GormLogStrx{SlowThreshold: slowThreshold, logx: logx}
27 | }
28 |
29 | // LogMode 实现gorm.Logger接口
30 | func (l *GormLogStrx) LogMode(level glogger.LogLevel) glogger.Interface {
31 | // 使用Zerolog的级别控制,所以这里不需要做任何事
32 | return l
33 | }
34 |
35 | // Info 实现gorm.Logger接口 - 信息日志
36 | func (l *GormLogStrx) Info(ctx context.Context, msg string, data ...interface{}) {
37 | fld := sliceX.Map[any, logx.Field](data, func(idx int, src any) logx.Field {
38 | return logx.Any(fmt.Sprintf("%d", idx), src)
39 | })
40 | l.logx.Info(msg, fld...)
41 | }
42 |
43 | // Warn 实现gorm.Logger接口 - 警告日志
44 | func (l *GormLogStrx) Warn(ctx context.Context, msg string, data ...interface{}) {
45 | fld := sliceX.Map[any, logx.Field](data, func(idx int, src any) logx.Field {
46 | return logx.Any(fmt.Sprintf("%d", idx), src)
47 | })
48 | l.logx.Warn(msg, fld...)
49 | }
50 |
51 | // Error 实现gorm.Logger接口 - 错误日志
52 | func (l *GormLogStrx) Error(ctx context.Context, msg string, data ...interface{}) {
53 | fld := sliceX.Map[any, logx.Field](data, func(idx int, src any) logx.Field {
54 | return logx.Any(fmt.Sprintf("%d", idx), src)
55 | })
56 | l.logx.Error(msg, fld...)
57 | }
58 |
59 | // Trace 实现gorm.Logger接口 - 跟踪日志(拆分成不同级别)
60 | func (l *GormLogStrx) Trace(ctx context.Context, begin time.Time, fc func() (string, int64), err error) {
61 | // 计算耗时,单位为毫秒
62 | elapsed := time.Since(begin)
63 | sql, rows := fc()
64 |
65 | // 如果有错误,记录错误日志
66 | if err != nil {
67 | l.logx.Error("SQL Error", logx.Error(err),
68 | logx.String("sql", sql),
69 | logx.Int64("rows", rows),
70 | logx.TimeDuration("elapsed-ms", elapsed),
71 | )
72 | return
73 | }
74 |
75 | // 如果是慢查询,记录警告日志
76 | if l.SlowThreshold != 0 && elapsed > l.SlowThreshold {
77 | l.logx.Error("Slow SQL", logx.Error(err),
78 | logx.String("sql", sql),
79 | logx.Int64("rows", rows),
80 | logx.TimeDuration("elapsed-ms", elapsed),
81 | )
82 | return
83 | }
84 |
85 | // 普通查询记录调试日志
86 | l.logx.Error("SQL Query", logx.Error(err),
87 | logx.String("sql", sql),
88 | logx.Int64("rows", rows),
89 | logx.TimeDuration("elapsed-ms", elapsed),
90 | )
91 | }
92 |
--------------------------------------------------------------------------------
/serviceLogicX/rankingListX/rankingServiceRdbZsetX/rankingServiceRdbZset_test.go:
--------------------------------------------------------------------------------
1 | package rankingServiceRdbZsetX
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "gitee.com/hgg_test/pkg_tool/v2/DBx/localCahceX"
7 | "gitee.com/hgg_test/pkg_tool/v2/DBx/localCahceX/cacheLocalRistrettox"
8 | "gitee.com/hgg_test/pkg_tool/v2/logx"
9 | "gitee.com/hgg_test/pkg_tool/v2/logx/zerologx"
10 | "gitee.com/hgg_test/pkg_tool/v2/serviceLogicX/rankingListX/rankingServiceRdbZsetX/types"
11 | "github.com/dgraph-io/ristretto/v2"
12 | "github.com/redis/go-redis/v9"
13 | "github.com/rs/zerolog"
14 | "log"
15 | "os"
16 | "testing"
17 | "time"
18 | )
19 |
20 | func TestNewRankingService(t *testing.T) {
21 | globalSvc := NewRankingService(10, newRedisCli(), newLocalCache(), newLogger())
22 | defer globalSvc.Stop()
23 |
24 | // 获取 article 榜单
25 | articleSvc := globalSvc.WithBizType("article", types.HotScoreProvider{})
26 |
27 | // 启动缓存刷新(可选)【本地缓存默认为15秒过期,自动刷新缓存开启的话,可小一些】
28 | articleSvc.StartRefresh(10 * time.Second)
29 |
30 | ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
31 | defer cancel()
32 | // 用户点赞
33 | //for i := 0; i < 1000; i++ {
34 | // _ = articleSvc.IncrScore(ctx, strconv.Itoa(i+1), float64(i+1.0), map[string]string{
35 | // "title": "rankTest_" + strconv.Itoa(i+1),
36 | // "author": "李四",
37 | // })
38 | //}
39 |
40 | // 获取榜单(自动补全 Title)
41 | for i := 0; i < 3; i++ {
42 | now := time.Now()
43 | log.Println("开始获取榜单:", i+1)
44 | top100, _ := articleSvc.GetTopN(ctx, 5)
45 | for _, item := range top100 {
46 | fmt.Printf("ID: %s, Title: %s, Score: %.2f\n", item.BizID, item.Title, item.Score)
47 | }
48 | timeStop := time.Since(now).String()
49 | log.Println("获取榜单结束,耗时:", timeStop)
50 |
51 | time.Sleep(time.Second * 1)
52 | }
53 | }
54 |
55 | func newLogger() logx.Loggerx {
56 | zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
57 | // Level日志级别【可以考虑作为参数传】,测试传zerolog.InfoLevel/NoLevel不打印
58 | // 模块化: Str("module", "userService模块")
59 | logger := zerolog.New(os.Stderr).Level(zerolog.DebugLevel).With().CallerWithSkipFrameCount(4).Timestamp().Logger()
60 |
61 | return zerologx.NewZeroLogger(&logger)
62 | }
63 |
64 | func newRedisCli() redis.Cmdable {
65 | return redis.NewClient(&redis.Options{
66 | Addr: "127.0.0.1:6379",
67 | })
68 | }
69 |
70 | func TestNewLocalCache(t *testing.T) {
71 | newLocalCache()
72 | }
73 | func newLocalCache() localCahceX.CacheLocalIn[string, []types.HotScore] {
74 | cache, err := ristretto.NewCache[string, []types.HotScore](&ristretto.Config[string, []types.HotScore]{
75 | NumCounters: 1e7, // 按键跟踪次数为(10M)。
76 | MaxCost: 1 << 30, // 最大缓存成本(1GB)“位左移运算符”。
77 | BufferItems: 64, // 每个Get缓冲区的键数。
78 | })
79 | if err != nil {
80 | return nil
81 | }
82 | localCache := cacheLocalRistrettox.NewCacheLocalRistrettoStr[string, []types.HotScore](cache)
83 | return localCache
84 | }
85 |
--------------------------------------------------------------------------------
/webx/ginx/middleware/limitX/slidingWindow.go:
--------------------------------------------------------------------------------
1 | package limitX
2 |
3 | import (
4 | "gitee.com/hgg_test/pkg_tool/v2/sliceX/queueX"
5 | "github.com/gin-gonic/gin"
6 | "net/http"
7 | "sync"
8 | "time"
9 | )
10 |
11 | // SlidingWindowLimiter 滑动窗口限流器
12 | type SlidingWindowLimiter struct {
13 | window time.Duration // 统计窗口大小(如1分钟)
14 | threshold int // 窗口内允许的最大请求数(阈值)
15 | queue *queueX.PriorityQueue[time.Time] // 存储请求时间戳的最小堆(队首是最早请求)
16 | lock sync.Mutex // 限流器全局锁(保护队列操作)
17 | }
18 |
19 | // NewSlidingWindowBuilder 滑动窗口限流器, 构造函数
20 | // - window: 窗口时长(如time.Second)
21 | // - threshold: 窗口内最大请求数(如100)
22 | //
23 | // 每1秒最多有100个请求/秒,即1秒内最多100个请求
24 | func NewSlidingWindowBuilder(window time.Duration, threshold int) *SlidingWindowLimiter {
25 | // 初始化最小堆:比较函数用time.Time.Before(早的时间戳更小,队首是最旧请求)
26 | return &SlidingWindowLimiter{
27 | window: window,
28 | threshold: threshold,
29 | queue: queueX.NewPriorityQueue[time.Time](func(a, b time.Time) bool { return a.Before(b) }, 0),
30 | }
31 | }
32 |
33 | func (c *SlidingWindowLimiter) Build() gin.HandlerFunc {
34 | return func(ctx *gin.Context) {
35 | if !c.Allow() {
36 | // 限流
37 | ctx.AbortWithStatus(http.StatusTooManyRequests)
38 | return
39 | }
40 | // 允许请求:调用后续处理逻辑
41 | ctx.Next()
42 | }
43 | }
44 |
45 | // Allow
46 | // - 检查是否允许通过请求
47 | // - 判断是否允许当前请求(核心逻辑)
48 | func (c *SlidingWindowLimiter) Allow() bool {
49 | c.lock.Lock()
50 | defer c.lock.Unlock()
51 |
52 | now := time.Now()
53 | windowStart := now.Add(-c.window) // 窗口左边界(当前时间往前推window)
54 |
55 | // 1. 清理过期请求:删除队列中所有早于windowStart的时间戳
56 | c.removeExpired(windowStart)
57 | //for {
58 | // peekTime, ok := c.queue.Peek()
59 | // if !ok || !peekTime.Before(windowStart) {
60 | // break // 队首已处于窗口内,无需继续清理
61 | // }
62 | // c.queue.Pop() // 移除过期请求
63 | //}
64 |
65 | // 2. 检查是否超过阈值
66 | if c.queue.Size() >= c.threshold {
67 | return false // 超过阈值,不允许
68 | }
69 |
70 | // 3. 记录当前请求时间戳
71 | //c.queue.Push(now)
72 | c.queue.Enqueue(now)
73 | return true // 允许请求
74 | }
75 |
76 | // removeExpired 移除过期的请求时间戳
77 | func (c *SlidingWindowLimiter) removeExpired(windowStart time.Time) {
78 | // 持续移除窗口开始时间之前的请求
79 | // 清理过期请求:删除队列中所有早于windowStart的时间戳
80 | for {
81 | peekTime, ok := c.queue.Peek()
82 | if !ok || !peekTime.Before(windowStart) {
83 | break // 队首已处于窗口内,无需继续清理
84 | }
85 | //c.queue.Pop() // 移除过期请求
86 | c.queue.Dequeue() // 移除过期请求
87 | }
88 | }
89 |
90 | // GetCurrentCount 获取当前队列长度(主要用于测试)
91 | func (c *SlidingWindowLimiter) GetCurrentCount() int {
92 | c.lock.Lock()
93 | defer c.lock.Unlock()
94 |
95 | now := time.Now()
96 | windowStart := now.Add(-c.window)
97 | c.removeExpired(windowStart)
98 |
99 | return c.queue.Len()
100 | }
101 |
--------------------------------------------------------------------------------
/systemLoad/gopsutilx/gopsutil_test.go:
--------------------------------------------------------------------------------
1 | package gopsutilx
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "log"
7 | "testing"
8 | "time"
9 |
10 | "github.com/shirou/gopsutil/v4/cpu"
11 | "github.com/shirou/gopsutil/v4/host"
12 | "github.com/shirou/gopsutil/v4/load"
13 | "github.com/stretchr/testify/assert"
14 | )
15 |
16 | func TestGetProcessInfo(t *testing.T) {
17 | ctx, cancel := context.WithTimeout(context.Background(), time.Second)
18 | defer cancel()
19 |
20 | // 获取负载信息
21 | //loadAvg, err := load.Avg()
22 | loadAvg, err := load.AvgWithContext(ctx)
23 | if err != nil {
24 | t.Fatal(err)
25 | }
26 |
27 | // 打印结果(Load1/Load5/Load15)
28 | fmt.Printf("1分钟负载: %.2f, 5分钟负载: %.2f, 15分钟负载: %.2f\n", loadAvg.Load1, loadAvg.Load5, loadAvg.Load15)
29 |
30 | // 获取系统信息
31 | systemInfo, err := host.Info()
32 | if err != nil {
33 | t.Fatal(err)
34 | }
35 | fmt.Println("system info: ", systemInfo)
36 |
37 | // 获取CPU信息
38 | cpuInfo, err := cpu.Info()
39 | if err != nil {
40 | t.Fatal(err)
41 | }
42 | fmt.Println("cpu info num: ", len(cpuInfo))
43 | for k, v := range cpuInfo {
44 | fmt.Printf("cpu%d info%v:\n ", k, v)
45 | }
46 | }
47 |
48 | func TestLocal(t *testing.T) {
49 | // 获取磁盘信息
50 | s := NewSystemLoad()
51 | total, err := s.DiskTotals()
52 | assert.NoError(t, err)
53 | log.Println(total)
54 |
55 | // 获取磁盘使用情况
56 | usage, err := s.DiskUsage([]string{"C:"})
57 | assert.NoError(t, err)
58 | log.Println("===================")
59 | for _, v := range usage {
60 | log.Printf("disk%s info%v:\n ", v.Name, v)
61 | }
62 |
63 | // 获取内存使用情况
64 | us, err := s.MemUsage()
65 | assert.NoError(t, err)
66 | log.Println("===================")
67 | log.Println("mem info: ", us)
68 |
69 | // 获取CPU使用情况
70 | c, err := s.CpuUsage()
71 | assert.NoError(t, err)
72 | log.Println("===================")
73 | for i, f := range c {
74 | log.Printf("CPU %d: %.2f%%\n", i, f)
75 | }
76 |
77 | // 获取整体 CPU 使用率
78 | cAll, err := s.CpuAllUsage()
79 | assert.NoError(t, err)
80 | log.Println("===================")
81 | log.Printf("CPU: %.2f%%\n", cAll)
82 |
83 | // 获取cpu信息
84 | cInfo, err := s.CpuInfo()
85 | if err == nil {
86 | log.Println("===================")
87 | for _, info := range cInfo {
88 | fmt.Printf("CPU 型号: %s\n", info.ModelName)
89 | fmt.Printf("核心数: %d\n", info.Cores)
90 | }
91 | }
92 |
93 | // 获取系统负载
94 | i, err := s.SystemLoad()
95 | if err == nil {
96 | switch i {
97 | case 0:
98 | log.Println("未获取到系统负载")
99 | case 1:
100 | log.Println("系统负载良好")
101 | case 2:
102 | log.Println("系统负载警戒")
103 | case 3:
104 | log.Println("系统负载危险")
105 | }
106 | }
107 | log.Println("===================")
108 |
109 | info, err := s.HostInfo(context.Background())
110 | if err == nil {
111 | log.Println("Host Info: ", info)
112 | }
113 | }
114 |
--------------------------------------------------------------------------------
/configx/viperX/viper_test.go:
--------------------------------------------------------------------------------
1 | package viperX
2 |
3 | import (
4 | "gitee.com/hgg_test/pkg_tool/v2/logx"
5 | "gitee.com/hgg_test/pkg_tool/v2/logx/zerologx"
6 | "github.com/rs/zerolog"
7 | "github.com/stretchr/testify/assert"
8 | "os"
9 | "testing"
10 | "time"
11 | )
12 |
13 | const (
14 | DbConfFile = "db.yaml"
15 | RedisConfFile = "redis.yaml"
16 | )
17 |
18 | // TestInitViperLocals 测试读取多个配置文件
19 | func TestInitViperLocals(t *testing.T) {
20 | t.Log(time.Now().UnixMilli())
21 | // 初始化viper
22 | conf := NewViperConfigStr(InitLog())
23 | // 初始化读取配置文件1
24 | err := conf.InitViperLocals("db", "yaml", ".")
25 | assert.NoError(t, err)
26 | // 初始化读取配置文件2
27 | err = conf.InitViperLocals("redis", "yaml", ".")
28 | assert.NoError(t, err)
29 | // 正常项目已经可在此返回ViperConfigIn接口了
30 | //return conf
31 |
32 | // 获取配置文件信息==========================
33 | // - 调用configx的单独Get方法,基于泛型约束,自动匹配返回值类型
34 | // - conf参数为configx.ConfigIn接口,初始化配置文件时返回
35 | dbConf := Get[string](conf, "mysql.dsn", DbConfFile)
36 | testKey := Get[time.Duration](conf, "testKey.val", DbConfFile)
37 | redisConf := Get[string](conf, "redis.addr", RedisConfFile)
38 | t.Logf("dbConf: %s, redisConf: %s", dbConf, redisConf)
39 | t.Logf("testKey: %v", testKey)
40 | t.Log(time.Now().UnixMilli())
41 |
42 | // 获取配置文件信息,并反序列化为结构体==========================
43 | type confDB struct {
44 | Dsn string `yaml:"dsn"`
45 | }
46 | type confRedis struct {
47 | Addr string `yaml:"addr"`
48 | }
49 | var confDb confDB
50 | var confRe confRedis
51 | err = GetUnmarshalStruct(conf, "mysql", &confDb, DbConfFile)
52 | assert.NoError(t, err)
53 | err = GetUnmarshalStruct(conf, "redis", &confRe, RedisConfFile)
54 | assert.NoError(t, err)
55 | t.Logf("confDb: %s", confDb.Dsn)
56 | t.Logf("confRe: %s", confRe.Addr)
57 | t.Log(time.Now().UnixMilli())
58 | }
59 |
60 | // TestInitViperLocalsWatchs 测试读取多个配置文件并监听文件变化
61 | func TestInitViperLocalsWatchs(t *testing.T) {
62 | conf := NewViperConfigStr(InitLog())
63 | err := conf.InitViperLocalsWatchs("db", "yaml", ".")
64 | assert.NoError(t, err)
65 | err = conf.InitViperLocalsWatchs("redis", "yaml", ".")
66 | assert.NoError(t, err)
67 |
68 | // 正常项目已经可在此返回ViperConfigIn接口了
69 | //return conf
70 |
71 | // 获取配置文件信息
72 | // - 调用configx的单独Get方法,基于泛型约束,自动匹配返回值类型
73 | // - conf参数为configx.ConfigIn接口,初始化配置文件时返回
74 | dbConf := Get[string](conf, "mysql.dsn", DbConfFile)
75 | redisConf := Get[string](conf, "redis.addr", RedisConfFile)
76 | t.Logf("dbConf: %s, redisConf: %s", dbConf, redisConf)
77 |
78 | time.Sleep(time.Minute * 5)
79 | }
80 |
81 | func InitLog() logx.Loggerx {
82 | // InitLog 初始化zerolog日志模块
83 | zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
84 | // Level日志级别【可以考虑作为参数传】,测试传zerolog.InfoLevel/NoLevel不打印
85 | // 模块化: Str("module", "userService模块")
86 | logger := zerolog.New(os.Stderr).Level(zerolog.DebugLevel).With().Timestamp().Logger()
87 |
88 | l := zerologx.NewZeroLogger(&logger)
89 | return l
90 | }
91 |
--------------------------------------------------------------------------------
/observationX/opentelemetryX/otel.go:
--------------------------------------------------------------------------------
1 | package opentelemetryX
2 |
3 | import (
4 | "context"
5 | "go.opentelemetry.io/otel"
6 | "go.opentelemetry.io/otel/propagation"
7 | "go.opentelemetry.io/otel/sdk/resource"
8 | "go.opentelemetry.io/otel/sdk/trace"
9 | semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
10 | "time"
11 | )
12 |
13 | type CtxFn func(ctx context.Context)
14 |
15 | type SvcInfo struct {
16 | ServiceName string
17 | ServiceVersion string
18 | }
19 |
20 | type OtelStr struct {
21 | serviceName string
22 | serviceVersion string
23 | ResErr error
24 |
25 | // 创建 span exporter
26 | spanExporter trace.SpanExporter
27 |
28 | resource *resource.Resource
29 |
30 | tracerProvider *trace.TracerProvider
31 | }
32 |
33 | func NewOtelStr(svc SvcInfo, spanExporter trace.SpanExporter) (CtxFn, error) {
34 | o := &OtelStr{
35 | serviceName: svc.ServiceName,
36 | serviceVersion: svc.ServiceVersion,
37 | spanExporter: spanExporter,
38 | }
39 | //res, err := newResource("demo", "v0.0.1")
40 | res, err := o.newResource()
41 | if err == nil {
42 | o.ResErr = nil
43 | o.resource = res
44 | }
45 | o.ResErr = err
46 |
47 | prop := o.newPropagator()
48 | // 设置 propagator, 在客户端和服务端之间传递 tracing 的相关信息
49 | otel.SetTextMapPropagator(prop)
50 |
51 | // 初始化 trace provider
52 | // 这个 provider 就是用来在打点的时候构建 trace 的
53 | tp, err := o.newTraceProvider()
54 | if err != nil {
55 | o.ResErr = err
56 | }
57 | o.tracerProvider = tp
58 |
59 | // 设置 trace provider
60 | otel.SetTracerProvider(tp)
61 |
62 | return o.initOtel(), err
63 | }
64 |
65 | // InitOtel main方法里,defer住 tp.Shutdown(ctx),InitOtel
66 | func (o *OtelStr) initOtel() func(ctx context.Context) {
67 | return func(ctx context.Context) {
68 | _ = o.tracerProvider.Shutdown(ctx)
69 | }
70 | }
71 |
72 | // newResource
73 | func (o *OtelStr) newResource() (*resource.Resource, error) {
74 | return resource.Merge(resource.Default(),
75 | resource.NewWithAttributes(semconv.SchemaURL,
76 | semconv.ServiceName(o.serviceName),
77 | semconv.ServiceVersion(o.serviceVersion),
78 | ))
79 | }
80 |
81 | // newPropagator 用于在客户端和服务端之间传递 tracing 的相关信息
82 | func (o *OtelStr) newPropagator() propagation.TextMapPropagator {
83 | return propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{})
84 | }
85 |
86 | // newTraceProvider 用于初始化 trace provider
87 | func (o *OtelStr) newTraceProvider() (*trace.TracerProvider, error) {
88 | //exporter, err := zipkin.New("http://localhost:9411/api/v2/spans") // zipkin exporter
89 | //if err != nil {
90 | // return nil, err
91 | //}
92 |
93 | traceProvider := trace.NewTracerProvider(
94 | trace.WithBatcher(o.spanExporter,
95 | // Default is 5s. Set to 1s for demonstrative purposes.
96 | trace.WithBatchTimeout(time.Second)),
97 | trace.WithResource(o.resource),
98 | )
99 | return traceProvider, nil
100 | }
101 |
--------------------------------------------------------------------------------
/registry/etcdx/etcd_test.go:
--------------------------------------------------------------------------------
1 | package etcdx
2 |
3 | import (
4 | "context"
5 | "github.com/stretchr/testify/require"
6 | "github.com/stretchr/testify/suite"
7 | etcdv3 "go.etcd.io/etcd/client/v3"
8 | "go.etcd.io/etcd/client/v3/naming/endpoints"
9 | "google.golang.org/grpc"
10 | "net"
11 | "testing"
12 | "time"
13 | )
14 |
15 | /*
16 | grpc中接入etcd
17 | */
18 |
19 | type EtcdTestSuite struct {
20 | suite.Suite
21 | cli *etcdv3.Client
22 | }
23 |
24 | func (e *EtcdTestSuite) SetupSuite() {
25 | cli, err := etcdv3.NewFromURL("localhost:12379")
26 | // etcdv3.NewFromURLs()
27 | // etcdv3.New(etcdv3.Config{})
28 | //assert.NoError(e.T(), err) // 仅标记测试失败,继续执行后续代码
29 | require.NoError(e.T(), err) // 立即终止当前测试函数(使用 return)
30 | e.cli = cli
31 | }
32 |
33 | // 注册中心、注册的实例,添加一个节点
34 | func (e *EtcdTestSuite) TestServer() {
35 | t := e.T()
36 | l, err := net.Listen("tcp", ":8090")
37 | require.NoError(t, err)
38 | em, err := endpoints.NewManager(e.cli, "service/user") // 注册中心的信息
39 | require.NoError(t, err)
40 | ctx, cancel := context.WithTimeout(context.Background(), time.Second)
41 | defer cancel()
42 | addr := "127.0.0.1:8090"
43 | key := "service/user/" + addr
44 | // 添加一个服务
45 | err = em.AddEndpoint(ctx, key, endpoints.Endpoint{
46 | // 定位信息,客户端怎么连接你
47 | Addr: addr,
48 | })
49 | require.NoError(t, err)
50 |
51 | go func() {
52 | // 模拟注册信息变更
53 | ticker := time.NewTicker(time.Second)
54 | for now := range ticker.C { // ticker.C 是一个通道,每秒发送一个时间
55 | ctx1, cancel1 := context.WithTimeout(context.Background(), time.Second)
56 | // 更新注册中心
57 | err1 := em.Update(ctx1, []*endpoints.UpdateWithOpts{
58 | {
59 | Update: endpoints.Update{
60 | Op: endpoints.Add,
61 | Key: key,
62 | Endpoint: endpoints.Endpoint{
63 | Addr: addr,
64 | Metadata: now.String(),
65 | },
66 | },
67 | },
68 | // Update的话,两条【key】不能相同
69 | //{
70 | // Update: endpoints.Update{
71 | // Op: endpoints.Add,
72 | // Key: key1,
73 | // Endpoint: endpoints.Endpoint{
74 | // Addr: addr,
75 | // Metadata: now.String(),
76 | // },
77 | // },
78 | //},
79 | })
80 | // 更新注册中心,AddEndpoint简化写法。INSERT OR UPDATE, SAVE 的语义
81 | //err1 := em.AddEndpoint(ctx, key, endpoints.Endpoint{
82 | // Addr: addr,
83 | // Metadata: now.String(),
84 | //})
85 | cancel1()
86 | if err1 != nil {
87 | t.Log(err1)
88 | }
89 | }
90 | }()
91 |
92 | // grpc服务端
93 | server := grpc.NewServer()
94 | RegisterUserServiceServer(server, &Service{})
95 | // 启动服务
96 | err = server.Serve(l)
97 | require.NoError(t, err)
98 |
99 | // 删除一个服务
100 | err = em.DeleteEndpoint(ctx, key)
101 | require.NoError(t, err)
102 |
103 | server.GracefulStop() // gRPC优雅关闭退出
104 | err = e.cli.Close() // 关闭etcd客户端
105 | require.NoError(t, err)
106 | }
107 |
108 | func TestEtcd(t *testing.T) {
109 | suite.Run(t, new(EtcdTestSuite))
110 | }
111 |
--------------------------------------------------------------------------------
/rpc/grpcx/etcdx/etcd_test.go:
--------------------------------------------------------------------------------
1 | package etcdx
2 |
3 | import (
4 | "context"
5 | "github.com/stretchr/testify/require"
6 | "github.com/stretchr/testify/suite"
7 | etcdv3 "go.etcd.io/etcd/client/v3"
8 | "go.etcd.io/etcd/client/v3/naming/endpoints"
9 | "google.golang.org/grpc"
10 | "net"
11 | "testing"
12 | "time"
13 | )
14 |
15 | /*
16 | grpc中接入etcd
17 | */
18 |
19 | type EtcdTestSuite struct {
20 | suite.Suite
21 | cli *etcdv3.Client
22 | }
23 |
24 | func (e *EtcdTestSuite) SetupSuite() {
25 | cli, err := etcdv3.NewFromURL("localhost:12379")
26 | // etcdv3.NewFromURLs()
27 | // etcdv3.New(etcdv3.Config{})
28 | //assert.NoError(e.T(), err) // 仅标记测试失败,继续执行后续代码
29 | require.NoError(e.T(), err) // 立即终止当前测试函数(使用 return)
30 | e.cli = cli
31 | }
32 |
33 | // 注册中心、注册的实例,添加一个节点
34 | func (e *EtcdTestSuite) TestServer() {
35 | t := e.T()
36 | l, err := net.Listen("tcp", ":8090")
37 | require.NoError(t, err)
38 | em, err := endpoints.NewManager(e.cli, "service/user") // 注册中心的信息
39 | require.NoError(t, err)
40 | ctx, cancel := context.WithTimeout(context.Background(), time.Second)
41 | defer cancel()
42 | addr := "127.0.0.1:8090"
43 | key := "service/user/" + addr
44 | // 添加一个服务
45 | err = em.AddEndpoint(ctx, key, endpoints.Endpoint{
46 | // 定位信息,客户端怎么连接你
47 | Addr: addr,
48 | })
49 | require.NoError(t, err)
50 |
51 | go func() {
52 | // 模拟注册信息变更
53 | ticker := time.NewTicker(time.Second)
54 | for now := range ticker.C { // ticker.C 是一个通道,每秒发送一个时间
55 | ctx1, cancel1 := context.WithTimeout(context.Background(), time.Second)
56 | // 更新注册中心
57 | err1 := em.Update(ctx1, []*endpoints.UpdateWithOpts{
58 | {
59 | Update: endpoints.Update{
60 | Op: endpoints.Add,
61 | Key: key,
62 | Endpoint: endpoints.Endpoint{
63 | Addr: addr,
64 | Metadata: now.String(),
65 | },
66 | },
67 | },
68 | // Update的话,两条【key】不能相同
69 | //{
70 | // Update: endpoints.Update{
71 | // Op: endpoints.Add,
72 | // Key: key1,
73 | // Endpoint: endpoints.Endpoint{
74 | // Addr: addr,
75 | // Metadata: now.String(),
76 | // },
77 | // },
78 | //},
79 | })
80 | // 更新注册中心,AddEndpoint简化写法。INSERT OR UPDATE, SAVE 的语义
81 | //err1 := em.AddEndpoint(ctx, key, endpoints.Endpoint{
82 | // Addr: addr,
83 | // Metadata: now.String(),
84 | //})
85 | cancel1()
86 | if err1 != nil {
87 | t.Log(err1)
88 | }
89 | }
90 | }()
91 |
92 | // grpc服务端
93 | server := grpc.NewServer()
94 | RegisterUserServiceServer(server, &Service{})
95 | // 启动服务
96 | err = server.Serve(l)
97 | require.NoError(t, err)
98 |
99 | // 删除一个服务
100 | err = em.DeleteEndpoint(ctx, key)
101 | require.NoError(t, err)
102 |
103 | server.GracefulStop() // gRPC优雅关闭退出
104 | err = e.cli.Close() // 关闭etcd客户端
105 | require.NoError(t, err)
106 | }
107 |
108 | func TestEtcd(t *testing.T) {
109 | suite.Run(t, new(EtcdTestSuite))
110 | }
111 |
--------------------------------------------------------------------------------
/syncX/lock/redisLock/redsyncx/lock_cron_mysql/repository/cron.go:
--------------------------------------------------------------------------------
1 | package repository
2 |
3 | import (
4 | "context"
5 | "database/sql"
6 | "gitee.com/hgg_test/pkg_tool/v2/sliceX"
7 | "gitee.com/hgg_test/pkg_tool/v2/syncX/lock/redisLock/redsyncx/lock_cron_mysql/domain"
8 | "gitee.com/hgg_test/pkg_tool/v2/syncX/lock/redisLock/redsyncx/lock_cron_mysql/repository/dao"
9 | )
10 |
11 | var (
12 | ErrDataRecordNotFound error = dao.ErrDataRecordNotFound
13 | ErrDuplicateData error = dao.ErrDuplicateData
14 | )
15 |
16 | type CronRepository interface {
17 | FindById(ctx context.Context, id int64) (domain.CronJob, error)
18 | FindAll(ctx context.Context) ([]domain.CronJob, error)
19 | CreateCron(ctx context.Context, job domain.CronJob) error
20 | CreateCrons(ctx context.Context, jobs []domain.CronJob) error
21 | DelCron(ctx context.Context, id int64) error
22 | DelCrons(ctx context.Context, ids []int64) error
23 | }
24 |
25 | type cronRepository struct {
26 | db dao.CronDb
27 | }
28 |
29 | func (c *cronRepository) FindById(ctx context.Context, id int64) (domain.CronJob, error) {
30 | cron, err := c.db.FindById(ctx, id)
31 | if err != nil {
32 | return domain.CronJob{}, err
33 | }
34 | return toDomain(cron), nil
35 | }
36 | func (c *cronRepository) FindAll(ctx context.Context) ([]domain.CronJob, error) {
37 | crons, err := c.db.FindAll(ctx)
38 | if err != nil {
39 | return []domain.CronJob{}, err
40 | }
41 | return sliceX.Map[dao.CronJob, domain.CronJob](crons, func(idx int, src dao.CronJob) domain.CronJob {
42 | return toDomain(src)
43 | }), err
44 | }
45 |
46 | func (c *cronRepository) CreateCron(ctx context.Context, job domain.CronJob) error {
47 | return c.db.Insert(ctx, toEntity(job))
48 | }
49 |
50 | func (c *cronRepository) CreateCrons(ctx context.Context, jobs []domain.CronJob) error {
51 | return c.db.Inserts(ctx, jobs)
52 | }
53 |
54 | func (c *cronRepository) DelCron(ctx context.Context, id int64) error {
55 | return c.db.Delete(ctx, id)
56 | }
57 |
58 | func (c *cronRepository) DelCrons(ctx context.Context, ids []int64) error {
59 | return c.db.Deletes(ctx, ids)
60 | }
61 |
62 | func toDomain(cron dao.CronJob) domain.CronJob {
63 | return domain.CronJob{
64 | ID: cron.ID,
65 | CronId: cron.CronId,
66 | Name: cron.Name,
67 | Description: cron.Description.String,
68 | CronExpr: cron.CronExpr,
69 | TaskType: domain.TaskType(cron.TaskType),
70 | Status: domain.JobStatus(cron.Status),
71 | MaxRetry: cron.MaxRetry,
72 | Timeout: cron.Timeout,
73 | Ctime: cron.Ctime,
74 | Utime: cron.Utime,
75 | }
76 | }
77 |
78 | func toEntity(cron domain.CronJob) dao.CronJob {
79 | return dao.CronJob{
80 | ID: cron.ID,
81 | CronId: cron.CronId,
82 | Name: cron.Name,
83 | Description: sql.NullString{
84 | String: cron.Description,
85 | Valid: cron.Description != "",
86 | },
87 | CronExpr: cron.CronExpr,
88 | TaskType: dao.TaskType(cron.TaskType),
89 | Status: dao.JobStatus(cron.Status),
90 | MaxRetry: cron.MaxRetry,
91 | Timeout: cron.Timeout,
92 | Ctime: cron.Ctime,
93 | Utime: cron.Utime,
94 | }
95 | }
96 |
--------------------------------------------------------------------------------
/serviceLogicX/rankingListX/rankingServiceX/rankingService.go:
--------------------------------------------------------------------------------
1 | package rankingServiceX
2 |
3 | import (
4 | "gitee.com/hgg_test/pkg_tool/v2/logx"
5 | "gitee.com/hgg_test/pkg_tool/v2/serviceLogicX/rankingListX/rankingServiceX/types"
6 | "gitee.com/hgg_test/pkg_tool/v2/sliceX/queueX"
7 | )
8 |
9 | // RankingServiceBatch 泛型榜单服务
10 | type RankingServiceBatch[T any] struct {
11 | batchSize int // 没批的数据批量大小
12 | topN int // 列表长度,前多少排名
13 | source func(offset, limit int) ([]T, error) // 批量数据源逻辑
14 | scoreProv types.ScoreProvider[T] // 得分提取器
15 | l logx.Loggerx
16 | }
17 |
18 | // NewRankingServiceBatch 创建泛型榜单服务
19 | // - topN 榜单长度,即统计榜单前xx的排名
20 | // - prov 获取得分的逻辑
21 | func NewRankingServiceBatch[T any](topN int, prov types.ScoreProvider[T], log logx.Loggerx) *RankingServiceBatch[T] {
22 | const maxTopN = 10000 // 列表长度,前多少排名,不可大于10000,一万
23 | if topN <= 0 {
24 | topN = 100
25 | }
26 | if topN > maxTopN {
27 | log.Error("topN is too large, 榜单长度不可大于10000,请设置合理值", logx.Int("topN", topN))
28 | topN = maxTopN
29 | }
30 | return &RankingServiceBatch[T]{
31 | batchSize: 100,
32 | topN: topN,
33 | scoreProv: prov,
34 | l: log,
35 | }
36 | }
37 |
38 | // SetBatchSize 设置批量数据源的批量大小
39 | func (r *RankingServiceBatch[T]) SetBatchSize(size int) {
40 | if size > 0 {
41 | r.batchSize = size
42 | }
43 | }
44 |
45 | // SetSource 设置批量数据源逻辑
46 | func (r *RankingServiceBatch[T]) SetSource(source func(offset, limit int) ([]T, error)) {
47 | r.source = source
48 | }
49 |
50 | // GetTopN 返回按得分从高到低排序的 Top-N 列表
51 | func (r *RankingServiceBatch[T]) GetTopN() []T {
52 | if r.source == nil {
53 | r.l.Error("source is nil,批量数据源未设置【server.SetSource】")
54 | return nil
55 | }
56 |
57 | pq := queueX.NewPriorityQueue(func(a, b T) bool {
58 | return r.scoreProv.Score(a) < r.scoreProv.Score(b)
59 | }, r.topN)
60 |
61 | offset := 0
62 | for {
63 | batch, err := r.source(offset, r.batchSize)
64 | if err != nil {
65 | r.l.Error("fetch batch error at offset, 在偏移处提取批次错误", logx.Int("offset", offset), logx.Error(err))
66 | break
67 | }
68 | if len(batch) == 0 {
69 | break
70 | }
71 |
72 | for _, item := range batch {
73 | if pq.Size() < r.topN {
74 | pq.Enqueue(item)
75 | } else if topItem, ok := pq.Peek(); ok {
76 | if r.scoreProv.Score(item) > r.scoreProv.Score(topItem) {
77 | pq.Dequeue()
78 | pq.Enqueue(item)
79 | }
80 | }
81 | }
82 |
83 | offset += len(batch)
84 | if len(batch) < r.batchSize {
85 | break
86 | }
87 | }
88 |
89 | result := make([]T, 0, pq.Size())
90 | for pq.Size() > 0 {
91 | if item, ok := pq.Dequeue(); ok {
92 | result = append(result, item)
93 | }
94 | }
95 |
96 | // 降序
97 | //sort.Slice(result, func(i, j int) bool {
98 | // return r.scoreProv.Score(result[i]) > r.scoreProv.Score(result[j])
99 | //})
100 | for i, j := 0, len(result)-1; i < j; i, j = i+1, j-1 {
101 | result[i], result[j] = result[j], result[i]
102 | }
103 | return result
104 | }
105 |
--------------------------------------------------------------------------------
/observationX/opentelemetryX/help_docx.txt:
--------------------------------------------------------------------------------
1 | // 需先全局注册,eg:
2 | 1、ioc里配置初始化
3 | package ioc
4 | import (
5 | "gitee.com/hgg_test/pkg_tool/v2/opentelemetryx"
6 | "go.opentelemetry.io/otel/exporters/zipkin"
7 | )
8 | func InitOTEL() opentelemetryx.CtxFn {
9 | // 使用zipkin的实现trace.SpanExporter
10 | exporter, err := zipkin.New("http://localhost:9411/api/v2/spans") // zipkin exporter
11 | if err != nil {
12 | panic(err)
13 | }
14 |
15 | ct, err := opentelemetryx.NewOtelStr(opentelemetryx.SvcInfo{ServiceName: "hgg_XiaoWeiShu", ServiceVersion: "v0.0.1"}, exporter)
16 | if err != nil {
17 | panic(err)
18 | }
19 | return ct
20 | }
21 |
22 | 2、main方法里配置初始化以及defer住
23 | package main
24 |
25 | tpCancel := ioc.InitOTEL() // 初始化open telemetry 【otle】
26 | defer func() {
27 | ctx, cancel := context.WithTimeout(context.Background(), time.Second)
28 | defer cancel()
29 | tpCancel(ctx)
30 | }()
31 |
32 |
33 | 3.1、=============================业务中接入=============================
34 | 3.1、=============================业务中接入=============================
35 | 3.1、=============================业务中接入=============================
36 | // InitOTELTracer 初始化 trace,需先初始化上面的全局
37 | func InitOTELTracer() trace.Tracer {
38 | otr := opentelemetryx.NewOtelTracerStr()
39 | tracer := otr.NewTracer("gitee.com/hgg_test/jksj-study/opentelemetry")
40 | return tracer
41 | }
42 |
43 | type userSvc struct {
44 | repo repository.UserRepo
45 | tracer trace.Tracer // 注入trace.Tracer
46 | }
47 |
48 | func NewUserSvc(repo repository.UserRepo, tracer trace.Tracer) UserSvc { // 构造方法传入trace.Tracer
49 | return &userSvc{
50 | repo: repo,
51 | tracer: tracer,
52 | }
53 | }
54 |
55 | func (u *userSvc) Profile(ctx context.Context, uid int64) (domain.User, error) {
56 | // ================业务中使用================
57 | ctx, span := u.tracer.Start(ctx, "UserProfile")
58 | defer span.End()
59 | span.AddEvent("IsServiceProfileInterface") // 添加事件,发生了某事
60 | span.SetAttributes(attribute.String("uid", strconv.Itoa(int(uid)))) // 添加属性, 强调在上下文里面有什么数据
61 | // ================业务中使用================
62 |
63 | return u.repo.FindById(ctx, uid)
64 | }
65 |
66 |
67 |
68 |
69 |
70 | 3.2、=============================gin中接入=============================
71 | 3.2、=============================gin中接入=============================
72 | 3.2、=============================gin中接入=============================
73 | // openTelemetry提供有gin的接入,直接在gin的middleware中使用即可
74 | // gin.HandlerFunc中接入openTelemetry,链路追踪
75 | otelgin.Middleware("hgg_XiaoWeiShu"),
76 |
77 |
78 |
79 |
80 | 3.3、=============================gorm中接入=============================
81 | 3.3、=============================gorm中接入=============================
82 | 3.3、=============================gorm中接入=============================
83 | // openTelemetry提供有gorm的接入,直接在gorm的注册use接入使用即可
84 | // ==================================GORM自己带的openTelemetry链路追踪==================================
85 | db, err := gorm.Open
86 | err = db.Use(tracing.NewPlugin(tracing.WithoutMetrics()))
87 | if err != nil {
88 | panic(err)
89 | }
90 |
91 |
92 |
--------------------------------------------------------------------------------
/DBx/mysqlX/gormx/dbMovex/myMovex/messageQueue/consumerx/consumer.go:
--------------------------------------------------------------------------------
1 | package consumerx
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | events2 "gitee.com/hgg_test/pkg_tool/v2/DBx/mysqlX/gormx/dbMovex/myMovex/events"
7 | "gitee.com/hgg_test/pkg_tool/v2/channelx/mqX"
8 | "gitee.com/hgg_test/pkg_tool/v2/channelx/mqX/kafkaX/saramaX/consumerX"
9 | "gitee.com/hgg_test/pkg_tool/v2/logx"
10 | "github.com/IBM/sarama"
11 | "gorm.io/gorm"
12 | "log"
13 | )
14 |
15 | /*
16 | ==============================
17 | 利用kafka消息队列,削峰、解耦,消费者处理不一致数据
18 | ==============================
19 | */
20 |
21 | type ConsumerConf struct {
22 | Addr []string
23 | GroupId string
24 | SaramaConf *sarama.Config
25 | }
26 |
27 | type DbConf struct {
28 | SrcDb *gorm.DB
29 | DstDb *gorm.DB
30 | }
31 |
32 | type Consumer[T events2.InconsistentEvent] struct {
33 | ConsumerConfig ConsumerConf
34 | DbConfig DbConf
35 | //Fn func(msg *sarama.ConsumerMessage, event events.InconsistentEvent) error
36 | L logx.Loggerx
37 | //ConsumerIn messageQueuex.ConsumerIn
38 | Consumer mqX.Consumer
39 | }
40 |
41 | func NewConsumer[T events2.InconsistentEvent](consumerConf ConsumerConf, dbConfig DbConf, l logx.Loggerx) *Consumer[T] {
42 | c := &Consumer[T]{
43 | ConsumerConfig: consumerConf,
44 | DbConfig: dbConfig,
45 | L: l,
46 | }
47 | //c.SetFn(c.fn()) // 业务逻辑函数初始化
48 | //c.ConsumerIn = saramaConsumerx.NewConsumerIn(c.newConsumerGroup(), c.newConsumerGroupHandler())
49 | consumerCg, err := sarama.NewConsumerGroup(c.ConsumerConfig.Addr, c.ConsumerConfig.GroupId, c.ConsumerConfig.SaramaConf)
50 | if err != nil {
51 | c.L.Error("new consumer group error", logx.Error(err))
52 | }
53 | c.Consumer = consumerX.NewKafkaConsumer(consumerCg, &consumerX.ConsumerConfig{
54 | BatchSize: 0,
55 | BatchTimeout: 0,
56 | })
57 | return c
58 | }
59 |
60 | func (c *Consumer[T]) InitConsumer(ctx context.Context, topic string) error {
61 | //return c.ConsumerIn.ReceiveMessage(ctx, []messageQueuex.Tp{{Topic: topic}})
62 | return c.Consumer.Subscribe(ctx, []string{topic}, newFn(&c.DbConfig, c.L))
63 | }
64 |
65 | type fn struct {
66 | db *DbConf
67 | l logx.Loggerx
68 | }
69 |
70 | func newFn(db *DbConf, l logx.Loggerx) *fn {
71 | return &fn{db: db, l: l}
72 | }
73 |
74 | func (f *fn) IsBatch() bool {
75 | return false
76 | }
77 |
78 | func (f *fn) Handle(ctx context.Context, msg *mqX.Message) error {
79 | log.Println("receive message")
80 | f.l.Info("receive message", logx.Any("msg: ", msg))
81 | ov, err := NewOverrideFixer[events2.TestUser](f.db.SrcDb, f.db.DstDb)
82 | if err != nil {
83 | //panic(err)
84 | return err
85 | }
86 | var event events2.InconsistentEvent
87 | err = json.Unmarshal(msg.Value, &event)
88 | if err != nil {
89 | return err
90 | }
91 | // 修复数据
92 | err = ov.Fix(context.Background(), event.ID)
93 | f.l.Info("receive message success, 消费不一致数据", logx.Int64("value_id: ", event.ID), logx.Any("event: ", event))
94 | return nil
95 | }
96 |
97 | func (f *fn) HandleBatch(ctx context.Context, msgs []*mqX.Message) (success bool, err error) {
98 | return true, nil
99 | }
100 |
--------------------------------------------------------------------------------
/rpc/grpcx/limiter/slidingWindow/slidingWindow.go:
--------------------------------------------------------------------------------
1 | package slidingWindow
2 |
3 | import (
4 | "context"
5 | "gitee.com/hgg_test/pkg_tool/v2/sliceX/queueX"
6 | "google.golang.org/grpc"
7 | "google.golang.org/grpc/codes"
8 | "google.golang.org/grpc/status"
9 | "sync"
10 | "time"
11 | )
12 |
13 | // SlidingWindowLimiter 滑动窗口限流器
14 | type SlidingWindowLimiter struct {
15 | window time.Duration // 统计窗口大小(如1分钟)
16 | threshold int // 窗口内允许的最大请求数(阈值)
17 | queue *queueX.PriorityQueue[time.Time] // 存储请求时间戳的最小堆(队首是最早请求)
18 | lock sync.Mutex // 限流器全局锁(保护队列操作)
19 | }
20 |
21 | // NewSlidingWindowLimiter 构造函数
22 | // - window: 窗口时长(如time.Second)
23 | // - threshold: 窗口内最大请求数(如100)
24 | //
25 | // 每1秒最多有100个请求/秒,即1秒内最多100个请求
26 | func NewSlidingWindowLimiter(window time.Duration, threshold int) *SlidingWindowLimiter {
27 | // 初始化最小堆:比较函数用time.Time.Before(早的时间戳更小,队首是最旧请求)
28 | return &SlidingWindowLimiter{
29 | window: window,
30 | threshold: threshold,
31 | queue: queueX.NewPriorityQueue[time.Time](func(a, b time.Time) bool { return a.Before(b) }, 0),
32 | }
33 | }
34 |
35 | // BuildServerInterceptor
36 | // - 构建gRPC服务端拦截器
37 | // - 构建gRPC UnaryServerInterceptor
38 | func (c *SlidingWindowLimiter) BuildServerInterceptor() grpc.UnaryServerInterceptor {
39 | return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
40 | if !c.Allow() {
41 | // 限流:返回资源耗尽错误【返回使用status.Errorf方便日志捕捉】
42 | return nil, status.Errorf(codes.ResourceExhausted, "【hgg: rpc触发限流】超出窗口最大请求数量%d", c.threshold)
43 | }
44 | // 允许请求:调用后续处理逻辑
45 | return handler(ctx, req)
46 | }
47 | }
48 |
49 | // Allow
50 | // - 检查是否允许通过请求
51 | // - 判断是否允许当前请求(核心逻辑)
52 | func (c *SlidingWindowLimiter) Allow() bool {
53 | c.lock.Lock()
54 | defer c.lock.Unlock()
55 |
56 | now := time.Now()
57 | windowStart := now.Add(-c.window) // 窗口左边界(当前时间往前推window)
58 |
59 | // 1. 清理过期请求:删除队列中所有早于windowStart的时间戳
60 | c.removeExpired(windowStart)
61 | //for {
62 | // peekTime, ok := c.queue.Peek()
63 | // if !ok || !peekTime.Before(windowStart) {
64 | // break // 队首已处于窗口内,无需继续清理
65 | // }
66 | // c.queue.Pop() // 移除过期请求
67 | //}
68 |
69 | // 2. 检查是否超过阈值
70 | if c.queue.Size() >= c.threshold {
71 | return false // 超过阈值,不允许
72 | }
73 |
74 | // 3. 记录当前请求时间戳
75 | //c.queue.Push(now)
76 | c.queue.Enqueue(now)
77 | return true // 允许请求
78 | }
79 |
80 | // removeExpired 移除过期的请求时间戳
81 | func (c *SlidingWindowLimiter) removeExpired(windowStart time.Time) {
82 | // 持续移除窗口开始时间之前的请求
83 | // 清理过期请求:删除队列中所有早于windowStart的时间戳
84 | for {
85 | peekTime, ok := c.queue.Peek()
86 | if !ok || !peekTime.Before(windowStart) {
87 | break // 队首已处于窗口内,无需继续清理
88 | }
89 | //c.queue.Pop() // 移除过期请求
90 | c.queue.Dequeue() // 移除过期请求
91 | }
92 | }
93 |
94 | // GetCurrentCount 获取当前队列长度(主要用于测试)
95 | func (c *SlidingWindowLimiter) GetCurrentCount() int {
96 | c.lock.Lock()
97 | defer c.lock.Unlock()
98 |
99 | now := time.Now()
100 | windowStart := now.Add(-c.window)
101 | c.removeExpired(windowStart)
102 |
103 | return c.queue.Len()
104 | }
105 |
--------------------------------------------------------------------------------
/rpc/grpcx/limiter/leakyBucket/leakyBucket.go:
--------------------------------------------------------------------------------
1 | package leakyBucket
2 |
3 | import (
4 | "context"
5 | "sync"
6 | "time"
7 |
8 | "google.golang.org/grpc"
9 | "google.golang.org/grpc/codes"
10 | "google.golang.org/grpc/status"
11 | )
12 |
13 | // LeakyBucketLimiter 漏桶限流算法
14 | // 漏桶算法特点:
15 | // 1. 固定容量的桶
16 | // 2. 以恒定速率处理请求(漏水)
17 | // 3. 当桶满时,新请求被拒绝(溢出)
18 | type LeakyBucketLimiter struct {
19 | capacity int // 桶容量
20 | rate time.Duration // 漏水速率(产生令牌的间隔)
21 | bucket chan struct{} // 漏桶,缓冲channel模拟桶容量
22 | closeCh chan struct{} // 关闭信号
23 | closeOnce sync.Once
24 | wg sync.WaitGroup // 等待goroutine退出
25 | }
26 |
27 | // NewLeakyBucketLimiter 创建漏桶限流器
28 | // capacity: 桶容量,最多可累积的请求数
29 | // rate: 漏水速率,即处理请求的最小间隔时间
30 | func NewLeakyBucketLimiter(capacity int, rate time.Duration) *LeakyBucketLimiter {
31 | if capacity <= 0 {
32 | capacity = 1
33 | }
34 | if rate <= 0 {
35 | rate = time.Millisecond * 100
36 | }
37 |
38 | limiter := &LeakyBucketLimiter{
39 | capacity: capacity,
40 | rate: rate,
41 | bucket: make(chan struct{}, capacity),
42 | closeCh: make(chan struct{}),
43 | }
44 |
45 | // 启动漏水(令牌生成)goroutine
46 | limiter.wg.Add(1)
47 | go limiter.leakWater()
48 |
49 | return limiter
50 | }
51 |
52 | // leakWater 漏水过程:以固定速率向桶中添加令牌
53 | func (l *LeakyBucketLimiter) leakWater() {
54 | defer l.wg.Done()
55 |
56 | ticker := time.NewTicker(l.rate)
57 | defer ticker.Stop()
58 |
59 | for {
60 | select {
61 | case <-ticker.C:
62 | // 尝试向桶中添加一个令牌(非阻塞)
63 | select {
64 | case l.bucket <- struct{}{}:
65 | // 成功添加令牌
66 | default:
67 | // 桶已满,丢弃令牌(正常情况)
68 | }
69 | case <-l.closeCh:
70 | // 收到关闭信号,退出goroutine
71 | return
72 | }
73 | }
74 | }
75 |
76 | // BuildServerInterceptor 构建gRPC服务端拦截器
77 | func (l *LeakyBucketLimiter) BuildServerInterceptor() grpc.UnaryServerInterceptor {
78 | return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
79 | select {
80 | case <-l.bucket:
81 | // 成功从桶中取出一个令牌(漏水),允许处理请求
82 | return handler(ctx, req)
83 | case <-ctx.Done():
84 | // 请求上下文已取消
85 | return nil, ctx.Err()
86 | default:
87 | // 桶为空,触发限流
88 | return nil, status.Errorf(codes.ResourceExhausted,
89 | "漏桶限流:桶容量 %d,漏水速率 %v", l.capacity, l.rate)
90 | }
91 | }
92 | }
93 |
94 | // Close 关闭限流器,释放资源
95 | func (l *LeakyBucketLimiter) Close() error {
96 | l.closeOnce.Do(func() {
97 | close(l.closeCh)
98 | l.wg.Wait() // 等待漏水goroutine退出
99 | close(l.bucket)
100 | })
101 | return nil
102 | }
103 |
104 | // Allow 检查是否允许请求通过(可用于非gRPC场景)
105 | func (l *LeakyBucketLimiter) Allow(ctx context.Context) bool {
106 | select {
107 | case <-l.bucket:
108 | return true
109 | case <-ctx.Done():
110 | return false
111 | default:
112 | return false
113 | }
114 | }
115 |
116 | // AllowWithTimeout 带超时的检查
117 | func (l *LeakyBucketLimiter) AllowWithTimeout(timeout time.Duration) bool {
118 | if timeout <= 0 {
119 | return l.Allow(context.Background())
120 | }
121 |
122 | ctx, cancel := context.WithTimeout(context.Background(), timeout)
123 | defer cancel()
124 |
125 | return l.Allow(ctx)
126 | }
127 |
--------------------------------------------------------------------------------
/DBx/mysqlX/gormx/dbPrometheusx/prometheusGormBuild.go:
--------------------------------------------------------------------------------
1 | package dbPrometheusx
2 |
3 | import (
4 | "github.com/prometheus/client_golang/prometheus"
5 | "gorm.io/gorm"
6 | "time"
7 | )
8 |
9 | // Callbacks sql语句执行时间
10 | type Callbacks struct {
11 | vector *prometheus.SummaryVec
12 | }
13 |
14 | type PrometheusSummaryOpts prometheus.SummaryOpts
15 |
16 | func NewCallbacks(PrometheusConf PrometheusSummaryOpts) *Callbacks {
17 | vector := prometheus.NewSummaryVec(prometheus.SummaryOpts(PrometheusConf), []string{"type", "table"})
18 | prometheus.MustRegister(vector)
19 | return &Callbacks{
20 | vector: vector,
21 | }
22 | }
23 |
24 | func (c *Callbacks) Before() func(db *gorm.DB) {
25 | return func(db *gorm.DB) {
26 | start := time.Now()
27 | db.Set("start_time", start) // 设置一个key为start_time,值为start的变量
28 | }
29 | }
30 | func (c *Callbacks) After(typ string) func(db *gorm.DB) {
31 | return func(db *gorm.DB) {
32 | val, _ := db.Get("start_time") // 获取key为start_time的变量
33 | start, ok := val.(time.Time) // 将val转换为time.Time类型【断言】
34 | if ok {
35 | duration := time.Since(start).Milliseconds() // 计算时间差
36 | c.vector.WithLabelValues(typ, db.Statement.Table).Observe(float64(duration)) // 将时间差放入prometheus
37 | }
38 | }
39 | }
40 |
41 | func (c *Callbacks) Name() string {
42 | return "gormDbPrometheus"
43 | }
44 |
45 | func (c *Callbacks) Initialize(db *gorm.DB) error {
46 | err := db.Callback().Create().Before("*").Register("prometheus_gorm_create_before", c.Before())
47 | if err != nil {
48 | return err
49 | }
50 | err = db.Callback().Create().After("*").Register("prometheus_gorm_create_after", c.After("CREATE"))
51 | if err != nil {
52 | return err
53 | }
54 |
55 | err = db.Callback().Query().Before("*").Register("prometheus_gorm_query_before", c.Before())
56 | if err != nil {
57 | return err
58 | }
59 | err = db.Callback().Query().After("*").Register("prometheus_gorm_query_after", c.After("QUERY"))
60 | if err != nil {
61 | return err
62 | }
63 |
64 | err = db.Callback().Raw().Before("*").Register("prometheus_gorm_raw_before", c.Before())
65 | if err != nil {
66 | return err
67 | }
68 | err = db.Callback().Raw().After("*").Register("prometheus_gorm_raw_after", c.After("RAW"))
69 | if err != nil {
70 | return err
71 | }
72 |
73 | err = db.Callback().Update().Before("*").Register("prometheus_gorm_update_before", c.Before())
74 | if err != nil {
75 | return err
76 | }
77 | err = db.Callback().Update().After("*").Register("prometheus_gorm_update_after", c.After("UPDATE"))
78 | if err != nil {
79 | return err
80 | }
81 |
82 | err = db.Callback().Delete().Before("*").Register("prometheus_gorm_delete_before", c.Before())
83 | if err != nil {
84 | return err
85 | }
86 | err = db.Callback().Delete().After("*").Register("prometheus_gorm_delete_after", c.After("DELETE"))
87 | if err != nil {
88 | return err
89 | }
90 |
91 | err = db.Callback().Row().Before("*").Register("prometheus_gorm_row_before", c.Before())
92 | if err != nil {
93 | return err
94 | }
95 | err = db.Callback().Row().After("*").Register("prometheus_gorm_row_after", c.After("ROW"))
96 | //if err != nil {
97 | // return err
98 | //}
99 | return err
100 | }
101 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # pkg_tool
2 |
3 | 这是一个功能丰富的工具包项目,包含了多种工具模块,适用于不同的开发需求。
4 |
5 | ## 功能特性
6 |
7 | - **Web 中间件**:提供 Gin 框架的中间件支持,包括 JWT 认证、限流、日志记录等。
8 | - **Rpc 中间件**:多种限流算法【滑动窗口、计数器、令牌桶等】、负载均衡算法、熔断拦截器、可观测性平台。
9 | - **榜单模型计算**:提供多种基于高性能榜单服务模型计算封装。
10 | - **定时任务的调度机制**:二次封装定时任务调度机制,可控制主机节点负载进行任务调度以及候选等实现。
11 | - **数据库迁移工具**:支持数据库的双写池、迁移调度器、数据校验等功能。
12 | - **限流与锁**:实现滑动窗口限流和 Redis 分布式锁。
13 | - **消息队列**:消息队列顶级接口封装,支持 Kafka 的生产者和消费者实现。
14 | - **配置管理**:提供基于 Viper 的配置管理接口。
15 | - **类型转换**:提供多种类型转换工具函数。
16 | - **日志与监控**:支持多种日志框架和 Prometheus 监控。
17 |
18 |
19 | ## 安装
20 |
21 | 确保你已经安装了 Go 环境,然后使用以下命令获取项目:
22 |
23 | ```bash
24 | go get gitee.com/hgg_test/pkg_tool/v2@latest
25 | ```
26 |
27 | ## 使用示例
28 |
29 | ### 缓存计数服务
30 |
31 | ```go
32 | redisClient := redis.NewClient(&redis.Options{
33 | Addr: "localhost:6379",
34 | })
35 |
36 | localCache := cacheLocalx.NewCacheLocalRistrettoStr[string, string](ristretto.NewCache[string, string]())
37 |
38 | countService := cacheCountServicex.NewCount[string, string](redisClient, localCache)
39 | ```
40 |
41 | ### 数据库迁移
42 |
43 | ```go
44 | srcDB, _ := gorm.Open(mysql.Open("user:pass@tcp(localhost:3306)/src_db"), &gorm.Config{})
45 | dstDB, _ := gorm.Open(mysql.Open("user:pass@tcp(localhost:3306)/dst_db"), &gorm.Config{})
46 |
47 | doubleWritePool := dbMovex.NewDoubleWritePool(srcDB, dstDB, logger, config...)
48 | ```
49 |
50 | ### 消息队列生产者
51 |
52 | ```go
53 | config := sarama.NewConfig()
54 | config.Producer.Return.Successes = true
55 |
56 | producer, _ := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
57 | messageProducer := saramaProducerx.NewSaramaProducerStr[sarama.SyncProducer](producer, config)
58 | ```
59 |
60 | ### 消息队列消费者
61 |
62 | ```go
63 | consumerGroup, _ := sarama.NewConsumerGroup([]string{"localhost:9092"}, "group_id", sarama.NewConfig())
64 | consumer := saramaConsumerx.NewConsumerIn(consumerGroup, handler)
65 | ```
66 |
67 | ### 配置管理
68 |
69 | ```go
70 | viperConfig := viper.New()
71 | viperConfig.SetDefault("mysql.dsn", "user:pass@tcp(localhost:3306)/dbname")
72 |
73 | configService := viperx.NewViperConfigStr()
74 | configService.InitViperLocal("config.yaml", DefaultConfig{})
75 | ```
76 |
77 | ### 类型转换
78 |
79 | ```go
80 | intValue, ok := toanyx.ToAny[int](someValue)
81 | stringValue, ok := toanyx.ToAny[string](someValue)
82 | ```
83 |
84 | ### 限流
85 |
86 | ```go
87 | redisClient := redis.NewClient(&redis.Options{
88 | Addr: "localhost:6379",
89 | })
90 |
91 | limiter := redis_slide_window.NewRedisSlideWindowKLimiter(redisClient, time.Minute, 100)
92 | ```
93 |
94 | ### 分布式锁
95 |
96 | ```go
97 | redisClients := []*redis.Client{
98 | redis.NewClient(&redis.Options{Addr: "localhost:6379"}),
99 | }
100 |
101 | lock := redsyncx.NewLockRedsync(redisClients, logger, redsyncx.Config{})
102 | ```
103 |
104 | ### 日志记录
105 |
106 | ```go
107 | zapLogger, _ := zap.NewProduction()
108 | logger := zaplogx.NewZapLogger(zapLogger)
109 | ```
110 |
111 | ### Web 中间件
112 |
113 | ```go
114 | r := gin.Default()
115 |
116 | jwtMiddleware := jwtx.NewJwtxMiddlewareGinx(redisClient, &jwtx.JwtxMiddlewareGinxConfig{})
117 | r.Use(jwtMiddleware.VerifyToken)
118 | ```
119 |
120 | ## 贡献
121 |
122 | 欢迎贡献代码和提出问题。请先阅读 [贡献指南](CONTRIBUTING.md)。
123 |
124 | ## 许可证
125 |
126 | 该项目使用 MIT 许可证。详情请查看 [LICENSE](LICENSE) 文件。
--------------------------------------------------------------------------------
/observationX/prometheusX/prometheus.go:
--------------------------------------------------------------------------------
1 | package prometheusX
2 |
3 | import (
4 | "github.com/prometheus/client_golang/prometheus"
5 | "github.com/prometheus/client_golang/prometheus/promhttp"
6 | "net/http"
7 | )
8 |
9 | func InitPrometheus(addr string) {
10 | http.Handle("/metrics", promhttp.Handler())
11 | err := http.ListenAndServe(addr, nil)
12 | if err != nil {
13 | panic(err)
14 | }
15 | }
16 |
17 | // PrometheusCounter 计数器
18 | func PrometheusCounter() {
19 | // 创建计数器
20 | counter := prometheus.NewCounter(prometheus.CounterOpts{
21 | Namespace: "hgg", // 命名空间
22 | Subsystem: "hgg_XiaoWeiShu", // 子系统
23 | Name: "hgg_counter", // 名称
24 | })
25 | // 注册
26 | prometheus.MustRegister(counter)
27 | // +1,默认为0
28 | counter.Inc()
29 | // 必须是正数,不能小于0
30 | counter.Add(10.2)
31 | }
32 |
33 | // PrometheusGauge 仪表
34 | func PrometheusGauge() {
35 | gauge := prometheus.NewGauge(prometheus.GaugeOpts{
36 | Namespace: "hgg",
37 | Subsystem: "hgg_XiaoWeiShu",
38 | Name: "hgg_gauge",
39 | })
40 | prometheus.MustRegister(gauge)
41 | // 设置 gauge 值
42 | gauge.Set(12)
43 | // gauge Add
44 | gauge.Add(10.2)
45 | gauge.Add(-3)
46 | gauge.Sub(3)
47 | }
48 |
49 | // PrometheusHistogram 直方图
50 | func PrometheusHistogram() {
51 | histogram := prometheus.NewHistogram(prometheus.HistogramOpts{
52 | Namespace: "hgg",
53 | Subsystem: "hgg_XiaoWeiShu",
54 | Name: "hgg_histogram",
55 | // 按照这个分桶
56 | Buckets: []float64{10, 50, 100, 500, 1000, 10000},
57 | })
58 | prometheus.MustRegister(histogram)
59 | // 观测,12.4是观测的值
60 | histogram.Observe(12.4)
61 | }
62 |
63 | // PrometheusSummary 概要、总结
64 | func PrometheusSummary() {
65 | summary := prometheus.NewSummary(prometheus.SummaryOpts{
66 | Namespace: "hgg",
67 | Subsystem: "hgg_XiaoWeiShu",
68 | Name: "hgg_summary",
69 | Objectives: map[float64]float64{
70 | 0.5: 0.01, // 以响应时间为例:50%的观测值响应时间,在0.01的百分比内【误差在 %1】
71 | 0.75: 0.01, // 以响应时间为例:75%的观测值响应时间,在0.01的百分比内【误差在 %1】
72 | 0.90: 0.005, // 以响应时间为例:90%的观测值响应时间,在0.005的百分比内【误差在 %0.5】
73 | 0.98: 0.002, // 以响应时间为例:98%的观测值响应时间,在0.002的百分比内【误差在 %0.2】
74 | 0.99: 0.001, // 以响应时间为例:99%的观测值响应时间,在0.001的百分比内【误差在 %0.1】
75 | 0.999: 0.0001, // 以响应时间为例:99.9%的观测值响应时间,在0.0001的百分比内【误差在 %0.01】
76 | },
77 | })
78 | prometheus.MustRegister(summary)
79 | // 观测
80 | // Observe 12.3是观测的值,就是响应时间有多少在哪一个区间内
81 | // eg: 百分之99的请求,在12.3毫秒内完成
82 | summary.Observe(12.3)
83 | }
84 |
85 | // PrometheusVector 向量
86 | // - 实践中,我们采集的指标很有可能是根据一些业务特征来统计的,比如说分开统计HTTP响应码是2XX的以及非2XX的。
87 | // - 在这种情况下,可以考虑使用 Prometheus 中的Vector 用法。
88 | func PrometheusVector() {
89 | labelNames := []string{"pattern", "method", "status"}
90 | opts := prometheus.SummaryOpts{
91 | Namespace: "hgg",
92 | Subsystem: "hgg_XiaoWeiShu",
93 | Name: "hgg_summaryVector",
94 | ConstLabels: map[string]string{
95 | "server": "localhost:9091",
96 | "evn": "test",
97 | "appName": "hgg_XiaoWeiShu",
98 | },
99 | Help: "The statics info for http request",
100 | }
101 |
102 | // labelNames设置观测哪些值, summaryVector.WithLabelValues()就是观测的值
103 | summaryVector := prometheus.NewSummaryVec(opts, labelNames)
104 | // 所以最后一个 Observe 方法,可以看成是当次请求 pattern = /profile, method = get 和 status = 200 的时候,响应时间是128单位是毫秒的都是被统计的+1。
105 | summaryVector.WithLabelValues("/profile", "GET", "200").Observe(128)
106 | }
107 |
--------------------------------------------------------------------------------
/configx/types.go:
--------------------------------------------------------------------------------
1 | package configx
2 |
3 | import (
4 | "time"
5 | )
6 |
7 | // ConfigIn 抽象配置服务接口
8 | type ConfigIn interface {
9 | /*
10 | ====================================================
11 | 此部分为初始化配置服务接口
12 | ====================================================
13 | */
14 | // GetViper 获取viper的实例【仅用于整个项目单个配置文件】
15 | //GetViper() *viper.Viper
16 |
17 | // GetNamedViper 获取指定名称的viper实例【用于配置多个配置文件时使用】
18 | //GetNamedViper(name string) (*viper.Viper, error)
19 |
20 | // InitViperLocal 配置单个文件
21 | // - filePath是文件路径 精确到文件名,如:config/dev.yaml
22 | // - defaultConfig是默认配置项【viper.SetDefault("mysql.dsn", "root:root@tcp(localhost:3306)/webook")】
23 | InitViperLocal(filePath string, defaultConfig ...DefaultConfig) error
24 |
25 | // InitViperLocals 配置多个文件
26 | // - 读取多个配置文件,fileName是文件名 精确文件名不带后缀,fileType是文件得类型eg: yaml、json....,filePath是文件路径 精确到文件夹名,
27 | // - defaultConfig是默认配置项【viper.SetDefault("mysql.dsn", "root:root@tcp(localhost:3306)/webook")】
28 | InitViperLocals(fileName, fileType, filePath string, defaultConfig ...DefaultConfig) error
29 |
30 | // InitViperLocalWatch 配置单个本地文件并监听文件变化
31 | // - filePath是文件路径 精确到文件名,如:config/dev.yaml
32 | // - defaultConfig是默认配置项【viper.SetDefault("mysql.dsn", "root:root@tcp(localhost:3306)/webook")】
33 | InitViperLocalWatch(filePath string, defaultConfig ...DefaultConfig) error
34 |
35 | // InitViperLocalsWatchs 配置多个本地文件并监听文件变化
36 | // - filePath是文件路径 精确到文件名,如:config/dev.yaml
37 | // - defaultConfig是默认配置项【viper.SetDefault("mysql.dsn", "root:root@tcp(localhost:3306)/webook")】
38 | InitViperLocalsWatchs(fileName, fileType, filePath string, defaultConfig ...DefaultConfig) error
39 |
40 | // InitViperRemote 配置远程文件
41 | // - provider 是远程配置的提供者,这里使用的是etcd3
42 | // - endpoint 是远程配置的访问地址
43 | // - path 是远程配置的存储路径
44 | InitViperRemote(provider, endpoint, path string) error
45 |
46 | // InitViperRemoteWatch 配置远程文件并监听文件变化
47 | // - provider 是远程配置的提供者,这里使用的是etcd3
48 | // - endpoint 是远程配置的访问地址
49 | // - path 是远程配置的存储路径
50 | // - interval 是远程配置的监听间隔频率【几秒监听一次...】
51 | InitViperRemoteWatch(provider, endpoint, path string) error
52 |
53 | // SetInterval 设置远程配置的监听间隔频率【几秒监听一次...】
54 | // - t 是远程配置的监听间隔频率【几秒监听一次...】
55 | SetInterval(t time.Duration)
56 |
57 | /*
58 | ====================================================
59 | 此部分为读取读取配置文件接口
60 | ====================================================
61 | */
62 | // Get 获取配置项【当整个项目读取Init一个配置文件,fileName文件名留空,但整个项目读取Init多个配置文件,需传入文件名eg: db.yaml】
63 | // - - 注意=============注意=============注意=============
64 | // - 新版本从【configx.Get】单独读取配置文件
65 | // - 注意=============注意=============注意=============
66 | Get(key string, fileName ...string) any
67 | GetUnmarshalKey(key string, rawVal any, fileName ...string) error
68 | }
69 |
70 | type DefaultConfig struct {
71 | Key string
72 | Val any
73 | }
74 |
75 | //func InitConfigViper() configx.ViperConfigIn {
76 | // conf := configx.NewViperConfigStr(viper.New())
77 | // err := conf.InitViperLocalWatch("./config/dev.yaml",
78 | // // 默认配置,当配置文件读取失败时使用
79 | // configx.DefaultConfig{
80 | // Key: "mysql.dsn",
81 | // Val: "root:root@tcp(localhost:3306)/hgg",
82 | // },
83 | // // 默认配置,当配置文件读取失败时使用
84 | // configx.DefaultConfig{
85 | // Key: "redis.addr",
86 | // Val: "localhost:6379",
87 | // },
88 | // )
89 | //
90 | // if err != nil {
91 | // panic(err)
92 | // }
93 | // return conf
94 | //}
95 |
96 | /*
97 | 如果配置多个配置文件
98 | 参考测试用例
99 | }
100 | */
101 |
--------------------------------------------------------------------------------
/syncX/lock/redisLock/redsyncx/redsync1_test.go:
--------------------------------------------------------------------------------
1 | package redsyncx
2 |
3 | import (
4 | "context"
5 | "gitee.com/hgg_test/pkg_tool/v2/logx/zerologx"
6 | "github.com/redis/go-redis/v9"
7 | "github.com/robfig/cron/v3"
8 | "github.com/rs/zerolog"
9 | "github.com/stretchr/testify/assert"
10 | "log"
11 | "os"
12 | "testing"
13 | "time"
14 | )
15 |
16 | func TestNewResSyncStr1(t *testing.T) {
17 | var clis []*redis.Client
18 | // 创建 Redis 客户端
19 | client := redis.NewClient(&redis.Options{
20 | Addr: "localhost:6379",
21 | })
22 | clis = append(clis, client)
23 | client1 := redis.NewClient(&redis.Options{
24 | Addr: "localhost:6380",
25 | })
26 | clis = append(clis, client1)
27 | client2 := redis.NewClient(&redis.Options{
28 | Addr: "localhost:6380",
29 | })
30 | clis = append(clis, client2)
31 |
32 | // 测试 Redis 连接
33 | if err := client.Ping(context.Background()).Err(); err != nil {
34 | assert.NoError(t, err)
35 | log.Fatal("Redis连接失败:", err)
36 | }
37 | // 测试 Redis 连接
38 | if err := client1.Ping(context.Background()).Err(); err != nil {
39 | assert.NoError(t, err)
40 | log.Fatal("Redis1连接失败:", err)
41 | }
42 | // 测试 Redis 连接
43 | if err := client2.Ping(context.Background()).Err(); err != nil {
44 | assert.NoError(t, err)
45 | log.Fatal("Redis1连接失败:", err)
46 | }
47 |
48 | // =========================
49 | // =========================
50 | // =========================
51 |
52 | // 创建日志器
53 | logger := zerolog.New(os.Stdout).Level(zerolog.DebugLevel)
54 | zlog := zerologx.NewZeroLogger(&logger)
55 |
56 | // 创建分布式锁配置
57 | config := Config{
58 | LockName: "test-lock",
59 | Expiry: 10 * time.Second,
60 | RetryDelay: 1 * time.Second,
61 | MaxRetries: 2,
62 | }
63 |
64 | // 创建分布式锁实例
65 | dl := NewLockRedsync(clis, zlog, config)
66 | defer dl.Stop()
67 | dl.Start()
68 | time.Sleep(time.Second)
69 | // ============方式1===============
70 | // ============方式1===============
71 | // 监听锁状态,定时任务测试
72 | // 1. 生成一个cron表达式
73 | expr := cron.New(cron.WithSeconds()) // 秒级
74 | id, err := expr.AddFunc("@every 5s", func() { // 5秒一次定时任务
75 | if dl.IsLocked() {
76 | logicService2(t)
77 | }
78 | })
79 | id1, err1 := expr.AddFunc("@every 5s", func() { // 5秒一次定时任务
80 | if dl.IsLocked() {
81 | logicService22(t)
82 | }
83 | })
84 | assert.NoError(t, err)
85 | assert.NoError(t, err1)
86 | t.Log("任务id: ", id)
87 | t.Log("任务id: ", id1)
88 |
89 | expr.Start() // 启动定时器
90 |
91 | // 模拟定时任务总时间20秒,20秒后停止定时器【实际业务可以expr := cron.New后返回expr,由main控制退出】
92 | time.Sleep(time.Second * 30)
93 |
94 | ctx := expr.Stop() // 暂停定时器,不调度新任务执行了,正在执行的继续执行
95 | t.Log("发出停止信号")
96 | <-ctx.Done() // 彻底停止定时器
97 | t.Log("彻底停止,没有任务执行了")
98 |
99 | // ==============方式2=============
100 | // ==============方式2=============
101 | // 监听锁状态,定时任务测试, 方式2
102 | //ticker := time.NewTicker(time.Second * 5)
103 | //for {
104 | // select {
105 | // case <-ticker.C:
106 | // // 锁已获取,执行业务逻辑
107 | // if dl.IsLocked() {
108 | // logicService(t)
109 | // }
110 | // continue
111 | // }
112 | //}
113 | }
114 |
115 | func logicService2(t *testing.T) {
116 | t.Log(time.Now().Format(time.DateTime), "开始执行业务逻辑1")
117 | time.Sleep(time.Second * 2)
118 | t.Log(time.Now().Format(time.DateTime), "done logicService")
119 | }
120 |
121 | func logicService22(t *testing.T) {
122 | t.Log(time.Now().Format(time.DateTime), "开始执行业务逻辑2")
123 | time.Sleep(time.Second * 2)
124 | t.Log(time.Now().Format(time.DateTime), "done logicService")
125 | }
126 |
--------------------------------------------------------------------------------
/syncX/lock/redisLock/redsyncx/redsync_test.go:
--------------------------------------------------------------------------------
1 | package redsyncx
2 |
3 | import (
4 | "context"
5 | "gitee.com/hgg_test/pkg_tool/v2/logx/zerologx"
6 | "github.com/redis/go-redis/v9"
7 | "github.com/robfig/cron/v3"
8 | "github.com/rs/zerolog"
9 | "github.com/stretchr/testify/assert"
10 | "log"
11 | "os"
12 | "testing"
13 | "time"
14 | )
15 |
16 | func TestNewResSyncStr12(t *testing.T) {
17 | var clis []*redis.Client
18 | // 创建 Redis 客户端
19 | client := redis.NewClient(&redis.Options{
20 | Addr: "localhost:6379",
21 | })
22 | clis = append(clis, client)
23 | client1 := redis.NewClient(&redis.Options{
24 | Addr: "localhost:6380",
25 | })
26 | clis = append(clis, client1)
27 | client2 := redis.NewClient(&redis.Options{
28 | Addr: "localhost:6380",
29 | })
30 | clis = append(clis, client2)
31 |
32 | // 测试 Redis 连接
33 | if err := client.Ping(context.Background()).Err(); err != nil {
34 | assert.NoError(t, err)
35 | log.Fatal("Redis连接失败:", err)
36 | }
37 | // 测试 Redis 连接
38 | if err := client1.Ping(context.Background()).Err(); err != nil {
39 | assert.NoError(t, err)
40 | log.Fatal("Redis1连接失败:", err)
41 | }
42 | // 测试 Redis 连接
43 | if err := client2.Ping(context.Background()).Err(); err != nil {
44 | assert.NoError(t, err)
45 | log.Fatal("Redis1连接失败:", err)
46 | }
47 |
48 | // =========================
49 | // =========================
50 | // =========================
51 |
52 | // 创建日志器
53 | logger := zerolog.New(os.Stdout).Level(zerolog.DebugLevel)
54 | zlog := zerologx.NewZeroLogger(&logger)
55 |
56 | // 创建分布式锁配置
57 | config := Config{
58 | LockName: "test-lock",
59 | Expiry: 10 * time.Second,
60 | RetryDelay: 1 * time.Second,
61 | MaxRetries: 2,
62 | }
63 |
64 | // 创建分布式锁实例【可以根据gopsutilx包下的获取系统负载,然后判断选择负载较低的实例,负载信息可以存储redis/mysql】
65 | dl := NewLockRedsync(clis, zlog, config)
66 | defer dl.Stop() // 停止锁并释放资源
67 | dl.Start() // 启动锁获取和续约
68 |
69 | time.Sleep(time.Second)
70 |
71 | // ============方式1===============
72 | // ============方式1===============
73 | // 监听锁状态,定时任务测试
74 | // 1. 生成一个cron表达式
75 | expr := cron.New(cron.WithSeconds()) // 秒级
76 | id, err := expr.AddFunc("@every 5s", func() { // 5秒一次定时任务
77 | if dl.IsLocked() {
78 | logicService(t)
79 | }
80 | })
81 | id1, err1 := expr.AddFunc("@every 5s", func() { // 5秒一次定时任务
82 | if dl.IsLocked() {
83 | logicService11(t)
84 | }
85 | })
86 | assert.NoError(t, err)
87 | assert.NoError(t, err1)
88 | t.Log("任务id: ", id)
89 | t.Log("任务id: ", id1)
90 |
91 | expr.Start() // 启动定时器
92 |
93 | // 模拟定时任务总时间20秒,20秒后停止定时器【实际业务可以expr := cron.New后返回expr,由main控制退出】
94 | time.Sleep(time.Second * 30)
95 |
96 | ctx := expr.Stop() // 暂停定时器,不调度新任务执行了,正在执行的继续执行
97 | t.Log("发出停止信号")
98 | <-ctx.Done() // 彻底停止定时器
99 | t.Log("彻底停止,没有任务执行了")
100 |
101 | // ==============方式2=============
102 | // ==============方式2=============
103 | // 监听锁状态,定时任务测试, 方式2
104 | //ticker := time.NewTicker(time.Second * 5)
105 | //for {
106 | // select {
107 | // case <-ticker.C:
108 | // // 锁已获取,执行业务逻辑
109 | // if dl.IsLocked() {
110 | // logicService(t)
111 | // }
112 | // continue
113 | // }
114 | //}
115 | }
116 |
117 | func logicService(t *testing.T) {
118 | t.Log(time.Now().Format(time.DateTime), "开始执行业务逻辑1")
119 | time.Sleep(time.Second * 2)
120 | t.Log(time.Now().Format(time.DateTime), "done logicService")
121 | }
122 |
123 | func logicService11(t *testing.T) {
124 | t.Log(time.Now().Format(time.DateTime), "开始执行业务逻辑2")
125 | time.Sleep(time.Second * 2)
126 | t.Log(time.Now().Format(time.DateTime), "done logicService")
127 | }
128 |
--------------------------------------------------------------------------------
/DBx/mysqlX/gormx/dbMovex/myMovex/messageQueue/consumerx/consumer_test.go:
--------------------------------------------------------------------------------
1 | package consumerx
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "os"
7 | "strings"
8 | "testing"
9 |
10 | "gitee.com/hgg_test/pkg_tool/v2/DBx/mysqlX/gormx/dbMovex/myMovex/events"
11 | "gitee.com/hgg_test/pkg_tool/v2/logx"
12 | "gitee.com/hgg_test/pkg_tool/v2/logx/zerologx"
13 | "github.com/IBM/sarama"
14 | "github.com/rs/zerolog"
15 | "github.com/stretchr/testify/assert"
16 | "gorm.io/driver/mysql"
17 | "gorm.io/gorm"
18 | )
19 |
20 | /*
21 | ======================
22 | 测试不一致消息上报kafka后,消费者消费不一致消息
23 | ======================
24 | */
25 |
26 | func setupTestSrcDB() (*gorm.DB, error) {
27 | srcdb, err := gorm.Open(mysql.Open("root:root@tcp(localhost:13306)/src_db?parseTime=true"), &gorm.Config{})
28 | if err != nil {
29 | return nil, fmt.Errorf("failed to connect source database: %w", err)
30 | }
31 |
32 | // 自动迁移表结构
33 | err = srcdb.AutoMigrate(&events.TestUser{})
34 | if err != nil {
35 | return nil, fmt.Errorf("failed to auto migrate source database: %w", err)
36 | }
37 | return srcdb, nil
38 | }
39 |
40 | func setupTestDstDB() (*gorm.DB, error) {
41 | dstdb, err := gorm.Open(mysql.Open("root:root@tcp(localhost:13306)/dst_db?parseTime=true"), &gorm.Config{})
42 | if err != nil {
43 | return nil, fmt.Errorf("failed to connect destination database: %w", err)
44 | }
45 |
46 | // 自动迁移表结构
47 | err = dstdb.AutoMigrate(&events.TestUser{})
48 | if err != nil {
49 | return nil, fmt.Errorf("failed to auto migrate destination database: %w", err)
50 | }
51 | return dstdb, nil
52 | }
53 |
54 | var addr []string = []string{"localhost:9094"}
55 |
56 | func InitLog() logx.Loggerx {
57 | zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
58 | // Level日志级别【可以考虑作为参数传】,测试传zerolog.InfoLevel/NoLevel不打印
59 | // 模块化: Str("module", "userService模块")
60 | logger := zerolog.New(os.Stderr).Level(zerolog.DebugLevel).With().CallerWithSkipFrameCount(4).Caller().Timestamp().Logger()
61 | return zerologx.NewZeroLogger(&logger)
62 | }
63 |
64 | func TestNewConsumerGroupHandler1(t *testing.T) {
65 | // 设置测试数据库连接
66 | srcDb, err := setupTestSrcDB()
67 | if err != nil {
68 | t.Skipf("跳过测试:无法连接源数据库: %v", err)
69 | return
70 | }
71 |
72 | dstDb, err := setupTestDstDB()
73 | if err != nil {
74 | t.Skipf("跳过测试:无法连接目标数据库: %v", err)
75 | return
76 | }
77 |
78 | cfg := sarama.NewConfig()
79 | // 创建消费者配置
80 | cm := NewConsumer(ConsumerConf{
81 | Addr: addr,
82 | GroupId: "test_group",
83 | SaramaConf: cfg,
84 | }, DbConf{
85 | SrcDb: srcDb,
86 | DstDb: dstDb,
87 | }, InitLog())
88 |
89 | // 初始化消费者
90 | err = cm.InitConsumer(context.Background(), "dbMove")
91 | if err != nil {
92 | // 检查是否为连接错误
93 | if isConnectionError(err) {
94 | t.Skipf("跳过测试:无法连接Kafka或初始化消费者: %v", err)
95 | return
96 | }
97 | assert.NoError(t, err)
98 | }
99 | }
100 |
101 | // isConnectionError 检查错误是否为连接错误
102 | func isConnectionError(err error) bool {
103 | errStr := err.Error()
104 | // 检查常见的连接错误消息
105 | connectionErrors := []string{
106 | "connection refused",
107 | "connectex",
108 | "no such host",
109 | "timeout",
110 | "dial tcp",
111 | "network is unreachable",
112 | "brokers not available",
113 | "cannot connect",
114 | }
115 |
116 | for _, connErr := range connectionErrors {
117 | if containsIgnoreCase(errStr, connErr) {
118 | return true
119 | }
120 | }
121 | return false
122 | }
123 |
124 | // containsIgnoreCase 检查字符串是否包含子字符串(忽略大小写)
125 | func containsIgnoreCase(s, substr string) bool {
126 | return strings.Contains(strings.ToLower(s), strings.ToLower(substr))
127 | }
128 |
--------------------------------------------------------------------------------