From 1899546ba40bbb409c08d47976d6b975d19b6353 Mon Sep 17 00:00:00 2001
From: douxu
Date: Fri, 21 Feb 2025 15:27:25 +0800
Subject: [PATCH 01/33] init code of share memory

---
 share_memory/file_lock.go     | 52 +++++++++++++++++++
 share_memory/share_memeory.go | 98 +++++++++++++++++++++++++++++++++++
 2 files changed, 150 insertions(+)
 create mode 100644 share_memory/file_lock.go
 create mode 100644 share_memory/share_memeory.go

diff --git a/share_memory/file_lock.go b/share_memory/file_lock.go
new file mode 100644
index 0000000..9a169e7
--- /dev/null
+++ b/share_memory/file_lock.go
@@ -0,0 +1,52 @@
+package sharememory
+
+import (
+	"fmt"
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+func main() {
+	// open the file
+	file, err := os.OpenFile("testfile.txt", os.O_RDWR|os.O_CREATE, 0o666)
+	if err != nil {
+		fmt.Println("Error opening file:", err)
+		return
+	}
+	defer file.Close()
+
+	// acquire an exclusive lock
+	fmt.Println("Acquiring exclusive lock...")
+	err = unix.Flock(int(file.Fd()), unix.LOCK_EX)
+	if err != nil {
+		fmt.Println("Error acquiring exclusive lock:", err)
+		return
+	}
+
+	fmt.Println("Exclusive lock acquired. Writing to file...")
+	// file-writing logic would go here
+	fmt.Println("Writing complete.")
+
+	// release the exclusive lock before taking the shared lock below: the
+	// second os.OpenFile creates a new file description, and flock would
+	// otherwise block forever on our own still-held exclusive lock
+	unix.Flock(int(file.Fd()), unix.LOCK_UN)
+
+	// open the file again, read-only
+	file, err = os.OpenFile("testfile.txt", os.O_RDONLY, 0o666)
+	if err != nil {
+		fmt.Println("Error opening file:", err)
+		return
+	}
+	defer file.Close()
+
+	// acquire a shared lock
+	fmt.Println("Acquiring shared lock...")
+	err = unix.Flock(int(file.Fd()), unix.LOCK_SH)
+	if err != nil {
+		fmt.Println("Error acquiring shared lock:", err)
+		return
+	}
+	defer unix.Flock(int(file.Fd()), unix.LOCK_UN) // release the lock
+
+	fmt.Println("Shared lock acquired. Reading from file...")
+	// file-reading logic would go here
+	fmt.Println("Reading complete.")
+}
diff --git a/share_memory/share_memeory.go b/share_memory/share_memeory.go
new file mode 100644
index 0000000..6667cfe
--- /dev/null
+++ b/share_memory/share_memeory.go
@@ -0,0 +1,98 @@
+package sharememory
+
+import (
+	"fmt"
+	"unsafe"
+
+	"modelRT/orm"
+
+	"golang.org/x/sys/unix"
+)
+
+// CreateShareMemory defines a function to create a shared memory segment
+func CreateShareMemory(key uintptr, structSize uintptr) (uintptr, error) {
+	// logger := logger.GetLoggerInstance()
+	// create shared memory
+	shmID, _, err := unix.Syscall(unix.SYS_SHMGET, key, structSize, unix.IPC_CREAT|0o666)
+	if err != 0 {
+		// logger.Error(fmt.Sprintf("create shared memory by key %v failed:", key), zap.Error(err))
+		return 0, fmt.Errorf("create shared memory failed:%w", err)
+	}
+
+	// attach shared memory
+	shmAddr, _, err := unix.Syscall(unix.SYS_SHMAT, shmID, 0, 0)
+	if err != 0 {
+		// logger.Error(fmt.Sprintf("attach shared memory by shmID %v failed:", shmID), zap.Error(err))
+		return 0, fmt.Errorf("attach shared memory failed:%w", err)
+	}
+	return shmAddr, nil
+}
+
+// ReadComponentFromShareMemory defines a function to read a component value from shared memory
+func ReadComponentFromShareMemory(key uintptr, componentInfo *orm.Component) error {
+	structSize := unsafe.Sizeof(orm.Component{})
+	shmID, _, err := unix.Syscall(unix.SYS_SHMGET, key, structSize, 0o666)
+	if err != 0 {
+		return fmt.Errorf("get shared memory failed:%w", err)
+	}
+
+	shmAddr, _, err := unix.Syscall(unix.SYS_SHMAT, shmID, 0, 0)
+	if err != 0 {
+		return fmt.Errorf("attach shared memory failed:%w", err)
+	}
+
+	// copy the data out of shared memory: the stored struct starts at
+	// shmAddr itself, and assigning through *componentInfo (instead of
+	// rebinding the pointer parameter) makes the value visible to the caller
+	*componentInfo = *(*orm.Component)(unsafe.Pointer(shmAddr))
+
+	// detach shared memory
+	unix.Syscall(unix.SYS_SHMDT, shmAddr, 0, 0)
+	return nil
+}
+
+// WriteComponentInShareMemory defines a function to write a component value into shared memory
+func WriteComponentInShareMemory(key uintptr, componentInfo *orm.Component) error {
+	structSize := unsafe.Sizeof(orm.Component{})
+	shmID, _, err := unix.Syscall(unix.SYS_SHMGET, key, structSize, 0o666)
+	if err != 0 {
+		return fmt.Errorf("get shared memory failed:%w", err)
+	}
+
+	shmAddr, _, err := unix.Syscall(unix.SYS_SHMAT, shmID, 0, 0)
+	if err != 0 {
+		return fmt.Errorf("attach shared memory failed:%w", err)
+	}
+
+	// the segment starts at shmAddr; the previous offset of
+	// unsafe.Sizeof(structSize) pointed past the stored struct
+	obj := (*orm.Component)(unsafe.Pointer(shmAddr))
+	obj.ComponentType = componentInfo.ComponentType
+
+	// component table columns, kept for reference:
+	// id integer NOT NULL DEFAULT nextval('component_id_seq'::regclass),
+	// global_uuid uuid NOT NULL DEFAULT gen_random_uuid(),
+	// nspath character varying(32) COLLATE pg_catalog."default",
+	// tag character varying(32) COLLATE pg_catalog."default" NOT NULL,
+	// name character varying(64) COLLATE pg_catalog."default" NOT NULL,
+	// description character varying(512) COLLATE pg_catalog."default" NOT NULL DEFAULT ''::character varying,
+	// grid character varying(64) COLLATE pg_catalog."default" NOT NULL,
+	// zone character varying(64) COLLATE pg_catalog."default" NOT NULL,
+	// station character varying(64) COLLATE pg_catalog."default" NOT NULL,
+	// type integer NOT NULL,
+	// in_service boolean DEFAULT false,
+	// state integer NOT NULL DEFAULT 0,
+	// connected_bus jsonb NOT NULL DEFAULT '{}'::jsonb,
+	// label jsonb NOT NULL DEFAULT '{}'::jsonb,
+	// context jsonb NOT NULL DEFAULT '{}'::jsonb,
+	// page_id integer NOT NULL,
+	// op integer NOT NULL DEFAULT '-1'::integer,
+	// ts timestamp with time zone NOT NULL DEFAULT CURRENT_TIMESTAMP,
+
+	unix.Syscall(unix.SYS_SHMDT, shmAddr, 0, 0)
+	return nil
+}
+
+// DeleteShareMemory defines a function to delete shared memory
+func DeleteShareMemory(key uintptr) error {
+	// System V segments are removed with shmctl(IPC_RMID); there is no
+	// SYS_SHM_UNLINK syscall for SysV shared memory on Linux, so look up
+	// the segment id first and then mark it for removal
+	shmID, _, err := unix.Syscall(unix.SYS_SHMGET, key, 0, 0o666)
+	if err != 0 {
+		return fmt.Errorf("get shared memory failed:%w", err)
+	}
+	_, _, err = unix.Syscall(unix.SYS_SHMCTL, shmID, unix.IPC_RMID, 0)
+	if err != 0 {
+		return fmt.Errorf("delete shared memory failed:%w", err)
+	}
+	return nil
+}
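A minimal usage sketch of the API above (editor's illustration, not part of the patch; the import path and the IPC key 0x1001 are assumptions, and orm.Component is assumed to expose the ComponentType field used in the patch). Note that only fixed-size, pointer-free fields survive the cross-process copy; Go strings, slices, and jsonb maps inside orm.Component hold pointers that are meaningless in another process.

package main

import (
	"fmt"
	"unsafe"

	"modelRT/orm"
	sharememory "modelRT/share_memory"
)

func main() {
	const key = uintptr(0x1001) // arbitrary example IPC key agreed on by both processes
	size := unsafe.Sizeof(orm.Component{})

	// writer side: create/attach the segment once, then publish a value
	if _, err := sharememory.CreateShareMemory(key, size); err != nil {
		panic(err)
	}
	if err := sharememory.WriteComponentInShareMemory(key, &orm.Component{ComponentType: 1}); err != nil {
		panic(err)
	}

	// reader side, typically a different process attaching the same key
	var got orm.Component
	if err := sharememory.ReadComponentFromShareMemory(key, &got); err != nil {
		panic(err)
	}
	fmt.Println(got.ComponentType)
}
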
From 2c2c2811a7038a541707b60d3ae48ff300630bf1 Mon Sep 17 00:00:00 2001
From: douxu
Date: Fri, 28 Feb 2025 16:00:16 +0800
Subject: [PATCH 02/33] init read lock script of distributedlock

---
 README.md                                 |   1 +
 distributedlock/luascript/rlock_script.go | 107 ++++
 distributedlock/redis_lock.go             | 299 +++++++++++
 distributedlock/redis_rwlock.go           | 471 ++++++++++++++++++
 go.mod                                    |   6 +-
 go.sum                                    |  45 ++
 share_memory/file_lock.go                 |  52 --
 .../share_memeory.go                      |   0
 8 files changed, 928 insertions(+), 53 deletions(-)
 create mode 100644 distributedlock/luascript/rlock_script.go
 create mode 100644 distributedlock/redis_lock.go
 create mode 100644 distributedlock/redis_rwlock.go
 delete mode 100644 share_memory/file_lock.go
 rename {share_memory => sharememory}/share_memeory.go (100%)

diff --git a/README.md b/README.md
index b247ca2..b52488c 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,3 @@
 # ModelRT
+
 [![Build Status](http://192.168.46.100:4080/api/badges/CL-Softwares/modelRT/status.svg)](http://192.168.46.100:4080/CL-Softwares/modelRT)
\ No newline at end of file
diff --git a/distributedlock/luascript/rlock_script.go b/distributedlock/luascript/rlock_script.go
new file mode 100644
index 0000000..d2d72f1
--- /dev/null
+++ b/distributedlock/luascript/rlock_script.go
@@ -0,0 +1,107 @@
+// Package luascript defines the lua script used for redis distributed lock
+package luascript
+
+// RlockScript is the lua script for the lock read lock command
+/*
+KEYS[1]: the lock key name (key), usually the unique identifier of the lock.
+KEYS[2]: the timeout key name prefix (rwTimeoutPrefix), used to store the timeout key of each read lock.
+ARGV[1]: the lock lease time (lockLeaseTime), in milliseconds.
+ARGV[2]: the unique token of the current client, used to distinguish different clients.
+*/
+var RlockScript = `local mode = redis.call('hget', KEYS[1], 'mode');
+local lockKey = KEYS[2] .. ARGV[2];
+if (mode == false) then
+    redis.call('hset', KEYS[1], 'mode', 'read');
+    redis.call('hset', KEYS[1], lockKey, '1');
+    redis.call('hexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey);
+    redis.call('expire', KEYS[1], ARGV[1]);
+    return 1;
+end;
+
+if (mode == 'write') then
+    return -1;
+end;
+
+if (mode == 'read') then
+    if (redis.call('hexists', KEYS[1], lockKey) == 1) then
+        redis.call('hincrby', KEYS[1], lockKey, '1');
+        local remainTime = redis.call('httl', KEYS[1], 'fields', '1', lockKey);
+        redis.call('hexpire', KEYS[1], math.max(tonumber(remainTime[1]), tonumber(ARGV[1])), 'fields', '1', lockKey);
+    else
+        redis.call('hset', KEYS[1], lockKey, '1');
+        redis.call('hexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey);
+    end;
+    local cursor = 0;
+    local maxRemainTime = tonumber(ARGV[1]);
+    local pattern = KEYS[2] .. ':*';
+    repeat
+        local hscanResult = redis.call('hscan', KEYS[1], cursor, 'match', pattern, 'count', '100');
+        cursor = tonumber(hscanResult[1]);
+        local fields = hscanResult[2];
+
+        for i = 1, #fields,2 do
+            local field = fields[i];
+            local remainTime = redis.call('httl', KEYS[1], 'fields', '1', field);
+            maxRemainTime = math.max(tonumber(remainTime[1]), maxRemainTime); 
+        end;
+    until cursor == 0;
+
+    local remainTime = redis.call('ttl', KEYS[1]);
+    redis.call('expire', KEYS[1], math.max(tonumber(remainTime),maxRemainTime));
+    return 1;
+end;
+`
+
+// TODO: optimize the read-lock unlock statements
+// UnRlockScript is the lua script for the unlock read lock command
+/*
+KEYS[1]: the lock key name (key), usually the unique identifier of the lock.
+KEYS[2]: the unlock notification channel (chankey), used to notify other clients that the lock has been released.
+KEYS[3]: the timeout key name prefix (rwTimeoutTokenPrefix), used to store the timeout key of each read lock.
+KEYS[4]: the timeout key name prefix (prefixKey), used to store the timeout key of each read lock.
+ARGV[1]: the unlock message (unlockMessage), used to notify other clients that the lock has been released.
+ARGV[2]: the unique token of the current client, used to distinguish different clients.
+*/
+var UnRlockScript = `local mode = redis.call('hget', KEYS[1], 'mode');
+if (mode == false) then
+    redis.call('publish', KEYS[2], ARGV[1]);
+    return 1;
+end;
+local lockExists = redis.call('hexists', KEYS[1], ARGV[2]);
+if (lockExists == 0) then
+    return nil;
+end;
+
+local counter = redis.call('hincrby', KEYS[1], ARGV[2], -1);
+if (counter == 0) then
+    redis.call('hdel', KEYS[1], ARGV[2]);
+end;
+redis.call('del', KEYS[3] .. ':' .. (counter+1));
+
+if (redis.call('hlen', KEYS[1]) > 1) then
+    local maxRemainTime = -3;
+    local keys = redis.call('hkeys', KEYS[1]);
+    for n, key in ipairs(keys) do
+        counter = tonumber(redis.call('hget', KEYS[1], key));
+        if type(counter) == 'number' then
+            for i=counter, 1, -1 do
+                local remainTime = redis.call('ttl', KEYS[4] .. ':' .. key .. ':rwlock_timeout:' .. i);
+                maxRemainTime = math.max(remainTime, maxRemainTime);
+            end;
+        end;
+    end;
+
+    if maxRemainTime > 0 then
+        redis.call('pexpire', KEYS[1], maxRemainTime);
+        return 0;
+    end;
+
+    if mode == 'write' then
+        return 0;
+    end;
+end;
+
+redis.call('del', KEYS[1]);
+redis.call('publish', KEYS[2], ARGV[1]);
+return 1;
+`
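Both scripts above lean on per-field hash TTLs (hexpire/httl), which exist only in Redis 7.4 and later; on older servers these calls fail with an unknown-command error. A sketch of how the Go side might evaluate the read-lock script (editor's illustration; the key shapes shown are assumptions, the real prefixes are built in redis_rwlock.go later in this series):

client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
token := uuid.New().String()

// KEYS[1] = lock hash, KEYS[2] = per-reader field prefix,
// ARGV[1] = lease time, ARGV[2] = client token
res := client.Eval(luascript.RlockScript,
	[]string{"redission-rwlock:demo", "{redission-rwlock:demo}:rwlock_timeout"},
	30, token)
v, err := res.Result()
if err != nil && err != redis.Nil {
	panic(err)
}
fmt.Println(v) // 1 on success, -1 while another client holds the write lock
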
diff --git a/distributedlock/redis_lock.go b/distributedlock/redis_lock.go
new file mode 100644
index 0000000..e66c03c
--- /dev/null
+++ b/distributedlock/redis_lock.go
@@ -0,0 +1,299 @@
+package distributed_lock
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	luascript "modelRT/distributedlock/luascript"
+
+	"github.com/go-redis/redis"
+	uuid "github.com/google/uuid"
+	"go.uber.org/zap"
+)
+
+var lockScript string = strings.Join([]string{
+	"if (redis.call('exists', KEYS[1]) == 0) then ",
+	"redis.call('hset', KEYS[1], ARGV[2], 1); ",
+	"redis.call('pexpire', KEYS[1], ARGV[1]); ",
+	"return nil; ",
+	"end; ",
+	"if (redis.call('hexists', KEYS[1], ARGV[2]) == 1) then ",
+	"redis.call('hincrby', KEYS[1], ARGV[2], 1); ",
+	"redis.call('pexpire', KEYS[1], ARGV[1]); ",
+	"return nil; ",
+	"end; ",
+	"return redis.call('pttl', KEYS[1]);",
+}, "")
+
+var refreshLockScript string = strings.Join([]string{
+	"if (redis.call('hexists', KEYS[1], ARGV[2]) == 1) then ",
+	"redis.call('pexpire', KEYS[1], ARGV[1]); ",
+	"return 1; ",
+	"end; ",
+	"return 0;",
+}, "")
+
+var unlockScript string = strings.Join([]string{
+	"if (redis.call('exists', KEYS[1]) == 0) then ",
+	"redis.call('publish', KEYS[2], ARGV[1]); ",
+	"return 1; ",
+	"end;",
+	"if (redis.call('hexists', KEYS[1], ARGV[3]) == 0) then ",
+	"return nil;",
+	"end; ",
+	"local counter = redis.call('hincrby', KEYS[1], ARGV[3], -1); ",
+	"if (counter > 0) then ",
+	"redis.call('pexpire', KEYS[1], ARGV[2]); ",
+	"return 0; ",
+	"else ",
+	"redis.call('del', KEYS[1]); ",
+	"redis.call('publish', KEYS[2], ARGV[1]); ",
+	"return 1; ",
+	"end; ",
+	"return nil;",
+}, "")
+
+const (
+	internalLockLeaseTime = uint64(30) * 1000
+	unlockMessage         = 0
+)
+
+type RedissionLockConfig struct {
+	LockLeaseTime time.Duration
+	Prefix        string
+	ChanPrefix    string
+	Key           string
+}
+
+type redissionLocker struct {
+	token         string
+	key           string
+	chankey       string
+	exit          chan struct{}
+	lockLeaseTime uint64
+	client        *redis.Client
+	once          *sync.Once
+	logger        *zap.Logger
+}
+
+func (rl *redissionLocker) Lock(ctx context.Context, timeout ...time.Duration) {
+	fmt.Println(luascript.RlockScript)
+	if rl.exit == nil {
+		rl.exit = make(chan struct{})
+	}
+	ttl, err := rl.tryLock()
+	if err != nil {
+		panic(err)
+	}
+
+	if ttl <= 0 {
+		rl.once.Do(func() {
+			go rl.refreshLockTimeout()
+		})
+		return
+	}
+
+	submsg := make(chan struct{}, 1)
+	defer close(submsg)
+	sub := rl.client.Subscribe(rl.chankey)
+	defer sub.Close()
+	go rl.subscribeLock(sub, submsg)
+	// listen := rl.listenManager.Subscribe(rl.key, rl.token)
+	// defer rl.listenManager.UnSubscribe(rl.key, rl.token)
+
+	timer := time.NewTimer(ttl)
+	defer timer.Stop()
+	// outimer: if the lock cannot be acquired within this window, give up entirely
+	var outimer *time.Timer
+	if len(timeout) > 0 && timeout[0] > 0 {
+		outimer = time.NewTimer(timeout[0])
+	}
+LOOP:
+	for {
+		ttl, err = rl.tryLock()
+		if err != nil {
+			panic(err)
+		}
+
+		if ttl <= 0 {
+			rl.once.Do(func() {
+				go rl.refreshLockTimeout()
+			})
+			return
+		}
+		if outimer != nil {
+			select {
+			case _, ok := <-submsg:
+				if !timer.Stop() {
+					<-timer.C
+				}
+
+				if !ok {
+					panic("lock listener released")
+				}
+
+				timer.Reset(ttl)
+			case <-ctx.Done():
+				// break LOOP
+				panic("lock context already released")
+			case <-timer.C:
+				timer.Reset(ttl)
+			case <-outimer.C:
+				if !timer.Stop() {
+					<-timer.C
+				}
+				break LOOP
+			}
+		} else {
+			select {
+			case _, ok := <-submsg:
+				if !timer.Stop() {
+					<-timer.C
+				}
+
+				if !ok {
+					panic("lock listener released")
+				}
+
+				timer.Reset(ttl)
+			case <-ctx.Done():
+				// break LOOP
+				panic("lock context already released")
+			case <-timer.C:
+				timer.Reset(ttl)
+			}
+		}
+	}
+}
+
+func (rl *redissionLocker) subscribeLock(sub *redis.PubSub, out chan struct{}) {
+	defer func() {
+		if err := recover(); err != nil {
+			rl.logger.Error("subscribeLock catch error", zap.Error(err.(error)))
+		}
+	}()
+	if sub == nil || out == nil {
+		return
+	}
+	rl.logger.Debug("lock enters sub routine", zap.String("token", rl.token))
+LOOP:
+	for {
+		msg, err := sub.Receive()
+		if err != nil {
+			rl.logger.Info("sub receive message", zap.Error(err))
+			break LOOP
+		}
+
+		select {
+		case <-rl.exit:
+			break LOOP
+		default:
+			if len(out) > 0 {
+				// the channel already holds a message, drop this one
+				rl.logger.Debug("drop message when channel is full")
+				continue
+			}
+
+			switch msg.(type) {
+			case *redis.Subscription:
+				// Ignore.
+			case *redis.Pong:
+				// Ignore.
+			case *redis.Message:
+				out <- struct{}{}
+			default:
+			}
+		}
+	}
+	rl.logger.Debug("lock sub routine release", zap.String("token", rl.token))
+}
+
+func (rl *redissionLocker) refreshLockTimeout() {
+	rl.logger.Debug("lock", zap.String("token", rl.token), zap.String("lock key", rl.key))
+	lockTime := time.Duration(rl.lockLeaseTime/3) * time.Millisecond
+	timer := time.NewTimer(lockTime)
+	defer timer.Stop()
+LOOP:
+	for {
+		select {
+		case <-timer.C:
+			timer.Reset(lockTime)
+			// update key expire time
+			res := rl.client.Eval(refreshLockScript, []string{rl.key}, rl.lockLeaseTime, rl.token)
+			val, err := res.Int()
+			if err != nil {
+				panic(err)
+			}
+			if val == 0 {
+				rl.logger.Debug("not find the lock key of self")
+				break LOOP
+			}
+		case <-rl.exit:
+			break LOOP
+
+		}
+	}
+	rl.logger.Debug("refresh routine release", zap.String("token", rl.token))
+}
+
+func (rl *redissionLocker) cancelRefreshLockTime() {
+	if rl.exit != nil {
+		close(rl.exit)
+		rl.exit = nil
+		rl.once = &sync.Once{}
+	}
+}
+
+func (rl *redissionLocker) tryLock() (time.Duration, error) {
+	res := rl.client.Eval(lockScript, []string{rl.key}, rl.lockLeaseTime, rl.token)
+	v, err := res.Result()
+	if err != redis.Nil && err != nil {
+		return 0, err
+	}
+
+	if v == nil {
+		return 0, nil
+	}
+
+	// pttl returns the remaining time in milliseconds; convert explicitly,
+	// otherwise the raw int64 would be interpreted as nanoseconds
+	return time.Duration(v.(int64)) * time.Millisecond, nil
+}
+
+func (rl *redissionLocker) UnLock() {
+	res := rl.client.Eval(unlockScript, []string{rl.key, rl.chankey}, unlockMessage, rl.lockLeaseTime, rl.token)
+	val, err := res.Result()
+	if err != redis.Nil && err != nil {
+		panic(err)
+	}
+	if val == nil {
+		panic("attempt to unlock lock, not locked by current routine by lock id:" + rl.token)
+	}
+	rl.logger.Debug("unlock", zap.String("token", rl.token), zap.String("key", rl.key))
+	if val.(int64) == 1 {
+		rl.cancelRefreshLockTime()
+	}
+}
+
+func GetLocker(client *redis.Client, ops *RedissionLockConfig) *redissionLocker {
+	r := &redissionLocker{
+		token:  uuid.New().String(),
+		client: client,
+		exit:   make(chan struct{}),
+		once:   &sync.Once{},
+		// default to the global zap logger so the Debug/Error calls above
+		// are safe even when no logger is injected
+		logger: zap.L(),
+	}
+
+	if len(ops.Prefix) <= 0 {
+		ops.Prefix = "redission-lock"
+	}
+	if len(ops.ChanPrefix) <= 0 {
+		ops.ChanPrefix = "redission-lock-channel"
+	}
+	if ops.LockLeaseTime == 0 {
+		r.lockLeaseTime = internalLockLeaseTime
+	} else {
+		// honor a caller-supplied lease time; the scripts expect milliseconds
+		r.lockLeaseTime = uint64(ops.LockLeaseTime / time.Millisecond)
+	}
+	r.key = strings.Join([]string{ops.Prefix, ops.Key}, ":")
+	r.chankey = strings.Join([]string{ops.ChanPrefix, ops.Key}, ":")
+	return r
+}
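A usage sketch for the mutex above (editor's illustration; assumes a reachable Redis server and that panics from Lock/UnLock are acceptable at the call site):

client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
locker := GetLocker(client, &RedissionLockConfig{
	Key:           "component:42",
	LockLeaseTime: 30 * time.Second,
})

ctx := context.Background()
locker.Lock(ctx, 5*time.Second) // optional second argument: stop waiting after 5s
defer locker.UnLock()
// critical section: at most one holder across processes; the hash counter
// makes the lock re-entrant for the same locker instance (same token)
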
diff --git a/distributedlock/redis_rwlock.go b/distributedlock/redis_rwlock.go
new file mode 100644
index 0000000..cd15035
--- /dev/null
+++ b/distributedlock/redis_rwlock.go
@@ -0,0 +1,471 @@
+package distributed_lock
+
+import (
+	"context"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/go-redis/redis"
+	uuid "github.com/google/uuid"
+	"go.uber.org/zap"
+)
+
+var rlockScript string = strings.Join([]string{
+	"local mode = redis.call('hget', KEYS[1], 'mode'); ",
+	"if (mode == false) then ",
+	"redis.call('hset', KEYS[1], 'mode', 'read'); ",
+	"redis.call('hset', KEYS[1], ARGV[2], 1); ",
+	"redis.call('set', KEYS[2] .. ':1', 1); ",
+	"redis.call('pexpire', KEYS[2] .. ':1', ARGV[1]); ",
+	"redis.call('pexpire', KEYS[1], ARGV[1]); ",
+	"return nil; ",
+	"end; ",
+	"if (mode == 'read') or (mode == 'write' and redis.call('hexists', KEYS[1], ARGV[3]) == 1) then ",
+	"local ind = redis.call('hincrby', KEYS[1], ARGV[2], 1); ",
+	"local key = KEYS[2] .. ':' .. ind;",
+	"redis.call('set', key, 1); ",
+	"redis.call('pexpire', key, ARGV[1]); ",
+	"local remainTime = redis.call('pttl', KEYS[1]); ",
+	"redis.call('pexpire', KEYS[1], math.max(remainTime, ARGV[1])); ",
+	"return nil; ",
+	"end;",
+	"return redis.call('pttl', KEYS[1]);",
+}, "")
+
+var runlockScript string = strings.Join([]string{
+	"local mode = redis.call('hget', KEYS[1], 'mode'); ",
+	"if (mode == false) then ",
+	"redis.call('publish', KEYS[2], ARGV[1]); ",
+	"return 1; ",
+	"end; ",
+	"local lockExists = redis.call('hexists', KEYS[1], ARGV[2]); ",
+	"if (lockExists == 0) then ",
+	"return nil;",
+	"end; ",
+
+	"local counter = redis.call('hincrby', KEYS[1], ARGV[2], -1); ",
+	"if (counter == 0) then ",
+	"redis.call('hdel', KEYS[1], ARGV[2]); ",
+	"end;",
+	"redis.call('del', KEYS[3] .. ':' .. (counter+1)); ",
+
+	"if (redis.call('hlen', KEYS[1]) > 1) then ",
+	"local maxRemainTime = -3; ",
+	"local keys = redis.call('hkeys', KEYS[1]); ",
+	"for n, key in ipairs(keys) do ",
+	"counter = tonumber(redis.call('hget', KEYS[1], key)); ",
+	"if type(counter) == 'number' then ",
+	"for i=counter, 1, -1 do ",
+	"local remainTime = redis.call('pttl', KEYS[4] .. ':' .. key .. ':rwlock_timeout:' .. i); ",
+	"maxRemainTime = math.max(remainTime, maxRemainTime);",
+	"end; ",
+	"end; ",
+	"end; ",
+
+	"if maxRemainTime > 0 then ",
+	"redis.call('pexpire', KEYS[1], maxRemainTime); ",
+	"return 0; ",
+	"end;",
+
+	"if mode == 'write' then ",
+	"return 0;",
+	"end; ",
+	"end; ",
+
+	"redis.call('del', KEYS[1]); ",
+	"redis.call('publish', KEYS[2], ARGV[1]); ",
+	"return 1; ",
+}, "")
+
+var rlockrefreshScript = strings.Join([]string{
+	"local counter = redis.call('hget', KEYS[1], ARGV[2]); ",
+	"if (counter ~= false) then ",
+	"redis.call('pexpire', KEYS[1], ARGV[1]); ",
+
+	"if (redis.call('hlen', KEYS[1]) > 1) then ",
+	"local keys = redis.call('hkeys', KEYS[1]); ",
+	"for n, key in ipairs(keys) do ",
+	"counter = tonumber(redis.call('hget', KEYS[1], key)); ",
+	"if type(counter) == 'number' then ",
+	"for i=counter, 1, -1 do ",
+	"redis.call('pexpire', KEYS[2] .. ':' .. key .. ':rwlock_timeout:' .. i, ARGV[1]); ",
+	"end; ",
+	"end; ",
+	"end; ",
+	"end; ",
+
+	"return 1; ",
+	"end; ",
+	"return 0;",
+}, "")
+
+var wlockScript string = strings.Join([]string{
+	"local mode = redis.call('hget', KEYS[1], 'mode'); ",
+	"if (mode == false) then ",
+	"redis.call('hset', KEYS[1], 'mode', 'write'); ",
+	"redis.call('hset', KEYS[1], ARGV[2], 1); ",
+	"redis.call('pexpire', KEYS[1], ARGV[1]); ",
+	"return nil; ",
+	"end; ",
+	"if (mode == 'write') then ",
+	"if (redis.call('hexists', KEYS[1], ARGV[2]) == 1) then ",
+	"redis.call('hincrby', KEYS[1], ARGV[2], 1); ",
+	"local currentExpire = redis.call('pttl', KEYS[1]); ",
+	"redis.call('pexpire', KEYS[1], currentExpire + ARGV[1]); ",
+	"return nil; ",
+	"end; ",
+	"end;",
+	"return redis.call('pttl', KEYS[1]);",
+}, "")
+
+var wunlockScript string = strings.Join([]string{
+	"local mode = redis.call('hget', KEYS[1], 'mode'); ",
+	"if (mode == false) then ",
+	"redis.call('publish', KEYS[2], ARGV[1]); ",
+	"return 1; ",
+	"end;",
+	"if (mode == 'write') then ",
+	"local lockExists = redis.call('hexists', KEYS[1], ARGV[3]); ",
+	"if (lockExists == 0) then ",
+	"return nil;",
+	"else ",
+	"local counter = redis.call('hincrby', KEYS[1], ARGV[3], -1); ",
+	"if (counter > 0) then ",
+	"redis.call('pexpire', KEYS[1], ARGV[2]); ",
+	"return 0; ",
+	"else ",
+	"redis.call('hdel', KEYS[1], ARGV[3]); ",
+	"if (redis.call('hlen', KEYS[1]) == 1) then ",
+	"redis.call('del', KEYS[1]); ",
+	"redis.call('publish', KEYS[2], ARGV[1]); ",
+	"else ",
+	// has unlocked read-locks
+	"redis.call('hset', KEYS[1], 'mode', 'read'); ",
+	"end; ",
+	"return 1; ",
+	"end; ",
+	"end; ",
+	"end; ",
+	"return nil;",
+}, "")
+
+type redissionReadLocker struct {
+	redissionLocker
+	rwTimeoutTokenPrefix string
+	prefixKey            string
+}
+
+func (rl *redissionReadLocker) Lock(ctx context.Context, timeout ...time.Duration) {
+	if rl.exit == nil {
+		rl.exit = make(chan struct{})
+	}
+	ttl, err := rl.tryLock()
+	if err != nil {
+		panic(err)
+	}
+
+	if ttl <= 0 {
+		rl.once.Do(func() {
+			go rl.refreshLockTimeout()
+		})
+		return
+	}
+
+	submsg := make(chan struct{}, 1)
+	defer close(submsg)
+	sub := rl.client.Subscribe(rl.chankey)
+
+	defer sub.Close()
+	go rl.subscribeLock(sub, submsg)
+	// listen := rl.listenManager.Subscribe(rl.key, rl.token)
+	// defer rl.listenManager.UnSubscribe(rl.key, rl.token)
+
+	timer := time.NewTimer(ttl)
+	defer timer.Stop()
+	var outimer *time.Timer
+	if len(timeout) > 0 && timeout[0] > 0 {
+		outimer = time.NewTimer(timeout[0])
+	}
+LOOP:
+	for {
+		ttl, err = rl.tryLock()
+		if err != nil {
+			panic(err)
+		}
+
+		if ttl <= 0 {
+			rl.once.Do(func() {
+				go rl.refreshLockTimeout()
+			})
+			return
+		}
+		if outimer != nil {
+			select {
+			case _, ok := <-submsg:
+				if !timer.Stop() {
+					<-timer.C
+				}
+
+				if !ok {
+					panic("lock listener released")
+				}
+
+				timer.Reset(ttl)
+			case <-ctx.Done():
+				// break LOOP
+				panic("lock context already released")
+			case <-timer.C:
+				timer.Reset(ttl)
+			case <-outimer.C:
+				if !timer.Stop() {
+					<-timer.C
+				}
+				break LOOP
+			}
+		} else {
+			select {
+			case _, ok := <-submsg:
+				if !timer.Stop() {
+					<-timer.C
+				}
+
+				if !ok {
+					panic("lock listener released")
+				}
+
+				timer.Reset(ttl)
+			case <-ctx.Done():
+				// break LOOP
+				panic("lock context already released")
+			case <-timer.C:
+				timer.Reset(ttl)
+			}
+		}
+	}
+}
+
+func (rl *redissionReadLocker) tryLock() (time.Duration, error) {
+	writeLockToken := strings.Join([]string{rl.token, "write"}, ":")
+	res := rl.client.Eval(rlockScript, []string{rl.key, rl.rwTimeoutTokenPrefix}, rl.lockLeaseTime, rl.token, writeLockToken)
+	v, err := res.Result()
+	if err != redis.Nil && err != nil {
+		return 0, err
+	}
+
+	if v == nil {
+		return 0, nil
+	}
+
+	// pttl returns milliseconds; convert explicitly
+	return time.Duration(v.(int64)) * time.Millisecond, nil
+}
+
+func (rl *redissionReadLocker) refreshLockTimeout() {
+	rl.logger.Debug("rlock", zap.String("token", rl.token), zap.String("key", rl.key))
+	lockTime := time.Duration(rl.lockLeaseTime/3) * time.Millisecond
+	timer := time.NewTimer(lockTime)
+	defer timer.Stop()
+LOOP:
+	for {
+		select {
+		case <-timer.C:
+			timer.Reset(lockTime)
+			// update key expire time
+			res := rl.client.Eval(rlockrefreshScript, []string{rl.key, rl.prefixKey}, rl.lockLeaseTime, rl.token)
+			val, err := res.Int()
+			if err != nil {
+				panic(err)
+			}
+			if val == 0 {
+				rl.logger.Debug("not find the rlock key of self")
+				break LOOP
+			}
+		case <-rl.exit:
+			break LOOP
+
+		}
+	}
+	rl.logger.Debug("rlock: refresh routine release", zap.String("token", rl.token))
+}
+
+func (rl *redissionReadLocker) UnLock() {
+	res := rl.client.Eval(runlockScript, []string{rl.key, rl.chankey, rl.rwTimeoutTokenPrefix, rl.prefixKey}, unlockMessage, rl.token)
+	val, err := res.Result()
+	if err != redis.Nil && err != nil {
+		panic(err)
+	}
+	if val == nil {
+		panic("attempt to unlock lock, not locked by current routine by lock id:" + rl.token)
+	}
+	rl.logger.Debug("rlock: unlock", zap.String("token", rl.token), zap.String("key", rl.key))
+	if val.(int64) == 1 {
+		rl.cancelRefreshLockTime()
+	}
+}
+
+type redissionWriteLocker struct {
+	redissionLocker
+}
+
+func (rl *redissionWriteLocker) Lock(ctx context.Context, timeout ...time.Duration) {
+	if rl.exit == nil {
+		rl.exit = make(chan struct{})
+	}
+	ttl, err := rl.tryLock()
+	if err != nil {
+		panic(err)
+	}
+
+	if ttl <= 0 {
+		rl.once.Do(func() {
+			go rl.refreshLockTimeout()
+		})
+		return
+	}
+
+	submsg := make(chan struct{}, 1)
+	defer close(submsg)
+	sub := rl.client.Subscribe(rl.chankey)
+	defer sub.Close()
+	go rl.subscribeLock(sub, submsg)
+	// listen := rl.listenManager.Subscribe(rl.key, rl.token)
+	// defer rl.listenManager.UnSubscribe(rl.key, rl.token)
+
+	timer := time.NewTimer(ttl)
+	defer timer.Stop()
+	// outimer: if the lock cannot be acquired within this window, give up entirely
+	var outimer *time.Timer
+	if len(timeout) > 0 && timeout[0] > 0 {
+		outimer = time.NewTimer(timeout[0])
+	}
+LOOP:
+	for {
+		ttl, err = rl.tryLock()
+		if err != nil {
+			panic(err)
+		}
+
+		if ttl <= 0 {
+			rl.once.Do(func() {
+				go rl.refreshLockTimeout()
+			})
+			return
+		}
+		if outimer != nil {
+			select {
+			case _, ok := <-submsg:
+				if !timer.Stop() {
+					<-timer.C
+				}
+
+				if !ok {
+					panic("lock listener released")
+				}
+
+				timer.Reset(ttl)
+			case <-ctx.Done():
+				// break LOOP
+				panic("lock context already released")
+			case <-timer.C:
+				timer.Reset(ttl)
+			case <-outimer.C:
+				if !timer.Stop() {
+					<-timer.C
+				}
+				break LOOP
+			}
+		} else {
+			select {
+			case _, ok := <-submsg:
+				if !timer.Stop() {
+					<-timer.C
+				}
+
+				if !ok {
+					panic("lock listener released")
+				}
+
+				timer.Reset(ttl)
+			case <-ctx.Done():
+				// break LOOP
+				panic("lock context already released")
+			case <-timer.C:
+				timer.Reset(ttl)
+			}
+		}
+	}
+}
+
+func (rl *redissionWriteLocker) tryLock() (time.Duration, error) {
+	res := rl.client.Eval(wlockScript, []string{rl.key}, rl.lockLeaseTime, rl.token)
+	v, err := res.Result()
+	if err != redis.Nil && err != nil {
+		return 0, err
+	}
+
+	if v == nil {
+		return 0, nil
+	}
+
+	// pttl returns milliseconds; convert explicitly
+	return time.Duration(v.(int64)) * time.Millisecond, nil
+}
+
+func (rl *redissionWriteLocker) UnLock() {
+	res := rl.client.Eval(wunlockScript, []string{rl.key, rl.chankey}, unlockMessage, rl.lockLeaseTime, rl.token)
+	val, err := res.Result()
+	if err != redis.Nil && err != nil {
+		panic(err)
+	}
+	if val == nil {
+		panic("attempt to unlock lock, not locked by current routine by lock id:" + rl.token)
+	}
+	rl.logger.Debug("lock: unlock", zap.String("token", rl.token), zap.String("key", rl.key))
+	if val.(int64) == 1 {
+		rl.cancelRefreshLockTime()
+	}
+}
+
+func GetReadLocker(client *redis.Client, ops *RedissionLockConfig) *redissionReadLocker {
+	r := &redissionLocker{
+		token:  uuid.New().String(),
+		client: client,
+		exit:   make(chan struct{}),
+		once:   &sync.Once{},
+		// default to the global zap logger so the Debug calls are safe
+		logger: zap.L(),
+	}
+
+	if len(ops.Prefix) <= 0 {
+		ops.Prefix = "redission-rwlock"
+	}
+	if len(ops.ChanPrefix) <= 0 {
+		ops.ChanPrefix = "redission-rwlock-channel"
+	}
+	if ops.LockLeaseTime == 0 {
+		r.lockLeaseTime = internalLockLeaseTime
+	} else {
+		r.lockLeaseTime = uint64(ops.LockLeaseTime / time.Millisecond)
+	}
+	r.key = strings.Join([]string{ops.Prefix, ops.Key}, ":")
+	r.chankey = strings.Join([]string{ops.ChanPrefix, ops.Key}, ":")
+	tkey := strings.Join([]string{"{", r.key, "}"}, "")
+	return &redissionReadLocker{redissionLocker: *r, rwTimeoutTokenPrefix: strings.Join([]string{tkey, r.token, "rwlock_timeout"}, ":"), prefixKey: tkey}
+}
+
+func GetWriteLocker(client *redis.Client, ops *RedissionLockConfig) *redissionWriteLocker {
+	r := &redissionLocker{
+		token:  uuid.New().String(),
+		client: client,
+		exit:   make(chan struct{}),
+		once:   &sync.Once{},
+		// default to the global zap logger so the Debug calls are safe
+		logger: zap.L(),
+	}
+
+	if len(ops.Prefix) <= 0 {
+		ops.Prefix = "redission-rwlock"
+	}
+	if len(ops.ChanPrefix) <= 0 {
+		ops.ChanPrefix = "redission-rwlock-channel"
+	}
+	if ops.LockLeaseTime == 0 {
+		r.lockLeaseTime = internalLockLeaseTime
+	} else {
+		r.lockLeaseTime = uint64(ops.LockLeaseTime / time.Millisecond)
+	}
+	r.key = strings.Join([]string{ops.Prefix, ops.Key}, ":")
+	r.chankey = strings.Join([]string{ops.ChanPrefix, ops.Key}, ":")
+	return &redissionWriteLocker{redissionLocker: *r}
+}
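A read/write usage sketch (editor's illustration; both lockers must share the same RedissionLockConfig.Key so they contend on the same hash):

cfg := &RedissionLockConfig{Key: "grid:topology"}

rl := GetReadLocker(client, cfg) // many readers may hold the lock together
rl.Lock(context.Background())
// ... read shared state ...
rl.UnLock()

wl := GetWriteLocker(client, cfg) // a writer waits out the readers, then holds exclusively
wl.Lock(context.Background(), 10*time.Second)
// ... mutate shared state ...
wl.UnLock()
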
diff --git a/go.mod b/go.mod
index e5f2b8c..3d655e8 100644
--- a/go.mod
+++ b/go.mod
@@ -6,7 +6,9 @@ require (
 	github.com/bitly/go-simplejson v0.5.1
 	github.com/confluentinc/confluent-kafka-go v1.9.2
 	github.com/gin-gonic/gin v1.10.0
+	github.com/go-redis/redis v6.15.9+incompatible
 	github.com/gofrs/uuid v4.4.0+incompatible
+	github.com/google/uuid v1.4.0
 	github.com/gorilla/websocket v1.5.3
 	github.com/json-iterator/go v1.1.12
 	github.com/natefinch/lumberjack v2.0.0+incompatible
@@ -16,6 +18,7 @@
 	github.com/swaggo/gin-swagger v1.6.0
 	github.com/swaggo/swag v1.16.4
 	go.uber.org/zap v1.27.0
+	golang.org/x/sys v0.28.0
 	gorm.io/driver/postgres v1.5.9
 	gorm.io/gorm v1.25.12
 )
@@ -54,6 +57,8 @@
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/onsi/ginkgo v1.16.5 // indirect
+	github.com/onsi/gomega v1.18.1 // indirect
 	github.com/pelletier/go-toml/v2 v2.2.3 // indirect
 	github.com/sagikazarmark/locafero v0.4.0 // indirect
 	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
@@ -70,7 +75,6 @@
 	golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
 	golang.org/x/net v0.32.0 // indirect
 	golang.org/x/sync v0.10.0 // indirect
-	golang.org/x/sys v0.28.0 // indirect
 	golang.org/x/text v0.21.0 // indirect
 	golang.org/x/tools v0.28.0 // indirect
 	google.golang.org/protobuf v1.35.2 // indirect
diff --git a/go.sum b/go.sum
index da45600..86f0495 100644
--- a/go.sum
+++ b/go.sum
@@ -52,6 +52,8 @@ github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq
 github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
 github.com/frankban/quicktest v1.14.6 
h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/gabriel-vasile/mimetype v1.4.7 h1:SKFKl7kD0RiPdbht0s7hFtjl489WcQ1VyPW8ZzUMYCA= @@ -79,6 +81,9 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.23.0 h1:/PwmTwZhS0dPkav3cdK9kV1FsAmrL8sThn8IHr/sO+o= github.com/go-playground/validator/v10 v10.23.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= +github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= +github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= @@ -112,9 +117,12 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20211008130755-947d60d73cc0/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= @@ -122,7 +130,9 @@ github.com/hamba/avro v1.5.6/go.mod h1:3vNT0RLXXpFm2Tb/5KC71ZRJlOroggq1Rcitb6k4F github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/heetch/avro v0.3.1/go.mod h1:4xn38Oz/+hiEUTpbVfGVLfvOg0yKLlRP7Q9+gJJILgA= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod 
h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/invopop/jsonschema v0.4.0/go.mod h1:O9uiLokuu0+MGFlyiaqtWxwqJm41/+8Nj0lD7A36YH0= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= @@ -186,6 +196,20 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM= github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= github.com/nrwiersma/avro-benchmarks v0.0.0-20210913175520-21aec48c8f76/go.mod h1:iKyFMidsk/sVYONJRE372sJuX/QTRPacU7imPqqsu7g= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= +github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/panjf2000/ants/v2 v2.10.0 h1:zhRg1pQUtkyRiOFo2Sbqwjp0GfBNo9cUY2/Grpx1p+8= github.com/panjf2000/ants/v2 v2.10.0/go.mod h1:7ZxyxsqE4vvW0M7LSD8aI3cKwgFhBHbxnlN8mDqHa1I= github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= @@ -242,6 +266,7 @@ github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2 github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -265,11 +290,13 @@ golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= golang.org/x/mod v0.22.0/go.mod 
h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -277,11 +304,13 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200505041828-1ed23360d12c/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= @@ -293,22 +322,31 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -322,6 +360,7 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= @@ -333,6 +372,7 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= @@ -378,6 +418,7 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 
gopkg.in/errgo.v1 v1.0.0/go.mod h1:CxwszS/Xz1C49Ucd2i6Zil5UToP1EmyrFhKaMVbg1mk= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/httprequest.v1 v1.2.1/go.mod h1:x2Otw96yda5+8+6ZeWwHIJTFkEHWP/qP8pJOzqEtWPM= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -385,10 +426,14 @@ gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3M gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/retry.v1 v1.0.3/go.mod h1:FJkXmWiMaAo7xB+xhvDF59zhfjDWyzmyAxiT4dB688g= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/share_memory/file_lock.go b/share_memory/file_lock.go deleted file mode 100644 index 9a169e7..0000000 --- a/share_memory/file_lock.go +++ /dev/null @@ -1,52 +0,0 @@ -package sharememory - -import ( - "fmt" - "os" - - "golang.org/x/sys/unix" -) - -func main() { - // 打开文件 - file, err := os.OpenFile("testfile.txt", os.O_RDWR|os.O_CREATE, 0o666) - if err != nil { - fmt.Println("Error opening file:", err) - return - } - defer file.Close() - - // 加独占锁 - fmt.Println("Acquiring exclusive lock...") - err = unix.Flock(int(file.Fd()), unix.LOCK_EX) - if err != nil { - fmt.Println("Error acquiring exclusive lock:", err) - return - } - defer unix.Flock(int(file.Fd()), unix.LOCK_UN) // 释放锁 - - fmt.Println("Exclusive lock acquired. Writing to file...") - // 这里可以添加写文件的逻辑 - fmt.Println("Writing complete.") - - // 打开文件 - file, err = os.OpenFile("testfile.txt", os.O_RDONLY, 0o666) - if err != nil { - fmt.Println("Error opening file:", err) - return - } - defer file.Close() - - // 加共享锁 - fmt.Println("Acquiring shared lock...") - err = unix.Flock(int(file.Fd()), unix.LOCK_SH) - if err != nil { - fmt.Println("Error acquiring shared lock:", err) - return - } - defer unix.Flock(int(file.Fd()), unix.LOCK_UN) // 释放锁 - - fmt.Println("Shared lock acquired. 
Reading from file...") - // 这里可以添加读文件的逻辑 - fmt.Println("Reading complete.") -} diff --git a/share_memory/share_memeory.go b/sharememory/share_memeory.go similarity index 100% rename from share_memory/share_memeory.go rename to sharememory/share_memeory.go From b894d61b54251e0c7ec40526c1034e8f4ccc3e87 Mon Sep 17 00:00:00 2001 From: douxu Date: Tue, 4 Mar 2025 16:33:35 +0800 Subject: [PATCH 03/33] =?UTF-8?q?init=20UnRLockScript=20=E3=80=81WLockScri?= =?UTF-8?q?pt=E3=80=81UnWLockScript=E3=80=81RefreshLockScript=20script?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- distributedlock/luascript/rlock_script.go | 224 ++++++++++++++++++---- distributedlock/redis_lock.go | 2 +- 2 files changed, 186 insertions(+), 40 deletions(-) diff --git a/distributedlock/luascript/rlock_script.go b/distributedlock/luascript/rlock_script.go index d2d72f1..ee5805c 100644 --- a/distributedlock/luascript/rlock_script.go +++ b/distributedlock/luascript/rlock_script.go @@ -1,14 +1,15 @@ // Package luascript defines the lua script used for redis distributed lock package luascript -// RlockScript is the lua script for the lock read lock command +// RLockScript is the lua script for the lock read lock command /* KEYS[1]:锁的键名(key),通常是锁的唯一标识。 KEYS[2]:锁的超时键名前缀(rwTimeoutPrefix),用于存储每个读锁的超时键。 -ARGV[1]:锁的过期时间(lockLeaseTime),单位为毫秒。 +ARGV[1]:锁的过期时间(lockLeaseTime),单位为秒。 ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 */ -var RlockScript = `local mode = redis.call('hget', KEYS[1], 'mode'); +var RLockScript = ` +local mode = redis.call('hget', KEYS[1], 'mode'); local lockKey = KEYS[2] .. ARGV[2]; if (mode == false) then redis.call('hset', KEYS[1], 'mode', 'read'); @@ -19,6 +20,9 @@ if (mode == false) then end; if (mode == 'write') then + -- TODO 放到 list 中等待写锁释放后再次尝试加锁并且订阅写锁释放的消息 + local key = KEYS[1] .. ':read'; + redis.call('rpush', key, ARGV[2]); return -1; end; @@ -42,7 +46,7 @@ if (mode == 'read') then for i = 1, #fields,2 do local field = fields[i]; local remainTime = redis.call('httl', KEYS[1], 'fields', '1', field); - maxRemainTime = math.max(tonumber(remainTime[1]), maxRemainTime); + maxRemainTime = math.max(tonumber(remainTime[1]), maxRemainTime); end; until cursor == 0; @@ -52,56 +56,198 @@ if (mode == 'read') then end; ` -// TODO 优化读锁解锁语句 -// UnRlockScript is the lua script for the unlock read lock command +// UnRLockScript is the lua script for the unlock read lock command /* KEYS[1]:锁的键名(key),通常是锁的唯一标识。 -KEYS[2]:锁的释放通知频道(chankey),用于通知其他客户端锁已释放。 -KEYS[3]:锁的超时键名前缀(rwTimeoutTokenPrefix),用于存储每个读锁的超时键。 -KEYS[4]:锁的超时键名前缀(prefixKey),用于存储每个读锁的超时键。 +KEYS[2]:锁的超时键名前缀(rwTimeoutPrefix),用于存储每个读锁的超时键。 +KEYS[3]:锁的释放通知读频道(chankey),用于通知其他客户端锁已释放。 +KEYS[4]:锁的释放通知写频道(chankey),用于通知其他客户端锁已释放。 ARGV[1]:解锁消息(unlockMessage),用于通知其他客户端锁已释放。 ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 */ -var UnRlockScript = `local mode = redis.call('hget', KEYS[1], 'mode'); +var UnRLockScript = ` +local lockKey = KEYS[2] .. ARGV[2]; +local mode = redis.call('hget', KEYS[1], 'mode'); if (mode == false) then - redis.call('publish', KEYS[2], ARGV[1]); + local writeWait = KEYS[1] .. 
':write'; + -- 优先写锁加锁,无写锁的情况通知读锁加锁 + local counter = redis.call('llen',writeWait) + if (counter >= 1) then + redis.call('publish', KEYS[4], ARGV[1]); + else + redis.call('publish', KEYS[3], ARGV[1]); + end; + return 1; +elseif (mode == 'write') then + return -2; +end; + +-- 判断当前的确是读模式但是当前 token 并没有加读锁的情况,返回 1 +local lockExists = redis.call('hexists', KEYS[1], lockKey); +if ((mode == 'read') and (lockExists == 0)) then return 1; end; -local lockExists = redis.call('hexists', KEYS[1], ARGV[2]); -if (lockExists == 0) then - return nil; -end; -local counter = redis.call('hincrby', KEYS[1], ARGV[2], -1); +local counter = redis.call('hincrby', KEYS[1], lockKey, -1); if (counter == 0) then - redis.call('hdel', KEYS[1], ARGV[2]); + redis.call('hdel', KEYS[1], lockKey); end; -redis.call('del', KEYS[3] .. ':' .. (counter+1)); if (redis.call('hlen', KEYS[1]) > 1) then - local maxRemainTime = -3; - local keys = redis.call('hkeys', KEYS[1]); - for n, key in ipairs(keys) do - counter = tonumber(redis.call('hget', KEYS[1], key)); - if type(counter) == 'number' then - for i=counter, 1, -1 do - local remainTime = redis.call('ttl', KEYS[4] .. ':' .. key .. ':rwlock_timeout:' .. i); - maxRemainTime = math.max(remainTime, maxRemainTime); + local cursor = 0; + local maxRemainTime = 0; + local pattern = KEYS[2] .. ':*'; + repeat + local hscanResult = redis.call('hscan', KEYS[1], cursor, 'match', pattern, 'count', '100'); + cursor = tonumber(hscanResult[1]); + local fields = hscanResult[2]; + + for i = 1, #fields,2 do + local field = fields[i]; + local remainTime = redis.call('httl', KEYS[1], 'fields', '1', field); + maxRemainTime = math.max(tonumber(remainTime[1]), maxRemainTime); + end; + until cursor == 0; + + if (maxRemainTime > 0) then + local remainTime = redis.call('ttl', KEYS[1]); + redis.call('expire', KEYS[1], math.max(tonumber(remainTime),maxRemainTime)); + end; +else + redis.call('del', KEYS[1]); + local writeWait = KEYS[1] .. ':write'; + -- 优先写锁加锁,无写锁的情况通知读锁加锁 + local counter = redis.call('llen',writeWait) + if (counter >= 1) then + redis.call('publish', KEYS[4], ARGV[1]); + else + redis.call('publish', KEYS[3], ARGV[1]); + end; + return 1; +end; +` + +// WLockScript is the lua script for the lock write lock command +/* +KEYS[1]:锁的键名(key),通常是锁的唯一标识。 +KEYS[2]:锁的超时键名前缀(rwTimeoutPrefix),用于存储每个读锁的超时键。 +ARGV[1]:锁的过期时间(lockLeaseTime),单位为秒。 +ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 +*/ +var WLockScript = ` +local mode = redis.call('hget', KEYS[1], 'mode'); +local lockKey = KEYS[2] .. ARGV[2]; +local waitKey = KEYS[1] .. ':write'; +if (mode == false) then + local firstToken = redis.call('lindex', waitKey,'0') + if (firstToken ~= ARGV[2]) then + return -7; + end; + redis.call('hset', KEYS[1], 'mode', 'write'); + redis.call('hset', KEYS[1], lockKey, 1); + redis.call('hexpire', KEYS[1], ARGV[1] 'fields' '1' lockKey); + redis.call('expire', KEYS[1], ARGV[1]); + redis.call('lpop', waitKey, '1') + return 1; +elseif (mode == 'read') then + -- TODO 放到 list 中等待读锁释放后再次尝试加锁并且订阅读锁释放的消息 + redis.call('rpush', waitkey, ARGV[2]); + return -3; +else + // 可重入写锁逻辑 + local lockKey = KEYS[2] .. ARGV[2] + local lockExists = redis.call('hexists', KEYS[1], lockKey) + if (lockExists == 1) then + redis.call('hincrby', KEYS[1], lockKey, 1); + redis.call('hexpire', KEYS[1], ARGV[1] 'fields' '1' lockKey); + redis.call('expire', KEYS[1], ARGV[1]); + return 1; + end; + -- 放到 list 中等待写锁释放后再次尝试加锁并且订阅写锁释放的消息 + local key = KEYS[1] .. 
':write'; + redis.call('rpush', key, ARGV[2]); + return -4; +end; +` + +// UnWLockScript is the lua script for the unlock write lock command +/* +KEYS[1]:锁的键名(key),通常是锁的唯一标识。 +KEYS[2]:锁的超时键名前缀(rwTimeoutPrefix),用于存储每个读锁的超时键。 +KEYS[3]:锁的释放通知读频道(chankey),用于通知其他客户端锁已释放。 +KEYS[4]:锁的释放通知写频道(chankey),用于通知其他客户端锁已释放。 +ARGV[1]:解锁消息(unlockMessage),用于通知其他客户端锁已释放。 +ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 +*/ +var UnWLockScript = ` +local mode = redis.call('hget', KEYS[1], 'mode'); +local writeWait = KEYS[1] .. ':write'; +if (mode == false) then + -- 优先写锁加锁,无写锁的情况通知读锁加锁 + local counter = redis.call('llen',writeWait) + if (counter >= 1) then + redis.call('publish', KEYS[4], ARGV[1]); + else + redis.call('publish', KEYS[3], ARGV[1]); + end; + return 1; +elseif (mode == 'read') then + return -5; +else + // 可重入写锁逻辑 + local lockKey = KEYS[2] .. ARGV[2] + local lockExists = redis.call('hexists', KEYS[1], lockKey) + if (lockExists == 1) then + local incrRes = redis.call('hincrby', KEYS[1], lockKey, -1); + if (incrRes == 0) then + redis.call('del', KEYS[1]); + local counter = redis.call('llen',writeWait) + if (counter >= 1) then + redis.call('publish', KEYS[4], ARGV[1]); + else + redis.call('publish', KEYS[3], ARGV[1]); end; + return 1 end; end; - - if maxRemainTime > 0 then - redis.call('pexpire', KEYS[1], maxRemainTime); - return 0; - end; - - if mode == 'write' then - return 0; - end; + return -6; end; - -redis.call('del', KEYS[1]); -redis.call('publish', KEYS[2], ARGV[1]); -return 1; +` + +// RefreshLockScript is the lua script for the refresh lock command +/* +KEYS[1]:锁的键名(key),通常是锁的唯一标识。 +KEYS[2]:锁的超时键名前缀(rwTimeoutPrefix),用于存储每个读锁的超时键。 +ARGV[1]:锁的过期时间(lockLeaseTime),单位为秒。 +ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 +*/ +var RefreshLockScript = ` +local lockKey = KEYS[2] .. ARGV[2] +local lockExists = redis.call('hexists', KEYS[1], lockKey); +local mode = redis.call('hget', KEYS[1], 'mode') +if (lockExists == 1) then + redis.call('hexpire', KEYS[1], ARGV[1] 'fields' '1' lockKey); + if (mode == 'read' ) then + local cursor = 0; + local maxRemainTime = tonumber(ARGV[1]); + local pattern = KEYS[2] .. 
':*'; + repeat + local hscanResult = redis.call('hscan', KEYS[1], cursor, 'match', pattern, 'count', '100'); + cursor = tonumber(hscanResult[1]); + local fields = hscanResult[2]; + + for i = 1, #fields,2 do + local field = fields[i]; + local remainTime = redis.call('httl', KEYS[1], 'fields', '1', field); + maxRemainTime = math.max(tonumber(remainTime[1]), maxRemainTime); + end; + until cursor == 0; + if (maxRemainTime > 0) then + local remainTime = redis.call('ttl', KEYS[1]); + redis.call('expire', KEYS[1], math.max(tonumber(remainTime),maxRemainTime)); + end; + end; + return 1; +end; +return -8; ` diff --git a/distributedlock/redis_lock.go b/distributedlock/redis_lock.go index e66c03c..cbf79dc 100644 --- a/distributedlock/redis_lock.go +++ b/distributedlock/redis_lock.go @@ -80,7 +80,7 @@ type redissionLocker struct { } func (rl *redissionLocker) Lock(ctx context.Context, timeout ...time.Duration) { - fmt.Println(luascript.RlockScript) + fmt.Println(luascript.RLockScript) if rl.exit == nil { rl.exit = make(chan struct{}) } From c08f4b91f59d7d8c3ce7202b852bad58db0faf50 Mon Sep 17 00:00:00 2001 From: douxu Date: Wed, 5 Mar 2025 16:42:59 +0800 Subject: [PATCH 04/33] optimize read lock acquisition statements of redisRWLock --- distributedlock/constant/redis_err.go | 123 ++++++++++ distributedlock/luascript/rlock_script.go | 16 +- distributedlock/redis_lock.go | 8 +- distributedlock/redis_rwlock.go | 286 +++++----------------- 4 files changed, 198 insertions(+), 235 deletions(-) create mode 100644 distributedlock/constant/redis_err.go diff --git a/distributedlock/constant/redis_err.go b/distributedlock/constant/redis_err.go new file mode 100644 index 0000000..69579dd --- /dev/null +++ b/distributedlock/constant/redis_err.go @@ -0,0 +1,123 @@ +package constant + +import "fmt" + +type RedisResult int + +const ( + LockSuccess = RedisResult(1) + UnLockSuccess = RedisResult(1) + RefreshLockSuccess = RedisResult(1) + UnRLockSuccess = RedisResult(0) + RLockFailure = RedisResult(-1) + UnRLockFailureWithWLockOccupancy = RedisResult(-2) + WLockFailureWithRLockOccupancy = RedisResult(-3) + WLockFailureWithWLockOccupancy = RedisResult(-4) + UnWLockFailureWithRLockOccupancy = RedisResult(-5) + UnWLockFailureWithWLockOccupancy = RedisResult(-6) + WLockFailureWithNotFirstPriority = RedisResult(-7) + RefreshLockFailure = RedisResult(-8) + UnknownInternalError = RedisResult(-99) +) + +type RedisLockType int + +const ( + LockType = RedisLockType(iota) + UnLockType + RefreshLockType +) + +type RedisError struct { + Code RedisResult + Message string +} + +func (e *RedisError) Error() string { + return fmt.Sprintf("redis execution code:%d,message:%s\n", e.Code, e.Message) +} + +func (e *RedisError) OutputResultMessage() string { + return e.Message +} + +func (e *RedisError) OutputResultCode() int { + return int(e.Code) +} + +func NewRedisError(res RedisResult) error { + resInt := int(res) + switch resInt { + case -1: + return &RedisError{Code: -1, Message: "redis lock read lock failure,the lock is already occupied by another processes write lock"} + default: + return nil + } +} + +func ConvertResultToErr(res RedisResult, lockType RedisLockType, redisMsg string) error { + resInt := int(res) + switch resInt { + case 1: + if lockType == LockType { + return &RedisError{Code: res, Message: "redis lock success"} + } else if lockType == UnLockType { + return &RedisError{Code: res, Message: "redis unlock success"} + } else { + return &RedisError{Code: res, Message: "redis refresh lock success"} + } + case 0: + return 
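Unlike the patch-02 scripts, which return a TTL to sleep on, the wait-list scripts above report distinct negative codes. A sketch of how a caller might branch on them (editor's illustration; lockKey, timeoutPrefix, leaseSeconds and token stand for values the locker would build):

res := client.Eval(luascript.WLockScript,
	[]string{lockKey, timeoutPrefix}, leaseSeconds, token)
v, err := res.Result()
if err != nil && err != redis.Nil {
	panic(err)
}
switch code, _ := v.(int64); code {
case 1:
	// write lock acquired
case -3, -4:
	// queued behind readers (-3) or another writer (-4):
	// wait for a message on the write channel, then retry
case -7:
	// queued but not first in line: keep waiting for our turn
}
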
---
 distributedlock/constant/redis_err.go     | 123 ++++
 distributedlock/luascript/rlock_script.go |  16 +-
 distributedlock/redis_lock.go             |   8 +-
 distributedlock/redis_rwlock.go           | 286 +++++-----------
 4 files changed, 198 insertions(+), 235 deletions(-)
 create mode 100644 distributedlock/constant/redis_err.go

diff --git a/distributedlock/constant/redis_err.go b/distributedlock/constant/redis_err.go
new file mode 100644
index 0000000..69579dd
--- /dev/null
+++ b/distributedlock/constant/redis_err.go
@@ -0,0 +1,123 @@
+package constant
+
+import "fmt"
+
+type RedisResult int
+
+const (
+	LockSuccess                      = RedisResult(1)
+	UnLockSuccess                    = RedisResult(1)
+	RefreshLockSuccess               = RedisResult(1)
+	UnRLockSuccess                   = RedisResult(0)
+	RLockFailure                     = RedisResult(-1)
+	UnRLockFailureWithWLockOccupancy = RedisResult(-2)
+	WLockFailureWithRLockOccupancy   = RedisResult(-3)
+	WLockFailureWithWLockOccupancy   = RedisResult(-4)
+	UnWLockFailureWithRLockOccupancy = RedisResult(-5)
+	UnWLockFailureWithWLockOccupancy = RedisResult(-6)
+	WLockFailureWithNotFirstPriority = RedisResult(-7)
+	RefreshLockFailure               = RedisResult(-8)
+	UnknownInternalError             = RedisResult(-99)
+)
+
+type RedisLockType int
+
+const (
+	LockType = RedisLockType(iota)
+	UnLockType
+	RefreshLockType
+)
+
+type RedisError struct {
+	Code    RedisResult
+	Message string
+}
+
+func (e *RedisError) Error() string {
+	return fmt.Sprintf("redis execution code:%d,message:%s\n", e.Code, e.Message)
+}
+
+func (e *RedisError) OutputResultMessage() string {
+	return e.Message
+}
+
+func (e *RedisError) OutputResultCode() int {
+	return int(e.Code)
+}
+
+func NewRedisError(res RedisResult) error {
+	resInt := int(res)
+	switch resInt {
+	case -1:
+		return &RedisError{Code: -1, Message: "redis lock read lock failure,the lock is already occupied by another processes write lock"}
+	default:
+		return nil
+	}
+}
+
+func ConvertResultToErr(res RedisResult, lockType RedisLockType, redisMsg string) error {
+	resInt := int(res)
+	switch resInt {
+	case 1:
+		if lockType == LockType {
+			return &RedisError{Code: res, Message: "redis lock success"}
+		} else if lockType == UnLockType {
+			return &RedisError{Code: res, Message: "redis unlock success"}
+		} else {
+			return &RedisError{Code: res, Message: "redis refresh lock success"}
+		}
+	case 0:
+		return &RedisError{Code: res, Message: "redis unlock read lock success, the lock is still occupied by other processes read lock"}
+	case -1:
+		return &RedisError{Code: res, Message: "redis lock read lock failure,the lock is already occupied by another processes write lock"}
+	case -2:
+		return &RedisError{Code: res, Message: "redis un lock read lock failure,the lock is already occupied by another processes write lock"}
+	case -3:
+		return &RedisError{Code: res, Message: "redis lock write lock failure,the lock is already occupied by another processes read lock"}
+	case -4:
+		return &RedisError{Code: res, Message: "redis lock write lock failure,the lock is already occupied by another processes write lock"}
+	case -5:
+		return &RedisError{Code: res, Message: "redis un lock write lock failure,the lock is already occupied by another processes read lock"}
+	case -6:
+		return &RedisError{Code: res, Message: "redis un lock write lock failure,the lock is already occupied by another processes write lock"}
+	case -7:
+		return &RedisError{Code: res, Message: "redis lock write lock failure,the first priority in the current process non-waiting queue"}
+	case -8:
+		return &RedisError{Code: res, Message: "redis refresh lock failure,the lock not exist"}
+	default:
+		return &RedisError{Code: res, Message: fmt.Sprintf("unknown redis execution result:%s\n", redisMsg)}
+	}
+}
+
+func TranslateResultToStr(res RedisResult, lockType RedisLockType) string {
+	resInt := int(res)
+	switch resInt {
+	case 1:
+		if lockType == LockType {
+			return "redis lock success"
+		} else if lockType == UnLockType {
+			return "redis unlock success"
+		} else {
+			return "redis refresh lock success"
+		}
+	case 0:
+		return "redis unlock read lock success, the lock is still occupied by other processes read lock"
+	case -1:
+		return "redis lock read lock failure,the lock is already occupied by another processes write lock"
+	case -2:
+		return "redis un lock read lock failure,the lock is already occupied by another processes write lock"
+	case -3:
+		return "redis lock write lock failure,the lock is already occupied by another processes read lock"
+	case -4:
+		return "redis lock write lock failure,the lock is already occupied by another processes write lock"
+	case -5:
+		return "redis un lock write lock failure,the lock is already occupied by another processes read lock"
+	case -6:
+		return "redis un lock write lock failure,the lock is already occupied by another processes write lock"
+	case -7:
+		return "redis lock write lock failure,the first priority in the current process non-waiting queue"
+	case -8:
+		return "redis refresh lock failure,the lock not exist"
+	}
+	return "unknown redis execution result"
+}
diff --git a/distributedlock/luascript/rlock_script.go b/distributedlock/luascript/rlock_script.go
index ee5805c..3588670 100644
--- a/distributedlock/luascript/rlock_script.go
+++ b/distributedlock/luascript/rlock_script.go
@@ -10,7 +10,7 @@ ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户
 */
 var RLockScript = `
 local mode = redis.call('hget', KEYS[1], 'mode');
-local lockKey = KEYS[2] .. ARGV[2];
+local lockKey = KEYS[2] .. ':' .. ARGV[2];
 if (mode == false) then
 	redis.call('hset', KEYS[1], 'mode', 'read');
 	redis.call('hset', KEYS[1], lockKey, '1');
@@ -66,7 +66,7 @@ ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户
 */
 var UnRLockScript = `
-local lockKey = KEYS[2] .. ARGV[2];
+local lockKey = KEYS[2] .. ':' .. ARGV[2];
 local mode = redis.call('hget', KEYS[1], 'mode');
 if (mode == false) then
 	local writeWait = KEYS[1] ..
':write'; @@ -136,7 +136,7 @@ ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户 */ var WLockScript = ` local mode = redis.call('hget', KEYS[1], 'mode'); -local lockKey = KEYS[2] .. ARGV[2]; +local lockKey = KEYS[2] .. ':' .. ARGV[2]; local waitKey = KEYS[1] .. ':write'; if (mode == false) then local firstToken = redis.call('lindex', waitKey,'0') @@ -154,8 +154,8 @@ elseif (mode == 'read') then redis.call('rpush', waitkey, ARGV[2]); return -3; else - // 可重入写锁逻辑 - local lockKey = KEYS[2] .. ARGV[2] + -- 可重入写锁逻辑 + local lockKey = KEYS[2] .. ':' .. ARGV[2] local lockExists = redis.call('hexists', KEYS[1], lockKey) if (lockExists == 1) then redis.call('hincrby', KEYS[1], lockKey, 1); @@ -194,8 +194,8 @@ if (mode == false) then elseif (mode == 'read') then return -5; else - // 可重入写锁逻辑 - local lockKey = KEYS[2] .. ARGV[2] + -- 可重入写锁逻辑 + local lockKey = KEYS[2] .. ':' .. ARGV[2] local lockExists = redis.call('hexists', KEYS[1], lockKey) if (lockExists == 1) then local incrRes = redis.call('hincrby', KEYS[1], lockKey, -1); @@ -222,7 +222,7 @@ ARGV[1]:锁的过期时间(lockLeaseTime),单位为秒。 ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 */ var RefreshLockScript = ` -local lockKey = KEYS[2] .. ARGV[2] +local lockKey = KEYS[2] .. ':' .. ARGV[2] local lockExists = redis.call('hexists', KEYS[1], lockKey); local mode = redis.call('hget', KEYS[1], 'mode') if (lockExists == 1) then diff --git a/distributedlock/redis_lock.go b/distributedlock/redis_lock.go index cbf79dc..6f9b9aa 100644 --- a/distributedlock/redis_lock.go +++ b/distributedlock/redis_lock.go @@ -71,7 +71,7 @@ type RedissionLockConfig struct { type redissionLocker struct { token string key string - chankey string + waitChankey string exit chan struct{} lockLeaseTime uint64 client *redis.Client @@ -98,7 +98,7 @@ func (rl *redissionLocker) Lock(ctx context.Context, timeout ...time.Duration) { submsg := make(chan struct{}, 1) defer close(submsg) - sub := rl.client.Subscribe(rl.chankey) + sub := rl.client.Subscribe(rl.waitChankey) defer sub.Close() go rl.subscribeLock(sub, submsg) // listen := rl.listenManager.Subscribe(rl.key, rl.token) @@ -262,7 +262,7 @@ func (rl *redissionLocker) tryLock() (time.Duration, error) { } func (rl *redissionLocker) UnLock() { - res := rl.client.Eval(unlockScript, []string{rl.key, rl.chankey}, unlockMessage, rl.lockLeaseTime, rl.token) + res := rl.client.Eval(unlockScript, []string{rl.key, rl.waitChankey}, unlockMessage, rl.lockLeaseTime, rl.token) val, err := res.Result() if err != redis.Nil && err != nil { panic(err) @@ -294,6 +294,6 @@ func GetLocker(client *redis.Client, ops *RedissionLockConfig) *redissionLocker r.lockLeaseTime = internalLockLeaseTime } r.key = strings.Join([]string{ops.Prefix, ops.Key}, ":") - r.chankey = strings.Join([]string{ops.ChanPrefix, ops.Key}, ":") + r.waitChankey = strings.Join([]string{ops.ChanPrefix, ops.Key}, ":") return r } diff --git a/distributedlock/redis_rwlock.go b/distributedlock/redis_rwlock.go index cd15035..d99c467 100644 --- a/distributedlock/redis_rwlock.go +++ b/distributedlock/redis_rwlock.go @@ -2,262 +2,98 @@ package distributed_lock import ( "context" + "errors" + "fmt" "strings" "sync" "time" + "modelRT/distributedlock/constant" + "modelRT/distributedlock/luascript" + "modelRT/logger" + "github.com/go-redis/redis" uuid "github.com/google/uuid" "go.uber.org/zap" ) -var rlockScript string = strings.Join([]string{ - "local mode = redis.call('hget', KEYS[1], 'mode'); ", - "if (mode == false) then ", - "redis.call('hset', KEYS[1], 'mode', 'read'); ", - "redis.call('hset', KEYS[1], ARGV[2], 1); ", - 
"redis.call('set', KEYS[2] .. ':1', 1); ", - "redis.call('pexpire', KEYS[2] .. ':1', ARGV[1]); ", - "redis.call('pexpire', KEYS[1], ARGV[1]); ", - "return nil; ", - "end; ", - "if (mode == 'read') or (mode == 'write' and redis.call('hexists', KEYS[1], ARGV[3]) == 1) then ", - "local ind = redis.call('hincrby', KEYS[1], ARGV[2], 1); ", - "local key = KEYS[2] .. ':' .. ind;", - "redis.call('set', key, 1); ", - "redis.call('pexpire', key, ARGV[1]); ", - "local remainTime = redis.call('pttl', KEYS[1]); ", - "redis.call('pexpire', KEYS[1], math.max(remainTime, ARGV[1])); ", - "return nil; ", - "end;", - "return redis.call('pttl', KEYS[1]);", -}, "") - -var runlockScript string = strings.Join([]string{ - "local mode = redis.call('hget', KEYS[1], 'mode'); ", - "if (mode == false) then ", - "redis.call('publish', KEYS[2], ARGV[1]); ", - "return 1; ", - "end; ", - "local lockExists = redis.call('hexists', KEYS[1], ARGV[2]); ", - "if (lockExists == 0) then ", - "return nil;", - "end; ", - - "local counter = redis.call('hincrby', KEYS[1], ARGV[2], -1); ", - "if (counter == 0) then ", - "redis.call('hdel', KEYS[1], ARGV[2]); ", - "end;", - "redis.call('del', KEYS[3] .. ':' .. (counter+1)); ", - - "if (redis.call('hlen', KEYS[1]) > 1) then ", - "local maxRemainTime = -3; ", - "local keys = redis.call('hkeys', KEYS[1]); ", - "for n, key in ipairs(keys) do ", - "counter = tonumber(redis.call('hget', KEYS[1], key)); ", - "if type(counter) == 'number' then ", - "for i=counter, 1, -1 do ", - "local remainTime = redis.call('pttl', KEYS[4] .. ':' .. key .. ':rwlock_timeout:' .. i); ", - "maxRemainTime = math.max(remainTime, maxRemainTime);", - "end; ", - "end; ", - "end; ", - - "if maxRemainTime > 0 then ", - "redis.call('pexpire', KEYS[1], maxRemainTime); ", - "return 0; ", - "end;", - - "if mode == 'write' then ", - "return 0;", - "end; ", - "end; ", - - "redis.call('del', KEYS[1]); ", - "redis.call('publish', KEYS[2], ARGV[1]); ", - "return 1; ", -}, "") - -var rlockrefreshScript = strings.Join([]string{ - "local counter = redis.call('hget', KEYS[1], ARGV[2]); ", - "if (counter ~= false) then ", - "redis.call('pexpire', KEYS[1], ARGV[1]); ", - - "if (redis.call('hlen', KEYS[1]) > 1) then ", - "local keys = redis.call('hkeys', KEYS[1]); ", - "for n, key in ipairs(keys) do ", - "counter = tonumber(redis.call('hget', KEYS[1], key)); ", - "if type(counter) == 'number' then ", - "for i=counter, 1, -1 do ", - "redis.call('pexpire', KEYS[2] .. ':' .. key .. ':rwlock_timeout:' .. 
i, ARGV[1]); ", - "end; ", - "end; ", - "end; ", - "end; ", - - "return 1; ", - "end; ", - "return 0;", -}, "") - -var wlockScript string = strings.Join([]string{ - "local mode = redis.call('hget', KEYS[1], 'mode'); ", - "if (mode == false) then ", - "redis.call('hset', KEYS[1], 'mode', 'write'); ", - "redis.call('hset', KEYS[1], ARGV[2], 1); ", - "redis.call('pexpire', KEYS[1], ARGV[1]); ", - "return nil; ", - "end; ", - "if (mode == 'write') then ", - "if (redis.call('hexists', KEYS[1], ARGV[2]) == 1) then ", - "redis.call('hincrby', KEYS[1], ARGV[2], 1); ", - "local currentExpire = redis.call('pttl', KEYS[1]); ", - "redis.call('pexpire', KEYS[1], currentExpire + ARGV[1]); ", - "return nil; ", - "end; ", - "end;", - "return redis.call('pttl', KEYS[1]);", -}, "") - -var wunlockScript string = strings.Join([]string{ - "local mode = redis.call('hget', KEYS[1], 'mode'); ", - "if (mode == false) then ", - "redis.call('publish', KEYS[2], ARGV[1]); ", - "return 1; ", - "end;", - "if (mode == 'write') then ", - "local lockExists = redis.call('hexists', KEYS[1], ARGV[3]); ", - "if (lockExists == 0) then ", - "return nil;", - "else ", - "local counter = redis.call('hincrby', KEYS[1], ARGV[3], -1); ", - "if (counter > 0) then ", - "redis.call('pexpire', KEYS[1], ARGV[2]); ", - "return 0; ", - "else ", - "redis.call('hdel', KEYS[1], ARGV[3]); ", - "if (redis.call('hlen', KEYS[1]) == 1) then ", - "redis.call('del', KEYS[1]); ", - "redis.call('publish', KEYS[2], ARGV[1]); ", - "else ", - // has unlocked read-locks - "redis.call('hset', KEYS[1], 'mode', 'read'); ", - "end; ", - "return 1; ", - "end; ", - "end; ", - "end; ", - "return nil;", -}, "") - type redissionReadLocker struct { redissionLocker - rwTimeoutTokenPrefix string - prefixKey string + rwTimeoutPrefix string + prefixKey string + needRefresh bool } -func (rl *redissionReadLocker) Lock(ctx context.Context, timeout ...time.Duration) { +// TODO 将参数中的 ctx 优化掉 +func (rl *redissionReadLocker) Lock(ctx context.Context, timeout ...time.Duration) error { if rl.exit == nil { rl.exit = make(chan struct{}) } - ttl, err := rl.tryLock() - if err != nil { - panic(err) + + resultErr := rl.tryLock().(*constant.RedisError) + if resultErr.Code == constant.UnknownInternalError { + rl.logger.Error(resultErr.OutputResultMessage()) + return fmt.Errorf("get read lock failed:%w", resultErr) } - if ttl <= 0 { + if (resultErr.Code == constant.LockSuccess) && rl.needRefresh { rl.once.Do(func() { + // async refresh lock timeout unitl receive exit singal go rl.refreshLockTimeout() }) - return + return nil } - submsg := make(chan struct{}, 1) - defer close(submsg) - sub := rl.client.Subscribe(rl.chankey) - - defer sub.Close() - go rl.subscribeLock(sub, submsg) - // listen := rl.listenManager.Subscribe(rl.key, rl.token) - // defer rl.listenManager.UnSubscribe(rl.key, rl.token) - - timer := time.NewTimer(ttl) - defer timer.Stop() - var outimer *time.Timer + var acquireTimer *time.Timer if len(timeout) > 0 && timeout[0] > 0 { - outimer = time.NewTimer(timeout[0]) + acquireTimer = time.NewTimer(timeout[0]) } -LOOP: - for { - ttl, err = rl.tryLock() - if err != nil { - panic(err) - } - if ttl <= 0 { - rl.once.Do(func() { - go rl.refreshLockTimeout() - }) - return - } - if outimer != nil { + subMsg := make(chan struct{}, 1) + defer close(subMsg) + sub := rl.client.Subscribe(rl.waitChankey) + defer sub.Close() + go rl.subscribeLock(sub, subMsg) + + if len(timeout) > 0 && timeout[0] > 0 { + acquireTimer = time.NewTimer(timeout[0]) + for { select { - case _, ok := <-submsg: - if 
!timer.Stop() { - <-timer.C - } - + case _, ok := <-subMsg: if !ok { - panic("lock listen release") + err := errors.New("failed to read the read lock waiting for for the channel message") + rl.logger.Error("failed to read the read lock waiting for for the channel message") + return err } - timer.Reset(ttl) - case <-ctx.Done(): - // break LOOP - panic("lock context already release") - case <-timer.C: - timer.Reset(ttl) - case <-outimer.C: - if !timer.Stop() { - <-timer.C - } - break LOOP - } - } else { - select { - case _, ok := <-submsg: - if !timer.Stop() { - <-timer.C + resultErr := rl.tryLock().(*constant.RedisError) + if (resultErr.Code == constant.RLockFailure) || (resultErr.Code == constant.UnknownInternalError) { + rl.logger.Info(resultErr.OutputResultMessage()) + continue } - if !ok { - panic("lock listen release") + if resultErr.Code == constant.LockSuccess { + rl.logger.Info(resultErr.OutputResultMessage()) + return nil } - - timer.Reset(ttl) - case <-ctx.Done(): - // break LOOP - panic("lock context already release") - case <-timer.C: - timer.Reset(ttl) + case <-acquireTimer.C: + err := errors.New("the waiting time for obtaining the read lock operation has timed out") + rl.logger.Info("the waiting time for obtaining the read lock operation has timed out") + return err } } } + return fmt.Errorf("get read lock failed:%w", constant.NewRedisError(constant.RLockFailure)) } -func (rl *redissionReadLocker) tryLock() (time.Duration, error) { - writeLockToken := strings.Join([]string{rl.token, "write"}, ":") - res := rl.client.Eval(rlockScript, []string{rl.key, rl.rwTimeoutTokenPrefix}, rl.lockLeaseTime, rl.token, writeLockToken) +func (rl *redissionReadLocker) tryLock() error { + lockType := constant.LockType + res := rl.client.Eval(luascript.RLockScript, []string{rl.key, rl.rwTimeoutPrefix}, rl.lockLeaseTime, rl.token) v, err := res.Result() if err != redis.Nil && err != nil { - return 0, err + return constant.ConvertResultToErr(constant.UnknownInternalError, lockType, err.Error()) } - - if v == nil { - return 0, nil - } - - return time.Duration(v.(int64)), nil + return constant.ConvertResultToErr(v.(constant.RedisResult), lockType, "") } func (rl *redissionReadLocker) refreshLockTimeout() { @@ -271,7 +107,7 @@ LOOP: case <-timer.C: timer.Reset(lockTime) // update key expire time - res := rl.client.Eval(rlockrefreshScript, []string{rl.key, rl.prefixKey}, rl.lockLeaseTime, rl.token) + res := rl.client.Eval(luascript.RefreshLockScript, []string{rl.key, rl.prefixKey}, rl.lockLeaseTime, rl.token) val, err := res.Int() if err != nil { panic(err) @@ -289,7 +125,7 @@ LOOP: } func (rl *redissionReadLocker) UnLock() { - res := rl.client.Eval(runlockScript, []string{rl.key, rl.chankey, rl.rwTimeoutTokenPrefix, rl.prefixKey}, unlockMessage, rl.token) + res := rl.client.Eval(luascript.UnRLockScript, []string{rl.key, rl.waitChankey, rl.rwTimeoutPrefix, rl.prefixKey}, unlockMessage, rl.token) val, err := res.Result() if err != redis.Nil && err != nil { panic(err) @@ -318,6 +154,7 @@ func (rl *redissionWriteLocker) Lock(ctx context.Context, timeout ...time.Durati if ttl <= 0 { rl.once.Do(func() { + // async refresh lock timeout unitl receive exit singal go rl.refreshLockTimeout() }) return @@ -325,7 +162,7 @@ func (rl *redissionWriteLocker) Lock(ctx context.Context, timeout ...time.Durati submsg := make(chan struct{}, 1) defer close(submsg) - sub := rl.client.Subscribe(rl.chankey) + sub := rl.client.Subscribe(rl.waitChankey) defer sub.Close() go rl.subscribeLock(sub, submsg) // listen := 
rl.listenManager.Subscribe(rl.key, rl.token) @@ -397,7 +234,7 @@ LOOP: } func (rl *redissionWriteLocker) tryLock() (time.Duration, error) { - res := rl.client.Eval(wlockScript, []string{rl.key}, rl.lockLeaseTime, rl.token) + res := rl.client.Eval(luascript.WLockScript, []string{rl.key}, rl.lockLeaseTime, rl.token) v, err := res.Result() if err != redis.Nil && err != nil { return 0, err @@ -411,7 +248,7 @@ func (rl *redissionWriteLocker) tryLock() (time.Duration, error) { } func (rl *redissionWriteLocker) UnLock() { - res := rl.client.Eval(wunlockScript, []string{rl.key, rl.chankey}, unlockMessage, rl.lockLeaseTime, rl.token) + res := rl.client.Eval(luascript.UnWLockScript, []string{rl.key, rl.waitChankey}, unlockMessage, rl.lockLeaseTime, rl.token) val, err := res.Result() if err != redis.Nil && err != nil { panic(err) @@ -436,16 +273,18 @@ func GetReadLocker(client *redis.Client, ops *RedissionLockConfig) *redissionRea if len(ops.Prefix) <= 0 { ops.Prefix = "redission-rwlock" } + if len(ops.ChanPrefix) <= 0 { ops.ChanPrefix = "redission-rwlock-channel" } + if ops.LockLeaseTime == 0 { r.lockLeaseTime = internalLockLeaseTime } r.key = strings.Join([]string{ops.Prefix, ops.Key}, ":") - r.chankey = strings.Join([]string{ops.ChanPrefix, ops.Key}, ":") + r.waitChankey = strings.Join([]string{ops.ChanPrefix, ops.Key}, ":") tkey := strings.Join([]string{"{", r.key, "}"}, "") - return &redissionReadLocker{redissionLocker: *r, rwTimeoutTokenPrefix: strings.Join([]string{tkey, r.token, "rwlock_timeout"}, ":"), prefixKey: tkey} + return &redissionReadLocker{redissionLocker: *r, rwTimeoutPrefix: strings.Join([]string{tkey, r.token, "rwlock_timeout"}, ":"), prefixKey: tkey, needRefresh: true} } func GetWriteLocker(client *redis.Client, ops *RedissionLockConfig) *redissionWriteLocker { @@ -454,6 +293,7 @@ func GetWriteLocker(client *redis.Client, ops *RedissionLockConfig) *redissionWr client: client, exit: make(chan struct{}), once: &sync.Once{}, + logger: logger.GetLoggerInstance(), } if len(ops.Prefix) <= 0 { @@ -466,6 +306,6 @@ func GetWriteLocker(client *redis.Client, ops *RedissionLockConfig) *redissionWr r.lockLeaseTime = internalLockLeaseTime } r.key = strings.Join([]string{ops.Prefix, ops.Key}, ":") - r.chankey = strings.Join([]string{ops.ChanPrefix, ops.Key}, ":") + r.waitChankey = strings.Join([]string{ops.ChanPrefix, ops.Key}, ":") return &redissionWriteLocker{redissionLocker: *r} } From 09225fc96fe64830ba3cbd1fceb9c75485dcbe0e Mon Sep 17 00:00:00 2001 From: douxu Date: Thu, 6 Mar 2025 16:35:36 +0800 Subject: [PATCH 05/33] optimize structer of redisRWLock and acquisition statements of write lock --- distributedlock/constant/redis_err.go | 88 ++++--- distributedlock/luascript/rlock_script.go | 18 +- distributedlock/redis_lock.go | 11 +- distributedlock/redis_rwlock.go | 293 +++++++++------------- 4 files changed, 179 insertions(+), 231 deletions(-) diff --git a/distributedlock/constant/redis_err.go b/distributedlock/constant/redis_err.go index 69579dd..875df2f 100644 --- a/distributedlock/constant/redis_err.go +++ b/distributedlock/constant/redis_err.go @@ -1,23 +1,25 @@ package constant -import "fmt" +import ( + "fmt" +) -type RedisResult int +type RedisCode int const ( - LockSuccess = RedisResult(1) - UnLockSuccess = RedisResult(1) - RefreshLockSuccess = RedisResult(1) - UnRLockSuccess = RedisResult(0) - RLockFailure = RedisResult(-1) - UnRLockFailureWithWLockOccupancy = RedisResult(-2) - WLockFailureWithRLockOccupancy = RedisResult(-3) - WLockFailureWithWLockOccupancy = RedisResult(-4) - 
UnWLockFailureWithRLockOccupancy = RedisResult(-5) - UnWLockFailureWithWLockOccupancy = RedisResult(-6) - WLockFailureWithNotFirstPriority = RedisResult(-7) - RefreshLockFailure = RedisResult(-8) - UnknownInternalError = RedisResult(-99) + LockSuccess = RedisCode(1) + UnLockSuccess = RedisCode(1) + RefreshLockSuccess = RedisCode(1) + UnRLockSuccess = RedisCode(0) + RLockFailureWithWLockOccupancy = RedisCode(-1) + UnRLockFailureWithWLockOccupancy = RedisCode(-2) + WLockFailureWithRLockOccupancy = RedisCode(-3) + WLockFailureWithWLockOccupancy = RedisCode(-4) + UnWLockFailureWithRLockOccupancy = RedisCode(-5) + UnWLockFailureWithWLockOccupancy = RedisCode(-6) + WLockFailureWithNotFirstPriority = RedisCode(-7) + RefreshLockFailure = RedisCode(-8) + UnknownInternalError = RedisCode(-99) ) type RedisLockType int @@ -28,68 +30,64 @@ const ( RefreshLockType ) -type RedisError struct { - Code RedisResult +type RedisResult struct { + Code RedisCode Message string } -func (e *RedisError) Error() string { +func (e *RedisResult) Error() string { return fmt.Sprintf("redis execution code:%d,message:%s\n", e.Code, e.Message) } -func (e *RedisError) OutputResultMessage() string { +func (e *RedisResult) OutputResultMessage() string { return e.Message } -func (e *RedisError) OutputResultCode() int { +func (e *RedisResult) OutputResultCode() int { return int(e.Code) } -func NewRedisError(res RedisResult) error { - resInt := int(res) - switch resInt { - case -1: - return &RedisError{Code: -1, Message: "redis lock read lock failure,the lock is already occupied by another processes write lock"} - default: - return nil - } -} - -func ConvertResultToErr(res RedisResult, lockType RedisLockType, redisMsg string) error { +func NewRedisResult(res RedisCode, lockType RedisLockType, redisMsg string) error { resInt := int(res) switch resInt { case 1: if lockType == LockType { - return &RedisError{Code: res, Message: "redis lock success"} + return &RedisResult{Code: res, Message: "redis lock success"} } else if lockType == UnLockType { - return &RedisError{Code: res, Message: "redis unlock success"} + return &RedisResult{Code: res, Message: "redis unlock success"} } else { - return &RedisError{Code: res, Message: "redis refresh lock success"} + return &RedisResult{Code: res, Message: "redis refresh lock success"} } case 0: - return &RedisError{Code: res, Message: "redis unlock read lock success, the lock is still occupied by other processes read lock"} + return &RedisResult{Code: res, Message: "redis unlock read lock success, the lock is still occupied by other processes read lock"} case -1: - return &RedisError{Code: res, Message: "redis lock read lock failure,the lock is already occupied by another processes write lock"} + return &RedisResult{Code: res, Message: "redis lock read lock failure,the lock is already occupied by another processes write lock"} case -2: - return &RedisError{Code: res, Message: "redis un lock read lock failure,the lock is already occupied by another processes write lock"} + return &RedisResult{Code: res, Message: "redis un lock read lock failure,the lock is already occupied by another processes write lock"} case -3: - return &RedisError{Code: res, Message: "redis lock write lock failure,the lock is already occupied by anthor processes read lock"} + return &RedisResult{Code: res, Message: "redis lock write lock failure,the lock is already occupied by anthor processes read lock"} case -4: - return &RedisError{Code: res, Message: "redis lock write lock failure,the lock is already occupied by 
anthor processes write lock"} + return &RedisResult{Code: res, Message: "redis lock write lock failure,the lock is already occupied by anthor processes write lock"} case -5: - return &RedisError{Code: res, Message: "redis un lock write lock failure,the lock is already occupied by another processes read lock"} + return &RedisResult{Code: res, Message: "redis un lock write lock failure,the lock is already occupied by another processes read lock"} case -6: - return &RedisError{Code: res, Message: "redis un lock write lock failure,the lock is already occupied by another processes write lock"} + return &RedisResult{Code: res, Message: "redis un lock write lock failure,the lock is already occupied by another processes write lock"} case -7: - return &RedisError{Code: res, Message: "redis lock write lock failure,the first priority in the current process non-waiting queue"} + return &RedisResult{Code: res, Message: "redis lock write lock failure,the first priority in the current process non-waiting queue"} case -8: - return &RedisError{Code: res, Message: "redis refresh lock failure,the lock not exist"} + return &RedisResult{Code: res, Message: "redis refresh lock failure,the lock not exist"} + case -99: + return &RedisResult{Code: res, Message: "redis internal execution error"} default: - return &RedisError{Code: res, Message: fmt.Sprintf("unkown redis execution result:%s\n", redisMsg)} + msg := "unkown redis execution result" + if redisMsg != "" { + msg = fmt.Sprintf("%s:%s\n", msg, redisMsg) + } + return &RedisResult{Code: res, Message: msg} } } -func TranslateResultToStr(res RedisResult, lockType RedisLockType) string { +func TranslateResultToStr(res RedisCode, lockType RedisLockType) string { resInt := int(res) switch resInt { case 1: diff --git a/distributedlock/luascript/rlock_script.go b/distributedlock/luascript/rlock_script.go index 3588670..278ea9c 100644 --- a/distributedlock/luascript/rlock_script.go +++ b/distributedlock/luascript/rlock_script.go @@ -20,9 +20,9 @@ if (mode == false) then end; if (mode == 'write') then - -- TODO 放到 list 中等待写锁释放后再次尝试加锁并且订阅写锁释放的消息 - local key = KEYS[1] .. ':read'; - redis.call('rpush', key, ARGV[2]); + -- 放到 list 中等待写锁释放后再次尝试加锁并且订阅写锁释放的消息 + local waitKey = KEYS[1] .. 
':read'; + redis.call('rpush', waitKey, ARGV[2]); return -1; end; @@ -60,8 +60,7 @@ end; /* KEYS[1]:锁的键名(key),通常是锁的唯一标识。 KEYS[2]:锁的超时键名前缀(rwTimeoutPrefix),用于存储每个读锁的超时键。 -KEYS[3]:锁的释放通知读频道(chankey),用于通知其他客户端锁已释放。 -KEYS[4]:锁的释放通知写频道(chankey),用于通知其他客户端锁已释放。 +KEYS[3]:锁的释放通知写频道(chankey),用于通知其他客户端锁已释放。 ARGV[1]:解锁消息(unlockMessage),用于通知其他客户端锁已释放。 ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 */ @@ -74,8 +73,6 @@ if (mode == false) then local counter = redis.call('llen',writeWait) if (counter >= 1) then redis.call('publish', KEYS[4], ARGV[1]); - else - redis.call('publish', KEYS[3], ARGV[1]); end; return 1; elseif (mode == 'write') then @@ -150,7 +147,7 @@ if (mode == false) then redis.call('lpop', waitKey, '1') return 1; elseif (mode == 'read') then - -- TODO 放到 list 中等待读锁释放后再次尝试加锁并且订阅读锁释放的消息 + -- 放到 list 中等待读锁释放后再次尝试加锁并且订阅读锁释放的消息 redis.call('rpush', waitkey, ARGV[2]); return -3; else @@ -174,8 +171,7 @@ end; /* KEYS[1]:锁的键名(key),通常是锁的唯一标识。 KEYS[2]:锁的超时键名前缀(rwTimeoutPrefix),用于存储每个读锁的超时键。 -KEYS[3]:锁的释放通知读频道(chankey),用于通知其他客户端锁已释放。 -KEYS[4]:锁的释放通知写频道(chankey),用于通知其他客户端锁已释放。 +KEYS[3]:锁的释放通知写频道(chankey),用于通知其他客户端锁已释放。 ARGV[1]:解锁消息(unlockMessage),用于通知其他客户端锁已释放。 ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 */ @@ -186,8 +182,6 @@ if (mode == false) then -- 优先写锁加锁,无写锁的情况通知读锁加锁 local counter = redis.call('llen',writeWait) if (counter >= 1) then - redis.call('publish', KEYS[4], ARGV[1]); - else redis.call('publish', KEYS[3], ARGV[1]); end; return 1; diff --git a/distributedlock/redis_lock.go b/distributedlock/redis_lock.go index 6f9b9aa..6cc7a21 100644 --- a/distributedlock/redis_lock.go +++ b/distributedlock/redis_lock.go @@ -57,7 +57,7 @@ var unlockScript string = strings.Join([]string{ }, "") const ( - internalLockLeaseTime = uint64(30) * 1000 + internalLockLeaseTime = uint64(30) unlockMessage = 0 ) @@ -71,7 +71,7 @@ type RedissionLockConfig struct { type redissionLocker struct { token string key string - waitChankey string + waitChanKey string exit chan struct{} lockLeaseTime uint64 client *redis.Client @@ -98,7 +98,7 @@ func (rl *redissionLocker) Lock(ctx context.Context, timeout ...time.Duration) { submsg := make(chan struct{}, 1) defer close(submsg) - sub := rl.client.Subscribe(rl.waitChankey) + sub := rl.client.Subscribe(rl.waitChanKey) defer sub.Close() go rl.subscribeLock(sub, submsg) // listen := rl.listenManager.Subscribe(rl.key, rl.token) @@ -242,7 +242,6 @@ LOOP: func (rl *redissionLocker) cancelRefreshLockTime() { if rl.exit != nil { close(rl.exit) - rl.exit = nil rl.once = &sync.Once{} } } @@ -262,7 +261,7 @@ func (rl *redissionLocker) tryLock() (time.Duration, error) { } func (rl *redissionLocker) UnLock() { - res := rl.client.Eval(unlockScript, []string{rl.key, rl.waitChankey}, unlockMessage, rl.lockLeaseTime, rl.token) + res := rl.client.Eval(unlockScript, []string{rl.key, rl.waitChanKey}, unlockMessage, rl.lockLeaseTime, rl.token) val, err := res.Result() if err != redis.Nil && err != nil { panic(err) @@ -294,6 +293,6 @@ func GetLocker(client *redis.Client, ops *RedissionLockConfig) *redissionLocker r.lockLeaseTime = internalLockLeaseTime } r.key = strings.Join([]string{ops.Prefix, ops.Key}, ":") - r.waitChankey = strings.Join([]string{ops.ChanPrefix, ops.Key}, ":") + r.waitChanKey = strings.Join([]string{ops.ChanPrefix, ops.Key}, ":") return r } diff --git a/distributedlock/redis_rwlock.go b/distributedlock/redis_rwlock.go index d99c467..5a35133 100644 --- a/distributedlock/redis_rwlock.go +++ b/distributedlock/redis_rwlock.go @@ -1,11 +1,9 @@ package distributed_lock import ( - "context" "errors" 
"fmt" "strings" - "sync" "time" "modelRT/distributedlock/constant" @@ -17,26 +15,25 @@ import ( "go.uber.org/zap" ) -type redissionReadLocker struct { +type RedissionRWLocker struct { redissionLocker - rwTimeoutPrefix string - prefixKey string - needRefresh bool + writeWaitChanKey string + rwTimeoutPrefix string + needRefresh bool } -// TODO 将参数中的 ctx 优化掉 -func (rl *redissionReadLocker) Lock(ctx context.Context, timeout ...time.Duration) error { +func (rl *RedissionRWLocker) RLock(timeout ...time.Duration) error { if rl.exit == nil { rl.exit = make(chan struct{}) } - resultErr := rl.tryLock().(*constant.RedisError) - if resultErr.Code == constant.UnknownInternalError { - rl.logger.Error(resultErr.OutputResultMessage()) - return fmt.Errorf("get read lock failed:%w", resultErr) + result := rl.tryRLock().(*constant.RedisResult) + if result.Code == constant.UnknownInternalError { + rl.logger.Error(result.OutputResultMessage()) + return fmt.Errorf("get read lock failed:%w", result) } - if (resultErr.Code == constant.LockSuccess) && rl.needRefresh { + if (result.Code == constant.LockSuccess) && rl.needRefresh { rl.once.Do(func() { // async refresh lock timeout unitl receive exit singal go rl.refreshLockTimeout() @@ -44,21 +41,17 @@ func (rl *redissionReadLocker) Lock(ctx context.Context, timeout ...time.Duratio return nil } - var acquireTimer *time.Timer - if len(timeout) > 0 && timeout[0] > 0 { - acquireTimer = time.NewTimer(timeout[0]) - } - subMsg := make(chan struct{}, 1) defer close(subMsg) - sub := rl.client.Subscribe(rl.waitChankey) + sub := rl.client.Subscribe(rl.writeWaitChanKey) defer sub.Close() go rl.subscribeLock(sub, subMsg) if len(timeout) > 0 && timeout[0] > 0 { - acquireTimer = time.NewTimer(timeout[0]) + acquireTimer := time.NewTimer(timeout[0]) for { select { + case _, ok := <-subMsg: if !ok { err := errors.New("failed to read the read lock waiting for for the channel message") @@ -66,8 +59,8 @@ func (rl *redissionReadLocker) Lock(ctx context.Context, timeout ...time.Duratio return err } - resultErr := rl.tryLock().(*constant.RedisError) - if (resultErr.Code == constant.RLockFailure) || (resultErr.Code == constant.UnknownInternalError) { + resultErr := rl.tryRLock().(*constant.RedisResult) + if (resultErr.Code == constant.RLockFailureWithWLockOccupancy) || (resultErr.Code == constant.UnknownInternalError) { rl.logger.Info(resultErr.OutputResultMessage()) continue } @@ -83,229 +76,193 @@ func (rl *redissionReadLocker) Lock(ctx context.Context, timeout ...time.Duratio } } } - return fmt.Errorf("get read lock failed:%w", constant.NewRedisError(constant.RLockFailure)) + return fmt.Errorf("lock read lock failed:%w", result) } -func (rl *redissionReadLocker) tryLock() error { +func (rl *RedissionRWLocker) tryRLock() error { lockType := constant.LockType + res := rl.client.Eval(luascript.RLockScript, []string{rl.key, rl.rwTimeoutPrefix}, rl.lockLeaseTime, rl.token) - v, err := res.Result() + val, err := res.Int() if err != redis.Nil && err != nil { - return constant.ConvertResultToErr(constant.UnknownInternalError, lockType, err.Error()) + return constant.NewRedisResult(constant.UnknownInternalError, lockType, err.Error()) } - return constant.ConvertResultToErr(v.(constant.RedisResult), lockType, "") + return constant.NewRedisResult(constant.RedisCode(val), lockType, "") } -func (rl *redissionReadLocker) refreshLockTimeout() { - rl.logger.Debug("rlock: %s lock %s\n", zap.String("token", rl.token), zap.String("key", rl.key)) - lockTime := time.Duration(rl.lockLeaseTime/3) * 
time.Millisecond +func (rl *RedissionRWLocker) refreshLockTimeout() { + rl.logger.Info("read lock refresh by key and token", zap.String("token", rl.token), zap.String("key", rl.key)) + + lockTime := time.Duration(rl.lockLeaseTime/3) * time.Second timer := time.NewTimer(lockTime) defer timer.Stop() -LOOP: + for { select { case <-timer.C: - timer.Reset(lockTime) - // update key expire time - res := rl.client.Eval(luascript.RefreshLockScript, []string{rl.key, rl.prefixKey}, rl.lockLeaseTime, rl.token) + // extend key lease time + res := rl.client.Eval(luascript.RefreshLockScript, []string{rl.key, rl.rwTimeoutPrefix}, rl.lockLeaseTime, rl.token) val, err := res.Int() - if err != nil { - panic(err) + if err != redis.Nil && err != nil { + rl.logger.Info("read lock refresh failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err)) + return } - if val == 0 { - rl.logger.Debug("not find the rlock key of self") - break LOOP - } - case <-rl.exit: - break LOOP + if constant.RedisCode(val) == constant.RefreshLockFailure { + rl.logger.Error("read lock refreash failed,can not find the read lock by key and token", zap.String("token", rl.token), zap.String("key", rl.key)) + break + } + + if constant.RedisCode(val) == constant.RefreshLockSuccess { + rl.logger.Info("read lock refresh success by key and token", zap.String("token", rl.token), zap.String("key", rl.key)) + } + timer.Reset(lockTime) + case <-rl.exit: + break } } - rl.logger.Debug("rlock: refresh routine release", zap.String("token", rl.token)) } -func (rl *redissionReadLocker) UnLock() { - res := rl.client.Eval(luascript.UnRLockScript, []string{rl.key, rl.waitChankey, rl.rwTimeoutPrefix, rl.prefixKey}, unlockMessage, rl.token) - val, err := res.Result() +func (rl *RedissionRWLocker) UnRLock() error { + res := rl.client.Eval(luascript.UnRLockScript, []string{rl.key, rl.rwTimeoutPrefix, rl.writeWaitChanKey}, unlockMessage, rl.token) + val, err := res.Int() if err != redis.Nil && err != nil { - panic(err) + rl.logger.Info("unlock read lock failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err)) + return fmt.Errorf("unlock read lock failed:%w", constant.NewRedisResult(constant.UnknownInternalError, constant.LockType, err.Error())) } - if val == nil { - panic("attempt to unlock lock, not locked by current routine by lock id:" + rl.token) + + if (constant.RedisCode(val) == constant.UnLockSuccess) || (constant.RedisCode(val) == constant.UnRLockSuccess) { + if rl.needRefresh { + rl.cancelRefreshLockTime() + } + + rl.logger.Info("unlock read lock success", zap.String("token", rl.token), zap.String("key", rl.key)) + return nil } - rl.logger.Debug("lock: %s unlock %s\n", zap.String("token", rl.token), zap.String("key", rl.key)) - if val.(int64) == 1 { - rl.cancelRefreshLockTime() + + if constant.RedisCode(val) == constant.UnRLockFailureWithWLockOccupancy { + rl.logger.Info("unlock read lock failed", zap.String("token", rl.token), zap.String("key", rl.key)) + return fmt.Errorf("unlock read lock failed:%w", constant.NewRedisResult(constant.UnRLockFailureWithWLockOccupancy, constant.UnLockType, "")) } + return nil } -type redissionWriteLocker struct { - redissionLocker -} - -func (rl *redissionWriteLocker) Lock(ctx context.Context, timeout ...time.Duration) { +func (rl *RedissionRWLocker) WLock(timeout ...time.Duration) error { if rl.exit == nil { rl.exit = make(chan struct{}) } - ttl, err := rl.tryLock() - if err != nil { - panic(err) + + result := rl.tryWLock().(*constant.RedisResult) + if result.Code == 
constant.UnknownInternalError { + rl.logger.Error(result.OutputResultMessage()) + return fmt.Errorf("get write lock failed:%w", result) } - if ttl <= 0 { + if (result.Code == constant.LockSuccess) && rl.needRefresh { rl.once.Do(func() { // async refresh lock timeout unitl receive exit singal go rl.refreshLockTimeout() }) - return + return nil } - submsg := make(chan struct{}, 1) - defer close(submsg) - sub := rl.client.Subscribe(rl.waitChankey) + subMsg := make(chan struct{}, 1) + defer close(subMsg) + sub := rl.client.Subscribe(rl.writeWaitChanKey) defer sub.Close() - go rl.subscribeLock(sub, submsg) - // listen := rl.listenManager.Subscribe(rl.key, rl.token) - // defer rl.listenManager.UnSubscribe(rl.key, rl.token) + go rl.subscribeLock(sub, subMsg) - timer := time.NewTimer(ttl) - defer timer.Stop() - // outimer 理解为如果超过这个时间没有获取到锁,就直接放弃 - var outimer *time.Timer if len(timeout) > 0 && timeout[0] > 0 { - outimer = time.NewTimer(timeout[0]) - } -LOOP: - for { - ttl, err = rl.tryLock() - if err != nil { - panic(err) - } - - if ttl <= 0 { - rl.once.Do(func() { - go rl.refreshLockTimeout() - }) - return - } - if outimer != nil { + acquireTimer := time.NewTimer(timeout[0]) + for { select { - case _, ok := <-submsg: - if !timer.Stop() { - <-timer.C - } - + case _, ok := <-subMsg: if !ok { - panic("lock listen release") + err := errors.New("failed to read the write lock waiting for for the channel message") + rl.logger.Error("failed to read the read lock waiting for for the channel message") + return err } - timer.Reset(ttl) - case <-ctx.Done(): - // break LOOP - panic("lock context already release") - case <-timer.C: - timer.Reset(ttl) - case <-outimer.C: - if !timer.Stop() { - <-timer.C - } - break LOOP - } - } else { - select { - case _, ok := <-submsg: - if !timer.Stop() { - <-timer.C + result := rl.tryWLock().(*constant.RedisResult) + if (result.Code == constant.UnknownInternalError) || (result.Code == constant.WLockFailureWithRLockOccupancy) || (result.Code == constant.WLockFailureWithWLockOccupancy) || (result.Code == constant.WLockFailureWithNotFirstPriority) { + rl.logger.Info(result.OutputResultMessage()) + continue } - if !ok { - panic("lock listen release") + if result.Code == constant.LockSuccess { + rl.logger.Info(result.OutputResultMessage()) + return nil } - - timer.Reset(ttl) - case <-ctx.Done(): - // break LOOP - panic("lock context already release") - case <-timer.C: - timer.Reset(ttl) + case <-acquireTimer.C: + err := errors.New("the waiting time for obtaining the write lock operation has timed out") + rl.logger.Info("the waiting time for obtaining the write lock operation has timed out") + return err } } } + return fmt.Errorf("lock write lock failed:%w", result) } -func (rl *redissionWriteLocker) tryLock() (time.Duration, error) { - res := rl.client.Eval(luascript.WLockScript, []string{rl.key}, rl.lockLeaseTime, rl.token) - v, err := res.Result() +func (rl *RedissionRWLocker) tryWLock() error { + lockType := constant.LockType + + res := rl.client.Eval(luascript.WLockScript, []string{rl.key, rl.rwTimeoutPrefix}, rl.lockLeaseTime, rl.token) + val, err := res.Int() if err != redis.Nil && err != nil { - return 0, err + return constant.NewRedisResult(constant.UnknownInternalError, lockType, err.Error()) } - - if v == nil { - return 0, nil - } - - return time.Duration(v.(int64)), nil + return constant.NewRedisResult(constant.RedisCode(val), lockType, "") } -func (rl *redissionWriteLocker) UnLock() { - res := rl.client.Eval(luascript.UnWLockScript, []string{rl.key, rl.waitChankey}, 
unlockMessage, rl.lockLeaseTime, rl.token) - val, err := res.Result() +func (rl *RedissionRWLocker) UnWLock() error { + res := rl.client.Eval(luascript.UnWLockScript, []string{rl.key, rl.rwTimeoutPrefix, rl.waitChanKey}, unlockMessage, rl.token) + val, err := res.Int() if err != redis.Nil && err != nil { - panic(err) + rl.logger.Info("unlock write lock failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err)) + return fmt.Errorf("unlock write lock failed:%w", constant.NewRedisResult(constant.UnknownInternalError, constant.UnLockType, err.Error())) } - if val == nil { - panic("attempt to unlock lock, not locked by current routine by lock id:" + rl.token) + + if constant.RedisCode(val) == constant.UnLockSuccess { + if rl.needRefresh { + rl.cancelRefreshLockTime() + } + rl.logger.Info("unlock write lock success", zap.String("token", rl.token), zap.String("key", rl.key)) + return nil } - rl.logger.Debug("lock: unlock", zap.String("token", rl.token), zap.String("key", rl.key)) - if val.(int64) == 1 { - rl.cancelRefreshLockTime() + + if (constant.RedisCode(val) == constant.UnWLockFailureWithRLockOccupancy) || (constant.RedisCode(val) == constant.UnWLockFailureWithWLockOccupancy) { + rl.logger.Info("unlock write lock failed", zap.String("token", rl.token), zap.String("key", rl.key)) + return fmt.Errorf("unlock write lock failed:%w", constant.NewRedisResult(constant.RedisCode(val), constant.UnLockType, "")) } + return nil } -func GetReadLocker(client *redis.Client, ops *RedissionLockConfig) *redissionReadLocker { +func GetRWLocker(client *redis.Client, ops *RedissionLockConfig) *RedissionRWLocker { r := &redissionLocker{ token: uuid.New().String(), client: client, exit: make(chan struct{}), - once: &sync.Once{}, - } - - if len(ops.Prefix) <= 0 { - ops.Prefix = "redission-rwlock" - } - - if len(ops.ChanPrefix) <= 0 { - ops.ChanPrefix = "redission-rwlock-channel" - } - - if ops.LockLeaseTime == 0 { - r.lockLeaseTime = internalLockLeaseTime - } - r.key = strings.Join([]string{ops.Prefix, ops.Key}, ":") - r.waitChankey = strings.Join([]string{ops.ChanPrefix, ops.Key}, ":") - tkey := strings.Join([]string{"{", r.key, "}"}, "") - return &redissionReadLocker{redissionLocker: *r, rwTimeoutPrefix: strings.Join([]string{tkey, r.token, "rwlock_timeout"}, ":"), prefixKey: tkey, needRefresh: true} -} - -func GetWriteLocker(client *redis.Client, ops *RedissionLockConfig) *redissionWriteLocker { - r := &redissionLocker{ - token: uuid.New().String(), - client: client, - exit: make(chan struct{}), - once: &sync.Once{}, logger: logger.GetLoggerInstance(), } if len(ops.Prefix) <= 0 { ops.Prefix = "redission-rwlock" } + if len(ops.ChanPrefix) <= 0 { ops.ChanPrefix = "redission-rwlock-channel" } + if ops.LockLeaseTime == 0 { r.lockLeaseTime = internalLockLeaseTime } r.key = strings.Join([]string{ops.Prefix, ops.Key}, ":") - r.waitChankey = strings.Join([]string{ops.ChanPrefix, ops.Key}, ":") - return &redissionWriteLocker{redissionLocker: *r} + + rwLocker := &RedissionRWLocker{ + redissionLocker: *r, + writeWaitChanKey: strings.Join([]string{r.key, "write"}, ":"), + rwTimeoutPrefix: "rwlock_timeout", + needRefresh: true, + } + return rwLocker } From 7e3d94db4bd35576a84e94d10fb7af0ac49711a6 Mon Sep 17 00:00:00 2001 From: douxu Date: Fri, 7 Mar 2025 16:16:26 +0800 Subject: [PATCH 06/33] optimize structer of redisLock and acquisition statements of lock --- .../{redis_err.go => redis_result.go} | 4 + distributedlock/luascript/lock_script.go | 62 ++++ .../{rlock_script.go => rwlock_script.go} | 34 
+--
 distributedlock/redis_lock.go             | 271 +++++-----------
 distributedlock/redis_rwlock.go           |  51 ++--
 5 files changed, 218 insertions(+), 204 deletions(-)
 rename distributedlock/constant/{redis_err.go => redis_result.go} (95%)
 create mode 100644 distributedlock/luascript/lock_script.go
 rename distributedlock/luascript/{rlock_script.go => rwlock_script.go} (86%)

diff --git a/distributedlock/constant/redis_err.go b/distributedlock/constant/redis_result.go
similarity index 95%
rename from distributedlock/constant/redis_err.go
rename to distributedlock/constant/redis_result.go
index 875df2f..d389a39 100644
--- a/distributedlock/constant/redis_err.go
+++ b/distributedlock/constant/redis_result.go
@@ -19,6 +19,8 @@ const (
 	UnWLockFailureWithWLockOccupancy = RedisCode(-6)
 	WLockFailureWithNotFirstPriority = RedisCode(-7)
 	RefreshLockFailure               = RedisCode(-8)
+	LockFailure                      = RedisCode(-9)
+	UnLocakFailureWithLockOccupancy  = RedisCode(-10)
 	UnknownInternalError             = RedisCode(-99)
 )
 
@@ -76,6 +78,8 @@ func NewRedisResult(res RedisCode, lockType RedisLockType, redisMsg string) erro
 		return &RedisResult{Code: res, Message: "redis lock write lock failure,the first priority in the current process non-waiting queue"}
 	case -8:
 		return &RedisResult{Code: res, Message: "redis refresh lock failure,the lock not exist"}
+	case -9:
+		return &RedisResult{Code: res, Message: "redis lock failure,the lock is already occupied by another processes lock"}
 	case -99:
 		return &RedisResult{Code: res, Message: "redis internal execution error"}
 	default:
diff --git a/distributedlock/luascript/lock_script.go b/distributedlock/luascript/lock_script.go
new file mode 100644
index 0000000..64a6be8
--- /dev/null
+++ b/distributedlock/luascript/lock_script.go
@@ -0,0 +1,61 @@
+package luascript
+
+/*
+KEYS[1]: the lock key, normally the unique identifier of the lock.
+ARGV[1]: the lock lease time (lockLeaseTime), in seconds.
+ARGV[2]: the unique token of the current client, used to tell clients apart.
+*/
+var LockScript = `
+-- acquire the lock when it does not exist yet
+if (redis.call('exists', KEYS[1]) == 0) then
+	redis.call('hset', KEYS[1], ARGV[2], 1);
+	redis.call('expire', KEYS[1], ARGV[1]);
+	return 1;
+end;
+-- reentrant lock logic
+if (redis.call('hexists', KEYS[1], ARGV[2]) == 1) then
+	redis.call('hincrby', KEYS[1], ARGV[2], 1);
+	redis.call('expire', KEYS[1], ARGV[1]);
+	return 1;
+end;
+-- the lock is held under another client's token, report lock failure
+return -9;
+`
+
+/*
+KEYS[1]: the lock key, normally the unique identifier of the lock.
+ARGV[1]: the lock lease time (lockLeaseTime), in seconds.
+ARGV[2]: the unique token of the current client, used to tell clients apart.
+*/
+var RefreshLockScript = `
+if (redis.call('hexists', KEYS[1], ARGV[2]) == 1) then
+	redis.call('expire', KEYS[1], ARGV[1]);
+	return 1;
+end;
+return -8;
+`
+
+/*
+KEYS[1]: the lock key, normally the unique identifier of the lock.
+KEYS[2]: the unlock notification channel (chankey), used to tell other clients the lock was released.
+ARGV[1]: the unlock message (unlockMessage) published to other clients on release.
+ARGV[2]: the unique token of the current client, used to tell clients apart.
+*/
+var UnLockScript = `
+if (redis.call('exists', KEYS[1]) == 0) then
+	redis.call('publish', KEYS[2], ARGV[1]);
+	return 1;
+end;
+-- the lock is held under another client's token, report unlock failure
+if (redis.call('hexists', KEYS[1], ARGV[2]) == 0) then
+	return -10;
+end;
+local counter = redis.call('hincrby', KEYS[1], ARGV[2], -1);
+if (counter > 0) then
+	return 1;
+else
+	redis.call('del', KEYS[1]);
+	redis.call('publish', KEYS[2], ARGV[1]);
+	return 1;
+end;
+`
diff --git a/distributedlock/luascript/rlock_script.go b/distributedlock/luascript/rwlock_script.go
similarity index 86%
rename from distributedlock/luascript/rlock_script.go
rename to distributedlock/luascript/rwlock_script.go
index 278ea9c..814c559 100644
--- a/distributedlock/luascript/rlock_script.go
+++ b/distributedlock/luascript/rwlock_script.go
@@ package luascript // RLockScript is the lua script for the lock read lock command /* -KEYS[1]:锁的键名(key),通常是锁的唯一标识。 -KEYS[2]:锁的超时键名前缀(rwTimeoutPrefix),用于存储每个读锁的超时键。 -ARGV[1]:锁的过期时间(lockLeaseTime),单位为秒。 -ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 +KEYS[1]:锁的键名(key),通常是锁的唯一标识。 +KEYS[2]:锁的超时键名前缀(rwTimeoutPrefix),用于存储每个读锁的超时键。 +ARGV[1]:锁的过期时间(lockLeaseTime),单位为秒。 +ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 */ var RLockScript = ` local mode = redis.call('hget', KEYS[1], 'mode'); @@ -126,10 +126,10 @@ end; // WLockScript is the lua script for the lock write lock command /* -KEYS[1]:锁的键名(key),通常是锁的唯一标识。 -KEYS[2]:锁的超时键名前缀(rwTimeoutPrefix),用于存储每个读锁的超时键。 -ARGV[1]:锁的过期时间(lockLeaseTime),单位为秒。 -ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 +KEYS[1]:锁的键名(key),通常是锁的唯一标识。 +KEYS[2]:锁的超时键名前缀(rwTimeoutPrefix),用于存储每个读锁的超时键。 +ARGV[1]:锁的过期时间(lockLeaseTime),单位为秒。 +ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 */ var WLockScript = ` local mode = redis.call('hget', KEYS[1], 'mode'); @@ -169,11 +169,11 @@ end; // UnWLockScript is the lua script for the unlock write lock command /* -KEYS[1]:锁的键名(key),通常是锁的唯一标识。 -KEYS[2]:锁的超时键名前缀(rwTimeoutPrefix),用于存储每个读锁的超时键。 +KEYS[1]:锁的键名(key),通常是锁的唯一标识。 +KEYS[2]:锁的超时键名前缀(rwTimeoutPrefix),用于存储每个读锁的超时键。 KEYS[3]:锁的释放通知写频道(chankey),用于通知其他客户端锁已释放。 ARGV[1]:解锁消息(unlockMessage),用于通知其他客户端锁已释放。 -ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 +ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 */ var UnWLockScript = ` local mode = redis.call('hget', KEYS[1], 'mode'); @@ -208,14 +208,14 @@ else end; ` -// RefreshLockScript is the lua script for the refresh lock command +// RefreshRWLockScript is the lua script for the refresh lock command /* -KEYS[1]:锁的键名(key),通常是锁的唯一标识。 -KEYS[2]:锁的超时键名前缀(rwTimeoutPrefix),用于存储每个读锁的超时键。 -ARGV[1]:锁的过期时间(lockLeaseTime),单位为秒。 -ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 +KEYS[1]:锁的键名(key),通常是锁的唯一标识。 +KEYS[2]:锁的超时键名前缀(rwTimeoutPrefix),用于存储每个读锁的超时键。 +ARGV[1]:锁的过期时间(lockLeaseTime),单位为秒。 +ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 */ -var RefreshLockScript = ` +var RefreshRWLockScript = ` local lockKey = KEYS[2] .. ':' .. 
ARGV[2] local lockExists = redis.call('hexists', KEYS[1], lockKey); local mode = redis.call('hget', KEYS[1], 'mode') diff --git a/distributedlock/redis_lock.go b/distributedlock/redis_lock.go index 6cc7a21..9653589 100644 --- a/distributedlock/redis_lock.go +++ b/distributedlock/redis_lock.go @@ -1,61 +1,21 @@ package distributed_lock import ( - "context" + "errors" "fmt" "strings" "sync" "time" + "modelRT/distributedlock/constant" luascript "modelRT/distributedlock/luascript" + "modelRT/logger" "github.com/go-redis/redis" uuid "github.com/google/uuid" "go.uber.org/zap" ) -var lockScript string = strings.Join([]string{ - "if (redis.call('exists', KEYS[1]) == 0) then ", - "redis.call('hset', KEYS[1], ARGV[2], 1); ", - "redis.call('pexpire', KEYS[1], ARGV[1]); ", - "return nil; ", - "end; ", - "if (redis.call('hexists', KEYS[1], ARGV[2]) == 1) then ", - "redis.call('hincrby', KEYS[1], ARGV[2], 1); ", - "redis.call('pexpire', KEYS[1], ARGV[1]); ", - "return nil; ", - "end; ", - "return redis.call('pttl', KEYS[1]);", -}, "") - -var refreshLockScript string = strings.Join([]string{ - "if (redis.call('hexists', KEYS[1], ARGV[2]) == 1) then ", - "redis.call('pexpire', KEYS[1], ARGV[1]); ", - "return 1; ", - "end; ", - "return 0;", -}, "") - -var unlockScript string = strings.Join([]string{ - "if (redis.call('exists', KEYS[1]) == 0) then ", - "redis.call('publish', KEYS[2], ARGV[1]); ", - "return 1; ", - "end;", - "if (redis.call('hexists', KEYS[1], ARGV[3]) == 0) then ", - "return nil;", - "end; ", - "local counter = redis.call('hincrby', KEYS[1], ARGV[3], -1); ", - "if (counter > 0) then ", - "redis.call('pexpire', KEYS[1], ARGV[2]); ", - "return 0; ", - "else ", - "redis.call('del', KEYS[1]); ", - "redis.call('publish', KEYS[2], ARGV[1]); ", - "return 1; ", - "end; ", - "return nil;", -}, "") - const ( internalLockLeaseTime = uint64(30) unlockMessage = 0 @@ -65,138 +25,95 @@ type RedissionLockConfig struct { LockLeaseTime time.Duration Prefix string ChanPrefix string + TimeoutPrefix string Key string } type redissionLocker struct { + lockLeaseTime uint64 token string key string waitChanKey string + needRefresh bool exit chan struct{} - lockLeaseTime uint64 client *redis.Client once *sync.Once logger *zap.Logger } -func (rl *redissionLocker) Lock(ctx context.Context, timeout ...time.Duration) { - fmt.Println(luascript.RLockScript) +func (rl *redissionLocker) Lock(timeout ...time.Duration) error { if rl.exit == nil { rl.exit = make(chan struct{}) } - ttl, err := rl.tryLock() - if err != nil { - panic(err) + result := rl.tryLock().(*constant.RedisResult) + if result.Code == constant.UnknownInternalError { + rl.logger.Error(result.OutputResultMessage()) + return fmt.Errorf("get lock failed:%w", result) } - if ttl <= 0 { + if (result.Code == constant.LockSuccess) && rl.needRefresh { rl.once.Do(func() { + // async refresh lock timeout unitl receive exit singal go rl.refreshLockTimeout() }) - return + return nil } - submsg := make(chan struct{}, 1) - defer close(submsg) + subMsg := make(chan struct{}, 1) + defer close(subMsg) sub := rl.client.Subscribe(rl.waitChanKey) defer sub.Close() - go rl.subscribeLock(sub, submsg) - // listen := rl.listenManager.Subscribe(rl.key, rl.token) - // defer rl.listenManager.UnSubscribe(rl.key, rl.token) + go rl.subscribeLock(sub, subMsg) - timer := time.NewTimer(ttl) - defer timer.Stop() - // outimer 的作用理解为如果超过多长时间无法获得这个锁,那么就直接放弃 - var outimer *time.Timer if len(timeout) > 0 && timeout[0] > 0 { - outimer = time.NewTimer(timeout[0]) - } -LOOP: - for { - ttl, err = 
rl.tryLock() - if err != nil { - panic(err) - } - - if ttl <= 0 { - rl.once.Do(func() { - go rl.refreshLockTimeout() - }) - return - } - if outimer != nil { + acquireTimer := time.NewTimer(timeout[0]) + for { select { - case _, ok := <-submsg: - if !timer.Stop() { - <-timer.C - } + case _, ok := <-subMsg: if !ok { - panic("lock listen release") + err := errors.New("failed to read the lock waiting for for the channel message") + rl.logger.Error("failed to read the lock waiting for for the channel message") + return err } - timer.Reset(ttl) - case <-ctx.Done(): - // break LOOP - panic("lock context already release") - case <-timer.C: - timer.Reset(ttl) - case <-outimer.C: - if !timer.Stop() { - <-timer.C - } - break LOOP - } - } else { - select { - case _, ok := <-submsg: - if !timer.Stop() { - <-timer.C + resultErr := rl.tryLock().(*constant.RedisResult) + if (resultErr.Code == constant.LockFailure) || (resultErr.Code == constant.UnknownInternalError) { + rl.logger.Info(resultErr.OutputResultMessage()) + continue } - if !ok { - panic("lock listen release") + if resultErr.Code == constant.LockSuccess { + rl.logger.Info(resultErr.OutputResultMessage()) + return nil } - - timer.Reset(ttl) - case <-ctx.Done(): - // break LOOP - panic("lock context already release") - case <-timer.C: - timer.Reset(ttl) + case <-acquireTimer.C: + err := errors.New("the waiting time for obtaining the lock operation has timed out") + rl.logger.Info("the waiting time for obtaining the lock operation has timed out") + return err } } } + return fmt.Errorf("lock the redis lock failed:%w", result) } func (rl *redissionLocker) subscribeLock(sub *redis.PubSub, out chan struct{}) { - defer func() { - if err := recover(); err != nil { - rl.logger.Error("subscribeLock catch error", zap.Error(err.(error))) - } - }() if sub == nil || out == nil { return } - rl.logger.Debug("lock:%s enter sub routine", zap.String("token", rl.token)) -LOOP: + rl.logger.Info("lock: enter sub routine", zap.String("token", rl.token)) + for { msg, err := sub.Receive() if err != nil { - rl.logger.Info("sub receive message", zap.Error(err)) - break LOOP + rl.logger.Info("sub receive message failed", zap.Error(err)) + continue } select { case <-rl.exit: - break LOOP + break default: - if len(out) > 0 { - // if channel hava msg. drop it - rl.logger.Debug("drop message when channel if full") - continue - } - switch msg.(type) { case *redis.Subscription: // Ignore. 
@@ -208,35 +125,44 @@ LOOP: } } } - rl.logger.Debug("lock sub routine release", zap.String("token", rl.token)) } +/* +KEYS[1]:锁的键名(key),通常是锁的唯一标识。 +ARGV[1]:锁的过期时间(lockLeaseTime),单位为秒。 +ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 +*/ func (rl *redissionLocker) refreshLockTimeout() { - rl.logger.Debug("lock", zap.String("token", rl.token), zap.String("lock key", rl.key)) - lockTime := time.Duration(rl.lockLeaseTime/3) * time.Millisecond + rl.logger.Info("lock refresh by key and token", zap.String("token", rl.token), zap.String("key", rl.key)) + + lockTime := time.Duration(rl.lockLeaseTime/3) * time.Second timer := time.NewTimer(lockTime) defer timer.Stop() -LOOP: + for { select { case <-timer.C: - timer.Reset(lockTime) - // update key expire time - res := rl.client.Eval(refreshLockScript, []string{rl.key}, rl.lockLeaseTime, rl.token) + // extend key lease time + res := rl.client.Eval(luascript.RefreshLockScript, []string{rl.key}, rl.lockLeaseTime, rl.token) val, err := res.Int() - if err != nil { - panic(err) + if err != redis.Nil && err != nil { + rl.logger.Info("lock refresh failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err)) + return } - if val == 0 { - rl.logger.Debug("not find the lock key of self") - break LOOP - } - case <-rl.exit: - break LOOP + if constant.RedisCode(val) == constant.RefreshLockFailure { + rl.logger.Error("lock refreash failed,can not find the lock by key and token", zap.String("token", rl.token), zap.String("key", rl.key)) + break + } + + if constant.RedisCode(val) == constant.RefreshLockSuccess { + rl.logger.Info("lock refresh success by key and token", zap.String("token", rl.token), zap.String("key", rl.key)) + } + timer.Reset(lockTime) + case <-rl.exit: + break } } - rl.logger.Debug("refresh routine release", zap.String("token", rl.token)) } func (rl *redissionLocker) cancelRefreshLockTime() { @@ -246,53 +172,72 @@ func (rl *redissionLocker) cancelRefreshLockTime() { } } -func (rl *redissionLocker) tryLock() (time.Duration, error) { - res := rl.client.Eval(lockScript, []string{rl.key}, rl.lockLeaseTime, rl.token) - v, err := res.Result() +/* +KEYS[1]:锁的键名(key),通常是锁的唯一标识。 +ARGV[1]:锁的过期时间(lockLeaseTime),单位为秒。 +ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 +*/ +func (rl *redissionLocker) tryLock() error { + lockType := constant.LockType + res := rl.client.Eval(luascript.LockScript, []string{rl.key}, rl.lockLeaseTime, rl.token) + val, err := res.Int() if err != redis.Nil && err != nil { - return 0, err + return constant.NewRedisResult(constant.UnknownInternalError, lockType, err.Error()) } - - if v == nil { - return 0, nil - } - - return time.Duration(v.(int64)), nil + return constant.NewRedisResult(constant.RedisCode(val), lockType, "") } -func (rl *redissionLocker) UnLock() { - res := rl.client.Eval(unlockScript, []string{rl.key, rl.waitChanKey}, unlockMessage, rl.lockLeaseTime, rl.token) - val, err := res.Result() +/* +KEYS[1]:锁的键名(key),通常是锁的唯一标识。 +KEYS[2]:锁的释放通知频道(chankey),用于通知其他客户端锁已释放。 +ARGV[1]:解锁消息(unlockMessage),用于通知其他客户端锁已释放。 +ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 +*/ +func (rl *redissionLocker) UnLock() error { + res := rl.client.Eval(luascript.UnLockScript, []string{rl.key, rl.waitChanKey}, unlockMessage, rl.token) + val, err := res.Int() if err != redis.Nil && err != nil { - panic(err) + rl.logger.Info("unlock lock failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err)) + return fmt.Errorf("unlock lock failed:%w", constant.NewRedisResult(constant.UnknownInternalError, constant.UnLockType, err.Error())) } - if 
val == nil { - panic("attempt to unlock lock, not locked by current routine by lock id:" + rl.token) + + if constant.RedisCode(val) == constant.UnLockSuccess { + if rl.needRefresh { + rl.cancelRefreshLockTime() + } + + rl.logger.Info("unlock lock success", zap.String("token", rl.token), zap.String("key", rl.key)) + return nil } - rl.logger.Debug("unlock", zap.String("token", rl.token), zap.String("key", rl.key)) - if val.(int64) == 1 { - rl.cancelRefreshLockTime() + + if constant.RedisCode(val) == constant.UnLocakFailureWithLockOccupancy { + rl.logger.Info("unlock lock failed", zap.String("token", rl.token), zap.String("key", rl.key)) + return fmt.Errorf("unlock lock failed:%w", constant.NewRedisResult(constant.UnLocakFailureWithLockOccupancy, constant.UnLockType, "")) } + return nil } func GetLocker(client *redis.Client, ops *RedissionLockConfig) *redissionLocker { r := &redissionLocker{ - token: uuid.New().String(), - client: client, - exit: make(chan struct{}), - once: &sync.Once{}, + token: uuid.New().String(), + needRefresh: true, + client: client, + exit: make(chan struct{}), + logger: logger.GetLoggerInstance(), } if len(ops.Prefix) <= 0 { ops.Prefix = "redission-lock" } + if len(ops.ChanPrefix) <= 0 { ops.ChanPrefix = "redission-lock-channel" } + if ops.LockLeaseTime == 0 { r.lockLeaseTime = internalLockLeaseTime } r.key = strings.Join([]string{ops.Prefix, ops.Key}, ":") - r.waitChanKey = strings.Join([]string{ops.ChanPrefix, ops.Key}, ":") + r.waitChanKey = strings.Join([]string{ops.ChanPrefix, ops.Key, "wait"}, ":") return r } diff --git a/distributedlock/redis_rwlock.go b/distributedlock/redis_rwlock.go index 5a35133..529ff75 100644 --- a/distributedlock/redis_rwlock.go +++ b/distributedlock/redis_rwlock.go @@ -17,9 +17,7 @@ import ( type RedissionRWLocker struct { redissionLocker - writeWaitChanKey string - rwTimeoutPrefix string - needRefresh bool + rwTokenTimeoutPrefix string } func (rl *RedissionRWLocker) RLock(timeout ...time.Duration) error { @@ -43,7 +41,7 @@ func (rl *RedissionRWLocker) RLock(timeout ...time.Duration) error { subMsg := make(chan struct{}, 1) defer close(subMsg) - sub := rl.client.Subscribe(rl.writeWaitChanKey) + sub := rl.client.Subscribe(rl.waitChanKey) defer sub.Close() go rl.subscribeLock(sub, subMsg) @@ -76,13 +74,13 @@ func (rl *RedissionRWLocker) RLock(timeout ...time.Duration) error { } } } - return fmt.Errorf("lock read lock failed:%w", result) + return fmt.Errorf("lock the redis read lock failed:%w", result) } func (rl *RedissionRWLocker) tryRLock() error { lockType := constant.LockType - res := rl.client.Eval(luascript.RLockScript, []string{rl.key, rl.rwTimeoutPrefix}, rl.lockLeaseTime, rl.token) + res := rl.client.Eval(luascript.RLockScript, []string{rl.key, rl.rwTokenTimeoutPrefix}, rl.lockLeaseTime, rl.token) val, err := res.Int() if err != redis.Nil && err != nil { return constant.NewRedisResult(constant.UnknownInternalError, lockType, err.Error()) @@ -91,7 +89,7 @@ func (rl *RedissionRWLocker) tryRLock() error { } func (rl *RedissionRWLocker) refreshLockTimeout() { - rl.logger.Info("read lock refresh by key and token", zap.String("token", rl.token), zap.String("key", rl.key)) + rl.logger.Info("lock refresh by key and token", zap.String("token", rl.token), zap.String("key", rl.key)) lockTime := time.Duration(rl.lockLeaseTime/3) * time.Second timer := time.NewTimer(lockTime) @@ -101,20 +99,20 @@ func (rl *RedissionRWLocker) refreshLockTimeout() { select { case <-timer.C: // extend key lease time - res := 
rl.client.Eval(luascript.RefreshLockScript, []string{rl.key, rl.rwTimeoutPrefix}, rl.lockLeaseTime, rl.token) + res := rl.client.Eval(luascript.RefreshRWLockScript, []string{rl.key, rl.rwTokenTimeoutPrefix}, rl.lockLeaseTime, rl.token) val, err := res.Int() if err != redis.Nil && err != nil { - rl.logger.Info("read lock refresh failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err)) + rl.logger.Info("lock refresh failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err)) return } if constant.RedisCode(val) == constant.RefreshLockFailure { - rl.logger.Error("read lock refreash failed,can not find the read lock by key and token", zap.String("token", rl.token), zap.String("key", rl.key)) + rl.logger.Error("lock refreash failed,can not find the read lock by key and token", zap.String("token", rl.token), zap.String("key", rl.key)) break } if constant.RedisCode(val) == constant.RefreshLockSuccess { - rl.logger.Info("read lock refresh success by key and token", zap.String("token", rl.token), zap.String("key", rl.key)) + rl.logger.Info("lock refresh success by key and token", zap.String("token", rl.token), zap.String("key", rl.key)) } timer.Reset(lockTime) case <-rl.exit: @@ -124,11 +122,11 @@ func (rl *RedissionRWLocker) refreshLockTimeout() { } func (rl *RedissionRWLocker) UnRLock() error { - res := rl.client.Eval(luascript.UnRLockScript, []string{rl.key, rl.rwTimeoutPrefix, rl.writeWaitChanKey}, unlockMessage, rl.token) + res := rl.client.Eval(luascript.UnRLockScript, []string{rl.key, rl.rwTokenTimeoutPrefix, rl.waitChanKey}, unlockMessage, rl.token) val, err := res.Int() if err != redis.Nil && err != nil { rl.logger.Info("unlock read lock failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err)) - return fmt.Errorf("unlock read lock failed:%w", constant.NewRedisResult(constant.UnknownInternalError, constant.LockType, err.Error())) + return fmt.Errorf("unlock read lock failed:%w", constant.NewRedisResult(constant.UnknownInternalError, constant.UnLockType, err.Error())) } if (constant.RedisCode(val) == constant.UnLockSuccess) || (constant.RedisCode(val) == constant.UnRLockSuccess) { @@ -168,7 +166,7 @@ func (rl *RedissionRWLocker) WLock(timeout ...time.Duration) error { subMsg := make(chan struct{}, 1) defer close(subMsg) - sub := rl.client.Subscribe(rl.writeWaitChanKey) + sub := rl.client.Subscribe(rl.waitChanKey) defer sub.Close() go rl.subscribeLock(sub, subMsg) @@ -206,7 +204,7 @@ func (rl *RedissionRWLocker) WLock(timeout ...time.Duration) error { func (rl *RedissionRWLocker) tryWLock() error { lockType := constant.LockType - res := rl.client.Eval(luascript.WLockScript, []string{rl.key, rl.rwTimeoutPrefix}, rl.lockLeaseTime, rl.token) + res := rl.client.Eval(luascript.WLockScript, []string{rl.key, rl.rwTokenTimeoutPrefix}, rl.lockLeaseTime, rl.token) val, err := res.Int() if err != redis.Nil && err != nil { return constant.NewRedisResult(constant.UnknownInternalError, lockType, err.Error()) @@ -215,7 +213,7 @@ func (rl *RedissionRWLocker) tryWLock() error { } func (rl *RedissionRWLocker) UnWLock() error { - res := rl.client.Eval(luascript.UnWLockScript, []string{rl.key, rl.rwTimeoutPrefix, rl.waitChanKey}, unlockMessage, rl.token) + res := rl.client.Eval(luascript.UnWLockScript, []string{rl.key, rl.rwTokenTimeoutPrefix, rl.waitChanKey}, unlockMessage, rl.token) val, err := res.Int() if err != redis.Nil && err != nil { rl.logger.Info("unlock write lock failed", zap.String("token", rl.token), zap.String("key", 
rl.key), zap.Error(err)) @@ -239,16 +237,21 @@ func (rl *RedissionRWLocker) UnWLock() error { func GetRWLocker(client *redis.Client, ops *RedissionLockConfig) *RedissionRWLocker { r := &redissionLocker{ - token: uuid.New().String(), - client: client, - exit: make(chan struct{}), - logger: logger.GetLoggerInstance(), + token: uuid.New().String(), + needRefresh: true, + client: client, + exit: make(chan struct{}), + logger: logger.GetLoggerInstance(), } if len(ops.Prefix) <= 0 { ops.Prefix = "redission-rwlock" } + if len(ops.TimeoutPrefix) <= 0 { + ops.TimeoutPrefix = "rwlock_timeout" + } + if len(ops.ChanPrefix) <= 0 { ops.ChanPrefix = "redission-rwlock-channel" } @@ -256,13 +259,13 @@ func GetRWLocker(client *redis.Client, ops *RedissionLockConfig) *RedissionRWLoc if ops.LockLeaseTime == 0 { r.lockLeaseTime = internalLockLeaseTime } + r.key = strings.Join([]string{ops.Prefix, ops.Key}, ":") + r.waitChanKey = strings.Join([]string{ops.ChanPrefix, ops.Key, "write"}, ":") rwLocker := &RedissionRWLocker{ - redissionLocker: *r, - writeWaitChanKey: strings.Join([]string{r.key, "write"}, ":"), - rwTimeoutPrefix: "rwlock_timeout", - needRefresh: true, + redissionLocker: *r, + rwTokenTimeoutPrefix: ops.TimeoutPrefix, } return rwLocker } From d404dc433570a61d5db1053f17f702b68ed4ed55 Mon Sep 17 00:00:00 2001 From: douxu Date: Tue, 11 Mar 2025 15:35:15 +0800 Subject: [PATCH 07/33] fix bug of lock script and refresh script in redission rw lock --- distributedlock/constant/redis_result.go | 2 +- distributedlock/luascript/rwlock_script.go | 18 +++++---- distributedlock/redis_lock.go | 3 +- distributedlock/redis_rwlock.go | 9 +++-- distributedlock/rwlock_test.go | 46 ++++++++++++++++++++++ 5 files changed, 66 insertions(+), 12 deletions(-) create mode 100644 distributedlock/rwlock_test.go diff --git a/distributedlock/constant/redis_result.go b/distributedlock/constant/redis_result.go index d389a39..dbcef53 100644 --- a/distributedlock/constant/redis_result.go +++ b/distributedlock/constant/redis_result.go @@ -81,7 +81,7 @@ func NewRedisResult(res RedisCode, lockType RedisLockType, redisMsg string) erro case -9: return &RedisResult{Code: res, Message: "redis lock failure,the lock is already occupied by another processes lock"} case -99: - return &RedisResult{Code: res, Message: "redis internal execution error"} + return &RedisResult{Code: res, Message: fmt.Sprintf("redis internal execution error:%v\n", redisMsg)} default: msg := "unkown redis execution result" if redisMsg != "" { diff --git a/distributedlock/luascript/rwlock_script.go b/distributedlock/luascript/rwlock_script.go index 814c559..2374e4b 100644 --- a/distributedlock/luascript/rwlock_script.go +++ b/distributedlock/luascript/rwlock_script.go @@ -14,7 +14,7 @@ local lockKey = KEYS[2] .. ':' .. 
ARGV[2]; if (mode == false) then redis.call('hset', KEYS[1], 'mode', 'read'); redis.call('hset', KEYS[1], lockKey, '1'); - redis.call('hexpire', KEYS[1], ARGV[1] 'fields' '1' lockKey); + redis.call('hexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey); redis.call('expire', KEYS[1], ARGV[1]); return 1; end; @@ -30,10 +30,10 @@ if (mode == 'read') then if (redis.call('exists', KEYS[1], ARGV[2]) == 1) then redis.call('hincrby', KEYS[1], lockKey, '1'); local remainTime = redis.call('httl', KEYS[1], 'fields', '1', lockKey); - redis.call('hexpire', key, math.max(remainTime, ARGV[1])); + redis.call('hexpire', KEYS[1], math.max(remainTime, ARGV[1]), 'fields', '1', lockKey); else redis.call('hset', KEYS[1], lockKey, '1'); - redis.call('hexpire', KEYS[1], lockKey, ARGV[1]); + redis.call('hexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey); end; local cursor = 0; local maxRemainTime = tonumber(ARGV[1]); @@ -142,7 +142,7 @@ if (mode == false) then end; redis.call('hset', KEYS[1], 'mode', 'write'); redis.call('hset', KEYS[1], lockKey, 1); - redis.call('hexpire', KEYS[1], ARGV[1] 'fields' '1' lockKey); + redis.call('hexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey); redis.call('expire', KEYS[1], ARGV[1]); redis.call('lpop', waitKey, '1') return 1; @@ -156,7 +156,7 @@ else local lockExists = redis.call('hexists', KEYS[1], lockKey) if (lockExists == 1) then redis.call('hincrby', KEYS[1], lockKey, 1); - redis.call('hexpire', KEYS[1], ARGV[1] 'fields' '1' lockKey); + redis.call('hexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey); redis.call('expire', KEYS[1], ARGV[1]); return 1; end; @@ -219,11 +219,11 @@ var RefreshRWLockScript = ` local lockKey = KEYS[2] .. ':' .. ARGV[2] local lockExists = redis.call('hexists', KEYS[1], lockKey); local mode = redis.call('hget', KEYS[1], 'mode') +local maxRemainTime = tonumber(ARGV[1]); if (lockExists == 1) then - redis.call('hexpire', KEYS[1], ARGV[1] 'fields' '1' lockKey); + redis.call('hexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey); if (mode == 'read' ) then local cursor = 0; - local maxRemainTime = tonumber(ARGV[1]); local pattern = KEYS[2] .. 
':*'; repeat local hscanResult = redis.call('hscan', KEYS[1], cursor, 'match', pattern, 'count', '100'); @@ -236,11 +236,15 @@ if (lockExists == 1) then maxRemainTime = math.max(tonumber(remainTime[1]), maxRemainTime); end; until cursor == 0; + if (maxRemainTime > 0) then local remainTime = redis.call('ttl', KEYS[1]); redis.call('expire', KEYS[1], math.max(tonumber(remainTime),maxRemainTime)); end; + elseif (mode == 'write') then + redis.call('expire', KEYS[1], ARGV[1]); end; + -- return redis.call('ttl',KEYS[1]); return 1; end; return -8; diff --git a/distributedlock/redis_lock.go b/distributedlock/redis_lock.go index 9653589..8e55b29 100644 --- a/distributedlock/redis_lock.go +++ b/distributedlock/redis_lock.go @@ -21,8 +21,9 @@ const ( unlockMessage = 0 ) +// RedissionLockConfig define redission lock config type RedissionLockConfig struct { - LockLeaseTime time.Duration + LockLeaseTime uint64 Prefix string ChanPrefix string TimeoutPrefix string diff --git a/distributedlock/redis_rwlock.go b/distributedlock/redis_rwlock.go index 529ff75..caee734 100644 --- a/distributedlock/redis_rwlock.go +++ b/distributedlock/redis_rwlock.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "strings" + "sync" "time" "modelRT/distributedlock/constant" @@ -107,8 +108,8 @@ func (rl *RedissionRWLocker) refreshLockTimeout() { } if constant.RedisCode(val) == constant.RefreshLockFailure { - rl.logger.Error("lock refreash failed,can not find the read lock by key and token", zap.String("token", rl.token), zap.String("key", rl.key)) - break + rl.logger.Error("lock refreash failed,can not find the read lock by key and token", zap.String("rwTokenPrefix", rl.rwTokenTimeoutPrefix), zap.String("token", rl.token), zap.String("key", rl.key)) + return } if constant.RedisCode(val) == constant.RefreshLockSuccess { @@ -241,6 +242,7 @@ func GetRWLocker(client *redis.Client, ops *RedissionLockConfig) *RedissionRWLoc needRefresh: true, client: client, exit: make(chan struct{}), + once: &sync.Once{}, logger: logger.GetLoggerInstance(), } @@ -257,10 +259,11 @@ func GetRWLocker(client *redis.Client, ops *RedissionLockConfig) *RedissionRWLoc } if ops.LockLeaseTime == 0 { - r.lockLeaseTime = internalLockLeaseTime + ops.LockLeaseTime = internalLockLeaseTime } r.key = strings.Join([]string{ops.Prefix, ops.Key}, ":") + r.lockLeaseTime = ops.LockLeaseTime r.waitChanKey = strings.Join([]string{ops.ChanPrefix, ops.Key, "write"}, ":") rwLocker := &RedissionRWLocker{ diff --git a/distributedlock/rwlock_test.go b/distributedlock/rwlock_test.go new file mode 100644 index 0000000..8ecaaaa --- /dev/null +++ b/distributedlock/rwlock_test.go @@ -0,0 +1,46 @@ +package distributed_lock + +import ( + "testing" + "time" + + "github.com/go-redis/redis" + "go.uber.org/zap" +) + +var log *zap.Logger + +func init() { + log = zap.Must(zap.NewDevelopment()) +} + +func TestRWLockReentrantLock(t *testing.T) { + rdb := redis.NewClient(&redis.Options{ + Network: "tcp", + Addr: "192.168.2.103:6379", + Password: "cnstar", + PoolSize: 50, + DialTimeout: 10 * time.Second, + }) + + rwLocker := GetRWLocker(rdb, &RedissionLockConfig{ + LockLeaseTime: 120, + Key: "component", + }) + + rwLocker.logger = log + t.Logf("%+v\n", rwLocker) + + duration := 10 * time.Second + // 第一次加读锁 + err := rwLocker.RLock(duration) + t.Logf("err:%+v\n", err) + // TODO 实现可重入读锁测试 + // rwLocker.UnRLock() + // // 第二次加读锁 + // rwLocker.RLock(duration) + // // 查看 redis 中相关 key 的值 + // rwLocker.UnRLock() + t.Log("test success") + select {} +} From 9381e547b6077fcddd7f922664ba813c2e40a5b3 Mon Sep 17 
00:00:00 2001 From: douxu Date: Tue, 11 Mar 2025 15:53:53 +0800 Subject: [PATCH 08/33] add ignore item in the .gitignore file --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index adf8f72..6487bf7 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,4 @@ # Go workspace file go.work +.vscode \ No newline at end of file From d962462c42fd01f5f61e1e05cd6e8088883c2c77 Mon Sep 17 00:00:00 2001 From: douxu Date: Wed, 12 Mar 2025 16:24:28 +0800 Subject: [PATCH 09/33] add rlock lock&unlock test and rlock reentrant test --- distributedlock/luascript/rwlock_script.go | 4 +- distributedlock/redis_lock.go | 29 ++++---- distributedlock/redis_rwlock.go | 46 ++++++------ distributedlock/rwlock_test.go | 81 +++++++++++++++++++--- go.mod | 3 + 5 files changed, 119 insertions(+), 44 deletions(-) diff --git a/distributedlock/luascript/rwlock_script.go b/distributedlock/luascript/rwlock_script.go index 2374e4b..956b909 100644 --- a/distributedlock/luascript/rwlock_script.go +++ b/distributedlock/luascript/rwlock_script.go @@ -30,7 +30,7 @@ if (mode == 'read') then if (redis.call('exists', KEYS[1], ARGV[2]) == 1) then redis.call('hincrby', KEYS[1], lockKey, '1'); local remainTime = redis.call('httl', KEYS[1], 'fields', '1', lockKey); - redis.call('hexpire', KEYS[1], math.max(remainTime, ARGV[1]), 'fields', '1', lockKey); + redis.call('hexpire', KEYS[1], math.max(tonumber(remainTime[1]), ARGV[1]), 'fields', '1', lockKey); else redis.call('hset', KEYS[1], lockKey, '1'); redis.call('hexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey); @@ -222,7 +222,7 @@ local mode = redis.call('hget', KEYS[1], 'mode') local maxRemainTime = tonumber(ARGV[1]); if (lockExists == 1) then redis.call('hexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey); - if (mode == 'read' ) then + if (mode == 'read') then local cursor = 0; local pattern = KEYS[2] .. 
':*'; repeat diff --git a/distributedlock/redis_lock.go index 8e55b29..651560a 100644 --- a/distributedlock/redis_lock.go +++ b/distributedlock/redis_lock.go @@ -24,10 +24,12 @@ const ( // RedissionLockConfig define redission lock config type RedissionLockConfig struct { LockLeaseTime uint64 + Token string Prefix string ChanPrefix string TimeoutPrefix string Key string + NeedRefresh bool } type redissionLocker struct { @@ -70,7 +72,6 @@ func (rl *redissionLocker) Lock(timeout ...time.Duration) error { acquireTimer := time.NewTimer(timeout[0]) for { select { - case _, ok := <-subMsg: if !ok { err := errors.New("failed to read the lock waiting for the channel message") @@ -113,7 +114,7 @@ func (rl *redissionLocker) subscribeLock(sub *redis.PubSub, out chan struct{}) { select { case <-rl.exit: - break + return default: @@ -161,7 +162,7 @@ } timer.Reset(lockTime) case <-rl.exit: - break + return } } } @@ -219,12 +220,8 @@ func (rl *redissionLocker) UnLock() error { } func GetLocker(client *redis.Client, ops *RedissionLockConfig) *redissionLocker { - r := &redissionLocker{ - token: uuid.New().String(), - needRefresh: true, - client: client, - exit: make(chan struct{}), - logger: logger.GetLoggerInstance(), + if ops.Token == "" { + ops.Token = uuid.New().String() } if len(ops.Prefix) <= 0 { @@ -236,9 +233,17 @@ } if ops.LockLeaseTime == 0 { - r.lockLeaseTime = internalLockLeaseTime + ops.LockLeaseTime = internalLockLeaseTime + } + + r := &redissionLocker{ + token: ops.Token, + key: strings.Join([]string{ops.Prefix, ops.Key}, ":"), + waitChanKey: strings.Join([]string{ops.ChanPrefix, ops.Key, "wait"}, ":"), + needRefresh: ops.NeedRefresh, + client: client, + exit: make(chan struct{}), + logger: logger.GetLoggerInstance(), } - r.key = strings.Join([]string{ops.Prefix, ops.Key}, ":") - r.waitChanKey = strings.Join([]string{ops.ChanPrefix, ops.Key, "wait"}, ":") return r } diff --git a/distributedlock/redis_rwlock.go index caee734..7cf89f5 100644 --- a/distributedlock/redis_rwlock.go +++ b/distributedlock/redis_rwlock.go @@ -32,11 +32,14 @@ func (rl *RedissionRWLocker) RLock(timeout ...time.Duration) error { return fmt.Errorf("get read lock failed:%w", result) } - if (result.Code == constant.LockSuccess) && rl.needRefresh { - rl.once.Do(func() { - // async refresh lock timeout unitl receive exit singal - go rl.refreshLockTimeout() - }) + if result.Code == constant.LockSuccess { + if rl.needRefresh { + rl.once.Do(func() { + // async refresh lock timeout until receiving the exit signal + go rl.refreshLockTimeout() + }) + } + rl.logger.Info("successfully got the read lock by key and token", zap.String("key", rl.key), zap.String("token", rl.token)) return nil } @@ -50,7 +53,6 @@ acquireTimer := time.NewTimer(timeout[0]) for { select { - case _, ok := <-subMsg: if !ok { err := errors.New("failed to read the read lock waiting for the channel message") @@ -117,12 +119,13 @@ } timer.Reset(lockTime) case <-rl.exit: - break + return } } } func (rl *RedissionRWLocker) UnRLock() error { + rl.logger.Info("unlock RLock by key and token", zap.String("key", rl.key), zap.String("token", rl.token)) res := rl.client.Eval(luascript.UnRLockScript, []string{rl.key, 
rl.rwTokenTimeoutPrefix, rl.waitChanKey}, unlockMessage, rl.token) val, err := res.Int() if err != redis.Nil && err != nil { @@ -237,24 +240,19 @@ func (rl *RedissionRWLocker) UnWLock() error { } func GetRWLocker(client *redis.Client, ops *RedissionLockConfig) *RedissionRWLocker { - r := &redissionLocker{ - token: uuid.New().String(), - needRefresh: true, - client: client, - exit: make(chan struct{}), - once: &sync.Once{}, - logger: logger.GetLoggerInstance(), + if ops.Token == "" { + ops.Token = uuid.New().String() } - if len(ops.Prefix) <= 0 { + if ops.Prefix == "" { ops.Prefix = "redission-rwlock" } - if len(ops.TimeoutPrefix) <= 0 { + if ops.TimeoutPrefix == "" { ops.TimeoutPrefix = "rwlock_timeout" } - if len(ops.ChanPrefix) <= 0 { + if ops.ChanPrefix == "" { ops.ChanPrefix = "redission-rwlock-channel" } @@ -262,9 +260,17 @@ func GetRWLocker(client *redis.Client, ops *RedissionLockConfig) *RedissionRWLoc ops.LockLeaseTime = internalLockLeaseTime } - r.key = strings.Join([]string{ops.Prefix, ops.Key}, ":") - r.lockLeaseTime = ops.LockLeaseTime - r.waitChanKey = strings.Join([]string{ops.ChanPrefix, ops.Key, "write"}, ":") + r := &redissionLocker{ + token: ops.Token, + key: strings.Join([]string{ops.Prefix, ops.Key}, ":"), + needRefresh: ops.NeedRefresh, + lockLeaseTime: ops.LockLeaseTime, + waitChanKey: strings.Join([]string{ops.ChanPrefix, ops.Key, "write"}, ":"), + client: client, + exit: make(chan struct{}), + once: &sync.Once{}, + logger: logger.GetLoggerInstance(), + } rwLocker := &RedissionRWLocker{ redissionLocker: *r, diff --git a/distributedlock/rwlock_test.go b/distributedlock/rwlock_test.go index 8ecaaaa..b3fc342 100644 --- a/distributedlock/rwlock_test.go +++ b/distributedlock/rwlock_test.go @@ -1,10 +1,12 @@ package distributed_lock import ( + "strings" "testing" "time" "github.com/go-redis/redis" + "github.com/stretchr/testify/assert" "go.uber.org/zap" ) @@ -14,6 +16,44 @@ func init() { log = zap.Must(zap.NewDevelopment()) } +func TestRWLockRLockAndUnRLock(t *testing.T) { + rdb := redis.NewClient(&redis.Options{ + Network: "tcp", + Addr: "192.168.2.103:6379", + Password: "cnstar", + PoolSize: 50, + DialTimeout: 10 * time.Second, + }) + + rwLocker := GetRWLocker(rdb, &RedissionLockConfig{ + LockLeaseTime: 120, + NeedRefresh: true, + Key: "component", + Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", + }) + rwLocker.logger = log + + duration := 10 * time.Second + // 第一次加读锁 + err := rwLocker.RLock(duration) + assert.Equal(t, nil, err) + + tokenKey := strings.Join([]string{rwLocker.rwTokenTimeoutPrefix, rwLocker.token}, ":") + num, err := rdb.HGet(rwLocker.key, tokenKey).Int() + assert.Equal(t, nil, err) + assert.Equal(t, 1, num) + + err = rwLocker.UnRLock() + assert.Equal(t, nil, err) + + num, err = rdb.HGet(rwLocker.key, tokenKey).Int() + assert.Equal(t, redis.Nil, err) + assert.Equal(t, 0, num) + t.Log("test success") + return +} + +// TODO 实现可重入读锁测试 func TestRWLockReentrantLock(t *testing.T) { rdb := redis.NewClient(&redis.Options{ Network: "tcp", @@ -25,22 +65,43 @@ func TestRWLockReentrantLock(t *testing.T) { rwLocker := GetRWLocker(rdb, &RedissionLockConfig{ LockLeaseTime: 120, + NeedRefresh: true, Key: "component", + Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", }) - rwLocker.logger = log - t.Logf("%+v\n", rwLocker) duration := 10 * time.Second // 第一次加读锁 err := rwLocker.RLock(duration) - t.Logf("err:%+v\n", err) - // TODO 实现可重入读锁测试 - // rwLocker.UnRLock() - // // 第二次加读锁 - // rwLocker.RLock(duration) - // // 查看 redis 中相关 key 的值 - // rwLocker.UnRLock() + assert.Equal(t, 
nil, err) + + tokenKey := strings.Join([]string{rwLocker.rwTokenTimeoutPrefix, rwLocker.token}, ":") + num, err := rdb.HGet(rwLocker.key, tokenKey).Int() + assert.Equal(t, nil, err) + assert.Equal(t, 1, num) + + // 第二次加读锁 + err = rwLocker.RLock(duration) + assert.Equal(t, nil, err) + + num, err = rdb.HGet(rwLocker.key, tokenKey).Int() + assert.Equal(t, nil, err) + assert.Equal(t, 2, num) + + err = rwLocker.UnRLock() + assert.Equal(t, nil, err) + + num, err = rdb.HGet(rwLocker.key, tokenKey).Int() + assert.Equal(t, redis.Nil, err) + assert.Equal(t, 1, num) + + err = rwLocker.UnRLock() + assert.Equal(t, nil, err) + + num, err = rdb.HGet(rwLocker.key, tokenKey).Int() + assert.Equal(t, redis.Nil, err) + assert.Equal(t, 0, num) t.Log("test success") - select {} + return } diff --git a/go.mod b/go.mod index 3d655e8..21444d7 100644 --- a/go.mod +++ b/go.mod @@ -14,6 +14,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/natefinch/lumberjack v2.0.0+incompatible github.com/panjf2000/ants/v2 v2.10.0 github.com/spf13/viper v1.19.0 + github.com/stretchr/testify v1.9.0 github.com/swaggo/files v1.0.1 github.com/swaggo/gin-swagger v1.6.0 github.com/swaggo/swag v1.16.4 @@ -30,6 +31,7 @@ require ( github.com/bytedance/sonic/loader v0.2.1 // indirect github.com/cloudwego/base64x v0.1.4 // indirect github.com/cloudwego/iasm v0.2.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/gabriel-vasile/mimetype v1.4.7 // indirect github.com/gin-contrib/sse v0.1.0 // indirect @@ -60,6 +62,7 @@ require ( github.com/onsi/ginkgo v1.16.5 // indirect github.com/onsi/gomega v1.18.1 // indirect github.com/pelletier/go-toml/v2 v2.2.3 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sourcegraph/conc v0.3.0 // indirect From 7b282c49f7bc211b3692af19241b21033472d03e Mon Sep 17 00:00:00 2001 From: douxu Date: Thu, 13 Mar 2025 16:51:50 +0800 Subject: [PATCH 10/33] fix(lock script): fix bug of lock script 1.fix bug of reset time wrong with ReentrantRLock in RLock script 2.fix bug of write lock failing to lock for the first time 3.fix bug of unlock failed with ReentrantWLock in UnWLock script test(lock script): add new test of RLock and WLock 1.add refresh test of RLock 2.add new test of ReentrantWLock --- distributedlock/constant/redis_result.go | 19 ++- distributedlock/luascript/rwlock_script.go | 53 ++++--- distributedlock/redis_rwlock.go | 14 +- distributedlock/rwlock_test.go | 152 ++++++++++++++++++++- 4 files changed, 202 insertions(+), 36 deletions(-) diff --git a/distributedlock/constant/redis_result.go b/distributedlock/constant/redis_result.go index dbcef53..7c689fe 100644 --- a/distributedlock/constant/redis_result.go +++ b/distributedlock/constant/redis_result.go @@ -11,6 +11,7 @@ const ( UnLockSuccess = RedisCode(1) RefreshLockSuccess = RedisCode(1) UnRLockSuccess = RedisCode(0) + UnWLockSuccess = RedisCode(0) RLockFailureWithWLockOccupancy = RedisCode(-1) UnRLockFailureWithWLockOccupancy = RedisCode(-2) WLockFailureWithRLockOccupancy = RedisCode(-3) @@ -28,6 +29,8 @@ type RedisLockType int const ( LockType = RedisLockType(iota) + UnRLockType + UnWLockType UnLockType RefreshLockType ) @@ -55,13 +58,17 @@ func NewRedisResult(res RedisCode, lockType RedisLockType, redisMsg string) erro case 1: if lockType == LockType { return &RedisResult{Code: res, Message: "redis lock success"} - } else 
if lockType == UnLockType { + } else if (lockType == UnRLockType) || (lockType == UnWLockType) || (lockType == UnLockType) { return &RedisResult{Code: res, Message: "redis unlock success"} } else { return &RedisResult{Code: res, Message: "redis refresh lock success"} } case 0: - return &RedisResult{Code: res, Message: "redis unlock read lock success, the lock is still occupied by other processes read lock"} + if lockType == UnRLockType { + return &RedisResult{Code: res, Message: "redis unlock read lock success, the lock is still occupied by other processes read lock"} + } else { + return &RedisResult{Code: res, Message: "redis unlock write lock success, the lock is still occupied by other processes write lock"} + } case -1: return &RedisResult{Code: res, Message: "redis lock read lock failure,the lock is already occupied by another processes write lock"} case -2: @@ -97,13 +104,17 @@ func TranslateResultToStr(res RedisCode, lockType RedisLockType) string { case 1: if lockType == LockType { return "redis lock success" - } else if lockType == UnLockType { + } else if (lockType == UnRLockType) || (lockType == UnWLockType) || (lockType == UnLockType) { return "redis unlock success" } else { return "redis refresh lock success" } case 0: - return "redis unlock read lock success, the lock is still occupied by other processes read lock" + if lockType == UnRLockType { + return "redis unlock read lock success, the lock is still occupied by other processes read lock" + } else { + return "redis unlock write lock success, the lock is still occupied by other processes write lock" + } case -1: return "redis lock read lock failure,the lock is already occupied by another processes write lock" case -2: diff --git a/distributedlock/luascript/rwlock_script.go b/distributedlock/luascript/rwlock_script.go index 956b909..a103ecc 100644 --- a/distributedlock/luascript/rwlock_script.go +++ b/distributedlock/luascript/rwlock_script.go @@ -70,7 +70,7 @@ local mode = redis.call('hget', KEYS[1], 'mode'); if (mode == false) then local writeWait = KEYS[1] .. ':write'; -- 优先写锁加锁,无写锁的情况通知读锁加锁 - local counter = redis.call('llen',writeWait) + local counter = redis.call('llen',writeWait); if (counter >= 1) then redis.call('publish', KEYS[4], ARGV[1]); end; @@ -86,6 +86,8 @@ if ((mode == 'read') and (lockExists == 0)) then end; local counter = redis.call('hincrby', KEYS[1], lockKey, -1); +local delTTLs = redis.call('httl', KEYS[1], 'fields', '1', lockKey); +local delTTL = tonumber(delTTLs[1]); if (counter == 0) then redis.call('hdel', KEYS[1], lockKey); end; @@ -107,20 +109,24 @@ if (redis.call('hlen', KEYS[1]) > 1) then until cursor == 0; if (maxRemainTime > 0) then - local remainTime = redis.call('ttl', KEYS[1]); - redis.call('expire', KEYS[1], math.max(tonumber(remainTime),maxRemainTime)); + if (delTTL > maxRemainTime) then + redis.call('expire', KEYS[1], maxRemainTime); + else + local remainTime = redis.call('ttl', KEYS[1]); + redis.call('expire', KEYS[1], math.max(tonumber(remainTime),maxRemainTime)); + end; end; else redis.call('del', KEYS[1]); local writeWait = KEYS[1] .. ':write'; -- 优先写锁加锁,无写锁的情况通知读锁加锁 - local counter = redis.call('llen',writeWait) + local counter = redis.call('llen',writeWait); if (counter >= 1) then redis.call('publish', KEYS[4], ARGV[1]); else redis.call('publish', KEYS[3], ARGV[1]); end; - return 1; + return 1; end; ` @@ -136,15 +142,18 @@ local mode = redis.call('hget', KEYS[1], 'mode'); local lockKey = KEYS[2] .. ':' .. ARGV[2]; local waitKey = KEYS[1] .. 
':write'; if (mode == false) then - local firstToken = redis.call('lindex', waitKey,'0') - if (firstToken ~= ARGV[2]) then - return -7; + local waitListLen = redis.call('llen', waitKey); + if (waitListLen > 0) then + local firstToken = redis.call('lindex', waitKey,'0'); + if (firstToken ~= ARGV[2]) then + return -7; + end; end; redis.call('hset', KEYS[1], 'mode', 'write'); redis.call('hset', KEYS[1], lockKey, 1); redis.call('hexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey); redis.call('expire', KEYS[1], ARGV[1]); - redis.call('lpop', waitKey, '1') + redis.call('lpop', waitKey, '1'); return 1; elseif (mode == 'read') then -- 放到 list 中等待读锁释放后再次尝试加锁并且订阅读锁释放的消息 @@ -152,8 +161,8 @@ elseif (mode == 'read') then return -3; else -- 可重入写锁逻辑 - local lockKey = KEYS[2] .. ':' .. ARGV[2] - local lockExists = redis.call('hexists', KEYS[1], lockKey) + local lockKey = KEYS[2] .. ':' .. ARGV[2]; + local lockExists = redis.call('hexists', KEYS[1], lockKey); if (lockExists == 1) then redis.call('hincrby', KEYS[1], lockKey, 1); redis.call('hexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey); @@ -180,7 +189,7 @@ local mode = redis.call('hget', KEYS[1], 'mode'); local writeWait = KEYS[1] .. ':write'; if (mode == false) then -- 优先写锁加锁,无写锁的情况通知读锁加锁 - local counter = redis.call('llen',writeWait) + local counter = redis.call('llen',writeWait); if (counter >= 1) then redis.call('publish', KEYS[3], ARGV[1]); end; @@ -188,23 +197,25 @@ if (mode == false) then elseif (mode == 'read') then return -5; else - -- 可重入写锁逻辑 - local lockKey = KEYS[2] .. ':' .. ARGV[2] - local lockExists = redis.call('hexists', KEYS[1], lockKey) - if (lockExists == 1) then + local lockKey = KEYS[2] .. ':' .. ARGV[2]; + local lockExists = redis.call('hexists', KEYS[1], lockKey); + if (lockExists >= 1) then + -- 可重入写锁逻辑 local incrRes = redis.call('hincrby', KEYS[1], lockKey, -1); if (incrRes == 0) then redis.call('del', KEYS[1]); - local counter = redis.call('llen',writeWait) + local counter = redis.call('llen',writeWait); if (counter >= 1) then redis.call('publish', KEYS[4], ARGV[1]); else redis.call('publish', KEYS[3], ARGV[1]); end; - return 1 + return 1; end; + return 0; + else + return -6; end; - return -6; end; ` @@ -216,9 +227,9 @@ ARGV[1]:锁的过期时间(lockLeaseTime),单位为秒。 ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 */ var RefreshRWLockScript = ` -local lockKey = KEYS[2] .. ':' .. ARGV[2] +local lockKey = KEYS[2] .. ':' .. 
ARGV[2]; local lockExists = redis.call('hexists', KEYS[1], lockKey); -local mode = redis.call('hget', KEYS[1], 'mode') +local mode = redis.call('hget', KEYS[1], 'mode'); local maxRemainTime = tonumber(ARGV[1]); if (lockExists == 1) then redis.call('hexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey); diff --git a/distributedlock/redis_rwlock.go b/distributedlock/redis_rwlock.go index 7cf89f5..42b338e 100644 --- a/distributedlock/redis_rwlock.go +++ b/distributedlock/redis_rwlock.go @@ -130,11 +130,11 @@ func (rl *RedissionRWLocker) UnRLock() error { val, err := res.Int() if err != redis.Nil && err != nil { rl.logger.Info("unlock read lock failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err)) - return fmt.Errorf("unlock read lock failed:%w", constant.NewRedisResult(constant.UnknownInternalError, constant.UnLockType, err.Error())) + return fmt.Errorf("unlock read lock failed:%w", constant.NewRedisResult(constant.UnknownInternalError, constant.UnRLockType, err.Error())) } if (constant.RedisCode(val) == constant.UnLockSuccess) || (constant.RedisCode(val) == constant.UnRLockSuccess) { - if rl.needRefresh { + if rl.needRefresh && (constant.RedisCode(val) == constant.UnLockSuccess) { rl.cancelRefreshLockTime() } @@ -144,7 +144,7 @@ func (rl *RedissionRWLocker) UnRLock() error { if constant.RedisCode(val) == constant.UnRLockFailureWithWLockOccupancy { rl.logger.Info("unlock read lock failed", zap.String("token", rl.token), zap.String("key", rl.key)) - return fmt.Errorf("unlock read lock failed:%w", constant.NewRedisResult(constant.UnRLockFailureWithWLockOccupancy, constant.UnLockType, "")) + return fmt.Errorf("unlock read lock failed:%w", constant.NewRedisResult(constant.UnRLockFailureWithWLockOccupancy, constant.UnRLockType, "")) } return nil } @@ -221,11 +221,11 @@ func (rl *RedissionRWLocker) UnWLock() error { val, err := res.Int() if err != redis.Nil && err != nil { rl.logger.Info("unlock write lock failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err)) - return fmt.Errorf("unlock write lock failed:%w", constant.NewRedisResult(constant.UnknownInternalError, constant.UnLockType, err.Error())) + return fmt.Errorf("unlock write lock failed:%w", constant.NewRedisResult(constant.UnknownInternalError, constant.UnWLockType, err.Error())) } - if constant.RedisCode(val) == constant.UnLockSuccess { - if rl.needRefresh { + if (constant.RedisCode(val) == constant.UnLockSuccess) || constant.RedisCode(val) == constant.UnWLockSuccess { + if rl.needRefresh && (constant.RedisCode(val) == constant.UnLockSuccess) { rl.cancelRefreshLockTime() } rl.logger.Info("unlock write lock success", zap.String("token", rl.token), zap.String("key", rl.key)) @@ -234,7 +234,7 @@ func (rl *RedissionRWLocker) UnWLock() error { if (constant.RedisCode(val) == constant.UnWLockFailureWithRLockOccupancy) || (constant.RedisCode(val) == constant.UnWLockFailureWithWLockOccupancy) { rl.logger.Info("unlock write lock failed", zap.String("token", rl.token), zap.String("key", rl.key)) - return fmt.Errorf("unlock write lock failed:%w", constant.NewRedisResult(constant.RedisCode(val), constant.UnLockType, "")) + return fmt.Errorf("unlock write lock failed:%w", constant.NewRedisResult(constant.RedisCode(val), constant.UnWLockType, "")) } return nil } diff --git a/distributedlock/rwlock_test.go b/distributedlock/rwlock_test.go index b3fc342..b91090c 100644 --- a/distributedlock/rwlock_test.go +++ b/distributedlock/rwlock_test.go @@ -49,12 +49,11 @@ func TestRWLockRLockAndUnRLock(t 
*testing.T) { num, err = rdb.HGet(rwLocker.key, tokenKey).Int() assert.Equal(t, redis.Nil, err) assert.Equal(t, 0, num) - t.Log("test success") + t.Log("rwLock rlock and unrlock test success") return } -// TODO 实现可重入读锁测试 -func TestRWLockReentrantLock(t *testing.T) { +func TestRWLockReentrantRLock(t *testing.T) { rdb := redis.NewClient(&redis.Options{ Network: "tcp", Addr: "192.168.2.103:6379", @@ -89,19 +88,164 @@ func TestRWLockReentrantLock(t *testing.T) { assert.Equal(t, nil, err) assert.Equal(t, 2, num) + // 第一次解读锁 + err = rwLocker.UnRLock() + assert.Equal(t, nil, err) + + num, err = rdb.HGet(rwLocker.key, tokenKey).Int() + assert.Equal(t, nil, err) + assert.Equal(t, 1, num) + + // 第二次解读锁 err = rwLocker.UnRLock() assert.Equal(t, nil, err) num, err = rdb.HGet(rwLocker.key, tokenKey).Int() assert.Equal(t, redis.Nil, err) + assert.Equal(t, 0, num) + t.Log("rwLock reentrant lock test success") + return +} + +func TestRWLockRefreshRLock(t *testing.T) { + rdb := redis.NewClient(&redis.Options{ + Network: "tcp", + Addr: "192.168.2.103:6379", + Password: "cnstar", + PoolSize: 50, + DialTimeout: 10 * time.Second, + }) + + rwLocker := GetRWLocker(rdb, &RedissionLockConfig{ + LockLeaseTime: 10, + NeedRefresh: true, + Key: "component", + Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", + }) + rwLocker.logger = log + + duration := 10 * time.Second + // 第一次加读锁 + err := rwLocker.RLock(duration) + assert.Equal(t, nil, err) + + tokenKey := strings.Join([]string{rwLocker.rwTokenTimeoutPrefix, rwLocker.token}, ":") + num, err := rdb.HGet(rwLocker.key, tokenKey).Int() + assert.Equal(t, nil, err) assert.Equal(t, 1, num) + time.Sleep(10 * time.Second) + script := `return redis.call('httl', KEYS[1], 'fields', '1', ARGV[1]);` + result, err := rdb.Eval(script, []string{rwLocker.key}, tokenKey).Result() + assert.Equal(t, nil, err) + ttls, ok := result.([]interface{}) + assert.Equal(t, true, ok) + ttl, ok := ttls[0].(int64) + assert.Equal(t, true, ok) + compareValue := int64(8) + assert.Greater(t, ttl, compareValue) + err = rwLocker.UnRLock() assert.Equal(t, nil, err) num, err = rdb.HGet(rwLocker.key, tokenKey).Int() assert.Equal(t, redis.Nil, err) assert.Equal(t, 0, num) - t.Log("test success") + t.Log("rwLock refresh lock test success") + return +} + +// TODO 设计两个客户端分别加读锁,测试是否可以加锁成功 +// TODO 设计两个客户端分别加时间不同的读锁,测试ttl时间在有一个key删除后是否可以变换成功 + +func TestRWLockWLockAndUnWLock(t *testing.T) { + rdb := redis.NewClient(&redis.Options{ + Network: "tcp", + Addr: "192.168.2.103:6379", + Password: "cnstar", + PoolSize: 50, + DialTimeout: 10 * time.Second, + }) + + rwLocker := GetRWLocker(rdb, &RedissionLockConfig{ + LockLeaseTime: 120, + NeedRefresh: true, + Key: "component", + Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", + }) + rwLocker.logger = log + + duration := 10 * time.Second + // 第一次加读锁 + err := rwLocker.WLock(duration) + assert.Equal(t, nil, err) + + tokenKey := strings.Join([]string{rwLocker.rwTokenTimeoutPrefix, rwLocker.token}, ":") + num, err := rdb.HGet(rwLocker.key, tokenKey).Int() + assert.Equal(t, nil, err) + assert.Equal(t, 1, num) + + err = rwLocker.UnWLock() + assert.Equal(t, nil, err) + + num, err = rdb.HGet(rwLocker.key, tokenKey).Int() + assert.Equal(t, redis.Nil, err) + assert.Equal(t, 0, num) + t.Log("rwLock rlock and unrlock test success") + return +} + +// TODO 完成写锁可重入测试 +func TestRWLockReentrantWLock(t *testing.T) { + rdb := redis.NewClient(&redis.Options{ + Network: "tcp", + Addr: "192.168.2.103:6379", + Password: "cnstar", + PoolSize: 50, + DialTimeout: 10 * time.Second, + }) + + rwLocker := 
GetRWLocker(rdb, &RedissionLockConfig{ + LockLeaseTime: 120, + NeedRefresh: true, + Key: "component", + Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", + }) + rwLocker.logger = log + + duration := 10 * time.Second + // 第一次加写锁 + err := rwLocker.WLock(duration) + assert.Equal(t, nil, err) + + tokenKey := strings.Join([]string{rwLocker.rwTokenTimeoutPrefix, rwLocker.token}, ":") + num, err := rdb.HGet(rwLocker.key, tokenKey).Int() + assert.Equal(t, nil, err) + assert.Equal(t, 1, num) + + // 第二次加写锁 + err = rwLocker.WLock(duration) + assert.Equal(t, nil, err) + + num, err = rdb.HGet(rwLocker.key, tokenKey).Int() + assert.Equal(t, nil, err) + assert.Equal(t, 2, num) + + // 第一次解写锁 + err = rwLocker.UnWLock() + assert.Equal(t, nil, err) + + num, err = rdb.HGet(rwLocker.key, tokenKey).Int() + assert.Equal(t, nil, err) + assert.Equal(t, 1, num) + + // 第二次解写锁 + err = rwLocker.UnWLock() + assert.Equal(t, nil, err) + + num, err = rdb.HGet(rwLocker.key, tokenKey).Int() + assert.Equal(t, redis.Nil, err) + assert.Equal(t, 0, num) + t.Log("rwLock reentrant lock test success") return } From 13809b6a31073e4cca475e4b4a5d83da4011bad3 Mon Sep 17 00:00:00 2001 From: douxu Date: Mon, 17 Mar 2025 17:19:46 +0800 Subject: [PATCH 11/33] add new test of RWLock --- distributedlock/rwlock_test.go | 130 ++++++++++++++++++++++++++++++++- 1 file changed, 129 insertions(+), 1 deletion(-) diff --git a/distributedlock/rwlock_test.go b/distributedlock/rwlock_test.go index b91090c..03262d2 100644 --- a/distributedlock/rwlock_test.go +++ b/distributedlock/rwlock_test.go @@ -156,7 +156,136 @@ func TestRWLockRefreshRLock(t *testing.T) { } // TODO 设计两个客户端分别加读锁,测试是否可以加锁成功 +func TestRWLock2ClientRLock(t *testing.T) { + rdb := redis.NewClient(&redis.Options{ + Network: "tcp", + Addr: "192.168.2.103:6379", + Password: "cnstar", + PoolSize: 50, + DialTimeout: 10 * time.Second, + }) + + rwLocker1 := GetRWLocker(rdb, &RedissionLockConfig{ + LockLeaseTime: 120, + NeedRefresh: true, + Key: "component", + Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", + }) + rwLocker1.logger = log + + rwLocker2 := GetRWLocker(rdb, &RedissionLockConfig{ + LockLeaseTime: 120, + NeedRefresh: true, + Key: "component", + Token: "fd348a84-e07c-4a61-8c19-f753e6bc5577", + }) + rwLocker2.logger = log + + duration := 10 * time.Second + // locker1加读锁 + err := rwLocker1.RLock(duration) + assert.Equal(t, nil, err) + + tokenKey1 := strings.Join([]string{rwLocker1.rwTokenTimeoutPrefix, rwLocker1.token}, ":") + num, err := rdb.HGet(rwLocker1.key, tokenKey1).Int() + assert.Equal(t, nil, err) + assert.Equal(t, 1, num) + + // locker2加读锁 + err = rwLocker2.RLock(duration) + assert.Equal(t, nil, err) + + tokenKey2 := strings.Join([]string{rwLocker2.rwTokenTimeoutPrefix, rwLocker2.token}, ":") + num, err = rdb.HGet(rwLocker2.key, tokenKey2).Int() + assert.Equal(t, nil, err) + assert.Equal(t, 1, num) + + err = rdb.HLen(rwLocker1.key).Err() + assert.Equal(t, nil, err) + hLen := rdb.HLen(rwLocker1.key).Val() + assert.Equal(t, 3, hLen) + + // locker1解读锁 + err = rwLocker1.UnRLock() + assert.Equal(t, nil, err) + + // locker1解读锁 + err = rwLocker2.UnRLock() + assert.Equal(t, nil, err) + + err = rdb.Exists(rwLocker1.key).Err() + assert.Equal(t, redis.Nil, err) + existNum := rdb.Exists(rwLocker1.key).Val() + assert.Equal(t, 0, existNum) + t.Log("rwLock 2 client lock test success") + return +} + // TODO 设计两个客户端分别加时间不同的读锁,测试ttl时间在有一个key删除后是否可以变换成功 +func TestRWLock2CWith2DifTimeRLock(t *testing.T) { + rdb := redis.NewClient(&redis.Options{ + Network: "tcp", + Addr: "192.168.2.103:6379", + 
Password: "cnstar", + PoolSize: 50, + DialTimeout: 10 * time.Second, + }) + + rwLocker1 := GetRWLocker(rdb, &RedissionLockConfig{ + LockLeaseTime: 120, + NeedRefresh: true, + Key: "component", + Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", + }) + rwLocker1.logger = log + + rwLocker2 := GetRWLocker(rdb, &RedissionLockConfig{ + LockLeaseTime: 30, + NeedRefresh: true, + Key: "component", + Token: "fd348a84-e07c-4a61-8c19-f753e6bc5577", + }) + rwLocker2.logger = log + + duration := 10 * time.Second + // locker1加读锁 + err := rwLocker1.RLock(duration) + assert.Equal(t, nil, err) + + tokenKey1 := strings.Join([]string{rwLocker1.rwTokenTimeoutPrefix, rwLocker1.token}, ":") + num, err := rdb.HGet(rwLocker1.key, tokenKey1).Int() + assert.Equal(t, nil, err) + assert.Equal(t, 1, num) + + // locker2加读锁 + err = rwLocker2.RLock(duration) + assert.Equal(t, nil, err) + + tokenKey2 := strings.Join([]string{rwLocker2.rwTokenTimeoutPrefix, rwLocker2.token}, ":") + num, err = rdb.HGet(rwLocker2.key, tokenKey2).Int() + assert.Equal(t, nil, err) + assert.Equal(t, 1, num) + + err = rdb.HLen(rwLocker1.key).Err() + assert.Equal(t, nil, err) + hLen := rdb.HLen(rwLocker1.key).Val() + assert.Equal(t, 3, hLen) + + // locker1解读锁 + err = rwLocker1.UnRLock() + assert.Equal(t, nil, err) + + // locker1解读锁 + err = rwLocker2.UnRLock() + assert.Equal(t, nil, err) + + err = rdb.Exists(rwLocker1.key).Err() + assert.Equal(t, redis.Nil, err) + existNum := rdb.Exists(rwLocker1.key).Val() + assert.Equal(t, 0, existNum) + t.Log("rwLock 2 client lock test success") + return +} func TestRWLockWLockAndUnWLock(t *testing.T) { rdb := redis.NewClient(&redis.Options{ @@ -195,7 +324,6 @@ func TestRWLockWLockAndUnWLock(t *testing.T) { return } -// TODO 完成写锁可重入测试 func TestRWLockReentrantWLock(t *testing.T) { rdb := redis.NewClient(&redis.Options{ Network: "tcp", From 3d79993de257ae368afb6ce5f3b35cf552872741 Mon Sep 17 00:00:00 2001 From: douxu Date: Fri, 21 Mar 2025 16:21:33 +0800 Subject: [PATCH 12/33] init redis hash and redis set struct with rwlocker --- diagram/redis_hash.go | 89 +++++++++++++++++++++++++++++++++ diagram/redis_init.go | 21 ++++++++ diagram/redis_set.go | 89 +++++++++++++++++++++++++++++++++ distributedlock/redis_rwlock.go | 2 +- distributedlock/rwlock_test.go | 14 +++--- go.mod | 3 ++ 6 files changed, 210 insertions(+), 8 deletions(-) create mode 100644 diagram/redis_hash.go create mode 100644 diagram/redis_init.go create mode 100644 diagram/redis_set.go diff --git a/diagram/redis_hash.go b/diagram/redis_hash.go new file mode 100644 index 0000000..a12077c --- /dev/null +++ b/diagram/redis_hash.go @@ -0,0 +1,89 @@ +package diagram + +import ( + "context" + + locker "modelRT/distributedlock" + + "github.com/redis/go-redis/v9" + // "github.com/go-redis/redis" + + "go.uber.org/zap" +) + +// TODO 统一 storageClient与 rwLocker 中使用的 redis 版本 +// RedisHash defines the encapsulation struct of redis hash type +type RedisHash struct { + ctx context.Context + rwLocker *locker.RedissionRWLocker + storageClient *redis.Client + logger *zap.Logger +} + +// SetRedisHashByMap define func of set redis hash by map struct +func (rh *RedisHash) SetRedisHashByMap(hashKey string, fields map[string]interface{}) error { + err := rh.rwLocker.WLock() + if err != nil { + rh.logger.Error("lock wLock by hashKey failed", zap.String("hashKey", hashKey), zap.Error(err)) + return err + } + defer rh.rwLocker.UnWLock() + + err = rh.storageClient.HSet(rh.ctx, hashKey, fields).Err() + if err != nil { + rh.logger.Error("set hash by map failed", 
zap.String("hashKey", hashKey), zap.Any("fields", fields), zap.Error(err)) + return err + } + return nil +} + +// SetRedisHashByKV define func of set redis hash by kv struct +func (rh *RedisHash) SetRedisHashByKV(hashKey string, field string, value interface{}) error { + err := rh.rwLocker.WLock() + if err != nil { + rh.logger.Error("lock wLock by hashKey failed", zap.String("hashKey", hashKey), zap.Error(err)) + return err + } + defer rh.rwLocker.UnWLock() + + err = rh.storageClient.HSet(rh.ctx, hashKey, field, value).Err() + if err != nil { + rh.logger.Error("set hash by kv failed", zap.String("hashKey", hashKey), zap.String("field", field), zap.Any("value", value), zap.Error(err)) + return err + } + return nil +} + +// HGet define func of get specified field value from redis hash by key and field name +func (rh *RedisHash) HGet(hashKey string, field string) (string, error) { + err := rh.rwLocker.RLock() + if err != nil { + rh.logger.Error("lock rLock by hashKey failed", zap.String("hashKey", hashKey), zap.Error(err)) + return "", err + } + defer rh.rwLocker.UnRLock() + + result, err := rh.storageClient.HGet(rh.ctx, hashKey, field).Result() + if err != nil { + rh.logger.Error("set hash by kv failed", zap.String("hashKey", hashKey), zap.String("field", field), zap.Error(err)) + return "", err + } + return result, nil +} + +// HGetAll define func of get all filelds from redis hash by key +func (rh *RedisHash) HGetAll(hashKey string) (map[string]string, error) { + err := rh.rwLocker.RLock() + if err != nil { + rh.logger.Error("lock rLock by hashKey failed", zap.String("hashKey", hashKey), zap.Error(err)) + return nil, err + } + defer rh.rwLocker.UnRLock() + + result, err := rh.storageClient.HGetAll(rh.ctx, hashKey).Result() + if err != nil { + rh.logger.Error("get all hash field by hash key failed", zap.String("hashKey", hashKey), zap.Error(err)) + return nil, err + } + return result, nil +} diff --git a/diagram/redis_init.go b/diagram/redis_init.go new file mode 100644 index 0000000..1bb5923 --- /dev/null +++ b/diagram/redis_init.go @@ -0,0 +1,21 @@ +package diagram + +import ( + "sync" + + "github.com/redis/go-redis/v9" +) + +var ( + client *redis.Client + once sync.Once +) + +// GetClientInstance define func of get redis client instance +func GetClientInstance() *redis.Client { + once.Do(func() { + // TODO 根据配置文件初始化 redis client + client = &redis.Client{} + }) + return client +} diff --git a/diagram/redis_set.go b/diagram/redis_set.go new file mode 100644 index 0000000..6e9994a --- /dev/null +++ b/diagram/redis_set.go @@ -0,0 +1,89 @@ +package diagram + +import ( + "context" + "fmt" + + locker "modelRT/distributedlock" + + "github.com/redis/go-redis/v9" + "go.uber.org/zap" +) + +// TODO 统一 storageClient与 rwLocker 中使用的 redis 版本 +// RedisSet defines the encapsulation struct of redis hash type +type RedisSet struct { + ctx context.Context + rwLocker *locker.RedissionRWLocker + storageClient *redis.Client + logger *zap.Logger +} + +// SADD define func of add redis set by members +func (rs *RedisSet) SADD(setKey string, members ...interface{}) error { + err := rs.rwLocker.WLock() + if err != nil { + rs.logger.Error("lock wLock by setKey failed", zap.String("setKey", setKey), zap.Error(err)) + return err + } + defer rs.rwLocker.UnWLock() + + err = rs.storageClient.SAdd(rs.ctx, setKey, members).Err() + if err != nil { + rs.logger.Error("add set by memebers failed", zap.String("setKey", setKey), zap.Any("members", members), zap.Error(err)) + return err + } + return nil +} + +// SREM define func 
of remove the specified members from redis set by key +func (rh *RedisHash) SREM(setKey string, members ...interface{}) error { + err := rh.rwLocker.WLock() + if err != nil { + rh.logger.Error("lock wLock by setKey failed", zap.String("setKey", setKey), zap.Error(err)) + return err + } + defer rh.rwLocker.UnWLock() + + count, err := rh.storageClient.SRem(rh.ctx, setKey, members).Result() + if err != nil || count != int64(len(members)) { + rh.logger.Error("rem members from set failed", zap.String("setKey", setKey), zap.Any("members", members), zap.Error(err)) + + return fmt.Errorf("rem members from set failed:%w", err) + } + return nil +} + +// SMembers define func of get all memebers from redis set by key +func (rh *RedisHash) SMembers(setKey string) ([]string, error) { + err := rh.rwLocker.RLock() + if err != nil { + rh.logger.Error("lock rLock by setKey failed", zap.String("setKey", setKey), zap.Error(err)) + return nil, err + } + defer rh.rwLocker.UnRLock() + + result, err := rh.storageClient.SMembers(rh.ctx, setKey).Result() + if err != nil { + rh.logger.Error("get all hash field by hash key failed", zap.String("setKey", setKey), zap.Error(err)) + return nil, err + } + return result, nil +} + +// SIsMember define func of determine whether an member is in set by key +func (rh *RedisHash) SIsMember(setKey string, member interface{}) (bool, error) { + err := rh.rwLocker.RLock() + if err != nil { + rh.logger.Error("lock rLock by setKey failed", zap.String("setKey", setKey), zap.Error(err)) + return false, err + } + defer rh.rwLocker.UnRLock() + + result, err := rh.storageClient.SIsMember(rh.ctx, setKey, member).Result() + if err != nil { + rh.logger.Error("get all hash field by hash key failed", zap.String("setKey", setKey), zap.Error(err)) + return false, err + } + return result, nil +} diff --git a/distributedlock/redis_rwlock.go b/distributedlock/redis_rwlock.go index 42b338e..649b7f4 100644 --- a/distributedlock/redis_rwlock.go +++ b/distributedlock/redis_rwlock.go @@ -220,7 +220,7 @@ func (rl *RedissionRWLocker) UnWLock() error { res := rl.client.Eval(luascript.UnWLockScript, []string{rl.key, rl.rwTokenTimeoutPrefix, rl.waitChanKey}, unlockMessage, rl.token) val, err := res.Int() if err != redis.Nil && err != nil { - rl.logger.Info("unlock write lock failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err)) + rl.logger.Error("unlock write lock failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err)) return fmt.Errorf("unlock write lock failed:%w", constant.NewRedisResult(constant.UnknownInternalError, constant.UnWLockType, err.Error())) } diff --git a/distributedlock/rwlock_test.go b/distributedlock/rwlock_test.go index 03262d2..d23e460 100644 --- a/distributedlock/rwlock_test.go +++ b/distributedlock/rwlock_test.go @@ -19,7 +19,7 @@ func init() { func TestRWLockRLockAndUnRLock(t *testing.T) { rdb := redis.NewClient(&redis.Options{ Network: "tcp", - Addr: "192.168.2.103:6379", + Addr: "192.168.2.104:6379", Password: "cnstar", PoolSize: 50, DialTimeout: 10 * time.Second, @@ -56,7 +56,7 @@ func TestRWLockRLockAndUnRLock(t *testing.T) { func TestRWLockReentrantRLock(t *testing.T) { rdb := redis.NewClient(&redis.Options{ Network: "tcp", - Addr: "192.168.2.103:6379", + Addr: "192.168.2.104:6379", Password: "cnstar", PoolSize: 50, DialTimeout: 10 * time.Second, @@ -110,7 +110,7 @@ func TestRWLockReentrantRLock(t *testing.T) { func TestRWLockRefreshRLock(t *testing.T) { rdb := redis.NewClient(&redis.Options{ Network: "tcp", - Addr: 
"192.168.2.103:6379", + Addr: "192.168.2.104:6379", Password: "cnstar", PoolSize: 50, DialTimeout: 10 * time.Second, @@ -159,7 +159,7 @@ func TestRWLockRefreshRLock(t *testing.T) { func TestRWLock2ClientRLock(t *testing.T) { rdb := redis.NewClient(&redis.Options{ Network: "tcp", - Addr: "192.168.2.103:6379", + Addr: "192.168.2.104:6379", Password: "cnstar", PoolSize: 50, DialTimeout: 10 * time.Second, @@ -225,7 +225,7 @@ func TestRWLock2ClientRLock(t *testing.T) { func TestRWLock2CWith2DifTimeRLock(t *testing.T) { rdb := redis.NewClient(&redis.Options{ Network: "tcp", - Addr: "192.168.2.103:6379", + Addr: "192.168.2.104:6379", Password: "cnstar", PoolSize: 50, DialTimeout: 10 * time.Second, @@ -290,7 +290,7 @@ func TestRWLock2CWith2DifTimeRLock(t *testing.T) { func TestRWLockWLockAndUnWLock(t *testing.T) { rdb := redis.NewClient(&redis.Options{ Network: "tcp", - Addr: "192.168.2.103:6379", + Addr: "192.168.2.104:6379", Password: "cnstar", PoolSize: 50, DialTimeout: 10 * time.Second, @@ -327,7 +327,7 @@ func TestRWLockWLockAndUnWLock(t *testing.T) { func TestRWLockReentrantWLock(t *testing.T) { rdb := redis.NewClient(&redis.Options{ Network: "tcp", - Addr: "192.168.2.103:6379", + Addr: "192.168.2.104:6379", Password: "cnstar", PoolSize: 50, DialTimeout: 10 * time.Second, diff --git a/go.mod b/go.mod index 21444d7..5e07739 100644 --- a/go.mod +++ b/go.mod @@ -13,6 +13,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/natefinch/lumberjack v2.0.0+incompatible github.com/panjf2000/ants/v2 v2.10.0 + github.com/redis/go-redis/v9 v9.7.3 github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.9.0 github.com/swaggo/files v1.0.1 @@ -29,9 +30,11 @@ require ( github.com/KyleBanks/depth v1.2.1 // indirect github.com/bytedance/sonic v1.12.5 // indirect github.com/bytedance/sonic/loader v0.2.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cloudwego/base64x v0.1.4 // indirect github.com/cloudwego/iasm v0.2.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/gabriel-vasile/mimetype v1.4.7 // indirect github.com/gin-contrib/sse v0.1.0 // indirect From 25a55b94e8e88a237a3e1b810a55c34e4dc8d272 Mon Sep 17 00:00:00 2001 From: douxu Date: Fri, 21 Mar 2025 16:38:47 +0800 Subject: [PATCH 13/33] fix bug of structure pointer func --- diagram/redis_set.go | 31 ++++++++++++------------------- go.sum | 10 ++++++++++ 2 files changed, 22 insertions(+), 19 deletions(-) diff --git a/diagram/redis_set.go b/diagram/redis_set.go index 6e9994a..67691e1 100644 --- a/diagram/redis_set.go +++ b/diagram/redis_set.go @@ -37,17 +37,17 @@ func (rs *RedisSet) SADD(setKey string, members ...interface{}) error { } // SREM define func of remove the specified members from redis set by key -func (rh *RedisHash) SREM(setKey string, members ...interface{}) error { - err := rh.rwLocker.WLock() +func (rs *RedisSet) SREM(setKey string, members ...interface{}) error { + err := rs.rwLocker.WLock() if err != nil { - rh.logger.Error("lock wLock by setKey failed", zap.String("setKey", setKey), zap.Error(err)) + rs.logger.Error("lock wLock by setKey failed", zap.String("setKey", setKey), zap.Error(err)) return err } - defer rh.rwLocker.UnWLock() + defer rs.rwLocker.UnWLock() - count, err := rh.storageClient.SRem(rh.ctx, setKey, members).Result() + count, err := rs.storageClient.SRem(rs.ctx, setKey, members).Result() if err != nil 
|| count != int64(len(members)) {
-		rh.logger.Error("rem members from set failed", zap.String("setKey", setKey), zap.Any("members", members), zap.Error(err))
+		rs.logger.Error("rem members from set failed", zap.String("setKey", setKey), zap.Any("members", members), zap.Error(err))

 		return fmt.Errorf("rem members from set failed:%w", err)
 	}
@@ -55,17 +55,17 @@ func (rh *RedisHash) SREM(setKey string, members ...interface{}) error {
 }

 // SMembers define func of get all members from redis set by key
-func (rh *RedisHash) SMembers(setKey string) ([]string, error) {
-	err := rh.rwLocker.RLock()
+func (rs *RedisSet) SMembers(setKey string) ([]string, error) {
+	err := rs.rwLocker.RLock()
 	if err != nil {
-		rh.logger.Error("lock rLock by setKey failed", zap.String("setKey", setKey), zap.Error(err))
+		rs.logger.Error("lock rLock by setKey failed", zap.String("setKey", setKey), zap.Error(err))
 		return nil, err
 	}
-	defer rh.rwLocker.UnRLock()
+	defer rs.rwLocker.UnRLock()

-	result, err := rh.storageClient.SMembers(rh.ctx, setKey).Result()
+	result, err := rs.storageClient.SMembers(rs.ctx, setKey).Result()
 	if err != nil {
-		rh.logger.Error("get all members from set failed", zap.String("setKey", setKey), zap.Error(err))
+		rs.logger.Error("get all members from set failed", zap.String("setKey", setKey), zap.Error(err))
 		return nil, err
 	}
 	return result, nil
@@ -73,13 +73,6 @@ func (rh *RedisHash) SMembers(setKey string) ([]string, error) {

 // SIsMember define func of determine whether a member is in set by key
 func (rh *RedisHash) SIsMember(setKey string, member interface{}) (bool, error) {
-	err := rh.rwLocker.RLock()
-	if err != nil {
-		rh.logger.Error("lock rLock by setKey failed", zap.String("setKey", setKey), zap.Error(err))
-		return false, err
-	}
-	defer rh.rwLocker.UnRLock()
-
 	result, err := rh.storageClient.SIsMember(rh.ctx, setKey, member).Result()
 	if err != nil {
 		rh.logger.Error("check whether member is in set failed", zap.String("setKey", setKey), zap.Error(err))
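The receiver fix in this patch matters because Go attaches methods to the receiver's type: declared on *RedisHash, SREM and SMembers simply did not exist on *RedisSet, even though the code lives in redis_set.go and reaches into RedisSet's fields. A minimal sketch of the failure mode (toy types and method, hypothetical, for illustration):

package main

type RedisHash struct{ name string }
type RedisSet struct{ name string }

// Copy-pasted onto the wrong receiver, as in the pre-fix code.
func (rh *RedisHash) SREM() string { return "removing from " + rh.name }

func main() {
	s := &RedisSet{name: "demo"}
	// s.SREM() // compile error: s.SREM undefined (type *RedisSet has no field or method SREM)
	_ = s
}

diff --git a/go.sum b/go.sum
index 86f0495..376699e 100644
--- a/go.sum
+++ b/go.sum
@@ -11,6 +11,10 @@ github.com/actgardner/gogen-avro/v9 v9.1.0/go.mod h1:nyTj6wPqDJoxM3qdnjcLv+EnMDS
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/bitly/go-simplejson v0.5.1 h1:xgwPbetQScXt1gh9BmoJ6j9JMr3TElvuIyjR8pgdoow=
 github.com/bitly/go-simplejson v0.5.1/go.mod h1:YOPVLzCfwK14b4Sff3oP1AmGhI9T9Vsg84etUnlyp+Q=
+github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
+github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
+github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
+github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
 github.com/bytedance/sonic v1.12.5 h1:hoZxY8uW+mT+OpkcUWw4k0fDINtOcVavEsGfzwzFU/w=
 github.com/bytedance/sonic v1.12.5/go.mod h1:B8Gt/XvtZ3Fqj+iSKMypzymZxw/FVwgIGKzMzT9r/rk=
 github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
 github.com/bytedance/sonic/loader v0.2.1 h1:1GgorWTqf12TA8mma4DDSbaQigE2wOgQo7iC
 github.com/bytedance/sonic/loader v0.2.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=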
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -39,6 +45,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
From 25a55b94e8e88a237a3e1b810a55c34e4dc8d272 Mon Sep 17 00:00:00 2001
From: douxu
Date: Mon, 24 Mar 2025 16:37:43 +0800
Subject: [PATCH 14/33] feat(redis hash): fix bug of redis hash

1.add redis hash init func
2.replace redis module version in go.mod
3.add context parameter in redis exec statement

feat(redis set): add redis set init func

1.add redis set init func
2.replace redis module version in go.mod
3.add context parameter in redis exec statement

fix(logger): add compress parameter and optimize initLogger

1.add compress parameter
2.optimize initLogger function
---
 config/config.go                      |  11 +++
 config/config.yaml                    |   9 +++
 diagram/redis_hash.go                 |  30 ++++---
 diagram/redis_init.go                 |  40 ++++++++--
 diagram/redis_set.go                  |  25 ++++--
 distributedlock/locker_client_init.go |  44 +++++++++++
 distributedlock/redis_lock.go         |  31 ++++----
 distributedlock/redis_rwlock.go       |  58 ++++++++------
 distributedlock/rwlock_test.go        | 108 +++++++++++++------------
 logger/init.go                        |  13 +--
 main.go                               |   8 ++
 util/redis_options.go                 | 109 ++++++++++++++++++++++++++
 12 files changed, 367 insertions(+), 119 deletions(-)
 create mode 100644 distributedlock/locker_client_init.go
 create mode 100644 util/redis_options.go
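The first two files in this patch thread a redis block from config.yaml into ModelRTConfig through mapstructure tags. As a minimal sketch of how such a block unmarshals with viper (the yaml literal and values here are assumptions for illustration, not project code):

// assumes: import ("fmt"; "log"; "strings"; "github.com/spf13/viper"; "modelRT/config")
v := viper.New()
v.SetConfigType("yaml")
if err := v.ReadConfig(strings.NewReader(`
redis:
  addr: "localhost:6379"
  db: 1
  poolsize: 50
  timeout: 10
`)); err != nil {
	log.Fatal(err)
}

var cfg struct {
	Redis config.RedisConfig `mapstructure:"redis"`
}
if err := v.Unmarshal(&cfg); err != nil {
	log.Fatal(err)
}
fmt.Println(cfg.Redis.Addr, cfg.Redis.PoolSize) // localhost:6379 50

diff --git 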
a/config/config.go b/config/config.go index 57e917b..297dc51 100644 --- a/config/config.go +++ b/config/config.go @@ -41,6 +41,16 @@ type LoggerConfig struct { MaxSize int `mapstructure:"maxsize"` MaxBackups int `mapstructure:"maxbackups"` MaxAge int `mapstructure:"maxage"` + Compress bool `mapstructure:"compress"` +} + +// RedisConfig define config stuct of redis config +type RedisConfig struct { + Addr string `mapstructure:"addr"` + Password string `mapstructure:"password"` + DB int `mapstructure:"db"` + PoolSize int `mapstructure:"poolsize"` + Timeout int `mapstructure:"timeout"` } // AntsConfig define config stuct of ants pool config @@ -65,6 +75,7 @@ type ModelRTConfig struct { LoggerConfig `mapstructure:"logger"` AntsConfig `mapstructure:"ants"` DataRTConfig `mapstructure:"dataRT"` + RedisConfig `mapstructure:"redis"` PostgresDBURI string `mapstructure:"-"` } diff --git a/config/config.yaml b/config/config.yaml index 9df949b..55bc8be 100644 --- a/config/config.yaml +++ b/config/config.yaml @@ -29,12 +29,21 @@ logger: maxsize: 1 maxbackups: 5 maxage: 30 + compress: false # ants config ants: parse_concurrent_quantity: 10 rtd_receive_concurrent_quantity: 10 +# redis config +redis: + addr: "192.168.2.104:6379" + password: "" + db: 1 + poolsize: 50 + timeout: 10 + # modelRT base config base: grid_id: 1 diff --git a/diagram/redis_hash.go b/diagram/redis_hash.go index a12077c..13b883d 100644 --- a/diagram/redis_hash.go +++ b/diagram/redis_hash.go @@ -3,15 +3,15 @@ package diagram import ( "context" + distributed_lock "modelRT/distributedlock" locker "modelRT/distributedlock" + "modelRT/logger" "github.com/redis/go-redis/v9" - // "github.com/go-redis/redis" "go.uber.org/zap" ) -// TODO 统一 storageClient与 rwLocker 中使用的 redis 版本 // RedisHash defines the encapsulation struct of redis hash type type RedisHash struct { ctx context.Context @@ -20,14 +20,24 @@ type RedisHash struct { logger *zap.Logger } +// NewRedisHash define func of new redis hash instance +func NewRedisHash(ctx context.Context, hashKey string, token string, lockLeaseTime uint64, needRefresh bool) *RedisHash { + return &RedisHash{ + ctx: ctx, + rwLocker: distributed_lock.InitRWLocker(hashKey, token, lockLeaseTime, needRefresh), + storageClient: GetRedisClientInstance(), + logger: logger.GetLoggerInstance(), + } +} + // SetRedisHashByMap define func of set redis hash by map struct func (rh *RedisHash) SetRedisHashByMap(hashKey string, fields map[string]interface{}) error { - err := rh.rwLocker.WLock() + err := rh.rwLocker.WLock(rh.ctx) if err != nil { rh.logger.Error("lock wLock by hashKey failed", zap.String("hashKey", hashKey), zap.Error(err)) return err } - defer rh.rwLocker.UnWLock() + defer rh.rwLocker.UnWLock(rh.ctx) err = rh.storageClient.HSet(rh.ctx, hashKey, fields).Err() if err != nil { @@ -39,12 +49,12 @@ func (rh *RedisHash) SetRedisHashByMap(hashKey string, fields map[string]interfa // SetRedisHashByKV define func of set redis hash by kv struct func (rh *RedisHash) SetRedisHashByKV(hashKey string, field string, value interface{}) error { - err := rh.rwLocker.WLock() + err := rh.rwLocker.WLock(rh.ctx) if err != nil { rh.logger.Error("lock wLock by hashKey failed", zap.String("hashKey", hashKey), zap.Error(err)) return err } - defer rh.rwLocker.UnWLock() + defer rh.rwLocker.UnWLock(rh.ctx) err = rh.storageClient.HSet(rh.ctx, hashKey, field, value).Err() if err != nil { @@ -56,12 +66,12 @@ func (rh *RedisHash) SetRedisHashByKV(hashKey string, field string, value interf // HGet define func of get specified field value 
from redis hash by key and field name
 func (rh *RedisHash) HGet(hashKey string, field string) (string, error) {
-	err := rh.rwLocker.RLock()
+	err := rh.rwLocker.RLock(rh.ctx)
 	if err != nil {
 		rh.logger.Error("lock rLock by hashKey failed", zap.String("hashKey", hashKey), zap.Error(err))
 		return "", err
 	}
-	defer rh.rwLocker.UnRLock()
+	defer rh.rwLocker.UnRLock(rh.ctx)

 	result, err := rh.storageClient.HGet(rh.ctx, hashKey, field).Result()
 	if err != nil {
@@ -73,12 +83,12 @@ func (rh *RedisHash) HGet(hashKey string, field string) (string, error) {

 // HGetAll define func of get all fields from redis hash by key
 func (rh *RedisHash) HGetAll(hashKey string) (map[string]string, error) {
-	err := rh.rwLocker.RLock()
+	err := rh.rwLocker.RLock(rh.ctx)
 	if err != nil {
 		rh.logger.Error("lock rLock by hashKey failed", zap.String("hashKey", hashKey), zap.Error(err))
 		return nil, err
 	}
-	defer rh.rwLocker.UnRLock()
+	defer rh.rwLocker.UnRLock(rh.ctx)

 	result, err := rh.storageClient.HGetAll(rh.ctx, hashKey).Result()
 	if err != nil {
diff --git a/diagram/redis_init.go b/diagram/redis_init.go
index 1bb5923..1ee3207 100644
--- a/diagram/redis_init.go
+++ b/diagram/redis_init.go
@@ -2,20 +2,44 @@ package diagram

 import (
 	"sync"
+	"time"
+
+	"modelRT/config"
+	"modelRT/util"

 	"github.com/redis/go-redis/v9"
 )

 var (
-	client *redis.Client
-	once   sync.Once
+	_globalStorageClient *redis.Client
+	once                 sync.Once
 )

-// GetClientInstance define func of get redis client instance
-func GetClientInstance() *redis.Client {
-	once.Do(func() {
-		// TODO initialize the redis client from the config file
-		client = &redis.Client{}
-	})
+// initClient define func of return successfully initialized redis client
+func initClient(rCfg config.RedisConfig) *redis.Client {
+	client, err := util.NewRedisClient(
+		util.WithAddr(rCfg.Addr),
+		util.WithPassword(rCfg.Password),
+		util.WithDB(rCfg.DB),
+		util.WithPoolSize(rCfg.PoolSize),
+		util.WithTimeout(time.Duration(rCfg.Timeout)*time.Second),
+	)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// InitClientInstance define func of return instance of redis client
+func InitClientInstance(rCfg config.RedisConfig) *redis.Client {
+	once.Do(func() {
+		_globalStorageClient = initClient(rCfg)
+	})
+	return _globalStorageClient
+}
+
+// GetRedisClientInstance define func of get redis client instance
+func GetRedisClientInstance() *redis.Client {
+	client := _globalStorageClient
 	return client
 }
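A wiring note on the init/get split above: GetRedisClientInstance simply returns the package-level pointer, so it yields nil until InitClientInstance has run. Callers are expected to initialize once at startup and fetch the shared client everywhere else. A minimal usage sketch (the literal config values are assumptions for illustration):

// assumes: import ("modelRT/config"; "modelRT/diagram")
cfg := config.RedisConfig{Addr: "localhost:6379", DB: 0, PoolSize: 10, Timeout: 5}

storageClient := diagram.InitClientInstance(cfg) // runs initClient exactly once via sync.Once
defer storageClient.Close()

rdb := diagram.GetRedisClientInstance() // same *redis.Client on every call; nil if Init was skipped
_ = rdb

diff --git a/diagram/redis_set.go b/diagram/redis_set.go
index 67691e1..d70a8b5 100644
--- a/diagram/redis_set.go
+++ b/diagram/redis_set.go
@@ -4,13 +4,14 @@ import (
 	"context"
 	"fmt"

+	distributed_lock "modelRT/distributedlock"
 	locker "modelRT/distributedlock"
+	"modelRT/logger"

 	"github.com/redis/go-redis/v9"
 	"go.uber.org/zap"
 )

-// TODO 统一 storageClient与 rwLocker 中使用的 redis 版本
 // RedisSet defines the encapsulation struct of redis hash type
 type RedisSet struct {
 	ctx context.Context
@@ -19,14 +20,24 @@ type RedisSet struct {
 	logger *zap.Logger
 }

+// NewRedisSet define func of new redis set instance
+func NewRedisSet(ctx context.Context, hashKey string, token string, lockLeaseTime uint64, needRefresh bool) *RedisSet {
+	return &RedisSet{
+		ctx:           ctx,
+		rwLocker:      distributed_lock.InitRWLocker(hashKey, token, lockLeaseTime, needRefresh),
+		storageClient: GetRedisClientInstance(),
+		logger:        logger.GetLoggerInstance(),
+	}
+}
+
 // SADD define func of add redis set by members
 func (rs *RedisSet) SADD(setKey string, members ...interface{}) error {
-	err := rs.rwLocker.WLock()
+	err := rs.rwLocker.WLock(rs.ctx)
 	if err != nil {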
rs.logger.Error("lock wLock by setKey failed", zap.String("setKey", setKey), zap.Error(err))
 		return err
 	}
-	defer rs.rwLocker.UnWLock()
+	defer rs.rwLocker.UnWLock(rs.ctx)

 	err = rs.storageClient.SAdd(rs.ctx, setKey, members).Err()
 	if err != nil {
@@ -38,12 +49,12 @@ func (rs *RedisSet) SADD(setKey string, members ...interface{}) error {

 // SREM define func of remove the specified members from redis set by key
 func (rs *RedisSet) SREM(setKey string, members ...interface{}) error {
-	err := rs.rwLocker.WLock()
+	err := rs.rwLocker.WLock(rs.ctx)
 	if err != nil {
 		rs.logger.Error("lock wLock by setKey failed", zap.String("setKey", setKey), zap.Error(err))
 		return err
 	}
-	defer rs.rwLocker.UnWLock()
+	defer rs.rwLocker.UnWLock(rs.ctx)

 	count, err := rs.storageClient.SRem(rs.ctx, setKey, members).Result()
 	if err != nil || count != int64(len(members)) {
@@ -56,12 +67,12 @@ func (rs *RedisSet) SREM(setKey string, members ...interface{}) error {

 // SMembers define func of get all members from redis set by key
 func (rs *RedisSet) SMembers(setKey string) ([]string, error) {
-	err := rs.rwLocker.RLock()
+	err := rs.rwLocker.RLock(rs.ctx)
 	if err != nil {
 		rs.logger.Error("lock rLock by setKey failed", zap.String("setKey", setKey), zap.Error(err))
 		return nil, err
 	}
-	defer rs.rwLocker.UnRLock()
+	defer rs.rwLocker.UnRLock(rs.ctx)

 	result, err := rs.storageClient.SMembers(rs.ctx, setKey).Result()
 	if err != nil {
diff --git a/distributedlock/locker_client_init.go b/distributedlock/locker_client_init.go
new file mode 100644
index 0000000..2f1d82c
--- /dev/null
+++ b/distributedlock/locker_client_init.go
@@ -0,0 +1,44 @@
+package distributed_lock
+
+import (
+	"sync"
+	"time"
+
+	"modelRT/config"
+	"modelRT/util"
+
+	"github.com/redis/go-redis/v9"
+)
+
+var (
+	_globalLockerClient *redis.Client
+	once                sync.Once
+)
+
+// initClient define func of return successfully initialized redis client
+func initClient(rCfg config.RedisConfig) *redis.Client {
+	client, err := util.NewRedisClient(
+		util.WithAddr(rCfg.Addr),
+		util.WithPassword(rCfg.Password),
+		util.WithPoolSize(rCfg.PoolSize),
+		util.WithTimeout(time.Duration(rCfg.Timeout)*time.Second),
+	)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// InitClientInstance define func of return instance of redis client
+func InitClientInstance(rCfg config.RedisConfig) *redis.Client {
+	once.Do(func() {
+		_globalLockerClient = initClient(rCfg)
+	})
+	return _globalLockerClient
+}
+
+// GetRedisClientInstance define func of get redis client instance
+func GetRedisClientInstance() *redis.Client {
+	client := _globalLockerClient
+	return client
+}
diff --git a/distributedlock/redis_lock.go b/distributedlock/redis_lock.go
index 651560a..08e34f4 100644
--- a/distributedlock/redis_lock.go
+++ b/distributedlock/redis_lock.go
@@ -1,6 +1,7 @@
 package distributed_lock

 import (
+	"context"
 	"errors"
 	"fmt"
 	"strings"
@@ -11,8 +12,8 @@ import (
 	luascript "modelRT/distributedlock/luascript"
 	"modelRT/logger"

-	"github.com/go-redis/redis"
 	uuid "github.com/google/uuid"
+	"github.com/redis/go-redis/v9"
 	"go.uber.org/zap"
 )
@@ -44,11 +45,11 @@ type redissionLocker struct {
 	logger *zap.Logger
 }

-func (rl *redissionLocker) Lock(timeout ...time.Duration) error {
+func (rl *redissionLocker) Lock(ctx context.Context, timeout ...time.Duration) error {
 	if rl.exit == nil {
 		rl.exit = make(chan struct{})
 	}
-	result := rl.tryLock().(*constant.RedisResult)
+	result := rl.tryLock(ctx).(*constant.RedisResult)
 	if result.Code == constant.UnknownInternalError {
rl.logger.Error(result.OutputResultMessage()) return fmt.Errorf("get lock failed:%w", result) @@ -57,16 +58,16 @@ func (rl *redissionLocker) Lock(timeout ...time.Duration) error { if (result.Code == constant.LockSuccess) && rl.needRefresh { rl.once.Do(func() { // async refresh lock timeout unitl receive exit singal - go rl.refreshLockTimeout() + go rl.refreshLockTimeout(ctx) }) return nil } subMsg := make(chan struct{}, 1) defer close(subMsg) - sub := rl.client.Subscribe(rl.waitChanKey) + sub := rl.client.Subscribe(ctx, rl.waitChanKey) defer sub.Close() - go rl.subscribeLock(sub, subMsg) + go rl.subscribeLock(ctx, sub, subMsg) if len(timeout) > 0 && timeout[0] > 0 { acquireTimer := time.NewTimer(timeout[0]) @@ -79,7 +80,7 @@ func (rl *redissionLocker) Lock(timeout ...time.Duration) error { return err } - resultErr := rl.tryLock().(*constant.RedisResult) + resultErr := rl.tryLock(ctx).(*constant.RedisResult) if (resultErr.Code == constant.LockFailure) || (resultErr.Code == constant.UnknownInternalError) { rl.logger.Info(resultErr.OutputResultMessage()) continue @@ -99,14 +100,14 @@ func (rl *redissionLocker) Lock(timeout ...time.Duration) error { return fmt.Errorf("lock the redis lock failed:%w", result) } -func (rl *redissionLocker) subscribeLock(sub *redis.PubSub, out chan struct{}) { +func (rl *redissionLocker) subscribeLock(ctx context.Context, sub *redis.PubSub, out chan struct{}) { if sub == nil || out == nil { return } rl.logger.Info("lock: enter sub routine", zap.String("token", rl.token)) for { - msg, err := sub.Receive() + msg, err := sub.Receive(ctx) if err != nil { rl.logger.Info("sub receive message failed", zap.Error(err)) continue @@ -134,7 +135,7 @@ KEYS[1]:锁的键名(key),通常是锁的唯一标识。 ARGV[1]:锁的过期时间(lockLeaseTime),单位为秒。 ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 */ -func (rl *redissionLocker) refreshLockTimeout() { +func (rl *redissionLocker) refreshLockTimeout(ctx context.Context) { rl.logger.Info("lock refresh by key and token", zap.String("token", rl.token), zap.String("key", rl.key)) lockTime := time.Duration(rl.lockLeaseTime/3) * time.Second @@ -145,7 +146,7 @@ func (rl *redissionLocker) refreshLockTimeout() { select { case <-timer.C: // extend key lease time - res := rl.client.Eval(luascript.RefreshLockScript, []string{rl.key}, rl.lockLeaseTime, rl.token) + res := rl.client.Eval(ctx, luascript.RefreshLockScript, []string{rl.key}, rl.lockLeaseTime, rl.token) val, err := res.Int() if err != redis.Nil && err != nil { rl.logger.Info("lock refresh failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err)) @@ -179,9 +180,9 @@ KEYS[1]:锁的键名(key),通常是锁的唯一标识。 ARGV[1]:锁的过期时间(lockLeaseTime),单位为秒。 ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 */ -func (rl *redissionLocker) tryLock() error { +func (rl *redissionLocker) tryLock(ctx context.Context) error { lockType := constant.LockType - res := rl.client.Eval(luascript.LockScript, []string{rl.key}, rl.lockLeaseTime, rl.token) + res := rl.client.Eval(ctx, luascript.LockScript, []string{rl.key}, rl.lockLeaseTime, rl.token) val, err := res.Int() if err != redis.Nil && err != nil { return constant.NewRedisResult(constant.UnknownInternalError, lockType, err.Error()) @@ -195,8 +196,8 @@ KEYS[2]:锁的释放通知频道(chankey),用于通知其他客户端锁已 ARGV[1]:解锁消息(unlockMessage),用于通知其他客户端锁已释放。 ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 */ -func (rl *redissionLocker) UnLock() error { - res := rl.client.Eval(luascript.UnLockScript, []string{rl.key, rl.waitChanKey}, unlockMessage, rl.token) +func (rl *redissionLocker) UnLock(ctx context.Context) error { + res := 
rl.client.Eval(ctx, luascript.UnLockScript, []string{rl.key, rl.waitChanKey}, unlockMessage, rl.token) val, err := res.Int() if err != redis.Nil && err != nil { rl.logger.Info("unlock lock failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err)) diff --git a/distributedlock/redis_rwlock.go b/distributedlock/redis_rwlock.go index 649b7f4..afd1edb 100644 --- a/distributedlock/redis_rwlock.go +++ b/distributedlock/redis_rwlock.go @@ -1,6 +1,7 @@ package distributed_lock import ( + "context" "errors" "fmt" "strings" @@ -11,8 +12,8 @@ import ( "modelRT/distributedlock/luascript" "modelRT/logger" - "github.com/go-redis/redis" uuid "github.com/google/uuid" + "github.com/redis/go-redis/v9" "go.uber.org/zap" ) @@ -21,12 +22,12 @@ type RedissionRWLocker struct { rwTokenTimeoutPrefix string } -func (rl *RedissionRWLocker) RLock(timeout ...time.Duration) error { +func (rl *RedissionRWLocker) RLock(ctx context.Context, timeout ...time.Duration) error { if rl.exit == nil { rl.exit = make(chan struct{}) } - result := rl.tryRLock().(*constant.RedisResult) + result := rl.tryRLock(ctx).(*constant.RedisResult) if result.Code == constant.UnknownInternalError { rl.logger.Error(result.OutputResultMessage()) return fmt.Errorf("get read lock failed:%w", result) @@ -36,7 +37,7 @@ func (rl *RedissionRWLocker) RLock(timeout ...time.Duration) error { if rl.needRefresh { rl.once.Do(func() { // async refresh lock timeout unitl receive exit singal - go rl.refreshLockTimeout() + go rl.refreshLockTimeout(ctx) }) } rl.logger.Info("success get the read by key and token", zap.String("key", rl.key), zap.String("token", rl.token)) @@ -45,9 +46,9 @@ func (rl *RedissionRWLocker) RLock(timeout ...time.Duration) error { subMsg := make(chan struct{}, 1) defer close(subMsg) - sub := rl.client.Subscribe(rl.waitChanKey) + sub := rl.client.Subscribe(ctx, rl.waitChanKey) defer sub.Close() - go rl.subscribeLock(sub, subMsg) + go rl.subscribeLock(ctx, sub, subMsg) if len(timeout) > 0 && timeout[0] > 0 { acquireTimer := time.NewTimer(timeout[0]) @@ -60,7 +61,7 @@ func (rl *RedissionRWLocker) RLock(timeout ...time.Duration) error { return err } - resultErr := rl.tryRLock().(*constant.RedisResult) + resultErr := rl.tryRLock(ctx).(*constant.RedisResult) if (resultErr.Code == constant.RLockFailureWithWLockOccupancy) || (resultErr.Code == constant.UnknownInternalError) { rl.logger.Info(resultErr.OutputResultMessage()) continue @@ -80,10 +81,10 @@ func (rl *RedissionRWLocker) RLock(timeout ...time.Duration) error { return fmt.Errorf("lock the redis read lock failed:%w", result) } -func (rl *RedissionRWLocker) tryRLock() error { +func (rl *RedissionRWLocker) tryRLock(ctx context.Context) error { lockType := constant.LockType - res := rl.client.Eval(luascript.RLockScript, []string{rl.key, rl.rwTokenTimeoutPrefix}, rl.lockLeaseTime, rl.token) + res := rl.client.Eval(ctx, luascript.RLockScript, []string{rl.key, rl.rwTokenTimeoutPrefix}, rl.lockLeaseTime, rl.token) val, err := res.Int() if err != redis.Nil && err != nil { return constant.NewRedisResult(constant.UnknownInternalError, lockType, err.Error()) @@ -91,7 +92,7 @@ func (rl *RedissionRWLocker) tryRLock() error { return constant.NewRedisResult(constant.RedisCode(val), lockType, "") } -func (rl *RedissionRWLocker) refreshLockTimeout() { +func (rl *RedissionRWLocker) refreshLockTimeout(ctx context.Context) { rl.logger.Info("lock refresh by key and token", zap.String("token", rl.token), zap.String("key", rl.key)) lockTime := time.Duration(rl.lockLeaseTime/3) * 
time.Second @@ -102,7 +103,7 @@ func (rl *RedissionRWLocker) refreshLockTimeout() { select { case <-timer.C: // extend key lease time - res := rl.client.Eval(luascript.RefreshRWLockScript, []string{rl.key, rl.rwTokenTimeoutPrefix}, rl.lockLeaseTime, rl.token) + res := rl.client.Eval(ctx, luascript.RefreshRWLockScript, []string{rl.key, rl.rwTokenTimeoutPrefix}, rl.lockLeaseTime, rl.token) val, err := res.Int() if err != redis.Nil && err != nil { rl.logger.Info("lock refresh failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err)) @@ -124,9 +125,9 @@ func (rl *RedissionRWLocker) refreshLockTimeout() { } } -func (rl *RedissionRWLocker) UnRLock() error { +func (rl *RedissionRWLocker) UnRLock(ctx context.Context) error { rl.logger.Info("unlock RLock by key and token", zap.String("key", rl.key), zap.String("token", rl.token)) - res := rl.client.Eval(luascript.UnRLockScript, []string{rl.key, rl.rwTokenTimeoutPrefix, rl.waitChanKey}, unlockMessage, rl.token) + res := rl.client.Eval(ctx, luascript.UnRLockScript, []string{rl.key, rl.rwTokenTimeoutPrefix, rl.waitChanKey}, unlockMessage, rl.token) val, err := res.Int() if err != redis.Nil && err != nil { rl.logger.Info("unlock read lock failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err)) @@ -149,12 +150,12 @@ func (rl *RedissionRWLocker) UnRLock() error { return nil } -func (rl *RedissionRWLocker) WLock(timeout ...time.Duration) error { +func (rl *RedissionRWLocker) WLock(ctx context.Context, timeout ...time.Duration) error { if rl.exit == nil { rl.exit = make(chan struct{}) } - result := rl.tryWLock().(*constant.RedisResult) + result := rl.tryWLock(ctx).(*constant.RedisResult) if result.Code == constant.UnknownInternalError { rl.logger.Error(result.OutputResultMessage()) return fmt.Errorf("get write lock failed:%w", result) @@ -163,16 +164,16 @@ func (rl *RedissionRWLocker) WLock(timeout ...time.Duration) error { if (result.Code == constant.LockSuccess) && rl.needRefresh { rl.once.Do(func() { // async refresh lock timeout unitl receive exit singal - go rl.refreshLockTimeout() + go rl.refreshLockTimeout(ctx) }) return nil } subMsg := make(chan struct{}, 1) defer close(subMsg) - sub := rl.client.Subscribe(rl.waitChanKey) + sub := rl.client.Subscribe(ctx, rl.waitChanKey) defer sub.Close() - go rl.subscribeLock(sub, subMsg) + go rl.subscribeLock(ctx, sub, subMsg) if len(timeout) > 0 && timeout[0] > 0 { acquireTimer := time.NewTimer(timeout[0]) @@ -185,7 +186,7 @@ func (rl *RedissionRWLocker) WLock(timeout ...time.Duration) error { return err } - result := rl.tryWLock().(*constant.RedisResult) + result := rl.tryWLock(ctx).(*constant.RedisResult) if (result.Code == constant.UnknownInternalError) || (result.Code == constant.WLockFailureWithRLockOccupancy) || (result.Code == constant.WLockFailureWithWLockOccupancy) || (result.Code == constant.WLockFailureWithNotFirstPriority) { rl.logger.Info(result.OutputResultMessage()) continue @@ -205,10 +206,10 @@ func (rl *RedissionRWLocker) WLock(timeout ...time.Duration) error { return fmt.Errorf("lock write lock failed:%w", result) } -func (rl *RedissionRWLocker) tryWLock() error { +func (rl *RedissionRWLocker) tryWLock(ctx context.Context) error { lockType := constant.LockType - res := rl.client.Eval(luascript.WLockScript, []string{rl.key, rl.rwTokenTimeoutPrefix}, rl.lockLeaseTime, rl.token) + res := rl.client.Eval(ctx, luascript.WLockScript, []string{rl.key, rl.rwTokenTimeoutPrefix}, rl.lockLeaseTime, rl.token) val, err := res.Int() if err != redis.Nil && 
err != nil { return constant.NewRedisResult(constant.UnknownInternalError, lockType, err.Error()) @@ -216,8 +217,8 @@ func (rl *RedissionRWLocker) tryWLock() error { return constant.NewRedisResult(constant.RedisCode(val), lockType, "") } -func (rl *RedissionRWLocker) UnWLock() error { - res := rl.client.Eval(luascript.UnWLockScript, []string{rl.key, rl.rwTokenTimeoutPrefix, rl.waitChanKey}, unlockMessage, rl.token) +func (rl *RedissionRWLocker) UnWLock(ctx context.Context) error { + res := rl.client.Eval(ctx, luascript.UnWLockScript, []string{rl.key, rl.rwTokenTimeoutPrefix, rl.waitChanKey}, unlockMessage, rl.token) val, err := res.Int() if err != redis.Nil && err != nil { rl.logger.Error("unlock write lock failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err)) @@ -278,3 +279,14 @@ func GetRWLocker(client *redis.Client, ops *RedissionLockConfig) *RedissionRWLoc } return rwLocker } + +// TODO consider refactoring to use options mode +func InitRWLocker(key string, token string, lockLeaseTime uint64, needRefresh bool) *RedissionRWLocker { + ops := &RedissionLockConfig{ + Key: key, + Token: token, + LockLeaseTime: lockLeaseTime, + NeedRefresh: needRefresh, + } + return GetRWLocker(GetRedisClientInstance(), ops) +} diff --git a/distributedlock/rwlock_test.go b/distributedlock/rwlock_test.go index d23e460..0ecb718 100644 --- a/distributedlock/rwlock_test.go +++ b/distributedlock/rwlock_test.go @@ -1,11 +1,12 @@ package distributed_lock import ( + "context" "strings" "testing" "time" - "github.com/go-redis/redis" + "github.com/redis/go-redis/v9" "github.com/stretchr/testify/assert" "go.uber.org/zap" ) @@ -17,6 +18,7 @@ func init() { } func TestRWLockRLockAndUnRLock(t *testing.T) { + ctx := context.TODO() rdb := redis.NewClient(&redis.Options{ Network: "tcp", Addr: "192.168.2.104:6379", @@ -35,18 +37,18 @@ func TestRWLockRLockAndUnRLock(t *testing.T) { duration := 10 * time.Second // 第一次加读锁 - err := rwLocker.RLock(duration) + err := rwLocker.RLock(ctx, duration) assert.Equal(t, nil, err) tokenKey := strings.Join([]string{rwLocker.rwTokenTimeoutPrefix, rwLocker.token}, ":") - num, err := rdb.HGet(rwLocker.key, tokenKey).Int() + num, err := rdb.HGet(ctx, rwLocker.key, tokenKey).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) - err = rwLocker.UnRLock() + err = rwLocker.UnRLock(ctx) assert.Equal(t, nil, err) - num, err = rdb.HGet(rwLocker.key, tokenKey).Int() + num, err = rdb.HGet(ctx, rwLocker.key, tokenKey).Int() assert.Equal(t, redis.Nil, err) assert.Equal(t, 0, num) t.Log("rwLock rlock and unrlock test success") @@ -54,6 +56,7 @@ func TestRWLockRLockAndUnRLock(t *testing.T) { } func TestRWLockReentrantRLock(t *testing.T) { + ctx := context.TODO() rdb := redis.NewClient(&redis.Options{ Network: "tcp", Addr: "192.168.2.104:6379", @@ -72,35 +75,35 @@ func TestRWLockReentrantRLock(t *testing.T) { duration := 10 * time.Second // 第一次加读锁 - err := rwLocker.RLock(duration) + err := rwLocker.RLock(ctx, duration) assert.Equal(t, nil, err) tokenKey := strings.Join([]string{rwLocker.rwTokenTimeoutPrefix, rwLocker.token}, ":") - num, err := rdb.HGet(rwLocker.key, tokenKey).Int() + num, err := rdb.HGet(ctx, rwLocker.key, tokenKey).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) // 第二次加读锁 - err = rwLocker.RLock(duration) + err = rwLocker.RLock(ctx, duration) assert.Equal(t, nil, err) - num, err = rdb.HGet(rwLocker.key, tokenKey).Int() + num, err = rdb.HGet(ctx, rwLocker.key, tokenKey).Int() assert.Equal(t, nil, err) assert.Equal(t, 2, num) // 第一次解读锁 - err = 
rwLocker.UnRLock() + err = rwLocker.UnRLock(ctx) assert.Equal(t, nil, err) - num, err = rdb.HGet(rwLocker.key, tokenKey).Int() + num, err = rdb.HGet(ctx, rwLocker.key, tokenKey).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) // 第二次解读锁 - err = rwLocker.UnRLock() + err = rwLocker.UnRLock(ctx) assert.Equal(t, nil, err) - num, err = rdb.HGet(rwLocker.key, tokenKey).Int() + num, err = rdb.HGet(ctx, rwLocker.key, tokenKey).Int() assert.Equal(t, redis.Nil, err) assert.Equal(t, 0, num) t.Log("rwLock reentrant lock test success") @@ -108,6 +111,7 @@ func TestRWLockReentrantRLock(t *testing.T) { } func TestRWLockRefreshRLock(t *testing.T) { + ctx := context.TODO() rdb := redis.NewClient(&redis.Options{ Network: "tcp", Addr: "192.168.2.104:6379", @@ -126,17 +130,17 @@ func TestRWLockRefreshRLock(t *testing.T) { duration := 10 * time.Second // 第一次加读锁 - err := rwLocker.RLock(duration) + err := rwLocker.RLock(ctx, duration) assert.Equal(t, nil, err) tokenKey := strings.Join([]string{rwLocker.rwTokenTimeoutPrefix, rwLocker.token}, ":") - num, err := rdb.HGet(rwLocker.key, tokenKey).Int() + num, err := rdb.HGet(ctx, rwLocker.key, tokenKey).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) time.Sleep(10 * time.Second) script := `return redis.call('httl', KEYS[1], 'fields', '1', ARGV[1]);` - result, err := rdb.Eval(script, []string{rwLocker.key}, tokenKey).Result() + result, err := rdb.Eval(ctx, script, []string{rwLocker.key}, tokenKey).Result() assert.Equal(t, nil, err) ttls, ok := result.([]interface{}) assert.Equal(t, true, ok) @@ -145,10 +149,10 @@ func TestRWLockRefreshRLock(t *testing.T) { compareValue := int64(8) assert.Greater(t, ttl, compareValue) - err = rwLocker.UnRLock() + err = rwLocker.UnRLock(ctx) assert.Equal(t, nil, err) - num, err = rdb.HGet(rwLocker.key, tokenKey).Int() + num, err = rdb.HGet(ctx, rwLocker.key, tokenKey).Int() assert.Equal(t, redis.Nil, err) assert.Equal(t, 0, num) t.Log("rwLock refresh lock test success") @@ -157,6 +161,7 @@ func TestRWLockRefreshRLock(t *testing.T) { // TODO 设计两个客户端分别加读锁,测试是否可以加锁成功 func TestRWLock2ClientRLock(t *testing.T) { + ctx := context.TODO() rdb := redis.NewClient(&redis.Options{ Network: "tcp", Addr: "192.168.2.104:6379", @@ -183,39 +188,39 @@ func TestRWLock2ClientRLock(t *testing.T) { duration := 10 * time.Second // locker1加读锁 - err := rwLocker1.RLock(duration) + err := rwLocker1.RLock(ctx, duration) assert.Equal(t, nil, err) tokenKey1 := strings.Join([]string{rwLocker1.rwTokenTimeoutPrefix, rwLocker1.token}, ":") - num, err := rdb.HGet(rwLocker1.key, tokenKey1).Int() + num, err := rdb.HGet(ctx, rwLocker1.key, tokenKey1).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) // locker2加读锁 - err = rwLocker2.RLock(duration) + err = rwLocker2.RLock(ctx, duration) assert.Equal(t, nil, err) tokenKey2 := strings.Join([]string{rwLocker2.rwTokenTimeoutPrefix, rwLocker2.token}, ":") - num, err = rdb.HGet(rwLocker2.key, tokenKey2).Int() + num, err = rdb.HGet(ctx, rwLocker2.key, tokenKey2).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) - err = rdb.HLen(rwLocker1.key).Err() + err = rdb.HLen(ctx, rwLocker1.key).Err() assert.Equal(t, nil, err) - hLen := rdb.HLen(rwLocker1.key).Val() + hLen := rdb.HLen(ctx, rwLocker1.key).Val() assert.Equal(t, 3, hLen) // locker1解读锁 - err = rwLocker1.UnRLock() + err = rwLocker1.UnRLock(ctx) assert.Equal(t, nil, err) // locker1解读锁 - err = rwLocker2.UnRLock() + err = rwLocker2.UnRLock(ctx) assert.Equal(t, nil, err) - err = rdb.Exists(rwLocker1.key).Err() + err = rdb.Exists(ctx, rwLocker1.key).Err() 
assert.Equal(t, redis.Nil, err) - existNum := rdb.Exists(rwLocker1.key).Val() + existNum := rdb.Exists(ctx, rwLocker1.key).Val() assert.Equal(t, 0, existNum) t.Log("rwLock 2 client lock test success") return @@ -223,6 +228,7 @@ func TestRWLock2ClientRLock(t *testing.T) { // TODO 设计两个客户端分别加时间不同的读锁,测试ttl时间在有一个key删除后是否可以变换成功 func TestRWLock2CWith2DifTimeRLock(t *testing.T) { + ctx := context.TODO() rdb := redis.NewClient(&redis.Options{ Network: "tcp", Addr: "192.168.2.104:6379", @@ -249,45 +255,46 @@ func TestRWLock2CWith2DifTimeRLock(t *testing.T) { duration := 10 * time.Second // locker1加读锁 - err := rwLocker1.RLock(duration) + err := rwLocker1.RLock(ctx, duration) assert.Equal(t, nil, err) tokenKey1 := strings.Join([]string{rwLocker1.rwTokenTimeoutPrefix, rwLocker1.token}, ":") - num, err := rdb.HGet(rwLocker1.key, tokenKey1).Int() + num, err := rdb.HGet(ctx, rwLocker1.key, tokenKey1).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) // locker2加读锁 - err = rwLocker2.RLock(duration) + err = rwLocker2.RLock(ctx, duration) assert.Equal(t, nil, err) tokenKey2 := strings.Join([]string{rwLocker2.rwTokenTimeoutPrefix, rwLocker2.token}, ":") - num, err = rdb.HGet(rwLocker2.key, tokenKey2).Int() + num, err = rdb.HGet(ctx, rwLocker2.key, tokenKey2).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) - err = rdb.HLen(rwLocker1.key).Err() + err = rdb.HLen(ctx, rwLocker1.key).Err() assert.Equal(t, nil, err) - hLen := rdb.HLen(rwLocker1.key).Val() + hLen := rdb.HLen(ctx, rwLocker1.key).Val() assert.Equal(t, 3, hLen) // locker1解读锁 - err = rwLocker1.UnRLock() + err = rwLocker1.UnRLock(ctx) assert.Equal(t, nil, err) // locker1解读锁 - err = rwLocker2.UnRLock() + err = rwLocker2.UnRLock(ctx) assert.Equal(t, nil, err) - err = rdb.Exists(rwLocker1.key).Err() + err = rdb.Exists(ctx, rwLocker1.key).Err() assert.Equal(t, redis.Nil, err) - existNum := rdb.Exists(rwLocker1.key).Val() + existNum := rdb.Exists(ctx, rwLocker1.key).Val() assert.Equal(t, 0, existNum) t.Log("rwLock 2 client lock test success") return } func TestRWLockWLockAndUnWLock(t *testing.T) { + ctx := context.TODO() rdb := redis.NewClient(&redis.Options{ Network: "tcp", Addr: "192.168.2.104:6379", @@ -306,18 +313,18 @@ func TestRWLockWLockAndUnWLock(t *testing.T) { duration := 10 * time.Second // 第一次加读锁 - err := rwLocker.WLock(duration) + err := rwLocker.WLock(ctx, duration) assert.Equal(t, nil, err) tokenKey := strings.Join([]string{rwLocker.rwTokenTimeoutPrefix, rwLocker.token}, ":") - num, err := rdb.HGet(rwLocker.key, tokenKey).Int() + num, err := rdb.HGet(ctx, rwLocker.key, tokenKey).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) - err = rwLocker.UnWLock() + err = rwLocker.UnWLock(ctx) assert.Equal(t, nil, err) - num, err = rdb.HGet(rwLocker.key, tokenKey).Int() + num, err = rdb.HGet(ctx, rwLocker.key, tokenKey).Int() assert.Equal(t, redis.Nil, err) assert.Equal(t, 0, num) t.Log("rwLock rlock and unrlock test success") @@ -325,6 +332,7 @@ func TestRWLockWLockAndUnWLock(t *testing.T) { } func TestRWLockReentrantWLock(t *testing.T) { + ctx := context.TODO() rdb := redis.NewClient(&redis.Options{ Network: "tcp", Addr: "192.168.2.104:6379", @@ -343,35 +351,35 @@ func TestRWLockReentrantWLock(t *testing.T) { duration := 10 * time.Second // 第一次加写锁 - err := rwLocker.WLock(duration) + err := rwLocker.WLock(ctx, duration) assert.Equal(t, nil, err) tokenKey := strings.Join([]string{rwLocker.rwTokenTimeoutPrefix, rwLocker.token}, ":") - num, err := rdb.HGet(rwLocker.key, tokenKey).Int() + num, err := rdb.HGet(ctx, rwLocker.key, 
tokenKey).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) // 第二次加写锁 - err = rwLocker.WLock(duration) + err = rwLocker.WLock(ctx, duration) assert.Equal(t, nil, err) - num, err = rdb.HGet(rwLocker.key, tokenKey).Int() + num, err = rdb.HGet(ctx, rwLocker.key, tokenKey).Int() assert.Equal(t, nil, err) assert.Equal(t, 2, num) // 第一次解写锁 - err = rwLocker.UnWLock() + err = rwLocker.UnWLock(ctx) assert.Equal(t, nil, err) - num, err = rdb.HGet(rwLocker.key, tokenKey).Int() + num, err = rdb.HGet(ctx, rwLocker.key, tokenKey).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) // 第二次解写锁 - err = rwLocker.UnWLock() + err = rwLocker.UnWLock(ctx) assert.Equal(t, nil, err) - num, err = rdb.HGet(rwLocker.key, tokenKey).Int() + num, err = rdb.HGet(ctx, rwLocker.key, tokenKey).Int() assert.Equal(t, redis.Nil, err) assert.Equal(t, 0, num) t.Log("rwLock reentrant lock test success") diff --git a/logger/init.go b/logger/init.go index 63818f9..a537bf8 100644 --- a/logger/init.go +++ b/logger/init.go @@ -31,13 +31,13 @@ func getEncoder() zapcore.Encoder { } // getLogWriter responsible for setting the location of log storage -func getLogWriter(mode, filename string, maxsize, maxBackup, maxAge int) zapcore.WriteSyncer { +func getLogWriter(mode, filename string, maxsize, maxBackup, maxAge int, compress bool) zapcore.WriteSyncer { lumberJackLogger := &lumberjack.Logger{ Filename: filename, // log file position MaxSize: maxsize, // log file maxsize MaxAge: maxAge, // maximum number of day files retained MaxBackups: maxBackup, // maximum number of old files retained - Compress: false, // whether to compress + Compress: compress, // whether to compress } syncConsole := zapcore.AddSync(os.Stderr) @@ -51,7 +51,7 @@ func getLogWriter(mode, filename string, maxsize, maxBackup, maxAge int) zapcore // initLogger return successfully initialized zap logger func initLogger(lCfg config.LoggerConfig) *zap.Logger { - writeSyncer := getLogWriter(lCfg.Mode, lCfg.FilePath, lCfg.MaxSize, lCfg.MaxBackups, lCfg.MaxAge) + writeSyncer := getLogWriter(lCfg.Mode, lCfg.FilePath, lCfg.MaxSize, lCfg.MaxBackups, lCfg.MaxAge, lCfg.Compress) encoder := getEncoder() l := new(zapcore.Level) @@ -61,10 +61,11 @@ func initLogger(lCfg config.LoggerConfig) *zap.Logger { } core := zapcore.NewCore(encoder, writeSyncer, l) - _globalLogger = zap.New(core, zap.AddCaller()) - zap.ReplaceGlobals(_globalLogger) + logger := zap.New(core, zap.AddCaller()) - return _globalLogger + // 替换全局日志实例 + zap.ReplaceGlobals(logger) + return logger } // InitLoggerInstance return instance of zap logger diff --git a/main.go b/main.go index 1bb84e0..db470ba 100644 --- a/main.go +++ b/main.go @@ -9,6 +9,8 @@ import ( "modelRT/alert" "modelRT/config" "modelRT/database" + "modelRT/diagram" + distributed_lock "modelRT/distributedlock" _ "modelRT/docs" "modelRT/handler" "modelRT/logger" @@ -77,6 +79,12 @@ func main() { } defer parsePool.Release() + storageClient := diagram.InitClientInstance(modelRTConfig.RedisConfig) + defer storageClient.Close() + + lockerClient := distributed_lock.InitClientInstance(modelRTConfig.RedisConfig) + defer lockerClient.Close() + // init anchor param ants pool anchorRealTimePool, err := pool.AnchorPoolInit(modelRTConfig.RTDReceiveConcurrentQuantity) if err != nil { diff --git a/util/redis_options.go b/util/redis_options.go new file mode 100644 index 0000000..79e9115 --- /dev/null +++ b/util/redis_options.go @@ -0,0 +1,109 @@ +package util + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/redis/go-redis/v9" +) + +// 
RedisOptions define struct of redis client config options +type RedisOptions struct { + Addr string + Password string + DB int + PoolSize int + Timeout time.Duration +} + +// RedisOption define a function type for modify RedisOptions +type RedisOption func(*RedisOptions) error + +// WithAddr define func of configure redis addr options +func WithAddr(addr string) RedisOption { + return func(o *RedisOptions) error { + if addr == "" { + return errors.New("地址不能为空") + } + o.Addr = addr + return nil + } +} + +// WithPassword define func of configure redis password options +func WithPassword(password string) RedisOption { + return func(o *RedisOptions) error { + o.Password = password + return nil + } +} + +// WithDB define func of configure redis db options +func WithDB(db int) RedisOption { + return func(o *RedisOptions) error { + if db < 0 { + return errors.New("数据库编号不能为负数") + } + o.DB = db + return nil + } +} + +// WithPoolSize define func of configure pool size options +func WithPoolSize(poolSize int) RedisOption { + return func(o *RedisOptions) error { + if poolSize <= 0 { + return errors.New("连接池大小必须大于 0") + } + o.PoolSize = poolSize + return nil + } +} + +// WithTimeout define func of configure timeout options +func WithTimeout(timeout time.Duration) RedisOption { + return func(o *RedisOptions) error { + if timeout <= 0 { + return errors.New("超时时间必须大于 0") + } + o.Timeout = timeout + return nil + } +} + +// NewRedisClient define func of initialize the Redis client +func NewRedisClient(opts ...RedisOption) (*redis.Client, error) { + // default options + options := &RedisOptions{ + Addr: "localhost:6379", + Password: "", + DB: 0, + PoolSize: 10, + Timeout: 5 * time.Second, + } + + // Apply configuration options from config + for _, opt := range opts { + if err := opt(options); err != nil { + return nil, err + } + } + + // create redis client + client := redis.NewClient(&redis.Options{ + Addr: options.Addr, + Password: options.Password, + DB: options.DB, + PoolSize: options.PoolSize, + }) + + // check if the connection is successful + ctx, cancel := context.WithTimeout(context.Background(), options.Timeout) + defer cancel() + if err := client.Ping(ctx).Err(); err != nil { + return nil, fmt.Errorf("can not connect redis:%v", err) + } + return client, nil +} From 1cf6137f9fc6fe5c01da58850d990b36a3a333e1 Mon Sep 17 00:00:00 2001 From: douxu Date: Tue, 25 Mar 2025 17:00:09 +0800 Subject: [PATCH 15/33] refactor(redis hash): fix bug of redis hash 1.optimize RedisOption struct fix(uuid): replace uuid mod dependencies 1.replace uuid mod dependencies fix(config): add new redis config 1.add new redis config --- config/config.go | 17 ++-- config/config.yaml | 9 +- diagram/redis_hash.go | 3 +- diagram/redis_init.go | 2 +- diagram/redis_set.go | 3 +- .../{locker_client_init.go => locker_init.go} | 5 +- distributedlock/redis_lock.go | 11 ++- distributedlock/redis_rwlock.go | 48 +++++----- distributedlock/rwlock_test.go | 2 +- go.mod | 4 - go.sum | 45 --------- main.go | 6 +- util/redis_init.go | 36 ++++++++ util/redis_options.go | 92 +++++-------------- 14 files changed, 118 insertions(+), 165 deletions(-) rename distributedlock/{locker_client_init.go => locker_init.go} (93%) create mode 100644 util/redis_init.go diff --git a/config/config.go b/config/config.go index 297dc51..9a3bfb8 100644 --- a/config/config.go +++ b/config/config.go @@ -69,14 +69,15 @@ type DataRTConfig struct { // ModelRTConfig define config stuct of model runtime server type ModelRTConfig struct { - BaseConfig `mapstructure:"base"` - 
PostgresConfig `mapstructure:"postgres"` - KafkaConfig `mapstructure:"kafka"` - LoggerConfig `mapstructure:"logger"` - AntsConfig `mapstructure:"ants"` - DataRTConfig `mapstructure:"dataRT"` - RedisConfig `mapstructure:"redis"` - PostgresDBURI string `mapstructure:"-"` + BaseConfig `mapstructure:"base"` + PostgresConfig `mapstructure:"postgres"` + KafkaConfig `mapstructure:"kafka"` + LoggerConfig `mapstructure:"logger"` + AntsConfig `mapstructure:"ants"` + DataRTConfig `mapstructure:"dataRT"` + LockerRedisConfig RedisConfig `mapstructure:"locker_redis"` + StorageRedisConfig RedisConfig `mapstructure:"storage_redis"` + PostgresDBURI string `mapstructure:"-"` } // ReadAndInitConfig return modelRT project config struct diff --git a/config/config.yaml b/config/config.yaml index 55bc8be..57ac79c 100644 --- a/config/config.yaml +++ b/config/config.yaml @@ -37,13 +37,20 @@ ants: rtd_receive_concurrent_quantity: 10 # redis config -redis: +locker_redis: addr: "192.168.2.104:6379" password: "" db: 1 poolsize: 50 timeout: 10 +storage_redis: + addr: "192.168.2.104:6379" + password: "" + db: 0 + poolsize: 50 + timeout: 10 + # modelRT base config base: grid_id: 1 diff --git a/diagram/redis_hash.go b/diagram/redis_hash.go index 13b883d..4446a69 100644 --- a/diagram/redis_hash.go +++ b/diagram/redis_hash.go @@ -3,7 +3,6 @@ package diagram import ( "context" - distributed_lock "modelRT/distributedlock" locker "modelRT/distributedlock" "modelRT/logger" @@ -24,7 +23,7 @@ type RedisHash struct { func NewRedisHash(ctx context.Context, hashKey string, token string, lockLeaseTime uint64, needRefresh bool) *RedisHash { return &RedisHash{ ctx: ctx, - rwLocker: distributed_lock.InitRWLocker(hashKey, token, lockLeaseTime, needRefresh), + rwLocker: locker.InitRWLocker(hashKey, token, lockLeaseTime, needRefresh), storageClient: GetRedisClientInstance(), logger: logger.GetLoggerInstance(), } diff --git a/diagram/redis_init.go b/diagram/redis_init.go index 1ee3207..3aa0fa5 100644 --- a/diagram/redis_init.go +++ b/diagram/redis_init.go @@ -18,7 +18,7 @@ var ( // initClient define func of return successfully initialized redis client func initClient(rCfg config.RedisConfig) *redis.Client { client, err := util.NewRedisClient( - util.WithAddr(rCfg.Addr), + rCfg.Addr, util.WithPassword(rCfg.Password), util.WithDB(rCfg.DB), util.WithPoolSize(rCfg.PoolSize), diff --git a/diagram/redis_set.go b/diagram/redis_set.go index d70a8b5..dcfdeaa 100644 --- a/diagram/redis_set.go +++ b/diagram/redis_set.go @@ -4,7 +4,6 @@ import ( "context" "fmt" - distributed_lock "modelRT/distributedlock" locker "modelRT/distributedlock" "modelRT/logger" @@ -24,7 +23,7 @@ type RedisSet struct { func NewRedisSet(ctx context.Context, hashKey string, token string, lockLeaseTime uint64, needRefresh bool) *RedisSet { return &RedisSet{ ctx: ctx, - rwLocker: distributed_lock.InitRWLocker(hashKey, token, lockLeaseTime, needRefresh), + rwLocker: locker.InitRWLocker(hashKey, token, lockLeaseTime, needRefresh), storageClient: GetRedisClientInstance(), logger: logger.GetLoggerInstance(), } diff --git a/distributedlock/locker_client_init.go b/distributedlock/locker_init.go similarity index 93% rename from distributedlock/locker_client_init.go rename to distributedlock/locker_init.go index 2f1d82c..35ecc78 100644 --- a/distributedlock/locker_client_init.go +++ b/distributedlock/locker_init.go @@ -1,4 +1,4 @@ -package distributed_lock +package distributedlock import ( "sync" @@ -18,8 +18,9 @@ var ( // initClient define func of return successfully initialized redis 
client
 func initClient(rCfg config.RedisConfig) *redis.Client {
 	client, err := util.NewRedisClient(
-		util.WithAddr(rCfg.Addr),
+		rCfg.Addr,
 		util.WithPassword(rCfg.Password),
+		util.WithDB(rCfg.DB),
 		util.WithPoolSize(rCfg.PoolSize),
 		util.WithTimeout(time.Duration(rCfg.Timeout)*time.Second),
 	)
diff --git a/distributedlock/redis_lock.go b/distributedlock/redis_lock.go
index 08e34f4..0f6a5b9 100644
--- a/distributedlock/redis_lock.go
+++ b/distributedlock/redis_lock.go
@@ -1,4 +1,4 @@
-package distributed_lock
+package distributedlock

 import (
 	"context"
@@ -12,7 +12,7 @@ import (
 	luascript "modelRT/distributedlock/luascript"
 	"modelRT/logger"

-	uuid "github.com/google/uuid"
+	uuid "github.com/gofrs/uuid"
 	"github.com/redis/go-redis/v9"
 	"go.uber.org/zap"
 )
@@ -220,9 +220,14 @@ func (rl *redissionLocker) UnLock(ctx context.Context) error {
 	return nil
 }

+// TODO optimize away the panic here
 func GetLocker(client *redis.Client, ops *RedissionLockConfig) *redissionLocker {
 	if ops.Token == "" {
-		ops.Token = uuid.New().String()
+		token, err := uuid.NewV4()
+		if err != nil {
+			panic(err)
+		}
+		ops.Token = token.String()
 	}

 	if len(ops.Prefix) <= 0 {
diff --git a/distributedlock/redis_rwlock.go b/distributedlock/redis_rwlock.go
index afd1edb..d873b01 100644
--- a/distributedlock/redis_rwlock.go
+++ b/distributedlock/redis_rwlock.go
@@ -1,4 +1,4 @@
-package distributed_lock
+package distributedlock

 import (
 	"context"
@@ -12,7 +12,7 @@ import (
 	"modelRT/distributedlock/luascript"
 	"modelRT/logger"

-	uuid "github.com/google/uuid"
+	uuid "github.com/gofrs/uuid"
 	"github.com/redis/go-redis/v9"
 	"go.uber.org/zap"
 )
@@ -240,33 +240,38 @@ func (rl *RedissionRWLocker) UnWLock(ctx context.Context) error {
 	return nil
 }

-func GetRWLocker(client *redis.Client, ops *RedissionLockConfig) *RedissionRWLocker {
-	if ops.Token == "" {
-		ops.Token = uuid.New().String()
+// TODO optimize away the panic here
+func GetRWLocker(client *redis.Client, conf *RedissionLockConfig) *RedissionRWLocker {
+	if conf.Token == "" {
+		token, err := uuid.NewV4()
+		if err != nil {
+			panic(err)
+		}
+		conf.Token = token.String()
 	}

-	if ops.Prefix == "" {
-		ops.Prefix = "redission-rwlock"
+	if conf.Prefix == "" {
+		conf.Prefix = "redission-rwlock"
 	}

-	if ops.TimeoutPrefix == "" {
-		ops.TimeoutPrefix = "rwlock_timeout"
+	if conf.TimeoutPrefix == "" {
+		conf.TimeoutPrefix = "rwlock_timeout"
 	}

-	if ops.ChanPrefix == "" {
-		ops.ChanPrefix = "redission-rwlock-channel"
+	if conf.ChanPrefix == "" {
+		conf.ChanPrefix = "redission-rwlock-channel"
 	}

-	if ops.LockLeaseTime == 0 {
-		ops.LockLeaseTime = internalLockLeaseTime
+	if conf.LockLeaseTime == 0 {
+		conf.LockLeaseTime = internalLockLeaseTime
 	}

 	r := &redissionLocker{
-		token:         ops.Token,
-		key:           strings.Join([]string{ops.Prefix, ops.Key}, ":"),
-		needRefresh:   ops.NeedRefresh,
-		lockLeaseTime: ops.LockLeaseTime,
-		waitChanKey:   strings.Join([]string{ops.ChanPrefix, ops.Key, "write"}, ":"),
+		token:         conf.Token,
+		key:           strings.Join([]string{conf.Prefix, conf.Key}, ":"),
+		needRefresh:   conf.NeedRefresh,
+		lockLeaseTime: conf.LockLeaseTime,
+		waitChanKey:   strings.Join([]string{conf.ChanPrefix, conf.Key, "write"}, ":"),
 		client:        client,
 		exit:          make(chan struct{}),
 		once:          &sync.Once{},
@@ -275,18 +280,17 @@ func GetRWLocker(client *redis.Client, ops *RedissionLockConfig) *RedissionRWLoc
 	rwLocker := &RedissionRWLocker{
 		redissionLocker:      *r,
-		rwTokenTimeoutPrefix: ops.TimeoutPrefix,
+		rwTokenTimeoutPrefix: conf.TimeoutPrefix,
 	}
 	return rwLocker
 }
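An aside on the TODO being dropped just below: an options-mode constructor in the spirit of util/redis_options.go from the previous patch could replace the positional InitRWLocker signature. A sketch under that assumption (RWLockerOption and these option funcs are hypothetical, not part of this series):

// RWLockerOption mutates a RedissionLockConfig before the locker is built.
type RWLockerOption func(*RedissionLockConfig)

func WithToken(token string) RWLockerOption {
	return func(c *RedissionLockConfig) { c.Token = token }
}

func WithLeaseTime(seconds uint64) RWLockerOption {
	return func(c *RedissionLockConfig) { c.LockLeaseTime = seconds }
}

func WithRefresh() RWLockerOption {
	return func(c *RedissionLockConfig) { c.NeedRefresh = true }
}

func InitRWLockerWithOptions(key string, opts ...RWLockerOption) *RedissionRWLocker {
	conf := &RedissionLockConfig{Key: key}
	for _, opt := range opts {
		opt(conf)
	}
	// GetRWLocker fills in the remaining defaults (prefixes, lease time).
	return GetRWLocker(GetRedisClientInstance(), conf)
}

-// TODO consider refactoring to use options mode
 func InitRWLocker(key string, token string, lockLeaseTime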
uint64, needRefresh bool) *RedissionRWLocker { - ops := &RedissionLockConfig{ + conf := &RedissionLockConfig{ Key: key, Token: token, LockLeaseTime: lockLeaseTime, NeedRefresh: needRefresh, } - return GetRWLocker(GetRedisClientInstance(), ops) + return GetRWLocker(GetRedisClientInstance(), conf) } diff --git a/distributedlock/rwlock_test.go b/distributedlock/rwlock_test.go index 0ecb718..2ad045e 100644 --- a/distributedlock/rwlock_test.go +++ b/distributedlock/rwlock_test.go @@ -1,4 +1,4 @@ -package distributed_lock +package distributedlock import ( "context" diff --git a/go.mod b/go.mod index 5e07739..f1591bf 100644 --- a/go.mod +++ b/go.mod @@ -6,9 +6,7 @@ require ( github.com/bitly/go-simplejson v0.5.1 github.com/confluentinc/confluent-kafka-go v1.9.2 github.com/gin-gonic/gin v1.10.0 - github.com/go-redis/redis v6.15.9+incompatible github.com/gofrs/uuid v4.4.0+incompatible - github.com/google/uuid v1.4.0 github.com/gorilla/websocket v1.5.3 github.com/json-iterator/go v1.1.12 github.com/natefinch/lumberjack v2.0.0+incompatible @@ -62,8 +60,6 @@ require ( github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/onsi/ginkgo v1.16.5 // indirect - github.com/onsi/gomega v1.18.1 // indirect github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect diff --git a/go.sum b/go.sum index 376699e..2f08fc6 100644 --- a/go.sum +++ b/go.sum @@ -60,8 +60,6 @@ github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/gabriel-vasile/mimetype v1.4.7 h1:SKFKl7kD0RiPdbht0s7hFtjl489WcQ1VyPW8ZzUMYCA= @@ -89,9 +87,6 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.23.0 h1:/PwmTwZhS0dPkav3cdK9kV1FsAmrL8sThn8IHr/sO+o= github.com/go-playground/validator/v10 v10.23.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= -github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= -github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= @@ -125,12 +120,9 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.6.0 
h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20211008130755-947d60d73cc0/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= @@ -138,9 +130,7 @@ github.com/hamba/avro v1.5.6/go.mod h1:3vNT0RLXXpFm2Tb/5KC71ZRJlOroggq1Rcitb6k4F github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/heetch/avro v0.3.1/go.mod h1:4xn38Oz/+hiEUTpbVfGVLfvOg0yKLlRP7Q9+gJJILgA= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/invopop/jsonschema v0.4.0/go.mod h1:O9uiLokuu0+MGFlyiaqtWxwqJm41/+8Nj0lD7A36YH0= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= @@ -204,20 +194,6 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM= github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= github.com/nrwiersma/avro-benchmarks v0.0.0-20210913175520-21aec48c8f76/go.mod h1:iKyFMidsk/sVYONJRE372sJuX/QTRPacU7imPqqsu7g= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.18.1 
h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= -github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/panjf2000/ants/v2 v2.10.0 h1:zhRg1pQUtkyRiOFo2Sbqwjp0GfBNo9cUY2/Grpx1p+8= github.com/panjf2000/ants/v2 v2.10.0/go.mod h1:7ZxyxsqE4vvW0M7LSD8aI3cKwgFhBHbxnlN8mDqHa1I= github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= @@ -276,7 +252,6 @@ github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2 github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -300,13 +275,11 @@ golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -314,13 +287,11 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200505041828-1ed23360d12c/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod 
h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= @@ -332,31 +303,22 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -370,7 +332,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= @@ -382,7 +343,6 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= @@ -428,7 +388,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v1 v1.0.0/go.mod h1:CxwszS/Xz1C49Ucd2i6Zil5UToP1EmyrFhKaMVbg1mk= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/httprequest.v1 v1.2.1/go.mod h1:x2Otw96yda5+8+6ZeWwHIJTFkEHWP/qP8pJOzqEtWPM= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -436,14 +395,10 @@ gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3M gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/retry.v1 v1.0.3/go.mod h1:FJkXmWiMaAo7xB+xhvDF59zhfjDWyzmyAxiT4dB688g= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 
v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/main.go b/main.go
index db470ba..a0086cb 100644
--- a/main.go
+++ b/main.go
@@ -10,7 +10,7 @@ import (
 	"modelRT/config"
 	"modelRT/database"
 	"modelRT/diagram"
-	distributed_lock "modelRT/distributedlock"
+	locker "modelRT/distributedlock"
 	_ "modelRT/docs"
 	"modelRT/handler"
 	"modelRT/logger"
@@ -79,10 +79,10 @@ func main() {
 	}
 	defer parsePool.Release()
 
-	storageClient := diagram.InitClientInstance(modelRTConfig.RedisConfig)
+	storageClient := diagram.InitClientInstance(modelRTConfig.StorageRedisConfig)
 	defer storageClient.Close()
 
-	lockerClient := distributed_lock.InitClientInstance(modelRTConfig.RedisConfig)
+	lockerClient := locker.InitClientInstance(modelRTConfig.LockerRedisConfig)
 	defer lockerClient.Close()
 
 	// init anchor param ants pool
diff --git a/util/redis_init.go b/util/redis_init.go
new file mode 100644
index 0000000..441f4c4
--- /dev/null
+++ b/util/redis_init.go
@@ -0,0 +1,38 @@
+package util
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/redis/go-redis/v9"
+)
+
+// NewRedisClient defines a func that initializes the Redis client
+func NewRedisClient(addr string, opts ...RedisOption) (*redis.Client, error) {
+	// default options
+	options := RedisOptions{
+		redisOptions: &redis.Options{
+			Addr: addr,
+		},
+	}
+
+	// apply configuration options from config; every RedisOption can fail
+	// validation, so its error must not be silently discarded
+	for _, opt := range opts {
+		if err := opt(&options); err != nil {
+			return nil, err
+		}
+	}
+
+	// create redis client
+	client := redis.NewClient(options.redisOptions)
+
+	if options.timeout > 0 {
+		// check if the connection is successful
+		ctx, cancel := context.WithTimeout(context.Background(), options.timeout)
+		defer cancel()
+		if err := client.Ping(ctx).Err(); err != nil {
+			return nil, fmt.Errorf("can not connect redis:%w", err)
+		}
+	}
+	return client, nil
+}
diff --git a/util/redis_options.go b/util/redis_options.go
index 79e9115..23ed270 100644
--- a/util/redis_options.go
+++ b/util/redis_options.go
@@ -1,41 +1,37 @@
 package util
 
 import (
-	"context"
 	"errors"
-	"fmt"
 	"time"
 
 	"github.com/redis/go-redis/v9"
 )
 
-// RedisOptions define struct of redis client config options
 type RedisOptions struct {
-	Addr     string
-	Password string
-	DB       int
-	PoolSize int
-	Timeout  time.Duration
+	redisOptions *redis.Options
+	timeout      time.Duration
 }
 
-// RedisOption define a function type for modify RedisOptions
 type RedisOption func(*RedisOptions) error
 
-// WithAddr define func of configure redis addr options
-func WithAddr(addr string) RedisOption {
-	return func(o *RedisOptions) error {
-		if addr == "" {
-			return errors.New("地址不能为空")
-		}
-		o.Addr = addr
-		return nil
-	}
-}
-
 // WithPassword define func of configure redis password options
 func WithPassword(password string) RedisOption {
 	return func(o *RedisOptions) error {
-		o.Password = password
+		if password == "" {
+			return errors.New("password is empty")
+		}
+		o.redisOptions.Password = password
+		return nil
+	}
+}
+
+// WithTimeout defines a func that configures the redis timeout option
+func WithTimeout(timeout time.Duration) RedisOption {
+	return func(o *RedisOptions) error {
+		if timeout < 0 {
+			return errors.New("timeout can not be negative")
+		}
+		o.timeout = timeout
 		return nil
 	}
 }
@@ -44,9 +40,9 @@ func WithPassword(password string) RedisOption {
 func WithDB(db int) RedisOption {
 	return func(o *RedisOptions) error {
 		if db < 0 {
-			return errors.New("数据库编号不能为负数")
+			return errors.New("db can not be negative")
 		}
-		o.DB = db
+		o.redisOptions.DB = db
 		return nil
 	}
 }
@@ -55,55 +51,9 @@ func
WithPoolSize(poolSize int) RedisOption { return func(o *RedisOptions) error { if poolSize <= 0 { - return errors.New("连接池大小必须大于 0") + return errors.New("pool size must be greater than 0") } - o.PoolSize = poolSize + o.redisOptions.PoolSize = poolSize return nil } } - -// WithTimeout define func of configure timeout options -func WithTimeout(timeout time.Duration) RedisOption { - return func(o *RedisOptions) error { - if timeout <= 0 { - return errors.New("超时时间必须大于 0") - } - o.Timeout = timeout - return nil - } -} - -// NewRedisClient define func of initialize the Redis client -func NewRedisClient(opts ...RedisOption) (*redis.Client, error) { - // default options - options := &RedisOptions{ - Addr: "localhost:6379", - Password: "", - DB: 0, - PoolSize: 10, - Timeout: 5 * time.Second, - } - - // Apply configuration options from config - for _, opt := range opts { - if err := opt(options); err != nil { - return nil, err - } - } - - // create redis client - client := redis.NewClient(&redis.Options{ - Addr: options.Addr, - Password: options.Password, - DB: options.DB, - PoolSize: options.PoolSize, - }) - - // check if the connection is successful - ctx, cancel := context.WithTimeout(context.Background(), options.Timeout) - defer cancel() - if err := client.Ping(ctx).Err(); err != nil { - return nil, fmt.Errorf("can not connect redis:%v", err) - } - return client, nil -} From 182f8ac634ba9418eb888972ff8b882d01ee8311 Mon Sep 17 00:00:00 2001 From: douxu Date: Fri, 28 Mar 2025 16:48:56 +0800 Subject: [PATCH 16/33] add redis lock test of rwlocker --- distributedlock/rwlock_test.go | 102 +++++++++++++++++++++++++++++++++ 1 file changed, 102 insertions(+) diff --git a/distributedlock/rwlock_test.go b/distributedlock/rwlock_test.go index 2ad045e..d71241b 100644 --- a/distributedlock/rwlock_test.go +++ b/distributedlock/rwlock_test.go @@ -385,3 +385,105 @@ func TestRWLockReentrantWLock(t *testing.T) { t.Log("rwLock reentrant lock test success") return } + +// TODO 设计两个客户端分别加读锁与写锁 +func TestRWLock2CWithRLockAndWLock(t *testing.T) { + ctx := context.TODO() + rdb := redis.NewClient(&redis.Options{ + Network: "tcp", + Addr: "192.168.2.104:6379", + Password: "cnstar", + PoolSize: 50, + DialTimeout: 10 * time.Second, + }) + + rwLocker1 := GetRWLocker(rdb, &RedissionLockConfig{ + LockLeaseTime: 120, + NeedRefresh: true, + Key: "component", + Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", + }) + rwLocker1.logger = log + + rwLocker2 := GetRWLocker(rdb, &RedissionLockConfig{ + LockLeaseTime: 30, + NeedRefresh: true, + Key: "component", + Token: "fd348a84-e07c-4a61-8c19-f753e6bc5577", + }) + rwLocker2.logger = log + + duration := 10 * time.Second + // locker1加读锁 + err := rwLocker1.RLock(ctx, duration) + assert.Equal(t, nil, err) + + tokenKey1 := strings.Join([]string{rwLocker1.rwTokenTimeoutPrefix, rwLocker1.token}, ":") + num, err := rdb.HGet(ctx, rwLocker1.key, tokenKey1).Int() + assert.Equal(t, nil, err) + assert.Equal(t, 1, num) + + // locker2加写锁锁 + duration = 2 * time.Second + err = rwLocker2.WLock(ctx, duration) + // 预测加写锁失败 + assert.Equal(t, nil, err) + + // locker1解写锁 + err = rwLocker1.UnRLock(ctx) + assert.Equal(t, nil, err) + + t.Log("rwLock 2 client lock test success") + return +} + +// TODO 设计两个客户端分别加读锁与写锁 +func TestRWLock2CWithWLockAndRLock(t *testing.T) { + ctx := context.TODO() + rdb := redis.NewClient(&redis.Options{ + Network: "tcp", + Addr: "192.168.2.104:6379", + Password: "cnstar", + PoolSize: 50, + DialTimeout: 10 * time.Second, + }) + + rwLocker1 := GetRWLocker(rdb, &RedissionLockConfig{ + 
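+		// The two fields below are read from how the surrounding tests use them:
+		// LockLeaseTime appears to be a lease expressed in seconds (later TTL
+		// assertions are of the same magnitude), and NeedRefresh enables the
+		// background goroutine that keeps extending the lease while it is held.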
LockLeaseTime: 120, + NeedRefresh: true, + Key: "component", + Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", + }) + rwLocker1.logger = log + + rwLocker2 := GetRWLocker(rdb, &RedissionLockConfig{ + LockLeaseTime: 30, + NeedRefresh: true, + Key: "component", + Token: "fd348a84-e07c-4a61-8c19-f753e6bc5577", + }) + rwLocker2.logger = log + + duration := 10 * time.Second + // locker1加读锁 + err := rwLocker1.WLock(ctx, duration) + assert.Equal(t, nil, err) + + tokenKey1 := strings.Join([]string{rwLocker1.rwTokenTimeoutPrefix, rwLocker1.token}, ":") + num, err := rdb.HGet(ctx, rwLocker1.key, tokenKey1).Int() + assert.Equal(t, nil, err) + assert.Equal(t, 1, num) + + // locker2加读锁 + duration = 2 * time.Second + err = rwLocker2.RLock(ctx, duration) + // TODO 预测加读锁失败 + assert.Equal(t, nil, err) + + // locker1解写锁 + err = rwLocker1.UnWLock(ctx) + assert.Equal(t, nil, err) + + t.Log("rwLock 2 client lock test success") + return +} From ae064236c76bce1ba70ee5495864304d58f9aa23 Mon Sep 17 00:00:00 2001 From: douxu Date: Tue, 1 Apr 2025 16:20:55 +0800 Subject: [PATCH 17/33] add redis lock refresh test of rwlocker --- distributedlock/rwlock_test.go | 115 +++++++++++++++++++++++++++++---- 1 file changed, 103 insertions(+), 12 deletions(-) diff --git a/distributedlock/rwlock_test.go b/distributedlock/rwlock_test.go index d71241b..ffc9bee 100644 --- a/distributedlock/rwlock_test.go +++ b/distributedlock/rwlock_test.go @@ -159,7 +159,6 @@ func TestRWLockRefreshRLock(t *testing.T) { return } -// TODO 设计两个客户端分别加读锁,测试是否可以加锁成功 func TestRWLock2ClientRLock(t *testing.T) { ctx := context.TODO() rdb := redis.NewClient(&redis.Options{ @@ -208,25 +207,24 @@ func TestRWLock2ClientRLock(t *testing.T) { err = rdb.HLen(ctx, rwLocker1.key).Err() assert.Equal(t, nil, err) hLen := rdb.HLen(ctx, rwLocker1.key).Val() - assert.Equal(t, 3, hLen) + assert.Equal(t, int64(3), hLen) // locker1解读锁 err = rwLocker1.UnRLock(ctx) assert.Equal(t, nil, err) - // locker1解读锁 + // locker2解读锁 err = rwLocker2.UnRLock(ctx) assert.Equal(t, nil, err) err = rdb.Exists(ctx, rwLocker1.key).Err() - assert.Equal(t, redis.Nil, err) + assert.Equal(t, nil, err) existNum := rdb.Exists(ctx, rwLocker1.key).Val() - assert.Equal(t, 0, existNum) + assert.Equal(t, int64(0), existNum) t.Log("rwLock 2 client lock test success") return } -// TODO 设计两个客户端分别加时间不同的读锁,测试ttl时间在有一个key删除后是否可以变换成功 func TestRWLock2CWith2DifTimeRLock(t *testing.T) { ctx := context.TODO() rdb := redis.NewClient(&redis.Options{ @@ -275,20 +273,113 @@ func TestRWLock2CWith2DifTimeRLock(t *testing.T) { err = rdb.HLen(ctx, rwLocker1.key).Err() assert.Equal(t, nil, err) hLen := rdb.HLen(ctx, rwLocker1.key).Val() - assert.Equal(t, 3, hLen) + assert.Equal(t, int64(3), hLen) + + script := `return redis.call('httl', KEYS[1], 'fields', '1', ARGV[1]);` + result, err := rdb.Eval(ctx, script, []string{rwLocker1.key}, tokenKey1).Result() + assert.Equal(t, nil, err) + ttls, ok := result.([]interface{}) + assert.Equal(t, true, ok) + ttl, ok := ttls[0].(int64) + assert.Equal(t, true, ok) + compareValue := int64(110) + assert.Greater(t, ttl, compareValue) // locker1解读锁 err = rwLocker1.UnRLock(ctx) assert.Equal(t, nil, err) - // locker1解读锁 + hashTTL := rdb.TTL(ctx, rwLocker1.key).Val().Seconds() + assert.Greater(t, hashTTL, float64(20)) + + // locker2解读锁 err = rwLocker2.UnRLock(ctx) assert.Equal(t, nil, err) err = rdb.Exists(ctx, rwLocker1.key).Err() - assert.Equal(t, redis.Nil, err) + assert.Equal(t, nil, err) existNum := rdb.Exists(ctx, rwLocker1.key).Val() - assert.Equal(t, 0, existNum) + assert.Equal(t, 
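+	// testify's assert.Equal compares via reflect.DeepEqual, so the expected
+	// value must be an int64 to match what Val() returns; an untyped 0 (int)
+	// would fail the assertion even when the count really is zero.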
int64(0), existNum) + t.Log("rwLock 2 client lock test success") + return +} + +// TODO 设计两个客户端分别加时间不同的读锁,测试ttl时间在有一个key删除后是否可以变换成功 +func TestRWLock2CWithTimeTransformRLock(t *testing.T) { + ctx := context.TODO() + rdb := redis.NewClient(&redis.Options{ + Network: "tcp", + Addr: "192.168.2.104:6379", + Password: "cnstar", + PoolSize: 50, + DialTimeout: 10 * time.Second, + }) + + rwLocker1 := GetRWLocker(rdb, &RedissionLockConfig{ + LockLeaseTime: 120, + NeedRefresh: true, + Key: "component", + Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", + }) + rwLocker1.logger = log + + rwLocker2 := GetRWLocker(rdb, &RedissionLockConfig{ + LockLeaseTime: 30, + NeedRefresh: true, + Key: "component", + Token: "fd348a84-e07c-4a61-8c19-f753e6bc5577", + }) + rwLocker2.logger = log + + duration := 10 * time.Second + // locker1加读锁 + err := rwLocker1.RLock(ctx, duration) + assert.Equal(t, nil, err) + + tokenKey1 := strings.Join([]string{rwLocker1.rwTokenTimeoutPrefix, rwLocker1.token}, ":") + num, err := rdb.HGet(ctx, rwLocker1.key, tokenKey1).Int() + assert.Equal(t, nil, err) + assert.Equal(t, 1, num) + + // locker2加读锁 + err = rwLocker2.RLock(ctx, duration) + assert.Equal(t, nil, err) + + tokenKey2 := strings.Join([]string{rwLocker2.rwTokenTimeoutPrefix, rwLocker2.token}, ":") + num, err = rdb.HGet(ctx, rwLocker2.key, tokenKey2).Int() + assert.Equal(t, nil, err) + assert.Equal(t, 1, num) + + err = rdb.HLen(ctx, rwLocker1.key).Err() + assert.Equal(t, nil, err) + hLen := rdb.HLen(ctx, rwLocker1.key).Val() + assert.Equal(t, int64(3), hLen) + + script := `return redis.call('httl', KEYS[1], 'fields', '1', ARGV[1]);` + result, err := rdb.Eval(ctx, script, []string{rwLocker1.key}, tokenKey1).Result() + assert.Equal(t, nil, err) + ttls, ok := result.([]interface{}) + assert.Equal(t, true, ok) + ttl, ok := ttls[0].(int64) + assert.Equal(t, true, ok) + compareValue := int64(110) + assert.Greater(t, ttl, compareValue) + + // locker1解读锁 + err = rwLocker1.UnRLock(ctx) + assert.Equal(t, nil, err) + + hashTTL := rdb.TTL(ctx, rwLocker1.key).Val().Seconds() + assert.Greater(t, hashTTL, float64(20)) + + // locker2解读锁 + err = rwLocker2.UnRLock(ctx) + assert.Equal(t, nil, err) + + err = rdb.Exists(ctx, rwLocker1.key).Err() + assert.Equal(t, nil, err) + existNum := rdb.Exists(ctx, rwLocker1.key).Val() + assert.Equal(t, int64(0), existNum) t.Log("rwLock 2 client lock test success") return } @@ -386,7 +477,7 @@ func TestRWLockReentrantWLock(t *testing.T) { return } -// TODO 设计两个客户端分别加读锁与写锁 +// TODO 设计两个客户端,C1先加读锁与C2后加写锁 func TestRWLock2CWithRLockAndWLock(t *testing.T) { ctx := context.TODO() rdb := redis.NewClient(&redis.Options{ @@ -437,7 +528,7 @@ func TestRWLock2CWithRLockAndWLock(t *testing.T) { return } -// TODO 设计两个客户端分别加读锁与写锁 +// TODO 设计两个客户端,C1先加写锁与C2后加读锁 func TestRWLock2CWithWLockAndRLock(t *testing.T) { ctx := context.TODO() rdb := redis.NewClient(&redis.Options{ From b27b999873ec599c00bbc618173f19a26ad213ca Mon Sep 17 00:00:00 2001 From: douxu Date: Wed, 2 Apr 2025 16:47:51 +0800 Subject: [PATCH 18/33] add redis read and write lock conflict test of rwlocker --- diagram/hash_test.go | 33 +++++++++ distributedlock/constant/lock_err.go | 6 ++ distributedlock/luascript/rwlock_script.go | 4 +- distributedlock/redis_rwlock.go | 18 ++--- distributedlock/rwlock_test.go | 78 ++++++++++------------ 5 files changed, 86 insertions(+), 53 deletions(-) create mode 100644 diagram/hash_test.go create mode 100644 distributedlock/constant/lock_err.go diff --git a/diagram/hash_test.go b/diagram/hash_test.go new file mode 100644 index 
0000000..ed320f3 --- /dev/null +++ b/diagram/hash_test.go @@ -0,0 +1,33 @@ +package diagram + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/redis/go-redis/v9" +) + +func TestHMSet(t *testing.T) { + rdb := redis.NewClient(&redis.Options{ + Network: "tcp", + Addr: "192.168.2.104:6379", + Password: "cnstar", + PoolSize: 50, + DialTimeout: 10 * time.Second, + }) + params := map[string]interface{}{ + "field1": "Hello1", + "field2": "World1", + "field3": 11, + } + + ctx := context.Background() + res, err := rdb.HSet(ctx, "myhash", params).Result() + if err != nil { + fmt.Printf("err:%v\n", err) + } + fmt.Printf("res:%v\n", res) + return +} diff --git a/distributedlock/constant/lock_err.go b/distributedlock/constant/lock_err.go new file mode 100644 index 0000000..fd831f2 --- /dev/null +++ b/distributedlock/constant/lock_err.go @@ -0,0 +1,6 @@ +package constant + +import "errors" + +// AcquireTimeoutErr define error of get lock timeout +var AcquireTimeoutErr = errors.New("the waiting time for obtaining the lock operation has timed out") diff --git a/distributedlock/luascript/rwlock_script.go b/distributedlock/luascript/rwlock_script.go index a103ecc..ac07244 100644 --- a/distributedlock/luascript/rwlock_script.go +++ b/distributedlock/luascript/rwlock_script.go @@ -122,8 +122,6 @@ else -- 优先写锁加锁,无写锁的情况通知读锁加锁 local counter = redis.call('llen',writeWait); if (counter >= 1) then - redis.call('publish', KEYS[4], ARGV[1]); - else redis.call('publish', KEYS[3], ARGV[1]); end; return 1; @@ -157,7 +155,7 @@ if (mode == false) then return 1; elseif (mode == 'read') then -- 放到 list 中等待读锁释放后再次尝试加锁并且订阅读锁释放的消息 - redis.call('rpush', waitkey, ARGV[2]); + redis.call('rpush', waitKey, ARGV[2]); return -3; else -- 可重入写锁逻辑 diff --git a/distributedlock/redis_rwlock.go b/distributedlock/redis_rwlock.go index d873b01..66bfc28 100644 --- a/distributedlock/redis_rwlock.go +++ b/distributedlock/redis_rwlock.go @@ -40,7 +40,7 @@ func (rl *RedissionRWLocker) RLock(ctx context.Context, timeout ...time.Duration go rl.refreshLockTimeout(ctx) }) } - rl.logger.Info("success get the read by key and token", zap.String("key", rl.key), zap.String("token", rl.token)) + rl.logger.Info("success get the read lock by key and token", zap.String("key", rl.key), zap.String("token", rl.token)) return nil } @@ -161,11 +161,14 @@ func (rl *RedissionRWLocker) WLock(ctx context.Context, timeout ...time.Duration return fmt.Errorf("get write lock failed:%w", result) } - if (result.Code == constant.LockSuccess) && rl.needRefresh { - rl.once.Do(func() { - // async refresh lock timeout unitl receive exit singal - go rl.refreshLockTimeout(ctx) - }) + if result.Code == constant.LockSuccess { + if rl.needRefresh { + rl.once.Do(func() { + // async refresh lock timeout unitl receive exit singal + go rl.refreshLockTimeout(ctx) + }) + } + rl.logger.Info("success get the write lock by key and token", zap.String("key", rl.key), zap.String("token", rl.token)) return nil } @@ -197,9 +200,8 @@ func (rl *RedissionRWLocker) WLock(ctx context.Context, timeout ...time.Duration return nil } case <-acquireTimer.C: - err := errors.New("the waiting time for obtaining the write lock operation has timed out") rl.logger.Info("the waiting time for obtaining the write lock operation has timed out") - return err + return constant.AcquireTimeoutErr } } } diff --git a/distributedlock/rwlock_test.go b/distributedlock/rwlock_test.go index ffc9bee..c64cccc 100644 --- a/distributedlock/rwlock_test.go +++ b/distributedlock/rwlock_test.go @@ -2,10 +2,13 @@ 
package distributedlock import ( "context" + "fmt" "strings" "testing" "time" + "modelRT/distributedlock/constant" + "github.com/redis/go-redis/v9" "github.com/stretchr/testify/assert" "go.uber.org/zap" @@ -21,8 +24,7 @@ func TestRWLockRLockAndUnRLock(t *testing.T) { ctx := context.TODO() rdb := redis.NewClient(&redis.Options{ Network: "tcp", - Addr: "192.168.2.104:6379", - Password: "cnstar", + Addr: "192.168.2.104:30001", PoolSize: 50, DialTimeout: 10 * time.Second, }) @@ -59,8 +61,7 @@ func TestRWLockReentrantRLock(t *testing.T) { ctx := context.TODO() rdb := redis.NewClient(&redis.Options{ Network: "tcp", - Addr: "192.168.2.104:6379", - Password: "cnstar", + Addr: "192.168.2.104:30001", PoolSize: 50, DialTimeout: 10 * time.Second, }) @@ -114,8 +115,7 @@ func TestRWLockRefreshRLock(t *testing.T) { ctx := context.TODO() rdb := redis.NewClient(&redis.Options{ Network: "tcp", - Addr: "192.168.2.104:6379", - Password: "cnstar", + Addr: "192.168.2.104:30001", PoolSize: 50, DialTimeout: 10 * time.Second, }) @@ -163,8 +163,7 @@ func TestRWLock2ClientRLock(t *testing.T) { ctx := context.TODO() rdb := redis.NewClient(&redis.Options{ Network: "tcp", - Addr: "192.168.2.104:6379", - Password: "cnstar", + Addr: "192.168.2.104:30001", PoolSize: 50, DialTimeout: 10 * time.Second, }) @@ -229,8 +228,7 @@ func TestRWLock2CWith2DifTimeRLock(t *testing.T) { ctx := context.TODO() rdb := redis.NewClient(&redis.Options{ Network: "tcp", - Addr: "192.168.2.104:6379", - Password: "cnstar", + Addr: "192.168.2.104:30001", PoolSize: 50, DialTimeout: 10 * time.Second, }) @@ -304,19 +302,17 @@ func TestRWLock2CWith2DifTimeRLock(t *testing.T) { return } -// TODO 设计两个客户端分别加时间不同的读锁,测试ttl时间在有一个key删除后是否可以变换成功 func TestRWLock2CWithTimeTransformRLock(t *testing.T) { ctx := context.TODO() rdb := redis.NewClient(&redis.Options{ Network: "tcp", - Addr: "192.168.2.104:6379", - Password: "cnstar", + Addr: "192.168.2.104:30001", PoolSize: 50, DialTimeout: 10 * time.Second, }) rwLocker1 := GetRWLocker(rdb, &RedissionLockConfig{ - LockLeaseTime: 120, + LockLeaseTime: 30, NeedRefresh: true, Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", @@ -324,7 +320,7 @@ func TestRWLock2CWithTimeTransformRLock(t *testing.T) { rwLocker1.logger = log rwLocker2 := GetRWLocker(rdb, &RedissionLockConfig{ - LockLeaseTime: 30, + LockLeaseTime: 120, NeedRefresh: true, Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc5577", @@ -355,27 +351,21 @@ func TestRWLock2CWithTimeTransformRLock(t *testing.T) { hLen := rdb.HLen(ctx, rwLocker1.key).Val() assert.Equal(t, int64(3), hLen) - script := `return redis.call('httl', KEYS[1], 'fields', '1', ARGV[1]);` - result, err := rdb.Eval(ctx, script, []string{rwLocker1.key}, tokenKey1).Result() - assert.Equal(t, nil, err) - ttls, ok := result.([]interface{}) - assert.Equal(t, true, ok) - ttl, ok := ttls[0].(int64) - assert.Equal(t, true, ok) - compareValue := int64(110) - assert.Greater(t, ttl, compareValue) - - // locker1解读锁 - err = rwLocker1.UnRLock(ctx) - assert.Equal(t, nil, err) - - hashTTL := rdb.TTL(ctx, rwLocker1.key).Val().Seconds() - assert.Greater(t, hashTTL, float64(20)) + hashTTL := rdb.TTL(ctx, rwLocker2.key).Val().Seconds() + assert.Greater(t, hashTTL, float64(100)) // locker2解读锁 err = rwLocker2.UnRLock(ctx) assert.Equal(t, nil, err) + time.Sleep(10 * time.Second) + hashTTL = rdb.TTL(ctx, rwLocker1.key).Val().Seconds() + assert.Greater(t, hashTTL, float64(15)) + + // locker1解读锁 + err = rwLocker1.UnRLock(ctx) + assert.Equal(t, nil, err) + err = rdb.Exists(ctx, rwLocker1.key).Err() 
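+	// go-redis Exists reports a count of the keys that exist rather than
+	// returning redis.Nil, so a nil error plus a zero count is the expected
+	// signature of a fully released lock hash.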
assert.Equal(t, nil, err) existNum := rdb.Exists(ctx, rwLocker1.key).Val() @@ -388,8 +378,7 @@ func TestRWLockWLockAndUnWLock(t *testing.T) { ctx := context.TODO() rdb := redis.NewClient(&redis.Options{ Network: "tcp", - Addr: "192.168.2.104:6379", - Password: "cnstar", + Addr: "192.168.2.104:30001", PoolSize: 50, DialTimeout: 10 * time.Second, }) @@ -426,8 +415,7 @@ func TestRWLockReentrantWLock(t *testing.T) { ctx := context.TODO() rdb := redis.NewClient(&redis.Options{ Network: "tcp", - Addr: "192.168.2.104:6379", - Password: "cnstar", + Addr: "192.168.2.104:30001", PoolSize: 50, DialTimeout: 10 * time.Second, }) @@ -482,8 +470,7 @@ func TestRWLock2CWithRLockAndWLock(t *testing.T) { ctx := context.TODO() rdb := redis.NewClient(&redis.Options{ Network: "tcp", - Addr: "192.168.2.104:6379", - Password: "cnstar", + Addr: "192.168.2.104:30001", PoolSize: 50, DialTimeout: 10 * time.Second, }) @@ -514,13 +501,21 @@ func TestRWLock2CWithRLockAndWLock(t *testing.T) { assert.Equal(t, nil, err) assert.Equal(t, 1, num) + // go func() { + // // locker1解写锁 + // time.Sleep(10 * time.Second) + // err = rwLocker1.UnRLock(ctx) + // assert.Equal(t, nil, err) + // }() + // locker2加写锁锁 - duration = 2 * time.Second + duration = 10 * time.Second err = rwLocker2.WLock(ctx, duration) // 预测加写锁失败 - assert.Equal(t, nil, err) + // TODO 优化输出 + fmt.Printf("wlock err:%v\n", err) + assert.Equal(t, constant.AcquireTimeoutErr, err) - // locker1解写锁 err = rwLocker1.UnRLock(ctx) assert.Equal(t, nil, err) @@ -533,8 +528,7 @@ func TestRWLock2CWithWLockAndRLock(t *testing.T) { ctx := context.TODO() rdb := redis.NewClient(&redis.Options{ Network: "tcp", - Addr: "192.168.2.104:6379", - Password: "cnstar", + Addr: "192.168.2.104:30001", PoolSize: 50, DialTimeout: 10 * time.Second, }) From e4d45016f252bca19ccb3345e4ebe4f60583bd6c Mon Sep 17 00:00:00 2001 From: douxu Date: Thu, 3 Apr 2025 17:22:40 +0800 Subject: [PATCH 19/33] fix bug of redis read and write lock conflict test of rwlocker --- distributedlock/constant/redis_result.go | 4 +- distributedlock/luascript/rwlock_script.go | 17 ++- distributedlock/redis_lock.go | 35 ++++- distributedlock/redis_rwlock.go | 13 +- distributedlock/rwlock_test.go | 141 ++++++++++----------- 5 files changed, 115 insertions(+), 95 deletions(-) diff --git a/distributedlock/constant/redis_result.go b/distributedlock/constant/redis_result.go index 7c689fe..18eb21d 100644 --- a/distributedlock/constant/redis_result.go +++ b/distributedlock/constant/redis_result.go @@ -78,9 +78,9 @@ func NewRedisResult(res RedisCode, lockType RedisLockType, redisMsg string) erro case -4: return &RedisResult{Code: res, Message: "redis lock write lock failure,the lock is already occupied by anthor processes write lock"} case -5: - return &RedisResult{Code: res, Message: "redis un lock write lock failure,the lock is already occupied by another processes read lock"} + return &RedisResult{Code: res, Message: "redis unlock write lock failure,the lock is already occupied by another processes read lock"} case -6: - return &RedisResult{Code: res, Message: "redis un lock write lock failure,the lock is already occupied by another processes write lock"} + return &RedisResult{Code: res, Message: "redis unlock write lock failure,the lock is already occupied by another processes write lock"} case -7: return &RedisResult{Code: res, Message: "redis lock write lock failure,the first priority in the current process non-waiting queue"} case -8: diff --git a/distributedlock/luascript/rwlock_script.go 
b/distributedlock/luascript/rwlock_script.go index ac07244..3af9e27 100644 --- a/distributedlock/luascript/rwlock_script.go +++ b/distributedlock/luascript/rwlock_script.go @@ -60,7 +60,7 @@ end; /* KEYS[1]:锁的键名(key),通常是锁的唯一标识。 KEYS[2]:锁的超时键名前缀(rwTimeoutPrefix),用于存储每个读锁的超时键。 -KEYS[3]:锁的释放通知写频道(chankey),用于通知其他客户端锁已释放。 +KEYS[3]:锁的释放通知写频道(chankey),用于通知其他写等待客户端锁已释放。 ARGV[1]:解锁消息(unlockMessage),用于通知其他客户端锁已释放。 ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 */ @@ -69,10 +69,10 @@ local lockKey = KEYS[2] .. ':' .. ARGV[2]; local mode = redis.call('hget', KEYS[1], 'mode'); if (mode == false) then local writeWait = KEYS[1] .. ':write'; - -- 优先写锁加锁,无写锁的情况通知读锁加锁 + -- 优先写锁加锁 local counter = redis.call('llen',writeWait); if (counter >= 1) then - redis.call('publish', KEYS[4], ARGV[1]); + redis.call('publish', KEYS[3], ARGV[1]); end; return 1; elseif (mode == 'write') then @@ -119,7 +119,7 @@ if (redis.call('hlen', KEYS[1]) > 1) then else redis.call('del', KEYS[1]); local writeWait = KEYS[1] .. ':write'; - -- 优先写锁加锁,无写锁的情况通知读锁加锁 + -- 优先写锁加锁 local counter = redis.call('llen',writeWait); if (counter >= 1) then redis.call('publish', KEYS[3], ARGV[1]); @@ -178,7 +178,8 @@ end; /* KEYS[1]:锁的键名(key),通常是锁的唯一标识。 KEYS[2]:锁的超时键名前缀(rwTimeoutPrefix),用于存储每个读锁的超时键。 -KEYS[3]:锁的释放通知写频道(chankey),用于通知其他客户端锁已释放。 +KEYS[3]:锁的释放通知写频道(writeChankey),用于通知其他写等待客户端锁已释放。 +KEYS[4]:锁的释放通知读频道(readChankey),用于通知其他读等待客户端锁已释放。 ARGV[1]:解锁消息(unlockMessage),用于通知其他客户端锁已释放。 ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 */ @@ -190,6 +191,8 @@ if (mode == false) then local counter = redis.call('llen',writeWait); if (counter >= 1) then redis.call('publish', KEYS[3], ARGV[1]); + else + redis.call('publish', KEYS[4], ARGV[1]); end; return 1; elseif (mode == 'read') then @@ -204,9 +207,9 @@ else redis.call('del', KEYS[1]); local counter = redis.call('llen',writeWait); if (counter >= 1) then - redis.call('publish', KEYS[4], ARGV[1]); - else redis.call('publish', KEYS[3], ARGV[1]); + else + redis.call('publish', KEYS[4], ARGV[1]); end; return 1; end; diff --git a/distributedlock/redis_lock.go b/distributedlock/redis_lock.go index 0f6a5b9..567160c 100644 --- a/distributedlock/redis_lock.go +++ b/distributedlock/redis_lock.go @@ -100,23 +100,42 @@ func (rl *redissionLocker) Lock(ctx context.Context, timeout ...time.Duration) e return fmt.Errorf("lock the redis lock failed:%w", result) } +// TODO 优化订阅流程 func (rl *redissionLocker) subscribeLock(ctx context.Context, sub *redis.PubSub, out chan struct{}) { if sub == nil || out == nil { return } rl.logger.Info("lock: enter sub routine", zap.String("token", rl.token)) - for { - msg, err := sub.Receive(ctx) - if err != nil { - rl.logger.Info("sub receive message failed", zap.Error(err)) - continue - } + // subCh := sub.Channel() + // for msg := range subCh { + // // 这里只会收到真正的数据消息 + // fmt.Printf("Channel: %s, Payload: %s\n", + // msg.Channel, + // msg.Payload) + // } + receiveChan := make(chan interface{}, 2) + go func() { + for { + msg, err := sub.Receive(ctx) + if err != nil { + if errors.Is(err, redis.ErrClosed) { + return + } + rl.logger.Error("sub receive message failed", zap.Error(err)) + continue + } + rl.logger.Info("sub receive message", zap.Any("msg", msg)) + receiveChan <- msg + } + }() + + for { select { case <-rl.exit: return - default: + case msg := <-receiveChan: switch msg.(type) { case *redis.Subscription: // Ignore. 
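(Moving sub.Receive into its own goroutine is what lets the select above drop its old busy `default:` branch: the waiter now parks on receiveChan or the exit channel, so an exit request no longer has to wait for the next pub/sub frame before it is noticed.)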
@@ -126,6 +145,8 @@ func (rl *redissionLocker) subscribeLock(ctx context.Context, sub *redis.PubSub, out <- struct{}{} default: } + // case <-subCh: + // out <- struct{}{} } } } diff --git a/distributedlock/redis_rwlock.go b/distributedlock/redis_rwlock.go index 66bfc28..af8e797 100644 --- a/distributedlock/redis_rwlock.go +++ b/distributedlock/redis_rwlock.go @@ -19,6 +19,8 @@ import ( type RedissionRWLocker struct { redissionLocker + writeWaitChanKey string + readWaitChanKey string rwTokenTimeoutPrefix string } @@ -46,7 +48,7 @@ func (rl *RedissionRWLocker) RLock(ctx context.Context, timeout ...time.Duration subMsg := make(chan struct{}, 1) defer close(subMsg) - sub := rl.client.Subscribe(ctx, rl.waitChanKey) + sub := rl.client.Subscribe(ctx, rl.readWaitChanKey) defer sub.Close() go rl.subscribeLock(ctx, sub, subMsg) @@ -127,7 +129,7 @@ func (rl *RedissionRWLocker) refreshLockTimeout(ctx context.Context) { func (rl *RedissionRWLocker) UnRLock(ctx context.Context) error { rl.logger.Info("unlock RLock by key and token", zap.String("key", rl.key), zap.String("token", rl.token)) - res := rl.client.Eval(ctx, luascript.UnRLockScript, []string{rl.key, rl.rwTokenTimeoutPrefix, rl.waitChanKey}, unlockMessage, rl.token) + res := rl.client.Eval(ctx, luascript.UnRLockScript, []string{rl.key, rl.rwTokenTimeoutPrefix, rl.writeWaitChanKey}, unlockMessage, rl.token) val, err := res.Int() if err != redis.Nil && err != nil { rl.logger.Info("unlock read lock failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err)) @@ -174,7 +176,7 @@ func (rl *RedissionRWLocker) WLock(ctx context.Context, timeout ...time.Duration subMsg := make(chan struct{}, 1) defer close(subMsg) - sub := rl.client.Subscribe(ctx, rl.waitChanKey) + sub := rl.client.Subscribe(ctx, rl.writeWaitChanKey) defer sub.Close() go rl.subscribeLock(ctx, sub, subMsg) @@ -220,7 +222,7 @@ func (rl *RedissionRWLocker) tryWLock(ctx context.Context) error { } func (rl *RedissionRWLocker) UnWLock(ctx context.Context) error { - res := rl.client.Eval(ctx, luascript.UnWLockScript, []string{rl.key, rl.rwTokenTimeoutPrefix, rl.waitChanKey}, unlockMessage, rl.token) + res := rl.client.Eval(ctx, luascript.UnWLockScript, []string{rl.key, rl.rwTokenTimeoutPrefix, rl.writeWaitChanKey, rl.readWaitChanKey}, unlockMessage, rl.token) val, err := res.Int() if err != redis.Nil && err != nil { rl.logger.Error("unlock write lock failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err)) @@ -273,7 +275,6 @@ func GetRWLocker(client *redis.Client, conf *RedissionLockConfig) *RedissionRWLo key: strings.Join([]string{conf.Prefix, conf.Key}, ":"), needRefresh: conf.NeedRefresh, lockLeaseTime: conf.LockLeaseTime, - waitChanKey: strings.Join([]string{conf.ChanPrefix, conf.Key, "write"}, ":"), client: client, exit: make(chan struct{}), once: &sync.Once{}, @@ -282,6 +283,8 @@ func GetRWLocker(client *redis.Client, conf *RedissionLockConfig) *RedissionRWLo rwLocker := &RedissionRWLocker{ redissionLocker: *r, + writeWaitChanKey: strings.Join([]string{conf.ChanPrefix, conf.Key, "write"}, ":"), + readWaitChanKey: strings.Join([]string{conf.ChanPrefix, conf.Key, "read"}, ":"), rwTokenTimeoutPrefix: conf.TimeoutPrefix, } return rwLocker diff --git a/distributedlock/rwlock_test.go b/distributedlock/rwlock_test.go index c64cccc..82bc3d2 100644 --- a/distributedlock/rwlock_test.go +++ b/distributedlock/rwlock_test.go @@ -2,7 +2,6 @@ package distributedlock import ( "context" - "fmt" "strings" "testing" "time" @@ -14,20 +13,25 @@ import ( 
"go.uber.org/zap" ) -var log *zap.Logger +var ( + log *zap.Logger + rdb *redis.Client +) func init() { log = zap.Must(zap.NewDevelopment()) + rdb = redis.NewClient(&redis.Options{ + Network: "tcp", + Addr: "192.168.2.104:30001", + PoolSize: 50, + DialTimeout: 10 * time.Second, + MaxIdleConns: 10, + MaxActiveConns: 40, + }) } func TestRWLockRLockAndUnRLock(t *testing.T) { ctx := context.TODO() - rdb := redis.NewClient(&redis.Options{ - Network: "tcp", - Addr: "192.168.2.104:30001", - PoolSize: 50, - DialTimeout: 10 * time.Second, - }) rwLocker := GetRWLocker(rdb, &RedissionLockConfig{ LockLeaseTime: 120, @@ -59,12 +63,6 @@ func TestRWLockRLockAndUnRLock(t *testing.T) { func TestRWLockReentrantRLock(t *testing.T) { ctx := context.TODO() - rdb := redis.NewClient(&redis.Options{ - Network: "tcp", - Addr: "192.168.2.104:30001", - PoolSize: 50, - DialTimeout: 10 * time.Second, - }) rwLocker := GetRWLocker(rdb, &RedissionLockConfig{ LockLeaseTime: 120, @@ -113,12 +111,6 @@ func TestRWLockReentrantRLock(t *testing.T) { func TestRWLockRefreshRLock(t *testing.T) { ctx := context.TODO() - rdb := redis.NewClient(&redis.Options{ - Network: "tcp", - Addr: "192.168.2.104:30001", - PoolSize: 50, - DialTimeout: 10 * time.Second, - }) rwLocker := GetRWLocker(rdb, &RedissionLockConfig{ LockLeaseTime: 10, @@ -161,12 +153,6 @@ func TestRWLockRefreshRLock(t *testing.T) { func TestRWLock2ClientRLock(t *testing.T) { ctx := context.TODO() - rdb := redis.NewClient(&redis.Options{ - Network: "tcp", - Addr: "192.168.2.104:30001", - PoolSize: 50, - DialTimeout: 10 * time.Second, - }) rwLocker1 := GetRWLocker(rdb, &RedissionLockConfig{ LockLeaseTime: 120, @@ -226,12 +212,6 @@ func TestRWLock2ClientRLock(t *testing.T) { func TestRWLock2CWith2DifTimeRLock(t *testing.T) { ctx := context.TODO() - rdb := redis.NewClient(&redis.Options{ - Network: "tcp", - Addr: "192.168.2.104:30001", - PoolSize: 50, - DialTimeout: 10 * time.Second, - }) rwLocker1 := GetRWLocker(rdb, &RedissionLockConfig{ LockLeaseTime: 120, @@ -304,12 +284,6 @@ func TestRWLock2CWith2DifTimeRLock(t *testing.T) { func TestRWLock2CWithTimeTransformRLock(t *testing.T) { ctx := context.TODO() - rdb := redis.NewClient(&redis.Options{ - Network: "tcp", - Addr: "192.168.2.104:30001", - PoolSize: 50, - DialTimeout: 10 * time.Second, - }) rwLocker1 := GetRWLocker(rdb, &RedissionLockConfig{ LockLeaseTime: 30, @@ -376,12 +350,6 @@ func TestRWLock2CWithTimeTransformRLock(t *testing.T) { func TestRWLockWLockAndUnWLock(t *testing.T) { ctx := context.TODO() - rdb := redis.NewClient(&redis.Options{ - Network: "tcp", - Addr: "192.168.2.104:30001", - PoolSize: 50, - DialTimeout: 10 * time.Second, - }) rwLocker := GetRWLocker(rdb, &RedissionLockConfig{ LockLeaseTime: 120, @@ -413,12 +381,6 @@ func TestRWLockWLockAndUnWLock(t *testing.T) { func TestRWLockReentrantWLock(t *testing.T) { ctx := context.TODO() - rdb := redis.NewClient(&redis.Options{ - Network: "tcp", - Addr: "192.168.2.104:30001", - PoolSize: 50, - DialTimeout: 10 * time.Second, - }) rwLocker := GetRWLocker(rdb, &RedissionLockConfig{ LockLeaseTime: 120, @@ -465,15 +427,8 @@ func TestRWLockReentrantWLock(t *testing.T) { return } -// TODO 设计两个客户端,C1先加读锁与C2后加写锁 -func TestRWLock2CWithRLockAndWLock(t *testing.T) { +func TestRWLock2CWithRLockAndWLockFailed(t *testing.T) { ctx := context.TODO() - rdb := redis.NewClient(&redis.Options{ - Network: "tcp", - Addr: "192.168.2.104:30001", - PoolSize: 50, - DialTimeout: 10 * time.Second, - }) rwLocker1 := GetRWLocker(rdb, &RedissionLockConfig{ LockLeaseTime: 120, @@ -501,19 
+456,9 @@ func TestRWLock2CWithRLockAndWLock(t *testing.T) { assert.Equal(t, nil, err) assert.Equal(t, 1, num) - // go func() { - // // locker1解写锁 - // time.Sleep(10 * time.Second) - // err = rwLocker1.UnRLock(ctx) - // assert.Equal(t, nil, err) - // }() - // locker2加写锁锁 duration = 10 * time.Second err = rwLocker2.WLock(ctx, duration) - // 预测加写锁失败 - // TODO 优化输出 - fmt.Printf("wlock err:%v\n", err) assert.Equal(t, constant.AcquireTimeoutErr, err) err = rwLocker1.UnRLock(ctx) @@ -523,15 +468,63 @@ func TestRWLock2CWithRLockAndWLock(t *testing.T) { return } +// TODO 设计两个客户端,C1先加读锁成功与C2后加写锁成功 +func TestRWLock2CWithRLockAndWLockSucceed(t *testing.T) { + ctx := context.TODO() + rdb.Conn() + rwLocker1 := GetRWLocker(rdb, &RedissionLockConfig{ + LockLeaseTime: 120, + NeedRefresh: true, + Key: "component", + Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", + }) + rwLocker1.logger = log + + rwLocker2 := GetRWLocker(rdb, &RedissionLockConfig{ + LockLeaseTime: 120, + NeedRefresh: true, + Key: "component", + Token: "fd348a84-e07c-4a61-8c19-f753e6bc5577", + }) + rwLocker2.logger = log + duration := 10 * time.Second + // locker1加读锁 + err := rwLocker1.RLock(ctx, duration) + assert.Equal(t, nil, err) + + tokenKey1 := strings.Join([]string{rwLocker1.rwTokenTimeoutPrefix, rwLocker1.token}, ":") + num, err := rdb.HGet(ctx, rwLocker1.key, tokenKey1).Int() + assert.Equal(t, nil, err) + assert.Equal(t, 1, num) + + go func() { + // locker1解写锁 + time.Sleep(10 * time.Second) + err = rwLocker1.UnRLock(ctx) + assert.Equal(t, nil, err) + }() + + // locker2加写锁 + duration = 30 * time.Second + err = rwLocker2.WLock(ctx, duration) + assert.Equal(t, nil, err) + + tokenKey2 := strings.Join([]string{rwLocker2.rwTokenTimeoutPrefix, rwLocker2.token}, ":") + num, err = rdb.HGet(ctx, rwLocker2.key, tokenKey2).Int() + assert.Equal(t, nil, err) + assert.Equal(t, 1, num) + + // locker2解写锁 + err = rwLocker2.UnWLock(ctx) + assert.Equal(t, nil, err) + + t.Log("rwLock 2 client lock test success") + return +} + // TODO 设计两个客户端,C1先加写锁与C2后加读锁 func TestRWLock2CWithWLockAndRLock(t *testing.T) { ctx := context.TODO() - rdb := redis.NewClient(&redis.Options{ - Network: "tcp", - Addr: "192.168.2.104:30001", - PoolSize: 50, - DialTimeout: 10 * time.Second, - }) rwLocker1 := GetRWLocker(rdb, &RedissionLockConfig{ LockLeaseTime: 120, From fda43c65d288c488edaaf611a639a94cd5b23333 Mon Sep 17 00:00:00 2001 From: douxu Date: Mon, 7 Apr 2025 16:49:06 +0800 Subject: [PATCH 20/33] optimize the subscription process of redis locker --- distributedlock/redis_lock.go | 107 +++++++++++++------------------- distributedlock/redis_rwlock.go | 76 +++++++++++++---------- distributedlock/rwlock_test.go | 20 +++--- 3 files changed, 97 insertions(+), 106 deletions(-) diff --git a/distributedlock/redis_lock.go b/distributedlock/redis_lock.go index 567160c..f96522e 100644 --- a/distributedlock/redis_lock.go +++ b/distributedlock/redis_lock.go @@ -34,20 +34,21 @@ type RedissionLockConfig struct { } type redissionLocker struct { - lockLeaseTime uint64 - token string - key string - waitChanKey string - needRefresh bool - exit chan struct{} - client *redis.Client - once *sync.Once - logger *zap.Logger + lockLeaseTime uint64 + token string + key string + waitChanKey string + needRefresh bool + refreshExitChan chan struct{} + subExitChan chan struct{} + client *redis.Client + refreshOnce *sync.Once + logger *zap.Logger } func (rl *redissionLocker) Lock(ctx context.Context, timeout ...time.Duration) error { - if rl.exit == nil { - rl.exit = make(chan struct{}) + if 
rl.refreshExitChan == nil { + rl.refreshExitChan = make(chan struct{}) } result := rl.tryLock(ctx).(*constant.RedisResult) if result.Code == constant.UnknownInternalError { @@ -56,7 +57,7 @@ func (rl *redissionLocker) Lock(ctx context.Context, timeout ...time.Duration) e } if (result.Code == constant.LockSuccess) && rl.needRefresh { - rl.once.Do(func() { + rl.refreshOnce.Do(func() { // async refresh lock timeout unitl receive exit singal go rl.refreshLockTimeout(ctx) }) @@ -67,7 +68,7 @@ func (rl *redissionLocker) Lock(ctx context.Context, timeout ...time.Duration) e defer close(subMsg) sub := rl.client.Subscribe(ctx, rl.waitChanKey) defer sub.Close() - go rl.subscribeLock(ctx, sub, subMsg) + go rl.subscribeLock(sub, subMsg) if len(timeout) > 0 && timeout[0] > 0 { acquireTimer := time.NewTimer(timeout[0]) @@ -100,53 +101,21 @@ func (rl *redissionLocker) Lock(ctx context.Context, timeout ...time.Duration) e return fmt.Errorf("lock the redis lock failed:%w", result) } -// TODO 优化订阅流程 -func (rl *redissionLocker) subscribeLock(ctx context.Context, sub *redis.PubSub, out chan struct{}) { - if sub == nil || out == nil { +func (rl *redissionLocker) subscribeLock(sub *redis.PubSub, subMsgChan chan struct{}) { + if sub == nil || subMsgChan == nil { return } rl.logger.Info("lock: enter sub routine", zap.String("token", rl.token)) - // subCh := sub.Channel() - // for msg := range subCh { - // // 这里只会收到真正的数据消息 - // fmt.Printf("Channel: %s, Payload: %s\n", - // msg.Channel, - // msg.Payload) - // } - - receiveChan := make(chan interface{}, 2) - go func() { - for { - msg, err := sub.Receive(ctx) - if err != nil { - if errors.Is(err, redis.ErrClosed) { - return - } - rl.logger.Error("sub receive message failed", zap.Error(err)) - continue - } - rl.logger.Info("sub receive message", zap.Any("msg", msg)) - receiveChan <- msg - } - }() - for { select { - case <-rl.exit: + case <-rl.subExitChan: + close(subMsgChan) return - case msg := <-receiveChan: - switch msg.(type) { - case *redis.Subscription: - // Ignore. - case *redis.Pong: - // Ignore. 
- case *redis.Message: - out <- struct{}{} - default: - } - // case <-subCh: - // out <- struct{}{} + case <-sub.Channel(): + // 这里只会收到真正的数据消息 + subMsgChan <- struct{}{} + default: } } } @@ -183,16 +152,26 @@ func (rl *redissionLocker) refreshLockTimeout(ctx context.Context) { rl.logger.Info("lock refresh success by key and token", zap.String("token", rl.token), zap.String("key", rl.key)) } timer.Reset(lockTime) - case <-rl.exit: + case <-rl.refreshExitChan: return } } } func (rl *redissionLocker) cancelRefreshLockTime() { - if rl.exit != nil { - close(rl.exit) - rl.once = &sync.Once{} + if rl.refreshExitChan != nil { + close(rl.refreshExitChan) + rl.refreshOnce = &sync.Once{} + } +} + +func (rl *redissionLocker) closeSub(sub *redis.PubSub, noticeChan chan struct{}) { + if sub != nil { + sub.Close() + } + + if noticeChan != nil { + close(noticeChan) } } @@ -264,13 +243,13 @@ func GetLocker(client *redis.Client, ops *RedissionLockConfig) *redissionLocker } r := &redissionLocker{ - token: ops.Token, - key: strings.Join([]string{ops.Prefix, ops.Key}, ":"), - waitChanKey: strings.Join([]string{ops.ChanPrefix, ops.Key, "wait"}, ":"), - needRefresh: ops.NeedRefresh, - client: client, - exit: make(chan struct{}), - logger: logger.GetLoggerInstance(), + token: ops.Token, + key: strings.Join([]string{ops.Prefix, ops.Key}, ":"), + waitChanKey: strings.Join([]string{ops.ChanPrefix, ops.Key, "wait"}, ":"), + needRefresh: ops.NeedRefresh, + client: client, + refreshExitChan: make(chan struct{}), + logger: logger.GetLoggerInstance(), } return r } diff --git a/distributedlock/redis_rwlock.go b/distributedlock/redis_rwlock.go index af8e797..f34c839 100644 --- a/distributedlock/redis_rwlock.go +++ b/distributedlock/redis_rwlock.go @@ -25,10 +25,6 @@ type RedissionRWLocker struct { } func (rl *RedissionRWLocker) RLock(ctx context.Context, timeout ...time.Duration) error { - if rl.exit == nil { - rl.exit = make(chan struct{}) - } - result := rl.tryRLock(ctx).(*constant.RedisResult) if result.Code == constant.UnknownInternalError { rl.logger.Error(result.OutputResultMessage()) @@ -37,7 +33,11 @@ func (rl *RedissionRWLocker) RLock(ctx context.Context, timeout ...time.Duration if result.Code == constant.LockSuccess { if rl.needRefresh { - rl.once.Do(func() { + rl.refreshOnce.Do(func() { + if rl.refreshExitChan == nil { + rl.refreshExitChan = make(chan struct{}) + } + // async refresh lock timeout unitl receive exit singal go rl.refreshLockTimeout(ctx) }) @@ -46,37 +46,41 @@ func (rl *RedissionRWLocker) RLock(ctx context.Context, timeout ...time.Duration return nil } - subMsg := make(chan struct{}, 1) - defer close(subMsg) - sub := rl.client.Subscribe(ctx, rl.readWaitChanKey) - defer sub.Close() - go rl.subscribeLock(ctx, sub, subMsg) - if len(timeout) > 0 && timeout[0] > 0 { + if rl.subExitChan == nil { + rl.subExitChan = make(chan struct{}) + } + + subMsgChan := make(chan struct{}, 1) + sub := rl.client.Subscribe(ctx, rl.readWaitChanKey) + go rl.subscribeLock(sub, subMsgChan) + acquireTimer := time.NewTimer(timeout[0]) for { select { - case _, ok := <-subMsg: + case _, ok := <-subMsgChan: if !ok { err := errors.New("failed to read the read lock waiting for for the channel message") rl.logger.Error("failed to read the read lock waiting for for the channel message") return err } - resultErr := rl.tryRLock(ctx).(*constant.RedisResult) - if (resultErr.Code == constant.RLockFailureWithWLockOccupancy) || (resultErr.Code == constant.UnknownInternalError) { - rl.logger.Info(resultErr.OutputResultMessage()) + result := 
rl.tryRLock(ctx).(*constant.RedisResult) + if (result.Code == constant.RLockFailureWithWLockOccupancy) || (result.Code == constant.UnknownInternalError) { + rl.logger.Info(result.OutputResultMessage()) continue } - if resultErr.Code == constant.LockSuccess { - rl.logger.Info(resultErr.OutputResultMessage()) + if result.Code == constant.LockSuccess { + rl.logger.Info(result.OutputResultMessage()) + rl.closeSub(sub, rl.subExitChan) return nil } case <-acquireTimer.C: - err := errors.New("the waiting time for obtaining the read lock operation has timed out") rl.logger.Info("the waiting time for obtaining the read lock operation has timed out") - return err + rl.closeSub(sub, rl.subExitChan) + // after acquire lock timeout,notice the sub channel to close + return constant.AcquireTimeoutErr } } } @@ -121,7 +125,7 @@ func (rl *RedissionRWLocker) refreshLockTimeout(ctx context.Context) { rl.logger.Info("lock refresh success by key and token", zap.String("token", rl.token), zap.String("key", rl.key)) } timer.Reset(lockTime) - case <-rl.exit: + case <-rl.refreshExitChan: return } } @@ -153,10 +157,6 @@ func (rl *RedissionRWLocker) UnRLock(ctx context.Context) error { } func (rl *RedissionRWLocker) WLock(ctx context.Context, timeout ...time.Duration) error { - if rl.exit == nil { - rl.exit = make(chan struct{}) - } - result := rl.tryWLock(ctx).(*constant.RedisResult) if result.Code == constant.UnknownInternalError { rl.logger.Error(result.OutputResultMessage()) @@ -165,7 +165,11 @@ func (rl *RedissionRWLocker) WLock(ctx context.Context, timeout ...time.Duration if result.Code == constant.LockSuccess { if rl.needRefresh { - rl.once.Do(func() { + rl.refreshOnce.Do(func() { + if rl.refreshExitChan == nil { + rl.refreshExitChan = make(chan struct{}) + } + // async refresh lock timeout unitl receive exit singal go rl.refreshLockTimeout(ctx) }) @@ -174,17 +178,19 @@ func (rl *RedissionRWLocker) WLock(ctx context.Context, timeout ...time.Duration return nil } - subMsg := make(chan struct{}, 1) - defer close(subMsg) - sub := rl.client.Subscribe(ctx, rl.writeWaitChanKey) - defer sub.Close() - go rl.subscribeLock(ctx, sub, subMsg) - if len(timeout) > 0 && timeout[0] > 0 { + if rl.subExitChan == nil { + rl.subExitChan = make(chan struct{}) + } + + subMsgChan := make(chan struct{}, 1) + sub := rl.client.Subscribe(ctx, rl.writeWaitChanKey) + go rl.subscribeLock(sub, subMsgChan) + acquireTimer := time.NewTimer(timeout[0]) for { select { - case _, ok := <-subMsg: + case _, ok := <-subMsgChan: if !ok { err := errors.New("failed to read the write lock waiting for for the channel message") rl.logger.Error("failed to read the read lock waiting for for the channel message") @@ -199,10 +205,13 @@ func (rl *RedissionRWLocker) WLock(ctx context.Context, timeout ...time.Duration if result.Code == constant.LockSuccess { rl.logger.Info(result.OutputResultMessage()) + rl.closeSub(sub, rl.subExitChan) return nil } case <-acquireTimer.C: rl.logger.Info("the waiting time for obtaining the write lock operation has timed out") + rl.closeSub(sub, rl.subExitChan) + // after acquire lock timeout,notice the sub channel to close return constant.AcquireTimeoutErr } } @@ -276,8 +285,7 @@ func GetRWLocker(client *redis.Client, conf *RedissionLockConfig) *RedissionRWLo needRefresh: conf.NeedRefresh, lockLeaseTime: conf.LockLeaseTime, client: client, - exit: make(chan struct{}), - once: &sync.Once{}, + refreshOnce: &sync.Once{}, logger: logger.GetLoggerInstance(), } diff --git a/distributedlock/rwlock_test.go 
b/distributedlock/rwlock_test.go
index 82bc3d2..341abec 100644
--- a/distributedlock/rwlock_test.go
+++ b/distributedlock/rwlock_test.go
@@ -21,12 +21,18 @@ var (
 func init() {
 	log = zap.Must(zap.NewDevelopment())
 	rdb = redis.NewClient(&redis.Options{
-		Network:        "tcp",
-		Addr:           "192.168.2.104:30001",
-		PoolSize:       50,
-		DialTimeout:    10 * time.Second,
-		MaxIdleConns:   10,
-		MaxActiveConns: 40,
+		Network: "tcp",
+		Addr:    "192.168.2.104:30001",
+		// pool config
+		PoolSize:     100, // max connections
+		PoolFIFO:     true,
+		PoolTimeout:  4 * time.Second,
+		MinIdleConns: 10, // min idle connections
+		MaxIdleConns: 20, // max idle connections
+		// timeout config
+		DialTimeout:  5 * time.Second,
+		ReadTimeout:  3 * time.Second,
+		WriteTimeout: 3 * time.Second,
 	})
 }
 
@@ -468,10 +474,8 @@ func TestRWLock2CWithRLockAndWLockFailed(t *testing.T) {
 	return
 }
 
-// TODO 设计两个客户端,C1先加读锁成功与C2后加写锁成功
 func TestRWLock2CWithRLockAndWLockSucceed(t *testing.T) {
 	ctx := context.TODO()
-	rdb.Conn()
 	rwLocker1 := GetRWLocker(rdb, &RedissionLockConfig{
 		LockLeaseTime: 120,
 		NeedRefresh:   true,

From d27a9bbafafed40311b951bd0b0628dd4f8b4026 Mon Sep 17 00:00:00 2001
From: douxu
Date: Fri, 11 Apr 2025 16:36:54 +0800
Subject: [PATCH 21/33] refactor(locker script): optimize redis locker script

1.optimize the unlock script of the read lock

fix(locker_refresh): fix bug of locker refresh

1.fix the bug that the refresh watchdog was not started when the lock
was acquired after waiting

test(lock_case): add new redis locker case

1.add new redis locker test case
---
 distributedlock/luascript/rwlock_script.go |  4 ++--
 distributedlock/redis_lock.go              |  5 ++++-
 distributedlock/redis_rwlock.go            | 22 ++++++++++++++++++++
 distributedlock/rwlock_test.go             | 22 ++++++++++++++------
 4 files changed, 44 insertions(+), 9 deletions(-)

diff --git a/distributedlock/luascript/rwlock_script.go b/distributedlock/luascript/rwlock_script.go
index 3af9e27..db58a52 100644
--- a/distributedlock/luascript/rwlock_script.go
+++ b/distributedlock/luascript/rwlock_script.go
@@ -79,10 +79,10 @@ elseif (mode == 'write') then
     return -2;
 end;
 
--- 判断当前的确是读模式但是当前 token 并没有加读锁的情况,返回 1
+-- 判断当前的确是读模式但是当前 token 并没有加读锁的情况,返回 0
 local lockExists = redis.call('hexists', KEYS[1], lockKey);
 if ((mode == 'read') and (lockExists == 0)) then
-    return 1;
+    return 0;
 end;
 
 local counter = redis.call('hincrby', KEYS[1], lockKey, -1);
diff --git a/distributedlock/redis_lock.go b/distributedlock/redis_lock.go
index f96522e..8b312ef 100644
--- a/distributedlock/redis_lock.go
+++ b/distributedlock/redis_lock.go
@@ -167,7 +167,10 @@ func (rl *redissionLocker) cancelRefreshLockTime() {
 
 func (rl *redissionLocker) closeSub(sub *redis.PubSub, noticeChan chan struct{}) {
 	if sub != nil {
-		sub.Close()
+		err := sub.Close()
+		if err != nil {
+			rl.logger.Error("close sub failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err))
+		}
 	}
 
 	if noticeChan != nil {
diff --git a/distributedlock/redis_rwlock.go b/distributedlock/redis_rwlock.go
index f34c839..a94d566 100644
--- a/distributedlock/redis_rwlock.go
+++ b/distributedlock/redis_rwlock.go
@@ -74,6 +74,17 @@ func (rl *RedissionRWLocker) RLock(ctx context.Context, timeout ...time.Duration
 			if result.Code == constant.LockSuccess {
 				rl.logger.Info(result.OutputResultMessage())
 				rl.closeSub(sub, rl.subExitChan)
+
+				if rl.needRefresh {
+					rl.refreshOnce.Do(func() {
+						if rl.refreshExitChan == nil {
+							rl.refreshExitChan = make(chan struct{})
+						}
+
+						// async refresh lock timeout until receiving the exit signal
+						go rl.refreshLockTimeout(ctx)
+					})
+				}
 				return nil
 			}
 		case <-acquireTimer.C:
@@ -206,6 +217,17 @@ func (rl *RedissionRWLocker) WLock(ctx context.Context, timeout ...time.Duration
 			if result.Code == constant.LockSuccess {
 				rl.logger.Info(result.OutputResultMessage())
 				rl.closeSub(sub, rl.subExitChan)
+
+				if rl.needRefresh {
+					rl.refreshOnce.Do(func() {
+						if rl.refreshExitChan == nil {
+							rl.refreshExitChan = make(chan struct{})
+						}
+
+						// async refresh lock timeout until receiving the exit signal
+						go rl.refreshLockTimeout(ctx)
+					})
+				}
 				return nil
 			}
 		case <-acquireTimer.C:
diff --git a/distributedlock/rwlock_test.go b/distributedlock/rwlock_test.go
index 341abec..fc1c07b 100644
--- a/distributedlock/rwlock_test.go
+++ b/distributedlock/rwlock_test.go
@@ -526,7 +526,6 @@ func TestRWLock2CWithRLockAndWLockSucceed(t *testing.T) {
 	return
 }
 
-// TODO 设计两个客户端,C1先加写锁与C2后加读锁
 func TestRWLock2CWithWLockAndRLock(t *testing.T) {
 	ctx := context.TODO()
 
@@ -547,7 +546,7 @@ func TestRWLock2CWithWLockAndRLock(t *testing.T) {
 	rwLocker2.logger = log
 	duration := 10 * time.Second
 
-	// locker1加读锁
+	// locker1加写锁
 	err := rwLocker1.WLock(ctx, duration)
 	assert.Equal(t, nil, err)
 
@@ -556,14 +555,25 @@ func TestRWLock2CWithWLockAndRLock(t *testing.T) {
 	assert.Equal(t, nil, err)
 	assert.Equal(t, 1, num)
 
+	go func() {
+		// locker1解写锁
+		time.Sleep(10 * time.Second)
+		err = rwLocker1.UnWLock(ctx)
+		assert.Equal(t, nil, err)
+	}()
+
 	// locker2加读锁
-	duration = 2 * time.Second
+	duration = 30 * time.Second
 	err = rwLocker2.RLock(ctx, duration)
-	// TODO 预测加读锁失败
 	assert.Equal(t, nil, err)
 
+	tokenKey2 := strings.Join([]string{rwLocker2.rwTokenTimeoutPrefix, rwLocker2.token}, ":")
+	num, err = rdb.HGet(ctx, rwLocker2.key, tokenKey2).Int()
+	assert.Equal(t, nil, err)
+	assert.Equal(t, 1, num)
+
+	// locker2解读锁
+	err = rwLocker2.UnRLock(ctx)
 	assert.Equal(t, nil, err)
 
-	// locker1解写锁
-	err = rwLocker1.UnWLock(ctx)
 	t.Log("rwLock 2 client lock test success")
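What patch 21 converges on: the refresh watchdog has to be armed on every successful acquire path, not only the fast path, so the wait-and-retry branch now runs the same refreshOnce guard before returning. The shape of the pattern, reduced to a minimal sketch — locker, tryLock and refreshLoop here are illustrative stand-ins for redissionLocker, tryRLock/tryWLock and refreshLockTimeout, not the real API (the real code also keeps refreshOnce behind a pointer so it can be re-armed after unlock):

package sketch

import (
	"context"
	"sync"
	"time"
)

// locker is a cut-down stand-in for redissionLocker; only the pieces
// needed to show the once-guarded watchdog are kept.
type locker struct {
	lease       time.Duration
	refreshOnce sync.Once
	refreshExit chan struct{}
}

// tryLock is a placeholder for the real script-based lock attempt.
func (l *locker) tryLock(ctx context.Context) bool { return true }

// refreshLoop extends the lease every lease/3 until refreshExit closes.
func (l *locker) refreshLoop(ctx context.Context) {
	t := time.NewTimer(l.lease / 3)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			// the real code EVALs the refresh script here
			t.Reset(l.lease / 3)
		case <-l.refreshExit:
			return
		}
	}
}

// acquire arms the watchdog exactly once, whichever path wins the lock:
// the fast path or the subscribe-wait-retry path.
func (l *locker) acquire(ctx context.Context) bool {
	if !l.tryLock(ctx) {
		return false // the real code waits on the unlock channel and retries
	}
	l.refreshOnce.Do(func() {
		if l.refreshExit == nil {
			l.refreshExit = make(chan struct{})
		}
		go l.refreshLoop(ctx)
	})
	return true
}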
From 310f4c043c154317eea7411561a2b66543c9dd72 Mon Sep 17 00:00:00 2001
From: douxu
Date: Fri, 18 Apr 2025 14:02:03 +0800
Subject: [PATCH 22/33] refactor(locker params): optimize redis locker params

1.export the locker params that are needed outside the package
2.optimize locker logging output
3.optimize locker init process
---
 distributedlock/redis_lock.go                 |  46 ++---
 distributedlock/redis_rwlock.go               |  66 +++---
 .../distributedlock}/rwlock_test.go           | 193 +++++++++---------
 3 files changed, 154 insertions(+), 151 deletions(-)
 rename {distributedlock => test/distributedlock}/rwlock_test.go (66%)

diff --git a/distributedlock/redis_lock.go b/distributedlock/redis_lock.go
index 8b312ef..0cdbb39 100644
--- a/distributedlock/redis_lock.go
+++ b/distributedlock/redis_lock.go
@@ -35,15 +35,15 @@ type RedissionLockConfig struct {
 type redissionLocker struct {
 	lockLeaseTime   uint64
-	token           string
-	key             string
+	Token           string
+	Key             string
 	waitChanKey     string
 	needRefresh     bool
 	refreshExitChan chan struct{}
 	subExitChan     chan struct{}
 	client          *redis.Client
 	refreshOnce     *sync.Once
-	logger          *zap.Logger
+	Logger          *zap.Logger
 }
 
 func (rl *redissionLocker) Lock(ctx context.Context, timeout ...time.Duration) error {
@@ -52,7 +52,7 @@ func (rl *redissionLocker) Lock(ctx context.Context, timeout ...time.Duration) e
 	}
 	result := rl.tryLock(ctx).(*constant.RedisResult)
 	if result.Code == constant.UnknownInternalError {
-		rl.logger.Error(result.OutputResultMessage())
+		rl.Logger.Error(result.OutputResultMessage())
 		return fmt.Errorf("get lock failed:%w", result)
 	}
 
@@ -77,23 +77,23 @@ func (rl *redissionLocker) Lock(ctx context.Context, timeout ...time.Duration) e
 		case _, ok := <-subMsg:
 			if !ok {
 				err := errors.New("failed to read the lock 
waiting for for the channel message") - rl.logger.Error("failed to read the lock waiting for for the channel message") + rl.Logger.Error("failed to read the lock waiting for for the channel message") return err } resultErr := rl.tryLock(ctx).(*constant.RedisResult) if (resultErr.Code == constant.LockFailure) || (resultErr.Code == constant.UnknownInternalError) { - rl.logger.Info(resultErr.OutputResultMessage()) + rl.Logger.Info(resultErr.OutputResultMessage()) continue } if resultErr.Code == constant.LockSuccess { - rl.logger.Info(resultErr.OutputResultMessage()) + rl.Logger.Info(resultErr.OutputResultMessage()) return nil } case <-acquireTimer.C: err := errors.New("the waiting time for obtaining the lock operation has timed out") - rl.logger.Info("the waiting time for obtaining the lock operation has timed out") + rl.Logger.Info("the waiting time for obtaining the lock operation has timed out") return err } } @@ -105,7 +105,7 @@ func (rl *redissionLocker) subscribeLock(sub *redis.PubSub, subMsgChan chan stru if sub == nil || subMsgChan == nil { return } - rl.logger.Info("lock: enter sub routine", zap.String("token", rl.token)) + rl.Logger.Info("lock: enter sub routine", zap.String("token", rl.Token)) for { select { @@ -126,7 +126,7 @@ ARGV[1]:锁的过期时间(lockLeaseTime),单位为秒。 ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 */ func (rl *redissionLocker) refreshLockTimeout(ctx context.Context) { - rl.logger.Info("lock refresh by key and token", zap.String("token", rl.token), zap.String("key", rl.key)) + rl.Logger.Info("lock refresh by key and token", zap.String("token", rl.Token), zap.String("key", rl.Key)) lockTime := time.Duration(rl.lockLeaseTime/3) * time.Second timer := time.NewTimer(lockTime) @@ -136,20 +136,20 @@ func (rl *redissionLocker) refreshLockTimeout(ctx context.Context) { select { case <-timer.C: // extend key lease time - res := rl.client.Eval(ctx, luascript.RefreshLockScript, []string{rl.key}, rl.lockLeaseTime, rl.token) + res := rl.client.Eval(ctx, luascript.RefreshLockScript, []string{rl.Key}, rl.lockLeaseTime, rl.Token) val, err := res.Int() if err != redis.Nil && err != nil { - rl.logger.Info("lock refresh failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err)) + rl.Logger.Info("lock refresh failed", zap.String("token", rl.Token), zap.String("key", rl.Key), zap.Error(err)) return } if constant.RedisCode(val) == constant.RefreshLockFailure { - rl.logger.Error("lock refreash failed,can not find the lock by key and token", zap.String("token", rl.token), zap.String("key", rl.key)) + rl.Logger.Error("lock refreash failed,can not find the lock by key and token", zap.String("token", rl.Token), zap.String("key", rl.Key)) break } if constant.RedisCode(val) == constant.RefreshLockSuccess { - rl.logger.Info("lock refresh success by key and token", zap.String("token", rl.token), zap.String("key", rl.key)) + rl.Logger.Info("lock refresh success by key and token", zap.String("token", rl.Token), zap.String("key", rl.Key)) } timer.Reset(lockTime) case <-rl.refreshExitChan: @@ -169,7 +169,7 @@ func (rl *redissionLocker) closeSub(sub *redis.PubSub, noticeChan chan struct{}) if sub != nil { err := sub.Close() if err != nil { - rl.logger.Error("close sub failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err)) + rl.Logger.Error("close sub failed", zap.String("token", rl.Token), zap.String("key", rl.Key), zap.Error(err)) } } @@ -185,7 +185,7 @@ ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端 */ func (rl *redissionLocker) tryLock(ctx context.Context) error { 
lockType := constant.LockType - res := rl.client.Eval(ctx, luascript.LockScript, []string{rl.key}, rl.lockLeaseTime, rl.token) + res := rl.client.Eval(ctx, luascript.LockScript, []string{rl.Key}, rl.lockLeaseTime, rl.Token) val, err := res.Int() if err != redis.Nil && err != nil { return constant.NewRedisResult(constant.UnknownInternalError, lockType, err.Error()) @@ -200,10 +200,10 @@ ARGV[1]:解锁消息(unlockMessage),用于通知其他客户端锁已释放 ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 */ func (rl *redissionLocker) UnLock(ctx context.Context) error { - res := rl.client.Eval(ctx, luascript.UnLockScript, []string{rl.key, rl.waitChanKey}, unlockMessage, rl.token) + res := rl.client.Eval(ctx, luascript.UnLockScript, []string{rl.Key, rl.waitChanKey}, unlockMessage, rl.Token) val, err := res.Int() if err != redis.Nil && err != nil { - rl.logger.Info("unlock lock failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err)) + rl.Logger.Info("unlock lock failed", zap.String("token", rl.Token), zap.String("key", rl.Key), zap.Error(err)) return fmt.Errorf("unlock lock failed:%w", constant.NewRedisResult(constant.UnknownInternalError, constant.UnLockType, err.Error())) } @@ -212,12 +212,12 @@ func (rl *redissionLocker) UnLock(ctx context.Context) error { rl.cancelRefreshLockTime() } - rl.logger.Info("unlock lock success", zap.String("token", rl.token), zap.String("key", rl.key)) + rl.Logger.Info("unlock lock success", zap.String("token", rl.Token), zap.String("key", rl.Key)) return nil } if constant.RedisCode(val) == constant.UnLocakFailureWithLockOccupancy { - rl.logger.Info("unlock lock failed", zap.String("token", rl.token), zap.String("key", rl.key)) + rl.Logger.Info("unlock lock failed", zap.String("token", rl.Token), zap.String("key", rl.Key)) return fmt.Errorf("unlock lock failed:%w", constant.NewRedisResult(constant.UnLocakFailureWithLockOccupancy, constant.UnLockType, "")) } return nil @@ -246,13 +246,13 @@ func GetLocker(client *redis.Client, ops *RedissionLockConfig) *redissionLocker } r := &redissionLocker{ - token: ops.Token, - key: strings.Join([]string{ops.Prefix, ops.Key}, ":"), + Token: ops.Token, + Key: strings.Join([]string{ops.Prefix, ops.Key}, ":"), waitChanKey: strings.Join([]string{ops.ChanPrefix, ops.Key, "wait"}, ":"), needRefresh: ops.NeedRefresh, client: client, refreshExitChan: make(chan struct{}), - logger: logger.GetLoggerInstance(), + Logger: logger.GetLoggerInstance(), } return r } diff --git a/distributedlock/redis_rwlock.go b/distributedlock/redis_rwlock.go index a94d566..9cde98e 100644 --- a/distributedlock/redis_rwlock.go +++ b/distributedlock/redis_rwlock.go @@ -21,13 +21,13 @@ type RedissionRWLocker struct { redissionLocker writeWaitChanKey string readWaitChanKey string - rwTokenTimeoutPrefix string + RWTokenTimeoutPrefix string } func (rl *RedissionRWLocker) RLock(ctx context.Context, timeout ...time.Duration) error { result := rl.tryRLock(ctx).(*constant.RedisResult) if result.Code == constant.UnknownInternalError { - rl.logger.Error(result.OutputResultMessage()) + rl.Logger.Error(result.OutputResultMessage()) return fmt.Errorf("get read lock failed:%w", result) } @@ -42,7 +42,7 @@ func (rl *RedissionRWLocker) RLock(ctx context.Context, timeout ...time.Duration go rl.refreshLockTimeout(ctx) }) } - rl.logger.Info("success get the read lock by key and token", zap.String("key", rl.key), zap.String("token", rl.token)) + rl.Logger.Info("success get the read lock by key and token", zap.String("key", rl.Key), zap.String("token", rl.Token)) return nil } @@ -61,18 +61,18 
@@ func (rl *RedissionRWLocker) RLock(ctx context.Context, timeout ...time.Duration case _, ok := <-subMsgChan: if !ok { err := errors.New("failed to read the read lock waiting for for the channel message") - rl.logger.Error("failed to read the read lock waiting for for the channel message") + rl.Logger.Error("failed to read the read lock waiting for for the channel message") return err } result := rl.tryRLock(ctx).(*constant.RedisResult) if (result.Code == constant.RLockFailureWithWLockOccupancy) || (result.Code == constant.UnknownInternalError) { - rl.logger.Info(result.OutputResultMessage()) + rl.Logger.Info(result.OutputResultMessage()) continue } if result.Code == constant.LockSuccess { - rl.logger.Info(result.OutputResultMessage()) + rl.Logger.Info(result.OutputResultMessage()) rl.closeSub(sub, rl.subExitChan) if rl.needRefresh { @@ -88,7 +88,7 @@ func (rl *RedissionRWLocker) RLock(ctx context.Context, timeout ...time.Duration return nil } case <-acquireTimer.C: - rl.logger.Info("the waiting time for obtaining the read lock operation has timed out") + rl.Logger.Info("the waiting time for obtaining the read lock operation has timed out") rl.closeSub(sub, rl.subExitChan) // after acquire lock timeout,notice the sub channel to close return constant.AcquireTimeoutErr @@ -101,7 +101,7 @@ func (rl *RedissionRWLocker) RLock(ctx context.Context, timeout ...time.Duration func (rl *RedissionRWLocker) tryRLock(ctx context.Context) error { lockType := constant.LockType - res := rl.client.Eval(ctx, luascript.RLockScript, []string{rl.key, rl.rwTokenTimeoutPrefix}, rl.lockLeaseTime, rl.token) + res := rl.client.Eval(ctx, luascript.RLockScript, []string{rl.Key, rl.RWTokenTimeoutPrefix}, rl.lockLeaseTime, rl.Token) val, err := res.Int() if err != redis.Nil && err != nil { return constant.NewRedisResult(constant.UnknownInternalError, lockType, err.Error()) @@ -110,7 +110,7 @@ func (rl *RedissionRWLocker) tryRLock(ctx context.Context) error { } func (rl *RedissionRWLocker) refreshLockTimeout(ctx context.Context) { - rl.logger.Info("lock refresh by key and token", zap.String("token", rl.token), zap.String("key", rl.key)) + rl.Logger.Info("lock refresh by key and token", zap.String("token", rl.Token), zap.String("key", rl.Key)) lockTime := time.Duration(rl.lockLeaseTime/3) * time.Second timer := time.NewTimer(lockTime) @@ -120,20 +120,20 @@ func (rl *RedissionRWLocker) refreshLockTimeout(ctx context.Context) { select { case <-timer.C: // extend key lease time - res := rl.client.Eval(ctx, luascript.RefreshRWLockScript, []string{rl.key, rl.rwTokenTimeoutPrefix}, rl.lockLeaseTime, rl.token) + res := rl.client.Eval(ctx, luascript.RefreshRWLockScript, []string{rl.Key, rl.RWTokenTimeoutPrefix}, rl.lockLeaseTime, rl.Token) val, err := res.Int() if err != redis.Nil && err != nil { - rl.logger.Info("lock refresh failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err)) + rl.Logger.Info("lock refresh failed", zap.String("token", rl.Token), zap.String("key", rl.Key), zap.Error(err)) return } if constant.RedisCode(val) == constant.RefreshLockFailure { - rl.logger.Error("lock refreash failed,can not find the read lock by key and token", zap.String("rwTokenPrefix", rl.rwTokenTimeoutPrefix), zap.String("token", rl.token), zap.String("key", rl.key)) + rl.Logger.Error("lock refreash failed,can not find the read lock by key and token", zap.String("rwTokenPrefix", rl.RWTokenTimeoutPrefix), zap.String("token", rl.Token), zap.String("key", rl.Key)) return } if constant.RedisCode(val) == 
constant.RefreshLockSuccess { - rl.logger.Info("lock refresh success by key and token", zap.String("token", rl.token), zap.String("key", rl.key)) + rl.Logger.Info("lock refresh success by key and token", zap.String("token", rl.Token), zap.String("key", rl.Key)) } timer.Reset(lockTime) case <-rl.refreshExitChan: @@ -143,11 +143,11 @@ func (rl *RedissionRWLocker) refreshLockTimeout(ctx context.Context) { } func (rl *RedissionRWLocker) UnRLock(ctx context.Context) error { - rl.logger.Info("unlock RLock by key and token", zap.String("key", rl.key), zap.String("token", rl.token)) - res := rl.client.Eval(ctx, luascript.UnRLockScript, []string{rl.key, rl.rwTokenTimeoutPrefix, rl.writeWaitChanKey}, unlockMessage, rl.token) + rl.Logger.Info("unlock RLock by key and token", zap.String("key", rl.Key), zap.String("token", rl.Token)) + res := rl.client.Eval(ctx, luascript.UnRLockScript, []string{rl.Key, rl.RWTokenTimeoutPrefix, rl.writeWaitChanKey}, unlockMessage, rl.Token) val, err := res.Int() if err != redis.Nil && err != nil { - rl.logger.Info("unlock read lock failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err)) + rl.Logger.Info("unlock read lock failed", zap.String("token", rl.Token), zap.String("key", rl.Key), zap.Error(err)) return fmt.Errorf("unlock read lock failed:%w", constant.NewRedisResult(constant.UnknownInternalError, constant.UnRLockType, err.Error())) } @@ -156,12 +156,12 @@ func (rl *RedissionRWLocker) UnRLock(ctx context.Context) error { rl.cancelRefreshLockTime() } - rl.logger.Info("unlock read lock success", zap.String("token", rl.token), zap.String("key", rl.key)) + rl.Logger.Info("unlock read lock success", zap.String("token", rl.Token), zap.String("key", rl.Key)) return nil } if constant.RedisCode(val) == constant.UnRLockFailureWithWLockOccupancy { - rl.logger.Info("unlock read lock failed", zap.String("token", rl.token), zap.String("key", rl.key)) + rl.Logger.Info("unlock read lock failed", zap.String("token", rl.Token), zap.String("key", rl.Key)) return fmt.Errorf("unlock read lock failed:%w", constant.NewRedisResult(constant.UnRLockFailureWithWLockOccupancy, constant.UnRLockType, "")) } return nil @@ -170,7 +170,7 @@ func (rl *RedissionRWLocker) UnRLock(ctx context.Context) error { func (rl *RedissionRWLocker) WLock(ctx context.Context, timeout ...time.Duration) error { result := rl.tryWLock(ctx).(*constant.RedisResult) if result.Code == constant.UnknownInternalError { - rl.logger.Error(result.OutputResultMessage()) + rl.Logger.Error(result.OutputResultMessage()) return fmt.Errorf("get write lock failed:%w", result) } @@ -185,7 +185,7 @@ func (rl *RedissionRWLocker) WLock(ctx context.Context, timeout ...time.Duration go rl.refreshLockTimeout(ctx) }) } - rl.logger.Info("success get the write lock by key and token", zap.String("key", rl.key), zap.String("token", rl.token)) + rl.Logger.Info("success get the write lock by key and token", zap.String("key", rl.Key), zap.String("token", rl.Token)) return nil } @@ -204,18 +204,18 @@ func (rl *RedissionRWLocker) WLock(ctx context.Context, timeout ...time.Duration case _, ok := <-subMsgChan: if !ok { err := errors.New("failed to read the write lock waiting for for the channel message") - rl.logger.Error("failed to read the read lock waiting for for the channel message") + rl.Logger.Error("failed to read the read lock waiting for for the channel message") return err } result := rl.tryWLock(ctx).(*constant.RedisResult) if (result.Code == constant.UnknownInternalError) || (result.Code == 
constant.WLockFailureWithRLockOccupancy) || (result.Code == constant.WLockFailureWithWLockOccupancy) || (result.Code == constant.WLockFailureWithNotFirstPriority) { - rl.logger.Info(result.OutputResultMessage()) + rl.Logger.Info(result.OutputResultMessage()) continue } if result.Code == constant.LockSuccess { - rl.logger.Info(result.OutputResultMessage()) + rl.Logger.Info(result.OutputResultMessage()) rl.closeSub(sub, rl.subExitChan) if rl.needRefresh { @@ -231,7 +231,7 @@ func (rl *RedissionRWLocker) WLock(ctx context.Context, timeout ...time.Duration return nil } case <-acquireTimer.C: - rl.logger.Info("the waiting time for obtaining the write lock operation has timed out") + rl.Logger.Info("the waiting time for obtaining the write lock operation has timed out") rl.closeSub(sub, rl.subExitChan) // after acquire lock timeout,notice the sub channel to close return constant.AcquireTimeoutErr @@ -244,7 +244,7 @@ func (rl *RedissionRWLocker) WLock(ctx context.Context, timeout ...time.Duration func (rl *RedissionRWLocker) tryWLock(ctx context.Context) error { lockType := constant.LockType - res := rl.client.Eval(ctx, luascript.WLockScript, []string{rl.key, rl.rwTokenTimeoutPrefix}, rl.lockLeaseTime, rl.token) + res := rl.client.Eval(ctx, luascript.WLockScript, []string{rl.Key, rl.RWTokenTimeoutPrefix}, rl.lockLeaseTime, rl.Token) val, err := res.Int() if err != redis.Nil && err != nil { return constant.NewRedisResult(constant.UnknownInternalError, lockType, err.Error()) @@ -253,10 +253,10 @@ func (rl *RedissionRWLocker) tryWLock(ctx context.Context) error { } func (rl *RedissionRWLocker) UnWLock(ctx context.Context) error { - res := rl.client.Eval(ctx, luascript.UnWLockScript, []string{rl.key, rl.rwTokenTimeoutPrefix, rl.writeWaitChanKey, rl.readWaitChanKey}, unlockMessage, rl.token) + res := rl.client.Eval(ctx, luascript.UnWLockScript, []string{rl.Key, rl.RWTokenTimeoutPrefix, rl.writeWaitChanKey, rl.readWaitChanKey}, unlockMessage, rl.Token) val, err := res.Int() if err != redis.Nil && err != nil { - rl.logger.Error("unlock write lock failed", zap.String("token", rl.token), zap.String("key", rl.key), zap.Error(err)) + rl.Logger.Error("unlock write lock failed", zap.String("token", rl.Token), zap.String("key", rl.Key), zap.Error(err)) return fmt.Errorf("unlock write lock failed:%w", constant.NewRedisResult(constant.UnknownInternalError, constant.UnWLockType, err.Error())) } @@ -264,12 +264,12 @@ func (rl *RedissionRWLocker) UnWLock(ctx context.Context) error { if rl.needRefresh && (constant.RedisCode(val) == constant.UnLockSuccess) { rl.cancelRefreshLockTime() } - rl.logger.Info("unlock write lock success", zap.String("token", rl.token), zap.String("key", rl.key)) + rl.Logger.Info("unlock write lock success", zap.String("token", rl.Token), zap.String("key", rl.Key)) return nil } if (constant.RedisCode(val) == constant.UnWLockFailureWithRLockOccupancy) || (constant.RedisCode(val) == constant.UnWLockFailureWithWLockOccupancy) { - rl.logger.Info("unlock write lock failed", zap.String("token", rl.token), zap.String("key", rl.key)) + rl.Logger.Info("unlock write lock failed", zap.String("token", rl.Token), zap.String("key", rl.Key)) return fmt.Errorf("unlock write lock failed:%w", constant.NewRedisResult(constant.RedisCode(val), constant.UnWLockType, "")) } return nil @@ -302,20 +302,20 @@ func GetRWLocker(client *redis.Client, conf *RedissionLockConfig) *RedissionRWLo } r := &redissionLocker{ - token: conf.Token, - key: strings.Join([]string{conf.Prefix, conf.Key}, ":"), + Token: conf.Token, + 
Key: strings.Join([]string{conf.Prefix, conf.Key}, ":"), needRefresh: conf.NeedRefresh, lockLeaseTime: conf.LockLeaseTime, client: client, refreshOnce: &sync.Once{}, - logger: logger.GetLoggerInstance(), + Logger: logger.GetLoggerInstance(), } rwLocker := &RedissionRWLocker{ redissionLocker: *r, writeWaitChanKey: strings.Join([]string{conf.ChanPrefix, conf.Key, "write"}, ":"), readWaitChanKey: strings.Join([]string{conf.ChanPrefix, conf.Key, "read"}, ":"), - rwTokenTimeoutPrefix: conf.TimeoutPrefix, + RWTokenTimeoutPrefix: conf.TimeoutPrefix, } return rwLocker } diff --git a/distributedlock/rwlock_test.go b/test/distributedlock/rwlock_test.go similarity index 66% rename from distributedlock/rwlock_test.go rename to test/distributedlock/rwlock_test.go index fc1c07b..f3a6990 100644 --- a/distributedlock/rwlock_test.go +++ b/test/distributedlock/rwlock_test.go @@ -1,4 +1,4 @@ -package distributedlock +package distributedlock_test import ( "context" @@ -6,6 +6,7 @@ import ( "testing" "time" + dl "modelRT/distributedlock" "modelRT/distributedlock/constant" "github.com/redis/go-redis/v9" @@ -39,28 +40,28 @@ func init() { func TestRWLockRLockAndUnRLock(t *testing.T) { ctx := context.TODO() - rwLocker := GetRWLocker(rdb, &RedissionLockConfig{ + rwLocker := dl.GetRWLocker(rdb, &dl.RedissionLockConfig{ LockLeaseTime: 120, NeedRefresh: true, Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", }) - rwLocker.logger = log + rwLocker.Logger = log duration := 10 * time.Second // 第一次加读锁 err := rwLocker.RLock(ctx, duration) assert.Equal(t, nil, err) - tokenKey := strings.Join([]string{rwLocker.rwTokenTimeoutPrefix, rwLocker.token}, ":") - num, err := rdb.HGet(ctx, rwLocker.key, tokenKey).Int() + tokenKey := strings.Join([]string{rwLocker.RWTokenTimeoutPrefix, rwLocker.Token}, ":") + num, err := rdb.HGet(ctx, rwLocker.Key, tokenKey).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) err = rwLocker.UnRLock(ctx) assert.Equal(t, nil, err) - num, err = rdb.HGet(ctx, rwLocker.key, tokenKey).Int() + num, err = rdb.HGet(ctx, rwLocker.Key, tokenKey).Int() assert.Equal(t, redis.Nil, err) assert.Equal(t, 0, num) t.Log("rwLock rlock and unrlock test success") @@ -70,21 +71,21 @@ func TestRWLockRLockAndUnRLock(t *testing.T) { func TestRWLockReentrantRLock(t *testing.T) { ctx := context.TODO() - rwLocker := GetRWLocker(rdb, &RedissionLockConfig{ + rwLocker := dl.GetRWLocker(rdb, &dl.RedissionLockConfig{ LockLeaseTime: 120, NeedRefresh: true, Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", }) - rwLocker.logger = log + rwLocker.Logger = log duration := 10 * time.Second // 第一次加读锁 err := rwLocker.RLock(ctx, duration) assert.Equal(t, nil, err) - tokenKey := strings.Join([]string{rwLocker.rwTokenTimeoutPrefix, rwLocker.token}, ":") - num, err := rdb.HGet(ctx, rwLocker.key, tokenKey).Int() + tokenKey := strings.Join([]string{rwLocker.RWTokenTimeoutPrefix, rwLocker.Token}, ":") + num, err := rdb.HGet(ctx, rwLocker.Key, tokenKey).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) @@ -92,7 +93,7 @@ func TestRWLockReentrantRLock(t *testing.T) { err = rwLocker.RLock(ctx, duration) assert.Equal(t, nil, err) - num, err = rdb.HGet(ctx, rwLocker.key, tokenKey).Int() + num, err = rdb.HGet(ctx, rwLocker.Key, tokenKey).Int() assert.Equal(t, nil, err) assert.Equal(t, 2, num) @@ -100,7 +101,7 @@ func TestRWLockReentrantRLock(t *testing.T) { err = rwLocker.UnRLock(ctx) assert.Equal(t, nil, err) - num, err = rdb.HGet(ctx, rwLocker.key, tokenKey).Int() + num, err = rdb.HGet(ctx, rwLocker.Key, 
tokenKey).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) @@ -108,7 +109,7 @@ func TestRWLockReentrantRLock(t *testing.T) { err = rwLocker.UnRLock(ctx) assert.Equal(t, nil, err) - num, err = rdb.HGet(ctx, rwLocker.key, tokenKey).Int() + num, err = rdb.HGet(ctx, rwLocker.Key, tokenKey).Int() assert.Equal(t, redis.Nil, err) assert.Equal(t, 0, num) t.Log("rwLock reentrant lock test success") @@ -118,29 +119,30 @@ func TestRWLockReentrantRLock(t *testing.T) { func TestRWLockRefreshRLock(t *testing.T) { ctx := context.TODO() - rwLocker := GetRWLocker(rdb, &RedissionLockConfig{ + rwLocker := dl.GetRWLocker(rdb, &dl.RedissionLockConfig{ LockLeaseTime: 10, NeedRefresh: true, Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", }) - rwLocker.logger = log + rwLocker.Logger = log duration := 10 * time.Second // 第一次加读锁 err := rwLocker.RLock(ctx, duration) assert.Equal(t, nil, err) - tokenKey := strings.Join([]string{rwLocker.rwTokenTimeoutPrefix, rwLocker.token}, ":") - num, err := rdb.HGet(ctx, rwLocker.key, tokenKey).Int() + tokenKey := strings.Join([]string{rwLocker.RWTokenTimeoutPrefix, rwLocker.Token}, ":") + num, err := rdb.HGet(ctx, rwLocker.Key, tokenKey).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) time.Sleep(10 * time.Second) script := `return redis.call('httl', KEYS[1], 'fields', '1', ARGV[1]);` - result, err := rdb.Eval(ctx, script, []string{rwLocker.key}, tokenKey).Result() + result, err := rdb.Eval(ctx, script, []string{rwLocker.Key}, tokenKey).Result() assert.Equal(t, nil, err) - ttls, ok := result.([]interface{}) + // ttls, ok := result.([]interface{}) + ttls, ok := result.([]any) assert.Equal(t, true, ok) ttl, ok := ttls[0].(int64) assert.Equal(t, true, ok) @@ -150,7 +152,7 @@ func TestRWLockRefreshRLock(t *testing.T) { err = rwLocker.UnRLock(ctx) assert.Equal(t, nil, err) - num, err = rdb.HGet(ctx, rwLocker.key, tokenKey).Int() + num, err = rdb.HGet(ctx, rwLocker.Key, tokenKey).Int() assert.Equal(t, redis.Nil, err) assert.Equal(t, 0, num) t.Log("rwLock refresh lock test success") @@ -160,29 +162,29 @@ func TestRWLockRefreshRLock(t *testing.T) { func TestRWLock2ClientRLock(t *testing.T) { ctx := context.TODO() - rwLocker1 := GetRWLocker(rdb, &RedissionLockConfig{ + rwLocker1 := dl.GetRWLocker(rdb, &dl.RedissionLockConfig{ LockLeaseTime: 120, NeedRefresh: true, Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", }) - rwLocker1.logger = log + rwLocker1.Logger = log - rwLocker2 := GetRWLocker(rdb, &RedissionLockConfig{ + rwLocker2 := dl.GetRWLocker(rdb, &dl.RedissionLockConfig{ LockLeaseTime: 120, NeedRefresh: true, Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc5577", }) - rwLocker2.logger = log + rwLocker2.Logger = log duration := 10 * time.Second // locker1加读锁 err := rwLocker1.RLock(ctx, duration) assert.Equal(t, nil, err) - tokenKey1 := strings.Join([]string{rwLocker1.rwTokenTimeoutPrefix, rwLocker1.token}, ":") - num, err := rdb.HGet(ctx, rwLocker1.key, tokenKey1).Int() + tokenKey1 := strings.Join([]string{rwLocker1.RWTokenTimeoutPrefix, rwLocker1.Token}, ":") + num, err := rdb.HGet(ctx, rwLocker1.Key, tokenKey1).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) @@ -190,14 +192,14 @@ func TestRWLock2ClientRLock(t *testing.T) { err = rwLocker2.RLock(ctx, duration) assert.Equal(t, nil, err) - tokenKey2 := strings.Join([]string{rwLocker2.rwTokenTimeoutPrefix, rwLocker2.token}, ":") - num, err = rdb.HGet(ctx, rwLocker2.key, tokenKey2).Int() + tokenKey2 := strings.Join([]string{rwLocker2.RWTokenTimeoutPrefix, rwLocker2.Token}, 
":") + num, err = rdb.HGet(ctx, rwLocker2.Key, tokenKey2).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) - err = rdb.HLen(ctx, rwLocker1.key).Err() + err = rdb.HLen(ctx, rwLocker1.Key).Err() assert.Equal(t, nil, err) - hLen := rdb.HLen(ctx, rwLocker1.key).Val() + hLen := rdb.HLen(ctx, rwLocker1.Key).Val() assert.Equal(t, int64(3), hLen) // locker1解读锁 @@ -208,9 +210,9 @@ func TestRWLock2ClientRLock(t *testing.T) { err = rwLocker2.UnRLock(ctx) assert.Equal(t, nil, err) - err = rdb.Exists(ctx, rwLocker1.key).Err() + err = rdb.Exists(ctx, rwLocker1.Key).Err() assert.Equal(t, nil, err) - existNum := rdb.Exists(ctx, rwLocker1.key).Val() + existNum := rdb.Exists(ctx, rwLocker1.Key).Val() assert.Equal(t, int64(0), existNum) t.Log("rwLock 2 client lock test success") return @@ -219,29 +221,29 @@ func TestRWLock2ClientRLock(t *testing.T) { func TestRWLock2CWith2DifTimeRLock(t *testing.T) { ctx := context.TODO() - rwLocker1 := GetRWLocker(rdb, &RedissionLockConfig{ + rwLocker1 := dl.GetRWLocker(rdb, &dl.RedissionLockConfig{ LockLeaseTime: 120, NeedRefresh: true, Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", }) - rwLocker1.logger = log + rwLocker1.Logger = log - rwLocker2 := GetRWLocker(rdb, &RedissionLockConfig{ + rwLocker2 := dl.GetRWLocker(rdb, &dl.RedissionLockConfig{ LockLeaseTime: 30, NeedRefresh: true, Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc5577", }) - rwLocker2.logger = log + rwLocker2.Logger = log duration := 10 * time.Second // locker1加读锁 err := rwLocker1.RLock(ctx, duration) assert.Equal(t, nil, err) - tokenKey1 := strings.Join([]string{rwLocker1.rwTokenTimeoutPrefix, rwLocker1.token}, ":") - num, err := rdb.HGet(ctx, rwLocker1.key, tokenKey1).Int() + tokenKey1 := strings.Join([]string{rwLocker1.RWTokenTimeoutPrefix, rwLocker1.Token}, ":") + num, err := rdb.HGet(ctx, rwLocker1.Key, tokenKey1).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) @@ -249,20 +251,21 @@ func TestRWLock2CWith2DifTimeRLock(t *testing.T) { err = rwLocker2.RLock(ctx, duration) assert.Equal(t, nil, err) - tokenKey2 := strings.Join([]string{rwLocker2.rwTokenTimeoutPrefix, rwLocker2.token}, ":") - num, err = rdb.HGet(ctx, rwLocker2.key, tokenKey2).Int() + tokenKey2 := strings.Join([]string{rwLocker2.RWTokenTimeoutPrefix, rwLocker2.Token}, ":") + num, err = rdb.HGet(ctx, rwLocker2.Key, tokenKey2).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) - err = rdb.HLen(ctx, rwLocker1.key).Err() + err = rdb.HLen(ctx, rwLocker1.Key).Err() assert.Equal(t, nil, err) - hLen := rdb.HLen(ctx, rwLocker1.key).Val() + hLen := rdb.HLen(ctx, rwLocker1.Key).Val() assert.Equal(t, int64(3), hLen) script := `return redis.call('httl', KEYS[1], 'fields', '1', ARGV[1]);` - result, err := rdb.Eval(ctx, script, []string{rwLocker1.key}, tokenKey1).Result() + result, err := rdb.Eval(ctx, script, []string{rwLocker1.Key}, tokenKey1).Result() assert.Equal(t, nil, err) - ttls, ok := result.([]interface{}) + // ttls, ok := result.([]interface{}) + ttls, ok := result.([]any) assert.Equal(t, true, ok) ttl, ok := ttls[0].(int64) assert.Equal(t, true, ok) @@ -273,16 +276,16 @@ func TestRWLock2CWith2DifTimeRLock(t *testing.T) { err = rwLocker1.UnRLock(ctx) assert.Equal(t, nil, err) - hashTTL := rdb.TTL(ctx, rwLocker1.key).Val().Seconds() + hashTTL := rdb.TTL(ctx, rwLocker1.Key).Val().Seconds() assert.Greater(t, hashTTL, float64(20)) // locker2解读锁 err = rwLocker2.UnRLock(ctx) assert.Equal(t, nil, err) - err = rdb.Exists(ctx, rwLocker1.key).Err() + err = rdb.Exists(ctx, rwLocker1.Key).Err() 
assert.Equal(t, nil, err) - existNum := rdb.Exists(ctx, rwLocker1.key).Val() + existNum := rdb.Exists(ctx, rwLocker1.Key).Val() assert.Equal(t, int64(0), existNum) t.Log("rwLock 2 client lock test success") return @@ -291,29 +294,29 @@ func TestRWLock2CWith2DifTimeRLock(t *testing.T) { func TestRWLock2CWithTimeTransformRLock(t *testing.T) { ctx := context.TODO() - rwLocker1 := GetRWLocker(rdb, &RedissionLockConfig{ + rwLocker1 := dl.GetRWLocker(rdb, &dl.RedissionLockConfig{ LockLeaseTime: 30, NeedRefresh: true, Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", }) - rwLocker1.logger = log + rwLocker1.Logger = log - rwLocker2 := GetRWLocker(rdb, &RedissionLockConfig{ + rwLocker2 := dl.GetRWLocker(rdb, &dl.RedissionLockConfig{ LockLeaseTime: 120, NeedRefresh: true, Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc5577", }) - rwLocker2.logger = log + rwLocker2.Logger = log duration := 10 * time.Second // locker1加读锁 err := rwLocker1.RLock(ctx, duration) assert.Equal(t, nil, err) - tokenKey1 := strings.Join([]string{rwLocker1.rwTokenTimeoutPrefix, rwLocker1.token}, ":") - num, err := rdb.HGet(ctx, rwLocker1.key, tokenKey1).Int() + tokenKey1 := strings.Join([]string{rwLocker1.RWTokenTimeoutPrefix, rwLocker1.Token}, ":") + num, err := rdb.HGet(ctx, rwLocker1.Key, tokenKey1).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) @@ -321,17 +324,17 @@ func TestRWLock2CWithTimeTransformRLock(t *testing.T) { err = rwLocker2.RLock(ctx, duration) assert.Equal(t, nil, err) - tokenKey2 := strings.Join([]string{rwLocker2.rwTokenTimeoutPrefix, rwLocker2.token}, ":") - num, err = rdb.HGet(ctx, rwLocker2.key, tokenKey2).Int() + tokenKey2 := strings.Join([]string{rwLocker2.RWTokenTimeoutPrefix, rwLocker2.Token}, ":") + num, err = rdb.HGet(ctx, rwLocker2.Key, tokenKey2).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) - err = rdb.HLen(ctx, rwLocker1.key).Err() + err = rdb.HLen(ctx, rwLocker1.Key).Err() assert.Equal(t, nil, err) - hLen := rdb.HLen(ctx, rwLocker1.key).Val() + hLen := rdb.HLen(ctx, rwLocker1.Key).Val() assert.Equal(t, int64(3), hLen) - hashTTL := rdb.TTL(ctx, rwLocker2.key).Val().Seconds() + hashTTL := rdb.TTL(ctx, rwLocker2.Key).Val().Seconds() assert.Greater(t, hashTTL, float64(100)) // locker2解读锁 @@ -339,16 +342,16 @@ func TestRWLock2CWithTimeTransformRLock(t *testing.T) { assert.Equal(t, nil, err) time.Sleep(10 * time.Second) - hashTTL = rdb.TTL(ctx, rwLocker1.key).Val().Seconds() + hashTTL = rdb.TTL(ctx, rwLocker1.Key).Val().Seconds() assert.Greater(t, hashTTL, float64(15)) // locker1解读锁 err = rwLocker1.UnRLock(ctx) assert.Equal(t, nil, err) - err = rdb.Exists(ctx, rwLocker1.key).Err() + err = rdb.Exists(ctx, rwLocker1.Key).Err() assert.Equal(t, nil, err) - existNum := rdb.Exists(ctx, rwLocker1.key).Val() + existNum := rdb.Exists(ctx, rwLocker1.Key).Val() assert.Equal(t, int64(0), existNum) t.Log("rwLock 2 client lock test success") return @@ -357,28 +360,28 @@ func TestRWLock2CWithTimeTransformRLock(t *testing.T) { func TestRWLockWLockAndUnWLock(t *testing.T) { ctx := context.TODO() - rwLocker := GetRWLocker(rdb, &RedissionLockConfig{ + rwLocker := dl.GetRWLocker(rdb, &dl.RedissionLockConfig{ LockLeaseTime: 120, NeedRefresh: true, Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", }) - rwLocker.logger = log + rwLocker.Logger = log duration := 10 * time.Second // 第一次加读锁 err := rwLocker.WLock(ctx, duration) assert.Equal(t, nil, err) - tokenKey := strings.Join([]string{rwLocker.rwTokenTimeoutPrefix, rwLocker.token}, ":") - num, err := rdb.HGet(ctx, 
rwLocker.key, tokenKey).Int() + tokenKey := strings.Join([]string{rwLocker.RWTokenTimeoutPrefix, rwLocker.Token}, ":") + num, err := rdb.HGet(ctx, rwLocker.Key, tokenKey).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) err = rwLocker.UnWLock(ctx) assert.Equal(t, nil, err) - num, err = rdb.HGet(ctx, rwLocker.key, tokenKey).Int() + num, err = rdb.HGet(ctx, rwLocker.Key, tokenKey).Int() assert.Equal(t, redis.Nil, err) assert.Equal(t, 0, num) t.Log("rwLock rlock and unrlock test success") @@ -388,21 +391,21 @@ func TestRWLockWLockAndUnWLock(t *testing.T) { func TestRWLockReentrantWLock(t *testing.T) { ctx := context.TODO() - rwLocker := GetRWLocker(rdb, &RedissionLockConfig{ + rwLocker := dl.GetRWLocker(rdb, &dl.RedissionLockConfig{ LockLeaseTime: 120, NeedRefresh: true, Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", }) - rwLocker.logger = log + rwLocker.Logger = log duration := 10 * time.Second // 第一次加写锁 err := rwLocker.WLock(ctx, duration) assert.Equal(t, nil, err) - tokenKey := strings.Join([]string{rwLocker.rwTokenTimeoutPrefix, rwLocker.token}, ":") - num, err := rdb.HGet(ctx, rwLocker.key, tokenKey).Int() + tokenKey := strings.Join([]string{rwLocker.RWTokenTimeoutPrefix, rwLocker.Token}, ":") + num, err := rdb.HGet(ctx, rwLocker.Key, tokenKey).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) @@ -410,7 +413,7 @@ func TestRWLockReentrantWLock(t *testing.T) { err = rwLocker.WLock(ctx, duration) assert.Equal(t, nil, err) - num, err = rdb.HGet(ctx, rwLocker.key, tokenKey).Int() + num, err = rdb.HGet(ctx, rwLocker.Key, tokenKey).Int() assert.Equal(t, nil, err) assert.Equal(t, 2, num) @@ -418,7 +421,7 @@ func TestRWLockReentrantWLock(t *testing.T) { err = rwLocker.UnWLock(ctx) assert.Equal(t, nil, err) - num, err = rdb.HGet(ctx, rwLocker.key, tokenKey).Int() + num, err = rdb.HGet(ctx, rwLocker.Key, tokenKey).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) @@ -426,7 +429,7 @@ func TestRWLockReentrantWLock(t *testing.T) { err = rwLocker.UnWLock(ctx) assert.Equal(t, nil, err) - num, err = rdb.HGet(ctx, rwLocker.key, tokenKey).Int() + num, err = rdb.HGet(ctx, rwLocker.Key, tokenKey).Int() assert.Equal(t, redis.Nil, err) assert.Equal(t, 0, num) t.Log("rwLock reentrant lock test success") @@ -436,29 +439,29 @@ func TestRWLockReentrantWLock(t *testing.T) { func TestRWLock2CWithRLockAndWLockFailed(t *testing.T) { ctx := context.TODO() - rwLocker1 := GetRWLocker(rdb, &RedissionLockConfig{ + rwLocker1 := dl.GetRWLocker(rdb, &dl.RedissionLockConfig{ LockLeaseTime: 120, NeedRefresh: true, Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", }) - rwLocker1.logger = log + rwLocker1.Logger = log - rwLocker2 := GetRWLocker(rdb, &RedissionLockConfig{ + rwLocker2 := dl.GetRWLocker(rdb, &dl.RedissionLockConfig{ LockLeaseTime: 30, NeedRefresh: true, Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc5577", }) - rwLocker2.logger = log + rwLocker2.Logger = log duration := 10 * time.Second // locker1加读锁 err := rwLocker1.RLock(ctx, duration) assert.Equal(t, nil, err) - tokenKey1 := strings.Join([]string{rwLocker1.rwTokenTimeoutPrefix, rwLocker1.token}, ":") - num, err := rdb.HGet(ctx, rwLocker1.key, tokenKey1).Int() + tokenKey1 := strings.Join([]string{rwLocker1.RWTokenTimeoutPrefix, rwLocker1.Token}, ":") + num, err := rdb.HGet(ctx, rwLocker1.Key, tokenKey1).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) @@ -476,28 +479,28 @@ func TestRWLock2CWithRLockAndWLockFailed(t *testing.T) { func TestRWLock2CWithRLockAndWLockSucceed(t *testing.T) { ctx := 
context.TODO() - rwLocker1 := GetRWLocker(rdb, &RedissionLockConfig{ + rwLocker1 := dl.GetRWLocker(rdb, &dl.RedissionLockConfig{ LockLeaseTime: 120, NeedRefresh: true, Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", }) - rwLocker1.logger = log + rwLocker1.Logger = log - rwLocker2 := GetRWLocker(rdb, &RedissionLockConfig{ + rwLocker2 := dl.GetRWLocker(rdb, &dl.RedissionLockConfig{ LockLeaseTime: 120, NeedRefresh: true, Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc5577", }) - rwLocker2.logger = log + rwLocker2.Logger = log duration := 10 * time.Second // locker1加读锁 err := rwLocker1.RLock(ctx, duration) assert.Equal(t, nil, err) - tokenKey1 := strings.Join([]string{rwLocker1.rwTokenTimeoutPrefix, rwLocker1.token}, ":") - num, err := rdb.HGet(ctx, rwLocker1.key, tokenKey1).Int() + tokenKey1 := strings.Join([]string{rwLocker1.RWTokenTimeoutPrefix, rwLocker1.Token}, ":") + num, err := rdb.HGet(ctx, rwLocker1.Key, tokenKey1).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) @@ -513,8 +516,8 @@ func TestRWLock2CWithRLockAndWLockSucceed(t *testing.T) { err = rwLocker2.WLock(ctx, duration) assert.Equal(t, nil, err) - tokenKey2 := strings.Join([]string{rwLocker2.rwTokenTimeoutPrefix, rwLocker2.token}, ":") - num, err = rdb.HGet(ctx, rwLocker2.key, tokenKey2).Int() + tokenKey2 := strings.Join([]string{rwLocker2.RWTokenTimeoutPrefix, rwLocker2.Token}, ":") + num, err = rdb.HGet(ctx, rwLocker2.Key, tokenKey2).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) @@ -529,29 +532,29 @@ func TestRWLock2CWithRLockAndWLockSucceed(t *testing.T) { func TestRWLock2CWithWLockAndRLock(t *testing.T) { ctx := context.TODO() - rwLocker1 := GetRWLocker(rdb, &RedissionLockConfig{ + rwLocker1 := dl.GetRWLocker(rdb, &dl.RedissionLockConfig{ LockLeaseTime: 120, NeedRefresh: true, Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", }) - rwLocker1.logger = log + rwLocker1.Logger = log - rwLocker2 := GetRWLocker(rdb, &RedissionLockConfig{ + rwLocker2 := dl.GetRWLocker(rdb, &dl.RedissionLockConfig{ LockLeaseTime: 30, NeedRefresh: true, Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc5577", }) - rwLocker2.logger = log + rwLocker2.Logger = log duration := 10 * time.Second // locker1加写锁 err := rwLocker1.WLock(ctx, duration) assert.Equal(t, nil, err) - tokenKey1 := strings.Join([]string{rwLocker1.rwTokenTimeoutPrefix, rwLocker1.token}, ":") - num, err := rdb.HGet(ctx, rwLocker1.key, tokenKey1).Int() + tokenKey1 := strings.Join([]string{rwLocker1.RWTokenTimeoutPrefix, rwLocker1.Token}, ":") + num, err := rdb.HGet(ctx, rwLocker1.Key, tokenKey1).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) @@ -567,8 +570,8 @@ func TestRWLock2CWithWLockAndRLock(t *testing.T) { err = rwLocker2.RLock(ctx, duration) assert.Equal(t, nil, err) - tokenKey2 := strings.Join([]string{rwLocker2.rwTokenTimeoutPrefix, rwLocker2.token}, ":") - num, err = rdb.HGet(ctx, rwLocker2.key, tokenKey2).Int() + tokenKey2 := strings.Join([]string{rwLocker2.RWTokenTimeoutPrefix, rwLocker2.Token}, ":") + num, err = rdb.HGet(ctx, rwLocker2.Key, tokenKey2).Int() assert.Equal(t, nil, err) assert.Equal(t, 1, num) From 23110cbba9d9dc2dc54daeb20d2873928716b1d6 Mon Sep 17 00:00:00 2001 From: douxu Date: Fri, 18 Apr 2025 15:17:51 +0800 Subject: [PATCH 23/33] refactor(locker params): modify the locker lease time unit 1.modify the locker lease time unit 2.modify internalLockLeaseTime params unit 3.modify refreshTime params unit 4.modify lua script --- distributedlock/luascript/rwlock_script.go | 46 
+++++++++++----------- distributedlock/redis_lock.go | 4 +- distributedlock/redis_rwlock.go | 2 +- 3 files changed, 26 insertions(+), 26 deletions(-) diff --git a/distributedlock/luascript/rwlock_script.go b/distributedlock/luascript/rwlock_script.go index db58a52..b26f557 100644 --- a/distributedlock/luascript/rwlock_script.go +++ b/distributedlock/luascript/rwlock_script.go @@ -14,8 +14,8 @@ local lockKey = KEYS[2] .. ':' .. ARGV[2]; if (mode == false) then redis.call('hset', KEYS[1], 'mode', 'read'); redis.call('hset', KEYS[1], lockKey, '1'); - redis.call('hexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey); - redis.call('expire', KEYS[1], ARGV[1]); + redis.call('hpexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey); + redis.call('pexpire', KEYS[1], ARGV[1]); return 1; end; @@ -29,11 +29,11 @@ end; if (mode == 'read') then if (redis.call('exists', KEYS[1], ARGV[2]) == 1) then redis.call('hincrby', KEYS[1], lockKey, '1'); - local remainTime = redis.call('httl', KEYS[1], 'fields', '1', lockKey); - redis.call('hexpire', KEYS[1], math.max(tonumber(remainTime[1]), ARGV[1]), 'fields', '1', lockKey); + local remainTime = redis.call('hpttl', KEYS[1], 'fields', '1', lockKey); + redis.call('hpexpire', KEYS[1], math.max(tonumber(remainTime[1]), ARGV[1]), 'fields', '1', lockKey); else redis.call('hset', KEYS[1], lockKey, '1'); - redis.call('hexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey); + redis.call('hpexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey); end; local cursor = 0; local maxRemainTime = tonumber(ARGV[1]); @@ -45,13 +45,13 @@ if (mode == 'read') then for i = 1, #fields,2 do local field = fields[i]; - local remainTime = redis.call('httl', KEYS[1], 'fields', '1', field); + local remainTime = redis.call('hpttl', KEYS[1], 'fields', '1', field); maxRemainTime = math.max(tonumber(remainTime[1]), maxRemainTime); end; until cursor == 0; - local remainTime = redis.call('ttl', KEYS[1]); - redis.call('expire', KEYS[1], math.max(tonumber(remainTime),maxRemainTime)); + local remainTime = redis.call('pttl', KEYS[1]); + redis.call('pexpire', KEYS[1], math.max(tonumber(remainTime),maxRemainTime)); return 1; end; ` @@ -86,7 +86,7 @@ if ((mode == 'read') and (lockExists == 0)) then end; local counter = redis.call('hincrby', KEYS[1], lockKey, -1); -local delTTLs = redis.call('httl', KEYS[1], 'fields', '1', lockKey); +local delTTLs = redis.call('hpttl', KEYS[1], 'fields', '1', lockKey); local delTTL = tonumber(delTTLs[1]); if (counter == 0) then redis.call('hdel', KEYS[1], lockKey); @@ -103,17 +103,17 @@ if (redis.call('hlen', KEYS[1]) > 1) then for i = 1, #fields,2 do local field = fields[i]; - local remainTime = redis.call('httl', KEYS[1], 'fields', '1', field); + local remainTime = redis.call('hpttl', KEYS[1], 'fields', '1', field); maxRemainTime = math.max(tonumber(remainTime[1]), maxRemainTime); end; until cursor == 0; if (maxRemainTime > 0) then if (delTTL > maxRemainTime) then - redis.call('expire', KEYS[1], maxRemainTime); + redis.call('pexpire', KEYS[1], maxRemainTime); else - local remainTime = redis.call('ttl', KEYS[1]); - redis.call('expire', KEYS[1], math.max(tonumber(remainTime),maxRemainTime)); + local remainTime = redis.call('pttl', KEYS[1]); + redis.call('pexpire', KEYS[1], math.max(tonumber(remainTime),maxRemainTime)); end; end; else @@ -149,8 +149,8 @@ if (mode == false) then end; redis.call('hset', KEYS[1], 'mode', 'write'); redis.call('hset', KEYS[1], lockKey, 1); - redis.call('hexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey); - redis.call('expire', KEYS[1], ARGV[1]); + 
redis.call('hpexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey);
+    redis.call('pexpire', KEYS[1], ARGV[1]);
     redis.call('lpop', waitKey, '1');
     return 1;
 elseif (mode == 'read') then
@@ -163,8 +163,8 @@ else
     local lockExists = redis.call('hexists', KEYS[1], lockKey);
     if (lockExists == 1) then
         redis.call('hincrby', KEYS[1], lockKey, 1);
-        redis.call('hexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey);
-        redis.call('expire', KEYS[1], ARGV[1]);
+        redis.call('hpexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey);
+        redis.call('pexpire', KEYS[1], ARGV[1]);
         return 1;
     end;
     -- 放到 list 中等待写锁释放后再次尝试加锁并且订阅写锁释放的消息
@@ -233,7 +233,7 @@ local lockExists = redis.call('hexists', KEYS[1], lockKey);
 local mode = redis.call('hget', KEYS[1], 'mode');
 local maxRemainTime = tonumber(ARGV[1]);
 if (lockExists == 1) then
-    redis.call('hexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey);
+    redis.call('hpexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey);
     if (mode == 'read') then
         local cursor = 0;
         local pattern = KEYS[2] .. ':*';
@@ -244,19 +244,19 @@ if (lockExists == 1) then
 
             for i = 1, #fields,2 do
                 local field = fields[i];
-                local remainTime = redis.call('httl', KEYS[1], 'fields', '1', field);
+                local remainTime = redis.call('hpttl', KEYS[1], 'fields', '1', field);
                 maxRemainTime = math.max(tonumber(remainTime[1]), maxRemainTime);
             end;
         until cursor == 0;
 
         if (maxRemainTime > 0) then
-            local remainTime = redis.call('ttl', KEYS[1]);
-            redis.call('expire', KEYS[1], math.max(tonumber(remainTime),maxRemainTime));
+            local remainTime = redis.call('pttl', KEYS[1]);
+            redis.call('pexpire', KEYS[1], math.max(tonumber(remainTime),maxRemainTime));
         end;
     elseif (mode == 'write') then
-        redis.call('expire', KEYS[1], ARGV[1]);
+        redis.call('pexpire', KEYS[1], ARGV[1]);
     end;
-    -- return redis.call('ttl',KEYS[1]);
+    -- return redis.call('pttl',KEYS[1]);
     return 1;
 end;
 return -8;
diff --git a/distributedlock/redis_lock.go b/distributedlock/redis_lock.go
index 0cdbb39..aed73dc 100644
--- a/distributedlock/redis_lock.go
+++ b/distributedlock/redis_lock.go
@@ -18,7 +18,7 @@ import (
 )
 
 const (
-	internalLockLeaseTime = uint64(30)
+	internalLockLeaseTime = uint64(30 * 1000)
 	unlockMessage         = 0
 )
 
@@ -128,7 +128,7 @@ ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端
 func (rl *redissionLocker) refreshLockTimeout(ctx context.Context) {
 	rl.Logger.Info("lock refresh by key and token", zap.String("token", rl.Token), zap.String("key", rl.Key))
 
-	lockTime := time.Duration(rl.lockLeaseTime/3) * time.Second
+	lockTime := time.Duration(rl.lockLeaseTime/3) * time.Millisecond
 	timer := time.NewTimer(lockTime)
 	defer timer.Stop()
 
diff --git a/distributedlock/redis_rwlock.go b/distributedlock/redis_rwlock.go
index 9cde98e..52e44af 100644
--- a/distributedlock/redis_rwlock.go
+++ b/distributedlock/redis_rwlock.go
@@ -112,7 +112,7 @@ func (rl *RedissionRWLocker) tryRLock(ctx context.Context) error {
 func (rl *RedissionRWLocker) refreshLockTimeout(ctx context.Context) {
 	rl.Logger.Info("lock refresh by key and token", zap.String("token", rl.Token), zap.String("key", rl.Key))
 
-	lockTime := time.Duration(rl.lockLeaseTime/3) * time.Second
+	lockTime := time.Duration(rl.lockLeaseTime/3) * time.Millisecond
 	timer := time.NewTimer(lockTime)
 	defer timer.Stop()
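Patch 23 changes the unit contract on both sides at once: Go now hands the scripts a millisecond lease (internalLockLeaseTime = 30 * 1000, refresh period derived with time.Millisecond), so every expire/hexpire/ttl/httl in the scripts has to become its millisecond variant. A minimal sketch of the convention, assuming a go-redis v9 client; the one-line script stands in for the real refresh scripts above:

package sketch

import (
	"context"
	"time"

	"github.com/redis/go-redis/v9"
)

// leaseMillis mirrors internalLockLeaseTime: the lease travels as
// milliseconds from Go into ARGV[1].
const leaseMillis = uint64(30 * 1000)

// refreshPeriod derives the watchdog interval from the millisecond lease.
func refreshPeriod() time.Duration {
	return time.Duration(leaseMillis/3) * time.Millisecond // 10s for a 30s lease
}

// touch shows why the scripts must use the P-variants: ARGV[1] is now
// milliseconds, so PEXPIRE/HPEXPIRE are required instead of EXPIRE/HEXPIRE.
// The script body is illustrative, not one of the scripts in this series.
func touch(ctx context.Context, rdb *redis.Client, key, lockField string) error {
	script := `redis.call('hpexpire', KEYS[1], ARGV[1], 'fields', '1', ARGV[2]);
redis.call('pexpire', KEYS[1], ARGV[1]);
return 1;`
	return rdb.Eval(ctx, script, []string{key}, leaseMillis, lockField).Err()
}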
From af0cfce78fa19e781e8c6e8b9e4d8b2f0600e194 Mon Sep 17 00:00:00 2001
From: douxu
Date: Wed, 30 Apr 2025 16:44:58 +0800
Subject: [PATCH 24/33] refactor(component set): add the return value of component query func

1.add the return value of the component query func

refactor(diagram set): add the return value of topologic query func

1.add the return value of the topologic query func

feat(bay info): init interval information constructor

1.init interval information constructor

test(sql case): add new sqlmock sql case

1.add new sqlmock-based sql test case
---
 database/query_component.go |  24 +++++++++++--
 database/query_topologic.go |  20 ++++++++---
 go.mod                      |   3 ++
 go.sum                      |   8 +++++
 main.go                     |  26 ++++++++++++--
 test/orm/topologic_test.go  |  69 +++++++++++++++++++++++++++++++++++++
 6 files changed, 140 insertions(+), 10 deletions(-)
 create mode 100644 test/orm/topologic_test.go

diff --git a/database/query_component.go b/database/query_component.go
index 18471a9..1d44f20 100644
--- a/database/query_component.go
+++ b/database/query_component.go
@@ -16,7 +16,7 @@ import (
 )
 
 // QueryCircuitDiagramComponentFromDB return the result of query circuit diagram component info order by page id from postgresDB
-func QueryCircuitDiagramComponentFromDB(ctx context.Context, tx *gorm.DB, pool *ants.PoolWithFunc, logger *zap.Logger) error {
+func QueryCircuitDiagramComponentFromDB(ctx context.Context, tx *gorm.DB, pool *ants.PoolWithFunc, logger *zap.Logger) (map[uuid.UUID]int, error) {
 	var components []orm.Component
 	// ctx超时判断
 	cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
@@ -25,16 +25,20 @@ func QueryCircuitDiagramComponentFromDB(ctx context.Context, tx *gorm.DB, pool *
 	result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Find(&components)
 	if result.Error != nil {
 		logger.Error("query circuit diagram component info failed", zap.Error(result.Error))
-		return result.Error
+		return nil, result.Error
 	}
 
+	componentTypeMap := make(map[uuid.UUID]int, len(components))
+
 	for _, component := range components {
 		pool.Invoke(config.ModelParseConfig{
 			ComponentInfo: component,
 			Context:       ctx,
 		})
+
+		componentTypeMap[component.GlobalUUID] = component.ComponentType
 	}
-	return nil
+	return componentTypeMap, nil
 }
 
 // QueryComponentByUUID return the result of query circuit diagram component info by uuid from postgresDB
@@ -50,3 +54,17 @@ func QueryComponentByUUID(ctx context.Context, tx *gorm.DB, uuid uuid.UUID) (orm
 	}
 	return component, nil
 }
+
+// QueryComponentByPageID return the result of query circuit diagram component info by page id from postgresDB
+func QueryComponentByPageID(ctx context.Context, tx *gorm.DB, pageID int64) (orm.Component, error) {
+	var component orm.Component
+	// ctx超时判断
+	cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
+	defer cancel()
+
+	result := tx.WithContext(cancelCtx).Where("page_id = ?", pageID).Clauses(clause.Locking{Strength: "UPDATE"}).Find(&component)
+	if result.Error != nil {
+		return orm.Component{}, result.Error
+	}
+	return component, nil
+}
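The map returned by QueryCircuitDiagramComponentFromDB keys each component's GlobalUUID to its ComponentType, which is what the bay-interval logic sketched in query_topologic.go below will need: resolving a node's type without another database round trip. One possible consumption pattern — busbarType is a hypothetical placeholder, not a constant defined anywhere in this series:

package sketch

import "github.com/gofrs/uuid"

// busbarType is a hypothetical stand-in; the real component type codes
// live in the project's component definitions.
const busbarType = 1

// isBayBoundary reports whether the component identified by id should
// close a bay interval, using the uuid→type map returned by
// QueryCircuitDiagramComponentFromDB.
func isBayBoundary(componentTypes map[uuid.UUID]int, id uuid.UUID) bool {
	t, ok := componentTypes[id]
	if !ok {
		return false // unknown uuid: treat as a non-boundary node
	}
	return t == busbarType
}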
", uuid).Clauses(clause.Locking{Strength: "UPDATE"}).Find(&component) + if result.Error != nil { + return orm.Component{}, result.Error + } + return component, nil +} diff --git a/database/query_topologic.go b/database/query_topologic.go index c21ba86..1aeecca 100644 --- a/database/query_topologic.go +++ b/database/query_topologic.go @@ -30,28 +30,30 @@ func QueryTopologicByPageID(ctx context.Context, tx *gorm.DB, logger *zap.Logger return topologics, nil } +// TODO 电流互感器不单独划分间隔 +// TODO 以母线、浇筑母线、变压器为间隔原件 // QueryTopologicFromDB return the result of query topologic info from postgresDB -func QueryTopologicFromDB(ctx context.Context, tx *gorm.DB, logger *zap.Logger, gridID, zoneID, stationID int64) error { +func QueryTopologicFromDB(ctx context.Context, tx *gorm.DB, logger *zap.Logger, gridID, zoneID, stationID int64) ([]orm.Page, error) { allPages, err := QueryAllPages(ctx, tx, logger, gridID, zoneID, stationID) if err != nil { logger.Error("query all pages info failed", zap.Int64("gridID", gridID), zap.Int64("zoneID", zoneID), zap.Int64("stationID", stationID), zap.Error(err)) - return err + return nil, err } for _, page := range allPages { topologicInfos, err := QueryTopologicByPageID(ctx, tx, logger, page.ID) if err != nil { logger.Error("query topologic info by pageID failed", zap.Int64("pageID", page.ID), zap.Error(err)) - return err + return nil, err } err = InitCircuitDiagramTopologic(page.ID, topologicInfos) if err != nil { logger.Error("init topologic failed", zap.Error(err)) - return err + return nil, err } } - return nil + return allPages, nil } // InitCircuitDiagramTopologic return circuit diagram topologic info from postgres @@ -77,3 +79,11 @@ func InitCircuitDiagramTopologic(pageID int64, topologicNodes []orm.Topologic) e diagram.StoreGraphMap(pageID, topologicSet) return nil } + +func IntervalBoundaryDetermine(pageID int64, uuid uuid.UUID) bool { + // TODO 从diagramsOverview中根据 uuid 获取 component 信息 + var componentID int64 + diagram.GetComponentMap(componentID) + // TODO 判断 component 的类型是否为间隔 + return true +} diff --git a/go.mod b/go.mod index f1591bf..bfab534 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module modelRT go 1.22.5 require ( + github.com/DATA-DOG/go-sqlmock v1.5.2 github.com/bitly/go-simplejson v0.5.1 github.com/confluentinc/confluent-kafka-go v1.9.2 github.com/gin-gonic/gin v1.10.0 @@ -19,6 +20,7 @@ require ( github.com/swaggo/swag v1.16.4 go.uber.org/zap v1.27.0 golang.org/x/sys v0.28.0 + gorm.io/driver/mysql v1.5.7 gorm.io/driver/postgres v1.5.9 gorm.io/gorm v1.25.12 ) @@ -43,6 +45,7 @@ require ( github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-playground/validator/v10 v10.23.0 // indirect + github.com/go-sql-driver/mysql v1.7.0 // indirect github.com/goccy/go-json v0.10.3 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect diff --git a/go.sum b/go.sum index 2f08fc6..e0dd779 100644 --- a/go.sum +++ b/go.sum @@ -3,6 +3,8 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= +github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod 
h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc= github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= github.com/actgardner/gogen-avro/v10 v10.1.0/go.mod h1:o+ybmVjEa27AAr35FRqU98DJu1fXES56uXniYFv4yDA= @@ -87,6 +89,8 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.23.0 h1:/PwmTwZhS0dPkav3cdK9kV1FsAmrL8sThn8IHr/sO+o= github.com/go-playground/validator/v10 v10.23.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= +github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= +github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= @@ -157,6 +161,7 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/juju/qthttptest v0.1.1/go.mod h1:aTlAv8TYaflIiTDIQYzxnl1QdPjAg8Q8qJMErpKy6A4= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= @@ -405,8 +410,11 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gorm.io/driver/mysql v1.5.7 h1:MndhOPYOfEp2rHKgkZIhJ16eVUIRf2HmzgoPmh7FCWo= +gorm.io/driver/mysql v1.5.7/go.mod h1:sEtPWMiqiN1N1cMXoXmBbd8C6/l+TESwriotuRRpkDM= gorm.io/driver/postgres v1.5.9 h1:DkegyItji119OlcaLjqN11kHoUgZ/j13E0jkJZgD6A8= gorm.io/driver/postgres v1.5.9/go.mod h1:DX3GReXH+3FPWGrrgffdvCk3DQ1dwDPdmbenSkweRGI= +gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8= gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/main.go b/main.go index a0086cb..b185b00 100644 --- a/main.go +++ b/main.go @@ -4,6 +4,7 @@ package main import ( "context" "flag" + "fmt" "time" "modelRT/alert" @@ -101,18 +102,39 @@ func main() { postgresDBClient.Transaction(func(tx *gorm.DB) error { // load circuit diagram from postgres - err := database.QueryCircuitDiagramComponentFromDB(cancelCtx, tx, parsePool, zapLogger) + componentTypeMap, err := database.QueryCircuitDiagramComponentFromDB(cancelCtx, tx, parsePool, zapLogger) if err != nil { zapLogger.Error("load circuit diagrams from postgres failed", zap.Error(err)) 
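// NOTE: gorm's Transaction helper rolls the transaction back when the callback panics, so panicking here releases the row locks taken by the locking queries above.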
panic(err) } // TODO 暂时屏蔽完成 swagger 启动测试 - err = database.QueryTopologicFromDB(ctx, tx, zapLogger, modelRTConfig.GridID, modelRTConfig.ZoneID, modelRTConfig.StationID) + // TODO 将componentTypeMap传入QueryTopologicFromDB中 + pages, err := database.QueryTopologicFromDB(ctx, tx, zapLogger, modelRTConfig.GridID, modelRTConfig.ZoneID, modelRTConfig.StationID) if err != nil { zapLogger.Error("load topologic info from postgres failed", zap.Error(err)) panic(err) } + + for _, page := range pages { + graph, err := diagram.GetGraphMap(page.ID) + if err != nil { + // TODO 增加报错日志错误 + continue + } + + rootNode := graph.RootVertex.String() + links := graph.VerticeLinks[rootNode] + for { + for _, link := range links { + fmt.Println(link) + } + // TODO 重置 links + } + + } + fmt.Println(componentTypeMap) + return nil }) diff --git a/test/orm/topologic_test.go b/test/orm/topologic_test.go new file mode 100644 index 0000000..c107d35 --- /dev/null +++ b/test/orm/topologic_test.go @@ -0,0 +1,69 @@ +package orm_test + +import ( + "context" + "database/sql" + "os" + "regexp" + "testing" + + "modelRT/database" + "modelRT/network" + "modelRT/orm" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/gofrs/uuid" + "github.com/stretchr/testify/assert" + "gorm.io/driver/mysql" + "gorm.io/gorm" +) + +var ( + mock sqlmock.Sqlmock + err error + baseDB *sql.DB + pgClient *gorm.DB +) + +func TestMain(m *testing.M) { + baseDB, mock, err = sqlmock.New() + if err != nil { + panic(err) + } + // 把项目使用的DB连接换成sqlmock的DB连接 + pgClient, _ = gorm.Open(mysql.New(mysql.Config{ + Conn: baseDB, + SkipInitializeWithVersion: true, + DefaultStringSize: 0, + })) + os.Exit(m.Run()) +} + +func TestUserDao_CreateUser(t *testing.T) { + topologicInfo := &orm.Topologic{ + PageID: 1, + UUIDFrom: uuid.FromStringOrNil("70c190f2-8a60-42a9-b143-ec5f87e0aa6b"), + UUIDTo: uuid.FromStringOrNil("70c190f2-8a75-42a9-b166-ec5f87e0aa6b"), + Comment: "test", + Flag: 1, + } + + // ud := dao2.NewUserDao(context.TODO()) + mock.ExpectBegin() + mock.ExpectExec(regexp.QuoteMeta("INSERT INTO `Topologic`")). + WithArgs(topologicInfo.PageID, topologicInfo.Flag, topologicInfo.UUIDFrom, topologicInfo.UUIDTo, topologicInfo.Comment). 
+ WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectCommit() + + err := database.CreateTopologicIntoDB(context.TODO(), pgClient, 1, []network.TopologicUUIDCreateInfo{ + { + UUIDFrom: uuid.FromStringOrNil("70c190f2-8a60-42a9-b143-ec5f87e0aa6b"), + UUIDTo: uuid.FromStringOrNil("70c190f2-8a75-42a9-b166-ec5f87e0aa6b"), + Comment: "test", + Flag: 1, + }, + }) + assert.Nil(t, err) + err = mock.ExpectationsWereMet() + assert.Nil(t, err) +} From daf30766ba17185ff8ced896b8894e1dfc7a355c Mon Sep 17 00:00:00 2001 From: douxu Date: Tue, 13 May 2025 16:34:25 +0800 Subject: [PATCH 25/33] refactor(topologic storage struct): refactor topologic storage struct 1.refactor topologic storage struct by multi branch tree 2.add new func of build multi branch tree 3.modify sql of query topologic from db 4.delete page id field from topologic struct --- constant/togologic.go | 10 +++ database/create_topologic.go | 1 - database/query_component.go | 1 + database/query_topologic.go | 146 ++++++++++++++++++++++++------- database/update_topologic.go | 1 - diagram/multi_branch_tree.go | 64 ++++++++++++++ main.go | 24 +---- orm/circuit_diagram_topologic.go | 1 - sql/topologic.go | 7 +- test/orm/topologic_test.go | 3 +- 10 files changed, 192 insertions(+), 66 deletions(-) create mode 100644 diagram/multi_branch_tree.go diff --git a/constant/togologic.go b/constant/togologic.go index 871ffc5..7ae90b0 100644 --- a/constant/togologic.go +++ b/constant/togologic.go @@ -1,5 +1,7 @@ package constant +import "github.com/gofrs/uuid" + const ( // UUIDErrChangeType 拓扑信息错误改变类型 UUIDErrChangeType = iota @@ -10,3 +12,11 @@ const ( // UUIDAddChangeType 拓扑信息新增类型 UUIDAddChangeType ) + +const ( + // SpecialUUIDStr 拓扑信息中开始节点与结束节点字符串形式 + SpecialUUIDStr = "00000000-0000-0000-0000-000000000000" +) + +// SpecialUUID 拓扑信息中开始节点与结束节点 UUID 格式 +var SpecialUUID = uuid.FromStringOrNil(SpecialUUIDStr) diff --git a/database/create_topologic.go b/database/create_topologic.go index 5af336c..57d2727 100644 --- a/database/create_topologic.go +++ b/database/create_topologic.go @@ -21,7 +21,6 @@ func CreateTopologicIntoDB(ctx context.Context, tx *gorm.DB, pageID int64, topol var topologicSlice []orm.Topologic for _, info := range topologicInfos { topologicInfo := orm.Topologic{ - PageID: pageID, UUIDFrom: info.UUIDFrom, UUIDTo: info.UUIDTo, Flag: info.Flag, diff --git a/database/query_component.go b/database/query_component.go index 1d44f20..21151c8 100644 --- a/database/query_component.go +++ b/database/query_component.go @@ -28,6 +28,7 @@ func QueryCircuitDiagramComponentFromDB(ctx context.Context, tx *gorm.DB, pool * return nil, result.Error } + // TODO 优化componentTypeMap输出 componentTypeMap := make(map[uuid.UUID]int, len(components)) for _, component := range components { diff --git a/database/query_topologic.go b/database/query_topologic.go index 1aeecca..bfae1eb 100644 --- a/database/query_topologic.go +++ b/database/query_topologic.go @@ -3,8 +3,10 @@ package database import ( "context" + "fmt" "time" + "modelRT/constant" "modelRT/diagram" "modelRT/orm" "modelRT/sql" @@ -15,16 +17,16 @@ import ( "gorm.io/gorm/clause" ) -// QueryTopologicByPageID return the topologic info of the circuit diagram query by pageID -func QueryTopologicByPageID(ctx context.Context, tx *gorm.DB, logger *zap.Logger, pageID int64) ([]orm.Topologic, error) { +// QueryTopologic return the topologic info of the circuit diagram +func QueryTopologic(ctx context.Context, tx *gorm.DB, logger *zap.Logger) ([]orm.Topologic, error) { var topologics []orm.Topologic // ctx超时判断 cancelCtx, 
cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() - result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Raw(sql.RecursiveSQL, pageID).Scan(&topologics) + result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Raw(sql.RecursiveSQL, constant.SpecialUUIDStr).Scan(&topologics) if result.Error != nil { - logger.Error("query circuit diagram topologic info by pageID failed", zap.Int64("pageID", pageID), zap.Error(result.Error)) + logger.Error("query circuit diagram topologic info by start node uuid failed", zap.String("start_node_uuid", constant.SpecialUUIDStr), zap.Error(result.Error)) return nil, result.Error } return topologics, nil @@ -32,58 +34,136 @@ func QueryTopologicByPageID(ctx context.Context, tx *gorm.DB, logger *zap.Logger // TODO 电流互感器不单独划分间隔 // TODO 以母线、浇筑母线、变压器为间隔原件 -// QueryTopologicFromDB return the result of query topologic info from postgresDB -func QueryTopologicFromDB(ctx context.Context, tx *gorm.DB, logger *zap.Logger, gridID, zoneID, stationID int64) ([]orm.Page, error) { - allPages, err := QueryAllPages(ctx, tx, logger, gridID, zoneID, stationID) +// QueryTopologicFromDB return the result of query topologic info from DB +func QueryTopologicFromDB(ctx context.Context, tx *gorm.DB, logger *zap.Logger, componentTypeMap map[uuid.UUID]int) error { + topologicInfos, err := QueryTopologic(ctx, tx, logger) if err != nil { - logger.Error("query all pages info failed", zap.Int64("gridID", gridID), zap.Int64("zoneID", zoneID), zap.Int64("stationID", stationID), zap.Error(err)) - return nil, err + logger.Error("query topologic info failed", zap.Error(err)) + return err } - for _, page := range allPages { - topologicInfos, err := QueryTopologicByPageID(ctx, tx, logger, page.ID) - if err != nil { - logger.Error("query topologic info by pageID failed", zap.Int64("pageID", page.ID), zap.Error(err)) - return nil, err - } + // err = InitCircuitDiagramTopologic(topologicInfos, componentTypeMap) + // if err != nil { + // logger.Error("init topologic failed", zap.Error(err)) + // return err + // } - err = InitCircuitDiagramTopologic(page.ID, topologicInfos) - if err != nil { - logger.Error("init topologic failed", zap.Error(err)) - return nil, err - } + _, err = BuildMultiBranchTree(topologicInfos, componentTypeMap) + if err != nil { + logger.Error("init topologic failed", zap.Error(err)) + return err } - return allPages, nil + return nil } // InitCircuitDiagramTopologic return circuit diagram topologic info from postgres -func InitCircuitDiagramTopologic(pageID int64, topologicNodes []orm.Topologic) error { - var rootVertex uuid.UUID - +func InitCircuitDiagramTopologic(topologicNodes []orm.Topologic, componentTypeMap map[uuid.UUID]int) error { + var rootVertex *diagram.MultiBranchTreeNode for _, node := range topologicNodes { - if node.UUIDFrom.IsNil() { - rootVertex = node.UUIDTo + if node.UUIDFrom == constant.SpecialUUID { + // rootVertex = node.UUIDTo + var componentType int + componentType, ok := componentTypeMap[node.UUIDFrom] + if !ok { + return fmt.Errorf("can not get component type by uuid: %s", node.UUIDFrom) + } + rootVertex = diagram.NewMultiBranchTree(node.UUIDFrom, componentType) break } } - topologicSet := diagram.NewGraph(rootVertex) + if rootVertex == nil { + return fmt.Errorf("root vertex is nil") + } for _, node := range topologicNodes { - if node.UUIDFrom.IsNil() { - continue + if node.UUIDFrom == constant.SpecialUUID { + var componentType int + componentType, ok := componentTypeMap[node.UUIDTo] + if !ok 
{ + return fmt.Errorf("can not get component type by uuid: %s", node.UUIDTo) + } + nodeVertex := diagram.NewMultiBranchTree(node.UUIDTo, componentType) + + rootVertex.AddChild(nodeVertex) } - // TODO 增加对 node.flag值的判断 - topologicSet.AddEdge(node.UUIDFrom, node.UUIDTo) } - diagram.StoreGraphMap(pageID, topologicSet) + + node := rootVertex + for _, nodeVertex := range node.Children { + nextVertexs := make([]*diagram.MultiBranchTreeNode, 0) + nextVertexs = append(nextVertexs, nodeVertex) + } return nil } -func IntervalBoundaryDetermine(pageID int64, uuid uuid.UUID) bool { +func IntervalBoundaryDetermine(uuid uuid.UUID) bool { // TODO 从diagramsOverview中根据 uuid 获取 component 信息 var componentID int64 diagram.GetComponentMap(componentID) // TODO 判断 component 的类型是否为间隔 return true } + +// BuildMultiBranchTree return the multi branch tree by topologic info and component type map +func BuildMultiBranchTree(topologics []orm.Topologic, componentTypeMap map[uuid.UUID]int) (*diagram.MultiBranchTreeNode, error) { + nodeMap := make(map[uuid.UUID]*diagram.MultiBranchTreeNode, len(topologics)) + + for _, topo := range topologics { + // skip special uuid + if topo.UUIDFrom != constant.SpecialUUID { + if _, exists := nodeMap[topo.UUIDFrom]; !exists { + componentType, ok := componentTypeMap[topo.UUIDFrom] + if !ok { + return nil, fmt.Errorf("can not get component type by uuid: %s", topo.UUIDFrom) + } + + nodeMap[topo.UUIDFrom] = &diagram.MultiBranchTreeNode{ + ID: topo.UUIDFrom, + NodeComponentType: componentType, + } + } + } + + if _, exists := nodeMap[topo.UUIDTo]; !exists { + componentType, ok := componentTypeMap[topo.UUIDTo] + if !ok { + return nil, fmt.Errorf("can not get component type by uuid: %s", topo.UUIDTo) + } + + nodeMap[topo.UUIDTo] = &diagram.MultiBranchTreeNode{ + ID: topo.UUIDTo, + NodeComponentType: componentType, + } + } + } + + for _, topo := range topologics { + var parent *diagram.MultiBranchTreeNode + if topo.UUIDFrom == constant.SpecialUUID { + componentType, ok := componentTypeMap[topo.UUIDTo] + if !ok { + return nil, fmt.Errorf("can not get component type by uuid: %s", topo.UUIDTo) + } + + parent = &diagram.MultiBranchTreeNode{ + ID: constant.SpecialUUID, + NodeComponentType: componentType, + } + nodeMap[constant.SpecialUUID] = parent + } else { + parent = nodeMap[topo.UUIDFrom] + } + + child := nodeMap[topo.UUIDTo] + child.Parent = parent + parent.Children = append(parent.Children, child) + } + + // return root vertex + root, exists := nodeMap[constant.SpecialUUID] + if !exists { + return nil, fmt.Errorf("root node not found") + } + return root, nil +} diff --git a/database/update_topologic.go b/database/update_topologic.go index d64a674..c077d31 100644 --- a/database/update_topologic.go +++ b/database/update_topologic.go @@ -47,7 +47,6 @@ func UpdateTopologicIntoDB(ctx context.Context, tx *gorm.DB, pageID int64, chang result = tx.WithContext(cancelCtx).Model(&orm.Topologic{}).Where("page_id = ? and uuid_from = ? 
and uuid_to = ?", pageID, changeInfo.OldUUIDFrom, changeInfo.OldUUIDTo).Updates(&orm.Topologic{UUIDTo: changeInfo.NewUUIDTo}) case constant.UUIDAddChangeType: topologic := orm.Topologic{ - PageID: pageID, Flag: changeInfo.Flag, UUIDFrom: changeInfo.NewUUIDFrom, UUIDTo: changeInfo.NewUUIDTo, diff --git a/diagram/multi_branch_tree.go b/diagram/multi_branch_tree.go new file mode 100644 index 0000000..7de6f03 --- /dev/null +++ b/diagram/multi_branch_tree.go @@ -0,0 +1,64 @@ +package diagram + +import ( + "fmt" + + "github.com/gofrs/uuid" +) + +// MultiBranchTreeNode represents a topological structure using an multi branch tree +type MultiBranchTreeNode struct { + ID uuid.UUID // 节点唯一标识 + NodeComponentType int // 节点组件类型 + Parent *MultiBranchTreeNode // 指向父节点的指针 + Children []*MultiBranchTreeNode // 指向所有子节点的指针切片 +} + +func NewMultiBranchTree(id uuid.UUID, componentType int) *MultiBranchTreeNode { + return &MultiBranchTreeNode{ + ID: id, + NodeComponentType: componentType, + Children: make([]*MultiBranchTreeNode, 0), + } +} + +func (n *MultiBranchTreeNode) AddChild(child *MultiBranchTreeNode) { + child.Parent = n + n.Children = append(n.Children, child) +} + +func (n *MultiBranchTreeNode) RemoveChild(childID uuid.UUID) bool { + for i, child := range n.Children { + if child.ID == childID { + n.Children = append(n.Children[:i], n.Children[i+1:]...) + child.Parent = nil + return true + } + } + return false +} + +func (n *MultiBranchTreeNode) FindNodeByID(id uuid.UUID) *MultiBranchTreeNode { + if n.ID == id { + return n + } + + for _, child := range n.Children { + if found := child.FindNodeByID(id); found != nil { + return found + } + } + return nil +} + +func (n *MultiBranchTreeNode) PrintTree(level int) { + for i := 0; i < level; i++ { + fmt.Print(" ") + } + + fmt.Printf("- ComponentType:%d,(ID: %s)\n", n.NodeComponentType, n.ID) + + for _, child := range n.Children { + child.PrintTree(level + 1) + } +} diff --git a/main.go b/main.go index b185b00..cee65ef 100644 --- a/main.go +++ b/main.go @@ -4,7 +4,6 @@ package main import ( "context" "flag" - "fmt" "time" "modelRT/alert" @@ -109,32 +108,11 @@ func main() { } // TODO 暂时屏蔽完成 swagger 启动测试 - // TODO 将componentTypeMap传入QueryTopologicFromDB中 - pages, err := database.QueryTopologicFromDB(ctx, tx, zapLogger, modelRTConfig.GridID, modelRTConfig.ZoneID, modelRTConfig.StationID) + err = database.QueryTopologicFromDB(ctx, tx, zapLogger, componentTypeMap) if err != nil { zapLogger.Error("load topologic info from postgres failed", zap.Error(err)) panic(err) } - - for _, page := range pages { - graph, err := diagram.GetGraphMap(page.ID) - if err != nil { - // TODO 增加报错日志错误 - continue - } - - rootNode := graph.RootVertex.String() - links := graph.VerticeLinks[rootNode] - for { - for _, link := range links { - fmt.Println(link) - } - // TODO 重置 links - } - - } - fmt.Println(componentTypeMap) - return nil }) diff --git a/orm/circuit_diagram_topologic.go b/orm/circuit_diagram_topologic.go index 5b4b5b0..08bd4b3 100644 --- a/orm/circuit_diagram_topologic.go +++ b/orm/circuit_diagram_topologic.go @@ -6,7 +6,6 @@ import "github.com/gofrs/uuid" // Topologic structure define topologic info set of circuit diagram type Topologic struct { ID int64 `gorm:"column:id"` - PageID int64 `gorm:"column:page_id"` Flag int `gorm:"column:flag"` UUIDFrom uuid.UUID `gorm:"column:uuid_from"` UUIDTo uuid.UUID `gorm:"column:uuid_to"` diff --git a/sql/topologic.go b/sql/topologic.go index 58be916..95f292d 100644 --- a/sql/topologic.go +++ b/sql/topologic.go @@ -3,15 +3,12 @@ package sql 
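// The statement below is a recursive CTE: the anchor member selects the edges leaving the start node bound to the placeholder, and the recursive member repeatedly joins "Topologic" rows whose uuid_from matches an already reachable uuid_to, so the scan returns every edge reachable from the start node.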
// RecursiveSQL define Topologic table recursive query statement var RecursiveSQL = `WITH RECURSIVE recursive_tree as ( - SELECT uuid_from,uuid_to,page_id,flag + SELECT uuid_from,uuid_to,flag FROM "Topologic" - WHERE uuid_from is null and page_id = ? + WHERE uuid_from = ? UNION ALL - SELECT t.uuid_from,t.uuid_to,t.page_id,t.flag + SELECT t.uuid_from,t.uuid_to,t.flag FROM "Topologic" t JOIN recursive_tree rt ON t.uuid_from = rt.uuid_to ) SELECT * FROM recursive_tree;` - -// TODO 为 Topologic 表增加唯一索引 -// CREATE UNIQUE INDEX uuid_from_to_page_id_idx ON public."Topologic"(uuid_from,uuid_to,page_id); diff --git a/test/orm/topologic_test.go b/test/orm/topologic_test.go index c107d35..f5443fc 100644 --- a/test/orm/topologic_test.go +++ b/test/orm/topologic_test.go @@ -41,7 +41,6 @@ func TestMain(m *testing.M) { func TestUserDao_CreateUser(t *testing.T) { topologicInfo := &orm.Topologic{ - PageID: 1, UUIDFrom: uuid.FromStringOrNil("70c190f2-8a60-42a9-b143-ec5f87e0aa6b"), UUIDTo: uuid.FromStringOrNil("70c190f2-8a75-42a9-b166-ec5f87e0aa6b"), Comment: "test", @@ -51,7 +50,7 @@ func TestUserDao_CreateUser(t *testing.T) { // ud := dao2.NewUserDao(context.TODO()) mock.ExpectBegin() mock.ExpectExec(regexp.QuoteMeta("INSERT INTO `Topologic`")). - WithArgs(topologicInfo.PageID, topologicInfo.Flag, topologicInfo.UUIDFrom, topologicInfo.UUIDTo, topologicInfo.Comment). + WithArgs(topologicInfo.Flag, topologicInfo.UUIDFrom, topologicInfo.UUIDTo, topologicInfo.Comment). WillReturnResult(sqlmock.NewResult(1, 1)) mock.ExpectCommit() From 237c7ecf693f4f4d82c5f0f16e82adf69e618547 Mon Sep 17 00:00:00 2001 From: douxu Date: Fri, 16 May 2025 14:24:55 +0800 Subject: [PATCH 26/33] refactor(optimize storage struct): optimize topologic storage struct 1. optimize uuid start and end node of uuid nil node str 2. optimize topologic query sql of init topologic in memory --- constant/togologic.go | 8 +++--- database/query_topologic.go | 49 +++++++++++++++++++---------------- 2 files changed, 32 insertions(+), 25 deletions(-) diff --git a/constant/togologic.go b/constant/togologic.go index 7ae90b0..68e3a8d 100644 --- a/constant/togologic.go +++ b/constant/togologic.go @@ -14,9 +14,9 @@ const ( ) const ( - // SpecialUUIDStr 拓扑信息中开始节点与结束节点字符串形式 - SpecialUUIDStr = "00000000-0000-0000-0000-000000000000" + // UUIDNilStr 拓扑信息中开始节点与结束节点字符串形式 + UUIDNilStr = "00000000-0000-0000-0000-000000000000" ) -// SpecialUUID 拓扑信息中开始节点与结束节点 UUID 格式 -var SpecialUUID = uuid.FromStringOrNil(SpecialUUIDStr) +// UUIDNil 拓扑信息中开始节点与结束节点 UUID 格式 +var UUIDNil = uuid.FromStringOrNil(UUIDNilStr) diff --git a/database/query_topologic.go b/database/query_topologic.go index bfae1eb..b65bea4 100644 --- a/database/query_topologic.go +++ b/database/query_topologic.go @@ -24,16 +24,14 @@ func QueryTopologic(ctx context.Context, tx *gorm.DB, logger *zap.Logger) ([]orm cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() - result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Raw(sql.RecursiveSQL, constant.SpecialUUIDStr).Scan(&topologics) + result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Raw(sql.RecursiveSQL, constant.UUIDNilStr).Scan(&topologics) if result.Error != nil { - logger.Error("query circuit diagram topologic info by start node uuid failed", zap.String("start_node_uuid", constant.SpecialUUIDStr), zap.Error(result.Error)) + logger.Error("query circuit diagram topologic info by start node uuid failed", zap.String("start_node_uuid", constant.UUIDNilStr), zap.Error(result.Error)) return nil, result.Error } return
topologics, nil } -// TODO 电流互感器不单独划分间隔 -// TODO 以母线、浇筑母线、变压器为间隔原件 // QueryTopologicFromDB return the result of query topologic info from DB func QueryTopologicFromDB(ctx context.Context, tx *gorm.DB, logger *zap.Logger, componentTypeMap map[uuid.UUID]int) error { topologicInfos, err := QueryTopologic(ctx, tx, logger) @@ -60,7 +58,7 @@ func QueryTopologicFromDB(ctx context.Context, tx *gorm.DB, logger *zap.Logger, func InitCircuitDiagramTopologic(topologicNodes []orm.Topologic, componentTypeMap map[uuid.UUID]int) error { var rootVertex *diagram.MultiBranchTreeNode for _, node := range topologicNodes { - if node.UUIDFrom == constant.SpecialUUID { + if node.UUIDFrom == constant.UUIDNil { // rootVertex = node.UUIDTo var componentType int componentType, ok := componentTypeMap[node.UUIDFrom] @@ -77,7 +75,7 @@ func InitCircuitDiagramTopologic(topologicNodes []orm.Topologic, componentTypeMa } for _, node := range topologicNodes { - if node.UUIDFrom == constant.SpecialUUID { + if node.UUIDFrom == constant.UUIDNil { var componentType int componentType, ok := componentTypeMap[node.UUIDTo] if !ok { @@ -97,11 +95,18 @@ func InitCircuitDiagramTopologic(topologicNodes []orm.Topologic, componentTypeMa return nil } +// TODO 电流互感器不单独划分间隔 +// TODO 以母线、浇筑母线、变压器为间隔原件 func IntervalBoundaryDetermine(uuid uuid.UUID) bool { // TODO 从diagramsOverview中根据 uuid 获取 component 信息 var componentID int64 diagram.GetComponentMap(componentID) // TODO 判断 component 的类型是否为间隔 + // TODO 0x1111A1B2C3D4,高四位表示可以成为间隔的compoent类型的值为FFFF,普通 component 类型的值为 0000。低四位中前二位表示component的一级类型,例如母线 PT、母联/母分、进线等,低四位中后二位表示一级类型中包含的具体类型,例如母线 PT中包含的电压互感器、隔离开关、接地开关、避雷器、带电显示器等。 + num := uint32(0xA1B2C3D4) // 八位16进制数 + high16 := uint16(num >> 16) + fmt.Printf("原始值: 0x%X\n", num) // 输出: 0xA1B2C3D4 + fmt.Printf("高十六位: 0x%X\n", high16) // 输出: 0xA1B2 return true } @@ -111,9 +116,11 @@ func BuildMultiBranchTree(topologics []orm.Topologic, componentTypeMap map[uuid. for _, topo := range topologics { // skip special uuid - if topo.UUIDFrom != constant.SpecialUUID { - if _, exists := nodeMap[topo.UUIDFrom]; !exists { - componentType, ok := componentTypeMap[topo.UUIDFrom] + if _, exists := nodeMap[topo.UUIDFrom]; !exists { + var componentType int + if topo.UUIDTo != constant.UUIDNil { + var ok bool + componentType, ok = componentTypeMap[topo.UUIDFrom] if !ok { return nil, fmt.Errorf("can not get component type by uuid: %s", topo.UUIDFrom) } @@ -126,9 +133,13 @@ func BuildMultiBranchTree(topologics []orm.Topologic, componentTypeMap map[uuid. } if _, exists := nodeMap[topo.UUIDTo]; !exists { - componentType, ok := componentTypeMap[topo.UUIDTo] - if !ok { - return nil, fmt.Errorf("can not get component type by uuid: %s", topo.UUIDTo) + var componentType int + if topo.UUIDTo != constant.UUIDNil { + var ok bool + componentType, ok = componentTypeMap[topo.UUIDTo] + if !ok { + return nil, fmt.Errorf("can not get component type by uuid: %s", topo.UUIDTo) + } } nodeMap[topo.UUIDTo] = &diagram.MultiBranchTreeNode{ @@ -140,17 +151,13 @@ func BuildMultiBranchTree(topologics []orm.Topologic, componentTypeMap map[uuid. 
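// Second pass over the edge list below: every vertex now exists in nodeMap, so each Topologic row just wires child.Parent and parent.Children. A minimal usage sketch once the tree is built: root, err := BuildMultiBranchTree(topologics, componentTypeMap); if err == nil { root.PrintTree(0) }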
for _, topo := range topologics { var parent *diagram.MultiBranchTreeNode - if topo.UUIDFrom == constant.SpecialUUID { - componentType, ok := componentTypeMap[topo.UUIDTo] - if !ok { - return nil, fmt.Errorf("can not get component type by uuid: %s", topo.UUIDTo) - } - + if topo.UUIDFrom == constant.UUIDNil { + var componentType int parent = &diagram.MultiBranchTreeNode{ - ID: constant.SpecialUUID, + ID: constant.UUIDNil, NodeComponentType: componentType, } - nodeMap[constant.SpecialUUID] = parent + nodeMap[constant.UUIDNil] = parent } else { parent = nodeMap[topo.UUIDFrom] } @@ -161,7 +168,7 @@ func BuildMultiBranchTree(topologics []orm.Topologic, componentTypeMap map[uuid. } // return root vertex - root, exists := nodeMap[constant.SpecialUUID] + root, exists := nodeMap[constant.UUIDNil] if !exists { return nil, fmt.Errorf("root node not found") } From d2196701ec3e5fcb7403ef127466ed3bf367f35d Mon Sep 17 00:00:00 2001 From: douxu Date: Tue, 20 May 2025 16:08:17 +0800 Subject: [PATCH 27/33] fix(multi-branch-tree-of-topologic): add global tree variable and fix topologic info processing bug --- database/query_topologic.go | 55 +++++++++++++++++++----------------- diagram/multi_branch_tree.go | 2 ++ main.go | 3 +- 3 files changed, 33 insertions(+), 27 deletions(-) diff --git a/database/query_topologic.go b/database/query_topologic.go index b65bea4..f81659e 100644 --- a/database/query_topologic.go +++ b/database/query_topologic.go @@ -33,25 +33,19 @@ func QueryTopologic(ctx context.Context, tx *gorm.DB, logger *zap.Logger) ([]orm } // QueryTopologicFromDB return the result of query topologic info from DB -func QueryTopologicFromDB(ctx context.Context, tx *gorm.DB, logger *zap.Logger, componentTypeMap map[uuid.UUID]int) error { +func QueryTopologicFromDB(ctx context.Context, tx *gorm.DB, logger *zap.Logger, componentTypeMap map[uuid.UUID]int) (*diagram.MultiBranchTreeNode, error) { topologicInfos, err := QueryTopologic(ctx, tx, logger) if err != nil { logger.Error("query topologic info failed", zap.Error(err)) - return err + return nil, err } - // err = InitCircuitDiagramTopologic(topologicInfos, componentTypeMap) - // if err != nil { - // logger.Error("init topologic failed", zap.Error(err)) - // return err - // } - - _, err = BuildMultiBranchTree(topologicInfos, componentTypeMap) + tree, err := BuildMultiBranchTree(topologicInfos, componentTypeMap) if err != nil { logger.Error("init topologic failed", zap.Error(err)) - return err + return nil, err } - return nil + return tree, nil } // InitCircuitDiagramTopologic return circuit diagram topologic info from postgres @@ -95,14 +89,13 @@ func InitCircuitDiagramTopologic(topologicNodes []orm.Topologic, componentTypeMa return nil } -// TODO 电流互感器不单独划分间隔 -// TODO 以母线、浇筑母线、变压器为间隔原件 +// TODO 电流互感器不单独划分间隔,以母线、浇筑母线、变压器为间隔原件 func IntervalBoundaryDetermine(uuid uuid.UUID) bool { - // TODO 从diagramsOverview中根据 uuid 获取 component 信息 + fmt.Println(uuid) var componentID int64 diagram.GetComponentMap(componentID) // TODO 判断 component 的类型是否为间隔 - // TODO 0x1111A1B2C3D4,高四位表示可以成为间隔的compoent类型的值为FFFF,普通 component 类型的值为 0000。低四位中前二位表示component的一级类型,例如母线 PT、母联/母分、进线等,低四位中后二位表示一级类型中包含的具体类型,例如母线 PT中包含的电压互感器、隔离开关、接地开关、避雷器、带电显示器等。 + // TODO 0xA1B2C3D4,高四位表示可以成为间隔的compoent类型的值为FFFF,普通 component 类型的值为 0000。低四位中前二位表示component的一级类型,例如母线 PT、母联/母分、进线等,低四位中后二位表示一级类型中包含的具体类型,例如母线 PT中包含的电压互感器、隔离开关、接地开关、避雷器、带电显示器等。 num := uint32(0xA1B2C3D4) // 八位16进制数 high16 := uint16(num >> 16) fmt.Printf("原始值: 0x%X\n", num) // 输出: 0xA1B2C3D4 @@ -112,15 +105,14 @@ func 
IntervalBoundaryDetermine(uuid uuid.UUID) bool { // BuildMultiBranchTree return the multi branch tree by topologic info and component type map func BuildMultiBranchTree(topologics []orm.Topologic, componentTypeMap map[uuid.UUID]int) (*diagram.MultiBranchTreeNode, error) { - nodeMap := make(map[uuid.UUID]*diagram.MultiBranchTreeNode, len(topologics)) + nodeMap := make(map[uuid.UUID]*diagram.MultiBranchTreeNode, len(topologics)*2) for _, topo := range topologics { - // skip special uuid if _, exists := nodeMap[topo.UUIDFrom]; !exists { - var componentType int + // skip special uuid if topo.UUIDTo != constant.UUIDNil { var ok bool - componentType, ok = componentTypeMap[topo.UUIDFrom] + componentType, ok := componentTypeMap[topo.UUIDFrom] if !ok { return nil, fmt.Errorf("can not get component type by uuid: %s", topo.UUIDFrom) } @@ -128,23 +120,25 @@ func BuildMultiBranchTree(topologics []orm.Topologic, componentTypeMap map[uuid. nodeMap[topo.UUIDFrom] = &diagram.MultiBranchTreeNode{ ID: topo.UUIDFrom, NodeComponentType: componentType, + Children: make([]*diagram.MultiBranchTreeNode, 0), } } } if _, exists := nodeMap[topo.UUIDTo]; !exists { - var componentType int + // skip special uuid if topo.UUIDTo != constant.UUIDNil { var ok bool - componentType, ok = componentTypeMap[topo.UUIDTo] + componentType, ok := componentTypeMap[topo.UUIDTo] if !ok { return nil, fmt.Errorf("can not get component type by uuid: %s", topo.UUIDTo) } - } - nodeMap[topo.UUIDTo] = &diagram.MultiBranchTreeNode{ - ID: topo.UUIDTo, - NodeComponentType: componentType, + nodeMap[topo.UUIDTo] = &diagram.MultiBranchTreeNode{ + ID: topo.UUIDTo, + NodeComponentType: componentType, + Children: make([]*diagram.MultiBranchTreeNode, 0), + } } } } @@ -162,7 +156,16 @@ func BuildMultiBranchTree(topologics []orm.Topologic, componentTypeMap map[uuid. parent = nodeMap[topo.UUIDFrom] } - child := nodeMap[topo.UUIDTo] + var child *diagram.MultiBranchTreeNode + if topo.UUIDTo == constant.UUIDNil { + var componentType int + child = &diagram.MultiBranchTreeNode{ + ID: topo.UUIDTo, + NodeComponentType: componentType, + } + } else { + child = nodeMap[topo.UUIDTo] + } child.Parent = parent parent.Children = append(parent.Children, child) } diff --git a/diagram/multi_branch_tree.go b/diagram/multi_branch_tree.go index 7de6f03..04337e3 100644 --- a/diagram/multi_branch_tree.go +++ b/diagram/multi_branch_tree.go @@ -6,6 +6,8 @@ import ( "github.com/gofrs/uuid" ) +var GlobalTree *MultiBranchTreeNode + // MultiBranchTreeNode represents a topological structure using an multi branch tree type MultiBranchTreeNode struct { ID uuid.UUID // 节点唯一标识 diff --git a/main.go b/main.go index cee65ef..b2604cd 100644 --- a/main.go +++ b/main.go @@ -108,11 +108,12 @@ func main() { } // TODO 暂时屏蔽完成 swagger 启动测试 - err = database.QueryTopologicFromDB(ctx, tx, zapLogger, componentTypeMap) + tree, err := database.QueryTopologicFromDB(ctx, tx, zapLogger, componentTypeMap) if err != nil { zapLogger.Error("load topologic info from postgres failed", zap.Error(err)) panic(err) } + diagram.GlobalTree = tree return nil }) From 9aa5b0dcc6f3b45d7325df0c44c864df0ecd3ff0 Mon Sep 17 00:00:00 2001 From: douxu Date: Thu, 5 Jun 2025 15:56:40 +0800 Subject: [PATCH 28/33] refactor(logger): 1. optimize the logger log module design and add link tracking related designs 2. 
add logger facade functions to simplify the use of alarm functions --- config/model_config.go | 2 +- logger/facede.go | 54 ++++++++++++++++++ logger/logger.go | 109 +++++++++++++++++++++++++++++++++++++ logger/{init.go => zap.go} | 8 +-- main.go | 25 ++++----- middleware/trace.go | 23 ++++++++ util/trace.go | 45 +++++++++++++++ 7 files changed, 247 insertions(+), 19 deletions(-) create mode 100644 logger/facede.go create mode 100644 logger/logger.go rename logger/{init.go => zap.go} (89%) create mode 100644 middleware/trace.go create mode 100644 util/trace.go diff --git a/config/model_config.go b/config/model_config.go index dedc93c..e626070 100644 --- a/config/model_config.go +++ b/config/model_config.go @@ -9,6 +9,6 @@ import ( type ModelParseConfig struct { ComponentInfo orm.Component - Context context.Context + Ctx context.Context AnchorChan chan AnchorParamConfig } diff --git a/logger/facede.go b/logger/facede.go new file mode 100644 index 0000000..b9a5b54 --- /dev/null +++ b/logger/facede.go @@ -0,0 +1,54 @@ +// Package logger define log struct of modelRT project +package logger + +import ( + "context" + "sync" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +var ( + f *facade + fOnce sync.Once +) + +type facade struct { + _logger *zap.Logger +} + +// Debug define facade func of debug level log +func Debug(ctx context.Context, msg string, kv ...any) { + logFacade().log(ctx, zapcore.DebugLevel, msg, kv...) +} + +// Info define facade func of info level log +func Info(ctx context.Context, msg string, kv ...any) { + logFacade().log(ctx, zapcore.InfoLevel, msg, kv...) +} + +// Warn define facade func of warn level log +func Warn(ctx context.Context, msg string, kv ...any) { + logFacade().log(ctx, zapcore.WarnLevel, msg, kv...) +} + +// Error define facade func of error level log +func Error(ctx context.Context, msg string, kv ...any) { + logFacade().log(ctx, zapcore.ErrorLevel, msg, kv...) +} + +func (f *facade) log(ctx context.Context, lvl zapcore.Level, msg string, kv ...any) { + fields := makeLogFields(ctx, kv...) + ce := f._logger.Check(lvl, msg) + ce.Write(fields...) +} + +func logFacade() *facade { + fOnce.Do(func() { + f = &facade{ + _logger: GetLoggerInstance(), + } + }) + return f +} diff --git a/logger/logger.go b/logger/logger.go new file mode 100644 index 0000000..eb97838 --- /dev/null +++ b/logger/logger.go @@ -0,0 +1,109 @@ +// Package logger define log struct of modelRT project +package logger + +import ( + "context" + "path" + "runtime" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +type logger struct { + ctx context.Context + traceID string + spanID string + pSpanID string + _logger *zap.Logger +} + +func (l *logger) Debug(msg string, kv ...any) { + l.log(zapcore.DebugLevel, msg, kv...) +} + +func (l *logger) Info(msg string, kv ...any) { + l.log(zapcore.InfoLevel, msg, kv...) +} + +func (l *logger) Warn(msg string, kv ...any) { + l.log(zapcore.WarnLevel, msg, kv...) +} + +func (l *logger) Error(msg string, kv ...any) { + l.log(zapcore.ErrorLevel, msg, kv...) +} + +func (l *logger) log(lvl zapcore.Level, msg string, kv ...any) { + fields := makeLogFields(l.ctx, kv...) + ce := l._logger.Check(lvl, msg) + ce.Write(fields...) 
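+	// Logger.Check returns a nil *zapcore.CheckedEntry when lvl is disabled, and Write on a nil CheckedEntry is a no-op, so the unchecked Write above is safe.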
+} + +func makeLogFields(ctx context.Context, kv ...any) []zap.Field { + // Ensure that log information appears in pairs in the form of key-value pairs + if len(kv)%2 != 0 { + kv = append(kv, "unknown") + } + + // the ctx keys below must match middleware.StartTrace, which stores them via c.Set("traceid", ...) + kv = append(kv, "traceID", ctx.Value("traceid"), "spanID", ctx.Value("spanid"), "pspanID", ctx.Value("pspanid")) + + funcName, file, line := getLoggerCallerInfo() + kv = append(kv, "func", funcName, "file", file, "line", line) + fields := make([]zap.Field, 0, len(kv)/2) + for i := 0; i < len(kv); i += 2 { + key := kv[i].(string) + value := kv[i+1] + switch v := value.(type) { + case string: + fields = append(fields, zap.String(key, v)) + case int: + fields = append(fields, zap.Int(key, v)) + case int64: + fields = append(fields, zap.Int64(key, v)) + case float32: + fields = append(fields, zap.Float32(key, v)) + case float64: + fields = append(fields, zap.Float64(key, v)) + case bool: + fields = append(fields, zap.Bool(key, v)) + case error: + fields = append(fields, zap.Error(v)) + default: + fields = append(fields, zap.Any(key, v)) + } + } + return fields +} + +// getLoggerCallerInfo define func of return log caller information: method name, file name, line number +func getLoggerCallerInfo() (funcName, file string, line int) { + pc, file, line, ok := runtime.Caller(4) + if !ok { + return + } + file = path.Base(file) + funcName = runtime.FuncForPC(pc).Name() + return +} + +func New(ctx context.Context) *logger { + var traceID, spanID, pSpanID string + if ctx.Value("traceid") != nil { + traceID = ctx.Value("traceid").(string) + } + if ctx.Value("spanid") != nil { + spanID = ctx.Value("spanid").(string) + } + if ctx.Value("pspanid") != nil { + pSpanID = ctx.Value("pspanid").(string) + } + + return &logger{ + ctx: ctx, + traceID: traceID, + spanID: spanID, + pSpanID: pSpanID, + _logger: GetLoggerInstance(), + } +} diff --git a/logger/init.go b/logger/zap.go similarity index 89% rename from logger/init.go rename to logger/zap.go index a537bf8..18270a6 100644 --- a/logger/init.go +++ b/logger/zap.go @@ -68,15 +68,15 @@ func initLogger(lCfg config.LoggerConfig) *zap.Logger { return logger } -// InitLoggerInstance return instance of zap logger -func InitLoggerInstance(lCfg config.LoggerConfig) *zap.Logger { +// InitLoggerInstance define func of return instance of zap logger +func InitLoggerInstance(lCfg config.LoggerConfig) { once.Do(func() { _globalLogger = initLogger(lCfg) }) - return _globalLogger + // NOTE: do not defer Sync here; it would flush as soon as InitLoggerInstance returns. Flush with GetLoggerInstance().Sync() at process exit instead. } -// GetLoggerInstance returns the global logger instance It's safe for concurrent use. +// GetLoggerInstance define func of returning the global logger instance. It's safe for concurrent use. 
func GetLoggerInstance() *zap.Logger { _globalLoggerMu.RLock() logger := _globalLogger diff --git a/main.go b/main.go index b2604cd..ccc53a2 100644 --- a/main.go +++ b/main.go @@ -24,7 +24,6 @@ import ( "github.com/gin-gonic/gin" "github.com/panjf2000/ants/v2" - "go.uber.org/zap" "gorm.io/gorm" ) @@ -43,7 +42,6 @@ var ( var ( modelRTConfig config.ModelRTConfig postgresDBClient *gorm.DB - zapLogger *zap.Logger alertManager *alert.EventManager ) @@ -65,8 +63,7 @@ func main() { }() // init logger - zapLogger = logger.InitLoggerInstance(modelRTConfig.LoggerConfig) - defer zapLogger.Sync() + logger.InitLoggerInstance(modelRTConfig.LoggerConfig) // init alert manager _ = alert.InitAlertEventManager() @@ -74,7 +71,7 @@ func main() { // init model parse ants pool parsePool, err := ants.NewPoolWithFunc(modelRTConfig.ParseConcurrentQuantity, pool.ParseFunc) if err != nil { - zapLogger.Error("init concurrent parse task pool failed", zap.Error(err)) + logger.Error(ctx, "init concurrent parse task pool failed", "error", err) panic(err) } defer parsePool.Release() @@ -88,7 +85,7 @@ func main() { // init anchor param ants pool anchorRealTimePool, err := pool.AnchorPoolInit(modelRTConfig.RTDReceiveConcurrentQuantity) if err != nil { - zapLogger.Error("init concurrent anchor param task pool failed", zap.Error(err)) + logger.Error(ctx, "init concurrent anchor param task pool failed", "error", err) panic(err) } defer anchorRealTimePool.Release() @@ -101,16 +98,16 @@ func main() { postgresDBClient.Transaction(func(tx *gorm.DB) error { // load circuit diagram from postgres - componentTypeMap, err := database.QueryCircuitDiagramComponentFromDB(cancelCtx, tx, parsePool, zapLogger) + componentTypeMap, err := database.QueryCircuitDiagramComponentFromDB(cancelCtx, tx, parsePool) if err != nil { - zapLogger.Error("load circuit diagrams from postgres failed", zap.Error(err)) + logger.Error(ctx, "load circuit diagrams from postgres failed", "error", err) panic(err) } // TODO 暂时屏蔽完成 swagger 启动测试 - tree, err := database.QueryTopologicFromDB(ctx, tx, zapLogger, componentTypeMap) + tree, err := database.QueryTopologicFromDB(ctx, tx, componentTypeMap) if err != nil { - zapLogger.Error("load topologic info from postgres failed", zap.Error(err)) + logger.Error(ctx, "load topologic info from postgres failed", "error", err) panic(err) } diagram.GlobalTree = tree @@ -125,10 +122,10 @@ func main() { engine.Use(limiter.Middleware) // diagram api - engine.GET("/model/diagram_load", handler.CircuitDiagramLoadHandler) - engine.POST("/model/diagram_create", handler.CircuitDiagramCreateHandler) - engine.POST("/model/diagram_update", handler.CircuitDiagramUpdateHandler) - engine.POST("/model/diagram_delete", handler.CircuitDiagramDeleteHandler) + engine.GET("/diagram/load", handler.CircuitDiagramLoadHandler) + engine.POST("/diagram/create", handler.CircuitDiagramCreateHandler) + engine.POST("/diagram/update", handler.CircuitDiagramUpdateHandler) + engine.POST("/diagram/delete", handler.CircuitDiagramDeleteHandler) // real time data api engine.GET("/ws/rtdatas", handler.RealTimeDataReceivehandler) diff --git a/middleware/trace.go b/middleware/trace.go new file mode 100644 index 0000000..12b957e --- /dev/null +++ b/middleware/trace.go @@ -0,0 +1,23 @@ +package middleware + +import ( + "modelRT/util" + + "github.com/gin-gonic/gin" +) + +// StartTrace define func of set trace info from request header +func StartTrace() gin.HandlerFunc { + return func(c *gin.Context) { + traceID := c.Request.Header.Get("traceid") + pSpanID := 
c.Request.Header.Get("spanid") + spanID := util.GenerateSpanID(c.Request.RemoteAddr) + if traceID == "" { // 如果traceId 为空,证明是链路的发端,把它设置成此次的spanId,发端的spanId是root spanId + traceID = spanID // trace 标识整个请求的链路, span则标识链路中的不同服务 + } + c.Set("traceid", traceID) + c.Set("spanid", spanID) + c.Set("pspanid", pSpanID) + c.Next() + } +} diff --git a/util/trace.go b/util/trace.go new file mode 100644 index 0000000..eefe769 --- /dev/null +++ b/util/trace.go @@ -0,0 +1,45 @@ +package util + +import ( + "context" + "encoding/binary" + "math/rand" + "net" + "strconv" + "strings" + "time" +) + +// GenerateSpanID define func of generate spanID +func GenerateSpanID(addr string) string { + strAddr := strings.Split(addr, ":") + ip := strAddr[0] + ipLong, _ := IP2Long(ip) + times := uint64(time.Now().UnixNano()) + rand.NewSource(time.Now().UnixNano()) + spanID := ((times ^ uint64(ipLong)) << 32) | uint64(rand.Int31()) + return strconv.FormatUint(spanID, 16) +} + +// IP2Long define func of convert ip to unit32 type +func IP2Long(ip string) (uint32, error) { + ipAddr, err := net.ResolveIPAddr("ip", ip) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint32(ipAddr.IP.To4()), nil +} + +// GetTraceInfoFromCtx define func of get trace info from context +func GetTraceInfoFromCtx(ctx context.Context) (traceID, spanID, pSpanID string) { + if ctx.Value("traceid") != nil { + traceID = ctx.Value("traceid").(string) + } + if ctx.Value("spanid") != nil { + spanID = ctx.Value("spanid").(string) + } + if ctx.Value("pspanid") != nil { + pSpanID = ctx.Value("pspanid").(string) + } + return +} From f6cee44f847c8e949f79b17f2f6a68e0bfa2ba92 Mon Sep 17 00:00:00 2001 From: douxu Date: Fri, 6 Jun 2025 16:41:52 +0800 Subject: [PATCH 29/33] refactor(handler): use logger package log func replace zap log func --- database/query_component.go | 8 ++-- database/query_page.go | 9 ++-- database/query_topologic.go | 14 +++--- diagram/redis_hash.go | 20 ++++---- diagram/redis_set.go | 18 +++---- distributedlock/redis_lock.go | 36 +++++++------- distributedlock/redis_rwlock.go | 60 ++++++++++++------------ handler/alert_event_query.go | 5 +- handler/anchor_point_replace.go | 16 +++---- handler/circuit_diagram_create.go | 18 ++++--- handler/circuit_diagram_delete.go | 20 ++++---- handler/circuit_diagram_load.go | 14 +++--- handler/circuit_diagram_update.go | 20 ++++---- handler/real_time_data_query.go | 4 +- handler/real_time_data_receive.go | 21 ++++----- pool/concurrency_anchor_parse.go | 8 ++-- pool/concurrency_model_parse.go | 14 ++---- real-time-data/kafka.go | 14 ++---- real-time-data/real_time_data_receive.go | 8 +--- test/distributedlock/rwlock_test.go | 25 +--------- 20 files changed, 145 insertions(+), 207 deletions(-) diff --git a/database/query_component.go b/database/query_component.go index 21151c8..4c8245f 100644 --- a/database/query_component.go +++ b/database/query_component.go @@ -6,17 +6,17 @@ import ( "time" "modelRT/config" + "modelRT/logger" "modelRT/orm" "github.com/gofrs/uuid" "github.com/panjf2000/ants/v2" - "go.uber.org/zap" "gorm.io/gorm" "gorm.io/gorm/clause" ) // QueryCircuitDiagramComponentFromDB return the result of query circuit diagram component info order by page id from postgresDB -func QueryCircuitDiagramComponentFromDB(ctx context.Context, tx *gorm.DB, pool *ants.PoolWithFunc, logger *zap.Logger) (map[uuid.UUID]int, error) { +func QueryCircuitDiagramComponentFromDB(ctx context.Context, tx *gorm.DB, pool *ants.PoolWithFunc) (map[uuid.UUID]int, error) { var components []orm.Component // ctx超时判断 
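// i.e. bound the locking query below with a 5-second timeout so a slow database cannot stall the whole transaction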
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second) @@ -24,7 +24,7 @@ func QueryCircuitDiagramComponentFromDB(ctx context.Context, tx *gorm.DB, pool * result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Find(&components) if result.Error != nil { - logger.Error("query circuit diagram component info failed", zap.Error(result.Error)) + logger.Error(ctx, "query circuit diagram component info failed", "error", result.Error) return nil, result.Error } @@ -34,7 +34,7 @@ func QueryCircuitDiagramComponentFromDB(ctx context.Context, tx *gorm.DB, pool * for _, component := range components { pool.Invoke(config.ModelParseConfig{ ComponentInfo: component, - Context: ctx, + Ctx: ctx, }) componentTypeMap[component.GlobalUUID] = component.ComponentType diff --git a/database/query_page.go b/database/query_page.go index a974020..6b069b5 100644 --- a/database/query_page.go +++ b/database/query_page.go @@ -5,26 +5,25 @@ import ( "context" "time" + "modelRT/logger" "modelRT/orm" - "go.uber.org/zap" "gorm.io/gorm" "gorm.io/gorm/clause" ) // QueryAllPages return the all page info of the circuit diagram query by grid_id and zone_id and station_id -func QueryAllPages(ctx context.Context, tx *gorm.DB, logger *zap.Logger, gridID, zoneID, stationID int64) ([]orm.Page, error) { +func QueryAllPages(ctx context.Context, tx *gorm.DB, gridID, zoneID, stationID int64) ([]orm.Page, error) { var pages []orm.Page - // ctx超时判断 + // ctx timeout judgment cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() result := tx.Model(&orm.Page{}).WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Select(`"page".id, "page".Name, "page".status,"page".context`).Joins(`inner join "station" on "station".id = "page".station_id`).Joins(`inner join "zone" on "zone".id = "station".zone_id`).Joins(`inner join "grid" on "grid".id = "zone".grid_id`).Where(`"grid".id = ? and "zone".id = ? 
and "station".id = ?`, gridID, zoneID, stationID).Scan(&pages) if result.Error != nil { - logger.Error("query circuit diagram pages by gridID and zoneID and stationID failed", zap.Int64("grid_id", gridID), zap.Int64("zone_id", zoneID), zap.Int64("station_id", stationID), zap.Error(result.Error)) + logger.Error(ctx, "query circuit diagram pages by gridID and zoneID and stationID failed", "grid_id", gridID, "zone_id", zoneID, "station_id", stationID, "error", result.Error) return nil, result.Error } - return pages, nil } diff --git a/database/query_topologic.go b/database/query_topologic.go index f81659e..7faf230 100644 --- a/database/query_topologic.go +++ b/database/query_topologic.go @@ -8,17 +8,17 @@ import ( "modelRT/constant" "modelRT/diagram" + "modelRT/logger" "modelRT/orm" "modelRT/sql" "github.com/gofrs/uuid" - "go.uber.org/zap" "gorm.io/gorm" "gorm.io/gorm/clause" ) // QueryTopologic return the topologic info of the circuit diagram -func QueryTopologic(ctx context.Context, tx *gorm.DB, logger *zap.Logger) ([]orm.Topologic, error) { +func QueryTopologic(ctx context.Context, tx *gorm.DB) ([]orm.Topologic, error) { var topologics []orm.Topologic // ctx超时判断 cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second) @@ -26,23 +26,23 @@ func QueryTopologic(ctx context.Context, tx *gorm.DB, logger *zap.Logger) ([]orm result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Raw(sql.RecursiveSQL, constant.UUIDNilStr).Scan(&topologics) if result.Error != nil { - logger.Error("query circuit diagram topologic info by start node uuid failed", zap.String("start_node_uuid", constant.UUIDNilStr), zap.Error(result.Error)) + logger.Error(ctx, "query circuit diagram topologic info by start node uuid failed", "start_node_uuid", constant.UUIDNilStr, "error", result.Error) return nil, result.Error } return topologics, nil } // QueryTopologicFromDB return the result of query topologic info from DB -func QueryTopologicFromDB(ctx context.Context, tx *gorm.DB, logger *zap.Logger, componentTypeMap map[uuid.UUID]int) (*diagram.MultiBranchTreeNode, error) { - topologicInfos, err := QueryTopologic(ctx, tx, logger) +func QueryTopologicFromDB(ctx context.Context, tx *gorm.DB, componentTypeMap map[uuid.UUID]int) (*diagram.MultiBranchTreeNode, error) { + topologicInfos, err := QueryTopologic(ctx, tx) if err != nil { - logger.Error("query topologic info failed", zap.Error(err)) + logger.Error(ctx, "query topologic info failed", "error", err) return nil, err } tree, err := BuildMultiBranchTree(topologicInfos, componentTypeMap) if err != nil { - logger.Error("init topologic failed", zap.Error(err)) + logger.Error(ctx, "init topologic failed", "error", err) return nil, err } return tree, nil diff --git a/diagram/redis_hash.go b/diagram/redis_hash.go index 4446a69..e0724f0 100644 --- a/diagram/redis_hash.go +++ b/diagram/redis_hash.go @@ -7,8 +7,6 @@ import ( "modelRT/logger" "github.com/redis/go-redis/v9" - - "go.uber.org/zap" ) // RedisHash defines the encapsulation struct of redis hash type @@ -16,7 +14,6 @@ type RedisHash struct { ctx context.Context rwLocker *locker.RedissionRWLocker storageClient *redis.Client - logger *zap.Logger } // NewRedisHash define func of new redis hash instance @@ -25,7 +22,6 @@ func NewRedisHash(ctx context.Context, hashKey string, token string, lockLeaseTi ctx: ctx, rwLocker: locker.InitRWLocker(hashKey, token, lockLeaseTime, needRefresh), storageClient: GetRedisClientInstance(), - logger: logger.GetLoggerInstance(), } } @@ -33,14 +29,14 @@ func 
NewRedisHash(ctx context.Context, hashKey string, token string, lockLeaseTi func (rh *RedisHash) SetRedisHashByMap(hashKey string, fields map[string]interface{}) error { err := rh.rwLocker.WLock(rh.ctx) if err != nil { - rh.logger.Error("lock wLock by hashKey failed", zap.String("hashKey", hashKey), zap.Error(err)) + logger.Error(rh.ctx, "lock wLock by hash_key failed", "hash_key", hashKey, "error", err) return err } defer rh.rwLocker.UnWLock(rh.ctx) err = rh.storageClient.HSet(rh.ctx, hashKey, fields).Err() if err != nil { - rh.logger.Error("set hash by map failed", zap.String("hashKey", hashKey), zap.Any("fields", fields), zap.Error(err)) + logger.Error(rh.ctx, "set hash by map failed", "hash_key", hashKey, "fields", fields, "error", err) return err } return nil @@ -50,14 +46,14 @@ func (rh *RedisHash) SetRedisHashByMap(hashKey string, fields map[string]interfa func (rh *RedisHash) SetRedisHashByKV(hashKey string, field string, value interface{}) error { err := rh.rwLocker.WLock(rh.ctx) if err != nil { - rh.logger.Error("lock wLock by hashKey failed", zap.String("hashKey", hashKey), zap.Error(err)) + logger.Error(rh.ctx, "lock wLock by hash_key failed", "hash_key", hashKey, "error", err) return err } defer rh.rwLocker.UnWLock(rh.ctx) err = rh.storageClient.HSet(rh.ctx, hashKey, field, value).Err() if err != nil { - rh.logger.Error("set hash by kv failed", zap.String("hashKey", hashKey), zap.String("field", field), zap.Any("value", value), zap.Error(err)) + logger.Error(rh.ctx, "set hash by kv failed", "hash_key", hashKey, "field", field, "value", value, "error", err) return err } return nil @@ -67,14 +63,14 @@ func (rh *RedisHash) SetRedisHashByKV(hashKey string, field string, value interf func (rh *RedisHash) HGet(hashKey string, field string) (string, error) { err := rh.rwLocker.RLock(rh.ctx) if err != nil { - rh.logger.Error("lock rLock by hashKey failed", zap.String("hashKey", hashKey), zap.Error(err)) + logger.Error(rh.ctx, "lock rLock by hash_key failed", "hash_key", hashKey, "error", err) return "", err } defer rh.rwLocker.UnRLock(rh.ctx) result, err := rh.storageClient.HGet(rh.ctx, hashKey, field).Result() if err != nil { - rh.logger.Error("set hash by kv failed", zap.String("hashKey", hashKey), zap.String("field", field), zap.Error(err)) + logger.Error(rh.ctx, "set hash by kv failed", "hash_key", hashKey, "field", field, "error", err) return "", err } return result, nil @@ -84,14 +80,14 @@ func (rh *RedisHash) HGet(hashKey string, field string) (string, error) { func (rh *RedisHash) HGetAll(hashKey string) (map[string]string, error) { err := rh.rwLocker.RLock(rh.ctx) if err != nil { - rh.logger.Error("lock rLock by hashKey failed", zap.String("hashKey", hashKey), zap.Error(err)) + logger.Error(rh.ctx, "lock rLock by hash_key failed", "hash_key", hashKey, "error", err) return nil, err } defer rh.rwLocker.UnRLock(rh.ctx) result, err := rh.storageClient.HGetAll(rh.ctx, hashKey).Result() if err != nil { - rh.logger.Error("get all hash field by hash key failed", zap.String("hashKey", hashKey), zap.Error(err)) + logger.Error(rh.ctx, "get all hash field by hash key failed", "hash_key", hashKey, "error", err) return nil, err } return result, nil diff --git a/diagram/redis_set.go b/diagram/redis_set.go index dcfdeaa..cf0ba5a 100644 --- a/diagram/redis_set.go +++ b/diagram/redis_set.go @@ -33,14 +33,14 @@ func NewRedisSet(ctx context.Context, hashKey string, token string, lockLeaseTim func (rs *RedisSet) SADD(setKey string, members ...interface{}) error { err := rs.rwLocker.WLock(rs.ctx) 
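// WLock blocks until the distributed write lock is held, so set mutations are serialized across processes; the deferred UnWLock below releases it.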
if err != nil { - rs.logger.Error("lock wLock by setKey failed", zap.String("setKey", setKey), zap.Error(err)) + logger.Error(rs.ctx, "lock wLock by setKey failed", "set_key", setKey, "error", err) return err } defer rs.rwLocker.UnWLock(rs.ctx) err = rs.storageClient.SAdd(rs.ctx, setKey, members).Err() if err != nil { - rs.logger.Error("add set by memebers failed", zap.String("setKey", setKey), zap.Any("members", members), zap.Error(err)) + logger.Error(rs.ctx, "add set by memebers failed", "set_key", setKey, "members", members, "error", err) return err } return nil @@ -50,14 +50,14 @@ func (rs *RedisSet) SADD(setKey string, members ...interface{}) error { func (rs *RedisSet) SREM(setKey string, members ...interface{}) error { err := rs.rwLocker.WLock(rs.ctx) if err != nil { - rs.logger.Error("lock wLock by setKey failed", zap.String("setKey", setKey), zap.Error(err)) + logger.Error(rs.ctx, "lock wLock by setKey failed", "set_key", setKey, "error", err) return err } defer rs.rwLocker.UnWLock(rs.ctx) count, err := rs.storageClient.SRem(rs.ctx, setKey, members).Result() if err != nil || count != int64(len(members)) { - rs.logger.Error("rem members from set failed", zap.String("setKey", setKey), zap.Any("members", members), zap.Error(err)) + logger.Error(rs.ctx, "rem members from set failed", "set_key", setKey, "members", members, "error", err) return fmt.Errorf("rem members from set failed:%w", err) } @@ -68,24 +68,24 @@ func (rs *RedisSet) SREM(setKey string, members ...interface{}) error { func (rs *RedisSet) SMembers(setKey string) ([]string, error) { err := rs.rwLocker.RLock(rs.ctx) if err != nil { - rs.logger.Error("lock rLock by setKey failed", zap.String("setKey", setKey), zap.Error(err)) + logger.Error(rs.ctx, "lock rLock by setKey failed", "set_key", setKey, "error", err) return nil, err } defer rs.rwLocker.UnRLock(rs.ctx) result, err := rs.storageClient.SMembers(rs.ctx, setKey).Result() if err != nil { - rs.logger.Error("get all hash field by hash key failed", zap.String("setKey", setKey), zap.Error(err)) + logger.Error(rs.ctx, "get all set field by hash key failed", "set_key", setKey, "error", err) return nil, err } return result, nil } // SIsMember define func of determine whether an member is in set by key -func (rh *RedisHash) SIsMember(setKey string, member interface{}) (bool, error) { - result, err := rh.storageClient.SIsMember(rh.ctx, setKey, member).Result() +func (rs *RedisSet) SIsMember(setKey string, member interface{}) (bool, error) { + result, err := rs.storageClient.SIsMember(rs.ctx, setKey, member).Result() if err != nil { - rh.logger.Error("get all hash field by hash key failed", zap.String("setKey", setKey), zap.Error(err)) + logger.Error(rs.ctx, "get all set field by hash key failed", "set_key", setKey, "error", err) return false, err } return result, nil diff --git a/distributedlock/redis_lock.go b/distributedlock/redis_lock.go index aed73dc..53f9dab 100644 --- a/distributedlock/redis_lock.go +++ b/distributedlock/redis_lock.go @@ -43,7 +43,6 @@ type redissionLocker struct { subExitChan chan struct{} client *redis.Client refreshOnce *sync.Once - Logger *zap.Logger } func (rl *redissionLocker) Lock(ctx context.Context, timeout ...time.Duration) error { @@ -52,7 +51,7 @@ func (rl *redissionLocker) Lock(ctx context.Context, timeout ...time.Duration) e } result := rl.tryLock(ctx).(*constant.RedisResult) if result.Code == constant.UnknownInternalError { - rl.Logger.Error(result.OutputResultMessage()) + logger.Error(ctx, result.OutputResultMessage()) return 
fmt.Errorf("get lock failed:%w", result) } @@ -68,7 +67,7 @@ func (rl *redissionLocker) Lock(ctx context.Context, timeout ...time.Duration) e defer close(subMsg) sub := rl.client.Subscribe(ctx, rl.waitChanKey) defer sub.Close() - go rl.subscribeLock(sub, subMsg) + go rl.subscribeLock(ctx, sub, subMsg) if len(timeout) > 0 && timeout[0] > 0 { acquireTimer := time.NewTimer(timeout[0]) @@ -77,23 +76,23 @@ func (rl *redissionLocker) Lock(ctx context.Context, timeout ...time.Duration) e case _, ok := <-subMsg: if !ok { err := errors.New("failed to read the lock waiting for for the channel message") - rl.Logger.Error("failed to read the lock waiting for for the channel message") + logger.Error(ctx, "failed to read the lock waiting for for the channel message") return err } resultErr := rl.tryLock(ctx).(*constant.RedisResult) if (resultErr.Code == constant.LockFailure) || (resultErr.Code == constant.UnknownInternalError) { - rl.Logger.Info(resultErr.OutputResultMessage()) + logger.Info(ctx, resultErr.OutputResultMessage()) continue } if resultErr.Code == constant.LockSuccess { - rl.Logger.Info(resultErr.OutputResultMessage()) + logger.Info(ctx, resultErr.OutputResultMessage()) return nil } case <-acquireTimer.C: err := errors.New("the waiting time for obtaining the lock operation has timed out") - rl.Logger.Info("the waiting time for obtaining the lock operation has timed out") + logger.Info(ctx, "the waiting time for obtaining the lock operation has timed out") return err } } @@ -101,11 +100,11 @@ func (rl *redissionLocker) Lock(ctx context.Context, timeout ...time.Duration) e return fmt.Errorf("lock the redis lock failed:%w", result) } -func (rl *redissionLocker) subscribeLock(sub *redis.PubSub, subMsgChan chan struct{}) { +func (rl *redissionLocker) subscribeLock(ctx context.Context, sub *redis.PubSub, subMsgChan chan struct{}) { if sub == nil || subMsgChan == nil { return } - rl.Logger.Info("lock: enter sub routine", zap.String("token", rl.Token)) + logger.Info(ctx, "lock: enter sub routine", zap.String("token", rl.Token)) for { select { @@ -126,7 +125,7 @@ ARGV[1]:锁的过期时间(lockLeaseTime),单位为秒。 ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 */ func (rl *redissionLocker) refreshLockTimeout(ctx context.Context) { - rl.Logger.Info("lock refresh by key and token", zap.String("token", rl.Token), zap.String("key", rl.Key)) + logger.Info(ctx, "lock refresh by key and token", zap.String("token", rl.Token), zap.String("key", rl.Key)) lockTime := time.Duration(rl.lockLeaseTime/3) * time.Millisecond timer := time.NewTimer(lockTime) @@ -139,17 +138,17 @@ func (rl *redissionLocker) refreshLockTimeout(ctx context.Context) { res := rl.client.Eval(ctx, luascript.RefreshLockScript, []string{rl.Key}, rl.lockLeaseTime, rl.Token) val, err := res.Int() if err != redis.Nil && err != nil { - rl.Logger.Info("lock refresh failed", zap.String("token", rl.Token), zap.String("key", rl.Key), zap.Error(err)) + logger.Info(ctx, "lock refresh failed", "token", rl.Token, "key", rl.Key, "error", err) return } if constant.RedisCode(val) == constant.RefreshLockFailure { - rl.Logger.Error("lock refreash failed,can not find the lock by key and token", zap.String("token", rl.Token), zap.String("key", rl.Key)) + logger.Error(ctx, "lock refreash failed,can not find the lock by key and token", "token", rl.Token, "key", rl.Key) break } if constant.RedisCode(val) == constant.RefreshLockSuccess { - rl.Logger.Info("lock refresh success by key and token", zap.String("token", rl.Token), zap.String("key", rl.Key)) + logger.Info(ctx, "lock refresh 
success by key and token", "token", rl.Token, "key", rl.Key) } timer.Reset(lockTime) case <-rl.refreshExitChan: @@ -165,11 +164,11 @@ func (rl *redissionLocker) cancelRefreshLockTime() { } } -func (rl *redissionLocker) closeSub(sub *redis.PubSub, noticeChan chan struct{}) { +func (rl *redissionLocker) closeSub(ctx context.Context, sub *redis.PubSub, noticeChan chan struct{}) { if sub != nil { err := sub.Close() if err != nil { - rl.Logger.Error("close sub failed", zap.String("token", rl.Token), zap.String("key", rl.Key), zap.Error(err)) + logger.Error(ctx, "close sub failed", "token", rl.Token, "key", rl.Key, "error", err) } } @@ -203,7 +202,7 @@ func (rl *redissionLocker) UnLock(ctx context.Context) error { res := rl.client.Eval(ctx, luascript.UnLockScript, []string{rl.Key, rl.waitChanKey}, unlockMessage, rl.Token) val, err := res.Int() if err != redis.Nil && err != nil { - rl.Logger.Info("unlock lock failed", zap.String("token", rl.Token), zap.String("key", rl.Key), zap.Error(err)) + logger.Info(ctx, "unlock lock failed", zap.String("token", rl.Token), zap.String("key", rl.Key), zap.Error(err)) return fmt.Errorf("unlock lock failed:%w", constant.NewRedisResult(constant.UnknownInternalError, constant.UnLockType, err.Error())) } @@ -212,12 +211,12 @@ func (rl *redissionLocker) UnLock(ctx context.Context) error { rl.cancelRefreshLockTime() } - rl.Logger.Info("unlock lock success", zap.String("token", rl.Token), zap.String("key", rl.Key)) + logger.Info(ctx, "unlock lock success", zap.String("token", rl.Token), zap.String("key", rl.Key)) return nil } if constant.RedisCode(val) == constant.UnLocakFailureWithLockOccupancy { - rl.Logger.Info("unlock lock failed", zap.String("token", rl.Token), zap.String("key", rl.Key)) + logger.Info(ctx, "unlock lock failed", zap.String("token", rl.Token), zap.String("key", rl.Key)) return fmt.Errorf("unlock lock failed:%w", constant.NewRedisResult(constant.UnLocakFailureWithLockOccupancy, constant.UnLockType, "")) } return nil @@ -252,7 +251,6 @@ func GetLocker(client *redis.Client, ops *RedissionLockConfig) *redissionLocker needRefresh: ops.NeedRefresh, client: client, refreshExitChan: make(chan struct{}), - Logger: logger.GetLoggerInstance(), } return r } diff --git a/distributedlock/redis_rwlock.go b/distributedlock/redis_rwlock.go index 52e44af..bd06afc 100644 --- a/distributedlock/redis_rwlock.go +++ b/distributedlock/redis_rwlock.go @@ -14,7 +14,6 @@ import ( uuid "github.com/gofrs/uuid" "github.com/redis/go-redis/v9" - "go.uber.org/zap" ) type RedissionRWLocker struct { @@ -27,7 +26,7 @@ type RedissionRWLocker struct { func (rl *RedissionRWLocker) RLock(ctx context.Context, timeout ...time.Duration) error { result := rl.tryRLock(ctx).(*constant.RedisResult) if result.Code == constant.UnknownInternalError { - rl.Logger.Error(result.OutputResultMessage()) + logger.Error(ctx, result.OutputResultMessage()) return fmt.Errorf("get read lock failed:%w", result) } @@ -42,7 +41,7 @@ func (rl *RedissionRWLocker) RLock(ctx context.Context, timeout ...time.Duration go rl.refreshLockTimeout(ctx) }) } - rl.Logger.Info("success get the read lock by key and token", zap.String("key", rl.Key), zap.String("token", rl.Token)) + logger.Info(ctx, "success get the read lock by key and token", "key", rl.Key, "token", rl.Token) return nil } @@ -53,7 +52,7 @@ func (rl *RedissionRWLocker) RLock(ctx context.Context, timeout ...time.Duration subMsgChan := make(chan struct{}, 1) sub := rl.client.Subscribe(ctx, rl.readWaitChanKey) - go rl.subscribeLock(sub, subMsgChan) + go 
rl.subscribeLock(ctx, sub, subMsgChan) acquireTimer := time.NewTimer(timeout[0]) for { @@ -61,19 +60,19 @@ func (rl *RedissionRWLocker) RLock(ctx context.Context, timeout ...time.Duration case _, ok := <-subMsgChan: if !ok { err := errors.New("failed to read the read lock waiting for for the channel message") - rl.Logger.Error("failed to read the read lock waiting for for the channel message") + logger.Error(ctx, "failed to read the read lock waiting for for the channel message") return err } result := rl.tryRLock(ctx).(*constant.RedisResult) if (result.Code == constant.RLockFailureWithWLockOccupancy) || (result.Code == constant.UnknownInternalError) { - rl.Logger.Info(result.OutputResultMessage()) + logger.Info(ctx, result.OutputResultMessage()) continue } if result.Code == constant.LockSuccess { - rl.Logger.Info(result.OutputResultMessage()) - rl.closeSub(sub, rl.subExitChan) + logger.Info(ctx, result.OutputResultMessage()) + rl.closeSub(ctx, sub, rl.subExitChan) if rl.needRefresh { rl.refreshOnce.Do(func() { @@ -88,8 +87,8 @@ func (rl *RedissionRWLocker) RLock(ctx context.Context, timeout ...time.Duration return nil } case <-acquireTimer.C: - rl.Logger.Info("the waiting time for obtaining the read lock operation has timed out") - rl.closeSub(sub, rl.subExitChan) + logger.Info(ctx, "the waiting time for obtaining the read lock operation has timed out") + rl.closeSub(ctx, sub, rl.subExitChan) // after acquire lock timeout,notice the sub channel to close return constant.AcquireTimeoutErr } @@ -110,7 +109,7 @@ func (rl *RedissionRWLocker) tryRLock(ctx context.Context) error { } func (rl *RedissionRWLocker) refreshLockTimeout(ctx context.Context) { - rl.Logger.Info("lock refresh by key and token", zap.String("token", rl.Token), zap.String("key", rl.Key)) + logger.Info(ctx, "lock refresh by key and token", "token", rl.Token, "key", rl.Key) lockTime := time.Duration(rl.lockLeaseTime/3) * time.Millisecond timer := time.NewTimer(lockTime) @@ -123,17 +122,17 @@ func (rl *RedissionRWLocker) refreshLockTimeout(ctx context.Context) { res := rl.client.Eval(ctx, luascript.RefreshRWLockScript, []string{rl.Key, rl.RWTokenTimeoutPrefix}, rl.lockLeaseTime, rl.Token) val, err := res.Int() if err != redis.Nil && err != nil { - rl.Logger.Info("lock refresh failed", zap.String("token", rl.Token), zap.String("key", rl.Key), zap.Error(err)) + logger.Info(ctx, "lock refresh failed", "token", rl.Token, "key", rl.Key, "error", err) return } if constant.RedisCode(val) == constant.RefreshLockFailure { - rl.Logger.Error("lock refreash failed,can not find the read lock by key and token", zap.String("rwTokenPrefix", rl.RWTokenTimeoutPrefix), zap.String("token", rl.Token), zap.String("key", rl.Key)) + logger.Error(ctx, "lock refreash failed,can not find the read lock by key and token", "rwTokenPrefix", rl.RWTokenTimeoutPrefix, "token", rl.Token, "key", rl.Key) return } if constant.RedisCode(val) == constant.RefreshLockSuccess { - rl.Logger.Info("lock refresh success by key and token", zap.String("token", rl.Token), zap.String("key", rl.Key)) + logger.Info(ctx, "lock refresh success by key and token", "token", rl.Token, "key", rl.Key) } timer.Reset(lockTime) case <-rl.refreshExitChan: @@ -143,11 +142,11 @@ func (rl *RedissionRWLocker) refreshLockTimeout(ctx context.Context) { } func (rl *RedissionRWLocker) UnRLock(ctx context.Context) error { - rl.Logger.Info("unlock RLock by key and token", zap.String("key", rl.Key), zap.String("token", rl.Token)) + logger.Info(ctx, "unlock RLock by key and token", "key", rl.Key, 
"token", rl.Token) res := rl.client.Eval(ctx, luascript.UnRLockScript, []string{rl.Key, rl.RWTokenTimeoutPrefix, rl.writeWaitChanKey}, unlockMessage, rl.Token) val, err := res.Int() if err != redis.Nil && err != nil { - rl.Logger.Info("unlock read lock failed", zap.String("token", rl.Token), zap.String("key", rl.Key), zap.Error(err)) + logger.Info(ctx, "unlock read lock failed", "token", rl.Token, "key", rl.Key, "error", err) return fmt.Errorf("unlock read lock failed:%w", constant.NewRedisResult(constant.UnknownInternalError, constant.UnRLockType, err.Error())) } @@ -156,12 +155,12 @@ func (rl *RedissionRWLocker) UnRLock(ctx context.Context) error { rl.cancelRefreshLockTime() } - rl.Logger.Info("unlock read lock success", zap.String("token", rl.Token), zap.String("key", rl.Key)) + logger.Info(ctx, "unlock read lock success", "token", rl.Token, "key", rl.Key) return nil } if constant.RedisCode(val) == constant.UnRLockFailureWithWLockOccupancy { - rl.Logger.Info("unlock read lock failed", zap.String("token", rl.Token), zap.String("key", rl.Key)) + logger.Info(ctx, "unlock read lock failed", "token", rl.Token, "key", rl.Key) return fmt.Errorf("unlock read lock failed:%w", constant.NewRedisResult(constant.UnRLockFailureWithWLockOccupancy, constant.UnRLockType, "")) } return nil @@ -170,7 +169,7 @@ func (rl *RedissionRWLocker) UnRLock(ctx context.Context) error { func (rl *RedissionRWLocker) WLock(ctx context.Context, timeout ...time.Duration) error { result := rl.tryWLock(ctx).(*constant.RedisResult) if result.Code == constant.UnknownInternalError { - rl.Logger.Error(result.OutputResultMessage()) + logger.Error(ctx, result.OutputResultMessage()) return fmt.Errorf("get write lock failed:%w", result) } @@ -185,7 +184,7 @@ func (rl *RedissionRWLocker) WLock(ctx context.Context, timeout ...time.Duration go rl.refreshLockTimeout(ctx) }) } - rl.Logger.Info("success get the write lock by key and token", zap.String("key", rl.Key), zap.String("token", rl.Token)) + logger.Info(ctx, "success get the write lock by key and token", "key", rl.Key, "token", rl.Token) return nil } @@ -196,7 +195,7 @@ func (rl *RedissionRWLocker) WLock(ctx context.Context, timeout ...time.Duration subMsgChan := make(chan struct{}, 1) sub := rl.client.Subscribe(ctx, rl.writeWaitChanKey) - go rl.subscribeLock(sub, subMsgChan) + go rl.subscribeLock(ctx, sub, subMsgChan) acquireTimer := time.NewTimer(timeout[0]) for { @@ -204,19 +203,19 @@ func (rl *RedissionRWLocker) WLock(ctx context.Context, timeout ...time.Duration case _, ok := <-subMsgChan: if !ok { err := errors.New("failed to read the write lock waiting for for the channel message") - rl.Logger.Error("failed to read the read lock waiting for for the channel message") + logger.Error(ctx, "failed to read the read lock waiting for for the channel message") return err } result := rl.tryWLock(ctx).(*constant.RedisResult) if (result.Code == constant.UnknownInternalError) || (result.Code == constant.WLockFailureWithRLockOccupancy) || (result.Code == constant.WLockFailureWithWLockOccupancy) || (result.Code == constant.WLockFailureWithNotFirstPriority) { - rl.Logger.Info(result.OutputResultMessage()) + logger.Info(ctx, result.OutputResultMessage()) continue } if result.Code == constant.LockSuccess { - rl.Logger.Info(result.OutputResultMessage()) - rl.closeSub(sub, rl.subExitChan) + logger.Info(ctx, result.OutputResultMessage()) + rl.closeSub(ctx, sub, rl.subExitChan) if rl.needRefresh { rl.refreshOnce.Do(func() { @@ -231,8 +230,8 @@ func (rl *RedissionRWLocker) WLock(ctx 
context.Context, timeout ...time.Duration return nil } case <-acquireTimer.C: - rl.Logger.Info("the waiting time for obtaining the write lock operation has timed out") - rl.closeSub(sub, rl.subExitChan) + logger.Info(ctx, "the waiting time for obtaining the write lock operation has timed out") + rl.closeSub(ctx, sub, rl.subExitChan) // after acquire lock timeout,notice the sub channel to close return constant.AcquireTimeoutErr } @@ -256,7 +255,7 @@ func (rl *RedissionRWLocker) UnWLock(ctx context.Context) error { res := rl.client.Eval(ctx, luascript.UnWLockScript, []string{rl.Key, rl.RWTokenTimeoutPrefix, rl.writeWaitChanKey, rl.readWaitChanKey}, unlockMessage, rl.Token) val, err := res.Int() if err != redis.Nil && err != nil { - rl.Logger.Error("unlock write lock failed", zap.String("token", rl.Token), zap.String("key", rl.Key), zap.Error(err)) + logger.Error(ctx, "unlock write lock failed", "token", rl.Token, "key", rl.Key, "error", err) return fmt.Errorf("unlock write lock failed:%w", constant.NewRedisResult(constant.UnknownInternalError, constant.UnWLockType, err.Error())) } @@ -264,12 +263,12 @@ func (rl *RedissionRWLocker) UnWLock(ctx context.Context) error { if rl.needRefresh && (constant.RedisCode(val) == constant.UnLockSuccess) { rl.cancelRefreshLockTime() } - rl.Logger.Info("unlock write lock success", zap.String("token", rl.Token), zap.String("key", rl.Key)) + logger.Info(ctx, "unlock write lock success", "token", rl.Token, "key", rl.Key) return nil } if (constant.RedisCode(val) == constant.UnWLockFailureWithRLockOccupancy) || (constant.RedisCode(val) == constant.UnWLockFailureWithWLockOccupancy) { - rl.Logger.Info("unlock write lock failed", zap.String("token", rl.Token), zap.String("key", rl.Key)) + logger.Info(ctx, "unlock write lock failed", "token", rl.Token, "key", rl.Key) return fmt.Errorf("unlock write lock failed:%w", constant.NewRedisResult(constant.RedisCode(val), constant.UnWLockType, "")) } return nil @@ -308,7 +307,6 @@ func GetRWLocker(client *redis.Client, conf *RedissionLockConfig) *RedissionRWLo lockLeaseTime: conf.LockLeaseTime, client: client, refreshOnce: &sync.Once{}, - Logger: logger.GetLoggerInstance(), } rwLocker := &RedissionRWLocker{ diff --git a/handler/alert_event_query.go b/handler/alert_event_query.go index a652758..bacde4f 100644 --- a/handler/alert_event_query.go +++ b/handler/alert_event_query.go @@ -11,20 +11,17 @@ import ( "modelRT/network" "github.com/gin-gonic/gin" - "go.uber.org/zap" ) // QueryAlertEventHandler define query alert event process API func QueryAlertEventHandler(c *gin.Context) { var targetLevel constant.AlertLevel - logger := logger.GetLoggerInstance() alertManger := alert.GetAlertMangerInstance() - levelStr := c.Query("level") level, err := strconv.Atoi(levelStr) if err != nil { - logger.Error("convert alert level string to int failed", zap.Error(err)) + logger.Error(c, "convert alert level string to int failed", "error", err) resp := network.FailureResponse{ Code: -1, diff --git a/handler/anchor_point_replace.go b/handler/anchor_point_replace.go index ba110e2..90401ee 100644 --- a/handler/anchor_point_replace.go +++ b/handler/anchor_point_replace.go @@ -16,21 +16,19 @@ import ( "modelRT/orm" "github.com/gin-gonic/gin" - "go.uber.org/zap" ) // ComponentAnchorReplaceHandler define component anchor point replace process API func ComponentAnchorReplaceHandler(c *gin.Context) { var uuid, anchorName string - logger := logger.GetLoggerInstance() - pgClient := database.GetPostgresDBClient() + pgClient := 
database.GetPostgresDBClient() cancelCtx, cancel := context.WithTimeout(c, 5*time.Second) defer cancel() var request network.ComponetAnchorReplaceRequest if err := c.ShouldBindJSON(&request); err != nil { - logger.Error("unmarshal component anchor point replace info failed", zap.Error(err)) + logger.Error(c, "unmarshal component anchor point replace info failed", "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -45,7 +43,7 @@ func ComponentAnchorReplaceHandler(c *gin.Context) { var componentInfo orm.Component result := pgClient.WithContext(cancelCtx).Model(&orm.Component{}).Where("global_uuid = ?", uuid).Find(&componentInfo) if result.Error != nil { - logger.Error("query component detail info failed", zap.Error(result.Error)) + logger.Error(c, "query component detail info failed", "error", result.Error) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -57,7 +55,7 @@ func ComponentAnchorReplaceHandler(c *gin.Context) { if result.RowsAffected == 0 { err := fmt.Errorf("query component detail info by uuid failed:%w", constant.ErrQueryRowZero) - logger.Error("query component detail info from table is empty", zap.String("table_name", "component")) + logger.Error(c, "query component detail info from table is empty", "table_name", "component") resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -73,7 +71,7 @@ func ComponentAnchorReplaceHandler(c *gin.Context) { tableName := model.SelectModelNameByType(componentInfo.ComponentType) result = pgClient.WithContext(cancelCtx).Table(tableName).Where("global_uuid = ?", uuid).Find(&unmarshalMap) if result.Error != nil { - logger.Error("query model detail info failed", zap.Error(result.Error)) + logger.Error(c, "query model detail info failed", "error", result.Error) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -85,7 +83,7 @@ func ComponentAnchorReplaceHandler(c *gin.Context) { if unmarshalMap == nil { err := fmt.Errorf("query model detail info by uuid failed:%w", constant.ErrQueryRowZero) - logger.Error("query model detail info from table is empty", zap.String("table_name", tableName)) + logger.Error(c, "query model detail info from table is empty", "table_name", tableName) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -97,7 +95,7 @@ func ComponentAnchorReplaceHandler(c *gin.Context) { componentType := unmarshalMap["component_type"].(int) if componentType != constant.DemoType { - logger.Error("can not process real time data of component type not equal DemoType", zap.Int64("component_id", componentInfo.ID)) + logger.Error(c, "can not process real time data of component type not equal DemoType", "component_id", componentInfo.ID) } diagram.UpdateAnchorValue(componentInfo.ID, anchorName) diff --git a/handler/circuit_diagram_create.go b/handler/circuit_diagram_create.go index 633cb8c..d66968f 100644 --- a/handler/circuit_diagram_create.go +++ b/handler/circuit_diagram_create.go @@ -13,17 +13,15 @@ import ( "github.com/bitly/go-simplejson" "github.com/gin-gonic/gin" "github.com/gofrs/uuid" - "go.uber.org/zap" ) // CircuitDiagramCreateHandler define circuit diagram create process API func CircuitDiagramCreateHandler(c *gin.Context) { - logger := logger.GetLoggerInstance() pgClient := database.GetPostgresDBClient() var request network.CircuitDiagramCreateRequest if err := c.ShouldBindJSON(&request); err != nil { - logger.Error("unmarshal circuit diagram create info failed", zap.Error(err)) + logger.Error(c, "unmarshal circuit diagram create info failed", "error", err) 
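All of these call sites migrate from a stored *zap.Logger with typed zap fields to a package-level, context-aware logger taking alternating key/value pairs (gin.Context satisfies context.Context, which is why handlers can pass c straight through). The facade itself is not in this diff; a minimal sketch of one plausible shape for modelRT/logger, layered on zap's SugaredLogger — the context hook is an assumption, not shown in the patch:

package logger

import (
	"context"

	"go.uber.org/zap"
)

var sugar = zap.Must(zap.NewProduction()).Sugar()

// Error logs msg with alternating key/value pairs, matching call sites such as
// logger.Error(ctx, "set hash by kv failed", "hash_key", hashKey, "error", err).
func Error(ctx context.Context, msg string, keysAndValues ...interface{}) {
	_ = ctx // hypothetical hook: a request or trace ID could be pulled off the context here
	sugar.Errorw(msg, keysAndValues...)
}

// Info mirrors Error at info level.
func Info(ctx context.Context, msg string, keysAndValues ...interface{}) {
	_ = ctx
	sugar.Infow(msg, keysAndValues...)
}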
resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -35,7 +33,7 @@ func CircuitDiagramCreateHandler(c *gin.Context) { graph, err := diagram.GetGraphMap(request.PageID) if err != nil { - logger.Error("get topologic data from set by pageID failed", zap.Error(err)) + logger.Error(c, "get topologic data from set by pageID failed", "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -63,7 +61,7 @@ func CircuitDiagramCreateHandler(c *gin.Context) { err = fmt.Errorf("convert uuid from string failed:%w:%w", err1, err2) } - logger.Error("format uuid from string failed", zap.Error(err)) + logger.Error(c, "format uuid from string failed", "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -87,7 +85,7 @@ func CircuitDiagramCreateHandler(c *gin.Context) { if err != nil { tx.Rollback() - logger.Error("create topologic info into DB failed", zap.Any("topologic_info", topologicCreateInfos), zap.Error(err)) + logger.Error(c, "create topologic info into DB failed", "topologic_info", topologicCreateInfos, "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -109,7 +107,7 @@ func CircuitDiagramCreateHandler(c *gin.Context) { if err != nil { tx.Rollback() - logger.Error("insert component info into DB failed", zap.Error(err)) + logger.Error(c, "insert component info into DB failed", "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -127,7 +125,7 @@ func CircuitDiagramCreateHandler(c *gin.Context) { if err != nil { tx.Rollback() - logger.Error("create component model into DB failed", zap.Any("component_infos", request.ComponentInfos), zap.Error(err)) + logger.Error(c, "create component model into DB failed", "component_infos", request.ComponentInfos, "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -147,7 +145,7 @@ func CircuitDiagramCreateHandler(c *gin.Context) { if err != nil { tx.Rollback() - logger.Error("unmarshal component params info failed", zap.String("component_params", componentInfo.Params), zap.Error(err)) + logger.Error(c, "unmarshal component params info failed", "component_params", componentInfo.Params, "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -165,7 +163,7 @@ func CircuitDiagramCreateHandler(c *gin.Context) { if err != nil { tx.Rollback() - logger.Error("format params json info to map failed", zap.Error(err)) + logger.Error(c, "format params json info to map failed", "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, diff --git a/handler/circuit_diagram_delete.go b/handler/circuit_diagram_delete.go index 4f8797c..70031ed 100644 --- a/handler/circuit_diagram_delete.go +++ b/handler/circuit_diagram_delete.go @@ -17,18 +17,16 @@ import ( "github.com/gin-gonic/gin" "github.com/gofrs/uuid" - "go.uber.org/zap" "gorm.io/gorm/clause" ) // CircuitDiagramDeleteHandler define circuit diagram delete process API func CircuitDiagramDeleteHandler(c *gin.Context) { - logger := logger.GetLoggerInstance() pgClient := database.GetPostgresDBClient() var request network.CircuitDiagramDeleteRequest if err := c.ShouldBindJSON(&request); err != nil { - logger.Error("unmarshal circuit diagram del info failed", zap.Error(err)) + logger.Error(c, "unmarshal circuit diagram del info failed", "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -40,7 +38,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) { graph, err := diagram.GetGraphMap(request.PageID) if err != nil { - 
logger.Error("get topologic data from set by pageID failed", zap.Error(err)) + logger.Error(c, "get topologic data from set by pageID failed", "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -68,7 +66,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) { err = fmt.Errorf("convert uuid from string failed:%w:%w", err1, err2) } - logger.Error("format uuid from string failed", zap.Error(err)) + logger.Error(c, "format uuid from string failed", "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -93,7 +91,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) { if err != nil { tx.Rollback() - logger.Error("delete topologic info into DB failed", zap.Any("topologic_info", topologicDelInfo), zap.Error(err)) + logger.Error(c, "delete topologic info into DB failed", "topologic_info", topologicDelInfo, "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -110,7 +108,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) { if err != nil { tx.Rollback() - logger.Error("delete topologic info failed", zap.Any("topologic_info", topologicDelInfo), zap.Error(err)) + logger.Error(c, "delete topologic info failed", "topologic_info", topologicDelInfo, "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -136,7 +134,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) { if err != nil { tx.Rollback() - logger.Error("format uuid from string failed", zap.Error(err)) + logger.Error(c, "format uuid from string failed", "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -160,7 +158,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) { err = fmt.Errorf("%w:please check uuid conditions", constant.ErrDeleteRowZero) } - logger.Error("query component info into postgresDB failed", zap.String("component_global_uuid", componentInfo.UUID), zap.Error(err)) + logger.Error(c, "query component info into postgresDB failed", "component_global_uuid", componentInfo.UUID, "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -182,7 +180,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) { err = fmt.Errorf("%w:please check uuid conditions", constant.ErrDeleteRowZero) } - logger.Error("delete component info into postgresDB failed", zap.String("component_global_uuid", componentInfo.UUID), zap.Error(err)) + logger.Error(c, "delete component info into postgresDB failed", "component_global_uuid", componentInfo.UUID, "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -207,7 +205,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) { } msg := fmt.Sprintf("delete component info from table %s failed", modelStruct.ReturnTableName()) - logger.Error(msg, zap.String("component_global_uuid", componentInfo.UUID), zap.Error(err)) + logger.Error(c, msg, "component_global_uuid", componentInfo.UUID, "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, diff --git a/handler/circuit_diagram_load.go b/handler/circuit_diagram_load.go index 9174b6e..763acbd 100644 --- a/handler/circuit_diagram_load.go +++ b/handler/circuit_diagram_load.go @@ -11,7 +11,6 @@ import ( "modelRT/network" "github.com/gin-gonic/gin" - "go.uber.org/zap" ) // CircuitDiagramLoadHandler define circuit diagram load process API @@ -25,12 +24,11 @@ import ( // @Failure 400 {object} network.FailureResponse "request process failed" // @Router /model/diagram_load/{page_id} [get] func CircuitDiagramLoadHandler(c *gin.Context) { - logger := logger.GetLoggerInstance() pgClient := 
database.GetPostgresDBClient() pageID, err := strconv.ParseInt(c.Query("page_id"), 10, 64) if err != nil { - logger.Error("get pageID from url param failed", zap.Error(err)) + logger.Error(c, "get pageID from url param failed", "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -45,7 +43,7 @@ func CircuitDiagramLoadHandler(c *gin.Context) { topologicInfo, err := diagram.GetGraphMap(pageID) if err != nil { - logger.Error("get topologic data from set by pageID failed", zap.Error(err)) + logger.Error(c, "get topologic data from set by pageID failed", "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -66,7 +64,7 @@ func CircuitDiagramLoadHandler(c *gin.Context) { for _, componentUUID := range VerticeLink { component, err := database.QueryComponentByUUID(c, pgClient, componentUUID) if err != nil { - logger.Error("get component id info from DB by uuid failed", zap.Error(err)) + logger.Error(c, "get component id info from DB by uuid failed", "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -81,7 +79,7 @@ func CircuitDiagramLoadHandler(c *gin.Context) { componentParams, err := diagram.GetComponentMap(component.ID) if err != nil { - logger.Error("get component data from set by uuid failed", zap.Error(err)) + logger.Error(c, "get component data from set by uuid failed", "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -100,7 +98,7 @@ func CircuitDiagramLoadHandler(c *gin.Context) { rootVertexUUID := topologicInfo.RootVertex.String() rootComponent, err := database.QueryComponentByUUID(c, pgClient, topologicInfo.RootVertex) if err != nil { - logger.Error("get component id info from DB by uuid failed", zap.Error(err)) + logger.Error(c, "get component id info from DB by uuid failed", "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -115,7 +113,7 @@ func CircuitDiagramLoadHandler(c *gin.Context) { rootComponentParam, err := diagram.GetComponentMap(rootComponent.ID) if err != nil { - logger.Error("get component data from set by uuid failed", zap.Error(err)) + logger.Error(c, "get component data from set by uuid failed", "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, diff --git a/handler/circuit_diagram_update.go b/handler/circuit_diagram_update.go index cd08bdb..2ab945f 100644 --- a/handler/circuit_diagram_update.go +++ b/handler/circuit_diagram_update.go @@ -11,17 +11,15 @@ import ( "github.com/bitly/go-simplejson" "github.com/gin-gonic/gin" - "go.uber.org/zap" ) // CircuitDiagramUpdateHandler define circuit diagram update process API func CircuitDiagramUpdateHandler(c *gin.Context) { - logger := logger.GetLoggerInstance() pgClient := database.GetPostgresDBClient() var request network.CircuitDiagramUpdateRequest if err := c.ShouldBindJSON(&request); err != nil { - logger.Error("unmarshal circuit diagram update info failed", zap.Error(err)) + logger.Error(c, "unmarshal circuit diagram update info failed", "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -33,7 +31,7 @@ func CircuitDiagramUpdateHandler(c *gin.Context) { graph, err := diagram.GetGraphMap(request.PageID) if err != nil { - logger.Error("get topologic data from set by pageID failed", zap.Error(err)) + logger.Error(c, "get topologic data from set by pageID failed", "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -50,7 +48,7 @@ func CircuitDiagramUpdateHandler(c *gin.Context) { for _, topologicLink := range 
request.TopologicLinks { changeInfo, err := network.ParseUUID(topologicLink) if err != nil { - logger.Error("format uuid from string failed", zap.Error(err)) + logger.Error(c, "format uuid from string failed", "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -73,7 +71,7 @@ func CircuitDiagramUpdateHandler(c *gin.Context) { if err != nil { tx.Rollback() - logger.Error("update topologic info into DB failed", zap.Any("topologic_info", topologicChangeInfo), zap.Error(err)) + logger.Error(c, "update topologic info into DB failed", "topologic_info", topologicChangeInfo, "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -90,7 +88,7 @@ func CircuitDiagramUpdateHandler(c *gin.Context) { if err != nil { tx.Rollback() - logger.Error("update topologic info failed", zap.Any("topologic_info", topologicChangeInfo), zap.Error(err)) + logger.Error(c, "update topologic info failed", "topologic_info", topologicChangeInfo, "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -107,7 +105,7 @@ func CircuitDiagramUpdateHandler(c *gin.Context) { for index, componentInfo := range request.ComponentInfos { componentID, err := database.UpdateComponentIntoDB(c, tx, componentInfo) if err != nil { - logger.Error("udpate component info into DB failed", zap.Error(err)) + logger.Error(c, "udpate component info into DB failed", "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -125,7 +123,7 @@ func CircuitDiagramUpdateHandler(c *gin.Context) { err = database.UpdateModelIntoDB(c, tx, componentID, componentInfo.ComponentType, componentInfo.Params) if err != nil { - logger.Error("udpate component model info into DB failed", zap.Error(err)) + logger.Error(c, "udpate component model info into DB failed", "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -143,7 +141,7 @@ func CircuitDiagramUpdateHandler(c *gin.Context) { for _, componentInfo := range request.ComponentInfos { paramsJSON, err := simplejson.NewJson([]byte(componentInfo.Params)) if err != nil { - logger.Error("unmarshal component info by concurrent map failed", zap.String("component_params", componentInfo.Params), zap.Error(err)) + logger.Error(c, "unmarshal component info by concurrent map failed", "component_params", componentInfo.Params, "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, @@ -159,7 +157,7 @@ func CircuitDiagramUpdateHandler(c *gin.Context) { componentMap, err := paramsJSON.Map() if err != nil { - logger.Error("format params json info to map failed", zap.Error(err)) + logger.Error(c, "format params json info to map failed", "error", err) resp := network.FailureResponse{ Code: http.StatusBadRequest, diff --git a/handler/real_time_data_query.go b/handler/real_time_data_query.go index a8fc12f..8d72760 100644 --- a/handler/real_time_data_query.go +++ b/handler/real_time_data_query.go @@ -11,20 +11,18 @@ import ( "modelRT/network" "github.com/gin-gonic/gin" - "go.uber.org/zap" ) // QueryRealTimeDataHandler define query real time data process API func QueryRealTimeDataHandler(c *gin.Context) { var targetLevel constant.AlertLevel - logger := logger.GetLoggerInstance() alertManger := alert.GetAlertMangerInstance() levelStr := c.Query("level") level, err := strconv.Atoi(levelStr) if err != nil { - logger.Error("convert alert level string to int failed", zap.Error(err)) + logger.Error(c, "convert alert level string to int failed", "error", err) resp := network.FailureResponse{ Code: 
http.StatusBadRequest, diff --git a/handler/real_time_data_receive.go b/handler/real_time_data_receive.go index d84f534..3ba9318 100644 --- a/handler/real_time_data_receive.go +++ b/handler/real_time_data_receive.go @@ -7,7 +7,6 @@ import ( "github.com/gin-gonic/gin" "github.com/gorilla/websocket" jsoniter "github.com/json-iterator/go" - "go.uber.org/zap" realtimedata "modelRT/real-time-data" ) @@ -19,11 +18,9 @@ var upgrader = websocket.Upgrader{ // RealTimeDataReceivehandler define real time data receive and process API func RealTimeDataReceivehandler(c *gin.Context) { - logger := logger.GetLoggerInstance() - conn, err := upgrader.Upgrade(c.Writer, c.Request, nil) if err != nil { - logger.Error("upgrade http protocol to websocket protocal failed", zap.Error(err)) + logger.Error(c, "upgrade http protocol to websocket protocal failed", "error", err) return } defer conn.Close() @@ -31,17 +28,17 @@ func RealTimeDataReceivehandler(c *gin.Context) { for { messageType, p, err := conn.ReadMessage() if err != nil { - logger.Error("read message from websocket connection failed", zap.Error(err)) + logger.Error(c, "read message from websocket connection failed", "error", err) respByte := processResponse(-1, "read message from websocket connection failed", nil) if len(respByte) == 0 { - logger.Error("process message from byte failed", zap.Error(err)) + logger.Error(c, "process message from byte failed", "error", err) continue } err = conn.WriteMessage(messageType, respByte) if err != nil { - logger.Error("write message to websocket connection failed", zap.Error(err)) + logger.Error(c, "write message to websocket connection failed", "error", err) continue } continue @@ -50,17 +47,17 @@ func RealTimeDataReceivehandler(c *gin.Context) { var request network.RealTimeDataReceiveRequest err = jsoniter.Unmarshal([]byte(p), &request) if err != nil { - logger.Error("unmarshal message from byte failed", zap.Error(err)) + logger.Error(c, "unmarshal message from byte failed", "error", err) respByte := processResponse(-1, "unmarshal message from byte failed", nil) if len(respByte) == 0 { - logger.Error("process message from byte failed", zap.Error(err)) + logger.Error(c, "process message from byte failed", "error", err) continue } err = conn.WriteMessage(messageType, respByte) if err != nil { - logger.Error("write message to websocket connection failed", zap.Error(err)) + logger.Error(c, "write message to websocket connection failed", "error", err) continue } continue @@ -74,13 +71,13 @@ func RealTimeDataReceivehandler(c *gin.Context) { } respByte := processResponse(0, "success", payload) if len(respByte) == 0 { - logger.Error("process message from byte failed", zap.Error(err)) + logger.Error(c, "process message from byte failed", "error", err) continue } err = conn.WriteMessage(messageType, respByte) if err != nil { - logger.Error("write message to websocket connection failed", zap.Error(err)) + logger.Error(c, "write message to websocket connection failed", "error", err) continue } } diff --git a/pool/concurrency_anchor_parse.go b/pool/concurrency_anchor_parse.go index 8e2cb68..26f8963 100644 --- a/pool/concurrency_anchor_parse.go +++ b/pool/concurrency_anchor_parse.go @@ -12,7 +12,6 @@ import ( "modelRT/logger" "github.com/panjf2000/ants/v2" - "go.uber.org/zap" ) // AnchorRealTimePool define anchor param pool of real time data @@ -31,12 +30,11 @@ func AnchorPoolInit(concurrentQuantity int) (pool *ants.PoolWithFunc, err error) // AnchorFunc defines func that process the real time data of component anchor params 
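AnchorPoolInit evidently builds an ants.PoolWithFunc: a bounded set of goroutines all running one shared worker function, fed through Invoke. A self-contained sketch of that pattern with generic payloads (the real pool binds AnchorFunc, declared next):

package main

import (
	"fmt"
	"sync"

	"github.com/panjf2000/ants/v2"
)

func main() {
	var wg sync.WaitGroup

	// Same shape as AnchorPoolInit: a bounded pool running one shared
	// worker function; callers hand payloads in through Invoke.
	pool, err := ants.NewPoolWithFunc(4, func(payload interface{}) {
		defer wg.Done()
		fmt.Println("processing", payload)
	})
	if err != nil {
		panic(err)
	}
	defer pool.Release()

	for i := 0; i < 10; i++ {
		wg.Add(1)
		_ = pool.Invoke(i) // blocks once all 4 workers are busy
	}
	wg.Wait()
}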
var AnchorFunc = func(poolConfig interface{}) { var firstStart bool - logger := logger.GetLoggerInstance() alertManager := alert.GetAlertMangerInstance() anchorChanConfig, ok := poolConfig.(config.AnchorChanConfig) if !ok { - logger.Error("conversion component anchor chan type failed") + logger.Error(anchorChanConfig.Ctx, "conversion component anchor chan type failed") return } @@ -56,12 +54,12 @@ var AnchorFunc = func(poolConfig interface{}) { for _, value := range anchorRealTimeDatas { anchorName, err := diagram.GetAnchorValue(componentID) if err != nil { - logger.Error("can not get anchor value from map by uuid", zap.Int64("component_id", componentID), zap.Error(err)) + logger.Error(anchorChanConfig.Ctx, "can not get anchor value from map by uuid", "component_id", componentID, "error", err) continue } if anchorName != anchorParaConfig.AnchorName { - logger.Error("anchor name not equal param config anchor value", zap.String("map_anchor_name", anchorName), zap.String("param_anchor_name", anchorParaConfig.AnchorName)) + logger.Error(anchorChanConfig.Ctx, "anchor name not equal param config anchor value", "map_anchor_name", anchorName, "param_anchor_name", anchorParaConfig.AnchorName) continue } diff --git a/pool/concurrency_model_parse.go b/pool/concurrency_model_parse.go index b0a4fa6..797d197 100644 --- a/pool/concurrency_model_parse.go +++ b/pool/concurrency_model_parse.go @@ -10,21 +10,17 @@ import ( "modelRT/diagram" "modelRT/logger" "modelRT/model" - - "go.uber.org/zap" ) // ParseFunc defines func that parses the model data from postgres var ParseFunc = func(parseConfig interface{}) { - logger := logger.GetLoggerInstance() - modelParseConfig, ok := parseConfig.(config.ModelParseConfig) if !ok { - logger.Error("conversion model parse config type failed") + logger.Error(modelParseConfig.Ctx, "conversion model parse config type failed") return } - cancelCtx, cancel := context.WithTimeout(modelParseConfig.Context, 5*time.Second) + cancelCtx, cancel := context.WithTimeout(modelParseConfig.Ctx, 5*time.Second) defer cancel() pgClient := database.GetPostgresDBClient() @@ -33,10 +29,10 @@ var ParseFunc = func(parseConfig interface{}) { result := pgClient.WithContext(cancelCtx).Table(tableName).Where("component_id = ?", modelParseConfig.ComponentInfo.ID).Find(&unmarshalMap) if result.Error != nil { - logger.Error("query component detail info failed", zap.Error(result.Error)) + logger.Error(modelParseConfig.Ctx, "query component detail info failed", "error", result.Error) return } else if result.RowsAffected == 0 { - logger.Error("query component detail info from table is empty", zap.String("table_name", tableName)) + logger.Error(modelParseConfig.Ctx, "query component detail info from table is empty", "table_name", tableName) return } @@ -48,7 +44,7 @@ var ParseFunc = func(parseConfig interface{}) { } diagram.StoreAnchorValue(modelParseConfig.ComponentInfo.ID, anchorName) - GetComponentChan(modelParseConfig.Context, modelParseConfig.ComponentInfo.ID) + GetComponentChan(modelParseConfig.Ctx, modelParseConfig.ComponentInfo.ID) uuid := modelParseConfig.ComponentInfo.GlobalUUID.String() unmarshalMap["id"] = modelParseConfig.ComponentInfo.ID diff --git a/real-time-data/kafka.go b/real-time-data/kafka.go index 1cfa31f..d6b857c 100644 --- a/real-time-data/kafka.go +++ b/real-time-data/kafka.go @@ -8,7 +8,6 @@ import ( "modelRT/logger" "github.com/confluentinc/confluent-kafka-go/kafka" - "go.uber.org/zap" ) // RealTimeDataComputer continuously processing real-time data from Kafka specified topics @@ 
-17,9 +16,6 @@ func RealTimeDataComputer(ctx context.Context, consumerConfig kafka.ConfigMap, t ctx, cancel := context.WithCancel(ctx) defer cancel() - // get a logger - logger := logger.GetLoggerInstance() - // setup a channel to listen for interrupt signals // TODO 将中断信号放到入参中 interrupt := make(chan struct{}, 1) @@ -30,13 +26,13 @@ func RealTimeDataComputer(ctx context.Context, consumerConfig kafka.ConfigMap, t // create a new consumer consumer, err := kafka.NewConsumer(&consumerConfig) if err != nil { - logger.Error("init kafka consume by config failed", zap.Any("config", consumerConfig), zap.Error(err)) + logger.Error(ctx, "init kafka consume by config failed", "config", consumerConfig, "error", err) } // subscribe to the topic err = consumer.SubscribeTopics(topics, nil) if err != nil { - logger.Error("subscribe to the topic failed", zap.Strings("topic", topics), zap.Error(err)) + logger.Error(ctx, "subscribe to the topic failed", "topic", topics, "error", err) } // start a goroutine to handle shutdown @@ -51,17 +47,17 @@ func RealTimeDataComputer(ctx context.Context, consumerConfig kafka.ConfigMap, t msg, err := consumer.ReadMessage(timeoutDuration) if err != nil { if ctx.Err() == context.Canceled { - logger.Info("context canceled, stopping read loop") + logger.Info(ctx, "context canceled, stopping read loop") break } - logger.Error("consumer read message failed", zap.Error(err)) + logger.Error(ctx, "consumer read message failed", "error", err) continue } // TODO 使用 ants.pool处理 kafka 的订阅数据 _, err = consumer.CommitMessage(msg) if err != nil { - logger.Error("manual submission information failed", zap.Any("message", msg), zap.Error(err)) + logger.Error(ctx, "manual submission information failed", "message", msg, "error", err) } } } diff --git a/real-time-data/real_time_data_receive.go b/real-time-data/real_time_data_receive.go index 2d70b5f..74e75bd 100644 --- a/real-time-data/real_time_data_receive.go +++ b/real-time-data/real_time_data_receive.go @@ -10,8 +10,6 @@ import ( "modelRT/logger" "modelRT/network" "modelRT/pool" - - "go.uber.org/zap" ) // RealTimeDataChan define channel of real time data receive @@ -23,8 +21,6 @@ func init() { // ReceiveChan define func of real time data receive and process func ReceiveChan(ctx context.Context) { - logger := logger.GetLoggerInstance() - for { select { case <-ctx.Done(): @@ -34,13 +30,13 @@ func ReceiveChan(ctx context.Context) { componentID := realTimeData.PayLoad.ComponentID component, err := diagram.GetComponentMap(componentID) if err != nil { - logger.Error("query component info from diagram map by componet id failed", zap.Int64("component_id", componentID), zap.Error(err)) + logger.Error(ctx, "query component info from diagram map by componet id failed", "component_id", componentID, "error", err) continue } componentType := component["component_type"].(int) if componentType != constant.DemoType { - logger.Error("can not process real time data of component type not equal DemoType", zap.Int64("component_id", componentID)) + logger.Error(ctx, "can not process real time data of component type not equal DemoType", "component_id", componentID) continue } diff --git a/test/distributedlock/rwlock_test.go b/test/distributedlock/rwlock_test.go index f3a6990..b3b0dee 100644 --- a/test/distributedlock/rwlock_test.go +++ b/test/distributedlock/rwlock_test.go @@ -11,16 +11,11 @@ import ( "github.com/redis/go-redis/v9" "github.com/stretchr/testify/assert" - "go.uber.org/zap" ) -var ( - log *zap.Logger - rdb *redis.Client -) +var rdb *redis.Client func 
init() { - log = zap.Must(zap.NewDevelopment()) rdb = redis.NewClient(&redis.Options{ Network: "tcp", Addr: "192.168.2.104:30001", @@ -46,7 +41,6 @@ func TestRWLockRLockAndUnRLock(t *testing.T) { Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", }) - rwLocker.Logger = log duration := 10 * time.Second // 第一次加读锁 @@ -77,7 +71,6 @@ func TestRWLockReentrantRLock(t *testing.T) { Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", }) - rwLocker.Logger = log duration := 10 * time.Second // 第一次加读锁 @@ -125,7 +118,6 @@ func TestRWLockRefreshRLock(t *testing.T) { Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", }) - rwLocker.Logger = log duration := 10 * time.Second // 第一次加读锁 @@ -168,7 +160,6 @@ func TestRWLock2ClientRLock(t *testing.T) { Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", }) - rwLocker1.Logger = log rwLocker2 := dl.GetRWLocker(rdb, &dl.RedissionLockConfig{ LockLeaseTime: 120, @@ -176,7 +167,6 @@ func TestRWLock2ClientRLock(t *testing.T) { Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc5577", }) - rwLocker2.Logger = log duration := 10 * time.Second // locker1加读锁 @@ -227,7 +217,6 @@ func TestRWLock2CWith2DifTimeRLock(t *testing.T) { Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", }) - rwLocker1.Logger = log rwLocker2 := dl.GetRWLocker(rdb, &dl.RedissionLockConfig{ LockLeaseTime: 30, @@ -235,7 +224,6 @@ func TestRWLock2CWith2DifTimeRLock(t *testing.T) { Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc5577", }) - rwLocker2.Logger = log duration := 10 * time.Second // locker1加读锁 @@ -300,7 +288,6 @@ func TestRWLock2CWithTimeTransformRLock(t *testing.T) { Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", }) - rwLocker1.Logger = log rwLocker2 := dl.GetRWLocker(rdb, &dl.RedissionLockConfig{ LockLeaseTime: 120, @@ -308,7 +295,6 @@ func TestRWLock2CWithTimeTransformRLock(t *testing.T) { Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc5577", }) - rwLocker2.Logger = log duration := 10 * time.Second // locker1加读锁 @@ -366,7 +352,6 @@ func TestRWLockWLockAndUnWLock(t *testing.T) { Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", }) - rwLocker.Logger = log duration := 10 * time.Second // 第一次加读锁 @@ -397,7 +382,6 @@ func TestRWLockReentrantWLock(t *testing.T) { Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", }) - rwLocker.Logger = log duration := 10 * time.Second // 第一次加写锁 @@ -445,7 +429,6 @@ func TestRWLock2CWithRLockAndWLockFailed(t *testing.T) { Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", }) - rwLocker1.Logger = log rwLocker2 := dl.GetRWLocker(rdb, &dl.RedissionLockConfig{ LockLeaseTime: 30, @@ -453,7 +436,6 @@ func TestRWLock2CWithRLockAndWLockFailed(t *testing.T) { Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc5577", }) - rwLocker2.Logger = log duration := 10 * time.Second // locker1加读锁 @@ -485,7 +467,6 @@ func TestRWLock2CWithRLockAndWLockSucceed(t *testing.T) { Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", }) - rwLocker1.Logger = log rwLocker2 := dl.GetRWLocker(rdb, &dl.RedissionLockConfig{ LockLeaseTime: 120, @@ -493,7 +474,7 @@ func TestRWLock2CWithRLockAndWLockSucceed(t *testing.T) { Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc5577", }) - rwLocker2.Logger = log + duration := 10 * time.Second // locker1加读锁 err := rwLocker1.RLock(ctx, duration) @@ -538,7 +519,6 @@ func TestRWLock2CWithWLockAndRLock(t *testing.T) { Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc556a", 
}) - rwLocker1.Logger = log rwLocker2 := dl.GetRWLocker(rdb, &dl.RedissionLockConfig{ LockLeaseTime: 30, @@ -546,7 +526,6 @@ func TestRWLock2CWithWLockAndRLock(t *testing.T) { Key: "component", Token: "fd348a84-e07c-4a61-8c19-f753e6bc5577", }) - rwLocker2.Logger = log duration := 10 * time.Second // locker1加写锁 From 3fb78b81957fa4c762f438ed5739c90853733971 Mon Sep 17 00:00:00 2001 From: douxu Date: Tue, 10 Jun 2025 16:29:52 +0800 Subject: [PATCH 30/33] refactor(common/error): optimize error struct MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit add msg 、cause and occurred field into error struct for logging detail wrong info --- common/errcode/error.go | 156 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 156 insertions(+) create mode 100644 common/errcode/error.go diff --git a/common/errcode/error.go b/common/errcode/error.go new file mode 100644 index 0000000..7b5dd71 --- /dev/null +++ b/common/errcode/error.go @@ -0,0 +1,156 @@ +package errcode + +import ( + "encoding/json" + "fmt" + "path" + "runtime" +) + +var codes = map[int]struct{}{} + +// AppError define struct of internal error +type AppError struct { + code int + msg string + cause error + occurred string // 保存由底层错误导致AppErr发生时的位置 +} + +func (e *AppError) Error() string { + if e == nil { + return "" + } + errBytes, err := json.Marshal(e.toStructuredError()) + if err != nil { + return fmt.Sprintf("Error() is error: json marshal error: %v", err) + } + return string(errBytes) +} + +func (e *AppError) String() string { + return e.Error() +} + +// Code define func return error code +func (e *AppError) Code() int { + return e.code +} + +// Msg define func return error msg +func (e *AppError) Msg() string { + return e.msg +} + +// Cause define func return base error +func (e *AppError) Cause() error { + return e.cause +} + +// WithCause define func return top level predefined errors,where the cause field contains the underlying base error +// 在逻辑执行中出现错误, 比如dao层返回的数据库查询错误 +// 可以在领域层返回预定义的错误前附加上导致错误的基础错误。 +// 如果业务模块预定义的错误码比较详细, 可以使用这个方法, 反之错误码定义的比较笼统建议使用Wrap方法包装底层错误生成项目自定义Error +// 并将其记录到日志后再使用预定义错误码返回接口响应 +func (e *AppError) WithCause(err error) *AppError { + newErr := e.Clone() + newErr.cause = err + newErr.occurred = getAppErrOccurredInfo() + return newErr +} + +// Wrap define func packaging information and errors returned by the underlying logic +// 用于逻辑中包装底层函数返回的error 和 WithCause 一样都是为了记录错误链条 +// 该方法生成的error 用于日志记录, 返回响应请使用预定义好的error +func Wrap(msg string, err error) *AppError { + if err == nil { + return nil + } + appErr := &AppError{code: -1, msg: msg, cause: err} + appErr.occurred = getAppErrOccurredInfo() + return appErr +} + +// UnWrap define func return the error wrapped in structure +func (e *AppError) UnWrap() error { + return e.cause +} + +// Is define func return result of whether any error in err's tree matches target. 
implemented to support errors.Is(err, target) +func (e *AppError) Is(target error) bool { + targetErr, ok := target.(*AppError) + if !ok { + return false + } + return targetErr.Code() == e.Code() +} + +// Clone define func return a new AppError with source AppError's code, msg, cause, occurred +func (e *AppError) Clone() *AppError { + return &AppError{ + code: e.code, + msg: e.msg, + cause: e.cause, + occurred: e.occurred, + } +} + +func newError(code int, msg string) *AppError { + if code > -1 { + if _, duplicated := codes[code]; duplicated { + panic(fmt.Sprintf("预定义错误码 %d 不能重复, 请检查后更换", code)) + } + codes[code] = struct{}{} + } + + return &AppError{code: code, msg: msg} +} + +// getAppErrOccurredInfo 获取项目中调用Wrap或者WithCause方法时的程序位置, 方便排查问题 +func getAppErrOccurredInfo() string { + pc, file, line, ok := runtime.Caller(2) + if !ok { + return "" + } + file = path.Base(file) + funcName := runtime.FuncForPC(pc).Name() + triggerInfo := fmt.Sprintf("func: %s, file: %s, line: %d", funcName, file, line) + return triggerInfo +} + +// AppendMsg define func append a message to the existing error message +func (e *AppError) AppendMsg(msg string) *AppError { + n := e.Clone() + n.msg = fmt.Sprintf("%s, %s", e.msg, msg) + return n +} + +// SetMsg define func set error message into specify field +func (e *AppError) SetMsg(msg string) *AppError { + n := e.Clone() + n.msg = msg + return n +} + +type formattedErr struct { + Code int `json:"code"` + Msg string `json:"msg"` + Cause interface{} `json:"cause"` + Occurred string `json:"occurred"` +} + +// toStructuredError 在JSON Encode 前把Error进行格式化 +func (e *AppError) toStructuredError() *formattedErr { + fe := new(formattedErr) + fe.Code = e.Code() + fe.Msg = e.Msg() + fe.Occurred = e.occurred + if e.cause != nil { + if appErr, ok := e.cause.(*AppError); ok { + fe.Cause = appErr.toStructuredError() + } else { + fe.Cause = e.cause.Error() + } + } + return fe +} From b7009c351ee7c7f73e1295402cc6f7efbc3202e8 Mon Sep 17 00:00:00 2001 From: douxu Date: Fri, 13 Jun 2025 15:34:49 +0800 Subject: [PATCH 31/33] refactor(errer-package): optimize package name of constant 1.optimize package name of constant --- alert/init.go | 12 ++-- common/errcode/code.go | 72 +++++++++++++++++++++++ common/errcode/dao_error.go | 21 +++++++ config/anchor_param_config.go | 4 +- constant/alert.go | 4 +- constant/busbar_section.go | 3 +- constant/electrical_components.go | 4 +- constant/error.go | 18 +----- constant/log_mode.go | 4 +- constant/time.go | 4 +- constant/togologic.go | 3 +- database/create_component.go | 4 +- database/create_model_info.go | 4 +- database/create_topologic.go | 4 +- database/delete_topologic.go | 4 +- database/query_topologic.go | 24 ++++---- database/update_component.go | 6 +- database/update_model_info.go | 4 +- database/update_topologic.go | 13 ++-- diagram/graph.go | 4 +- distributedlock/constant/lock_err.go | 2 +- distributedlock/constant/redis_result.go | 2 +- distributedlock/redis_lock.go | 32 +++++----- distributedlock/redis_rwlock.go | 66 ++++++++++----------- handler/alert_event_query.go | 6 +- handler/anchor_point_replace.go | 9 +-- handler/circuit_diagram_delete.go | 8 +-- handler/real_time_data_query.go | 6 +- handler/real_time_data_receive.go | 3 +- logger/zap.go | 4 +- model/model_select.go | 8 +-- network/circuit_diagram_update_request.go | 23 ++++---- pool/concurrency_anchor_parse.go | 4 +- real-time-data/real_time_data_receive.go | 4 +- test/distributedlock/rwlock_test.go | 4 +- 35 files changed, 240 insertions(+), 157 deletions(-) create mode 
100644 common/errcode/code.go create mode 100644 common/errcode/dao_error.go diff --git a/alert/init.go b/alert/init.go index f56de9a..e6e7ba3 100644 --- a/alert/init.go +++ b/alert/init.go @@ -5,7 +5,7 @@ import ( "sort" "sync" - "modelRT/constant" + constants "modelRT/constant" ) var ( @@ -18,7 +18,7 @@ var ( type Event struct { ComponentID int64 AnchorName string - Level constant.AlertLevel + Level constants.AlertLevel Message string StartTime int64 } @@ -26,7 +26,7 @@ type Event struct { // EventManager define store and manager alert event struct type EventManager struct { mu sync.RWMutex - events map[constant.AlertLevel][]Event + events map[constants.AlertLevel][]Event } // EventSet define alert event set implement sort.Interface @@ -53,7 +53,7 @@ func (am *EventManager) AddEvent(event Event) { } // GetEventsByLevel define get alert event by alert level -func (am *EventManager) GetEventsByLevel(level constant.AlertLevel) []Event { +func (am *EventManager) GetEventsByLevel(level constants.AlertLevel) []Event { am.mu.Lock() defer am.mu.Unlock() @@ -61,7 +61,7 @@ func (am *EventManager) GetEventsByLevel(level constant.AlertLevel) []Event { } // GetRangeEventsByLevel define get range alert event by alert level -func (am *EventManager) GetRangeEventsByLevel(targetLevel constant.AlertLevel) []Event { +func (am *EventManager) GetRangeEventsByLevel(targetLevel constants.AlertLevel) []Event { var targetEvents []Event am.mu.Lock() @@ -79,7 +79,7 @@ func (am *EventManager) GetRangeEventsByLevel(targetLevel constant.AlertLevel) [ // InitAlertEventManager define new alert event manager func InitAlertEventManager() *EventManager { return &EventManager{ - events: make(map[constant.AlertLevel][]Event), + events: make(map[constants.AlertLevel][]Event), } } diff --git a/common/errcode/code.go b/common/errcode/code.go new file mode 100644 index 0000000..abb4d09 --- /dev/null +++ b/common/errcode/code.go @@ -0,0 +1,72 @@ +package errcode + +import ( + "net/http" +) + +// 此处为公共的错误码, 预留 10000000 ~ 10000099 间的 100 个错误码 +var ( + Success = newError(0, "success") + ErrServer = newError(10000000, "服务器内部错误") + ErrParams = newError(10000001, "参数错误, 请检查") + ErrNotFound = newError(10000002, "资源未找到") + ErrPanic = newError(10000003, "(*^__^*)系统开小差了,请稍后重试") // 无预期的panic错误 + ErrToken = newError(10000004, "Token无效") + ErrForbidden = newError(10000005, "未授权") // 访问一些未授权的资源时的错误 + ErrTooManyRequests = newError(10000006, "请求过多") + ErrCoverData = newError(10000007, "ConvertDataError") // 数据转换错误 +) + +// 各个业务模块自定义的错误码, 从 10000100 开始, 可以按照不同的业务模块划分不同的号段 +// Example: +//var ( +// ErrOrderClosed = NewError(10000100, "订单已关闭") +//) + +// 用户模块相关错误码 10000100 ~ 1000199 +var ( + ErrUserInvalid = newError(10000101, "用户异常") + ErrUserNameOccupied = newError(10000102, "用户名已被占用") + ErrUserNotRight = newError(10000103, "用户名或密码不正确") +) + +// 商品模块相关错误码 10000200 ~ 1000299 +var ( + ErrCommodityNotExists = newError(10000200, "商品不存在") + ErrCommodityStockOut = newError(10000201, "库存不足") +) + +// 购物车模块相关错误码 10000300 ~ 1000399 +var ( + ErrCartItemParam = newError(10000300, "购物项参数异常") + ErrCartWrongUser = newError(10000301, "用户购物信息不匹配") +) + +// 订单模块相关错误码 10000500 ~ 10000599 +var ( + ErrOrderParams = newError(10000500, "订单参数异常") + ErrOrderCanNotBeChanged = newError(10000501, "订单不可修改") + ErrOrderUnsupportedPayScene = newError(10000502, "支付场景暂不支持") +) + +func (e *AppError) HttpStatusCode() int { + switch e.Code() { + case Success.Code(): + return http.StatusOK + case ErrServer.Code(), ErrPanic.Code(): + return http.StatusInternalServerError + case 
ErrParams.Code(), ErrUserInvalid.Code(), ErrUserNameOccupied.Code(), ErrUserNotRight.Code(), + ErrCommodityNotExists.Code(), ErrCommodityStockOut.Code(), ErrCartItemParam.Code(), ErrOrderParams.Code(): + return http.StatusBadRequest + case ErrNotFound.Code(): + return http.StatusNotFound + case ErrTooManyRequests.Code(): + return http.StatusTooManyRequests + case ErrToken.Code(): + return http.StatusUnauthorized + case ErrForbidden.Code(), ErrCartWrongUser.Code(), ErrOrderCanNotBeChanged.Code(): + return http.StatusForbidden + default: + return http.StatusInternalServerError + } +} diff --git a/common/errcode/dao_error.go b/common/errcode/dao_error.go new file mode 100644 index 0000000..df30ff8 --- /dev/null +++ b/common/errcode/dao_error.go @@ -0,0 +1,21 @@ +package errcode + +import "errors" + +// Database layer error +var ( + // ErrUUIDChangeType define error of check uuid from value failed in uuid from change type + ErrUUIDChangeType = errors.New("undefined uuid change type") + + // ErrUpdateRowZero define error of update affected row zero + ErrUpdateRowZero = errors.New("update affected rows is zero") + + // ErrDeleteRowZero define error of delete affected row zero + ErrDeleteRowZero = errors.New("delete affected rows is zero") + + // ErrQueryRowZero define error of query affected row zero + ErrQueryRowZero = errors.New("query affected rows is zero") + + // ErrInsertRowUnexpected define error of insert affected row not reach expected number + ErrInsertRowUnexpected = errors.New("the number of inserted data rows don't reach the expected value") +) diff --git a/config/anchor_param_config.go b/config/anchor_param_config.go index 989b870..e0a9c15 100644 --- a/config/anchor_param_config.go +++ b/config/anchor_param_config.go @@ -2,7 +2,7 @@ package config import ( - "modelRT/constant" + constants "modelRT/constant" ) // AnchorParamListConfig define anchor params list config struct @@ -43,7 +43,7 @@ var baseCurrentFunc = func(archorValue float64, args ...float64) float64 { // SelectAnchorCalculateFuncAndParams define select anchor func and anchor calculate value by component type 、 anchor name and component data func SelectAnchorCalculateFuncAndParams(componentType int, anchorName string, componentData map[string]interface{}) (func(archorValue float64, args ...float64) float64, []float64) { - if componentType == constant.DemoType { + if componentType == constants.DemoType { if anchorName == "voltage" { resistance := componentData["resistance"].(float64) return baseVoltageFunc, []float64{resistance} diff --git a/constant/alert.go b/constant/alert.go index 5b6ed6f..6f6d793 100644 --- a/constant/alert.go +++ b/constant/alert.go @@ -1,5 +1,5 @@ -// Package constant define alert level constant -package constant +// Package constants define constant variable +package constants // AlertLevel define alert level type type AlertLevel int diff --git a/constant/busbar_section.go b/constant/busbar_section.go index aa66cb3..7a6f86a 100644 --- a/constant/busbar_section.go +++ b/constant/busbar_section.go @@ -1,4 +1,5 @@ -package constant +// Package constants define constant variable +package constants const ( // 母线服役属性 diff --git a/constant/electrical_components.go b/constant/electrical_components.go index 2f7da65..077eea3 100644 --- a/constant/electrical_components.go +++ b/constant/electrical_components.go @@ -1,5 +1,5 @@ -// Package constant define constant value -package constant +// Package constants define constant variable +package constants const ( // NullableType 空类型类型 diff --git 
a/constant/error.go b/constant/error.go index cb48f93..d4c6242 100644 --- a/constant/error.go +++ b/constant/error.go @@ -1,22 +1,8 @@ -package constant +// Package constants define constant variable +package constants import "errors" -// ErrUUIDChangeType define error of check uuid from value failed in uuid from change type -var ErrUUIDChangeType = errors.New("undefined uuid change type") - -// ErrUpdateRowZero define error of update affected row zero -var ErrUpdateRowZero = errors.New("update affected rows is zero") - -// ErrDeleteRowZero define error of delete affected row zero -var ErrDeleteRowZero = errors.New("delete affected rows is zero") - -// ErrQueryRowZero define error of query affected row zero -var ErrQueryRowZero = errors.New("query affected rows is zero") - -// ErrInsertRowUnexpected define error of insert affected row not reach expected number -var ErrInsertRowUnexpected = errors.New("the number of inserted data rows don't reach the expected value") - var ( // ErrUUIDFromCheckT1 define error of check uuid from value failed in uuid from change type ErrUUIDFromCheckT1 = errors.New("in uuid from change type, value of new uuid_from is equal value of old uuid_from") diff --git a/constant/log_mode.go b/constant/log_mode.go index bb5ace7..faecb47 100644 --- a/constant/log_mode.go +++ b/constant/log_mode.go @@ -1,5 +1,5 @@ -// Package constant define constant value -package constant +// Package constants define constant variable +package constants const ( // DevelopmentLogMode define development operator environment for modelRT project diff --git a/constant/time.go b/constant/time.go index e10be69..a7b4d84 100644 --- a/constant/time.go +++ b/constant/time.go @@ -1,5 +1,5 @@ -// Package constant define constant value -package constant +// Package constants define constant variable +package constants const ( // LogTimeFormate define time format for log file name diff --git a/constant/togologic.go b/constant/togologic.go index 68e3a8d..a5bc57c 100644 --- a/constant/togologic.go +++ b/constant/togologic.go @@ -1,4 +1,5 @@ -package constant +// Package constants define constant variable +package constants import "github.com/gofrs/uuid" diff --git a/database/create_component.go b/database/create_component.go index cdfd99e..e4c1644 100644 --- a/database/create_component.go +++ b/database/create_component.go @@ -7,7 +7,7 @@ import ( "strconv" "time" - "modelRT/constant" + "modelRT/common/errcode" "modelRT/network" "modelRT/orm" @@ -43,7 +43,7 @@ func CreateComponentIntoDB(ctx context.Context, tx *gorm.DB, componentInfo netwo if result.Error != nil || result.RowsAffected == 0 { err := result.Error if result.RowsAffected == 0 { - err = fmt.Errorf("%w:please check insert component slice", constant.ErrInsertRowUnexpected) + err = fmt.Errorf("%w:please check insert component slice", errcode.ErrInsertRowUnexpected) } return -1, fmt.Errorf("insert component info failed:%w", err) } diff --git a/database/create_model_info.go b/database/create_model_info.go index 4a4e41d..0f41172 100644 --- a/database/create_model_info.go +++ b/database/create_model_info.go @@ -6,7 +6,7 @@ import ( "fmt" "time" - "modelRT/constant" + "modelRT/common/errcode" "modelRT/model" jsoniter "github.com/json-iterator/go" @@ -28,7 +28,7 @@ func CreateModelIntoDB(ctx context.Context, tx *gorm.DB, componentID int64, comp if result.Error != nil || result.RowsAffected == 0 { err := result.Error if result.RowsAffected == 0 { - err = fmt.Errorf("%w:please check insert model params", constant.ErrInsertRowUnexpected) + err = 
fmt.Errorf("%w:please check insert model params", errcode.ErrInsertRowUnexpected) } return fmt.Errorf("insert component model params into table %s failed:%w", modelStruct.ReturnTableName(), err) } diff --git a/database/create_topologic.go b/database/create_topologic.go index 57d2727..98d4d20 100644 --- a/database/create_topologic.go +++ b/database/create_topologic.go @@ -6,7 +6,7 @@ import ( "fmt" "time" - "modelRT/constant" + "modelRT/common/errcode" "modelRT/network" "modelRT/orm" @@ -34,7 +34,7 @@ func CreateTopologicIntoDB(ctx context.Context, tx *gorm.DB, pageID int64, topol if result.Error != nil || result.RowsAffected != int64(len(topologicSlice)) { err := result.Error if result.RowsAffected != int64(len(topologicSlice)) { - err = fmt.Errorf("%w:please check insert topologic slice", constant.ErrInsertRowUnexpected) + err = fmt.Errorf("%w:please check insert topologic slice", errcode.ErrInsertRowUnexpected) } return fmt.Errorf("insert topologic link failed:%w", err) } diff --git a/database/delete_topologic.go b/database/delete_topologic.go index 21e0264..52f4d97 100644 --- a/database/delete_topologic.go +++ b/database/delete_topologic.go @@ -6,7 +6,7 @@ import ( "fmt" "time" - "modelRT/constant" + "modelRT/common/errcode" "modelRT/network" "modelRT/orm" @@ -23,7 +23,7 @@ func DeleteTopologicIntoDB(ctx context.Context, tx *gorm.DB, pageID int64, delIn if result.Error != nil || result.RowsAffected == 0 { err := result.Error if result.RowsAffected == 0 { - err = fmt.Errorf("%w:please check delete topologic where conditions", constant.ErrDeleteRowZero) + err = fmt.Errorf("%w:please check delete topologic where conditions", errcode.ErrDeleteRowZero) } return fmt.Errorf("delete topologic link failed:%w", err) } diff --git a/database/query_topologic.go b/database/query_topologic.go index 7faf230..9a72426 100644 --- a/database/query_topologic.go +++ b/database/query_topologic.go @@ -6,7 +6,7 @@ import ( "fmt" "time" - "modelRT/constant" + constants "modelRT/constant" "modelRT/diagram" "modelRT/logger" "modelRT/orm" @@ -24,9 +24,9 @@ func QueryTopologic(ctx context.Context, tx *gorm.DB) ([]orm.Topologic, error) { cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() - result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Raw(sql.RecursiveSQL, constant.UUIDNilStr).Scan(&topologics) + result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Raw(sql.RecursiveSQL, constants.UUIDNilStr).Scan(&topologics) if result.Error != nil { - logger.Error(ctx, "query circuit diagram topologic info by start node uuid failed", "start_node_uuid", constant.UUIDNilStr, "error", result.Error) + logger.Error(ctx, "query circuit diagram topologic info by start node uuid failed", "start_node_uuid", constants.UUIDNilStr, "error", result.Error) return nil, result.Error } return topologics, nil @@ -52,7 +52,7 @@ func QueryTopologicFromDB(ctx context.Context, tx *gorm.DB, componentTypeMap map func InitCircuitDiagramTopologic(topologicNodes []orm.Topologic, componentTypeMap map[uuid.UUID]int) error { var rootVertex *diagram.MultiBranchTreeNode for _, node := range topologicNodes { - if node.UUIDFrom == constant.UUIDNil { + if node.UUIDFrom == constants.UUIDNil { // rootVertex = node.UUIDTo var componentType int componentType, ok := componentTypeMap[node.UUIDFrom] @@ -69,7 +69,7 @@ func InitCircuitDiagramTopologic(topologicNodes []orm.Topologic, componentTypeMa } for _, node := range topologicNodes { - if node.UUIDFrom == constant.UUIDNil { + if 
node.UUIDFrom == constants.UUIDNil { var componentType int componentType, ok := componentTypeMap[node.UUIDTo] if !ok { @@ -110,7 +110,7 @@ func BuildMultiBranchTree(topologics []orm.Topologic, componentTypeMap map[uuid. for _, topo := range topologics { if _, exists := nodeMap[topo.UUIDFrom]; !exists { // skip special uuid - if topo.UUIDTo != constant.UUIDNil { + if topo.UUIDTo != constants.UUIDNil { var ok bool componentType, ok := componentTypeMap[topo.UUIDFrom] if !ok { @@ -127,7 +127,7 @@ func BuildMultiBranchTree(topologics []orm.Topologic, componentTypeMap map[uuid. if _, exists := nodeMap[topo.UUIDTo]; !exists { // skip special uuid - if topo.UUIDTo != constant.UUIDNil { + if topo.UUIDTo != constants.UUIDNil { var ok bool componentType, ok := componentTypeMap[topo.UUIDTo] if !ok { @@ -145,19 +145,19 @@ func BuildMultiBranchTree(topologics []orm.Topologic, componentTypeMap map[uuid. for _, topo := range topologics { var parent *diagram.MultiBranchTreeNode - if topo.UUIDFrom == constant.UUIDNil { + if topo.UUIDFrom == constants.UUIDNil { var componentType int parent = &diagram.MultiBranchTreeNode{ - ID: constant.UUIDNil, + ID: constants.UUIDNil, NodeComponentType: componentType, } - nodeMap[constant.UUIDNil] = parent + nodeMap[constants.UUIDNil] = parent } else { parent = nodeMap[topo.UUIDFrom] } var child *diagram.MultiBranchTreeNode - if topo.UUIDTo == constant.UUIDNil { + if topo.UUIDTo == constants.UUIDNil { var componentType int child = &diagram.MultiBranchTreeNode{ ID: topo.UUIDTo, @@ -171,7 +171,7 @@ func BuildMultiBranchTree(topologics []orm.Topologic, componentTypeMap map[uuid. } // return root vertex - root, exists := nodeMap[constant.UUIDNil] + root, exists := nodeMap[constants.UUIDNil] if !exists { return nil, fmt.Errorf("root node not found") } diff --git a/database/update_component.go b/database/update_component.go index 22db5b9..2032f9d 100644 --- a/database/update_component.go +++ b/database/update_component.go @@ -7,7 +7,7 @@ import ( "strconv" "time" - "modelRT/constant" + "modelRT/common/errcode" "modelRT/network" "modelRT/orm" @@ -30,7 +30,7 @@ func UpdateComponentIntoDB(ctx context.Context, tx *gorm.DB, componentInfo netwo if result.Error != nil || result.RowsAffected == 0 { err := result.Error if result.RowsAffected == 0 { - err = fmt.Errorf("%w:please check update component conditions", constant.ErrUpdateRowZero) + err = fmt.Errorf("%w:please check update component conditions", errcode.ErrUpdateRowZero) } return -1, fmt.Errorf("query component info failed:%w", err) } @@ -54,7 +54,7 @@ func UpdateComponentIntoDB(ctx context.Context, tx *gorm.DB, componentInfo netwo if result.Error != nil || result.RowsAffected == 0 { err := result.Error if result.RowsAffected == 0 { - err = fmt.Errorf("%w:please check update component conditions", constant.ErrUpdateRowZero) + err = fmt.Errorf("%w:please check update component conditions", errcode.ErrUpdateRowZero) } return -1, fmt.Errorf("update component info failed:%w", err) } diff --git a/database/update_model_info.go b/database/update_model_info.go index 89fe21a..627f081 100644 --- a/database/update_model_info.go +++ b/database/update_model_info.go @@ -6,7 +6,7 @@ import ( "fmt" "time" - "modelRT/constant" + "modelRT/common/errcode" "modelRT/model" jsoniter "github.com/json-iterator/go" @@ -33,7 +33,7 @@ func UpdateModelIntoDB(ctx context.Context, tx *gorm.DB, componentID int64, comp if result.Error != nil || result.RowsAffected == 0 { err := result.Error if result.RowsAffected == 0 { - err = fmt.Errorf("%w:please check where 
conditions", constant.ErrUpdateRowZero) + err = fmt.Errorf("%w:please check where conditions", errcode.ErrUpdateRowZero) } return err } diff --git a/database/update_topologic.go b/database/update_topologic.go index c077d31..009b3ce 100644 --- a/database/update_topologic.go +++ b/database/update_topologic.go @@ -6,7 +6,8 @@ import ( "fmt" "time" - "modelRT/constant" + "modelRT/common/errcode" + constants "modelRT/constant" "modelRT/network" "modelRT/orm" @@ -21,9 +22,9 @@ func UpdateTopologicIntoDB(ctx context.Context, tx *gorm.DB, pageID int64, chang defer cancel() switch changeInfo.ChangeType { - case constant.UUIDFromChangeType: + case constants.UUIDFromChangeType: result = tx.WithContext(cancelCtx).Model(&orm.Topologic{}).Where("page_id = ? and uuid_from = ? and uuid_to = ?", pageID, changeInfo.OldUUIDFrom, changeInfo.OldUUIDTo).Updates(orm.Topologic{UUIDFrom: changeInfo.NewUUIDFrom}) - case constant.UUIDToChangeType: + case constants.UUIDToChangeType: var delTopologic orm.Topologic result = tx.WithContext(cancelCtx).Model(&orm.Topologic{}).Where("page_id = ? and uuid_to = ?", pageID, changeInfo.NewUUIDTo).Find(&delTopologic) @@ -38,14 +39,14 @@ func UpdateTopologicIntoDB(ctx context.Context, tx *gorm.DB, pageID int64, chang if result.Error != nil || result.RowsAffected == 0 { err := result.Error if result.RowsAffected == 0 { - err = fmt.Errorf("%w:please check delete topologic where conditions", constant.ErrDeleteRowZero) + err = fmt.Errorf("%w:please check delete topologic where conditions", errcode.ErrDeleteRowZero) } return fmt.Errorf("del old topologic link by new_uuid_to failed:%w", err) } } result = tx.WithContext(cancelCtx).Model(&orm.Topologic{}).Where("page_id = ? and uuid_from = ? and uuid_to = ?", pageID, changeInfo.OldUUIDFrom, changeInfo.OldUUIDTo).Updates(&orm.Topologic{UUIDTo: changeInfo.NewUUIDTo}) - case constant.UUIDAddChangeType: + case constants.UUIDAddChangeType: topologic := orm.Topologic{ Flag: changeInfo.Flag, UUIDFrom: changeInfo.NewUUIDFrom, @@ -60,7 +61,7 @@ func UpdateTopologicIntoDB(ctx context.Context, tx *gorm.DB, pageID int64, chang if result.Error != nil || result.RowsAffected == 0 { err := result.Error if result.RowsAffected == 0 { - err = fmt.Errorf("%w:please check update topologic where conditions", constant.ErrUpdateRowZero) + err = fmt.Errorf("%w:please check update topologic where conditions", errcode.ErrUpdateRowZero) } return fmt.Errorf("insert or update topologic link failed:%w", err) } diff --git a/diagram/graph.go b/diagram/graph.go index 3affcc9..e540e33 100644 --- a/diagram/graph.go +++ b/diagram/graph.go @@ -5,7 +5,7 @@ import ( "fmt" "sync" - "modelRT/constant" + constants "modelRT/constant" "modelRT/network" "github.com/gofrs/uuid" @@ -148,7 +148,7 @@ func (g *Graph) PrintGraph() { // UpdateEdge update edge link info between two verticeLinks func (g *Graph) UpdateEdge(changeInfo network.TopologicUUIDChangeInfos) error { - if changeInfo.ChangeType == constant.UUIDFromChangeType || changeInfo.ChangeType == constant.UUIDToChangeType { + if changeInfo.ChangeType == constants.UUIDFromChangeType || changeInfo.ChangeType == constants.UUIDToChangeType { g.DelEdge(changeInfo.OldUUIDFrom, changeInfo.OldUUIDTo) g.AddEdge(changeInfo.NewUUIDFrom, changeInfo.NewUUIDTo) } else { diff --git a/distributedlock/constant/lock_err.go b/distributedlock/constant/lock_err.go index fd831f2..f8aebf0 100644 --- a/distributedlock/constant/lock_err.go +++ b/distributedlock/constant/lock_err.go @@ -1,4 +1,4 @@ -package constant +package constants import "errors" 
diff --git a/distributedlock/constant/redis_result.go b/distributedlock/constant/redis_result.go index 18eb21d..d942faf 100644 --- a/distributedlock/constant/redis_result.go +++ b/distributedlock/constant/redis_result.go @@ -1,4 +1,4 @@ -package constant +package constants import ( "fmt" diff --git a/distributedlock/redis_lock.go b/distributedlock/redis_lock.go index 53f9dab..2234574 100644 --- a/distributedlock/redis_lock.go +++ b/distributedlock/redis_lock.go @@ -8,7 +8,7 @@ import ( "sync" "time" - "modelRT/distributedlock/constant" + constants "modelRT/distributedlock/constant" luascript "modelRT/distributedlock/luascript" "modelRT/logger" @@ -49,13 +49,13 @@ func (rl *redissionLocker) Lock(ctx context.Context, timeout ...time.Duration) e if rl.refreshExitChan == nil { rl.refreshExitChan = make(chan struct{}) } - result := rl.tryLock(ctx).(*constant.RedisResult) - if result.Code == constant.UnknownInternalError { + result := rl.tryLock(ctx).(*constants.RedisResult) + if result.Code == constants.UnknownInternalError { logger.Error(ctx, result.OutputResultMessage()) return fmt.Errorf("get lock failed:%w", result) } - if (result.Code == constant.LockSuccess) && rl.needRefresh { + if (result.Code == constants.LockSuccess) && rl.needRefresh { rl.refreshOnce.Do(func() { // async refresh lock timeout unitl receive exit singal go rl.refreshLockTimeout(ctx) @@ -80,13 +80,13 @@ func (rl *redissionLocker) Lock(ctx context.Context, timeout ...time.Duration) e return err } - resultErr := rl.tryLock(ctx).(*constant.RedisResult) - if (resultErr.Code == constant.LockFailure) || (resultErr.Code == constant.UnknownInternalError) { + resultErr := rl.tryLock(ctx).(*constants.RedisResult) + if (resultErr.Code == constants.LockFailure) || (resultErr.Code == constants.UnknownInternalError) { logger.Info(ctx, resultErr.OutputResultMessage()) continue } - if resultErr.Code == constant.LockSuccess { + if resultErr.Code == constants.LockSuccess { logger.Info(ctx, resultErr.OutputResultMessage()) return nil } @@ -142,12 +142,12 @@ func (rl *redissionLocker) refreshLockTimeout(ctx context.Context) { return } - if constant.RedisCode(val) == constant.RefreshLockFailure { + if constants.RedisCode(val) == constants.RefreshLockFailure { logger.Error(ctx, "lock refreash failed,can not find the lock by key and token", "token", rl.Token, "key", rl.Key) break } - if constant.RedisCode(val) == constant.RefreshLockSuccess { + if constants.RedisCode(val) == constants.RefreshLockSuccess { logger.Info(ctx, "lock refresh success by key and token", "token", rl.Token, "key", rl.Key) } timer.Reset(lockTime) @@ -183,13 +183,13 @@ ARGV[1]:锁的过期时间(lockLeaseTime),单位为秒。 ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。 */ func (rl *redissionLocker) tryLock(ctx context.Context) error { - lockType := constant.LockType + lockType := constants.LockType res := rl.client.Eval(ctx, luascript.LockScript, []string{rl.Key}, rl.lockLeaseTime, rl.Token) val, err := res.Int() if err != redis.Nil && err != nil { - return constant.NewRedisResult(constant.UnknownInternalError, lockType, err.Error()) + return constants.NewRedisResult(constants.UnknownInternalError, lockType, err.Error()) } - return constant.NewRedisResult(constant.RedisCode(val), lockType, "") + return constants.NewRedisResult(constants.RedisCode(val), lockType, "") } /* @@ -203,10 +203,10 @@ func (rl *redissionLocker) UnLock(ctx context.Context) error { val, err := res.Int() if err != redis.Nil && err != nil { logger.Info(ctx, "unlock lock failed", zap.String("token", rl.Token), zap.String("key", 
rl.Key), zap.Error(err)) - return fmt.Errorf("unlock lock failed:%w", constant.NewRedisResult(constant.UnknownInternalError, constant.UnLockType, err.Error())) + return fmt.Errorf("unlock lock failed:%w", constants.NewRedisResult(constants.UnknownInternalError, constants.UnLockType, err.Error())) } - if constant.RedisCode(val) == constant.UnLockSuccess { + if constants.RedisCode(val) == constants.UnLockSuccess { if rl.needRefresh { rl.cancelRefreshLockTime() } @@ -215,9 +215,9 @@ func (rl *redissionLocker) UnLock(ctx context.Context) error { return nil } - if constant.RedisCode(val) == constant.UnLocakFailureWithLockOccupancy { + if constants.RedisCode(val) == constants.UnLocakFailureWithLockOccupancy { logger.Info(ctx, "unlock lock failed", zap.String("token", rl.Token), zap.String("key", rl.Key)) - return fmt.Errorf("unlock lock failed:%w", constant.NewRedisResult(constant.UnLocakFailureWithLockOccupancy, constant.UnLockType, "")) + return fmt.Errorf("unlock lock failed:%w", constants.NewRedisResult(constants.UnLocakFailureWithLockOccupancy, constants.UnLockType, "")) } return nil } diff --git a/distributedlock/redis_rwlock.go b/distributedlock/redis_rwlock.go index bd06afc..3dbfdc6 100644 --- a/distributedlock/redis_rwlock.go +++ b/distributedlock/redis_rwlock.go @@ -8,7 +8,7 @@ import ( "sync" "time" - "modelRT/distributedlock/constant" + constants "modelRT/distributedlock/constant" "modelRT/distributedlock/luascript" "modelRT/logger" @@ -24,13 +24,13 @@ type RedissionRWLocker struct { } func (rl *RedissionRWLocker) RLock(ctx context.Context, timeout ...time.Duration) error { - result := rl.tryRLock(ctx).(*constant.RedisResult) - if result.Code == constant.UnknownInternalError { + result := rl.tryRLock(ctx).(*constants.RedisResult) + if result.Code == constants.UnknownInternalError { logger.Error(ctx, result.OutputResultMessage()) return fmt.Errorf("get read lock failed:%w", result) } - if result.Code == constant.LockSuccess { + if result.Code == constants.LockSuccess { if rl.needRefresh { rl.refreshOnce.Do(func() { if rl.refreshExitChan == nil { @@ -64,13 +64,13 @@ func (rl *RedissionRWLocker) RLock(ctx context.Context, timeout ...time.Duration return err } - result := rl.tryRLock(ctx).(*constant.RedisResult) - if (result.Code == constant.RLockFailureWithWLockOccupancy) || (result.Code == constant.UnknownInternalError) { + result := rl.tryRLock(ctx).(*constants.RedisResult) + if (result.Code == constants.RLockFailureWithWLockOccupancy) || (result.Code == constants.UnknownInternalError) { logger.Info(ctx, result.OutputResultMessage()) continue } - if result.Code == constant.LockSuccess { + if result.Code == constants.LockSuccess { logger.Info(ctx, result.OutputResultMessage()) rl.closeSub(ctx, sub, rl.subExitChan) @@ -90,7 +90,7 @@ func (rl *RedissionRWLocker) RLock(ctx context.Context, timeout ...time.Duration logger.Info(ctx, "the waiting time for obtaining the read lock operation has timed out") rl.closeSub(ctx, sub, rl.subExitChan) // after acquire lock timeout,notice the sub channel to close - return constant.AcquireTimeoutErr + return constants.AcquireTimeoutErr } } } @@ -98,14 +98,14 @@ func (rl *RedissionRWLocker) RLock(ctx context.Context, timeout ...time.Duration } func (rl *RedissionRWLocker) tryRLock(ctx context.Context) error { - lockType := constant.LockType + lockType := constants.LockType res := rl.client.Eval(ctx, luascript.RLockScript, []string{rl.Key, rl.RWTokenTimeoutPrefix}, rl.lockLeaseTime, rl.Token) val, err := res.Int() if err != redis.Nil && err != nil { - 
return constant.NewRedisResult(constant.UnknownInternalError, lockType, err.Error()) + return constants.NewRedisResult(constants.UnknownInternalError, lockType, err.Error()) } - return constant.NewRedisResult(constant.RedisCode(val), lockType, "") + return constants.NewRedisResult(constants.RedisCode(val), lockType, "") } func (rl *RedissionRWLocker) refreshLockTimeout(ctx context.Context) { @@ -126,12 +126,12 @@ func (rl *RedissionRWLocker) refreshLockTimeout(ctx context.Context) { return } - if constant.RedisCode(val) == constant.RefreshLockFailure { + if constants.RedisCode(val) == constants.RefreshLockFailure { logger.Error(ctx, "lock refreash failed,can not find the read lock by key and token", "rwTokenPrefix", rl.RWTokenTimeoutPrefix, "token", rl.Token, "key", rl.Key) return } - if constant.RedisCode(val) == constant.RefreshLockSuccess { + if constants.RedisCode(val) == constants.RefreshLockSuccess { logger.Info(ctx, "lock refresh success by key and token", "token", rl.Token, "key", rl.Key) } timer.Reset(lockTime) @@ -147,11 +147,11 @@ func (rl *RedissionRWLocker) UnRLock(ctx context.Context) error { val, err := res.Int() if err != redis.Nil && err != nil { logger.Info(ctx, "unlock read lock failed", "token", rl.Token, "key", rl.Key, "error", err) - return fmt.Errorf("unlock read lock failed:%w", constant.NewRedisResult(constant.UnknownInternalError, constant.UnRLockType, err.Error())) + return fmt.Errorf("unlock read lock failed:%w", constants.NewRedisResult(constants.UnknownInternalError, constants.UnRLockType, err.Error())) } - if (constant.RedisCode(val) == constant.UnLockSuccess) || (constant.RedisCode(val) == constant.UnRLockSuccess) { - if rl.needRefresh && (constant.RedisCode(val) == constant.UnLockSuccess) { + if (constants.RedisCode(val) == constants.UnLockSuccess) || (constants.RedisCode(val) == constants.UnRLockSuccess) { + if rl.needRefresh && (constants.RedisCode(val) == constants.UnLockSuccess) { rl.cancelRefreshLockTime() } @@ -159,21 +159,21 @@ func (rl *RedissionRWLocker) UnRLock(ctx context.Context) error { return nil } - if constant.RedisCode(val) == constant.UnRLockFailureWithWLockOccupancy { + if constants.RedisCode(val) == constants.UnRLockFailureWithWLockOccupancy { logger.Info(ctx, "unlock read lock failed", "token", rl.Token, "key", rl.Key) - return fmt.Errorf("unlock read lock failed:%w", constant.NewRedisResult(constant.UnRLockFailureWithWLockOccupancy, constant.UnRLockType, "")) + return fmt.Errorf("unlock read lock failed:%w", constants.NewRedisResult(constants.UnRLockFailureWithWLockOccupancy, constants.UnRLockType, "")) } return nil } func (rl *RedissionRWLocker) WLock(ctx context.Context, timeout ...time.Duration) error { - result := rl.tryWLock(ctx).(*constant.RedisResult) - if result.Code == constant.UnknownInternalError { + result := rl.tryWLock(ctx).(*constants.RedisResult) + if result.Code == constants.UnknownInternalError { logger.Error(ctx, result.OutputResultMessage()) return fmt.Errorf("get write lock failed:%w", result) } - if result.Code == constant.LockSuccess { + if result.Code == constants.LockSuccess { if rl.needRefresh { rl.refreshOnce.Do(func() { if rl.refreshExitChan == nil { @@ -207,13 +207,13 @@ func (rl *RedissionRWLocker) WLock(ctx context.Context, timeout ...time.Duration return err } - result := rl.tryWLock(ctx).(*constant.RedisResult) - if (result.Code == constant.UnknownInternalError) || (result.Code == constant.WLockFailureWithRLockOccupancy) || (result.Code == constant.WLockFailureWithWLockOccupancy) || (result.Code == 
constant.WLockFailureWithNotFirstPriority) { + result := rl.tryWLock(ctx).(*constants.RedisResult) + if (result.Code == constants.UnknownInternalError) || (result.Code == constants.WLockFailureWithRLockOccupancy) || (result.Code == constants.WLockFailureWithWLockOccupancy) || (result.Code == constants.WLockFailureWithNotFirstPriority) { logger.Info(ctx, result.OutputResultMessage()) continue } - if result.Code == constant.LockSuccess { + if result.Code == constants.LockSuccess { logger.Info(ctx, result.OutputResultMessage()) rl.closeSub(ctx, sub, rl.subExitChan) @@ -233,7 +233,7 @@ func (rl *RedissionRWLocker) WLock(ctx context.Context, timeout ...time.Duration logger.Info(ctx, "the waiting time for obtaining the write lock operation has timed out") rl.closeSub(ctx, sub, rl.subExitChan) // after acquire lock timeout,notice the sub channel to close - return constant.AcquireTimeoutErr + return constants.AcquireTimeoutErr } } } @@ -241,14 +241,14 @@ func (rl *RedissionRWLocker) WLock(ctx context.Context, timeout ...time.Duration } func (rl *RedissionRWLocker) tryWLock(ctx context.Context) error { - lockType := constant.LockType + lockType := constants.LockType res := rl.client.Eval(ctx, luascript.WLockScript, []string{rl.Key, rl.RWTokenTimeoutPrefix}, rl.lockLeaseTime, rl.Token) val, err := res.Int() if err != redis.Nil && err != nil { - return constant.NewRedisResult(constant.UnknownInternalError, lockType, err.Error()) + return constants.NewRedisResult(constants.UnknownInternalError, lockType, err.Error()) } - return constant.NewRedisResult(constant.RedisCode(val), lockType, "") + return constants.NewRedisResult(constants.RedisCode(val), lockType, "") } func (rl *RedissionRWLocker) UnWLock(ctx context.Context) error { @@ -256,20 +256,20 @@ func (rl *RedissionRWLocker) UnWLock(ctx context.Context) error { val, err := res.Int() if err != redis.Nil && err != nil { logger.Error(ctx, "unlock write lock failed", "token", rl.Token, "key", rl.Key, "error", err) - return fmt.Errorf("unlock write lock failed:%w", constant.NewRedisResult(constant.UnknownInternalError, constant.UnWLockType, err.Error())) + return fmt.Errorf("unlock write lock failed:%w", constants.NewRedisResult(constants.UnknownInternalError, constants.UnWLockType, err.Error())) } - if (constant.RedisCode(val) == constant.UnLockSuccess) || constant.RedisCode(val) == constant.UnWLockSuccess { - if rl.needRefresh && (constant.RedisCode(val) == constant.UnLockSuccess) { + if (constants.RedisCode(val) == constants.UnLockSuccess) || constants.RedisCode(val) == constants.UnWLockSuccess { + if rl.needRefresh && (constants.RedisCode(val) == constants.UnLockSuccess) { rl.cancelRefreshLockTime() } logger.Info(ctx, "unlock write lock success", "token", rl.Token, "key", rl.Key) return nil } - if (constant.RedisCode(val) == constant.UnWLockFailureWithRLockOccupancy) || (constant.RedisCode(val) == constant.UnWLockFailureWithWLockOccupancy) { + if (constants.RedisCode(val) == constants.UnWLockFailureWithRLockOccupancy) || (constants.RedisCode(val) == constants.UnWLockFailureWithWLockOccupancy) { logger.Info(ctx, "unlock write lock failed", "token", rl.Token, "key", rl.Key) - return fmt.Errorf("unlock write lock failed:%w", constant.NewRedisResult(constant.RedisCode(val), constant.UnWLockType, "")) + return fmt.Errorf("unlock write lock failed:%w", constants.NewRedisResult(constants.RedisCode(val), constants.UnWLockType, "")) } return nil } diff --git a/handler/alert_event_query.go b/handler/alert_event_query.go index bacde4f..1ddadc6 100644 --- 
a/handler/alert_event_query.go +++ b/handler/alert_event_query.go @@ -6,7 +6,7 @@ import ( "strconv" "modelRT/alert" - "modelRT/constant" + constants "modelRT/constant" "modelRT/logger" "modelRT/network" @@ -15,7 +15,7 @@ import ( // QueryAlertEventHandler define query alert event process API func QueryAlertEventHandler(c *gin.Context) { - var targetLevel constant.AlertLevel + var targetLevel constants.AlertLevel alertManger := alert.GetAlertMangerInstance() levelStr := c.Query("level") @@ -29,7 +29,7 @@ func QueryAlertEventHandler(c *gin.Context) { } c.JSON(http.StatusOK, resp) } - targetLevel = constant.AlertLevel(level) + targetLevel = constants.AlertLevel(level) events := alertManger.GetRangeEventsByLevel(targetLevel) resp := network.SuccessResponse{ diff --git a/handler/anchor_point_replace.go b/handler/anchor_point_replace.go index 90401ee..2b3956e 100644 --- a/handler/anchor_point_replace.go +++ b/handler/anchor_point_replace.go @@ -7,7 +7,8 @@ import ( "net/http" "time" - "modelRT/constant" + "modelRT/common/errcode" + constants "modelRT/constant" "modelRT/database" "modelRT/diagram" "modelRT/logger" @@ -54,7 +55,7 @@ func ComponentAnchorReplaceHandler(c *gin.Context) { } if result.RowsAffected == 0 { - err := fmt.Errorf("query component detail info by uuid failed:%w", constant.ErrQueryRowZero) + err := fmt.Errorf("query component detail info by uuid failed:%w", errcode.ErrQueryRowZero) logger.Error(c, "query component detail info from table is empty", "table_name", "component") resp := network.FailureResponse{ @@ -82,7 +83,7 @@ func ComponentAnchorReplaceHandler(c *gin.Context) { } if unmarshalMap == nil { - err := fmt.Errorf("query model detail info by uuid failed:%w", constant.ErrQueryRowZero) + err := fmt.Errorf("query model detail info by uuid failed:%w", errcode.ErrQueryRowZero) logger.Error(c, "query model detail info from table is empty", "table_name", tableName) resp := network.FailureResponse{ @@ -94,7 +95,7 @@ func ComponentAnchorReplaceHandler(c *gin.Context) { } componentType := unmarshalMap["component_type"].(int) - if componentType != constant.DemoType { + if componentType != constants.DemoType { logger.Error(c, "can not process real time data of component type not equal DemoType", "component_id", componentInfo.ID) } diagram.UpdateAnchorValue(componentInfo.ID, anchorName) diff --git a/handler/circuit_diagram_delete.go b/handler/circuit_diagram_delete.go index 70031ed..73c3151 100644 --- a/handler/circuit_diagram_delete.go +++ b/handler/circuit_diagram_delete.go @@ -7,7 +7,7 @@ import ( "net/http" "time" - "modelRT/constant" + "modelRT/common/errcode" "modelRT/database" "modelRT/diagram" "modelRT/logger" @@ -155,7 +155,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) { err := result.Error if result.RowsAffected == 0 { - err = fmt.Errorf("%w:please check uuid conditions", constant.ErrDeleteRowZero) + err = fmt.Errorf("%w:please check uuid conditions", errcode.ErrDeleteRowZero) } logger.Error(c, "query component info into postgresDB failed", "component_global_uuid", componentInfo.UUID, "error", err) @@ -177,7 +177,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) { err := result.Error if result.RowsAffected == 0 { - err = fmt.Errorf("%w:please check uuid conditions", constant.ErrDeleteRowZero) + err = fmt.Errorf("%w:please check uuid conditions", errcode.ErrDeleteRowZero) } logger.Error(c, "delete component info into postgresDB failed", "component_global_uuid", componentInfo.UUID, "error", err) @@ -201,7 +201,7 @@ func CircuitDiagramDeleteHandler(c 
*gin.Context) { err := result.Error if result.RowsAffected == 0 { - err = fmt.Errorf("%w:please check uuid conditions", constant.ErrDeleteRowZero) + err = fmt.Errorf("%w:please check uuid conditions", errcode.ErrDeleteRowZero) } msg := fmt.Sprintf("delete component info from table %s failed", modelStruct.ReturnTableName()) diff --git a/handler/real_time_data_query.go b/handler/real_time_data_query.go index 8d72760..5c4ed3c 100644 --- a/handler/real_time_data_query.go +++ b/handler/real_time_data_query.go @@ -6,7 +6,7 @@ import ( "strconv" "modelRT/alert" - "modelRT/constant" + constants "modelRT/constant" "modelRT/logger" "modelRT/network" @@ -15,7 +15,7 @@ import ( // QueryRealTimeDataHandler define query real time data process API func QueryRealTimeDataHandler(c *gin.Context) { - var targetLevel constant.AlertLevel + var targetLevel constants.AlertLevel alertManger := alert.GetAlertMangerInstance() @@ -30,7 +30,7 @@ func QueryRealTimeDataHandler(c *gin.Context) { } c.JSON(http.StatusOK, resp) } - targetLevel = constant.AlertLevel(level) + targetLevel = constants.AlertLevel(level) events := alertManger.GetRangeEventsByLevel(targetLevel) resp := network.SuccessResponse{ diff --git a/handler/real_time_data_receive.go b/handler/real_time_data_receive.go index 3ba9318..b2412fb 100644 --- a/handler/real_time_data_receive.go +++ b/handler/real_time_data_receive.go @@ -3,12 +3,11 @@ package handler import ( "modelRT/logger" "modelRT/network" + realtimedata "modelRT/real-time-data" "github.com/gin-gonic/gin" "github.com/gorilla/websocket" jsoniter "github.com/json-iterator/go" - - realtimedata "modelRT/real-time-data" ) var upgrader = websocket.Upgrader{ diff --git a/logger/zap.go b/logger/zap.go index 18270a6..f46c315 100644 --- a/logger/zap.go +++ b/logger/zap.go @@ -6,7 +6,7 @@ import ( "sync" "modelRT/config" - "modelRT/constant" + constants "modelRT/constant" "github.com/natefinch/lumberjack" "go.uber.org/zap" @@ -41,7 +41,7 @@ func getLogWriter(mode, filename string, maxsize, maxBackup, maxAge int, compres } syncConsole := zapcore.AddSync(os.Stderr) - if mode == constant.DevelopmentLogMode { + if mode == constants.DevelopmentLogMode { return syncConsole } diff --git a/model/model_select.go b/model/model_select.go index b860a61..d6eb7ef 100644 --- a/model/model_select.go +++ b/model/model_select.go @@ -2,17 +2,17 @@ package model import ( - "modelRT/constant" + constants "modelRT/constant" "modelRT/orm" ) // SelectModelByType define select the data structure for parsing based on the input model type func SelectModelByType(modelType int) BasicModelInterface { - if modelType == constant.BusbarType { + if modelType == constants.BusbarType { return &orm.BusbarSection{} - } else if modelType == constant.AsyncMotorType { + } else if modelType == constants.AsyncMotorType { return &orm.AsyncMotor{} - } else if modelType == constant.DemoType { + } else if modelType == constants.DemoType { return &orm.Demo{} } return nil diff --git a/network/circuit_diagram_update_request.go b/network/circuit_diagram_update_request.go index 2b3e3ba..1a6804c 100644 --- a/network/circuit_diagram_update_request.go +++ b/network/circuit_diagram_update_request.go @@ -4,7 +4,8 @@ package network import ( "fmt" - "modelRT/constant" + "modelRT/common/errcode" + constants "modelRT/constant" "github.com/gofrs/uuid" ) @@ -61,12 +62,12 @@ func ParseUUID(info TopologicChangeInfo) (TopologicUUIDChangeInfos, error) { UUIDChangeInfo.ChangeType = info.ChangeType switch info.ChangeType { - case constant.UUIDFromChangeType: + case 
constants.UUIDFromChangeType: if info.NewUUIDFrom == info.OldUUIDFrom { - return UUIDChangeInfo, fmt.Errorf("topologic change data check failed:%w", constant.ErrUUIDFromCheckT1) + return UUIDChangeInfo, fmt.Errorf("topologic change data check failed:%w", constants.ErrUUIDFromCheckT1) } if info.NewUUIDTo != info.OldUUIDTo { - return UUIDChangeInfo, fmt.Errorf("topologic change data check failed:%w", constant.ErrUUIDToCheckT1) + return UUIDChangeInfo, fmt.Errorf("topologic change data check failed:%w", constants.ErrUUIDToCheckT1) } oldUUIDFrom, err := uuid.FromString(info.OldUUIDFrom) @@ -87,12 +88,12 @@ func ParseUUID(info TopologicChangeInfo) (TopologicUUIDChangeInfos, error) { } UUIDChangeInfo.OldUUIDTo = OldUUIDTo UUIDChangeInfo.NewUUIDTo = OldUUIDTo - case constant.UUIDToChangeType: + case constants.UUIDToChangeType: if info.NewUUIDFrom != info.OldUUIDFrom { - return UUIDChangeInfo, fmt.Errorf("topologic change data check failed:%w", constant.ErrUUIDFromCheckT2) + return UUIDChangeInfo, fmt.Errorf("topologic change data check failed:%w", constants.ErrUUIDFromCheckT2) } if info.NewUUIDTo == info.OldUUIDTo { - return UUIDChangeInfo, fmt.Errorf("topologic change data check failed:%w", constant.ErrUUIDToCheckT2) + return UUIDChangeInfo, fmt.Errorf("topologic change data check failed:%w", constants.ErrUUIDToCheckT2) } oldUUIDFrom, err := uuid.FromString(info.OldUUIDFrom) @@ -113,12 +114,12 @@ func ParseUUID(info TopologicChangeInfo) (TopologicUUIDChangeInfos, error) { return UUIDChangeInfo, fmt.Errorf("convert data from string type to uuid type failed,new uuid_to value:%s", info.NewUUIDTo) } UUIDChangeInfo.NewUUIDTo = newUUIDTo - case constant.UUIDAddChangeType: + case constants.UUIDAddChangeType: if info.OldUUIDFrom != "" { - return UUIDChangeInfo, fmt.Errorf("topologic change data check failed:%w", constant.ErrUUIDFromCheckT3) + return UUIDChangeInfo, fmt.Errorf("topologic change data check failed:%w", constants.ErrUUIDFromCheckT3) } if info.OldUUIDTo != "" { - return UUIDChangeInfo, fmt.Errorf("topologic change data check failed:%w", constant.ErrUUIDToCheckT3) + return UUIDChangeInfo, fmt.Errorf("topologic change data check failed:%w", constants.ErrUUIDToCheckT3) } newUUIDFrom, err := uuid.FromString(info.NewUUIDFrom) @@ -133,7 +134,7 @@ func ParseUUID(info TopologicChangeInfo) (TopologicUUIDChangeInfos, error) { } UUIDChangeInfo.NewUUIDTo = newUUIDTo default: - return UUIDChangeInfo, constant.ErrUUIDChangeType + return UUIDChangeInfo, errcode.ErrUUIDChangeType } UUIDChangeInfo.Flag = info.Flag UUIDChangeInfo.Comment = info.Comment diff --git a/pool/concurrency_anchor_parse.go b/pool/concurrency_anchor_parse.go index 26f8963..59411e3 100644 --- a/pool/concurrency_anchor_parse.go +++ b/pool/concurrency_anchor_parse.go @@ -7,7 +7,7 @@ import ( "modelRT/alert" "modelRT/config" - "modelRT/constant" + constants "modelRT/constant" "modelRT/diagram" "modelRT/logger" @@ -72,7 +72,7 @@ var AnchorFunc = func(poolConfig interface{}) { event := alert.Event{ ComponentID: componentID, AnchorName: anchorName, - Level: constant.InfoAlertLevel, + Level: constants.InfoAlertLevel, Message: message, StartTime: time.Now().Unix(), } diff --git a/real-time-data/real_time_data_receive.go b/real-time-data/real_time_data_receive.go index 74e75bd..61bc2f5 100644 --- a/real-time-data/real_time_data_receive.go +++ b/real-time-data/real_time_data_receive.go @@ -5,7 +5,7 @@ import ( "context" "modelRT/config" - "modelRT/constant" + constants "modelRT/constant" "modelRT/diagram" "modelRT/logger" "modelRT/network" @@ 
-35,7 +35,7 @@ func ReceiveChan(ctx context.Context) { } componentType := component["component_type"].(int) - if componentType != constant.DemoType { + if componentType != constants.DemoType { logger.Error(ctx, "can not process real time data of component type not equal DemoType", "component_id", componentID) continue } diff --git a/test/distributedlock/rwlock_test.go b/test/distributedlock/rwlock_test.go index b3b0dee..e81936c 100644 --- a/test/distributedlock/rwlock_test.go +++ b/test/distributedlock/rwlock_test.go @@ -7,7 +7,7 @@ import ( "time" dl "modelRT/distributedlock" - "modelRT/distributedlock/constant" + constants "modelRT/distributedlock/constant" "github.com/redis/go-redis/v9" "github.com/stretchr/testify/assert" @@ -450,7 +450,7 @@ func TestRWLock2CWithRLockAndWLockFailed(t *testing.T) { // locker2加写锁锁 duration = 10 * time.Second err = rwLocker2.WLock(ctx, duration) - assert.Equal(t, constant.AcquireTimeoutErr, err) + assert.Equal(t, constants.AcquireTimeoutErr, err) err = rwLocker1.UnRLock(ctx) assert.Equal(t, nil, err) From a70f77464ca02d17db74baf7a973e07342acad70 Mon Sep 17 00:00:00 2001 From: douxu Date: Mon, 23 Jun 2025 16:00:48 +0800 Subject: [PATCH 32/33] refactor(gorm-logger): 1. add gorm logger in gorm config 2.use faced func in gorm logger --- database/postgres_init.go | 4 ++- logger/gorm_logger.go | 61 +++++++++++++++++++++++++++++++++++++++ main.go | 8 +++-- 3 files changed, 69 insertions(+), 4 deletions(-) create mode 100644 logger/gorm_logger.go diff --git a/database/postgres_init.go b/database/postgres_init.go index d4edcc8..a80d9fc 100644 --- a/database/postgres_init.go +++ b/database/postgres_init.go @@ -6,6 +6,8 @@ import ( "sync" "time" + "modelRT/logger" + "gorm.io/driver/postgres" "gorm.io/gorm" ) @@ -36,7 +38,7 @@ func InitPostgresDBInstance(ctx context.Context, PostgresDBURI string) *gorm.DB func initPostgresDBClient(ctx context.Context, PostgresDBURI string) *gorm.DB { ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() - db, err := gorm.Open(postgres.Open(PostgresDBURI), &gorm.Config{}) + db, err := gorm.Open(postgres.Open(PostgresDBURI), &gorm.Config{Logger: logger.NewGormLogger()}) if err != nil { panic(err) } diff --git a/logger/gorm_logger.go b/logger/gorm_logger.go new file mode 100644 index 0000000..511d677 --- /dev/null +++ b/logger/gorm_logger.go @@ -0,0 +1,61 @@ +// Package logger define log struct of modelRT project +package logger + +import ( + "context" + "errors" + "time" + + "gorm.io/gorm" + gormLogger "gorm.io/gorm/logger" +) + +// GormLogger define struct for implementing gormLogger.Interface +type GormLogger struct { + SlowThreshold time.Duration +} + +// NewGormLogger define func for init GormLogger +func NewGormLogger() *GormLogger { + return &GormLogger{ + SlowThreshold: 500 * time.Millisecond, + } +} + +// LogMode define func for implementing gormLogger.Interface +func (l *GormLogger) LogMode(_ gormLogger.LogLevel) gormLogger.Interface { + return &GormLogger{} +} + +// Info define func for implementing gormLogger.Interface +func (l *GormLogger) Info(ctx context.Context, msg string, data ...any) { + Info(ctx, msg, "data", data) +} + +// Warn define func for implementing gormLogger.Interface +func (l *GormLogger) Warn(ctx context.Context, msg string, data ...any) { + Warn(ctx, msg, "data", data) +} + +// Error define func for implementing gormLogger.Interface +func (l *GormLogger) Error(ctx context.Context, msg string, data ...any) { + Error(ctx, msg, "data", data) +} + +// Trace define func for implementing 
gormLogger.Interface +func (l *GormLogger) Trace(ctx context.Context, begin time.Time, fc func() (sql string, rowsAffected int64), err error) { + // get SQL running time + duration := time.Since(begin).Milliseconds() + // get gorm exec sql and rows affected + sql, rows := fc() + // gorm error judgment + if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { + Error(ctx, "SQL ERROR", "sql", sql, "rows", rows, "dur(ms)", duration) + } + // slow query judgment + if duration > l.SlowThreshold.Milliseconds() { + Warn(ctx, "SQL SLOW", "sql", sql, "rows", rows, "dur(ms)", duration) + } else { + Debug(ctx, "SQL DEBUG", "sql", sql, "rows", rows, "dur(ms)", duration) + } +} diff --git a/main.go b/main.go index ccc53a2..a058a44 100644 --- a/main.go +++ b/main.go @@ -17,13 +17,12 @@ import ( "modelRT/middleware" "modelRT/pool" - swaggerFiles "github.com/swaggo/files" - ginSwagger "github.com/swaggo/gin-swagger" - realtimedata "modelRT/real-time-data" "github.com/gin-gonic/gin" "github.com/panjf2000/ants/v2" + swaggerFiles "github.com/swaggo/files" + ginSwagger "github.com/swaggo/gin-swagger" "gorm.io/gorm" ) @@ -50,6 +49,9 @@ func main() { flag.Parse() ctx := context.TODO() + // init logger + logger.InitLoggerInstance(modelRTConfig.LoggerConfig) + modelRTConfig = config.ReadAndInitConfig(*modelRTConfigDir, *modelRTConfigName, *modelRTConfigType) // init postgresDBClient postgresDBClient = database.InitPostgresDBInstance(ctx, modelRTConfig.PostgresDBURI) From 65e0c5da928ac2ea3f24f0d629720913db03dc6c Mon Sep 17 00:00:00 2001 From: douxu Date: Thu, 31 Jul 2025 10:31:26 +0800 Subject: [PATCH 33/33] optimize modelRT routing structure --- main.go | 42 +++++++++++++++++++------ middleware/trace.go | 74 +++++++++++++++++++++++++++++++++++++++++++++ router/diagram.go | 17 +++++++++++ router/router.go | 22 ++++++++++++++ 4 files changed, 145 insertions(+), 10 deletions(-) create mode 100644 router/diagram.go create mode 100644 router/router.go diff --git a/main.go b/main.go index a058a44..c2c890c 100644 --- a/main.go +++ b/main.go @@ -4,6 +4,10 @@ package main import ( "context" "flag" + "net/http" + "os" + "os/signal" + "syscall" "time" "modelRT/alert" @@ -16,6 +20,7 @@ import ( "modelRT/logger" "modelRT/middleware" "modelRT/pool" + "modelRT/router" realtimedata "modelRT/real-time-data" @@ -64,9 +69,6 @@ func main() { sqlDB.Close() }() - // init logger - logger.InitLoggerInstance(modelRTConfig.LoggerConfig) - // init alert manager _ = alert.InitAlertEventManager() @@ -120,14 +122,34 @@ func main() { // TODO 暂时屏蔽完成 swagger 启动测试 // go realtimedata.RealTimeDataComputer(ctx, nil, []string{}, "") - engine := gin.Default() - engine.Use(limiter.Middleware) + engine := gin.New() + router.RegisterRoutes(engine) + server := http.Server{ + Addr: ":8080", + Handler: engine, + } - // diagram api - engine.GET("/diagram/load", handler.CircuitDiagramLoadHandler) - engine.POST("/diagram/create", handler.CircuitDiagramCreateHandler) - engine.POST("/diagram/update", handler.CircuitDiagramUpdateHandler) - engine.POST("/diagram/delete", handler.CircuitDiagramDeleteHandler) + // creating a System Signal Receiver + done := make(chan os.Signal, 10) + signal.Notify(done, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-done + if err := server.Shutdown(context.Background()); err != nil { + logger.Error(ctx, "ShutdownServerError", "err", err) + } + }() + + logger.Info(ctx, "Starting ModelRT server...") + err = server.ListenAndServe() + if err != nil { + if err == http.ErrServerClosed { + // the service receives the 
shutdown signal normally and then closes
+			logger.Info(ctx, "Server closed under request")
+		} else {
+			// abnormal shutdown of service
+			logger.Error(ctx, "Server closed unexpected", "err", err)
+		}
+	}
 
 	// real time data api
 	engine.GET("/ws/rtdatas", handler.RealTimeDataReceivehandler)
diff --git a/middleware/trace.go b/middleware/trace.go
index 12b957e..ef2dad9 100644
--- a/middleware/trace.go
+++ b/middleware/trace.go
@@ -1,6 +1,12 @@
 package middleware
 
 import (
+	"bytes"
+	"io"
+	"strings"
+	"time"
+
+	"modelRT/logger"
 	"modelRT/util"
 
 	"github.com/gin-gonic/gin"
@@ -21,3 +27,72 @@ func StartTrace() gin.HandlerFunc {
 		c.Next()
 	}
 }
+
+type bodyLogWriter struct {
+	gin.ResponseWriter
+	body *bytes.Buffer
+}
+
+// Wrap gin.ResponseWriter so response writes can be intercepted:
+// gin writes into bodyLogWriter first, which copies the bytes into its
+// buffer before delegating to the real gin.ResponseWriter, so the
+// access-log middleware can still read the response afterwards.
+// https://stackoverflow.com/questions/38501325/how-to-log-response-body-in-gin
+func (w bodyLogWriter) Write(b []byte) (int, error) {
+	w.body.Write(b)
+	return w.ResponseWriter.Write(b)
+}
+
+// LogAccess define gin middleware func to record access logs for each request
+func LogAccess() gin.HandlerFunc {
+	return func(c *gin.Context) {
+		// save the request body
+		var reqBody []byte
+		contentType := c.GetHeader("Content-Type")
+		// multipart/form-data file-upload requests: do not record the body in the log
+		if !strings.Contains(contentType, "multipart/form-data") {
+			reqBody, _ = io.ReadAll(c.Request.Body)
+			c.Request.Body = io.NopCloser(bytes.NewReader(reqBody))
+
+			// var request map[string]interface{}
+			// if err := c.ShouldBindBodyWith(&request, binding.JSON); err != nil {
+			//	c.JSON(400, gin.H{"error": err.Error()})
+			//	return
+			// }
+		}
+		start := time.Now()
+		blw := &bodyLogWriter{body: bytes.NewBufferString(""), ResponseWriter: c.Writer}
+		c.Writer = blw
+
+		accessLog(c, "access_start", time.Since(start), reqBody, nil)
+		defer func() {
+			var responseLogging string
+			if c.Writer.Size() > 10*1024 { // responses larger than 10KB are not recorded
+				responseLogging = "Response data size is too large to log"
+			} else {
+				responseLogging = blw.body.String()
+			}
+			accessLog(c, "access_end", time.Since(start), reqBody, responseLogging)
+		}()
+		c.Next()
+		return
+	}
+}
+
+func accessLog(c *gin.Context, accessType string, dur time.Duration, body []byte, dataOut interface{}) {
+	req := c.Request
+	bodyStr := string(body)
+	query := req.URL.RawQuery
+	path := req.URL.Path
+	// TODO: once token authentication is implemented, also record the token in the access log
+	// token := c.Request.Header.Get("token")
+	logger.New(c).Info("AccessLog",
+		"type", accessType,
+		"ip", c.ClientIP(),
+		//"token", token,
+		"method", req.Method,
+		"path", path,
+		"query", query,
+		"body", bodyStr,
+		"output", dataOut,
+		"time(ms)", int64(dur/time.Millisecond))
+}
diff --git a/router/diagram.go b/router/diagram.go
new file mode 100644
index 0000000..6bf9451
--- /dev/null
+++ b/router/diagram.go
@@ -0,0 +1,17 @@
+package router
+
+import (
+	"modelRT/handler"
+
+	"github.com/gin-gonic/gin"
+)
+
+// registerDiagramRoutes define func of register diagram routes
+func registerDiagramRoutes(rg *gin.RouterGroup) {
+	g := rg.Group("/diagram/")
+	// TODO add diagram middleware
+	g.GET("load", handler.CircuitDiagramLoadHandler)
+	g.POST("create", handler.CircuitDiagramCreateHandler)
+	g.POST("update", handler.CircuitDiagramUpdateHandler)
+	g.POST("delete", handler.CircuitDiagramDeleteHandler)
+}
diff --git a/router/router.go b/router/router.go
new file mode 100644
index 0000000..3e9b3dc
--- /dev/null
+++ b/router/router.go
@@ -0,0 +1,23 @@
+package router
+
+import (
+	"time"
+
+	"modelRT/middleware"
+
+	"github.com/gin-gonic/gin"
+)
+
+var limiter *middleware.Limiter
+
+func init() {
+	limiter = 
middleware.NewLimiter(10, 1*time.Minute) // set up the rate limiter: allow at most 10 requests per minute
+}
+
+// RegisterRoutes define func of register global middlewares and all route groups
+func RegisterRoutes(engine *gin.Engine) {
+	// use global middlewares
+	engine.Use(middleware.StartTrace(), limiter.Middleware)
+	routeGroup := engine.Group("")
+	registerDiagramRoutes(routeGroup)
+}
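For reference, a minimal sketch of driving the new router package from a bare engine; gin's Routes() accessor is standard, and the paths named in the comments are only what registerDiagramRoutes above mounts:

package main

import (
	"fmt"

	"modelRT/router"

	"github.com/gin-gonic/gin"
)

func main() {
	gin.SetMode(gin.TestMode)
	engine := gin.New()
	// RegisterRoutes attaches the global middlewares (trace + limiter)
	// and mounts every route group, currently just the diagram group.
	router.RegisterRoutes(engine)

	// List what got mounted, e.g. GET /diagram/load, POST /diagram/create.
	for _, r := range engine.Routes() {
		fmt.Println(r.Method, r.Path)
	}
}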