Compare commits

...

148 Commits

Author SHA1 Message Date
douxu 13433f93e5 optimize code of custom error structure 2026-01-20 16:42:54 +08:00
douxu e1886bc347 optimize code of judge whether success or failed return content 2026-01-19 16:39:35 +08:00
douxu ba5e5b3d1c optimize code of constants package 2026-01-16 17:08:28 +08:00
douxu d3b1f0afbe add code of send all target removed system signal in real time data pull api and fix bug of component attribute query api 2026-01-14 17:32:01 +08:00
douxu cf880279e4 optimize real time data query api 2026-01-13 17:23:47 +08:00
douxu 34684bd5f1 fix: eliminate server error code definition conflicts 2026-01-13 11:45:03 +08:00
douxu d75b9a624c optimize handler of compoent attribute query api 2026-01-13 11:39:00 +08:00
douxu cceffa8219 add handler of compoent attribute query api 2026-01-12 17:21:04 +08:00
douxu d1495b7ab8 optimize code of component attribute update api 2026-01-09 17:26:45 +08:00
douxu 60eab0675e optimize code of component attrbute update api 2026-01-08 17:34:44 +08:00
douxu f47e278f85 fix bug of first subscription different interval measurement data 2026-01-07 17:28:09 +08:00
douxu a31bd6f395 add component attribute group update api and optimzie diagram hash set params 2026-01-05 17:20:41 +08:00
douxu 29d0e06c94 add new structure field of measurement table and add new test data of component table and station table 2026-01-04 17:12:00 +08:00
douxu fcf4ef3f7d fix bug of token4-token7 type attribute recommend api 2025-12-31 16:52:40 +08:00
douxu e74bedd47f fix bug of token4-token7 type recommend api 2025-12-31 16:24:27 +08:00
douxu 36e196bedd add nspath filter of recommend api 2025-12-30 16:35:29 +08:00
douxu 941d521328 fix bug of token6 all search result return case 2025-12-29 15:58:59 +08:00
douxu 7969861746 optimize func of cleanup recommend redis cache 2025-12-26 17:10:22 +08:00
douxu 8e4bdfd0e9 add fullpath completion of component attribute group recommend 2025-12-26 12:00:00 +08:00
douxu 42751c1020 optimize the logic for loading the cache of measurement nodes for traversing components 2025-12-25 17:17:20 +08:00
douxu 51f65500f3 add func of init component measurement recommend 2025-12-24 16:55:55 +08:00
douxu 7ea38615b4 unified caching system collection key names 2025-12-24 09:34:03 +08:00
douxu 6e16a9a39a fix bug of measurement recommend injection func 2025-12-24 09:06:42 +08:00
douxu c29f58f388 fix bug of token4-token7 model config complete op 2025-12-23 16:44:31 +08:00
douxu 8313b16dfe fix bug of measurement recommend of token6 complete op and token7 hierarchy recommend 2025-12-23 15:09:33 +08:00
douxu f45f10507b fix bug of measurement recommend of token6 complete op and token7 hierarchy recommend 2025-12-23 14:52:39 +08:00
douxu 41e2998739 optimize deploy doc and optimize TraverseAttributeGroupTables func 2025-12-22 17:38:15 +08:00
douxu c16680d4c2 fix bug of t4-t7 model combine prefix string func 2025-12-22 10:45:47 +08:00
douxu 9499e579b3 optimize code of sql struct and measurement node recommend api 2025-12-19 17:33:12 +08:00
douxu 70bcb00062 add code of component attribute group store 2025-12-18 17:50:43 +08:00
douxu df77f80475 add func of component param group recommend func 2025-12-17 17:10:47 +08:00
douxu 689d31c246 optimize dockerfile and config generate of docker deploy 2025-12-17 17:09:20 +08:00
douxu 4f5d998659 optimzie rate limit of measurement group api 2025-12-17 14:12:35 +08:00
douxu 252699cb77 fix bug of measurement api and add func of traverse attribute group table 2025-12-16 16:34:19 +08:00
douxu 0add3cf6db feat:implement search support for abbreviated token ranges (e.g., token4-token7) 2025-12-15 16:49:38 +08:00
douxu c92cee9575 optimzie logic of real time data pull api 2025-12-12 17:22:20 +08:00
douxu d4d8c2c975 optimize deploy file 2025-12-12 15:20:50 +08:00
douxu c68cc9436a fix bug of measurement recommend api 2025-12-12 14:19:50 +08:00
douxu 716f56babb optimize docker deploy file 2025-12-12 11:01:18 +08:00
douxu 5021e7fda1 comment out unused kafka code 2025-12-12 10:23:04 +08:00
douxu befb4e8971 fix bug of server deploy 2025-12-11 16:42:25 +08:00
douxu 2a3852a246 add diagram node link process api 2025-12-10 16:12:13 +08:00
douxu f48807e5e5 optimize measurement link api 2025-12-08 17:01:24 +08:00
douxu 3f70be0d1c fix bug of new version of measurement recommend api 2025-12-06 18:32:00 +08:00
douxu a21a423624 optimize func of measurement data injection 2025-12-05 17:12:14 +08:00
douxu 666e1a9289 optimzie shell of measurement recommend api 2025-12-05 16:36:11 +08:00
douxu 46e72ce588 optimize redis test data lua shell 2025-12-04 17:26:35 +08:00
douxu b99c03296a optimize measurement recommend api 2025-12-03 16:55:14 +08:00
douxu 8a4116879b add real time data measurement target update func 2025-12-02 17:26:15 +08:00
douxu 10b91abee9 optimize real time data computing api 2025-12-01 17:22:29 +08:00
douxu 329b4827f8 fix bug of real time data injection shell 2025-12-01 11:27:38 +08:00
douxu a7d894d2de write code for real time data compute shell 2025-11-28 17:17:58 +08:00
douxu fca6905d74 optimize real time data pulling and subscription api 2025-11-27 16:59:03 +08:00
douxu 6f3134b5e9 optimize struct of real time data subscription api and fix bug of real time data pull api 2025-11-26 17:49:24 +08:00
douxu b6e47177fb debugging API using single measurement point subscription case 2025-11-25 16:13:55 +08:00
douxu 5e311a7071 optimize redis real time data injection func 2025-11-21 17:02:07 +08:00
douxu 36f267aec7 add data_injection func to mock real time data in redis 2025-11-20 17:37:12 +08:00
douxu 357d06868e optimize deploy doc 2025-11-19 17:44:45 +08:00
douxu 46ee2a39f4 optimize database struct 2025-11-19 17:44:08 +08:00
douxu dff74222c6 optimize send event of real time data compute api 2025-11-18 16:46:47 +08:00
douxu 9593c77c18 optimize real time data analyze of continuousComputation func 2025-11-17 16:39:26 +08:00
douxu 8cbbfbd695 implement real time data computing api 2025-11-14 16:34:34 +08:00
douxu d434a7737d optimize variable naming and init real time data compute func 2025-11-13 17:29:49 +08:00
douxu 984ee3003d optimize variable naming and api swagger comment 2025-11-13 11:48:26 +08:00
douxu 041d7e5788 optimize variable naming and optimize real time data computing api 2025-11-12 17:34:18 +08:00
douxu b43adf9b67 optimize logger info of real time data subscription api 2025-11-11 17:45:36 +08:00
douxu a82e02126d extracting duplicate code snippets to form a common function 2025-11-11 17:37:06 +08:00
douxu 93d1eea61f optimize sendRealTimeDataStream of real time data pull api 2025-11-11 11:50:25 +08:00
douxu 8d6efe8bb1 optimize real time data pull api 2025-11-10 17:32:18 +08:00
douxu 6de3c5955b optimize real time data pull api 2025-11-08 17:11:45 +08:00
douxu 8090751914 optimize update monitor config func of real time data query api 2025-11-06 17:22:14 +08:00
douxu b75358e676 optimize first create monitor config func of real time data query api 2025-11-05 18:20:54 +08:00
douxu f5ea909120 optimize real time data query api 2025-11-04 17:12:15 +08:00
douxu 594dc68ab1 create api of real time data monitor 2025-11-03 17:35:03 +08:00
douxu 2584f6dacb optimize real time data query api 2025-10-28 16:59:16 +08:00
douxu 09700a86ee optimize real time data receive api 2025-10-27 16:47:04 +08:00
douxu 954203b84d optimize real time data query api 2025-10-24 16:52:14 +08:00
douxu 458f7afdbf optimize doc of measurement recommend api 2025-10-20 17:30:55 +08:00
douxu 54128bedac fix bug of measurement recommend api 2025-10-20 15:06:23 +08:00
douxu 86199269f8 add deploy.md of deploy modelRT project 2025-10-17 17:10:10 +08:00
douxu 14d2a7ff65 update ignore file 2025-10-17 11:16:39 +08:00
douxu 3442984657 Stop tracking config/config.yaml 2025-10-16 17:48:03 +08:00
douxu 68a800ce63 add shield item of config in .gitignore 2025-10-16 17:45:10 +08:00
douxu f0a66263a3 optimize measurement recommend api 2025-10-16 17:18:57 +08:00
douxu 62e897190d optimize code of measurement recommend and logger output 2025-10-15 17:08:32 +08:00
douxu bcf80842b0 fix bug of main.go 2025-10-14 16:12:00 +08:00
douxu 5d02ca9fca add measurement recommend api 2025-09-29 16:37:38 +08:00
douxu 453e6f9851 add GetLongestCommonPrefixLength func 2025-09-27 15:56:46 +08:00
douxu 0d7890f6aa optimize code of RedissearchRecommend func 2025-09-26 16:47:40 +08:00
douxu 5f5eb22b39 optimize code of redis search query 2025-09-25 16:39:45 +08:00
douxu 151f7f22c5 fix bug of grid condition process 2025-09-24 17:26:46 +08:00
douxu 4ee836c70f add redis search code of query model object 2025-09-24 16:43:11 +08:00
douxu 7d8c442f9f optimize cache item of kafka monitor topic 2025-09-19 16:15:59 +08:00
douxu 51e8a677ca optimize real time data model 2025-09-18 16:53:25 +08:00
douxu 71366828f4 add real time cache 2025-09-17 16:41:30 +08:00
douxu a9532debe9 add client token of redis operation 2025-09-16 15:50:22 +08:00
douxu 0c09e7bd25 add func of generate service token 2025-09-12 17:12:02 +08:00
douxu e670720a96 optimize query measurement api 2025-09-10 17:03:33 +08:00
douxu 55a606a3f3 add redis zset structure 2025-09-09 16:02:36 +08:00
douxu 3120cfc3a5 add code of init measurement api 2025-09-05 17:10:34 +08:00
douxu 727b9a98ec add CL3611 and power104 proto code 2025-09-02 16:38:03 +08:00
douxu 3aab2c8a37 add telemetry machine code 2025-09-01 16:15:30 +08:00
douxu 37a1ccaadc modify the query conditions to tagname and fix building bug 2025-08-29 15:24:21 +08:00
douxu 858d02f955 add attr handlers 2025-08-27 17:33:10 +08:00
douxu 349d3398b2 add TAGNAME column in table grid、zone、station 2025-08-26 17:09:49 +08:00
douxu f8f83c38d9 add del func of redis string type 2025-08-21 17:04:10 +08:00
douxu 3fa0a8c6ca optimize covert func of component info 2025-08-18 17:02:38 +08:00
douxu f4ab4e4ea4 refactor(orm/circuit_diagram_component): fix compilation issues caused by structure field changes
http://server.baseware.net:9000/project/datart/task/47
2025-08-15 16:25:48 +08:00
douxu f7a1ea2540 feat(Implement-measurement-and-bay-structure-by-go): Implement measurement and bay structure by go
http://server.baseware.net:9000/project/datart/us/38?milestone=13
2025-08-13 16:24:29 +08:00
douxu 49fbd04644 optimize component struct http://server.baseware.net:9000/project/datart/task/47 2025-08-12 17:19:38 +08:00
douxu 426409ed91 feat(redis-string-class): 1. data token parse 2. redis string get 3. redis string set 4. redis string incr 2025-08-08 15:27:51 +08:00
douxu 3e833909d1 feat(token-parse): 1. add func of parse token 2.add func of query grid、zone、station、component 3.modify package of constant
http://server.baseware.net:9000/project/datart/task/22
2025-08-05 15:20:07 +08:00
douxu 1b6211b34b init attribute key struct 2025-07-31 16:52:21 +08:00
douxu 8520790989 merge feature-cgo branch 2025-07-31 10:48:56 +08:00
douxu 65e0c5da92 optimize modelRT routing structure 2025-07-31 10:31:26 +08:00
douxu a70f77464c refactor(gorm-logger): 1. add gorm logger in gorm config 2.use faced func in gorm logger 2025-06-23 16:00:48 +08:00
douxu b7009c351e refactor(errer-package): optimize package name of constant
1.optimize package name of constant
2025-06-13 15:34:49 +08:00
douxu 3fb78b8195 refactor(common/error): optimize error struct
add msg 、cause and occurred field into error struct for logging detail wrong info
2025-06-10 16:29:52 +08:00
douxu f6cee44f84 refactor(handler): use logger package log func replace zap log func 2025-06-06 16:41:52 +08:00
douxu 9aa5b0dcc6 refactor(logger): 1. optimize the logger log module design and add link tracking related designs
2. add logger facade functions to simplify the use of alarm functions
2025-06-05 15:56:40 +08:00
douxu d2196701ec fix(multi-branch-tree-of-topologic): add global tree variable and fix topologic info processing bug 2025-05-20 16:08:17 +08:00
douxu 237c7ecf69 refactor(optimize storage struct): optimize topologic storage struct
1.optimize uuid start and end node of uuid nil node str
        2.optimize topologic query sql of init topologic in memory
2025-05-16 14:24:55 +08:00
douxu daf30766ba refactor(topologic storage struct): refactor topologic storage struct
1.refactor topologic storage struct by multi branch tree
        2.add new func of build multi branch tree
        3.modify sql of query topologic from db
        4.delete page id field from topologic struct
2025-05-13 16:34:25 +08:00
douxu af0cfce78f refactor(component set): add the return value of component query func
1.add the return value of topologic query func
refactor(diagram set): add the return value of topologic query func
    1.add the return value of topologic query func
    2.modify internalLockLeaseTime params unit
    3.modify refreshTime params unit
    4.modify lua script
feat(bay info): init interval information constructor
    1.init interval information constructor
test(sql case): add new pg sql case
    1. add new pg sql case
2025-04-30 16:44:58 +08:00
douxu 23110cbba9 refactor(locker params): modify the locker lease time unit
1.modify the locker lease time unit
    2.modify internalLockLeaseTime params unit
    3.modify refreshTime params unit
    4.modify lua script
2025-04-18 15:17:51 +08:00
douxu 310f4c043c refactor(locker params): optimize of redis locker params
1.optimize locker unexport params
    2.optimize locker logging output
    3.optimize locker init process
2025-04-18 14:02:03 +08:00
douxu d27a9bbafa refactor(locker script): optimize of redis locker script
1.optimize locker script of un read lock
fix(locker_refresh): fix bug of locker refresh
    1.fix bug of locker refresh when locker lock success after wait
test(lock_case): add new redis locker case
    1.add new redis locker case
2025-04-11 16:36:54 +08:00
douxu fda43c65d2 optimize the subscription process of redis locker 2025-04-07 16:49:06 +08:00
douxu e4d45016f2 fix bug of redis read and write lock conflict test of rwlocker 2025-04-03 17:22:40 +08:00
douxu b27b999873 add redis read and write lock conflict test of rwlocker 2025-04-02 16:47:51 +08:00
douxu ae064236c7 add redis lock refresh test of rwlocker 2025-04-01 16:20:55 +08:00
douxu 182f8ac634 add redis lock test of rwlocker 2025-03-28 16:48:56 +08:00
douxu 1cf6137f9f refactor(redis hash): fix bug of redis hash
1.optimize RedisOption struct
fix(uuid): replace uuid mod dependencies
    1.replace uuid mod dependencies
fix(config): add new redis config
    1.add new redis config
2025-03-25 17:00:09 +08:00
douxu 2f1b9d26b8 feat(redis hash): fix bug of redis hash
1.add redis hash init func
2.replace redis model version in go mod
3.add context parameter in redis exec statement

feat(redis set): add new test of RLock and WLock

1.add redis set init func
2.replace redis model version in go mod
3.add context parameter in redis exec statement

fix(logger): add new test of RLock and WLock

1.add compress parameter
2.optimize initLogger function
2025-03-24 16:37:43 +08:00
douxu 25a55b94e8 fix bug of structure pointer func 2025-03-21 16:38:47 +08:00
douxu 3d79993de2 init redis hash and redis set struct with rwlocker 2025-03-21 16:21:33 +08:00
douxu 13809b6a31 add new test of RWLock 2025-03-17 17:19:46 +08:00
douxu 7b282c49f7 fix(lock script): fix bug of lock srcipt
1.fix bug of reset time wrong with ReentrantRLock in RLock script
2.fix bug of write lock failing to lock for the first time
3.fix bug of unlock failed with ReentrantWLock in UnWLock script

test(lock script): add new test of RLock and WLock

1.add refresh test of RLock
2.add new test of ReentrantWLock#
2025-03-13 16:51:50 +08:00
douxu d962462c42 add rlock lock&unlock test and rlock reentrant test 2025-03-12 16:24:28 +08:00
douxu 9381e547b6 add ignore item in the .gitignore file 2025-03-11 15:53:53 +08:00
douxu d404dc4335 fix bug of lock script and refresh script in redission rw lock 2025-03-11 15:35:15 +08:00
douxu 7e3d94db4b optimize structer of redisLock and acquisition statements of lock 2025-03-07 16:16:26 +08:00
douxu 09225fc96f optimize structer of redisRWLock and acquisition statements of write lock 2025-03-06 16:35:36 +08:00
douxu c08f4b91f5 optimize read lock acquisition statements of redisRWLock 2025-03-05 16:42:59 +08:00
douxu b894d61b54 init UnRLockScript 、WLockScript、UnWLockScript、RefreshLockScript script 2025-03-04 16:33:35 +08:00
douxu 2c2c2811a7 init read lock script of distributedlock 2025-02-28 16:00:16 +08:00
douxu 1899546ba4 init code of share memory 2025-02-21 15:27:25 +08:00
douxu 4975c6a5c1 add link for readme file 2025-02-13 14:46:44 +08:00
176 changed files with 13892 additions and 1282 deletions

6
.gitignore vendored
View File

@ -21,3 +21,9 @@
# Go workspace file
go.work
.vscode
.idea
# Shield all log files in the log folder
/log/
# Shield config files in the configs folder
/configs/**/*.yaml

View File

@ -1,2 +1,3 @@
# ModelRT
[![Build Status](http://192.168.46.100:4080/api/badges/CL-Softwares/modelRT/status.svg)](http://192.168.46.100:4080/CL-Softwares/modelRT)

View File

@ -5,7 +5,7 @@ import (
"sort"
"sync"
"modelRT/constant"
"modelRT/constants"
)
var (
@ -16,17 +16,17 @@ var (
// Event define alert event struct
type Event struct {
ComponentID int64
AnchorName string
Level constant.AlertLevel
Message string
StartTime int64
ComponentUUID string
AnchorName string
Level constants.AlertLevel
Message string
StartTime int64
}
// EventManager define store and manager alert event struct
type EventManager struct {
mu sync.RWMutex
events map[constant.AlertLevel][]Event
events map[constants.AlertLevel][]Event
}
// EventSet define alert event set implement sort.Interface
@ -53,7 +53,7 @@ func (am *EventManager) AddEvent(event Event) {
}
// GetEventsByLevel define get alert event by alert level
func (am *EventManager) GetEventsByLevel(level constant.AlertLevel) []Event {
func (am *EventManager) GetEventsByLevel(level constants.AlertLevel) []Event {
am.mu.Lock()
defer am.mu.Unlock()
@ -61,7 +61,7 @@ func (am *EventManager) GetEventsByLevel(level constant.AlertLevel) []Event {
}
// GetRangeEventsByLevel define get range alert event by alert level
func (am *EventManager) GetRangeEventsByLevel(targetLevel constant.AlertLevel) []Event {
func (am *EventManager) GetRangeEventsByLevel(targetLevel constants.AlertLevel) []Event {
var targetEvents []Event
am.mu.Lock()
@ -79,7 +79,7 @@ func (am *EventManager) GetRangeEventsByLevel(targetLevel constant.AlertLevel) [
// InitAlertEventManager define new alert event manager
func InitAlertEventManager() *EventManager {
return &EventManager{
events: make(map[constant.AlertLevel][]Event),
events: make(map[constants.AlertLevel][]Event),
}
}

View File

@ -0,0 +1,43 @@
// Package errcode provides internal error definition and business error definition
package errcode
var (
	// ErrProcessSuccess indicates the request was processed successfully.
	ErrProcessSuccess = newError(20000, "request process success")
	// ErrInvalidToken means the provided token does not conform to the expected format (e.g. missing segments).
	ErrInvalidToken = newError(40001, "invalid token format")
	// ErrCrossToken occurs when an update attempt involves multiple components, which is restricted by business logic.
	ErrCrossToken = newError(40002, "cross-component update not allowed")
	// ErrRetrieveFailed indicates a failure in fetching the project-to-table name mapping from the configuration.
	ErrRetrieveFailed = newError(40003, "retrieve table mapping failed")
	// ErrFoundTargetFailed is returned when the specific database table cannot be identified using the provided token info.
	ErrFoundTargetFailed = newError(40004, "found target table by token failed")
	// ErrDBQueryFailed represents a generic failure during a PostgreSQL SELECT or SCAN operation.
	ErrDBQueryFailed = newError(50001, "query postgres database data failed")
	// ErrDBUpdateFailed represents a failure during a PostgreSQL UPDATE or SAVE operation.
	ErrDBUpdateFailed = newError(50002, "update postgres database data failed")
	// ErrDBzeroAffectedRows occurs when a database operation executes successfully but modifies no records.
	// NOTE(review): name breaks Go MixedCaps style (ErrDBZeroAffectedRows); renaming would break callers, so it is kept.
	ErrDBzeroAffectedRows = newError(50003, "zero affected rows")
	// ErrBeginTxFailed indicates that the system failed to start a new PostgreSQL transaction.
	ErrBeginTxFailed = newError(50004, "begin postgres transaction failed")
	// ErrCommitTxFailed indicates that the PostgreSQL transaction could not be committed successfully.
	ErrCommitTxFailed = newError(50005, "postgres database transaction commit failed")
	// ErrCachedQueryFailed indicates an error occurred while attempting to fetch data from the Redis cache.
	ErrCachedQueryFailed = newError(60001, "query redis cached data failed")
	// ErrCacheSyncWarn marks a partial-success state: the database was updated, but the subsequent Redis cache refresh failed.
	ErrCacheSyncWarn = newError(60002, "postgres database updated, but cache sync failed")
	// ErrCacheQueryFailed indicates that querying cached data by token failed.
	// NOTE(review): meaning overlaps with ErrCachedQueryFailed (60001) — confirm both codes are intentionally distinct.
	ErrCacheQueryFailed = newError(60003, "query cached data by token failed")
)

View File

@ -0,0 +1,22 @@
// Package errcode provides internal error definition and business error definition
package errcode
import "errors"
// Database layer sentinel errors describing row-count anomalies returned by
// the DB/ORM layer; compare with errors.Is.
var (
	// ErrUUIDChangeType reports an undefined/unknown uuid change type value.
	ErrUUIDChangeType = errors.New("undefined uuid change type")
	// ErrUpdateRowZero reports that an UPDATE affected zero rows.
	ErrUpdateRowZero = errors.New("update affected rows is zero")
	// ErrDeleteRowZero reports that a DELETE affected zero rows.
	ErrDeleteRowZero = errors.New("delete affected rows is zero")
	// ErrQueryRowZero reports that a query matched zero rows.
	ErrQueryRowZero = errors.New("query affected rows is zero")
	// ErrInsertRowUnexpected reports that an INSERT affected fewer rows than the expected number.
	ErrInsertRowUnexpected = errors.New("the number of inserted data rows don't reach the expected value")
)

162
common/errcode/error.go Normal file
View File

@ -0,0 +1,162 @@
// Package errcode provides internal error definition and business error definition
package errcode
import (
"encoding/json"
"fmt"
"path"
"runtime"
)
// codes tracks every predefined error code registered through newError,
// allowing duplicate code definitions to be detected at package-init time.
var codes = map[int]struct{}{}

// AppError defines the internal application error structure. The occurred
// field records the location (function/file/line) where the error was
// created or enriched with a cause.
type AppError struct {
	code     int    // numeric business/system error code; -1 for ad-hoc wraps
	msg      string // human-readable description
	cause    error  // underlying error, possibly another *AppError
	occurred string // trigger location, e.g. "func: ..., file: ..., line: ..."
}

// Error implements the error interface by rendering the whole error chain
// as a JSON document, which keeps the structured fields readable in logs.
func (e *AppError) Error() string {
	if e == nil {
		return ""
	}
	errBytes, err := json.Marshal(e.toStructuredError())
	if err != nil {
		return fmt.Sprintf("Error() is error: json marshal error: %v", err)
	}
	return string(errBytes)
}

// String implements fmt.Stringer and mirrors Error().
func (e *AppError) String() string {
	return e.Error()
}

// Code returns the numeric error code.
func (e *AppError) Code() int {
	return e.code
}

// Msg returns the error message.
func (e *AppError) Msg() string {
	return e.msg
}

// Cause returns the underlying base error (nil if none was attached).
func (e *AppError) Cause() error {
	return e.cause
}

// WithCause returns a copy of a top-level predefined error whose cause field
// carries the underlying base error. The receiver is never mutated, so the
// shared predefined error values stay immutable.
func (e *AppError) WithCause(err error) *AppError {
	newErr := e.Clone()
	newErr.cause = err
	newErr.occurred = getAppErrOccurredInfo()
	return newErr
}

// Wrap packages a contextual message together with an error returned by the
// underlying logic. It returns nil when err is nil.
//
// NOTE(review): the result is a typed *AppError pointer; storing a nil result
// straight into a plain error interface makes it compare non-nil. Callers
// should check err before assigning, or keep the *AppError type.
func Wrap(msg string, err error) *AppError {
	if err == nil {
		return nil
	}
	appErr := &AppError{code: -1, msg: msg, cause: err}
	appErr.occurred = getAppErrOccurredInfo()
	return appErr
}

// Unwrap returns the wrapped cause. The standard library's errors.Is and
// errors.As look specifically for a method named Unwrap (lowercase w), so
// this method is required for error-chain traversal to work.
func (e *AppError) Unwrap() error {
	return e.cause
}

// UnWrap returns the error wrapped in the structure.
//
// Deprecated: use Unwrap instead. This spelling is not recognized by
// errors.Is/errors.As; it is kept only for backward compatibility.
func (e *AppError) UnWrap() error {
	return e.cause
}

// Is reports whether target matches this error by code, implemented to
// support errors.Is(err, target). Note that every Wrap-produced error shares
// code -1 and therefore matches any other wrapped error.
func (e *AppError) Is(target error) bool {
	targetErr, ok := target.(*AppError)
	if !ok {
		return false
	}
	return targetErr.Code() == e.Code()
}

// As stores this error into target when target is a **AppError, implemented
// to support errors.As(err, target).
func (e *AppError) As(target any) bool {
	t, ok := target.(**AppError)
	if !ok {
		return false
	}
	*t = e
	return true
}

// Clone returns a new AppError carrying the source's code, msg, cause and
// occurred values.
func (e *AppError) Clone() *AppError {
	return &AppError{
		code:     e.code,
		msg:      e.msg,
		cause:    e.cause,
		occurred: e.occurred,
	}
}

// newError registers a predefined error and panics when the code was already
// used, surfacing duplicate definitions at program start. Codes <= -1 are
// reserved for ad-hoc wrapping and skip registration.
func newError(code int, msg string) *AppError {
	if code > -1 {
		if _, duplicated := codes[code]; duplicated {
			panic(fmt.Sprintf("预定义错误码 %d 不能重复, 请检查后更换", code))
		}
		codes[code] = struct{}{}
	}
	return &AppError{code: code, msg: msg}
}

// getAppErrOccurredInfo returns the location where the error was triggered.
// The skip depth of 2 targets the caller of the exported helper (WithCause
// or Wrap) that invoked this function.
func getAppErrOccurredInfo() string {
	pc, file, line, ok := runtime.Caller(2)
	if !ok {
		return ""
	}
	file = path.Base(file)
	funcName := runtime.FuncForPC(pc).Name()
	triggerInfo := fmt.Sprintf("func: %s, file: %s, line: %d", funcName, file, line)
	return triggerInfo
}

// AppendMsg returns a copy of the error whose message is the existing
// message followed by ", " and msg.
func (e *AppError) AppendMsg(msg string) *AppError {
	n := e.Clone()
	n.msg = fmt.Sprintf("%s, %s", e.msg, msg)
	return n
}

// SetMsg returns a copy of the error with its message replaced by msg.
func (e *AppError) SetMsg(msg string) *AppError {
	n := e.Clone()
	n.msg = msg
	return n
}

// formattedErr is the JSON-serializable projection of AppError.
type formattedErr struct {
	Code     int         `json:"code"`
	Msg      string      `json:"msg"`
	Cause    interface{} `json:"cause"`
	Occurred string      `json:"occurred"`
}

// toStructuredError converts an AppError (and, recursively, any *AppError
// in its cause chain) into formattedErr for readable JSON output.
func (e *AppError) toStructuredError() *formattedErr {
	fe := new(formattedErr)
	fe.Code = e.Code()
	fe.Msg = e.Msg()
	fe.Occurred = e.occurred
	if e.cause != nil {
		if appErr, ok := e.cause.(*AppError); ok {
			fe.Cause = appErr.toStructuredError()
		} else {
			fe.Cause = e.cause.Error()
		}
	}
	return fe
}

View File

@ -2,7 +2,7 @@
package config
import (
"modelRT/constant"
"modelRT/constants"
)
// AnchorParamListConfig define anchor params list config struct
@ -15,7 +15,7 @@ type AnchorParamListConfig struct {
// AnchorParamBaseConfig define anchor params base config struct
type AnchorParamBaseConfig struct {
ComponentID int64 // component表 ID
ComponentUUID string // componentUUID
AnchorName string // 锚定参量名称
CompareValUpperLimit float64 // 比较值上限
CompareValLowerLimit float64 // 比较值下限
@ -43,7 +43,7 @@ var baseCurrentFunc = func(archorValue float64, args ...float64) float64 {
// SelectAnchorCalculateFuncAndParams define select anchor func and anchor calculate value by component type 、 anchor name and component data
func SelectAnchorCalculateFuncAndParams(componentType int, anchorName string, componentData map[string]interface{}) (func(archorValue float64, args ...float64) float64, []float64) {
if componentType == constant.DemoType {
if componentType == constants.DemoType {
if anchorName == "voltage" {
resistance := componentData["resistance"].(float64)
return baseVoltageFunc, []float64{resistance}

View File

@ -7,24 +7,31 @@ import (
"github.com/spf13/viper"
)
// BaseConfig define config stuct of base params config
// BaseConfig define config struct of base params config
type BaseConfig struct {
GridID int64 `mapstructure:"grid_id"`
ZoneID int64 `mapstructure:"zone_id"`
StationID int64 `mapstructure:"station_id"`
}
// KafkaConfig define config stuct of kafka config
type KafkaConfig struct {
Servers string `mapstructure:"Servers"`
GroupID string `mapstructure:"group_id"`
Topic string `mapstructure:"topic"`
AutoOffsetReset string `mapstructure:"auto_offset_reset"`
EnableAutoCommit string `mapstructure:"enable_auto_commit"`
ReadMessageTimeDuration string `mapstructure:"read_message_time_duration"`
// ServiceConfig define config struct of service config
type ServiceConfig struct {
ServiceAddr string `mapstructure:"service_addr"`
ServiceName string `mapstructure:"service_name"`
SecretKey string `mapstructure:"secret_key"`
}
// PostgresConfig define config stuct of postgres config
// KafkaConfig define config struct of kafka config
type KafkaConfig struct {
Servers string `mapstructure:"Servers"`
GroupID string `mapstructure:"group_id"`
Topic string `mapstructure:"topic"`
AutoOffsetReset string `mapstructure:"auto_offset_reset"`
EnableAutoCommit string `mapstructure:"enable_auto_commit"`
ReadMessageTimeDuration float32 `mapstructure:"read_message_time_duration"`
}
// PostgresConfig define config struct of postgres config
type PostgresConfig struct {
Port int `mapstructure:"port"`
Host string `mapstructure:"host"`
@ -33,7 +40,7 @@ type PostgresConfig struct {
Password string `mapstructure:"password"`
}
// LoggerConfig define config stuct of zap logger config
// LoggerConfig define config struct of zap logger config
type LoggerConfig struct {
Mode string `mapstructure:"mode"`
Level string `mapstructure:"level"`
@ -41,15 +48,25 @@ type LoggerConfig struct {
MaxSize int `mapstructure:"maxsize"`
MaxBackups int `mapstructure:"maxbackups"`
MaxAge int `mapstructure:"maxage"`
Compress bool `mapstructure:"compress"`
}
// AntsConfig define config stuct of ants pool config
// RedisConfig define config struct of redis config
type RedisConfig struct {
Addr string `mapstructure:"addr"`
Password string `mapstructure:"password"`
DB int `mapstructure:"db"`
PoolSize int `mapstructure:"poolsize"`
Timeout int `mapstructure:"timeout"`
}
// AntsConfig define config struct of ants pool config
type AntsConfig struct {
ParseConcurrentQuantity int `mapstructure:"parse_concurrent_quantity"` // parse comtrade file concurrent quantity
RTDReceiveConcurrentQuantity int `mapstructure:"rtd_receive_concurrent_quantity"` // polling real time data concurrent quantity
}
// DataRTConfig define config stuct of data runtime server api config
// DataRTConfig define config struct of data runtime server api config
type DataRTConfig struct {
Host string `mapstructure:"host"`
Port int64 `mapstructure:"port"`
@ -57,15 +74,18 @@ type DataRTConfig struct {
Method string `mapstructure:"polling_api_method"`
}
// ModelRTConfig define config stuct of model runtime server
// ModelRTConfig define config struct of model runtime server
type ModelRTConfig struct {
BaseConfig `mapstructure:"base"`
PostgresConfig `mapstructure:"postgres"`
KafkaConfig `mapstructure:"kafka"`
LoggerConfig `mapstructure:"logger"`
AntsConfig `mapstructure:"ants"`
DataRTConfig `mapstructure:"dataRT"`
PostgresDBURI string `mapstructure:"-"`
BaseConfig `mapstructure:"base"`
ServiceConfig `mapstructure:"service"`
PostgresConfig `mapstructure:"postgres"`
KafkaConfig `mapstructure:"kafka"`
LoggerConfig `mapstructure:"logger"`
AntsConfig `mapstructure:"ants"`
DataRTConfig `mapstructure:"dataRT"`
LockerRedisConfig RedisConfig `mapstructure:"locker_redis"`
StorageRedisConfig RedisConfig `mapstructure:"storage_redis"`
PostgresDBURI string `mapstructure:"-"`
}
// ReadAndInitConfig return modelRT project config struct

View File

@ -1,49 +0,0 @@
postgres:
host: "192.168.2.103"
port: 5432
database: "demo"
user: "postgres"
password: "coslight"
kafka:
servers: "localhost:9092"
port: 9092
group_id: "modelRT"
topic: ""
auto_offset_reset: "earliest"
enable_auto_commit: "false"
read_message_time_duration: "0.5s"
# influxdb:
# host: "localhost"
# port: "8086"
# token: "lCuiQ316qlly3iFeoi1EUokPJ0XxW-5lnG-3rXsKaaZSjfuxO5EaZfFdrNGM7Zlrdk1PrN_7TOsM_SCu9Onyew=="
# org: "coslight"
# bucket: "wave_record"
# zap logger config
logger:
mode: "development"
level: "debug"
filepath: "/home/douxu/log/modelRT-%s.log"
maxsize: 1
maxbackups: 5
maxage: 30
# ants config
ants:
parse_concurrent_quantity: 10
rtd_receive_concurrent_quantity: 10
# modelRT base config
base:
grid_id: 1
zone_id: 1
station_id: 1
# dataRT api config
dataRT:
host: "http://127.0.0.1"
port: 8888
polling_api: "datart/getPointData"
polling_api_method: "GET"

View File

@ -9,6 +9,6 @@ import (
type ModelParseConfig struct {
ComponentInfo orm.Component
Context context.Context
Ctx context.Context
AnchorChan chan AnchorParamConfig
}

View File

@ -1,35 +0,0 @@
package constant
import "errors"
// ErrUUIDChangeType define error of check uuid from value failed in uuid from change type
var ErrUUIDChangeType = errors.New("undefined uuid change type")
// ErrUpdateRowZero define error of update affected row zero
var ErrUpdateRowZero = errors.New("update affected rows is zero")
// ErrDeleteRowZero define error of delete affected row zero
var ErrDeleteRowZero = errors.New("delete affected rows is zero")
// ErrQueryRowZero define error of query affected row zero
var ErrQueryRowZero = errors.New("query affected rows is zero")
// ErrInsertRowUnexpected define error of insert affected row not reach expected number
var ErrInsertRowUnexpected = errors.New("the number of inserted data rows don't reach the expected value")
var (
// ErrUUIDFromCheckT1 define error of check uuid from value failed in uuid from change type
ErrUUIDFromCheckT1 = errors.New("in uuid from change type, value of new uuid_from is equal value of old uuid_from")
// ErrUUIDToCheckT1 define error of check uuid to value failed in uuid from change type
ErrUUIDToCheckT1 = errors.New("in uuid from change type, value of new uuid_to is not equal value of old uuid_to")
// ErrUUIDFromCheckT2 define error of check uuid from value failed in uuid to change type
ErrUUIDFromCheckT2 = errors.New("in uuid to change type, value of new uuid_from is not equal value of old uuid_from")
// ErrUUIDToCheckT2 define error of check uuid to value failed in uuid to change type
ErrUUIDToCheckT2 = errors.New("in uuid to change type, value of new uuid_to is equal value of old uuid_to")
// ErrUUIDFromCheckT3 define error of check uuid from value failed in uuid add change type
ErrUUIDFromCheckT3 = errors.New("in uuid add change type, value of old uuid_from is not empty")
// ErrUUIDToCheckT3 define error of check uuid to value failed in uuid add change type
ErrUUIDToCheckT3 = errors.New("in uuid add change type, value of old uuid_to is not empty")
)

View File

@ -1,12 +0,0 @@
package constant
const (
// UUIDErrChangeType 拓扑信息错误改变类型
UUIDErrChangeType = iota
// UUIDFromChangeType 拓扑信息父节点改变类型
UUIDFromChangeType
// UUIDToChangeType 拓扑信息子节点改变类型
UUIDToChangeType
// UUIDAddChangeType 拓扑信息新增类型
UUIDAddChangeType
)

View File

@ -1,5 +1,5 @@
// Package constant define alert level constant
package constant
// Package constants define constant variable
package constants
// AlertLevel define alert level type
type AlertLevel int

View File

@ -0,0 +1,17 @@
// Package constants define constant variable
package constants
const (
	// CodeSuccess indicates that the API request was processed successfully.
	CodeSuccess = 20000
	// CodeInvalidParamFailed indicates that request parameter parsing or validation failed.
	CodeInvalidParamFailed = 40001
	// CodeDBQueryFailed indicates that a database query operation failed.
	CodeDBQueryFailed = 50001
	// CodeDBUpdateFailed indicates that a database update operation failed.
	CodeDBUpdateFailed = 50002
	// CodeDBUpdateailed is a misspelled alias kept for callers of the original name.
	//
	// Deprecated: use CodeDBUpdateFailed instead.
	CodeDBUpdateailed = CodeDBUpdateFailed
	// CodeRedisQueryFailed indicates that a redis query operation failed.
	CodeRedisQueryFailed = 60001
	// CodeRedisUpdateFailed indicates that a redis update operation failed.
	CodeRedisUpdateFailed = 60002
)

11
constants/attrs_key.go Normal file
View File

@ -0,0 +1,11 @@
// Package constants define constant variable
package constants
const (
	// ShortAttrKeyLength defines the segment count of a short attribute key.
	ShortAttrKeyLength int = 4
	// LongAttrKeyLength defines the segment count of a long attribute key.
	LongAttrKeyLength int = 7

	// ShortAttrKeyLenth is a misspelled alias kept for callers of the original name.
	//
	// Deprecated: use ShortAttrKeyLength instead.
	ShortAttrKeyLenth int = ShortAttrKeyLength
	// LongAttrKeyLenth is a misspelled alias kept for callers of the original name.
	//
	// Deprecated: use LongAttrKeyLength instead.
	LongAttrKeyLenth int = LongAttrKeyLength
)

// attribute group names: component、base_extend、rated、setup、model、stable、bay、craft、integrity、behavior

17
constants/buffer.go Normal file
View File

@ -0,0 +1,17 @@
// Package constants define constant variable
package constants
import "time"
const (
// FanInChanMaxSize define maximum buffer capacity by fanChannel
FanInChanMaxSize = 10000
// SendMaxBatchSize define maximum buffer capacity
// TODO 后续优化批处理大小
SendMaxBatchSize = 100
// SendChanBufferSize define maximum buffer capacity by channel
SendChanBufferSize = 100
// SendMaxBatchInterval define maximum aggregate latency
SendMaxBatchInterval = 20 * time.Millisecond
)

View File

@ -1,4 +1,5 @@
package constant
// Package constants define constant variable
package constants
const (
// 母线服役属性

7
constants/context.go Normal file
View File

@ -0,0 +1,7 @@
// Package constants define constant variable
package constants
// contextKey is an unexported string type for context value keys, so values
// stored by this package cannot collide with keys set by other packages.
type contextKey string

// MeasurementUUIDKey is the context key under which a measurement uuid is stored.
const MeasurementUUIDKey = contextKey("measurement_uuid")

View File

@ -1,5 +1,5 @@
// Package constant define constant value
package constant
// Package constants define constant variable
package constants
const (
// NullableType 空类型类型

57
constants/error.go Normal file
View File

@ -0,0 +1,57 @@
// Package constants define constant variable
package constants
import "errors"
// Topology (uuid_from / uuid_to) validation errors, one pair per change type.
var (
	// ErrUUIDFromCheckT1 define error of check uuid from value failed in uuid from change type
	ErrUUIDFromCheckT1 = errors.New("in uuid from change type, value of new uuid_from is equal value of old uuid_from")
	// ErrUUIDToCheckT1 define error of check uuid to value failed in uuid from change type
	ErrUUIDToCheckT1 = errors.New("in uuid from change type, value of new uuid_to is not equal value of old uuid_to")
	// ErrUUIDFromCheckT2 define error of check uuid from value failed in uuid to change type
	ErrUUIDFromCheckT2 = errors.New("in uuid to change type, value of new uuid_from is not equal value of old uuid_from")
	// ErrUUIDToCheckT2 define error of check uuid to value failed in uuid to change type
	ErrUUIDToCheckT2 = errors.New("in uuid to change type, value of new uuid_to is equal value of old uuid_to")
	// ErrUUIDFromCheckT3 define error of check uuid from value failed in uuid add change type
	ErrUUIDFromCheckT3 = errors.New("in uuid add change type, value of old uuid_from is not empty")
	// ErrUUIDToCheckT3 define error of check uuid to value failed in uuid add change type
	ErrUUIDToCheckT3 = errors.New("in uuid add change type, value of old uuid_to is not empty")
)

// Measurement data-source / channel parsing errors.
var (
	// ErrInvalidAddressType define error of invalid io address type
	ErrInvalidAddressType = errors.New("invalid address type")
	// ErrUnknownDataType define error of unknown measurement data source type
	ErrUnknownDataType = errors.New("unknown data type")
	// ErrExceedsLimitType define error of channel number exceeds limit for telemetry
	ErrExceedsLimitType = errors.New("channel number exceeds limit for Telemetry")
	// ErrUnsupportedChannelPrefixType define error of unsupported channel prefix
	ErrUnsupportedChannelPrefixType = errors.New("unsupported channel prefix")
)

// Type-conversion errors.
var (
	// ErrFormatUUID define error of format uuid string to uuid.UUID type failed
	ErrFormatUUID = errors.New("format string type to uuid.UUID type failed")
	// ErrFormatCache define error of format cache with any type to cacheItem type failed
	// (message typo "teype" fixed)
	ErrFormatCache = errors.New("format any type to cache item type failed")
)

// Miscellaneous sentinel errors, grouped for consistency with the blocks above.
var (
	// ErrGetClientToken define error of can not get client_token from context
	ErrGetClientToken = errors.New("can not get client_token from context")
	// ErrQueryComponentByUUID define error of query component from db by uuid failed
	ErrQueryComponentByUUID = errors.New("query component from db failed by uuid")
	// ErrChanIsNil define error of channel is nil
	ErrChanIsNil = errors.New("this channel is nil")
	// ErrConcurrentModify define error of concurrent modification detected
	ErrConcurrentModify = errors.New("existed concurrent modification risk")
	// ErrUnsupportedSubAction define error of unsupported real time data subscription action
	ErrUnsupportedSubAction = errors.New("unsupported real time data subscription action")
	// ErrUnsupportedLinkAction define error of unsupported measurement link process action
	// (message typo "rmeasurement" fixed)
	ErrUnsupportedLinkAction = errors.New("unsupported measurement link process action")
)

31
constants/event.go Normal file
View File

@ -0,0 +1,31 @@
// Package constants define constant variable
package constants
const (
	// TIBreachTriggerType define out of bounds (limit breach) trigger type constant
	TIBreachTriggerType = "trigger"
)

// Telemetry limit level identifiers used when reporting a breach.
const (
	// TelemetryUpLimit define telemetry upper limit
	TelemetryUpLimit = "up"
	// TelemetryUpUpLimit define telemetry upper upper limit
	TelemetryUpUpLimit = "upup"
	// TelemetryDownLimit define telemetry lower limit
	TelemetryDownLimit = "down"
	// TelemetryDownDownLimit define telemetry lower lower limit
	TelemetryDownDownLimit = "downdown"
)

// Telesignal edge identifiers.
const (
	// TelesignalRaising define telesignal raising (rising) edge
	TelesignalRaising = "raising"
	// TelesignalFalling define telesignal falling edge
	TelesignalFalling = "falling"
)

const (
	// MinBreachCount define min breach count of real time data
	// NOTE(review): presumably the threshold before a breach is acted on — confirm at call sites
	MinBreachCount = 10
)

View File

@ -1,9 +1,11 @@
// Package constant define constant value
package constant
// Package constants define constant variable
package constants
const (
// DevelopmentLogMode define development operator environment for modelRT project
DevelopmentLogMode = "development"
// DebugLogMode define debug operator environment for modelRT project
DebugLogMode = "debug"
// ProductionLogMode define production operator environment for modelRT project
ProductionLogMode = "production"
)

37
constants/measurement.go Normal file
View File

@ -0,0 +1,37 @@
// Package constants define constant variable
package constants
// Measurement data-source protocol identifiers.
const (
	// DataSourceTypeCL3611 define CL3611 type
	DataSourceTypeCL3611 = 1
	// DataSourceTypePower104 define electricity 104 protocol type
	DataSourceTypePower104 = 2
)

// channel name prefix, one per telecontrol channel category
const (
	// ChannelPrefixTelemetry define telemetry channel name prefix
	ChannelPrefixTelemetry = "Telemetry"
	// ChannelPrefixTelesignal define telesignal channel name prefix
	ChannelPrefixTelesignal = "Telesignal"
	// ChannelPrefixTelecommand define telecommand channel name prefix
	ChannelPrefixTelecommand = "Telecommand"
	// ChannelPrefixTeleadjusting define teleadjusting channel name prefix
	ChannelPrefixTeleadjusting = "Teleadjusting"
	// ChannelPrefixSetpoints define setpoints channel name prefix
	ChannelPrefixSetpoints = "Setpoints"
)

// channel name suffix
// NOTE(review): these look like electrical quantities (P/Q/S power, F frequency,
// UAB/UBC/UCA line voltages) — confirm against the channel naming spec.
const (
	ChannelSuffixP = "P"
	ChannelSuffixQ = "Q"
	ChannelSuffixS = "S"
	ChannelSuffixPS = "PS"
	ChannelSuffixF = "F"
	ChannelSuffixDeltaF = "deltaF"
	ChannelSuffixUAB = "UAB"
	ChannelSuffixUBC = "UBC"
	ChannelSuffixUCA = "UCA"
)

const (
	// MaxIdentifyHierarchy define max data identify syntax hierarchy (long token form)
	MaxIdentifyHierarchy = 7
	// IdentifyHierarchy define short data identify syntax hierarchy
	IdentifyHierarchy = 4
)

104
constants/recommend_keys.go Normal file
View File

@ -0,0 +1,104 @@
// Package constants define constant variable
package constants

import "strconv"
const (
	// DefaultScore define the default score for redissearch suggestion
	DefaultScore = 1.0
)

// Redis set key names. The keys containing "%s" are format patterns whose
// placeholder is presumably the parent hierarchy's tag/nspath — confirm at
// the call sites that build them.
const (
	// RedisAllGridSetKey define redis set key which store all grid tag keys
	RedisAllGridSetKey = "grid_tag_keys"
	// RedisAllZoneSetKey define redis set key which store all zone tag keys
	RedisAllZoneSetKey = "zone_tag_keys"
	// RedisAllStationSetKey define redis set key which store all station tag keys
	RedisAllStationSetKey = "station_tag_keys"
	// RedisAllCompNSPathSetKey define redis set key which store all component nspath keys
	RedisAllCompNSPathSetKey = "component_nspath_keys"
	// RedisAllCompTagSetKey define redis set key which store all component tag keys
	RedisAllCompTagSetKey = "component_tag_keys"
	// RedisAllConfigSetKey define redis set key which store all config keys
	RedisAllConfigSetKey = "config_keys"
	// RedisAllMeasTagSetKey define redis set key which store all measurement tag keys
	RedisAllMeasTagSetKey = "measurement_tag_keys"
	// RedisSpecGridZoneSetKey define redis set key which store all zone tag keys under specific grid
	RedisSpecGridZoneSetKey = "%s_zone_tag_keys"
	// RedisSpecZoneStationSetKey define redis set key which store all station tag keys under specific zone
	RedisSpecZoneStationSetKey = "%s_station_tag_keys"
	// RedisSpecStationCompNSPATHSetKey define redis set key which store all component nspath keys under specific station
	RedisSpecStationCompNSPATHSetKey = "%s_component_nspath_keys"
	// RedisSpecCompNSPathCompTagSetKey define redis set key which store all component tag keys under specific component nspath
	RedisSpecCompNSPathCompTagSetKey = "%s_component_tag_keys"
	// RedisSpecCompTagMeasSetKey define redis set key which store all measurement tag keys under specific component tag
	RedisSpecCompTagMeasSetKey = "%s_measurement_tag_keys"
)
const (
	// SearchLinkAddAction define search link add action
	SearchLinkAddAction = "add"
	// SearchLinkDelAction define search link del action
	SearchLinkDelAction = "del"
)

// RecommendHierarchyType define the hierarchy levels used for redis recommend search
type RecommendHierarchyType int

// Hierarchy levels. iota + 1 keeps the zero value invalid; the declaration
// order defines the numeric values, so do not reorder.
const (
	// GridRecommendHierarchyType define grid hierarchy for redis recommend search
	GridRecommendHierarchyType RecommendHierarchyType = iota + 1
	// ZoneRecommendHierarchyType define zone hierarchy for redis recommend search
	ZoneRecommendHierarchyType
	// StationRecommendHierarchyType define station hierarchy for redis recommend search
	StationRecommendHierarchyType
	// CompNSPathRecommendHierarchyType define component nspath hierarchy for redis recommend search
	CompNSPathRecommendHierarchyType
	// CompTagRecommendHierarchyType define component tag hierarchy for redis recommend search
	CompTagRecommendHierarchyType
	// ConfigRecommendHierarchyType define config hierarchy for redis recommend search
	ConfigRecommendHierarchyType
	// MeasTagRecommendHierarchyType define measurement tag hierarchy for redis recommend search
	MeasTagRecommendHierarchyType
)
// String implements the fmt.Stringer interface and returns the token name
// used for the given recommend hierarchy level.
func (r RecommendHierarchyType) String() string {
	switch r {
	case GridRecommendHierarchyType:
		return "grid_tag"
	case ZoneRecommendHierarchyType:
		return "zone_tag"
	case StationRecommendHierarchyType:
		return "station_tag"
	case CompNSPathRecommendHierarchyType:
		return "comp_nspath"
	case CompTagRecommendHierarchyType:
		return "comp_tag"
	case ConfigRecommendHierarchyType:
		return "config"
	case MeasTagRecommendHierarchyType:
		return "meas_tag"
	default:
		// Include the raw numeric value to aid debugging. The original
		// string(rune(r)) rendered the code point (e.g. 8 -> "\b"), not the
		// decimal number; strconv.Itoa gives the intended representation.
		return "unknown_recommend_type(" + strconv.Itoa(int(r)) + ")"
	}
}
const (
	// FullRecommendLength define full recommend token pattern with all tokens
	// (a pattern string, not a numeric length)
	FullRecommendLength = "t1.t2.t3.t4.t5.t6.t7"
	// IsLocalRecommendLength define local recommend token pattern with component-level tokens only
	// (a pattern string, not a numeric length)
	IsLocalRecommendLength = "t4.t5.t6.t7"
	// token1.token2.token3.token4.token7
	// token4.token7
)

7
constants/redis.go Normal file
View File

@ -0,0 +1,7 @@
// Package constants define constant variable
package constants
// RedisSearchDictName define redis search dictionary name
const RedisSearchDictName = "search_suggestions_dict"

View File

@ -0,0 +1,22 @@
// Package constants define constant variable
package constants
// Websocket/API response codes for the real-time data endpoints.
const (
	// RespCodeSuccess define constant to indicate that the API request was processed successfully
	RespCodeSuccess = 2000
	// RespCodeSuccessWithNoSub define constant to indicate that the request was processed successfully, with all subscriptions removed for the given client_id.
	RespCodeSuccessWithNoSub = 2101
	// RespCodeFailed define constant to indicate that the API request failed
	RespCodeFailed = 3000
	// RespCodeInvalidParams define constant to indicate that the request parameters failed to validate, parsing failed, or the action is invalid
	RespCodeInvalidParams = 4001
	// RespCodeUnauthorized define constant to indicate insufficient permissions or an invalid ClientID
	RespCodeUnauthorized = 4002
	// RespCodeServerError define constant to indicate a serious internal server error (such as database disconnection or code panic)
	RespCodeServerError = 5000
)

View File

@ -0,0 +1,85 @@
// Package constants define constant variable
package constants
// Real-time subscription actions accepted from clients.
const (
	// SubStartAction define the real time subscription start action
	SubStartAction string = "start"
	// SubStopAction define the real time subscription stop action
	SubStopAction string = "stop"
	// SubAppendAction define the real time subscription append action
	SubAppendAction string = "append"
	// SubUpdateAction define the real time subscription update action
	SubUpdateAction string = "update"
)

// Subscription status code constants.
// TODO migrate from the 4-digit code format to the 5-digit format
const (
	// SubSuccessCode define subscription success code
	SubSuccessCode = "1001"
	// SubFailedCode define subscription failed code
	SubFailedCode = "1002"
	// RTDSuccessCode define real time data return success code
	RTDSuccessCode = "1003"
	// RTDFailedCode define real time data return failed code
	RTDFailedCode = "1004"
	// CancelSubSuccessCode define cancel subscription success code
	CancelSubSuccessCode = "1005"
	// CancelSubFailedCode define cancel subscription failed code
	CancelSubFailedCode = "1006"
	// SubRepeatCode define subscription repeat code
	SubRepeatCode = "1007"
	// UpdateSubSuccessCode define update subscription success code
	UpdateSubSuccessCode = "1008"
	// UpdateSubFailedCode define update subscription failed code
	UpdateSubFailedCode = "1009"
)

// System control directives delivered in-band on the data stream.
const (
	// SysCtrlPrefix define the prefix shared by all system control directives, facilitating unified parsing within the sendDataStream goroutine
	SysCtrlPrefix = "SYS_CTRL_"
	// SysCtrlAllRemoved define the directive indicating that all active polling targets have been removed for the current client, and no further data streams are active
	SysCtrlAllRemoved = "SYS_CTRL_ALL_REMOVED"
	// SysCtrlSessionExpired define the directive reserved for indicating that the current websocket session has timed out or is no longer valid
	SysCtrlSessionExpired = "SYS_CTRL_SESSION_EXPIRED"
)

// Human-readable messages paired with the status codes above.
const (
	// SubSuccessMsg define subscription success message
	SubSuccessMsg = "subscription success"
	// SubFailedMsg define subscription failed message
	SubFailedMsg = "subscription failed"
	// RTDSuccessMsg define real time data return success message
	RTDSuccessMsg = "real time data return success"
	// RTDFailedMsg define real time data return failed message
	RTDFailedMsg = "real time data return failed"
	// CancelSubSuccessMsg define cancel subscription success message
	CancelSubSuccessMsg = "cancel subscription success"
	// CancelSubFailedMsg define cancel subscription failed message
	CancelSubFailedMsg = "cancel subscription failed"
	// SubRepeatMsg define subscription repeat message
	SubRepeatMsg = "subscription repeat in target interval"
	// UpdateSubSuccessMsg define update subscription success message
	UpdateSubSuccessMsg = "update subscription success"
	// UpdateSubFailedMsg define update subscription failed message
	UpdateSubFailedMsg = "update subscription failed"
)

// TargetOperationType define constant to the target operation type
type TargetOperationType int

// Target operations; declaration order defines the values (iota) — do not reorder.
const (
	// OpAppend define append new target to the subscription list
	OpAppend TargetOperationType = iota
	// OpRemove define remove exist target from the subscription list
	OpRemove
	// OpUpdate define update exist target from the subscription list
	OpUpdate
)

const (
	// NoticeChanCap define real time data notice channel capacity
	NoticeChanCap = 10000
)

View File

@ -1,5 +1,5 @@
// Package constant define constant value
package constant
// Package constants define constant variable
package constants
const (
// LogTimeFormate define time format for log file name

23
constants/togologic.go Normal file
View File

@ -0,0 +1,23 @@
// Package constants define constant variable
package constants
import "github.com/gofrs/uuid"
// Topology change types; declaration order defines the values (iota) — do not reorder.
const (
	// UUIDErrChangeType define the invalid topology change type
	UUIDErrChangeType = iota
	// UUIDFromChangeType define the topology parent-node (uuid_from) change type
	UUIDFromChangeType
	// UUIDToChangeType define the topology child-node (uuid_to) change type
	UUIDToChangeType
	// UUIDAddChangeType define the topology addition change type
	UUIDAddChangeType
)

const (
	// UUIDNilStr define the string form of the nil uuid used for topology start and end nodes
	UUIDNilStr = "00000000-0000-0000-0000-000000000000"
)

// UUIDNil define the uuid.UUID form of the nil uuid used for topology start and end nodes
var UUIDNil = uuid.FromStringOrNil(UUIDNilStr)

9
constants/trace.go Normal file
View File

@ -0,0 +1,9 @@
// Package constants define constant variable
package constants
// Trace propagation header names, assuming the B3 specification.
const (
	// HeaderTraceID define the B3 trace id header name
	HeaderTraceID = "X-B3-TraceId"
	// HeaderSpanID define the B3 span id header name
	HeaderSpanID = "X-B3-SpanId"
	// HeaderParentSpanID define the B3 parent span id header name
	HeaderParentSpanID = "X-B3-ParentSpanId"
)

View File

@ -4,10 +4,9 @@ package database
import (
"context"
"fmt"
"strconv"
"time"
"modelRT/constant"
"modelRT/common/errcode"
"modelRT/network"
"modelRT/orm"
@ -16,36 +15,34 @@ import (
)
// CreateComponentIntoDB define create component info of the circuit diagram into DB
func CreateComponentIntoDB(ctx context.Context, tx *gorm.DB, componentInfo network.ComponentCreateInfo) (int64, error) {
func CreateComponentIntoDB(ctx context.Context, tx *gorm.DB, componentInfo network.ComponentCreateInfo) (string, error) {
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
globalUUID, err := uuid.FromString(componentInfo.UUID)
if err != nil {
return -1, fmt.Errorf("format uuid from string type failed:%w", err)
return "", fmt.Errorf("format uuid from string type failed:%w", err)
}
component := orm.Component{
GlobalUUID: globalUUID,
GridID: strconv.FormatInt(componentInfo.GridID, 10),
ZoneID: strconv.FormatInt(componentInfo.ZoneID, 10),
StationID: strconv.FormatInt(componentInfo.StationID, 10),
PageID: componentInfo.PageID,
Tag: componentInfo.Tag,
ComponentType: componentInfo.ComponentType,
Name: componentInfo.Name,
Context: componentInfo.Context,
Op: componentInfo.Op,
Ts: time.Now(),
GlobalUUID: globalUUID,
GridName: componentInfo.GridName,
ZoneName: componentInfo.ZoneName,
StationName: componentInfo.StationName,
Tag: componentInfo.Tag,
Name: componentInfo.Name,
Context: componentInfo.Context,
Op: componentInfo.Op,
Ts: time.Now(),
}
result := tx.WithContext(cancelCtx).Create(&component)
if result.Error != nil || result.RowsAffected == 0 {
err := result.Error
if result.RowsAffected == 0 {
err = fmt.Errorf("%w:please check insert component slice", constant.ErrInsertRowUnexpected)
err = fmt.Errorf("%w:please check insert component slice", errcode.ErrInsertRowUnexpected)
}
return -1, fmt.Errorf("insert component info failed:%w", err)
return "", fmt.Errorf("insert component info failed:%w", err)
}
return component.ID, nil
return component.GlobalUUID.String(), nil
}

View File

@ -0,0 +1,50 @@
// Package database define database operation functions
package database
import (
"context"
"fmt"
"strconv"
"time"
"modelRT/common/errcode"
"modelRT/network"
"modelRT/orm"
"github.com/gofrs/uuid"
"gorm.io/gorm"
)
// CreateMeasurement define create measurement info of the circuit diagram into DB.
// It returns the new row's id as a decimal string, or an error when the uuid
// cannot be parsed or the insert does not affect exactly one row.
func CreateMeasurement(ctx context.Context, tx *gorm.DB, measurementInfo network.MeasurementCreateInfo) (string, error) {
	cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	globalUUID, err := uuid.FromString(measurementInfo.UUID)
	if err != nil {
		return "", fmt.Errorf("format uuid from string type failed:%w", err)
	}

	// NOTE(review): most fields are placeholders (-1 / "" / nil) and both
	// BayUUID and ComponentUUID receive the same uuid — confirm against the
	// measurement table contract before relying on this writer.
	measurement := orm.Measurement{
		Tag:           "",
		Name:          "",
		Type:          -1,
		Size:          -1,
		DataSource:    nil,
		EventPlan:     nil,
		BayUUID:       globalUUID,
		ComponentUUID: globalUUID,
		Op:            -1,
		Ts:            time.Now(),
	}

	result := tx.WithContext(cancelCtx).Create(&measurement)
	if result.Error != nil || result.RowsAffected == 0 {
		err := result.Error
		if result.RowsAffected == 0 {
			// fixed copy-pasted "component" wording from CreateComponentIntoDB
			err = fmt.Errorf("%w:please check insert measurement row", errcode.ErrInsertRowUnexpected)
		}
		return "", fmt.Errorf("insert measurement info failed:%w", err)
	}
	return strconv.FormatInt(measurement.ID, 10), nil
}

View File

@ -6,7 +6,7 @@ import (
"fmt"
"time"
"modelRT/constant"
"modelRT/common/errcode"
"modelRT/model"
jsoniter "github.com/json-iterator/go"
@ -28,7 +28,7 @@ func CreateModelIntoDB(ctx context.Context, tx *gorm.DB, componentID int64, comp
if result.Error != nil || result.RowsAffected == 0 {
err := result.Error
if result.RowsAffected == 0 {
err = fmt.Errorf("%w:please check insert model params", constant.ErrInsertRowUnexpected)
err = fmt.Errorf("%w:please check insert model params", errcode.ErrInsertRowUnexpected)
}
return fmt.Errorf("insert component model params into table %s failed:%w", modelStruct.ReturnTableName(), err)
}

View File

@ -6,7 +6,7 @@ import (
"fmt"
"time"
"modelRT/constant"
"modelRT/common/errcode"
"modelRT/network"
"modelRT/orm"
@ -21,11 +21,9 @@ func CreateTopologicIntoDB(ctx context.Context, tx *gorm.DB, pageID int64, topol
var topologicSlice []orm.Topologic
for _, info := range topologicInfos {
topologicInfo := orm.Topologic{
PageID: pageID,
UUIDFrom: info.UUIDFrom,
UUIDTo: info.UUIDTo,
Flag: info.Flag,
Comment: info.Comment,
}
topologicSlice = append(topologicSlice, topologicInfo)
}
@ -35,7 +33,7 @@ func CreateTopologicIntoDB(ctx context.Context, tx *gorm.DB, pageID int64, topol
if result.Error != nil || result.RowsAffected != int64(len(topologicSlice)) {
err := result.Error
if result.RowsAffected != int64(len(topologicSlice)) {
err = fmt.Errorf("%w:please check insert topologic slice", constant.ErrInsertRowUnexpected)
err = fmt.Errorf("%w:please check insert topologic slice", errcode.ErrInsertRowUnexpected)
}
return fmt.Errorf("insert topologic link failed:%w", err)
}

View File

@ -6,7 +6,7 @@ import (
"fmt"
"time"
"modelRT/constant"
"modelRT/common/errcode"
"modelRT/network"
"modelRT/orm"
@ -23,7 +23,7 @@ func DeleteTopologicIntoDB(ctx context.Context, tx *gorm.DB, pageID int64, delIn
if result.Error != nil || result.RowsAffected == 0 {
err := result.Error
if result.RowsAffected == 0 {
err = fmt.Errorf("%w:please check delete topologic where conditions", constant.ErrDeleteRowZero)
err = fmt.Errorf("%w:please check delete topologic where conditions", errcode.ErrDeleteRowZero)
}
return fmt.Errorf("delete topologic link failed:%w", err)
}

View File

@ -0,0 +1,88 @@
// Package database define database operation functions
package database
import (
"context"
"fmt"
"strings"
"modelRT/logger"
"modelRT/model"
"modelRT/orm"
"gorm.io/gorm"
)
// FillingShortTokenModel define filling short token model info. It resolves the
// component and measurement rows matching the model's grid/zone/station names
// and measurement tag, then stores them on the model.
func FillingShortTokenModel(ctx context.Context, tx *gorm.DB, identModel *model.ShortIdentityTokenModel) error {
	filterComponent := &orm.Component{
		GridName:    identModel.GetGridName(),
		ZoneName:    identModel.GetZoneName(),
		StationName: identModel.GetStationName(),
	}
	component, measurement, err := QueryLongIdentModelInfoByToken(ctx, tx, identModel.MeasurementTag, filterComponent)
	if err != nil {
		// fixed copy-pasted "long" wording from FillingLongTokenModel
		logger.Error(ctx, "query short identity token model info failed", "error", err)
		return err
	}
	identModel.ComponentInfo = component
	identModel.MeasurementInfo = measurement
	return nil
}
// FillingLongTokenModel define filling long token model info. It resolves the
// component and measurement rows matching the model's grid/zone/station names,
// component tag and measurement tag, then stores them on the model.
func FillingLongTokenModel(ctx context.Context, tx *gorm.DB, identModel *model.LongIdentityTokenModel) error {
	filter := &orm.Component{
		GridName:    identModel.GetGridName(),
		ZoneName:    identModel.GetZoneName(),
		StationName: identModel.GetStationName(),
		Tag:         identModel.GetComponentTag(),
	}

	compInfo, measInfo, err := QueryLongIdentModelInfoByToken(ctx, tx, identModel.MeasurementTag, filter)
	if err != nil {
		logger.Error(ctx, "query long identity token model info failed", "error", err)
		return err
	}

	identModel.ComponentInfo = compInfo
	identModel.MeasurementInfo = measInfo
	return nil
}
// ParseDataIdentifierToken define function to parse data identifier token function.
//
// Two layouts are accepted (see ShortIdentityTokenModel / LongIdentityTokenModel):
//   - 5 tokens: token1.token2.token3.token4.token7
//   - 7 tokens: token1.token2.token3.token4.token5.token6.token7
func ParseDataIdentifierToken(ctx context.Context, tx *gorm.DB, identToken string) (model.IndentityTokenModelInterface, error) {
	identSlice := strings.Split(identToken, ".")
	switch len(identSlice) {
	case 5:
		// token1.token2.token3.token4.token7
		// NOTE(review): the original branch checked for 4 tokens yet read
		// identSlice[6], which always panicked with index out of range; the
		// short form carries five tokens, so the length check and the
		// measurement-tag index were corrected.
		shortIndentModel := &model.ShortIdentityTokenModel{
			GridTag:        identSlice[0],
			ZoneTag:        identSlice[1],
			StationTag:     identSlice[2],
			NamespacePath:  identSlice[3],
			MeasurementTag: identSlice[4],
		}
		if err := FillingShortTokenModel(ctx, tx, shortIndentModel); err != nil {
			return nil, err
		}
		return shortIndentModel, nil
	case 7:
		// token1.token2.token3.token4.token5.token6.token7
		longIndentModel := &model.LongIdentityTokenModel{
			GridTag:        identSlice[0],
			ZoneTag:        identSlice[1],
			StationTag:     identSlice[2],
			NamespacePath:  identSlice[3],
			ComponentTag:   identSlice[4],
			AttributeGroup: identSlice[5],
			MeasurementTag: identSlice[6],
		}
		if err := FillingLongTokenModel(ctx, tx, longIndentModel); err != nil {
			return nil, err
		}
		return longIndentModel, nil
	default:
		return nil, fmt.Errorf("invalid identity token format: %s", identToken)
	}
}

View File

@ -0,0 +1,97 @@
// Package database define database operation functions
package database
import (
"context"
"errors"
"fmt"
"strings"
"modelRT/diagram"
"modelRT/model"
"gorm.io/gorm"
)
// ParseAttrToken define return the attribute model interface based on the input attribute token. doc addr http://server.baseware.net:6875/books/product-design-docs/page/d6baf
func ParseAttrToken(ctx context.Context, tx *gorm.DB, attrToken, clientToken string) (model.AttrModelInterface, error) {
	redisStr := diagram.NewRedisString(ctx, attrToken, clientToken, 10, true)
	tokens := strings.Split(attrToken, ".")

	switch len(tokens) {
	case 4:
		// short form: group name and key are the last two tokens
		shortInfo := &model.ShortAttrInfo{
			AttrGroupName: tokens[2],
			AttrKey:       tokens[3],
		}
		if err := FillingShortAttrModel(ctx, tx, tokens, shortInfo); err != nil {
			return nil, err
		}
		value, err := redisStr.Get(attrToken)
		if err != nil {
			return nil, err
		}
		shortInfo.AttrValue = value
		return shortInfo, nil
	case 7:
		// long form: group name and key are the last two tokens
		longInfo := &model.LongAttrInfo{
			AttrGroupName: tokens[5],
			AttrKey:       tokens[6],
		}
		if err := FillingLongAttrModel(ctx, tx, tokens, longInfo); err != nil {
			return nil, err
		}
		value, err := redisStr.Get(attrToken)
		if err != nil {
			return nil, err
		}
		longInfo.AttrValue = value
		return longInfo, nil
	default:
		return nil, errors.New("invalid attribute token format")
	}
}
// FillingShortAttrModel define filling short attribute model info. The first
// attribute item is used as the component nspath for the lookup.
func FillingShortAttrModel(ctx context.Context, tx *gorm.DB, attrItems []string, attrModel *model.ShortAttrInfo) error {
	comp, err := QueryComponentByNSPath(ctx, tx, attrItems[0])
	if err != nil {
		return err
	}
	attrModel.ComponentInfo = &comp
	return nil
}
// FillingLongAttrModel define filling long attribute model info.
// The grid, zone, station and component lookups run in that order; the first
// query error aborts the fill and is returned unchanged.
func FillingLongAttrModel(ctx context.Context, tx *gorm.DB, attrItems []string, attrModel *model.LongAttrInfo) error {
	gridInfo, err := QueryGridByTagName(ctx, tx, attrItems[0])
	if err != nil {
		return err
	}
	attrModel.GridInfo = &gridInfo

	zoneInfo, err := QueryZoneByTagName(ctx, tx, attrItems[1])
	if err != nil {
		return err
	}
	attrModel.ZoneInfo = &zoneInfo

	stationInfo, err := QueryStationByTagName(ctx, tx, attrItems[2])
	if err != nil {
		return err
	}
	attrModel.StationInfo = &stationInfo

	compInfo, err := QueryComponentByNSPath(ctx, tx, attrItems[3])
	if err != nil {
		return err
	}
	attrModel.ComponentInfo = &compInfo

	return nil
}
// QueryAttrValueFromRedis define query attribute value from redis by attrKey.
// NOTE(review): this is a stub — it only prints the key to stdout and always
// returns the empty string. Replace with a real redis lookup (or remove in
// favour of diagram.NewRedisString used by ParseAttrToken) before production
// use; the Println is debug output and should not ship.
func QueryAttrValueFromRedis(attrKey string) string {
	fmt.Println(attrKey)
	return ""
}

View File

@ -6,6 +6,8 @@ import (
"sync"
"time"
"modelRT/logger"
"gorm.io/driver/postgres"
"gorm.io/gorm"
)
@ -36,7 +38,7 @@ func InitPostgresDBInstance(ctx context.Context, PostgresDBURI string) *gorm.DB
func initPostgresDBClient(ctx context.Context, PostgresDBURI string) *gorm.DB {
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
db, err := gorm.Open(postgres.Open(PostgresDBURI), &gorm.Config{})
db, err := gorm.Open(postgres.Open(PostgresDBURI), &gorm.Config{Logger: logger.NewGormLogger()})
if err != nil {
panic(err)
}

View File

@ -3,39 +3,40 @@ package database
import (
"context"
"fmt"
"time"
"modelRT/config"
"modelRT/orm"
"github.com/gofrs/uuid"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
// QueryCircuitDiagramComponentFromDB return the result of query circuit diagram component info order by page id from postgresDB
func QueryCircuitDiagramComponentFromDB(ctx context.Context, tx *gorm.DB, pool *ants.PoolWithFunc, logger *zap.Logger) error {
var components []orm.Component
// ctx超时判断
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
// func QueryCircuitDiagramComponentFromDB(ctx context.Context, tx *gorm.DB, pool *ants.PoolWithFunc) (map[uuid.UUID]string, error) {
// var components []orm.Component
// // ctx超时判断
// cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
// defer cancel()
result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Find(&components)
if result.Error != nil {
logger.Error("query circuit diagram component info failed", zap.Error(result.Error))
return result.Error
}
// result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Find(&components)
// if result.Error != nil {
// logger.Error(ctx, "query circuit diagram component info failed", "error", result.Error)
// return nil, result.Error
// }
for _, component := range components {
pool.Invoke(config.ModelParseConfig{
ComponentInfo: component,
Context: ctx,
})
}
return nil
}
// componentTypeMap := make(map[uuid.UUID]string, len(components))
// for _, component := range components {
// pool.Invoke(config.ModelParseConfig{
// ComponentInfo: component,
// Ctx: ctx,
// })
// componentTypeMap[component.GlobalUUID] = component.GlobalUUID.String()
// }
// return componentTypeMap, nil
// }
// QueryComponentByUUID return the result of query circuit diagram component info by uuid from postgresDB
func QueryComponentByUUID(ctx context.Context, tx *gorm.DB, uuid uuid.UUID) (orm.Component, error) {
@ -43,10 +44,133 @@ func QueryComponentByUUID(ctx context.Context, tx *gorm.DB, uuid uuid.UUID) (orm
// ctx超时判断
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
result := tx.WithContext(cancelCtx).
Where("global_uuid = ?", uuid).
Clauses(clause.Locking{Strength: "UPDATE"}).
First(&component)
result := tx.WithContext(cancelCtx).Where("global_uuid = ? ", uuid).Clauses(clause.Locking{Strength: "UPDATE"}).Find(&component)
if result.Error != nil {
return orm.Component{}, result.Error
}
return component, nil
}
// QueryComponentByCompTag looks up the circuit diagram component row whose
// tag column equals the given tag, taking a row-level FOR UPDATE lock.
func QueryComponentByCompTag(ctx context.Context, tx *gorm.DB, tag string) (orm.Component, error) {
	var comp orm.Component
	err := tx.WithContext(ctx).
		Clauses(clause.Locking{Strength: "UPDATE"}).
		Where("tag = ?", tag).
		First(&comp).Error
	if err != nil {
		return orm.Component{}, err
	}
	return comp, nil
}
// QueryComponentByCompTags fetches the components matching any of the given
// tags and returns them keyed by component tag. Only the global_uuid, tag and
// model_name columns are selected; an empty tag list yields an empty map.
func QueryComponentByCompTags(ctx context.Context, tx *gorm.DB, tags []string) (map[string]orm.Component, error) {
	compModelMap := make(map[string]orm.Component, len(tags))
	if len(tags) == 0 {
		return compModelMap, nil
	}
	var rows []orm.Component
	if err := tx.WithContext(ctx).
		Model(orm.Component{}).
		Select("global_uuid,tag, model_name").
		Where("tag IN ?", tags).
		Find(&rows).Error; err != nil {
		return nil, err
	}
	for _, row := range rows {
		compModelMap[row.Tag] = row
	}
	return compModelMap, nil
}
// QueryComponentByPageID return the result of query circuit diagram component info by page id from postgresDB
// NOTE(review): a page can plausibly contain many components, but Find into a
// single struct keeps at most one row with no ordering guarantee — confirm
// callers really expect a single component here.
func QueryComponentByPageID(ctx context.Context, tx *gorm.DB, uuid uuid.UUID) (orm.Component, error) {
	var component orm.Component
	// bound the query to a 5 second timeout
	cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	// SELECT ... FOR UPDATE filtered by page_id; Find (unlike First) does not
	// return gorm.ErrRecordNotFound when nothing matches — the zero value is returned.
	result := tx.WithContext(cancelCtx).Where("page_id = ? ", uuid).Clauses(clause.Locking{Strength: "UPDATE"}).Find(&component)
	if result.Error != nil {
		return orm.Component{}, result.Error
	}
	return component, nil
}
// QueryComponentByNSPath returns the circuit diagram component whose nspath
// column matches the given namespace path, locked FOR UPDATE, with a 5 second
// timeout. Find (unlike First) leaves the zero value and a nil error when no
// row matches.
//
// Fix: the predicate previously filtered on the NAME column even though the
// function and its parameter are about the component namespace path, so every
// lookup matched the wrong column. It now filters on nspath — the column name
// this table exposes for the namespace path (see GenAllAttributeMap's select
// list).
func QueryComponentByNSPath(ctx context.Context, tx *gorm.DB, nsPath string) (orm.Component, error) {
	var component orm.Component
	// ctx timeout judgment
	cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	result := tx.WithContext(cancelCtx).Where("nspath = ?", nsPath).Clauses(clause.Locking{Strength: "UPDATE"}).Find(&component)
	if result.Error != nil {
		return orm.Component{}, result.Error
	}
	return component, nil
}
// QueryLongIdentModelInfoByToken resolves a long identity token: it first
// loads the component matching the example-struct condition, then the
// measurement owned by that component whose tag equals measTag. Both reads
// take FOR UPDATE row locks and share a 5 second timeout.
//
// Fix: the component lookup previously passed &condition — a
// **orm.Component — as an inline gorm condition; gorm expects the struct
// pointer itself, so the condition is now applied via Where(condition). The
// misspelled local "meauserment" is also renamed.
func QueryLongIdentModelInfoByToken(ctx context.Context, tx *gorm.DB, measTag string, condition *orm.Component) (*orm.Component, *orm.Measurement, error) {
	var resultComp orm.Component
	var measurement orm.Measurement
	// ctx timeout judgment
	cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Where(condition).First(&resultComp)
	if result.Error != nil {
		if result.Error == gorm.ErrRecordNotFound {
			return nil, nil, fmt.Errorf("component record not found by %v:%w", condition, result.Error)
		}
		return nil, nil, result.Error
	}
	// measurements are scoped by owning component UUID plus measurement tag
	filterMap := map[string]any{"component_uuid": resultComp.GlobalUUID, "tag": measTag}
	result = tx.WithContext(cancelCtx).Where(filterMap).Clauses(clause.Locking{Strength: "UPDATE"}).First(&measurement)
	if result.Error != nil {
		if result.Error == gorm.ErrRecordNotFound {
			return nil, nil, fmt.Errorf("measurement record not found by %v:%w", filterMap, result.Error)
		}
		return nil, nil, result.Error
	}
	return &resultComp, &measurement, nil
}
// QueryShortIdentModelInfoByToken resolves a short identity token: it first
// loads the component matching the example-struct condition, then the
// measurement owned by that component whose tag equals measTag. Both reads
// take FOR UPDATE row locks and share a 5 second timeout.
//
// Fix: the component lookup previously passed &condition — a
// **orm.Component — as an inline gorm condition; gorm expects the struct
// pointer itself, so the condition is now applied via Where(condition). The
// misspelled local "meauserment" is also renamed.
// NOTE(review): this body is identical to QueryLongIdentModelInfoByToken —
// consider collapsing the two into one helper.
func QueryShortIdentModelInfoByToken(ctx context.Context, tx *gorm.DB, measTag string, condition *orm.Component) (*orm.Component, *orm.Measurement, error) {
	var resultComp orm.Component
	var measurement orm.Measurement
	// ctx timeout judgment
	cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Where(condition).First(&resultComp)
	if result.Error != nil {
		if result.Error == gorm.ErrRecordNotFound {
			return nil, nil, fmt.Errorf("component record not found by %v:%w", condition, result.Error)
		}
		return nil, nil, result.Error
	}
	// measurements are scoped by owning component UUID plus measurement tag
	filterMap := map[string]any{"component_uuid": resultComp.GlobalUUID, "tag": measTag}
	result = tx.WithContext(cancelCtx).Where(filterMap).Clauses(clause.Locking{Strength: "UPDATE"}).First(&measurement)
	if result.Error != nil {
		if result.Error == gorm.ErrRecordNotFound {
			return nil, nil, fmt.Errorf("measurement record not found by %v:%w", filterMap, result.Error)
		}
		return nil, nil, result.Error
	}
	return &resultComp, &measurement, nil
}

View File

@ -0,0 +1,27 @@
// Package database define database operation functions
package database
import (
"modelRT/orm"
"gorm.io/gorm"
)
// GenAllAttributeMap loads every component's global_uuid, station_id, tag and
// nspath columns and returns a map from the component's global UUID string to
// its attribute set (component tag + namespace path).
func GenAllAttributeMap(db *gorm.DB) (map[string]orm.AttributeSet, error) {
	var comps []orm.Component
	if err := db.Model(&orm.Component{}).Select("global_uuid", "station_id", "tag", "nspath").Find(&comps).Error; err != nil {
		return nil, err
	}
	attrMap := make(map[string]orm.AttributeSet, len(comps))
	for _, comp := range comps {
		attrMap[comp.GlobalUUID.String()] = orm.AttributeSet{
			CompTag:    comp.Tag,
			CompNSPath: comp.NSPath,
		}
	}
	return attrMap, nil
}

View File

@ -0,0 +1,110 @@
// Package database define database operation functions
package database
import (
"modelRT/orm"
"gorm.io/gorm"
)
// ZoneWithParent is a scan target pairing a zone row with its parent grid's
// tag name (grid_tag column, filled by a join).
// NOTE(review): GetFullMeasurementSet uses an identical anonymous struct
// instead of this type — confirm the exported type is still needed.
type ZoneWithParent struct {
	orm.Zone
	GridTag string `gorm:"column:grid_tag"`
}
// StationWithParent is a scan target pairing a row with its parent zone's
// tag name (zone_tag column, filled by a join).
// NOTE(review): despite the name, this embeds orm.Zone — the station join in
// GetFullMeasurementSet scans into an embedded orm.Station, so this looks
// like a copy-paste slip; confirm whether orm.Station was intended before
// changing it (the type is unused in this file).
type StationWithParent struct {
	orm.Zone
	ZoneTag string `gorm:"column:zone_tag"`
}
// GetFullMeasurementSet walks the grid → zone → station → component →
// measurement hierarchy and returns a MeasurementSet holding the tag list of
// every level plus the parent→children tag maps between adjacent levels.
//
// NOTE(review): each of the five queries below deliberately ignores its own
// error (the `err == nil` guards simply skip that level on failure), so the
// returned error is always nil and callers may receive a partially filled
// set — confirm this best-effort behavior is intended.
func GetFullMeasurementSet(db *gorm.DB) (*orm.MeasurementSet, error) {
	mSet := &orm.MeasurementSet{
		GridToZoneTags:       make(map[string][]string),
		ZoneToStationTags:    make(map[string][]string),
		StationToCompNSPaths: make(map[string][]string),
		CompNSPathToCompTags: make(map[string][]string),
		CompTagToMeasTags:    make(map[string][]string),
	}
	// level 1: all grid tag names (empty tags skipped)
	var grids []orm.Grid
	if err := db.Table("grid").Select("tagname").Scan(&grids).Error; err == nil {
		for _, g := range grids {
			if g.TAGNAME != "" {
				mSet.AllGridTags = append(mSet.AllGridTags, g.TAGNAME)
			}
		}
	}
	// level 2: zones joined with their parent grid's tagname
	var zones []struct {
		orm.Zone
		GridTag string `gorm:"column:grid_tag"`
	}
	if err := db.Table("zone").
		Select("zone.*, grid.tagname as grid_tag").
		Joins("left join grid on zone.grid_id = grid.id").
		Scan(&zones).Error; err == nil {
		for _, z := range zones {
			mSet.AllZoneTags = append(mSet.AllZoneTags, z.TAGNAME)
			// left join: orphan zones have an empty grid_tag and are listed but not mapped
			if z.GridTag != "" {
				mSet.GridToZoneTags[z.GridTag] = append(mSet.GridToZoneTags[z.GridTag], z.TAGNAME)
			}
		}
	}
	// level 3: stations joined with their parent zone's tagname
	var stations []struct {
		orm.Station
		ZoneTag string `gorm:"column:zone_tag"`
	}
	if err := db.Table("station").
		Select("station.*, zone.tagname as zone_tag").
		Joins("left join zone on station.zone_id = zone.id").
		Scan(&stations).Error; err == nil {
		for _, s := range stations {
			mSet.AllStationTags = append(mSet.AllStationTags, s.TAGNAME)
			if s.ZoneTag != "" {
				mSet.ZoneToStationTags[s.ZoneTag] = append(mSet.ZoneToStationTags[s.ZoneTag], s.TAGNAME)
			}
		}
	}
	// level 4: components joined with their parent station's tagname;
	// both nspath and tag lists are collected, plus nspath→tag mapping
	var comps []struct {
		orm.Component
		StationTag string `gorm:"column:station_tag"`
	}
	if err := db.Table("component").
		Select("component.*, station.tagname as station_tag").
		Joins("left join station on component.station_id = station.id").
		Scan(&comps).Error; err == nil {
		for _, c := range comps {
			mSet.AllCompNSPaths = append(mSet.AllCompNSPaths, c.NSPath)
			mSet.AllCompTags = append(mSet.AllCompTags, c.Tag)
			if c.StationTag != "" {
				mSet.StationToCompNSPaths[c.StationTag] = append(mSet.StationToCompNSPaths[c.StationTag], c.NSPath)
			}
			if c.NSPath != "" {
				mSet.CompNSPathToCompTags[c.NSPath] = append(mSet.CompNSPathToCompTags[c.NSPath], c.Tag)
			}
		}
	}
	// "bay" is the only config tag — presumably a fixed interval/bay marker; TODO confirm
	mSet.AllConfigTags = append(mSet.AllConfigTags, "bay")
	// level 5: measurements joined with their owning component's tag
	var measurements []struct {
		orm.Measurement
		CompTag string `gorm:"column:comp_tag"`
	}
	if err := db.Table("measurement").
		Select("measurement.*, component.tag as comp_tag").
		Joins("left join component on measurement.component_uuid = component.global_uuid").
		Scan(&measurements).Error; err == nil {
		for _, m := range measurements {
			mSet.AllMeasTags = append(mSet.AllMeasTags, m.Tag)
			if m.CompTag != "" {
				mSet.CompTagToMeasTags[m.CompTag] = append(mSet.CompTagToMeasTags[m.CompTag], m.Tag)
			}
		}
	}
	return mSet, nil
}

26
database/query_grid.go Normal file
View File

@ -0,0 +1,26 @@
// Package database define database operation functions
package database
import (
"context"
"time"
"modelRT/orm"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
// QueryGridByTagName returns the circuit diagram grid row whose TAGNAME
// column equals tagName, locked FOR UPDATE, from postgresDB.
func QueryGridByTagName(ctx context.Context, tx *gorm.DB, tagName string) (orm.Grid, error) {
	// bound the lookup to five seconds
	boundedCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	var grid orm.Grid
	if res := tx.WithContext(boundedCtx).Where("TAGNAME = ? ", tagName).Clauses(clause.Locking{Strength: "UPDATE"}).Find(&grid); res.Error != nil {
		return orm.Grid{}, res.Error
	}
	return grid, nil
}

View File

@ -0,0 +1,62 @@
// Package database define database operation functions
package database
import (
"context"
"time"
"modelRT/orm"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
// QueryMeasurementByID returns the circuit diagram measurement row whose id
// column equals id, locked FOR UPDATE, with a five second timeout.
func QueryMeasurementByID(ctx context.Context, tx *gorm.DB, id int64) (orm.Measurement, error) {
	// bound the lookup to five seconds
	boundedCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	var measurement orm.Measurement
	err := tx.WithContext(boundedCtx).
		Clauses(clause.Locking{Strength: "UPDATE"}).
		Where("id = ?", id).
		First(&measurement).Error
	if err != nil {
		return orm.Measurement{}, err
	}
	return measurement, nil
}
// QueryMeasurementByToken queries one measurement row matching the given
// token from postgresDB, locking the row FOR UPDATE with a 5 second timeout.
//
// Fix: the WHERE clause was " = ?" — an empty column name, i.e. SQL that is
// invalid on every call. The token is now matched against the measurement
// tag column, the identifier other lookups in this package use for
// measurements (see the component_uuid/tag filter in
// QueryLongIdentModelInfoByToken) — confirm the token→column mapping.
// TODO parse token to avoid SQL injection
func QueryMeasurementByToken(ctx context.Context, tx *gorm.DB, token string) (orm.Measurement, error) {
	var measurement orm.Measurement
	// ctx timeout judgment
	cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	result := tx.WithContext(cancelCtx).
		Where("tag = ?", token).
		Clauses(clause.Locking{Strength: "UPDATE"}).
		First(&measurement)
	if result.Error != nil {
		return orm.Measurement{}, result.Error
	}
	return measurement, nil
}
// GetAllMeasurements loads every measurement row from postgresDB under a
// FOR UPDATE lock, bounded by a five second timeout.
func GetAllMeasurements(ctx context.Context, tx *gorm.DB) ([]orm.Measurement, error) {
	// bound the scan to five seconds
	boundedCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	var rows []orm.Measurement
	if err := tx.WithContext(boundedCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Find(&rows).Error; err != nil {
		return nil, err
	}
	return rows, nil
}

View File

@ -0,0 +1,81 @@
// Package database define database operation functions
package database
import (
"context"
"fmt"
"time"
"modelRT/orm"
"gorm.io/gorm"
)
// queryFirstByID scans into dest the first row whose id column equals id.
func queryFirstByID(ctx context.Context, tx *gorm.DB, id any, dest any) error {
	return tx.WithContext(ctx).Where("id = ?", id).First(dest).Error
}
// queryFirstByTag scans into dest the first row whose tagname column equals tagName.
func queryFirstByTag(ctx context.Context, tx *gorm.DB, tagName any, dest any) error {
	return tx.WithContext(ctx).Where("tagname = ?", tagName).First(dest).Error
}
// QueryNodeInfoByID return the result of query circuit diagram node info by id and level from postgresDB.
// It resolves the node with the given id at hierarchy level (0=grid, 1=zone,
// 2=station, 3/4=component) together with its parent, returning
// (previousNode, currentNode, error). The grid level has no parent, so
// previousNode stays nil there; level 5 is intentionally unsupported and
// yields (nil, nil, nil); any other level is rejected with an error.
func QueryNodeInfoByID(ctx context.Context, tx *gorm.DB, id int64, level int) (orm.CircuitDiagramNodeInterface, orm.CircuitDiagramNodeInterface, error) {
	// bound every lookup below to one shared 5 second timeout
	cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	var currentNodeInfo orm.CircuitDiagramNodeInterface
	var previousNodeInfo orm.CircuitDiagramNodeInterface
	var err error
	switch level {
	case 0:
		// current: Grid (root level — no previous node)
		var grid orm.Grid
		err = queryFirstByID(cancelCtx, tx, id, &grid)
		currentNodeInfo = grid
	case 1:
		// current:Zone,Previous:Grid
		var zone orm.Zone
		err = queryFirstByID(cancelCtx, tx, id, &zone)
		currentNodeInfo = zone
		if err == nil {
			var grid orm.Grid
			err = queryFirstByID(cancelCtx, tx, zone.GridID, &grid)
			previousNodeInfo = grid
		}
	case 2:
		// current:Station,Previous:Zone
		var station orm.Station
		err = queryFirstByID(cancelCtx, tx, id, &station)
		currentNodeInfo = station
		if err == nil {
			var zone orm.Zone
			err = queryFirstByID(cancelCtx, tx, station.ZoneID, &zone)
			previousNodeInfo = zone
		}
	case 3, 4:
		// current:Component, Previous:Station
		var component orm.Component
		err = queryFirstByID(cancelCtx, tx, id, &component)
		currentNodeInfo = component
		if err == nil {
			var station orm.Station
			// TODO look the station up by station id instead of station name
			err = queryFirstByTag(cancelCtx, tx, component.StationName, &station)
			previousNodeInfo = station
		}
	case 5:
		// TODO[NONEED-ISSUE] no add/delete requirement for this level yet #2
		return nil, nil, nil
	default:
		return nil, nil, fmt.Errorf("unsupported node level: %d", level)
	}
	if err != nil {
		return nil, nil, err
	}
	return previousNodeInfo, currentNodeInfo, nil
}

View File

@ -5,26 +5,25 @@ import (
"context"
"time"
"modelRT/logger"
"modelRT/orm"
"go.uber.org/zap"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
// QueryAllPages return the all page info of the circuit diagram query by grid_id and zone_id and station_id
func QueryAllPages(ctx context.Context, tx *gorm.DB, logger *zap.Logger, gridID, zoneID, stationID int64) ([]orm.Page, error) {
func QueryAllPages(ctx context.Context, tx *gorm.DB, gridID, zoneID, stationID int64) ([]orm.Page, error) {
var pages []orm.Page
// ctx超时判断
// ctx timeout judgment
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
result := tx.Model(&orm.Page{}).WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Select(`"page".id, "page".Name, "page".status,"page".context`).Joins(`inner join "station" on "station".id = "page".station_id`).Joins(`inner join "zone" on "zone".id = "station".zone_id`).Joins(`inner join "grid" on "grid".id = "zone".grid_id`).Where(`"grid".id = ? and "zone".id = ? and "station".id = ?`, gridID, zoneID, stationID).Scan(&pages)
if result.Error != nil {
logger.Error("query circuit diagram pages by gridID and zoneID and stationID failed", zap.Int64("grid_id", gridID), zap.Int64("zone_id", zoneID), zap.Int64("station_id", stationID), zap.Error(result.Error))
logger.Error(ctx, "query circuit diagram pages by gridID and zoneID and stationID failed", "grid_id", gridID, "zone_id", zoneID, "station_id", stationID, "error", result.Error)
return nil, result.Error
}
return pages, nil
}

View File

@ -0,0 +1,78 @@
// Package database define database operation functions
package database
import (
"context"
"errors"
"fmt"
"time"
"modelRT/logger"
"modelRT/orm"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
// QueryArrtibuteRecordByUUID return the attribute table record info of the component attribute by uuid.
// NOTE(review): the name and doc promise an attribute-record lookup by UUID,
// but the body is a verbatim copy of QueryAllPages — it takes grid/zone/
// station IDs, joins page→station→zone→grid and returns pages. Confirm
// whether this is a copy-paste placeholder awaiting a real implementation;
// the exported name also misspells "Attribute" as "Arrtibute".
func QueryArrtibuteRecordByUUID(ctx context.Context, tx *gorm.DB, gridID, zoneID, stationID int64) ([]orm.Page, error) {
	var pages []orm.Page
	// ctx timeout judgment
	cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	// SELECT ... FOR UPDATE over page rows joined up to grid, filtered by all three IDs
	result := tx.Model(&orm.Page{}).WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Select(`"page".id, "page".Name, "page".status,"page".context`).Joins(`inner join "station" on "station".id = "page".station_id`).Joins(`inner join "zone" on "zone".id = "station".zone_id`).Joins(`inner join "grid" on "grid".id = "zone".grid_id`).Where(`"grid".id = ? and "zone".id = ? and "station".id = ?`, gridID, zoneID, stationID).Scan(&pages)
	if result.Error != nil {
		logger.Error(ctx, "query circuit diagram pages by gridID and zoneID and stationID failed", "grid_id", gridID, "zone_id", zoneID, "station_id", stationID, "error", result.Error)
		return nil, result.Error
	}
	return pages, nil
}
// GetProjectNameByTagAndGroupName returns the project name for the given tag
// and meta_model (group name) pair; a missing row yields a descriptive error.
func GetProjectNameByTagAndGroupName(db *gorm.DB, tag string, groupName string) (string, error) {
	var project orm.ProjectManager
	// fetch only the name column, filtered on both key columns
	queryErr := db.Select("name").Where("tag = ? AND meta_model = ?", tag, groupName).First(&project).Error
	if queryErr == nil {
		return project.Name, nil
	}
	if errors.Is(queryErr, gorm.ErrRecordNotFound) {
		return "", fmt.Errorf("project not found with tag: %s and model: %s", tag, groupName)
	}
	return "", queryErr
}
// BatchGetProjectNames resolves project names for several (tag, group name)
// identifiers in a single query and returns them keyed by identifier.
// Identifiers with no matching row are simply absent from the result; an
// empty input yields (nil, nil).
// NOTE(review): this filters on group_name while
// GetProjectNameByTagAndGroupName filters on meta_model — confirm which
// column is intended.
func BatchGetProjectNames(db *gorm.DB, identifiers []orm.ProjectIdentifier) (map[orm.ProjectIdentifier]string, error) {
	if len(identifiers) == 0 {
		return nil, nil
	}
	// build the row-value tuples for the (tag, group_name) IN (...) filter
	queryArgs := make([][]any, 0, len(identifiers))
	for _, ident := range identifiers {
		queryArgs = append(queryArgs, []any{ident.Tag, ident.GroupName})
	}
	var projects []orm.ProjectManager
	if err := db.Select("tag", "group_name", "name").Where("(tag, group_name) IN ?", queryArgs).Find(&projects).Error; err != nil {
		return nil, err
	}
	resultMap := make(map[orm.ProjectIdentifier]string, len(projects))
	for _, proj := range projects {
		resultMap[orm.ProjectIdentifier{Tag: proj.Tag, GroupName: proj.GroupName}] = proj.Name
	}
	return resultMap, nil
}

26
database/query_station.go Normal file
View File

@ -0,0 +1,26 @@
// Package database define database operation functions
package database
import (
"context"
"time"
"modelRT/orm"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
// QueryStationByTagName returns the circuit diagram station row whose
// TAGNAME column equals tagName, locked FOR UPDATE, from postgresDB.
func QueryStationByTagName(ctx context.Context, tx *gorm.DB, tagName string) (orm.Station, error) {
	// bound the lookup to five seconds
	boundedCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	var station orm.Station
	if res := tx.WithContext(boundedCtx).Where("TAGNAME = ? ", tagName).Clauses(clause.Locking{Strength: "UPDATE"}).Find(&station); res.Error != nil {
		return orm.Station{}, res.Error
	}
	return station, nil
}

View File

@ -3,77 +3,145 @@ package database
import (
"context"
"fmt"
"time"
"modelRT/constants"
"modelRT/diagram"
"modelRT/logger"
"modelRT/orm"
"modelRT/sql"
"github.com/gofrs/uuid"
"go.uber.org/zap"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
// QueryTopologicByPageID return the topologic info of the circuit diagram query by pageID
func QueryTopologicByPageID(ctx context.Context, tx *gorm.DB, logger *zap.Logger, pageID int64) ([]orm.Topologic, error) {
// QueryTopologic return the topologic info of the circuit diagram
func QueryTopologic(ctx context.Context, tx *gorm.DB) ([]orm.Topologic, error) {
var topologics []orm.Topologic
// ctx超时判断
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Raw(sql.RecursiveSQL, pageID).Scan(&topologics)
result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Raw(sql.RecursiveSQL, constants.UUIDNilStr).Scan(&topologics)
if result.Error != nil {
logger.Error("query circuit diagram topologic info by pageID failed", zap.Int64("pageID", pageID), zap.Error(result.Error))
logger.Error(ctx, "query circuit diagram topologic info by start node uuid failed", "start_node_uuid", constants.UUIDNilStr, "error", result.Error)
return nil, result.Error
}
return topologics, nil
}
// QueryTopologicFromDB return the result of query topologic info from postgresDB
func QueryTopologicFromDB(ctx context.Context, tx *gorm.DB, logger *zap.Logger, gridID, zoneID, stationID int64) error {
allPages, err := QueryAllPages(ctx, tx, logger, gridID, zoneID, stationID)
// QueryTopologicFromDB return the result of query topologic info from DB
func QueryTopologicFromDB(ctx context.Context, tx *gorm.DB) (*diagram.MultiBranchTreeNode, error) {
topologicInfos, err := QueryTopologic(ctx, tx)
if err != nil {
logger.Error("query all pages info failed", zap.Int64("gridID", gridID), zap.Int64("zoneID", zoneID), zap.Int64("stationID", stationID), zap.Error(err))
return err
logger.Error(ctx, "query topologic info failed", "error", err)
return nil, err
}
for _, page := range allPages {
topologicInfos, err := QueryTopologicByPageID(ctx, tx, logger, page.ID)
if err != nil {
logger.Error("query topologic info by pageID failed", zap.Int64("pageID", page.ID), zap.Error(err))
return err
}
err = InitCircuitDiagramTopologic(page.ID, topologicInfos)
if err != nil {
logger.Error("init topologic failed", zap.Error(err))
return err
}
tree, err := BuildMultiBranchTree(topologicInfos)
if err != nil {
logger.Error(ctx, "init topologic failed", "error", err)
return nil, err
}
return nil
return tree, nil
}
// InitCircuitDiagramTopologic return circuit diagram topologic info from postgres
func InitCircuitDiagramTopologic(pageID int64, topologicNodes []orm.Topologic) error {
var rootVertex uuid.UUID
func InitCircuitDiagramTopologic(topologicNodes []orm.Topologic) error {
var rootVertex *diagram.MultiBranchTreeNode
for _, node := range topologicNodes {
if node.UUIDFrom.IsNil() {
rootVertex = node.UUIDTo
if node.UUIDFrom == constants.UUIDNil {
rootVertex = diagram.NewMultiBranchTree(node.UUIDFrom)
break
}
}
topologicSet := diagram.NewGraph(rootVertex)
if rootVertex == nil {
return fmt.Errorf("root vertex is nil")
}
for _, node := range topologicNodes {
if node.UUIDFrom.IsNil() {
continue
if node.UUIDFrom == constants.UUIDNil {
nodeVertex := diagram.NewMultiBranchTree(node.UUIDTo)
rootVertex.AddChild(nodeVertex)
}
// TODO 增加对 node.flag值的判断
topologicSet.AddEdge(node.UUIDFrom, node.UUIDTo)
}
diagram.StoreGraphMap(pageID, topologicSet)
node := rootVertex
for _, nodeVertex := range node.Children {
nextVertexs := make([]*diagram.MultiBranchTreeNode, 0)
nextVertexs = append(nextVertexs, nodeVertex)
}
return nil
}
// IntervalBoundaryDetermine reports whether the component identified by uuid
// can act as a bay/interval boundary. Currently a stub: it always returns
// true and only prints the bit-split of a sample type encoding.
// TODO current transformers are not split into their own bay; busbars, cast
// busbars and transformers act as the bay boundary elements.
func IntervalBoundaryDetermine(uuid uuid.UUID) bool {
	diagram.GetComponentMap(uuid.String())
	// TODO check whether the component's type marks a bay boundary
	// TODO 0xA1B2C3D4 encoding: the high 16 bits are FFFF for component types
	// that can form a bay and 0000 for ordinary components; in the low 16 bits
	// the first byte is the first-level type (busbar PT, bus tie/section,
	// incoming line, ...) and the second byte the concrete type within it
	// (voltage transformer, disconnector, earthing switch, arrester,
	// live-line indicator, ...).
	num := uint32(0xA1B2C3D4) // sample eight-hex-digit encoding
	high16 := uint16(num >> 16)
	fmt.Printf("原始值: 0x%X\n", num)     // prints: 0xA1B2C3D4
	fmt.Printf("高十六位: 0x%X\n", high16) // prints: 0xA1B2
	return true
}
// BuildMultiBranchTree builds the multi-branch topology tree from the edge
// list and returns its root: the virtual node whose ID is constants.UUIDNil.
//
// Fixes over the previous version:
//   - the node-interning loop guarded BOTH inserts with
//     topo.UUIDTo != constants.UUIDNil, so an edge ending in the nil UUID
//     never interned its (valid) UUIDFrom node — later lookups then returned
//     a nil parent and panicked on parent.Children;
//   - every edge leaving the nil root allocated a fresh root node and
//     overwrote nodeMap[constants.UUIDNil], discarding children attached by
//     earlier root edges, so only the last top-level subtree survived.
//
// Nodes are now interned exactly once, root included, so all root edges
// accumulate on the same node. An edge pointing at the nil UUID still
// produces a detached terminator child, as before.
func BuildMultiBranchTree(topologics []orm.Topologic) (*diagram.MultiBranchTreeNode, error) {
	nodeMap := make(map[uuid.UUID]*diagram.MultiBranchTreeNode, len(topologics)*2)
	// intern returns the single shared node for id, creating it on first use.
	intern := func(id uuid.UUID) *diagram.MultiBranchTreeNode {
		if node, ok := nodeMap[id]; ok {
			return node
		}
		node := &diagram.MultiBranchTreeNode{
			ID:       id,
			Children: make([]*diagram.MultiBranchTreeNode, 0),
		}
		nodeMap[id] = node
		return node
	}
	for _, topo := range topologics {
		parent := intern(topo.UUIDFrom)
		var child *diagram.MultiBranchTreeNode
		if topo.UUIDTo == constants.UUIDNil {
			// terminator edge: keep a detached nil-ID child rather than
			// linking back to the root node (which also carries the nil ID).
			child = &diagram.MultiBranchTreeNode{ID: topo.UUIDTo}
		} else {
			child = intern(topo.UUIDTo)
		}
		child.Parent = parent
		parent.Children = append(parent.Children, child)
	}
	// the virtual root only exists if some edge started from the nil UUID
	root, exists := nodeMap[constants.UUIDNil]
	if !exists {
		return nil, fmt.Errorf("root node not found")
	}
	return root, nil
}

26
database/query_zone.go Normal file
View File

@ -0,0 +1,26 @@
// Package database define database operation functions
package database
import (
"context"
"time"
"modelRT/orm"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
// QueryZoneByTagName returns the circuit diagram zone row whose TAGNAME
// column equals tagName, locked FOR UPDATE, from postgresDB.
func QueryZoneByTagName(ctx context.Context, tx *gorm.DB, tagName string) (orm.Zone, error) {
	// bound the lookup to five seconds
	boundedCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	var zone orm.Zone
	if res := tx.WithContext(boundedCtx).Where("TAGNAME = ? ", tagName).Clauses(clause.Locking{Strength: "UPDATE"}).Find(&zone); res.Error != nil {
		return orm.Zone{}, res.Error
	}
	return zone, nil
}

View File

@ -4,10 +4,9 @@ package database
import (
"context"
"fmt"
"strconv"
"time"
"modelRT/constant"
"modelRT/common/errcode"
"modelRT/network"
"modelRT/orm"
@ -16,13 +15,13 @@ import (
)
// UpdateComponentIntoDB define update component info of the circuit diagram into DB
func UpdateComponentIntoDB(ctx context.Context, tx *gorm.DB, componentInfo network.ComponentUpdateInfo) (int64, error) {
func UpdateComponentIntoDB(ctx context.Context, tx *gorm.DB, componentInfo network.ComponentUpdateInfo) (string, error) {
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
globalUUID, err := uuid.FromString(componentInfo.UUID)
if err != nil {
return -1, fmt.Errorf("format uuid from string type failed:%w", err)
return "", fmt.Errorf("format uuid from string type failed:%w", err)
}
var component orm.Component
@ -30,33 +29,31 @@ func UpdateComponentIntoDB(ctx context.Context, tx *gorm.DB, componentInfo netwo
if result.Error != nil || result.RowsAffected == 0 {
err := result.Error
if result.RowsAffected == 0 {
err = fmt.Errorf("%w:please check update component conditions", constant.ErrUpdateRowZero)
err = fmt.Errorf("%w:please check update component conditions", errcode.ErrUpdateRowZero)
}
return -1, fmt.Errorf("query component info failed:%w", err)
return "", fmt.Errorf("query component info failed:%w", err)
}
updateParams := orm.Component{
GlobalUUID: globalUUID,
GridID: strconv.FormatInt(componentInfo.GridID, 10),
ZoneID: strconv.FormatInt(componentInfo.ZoneID, 10),
StationID: strconv.FormatInt(componentInfo.StationID, 10),
PageID: componentInfo.PageID,
Tag: componentInfo.Tag,
ComponentType: componentInfo.ComponentType,
Name: componentInfo.Name,
Context: componentInfo.Context,
Op: componentInfo.Op,
Ts: time.Now(),
GlobalUUID: globalUUID,
GridName: componentInfo.GridName,
ZoneName: componentInfo.ZoneName,
StationName: componentInfo.StationName,
Tag: componentInfo.Tag,
Name: componentInfo.Name,
Context: componentInfo.Context,
Op: componentInfo.Op,
Ts: time.Now(),
}
result = tx.Model(&orm.Component{}).WithContext(cancelCtx).Where("id = ?", component.ID).Updates(&updateParams)
result = tx.Model(&orm.Component{}).WithContext(cancelCtx).Where("GLOBAL_UUID = ?", component.GlobalUUID).Updates(&updateParams)
if result.Error != nil || result.RowsAffected == 0 {
err := result.Error
if result.RowsAffected == 0 {
err = fmt.Errorf("%w:please check update component conditions", constant.ErrUpdateRowZero)
err = fmt.Errorf("%w:please check update component conditions", errcode.ErrUpdateRowZero)
}
return -1, fmt.Errorf("update component info failed:%w", err)
return "", fmt.Errorf("update component info failed:%w", err)
}
return component.ID, nil
return component.GlobalUUID.String(), nil
}

View File

@ -6,7 +6,7 @@ import (
"fmt"
"time"
"modelRT/constant"
"modelRT/common/errcode"
"modelRT/model"
jsoniter "github.com/json-iterator/go"
@ -33,7 +33,7 @@ func UpdateModelIntoDB(ctx context.Context, tx *gorm.DB, componentID int64, comp
if result.Error != nil || result.RowsAffected == 0 {
err := result.Error
if result.RowsAffected == 0 {
err = fmt.Errorf("%w:please check where conditions", constant.ErrUpdateRowZero)
err = fmt.Errorf("%w:please check where conditions", errcode.ErrUpdateRowZero)
}
return err
}

View File

@ -6,7 +6,8 @@ import (
"fmt"
"time"
"modelRT/constant"
"modelRT/common/errcode"
"modelRT/constants"
"modelRT/network"
"modelRT/orm"
@ -21,9 +22,9 @@ func UpdateTopologicIntoDB(ctx context.Context, tx *gorm.DB, pageID int64, chang
defer cancel()
switch changeInfo.ChangeType {
case constant.UUIDFromChangeType:
case constants.UUIDFromChangeType:
result = tx.WithContext(cancelCtx).Model(&orm.Topologic{}).Where("page_id = ? and uuid_from = ? and uuid_to = ?", pageID, changeInfo.OldUUIDFrom, changeInfo.OldUUIDTo).Updates(orm.Topologic{UUIDFrom: changeInfo.NewUUIDFrom})
case constant.UUIDToChangeType:
case constants.UUIDToChangeType:
var delTopologic orm.Topologic
result = tx.WithContext(cancelCtx).Model(&orm.Topologic{}).Where("page_id = ? and uuid_to = ?", pageID, changeInfo.NewUUIDTo).Find(&delTopologic)
@ -38,20 +39,18 @@ func UpdateTopologicIntoDB(ctx context.Context, tx *gorm.DB, pageID int64, chang
if result.Error != nil || result.RowsAffected == 0 {
err := result.Error
if result.RowsAffected == 0 {
err = fmt.Errorf("%w:please check delete topologic where conditions", constant.ErrDeleteRowZero)
err = fmt.Errorf("%w:please check delete topologic where conditions", errcode.ErrDeleteRowZero)
}
return fmt.Errorf("del old topologic link by new_uuid_to failed:%w", err)
}
}
result = tx.WithContext(cancelCtx).Model(&orm.Topologic{}).Where("page_id = ? and uuid_from = ? and uuid_to = ?", pageID, changeInfo.OldUUIDFrom, changeInfo.OldUUIDTo).Updates(&orm.Topologic{UUIDTo: changeInfo.NewUUIDTo})
case constant.UUIDAddChangeType:
case constants.UUIDAddChangeType:
topologic := orm.Topologic{
PageID: pageID,
Flag: changeInfo.Flag,
UUIDFrom: changeInfo.NewUUIDFrom,
UUIDTo: changeInfo.NewUUIDTo,
Comment: changeInfo.Comment,
}
result = tx.WithContext(cancelCtx).Create(&topologic)
}
@ -61,7 +60,7 @@ func UpdateTopologicIntoDB(ctx context.Context, tx *gorm.DB, pageID int64, chang
if result.Error != nil || result.RowsAffected == 0 {
err := result.Error
if result.RowsAffected == 0 {
err = fmt.Errorf("%w:please check update topologic where conditions", constant.ErrUpdateRowZero)
err = fmt.Errorf("%w:please check update topologic where conditions", errcode.ErrUpdateRowZero)
}
return fmt.Errorf("insert or update topologic link failed:%w", err)
}

351
deploy/deploy.md Normal file
View File

@ -0,0 +1,351 @@
# 项目依赖服务部署指南
本项目依赖于 $\text{PostgreSQL}$ 数据库和 $\text{Redis Stack Server}$(包含 $\text{Redisearch}$ 等模块)。本部署文档将使用 $\text{Docker}$ 容器化技术部署这两个依赖服务。
## 前提条件
1. 已安装 $\text{Docker}$
2. 下载相关容器镜像
3. 确保主机的 $\text{5432}$ 端口($\text{Postgres}$)和 $\text{6379}$ 端口($\text{Redis}$)未被占用
### 1\. 部署 PostgreSQL 数据库
使用官方的 `postgres:13.16` 镜像,并设置默认的用户、密码和端口
#### 1.1 部署命令
运行以下命令启动 $\text{PostgreSQL}$ 容器
```bash
docker run --name postgres \
-e POSTGRES_USER=postgres \
-e POSTGRES_PASSWORD=coslight \
-p 5432:5432 \
-d postgres:13.16
```
#### 1.2 连接信息
| 参数 | 值 | 说明 |
| :--- | :--- | :--- |
| **容器名称** | `postgres` | 容器名 |
| **镜像版本** | `postgres:13.16` | 镜像名 |
| **主机端口** | `5432` | 外部应用连接使用的端口 |
| **用户名** | `postgres` | 默认超级用户 |
| **密码** | `coslight` | 配置的密码 |
#### 1.3 状态检查
要确认容器是否正在运行,请执行
```bash
# 检查容器启动状态
docker ps -a | grep postgres
# 检查容器启动日志信息
docker logs postgres
```
### 2\. 部署 Redis Stack Server
我们将使用 `redis/redis-stack-server:latest` 镜像该镜像内置了 $\text{Redisearch}$ 模块,用于 $\text{ModelRT}$ 项目中补全功能
#### 2.1 部署命令
运行以下命令启动 $\text{Redis Stack Server}$ 容器
```bash
docker run --name redis -p 6379:6379 \
-d redis/redis-stack-server:latest
```
#### 2.2 连接信息
| 参数 | 值 | 说明 |
| :--- | :--- | :--- |
| **容器名称** | `redis` | 容器名 |
| **镜像版本** | `redis/redis-stack-server:latest` | 镜像名 |
| **主机端口** | `6379` | 外部应用连接使用的端口 |
| **地址** | `localhost:6379` | |
| **密码** | **无** | 默认未设置密码 |
> **注意:** 生产环境中建议使用 `-e REDIS_PASSWORD=<your_secure_password>` 参数来设置 $\text{Redis}$ 访问密码
#### 2.3 状态检查
要确认容器是否正在运行,请执行
```bash
# 检查容器启动状态
docker ps -a | grep redis
# 检查容器启动日志信息
docker logs redis
```
#### 2.4 数据注入
测试数据注入
##### 2.4.1 Postgres数据注入
```SQL
insert into public.grid(id,tagname,name,description,op,ts) VALUES (1, 'grid1', '网格1', '测试网格1', -1,CURRENT_TIMESTAMP);
insert into public.zone(id,grid_id,tagname,name,description,op,ts) VALUES (1, 1,'zone1', '区域1_1', '测试区域1_1', -1,CURRENT_TIMESTAMP);
insert into public.station(id,zone_id,tagname,name,description,is_local,op,ts) VALUES (1, 1,'station1', '站1_1_1', '测试站1_1_1', true, -1,CURRENT_TIMESTAMP),
(2, 1, 'station2', '站1_1_2', '测试站1_1_2', false, -1, CURRENT_TIMESTAMP);
INSERT INTO public.topologic(flag, uuid_from, uuid_to, context, description, op, ts)
VALUES
(1, '00000000-0000-0000-0000-000000000000', '70c190f2-8a60-42a9-b143-ec5f87e0aa6b', '{}', '', 1, CURRENT_TIMESTAMP),
(1, '70c190f2-8a60-42a9-b143-ec5f87e0aa6b', '10f155cf-bd27-4557-85b2-d126b6e2657f', '{}', '', 1, CURRENT_TIMESTAMP),
(1, '70c190f2-8a60-42a9-b143-ec5f87e0aa6b', 'e32bc0be-67f4-4d79-a5da-eaa40a5bd77d', '{}', '', 1, CURRENT_TIMESTAMP),
(1, '70c190f2-8a60-42a9-b143-ec5f87e0aa6b', '70c190f2-8a75-42a9-b166-ec5f87e0aa6b', '{}', '', 1, CURRENT_TIMESTAMP),
(1, 'e32bc0be-67f4-4d79-a5da-eaa40a5bd77d', '70c200f2-8a75-42a9-c166-bf5f87e0aa6b', '{}', '', 1, CURRENT_TIMESTAMP),
(1, 'e32bc0be-67f4-4d79-a5da-eaa40a5bd77d', '968dd6e6-faec-4f78-b58a-d6e68426b09e', '{}', '', 1, CURRENT_TIMESTAMP),
(1, 'e32bc0be-67f4-4d79-a5da-eaa40a5bd77d', '968dd6e6-faec-4f78-b58a-d6e68426b08e', '{}', '', 1, CURRENT_TIMESTAMP);
INSERT INTO public.bay (bay_uuid, name, tag, type, unom, fla, capacity, description, in_service, state, grid, zone, station, business, context, from_uuids, to_uuids, dev_protect, dev_fault_record, dev_status, dev_dyn_sense, dev_instruct, dev_etc, components, op, ts)
VALUES (
'18e71a24-694a-43fa-93a7-c4d02a27d1bc',
'', '', '',
-1, -1, -1,
'',
false,
-1,
'', '', '',
'{}',
'{}',
'[]',
'[]',
'[]',
'[]',
'[]',
'[]',
'[]',
'[]',
ARRAY['968dd6e6-faec-4f78-b58a-d6e68426b09e', '968dd6e6-faec-4f78-b58a-d6e68426b08e']::uuid[],
-1,
CURRENT_TIMESTAMP
);
INSERT INTO public.component (global_uuid, nspath, tag, name, model_name, description, grid, zone, station, station_id, type, in_service, state, status, connection, label, context, op, ts)
VALUES
(
'968dd6e6-faec-4f78-b58a-d6e68426b09e',
'ns1', 'tag1', 'component1', 'bus_1', '',
'grid1', 'zone1', 'station1', 1,
-1,
false,
-1, -1,
'{}',
'{}',
'{}',
-1,
CURRENT_TIMESTAMP
),
(
'968dd6e6-faec-4f78-b58a-d6e68426b08e',
'ns2', 'tag2', 'component2', 'bus_1', '',
'grid1', 'zone1', 'station1', 1,
-1,
false,
-1, -1,
'{}',
'{}',
'{}',
-1,
CURRENT_TIMESTAMP
),
(
'968dd6e6-faec-4f78-b58a-d6e88426b09e',
'ns3', 'tag3', 'component3', 'bus_1', '',
'grid1', 'zone1', 'station2', 2,
-1,
false,
-1, -1,
'{}',
'{}',
'{}',
-1,
CURRENT_TIMESTAMP
);
INSERT INTO public.measurement (id, tag, name, type, size, data_source, event_plan, bay_uuid, component_uuid, op, ts)
VALUES
(3, 'I11_C_rms', '45母甲侧互连电流C相1', -1, 200, '{"type": 1, "io_address": {"device": "ssu001", "channel": "TM1", "station": "001"}}', '{"cause": {"up": 55.0, "down": 45.0}, "action": {"command": "warning", "parameters": ["I段母线甲侧互连电流C相1"]}, "enable": true}', '18e71a24-694a-43fa-93a7-c4d02a27d1bc', '968dd6e6-faec-4f78-b58a-d6e68426b09e', -1, CURRENT_TIMESTAMP),
(4, 'I11_B_rms', '45母甲侧互连电流B相1', -1, 300, '{"type": 1, "io_address": {"device": "ssu001", "channel": "TM2", "station": "001"}}', '{"cause": {"upup": 65, "downdown": 35}, "action": {"command": "warning", "parameters": ["I段母线甲侧互连电流B相1"]}, "enable": true}', '18e71a24-694a-43fa-93a7-c4d02a27d1bc', '968dd6e6-faec-4f78-b58a-d6e68426b09e', -1, CURRENT_TIMESTAMP),
(5, 'I11_A_rms', '45母甲侧互连电流A相1', -1, 300, '{"type": 1, "io_address": {"device": "ssu001", "channel": "TM3", "station": "001"}}', '{"cause": {"up": 55, "down": 45, "upup": 65, "downdown": 35}, "action": {"command": "warning", "parameters": ["I段母线甲侧互连电流A相1"]}, "enable": true}', '18e71a24-694a-43fa-93a7-c4d02a27d1bc', '968dd6e6-faec-4f78-b58a-d6e68426b09e', -1, CURRENT_TIMESTAMP);
INSERT INTO public.project_manager (id, name, tag, meta_model, group_name, link_type, check_state, is_public, op, ts
) VALUES
(1, 'component', 'component', '', 'component', 0,
'{"checkState": [{"name": "global_uuid", "type": "UUID", "checked": 1, "isVisible": 1, "defaultValue": "", "lengthPrecision": -1}, {"name": "nspath", "type": "VARCHAR(32)", "checked": 1, "isVisible": 1, "defaultValue": "", "lengthPrecision": 32}, {"name": "tag", "type": "VARCHAR(32)", "checked": 1, "isVisible": 1, "defaultValue": "null", "lengthPrecision": 32}, {"name": "name", "type": "VARCHAR(64)", "checked": 1, "isVisible": 1, "defaultValue": "null", "lengthPrecision": 64}, {"name": "description", "type": "VARCHAR(512)", "checked": 1, "isVisible": 1, "defaultValue": "", "lengthPrecision": 512}, {"name": "station", "type": "VARCHAR(64)", "checked": 1, "isVisible": 1, "defaultValue": "null", "lengthPrecision": 64}, {"name": "zone", "type": "VARCHAR(64)", "checked": 1, "isVisible": 1, "defaultValue": "null", "lengthPrecision": 64}, {"name": "grid", "type": "VARCHAR(64)", "checked": 1, "isVisible": 1, "defaultValue": "null", "lengthPrecision": 64}, {"name": "type", "type": "INTEGER", "checked": 1, "isVisible": 0, "defaultValue": "0", "lengthPrecision": -1}, {"name": "in_service", "type": "SMALLINT", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "state", "type": "INTEGER", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "connection", "type": "JSONB", "checked": 1, "isVisible": 1, "defaultValue": "{}", "lengthPrecision": -1}, {"name": "label", "type": "JSONB", "checked": 1, "isVisible": 1, "defaultValue": "{}", "lengthPrecision": -1}, {"name": "context", "type": "JSONB", "checked": 1, "isVisible": 0, "defaultValue": "{}", "lengthPrecision": -1}, {"name": "op", "type": "INTEGER", "checked": 1, "isVisible": 0, "defaultValue": "-1", "lengthPrecision": -1}, {"name": "ts", "type": "TIMESTAMP", "checked": 1, "isVisible": 0, "defaultValue": "null", "lengthPrecision": -1}, {"name": "model_name", "type": "VARCHAR(64)", "checked": 1, "isVisible": 0, "defaultValue": "null", "lengthPrecision": 64}, {"name": 
"status", "type": "SMALLINT", "checked": 1, "isVisible": 0, "defaultValue": "null", "lengthPrecision": -1}]}', TRUE, -1, CURRENT_TIMESTAMP
),
(2, 'bus_bus_1_base_extend', 'bus_1', 'bus', 'base_extend', 0,
'{"checkState": [{"name": "bus_num", "type": "INTEGER", "checked": 1, "isVisible": 0, "defaultValue": "1", "lengthPrecision": -1}, {"name": "unom_kv", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "null", "lengthPrecision": -1}]}', FALSE, -1, CURRENT_TIMESTAMP
),
(3, 'bus_bus_1_model', 'bus_1', 'bus', 'model', 0,
'{"checkState": [{"name": "ui_percent", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "100", "lengthPrecision": -1}, {"name": "ui_kv", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "35", "lengthPrecision": -1}, {"name": "ui_pa", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "stability_rated_current", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "1000", "lengthPrecision": -1}, {"name": "stability_dynamic_steady_current", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "40", "lengthPrecision": -1}, {"name": "load_adjustment_min", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "100", "lengthPrecision": -1}, {"name": "load_adjustment_max", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "100", "lengthPrecision": -1}, {"name": "bus_type", "type": "VARCHAR(10)", "checked": 1, "isVisible": 1, "defaultValue": "PQ母线", "lengthPrecision": 10}, {"name": "csc_s3_max", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "csc_s3_min", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "csc_i3_max", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "csc_i3_min", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "csc_z3s_max", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0.05", "lengthPrecision": -1}, {"name": "csc_z3s_min", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0.1", "lengthPrecision": -1}, {"name": "csc_s1_max", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "csc_s1_min", "type": 
"DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "csc_i1_max", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "csc_i1_min", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "csc_z1s_max", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0.05", "lengthPrecision": -1}, {"name": "csc_z1s_min", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0.1", "lengthPrecision": -1}, {"name": "csc_base_voltage", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "37", "lengthPrecision": -1}, {"name": "csc_base_capacity", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "100", "lengthPrecision": -1}]}', FALSE, -1, CURRENT_TIMESTAMP
),
(4, 'bus_bus_1_stable', 'bus_1', 'bus', 'stable', 0,
'{"checkState": [{"name": "uvpw_threshold_percent", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "95", "lengthPrecision": -1}, {"name": "uvpw_runtime", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "10", "lengthPrecision": -1}, {"name": "uvw_threshold_percent", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "90", "lengthPrecision": -1}, {"name": "uvw_runtime", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "10", "lengthPrecision": -1}, {"name": "ovpw_threshold_percent", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "105", "lengthPrecision": -1}, {"name": "ovpw_runtime", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "60", "lengthPrecision": -1}, {"name": "ovw_threshold_percent", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "110", "lengthPrecision": -1}, {"name": "ovw_runtime", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "10", "lengthPrecision": -1}, {"name": "umargin_pmax", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "umargin_qmax", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "umargin_ulim", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "90", "lengthPrecision": -1}, {"name": "umargin_plim_percent", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "15", "lengthPrecision": -1}, {"name": "umargin_qlim_percent", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "15", "lengthPrecision": -1}, {"name": "umargin_ulim_percent", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "15", "lengthPrecision": -1}]}', FALSE, -1, CURRENT_TIMESTAMP);
INSERT INTO public.bus_bus_1_stable (id, global_uuid, attribute_group, uvpw_threshold_percent, uvpw_runtime, uvw_threshold_percent, uvw_runtime, ovpw_threshold_percent, ovpw_runtime, ovw_threshold_percent, ovw_runtime,
umargin_pmax, umargin_qmax, umargin_ulim, umargin_plim_percent, umargin_qlim_percent, umargin_ulim_percent
) VALUES (
1,
'968dd6e6-faec-4f78-b58a-d6e68426b08e',
'stable',
95,
10,
90,
10,
105,
60,
110,
10,
0,
0,
90,
15,
15,
15
);
INSERT INTO public.bus_bus_1_model (id, global_uuid, attribute_group,
ui_percent, ui_kv, ui_pa, stability_rated_current, stability_dynamic_steady_current, load_adjustment_min, load_adjustment_max, bus_type, csc_s3_max, csc_s3_min, csc_i3_max, csc_i3_min, csc_z3s_max, csc_z3s_min, csc_s1_max, csc_s1_min, csc_i1_max, csc_i1_min, csc_z1s_max, csc_z1s_min, csc_base_voltage, csc_base_capacity
) VALUES (
1,
'968dd6e6-faec-4f78-b58a-d6e68426b08e',
'model',
100,
35,
0,
1000,
40,
100,
100,
'PQ母线',
0,
0,
0,
0,
0.05,
0.1,
0,
0,
0,
0,
0.05,
0.1,
37,
100
);
INSERT INTO public.bus_bus_1_base_extend (id, global_uuid, attribute_group,
bus_num, unom_kv
) VALUES (
1,
'968dd6e6-faec-4f78-b58a-d6e68426b08e',
'base_extend',
1,
NULL
);
```
##### 2.4.2 Redis数据注入
Redis数据脚本
```shell
deploy/redis-test-data/measurments-recommend/measurement_injection.go
```
运行脚本向 Redis 导入数据
```shell
go run deploy/redis-test-data/measurments-recommend/measurement_injection.go
```
### 3\. 启动 ModelRT 服务
#### 3.1 配置服务配置文件
以下表格为配置文件参数说明表
| 类别 | 参数名 | 作用描述 | 示例值 |
| :--- | :--- | :--- | :--- |
| **Postgres** | `host` | PostgreSQL 数据库服务器的 $\text{IP}$ 地址或域名。 | `"192.168.1.101"` |
| | `port` | PostgreSQL 数据库服务器的端口号。 | `5432` |
| | `database` | 连接的数据库名称。 | `"demo"` |
| | `user` | 连接数据库所使用的用户名。 | `"postgres"` |
| | `password` | 连接数据库所使用的密码。 | `"coslight"` |
| **Kafka** | `servers` | Kafka 集群的 $\text{Bootstrap Server}$ 地址列表(通常是 $\text{host:port}$ 形式,多个地址用逗号分隔)。 | `"localhost:9092"` |
| | `port` | Kafka 服务器的端口号。 | `9092` |
| | `group_id` | 消费者组 $\text{ID}$,用于标识和管理一组相关的消费者。 | `"modelRT"` |
| | `topic` | Kafka 消息的主题名称。 | `""` |
| | `auto_offset_reset` | 消费者首次启动或 $\text{Offset}$ 无效时,从哪个位置开始消费(如 `earliest`、`latest`)。 | `"earliest"` |
| | `enable_auto_commit` | 是否自动提交 $\text{Offset}$。设为 $\text{false}$ 通常用于手动控制 $\text{Offset}$ 提交。 | `"false"` |
| | `read_message_time_duration` | 读取消息时的超时或等待时间。 | `"0.5s"` |
| **Logger (Zap)** | `mode` | 日志模式,通常为 `development`(开发)或 `production`(生产)。影响日志格式。 | `"development"` |
| | `level` | 最低日志级别(如 $\text{debug, info, warn, error}$)。 | `"debug"` |
| | `filepath` | 日志文件的输出路径和名称格式(`%s` 会被替换为日期等)。 | `"/Users/douxu/Workspace/coslight/modelRT/modelRT-%s.log"` |
| | `maxsize` | 单个日志文件最大大小(单位:$\text{MB}$)。 | `1` |
| | `maxbackups` | 保留旧日志文件的最大个数。 | `5` |
| | `maxage` | 保留旧日志文件的最大天数。 | `30` |
| | `compress` | 是否压缩备份的日志文件。 | `false` |
| **Ants Pool** | `parse_concurrent_quantity` | 用于解析任务的协程池最大并发数量。 | `10` |
| | `rtd_receive_concurrent_quantity` | 用于实时数据接收任务的协程池最大并发数量。 | `10` |
| **Locker Redis** | `addr` | 分布式锁服务所使用的 $\text{Redis}$ 地址。 | `"127.0.0.1:6379"` |
| | `password` | $\text{Locker Redis}$ 的密码。 | `""` |
| | `db` | $\text{Locker Redis}$ 使用的数据库编号。 | `1` |
| | `poolsize` | $\text{Locker Redis}$ 连接池的最大连接数。 | `50` |
| | `timeout` | $\text{Locker Redis}$ 连接操作的超时时间(单位:毫秒)。 | `10` |
| **Storage Redis** | `addr` | 数据存储服务所使用的 $\text{Redis}$ 地址(例如 $\text{Redisearch}$)。 | `"127.0.0.1:6379"` |
| | `password` | $\text{Storage Redis}$ 的密码。 | `""` |
| | `db` | $\text{Storage Redis}$ 使用的数据库编号。 | `0` |
| | `poolsize` | $\text{Storage Redis}$ 连接池的最大连接数。 | `50` |
| | `timeout` | $\text{Storage Redis}$ 连接操作的超时时间(单位:毫秒)。 | `10` |
| **Base Config** | `grid_id` | 项目所操作的默认电网 $\text{ID}$。 | `1` |
| | `zone_id` | 项目所操作的默认区域 $\text{ID}$。 | `1` |
| | `station_id` | 项目所操作的默认变电站 $\text{ID}$。 | `1` |
| **Service Config** | `service_name` | 服务名称,用于日志、监控等标识。 | `"modelRT"` |
| | `secret_key` | 服务内部使用的秘钥,用于签名或认证。 | `"modelrt_key"` |
| **DataRT API** | `host` | 外部 $\text{DataRT}$ 服务的主机地址。 | `"http://127.0.0.1"` |
| | `port` | $\text{DataRT}$ 服务的端口号。 | `8888` |
| | `polling_api` | 轮询数据的 $\text{API}$ 路径。 | `"datart/getPointData"` |
| | `polling_api_method` | 调用该 $\text{API}$ 使用的 $\text{HTTP}$ 方法。 | `"GET"` |
#### 3.2 编译 ModelRT 服务
```bash
go build -o model-rt main.go
```
#### 3.3 启动服务
使用编译好的二进制文件进行启动
```bash
./model-rt
```
#### 3.4 检测服务启动日志
在发现控制台输出如下信息`starting ModelRT server`
后即代表服务启动成功
### 4\. 后续操作(停止与清理)
#### 4.1 停止容器
```bash
docker stop postgres redis
```
#### 4.2 删除容器(删除后数据将丢失)
```bash
docker rm postgres redis
```

View File

@ -0,0 +1,19 @@
# --- Build stage: compile a static modelrt binary ---
FROM golang:1.24-alpine AS builder
WORKDIR /app
# Copy module files first so the dependency download layer is cached
# independently of source changes.
COPY go.mod .
COPY go.sum .
RUN GOPROXY="https://goproxy.cn,direct" go mod download
COPY . .
# CGO disabled so the binary runs on plain alpine; -s -w strips symbol/debug
# info to shrink the image.
RUN CGO_ENABLED=0 go build -ldflags="-s -w" -o modelrt main.go
# --- Runtime stage: minimal alpine image running as a non-root user ---
FROM alpine:latest
WORKDIR /app
# USER_ID is overridable at build time to match the host user for volume perms.
ARG USER_ID=1000
RUN adduser -D -u ${USER_ID} modelrt
COPY --from=builder /app/modelrt ./modelrt
COPY configs/config.example.yaml ./configs/config.example.yaml
RUN chown -R modelrt:modelrt /app
RUN chmod +x /app/modelrt
USER modelrt
CMD ["/app/modelrt", "-modelRT_config_dir=/app/configs"]

View File

@ -0,0 +1,327 @@
// Package main implement redis test data injection
package main
import (
"context"
"fmt"
"log"
"github.com/RediSearch/redisearch-go/v2/redisearch"
"github.com/redis/go-redis/v9"
)
// ac is the shared RediSearch autocompleter used to populate the suggestion
// dictionary for hierarchy path completion.
var ac *redisearch.Autocompleter

// init initializes the package-level Autocompleter against a local Redis
// instance using the shared suggestion dictionary name.
func init() {
	// ac = redisearch.NewAutocompleterFromPool(pool, redisSearchDictName)
	ac = redisearch.NewAutocompleter("localhost:6379", redisSearchDictName)
}
// Names of the global Redis sets and the per-level key-format templates for
// the injected grid -> zone -> station -> nspath -> component tag ->
// measurement test hierarchy.
const (
	gridKeysSet            = "grid_tag_keys"
	zoneKeysSet            = "zone_tag_keys"
	stationKeysSet         = "station_tag_keys"
	componentNSPathKeysSet = "component_nspath_keys"
	componentTagKeysSet    = "component_tag_keys"
	configKeysSet          = "config_keys"
	measurementTagKeysSet  = "measurement_tag_keys"
	// Grid -> Zone (e.g., grid1_zones_keys)
	gridZoneSetKeyFormat = "grid%d_zone_tag_keys"
	// Zone -> Station (e.g., zone1_1_stations_keys)
	zoneStationSetKeyFormat = "zone%d_%d_station_tag_keys"
	// Station -> NSPath (e.g., station1_1_1_components_nspath_keys)
	stationNSPathKeyFormat = "station%d_%d_%d_component_nspath_keys"
	// NSPath -> CompTag (e.g., ns1_1_1_1_components_tag_keys)
	nsPathCompTagKeyFormat = "ns%d_%d_%d_%d_component_tag_keys"
	// CompTag -> Measurement (e.g., comptag1_1_1_1_1_measurement_keys)
	compTagMeasKeyFormat = "comptag%d_%d_%d_%d_%d_measurement_tag_keys"
)

// Autocomplete dictionary settings: the RediSearch dictionary name shared
// with the ModelRT service and the default score for every suggestion.
const (
	redisSearchDictName = "search_suggestions_dict"
	defaultScore        = 1.0
)
// configMetrics lists component attribute-group names.
// NOTE(review): not referenced by any function visible in this file —
// confirm whether it is used elsewhere or is dead test data.
var configMetrics = []any{
	"component", "base_extend", "rated", "setup", "model",
	"stable", "bay", "craft", "integrity", "behavior",
}
// bulkInsertAllHierarchySets seeds Redis with the full test hierarchy in
// three steps: the static top-level sets, the per-level dynamic membership
// sets, and the RediSearch autocomplete suggestions. It stops and returns at
// the first failing step, wrapping the underlying error.
func bulkInsertAllHierarchySets(ctx context.Context, rdb *redis.Client) error {
	log.Println("starting bulk insertion of Redis hierarchy sets")
	if err := insertStaticSets(ctx, rdb); err != nil {
		return fmt.Errorf("static set insertion failed: %w", err)
	}
	if err := insertDynamicHierarchy(ctx, rdb); err != nil {
		return fmt.Errorf("dynamic hierarchy insertion failed: %w", err)
	}
	if err := insertAllHierarchySuggestions(ac); err != nil {
		// bug fix: this branch previously reused the "dynamic hierarchy
		// insertion failed" message, making the two failure modes
		// indistinguishable in logs.
		return fmt.Errorf("hierarchy suggestion insertion failed: %w", err)
	}
	log.Println("bulk insertion complete")
	return nil
}
// insertStaticSets populates the fixed top-level Redis sets: the three grid
// tags, the nine grid/zone tag combinations, and the config key set.
func insertStaticSets(ctx context.Context, rdb *redis.Client) error {
	// grid_keys
	if err := rdb.SAdd(ctx, gridKeysSet, "grid1", "grid2", "grid3").Err(); err != nil {
		return fmt.Errorf("sadd failed for %s: %w", gridKeysSet, err)
	}
	// zone_keys (3x3 = 9 members)
	zoneTags := make([]any, 0, 9)
	for gridIdx := 1; gridIdx <= 3; gridIdx++ {
		for zoneIdx := 1; zoneIdx <= 3; zoneIdx++ {
			zoneTags = append(zoneTags, fmt.Sprintf("zone%d_%d", gridIdx, zoneIdx))
		}
	}
	if err := rdb.SAdd(ctx, zoneKeysSet, zoneTags...).Err(); err != nil {
		return fmt.Errorf("sadd failed for %s: %w", zoneKeysSet, err)
	}
	// config_keys
	if err := rdb.SAdd(ctx, configKeysSet, "bay").Err(); err != nil {
		return fmt.Errorf("sadd failed for %s: %w", configKeysSet, err)
	}
	log.Println("Static sets (grid_keys, zone_keys, config_keys) inserted.")
	return nil
}
// insertDynamicHierarchy builds the per-level membership sets for the
// generated 3x3x3x3x3x3 hierarchy (grid -> zone -> station -> nspath ->
// component tag -> measurement). Each level writes a set linking a parent key
// to its three children, and every generated key is also accumulated into the
// corresponding global top-level set, written once at the end.
func insertDynamicHierarchy(ctx context.Context, rdb *redis.Client) error {
	allStationKeys := make([]any, 0, 27)
	allNSPathKeys := make([]any, 0, 81)
	allCompTagKeys := make([]any, 0, 243)
	allMeasurementTagKeys := make([]any, 0, 729)
	// S: Grid Prefix (1-3)
	for S := 1; S <= 3; S++ {
		// Grid-Zone Set Key: gridS_zones_keys
		gridZoneKey := fmt.Sprintf(gridZoneSetKeyFormat, S)
		gridZoneMembers := make([]any, 0, 3)
		// Y: Zone Index (1-3)
		for Y := 1; Y <= 3; Y++ {
			zoneID := fmt.Sprintf("%d_%d", S, Y)
			zoneMember := "zone" + zoneID
			gridZoneMembers = append(gridZoneMembers, zoneMember)
			// Zone-Station Set Key: zoneS_Y_stations_keys
			zoneStationKey := fmt.Sprintf(zoneStationSetKeyFormat, S, Y)
			zoneStationMembers := make([]any, 0, 3)
			// Z: Station Index (1-3)
			for Z := 1; Z <= 3; Z++ {
				stationID := fmt.Sprintf("%d_%d_%d", S, Y, Z)
				stationKey := "station" + stationID
				allStationKeys = append(allStationKeys, stationKey)
				zoneStationMembers = append(zoneStationMembers, stationKey)
				// Station-NSPath Set Key: stationS_Y_Z_components_nspath_keys
				stationNSPathKey := fmt.Sprintf(stationNSPathKeyFormat, S, Y, Z)
				stationNSMembers := make([]any, 0, 3)
				// D: NSPath Index (1-3)
				for D := 1; D <= 3; D++ {
					nsPathID := fmt.Sprintf("%s_%d", stationID, D)
					nsPathKey := "ns" + nsPathID
					allNSPathKeys = append(allNSPathKeys, nsPathKey)
					stationNSMembers = append(stationNSMembers, nsPathKey)
					// NSPath-CompTag Set Key: nsS_Y_Z_D_components_tag_keys
					nsCompTagKey := fmt.Sprintf(nsPathCompTagKeyFormat, S, Y, Z, D)
					nsCompTagMembers := make([]any, 0, 3)
					// I: CompTag Index (1-3)
					for I := 1; I <= 3; I++ {
						compTagID := fmt.Sprintf("%s_%d", nsPathID, I)
						compTagKey := "comptag" + compTagID
						allCompTagKeys = append(allCompTagKeys, compTagKey)
						nsCompTagMembers = append(nsCompTagMembers, compTagKey)
						// CompTag-Measurement Set Key: comptagS_Y_Z_D_I_measurement_keys
						compTagMeasKey := fmt.Sprintf(compTagMeasKeyFormat, S, Y, Z, D, I)
						compTagMeasMembers := make([]any, 0, 3)
						// M: Measurement Index (1-3)
						for M := 1; M <= 3; M++ {
							measurementID := fmt.Sprintf("%s_%d", compTagID, M)
							measurementKey := "meas" + measurementID
							allMeasurementTagKeys = append(allMeasurementTagKeys, measurementKey)
							compTagMeasMembers = append(compTagMeasMembers, measurementKey)
						}
						if err := rdb.SAdd(ctx, compTagMeasKey, compTagMeasMembers...).Err(); err != nil {
							return fmt.Errorf("sadd failed for %s: %w", compTagMeasKey, err)
						}
					}
					if err := rdb.SAdd(ctx, nsCompTagKey, nsCompTagMembers...).Err(); err != nil {
						return fmt.Errorf("sadd failed for %s: %w", nsCompTagKey, err)
					}
				}
				if err := rdb.SAdd(ctx, stationNSPathKey, stationNSMembers...).Err(); err != nil {
					return fmt.Errorf("sadd failed for %s: %w", stationNSPathKey, err)
				}
			}
			if err := rdb.SAdd(ctx, zoneStationKey, zoneStationMembers...).Err(); err != nil {
				return fmt.Errorf("sadd failed for %s: %w", zoneStationKey, err)
			}
		}
		if err := rdb.SAdd(ctx, gridZoneKey, gridZoneMembers...).Err(); err != nil {
			return fmt.Errorf("sadd failed for %s: %w", gridZoneKey, err)
		}
	}
	// Insert all accumulated members into the global top-level sets, one
	// SAdd per set.
	if err := rdb.SAdd(ctx, stationKeysSet, allStationKeys...).Err(); err != nil {
		return fmt.Errorf("sadd failed for %s: %w", stationKeysSet, err)
	}
	if err := rdb.SAdd(ctx, componentNSPathKeysSet, allNSPathKeys...).Err(); err != nil {
		return fmt.Errorf("sadd failed for %s: %w", componentNSPathKeysSet, err)
	}
	if err := rdb.SAdd(ctx, componentTagKeysSet, allCompTagKeys...).Err(); err != nil {
		return fmt.Errorf("sadd failed for %s: %w", componentTagKeysSet, err)
	}
	if err := rdb.SAdd(ctx, measurementTagKeysSet, allMeasurementTagKeys...).Err(); err != nil {
		return fmt.Errorf("sadd failed for %s: %w", measurementTagKeysSet, err)
	}
	log.Printf("inserted %d stations, %d nspaths, %d comptags, and %d measurements.\n",
		len(allStationKeys), len(allNSPathKeys), len(allCompTagKeys), len(allMeasurementTagKeys))
	return nil
}
// insertAllHierarchySuggestions generates every dotted suggestion path for
// the 3x3x3x3x3 hierarchy (grid, grid.zone, ..., down to
// grid.zone.station.nspath.comptag.measN and comptag.bay) and bulk-loads the
// terms into the RediSearch autocomplete dictionary, replacing any previous
// contents.
func insertAllHierarchySuggestions(ac *redisearch.Autocompleter) error {
	suggestions := make([]redisearch.Suggestion, 0, 10000)
	// S: grid Index (1-3)
	for S := 1; S <= 3; S++ {
		gridStr := fmt.Sprintf("grid%d", S)
		suggestions = append(suggestions, redisearch.Suggestion{Term: gridStr, Score: defaultScore})
		// Y: zone Index (1-3)
		for Y := 1; Y <= 3; Y++ {
			zoneStr := fmt.Sprintf("zone%d_%d", S, Y)
			gridZonePath := fmt.Sprintf("%s.%s", gridStr, zoneStr)
			suggestions = append(suggestions, redisearch.Suggestion{Term: gridZonePath, Score: defaultScore})
			// Z: station Index (1-3)
			for Z := 1; Z <= 3; Z++ {
				stationStr := fmt.Sprintf("station%d_%d_%d", S, Y, Z)
				gridZoneStationPath := fmt.Sprintf("%s.%s", gridZonePath, stationStr)
				suggestions = append(suggestions, redisearch.Suggestion{Term: gridZoneStationPath, Score: defaultScore})
				// D: nsPath Index (1-3)
				for D := 1; D <= 3; D++ {
					nsPathStr := fmt.Sprintf("ns%d_%d_%d_%d", S, Y, Z, D)
					gridZoneStationNSPath := fmt.Sprintf("%s.%s", gridZoneStationPath, nsPathStr)
					suggestions = append(suggestions, redisearch.Suggestion{Term: gridZoneStationNSPath, Score: defaultScore})
					// I: compTag Index (1-3)
					for I := 1; I <= 3; I++ {
						compTagStr := fmt.Sprintf("comptag%d_%d_%d_%d_%d", S, Y, Z, D, I)
						fullCompTagPath := fmt.Sprintf("%s.%s", gridZoneStationNSPath, compTagStr)
						suggestions = append(suggestions, redisearch.Suggestion{Term: fullCompTagPath, Score: defaultScore})
						fullConfigPath := fmt.Sprintf("%s.%s", fullCompTagPath, "bay")
						suggestions = append(suggestions, redisearch.Suggestion{Term: fullConfigPath, Score: defaultScore})
						// J: measTag Index (1-3)
						for J := 1; J <= 3; J++ {
							measTagStr := fmt.Sprintf("meas%d_%d_%d_%d_%d_%d", S, Y, Z, D, I, J)
							fullMeasurementPath := fmt.Sprintf("%s.%s", fullCompTagPath, measTagStr)
							suggestions = append(suggestions, redisearch.Suggestion{Term: fullMeasurementPath, Score: defaultScore})
						}
					}
				}
			}
		}
	}
	log.Printf("generated %d suggestions. starting bulk insertion into dictionary '%s'.", len(suggestions), redisSearchDictName)
	// Drop any existing suggestion dictionary before re-inserting. This stays
	// best-effort (a failure must not abort the injection), but the error was
	// previously discarded outright; log it so a stale dictionary is noticed.
	if err := ac.Delete(); err != nil {
		log.Printf("delete existing suggestion dictionary failed: %v", err)
	}
	if err := ac.AddTerms(suggestions...); err != nil {
		return fmt.Errorf("failed to add %d suggestions: %w", len(suggestions), err)
	}
	return nil
}
// deleteAllHierarchySets removes every Redis set produced by the injection:
// the seven global sets plus each per-level hierarchy set key, deleted in a
// single batched DEL call.
func deleteAllHierarchySets(ctx context.Context, rdb *redis.Client) error {
	log.Println("starting to collect all Redis Set keys for deletion...")
	delKeys := []string{
		gridKeysSet,
		zoneKeysSet,
		stationKeysSet,
		componentNSPathKeysSet,
		componentTagKeysSet,
		configKeysSet,
		measurementTagKeysSet,
	}
	// Regenerate every per-level key from the same format templates the
	// insertion used, in identical order.
	for g := 1; g <= 3; g++ {
		delKeys = append(delKeys, fmt.Sprintf(gridZoneSetKeyFormat, g))
		for z := 1; z <= 3; z++ {
			delKeys = append(delKeys, fmt.Sprintf(zoneStationSetKeyFormat, g, z))
			for st := 1; st <= 3; st++ {
				delKeys = append(delKeys, fmt.Sprintf(stationNSPathKeyFormat, g, z, st))
				for ns := 1; ns <= 3; ns++ {
					delKeys = append(delKeys, fmt.Sprintf(nsPathCompTagKeyFormat, g, z, st, ns))
					for ct := 1; ct <= 3; ct++ {
						delKeys = append(delKeys, fmt.Sprintf(compTagMeasKeyFormat, g, z, st, ns, ct))
					}
				}
			}
		}
	}
	log.Printf("collected %d unique keys. Starting batch deletion...", len(delKeys))
	deleted, err := rdb.Del(ctx, delKeys...).Result()
	if err != nil {
		return fmt.Errorf("batch deletion failed: %w", err)
	}
	log.Printf("Successfully deleted %d keys (Sets) from Redis.", deleted)
	return nil
}
// main connects to the local Redis instance, removes any previously injected
// hierarchy sets, and then performs a fresh bulk insertion of the test data.
func main() {
	rdb := redis.NewClient(&redis.Options{
		Addr:     "localhost:6379",
		Password: "",
		DB:       0,
	})
	ctx := context.Background()
	// Fail fast if Redis is unreachable.
	if err := rdb.Ping(ctx).Err(); err != nil {
		log.Fatalf("could not connect to Redis: %v", err)
	}
	log.Println("connected to Redis successfully")
	// Clean out stale keys first so repeated runs start from a known state.
	if err := deleteAllHierarchySets(ctx, rdb); err != nil {
		log.Fatalf("error delete exist set before bulk insertion: %v", err)
	}
	if err := bulkInsertAllHierarchySets(ctx, rdb); err != nil {
		log.Fatalf("error during bulk insertion: %v", err)
	}
}

View File

@ -0,0 +1,224 @@
// Package main implement redis test data injection
package main
import (
"context"
"fmt"
"log"
"math/rand"
"strconv"
"time"
"modelRT/orm"
util "modelRT/deploy/redis-test-data/util"
"github.com/redis/go-redis/v9"
"gorm.io/driver/postgres"
"gorm.io/gorm"
)
const (
	// redisAddr is the address of the Redis instance receiving the generated
	// measurement series.
	redisAddr = "localhost:6379"
)

// globalRedisClient is the shared client used by the injection pipeline.
var globalRedisClient *redis.Client

// Package-level bookkeeping shared between the generator functions and main:
// the generators (re)assign the series length, segment lengths, and the
// high/low abnormal-segment boundaries; main reads them back for validation.
var (
	highEnd, highStart, lowStart, lowEnd int
	totalLength                          int
	highSegmentLength                    int
	lowSegmentLength                     int
)
// selectRandomInt randomly returns either 0 or 2, picking which pair of
// threshold offsets drives abnormal data generation.
func selectRandomInt() int {
	choices := []int{0, 2}
	return choices[rand.Intn(len(choices))]
}
// generateMixedData define func to generate a set of floating-point data that meets specific conditions
func generateMixedData(highMin, lowMin, highBase, lowBase, baseValue, normalBase float64) []float64 {
totalLength = 500
highSegmentLength = 20
lowSegmentLength = 20
seed := time.Now().UnixNano()
source := rand.NewSource(seed)
r := rand.New(source)
data := make([]float64, totalLength)
highStart = rand.Intn(totalLength - highSegmentLength - lowSegmentLength - 1)
highEnd = highStart + highSegmentLength
lowStart = rand.Intn(totalLength-lowSegmentLength-highEnd) + highEnd
lowEnd = lowStart + lowSegmentLength
for i := 0; i < totalLength; i++ {
if i >= highStart && i < highStart+highSegmentLength {
// 数据值均大于 55.0,在 [55.5, 60.0] 范围内随机
// rand.Float64() 生成 [0.0, 1.0) 范围的浮点数
data[i] = highMin + r.Float64()*(highBase)
} else if i >= lowStart && i < lowStart+lowSegmentLength {
// 数据值均小于 45.0,在 [40.0, 44.5] 范围内随机
data[i] = lowMin + r.Float64()*(lowBase)
} else {
// 数据在 [45.0, 55.0] 范围内随机 (baseValue ± 5)
// 50 + rand.Float64() * 10 - 5
change := normalBase - r.Float64()*normalBase*2
data[i] = baseValue + change
}
}
return data
}
// generateNormalData produces a 500-point series of values uniformly spread
// in (baseValue-normalBase, baseValue+normalBase], seeded from the current
// time. It also resets the package-level totalLength to 500 for the caller's
// validation loop.
func generateNormalData(baseValue, normalBase float64) []float64 {
	totalLength = 500
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	data := make([]float64, totalLength)
	for i := range data {
		offset := normalBase - rng.Float64()*normalBase*2
		data[i] = baseValue + offset
	}
	return data
}
// main loads measurement definitions from Postgres, generates either normal
// or mixed (abnormal) sample series per measurement, validates the generated
// ranges, and writes the samples into Redis sorted sets.
func main() {
	rootCtx := context.Background()
	pgURI := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s", "192.168.1.101", 5432, "postgres", "coslight", "demo")
	postgresDBClient, err := gorm.Open(postgres.Open(pgURI))
	if err != nil {
		panic(err)
	}
	defer func() {
		sqlDB, err := postgresDBClient.DB()
		if err != nil {
			panic(err)
		}
		sqlDB.Close()
	}()
	cancelCtx, cancel := context.WithTimeout(rootCtx, 5*time.Second)
	defer cancel()
	var measurements []orm.Measurement
	result := postgresDBClient.WithContext(cancelCtx).Find(&measurements)
	if result.Error != nil {
		panic(result.Error)
	}
	log.Println("总共读取到测量点数量:", len(measurements))
	measInfos := util.ProcessMeasurements(measurements)
	globalRedisClient = util.InitRedisClient(redisAddr)
	rCancelCtx, cancel := context.WithCancel(rootCtx)
	defer cancel()
	for key, measInfo := range measInfos {
		randomType := selectRandomType()
		var datas []float64
		if randomType {
			// 生成正常数据
			log.Printf("key:%s generate normal data\n", key)
			baseValue := measInfo.BaseValue
			changes := measInfo.Changes
			normalBase := changes[0]
			normalMin := baseValue - normalBase
			normalMax := baseValue + normalBase
			datas = generateNormalData(baseValue, normalBase)
			allTrue := true
			// BUGFIX: the range check previously used `&&` (a value cannot be
			// below the min AND above the max, so it never flagged anything)
			// and skipped the final sample; use `||` and cover every index.
			for i := 0; i < totalLength; i++ {
				value := datas[i]
				if value < normalMin || value > normalMax {
					allTrue = false
				}
			}
			log.Printf("// 验证结果: 所有值是否 >= %.2f或 <= %.2f %t\n", normalMin, normalMax, allTrue)
		} else {
			// 生成异常数据
			log.Printf("key:%s generate abnormal data\n", key)
			var highMin, highBase float64
			var lowMin, lowBase float64
			var normalBase float64
			// TODO 生成一次测试数据
			changes := measInfo.Changes
			baseValue := measInfo.BaseValue
			if len(changes) == 2 {
				highMin = baseValue + changes[0]
				lowMin = baseValue + changes[1]
				highBase = changes[0]
				lowBase = changes[1]
				normalBase = changes[0]
			} else {
				randomIndex := selectRandomInt()
				highMin = baseValue + changes[randomIndex]
				lowMin = baseValue + changes[randomIndex+1]
				highBase = changes[randomIndex]
				lowBase = changes[randomIndex+1]
				normalBase = changes[0]
			}
			datas = generateMixedData(highMin, lowMin, highBase, lowBase, baseValue, normalBase)
			// log.Printf("key:%s\n datas:%v\n", key, datas)
			// The high segment must sit strictly above highMin.
			allHigh := true
			for i := highStart; i < highEnd; i++ {
				if datas[i] <= highMin {
					allHigh = false
					break
				}
			}
			log.Printf("// 验证结果 (高值段在 %d-%d): 所有值是否 > %.2f? %t\n", highStart, highEnd-1, highMin, allHigh)
			// The low segment must sit strictly below lowMin.
			allLow := true
			for i := lowStart; i < lowEnd; i++ {
				if datas[i] >= lowMin {
					allLow = false
					break
				}
			}
			log.Printf("// 验证结果 (低值段在 %d-%d): 所有值是否 < %.2f? %t\n", lowStart, lowEnd-1, lowMin, allLow)
			// Samples outside the high/low segments must stay inside
			// [lowMin, highMin].
			allTrue := true
			// BUGFIX: previously looped to totalLength-1 (skipping the last
			// sample) and used `value >= highMin && value <= lowMin`, which is
			// impossible since highMin > lowMin; flag out-of-range with `||`.
			for i := 0; i < totalLength; i++ {
				value := datas[i]
				if i < highStart || (i >= highEnd && i < lowStart) || i >= lowEnd {
					if value > highMin || value < lowMin {
						allTrue = false
					}
				}
			}
			log.Printf("// 验证结果 (正常段在 %d-%d): 所有值是否 <= %.2f或>= %.2f %t\n", 0, totalLength-1, highMin, lowMin, allTrue)
		}
		log.Printf("启动数据写入程序, Redis Key: %s, 基准值: %.4f, 变化范围: %+v\n", key, measInfo.BaseValue, measInfo.Changes)
		pipe := globalRedisClient.Pipeline()
		redisZs := make([]redis.Z, 0, totalLength)
		currentTime := time.Now().UnixNano()
		for i := range totalLength {
			// Offset the timestamp per sample so every ZSet member is unique.
			sequentialTime := currentTime + int64(i)
			z := redis.Z{
				Score:  datas[i],
				Member: strconv.FormatInt(sequentialTime, 10),
			}
			redisZs = append(redisZs, z)
		}
		log.Printf("启动数据写入程序, Redis Key: %s, 写入数据量: %d\n", key, len(redisZs))
		pipe.ZAdd(rCancelCtx, key, redisZs...)
		_, err = pipe.Exec(rCancelCtx)
		if err != nil {
			log.Printf("redis pipeline execution failed: %v\n", err)
		}
	}
}
// selectRandomType flips a fair coin deciding whether normal (true) or
// abnormal (false) data should be generated.
// The original built a throwaway two-element slice only to index it with
// rand.Intn and never use the element values; rand.Intn(2) expresses the
// same 50/50 distribution directly.
func selectRandomType() bool {
	return rand.Intn(2) != 0
}

View File

@ -0,0 +1,449 @@
// Package main implement redis test data injection
package main
import (
"context"
"fmt"
"log"
"math/rand"
"os"
"os/signal"
"strconv"
"syscall"
"time"
"modelRT/deploy/redis-test-data/util"
"modelRT/orm"
redis "github.com/redis/go-redis/v9"
"gorm.io/driver/postgres"
"gorm.io/gorm"
)
// Redis configuration.
const (
	redisAddr = "localhost:6379"
)

// globalRedisClient is the shared Redis client, initialized once in main
// and closed during graceful shutdown.
var globalRedisClient *redis.Client
// outlierConfig controls how outlier (abnormal) segments are injected into
// otherwise-normal generated data.
type outlierConfig struct {
	Enabled      bool    // whether outlier segments are injected at all
	Count        int     // number of outlier segments (0 = random, 1-5 = exact count)
	MinLength    int     // minimum length of one outlier segment
	MaxLength    int     // maximum length of one outlier segment
	Intensity    float64 // outlier strength factor (1.0 = slight overshoot, 2.0 = strong overshoot)
	Distribution string  // "both" = above and below, "upper" = only above, "lower" = only below
}
// generateFloatSliceWithOutliers produces size samples around baseValue and,
// when enabled in cfg, overwrites some stretches with out-of-range values.
// baseValue: reference value.
// changes: flattened variation pairs [minChange1, maxChange1, minChange2, maxChange2, ...].
// size: length of the generated slice.
// variationType: generation mode passed through to generateFloatSlice.
// cfg: outlier-segment configuration.
func generateFloatSliceWithOutliers(baseValue float64, changes []float64, size int, variationType string, cfg outlierConfig) ([]float64, error) {
	normal, err := generateFloatSlice(baseValue, changes, size, variationType)
	if err != nil {
		return nil, err
	}
	if !cfg.Enabled {
		return normal, nil
	}
	// Overlay the configured outlier segments on top of the normal data.
	return insertOutliers(normal, baseValue, changes, cfg), nil
}
// insertOutliers overwrites randomly chosen, non-overlapping stretches of
// data with values that fall outside the configured change bounds.
// data is modified in place and returned.
func insertOutliers(data []float64, baseValue float64, changes []float64, config outlierConfig) []float64 {
	if len(data) == 0 || !config.Enabled {
		return data
	}
	// Bounds of the normal variation range.
	minBound, maxBound := getChangeBounds(baseValue, changes)
	// TODO delete (debug output)
	log.Printf("获取变化范围的边界,min:%.4f,max:%.4f\n", minBound, maxBound)
	// Decide how many outlier segments to inject.
	outlierCount := config.Count
	if outlierCount == 0 {
		// Randomly pick 1-3 segments.
		outlierCount = rand.Intn(3) + 1
	}
	// Cap by how many segments can plausibly fit (heuristic: each segment
	// needs MinLength plus ~10 samples of spacing).
	maxPossibleOutliers := len(data) / (config.MinLength + 10)
	if outlierCount > maxPossibleOutliers {
		outlierCount = maxPossibleOutliers
	}
	// Choose non-overlapping segment positions.
	segments := generateOutlierSegments(len(data), config.MinLength, config.MaxLength, outlierCount, config.Distribution)
	// TODO debug output, to be removed
	log.Printf("生成异常段位置:%+v\n", segments)
	// Overwrite each chosen segment with out-of-bound values.
	for _, segment := range segments {
		data = insertOutlierSegment(data, segment, minBound, maxBound, config)
	}
	return data
}
// getChangeBounds returns the lowest and highest values reachable from
// baseValue across every [min,max] pair in changes. With no pairs it falls
// back to baseValue ± 10.
func getChangeBounds(baseValue float64, changes []float64) (minBound, maxBound float64) {
	if len(changes) == 0 {
		return baseValue - 10, baseValue + 10
	}
	first := true
	for _, pair := range normalizeRanges(changes) {
		lo := baseValue + pair[0]
		hi := baseValue + pair[1]
		if first || lo < minBound {
			minBound = lo
		}
		if first || hi > maxBound {
			maxBound = hi
		}
		first = false
	}
	return minBound, maxBound
}
// OutlierSegment describes one contiguous run of out-of-range samples.
type OutlierSegment struct {
	Start  int    // index of the first sample in the segment
	Length int    // number of samples in the segment
	Type   string // "upper" = above the max bound, "lower" = below the min bound
}
// generateOutlierSegments picks up to count non-overlapping segments inside
// [0, totalSize) with lengths in [minLength, maxLength]. distribution selects
// the outlier direction ("upper", "lower"; anything else = random per
// segment). Placement is attempted at most 10 times per segment, so fewer
// than count segments may be returned.
func generateOutlierSegments(totalSize, minLength, maxLength, count int, distribution string) []OutlierSegment {
	if count == 0 {
		return nil
	}
	segments := make([]OutlierSegment, 0, count)
	usedPositions := make(map[int]bool)
	for i := 0; i < count; i++ {
		// Try several times to find a non-overlapping position.
		for attempt := 0; attempt < 10; attempt++ {
			length := rand.Intn(maxLength-minLength+1) + minLength
			// BUGFIX: rand.Intn panics for arguments <= 0; skip attempts where
			// the drawn segment cannot fit inside the data at all.
			if length >= totalSize {
				continue
			}
			start := rand.Intn(totalSize - length)
			// Reject positions overlapping an already chosen segment.
			overlap := false
			for pos := start; pos < start+length; pos++ {
				if usedPositions[pos] {
					overlap = true
					break
				}
			}
			if overlap {
				continue
			}
			// Claim the positions for this segment.
			for pos := start; pos < start+length; pos++ {
				usedPositions[pos] = true
			}
			// Resolve the outlier direction from the distribution setting;
			// "both" or any unknown value means a random direction.
			outlierType := distribution
			if outlierType != "upper" && outlierType != "lower" {
				if rand.Float64() < 0.5 {
					outlierType = "upper"
				} else {
					outlierType = "lower"
				}
			}
			segments = append(segments, OutlierSegment{
				Start:  start,
				Length: length,
				Type:   outlierType,
			})
			break
		}
	}
	return segments
}
// insertOutlierSegment rewrites data[segment.Start : Start+Length] with
// values pushed outside [minBound, maxBound]. The whole segment shares one
// direction: segment.Type when set, otherwise derived from
// config.Distribution (random when "both"/unknown).
func insertOutlierSegment(data []float64, segment OutlierSegment, minBound, maxBound float64, config outlierConfig) []float64 {
	width := maxBound - minBound
	direction := segment.Type
	if direction == "" {
		direction = config.Distribution
		if direction != "upper" && direction != "lower" {
			// "both" (or anything unrecognized): choose a side at random.
			if rand.Float64() < 0.5 {
				direction = "upper"
			} else {
				direction = "lower"
			}
		}
	}
	end := segment.Start + segment.Length
	for i := segment.Start; i < end && i < len(data); i++ {
		// Overshoot the bound by 30%..(30% + Intensity*100%) of the range width.
		excess := width * (0.3 + rand.Float64()*config.Intensity)
		if direction == "upper" {
			data[i] = maxBound + excess
		} else {
			data[i] = minBound - excess
		}
	}
	return data
}
// detectOutlierSegments scans data and returns every run of at least
// minSegmentLength consecutive samples lying outside the bounds derived from
// baseValue and changes. Shorter runs are dropped; a run that flips direction
// (upper <-> lower) is split into two segments.
func detectOutlierSegments(data []float64, baseValue float64, changes []float64, minSegmentLength int) []OutlierSegment {
	if len(data) == 0 {
		return nil
	}
	minBound, maxBound := getChangeBounds(baseValue, changes)
	var segments []OutlierSegment
	currentStart := -1 // start index of the run being tracked; -1 = no open run
	currentType := ""
	for i, value := range data {
		isOutlier := value > maxBound || value < minBound
		if isOutlier {
			outlierType := "upper"
			if value < minBound {
				outlierType = "lower"
			}
			if currentStart == -1 {
				// Open a new run.
				currentStart = i
				currentType = outlierType
			} else if currentType != outlierType {
				// Direction changed: close the current run, start another.
				if i-currentStart >= minSegmentLength {
					segments = append(segments, OutlierSegment{
						Start:  currentStart,
						Length: i - currentStart,
						Type:   currentType,
					})
				}
				currentStart = i
				currentType = outlierType
			}
		} else {
			if currentStart != -1 {
				// Back in range: close the current run.
				if i-currentStart >= minSegmentLength {
					segments = append(segments, OutlierSegment{
						Start:  currentStart,
						Length: i - currentStart,
						Type:   currentType,
					})
				}
				currentStart = -1
				currentType = ""
			}
		}
	}
	// Flush a run that extends to the end of the data.
	if currentStart != -1 && len(data)-currentStart >= minSegmentLength {
		segments = append(segments, OutlierSegment{
			Start:  currentStart,
			Length: len(data) - currentStart,
			Type:   currentType,
		})
	}
	return segments
}
// generateFloatSlice generates size random samples around baseValue using the
// range pairs in changes. The variationType parameter is currently unused;
// all data comes from generateRandomData. The error result is always nil
// today but is kept for future generation modes.
func generateFloatSlice(baseValue float64, changes []float64, size int, variationType string) ([]float64, error) {
	return generateRandomData(baseValue, changes, size), nil
}
// normalizeRanges groups changes into consecutive [min, max] pairs, swapping
// each pair so that min <= max.
// Robustness fix: an odd-length changes slice previously indexed past the end
// (changes[i+1]) and panicked; a trailing unpaired value is now ignored,
// matching the len(changes)/2 result size the original already declared.
func normalizeRanges(changes []float64) [][2]float64 {
	n := len(changes) / 2
	ranges := make([][2]float64, n)
	for i := 0; i < n; i++ {
		lo, hi := changes[2*i], changes[2*i+1]
		if lo > hi {
			lo, hi = hi, lo
		}
		ranges[i] = [2]float64{lo, hi}
	}
	return ranges
}
// generateRandomData returns size samples; each sample picks one of the
// normalized [min,max] ranges uniformly at random and adds an offset drawn
// from it to baseValue.
// Robustness fix: with no usable range (fewer than two change values) the
// original panicked in rand.Intn(0); now every sample is simply baseValue.
func generateRandomData(baseValue float64, changes []float64, size int) []float64 {
	data := make([]float64, size)
	ranges := normalizeRanges(changes)
	if len(ranges) == 0 {
		for i := range data {
			data[i] = baseValue
		}
		return data
	}
	for i := range data {
		r := ranges[rand.Intn(len(ranges))]
		minChange, maxChange := r[0], r[1]
		data[i] = baseValue + minChange + rand.Float64()*(maxChange-minChange)
	}
	return data
}
// simulateDataWrite periodically (every 3s) generates simulated samples for
// one measurement and appends them to the Redis ZSet at redisKey, until ctx
// is cancelled.
// NOTE(review): for BaseType "TI", and for "TE" without Changes, the function
// returns permanently instead of skipping the tick — confirm that is
// intended for long-running writers.
func simulateDataWrite(ctx context.Context, rdb *redis.Client, redisKey string, config outlierConfig, measInfo util.CalculationResult) {
	log.Printf("启动数据写入程序, Redis Key: %s, 基准值: %.4f, 变化范围: %+v\n", redisKey, measInfo.BaseValue, measInfo.Changes)
	ticker := time.NewTicker(3 * time.Second)
	defer ticker.Stop()
	pipe := rdb.Pipeline()
	for {
		select {
		case <-ctx.Done():
			log.Printf("\n[%s] 写入程序已停止\n", redisKey)
			return
		case <-ticker.C:
			minBound, maxBound := getChangeBounds(measInfo.BaseValue, measInfo.Changes)
			log.Printf("计算边界: [%.4f, %.4f]\n", minBound, maxBound)
			// Dispatch on the baseline type derived by the util package.
			switch measInfo.BaseType {
			case "TI":
				// Edge-triggered point: no abnormal data is generated.
				log.Printf("边沿触发类型,跳过异常数据生成\n")
				return
			case "TE":
				// Threshold point: generate data containing outliers.
				if len(measInfo.Changes) == 0 {
					log.Printf("无变化范围数据,跳过\n")
					return
				}
				// Tune the outlier config to the number of limit values.
				if len(measInfo.Changes) == 2 {
					// Only up/down limits.
					config.Distribution = "both"
				} else if len(measInfo.Changes) == 4 {
					// Up/down plus early-warning limits.
					config.Distribution = "both"
					config.Intensity = 2.0 // stronger overshoot
				}
				// Generate the sample batch with injected outliers.
				data, err := generateFloatSliceWithOutliers(
					measInfo.BaseValue,
					measInfo.Changes,
					measInfo.Size,
					"random",
					config,
				)
				if err != nil {
					log.Printf("生成异常数据失败:%v\n", err)
					continue
				}
				// Report the outlier segments actually present in the batch.
				segments := detectOutlierSegments(data, measInfo.BaseValue, measInfo.Changes, config.MinLength)
				log.Printf("检测到异常段数量:%d\n", len(segments))
				for i, segment := range segments {
					log.Printf("异常段%d: 位置[%d-%d], 长度=%d, 类型=%s\n",
						i+1, segment.Start, segment.Start+segment.Length-1, segment.Length, segment.Type)
				}
				// NOTE(review): Member is the wall-clock nanosecond at append
				// time; identical timestamps within this loop would collapse
				// into one ZSet member — confirm acceptable.
				redisZs := make([]redis.Z, 0, len(data))
				for i := range len(data) {
					z := redis.Z{
						Score:  data[i],
						Member: strconv.FormatInt(time.Now().UnixNano(), 10),
					}
					redisZs = append(redisZs, z)
				}
				pipe.ZAdd(ctx, redisKey, redisZs...)
				_, err = pipe.Exec(ctx)
				if err != nil {
					log.Printf("redis pipeline execution failed: %v", err)
				}
				log.Printf("生成 redis 实时数据成功\n")
			}
		}
	}
}
// gracefulShutdown closes the shared Redis client (if one was created),
// waits briefly for in-flight work, then terminates the process.
func gracefulShutdown() {
	if c := globalRedisClient; c != nil {
		if err := c.Close(); err != nil {
			log.Printf("关闭 Redis 客户端失败:%v", err)
		} else {
			log.Println("关闭 Redis 客户端成功")
		}
	}
	// Give pending log output a moment before exiting.
	time.Sleep(500 * time.Millisecond)
	os.Exit(0)
}
// main loads measurement definitions from Postgres, then spawns one writer
// goroutine per measurement that continuously pushes simulated (partially
// abnormal) samples into Redis until SIGINT/SIGTERM arrives.
func main() {
	rootCtx := context.Background()
	pgURI := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s", "192.168.1.101", 5432, "postgres", "coslight", "demo")
	postgresDBClient, err := gorm.Open(postgres.Open(pgURI))
	if err != nil {
		panic(err)
	}
	defer func() {
		sqlDB, err := postgresDBClient.DB()
		if err != nil {
			panic(err)
		}
		sqlDB.Close()
	}()
	// Bound the initial measurement query to 5 seconds.
	cancelCtx, cancel := context.WithTimeout(rootCtx, 5*time.Second)
	defer cancel()
	var measurements []orm.Measurement
	result := postgresDBClient.WithContext(cancelCtx).Find(&measurements)
	if result.Error != nil {
		panic(result.Error)
	}
	log.Println("总共读取到测量点数量:", len(measurements))
	measInfos := util.ProcessMeasurements(measurements)
	// Outlier-segment configuration for the generated measurement data.
	// (The variable deliberately shadows the outlierConfig type name —
	// legal Go, but worth knowing when reading this function.)
	outlierConfig := outlierConfig{
		Enabled:      true,   // inject outlier segments
		Count:        2,      // number of outlier segments
		MinLength:    10,     // minimum consecutive outlier length
		MaxLength:    15,     // maximum consecutive outlier length
		Intensity:    1.5,    // outlier strength
		Distribution: "both", // outliers above and below the bounds
	}
	globalRedisClient = util.InitRedisClient(redisAddr)
	// Writer goroutines share one cancellable context.
	rCancelCtx, cancel := context.WithCancel(rootCtx)
	defer cancel()
	for key, measInfo := range measInfos {
		go simulateDataWrite(rCancelCtx, globalRedisClient, key, outlierConfig, measInfo)
	}
	// Block until an interrupt/termination signal, then shut down.
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
	<-sigChan
	gracefulShutdown()
}

View File

@ -0,0 +1,266 @@
// Package util provide some utility fun
package util
import (
"fmt"
"modelRT/orm"
)
// CalculationResult carries the derived baseline info for one measurement.
type CalculationResult struct {
	BaseValue float64   // baseline value (average of the limits, or the edge level)
	Changes   []float64 // offsets of each configured limit from BaseValue
	Size      int       // number of samples to generate, copied from the measurement
	BaseType  string    // "TI" = edge-triggered, "TE" = threshold limits
	Message   string    // human-readable description of how BaseValue was derived
}
// ProcessMeasurements filters measurements down to those whose DataSource has
// type == 1, builds the Redis key "station:device:phasor:channel" from the
// io_address map, derives baseline info from the EventPlan "cause" map, and
// returns the results keyed by that Redis key. Entries with missing or
// malformed fields are silently skipped.
func ProcessMeasurements(measurements []orm.Measurement) map[string]CalculationResult {
	results := make(map[string]CalculationResult, len(measurements))
	for _, measurement := range measurements {
		// DataSource must exist and declare type == 1.
		if measurement.DataSource == nil {
			continue
		}
		dataType, typeExists := measurement.DataSource["type"]
		if !typeExists {
			continue
		}
		// Accept the numeric representations a JSON-ish map may carry.
		var typeValue int
		switch v := dataType.(type) {
		case int:
			typeValue = v
		case float64:
			typeValue = int(v)
		case int64:
			typeValue = int(v)
		default:
			continue
		}
		if typeValue != 1 {
			continue
		}
		// Extract io_address and assemble the Redis key.
		ioAddressRaw, ioExists := measurement.DataSource["io_address"]
		if !ioExists {
			continue
		}
		ioAddress, ok := ioAddressRaw.(map[string]any)
		if !ok {
			continue
		}
		// Missing/non-string entries become "" in the key (ok-ignored casts).
		station, _ := ioAddress["station"].(string)
		device, _ := ioAddress["device"].(string)
		channel, _ := ioAddress["channel"].(string)
		result := fmt.Sprintf("%s:%s:phasor:%s", station, device, channel)
		// The EventPlan "cause" map drives the baseline calculation.
		if measurement.EventPlan == nil {
			continue
		}
		causeValue, causeExist := measurement.EventPlan["cause"]
		if !causeExist {
			continue
		}
		causeMap, ok := causeValue.(map[string]any)
		if !ok {
			continue
		}
		calResult, err := calculateBaseValueEnhanced(causeMap)
		if err != nil {
			continue
		}
		calResult.Size = measurement.Size
		results[result] = calResult
	}
	return results
}
// calculateBaseValueEnhanced derives a CalculationResult from the "cause"
// map of an event plan.
// Priority: an "edge" entry wins (BaseType "TI"); otherwise the up/down
// and/or upup/downdown limit pairs are averaged into a baseline (BaseType
// "TE") with Changes holding each limit's offset from it.
// NOTE(review): "raising" follows the spelling used in the stored data.
func calculateBaseValueEnhanced(data map[string]any) (CalculationResult, error) {
	result := CalculationResult{}
	if edge, exists := data["edge"]; exists {
		value, err := calculateEdgeValue(edge)
		if err != nil {
			return result, err
		}
		// Changes mirrors the edge level: 1.0 for "raising", 0.0 otherwise.
		if edge == "raising" {
			result.Changes = []float64{1.0}
		} else {
			result.Changes = []float64{0.0}
		}
		result.BaseValue = value
		result.BaseType = "TI"
		result.Message = "边沿触发基准值"
		return result, nil
	}
	hasUpDown := HasKeys(data, "up", "down")
	hasUpUpDownDown := HasKeys(data, "upup", "downdown")
	result.BaseType = "TE"
	switch {
	case hasUpDown && hasUpUpDownDown:
		// Both limit pairs: baseline from up/down, offsets for all four keys.
		value, err := calculateAverage(data, "up", "down")
		if err != nil {
			return result, err
		}
		result.BaseValue = value
		result.Changes, err = calculateChanges(data, value, false, 4)
		if err != nil {
			return result, err
		}
		result.Message = "上下限基准值(忽略预警上上下下限)"
		return result, nil
	case hasUpDown:
		// Only the up/down pair is present.
		value, err := calculateAverage(data, "up", "down")
		if err != nil {
			return result, err
		}
		result.BaseValue = value
		result.Changes, err = calculateChanges(data, value, false, 2)
		if err != nil {
			return result, err
		}
		result.Message = "上下限基准值"
		return result, nil
	case hasUpUpDownDown:
		// Only the upup/downdown pair is present.
		value, err := calculateAverage(data, "upup", "downdown")
		if err != nil {
			return result, err
		}
		result.BaseValue = value
		result.Changes, err = calculateChanges(data, value, true, 2)
		if err != nil {
			return result, err
		}
		result.Message = "上上下下限基准值"
		return result, nil
	default:
		return result, fmt.Errorf("不支持的数据结构: %v", data)
	}
}
// calculateAverage reads two numeric fields from data and returns their
// arithmetic mean. The first missing or non-numeric key aborts with an error.
func calculateAverage(data map[string]any, key1, key2 string) (float64, error) {
	var sum float64
	for _, key := range []string{key1, key2} {
		v, err := getFloatValue(data, key)
		if err != nil {
			return 0, err
		}
		sum += v
	}
	return sum / 2.0, nil
}
// calculateChanges returns the offsets of the configured limit values from
// baseValue.
// limitNum == 2 uses up/down, or upup/downdown when maxLimit is set;
// limitNum == 4 uses up, down, upup, downdown in that order.
// Any other limitNum yields an empty, non-nil slice (original behavior).
// The case-4 branch previously duplicated the fetch/append sequence four
// times; both cases now share a single key-driven loop.
func calculateChanges(data map[string]any, baseValue float64, maxLimit bool, limitNum int) ([]float64, error) {
	var keys []string
	switch limitNum {
	case 2:
		if maxLimit {
			keys = []string{"upup", "downdown"}
		} else {
			keys = []string{"up", "down"}
		}
	case 4:
		keys = []string{"up", "down", "upup", "downdown"}
	}
	results := make([]float64, 0, limitNum)
	for _, key := range keys {
		value, err := getFloatValue(data, key)
		if err != nil {
			return nil, err
		}
		results = append(results, value-baseValue)
	}
	return results, nil
}
// getFloatValue reads data[key] and converts it to float64.
// Accepts float64, float32, int, and — for consistency with the numeric
// type-switch in ProcessMeasurements — int64. A missing key or any other
// value type is an error.
func getFloatValue(data map[string]any, key string) (float64, error) {
	value, exists := data[key]
	if !exists {
		return 0, fmt.Errorf("缺少必需的键:%s", key)
	}
	switch v := value.(type) {
	case float64:
		return v, nil
	case float32:
		return float64(v), nil
	case int:
		return float64(v), nil
	case int64:
		return float64(v), nil
	default:
		return 0, fmt.Errorf("键 %s 的值类型错误,期望数字类型,得到 %T", key, value)
	}
}
// HasKeys reports whether every key in keys is present in data.
// With no keys it trivially returns true.
func HasKeys(data map[string]any, keys ...string) bool {
	for _, k := range keys {
		if _, present := data[k]; !present {
			return false
		}
	}
	return true
}
// calculateEdgeValue maps an edge descriptor to its signal level:
// "raising" -> 1.0, "falling" -> 0.0. A non-string or any other value is
// an error.
func calculateEdgeValue(edge any) (float64, error) {
	edgeStr, ok := edge.(string)
	if !ok {
		return 0, fmt.Errorf("edge 字段类型错误,期望 string,得到 %T", edge)
	}
	if edgeStr == "raising" {
		return 1.0, nil
	}
	if edgeStr == "falling" {
		return 0.0, nil
	}
	return 0, fmt.Errorf("不支持的 edge 值: %s", edgeStr)
}

View File

@ -0,0 +1,27 @@
// Package util provide some utility fun
package util
import (
"context"
"time"
"github.com/redis/go-redis/v9"
)
// InitRedisClient define func to initialize and return a redis client
// It creates a client for redisAddr (no password, DB 0) and verifies
// connectivity with a PING bounded to 5 seconds.
// NOTE(review): on PING failure it returns nil instead of an error, so any
// caller that skips the nil check will panic on first use — confirm this
// contract is intended.
func InitRedisClient(redisAddr string) *redis.Client {
	rdb := redis.NewClient(&redis.Options{
		Addr:     redisAddr,
		Password: "",
		DB:       0,
	})
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	_, err := rdb.Ping(ctx).Result()
	if err != nil {
		return nil
	}
	return rdb
}

View File

@ -10,10 +10,10 @@ import (
var anchorValueOverview sync.Map
// GetAnchorValue define func of get circuit diagram data by componentID
func GetAnchorValue(componentID int64) (string, error) {
value, ok := diagramsOverview.Load(componentID)
func GetAnchorValue(componentUUID string) (string, error) {
value, ok := diagramsOverview.Load(componentUUID)
if !ok {
return "", fmt.Errorf("can not find anchor value by componentID:%d", componentID)
return "", fmt.Errorf("can not find anchor value by componentUUID:%s", componentUUID)
}
anchorValue, ok := value.(string)
if !ok {
@ -22,20 +22,20 @@ func GetAnchorValue(componentID int64) (string, error) {
return anchorValue, nil
}
// UpdateAnchorValue define func of update anchor value by componentID and anchor name
func UpdateAnchorValue(componentID int64, anchorValue string) bool {
_, result := anchorValueOverview.Swap(componentID, anchorValue)
// UpdateAnchorValue define func of update anchor value by componentUUID and anchor name
func UpdateAnchorValue(componentUUID string, anchorValue string) bool {
_, result := anchorValueOverview.Swap(componentUUID, anchorValue)
return result
}
// StoreAnchorValue define func of store anchor value with componentID and anchor name
func StoreAnchorValue(componentID int64, anchorValue string) {
anchorValueOverview.Store(componentID, anchorValue)
// StoreAnchorValue define func of store anchor value with componentUUID and anchor name
func StoreAnchorValue(componentUUID string, anchorValue string) {
anchorValueOverview.Store(componentUUID, anchorValue)
return
}
// DeleteAnchorValue define func of delete anchor value with componentID
func DeleteAnchorValue(componentID int64) {
anchorValueOverview.Delete(componentID)
// DeleteAnchorValue define func of delete anchor value with componentUUID
func DeleteAnchorValue(componentUUID string) {
anchorValueOverview.Delete(componentUUID)
return
}

View File

@ -4,38 +4,40 @@ import (
"errors"
"fmt"
"sync"
"modelRT/orm"
)
// diagramsOverview define struct of storage all circuit diagram data
var diagramsOverview sync.Map
// GetComponentMap define func of get circuit diagram data by component id
func GetComponentMap(componentID int64) (map[string]interface{}, error) {
value, ok := diagramsOverview.Load(componentID)
// GetComponentMap define func of get circuit diagram data by component uuid
func GetComponentMap(componentUUID string) (*orm.Component, error) {
value, ok := diagramsOverview.Load(componentUUID)
if !ok {
return nil, fmt.Errorf("can not find graph by global uuid:%d", componentID)
return nil, fmt.Errorf("can not find graph by global uuid:%s", componentUUID)
}
paramsMap, ok := value.(map[string]interface{})
componentInfo, ok := value.(*orm.Component)
if !ok {
return nil, errors.New("convert to component map struct failed")
}
return paramsMap, nil
return componentInfo, nil
}
// UpdateComponentMap define func of update circuit diagram data by component id and component info
func UpdateComponentMap(componentID int64, componentInfo map[string]interface{}) bool {
// UpdateComponentMap define func of update circuit diagram data by component uuid and component info
func UpdateComponentMap(componentID int64, componentInfo *orm.Component) bool {
_, result := diagramsOverview.Swap(componentID, componentInfo)
return result
}
// StoreComponentMap define func of store circuit diagram data with component id and component info
func StoreComponentMap(componentID int64, componentInfo map[string]interface{}) {
diagramsOverview.Store(componentID, componentInfo)
// StoreComponentMap define func of store circuit diagram data with component uuid and component info
func StoreComponentMap(componentUUID string, componentInfo *orm.Component) {
diagramsOverview.Store(componentUUID, componentInfo)
return
}
// DeleteComponentMap define func of delete circuit diagram data with component id
func DeleteComponentMap(componentID int64) {
diagramsOverview.Delete(componentID)
// DeleteComponentMap define func of delete circuit diagram data with component uuid
func DeleteComponentMap(componentUUID string) {
diagramsOverview.Delete(componentUUID)
return
}

View File

@ -5,7 +5,7 @@ import (
"fmt"
"sync"
"modelRT/constant"
"modelRT/constants"
"modelRT/network"
"github.com/gofrs/uuid"
@ -148,7 +148,7 @@ func (g *Graph) PrintGraph() {
// UpdateEdge update edge link info between two verticeLinks
func (g *Graph) UpdateEdge(changeInfo network.TopologicUUIDChangeInfos) error {
if changeInfo.ChangeType == constant.UUIDFromChangeType || changeInfo.ChangeType == constant.UUIDToChangeType {
if changeInfo.ChangeType == constants.UUIDFromChangeType || changeInfo.ChangeType == constants.UUIDToChangeType {
g.DelEdge(changeInfo.OldUUIDFrom, changeInfo.OldUUIDTo)
g.AddEdge(changeInfo.NewUUIDFrom, changeInfo.NewUUIDTo)
} else {

33
diagram/hash_test.go Normal file
View File

@ -0,0 +1,33 @@
package diagram
import (
"context"
"fmt"
"testing"
"time"
"github.com/redis/go-redis/v9"
)
// TestHMSet smoke-tests writing a small hash to a live Redis instance at
// 192.168.2.104:6379; it only logs errors rather than failing the test.
func TestHMSet(t *testing.T) {
	client := redis.NewClient(&redis.Options{
		Network:     "tcp",
		Addr:        "192.168.2.104:6379",
		Password:    "cnstar",
		PoolSize:    50,
		DialTimeout: 10 * time.Second,
	})
	fields := map[string]interface{}{
		"field1": "Hello1",
		"field2": "World1",
		"field3": 11,
	}
	res, err := client.HSet(context.Background(), "myhash", fields).Result()
	if err != nil {
		fmt.Printf("err:%v\n", err)
	}
	fmt.Printf("res:%v\n", res)
}

View File

@ -0,0 +1,64 @@
package diagram
import (
"fmt"
"github.com/gofrs/uuid"
)
// GlobalTree is the root of the process-wide topology tree.
var GlobalTree *MultiBranchTreeNode

// MultiBranchTreeNode represents a topological structure using an multi branch tree
type MultiBranchTreeNode struct {
	ID       uuid.UUID              // unique node identifier
	Parent   *MultiBranchTreeNode   // pointer to the parent node (nil for the root)
	Children []*MultiBranchTreeNode // pointers to all child nodes
}
// NewMultiBranchTree returns a node with the given id, no parent, and an
// empty (non-nil) child list.
func NewMultiBranchTree(id uuid.UUID) *MultiBranchTreeNode {
	node := &MultiBranchTreeNode{ID: id}
	node.Children = make([]*MultiBranchTreeNode, 0)
	return node
}
// AddChild appends child to n's children and points the child's Parent at n.
func (n *MultiBranchTreeNode) AddChild(child *MultiBranchTreeNode) {
	n.Children = append(n.Children, child)
	child.Parent = n
}
// RemoveChild detaches the first direct child whose ID equals childID and
// reports whether one was found. Descendants of that child are not searched.
func (n *MultiBranchTreeNode) RemoveChild(childID uuid.UUID) bool {
	for idx := range n.Children {
		c := n.Children[idx]
		if c.ID != childID {
			continue
		}
		c.Parent = nil
		n.Children = append(n.Children[:idx], n.Children[idx+1:]...)
		return true
	}
	return false
}
// FindNodeByID searches the subtree rooted at n depth-first and returns the
// node carrying id, or nil when no such node exists.
func (n *MultiBranchTreeNode) FindNodeByID(id uuid.UUID) *MultiBranchTreeNode {
	if id == n.ID {
		return n
	}
	for _, child := range n.Children {
		match := child.FindNodeByID(id)
		if match != nil {
			return match
		}
	}
	return nil
}
// PrintTree writes the subtree rooted at n to stdout, one node per line,
// indented proportionally to level (the recursion depth).
func (n *MultiBranchTreeNode) PrintTree(level int) {
	// Emit the indentation prefix for this depth.
	for i := 0; i < level; i++ {
		fmt.Print("  ")
	}
	fmt.Printf("-ID: %s\n", n.ID)
	for _, child := range n.Children {
		child.PrintTree(level + 1)
	}
}

36
diagram/redis_client.go Normal file
View File

@ -0,0 +1,36 @@
// Package diagram provide diagram data structure and operation
package diagram
import (
"context"
"github.com/redis/go-redis/v9"
)
// RedisClient define struct to accessing redis data that does not require the use of distributed locks
type RedisClient struct {
	Client *redis.Client // shared client obtained from GetRedisClientInstance
}
// NewRedisClient wraps the process-wide Redis client in a lock-free accessor.
func NewRedisClient() *RedisClient {
	rc := new(RedisClient)
	rc.Client = GetRedisClientInstance()
	return rc
}
// QueryByZRangeByLex define func to query real time data from redis zset
// It returns the members with ranks 0..size (inclusive) together with their
// scores, in ascending rank order.
// NOTE(review): despite the name, ByLex is false below, so this is a plain
// index-based ZRANGE — confirm whether the name or the flag is the intent.
func (rc *RedisClient) QueryByZRangeByLex(ctx context.Context, key string, size int64) ([]redis.Z, error) {
	client := rc.Client
	args := redis.ZRangeArgs{
		Key:     key,
		Start:   0,
		Stop:    size, // inclusive stop rank
		ByScore: false,
		ByLex:   false,
		Rev:     false,
		Offset:  0,
		Count:   0,
	}
	return client.ZRangeArgsWithScores(ctx, args).Result()
}

97
diagram/redis_hash.go Normal file
View File

@ -0,0 +1,97 @@
package diagram
import (
"context"
locker "modelRT/distributedlock"
"modelRT/logger"
"github.com/redis/go-redis/v9"
)
// RedisHash defines the encapsulation struct of redis hash type
type RedisHash struct {
	ctx           context.Context           // request-scoped context; also carries the lock token
	hashKey       string                    // redis key of the hash
	rwLocker      *locker.RedissionRWLocker // distributed read/write lock guarding the hash
	storageClient *redis.Client             // shared redis client
}
// NewRedisHash define func of new redis hash instance
// NOTE(review): ctx must carry a string value under the "client_token" key;
// the unchecked type assertion below panics otherwise — confirm all callers
// set it before constructing a RedisHash.
func NewRedisHash(ctx context.Context, hashKey string, lockLeaseTime uint64, needRefresh bool) *RedisHash {
	token := ctx.Value("client_token").(string)
	return &RedisHash{
		ctx:           ctx,
		hashKey:       hashKey,
		rwLocker:      locker.InitRWLocker(hashKey, token, lockLeaseTime, needRefresh),
		storageClient: GetRedisClientInstance(),
	}
}
// SetRedisHashByMap define func of set redis hash by map struct
// It writes every key/value pair in fields with a single HSET, guarded by
// the distributed write lock.
func (rh *RedisHash) SetRedisHashByMap(fields map[string]interface{}) error {
	err := rh.rwLocker.WLock(rh.ctx)
	if err != nil {
		logger.Error(rh.ctx, "lock wLock by hash_key failed", "hash_key", rh.hashKey, "error", err)
		return err
	}
	defer rh.rwLocker.UnWLock(rh.ctx)
	err = rh.storageClient.HSet(rh.ctx, rh.hashKey, fields).Err()
	if err != nil {
		logger.Error(rh.ctx, "set hash by map failed", "hash_key", rh.hashKey, "fields", fields, "error", err)
		return err
	}
	return nil
}
// SetRedisHashByKV define func of set redis hash by kv struct
// It writes a single field/value pair with HSET, guarded by the distributed
// write lock.
func (rh *RedisHash) SetRedisHashByKV(field string, value interface{}) error {
	err := rh.rwLocker.WLock(rh.ctx)
	if err != nil {
		logger.Error(rh.ctx, "lock wLock by hash_key failed", "hash_key", rh.hashKey, "error", err)
		return err
	}
	defer rh.rwLocker.UnWLock(rh.ctx)
	err = rh.storageClient.HSet(rh.ctx, rh.hashKey, field, value).Err()
	if err != nil {
		logger.Error(rh.ctx, "set hash by kv failed", "hash_key", rh.hashKey, "field", field, "value", value, "error", err)
		return err
	}
	return nil
}
// HGet define func of get specified field value from redis hash by key and field name
// It reads one field with HGET under the distributed read lock.
func (rh *RedisHash) HGet(field string) (string, error) {
	err := rh.rwLocker.RLock(rh.ctx)
	if err != nil {
		logger.Error(rh.ctx, "lock rLock by hash_key failed", "hash_key", rh.hashKey, "error", err)
		return "", err
	}
	defer rh.rwLocker.UnRLock(rh.ctx)
	result, err := rh.storageClient.HGet(rh.ctx, rh.hashKey, field).Result()
	if err != nil {
		// BUGFIX: the failure log previously said "set hash by kv failed",
		// copied from the setter; this is a read operation.
		logger.Error(rh.ctx, "get hash field by key failed", "hash_key", rh.hashKey, "field", field, "error", err)
		return "", err
	}
	return result, nil
}
// HGetAll define func of get all filelds from redis hash by key
// It reads the whole hash with HGETALL under the distributed read lock.
func (rh *RedisHash) HGetAll() (map[string]string, error) {
	err := rh.rwLocker.RLock(rh.ctx)
	if err != nil {
		logger.Error(rh.ctx, "lock rLock by hash_key failed", "hash_key", rh.hashKey, "error", err)
		return nil, err
	}
	defer rh.rwLocker.UnRLock(rh.ctx)
	result, err := rh.storageClient.HGetAll(rh.ctx, rh.hashKey).Result()
	if err != nil {
		logger.Error(rh.ctx, "get all hash field by hash key failed", "hash_key", rh.hashKey, "error", err)
		return nil, err
	}
	return result, nil
}

45
diagram/redis_init.go Normal file
View File

@ -0,0 +1,45 @@
package diagram
import (
"sync"
"time"
"modelRT/config"
"modelRT/util"
"github.com/redis/go-redis/v9"
)
var (
_globalStorageClient *redis.Client
once sync.Once
)
// initClient define func of return successfully initialized redis client
// It builds the client from rCfg and panics if creation fails, since the
// process cannot operate without Redis.
func initClient(rCfg config.RedisConfig) *redis.Client {
	client, err := util.NewRedisClient(
		rCfg.Addr,
		util.WithPassword(rCfg.Password),
		util.WithDB(rCfg.DB),
		util.WithPoolSize(rCfg.PoolSize),
		util.WithTimeout(time.Duration(rCfg.Timeout)*time.Second),
	)
	if err != nil {
		panic(err)
	}
	return client
}
// InitRedisClientInstance define func of return instance of redis client
// Only the first call's rCfg takes effect; subsequent calls return the
// already-initialized singleton unchanged.
func InitRedisClientInstance(rCfg config.RedisConfig) *redis.Client {
	once.Do(func() {
		_globalStorageClient = initClient(rCfg)
	})
	return _globalStorageClient
}
// GetRedisClientInstance returns the singleton redis client (nil until
// InitRedisClientInstance has run).
func GetRedisClientInstance() *redis.Client {
	return _globalStorageClient
}

95
diagram/redis_set.go Normal file
View File

@ -0,0 +1,95 @@
package diagram
import (
"context"
"fmt"
locker "modelRT/distributedlock"
"modelRT/logger"
"github.com/redis/go-redis/v9"
"go.uber.org/zap"
)
// RedisSet defines the encapsulation struct of redis set type
type RedisSet struct {
	ctx           context.Context           // request-scoped context; also carries the lock token
	key           string                    // redis key of the set
	rwLocker      *locker.RedissionRWLocker // distributed read/write lock guarding the set
	storageClient *redis.Client             // shared redis client
	logger        *zap.Logger               // structured logger instance
}
// NewRedisSet define func of new redis set instance
// NOTE(review): ctx must carry a string value under the "client_token" key;
// the unchecked type assertion below panics otherwise — confirm callers
// always set it.
func NewRedisSet(ctx context.Context, setKey string, lockLeaseTime uint64, needRefresh bool) *RedisSet {
	token := ctx.Value("client_token").(string)
	return &RedisSet{
		ctx:           ctx,
		key:           setKey,
		rwLocker:      locker.InitRWLocker(setKey, token, lockLeaseTime, needRefresh),
		storageClient: GetRedisClientInstance(),
		logger:        logger.GetLoggerInstance(),
	}
}
// SADD define func of add redis set by members
// It adds the members under the distributed write lock.
// NOTE(review): members is forwarded as one slice value rather than with
// `members...`; go-redis flattens a lone slice argument, but the spread form
// would be clearer — confirm the intended call shape.
func (rs *RedisSet) SADD(members ...any) error {
	err := rs.rwLocker.WLock(rs.ctx)
	if err != nil {
		logger.Error(rs.ctx, "lock wLock by setKey failed", "set_key", rs.key, "error", err)
		return err
	}
	defer rs.rwLocker.UnWLock(rs.ctx)
	err = rs.storageClient.SAdd(rs.ctx, rs.key, members).Err()
	if err != nil {
		logger.Error(rs.ctx, "add set by memebers failed", "set_key", rs.key, "members", members, "error", err)
		return err
	}
	return nil
}
// SREM define func of remove the specified members from redis set by key
// It removes the members under the distributed write lock and also reports
// an error when fewer members than requested were removed.
// BUGFIX: the original wrapped err with %w even when err was nil (count
// mismatch only), producing a malformed "%!w(<nil>)" message; the two
// failure modes are now distinguished.
func (rs *RedisSet) SREM(members ...any) error {
	err := rs.rwLocker.WLock(rs.ctx)
	if err != nil {
		logger.Error(rs.ctx, "lock wLock by setKey failed", "set_key", rs.key, "error", err)
		return err
	}
	defer rs.rwLocker.UnWLock(rs.ctx)
	count, err := rs.storageClient.SRem(rs.ctx, rs.key, members).Result()
	if err != nil {
		logger.Error(rs.ctx, "rem members from set failed", "set_key", rs.key, "members", members, "error", err)
		return fmt.Errorf("rem members from set failed:%w", err)
	}
	if count != int64(len(members)) {
		logger.Error(rs.ctx, "rem members from set failed", "set_key", rs.key, "members", members, "error", err)
		return fmt.Errorf("rem members from set failed: removed %d of %d members", count, len(members))
	}
	return nil
}
// SMembers define func of get all memebers from redis set by key
// It reads the whole set under the distributed read lock.
// NOTE(review): the failure log text mentions "hash key" although this is a
// set key — looks copied from the hash wrapper.
func (rs *RedisSet) SMembers() ([]string, error) {
	err := rs.rwLocker.RLock(rs.ctx)
	if err != nil {
		logger.Error(rs.ctx, "lock rLock by setKey failed", "set_key", rs.key, "error", err)
		return nil, err
	}
	defer rs.rwLocker.UnRLock(rs.ctx)
	result, err := rs.storageClient.SMembers(rs.ctx, rs.key).Result()
	if err != nil {
		logger.Error(rs.ctx, "get all set field by hash key failed", "set_key", rs.key, "error", err)
		return nil, err
	}
	return result, nil
}
// SIsMember define func of determine whether an member is in set by key
// NOTE(review): unlike the other methods this takes no distributed lock —
// confirm whether that is intentional.
func (rs *RedisSet) SIsMember(member any) (bool, error) {
	result, err := rs.storageClient.SIsMember(rs.ctx, rs.key, member).Result()
	if err != nil {
		// BUGFIX: the failure log previously said "get all set field by hash
		// key failed", copied from SMembers; report the actual operation.
		logger.Error(rs.ctx, "judge member is in set failed", "set_key", rs.key, "member", member, "error", err)
		return false, err
	}
	return result, nil
}

114
diagram/redis_string.go Normal file
View File

@ -0,0 +1,114 @@
package diagram
import (
"context"
locker "modelRT/distributedlock"
"modelRT/logger"
"github.com/redis/go-redis/v9"
"go.uber.org/zap"
)
// RedisString defines the encapsulation struct of redis string type
type RedisString struct {
	ctx           context.Context           // request-scoped context used for every redis call
	rwLocker      *locker.RedissionRWLocker // distributed RW lock guarding the string key
	storageClient *redis.Client             // shared redis client instance
	logger        *zap.Logger               // NOTE(review): appears unused by the methods below — confirm before removing
}
// NewRedisString define func of new redis string instance, wiring the
// distributed RW lock, the shared redis client and the logger.
func NewRedisString(ctx context.Context, stringKey string, token string, lockLeaseTime uint64, needRefresh bool) *RedisString {
	inst := &RedisString{
		ctx:           ctx,
		rwLocker:      locker.InitRWLocker(stringKey, token, lockLeaseTime, needRefresh),
		storageClient: GetRedisClientInstance(),
		logger:        logger.GetLoggerInstance(),
	}
	return inst
}
// Get define func of get the value of key under the distributed read lock.
// NOTE(review): the lock was initialized with the constructor's key while
// the value is read from the stringKey argument — confirm callers always
// pass the same key here.
func (rs *RedisString) Get(stringKey string) (string, error) {
	if err := rs.rwLocker.RLock(rs.ctx); err != nil {
		logger.Error(rs.ctx, "lock rLock by stringKey failed", "string_key", stringKey, "error", err)
		return "", err
	}
	defer rs.rwLocker.UnRLock(rs.ctx)
	value, err := rs.storageClient.Get(rs.ctx, stringKey).Result()
	if err != nil {
		logger.Error(rs.ctx, "get string value by key failed", "string_key", stringKey, "error", err)
		return "", err
	}
	return value, nil
}
// Set define func of set the value of key under the distributed write lock.
// The existing TTL is preserved via redis.KeepTTL.
func (rs *RedisString) Set(stringKey string, value interface{}) error {
	err := rs.rwLocker.WLock(rs.ctx)
	if err != nil {
		logger.Error(rs.ctx, "lock wLock by stringKey failed", "string_key", stringKey, "error", err)
		return err
	}
	defer rs.rwLocker.UnWLock(rs.ctx)
	err = rs.storageClient.Set(rs.ctx, stringKey, value, redis.KeepTTL).Err()
	if err != nil {
		// fixed copy-pasted log text: this is a write, not a read
		logger.Error(rs.ctx, "set string value by key failed", "string_key", stringKey, "error", err)
		return err
	}
	return nil
}
// Incr define func of increments the number stored at key by one under the
// distributed write lock.
func (rs *RedisString) Incr(stringKey string) error {
	if err := rs.rwLocker.WLock(rs.ctx); err != nil {
		logger.Error(rs.ctx, "lock wLock by stringKey failed", "string_key", stringKey, "error", err)
		return err
	}
	defer rs.rwLocker.UnWLock(rs.ctx)
	if err := rs.storageClient.Incr(rs.ctx, stringKey).Err(); err != nil {
		logger.Error(rs.ctx, "incr the number stored at key by one failed", "string_key", stringKey, "error", err)
		return err
	}
	return nil
}
// IncrBy define func of increments the number stored at key by increment
// under the distributed write lock.
func (rs *RedisString) IncrBy(stringKey string, value int64) error {
	err := rs.rwLocker.WLock(rs.ctx)
	if err != nil {
		logger.Error(rs.ctx, "lock wLock by stringKey failed", "string_key", stringKey, "error", err)
		return err
	}
	defer rs.rwLocker.UnWLock(rs.ctx)
	err = rs.storageClient.IncrBy(rs.ctx, stringKey, value).Err()
	if err != nil {
		// the original failure log omitted the word "failed"
		logger.Error(rs.ctx, "incr the number stored at key by increment failed", "string_key", stringKey, "error", err)
		return err
	}
	return nil
}
// GETDEL define func of get the value of key and delete the key under the
// distributed write lock.
// NOTE(review): the value returned by redis GETDEL is discarded here; only
// the delete effect is kept — confirm that is intended.
func (rs *RedisString) GETDEL(stringKey string) error {
	if err := rs.rwLocker.WLock(rs.ctx); err != nil {
		logger.Error(rs.ctx, "lock wLock by stringKey failed", "string_key", stringKey, "error", err)
		return err
	}
	defer rs.rwLocker.UnWLock(rs.ctx)
	if err := rs.storageClient.GetDel(rs.ctx, stringKey).Err(); err != nil {
		logger.Error(rs.ctx, "del the key failed", "string_key", stringKey, "error", err)
		return err
	}
	return nil
}

124
diagram/redis_zset.go Normal file
View File

@ -0,0 +1,124 @@
// Package diagram provide diagram data structure and operation
package diagram
import (
"context"
"iter"
"maps"
locker "modelRT/distributedlock"
"modelRT/logger"
"github.com/redis/go-redis/v9"
)
// RedisZSet defines the encapsulation struct of redis zset type
type RedisZSet struct {
	ctx           context.Context           // request-scoped context used for every redis call
	rwLocker      *locker.RedissionRWLocker // distributed RW lock created from the constructor key
	storageClient *redis.Client             // shared redis client instance
}
// NewRedisZSet define func of new redis zset instance.
func NewRedisZSet(ctx context.Context, key string, lockLeaseTime uint64, needRefresh bool) *RedisZSet {
	// comma-ok assertion: a missing or mistyped "client_token" value now
	// yields an empty token instead of panicking
	token, _ := ctx.Value("client_token").(string)
	return &RedisZSet{
		ctx:           ctx,
		rwLocker:      locker.InitRWLocker(key, token, lockLeaseTime, needRefresh),
		storageClient: GetRedisClientInstance(),
	}
}
// ZADD define func of add redis zset by members under the distributed
// write lock.
func (rs *RedisZSet) ZADD(setKey string, score float64, member interface{}) error {
	if err := rs.rwLocker.WLock(rs.ctx); err != nil {
		logger.Error(rs.ctx, "lock wLock by setKey failed", "set_key", setKey, "error", err)
		return err
	}
	defer rs.rwLocker.UnWLock(rs.ctx)
	entry := redis.Z{Score: score, Member: member}
	if err := rs.storageClient.ZAdd(rs.ctx, setKey, entry).Err(); err != nil {
		logger.Error(rs.ctx, "add set by score and memebers failed", "set_key", setKey, "members", member, "error", err)
		return err
	}
	return nil
}
// ZRANGE define func of returns the specified range of elements in the
// sorted set stored by key, taken under the distributed read lock.
func (rs *RedisZSet) ZRANGE(setKey string, start, stop int64) ([]string, error) {
	if err := rs.rwLocker.RLock(rs.ctx); err != nil {
		logger.Error(rs.ctx, "lock RLock by setKey failed", "set_key", setKey, "error", err)
		return nil, err
	}
	// the unlock result is logged rather than dropped
	defer func() {
		if unlockErr := rs.rwLocker.UnRLock(rs.ctx); unlockErr != nil {
			logger.Error(rs.ctx, "unlock RLock by setKey failed", "set_key", setKey, "error", unlockErr)
		}
	}()
	results, err := rs.storageClient.ZRange(rs.ctx, setKey, start, stop).Result()
	if err != nil {
		logger.Error(rs.ctx, "range set by key failed", "set_key", setKey, "start", start, "stop", stop, "error", err)
		return nil, err
	}
	return results, nil
}
// Comparer is satisfied by types that can order themselves against T,
// returning negative/zero/positive (strcmp-style).
type Comparer[T any] interface {
	Compare(T) int
}

// ComparableComparer additionally requires == support so values can serve
// as map keys.
type ComparableComparer[T any] interface {
	Compare(T) int
	comparable // embed the comparable constraint directly
}

// methodNode is a binary-tree node whose ordering comes from the element's
// Compare method.
type methodNode[E Comparer[E]] struct {
	value E
	left  *methodNode[E]
	right *methodNode[E]
}

// MethodTree is a binary tree ordered via E.Compare.
type MethodTree[E Comparer[E]] struct {
	root *methodNode[E]
}

// OrderedSet pairs a Compare-ordered tree with a membership map; elements
// must be both comparable and self-comparing.
type OrderedSet[E interface {
	comparable
	Comparer[E]
}] struct {
	tree     MethodTree[E]
	elements map[E]bool
}

// ComparableOrderedSet is OrderedSet expressed through the combined
// ComparableComparer constraint.
type ComparableOrderedSet[E ComparableComparer[E]] struct {
	tree     MethodTree[E]
	elements map[E]bool
}
type Set[E any] interface {
Insert(E)
Delete(E)
Has(E) bool
All() iter.Seq[E]
}
func InsertAll[E any](set Set[E], seq iter.Seq[E]) {
for v := range seq {
set.Insert(v)
}
}
type HashSet[E comparable] map[E]bool
func (s HashSet[E]) Insert(v E) { s[v] = true }
func (s HashSet[E]) Delete(v E) { delete(s, v) }
func (s HashSet[E]) Has(v E) bool { return s[v] }
func (s HashSet[E]) All() iter.Seq[E] { return maps.Keys(s) }

View File

@ -0,0 +1,6 @@
package constants
import "errors"
// AcquireTimeoutErr define error of get lock timeout.
// NOTE(review): Go convention (staticcheck ST1012) prefers the name
// ErrAcquireTimeout; kept as-is because external callers reference it.
var AcquireTimeoutErr = errors.New("the waiting time for obtaining the lock operation has timed out")

View File

@ -0,0 +1,136 @@
package constants
import (
"fmt"
)
// RedisCode is the integer status returned by the redis lock lua scripts.
type RedisCode int

// Status codes produced by the lock/unlock/refresh lua scripts.
const (
	LockSuccess                      = RedisCode(1)
	UnLockSuccess                    = RedisCode(1)
	RefreshLockSuccess               = RedisCode(1)
	UnRLockSuccess                   = RedisCode(0)
	UnWLockSuccess                   = RedisCode(0)
	RLockFailureWithWLockOccupancy   = RedisCode(-1)
	UnRLockFailureWithWLockOccupancy = RedisCode(-2)
	WLockFailureWithRLockOccupancy   = RedisCode(-3)
	WLockFailureWithWLockOccupancy   = RedisCode(-4)
	UnWLockFailureWithRLockOccupancy = RedisCode(-5)
	UnWLockFailureWithWLockOccupancy = RedisCode(-6)
	WLockFailureWithNotFirstPriority = RedisCode(-7)
	RefreshLockFailure               = RedisCode(-8)
	LockFailure                      = RedisCode(-9)
	// NOTE(review): identifier keeps its original misspelling ("Locak")
	// because callers reference it by this name.
	UnLocakFailureWithLockOccupancy = RedisCode(-10)
	UnknownInternalError            = RedisCode(-99)
)

// RedisLockType distinguishes which lock operation produced a result.
type RedisLockType int

const (
	LockType = RedisLockType(iota)
	UnRLockType
	UnWLockType
	UnLockType
	RefreshLockType
)

// RedisResult couples a script status code with a human-readable message
// and implements the error interface.
type RedisResult struct {
	Code    RedisCode
	Message string
}

// Error implements the error interface. Per Go convention the string
// carries no trailing newline (the original appended "\n").
func (e *RedisResult) Error() string {
	return fmt.Sprintf("redis execution code:%d,message:%s", e.Code, e.Message)
}

// OutputResultMessage returns the bare message text.
func (e *RedisResult) OutputResultMessage() string {
	return e.Message
}

// OutputResultCode returns the numeric status code.
func (e *RedisResult) OutputResultCode() int {
	return int(e.Code)
}

// NewRedisResult builds a *RedisResult error for the given script result
// code, the lock operation that produced it, and an optional raw redis
// message (used for internal errors and unknown codes).
func NewRedisResult(res RedisCode, lockType RedisLockType, redisMsg string) error {
	resInt := int(res)
	switch resInt {
	case 1:
		if lockType == LockType {
			return &RedisResult{Code: res, Message: "redis lock success"}
		} else if (lockType == UnRLockType) || (lockType == UnWLockType) || (lockType == UnLockType) {
			return &RedisResult{Code: res, Message: "redis unlock success"}
		} else {
			return &RedisResult{Code: res, Message: "redis refresh lock success"}
		}
	case 0:
		if lockType == UnRLockType {
			return &RedisResult{Code: res, Message: "redis unlock read lock success, the lock is still occupied by other processes read lock"}
		} else {
			return &RedisResult{Code: res, Message: "redis unlock write lock success, the lock is still occupied by other processes write lock"}
		}
	case -1:
		return &RedisResult{Code: res, Message: "redis lock read lock failure,the lock is already occupied by another processes write lock"}
	case -2:
		return &RedisResult{Code: res, Message: "redis un lock read lock failure,the lock is already occupied by another processes write lock"}
	case -3:
		// typo fix: "anthor" -> "another"
		return &RedisResult{Code: res, Message: "redis lock write lock failure,the lock is already occupied by another processes read lock"}
	case -4:
		return &RedisResult{Code: res, Message: "redis lock write lock failure,the lock is already occupied by another processes write lock"}
	case -5:
		return &RedisResult{Code: res, Message: "redis unlock write lock failure,the lock is already occupied by another processes read lock"}
	case -6:
		return &RedisResult{Code: res, Message: "redis unlock write lock failure,the lock is already occupied by another processes write lock"}
	case -7:
		return &RedisResult{Code: res, Message: "redis lock write lock failure,the first priority in the current process non-waiting queue"}
	case -8:
		return &RedisResult{Code: res, Message: "redis refresh lock failure,the lock not exist"}
	case -9:
		return &RedisResult{Code: res, Message: "redis lock failure,the lock is already occupied by another processes lock"}
	case -99:
		// no embedded newline: messages are single-line log fields
		return &RedisResult{Code: res, Message: fmt.Sprintf("redis internal execution error:%v", redisMsg)}
	default:
		// typo fix: "unkown" -> "unknown"; no trailing newline
		msg := "unknown redis execution result"
		if redisMsg != "" {
			msg = fmt.Sprintf("%s:%s", msg, redisMsg)
		}
		return &RedisResult{Code: res, Message: msg}
	}
}
// TranslateResultToStr maps a script result code (plus the lock operation
// that produced it) to a human-readable message. The original switch had
// no case for -9 (LockFailure), so that code fell through to the unknown
// default; it is handled explicitly now, and the "anthor"/"unkown" typos
// are corrected.
func TranslateResultToStr(res RedisCode, lockType RedisLockType) string {
	switch int(res) {
	case 1:
		if lockType == LockType {
			return "redis lock success"
		} else if (lockType == UnRLockType) || (lockType == UnWLockType) || (lockType == UnLockType) {
			return "redis unlock success"
		}
		return "redis refresh lock success"
	case 0:
		if lockType == UnRLockType {
			return "redis unlock read lock success, the lock is still occupied by other processes read lock"
		}
		return "redis unlock write lock success, the lock is still occupied by other processes write lock"
	case -1:
		return "redis lock read lock failure,the lock is already occupied by another processes write lock"
	case -2:
		return "redis un lock read lock failure,the lock is already occupied by another processes write lock"
	case -3:
		return "redis lock write lock failure,the lock is already occupied by another processes read lock"
	case -4:
		return "redis lock write lock failure,the lock is already occupied by another processes write lock"
	case -5:
		return "redis un lock write lock failure,the lock is already occupied by another processes read lock"
	case -6:
		return "redis un lock write lock failure,the lock is already occupied by another processes write lock"
	case -7:
		return "redis lock write lock failure,the first priority in the current process non-waiting queue"
	case -8:
		return "redis refresh lock failure,the lock not exist"
	case -9:
		return "redis lock failure,the lock is already occupied by another processes lock"
	}
	return "unknown redis execution result"
}

View File

@ -0,0 +1,45 @@
package distributedlock
import (
"sync"
"time"
"modelRT/config"
"modelRT/util"
"github.com/redis/go-redis/v9"
)
var (
	_globalLockerClient *redis.Client // process-wide redis client, set exactly once by InitClientInstance
	once                sync.Once     // guards the one-time initialization above
)
// initClient define func of return successfully initialized redis client.
// Any construction failure is treated as fatal and panics.
func initClient(rCfg config.RedisConfig) *redis.Client {
	c, err := util.NewRedisClient(
		rCfg.Addr,
		util.WithPassword(rCfg.Password),
		util.WithDB(rCfg.DB),
		util.WithPoolSize(rCfg.PoolSize),
		util.WithTimeout(time.Duration(rCfg.Timeout)*time.Second),
	)
	if err != nil {
		panic(err)
	}
	return c
}
// InitClientInstance define func of return instance of redis client,
// performing the one-time initialization on first call.
func InitClientInstance(rCfg config.RedisConfig) *redis.Client {
	once.Do(func() { _globalLockerClient = initClient(rCfg) })
	return _globalLockerClient
}
// GetRedisClientInstance define func of get redis client instance.
// NOTE(review): this returns nil until InitClientInstance has been called;
// callers must guarantee initialization ordering.
func GetRedisClientInstance() *redis.Client {
	// return directly — the intermediate local in the original added nothing
	return _globalLockerClient
}

View File

@ -0,0 +1,62 @@
package luascript
/*
KEYS[1]: lock key, the unique identifier of the lock
ARGV[1]: lock lease time (lockLeaseTime) — passed to 'expire', which takes
         seconds; NOTE(review): the Go callers appear to hold milliseconds,
         confirm the unit actually passed in.
ARGV[2]: unique client token, used to tell clients apart
*/
// LockScript acquires (or re-enters) a reentrant lock stored as a hash
// field of token -> hold count; returns 1 on success, -9 when the lock is
// held by a different client.
var LockScript = `
-- 锁不存在的情况下加锁
if (redis.call('exists', KEYS[1]) == 0) then
	redis.call('hset', KEYS[1], ARGV[2], 1);
	redis.call('expire', KEYS[1], ARGV[1]);
	return 1;
end;
-- 重入锁逻辑
if (redis.call('hexists', KEYS[1], ARGV[2]) == 1) then
	redis.call('hincrby', KEYS[1], ARGV[2], 1);
	redis.call('expire', KEYS[1], ARGV[1]);
	return 1;
end;
-- 持有锁的 token 不是当前客户端的 token,返回加锁失败
return -9;
`
/*
KEYS[1]: lock key, the unique identifier of the lock
ARGV[1]: lock lease time (lockLeaseTime), passed to 'expire' (seconds)
ARGV[2]: unique client token, used to tell clients apart
*/
// RefreshLockScript extends the lease while this token still holds the
// lock; returns 1 on success, -8 when the holder field no longer exists.
var RefreshLockScript = `
if (redis.call('hexists', KEYS[1], ARGV[2]) == 1) then
	redis.call('expire', KEYS[1], ARGV[1]);
	return 1;
end;
return -8;
`
/*
KEYS[1]: lock key, the unique identifier of the lock
KEYS[2]: release-notification channel key, used to tell other clients the lock was released
ARGV[1]: unlock message published to waiting clients
ARGV[2]: unique client token, used to tell clients apart
*/
// UnLockScript releases one hold of the reentrant lock.
// Fix: when the lock exists but this token does not hold it, the script
// now returns -10 (UnLocakFailureWithLockOccupancy, which the Go caller
// already handles); the original returned 1 there — falsely reporting
// success — and its trailing 'return -10' was unreachable dead code.
var UnLockScript = `
if (redis.call('exists', KEYS[1]) == 0) then
	redis.call('publish', KEYS[2], ARGV[1]);
	return 1;
end;
-- 持有锁的 token 不是当前客户端的 token,返回解锁失败
if (redis.call('hexists', KEYS[1], ARGV[2]) == 0) then
	return -10;
end;
local counter = redis.call('hincrby', KEYS[1], ARGV[2], -1);
if (counter > 0) then
	return 1;
else
	redis.call('del', KEYS[1]);
	redis.call('publish', KEYS[2], ARGV[1]);
	return 1;
end;
`

View File

@ -0,0 +1,263 @@
// Package luascript defines the lua script used for redis distributed lock
package luascript
// RLockScript is the lua script for the lock read lock command
/*
KEYS[1]: lock key, the unique identifier of the lock
KEYS[2]: per-token timeout field prefix (rwTimeoutPrefix), storing each reader's lease field
ARGV[1]: lock lease time (lockLeaseTime), milliseconds (used with hpexpire/pexpire)
ARGV[2]: unique client token, used to tell clients apart
*/
// Fix: the read-mode reentrancy test used redis.call('exists', KEYS[1], ARGV[2]),
// which counts existing *keys* and is therefore always truthy once the lock
// hash exists, so a new reader's first acquisition wrongly took the hincrby
// path. It now checks hexists(KEYS[1], lockKey), matching the write script.
var RLockScript = `
local mode = redis.call('hget', KEYS[1], 'mode');
local lockKey = KEYS[2] .. ':' .. ARGV[2];
if (mode == false) then
	redis.call('hset', KEYS[1], 'mode', 'read');
	redis.call('hset', KEYS[1], lockKey, '1');
	redis.call('hpexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey);
	redis.call('pexpire', KEYS[1], ARGV[1]);
	return 1;
end;
if (mode == 'write') then
	-- 放到 list 中等待写锁释放后再次尝试加锁并且订阅写锁释放的消息
	local waitKey = KEYS[1] .. ':read';
	redis.call('rpush', waitKey, ARGV[2]);
	return -1;
end;
if (mode == 'read') then
	if (redis.call('hexists', KEYS[1], lockKey) == 1) then
		redis.call('hincrby', KEYS[1], lockKey, '1');
		local remainTime = redis.call('hpttl', KEYS[1], 'fields', '1', lockKey);
		redis.call('hpexpire', KEYS[1], math.max(tonumber(remainTime[1]), ARGV[1]), 'fields', '1', lockKey);
	else
		redis.call('hset', KEYS[1], lockKey, '1');
		redis.call('hpexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey);
	end;
	local cursor = 0;
	local maxRemainTime = tonumber(ARGV[1]);
	local pattern = KEYS[2] .. ':*';
	repeat
		local hscanResult = redis.call('hscan', KEYS[1], cursor, 'match', pattern, 'count', '100');
		cursor = tonumber(hscanResult[1]);
		local fields = hscanResult[2];
		for i = 1, #fields,2 do
			local field = fields[i];
			local remainTime = redis.call('hpttl', KEYS[1], 'fields', '1', field);
			maxRemainTime = math.max(tonumber(remainTime[1]), maxRemainTime);
		end;
	until cursor == 0;
	local remainTime = redis.call('pttl', KEYS[1]);
	redis.call('pexpire', KEYS[1], math.max(tonumber(remainTime),maxRemainTime));
	return 1;
end;
`
// UnRLockScript is the lua script for the unlock read lock command
/*
KEYS[1]: lock key, the unique identifier of the lock
KEYS[2]: per-token timeout field prefix (rwTimeoutPrefix), storing each reader's lease field
KEYS[3]: write-waiter notification channel (chankey), used to tell waiting writers the lock was released
ARGV[1]: unlock message published to waiting clients
ARGV[2]: unique client token, used to tell clients apart
*/
// Returns 1 on full release (notifying queued writers), 0 when this token
// holds no read lock in read mode, -2 when the lock is in write mode.
// NOTE(review): in the branch where other readers remain (hlen > 1) the
// script falls off the end without an explicit return, yielding a nil
// reply; the Go caller treats redis.Nil + value 0 as UnRLockSuccess, so it
// works by accident — confirm and consider an explicit 'return 0'.
var UnRLockScript = `
local lockKey = KEYS[2] .. ':' .. ARGV[2];
local mode = redis.call('hget', KEYS[1], 'mode');
if (mode == false) then
	local writeWait = KEYS[1] .. ':write';
	-- 优先写锁加锁
	local counter = redis.call('llen',writeWait);
	if (counter >= 1) then
		redis.call('publish', KEYS[3], ARGV[1]);
	end;
	return 1;
elseif (mode == 'write') then
	return -2;
end;
-- 判断当前的确是读模式但是当前 token 并没有加读锁的情况返回 0
local lockExists = redis.call('hexists', KEYS[1], lockKey);
if ((mode == 'read') and (lockExists == 0)) then
	return 0;
end;
local counter = redis.call('hincrby', KEYS[1], lockKey, -1);
local delTTLs = redis.call('hpttl', KEYS[1], 'fields', '1', lockKey);
local delTTL = tonumber(delTTLs[1]);
if (counter == 0) then
	redis.call('hdel', KEYS[1], lockKey);
end;
if (redis.call('hlen', KEYS[1]) > 1) then
	local cursor = 0;
	local maxRemainTime = 0;
	local pattern = KEYS[2] .. ':*';
	repeat
		local hscanResult = redis.call('hscan', KEYS[1], cursor, 'match', pattern, 'count', '100');
		cursor = tonumber(hscanResult[1]);
		local fields = hscanResult[2];
		for i = 1, #fields,2 do
			local field = fields[i];
			local remainTime = redis.call('hpttl', KEYS[1], 'fields', '1', field);
			maxRemainTime = math.max(tonumber(remainTime[1]), maxRemainTime);
		end;
	until cursor == 0;
	if (maxRemainTime > 0) then
		if (delTTL > maxRemainTime) then
			redis.call('pexpire', KEYS[1], maxRemainTime);
		else
			local remainTime = redis.call('pttl', KEYS[1]);
			redis.call('pexpire', KEYS[1], math.max(tonumber(remainTime),maxRemainTime));
		end;
	end;
else
	redis.call('del', KEYS[1]);
	local writeWait = KEYS[1] .. ':write';
	-- 优先写锁加锁
	local counter = redis.call('llen',writeWait);
	if (counter >= 1) then
		redis.call('publish', KEYS[3], ARGV[1]);
	end;
	return 1;
end;
`
// WLockScript is the lua script for the lock write lock command
/*
KEYS[1]: lock key, the unique identifier of the lock
KEYS[2]: per-token timeout field prefix (rwTimeoutPrefix), storing each holder's lease field
ARGV[1]: lock lease time (lockLeaseTime), milliseconds (hpexpire/pexpire)
ARGV[2]: unique client token, used to tell clients apart
*/
// Returns 1 on acquisition/re-entry, -7 when another waiter is ahead in
// the write queue, -3 when blocked by readers, -4 when blocked by another
// writer (this token is queued on the write wait list in both cases).
var WLockScript = `
local mode = redis.call('hget', KEYS[1], 'mode');
local lockKey = KEYS[2] .. ':' .. ARGV[2];
local waitKey = KEYS[1] .. ':write';
if (mode == false) then
	local waitListLen = redis.call('llen', waitKey);
	if (waitListLen > 0) then
		local firstToken = redis.call('lindex', waitKey,'0');
		if (firstToken ~= ARGV[2]) then
			return -7;
		end;
	end;
	redis.call('hset', KEYS[1], 'mode', 'write');
	redis.call('hset', KEYS[1], lockKey, 1);
	redis.call('hpexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey);
	redis.call('pexpire', KEYS[1], ARGV[1]);
	redis.call('lpop', waitKey, '1');
	return 1;
elseif (mode == 'read') then
	-- 放到 list 中等待读锁释放后再次尝试加锁并且订阅读锁释放的消息
	redis.call('rpush', waitKey, ARGV[2]);
	return -3;
else
	-- 可重入写锁逻辑
	local lockKey = KEYS[2] .. ':' .. ARGV[2];
	local lockExists = redis.call('hexists', KEYS[1], lockKey);
	if (lockExists == 1) then
		redis.call('hincrby', KEYS[1], lockKey, 1);
		redis.call('hpexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey);
		redis.call('pexpire', KEYS[1], ARGV[1]);
		return 1;
	end;
	-- 放到 list 中等待写锁释放后再次尝试加锁并且订阅写锁释放的消息
	local key = KEYS[1] .. ':write';
	redis.call('rpush', key, ARGV[2]);
	return -4;
end;
`
// UnWLockScript is the lua script for the unlock write lock command
/*
KEYS[1]: lock key, the unique identifier of the lock
KEYS[2]: per-token timeout field prefix (rwTimeoutPrefix), storing each holder's lease field
KEYS[3]: write-waiter notification channel (writeChankey)
KEYS[4]: read-waiter notification channel (readChankey)
ARGV[1]: unlock message published to waiting clients
ARGV[2]: unique client token, used to tell clients apart
*/
// Returns 1 when fully released (queued writers are notified first,
// otherwise readers), 0 when the reentrant hold count is still positive,
// -5 when the lock is in read mode, -6 when this token holds no write lock.
var UnWLockScript = `
local mode = redis.call('hget', KEYS[1], 'mode');
local writeWait = KEYS[1] .. ':write';
if (mode == false) then
	-- 优先写锁加锁,无写锁的情况通知读锁加锁
	local counter = redis.call('llen',writeWait);
	if (counter >= 1) then
		redis.call('publish', KEYS[3], ARGV[1]);
	else
		redis.call('publish', KEYS[4], ARGV[1]);
	end;
	return 1;
elseif (mode == 'read') then
	return -5;
else
	local lockKey = KEYS[2] .. ':' .. ARGV[2];
	local lockExists = redis.call('hexists', KEYS[1], lockKey);
	if (lockExists >= 1) then
		-- 可重入写锁逻辑
		local incrRes = redis.call('hincrby', KEYS[1], lockKey, -1);
		if (incrRes == 0) then
			redis.call('del', KEYS[1]);
			local counter = redis.call('llen',writeWait);
			if (counter >= 1) then
				redis.call('publish', KEYS[3], ARGV[1]);
			else
				redis.call('publish', KEYS[4], ARGV[1]);
			end;
			return 1;
		end;
		return 0;
	else
		return -6;
	end;
end;
`
// RefreshRWLockScript is the lua script for the refresh lock command
/*
KEYS[1]: lock key, the unique identifier of the lock
KEYS[2]: per-token timeout field prefix (rwTimeoutPrefix), storing each holder's lease field
ARGV[1]: lock lease time (lockLeaseTime), milliseconds (hpexpire/pexpire)
ARGV[2]: unique client token, used to tell clients apart
*/
// Extends this token's lease field and, in read mode, re-derives the hash
// TTL from the longest remaining reader lease; returns 1 on success, -8
// when this token no longer holds the lock.
var RefreshRWLockScript = `
local lockKey = KEYS[2] .. ':' .. ARGV[2];
local lockExists = redis.call('hexists', KEYS[1], lockKey);
local mode = redis.call('hget', KEYS[1], 'mode');
local maxRemainTime = tonumber(ARGV[1]);
if (lockExists == 1) then
	redis.call('hpexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey);
	if (mode == 'read') then
		local cursor = 0;
		local pattern = KEYS[2] .. ':*';
		repeat
			local hscanResult = redis.call('hscan', KEYS[1], cursor, 'match', pattern, 'count', '100');
			cursor = tonumber(hscanResult[1]);
			local fields = hscanResult[2];
			for i = 1, #fields,2 do
				local field = fields[i];
				local remainTime = redis.call('hpttl', KEYS[1], 'fields', '1', field);
				maxRemainTime = math.max(tonumber(remainTime[1]), maxRemainTime);
			end;
		until cursor == 0;
		if (maxRemainTime > 0) then
			local remainTime = redis.call('pttl', KEYS[1]);
			redis.call('pexpire', KEYS[1], math.max(tonumber(remainTime),maxRemainTime));
		end;
	elseif (mode == 'write') then
		redis.call('pexpire', KEYS[1], ARGV[1]);
	end;
	-- return redis.call('pttl',KEYS[1]);
	return 1;
end;
return -8;
`

View File

@ -0,0 +1,256 @@
package distributedlock
import (
"context"
"errors"
"fmt"
"strings"
"sync"
"time"
constants "modelRT/distributedlock/constant"
luascript "modelRT/distributedlock/luascript"
"modelRT/logger"
uuid "github.com/gofrs/uuid"
"github.com/redis/go-redis/v9"
"go.uber.org/zap"
)
const (
	// internalLockLeaseTime is the default lock lease, 30*1000 (30s expressed in ms)
	internalLockLeaseTime = uint64(30 * 1000)
	// unlockMessage is the payload published on the wait channel when a lock is released
	unlockMessage = 0
)
// RedissionLockConfig define redission lock config
type RedissionLockConfig struct {
	LockLeaseTime uint64 // lease duration; defaults to internalLockLeaseTime when zero
	Token         string // client identity; a UUID is generated when empty
	Prefix        string // lock-key prefix; defaults to "redission-lock"
	ChanPrefix    string // wait-channel prefix; defaults to "redission-lock-channel"
	TimeoutPrefix string // NOTE(review): not read by GetLocker in this file — confirm usage
	Key           string // logical lock name, joined with the prefixes above
	NeedRefresh   bool   // whether to spawn the background lease-refresh goroutine
}
type redissionLocker struct {
	lockLeaseTime   uint64        // lease passed to the lua scripts; refresh interval is a third of it
	Token           string        // unique client token (ARGV[2] in the scripts)
	Key             string        // fully-prefixed redis lock key
	waitChanKey     string        // pub/sub channel waiters subscribe to for release notifications
	needRefresh     bool          // when true, a goroutine refreshes the lease until released
	refreshExitChan chan struct{} // closed to stop the refresh goroutine
	subExitChan     chan struct{} // closed to stop the subscription goroutine
	client          *redis.Client // redis connection used for script evaluation and pub/sub
	refreshOnce     *sync.Once    // ensures a single refresh goroutine per acquisition
}
// Lock acquires the distributed lock, optionally waiting up to timeout[0]
// for the current holder to release it (signalled over pub/sub).
func (rl *redissionLocker) Lock(ctx context.Context, timeout ...time.Duration) error {
	if rl.refreshExitChan == nil {
		rl.refreshExitChan = make(chan struct{})
	}
	result := rl.tryLock(ctx).(*constants.RedisResult)
	if result.Code == constants.UnknownInternalError {
		logger.Error(ctx, result.OutputResultMessage())
		return fmt.Errorf("get lock failed:%w", result)
	}
	if result.Code == constants.LockSuccess {
		// fix: a successful acquisition must return nil regardless of
		// needRefresh; the original only returned when needRefresh was true
		// and otherwise fell into the wait loop despite holding the lock.
		if rl.needRefresh {
			rl.refreshOnce.Do(func() {
				// async refresh lock timeout until an exit signal arrives
				go rl.refreshLockTimeout(ctx)
			})
		}
		return nil
	}
	if len(timeout) == 0 || timeout[0] <= 0 {
		return fmt.Errorf("lock the redis lock failed:%w", result)
	}
	subMsg := make(chan struct{}, 1)
	sub := rl.client.Subscribe(ctx, rl.waitChanKey)
	defer sub.Close()
	// fix: subMsg is no longer closed here — subscribeLock is the sender,
	// and a receiver-side close risked a send-on-closed-channel panic.
	go rl.subscribeLock(ctx, sub, subMsg)
	acquireTimer := time.NewTimer(timeout[0])
	defer acquireTimer.Stop() // fix: release the timer when returning early
	for {
		select {
		case _, ok := <-subMsg:
			if !ok {
				err := errors.New("failed to read the lock waiting for for the channel message")
				logger.Error(ctx, "failed to read the lock waiting for for the channel message")
				return err
			}
			resultErr := rl.tryLock(ctx).(*constants.RedisResult)
			if (resultErr.Code == constants.LockFailure) || (resultErr.Code == constants.UnknownInternalError) {
				logger.Info(ctx, resultErr.OutputResultMessage())
				continue
			}
			if resultErr.Code == constants.LockSuccess {
				logger.Info(ctx, resultErr.OutputResultMessage())
				// start the lease refresher on the retry path too
				if rl.needRefresh {
					rl.refreshOnce.Do(func() {
						go rl.refreshLockTimeout(ctx)
					})
				}
				return nil
			}
		case <-acquireTimer.C:
			err := errors.New("the waiting time for obtaining the lock operation has timed out")
			logger.Info(ctx, "the waiting time for obtaining the lock operation has timed out")
			return err
		}
	}
}
// subscribeLock forwards pub/sub release notifications into subMsgChan
// until subExitChan is closed, at which point it closes subMsgChan.
func (rl *redissionLocker) subscribeLock(ctx context.Context, sub *redis.PubSub, subMsgChan chan struct{}) {
	if sub == nil || subMsgChan == nil {
		return
	}
	logger.Info(ctx, "lock: enter sub routine", zap.String("token", rl.Token))
	for {
		select {
		case <-rl.subExitChan:
			close(subMsgChan)
			return
		case <-sub.Channel():
			// only genuine data messages arrive here
			subMsgChan <- struct{}{}
		}
		// fix: the empty `default:` branch made this loop a 100%-CPU busy
		// spin; the select now blocks until either channel is ready.
	}
}
/*
KEYS[1]: lock key, the unique identifier of the lock
ARGV[1]: lock lease time (lockLeaseTime)
ARGV[2]: unique client token, used to tell clients apart
*/
// refreshLockTimeout periodically extends the lease (every third of the
// lease time) until refreshExitChan is closed or the lock disappears.
func (rl *redissionLocker) refreshLockTimeout(ctx context.Context) {
	logger.Info(ctx, "lock refresh by key and token", zap.String("token", rl.Token), zap.String("key", rl.Key))
	lockTime := time.Duration(rl.lockLeaseTime/3) * time.Millisecond
	timer := time.NewTimer(lockTime)
	defer timer.Stop()
	for {
		select {
		case <-timer.C:
			// extend key lease time
			res := rl.client.Eval(ctx, luascript.RefreshLockScript, []string{rl.Key}, rl.lockLeaseTime, rl.Token)
			val, err := res.Int()
			if err != redis.Nil && err != nil {
				logger.Info(ctx, "lock refresh failed", "token", rl.Token, "key", rl.Key, "error", err)
				return
			}
			if constants.RedisCode(val) == constants.RefreshLockFailure {
				// fix: the original used `break`, which only left the select
				// and kept refreshing a lock that no longer exists; exit the
				// goroutine instead, matching the RW-locker implementation.
				logger.Error(ctx, "lock refreash failed,can not find the lock by key and token", "token", rl.Token, "key", rl.Key)
				return
			}
			if constants.RedisCode(val) == constants.RefreshLockSuccess {
				logger.Info(ctx, "lock refresh success by key and token", "token", rl.Token, "key", rl.Key)
			}
			timer.Reset(lockTime)
		case <-rl.refreshExitChan:
			return
		}
	}
}
// cancelRefreshLockTime stops the background lease refresher and resets
// state so the locker can be reused for a later acquisition.
func (rl *redissionLocker) cancelRefreshLockTime() {
	if rl.refreshExitChan != nil {
		close(rl.refreshExitChan)
		// fix: nil the channel so the next Lock call recreates it; the
		// original left the closed channel in place, making any subsequent
		// refresh goroutine observe it as already signalled and exit at once.
		rl.refreshExitChan = nil
		rl.refreshOnce = &sync.Once{}
	}
}
// closeSub tears down the pub/sub subscription and then the notification
// channel used by the subscription goroutine.
func (rl *redissionLocker) closeSub(ctx context.Context, sub *redis.PubSub, noticeChan chan struct{}) {
	if sub != nil {
		if err := sub.Close(); err != nil {
			logger.Error(ctx, "close sub failed", "token", rl.Token, "key", rl.Key, "error", err)
		}
	}
	if noticeChan != nil {
		close(noticeChan)
	}
}
/*
KEYS[1]: lock key, the unique identifier of the lock
ARGV[1]: lock lease time (lockLeaseTime)
ARGV[2]: unique client token, used to tell clients apart
*/
// tryLock runs the acquire script once and wraps the raw code in a
// *constants.RedisResult error value.
func (rl *redissionLocker) tryLock(ctx context.Context) error {
	res := rl.client.Eval(ctx, luascript.LockScript, []string{rl.Key}, rl.lockLeaseTime, rl.Token)
	val, err := res.Int()
	if err != nil && err != redis.Nil {
		return constants.NewRedisResult(constants.UnknownInternalError, constants.LockType, err.Error())
	}
	return constants.NewRedisResult(constants.RedisCode(val), constants.LockType, "")
}
/*
KEYS[1]: lock key, the unique identifier of the lock
KEYS[2]: release-notification channel key, used to tell other clients the lock was released
ARGV[1]: unlock message published to waiting clients
ARGV[2]: unique client token, used to tell clients apart
*/
// UnLock releases the lock held under this client's token and stops the
// lease refresher on full release.
func (rl *redissionLocker) UnLock(ctx context.Context) error {
	res := rl.client.Eval(ctx, luascript.UnLockScript, []string{rl.Key, rl.waitChanKey}, unlockMessage, rl.Token)
	val, err := res.Int()
	if err != nil && err != redis.Nil {
		logger.Info(ctx, "unlock lock failed", zap.String("token", rl.Token), zap.String("key", rl.Key), zap.Error(err))
		return fmt.Errorf("unlock lock failed:%w", constants.NewRedisResult(constants.UnknownInternalError, constants.UnLockType, err.Error()))
	}
	switch constants.RedisCode(val) {
	case constants.UnLockSuccess:
		if rl.needRefresh {
			rl.cancelRefreshLockTime()
		}
		logger.Info(ctx, "unlock lock success", zap.String("token", rl.Token), zap.String("key", rl.Key))
		return nil
	case constants.UnLocakFailureWithLockOccupancy:
		logger.Info(ctx, "unlock lock failed", zap.String("token", rl.Token), zap.String("key", rl.Key))
		return fmt.Errorf("unlock lock failed:%w", constants.NewRedisResult(constants.UnLocakFailureWithLockOccupancy, constants.UnLockType, ""))
	default:
		return nil
	}
}
// GetLocker builds a redissionLocker from config, generating a token when
// absent and applying prefix/lease defaults.
// TODO: replace the uuid panic with an error return.
func GetLocker(client *redis.Client, ops *RedissionLockConfig) *redissionLocker {
	if ops.Token == "" {
		token, err := uuid.NewV4()
		if err != nil {
			panic(err)
		}
		ops.Token = token.String()
	}
	if len(ops.Prefix) <= 0 {
		ops.Prefix = "redission-lock"
	}
	if len(ops.ChanPrefix) <= 0 {
		ops.ChanPrefix = "redission-lock-channel"
	}
	if ops.LockLeaseTime == 0 {
		ops.LockLeaseTime = internalLockLeaseTime
	}
	r := &redissionLocker{
		// fix: lockLeaseTime was never assigned, leaving a zero lease and a
		// zero refresh interval for every locker built here
		lockLeaseTime:   ops.LockLeaseTime,
		Token:           ops.Token,
		Key:             strings.Join([]string{ops.Prefix, ops.Key}, ":"),
		waitChanKey:     strings.Join([]string{ops.ChanPrefix, ops.Key, "wait"}, ":"),
		needRefresh:     ops.NeedRefresh,
		client:          client,
		refreshExitChan: make(chan struct{}),
		// fix: refreshOnce was left nil, so Lock's refreshOnce.Do panicked
		// with a nil-pointer dereference whenever NeedRefresh was set
		refreshOnce: &sync.Once{},
	}
	return r
}

View File

@ -0,0 +1,329 @@
package distributedlock
import (
"context"
"errors"
"fmt"
"strings"
"sync"
"time"
constants "modelRT/distributedlock/constant"
"modelRT/distributedlock/luascript"
"modelRT/logger"
uuid "github.com/gofrs/uuid"
"github.com/redis/go-redis/v9"
)
// RedissionRWLocker layers read/write lock semantics on top of the basic
// redissionLocker, with separate wait channels for readers and writers.
type RedissionRWLocker struct {
	redissionLocker
	writeWaitChanKey     string // pub/sub channel notified when writers may retry
	readWaitChanKey      string // pub/sub channel notified when readers may retry
	RWTokenTimeoutPrefix string // per-token lease field prefix (KEYS[2] in the RW scripts)
}
// RLock acquires the distributed read lock, optionally waiting up to
// timeout[0] for a writer to release (signalled on readWaitChanKey).
// Returns nil on acquisition, AcquireTimeoutErr on timeout.
func (rl *RedissionRWLocker) RLock(ctx context.Context, timeout ...time.Duration) error {
	result := rl.tryRLock(ctx).(*constants.RedisResult)
	if result.Code == constants.UnknownInternalError {
		logger.Error(ctx, result.OutputResultMessage())
		return fmt.Errorf("get read lock failed:%w", result)
	}
	if result.Code == constants.LockSuccess {
		if rl.needRefresh {
			rl.refreshOnce.Do(func() {
				if rl.refreshExitChan == nil {
					rl.refreshExitChan = make(chan struct{})
				}
				// async refresh of the lease until an exit signal arrives
				go rl.refreshLockTimeout(ctx)
			})
		}
		logger.Info(ctx, "success get the read lock by key and token", "key", rl.Key, "token", rl.Token)
		return nil
	}
	if len(timeout) > 0 && timeout[0] > 0 {
		if rl.subExitChan == nil {
			rl.subExitChan = make(chan struct{})
		}
		subMsgChan := make(chan struct{}, 1)
		sub := rl.client.Subscribe(ctx, rl.readWaitChanKey)
		go rl.subscribeLock(ctx, sub, subMsgChan)
		// NOTE(review): acquireTimer is never stopped on the success path;
		// transient, but a defer acquireTimer.Stop() would be tidier.
		acquireTimer := time.NewTimer(timeout[0])
		for {
			select {
			case _, ok := <-subMsgChan:
				if !ok {
					err := errors.New("failed to read the read lock waiting for for the channel message")
					logger.Error(ctx, "failed to read the read lock waiting for for the channel message")
					return err
				}
				// a release was signalled: retry the read-lock script
				result := rl.tryRLock(ctx).(*constants.RedisResult)
				if (result.Code == constants.RLockFailureWithWLockOccupancy) || (result.Code == constants.UnknownInternalError) {
					logger.Info(ctx, result.OutputResultMessage())
					continue
				}
				if result.Code == constants.LockSuccess {
					logger.Info(ctx, result.OutputResultMessage())
					rl.closeSub(ctx, sub, rl.subExitChan)
					if rl.needRefresh {
						rl.refreshOnce.Do(func() {
							if rl.refreshExitChan == nil {
								rl.refreshExitChan = make(chan struct{})
							}
							// async refresh of the lease until an exit signal arrives
							go rl.refreshLockTimeout(ctx)
						})
					}
					return nil
				}
			case <-acquireTimer.C:
				logger.Info(ctx, "the waiting time for obtaining the read lock operation has timed out")
				rl.closeSub(ctx, sub, rl.subExitChan)
				// after acquire-lock timeout, notify the sub goroutine to close
				return constants.AcquireTimeoutErr
			}
		}
	}
	return fmt.Errorf("lock the redis read lock failed:%w", result)
}
// tryRLock runs the read-lock script once and wraps the raw code in a
// *constants.RedisResult error value.
func (rl *RedissionRWLocker) tryRLock(ctx context.Context) error {
	res := rl.client.Eval(ctx, luascript.RLockScript, []string{rl.Key, rl.RWTokenTimeoutPrefix}, rl.lockLeaseTime, rl.Token)
	code, err := res.Int()
	if err != nil && err != redis.Nil {
		return constants.NewRedisResult(constants.UnknownInternalError, constants.LockType, err.Error())
	}
	return constants.NewRedisResult(constants.RedisCode(code), constants.LockType, "")
}
// refreshLockTimeout periodically extends this token's RW lease (every
// third of the lease time) until refreshExitChan is closed or refresh fails.
func (rl *RedissionRWLocker) refreshLockTimeout(ctx context.Context) {
	logger.Info(ctx, "lock refresh by key and token", "token", rl.Token, "key", rl.Key)
	lockTime := time.Duration(rl.lockLeaseTime/3) * time.Millisecond
	timer := time.NewTimer(lockTime)
	defer timer.Stop()
	for {
		select {
		case <-timer.C:
			// extend key lease time
			res := rl.client.Eval(ctx, luascript.RefreshRWLockScript, []string{rl.Key, rl.RWTokenTimeoutPrefix}, rl.lockLeaseTime, rl.Token)
			val, err := res.Int()
			if err != redis.Nil && err != nil {
				logger.Info(ctx, "lock refresh failed", "token", rl.Token, "key", rl.Key, "error", err)
				return
			}
			if constants.RedisCode(val) == constants.RefreshLockFailure {
				// the lock field vanished: stop refreshing
				logger.Error(ctx, "lock refreash failed,can not find the read lock by key and token", "rwTokenPrefix", rl.RWTokenTimeoutPrefix, "token", rl.Token, "key", rl.Key)
				return
			}
			if constants.RedisCode(val) == constants.RefreshLockSuccess {
				logger.Info(ctx, "lock refresh success by key and token", "token", rl.Token, "key", rl.Key)
			}
			timer.Reset(lockTime)
		case <-rl.refreshExitChan:
			return
		}
	}
}
// UnRLock releases the read lock identified by rl.Key/rl.Token by running the
// read-unlock Lua script. When the release removes the last holder
// (UnLockSuccess) the background lease-refresh goroutine, if any, is stopped.
// It returns nil on success, or an error wrapping a *constants.RedisResult
// describing why the unlock could not be performed.
func (rl *RedissionRWLocker) UnRLock(ctx context.Context) error {
	logger.Info(ctx, "unlock RLock by key and token", "key", rl.Key, "token", rl.Token)
	res := rl.client.Eval(ctx, luascript.UnRLockScript, []string{rl.Key, rl.RWTokenTimeoutPrefix, rl.writeWaitChanKey}, unlockMessage, rl.Token)
	val, err := res.Int()
	if err != redis.Nil && err != nil {
		// a real redis error is logged at Error level, consistent with UnWLock
		logger.Error(ctx, "unlock read lock failed", "token", rl.Token, "key", rl.Key, "error", err)
		return fmt.Errorf("unlock read lock failed:%w", constants.NewRedisResult(constants.UnknownInternalError, constants.UnRLockType, err.Error()))
	}
	if (constants.RedisCode(val) == constants.UnLockSuccess) || (constants.RedisCode(val) == constants.UnRLockSuccess) {
		// UnLockSuccess means the last read holder released the lock: the
		// key no longer exists, so the refresh watchdog must be cancelled
		if rl.needRefresh && (constants.RedisCode(val) == constants.UnLockSuccess) {
			rl.cancelRefreshLockTime()
		}
		logger.Info(ctx, "unlock read lock success", "token", rl.Token, "key", rl.Key)
		return nil
	}
	if constants.RedisCode(val) == constants.UnRLockFailureWithWLockOccupancy {
		logger.Info(ctx, "unlock read lock failed", "token", rl.Token, "key", rl.Key)
		return fmt.Errorf("unlock read lock failed:%w", constants.NewRedisResult(constants.UnRLockFailureWithWLockOccupancy, constants.UnRLockType, ""))
	}
	// NOTE(review): any other result code silently falls through as success;
	// confirm this is intentional rather than returning an explicit error
	return nil
}
// WLock acquires the write lock. It first makes a single non-blocking
// attempt; if that fails and a positive timeout is supplied, it subscribes
// to the write-lock wait channel and retries on every unlock notification
// until it either succeeds or the timeout elapses.
//
// Parameters:
//   - ctx: request context, passed to redis and to the refresh goroutine.
//   - timeout: optional; timeout[0] > 0 enables the blocking wait mode.
//
// Returns nil once the lock is held, constants.AcquireTimeoutErr when the
// wait times out, or an error wrapping a *constants.RedisResult otherwise.
func (rl *RedissionRWLocker) WLock(ctx context.Context, timeout ...time.Duration) error {
	result := rl.tryWLock(ctx).(*constants.RedisResult)
	if result.Code == constants.UnknownInternalError {
		logger.Error(ctx, result.OutputResultMessage())
		return fmt.Errorf("get write lock failed:%w", result)
	}
	if result.Code == constants.LockSuccess {
		if rl.needRefresh {
			rl.refreshOnce.Do(func() {
				if rl.refreshExitChan == nil {
					rl.refreshExitChan = make(chan struct{})
				}
				// async refresh lock timeout until receive exit signal
				go rl.refreshLockTimeout(ctx)
			})
		}
		logger.Info(ctx, "success get the write lock by key and token", "key", rl.Key, "token", rl.Token)
		return nil
	}
	if len(timeout) > 0 && timeout[0] > 0 {
		if rl.subExitChan == nil {
			rl.subExitChan = make(chan struct{})
		}
		subMsgChan := make(chan struct{}, 1)
		sub := rl.client.Subscribe(ctx, rl.writeWaitChanKey)
		go rl.subscribeLock(ctx, sub, subMsgChan)
		acquireTimer := time.NewTimer(timeout[0])
		// release the timer once the wait loop exits on any path
		defer acquireTimer.Stop()
		for {
			select {
			case _, ok := <-subMsgChan:
				if !ok {
					// fix: message names the write lock (the original was a
					// copy-paste of the read-lock message with a doubled "for")
					err := errors.New("failed to read the write lock waiting channel message")
					logger.Error(ctx, "failed to read the write lock waiting channel message")
					return err
				}
				result := rl.tryWLock(ctx).(*constants.RedisResult)
				if (result.Code == constants.UnknownInternalError) || (result.Code == constants.WLockFailureWithRLockOccupancy) || (result.Code == constants.WLockFailureWithWLockOccupancy) || (result.Code == constants.WLockFailureWithNotFirstPriority) {
					// still contended (or transient error): wait for the
					// next unlock notification
					logger.Info(ctx, result.OutputResultMessage())
					continue
				}
				if result.Code == constants.LockSuccess {
					logger.Info(ctx, result.OutputResultMessage())
					rl.closeSub(ctx, sub, rl.subExitChan)
					if rl.needRefresh {
						rl.refreshOnce.Do(func() {
							if rl.refreshExitChan == nil {
								rl.refreshExitChan = make(chan struct{})
							}
							// async refresh lock timeout until receive exit signal
							go rl.refreshLockTimeout(ctx)
						})
					}
					return nil
				}
			case <-acquireTimer.C:
				logger.Info(ctx, "the waiting time for obtaining the write lock operation has timed out")
				// after acquire lock timeout, notify the sub goroutine to close
				rl.closeSub(ctx, sub, rl.subExitChan)
				return constants.AcquireTimeoutErr
			}
		}
	}
	// no timeout supplied: report the immediate acquisition failure
	return fmt.Errorf("lock write lock failed:%w", result)
}
// tryWLock performs a single, non-blocking write-lock acquisition attempt by
// running the write-lock Lua script against redis. The returned error is
// always a *constants.RedisResult whose Code reports the outcome; a redis.Nil
// reply is treated as a normal script result rather than a failure.
func (rl *RedissionRWLocker) tryWLock(ctx context.Context) error {
	evalRes := rl.client.Eval(ctx, luascript.WLockScript, []string{rl.Key, rl.RWTokenTimeoutPrefix}, rl.lockLeaseTime, rl.Token)
	code, evalErr := evalRes.Int()
	if evalErr != nil && evalErr != redis.Nil {
		return constants.NewRedisResult(constants.UnknownInternalError, constants.LockType, evalErr.Error())
	}
	return constants.NewRedisResult(constants.RedisCode(code), constants.LockType, "")
}
// UnWLock releases the write lock identified by rl.Key/rl.Token by running
// the write-unlock Lua script, which also notifies waiting readers and
// writers. On a full release (UnLockSuccess) the background lease-refresh
// goroutine, if any, is stopped. It returns nil on success, or an error
// wrapping a *constants.RedisResult describing the failure.
func (rl *RedissionRWLocker) UnWLock(ctx context.Context) error {
	// entry log added for consistency with UnRLock
	logger.Info(ctx, "unlock WLock by key and token", "key", rl.Key, "token", rl.Token)
	res := rl.client.Eval(ctx, luascript.UnWLockScript, []string{rl.Key, rl.RWTokenTimeoutPrefix, rl.writeWaitChanKey, rl.readWaitChanKey}, unlockMessage, rl.Token)
	val, err := res.Int()
	if err != redis.Nil && err != nil {
		logger.Error(ctx, "unlock write lock failed", "token", rl.Token, "key", rl.Key, "error", err)
		return fmt.Errorf("unlock write lock failed:%w", constants.NewRedisResult(constants.UnknownInternalError, constants.UnWLockType, err.Error()))
	}
	if (constants.RedisCode(val) == constants.UnLockSuccess) || (constants.RedisCode(val) == constants.UnWLockSuccess) {
		// UnLockSuccess means the key was fully removed, so the refresh
		// watchdog must be cancelled
		if rl.needRefresh && (constants.RedisCode(val) == constants.UnLockSuccess) {
			rl.cancelRefreshLockTime()
		}
		logger.Info(ctx, "unlock write lock success", "token", rl.Token, "key", rl.Key)
		return nil
	}
	if (constants.RedisCode(val) == constants.UnWLockFailureWithRLockOccupancy) || (constants.RedisCode(val) == constants.UnWLockFailureWithWLockOccupancy) {
		logger.Info(ctx, "unlock write lock failed", "token", rl.Token, "key", rl.Key)
		return fmt.Errorf("unlock write lock failed:%w", constants.NewRedisResult(constants.RedisCode(val), constants.UnWLockType, ""))
	}
	// NOTE(review): any other result code silently falls through as success;
	// confirm this is intentional rather than returning an explicit error
	return nil
}
// TODO: optimize the panic into a returned error.

// GetRWLocker builds a read/write locker on top of client using conf.
// Unset fields of conf are populated in place with defaults: a freshly
// generated UUIDv4 token, the standard key/timeout/channel prefixes, and
// the internal lease time. A UUID generation failure panics (see TODO).
func GetRWLocker(client *redis.Client, conf *RedissionLockConfig) *RedissionRWLocker {
	if conf.Token == "" {
		newToken, genErr := uuid.NewV4()
		if genErr != nil {
			panic(genErr)
		}
		conf.Token = newToken.String()
	}
	if conf.Prefix == "" {
		conf.Prefix = "redission-rwlock"
	}
	if conf.TimeoutPrefix == "" {
		conf.TimeoutPrefix = "rwlock_timeout"
	}
	if conf.ChanPrefix == "" {
		conf.ChanPrefix = "redission-rwlock-channel"
	}
	if conf.LockLeaseTime == 0 {
		conf.LockLeaseTime = internalLockLeaseTime
	}
	base := redissionLocker{
		Token:         conf.Token,
		Key:           strings.Join([]string{conf.Prefix, conf.Key}, ":"),
		needRefresh:   conf.NeedRefresh,
		lockLeaseTime: conf.LockLeaseTime,
		client:        client,
		refreshOnce:   &sync.Once{},
	}
	return &RedissionRWLocker{
		redissionLocker:      base,
		writeWaitChanKey:     strings.Join([]string{conf.ChanPrefix, conf.Key, "write"}, ":"),
		readWaitChanKey:      strings.Join([]string{conf.ChanPrefix, conf.Key, "read"}, ":"),
		RWTokenTimeoutPrefix: conf.TimeoutPrefix,
	}
}
// InitRWLocker is a convenience constructor: it wraps GetRWLocker with the
// process-wide redis client instance and a config built from the arguments.
func InitRWLocker(key string, token string, lockLeaseTime uint64, needRefresh bool) *RedissionRWLocker {
	cfg := RedissionLockConfig{
		Key:           key,
		Token:         token,
		LockLeaseTime: lockLeaseTime,
		NeedRefresh:   needRefresh,
	}
	return GetRWLocker(GetRedisClientInstance(), &cfg)
}

View File

@ -9,12 +9,136 @@ const docTemplate = `{
"info": {
"description": "{{escape .Description}}",
"title": "{{.Title}}",
"contact": {},
"contact": {
"name": "douxu",
"url": "http://www.swagger.io/support",
"email": "douxu@clea.com.cn"
},
"license": {
"name": "Apache 2.0",
"url": "http://www.apache.org/licenses/LICENSE-2.0.html"
},
"version": "{{.Version}}"
},
"host": "{{.Host}}",
"basePath": "{{.BasePath}}",
"paths": {
"/data/realtime": {
"get": {
"description": "根据用户输入的组件token,从 dataRT 服务中持续获取测点实时数据",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"RealTime Component"
],
"summary": "获取实时测点数据",
"parameters": [
{
"type": "string",
"description": "测量点唯一标识符 (e.g.grid_1:zone_1:station_1:transformfeeder1_220.I_A_rms)",
"name": "token",
"in": "query",
"required": true
},
{
"type": "integer",
"description": "查询起始时间 (Unix时间戳, e.g., 1761008266)",
"name": "begin",
"in": "query",
"required": true
},
{
"type": "integer",
"description": "查询结束时间 (Unix时间戳, e.g., 1761526675)",
"name": "end",
"in": "query",
"required": true
}
],
"responses": {
"200": {
"description": "返回实时数据成功",
"schema": {
"allOf": [
{
"$ref": "#/definitions/network.SuccessResponse"
},
{
"type": "object",
"properties": {
"payload": {
"$ref": "#/definitions/network.RealTimeDataPayload"
}
}
}
]
}
},
"400": {
"description": "返回实时数据失败",
"schema": {
"$ref": "#/definitions/network.FailureResponse"
}
}
}
}
},
"/measurement/recommend": {
"get": {
"description": "根据用户输入的字符串,从 Redis 中查询可能的测量点或结构路径,并提供推荐列表。",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"Measurement Recommend"
],
"summary": "测量点推荐(搜索框自动补全)",
"parameters": [
{
"description": "查询输入参数,例如 'trans' 或 'transformfeeder1_220.'",
"name": "request",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/network.MeasurementRecommendRequest"
}
}
],
"responses": {
"200": {
"description": "返回推荐列表成功",
"schema": {
"allOf": [
{
"$ref": "#/definitions/network.SuccessResponse"
},
{
"type": "object",
"properties": {
"payload": {
"$ref": "#/definitions/network.MeasurementRecommendPayload"
}
}
}
]
}
},
"400": {
"description": "返回推荐列表失败",
"schema": {
"$ref": "#/definitions/network.FailureResponse"
}
}
}
}
},
"/model/diagram_load/{page_id}": {
"get": {
"description": "load circuit diagram info by page id",
@ -55,56 +179,77 @@ const docTemplate = `{
}
},
"definitions": {
"network.FailResponseHeader": {
"type": "object",
"properties": {
"err_msg": {
"type": "string"
},
"status": {
"type": "integer",
"example": 400
}
}
},
"network.FailureResponse": {
"type": "object",
"properties": {
"header": {
"$ref": "#/definitions/network.FailResponseHeader"
"code": {
"type": "integer",
"example": 500
},
"msg": {
"type": "string",
"example": "failed to get recommend data from redis"
},
"payload": {
"type": "object",
"additionalProperties": true
"type": "object"
}
}
},
"network.MeasurementRecommendPayload": {
"type": "object",
"properties": {
"input": {
"type": "string",
"example": "transformfeeder1_220."
},
"offset": {
"type": "integer",
"example": 21
},
"recommended_list": {
"type": "array",
"items": {
"type": "string"
},
"example": [
"[\"I_A_rms\"",
" \"I_B_rms\"",
"\"I_C_rms\"]"
]
}
}
},
"network.MeasurementRecommendRequest": {
"type": "object",
"properties": {
"input": {
"type": "string",
"example": "trans"
}
}
},
"network.RealTimeDataPayload": {
"type": "object",
"properties": {
"sub_pos": {
"description": "TODO 增加example tag",
"type": "object"
}
}
},
"network.SuccessResponse": {
"type": "object",
"properties": {
"header": {
"$ref": "#/definitions/network.SuccessResponseHeader"
},
"payload": {
"type": "object",
"additionalProperties": {
"type": "string"
},
"example": {
"key": "value"
}
}
}
},
"network.SuccessResponseHeader": {
"type": "object",
"properties": {
"err_msg": {
"type": "string"
},
"status": {
"code": {
"type": "integer",
"example": 200
},
"msg": {
"type": "string",
"example": "success"
},
"payload": {
"type": "object"
}
}
}
@ -113,12 +258,12 @@ const docTemplate = `{
// SwaggerInfo holds exported Swagger Info so clients can modify it
var SwaggerInfo = &swag.Spec{
Version: "",
Host: "",
BasePath: "",
Version: "1.0",
Host: "localhost:8080",
BasePath: "/api/v1",
Schemes: []string{},
Title: "",
Description: "",
Title: "ModelRT 实时模型服务 API 文档",
Description: "实时数据计算和模型运行服务的 API 服务",
InfoInstanceName: "swagger",
SwaggerTemplate: docTemplate,
LeftDelim: "{{",

View File

@ -1,9 +1,138 @@
{
"swagger": "2.0",
"info": {
"contact": {}
"description": "实时数据计算和模型运行服务的 API 服务",
"title": "ModelRT 实时模型服务 API 文档",
"contact": {
"name": "douxu",
"url": "http://www.swagger.io/support",
"email": "douxu@clea.com.cn"
},
"license": {
"name": "Apache 2.0",
"url": "http://www.apache.org/licenses/LICENSE-2.0.html"
},
"version": "1.0"
},
"host": "localhost:8080",
"basePath": "/api/v1",
"paths": {
"/data/realtime": {
"get": {
"description": "根据用户输入的组件token,从 dataRT 服务中持续获取测点实时数据",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"RealTime Component"
],
"summary": "获取实时测点数据",
"parameters": [
{
"type": "string",
"description": "测量点唯一标识符 (e.g.grid_1:zone_1:station_1:transformfeeder1_220.I_A_rms)",
"name": "token",
"in": "query",
"required": true
},
{
"type": "integer",
"description": "查询起始时间 (Unix时间戳, e.g., 1761008266)",
"name": "begin",
"in": "query",
"required": true
},
{
"type": "integer",
"description": "查询结束时间 (Unix时间戳, e.g., 1761526675)",
"name": "end",
"in": "query",
"required": true
}
],
"responses": {
"200": {
"description": "返回实时数据成功",
"schema": {
"allOf": [
{
"$ref": "#/definitions/network.SuccessResponse"
},
{
"type": "object",
"properties": {
"payload": {
"$ref": "#/definitions/network.RealTimeDataPayload"
}
}
}
]
}
},
"400": {
"description": "返回实时数据失败",
"schema": {
"$ref": "#/definitions/network.FailureResponse"
}
}
}
}
},
"/measurement/recommend": {
"get": {
"description": "根据用户输入的字符串,从 Redis 中查询可能的测量点或结构路径,并提供推荐列表。",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"Measurement Recommend"
],
"summary": "测量点推荐(搜索框自动补全)",
"parameters": [
{
"description": "查询输入参数,例如 'trans' 或 'transformfeeder1_220.'",
"name": "request",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/network.MeasurementRecommendRequest"
}
}
],
"responses": {
"200": {
"description": "返回推荐列表成功",
"schema": {
"allOf": [
{
"$ref": "#/definitions/network.SuccessResponse"
},
{
"type": "object",
"properties": {
"payload": {
"$ref": "#/definitions/network.MeasurementRecommendPayload"
}
}
}
]
}
},
"400": {
"description": "返回推荐列表失败",
"schema": {
"$ref": "#/definitions/network.FailureResponse"
}
}
}
}
},
"/model/diagram_load/{page_id}": {
"get": {
"description": "load circuit diagram info by page id",
@ -44,56 +173,77 @@
}
},
"definitions": {
"network.FailResponseHeader": {
"type": "object",
"properties": {
"err_msg": {
"type": "string"
},
"status": {
"type": "integer",
"example": 400
}
}
},
"network.FailureResponse": {
"type": "object",
"properties": {
"header": {
"$ref": "#/definitions/network.FailResponseHeader"
"code": {
"type": "integer",
"example": 500
},
"msg": {
"type": "string",
"example": "failed to get recommend data from redis"
},
"payload": {
"type": "object",
"additionalProperties": true
"type": "object"
}
}
},
"network.MeasurementRecommendPayload": {
"type": "object",
"properties": {
"input": {
"type": "string",
"example": "transformfeeder1_220."
},
"offset": {
"type": "integer",
"example": 21
},
"recommended_list": {
"type": "array",
"items": {
"type": "string"
},
"example": [
"[\"I_A_rms\"",
" \"I_B_rms\"",
"\"I_C_rms\"]"
]
}
}
},
"network.MeasurementRecommendRequest": {
"type": "object",
"properties": {
"input": {
"type": "string",
"example": "trans"
}
}
},
"network.RealTimeDataPayload": {
"type": "object",
"properties": {
"sub_pos": {
"description": "TODO 增加example tag",
"type": "object"
}
}
},
"network.SuccessResponse": {
"type": "object",
"properties": {
"header": {
"$ref": "#/definitions/network.SuccessResponseHeader"
},
"payload": {
"type": "object",
"additionalProperties": {
"type": "string"
},
"example": {
"key": "value"
}
}
}
},
"network.SuccessResponseHeader": {
"type": "object",
"properties": {
"err_msg": {
"type": "string"
},
"status": {
"code": {
"type": "integer",
"example": 200
},
"msg": {
"type": "string",
"example": "success"
},
"payload": {
"type": "object"
}
}
}

View File

@ -1,42 +1,140 @@
basePath: /api/v1
definitions:
network.FailResponseHeader:
properties:
err_msg:
type: string
status:
example: 400
type: integer
type: object
network.FailureResponse:
properties:
header:
$ref: '#/definitions/network.FailResponseHeader'
code:
example: 500
type: integer
msg:
example: failed to get recommend data from redis
type: string
payload:
additionalProperties: true
type: object
type: object
network.MeasurementRecommendPayload:
properties:
input:
example: transformfeeder1_220.
type: string
offset:
example: 21
type: integer
recommended_list:
example:
- '["I_A_rms"'
- ' "I_B_rms"'
- '"I_C_rms"]'
items:
type: string
type: array
type: object
network.MeasurementRecommendRequest:
properties:
input:
example: trans
type: string
type: object
network.RealTimeDataPayload:
properties:
sub_pos:
description: TODO 增加example tag
type: object
type: object
network.SuccessResponse:
properties:
header:
$ref: '#/definitions/network.SuccessResponseHeader'
payload:
additionalProperties:
type: string
example:
key: value
type: object
type: object
network.SuccessResponseHeader:
properties:
err_msg:
type: string
status:
code:
example: 200
type: integer
msg:
example: success
type: string
payload:
type: object
type: object
host: localhost:8080
info:
contact: {}
contact:
email: douxu@clea.com.cn
name: douxu
url: http://www.swagger.io/support
description: 实时数据计算和模型运行服务的 API 服务
license:
name: Apache 2.0
url: http://www.apache.org/licenses/LICENSE-2.0.html
title: ModelRT 实时模型服务 API 文档
version: "1.0"
paths:
/data/realtime:
get:
consumes:
- application/json
description: 根据用户输入的组件token,从 dataRT 服务中持续获取测点实时数据
parameters:
- description: 测量点唯一标识符 (e.g.grid_1:zone_1:station_1:transformfeeder1_220.I_A_rms)
in: query
name: token
required: true
type: string
- description: 查询起始时间 (Unix时间戳, e.g., 1761008266)
in: query
name: begin
required: true
type: integer
- description: 查询结束时间 (Unix时间戳, e.g., 1761526675)
in: query
name: end
required: true
type: integer
produces:
- application/json
responses:
"200":
description: 返回实时数据成功
schema:
allOf:
- $ref: '#/definitions/network.SuccessResponse'
- properties:
payload:
$ref: '#/definitions/network.RealTimeDataPayload'
type: object
"400":
description: 返回实时数据失败
schema:
$ref: '#/definitions/network.FailureResponse'
summary: 获取实时测点数据
tags:
- RealTime Component
/measurement/recommend:
get:
consumes:
- application/json
description: 根据用户输入的字符串,从 Redis 中查询可能的测量点或结构路径,并提供推荐列表。
parameters:
- description: 查询输入参数,例如 'trans' 或 'transformfeeder1_220.'
in: body
name: request
required: true
schema:
$ref: '#/definitions/network.MeasurementRecommendRequest'
produces:
- application/json
responses:
"200":
description: 返回推荐列表成功
schema:
allOf:
- $ref: '#/definitions/network.SuccessResponse'
- properties:
payload:
$ref: '#/definitions/network.MeasurementRecommendPayload'
type: object
"400":
description: 返回推荐列表失败
schema:
$ref: '#/definitions/network.FailureResponse'
summary: 测量点推荐(搜索框自动补全)
tags:
- Measurement Recommend
/model/diagram_load/{page_id}:
get:
consumes:

16
go.mod
View File

@ -1,21 +1,27 @@
module modelRT
go 1.22.5
go 1.24
require (
github.com/DATA-DOG/go-sqlmock v1.5.2
github.com/RediSearch/redisearch-go/v2 v2.1.1
github.com/bitly/go-simplejson v0.5.1
github.com/confluentinc/confluent-kafka-go v1.9.2
github.com/gin-gonic/gin v1.10.0
github.com/gofrs/uuid v4.4.0+incompatible
github.com/gomodule/redigo v1.8.9
github.com/gorilla/websocket v1.5.3
github.com/json-iterator/go v1.1.12
github.com/natefinch/lumberjack v2.0.0+incompatible
github.com/panjf2000/ants/v2 v2.10.0
github.com/redis/go-redis/v9 v9.7.3
github.com/spf13/viper v1.19.0
github.com/stretchr/testify v1.9.0
github.com/swaggo/files v1.0.1
github.com/swaggo/gin-swagger v1.6.0
github.com/swaggo/swag v1.16.4
go.uber.org/zap v1.27.0
golang.org/x/sys v0.28.0
gorm.io/driver/mysql v1.5.7
gorm.io/driver/postgres v1.5.9
gorm.io/gorm v1.25.12
)
@ -25,8 +31,11 @@ require (
github.com/KyleBanks/depth v1.2.1 // indirect
github.com/bytedance/sonic v1.12.5 // indirect
github.com/bytedance/sonic/loader v0.2.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cloudwego/base64x v0.1.4 // indirect
github.com/cloudwego/iasm v0.2.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.7 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
@ -37,6 +46,7 @@ require (
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.23.0 // indirect
github.com/go-sql-driver/mysql v1.7.0 // indirect
github.com/goccy/go-json v0.10.3 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
@ -55,6 +65,7 @@ require (
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
@ -70,7 +81,6 @@ require (
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
golang.org/x/net v0.32.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/sys v0.28.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/tools v0.28.0 // indirect
google.golang.org/protobuf v1.35.2 // indirect

209
go.sum
View File

@ -1,62 +1,40 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
github.com/actgardner/gogen-avro/v10 v10.1.0/go.mod h1:o+ybmVjEa27AAr35FRqU98DJu1fXES56uXniYFv4yDA=
github.com/actgardner/gogen-avro/v10 v10.2.1/go.mod h1:QUhjeHPchheYmMDni/Nx7VB0RsT/ee8YIgGY/xpEQgQ=
github.com/actgardner/gogen-avro/v9 v9.1.0/go.mod h1:nyTj6wPqDJoxM3qdnjcLv+EnMDSDFqE0qDpva2QRmKc=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/RediSearch/redisearch-go/v2 v2.1.1 h1:cCn3i40uLsVD8cxwrdrGfhdAgbR5Cld9q11eYyVOwpM=
github.com/RediSearch/redisearch-go/v2 v2.1.1/go.mod h1:Uw93Wi97QqAsw1DwbQrhVd88dBorGTfSuCS42zfh1iA=
github.com/bitly/go-simplejson v0.5.1 h1:xgwPbetQScXt1gh9BmoJ6j9JMr3TElvuIyjR8pgdoow=
github.com/bitly/go-simplejson v0.5.1/go.mod h1:YOPVLzCfwK14b4Sff3oP1AmGhI9T9Vsg84etUnlyp+Q=
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
github.com/bytedance/sonic v1.12.5 h1:hoZxY8uW+mT+OpkcUWw4k0fDINtOcVavEsGfzwzFU/w=
github.com/bytedance/sonic v1.12.5/go.mod h1:B8Gt/XvtZ3Fqj+iSKMypzymZxw/FVwgIGKzMzT9r/rk=
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/bytedance/sonic/loader v0.2.1 h1:1GgorWTqf12TA8mma4DDSbaQigE2wOgQo7iCjjJv3+E=
github.com/bytedance/sonic/loader v0.2.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/confluentinc/confluent-kafka-go v1.9.2 h1:gV/GxhMBUb03tFWkN+7kdhg+zf+QUM+wVkI9zwh770Q=
github.com/confluentinc/confluent-kafka-go v1.9.2/go.mod h1:ptXNqsuDfYbAE/LBW6pnwWZElUoWxHoV8E43DCrliyo=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/frankban/quicktest v1.2.2/go.mod h1:Qh/WofXFeiAFII1aEBu529AtJo6Zg2VHscnEsbBnJ20=
github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y=
github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/gabriel-vasile/mimetype v1.4.7 h1:SKFKl7kD0RiPdbht0s7hFtjl489WcQ1VyPW8ZzUMYCA=
github.com/gabriel-vasile/mimetype v1.4.7/go.mod h1:GDlAgAyIRT27BhFl53XNAFtfjzOkLaF35JdEG0P7LtU=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/gzip v0.0.6 h1:NjcunTcGAj5CO1gn4N8jHOSIeRFHIbn51z6K+xaN4d4=
github.com/gin-contrib/gzip v0.0.6/go.mod h1:QOJlmV2xmayAjkNS2Y8NQsMneuRShOU/kjovCXNuzzk=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
@ -79,52 +57,21 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.23.0 h1:/PwmTwZhS0dPkav3cdK9kV1FsAmrL8sThn8IHr/sO+o=
github.com/go-playground/validator/v10 v10.23.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA=
github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.2.1-0.20190312032427-6f77996f0c42/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/gomodule/redigo v1.8.9 h1:Sl3u+2BI/kk+VEatbj0scLdrFhjPmbxOc1myhDP41ws=
github.com/gomodule/redigo v1.8.9/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20211008130755-947d60d73cc0/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hamba/avro v1.5.6/go.mod h1:3vNT0RLXXpFm2Tb/5KC71ZRJlOroggq1Rcitb6k4Fr8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/heetch/avro v0.3.1/go.mod h1:4xn38Oz/+hiEUTpbVfGVLfvOg0yKLlRP7Q9+gJJILgA=
github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/invopop/jsonschema v0.4.0/go.mod h1:O9uiLokuu0+MGFlyiaqtWxwqJm41/+8Nj0lD7A36YH0=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
@ -133,41 +80,25 @@ github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw=
github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jhump/gopoet v0.0.0-20190322174617-17282ff210b3/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI=
github.com/jhump/gopoet v0.1.0/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI=
github.com/jhump/goprotoc v0.5.0/go.mod h1:VrbvcYrQOrTi3i0Vf+m+oqQWk9l72mjkJCYo7UvLHRQ=
github.com/jhump/protoreflect v1.11.0/go.mod h1:U7aMIjN0NWq9swDP7xDdoMfRHb35uiuTd3Z9nFXJf5E=
github.com/jhump/protoreflect v1.12.0/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/juju/qthttptest v0.1.1/go.mod h1:aTlAv8TYaflIiTDIQYzxnl1QdPjAg8Q8qJMErpKy6A4=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY=
github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/linkedin/goavro v2.1.0+incompatible/go.mod h1:bBCwI2eGYpUI/4820s67MElg9tdeLbINjLjiM2xZFYM=
github.com/linkedin/goavro/v2 v2.10.0/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA=
github.com/linkedin/goavro/v2 v2.10.1/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA=
github.com/linkedin/goavro/v2 v2.11.1/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
@ -179,33 +110,25 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM=
github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk=
github.com/nrwiersma/avro-benchmarks v0.0.0-20210913175520-21aec48c8f76/go.mod h1:iKyFMidsk/sVYONJRE372sJuX/QTRPacU7imPqqsu7g=
github.com/panjf2000/ants/v2 v2.10.0 h1:zhRg1pQUtkyRiOFo2Sbqwjp0GfBNo9cUY2/Grpx1p+8=
github.com/panjf2000/ants/v2 v2.10.0/go.mod h1:7ZxyxsqE4vvW0M7LSD8aI3cKwgFhBHbxnlN8mDqHa1I=
github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/rogpeppe/clock v0.0.0-20190514195947-2896927a307a/go.mod h1:4r5QyqhjIWCcK8DO4KMclc5Iknq5qVBAlbYYzAbUScQ=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM=
github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/santhosh-tekuri/jsonschema/v5 v5.0.0/go.mod h1:FKdcjfQW6rpZSnxxUvEA5H/cDPdvJ/SZJQLWWXWGrZ0=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
@ -220,8 +143,6 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
@ -241,9 +162,7 @@ github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
@ -253,62 +172,28 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/arch v0.12.0 h1:UsYJhbzPYGsT0HbEdmYcqtCv8UNGvnaL561NnIUvaKg=
golang.org/x/arch v0.12.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY=
golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200505041828-1ed23360d12c/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI=
golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -319,86 +204,36 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20220503193339-ba3ae3f07e29/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/avro.v0 v0.0.0-20171217001914-a730b5802183/go.mod h1:FvqrFXt+jCsyQibeRv4xxEJBL5iG2DDW5aeJwzDiq4A=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v1 v1.0.0/go.mod h1:CxwszS/Xz1C49Ucd2i6Zil5UToP1EmyrFhKaMVbg1mk=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/httprequest.v1 v1.2.1/go.mod h1:x2Otw96yda5+8+6ZeWwHIJTFkEHWP/qP8pJOzqEtWPM=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/retry.v1 v1.0.3/go.mod h1:FJkXmWiMaAo7xB+xhvDF59zhfjDWyzmyAxiT4dB688g=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/mysql v1.5.7 h1:MndhOPYOfEp2rHKgkZIhJ16eVUIRf2HmzgoPmh7FCWo=
gorm.io/driver/mysql v1.5.7/go.mod h1:sEtPWMiqiN1N1cMXoXmBbd8C6/l+TESwriotuRRpkDM=
gorm.io/driver/postgres v1.5.9 h1:DkegyItji119OlcaLjqN11kHoUgZ/j13E0jkJZgD6A8=
gorm.io/driver/postgres v1.5.9/go.mod h1:DX3GReXH+3FPWGrrgffdvCk3DQ1dwDPdmbenSkweRGI=
gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8=
gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=

View File

@ -6,25 +6,22 @@ import (
"strconv"
"modelRT/alert"
"modelRT/constant"
"modelRT/constants"
"modelRT/logger"
"modelRT/network"
"github.com/gin-gonic/gin"
"go.uber.org/zap"
)
// QueryAlertEventHandler define query alert event process API
func QueryAlertEventHandler(c *gin.Context) {
var targetLevel constant.AlertLevel
var targetLevel constants.AlertLevel
logger := logger.GetLoggerInstance()
alertManger := alert.GetAlertMangerInstance()
levelStr := c.Query("level")
level, err := strconv.Atoi(levelStr)
if err != nil {
logger.Error("convert alert level string to int failed", zap.Error(err))
logger.Error(c, "convert alert level string to int failed", "error", err)
resp := network.FailureResponse{
Code: -1,
@ -32,13 +29,13 @@ func QueryAlertEventHandler(c *gin.Context) {
}
c.JSON(http.StatusOK, resp)
}
targetLevel = constant.AlertLevel(level)
targetLevel = constants.AlertLevel(level)
events := alertManger.GetRangeEventsByLevel(targetLevel)
resp := network.SuccessResponse{
Code: 0,
Msg: "success",
PayLoad: map[string]interface{}{
Payload: map[string]any{
"events": events,
},
}

View File

@ -7,30 +7,27 @@ import (
"net/http"
"time"
"modelRT/constant"
"modelRT/common/errcode"
"modelRT/database"
"modelRT/diagram"
"modelRT/logger"
"modelRT/model"
"modelRT/network"
"modelRT/orm"
"github.com/gin-gonic/gin"
"go.uber.org/zap"
)
// ComponentAnchorReplaceHandler define component anchor point replace process API
func ComponentAnchorReplaceHandler(c *gin.Context) {
var uuid, anchorName string
logger := logger.GetLoggerInstance()
pgClient := database.GetPostgresDBClient()
pgClient := database.GetPostgresDBClient()
cancelCtx, cancel := context.WithTimeout(c, 5*time.Second)
defer cancel()
var request network.ComponetAnchorReplaceRequest
if err := c.ShouldBindJSON(&request); err != nil {
logger.Error("unmarshal component anchor point replace info failed", zap.Error(err))
logger.Error(c, "unmarshal component anchor point replace info failed", "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
@ -45,7 +42,7 @@ func ComponentAnchorReplaceHandler(c *gin.Context) {
var componentInfo orm.Component
result := pgClient.WithContext(cancelCtx).Model(&orm.Component{}).Where("global_uuid = ?", uuid).Find(&componentInfo)
if result.Error != nil {
logger.Error("query component detail info failed", zap.Error(result.Error))
logger.Error(c, "query component detail info failed", "error", result.Error)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
@ -56,8 +53,8 @@ func ComponentAnchorReplaceHandler(c *gin.Context) {
}
if result.RowsAffected == 0 {
err := fmt.Errorf("query component detail info by uuid failed:%w", constant.ErrQueryRowZero)
logger.Error("query component detail info from table is empty", zap.String("table_name", "component"))
err := fmt.Errorf("query component detail info by uuid failed:%w", errcode.ErrQueryRowZero)
logger.Error(c, "query component detail info from table is empty", "table_name", "component")
resp := network.FailureResponse{
Code: http.StatusBadRequest,
@ -66,45 +63,12 @@ func ComponentAnchorReplaceHandler(c *gin.Context) {
c.JSON(http.StatusOK, resp)
return
}
cancelCtx, cancel = context.WithTimeout(c, 5*time.Second)
defer cancel()
unmarshalMap := make(map[string]interface{})
tableName := model.SelectModelNameByType(componentInfo.ComponentType)
result = pgClient.WithContext(cancelCtx).Table(tableName).Where("global_uuid = ?", uuid).Find(&unmarshalMap)
if result.Error != nil {
logger.Error("query model detail info failed", zap.Error(result.Error))
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: result.Error.Error(),
}
c.JSON(http.StatusOK, resp)
return
}
if unmarshalMap == nil {
err := fmt.Errorf("query model detail info by uuid failed:%w", constant.ErrQueryRowZero)
logger.Error("query model detail info from table is empty", zap.String("table_name", tableName))
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
}
c.JSON(http.StatusOK, resp)
return
}
componentType := unmarshalMap["component_type"].(int)
if componentType != constant.DemoType {
logger.Error("can not process real time data of component type not equal DemoType", zap.Int64("component_id", componentInfo.ID))
}
diagram.UpdateAnchorValue(componentInfo.ID, anchorName)
diagram.UpdateAnchorValue(componentInfo.GlobalUUID.String(), anchorName)
resp := network.SuccessResponse{
Code: http.StatusOK,
Msg: "success",
PayLoad: map[string]interface{}{
Payload: map[string]interface{}{
"uuid": request.UUID,
},
}

56
handler/attr_delete.go Normal file
View File

@ -0,0 +1,56 @@
package handler
import (
"net/http"
"modelRT/constants"
"modelRT/diagram"
"modelRT/logger"
"modelRT/network"
"github.com/gin-gonic/gin"
)
// AttrDeleteHandler deletes a data attribute
func AttrDeleteHandler(c *gin.Context) {
var request network.AttrDeleteRequest
clientToken := c.GetString("client_token")
if clientToken == "" {
err := constants.ErrGetClientToken
logger.Error(c, "failed to get client token from context", "error", err)
c.JSON(http.StatusOK, network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
})
return
}
if err := c.ShouldBindJSON(&request); err != nil {
logger.Error(c, "failed to unmarshal attribute delete request", "error", err)
c.JSON(http.StatusOK, network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
})
return
}
rs := diagram.NewRedisString(c, request.AttrToken, clientToken, 10, true)
if err := rs.GETDEL(request.AttrToken); err != nil {
logger.Error(c, "failed to delete attribute from Redis", "attr_token", request.AttrToken, "error", err)
c.JSON(http.StatusOK, network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
Payload: map[string]interface{}{"attr_token": request.AttrToken},
})
return
}
c.JSON(http.StatusOK, network.SuccessResponse{
Code: http.StatusOK,
Msg: "success",
Payload: map[string]interface{}{
"attr_token": request.AttrToken,
},
})
}

67
handler/attr_load.go Normal file
View File

@ -0,0 +1,67 @@
package handler
import (
"net/http"
"modelRT/constants"
"modelRT/database"
"modelRT/logger"
"modelRT/network"
"github.com/gin-gonic/gin"
)
// AttrGetHandler retrieves the value of a data attribute
func AttrGetHandler(c *gin.Context) {
var request network.AttrGetRequest
clientToken := c.GetString("client_token")
if clientToken == "" {
err := constants.ErrGetClientToken
logger.Error(c, "failed to get client token from context", "error", err)
c.JSON(http.StatusOK, network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
})
return
}
if err := c.ShouldBindJSON(&request); err != nil {
logger.Error(c, "failed to unmarshal attribute get request", "error", err)
c.JSON(http.StatusOK, network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
})
return
}
pgClient := database.GetPostgresDBClient()
tx := pgClient.Begin()
attrModel, err := database.ParseAttrToken(c, tx, request.AttrToken, clientToken)
if err != nil {
tx.Rollback()
logger.Error(c, "failed to parse attribute token", "attr_token", request.AttrToken, "error", err)
c.JSON(http.StatusOK, network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
Payload: map[string]interface{}{"attr_token": request.AttrToken},
})
return
}
tx.Commit()
// The GetAttrValue method is assumed to exist on the AttrModelInterface.
// You need to add this method to your attribute_model.go interface definition.
attrValue := attrModel.GetAttrValue()
c.JSON(http.StatusOK, network.SuccessResponse{
Code: http.StatusOK,
Msg: "success",
Payload: map[string]interface{}{
"attr_token": request.AttrToken,
"attr_value": attrValue,
},
})
}

58
handler/attr_update.go Normal file
View File

@ -0,0 +1,58 @@
package handler
import (
"net/http"
"modelRT/constants"
"modelRT/diagram"
"modelRT/logger"
"modelRT/network"
"github.com/gin-gonic/gin"
)
// AttrSetHandler sets the value of a data attribute.
//
// Requires a client token in the gin context; the attribute token and new
// value arrive in the JSON body and are written straight to Redis.
func AttrSetHandler(c *gin.Context) {
	clientToken := c.GetString("client_token")
	if clientToken == "" {
		tokenErr := constants.ErrGetClientToken
		logger.Error(c, "failed to get client token from context", "error", tokenErr)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  tokenErr.Error(),
		})
		return
	}
	var req network.AttrSetRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		logger.Error(c, "failed to unmarshal attribute set request", "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
		})
		return
	}
	// The logic for handling Redis operations directly from the handler.
	store := diagram.NewRedisString(c, req.AttrToken, clientToken, 10, true)
	if err := store.Set(req.AttrToken, req.AttrValue); err != nil {
		logger.Error(c, "failed to set attribute value in Redis", "attr_token", req.AttrToken, "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code:    http.StatusBadRequest,
			Msg:     err.Error(),
			Payload: map[string]interface{}{"attr_token": req.AttrToken},
		})
		return
	}
	c.JSON(http.StatusOK, network.SuccessResponse{
		Code: http.StatusOK,
		Msg:  "success",
		Payload: map[string]interface{}{
			"attr_token": req.AttrToken,
		},
	})
}

View File

@ -10,20 +10,17 @@ import (
"modelRT/logger"
"modelRT/network"
"github.com/bitly/go-simplejson"
"github.com/gin-gonic/gin"
"github.com/gofrs/uuid"
"go.uber.org/zap"
)
// CircuitDiagramCreateHandler define circuit diagram create process API
func CircuitDiagramCreateHandler(c *gin.Context) {
logger := logger.GetLoggerInstance()
pgClient := database.GetPostgresDBClient()
var request network.CircuitDiagramCreateRequest
if err := c.ShouldBindJSON(&request); err != nil {
logger.Error("unmarshal circuit diagram create info failed", zap.Error(err))
logger.Error(c, "unmarshal circuit diagram create info failed", "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
@ -35,12 +32,12 @@ func CircuitDiagramCreateHandler(c *gin.Context) {
graph, err := diagram.GetGraphMap(request.PageID)
if err != nil {
logger.Error("get topologic data from set by pageID failed", zap.Error(err))
logger.Error(c, "get topologic data from set by pageID failed", "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
Payload: map[string]interface{}{
"page_id": request.PageID,
},
}
@ -63,12 +60,12 @@ func CircuitDiagramCreateHandler(c *gin.Context) {
err = fmt.Errorf("convert uuid from string failed:%w:%w", err1, err2)
}
logger.Error("format uuid from string failed", zap.Error(err))
logger.Error(c, "format uuid from string failed", "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
Payload: map[string]interface{}{
"topologic_info": topologicLink,
},
}
@ -87,12 +84,12 @@ func CircuitDiagramCreateHandler(c *gin.Context) {
if err != nil {
tx.Rollback()
logger.Error("create topologic info into DB failed", zap.Any("topologic_info", topologicCreateInfos), zap.Error(err))
logger.Error(c, "create topologic info into DB failed", "topologic_info", topologicCreateInfos, "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
Payload: map[string]interface{}{
"topologic_infos": topologicCreateInfos,
},
}
@ -104,81 +101,44 @@ func CircuitDiagramCreateHandler(c *gin.Context) {
graph.AddEdge(topologicCreateInfo.UUIDFrom, topologicCreateInfo.UUIDTo)
}
for index, componentInfo := range request.ComponentInfos {
componentID, err := database.CreateComponentIntoDB(c, tx, componentInfo)
for index, info := range request.ComponentInfos {
componentUUID, err := database.CreateComponentIntoDB(c, tx, info)
if err != nil {
tx.Rollback()
logger.Error("insert component info into DB failed", zap.Error(err))
logger.Error(c, "insert component info into DB failed", "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
"component_infos": request.ComponentInfos,
},
}
c.JSON(http.StatusOK, resp)
return
}
request.ComponentInfos[index].ID = componentID
err = database.CreateModelIntoDB(c, tx, componentID, componentInfo.ComponentType, componentInfo.Params)
if err != nil {
tx.Rollback()
logger.Error("create component model into DB failed", zap.Any("component_infos", request.ComponentInfos), zap.Error(err))
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
"uuid": request.PageID,
Payload: map[string]interface{}{
"component_infos": request.ComponentInfos,
},
}
c.JSON(http.StatusOK, resp)
return
}
request.ComponentInfos[index].UUID = componentUUID
}
for _, componentInfo := range request.ComponentInfos {
paramsJSON, err := simplejson.NewJson([]byte(componentInfo.Params))
for _, info := range request.ComponentInfos {
// TODO 修复赋值问题
component, err := network.ConvertComponentCreateInfosToComponents(info)
if err != nil {
tx.Rollback()
logger.Error("unmarshal component params info failed", zap.String("component_params", componentInfo.Params), zap.Error(err))
logger.Error(c, "convert component params info failed", "component_info", info, "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
"uuid": componentInfo.UUID,
"component_params": componentInfo.Params,
Payload: map[string]interface{}{
"uuid": info.UUID,
"component_params": info.Params,
},
}
c.JSON(http.StatusOK, resp)
return
}
componentMap, err := paramsJSON.Map()
if err != nil {
tx.Rollback()
logger.Error("format params json info to map failed", zap.Error(err))
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
"uuid": componentInfo.UUID,
"component_params": componentInfo.Params,
},
}
c.JSON(http.StatusOK, resp)
return
}
diagram.StoreComponentMap(componentInfo.ID, componentMap)
diagram.StoreComponentMap(info.UUID, component)
}
if len(request.FreeVertexs) > 0 {
@ -192,7 +152,7 @@ func CircuitDiagramCreateHandler(c *gin.Context) {
resp := network.SuccessResponse{
Code: http.StatusOK,
Msg: "success",
PayLoad: map[string]interface{}{
Payload: map[string]interface{}{
"page_id": request.PageID,
},
}

View File

@ -7,28 +7,25 @@ import (
"net/http"
"time"
"modelRT/constant"
"modelRT/common/errcode"
"modelRT/database"
"modelRT/diagram"
"modelRT/logger"
"modelRT/model"
"modelRT/network"
"modelRT/orm"
"github.com/gin-gonic/gin"
"github.com/gofrs/uuid"
"go.uber.org/zap"
"gorm.io/gorm/clause"
)
// CircuitDiagramDeleteHandler define circuit diagram delete process API
func CircuitDiagramDeleteHandler(c *gin.Context) {
logger := logger.GetLoggerInstance()
pgClient := database.GetPostgresDBClient()
var request network.CircuitDiagramDeleteRequest
if err := c.ShouldBindJSON(&request); err != nil {
logger.Error("unmarshal circuit diagram del info failed", zap.Error(err))
logger.Error(c, "unmarshal circuit diagram del info failed", "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
@ -40,12 +37,12 @@ func CircuitDiagramDeleteHandler(c *gin.Context) {
graph, err := diagram.GetGraphMap(request.PageID)
if err != nil {
logger.Error("get topologic data from set by pageID failed", zap.Error(err))
logger.Error(c, "get topologic data from set by pageID failed", "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
Payload: map[string]interface{}{
"page_id": request.PageID,
},
}
@ -68,12 +65,12 @@ func CircuitDiagramDeleteHandler(c *gin.Context) {
err = fmt.Errorf("convert uuid from string failed:%w:%w", err1, err2)
}
logger.Error("format uuid from string failed", zap.Error(err))
logger.Error(c, "format uuid from string failed", "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
Payload: map[string]interface{}{
"topologic_info": topologicLink,
},
}
@ -93,12 +90,12 @@ func CircuitDiagramDeleteHandler(c *gin.Context) {
if err != nil {
tx.Rollback()
logger.Error("delete topologic info into DB failed", zap.Any("topologic_info", topologicDelInfo), zap.Error(err))
logger.Error(c, "delete topologic info into DB failed", "topologic_info", topologicDelInfo, "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
Payload: map[string]interface{}{
"topologic_info": topologicDelInfo,
},
}
@ -110,12 +107,12 @@ func CircuitDiagramDeleteHandler(c *gin.Context) {
if err != nil {
tx.Rollback()
logger.Error("delete topologic info failed", zap.Any("topologic_info", topologicDelInfo), zap.Error(err))
logger.Error(c, "delete topologic info failed", "topologic_info", topologicDelInfo, "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
Payload: map[string]interface{}{
"topologic_info": topologicDelInfo,
},
}
@ -136,12 +133,12 @@ func CircuitDiagramDeleteHandler(c *gin.Context) {
if err != nil {
tx.Rollback()
logger.Error("format uuid from string failed", zap.Error(err))
logger.Error(c, "format uuid from string failed", "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
Payload: map[string]interface{}{
"uuid": componentInfo.UUID,
},
}
@ -157,15 +154,15 @@ func CircuitDiagramDeleteHandler(c *gin.Context) {
err := result.Error
if result.RowsAffected == 0 {
err = fmt.Errorf("%w:please check uuid conditions", constant.ErrDeleteRowZero)
err = fmt.Errorf("%w:please check uuid conditions", errcode.ErrDeleteRowZero)
}
logger.Error("query component info into postgresDB failed", zap.String("component_global_uuid", componentInfo.UUID), zap.Error(err))
logger.Error(c, "query component info into postgresDB failed", "component_global_uuid", componentInfo.UUID, "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
Payload: map[string]interface{}{
"uuid": componentInfo.UUID,
},
}
@ -179,47 +176,22 @@ func CircuitDiagramDeleteHandler(c *gin.Context) {
err := result.Error
if result.RowsAffected == 0 {
err = fmt.Errorf("%w:please check uuid conditions", constant.ErrDeleteRowZero)
err = fmt.Errorf("%w:please check uuid conditions", errcode.ErrDeleteRowZero)
}
logger.Error("delete component info into postgresDB failed", zap.String("component_global_uuid", componentInfo.UUID), zap.Error(err))
logger.Error(c, "delete component info into postgresDB failed", "component_global_uuid", componentInfo.UUID, "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
Payload: map[string]interface{}{
"uuid": componentInfo.UUID,
},
}
c.JSON(http.StatusOK, resp)
return
}
modelStruct := model.SelectModelByType(component.ComponentType)
modelStruct.SetComponentID(component.ID)
result = tx.WithContext(cancelCtx).Where("component_id = ?", component.ID).Delete(modelStruct)
if result.Error != nil || result.RowsAffected == 0 {
tx.Rollback()
err := result.Error
if result.RowsAffected == 0 {
err = fmt.Errorf("%w:please check uuid conditions", constant.ErrDeleteRowZero)
}
msg := fmt.Sprintf("delete component info from table %s failed", modelStruct.ReturnTableName())
logger.Error(msg, zap.String("component_global_uuid", componentInfo.UUID), zap.Error(err))
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
"uuid": componentInfo.UUID,
},
}
c.JSON(http.StatusOK, resp)
return
}
diagram.DeleteComponentMap(component.ID)
diagram.DeleteComponentMap(component.GlobalUUID.String())
}
if len(request.FreeVertexs) > 0 {
@ -233,7 +205,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) {
resp := network.SuccessResponse{
Code: http.StatusOK,
Msg: "success",
PayLoad: map[string]interface{}{
Payload: map[string]interface{}{
"page_id": request.PageID,
},
}

View File

@ -11,7 +11,6 @@ import (
"modelRT/network"
"github.com/gin-gonic/gin"
"go.uber.org/zap"
)
// CircuitDiagramLoadHandler define circuit diagram load process API
@ -25,17 +24,16 @@ import (
// @Failure 400 {object} network.FailureResponse "request process failed"
// @Router /model/diagram_load/{page_id} [get]
func CircuitDiagramLoadHandler(c *gin.Context) {
logger := logger.GetLoggerInstance()
pgClient := database.GetPostgresDBClient()
pageID, err := strconv.ParseInt(c.Query("page_id"), 10, 64)
if err != nil {
logger.Error("get pageID from url param failed", zap.Error(err))
logger.Error(c, "get pageID from url param failed", "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
Payload: map[string]interface{}{
"page_id": pageID,
},
}
@ -45,33 +43,33 @@ func CircuitDiagramLoadHandler(c *gin.Context) {
topologicInfo, err := diagram.GetGraphMap(pageID)
if err != nil {
logger.Error("get topologic data from set by pageID failed", zap.Error(err))
logger.Error(c, "get topologic data from set by pageID failed", "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
Payload: map[string]interface{}{
"page_id": pageID,
},
}
c.JSON(http.StatusOK, resp)
return
}
payLoad := make(map[string]interface{})
payLoad["root_vertex"] = topologicInfo.RootVertex
payLoad["topologic"] = topologicInfo.VerticeLinks
payload := make(map[string]interface{})
payload["root_vertex"] = topologicInfo.RootVertex
payload["topologic"] = topologicInfo.VerticeLinks
componentParamMap := make(map[string]any)
for _, VerticeLink := range topologicInfo.VerticeLinks {
for _, componentUUID := range VerticeLink {
component, err := database.QueryComponentByUUID(c, pgClient, componentUUID)
if err != nil {
logger.Error("get component id info from DB by uuid failed", zap.Error(err))
logger.Error(c, "get component id info from DB by uuid failed", "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
Payload: map[string]interface{}{
"uuid": componentUUID,
},
}
@ -79,14 +77,14 @@ func CircuitDiagramLoadHandler(c *gin.Context) {
return
}
componentParams, err := diagram.GetComponentMap(component.ID)
componentParams, err := diagram.GetComponentMap(component.GlobalUUID.String())
if err != nil {
logger.Error("get component data from set by uuid failed", zap.Error(err))
logger.Error(c, "get component data from set by uuid failed", "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
Payload: map[string]interface{}{
"uuid": componentUUID,
},
}
@ -100,12 +98,12 @@ func CircuitDiagramLoadHandler(c *gin.Context) {
rootVertexUUID := topologicInfo.RootVertex.String()
rootComponent, err := database.QueryComponentByUUID(c, pgClient, topologicInfo.RootVertex)
if err != nil {
logger.Error("get component id info from DB by uuid failed", zap.Error(err))
logger.Error(c, "get component id info from DB by uuid failed", "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
Payload: map[string]interface{}{
"uuid": topologicInfo.RootVertex,
},
}
@ -113,14 +111,14 @@ func CircuitDiagramLoadHandler(c *gin.Context) {
return
}
rootComponentParam, err := diagram.GetComponentMap(rootComponent.ID)
rootComponentParam, err := diagram.GetComponentMap(rootComponent.GlobalUUID.String())
if err != nil {
logger.Error("get component data from set by uuid failed", zap.Error(err))
logger.Error(c, "get component data from set by uuid failed", "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
Payload: map[string]interface{}{
"uuid": rootVertexUUID,
},
}
@ -129,12 +127,12 @@ func CircuitDiagramLoadHandler(c *gin.Context) {
}
componentParamMap[rootVertexUUID] = rootComponentParam
payLoad["component_params"] = componentParamMap
payload["component_params"] = componentParamMap
resp := network.SuccessResponse{
Code: http.StatusOK,
Msg: "success",
PayLoad: payLoad,
Payload: payload,
}
c.JSON(http.StatusOK, resp)
}

View File

@ -9,19 +9,16 @@ import (
"modelRT/logger"
"modelRT/network"
"github.com/bitly/go-simplejson"
"github.com/gin-gonic/gin"
"go.uber.org/zap"
)
// CircuitDiagramUpdateHandler define circuit diagram update process API
func CircuitDiagramUpdateHandler(c *gin.Context) {
logger := logger.GetLoggerInstance()
pgClient := database.GetPostgresDBClient()
var request network.CircuitDiagramUpdateRequest
if err := c.ShouldBindJSON(&request); err != nil {
logger.Error("unmarshal circuit diagram update info failed", zap.Error(err))
logger.Error(c, "unmarshal circuit diagram update info failed", "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
@ -33,12 +30,12 @@ func CircuitDiagramUpdateHandler(c *gin.Context) {
graph, err := diagram.GetGraphMap(request.PageID)
if err != nil {
logger.Error("get topologic data from set by pageID failed", zap.Error(err))
logger.Error(c, "get topologic data from set by pageID failed", "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
Payload: map[string]interface{}{
"page_id": request.PageID,
},
}
@ -50,12 +47,12 @@ func CircuitDiagramUpdateHandler(c *gin.Context) {
for _, topologicLink := range request.TopologicLinks {
changeInfo, err := network.ParseUUID(topologicLink)
if err != nil {
logger.Error("format uuid from string failed", zap.Error(err))
logger.Error(c, "format uuid from string failed", "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
Payload: map[string]interface{}{
"topologic_info": topologicLink,
},
}
@ -73,12 +70,12 @@ func CircuitDiagramUpdateHandler(c *gin.Context) {
if err != nil {
tx.Rollback()
logger.Error("update topologic info into DB failed", zap.Any("topologic_info", topologicChangeInfo), zap.Error(err))
logger.Error(c, "update topologic info into DB failed", "topologic_info", topologicChangeInfo, "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
Payload: map[string]interface{}{
"topologic_info": topologicChangeInfo,
},
}
@ -90,12 +87,12 @@ func CircuitDiagramUpdateHandler(c *gin.Context) {
if err != nil {
tx.Rollback()
logger.Error("update topologic info failed", zap.Any("topologic_info", topologicChangeInfo), zap.Error(err))
logger.Error(c, "update topologic info failed", "topologic_info", topologicChangeInfo, "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
Payload: map[string]interface{}{
"topologic_info": topologicChangeInfo,
},
}
@ -105,32 +102,14 @@ func CircuitDiagramUpdateHandler(c *gin.Context) {
}
for index, componentInfo := range request.ComponentInfos {
componentID, err := database.UpdateComponentIntoDB(c, tx, componentInfo)
componentUUID, err := database.UpdateComponentIntoDB(c, tx, componentInfo)
if err != nil {
logger.Error("udpate component info into DB failed", zap.Error(err))
logger.Error(c, "udpate component info into DB failed", "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
"page_id": request.PageID,
"component_info": request.ComponentInfos,
},
}
c.JSON(http.StatusOK, resp)
return
}
request.ComponentInfos[index].ID = componentID
err = database.UpdateModelIntoDB(c, tx, componentID, componentInfo.ComponentType, componentInfo.Params)
if err != nil {
logger.Error("udpate component model info into DB failed", zap.Error(err))
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
Payload: map[string]interface{}{
"page_id": request.PageID,
"component_info": request.ComponentInfos,
},
@ -138,41 +117,27 @@ func CircuitDiagramUpdateHandler(c *gin.Context) {
c.JSON(http.StatusOK, resp)
return
}
request.ComponentInfos[index].UUID = componentUUID
}
for _, componentInfo := range request.ComponentInfos {
paramsJSON, err := simplejson.NewJson([]byte(componentInfo.Params))
for _, info := range request.ComponentInfos {
// TODO 修复赋值问题
component, err := network.ConvertComponentUpdateInfosToComponents(info)
if err != nil {
logger.Error("unmarshal component info by concurrent map failed", zap.String("component_params", componentInfo.Params), zap.Error(err))
logger.Error(c, "convert component params info failed", "component_info", info, "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
"uuid": componentInfo.UUID,
"component_params": componentInfo.Params,
Payload: map[string]interface{}{
"uuid": info.UUID,
"component_params": info.Params,
},
}
c.JSON(http.StatusOK, resp)
return
}
componentMap, err := paramsJSON.Map()
if err != nil {
logger.Error("format params json info to map failed", zap.Error(err))
resp := network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
PayLoad: map[string]interface{}{
"uuid": componentInfo.UUID,
"component_params": componentInfo.Params,
},
}
c.JSON(http.StatusOK, resp)
return
}
diagram.UpdateComponentMap(componentInfo.ID, componentMap)
diagram.UpdateComponentMap(info.ID, component)
}
if len(request.FreeVertexs) > 0 {
@ -187,7 +152,7 @@ func CircuitDiagramUpdateHandler(c *gin.Context) {
resp := network.SuccessResponse{
Code: http.StatusOK,
Msg: "success",
PayLoad: map[string]interface{}{
Payload: map[string]interface{}{
"page_id": request.PageID,
},
}

View File

@ -0,0 +1,253 @@
// Package handler provides HTTP handlers for various endpoints.
package handler
import (
"context"
"fmt"
"maps"
"slices"
"strings"
"github.com/gofrs/uuid"
"modelRT/common/errcode"
"modelRT/constants"
"modelRT/database"
"modelRT/diagram"
"modelRT/logger"
"modelRT/orm"
"github.com/gin-gonic/gin"
)
// ComponentAttributeQueryHandler define circuit diagram component attribute value query process API
//
// The ":tokens" path parameter carries a comma-separated list of attribute
// tokens. Each token is dot-separated; segments 5-7 (index 4-6) are the
// component tag, the extend type (Redis hash group) and the attribute name.
// Values are answered from the Redis hash cache when present; misses are
// resolved in a single Postgres transaction and, after a successful commit,
// asynchronously backfilled into Redis.
func ComponentAttributeQueryHandler(c *gin.Context) {
	pgClient := database.GetPostgresDBClient()
	tokens := c.Param("tokens")
	if tokens == "" {
		err := fmt.Errorf("tokens is missing from the path")
		logger.Error(c, "query tokens from path failed", "error", err, "url", c.Request.RequestURI)
		renderRespFailure(c, constants.RespCodeInvalidParams, err.Error(), nil)
		return
	}
	tokenSlice := strings.Split(tokens, ",")
	queryResults := make(map[string]queryResult)
	cacheQueryMap := make(map[string][]cacheQueryItem)
	for _, token := range tokenSlice {
		// Renamed from "slices": the old name shadowed the stdlib slices
		// package that this function uses further down.
		parts := strings.Split(token, ".")
		if len(parts) < 7 {
			queryResults[token] = queryResult{err: errcode.ErrInvalidToken}
			continue
		}
		hSetKey := fmt.Sprintf("%s_%s", parts[4], parts[5])
		cacheQueryMap[hSetKey] = append(cacheQueryMap[hSetKey], cacheQueryItem{
			token:               token,
			attributeCompTag:    parts[4],
			attributeExtendType: parts[5],
			attributeName:       parts[6],
		})
	}
	// First pass: try the Redis hash cache and group misses by component tag.
	dbQueryMap := make(map[string][]cacheQueryItem)
	var secondaryQueryCount int
	for hSetKey, items := range cacheQueryMap {
		hset := diagram.NewRedisHash(c, hSetKey, 5000, false)
		cacheData, err := hset.HGetAll()
		if err != nil {
			// Cache failure is non-fatal: fall through to the DB lookup.
			logger.Warn(c, "redis hgetall failed", "key", hSetKey, "err", err)
		}
		for _, item := range items {
			if val, ok := cacheData[item.attributeName]; ok {
				queryResults[item.token] = queryResult{err: errcode.ErrProcessSuccess, value: val}
			} else {
				dbQueryMap[item.attributeCompTag] = append(dbQueryMap[item.attributeCompTag], item)
				secondaryQueryCount++
			}
		}
	}
	if secondaryQueryCount == 0 {
		payload := genQueryRespPayload(queryResults, tokenSlice)
		renderRespSuccess(c, constants.RespCodeSuccess, "query dynamic parameter values success", payload)
		return
	}
	tx := pgClient.WithContext(c).Begin()
	if tx.Error != nil {
		logger.Error(c, "begin postgres transaction failed", "error", tx.Error)
		fillRemainingErrors(queryResults, tokenSlice, errcode.ErrBeginTxFailed)
		payload := genQueryRespPayload(queryResults, tokenSlice)
		renderRespFailure(c, constants.RespCodeServerError, "begin postgres database transaction failed", payload)
		return
	}
	// Rollback after a successful Commit is a no-op, so the defer is safe.
	defer tx.Rollback()
	allCompTags := slices.Collect(maps.Keys(dbQueryMap))
	compModelMap, err := database.QueryComponentByCompTags(c, tx, allCompTags)
	if err != nil {
		logger.Error(c, "query component info from postgres database failed", "error", err)
		fillRemainingErrors(queryResults, tokenSlice, errcode.ErrDBQueryFailed)
		payload := genQueryRespPayload(queryResults, tokenSlice)
		renderRespFailure(c, constants.RespCodeServerError, "query component meta failed", payload)
		return
	}
	// batch retrieve component metadata
	identifiers := make([]orm.ProjectIdentifier, 0, secondaryQueryCount)
	for tag, items := range dbQueryMap {
		comp, ok := compModelMap[tag]
		if !ok {
			for _, it := range items {
				queryResults[it.token] = queryResult{err: errcode.ErrFoundTargetFailed}
			}
			continue
		}
		for i := range items {
			items[i].attributeModelName = comp.ModelName
			items[i].globalUUID = comp.GlobalUUID
			identifiers = append(identifiers, orm.ProjectIdentifier{
				Token: items[i].token, Tag: comp.ModelName, GroupName: items[i].attributeExtendType,
			})
		}
	}
	tableNameMap, err := database.BatchGetProjectNames(tx, identifiers)
	if err != nil {
		logger.Error(c, "batch get table names from postgres database failed", "error", err)
		fillRemainingErrors(queryResults, tokenSlice, errcode.ErrRetrieveFailed)
		payload := genQueryRespPayload(queryResults, tokenSlice)
		renderRespFailure(c, constants.RespCodeServerError, "batch get table names from postgres database failed", payload)
		return
	}
	redisSyncMap := make(map[string][]cacheQueryItem)
	for _, items := range dbQueryMap {
		for _, item := range items {
			if _, exists := queryResults[item.token]; exists {
				continue
			}
			tbl, ok := tableNameMap[orm.ProjectIdentifier{Tag: item.attributeModelName, GroupName: item.attributeExtendType}]
			if !ok {
				queryResults[item.token] = queryResult{err: errcode.ErrFoundTargetFailed}
				continue
			}
			var dbVal string
			// NOTE(review): item.attributeName comes from the request path and
			// is interpolated as a column name; validate it against an
			// allowlist of known attribute columns to rule out SQL injection.
			res := tx.Table(tbl).Select(item.attributeName).Where("global_uuid = ?", item.globalUUID).Scan(&dbVal)
			if res.Error != nil || res.RowsAffected == 0 {
				queryResults[item.token] = queryResult{err: errcode.ErrDBQueryFailed}
				continue
			}
			queryResults[item.token] = queryResult{err: errcode.ErrProcessSuccess, value: dbVal}
			item.attributeVal = dbVal
			hKey := fmt.Sprintf("%s_%s", item.attributeCompTag, item.attributeExtendType)
			redisSyncMap[hKey] = append(redisSyncMap[hKey], item)
		}
	}
	if err := tx.Commit().Error; err != nil {
		logger.Warn(c, "postgres transaction commit failed, but returning scanned data", "error", err)
	} else {
		// Backfill the cache off the request goroutine; c.Copy() makes the
		// gin context safe to use after this handler returns.
		for hKey, items := range redisSyncMap {
			go backfillRedis(c.Copy(), hKey, items)
		}
	}
	payload := genQueryRespPayload(queryResults, tokenSlice)
	if hasAnyError(queryResults) {
		renderRespFailure(c, constants.RespCodeFailed, "query completed with partial failures", payload)
	} else {
		renderRespSuccess(c, constants.RespCodeSuccess, "query completed successfully", payload)
	}
}
// hasAnyError reports whether any per-token result carries a non-success
// error code.
func hasAnyError(results map[string]queryResult) bool {
	for _, r := range results {
		if r.err == nil {
			continue
		}
		if r.err.Code() != constants.RespCodeSuccess {
			return true
		}
	}
	return false
}
// fillRemainingErrors records err for every requested token that does not yet
// have a result, so the response covers all tokens after an early failure.
func fillRemainingErrors(results map[string]queryResult, tokens []string, err *errcode.AppError) {
	for _, tok := range tokens {
		if _, done := results[tok]; done {
			continue
		}
		results[tok] = queryResult{err: err}
	}
}
// backfillRedis writes freshly scanned attribute values into the Redis hash
// cache. It is intended to run as a goroutine after the DB transaction has
// committed; empty values are skipped.
func backfillRedis(ctx context.Context, hSetKey string, items []cacheQueryItem) {
	hset := diagram.NewRedisHash(ctx, hSetKey, 5000, false)
	fields := make(map[string]any, len(items))
	for _, it := range items {
		if it.attributeVal == "" {
			continue
		}
		fields[it.attributeName] = it.attributeVal
	}
	if len(fields) == 0 {
		return
	}
	if err := hset.SetRedisHashByMap(fields); err != nil {
		logger.Error(ctx, "async backfill redis failed", "hash_key", hSetKey, "error", err)
		return
	}
	logger.Info(ctx, "async backfill redis success", "hash_key", hSetKey, "count", len(fields))
}
// genQueryRespPayload assembles the "attributes" response payload in the
// order the tokens were requested. Tokens with no recorded result are
// reported as a cache query failure.
func genQueryRespPayload(queryResults map[string]queryResult, requestTokens []string) map[string]any {
	attributes := make([]attributeQueryResult, 0, len(requestTokens))
	for _, token := range requestTokens {
		// "res" was previously named "queryResult", shadowing the type of the
		// same name.
		if res, exists := queryResults[token]; exists {
			attributes = append(attributes, attributeQueryResult{
				Token: token,
				Code:  res.err.Code(),
				Msg:   res.err.Msg(),
				Value: res.value,
			})
		} else {
			err := errcode.ErrCacheQueryFailed
			attributes = append(attributes, attributeQueryResult{
				Token: token,
				Code:  err.Code(),
				Msg:   err.Msg(),
				Value: "",
			})
		}
	}
	payload := map[string]any{
		"attributes": attributes,
	}
	return payload
}
// cacheQueryItem carries one attribute-token query through the cache-miss /
// DB-fallback pipeline; later fields are filled in as the lookup progresses.
type cacheQueryItem struct {
	globalUUID          uuid.UUID // component global UUID, set after the component lookup
	token               string    // full attribute token from the request
	attributeCompTag    string    // token segment 5: component tag
	attributeModelName  string    // component model name, set after the component lookup
	attributeExtendType string    // token segment 6: extend type / Redis hash group
	attributeName       string    // token segment 7: attribute (column) name
	attributeVal        string    // resolved value, set after the DB scan
}
// attributeQueryResult is one element of the "attributes" array in the query
// response payload.
type attributeQueryResult struct {
	Token string `json:"token"`
	Msg   string `json:"msg"`
	Value string `json:"value"`
	Code  int    `json:"code"`
}
// queryResult records the outcome for a single token: a status error
// (errcode.ErrProcessSuccess on success) and the resolved value, if any.
type queryResult struct {
	err   *errcode.AppError
	value string
}

View File

@ -0,0 +1,214 @@
// Package handler provides HTTP handlers for various endpoints.
package handler
import (
"fmt"
"strings"
"modelRT/common/errcode"
"modelRT/constants"
"modelRT/database"
"modelRT/diagram"
"modelRT/logger"
"modelRT/network"
"modelRT/orm"
"github.com/gin-gonic/gin"
)
// ComponentAttributeUpdateHandler define circuit diagram component attribute value update process API
//
// Every token in one request must target the same component (segment 5 of the
// dot-separated token). Updates run in a single Postgres transaction; rows
// persisted successfully are then synced into the Redis hash cache. The
// response carries a per-token code so partial failures are visible.
func ComponentAttributeUpdateHandler(c *gin.Context) {
	pgClient := database.GetPostgresDBClient()
	var request network.ComponentAttributeUpdateInfo
	if err := c.ShouldBindJSON(&request); err != nil {
		logger.Error(c, "unmarshal request params failed", "error", err)
		renderRespFailure(c, constants.RespCodeInvalidParams, err.Error(), nil)
		return
	}
	updateResults := make(map[string]*errcode.AppError)
	attriModifyConfs := make([]attributeModifyConfig, 0, len(request.AttributeConfigs))
	var attributeComponentTag string
	for _, attribute := range request.AttributeConfigs {
		parts := strings.Split(attribute.AttributeToken, ".")
		if len(parts) < 7 {
			updateResults[attribute.AttributeToken] = errcode.ErrInvalidToken
			continue
		}
		componentTag := parts[4]
		// Pin the component tag to the first VALID token. The previous
		// "index == 0" check left the tag empty when the first token was
		// malformed, wrongly flagging every later token as cross-component.
		if attributeComponentTag == "" {
			attributeComponentTag = componentTag
		} else if componentTag != attributeComponentTag {
			updateResults[attribute.AttributeToken] = errcode.ErrCrossToken
			continue
		}
		attriModifyConfs = append(attriModifyConfs, attributeModifyConfig{
			attributeToken:      attribute.AttributeToken,
			attributeExtendType: parts[5],
			attributeName:       parts[6],
			attributeOldVal:     attribute.AttributeOldVal,
			attributeNewVal:     attribute.AttributeNewVal,
		})
	}
	// No valid token survived validation: report per-token errors without
	// touching the database (the component tag would be empty).
	if len(attriModifyConfs) == 0 {
		payload := genUpdateRespPayload(updateResults, request.AttributeConfigs)
		renderRespFailure(c, constants.RespCodeFailed, "process completed with partial failures", payload)
		return
	}
	// open transaction
	tx := pgClient.WithContext(c).Begin()
	if tx.Error != nil {
		logger.Error(c, "begin postgres transaction failed", "error", tx.Error)
		renderRespFailure(c, constants.RespCodeServerError, "begin postgres transaction failed", nil)
		return
	}
	compInfo, err := database.QueryComponentByCompTag(c, tx, attributeComponentTag)
	if err != nil {
		logger.Error(c, "query component info by component tag failed", "error", err, "tag", attributeComponentTag)
		for _, attribute := range request.AttributeConfigs {
			if _, exists := updateResults[attribute.AttributeToken]; !exists {
				updateResults[attribute.AttributeToken] = errcode.ErrDBQueryFailed.WithCause(err)
			}
		}
		tx.Rollback()
		payload := genUpdateRespPayload(updateResults, request.AttributeConfigs)
		renderRespFailure(c, constants.RespCodeFailed, "query component metadata failed", payload)
		return
	}
	identifiers := make([]orm.ProjectIdentifier, len(attriModifyConfs))
	for i, mod := range attriModifyConfs {
		identifiers[i] = orm.ProjectIdentifier{
			Token:     mod.attributeToken,
			Tag:       compInfo.ModelName,
			GroupName: mod.attributeExtendType,
		}
	}
	tableNameMap, err := database.BatchGetProjectNames(tx, identifiers)
	if err != nil {
		tx.Rollback()
		for _, id := range identifiers {
			if _, exists := updateResults[id.Token]; !exists {
				updateResults[id.Token] = errcode.ErrRetrieveFailed.WithCause(err)
			}
		}
		payload := genUpdateRespPayload(updateResults, request.AttributeConfigs)
		renderRespFailure(c, constants.RespCodeFailed, "batch retrieve table names failed", payload)
		return
	}
	redisUpdateMap := make(map[string][]cacheUpdateItem)
	for _, mod := range attriModifyConfs {
		id := orm.ProjectIdentifier{Tag: compInfo.ModelName, GroupName: mod.attributeExtendType}
		tableName, exists := tableNameMap[id]
		if !exists {
			updateResults[mod.attributeToken] = errcode.ErrFoundTargetFailed
			continue
		}
		// NOTE(review): mod.attributeName originates from the request token
		// and is interpolated as a column name; validate it against an
		// allowlist of known attribute columns to rule out SQL injection.
		result := tx.Table(tableName).
			Where(fmt.Sprintf("%s = ? AND global_uuid = ?", mod.attributeName), mod.attributeOldVal, compInfo.GlobalUUID).
			Updates(map[string]any{mod.attributeName: mod.attributeNewVal})
		if result.Error != nil {
			updateResults[mod.attributeToken] = errcode.ErrDBUpdateFailed
			continue
		}
		if result.RowsAffected == 0 {
			// Old value did not match (or row missing): optimistic-lock miss.
			updateResults[mod.attributeToken] = errcode.ErrDBzeroAffectedRows
			continue
		}
		cacheKey := fmt.Sprintf("%s_%s", attributeComponentTag, mod.attributeExtendType)
		redisUpdateMap[cacheKey] = append(redisUpdateMap[cacheKey],
			cacheUpdateItem{
				token:  mod.attributeToken,
				name:   mod.attributeName,
				newVal: mod.attributeNewVal,
			})
	}
	// commit transaction
	if err := tx.Commit().Error; err != nil {
		renderRespFailure(c, constants.RespCodeServerError, "transaction commit failed", nil)
		return
	}
	for key, items := range redisUpdateMap {
		hset := diagram.NewRedisHash(c, key, 5000, false)
		fields := make(map[string]any, len(items))
		for _, item := range items {
			fields[item.name] = item.newVal
		}
		if err := hset.SetRedisHashByMap(fields); err != nil {
			logger.Error(c, "batch sync redis failed", "hash_key", key, "error", err)
			for _, item := range items {
				// Tokens in redisUpdateMap succeeded in the DB and therefore
				// have no entry in updateResults yet; the previous "exists"
				// check could never record the cache-sync warning.
				if _, exists := updateResults[item.token]; !exists {
					updateResults[item.token] = errcode.ErrCacheSyncWarn.WithCause(err)
				}
			}
		}
	}
	payload := genUpdateRespPayload(updateResults, request.AttributeConfigs)
	if len(updateResults) > 0 {
		renderRespFailure(c, constants.RespCodeFailed, "process completed with partial failures", payload)
		return
	}
	renderRespSuccess(c, constants.RespCodeSuccess, "process completed successfully", payload)
}
// attributeModifyConfig carries one parsed attribute-update instruction:
// which attribute (token) to change, which extend-type group/table it lives
// in, and the old/new values used for the conditional UPDATE.
type attributeModifyConfig struct {
	attributeToken      string // unique token identifying the attribute row
	attributeExtendType string // extend-type group name; selects the target table
	attributeName       string // column name to update
	attributeOldVal     string // expected current value (used in the WHERE clause)
	attributeNewVal     string // replacement value to write
}
// cacheUpdateItem records a successfully committed attribute change that
// still has to be mirrored into the redis hash cache.
type cacheUpdateItem struct {
	token  string // attribute token, used to mark cache-sync failures per attribute
	name   string // hash field name (attribute column name)
	newVal string // value written to the database, to be pushed into redis
}
// attributeUpdateResult is the per-attribute entry of the update response
// payload: the attribute token plus the business result code and message.
type attributeUpdateResult struct {
	Token string `json:"token"`
	Code  int    `json:"code"`
	Msg   string `json:"msg"`
}
// genUpdateRespPayload assembles the response payload for an attribute-update
// request. Every requested attribute gets exactly one entry, in request order:
// tokens present in updateResults are reported with the recorded error code
// and message, all others are reported as successful updates.
func genUpdateRespPayload(updateResults map[string]*errcode.AppError, originalRequests []network.ComponentAttributeConfig) map[string]any {
	results := make([]attributeUpdateResult, len(originalRequests))
	for idx, cfg := range originalRequests {
		// default to the success entry; overwrite when an error was recorded
		entry := attributeUpdateResult{
			Token: cfg.AttributeToken,
			Code:  constants.CodeSuccess,
			Msg:   "token value update success",
		}
		if appErr, found := updateResults[cfg.AttributeToken]; found {
			entry.Code = appErr.Code()
			entry.Msg = appErr.Msg()
		}
		results[idx] = entry
	}
	return map[string]any{
		"attributes": results,
	}
}

View File

@ -0,0 +1,188 @@
// Package handler provides HTTP handlers for various endpoints.
package handler
import (
"context"
"errors"
"fmt"
"net/http"
"modelRT/constants"
"modelRT/database"
"modelRT/diagram"
"modelRT/logger"
"modelRT/network"
"modelRT/orm"
"github.com/gin-gonic/gin"
)
// linkSetConfig describes, for one hierarchy level, which redis set keys back
// the search-link structure: the "all members of this level" set and the
// per-parent set (a printf template keyed by the parent's tag name).
type linkSetConfig struct {
	CurrKey         string // redis key of the set holding all members of this level
	PrevKeyTemplate string // fmt template for the parent-scoped set key; unused when PrevIsNil
	PrevIsNil       bool   // true when this level has no parent-scoped set (e.g. top level)
}
// linkSetConfigs maps a diagram node level (0-5) to the redis set keys used
// for link maintenance. Levels 0 and 5 have no parent-scoped set.
var linkSetConfigs = map[int]linkSetConfig{
	// grid hierarchy (top level, no parent set)
	0: {CurrKey: constants.RedisAllGridSetKey, PrevIsNil: true},
	// zone hierarchy (parent set keyed by grid)
	1: {CurrKey: constants.RedisAllZoneSetKey, PrevKeyTemplate: constants.RedisSpecGridZoneSetKey},
	// station hierarchy (parent set keyed by zone)
	2: {CurrKey: constants.RedisAllStationSetKey, PrevKeyTemplate: constants.RedisSpecZoneStationSetKey},
	// component nspath hierarchy (parent set keyed by station)
	3: {CurrKey: constants.RedisAllCompNSPathSetKey, PrevKeyTemplate: constants.RedisSpecStationCompNSPATHSetKey},
	// component tag hierarchy (parent set keyed by component nspath)
	4: {CurrKey: constants.RedisAllCompTagSetKey, PrevKeyTemplate: constants.RedisSpecCompNSPathCompTagSetKey},
	// config hierarchy (no parent set)
	5: {CurrKey: constants.RedisAllConfigSetKey, PrevIsNil: true},
}
// DiagramNodeLinkHandler defines the diagram node link process api.
// It validates the client token, binds the JSON body, loads the node and its
// parent from postgres, then adds or removes the node in the redis link sets
// that back the search hierarchy. All responses use HTTP 200 with a business
// code in the envelope.
func DiagramNodeLinkHandler(c *gin.Context) {
	var request network.DiagramNodeLinkRequest
	// client token is expected to be injected by upstream middleware
	clientToken := c.GetString("client_token")
	if clientToken == "" {
		err := constants.ErrGetClientToken
		logger.Error(c, "failed to get client token from context", "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
		})
		return
	}
	if err := c.ShouldBindJSON(&request); err != nil {
		logger.Error(c, "failed to unmarshal diagram node process request", "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  "invalid request body format: " + err.Error(),
		})
		return
	}
	var err error
	pgClient := database.GetPostgresDBClient()
	nodeID := request.NodeID
	nodeLevel := request.NodeLevel
	action := request.Action
	// fetch both the node itself and its parent; the parent is needed to
	// address the parent-scoped redis set for this level
	prevNodeInfo, currNodeInfo, err := database.QueryNodeInfoByID(c, pgClient, nodeID, nodeLevel)
	if err != nil {
		logger.Error(c, "failed to query diagram node info by nodeID and level from postgres", "node_id", nodeID, "level", nodeLevel, "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  "failed to query measurement info record: " + err.Error(),
			Payload: map[string]any{
				"node_id":    nodeID,
				"node_level": nodeLevel,
				"action":     action,
			},
		})
		return
	}
	// resolve the (parent, current) redis sets for this level, then apply
	// the add/del action to both
	prevLinkSet, currLinkSet := generateLinkSet(c, nodeLevel, prevNodeInfo)
	err = processLinkSetData(c, action, nodeLevel, prevLinkSet, currLinkSet, prevNodeInfo, currNodeInfo)
	if err != nil {
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
			Payload: map[string]any{
				"node_id":    nodeID,
				"node_level": nodeLevel,
				"action":     action,
			},
		})
		return
	}
	logger.Info(c, "process diagram node link success", "node_id", nodeID, "level", nodeLevel, "action", request.Action)
	c.JSON(http.StatusOK, network.SuccessResponse{
		Code: http.StatusOK,
		Msg:  "diagram node link process success",
		Payload: map[string]any{
			"node_id":    nodeID,
			"node_level": nodeLevel,
			"action":     action,
		},
	})
}
// generateLinkSet resolves the (previous, current) redis link sets for the
// given hierarchy level using linkSetConfigs. An unsupported level yields
// (nil, nil); a level without a parent-scoped set yields a nil previous set.
func generateLinkSet(ctx context.Context, level int, prevNodeInfo orm.CircuitDiagramNodeInterface) (*diagram.RedisSet, *diagram.RedisSet) {
	cfg, supported := linkSetConfigs[level]
	if !supported {
		// level not supported
		return nil, nil
	}
	current := diagram.NewRedisSet(ctx, cfg.CurrKey, 0, false)
	if cfg.PrevIsNil {
		return nil, current
	}
	// parent-scoped set key is derived from the parent node's tag name
	parentKey := fmt.Sprintf(cfg.PrevKeyTemplate, prevNodeInfo.GetTagName())
	return diagram.NewRedisSet(ctx, parentKey, 0, false), current
}
// processLinkSetData applies the add/del action to the current-level set and
// (when present) the parent-scoped set. Which node field becomes the set
// member depends on the level: most levels use the tag name, level 3 uses the
// node's NSPath for the current set. Level 5 is an intentional no-op.
func processLinkSetData(ctx context.Context, action string, level int, prevLinkSet, currLinkSet *diagram.RedisSet, prevNodeInfo, currNodeInfo orm.CircuitDiagramNodeInterface) error {
	var currMember string
	var prevMember string
	var err1, err2 error
	switch level {
	case 0, 1, 2, 4:
		// grid / zone / station / component tag hierarchy: tag name is the member
		currMember = currNodeInfo.GetTagName()
		if prevLinkSet != nil {
			prevMember = prevNodeInfo.GetTagName()
		}
	case 3:
		// component NSPath hierarchy: NSPath in the current set, parent tag
		// in the parent-scoped set
		currMember = currNodeInfo.GetNSPath()
		prevMember = prevNodeInfo.GetTagName()
	case 5:
		// TODO[NONEED-ISSUE] no add/delete requirement for this level yet #2
		// deliberately returns nil: the request is logged and ignored
		err := fmt.Errorf("currently hierarchy no need to add or delete this level: %d", level)
		logger.Error(ctx, "no need level for link process", "level", level, "action", action, "error", err)
		return nil
	default:
		err := fmt.Errorf("unsupported diagram node level: %d", level)
		logger.Error(ctx, "unsupport diagram node level for link process", "level", level, "action", action, "error", err)
		return err
	}
	switch action {
	case constants.SearchLinkAddAction:
		err1 = currLinkSet.SADD(currMember)
		if prevLinkSet != nil {
			err2 = prevLinkSet.SADD(prevMember)
		}
	case constants.SearchLinkDelAction:
		err1 = currLinkSet.SREM(currMember)
		if prevLinkSet != nil {
			err2 = prevLinkSet.SREM(prevMember)
		}
	default:
		err := constants.ErrUnsupportedLinkAction
		logger.Error(ctx, "unsupport diagram node link process action", "action", action, "error", err)
		return err
	}
	return processDiagramLinkError(err1, err2, action)
}
func processDiagramLinkError(err1, err2 error, action string) error {
var err error
if err1 != nil && err2 != nil {
err = errors.Join(err1, err2)
err = fmt.Errorf("process diagram node link failed, currLinkSet %s operation and prevLinkSet %s operation failed: %w", action, action, err)
} else if err1 != nil {
err = fmt.Errorf("process diagram node currLinkSet link failed: currLinkSet %s operation failed: %w", action, err1)
} else {
err = fmt.Errorf("process diagram node prevLinkSet link failed: prevLinkSet %s operation: %w", action, err2)
}
return err
}

32
handler/helper.go Normal file
View File

@ -0,0 +1,32 @@
// Package handler provides HTTP handlers for various endpoints.
package handler
import (
"net/http"
"modelRT/network"
"github.com/gin-gonic/gin"
)
// renderRespFailure writes a failure envelope to the client. The HTTP status
// is always 200; the business result is carried by code/msg. A non-nil
// payload is attached as-is, a nil payload leaves the field empty.
func renderRespFailure(c *gin.Context, code int, msg string, payload any) {
	failure := network.FailureResponse{Code: code, Msg: msg}
	if payload != nil {
		failure.Payload = payload
	}
	c.JSON(http.StatusOK, failure)
}
// renderRespSuccess writes a success envelope to the client. The HTTP status
// is always 200; the business result is carried by code/msg. A non-nil
// payload is attached as-is, a nil payload leaves the field empty.
func renderRespSuccess(c *gin.Context, code int, msg string, payload any) {
	success := network.SuccessResponse{Code: code, Msg: msg}
	if payload != nil {
		success.Payload = payload
	}
	c.JSON(http.StatusOK, success)
}

View File

@ -0,0 +1,58 @@
// Package handler provides HTTP handlers for various endpoints.
package handler
import (
"fmt"
"net/http"
"strconv"
"modelRT/alert"
"modelRT/constants"
"modelRT/logger"
"modelRT/network"
"github.com/gin-gonic/gin"
)
// QueryHistoryDataHandler defines the query history data process API.
// It parses the token/begin/end query parameters and currently returns alert
// events for a (stubbed) level; token-based dataRT querying is still a TODO.
//
// Fix: the original did not return after rendering the bad-request responses
// for an invalid begin/end parameter, so it fell through with zero values and
// wrote a second (success) response on the same request.
func QueryHistoryDataHandler(c *gin.Context) {
	token := c.Query("token")
	beginStr := c.Query("begin")
	begin, err := strconv.Atoi(beginStr)
	if err != nil {
		logger.Error(c, "convert begin param from string to int failed", "error", err)
		resp := network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
		}
		c.JSON(http.StatusOK, resp)
		return
	}
	endStr := c.Query("end")
	end, err := strconv.Atoi(endStr)
	if err != nil {
		logger.Error(c, "convert end param from string to int failed", "error", err)
		resp := network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
		}
		c.JSON(http.StatusOK, resp)
		return
	}
	// placeholder use of the parsed params until the dataRT query is wired up
	fmt.Println(token, begin, end)
	// TODO parse token to dataRT query params
	var level int
	var targetLevel constants.AlertLevel
	alertManger := alert.GetAlertMangerInstance()
	targetLevel = constants.AlertLevel(level)
	events := alertManger.GetRangeEventsByLevel(targetLevel)
	resp := network.SuccessResponse{
		Code: http.StatusOK,
		Msg:  "success",
		Payload: map[string]interface{}{
			"events": events,
		},
	}
	c.JSON(http.StatusOK, resp)
}

View File

@ -0,0 +1,82 @@
// Package handler provides HTTP handlers for various endpoints.
package handler
import (
"net/http"
"modelRT/constants"
"modelRT/database"
"modelRT/diagram"
"modelRT/logger"
"modelRT/network"
"github.com/gin-gonic/gin"
)
// MeasurementGetHandler defines the measurement query API.
// It reads the buffered measurement points from the redis zset keyed by the
// measurement token, then enriches the response with the measurement row
// loaded from postgres by ID.
func MeasurementGetHandler(c *gin.Context) {
	var request network.MeasurementGetRequest
	// client token is expected to be injected by upstream middleware
	clientToken := c.GetString("client_token")
	if clientToken == "" {
		err := constants.ErrGetClientToken
		logger.Error(c, "failed to get client token from context", "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
		})
		return
	}
	if err := c.ShouldBindJSON(&request); err != nil {
		logger.Error(c, "failed to unmarshal measurement get request", "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
		})
		return
	}
	// full-range read (0..-1) of the measurement's zset
	// NOTE(review): the token is passed both as the zset key and as the
	// ZRANGE key argument — confirm this double use is intended
	zset := diagram.NewRedisZSet(c, request.MeasurementToken, 0, false)
	points, err := zset.ZRANGE(request.MeasurementToken, 0, -1)
	if err != nil {
		logger.Error(c, "failed to get measurement data from redis", "measurement_token", request.MeasurementToken, "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusInternalServerError,
			Msg:  err.Error(),
			Payload: map[string]any{
				"measurement_id":    request.MeasurementID,
				"measurement_token": request.MeasurementToken,
			},
		})
		return
	}
	pgClient := database.GetPostgresDBClient()
	measurementInfo, err := database.QueryMeasurementByID(c, pgClient, request.MeasurementID)
	if err != nil {
		logger.Error(c, "failed to query measurement by id", "measurement_id", request.MeasurementID, "error", err)
		// the already-fetched points are echoed back even on DB failure
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
			Payload: map[string]any{
				"measurement_id":    request.MeasurementID,
				"measurement_token": request.MeasurementToken,
				"measurement_value": points,
			},
		})
		return
	}
	c.JSON(http.StatusOK, network.SuccessResponse{
		Code: http.StatusOK,
		Msg:  "success",
		Payload: map[string]any{
			"measurement_id":    request.MeasurementID,
			"measurement_token": request.MeasurementToken,
			"measurement_info":  measurementInfo,
			"measurement_value": points,
		},
	})
}

View File

@ -0,0 +1,121 @@
// Package handler provides HTTP handlers for various endpoints.
package handler
import (
"modelRT/logger"
"modelRT/model"
"modelRT/network"
"modelRT/util"
"net/http"
"github.com/gin-gonic/gin"
)
// MeasurementRecommendHandler defines the measurement recommend API.
// @Summary Measurement recommendation (search-box autocomplete)
// @Description Queries redis for possible measurement points or structure paths matching the user's input string and returns a recommendation list.
// @Tags Measurement Recommend
// @Accept json
// @Produce json
// @Param input query string true "recommendation keyword, e.g. 'grid1' or 'grid1.'" Example("grid1")
// @Success 200 {object} network.SuccessResponse{payload=network.MeasurementRecommendPayload} "recommendation list returned successfully"
//
// @Example 200 {
// "code": 200,
// "msg": "success",
// "payload": {
// "input": "grid1.zone1.station1.ns1.tag1.bay.",
// "offset": 21,
// "recommended_list": [
// "I11_A_rms",
// "I11_B_rms.",
// "I11_C_rms.",
// ]
// }
// }
//
// @Failure 400 {object} network.FailureResponse "failed to return recommendation list"
//
// @Example 400 {
// "code": 400,
// "msg": "failed to get recommend data from redis",
// }
//
// @Router /measurement/recommend [get]
func MeasurementRecommendHandler(c *gin.Context) {
	var request network.MeasurementRecommendRequest
	if err := c.ShouldBindQuery(&request); err != nil {
		logger.Error(c, "failed to bind measurement recommend request", "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
		})
		return
	}
	recommendResults := model.RedisSearchRecommend(c, request.Input)
	payloads := make([]network.MeasurementRecommendPayload, 0, len(recommendResults))
	for _, recommendResult := range recommendResults {
		// any single failed result aborts the whole response
		if recommendResult.Err != nil {
			err := recommendResult.Err
			logger.Error(c, "failed to get recommend data from redis", "input", request.Input, "error", err)
			c.JSON(http.StatusOK, network.FailureResponse{
				Code: http.StatusInternalServerError,
				Msg:  err.Error(),
				Payload: map[string]any{
					"input": request.Input,
				},
			})
			return
		}
		// finalOffset is the prefix length shared between the input and the
		// candidates: fuzzy matches use the longest common prefix over all
		// candidates, exact matches the shortest
		var finalOffset int
		recommends := recommendResult.QueryDatas
		if recommendResult.IsFuzzy {
			var maxOffset int
			for index, recommend := range recommends {
				offset := util.GetLongestCommonPrefixLength(request.Input, recommend)
				if index == 0 || offset > maxOffset {
					maxOffset = offset
				}
			}
			finalOffset = maxOffset
		} else {
			var minOffset int
			for index, recommend := range recommends {
				offset := util.GetLongestCommonPrefixLength(request.Input, recommend)
				if index == 0 || offset < minOffset {
					minOffset = offset
				}
			}
			finalOffset = minOffset
		}
		// strip the shared prefix and deduplicate, preserving first-seen order
		resultRecommends := make([]string, 0, len(recommends))
		seen := make(map[string]struct{})
		for _, recommend := range recommends {
			recommendTerm := recommend[finalOffset:]
			if len(recommendTerm) != 0 {
				if _, exists := seen[recommendTerm]; !exists {
					seen[recommendTerm] = struct{}{}
					resultRecommends = append(resultRecommends, recommendTerm)
				}
			}
		}
		payloads = append(payloads, network.MeasurementRecommendPayload{
			Input:           request.Input,
			Offset:          finalOffset,
			RecommendType:   recommendResult.RecommendType.String(),
			RecommendedList: resultRecommends,
		})
	}
	c.JSON(http.StatusOK, network.SuccessResponse{
		Code:    http.StatusOK,
		Msg:     "success",
		Payload: &payloads,
	})
}

135
handler/mesurement_link.go Normal file
View File

@ -0,0 +1,135 @@
// Package handler provides HTTP handlers for various endpoints.
package handler
import (
"errors"
"fmt"
"net/http"
"modelRT/constants"
"modelRT/database"
"modelRT/diagram"
"modelRT/logger"
"modelRT/network"
"github.com/gin-gonic/gin"
)
// MeasurementLinkHandler defines the measurement link process api.
// It loads the measurement and its owning component from postgres, then adds
// or removes the measurement tag in two redis sets: the global measurement
// tag set and the per-component measurement set.
func MeasurementLinkHandler(c *gin.Context) {
	var request network.MeasurementLinkRequest
	// client token is expected to be injected by upstream middleware
	clientToken := c.GetString("client_token")
	if clientToken == "" {
		err := constants.ErrGetClientToken
		logger.Error(c, "failed to get client token from context", "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
		})
		return
	}
	if err := c.ShouldBindJSON(&request); err != nil {
		logger.Error(c, "failed to unmarshal measurement process request", "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  "invalid request body format: " + err.Error(),
		})
		return
	}
	var err error
	pgClient := database.GetPostgresDBClient()
	measurementID := request.MeasurementID
	action := request.Action
	measurementInfo, err := database.QueryMeasurementByID(c, pgClient, measurementID)
	if err != nil {
		logger.Error(c, "failed to query measurement info by measurement id from postgres", "meauserement_id", measurementID, "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  "failed to query measurement info record: " + err.Error(),
			Payload: map[string]any{
				"id":     measurementID,
				"action": action,
			},
		})
		return
	}
	// the component is needed to address the per-component measurement set
	componentInfo, err := database.QueryComponentByUUID(c, pgClient, measurementInfo.ComponentUUID)
	if err != nil {
		logger.Error(c, "failed to query component info by component uuid from postgres", "component_uuid", measurementInfo.ComponentUUID, "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  "failed to query component info record: " + err.Error(),
			Payload: map[string]any{
				"id":     measurementID,
				"action": action,
			},
		})
		return
	}
	allMeasSet := diagram.NewRedisSet(c, constants.RedisAllMeasTagSetKey, 0, false)
	compMeasLinkKey := fmt.Sprintf(constants.RedisSpecCompTagMeasSetKey, componentInfo.Tag)
	compMeasLinkSet := diagram.NewRedisSet(c, compMeasLinkKey, 0, false)
	// apply the action to both sets; individual failures are merged by
	// processActionError
	switch action {
	case constants.SearchLinkAddAction:
		err1 := allMeasSet.SADD(measurementInfo.Tag)
		err2 := compMeasLinkSet.SADD(measurementInfo.Tag)
		err = processActionError(err1, err2, action)
		if err != nil {
			logger.Error(c, "add measurement link process operation failed", "measurement_id", measurementID, "action", action, "error", err)
		}
	case constants.SearchLinkDelAction:
		err1 := allMeasSet.SREM(measurementInfo.Tag)
		err2 := compMeasLinkSet.SREM(measurementInfo.Tag)
		err = processActionError(err1, err2, action)
		if err != nil {
			logger.Error(c, "del measurement link process operation failed", "measurement_id", measurementID, "action", action, "error", err)
		}
	default:
		err = constants.ErrUnsupportedLinkAction
		logger.Error(c, "unsupport measurement link process action", "measurement_id", measurementID, "action", action, "error", err)
	}
	if err != nil {
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
			Payload: map[string]any{
				"measurement_id": request.MeasurementID,
				"action":         request.Action,
			},
		})
		return
	}
	logger.Info(c, "process measurement link success", "measurement_id", measurementID, "action", request.Action)
	c.JSON(http.StatusOK, network.SuccessResponse{
		Code: http.StatusOK,
		Msg:  "measurement link process success",
		Payload: map[string]any{
			"measurement_id": measurementID,
			"action":         request.Action,
		},
	})
}
func processActionError(err1, err2 error, action string) error {
var err error
if err1 != nil && err2 != nil {
err = errors.Join(err1, err2)
err = fmt.Errorf("process measurement link failed, allMeasSet %s operation and compMeasLinkSet %s operation failed: %w", action, action, err)
} else if err1 != nil {
err = fmt.Errorf("process measurement link failed: allMeasSet %s operation failed: %w", action, err1)
} else {
err = fmt.Errorf("process measurement link failed: compMeasLinkSet %s operation: %w", action, err2)
}
return err
}

View File

@ -0,0 +1,508 @@
// Package handler provides HTTP handlers for various endpoints.
package handler
import (
"context"
"fmt"
"maps"
"net/http"
"slices"
"sort"
"strconv"
"time"
"modelRT/constants"
"modelRT/diagram"
"modelRT/logger"
"modelRT/model"
"modelRT/network"
"github.com/gin-gonic/gin"
"github.com/gorilla/websocket"
)
// pullUpgrader upgrades HTTP requests to websocket connections for the
// real-time pull API.
// NOTE(review): CheckOrigin accepts every origin, which disables the
// browser cross-origin protection for this endpoint — confirm this is
// acceptable for the deployment environment.
var pullUpgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
	CheckOrigin: func(_ *http.Request) bool {
		return true
	},
}
// PullRealTimeDataHandler defines the real time data pull API.
// It upgrades the request to a websocket, starts three goroutines (target
// polling fan-in, client reader, sender), and runs an aggregation loop that
// batches fan-in data by size (SendMaxBatchSize) or time (SendMaxBatchInterval)
// before handing batches to the sender.
// @Summary Real-time data pull websocket API
// @Description Pulls the real-time data stream corresponding to the given clientID
// @Tags RealTime Component Websocket
// @Router /monitors/data/realtime/stream/:clientID [get]
func PullRealTimeDataHandler(c *gin.Context) {
	clientID := c.Param("clientID")
	if clientID == "" {
		err := fmt.Errorf("clientID is missing from the path")
		logger.Error(c, "query clientID from path failed", "error", err, "url", c.Request.RequestURI)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
		})
		return
	}
	conn, err := pullUpgrader.Upgrade(c.Writer, c.Request, nil)
	if err != nil {
		logger.Error(c, "upgrade http protocol to websocket protocal failed", "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
		})
		return
	}
	defer conn.Close()
	ctx, cancel := context.WithCancel(c.Request.Context())
	defer cancel()
	// TODO[BACKPRESSURE-ISSUE] initially use a fixed large capacity to define the fan-in model #1
	fanInChan := make(chan network.RealTimePullTarget, constants.FanInChanMaxSize)
	sendChan := make(chan []network.RealTimePullTarget, constants.SendChanBufferSize)
	go processTargetPolling(ctx, globalSubState, clientID, fanInChan, sendChan)
	go readClientMessages(ctx, conn, clientID, cancel)
	go sendDataStream(ctx, conn, clientID, sendChan, cancel)
	// NOTE(review): deferred close(sendChan) runs (LIFO) before cancel(), so
	// processTargetPolling may still be sending on sendChan when it is closed
	// — confirm this cannot panic with "send on closed channel"
	defer close(sendChan)
	bufferMaxSize := constants.SendMaxBatchSize
	sendMaxInterval := constants.SendMaxBatchInterval
	buffer := make([]network.RealTimePullTarget, 0, bufferMaxSize)
	ticker := time.NewTicker(sendMaxInterval)
	defer ticker.Stop()
	for {
		select {
		case targetData, ok := <-fanInChan:
			if !ok {
				logger.Error(ctx, "fanInChan closed unexpectedly", "client_id", clientID)
				return
			}
			buffer = append(buffer, targetData)
			if len(buffer) >= bufferMaxSize {
				// buffer is full, send immediately (non-blocking: drop if the
				// sender is backed up)
				select {
				case sendChan <- buffer:
				default:
					logger.Warn(ctx, "sendChan is full, dropping aggregated data batch (buffer is full)", "client_id", clientID)
				}
				// reset buffer
				buffer = make([]network.RealTimePullTarget, 0, bufferMaxSize)
				// reset the ticker to prevent it from triggering immediately after the ticker is sent
				ticker.Reset(sendMaxInterval)
			}
		case <-ticker.C:
			if len(buffer) > 0 {
				// when the ticker is triggered, all data in the send buffer is sent
				select {
				case sendChan <- buffer:
				default:
					logger.Warn(ctx, "sendChan is full, dropping aggregated data batch (ticker is triggered)", "client_id", clientID)
				}
				// reset buffer
				buffer = make([]network.RealTimePullTarget, 0, bufferMaxSize)
			}
		case <-ctx.Done():
			// send the last remaining data
			if len(buffer) > 0 {
				select {
				case sendChan <- buffer:
				default:
					logger.Warn(ctx, "sendChan is full, cannot send last remaining data during shutdown.", "client_id", clientID)
				}
			}
			logger.Info(ctx, "pullRealTimeDataHandler exiting as context is done.", "client_id", clientID)
			return
		}
	}
}
// readClientMessages continuously listens for messages from the client
// (ping/pong, close frames, or control commands). Any read error — normal
// close included — cancels the request context and stops the goroutine;
// text/binary messages are only logged.
func readClientMessages(ctx context.Context, conn *websocket.Conn, clientID string, cancel context.CancelFunc) {
	// conn.SetReadLimit(512)
	for {
		msgType, payload, readErr := conn.ReadMessage()
		if readErr != nil {
			switch {
			case websocket.IsCloseError(readErr, websocket.CloseNormalClosure):
				logger.Info(ctx, "client actively and normally closed the connection", "client_id", clientID)
			case websocket.IsUnexpectedCloseError(readErr, websocket.CloseGoingAway, websocket.CloseAbnormalClosure):
				logger.Error(ctx, "an unexpected error occurred while reading the webSocket connection", "client_id", clientID, "error", readErr)
			default:
				// handle other read errors (eg, I/O errors)
				logger.Error(ctx, "an error occurred while reading the webSocket connection", "client_id", clientID, "error", readErr)
			}
			cancel()
			return
		}
		// process normal message from client
		if msgType == websocket.TextMessage || msgType == websocket.BinaryMessage {
			logger.Info(ctx, "read normal message from client", "client_id", clientID, "content", string(payload))
		}
	}
}
// sendAggregateRealTimeDataStream pushes one aggregated batch of real-time
// targets to the client as a success envelope. Empty batches are silently
// skipped; the websocket write error, if any, is returned to the caller.
func sendAggregateRealTimeDataStream(conn *websocket.Conn, targetsData []network.RealTimePullTarget) error {
	if len(targetsData) == 0 {
		return nil
	}
	payload := network.RealTimePullPayload{Targets: targetsData}
	return conn.WriteJSON(network.SuccessResponse{
		Code:    200,
		Msg:     "success",
		Payload: payload,
	})
}
// sendDataStream manages a dedicated goroutine to push data batches or system
// signals to the websocket client. It exits when sendChan is closed, or after
// a failed data-batch write (which also cancels the request context).
func sendDataStream(ctx context.Context, conn *websocket.Conn, clientID string, sendChan <-chan []network.RealTimePullTarget, cancel context.CancelFunc) {
	logger.Info(ctx, "start dedicated websocket sender goroutine", "client_id", clientID)
	for targetsData := range sendChan {
		// TODO use constants.SysCtrlPrefix + switch-case form to handle possible business expansion
		// a single-element batch carrying the SysCtrlAllRemoved ID is a
		// control signal, not measurement data: tell the client that all of
		// its targets have been removed (business code 2101)
		if len(targetsData) == 1 && targetsData[0].ID == constants.SysCtrlAllRemoved {
			err := conn.WriteJSON(map[string]any{
				"code": 2101,
				"msg":  "all targets removed in given client_id",
				"payload": map[string]int{
					"active_targets_count": 0,
				},
			})
			if err != nil {
				logger.Error(ctx, "send all targets removed system signal failed", "client_id", clientID, "error", err)
				// NOTE(review): after cancel() this loop keeps draining
				// sendChan until the producer closes it — confirm intended
				cancel()
			}
			continue
		}
		if err := sendAggregateRealTimeDataStream(conn, targetsData); err != nil {
			logger.Error(ctx, "send the real time aggregate data failed in sender goroutine", "client_id", clientID, "error", err)
			cancel()
			return
		}
	}
	logger.Info(ctx, "sender goroutine exiting as channel is closed", "client_id", clientID)
}
// processTargetPolling processes the targets in the subscription map for one
// client: it starts a redis polling goroutine per subscribed target, then
// loops on the client's notice channel applying append/remove/update
// operations until the context is cancelled or the channel closes.
// stopChanMap tracks one stop channel per running polling goroutine.
func processTargetPolling(ctx context.Context, s *SharedSubState, clientID string, fanInChan chan network.RealTimePullTarget, sendChan chan<- []network.RealTimePullTarget) {
	// ensure the fanInChan will not leak
	defer close(fanInChan)
	logger.Info(ctx, fmt.Sprintf("start processing real time data polling for clientID:%s", clientID))
	stopChanMap := make(map[string]chan struct{})
	// look up this client's subscription config under the global read lock
	s.globalMutex.RLock()
	config, confExist := s.subMap[clientID]
	if !confExist {
		logger.Error(ctx, "can not found config into local stored map by clientID", "clientID", clientID)
		s.globalMutex.RUnlock()
		return
	}
	s.globalMutex.RUnlock()
	// initial start-up: one polling goroutine per target, grouped by interval,
	// under the per-config read lock
	config.mutex.RLock()
	for interval, measurementTargets := range config.measurements {
		for _, target := range measurementTargets {
			// add a secondary check to prevent the target from already existing in the stopChanMap
			if _, exists := stopChanMap[target]; exists {
				logger.Warn(ctx, "target already exists in polling map, skipping start-up", "target", target)
				continue
			}
			targetContext, exist := config.targetContext[target]
			if !exist {
				logger.Error(ctx, "can not found subscription node param into param map", "target", target)
				continue
			}
			measurementInfo := targetContext.measurement
			queryGStopChan := make(chan struct{})
			// store stop channel with target into map
			stopChanMap[target] = queryGStopChan
			queryKey, err := model.GenerateMeasureIdentifier(measurementInfo.DataSource)
			if err != nil {
				logger.Error(ctx, "generate measurement indentifier by data_source field failed", "data_source", measurementInfo.DataSource, "error", err)
				continue
			}
			pollingConfig := redisPollingConfig{
				targetID: target,
				queryKey: queryKey,
				interval: interval,
				dataSize: int64(measurementInfo.Size),
			}
			go realTimeDataQueryFromRedis(ctx, pollingConfig, fanInChan, queryGStopChan)
		}
	}
	config.mutex.RUnlock()
	// event loop: react to subscription changes until shutdown
	for {
		select {
		case transportTargets, ok := <-config.noticeChan:
			if !ok {
				logger.Error(ctx, "notice channel was closed unexpectedly", "clientID", clientID)
				stopAllPolling(ctx, stopChanMap)
				return
			}
			// mutate the polling set under the per-config write lock
			config.mutex.Lock()
			switch transportTargets.OperationType {
			case constants.OpAppend:
				appendTargets(ctx, config, stopChanMap, fanInChan, transportTargets.Targets)
			case constants.OpRemove:
				removeTargets(ctx, stopChanMap, transportTargets.Targets, sendChan)
			case constants.OpUpdate:
				updateTargets(ctx, config, stopChanMap, fanInChan, transportTargets.Targets)
			}
			config.mutex.Unlock()
		case <-ctx.Done():
			logger.Info(ctx, fmt.Sprintf("stop all data retrieval goroutines under this clientID:%s", clientID))
			stopAllPolling(ctx, stopChanMap)
			return
		}
	}
}
// appendTargets starts new polling goroutines for targets that were just
// added. Targets whose goroutine could not be started are reported in a
// single trailing warning. Caller holds config.mutex.
//
// Fixes over the original: the target was removed from the pending set and
// its stop channel registered in stopChanMap *before* the queryKey error
// check, so targets failing GenerateMeasureIdentifier were silently omitted
// from the warning and left an orphan stop channel behind. Registration and
// pending-removal now happen only after the goroutine is actually started.
func appendTargets(ctx context.Context, config *RealTimeSubConfig, stopChanMap map[string]chan struct{}, fanInChan chan network.RealTimePullTarget, appendTargets []string) {
	// pending tracks targets whose polling goroutine has not been started yet
	pending := make(map[string]struct{}, len(appendTargets))
	for _, target := range appendTargets {
		pending[target] = struct{}{}
	}
	for _, target := range appendTargets {
		targetContext, exists := config.targetContext[target]
		if !exists {
			logger.Error(ctx, "the append target does not exist in the real time data config context map,skipping the startup step", "target", target)
			continue
		}
		if _, exists := stopChanMap[target]; exists {
			logger.Error(ctx, "the append target already has a stop channel, skipping the startup step", "target", target)
			continue
		}
		interval := targetContext.interval
		if _, exists = config.measurements[interval]; !exists {
			logger.Error(ctx, "targetContext exist but measurements is missing, cannot update config", "target", target, "interval", interval)
			continue
		}
		queryKey, err := model.GenerateMeasureIdentifier(targetContext.measurement.DataSource)
		if err != nil {
			logger.Error(ctx, "the append target generate redis query key identifier failed", "target", target, "error", err)
			continue
		}
		// all checks passed: register the stop channel and start polling
		queryGStopChan := make(chan struct{})
		stopChanMap[target] = queryGStopChan
		pollingConfig := redisPollingConfig{
			targetID: target,
			queryKey: queryKey,
			interval: interval,
			dataSize: int64(targetContext.measurement.Size),
		}
		go realTimeDataQueryFromRedis(ctx, pollingConfig, fanInChan, queryGStopChan)
		delete(pending, target)
		logger.Info(ctx, "started new polling goroutine for appended target", "target", target, "interval", interval)
	}
	if notStarted := slices.Sorted(maps.Keys(pending)); len(notStarted) > 0 {
		logger.Warn(ctx, fmt.Sprintf("the following targets:%v start up fetch real time data process goroutine not started", notStarted))
		clear(pending)
	}
}
// updateTargets restarts the polling goroutines for targets whose
// subscription parameters changed. Targets that could not be restarted are
// reported in a single trailing warning. Caller holds config.mutex.
//
// Fixes over the original: the old goroutine was stopped and the target
// removed from the pending set *before* the validation and queryKey checks,
// so a failed update killed polling without starting a replacement and the
// failure was omitted from the warning. All checks now run first; the old
// goroutine is stopped only when its replacement is ready to start.
func updateTargets(ctx context.Context, config *RealTimeSubConfig, stopChanMap map[string]chan struct{}, fanInChan chan network.RealTimePullTarget, updateTargets []string) {
	// pending tracks targets whose replacement goroutine has not been started yet
	pending := make(map[string]struct{}, len(updateTargets))
	for _, target := range updateTargets {
		pending[target] = struct{}{}
	}
	for _, target := range updateTargets {
		targetContext, exists := config.targetContext[target]
		if !exists {
			logger.Error(ctx, "the update target does not exist in the real time data config context map,skipping the startup step", "target", target)
			continue
		}
		oldQueryGStopChan, running := stopChanMap[target]
		if !running {
			logger.Error(ctx, "the update target does not has a stop channel, skipping the startup step", "target", target)
			continue
		}
		interval := targetContext.interval
		if _, exists = config.measurements[interval]; !exists {
			logger.Error(ctx, "targetContext exist but measurements is missing, cannot update config", "target", target, "interval", interval)
			continue
		}
		queryKey, err := model.GenerateMeasureIdentifier(targetContext.measurement.DataSource)
		if err != nil {
			logger.Error(ctx, "the update target generate redis query key identifier failed", "target", target, "error", err)
			continue
		}
		// all checks passed: swap the old goroutine for a new one
		close(oldQueryGStopChan)
		logger.Info(ctx, "stopped old polling goroutine for updated target", "target", target)
		newQueryGStopChan := make(chan struct{})
		stopChanMap[target] = newQueryGStopChan
		pollingConfig := redisPollingConfig{
			targetID: target,
			queryKey: queryKey,
			interval: interval,
			dataSize: int64(targetContext.measurement.Size),
		}
		go realTimeDataQueryFromRedis(ctx, pollingConfig, fanInChan, newQueryGStopChan)
		delete(pending, target)
		logger.Info(ctx, "started new polling goroutine for update target", "target", target, "interval", interval)
	}
	if notStarted := slices.Sorted(maps.Keys(pending)); len(notStarted) > 0 {
		logger.Warn(ctx, fmt.Sprintf("the following targets:%v start up fetch real time data process goroutine not started", notStarted))
		clear(pending)
	}
}
// removeTargets stops the running polling goroutine for every target that was
// removed from the subscription; when the last one is gone it notifies the
// client that all targets have been removed.
func removeTargets(ctx context.Context, stopChanMap map[string]chan struct{}, removeTargets []string, sendChan chan<- []network.RealTimePullTarget) {
	for _, target := range removeTargets {
		ch, running := stopChanMap[target]
		if !running {
			logger.Warn(ctx, "removeTarget was not running, skipping remove operation", "target", target)
			continue
		}
		// Closing the channel signals the poller to exit; drop the entry so
		// the channel is never closed twice.
		close(ch)
		delete(stopChanMap, target)
		logger.Info(ctx, "stopped polling goroutine for removed target", "target", target)
	}
	if len(stopChanMap) > 0 {
		return
	}
	logger.Info(ctx, "all polling goroutines have been stopped for this client")
	sendSpecialStatusToClient(ctx, sendChan)
}
// sendSpecialStatusToClient performs a non-blocking push of the special
// "all targets removed" control frame (the "2101" status referenced in the
// log messages) onto the client's send channel; if the channel is full the
// frame is dropped with a warning instead of blocking the caller.
func sendSpecialStatusToClient(ctx context.Context, sendChan chan<- []network.RealTimePullTarget) {
	frame := []network.RealTimePullTarget{
		{
			ID:    constants.SysCtrlAllRemoved,
			Datas: []network.RealTimePullData{},
		},
	}
	select {
	case sendChan <- frame:
		logger.Info(ctx, "sent 2101 status request to sendChan")
	default:
		logger.Warn(ctx, "sendChan is full, skipping 2101 status message")
	}
}
// stopAllPolling stops all running query goroutines for a specific client by
// closing every stop channel, then empties the map so no closed channel can
// ever be closed a second time.
func stopAllPolling(ctx context.Context, stopChanMap map[string]chan struct{}) {
	for target, stopChan := range stopChanMap {
		logger.Info(ctx, fmt.Sprintf("stop the data fetching behavior for the corresponding target:%s", target))
		close(stopChan)
	}
	clear(stopChanMap)
}
// redisPollingConfig define struct for param which query real time data from redis
type redisPollingConfig struct {
	// targetID is the subscription target the polled data belongs to; it is
	// echoed back as the ID of every RealTimePullTarget frame.
	targetID string
	// queryKey is the redis key generated from the measurement data source
	// (via model.GenerateMeasureIdentifier) and queried with ZRANGEBYLEX.
	queryKey string
	// interval is the polling period as a time.ParseDuration-compatible
	// string (e.g. "1s"); an unparsable value aborts the polling goroutine.
	interval string
	// dataSize is passed to QueryByZRangeByLex — presumably the maximum
	// number of members fetched per query; confirm against the redis client.
	dataSize int64
}
// realTimeDataQueryFromRedis runs the polling loop for one target: it queries
// redis immediately, then again after every interval tick, until stopChan is
// closed. An unparsable interval string aborts the goroutine after logging.
func realTimeDataQueryFromRedis(ctx context.Context, config redisPollingConfig, fanInChan chan network.RealTimePullTarget, stopChan chan struct{}) {
	logger.Info(ctx, "start a redis query goroutine for real time data pulling", "targetID", config.targetID, "queryKey", config.queryKey, "interval", config.interval, "dataSize", config.dataSize)
	duration, err := time.ParseDuration(config.interval)
	if err != nil {
		logger.Error(ctx, "failed to parse the time string", "interval", config.interval, "error", err)
		return
	}
	ticker := time.NewTicker(duration)
	defer ticker.Stop()
	client := diagram.NewRedisClient()
	// NOTE(review): this loop only honors stopChan; if the ctx can be
	// cancelled independently of the subscription, consider also selecting
	// on ctx.Done() — confirm the intended goroutine lifetime with callers.
	for {
		// Query first so subscribers receive data immediately instead of
		// waiting a full interval for the first tick.
		performQuery(ctx, client, config, fanInChan)
		select {
		case <-ticker.C:
			// Tick elapsed: loop around and query again.
		case <-stopChan:
			logger.Info(ctx, "stop the redis query goroutine via a signal")
			return
		}
	}
}
// performQuery executes one redis range query for the configured key, sorts
// the result by timestamp ascending, and forwards it to fanInChan without
// blocking (frames are dropped with a warning when the channel is full).
func performQuery(ctx context.Context, client *diagram.RedisClient, config redisPollingConfig, fanInChan chan network.RealTimePullTarget) {
	members, err := client.QueryByZRangeByLex(ctx, config.queryKey, config.dataSize)
	if err != nil {
		logger.Error(ctx, "query real time data from redis failed", "key", config.queryKey, "error", err)
		return
	}
	pullDatas := make([]network.RealTimePullData, 0, len(members))
	for _, member := range members {
		// An unchecked member.Member.(string) assertion previously panicked
		// the whole polling goroutine on a malformed entry; skip and log
		// such entries instead.
		ts, ok := member.Member.(string)
		if !ok {
			logger.Error(ctx, "unexpected redis member type, skipping entry", "key", config.queryKey, "member", member.Member)
			continue
		}
		pullDatas = append(pullDatas, network.RealTimePullData{
			Time:  ts,
			Value: member.Score,
		})
	}
	sortPullDataByTimeAscending(ctx, pullDatas)
	targetData := network.RealTimePullTarget{
		ID:    config.targetID,
		Datas: pullDatas,
	}
	select {
	case fanInChan <- targetData:
	default:
		// TODO[BACKPRESSURE-ISSUE] fanInChan may block; when heavy data
		// volume stalls the query loop and frames get dropped, solve it
		// with a backpressure mechanism. #1
		logger.Warn(ctx, "fanInChan is full, dropping real-time data frame", "key", config.queryKey, "data_size", len(members))
	}
}
// sortPullDataByTimeAscending sorts data in place by its integer timestamp
// string, ascending. Entries whose timestamp cannot be parsed are logged once
// each and ordered after all parsable entries — the same ordering the old
// comparator produced, but without re-parsing (and re-logging failures for)
// every element O(n log n) times inside the comparison function.
func sortPullDataByTimeAscending(ctx context.Context, data []network.RealTimePullData) {
	type keyed struct {
		ts    int64
		valid bool
		item  network.RealTimePullData
	}
	// Parse each timestamp exactly once up front.
	entries := make([]keyed, len(data))
	for i, d := range data {
		ts, err := strconv.ParseInt(d.Time, 10, 64)
		if err != nil {
			logger.Error(ctx, "parsing real time data timestamp failed", "index", i, "time", d.Time, "error", err)
		}
		entries[i] = keyed{ts: ts, valid: err == nil, item: d}
	}
	sort.Slice(entries, func(i, j int) bool {
		// Invalid timestamps sort after all valid ones; two invalid
		// entries compare as equal.
		if !entries[i].valid {
			return false
		}
		if !entries[j].valid {
			return true
		}
		return entries[i].ts < entries[j].ts
	})
	for i := range entries {
		data[i] = entries[i].item
	}
}

Some files were not shown because too many files have changed in this diff Show More