Compare commits
No commits in common. "develop" and "feature-loadCircuitDiagram" have entirely different histories.
develop
...
feature-lo
12
.drone.yml
12
.drone.yml
|
|
@ -1,12 +0,0 @@
|
|||
kind: pipeline
|
||||
type: docker
|
||||
name: default
|
||||
|
||||
steps:
|
||||
- name: build
|
||||
image: golang:latest
|
||||
environment:
|
||||
GO111MODULE: on
|
||||
GOPROXY: https://goproxy.cn,direct
|
||||
commands:
|
||||
- go build main.go
|
||||
|
|
@ -21,9 +21,3 @@
|
|||
# Go workspace file
|
||||
go.work
|
||||
|
||||
.vscode
|
||||
.idea
|
||||
# Shield all log files in the log folder
|
||||
/log/
|
||||
# Shield config files in the configs folder
|
||||
/configs/**/*.yaml
|
||||
|
|
|
|||
|
|
@ -1,3 +1,2 @@
|
|||
# ModelRT
|
||||
|
||||
[](http://192.168.46.100:4080/CL-Softwares/modelRT)
|
||||
|
|
|
|||
100
alert/init.go
100
alert/init.go
|
|
@ -1,100 +0,0 @@
|
|||
// Package alert define alert event struct of modelRT project
|
||||
package alert
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"modelRT/constants"
|
||||
)
|
||||
|
||||
var (
|
||||
once sync.Once
|
||||
_globalManagerMu sync.RWMutex
|
||||
_globalManager *EventManager
|
||||
)
|
||||
|
||||
// Event define alert event struct. It records a single alert raised for a
// component anchor, tagged with a severity level and the time it started.
type Event struct {
	ComponentUUID string               // UUID of the component that raised the alert
	AnchorName    string               // name of the monitored anchor parameter on the component
	Level         constants.AlertLevel // alert severity level
	Message       string               // human-readable alert message
	StartTime     int64                // numeric start timestamp (unit not shown here — presumably Unix time; TODO confirm)
}
|
||||
|
||||
// EventManager define store and manager alert event struct. It keeps alert
// events bucketed by severity level and guards the map with a RWMutex so the
// manager can be shared across goroutines.
type EventManager struct {
	mu     sync.RWMutex                     // protects events
	events map[constants.AlertLevel][]Event // alert events grouped by their level
}
|
||||
|
||||
// EventSet define alert event set implement sort.Interface
|
||||
type EventSet []Event
|
||||
|
||||
func (es EventSet) Len() int {
|
||||
return len(es)
|
||||
}
|
||||
|
||||
func (es EventSet) Less(i, j int) bool {
|
||||
return es[i].StartTime < es[j].StartTime
|
||||
}
|
||||
|
||||
func (es EventSet) Swap(i, j int) {
|
||||
es[i], es[j] = es[j], es[i]
|
||||
}
|
||||
|
||||
// AddEvent define add a alert event to event manager
|
||||
func (am *EventManager) AddEvent(event Event) {
|
||||
am.mu.Lock()
|
||||
defer am.mu.Unlock()
|
||||
|
||||
am.events[event.Level] = append(am.events[event.Level], event)
|
||||
}
|
||||
|
||||
// GetEventsByLevel define get alert event by alert level
|
||||
func (am *EventManager) GetEventsByLevel(level constants.AlertLevel) []Event {
|
||||
am.mu.Lock()
|
||||
defer am.mu.Unlock()
|
||||
|
||||
return am.events[level]
|
||||
}
|
||||
|
||||
// GetRangeEventsByLevel define get range alert event by alert level
|
||||
func (am *EventManager) GetRangeEventsByLevel(targetLevel constants.AlertLevel) []Event {
|
||||
var targetEvents []Event
|
||||
|
||||
am.mu.Lock()
|
||||
defer am.mu.Unlock()
|
||||
|
||||
for level, events := range am.events {
|
||||
if targetLevel <= level {
|
||||
targetEvents = append(targetEvents, events...)
|
||||
}
|
||||
}
|
||||
sort.Sort(EventSet(targetEvents))
|
||||
return targetEvents
|
||||
}
|
||||
|
||||
// InitAlertEventManager define new alert event manager
|
||||
func InitAlertEventManager() *EventManager {
|
||||
return &EventManager{
|
||||
events: make(map[constants.AlertLevel][]Event),
|
||||
}
|
||||
}
|
||||
|
||||
// InitAlertManagerInstance return instance of zap logger
|
||||
func InitAlertManagerInstance() *EventManager {
|
||||
once.Do(func() {
|
||||
_globalManager = InitAlertEventManager()
|
||||
})
|
||||
return _globalManager
|
||||
}
|
||||
|
||||
// GetAlertMangerInstance returns the global alert manager instance. It's safe
// for concurrent use: the package-level pointer is read under
// _globalManagerMu's read lock. It returns nil if InitAlertManagerInstance
// has not been called yet.
//
// NOTE(review): "Manger" looks like a typo for "Manager"; renaming the
// exported function would break callers, so it is only flagged here.
func GetAlertMangerInstance() *EventManager {
	_globalManagerMu.RLock()
	manager := _globalManager
	_globalManagerMu.RUnlock()
	return manager
}
|
||||
|
|
@ -1,43 +0,0 @@
|
|||
// Package errcode provides internal error definition and business error definition
|
||||
package errcode
|
||||
|
||||
var (
	// ErrProcessSuccess indicates the request was processed successfully.
	ErrProcessSuccess = newError(20000, "request process success")

	// ErrInvalidToken means the provided token does not conform to the
	// expected format (e.g. missing segments).
	ErrInvalidToken = newError(40001, "invalid token format")

	// ErrCrossToken occurs when an update attempt involves multiple
	// components, which is restricted by business logic.
	ErrCrossToken = newError(40002, "cross-component update not allowed")

	// ErrRetrieveFailed indicates a failure in fetching the
	// project-to-table name mapping from the configuration.
	ErrRetrieveFailed = newError(40003, "retrieve table mapping failed")

	// ErrFoundTargetFailed is returned when the specific database table
	// cannot be identified using the provided token info.
	ErrFoundTargetFailed = newError(40004, "found target table by token failed")

	// ErrDBQueryFailed represents a generic failure during a PostgreSQL
	// SELECT or SCAN operation.
	ErrDBQueryFailed = newError(50001, "query postgres database data failed")

	// ErrDBUpdateFailed represents a failure during a PostgreSQL UPDATE or
	// SAVE operation.
	ErrDBUpdateFailed = newError(50002, "update postgres database data failed")

	// ErrDBzeroAffectedRows occurs when a database operation executes
	// successfully but modifies no records.
	// NOTE(review): the lowercase "zero" breaks MixedCaps naming; renaming
	// the exported variable would break callers, so it is only flagged.
	ErrDBzeroAffectedRows = newError(50003, "zero affected rows")

	// ErrBeginTxFailed indicates that the system failed to start a new
	// PostgreSQL transaction.
	ErrBeginTxFailed = newError(50004, "begin postgres transaction failed")

	// ErrCommitTxFailed indicates that the PostgreSQL transaction could not
	// be committed successfully.
	ErrCommitTxFailed = newError(50005, "postgres database transaction commit failed")

	// ErrCachedQueryFailed indicates an error occurred while attempting to
	// fetch data from the Redis cache.
	ErrCachedQueryFailed = newError(60001, "query redis cached data failed")

	// ErrCacheSyncWarn marks a partial-success state: the database was
	// updated, but the subsequent Redis cache refresh failed.
	ErrCacheSyncWarn = newError(60002, "postgres database updated, but cache sync failed")

	// ErrCacheQueryFailed indicates querying cached data by token failed.
	ErrCacheQueryFailed = newError(60003, "query cached data by token failed")
)
|
||||
|
|
@ -1,22 +0,0 @@
|
|||
// Package errcode provides internal error definition and business error definition
|
||||
package errcode
|
||||
|
||||
import "errors"
|
||||
|
||||
// Database layer error
|
||||
var (
|
||||
// ErrUUIDChangeType define error of check uuid from value failed in uuid from change type
|
||||
ErrUUIDChangeType = errors.New("undefined uuid change type")
|
||||
|
||||
// ErrUpdateRowZero define error of update affected row zero
|
||||
ErrUpdateRowZero = errors.New("update affected rows is zero")
|
||||
|
||||
// ErrDeleteRowZero define error of delete affected row zero
|
||||
ErrDeleteRowZero = errors.New("delete affected rows is zero")
|
||||
|
||||
// ErrQueryRowZero define error of query affected row zero
|
||||
ErrQueryRowZero = errors.New("query affected rows is zero")
|
||||
|
||||
// ErrInsertRowUnexpected define error of insert affected row not reach expected number
|
||||
ErrInsertRowUnexpected = errors.New("the number of inserted data rows don't reach the expected value")
|
||||
)
|
||||
|
|
@ -1,162 +0,0 @@
|
|||
// Package errcode provides internal error definition and business error definition
|
||||
package errcode
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"path"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
var codes = map[int]struct{}{}
|
||||
|
||||
// AppError define struct of internal error. occurred field records the location where the error is triggered
|
||||
type AppError struct {
|
||||
code int
|
||||
msg string
|
||||
cause error
|
||||
occurred string
|
||||
}
|
||||
|
||||
// Error implements the error interface. It renders the full error — code,
// message, trigger location, and the (possibly nested) cause chain — as a
// JSON document built by toStructuredError. A nil receiver yields "" so that
// formatting a nil *AppError does not panic.
func (e *AppError) Error() string {
	if e == nil {
		return ""
	}
	// A marshal failure is reported inline rather than silently swallowed.
	errBytes, err := json.Marshal(e.toStructuredError())
	if err != nil {
		return fmt.Sprintf("Error() is error: json marshal error: %v", err)
	}
	return string(errBytes)
}
|
||||
|
||||
func (e *AppError) String() string {
|
||||
return e.Error()
|
||||
}
|
||||
|
||||
// Code define func return error code
|
||||
func (e *AppError) Code() int {
|
||||
return e.code
|
||||
}
|
||||
|
||||
// Msg define func return error msg
|
||||
func (e *AppError) Msg() string {
|
||||
return e.msg
|
||||
}
|
||||
|
||||
// Cause define func return base error
|
||||
func (e *AppError) Cause() error {
|
||||
return e.cause
|
||||
}
|
||||
|
||||
// WithCause define func return top level predefined errors,where the cause field contains the underlying base error
|
||||
func (e *AppError) WithCause(err error) *AppError {
|
||||
newErr := e.Clone()
|
||||
newErr.cause = err
|
||||
newErr.occurred = getAppErrOccurredInfo()
|
||||
return newErr
|
||||
}
|
||||
|
||||
// Wrap define func packaging information and errors returned by the underlying logic
|
||||
func Wrap(msg string, err error) *AppError {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
appErr := &AppError{code: -1, msg: msg, cause: err}
|
||||
appErr.occurred = getAppErrOccurredInfo()
|
||||
return appErr
|
||||
}
|
||||
|
||||
// UnWrap define func return the error wrapped in structure
|
||||
func (e *AppError) UnWrap() error {
|
||||
return e.cause
|
||||
}
|
||||
|
||||
// Is define func return result of whether target matches this error;
// implemented to support errors.Is(err, target). Two *AppError values are
// considered equal when their codes are equal — message and cause are
// ignored. Non-*AppError targets never match.
//
// NOTE(review): every error built by Wrap carries code -1, so errors.Is
// treats any two wrapped errors as matching each other — confirm intended.
func (e *AppError) Is(target error) bool {
	targetErr, ok := target.(*AppError)
	if !ok {
		return false
	}
	return targetErr.Code() == e.Code()
}
|
||||
|
||||
// As define func return result of whether any error in err's tree matches target. implemented to support errors.As(err, target)
|
||||
func (e *AppError) As(target any) bool {
|
||||
t, ok := target.(**AppError)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
*t = e
|
||||
return true
|
||||
}
|
||||
|
||||
// Clone define func return a new AppError with source AppError's code, msg, cause, occurred
|
||||
func (e *AppError) Clone() *AppError {
|
||||
return &AppError{
|
||||
code: e.code,
|
||||
msg: e.msg,
|
||||
cause: e.cause,
|
||||
occurred: e.occurred,
|
||||
}
|
||||
}
|
||||
|
||||
// newError registers a predefined error with the given code and message.
// Non-negative codes must be unique: a duplicate registration panics (the
// Chinese message says the predefined code must not be repeated), surfacing
// collisions at package initialization. Negative codes (e.g. Wrap's -1) skip
// the uniqueness check entirely.
//
// NOTE(review): the codes map is mutated without locking — assumes newError
// is only called from package-level var initialization (single goroutine);
// confirm it is never called at runtime.
func newError(code int, msg string) *AppError {
	if code > -1 {
		if _, duplicated := codes[code]; duplicated {
			panic(fmt.Sprintf("预定义错误码 %d 不能重复, 请检查后更换", code))
		}
		codes[code] = struct{}{}
	}

	return &AppError{code: code, msg: msg}
}
|
||||
|
||||
// getAppErrOccurredInfo define func return the location where the error is
// triggered, formatted as "func: ..., file: ..., line: ...". Caller(2) skips
// this function and its direct caller (Wrap / WithCause), so the recorded
// frame is the application code that created the error; inserting another
// intermediate call would shift the reported frame. Returns "" when the
// stack frame cannot be resolved.
func getAppErrOccurredInfo() string {
	pc, file, line, ok := runtime.Caller(2)
	if !ok {
		return ""
	}
	// Keep only the base file name to avoid leaking full build paths.
	file = path.Base(file)
	funcName := runtime.FuncForPC(pc).Name()
	triggerInfo := fmt.Sprintf("func: %s, file: %s, line: %d", funcName, file, line)
	return triggerInfo
}
|
||||
|
||||
// AppendMsg define func append a message to the existing error message
|
||||
func (e *AppError) AppendMsg(msg string) *AppError {
|
||||
n := e.Clone()
|
||||
n.msg = fmt.Sprintf("%s, %s", e.msg, msg)
|
||||
return n
|
||||
}
|
||||
|
||||
// SetMsg define func set error message into specify field
|
||||
func (e *AppError) SetMsg(msg string) *AppError {
|
||||
n := e.Clone()
|
||||
n.msg = msg
|
||||
return n
|
||||
}
|
||||
|
||||
type formattedErr struct {
|
||||
Code int `json:"code"`
|
||||
Msg string `json:"msg"`
|
||||
Cause interface{} `json:"cause"`
|
||||
Occurred string `json:"occurred"`
|
||||
}
|
||||
|
||||
// toStructuredError define func convert AppError to structured error for better readability
|
||||
func (e *AppError) toStructuredError() *formattedErr {
|
||||
fe := new(formattedErr)
|
||||
fe.Code = e.Code()
|
||||
fe.Msg = e.Msg()
|
||||
fe.Occurred = e.occurred
|
||||
if e.cause != nil {
|
||||
if appErr, ok := e.cause.(*AppError); ok {
|
||||
fe.Cause = appErr.toStructuredError()
|
||||
} else {
|
||||
fe.Cause = e.cause.Error()
|
||||
}
|
||||
}
|
||||
return fe
|
||||
}
|
||||
|
|
@ -1,10 +0,0 @@
|
|||
package config
|
||||
|
||||
import "context"
|
||||
|
||||
// AnchorChanConfig define anchor params channel config struct
|
||||
type AnchorChanConfig struct {
|
||||
Ctx context.Context // 结束 context
|
||||
AnchorChan chan AnchorParamConfig // 锚定参量实时值传递通道
|
||||
ReadyChan chan struct{} // 就绪通知通道
|
||||
}
|
||||
|
|
@ -1,56 +0,0 @@
|
|||
// Package config define config struct of model runtime service
|
||||
package config
|
||||
|
||||
import (
|
||||
"modelRT/constants"
|
||||
)
|
||||
|
||||
// AnchorParamListConfig define anchor params list config struct
|
||||
type AnchorParamListConfig struct {
|
||||
AnchorName string
|
||||
FuncType string // 函数类型
|
||||
UpperLimit float64 // 比较值上限
|
||||
LowerLimit float64 // 比较值下限
|
||||
}
|
||||
|
||||
// AnchorParamBaseConfig define anchor params base config struct
|
||||
type AnchorParamBaseConfig struct {
|
||||
ComponentUUID string // componentUUID
|
||||
AnchorName string // 锚定参量名称
|
||||
CompareValUpperLimit float64 // 比较值上限
|
||||
CompareValLowerLimit float64 // 比较值下限
|
||||
AnchorRealTimeData []float64 // 锚定参数实时值
|
||||
}
|
||||
|
||||
// AnchorParamConfig define anchor params config struct
|
||||
type AnchorParamConfig struct {
|
||||
AnchorParamBaseConfig
|
||||
CalculateFunc func(archorValue float64, args ...float64) float64 // 计算函数
|
||||
CalculateParams []float64 // 计算参数
|
||||
}
|
||||
|
||||
// baseVoltageFunc converts a voltage anchor value to a current: I = U / R.
// args[0] is the resistance. SelectAnchorCalculateFuncAndParams supplies the
// parameter slice as []float64{resistance}, so the resistance lives at index
// 0 — the original read args[1], which panicked with index out of range on
// every invocation.
var baseVoltageFunc = func(anchorValue float64, args ...float64) float64 {
	voltage := anchorValue
	resistance := args[0]
	return voltage / resistance
}

// baseCurrentFunc converts a current anchor value to a voltage: U = I * R.
// args[0] is the resistance (see baseVoltageFunc for the parameter layout;
// same args[1] → args[0] fix applies).
var baseCurrentFunc = func(anchorValue float64, args ...float64) float64 {
	current := anchorValue
	resistance := args[0]
	return current * resistance
}
|
||||
|
||||
// SelectAnchorCalculateFuncAndParams define select anchor func and anchor
// calculate params by component type, anchor name and component data.
//
// For the demo component type it maps the "voltage" and "current" anchors to
// their conversion functions and returns the component's resistance (read
// from componentData) as the single calculate parameter, i.e. the returned
// slice has exactly one element at index 0. Any other type/anchor
// combination yields (nil, empty slice).
//
// NOTE(review): the unchecked .(float64) assertions panic when "resistance"
// is missing or has another type — presumably componentData is validated
// upstream; confirm against callers.
func SelectAnchorCalculateFuncAndParams(componentType int, anchorName string, componentData map[string]interface{}) (func(archorValue float64, args ...float64) float64, []float64) {
	if componentType == constants.DemoType {
		if anchorName == "voltage" {
			resistance := componentData["resistance"].(float64)
			return baseVoltageFunc, []float64{resistance}
		} else if anchorName == "current" {
			resistance := componentData["resistance"].(float64)
			return baseCurrentFunc, []float64{resistance}
		}
	}
	return nil, []float64{}
}
|
||||
|
|
@ -1,4 +1,4 @@
|
|||
// Package config define config struct of model runtime service
|
||||
// Package config define config struct of wave record project
|
||||
package config
|
||||
|
||||
import (
|
||||
|
|
@ -7,31 +7,23 @@ import (
|
|||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
// BaseConfig define config struct of base params config
|
||||
// BaseConfig define config stuct of base params config
|
||||
type BaseConfig struct {
|
||||
GridID int64 `mapstructure:"grid_id"`
|
||||
ZoneID int64 `mapstructure:"zone_id"`
|
||||
StationID int64 `mapstructure:"station_id"`
|
||||
}
|
||||
|
||||
// ServiceConfig define config struct of service config
|
||||
type ServiceConfig struct {
|
||||
ServiceAddr string `mapstructure:"service_addr"`
|
||||
ServiceName string `mapstructure:"service_name"`
|
||||
SecretKey string `mapstructure:"secret_key"`
|
||||
}
|
||||
|
||||
// KafkaConfig define config struct of kafka config
|
||||
// KafkaConfig define config stuct of kafka config
|
||||
type KafkaConfig struct {
|
||||
Servers string `mapstructure:"Servers"`
|
||||
GroupID string `mapstructure:"group_id"`
|
||||
Topic string `mapstructure:"topic"`
|
||||
AutoOffsetReset string `mapstructure:"auto_offset_reset"`
|
||||
EnableAutoCommit string `mapstructure:"enable_auto_commit"`
|
||||
ReadMessageTimeDuration float32 `mapstructure:"read_message_time_duration"`
|
||||
Servers string `mapstructure:"Servers"`
|
||||
GroupID string `mapstructure:"group_id"`
|
||||
Topic string `mapstructure:"topic"`
|
||||
AutoOffsetReset string `mapstructure:"auto_offset_reset"`
|
||||
EnableAutoCommit string `mapstructure:"enable_auto_commit"`
|
||||
}
|
||||
|
||||
// PostgresConfig define config struct of postgres config
|
||||
// PostgresConfig define config stuct of postgres config
|
||||
type PostgresConfig struct {
|
||||
Port int `mapstructure:"port"`
|
||||
Host string `mapstructure:"host"`
|
||||
|
|
@ -40,7 +32,7 @@ type PostgresConfig struct {
|
|||
Password string `mapstructure:"password"`
|
||||
}
|
||||
|
||||
// LoggerConfig define config struct of zap logger config
|
||||
// LoggerConfig define config stuct of zap logger config
|
||||
type LoggerConfig struct {
|
||||
Mode string `mapstructure:"mode"`
|
||||
Level string `mapstructure:"level"`
|
||||
|
|
@ -48,47 +40,24 @@ type LoggerConfig struct {
|
|||
MaxSize int `mapstructure:"maxsize"`
|
||||
MaxBackups int `mapstructure:"maxbackups"`
|
||||
MaxAge int `mapstructure:"maxage"`
|
||||
Compress bool `mapstructure:"compress"`
|
||||
}
|
||||
|
||||
// RedisConfig define config struct of redis config
|
||||
type RedisConfig struct {
|
||||
Addr string `mapstructure:"addr"`
|
||||
Password string `mapstructure:"password"`
|
||||
DB int `mapstructure:"db"`
|
||||
PoolSize int `mapstructure:"poolsize"`
|
||||
Timeout int `mapstructure:"timeout"`
|
||||
}
|
||||
|
||||
// AntsConfig define config struct of ants pool config
|
||||
// AntsConfig define config stuct of ants pool config
|
||||
type AntsConfig struct {
|
||||
ParseConcurrentQuantity int `mapstructure:"parse_concurrent_quantity"` // parse comtrade file concurrent quantity
|
||||
RTDReceiveConcurrentQuantity int `mapstructure:"rtd_receive_concurrent_quantity"` // polling real time data concurrent quantity
|
||||
ParseConcurrentQuantity int `mapstructure:"parse_concurrent_quantity"` // parse comtrade file concurrent quantity
|
||||
}
|
||||
|
||||
// DataRTConfig define config struct of data runtime server api config
|
||||
type DataRTConfig struct {
|
||||
Host string `mapstructure:"host"`
|
||||
Port int64 `mapstructure:"port"`
|
||||
PollingAPI string `mapstructure:"polling_api"`
|
||||
Method string `mapstructure:"polling_api_method"`
|
||||
}
|
||||
|
||||
// ModelRTConfig define config struct of model runtime server
|
||||
// ModelRTConfig define config stuct of model runtime server
|
||||
type ModelRTConfig struct {
|
||||
BaseConfig `mapstructure:"base"`
|
||||
ServiceConfig `mapstructure:"service"`
|
||||
PostgresConfig `mapstructure:"postgres"`
|
||||
KafkaConfig `mapstructure:"kafka"`
|
||||
LoggerConfig `mapstructure:"logger"`
|
||||
AntsConfig `mapstructure:"ants"`
|
||||
DataRTConfig `mapstructure:"dataRT"`
|
||||
LockerRedisConfig RedisConfig `mapstructure:"locker_redis"`
|
||||
StorageRedisConfig RedisConfig `mapstructure:"storage_redis"`
|
||||
PostgresDBURI string `mapstructure:"-"`
|
||||
BaseConfig `mapstructure:"base"`
|
||||
PostgresConfig `mapstructure:"postgres"`
|
||||
KafkaConfig `mapstructure:"kafka"`
|
||||
LoggerConfig `mapstructure:"logger"`
|
||||
AntsConfig `mapstructure:"ants"`
|
||||
PostgresDBURI string `mapstructure:"-"`
|
||||
}
|
||||
|
||||
// ReadAndInitConfig return modelRT project config struct
|
||||
// ReadAndInitConfig return wave record project config struct
|
||||
func ReadAndInitConfig(configDir, configName, configType string) (modelRTConfig ModelRTConfig) {
|
||||
config := viper.New()
|
||||
config.AddConfigPath(configDir)
|
||||
|
|
@ -101,11 +70,12 @@ func ReadAndInitConfig(configDir, configName, configType string) (modelRTConfig
|
|||
panic(err)
|
||||
}
|
||||
|
||||
if err := config.Unmarshal(&modelRTConfig); err != nil {
|
||||
rtConfig := ModelRTConfig{}
|
||||
if err := config.Unmarshal(&rtConfig); err != nil {
|
||||
panic(fmt.Sprintf("unmarshal modelRT config failed:%s\n", err.Error()))
|
||||
}
|
||||
|
||||
// init postgres db uri
|
||||
modelRTConfig.PostgresDBURI = fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s", modelRTConfig.PostgresConfig.Host, modelRTConfig.PostgresConfig.Port, modelRTConfig.PostgresConfig.User, modelRTConfig.PostgresConfig.Password, modelRTConfig.PostgresConfig.DataBase)
|
||||
modelRTConfig.PostgresDBURI = fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s", rtConfig.Host, rtConfig.Port, rtConfig.User, rtConfig.Password, rtConfig.DataBase)
|
||||
|
||||
return modelRTConfig
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,40 @@
|
|||
postgres:
|
||||
host: "localhost"
|
||||
port: 5432
|
||||
database: "model_rt"
|
||||
user: "postgres"
|
||||
password: "coslight"
|
||||
|
||||
kafka:
|
||||
servers: "localhost:9092"
|
||||
port: 9092
|
||||
group_id: "modelRT"
|
||||
topic: ""
|
||||
auto_offset_reset: "earliest"
|
||||
enable_auto_commit: "false"
|
||||
|
||||
# influxdb:
|
||||
# host: "localhost"
|
||||
# port: "8086"
|
||||
# token: "lCuiQ316qlly3iFeoi1EUokPJ0XxW-5lnG-3rXsKaaZSjfuxO5EaZfFdrNGM7Zlrdk1PrN_7TOsM_SCu9Onyew=="
|
||||
# org: "coslight"
|
||||
# bucket: "wave_record"
|
||||
|
||||
# zap logger config
|
||||
logger:
|
||||
mode: "development"
|
||||
level: "debug"
|
||||
filepath: "/home/douxu/log/wave_record-%s.log"
|
||||
maxsize: 1
|
||||
maxbackups: 5
|
||||
maxage: 30
|
||||
|
||||
# ants config
|
||||
ants:
|
||||
parse_concurrent_quantity: 10
|
||||
|
||||
# modelRT base config
|
||||
base:
|
||||
grid_id: 1
|
||||
zone_id: 1
|
||||
station_id: 1
|
||||
|
|
@ -1,4 +1,3 @@
|
|||
// Package config define config struct of model runtime service
|
||||
package config
|
||||
|
||||
import (
|
||||
|
|
@ -9,6 +8,5 @@ import (
|
|||
|
||||
type ModelParseConfig struct {
|
||||
ComponentInfo orm.Component
|
||||
Ctx context.Context
|
||||
AnchorChan chan AnchorParamConfig
|
||||
Context context.Context
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,5 +1,4 @@
|
|||
// Package constants define constant variable
|
||||
package constants
|
||||
package constant
|
||||
|
||||
const (
|
||||
// 母线服役属性
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
// Package constant define constant value
|
||||
package constant
|
||||
|
||||
const (
|
||||
// NullableType 空类型类型
|
||||
NullableType = iota
|
||||
// BusbarType 母线类型
|
||||
BusbarType
|
||||
// AsynchronousMotorType 异步电动机类型
|
||||
AsynchronousMotorType
|
||||
)
|
||||
|
|
@ -1,11 +1,9 @@
|
|||
// Package constants define constant variable
|
||||
package constants
|
||||
// Package constant define constant value
|
||||
package constant
|
||||
|
||||
const (
|
||||
// DevelopmentLogMode define development operator environment for modelRT project
|
||||
// DevelopmentLogMode define development operator environment for wave record project
|
||||
DevelopmentLogMode = "development"
|
||||
// DebugLogMode define debug operator environment for modelRT project
|
||||
DebugLogMode = "debug"
|
||||
// ProductionLogMode define production operator environment for modelRT project
|
||||
// ProductionLogMode define production operator environment for wave record project
|
||||
ProductionLogMode = "production"
|
||||
)
|
||||
|
|
@ -1,5 +1,5 @@
|
|||
// Package constants define constant variable
|
||||
package constants
|
||||
// Package constant define constant value
|
||||
package constant
|
||||
|
||||
const (
|
||||
// LogTimeFormate define time format for log file name
|
||||
|
|
@ -1,57 +0,0 @@
|
|||
// Package constants define constant variable
|
||||
package constants
|
||||
|
||||
// AlertLevel define alert level type. Levels are ordered from least to most
// severe; the zero value AllAlertLevel sits below every concrete level.
type AlertLevel int

const (
	// AllAlertLevel define all alert level
	AllAlertLevel AlertLevel = iota
	// InfoAlertLevel define info alert level
	InfoAlertLevel
	// WarningAlertLevel define warning alert level
	WarningAlertLevel
	// ErrorAlertLevel define error alert level
	ErrorAlertLevel
	// FatalAlertLevel define fatal alert level
	FatalAlertLevel
)

// alertLevelNames maps each defined AlertLevel to its display name, indexed
// by the level's integer value.
var alertLevelNames = [...]string{
	AllAlertLevel:     "ALL",
	InfoAlertLevel:    "INFO",
	WarningAlertLevel: "WARNING",
	ErrorAlertLevel:   "ERROR",
	FatalAlertLevel:   "FATAL",
}

// String returns the display name of the level, or "Unknown" for any value
// outside the defined range.
func (a AlertLevel) String() string {
	if a < 0 || int(a) >= len(alertLevelNames) {
		return "Unknown"
	}
	return alertLevelNames[a]
}

// LevelCompare reports whether a is at most as severe as b (a <= b).
func (a AlertLevel) LevelCompare(b AlertLevel) bool {
	return a <= b
}
|
||||
|
||||
// // AlertLevelFromString convert string to alert level
|
||||
// func AlertLevelFromString(level int64) AlertLevel {
|
||||
// switch level {
|
||||
// case :
|
||||
// return AllAlertLevel
|
||||
// case "INFO":
|
||||
// return InfoAlertLevel
|
||||
// case "WARNING":
|
||||
// return WarningAlertLevel
|
||||
// case "ERROR":
|
||||
// return ErrorAlertLevel
|
||||
// case "FATAL":
|
||||
// return FatalAlertLevel
|
||||
// default:
|
||||
// return AllAlertLevel
|
||||
// }
|
||||
// }
|
||||
|
|
@ -1,17 +0,0 @@
|
|||
// Package constants define constant variable
|
||||
package constants
|
||||
|
||||
const (
|
||||
// CodeSuccess define constant to indicates that the API was successfully processed
|
||||
CodeSuccess = 20000
|
||||
// CodeInvalidParamFailed define constant to indicates request parameter parsing failed
|
||||
CodeInvalidParamFailed = 40001
|
||||
// CodeDBQueryFailed define constant to indicates database query operation failed
|
||||
CodeDBQueryFailed = 50001
|
||||
// CodeDBUpdateailed define constant to indicates database update operation failed
|
||||
CodeDBUpdateailed = 50002
|
||||
// CodeRedisQueryFailed define constant to indicates redis query operation failed
|
||||
CodeRedisQueryFailed = 60001
|
||||
// CodeRedisUpdateFailed define constant to indicates redis update operation failed
|
||||
CodeRedisUpdateFailed = 60002
|
||||
)
|
||||
|
|
@ -1,11 +0,0 @@
|
|||
// Package constants define constant variable
|
||||
package constants
|
||||
|
||||
const (
|
||||
// ShortAttrKeyLenth define short attribute key length
|
||||
ShortAttrKeyLenth int = 4
|
||||
// LongAttrKeyLenth define long attribute key length
|
||||
LongAttrKeyLenth int = 7
|
||||
)
|
||||
|
||||
// component、base_extend、rated、setup、model、stable、bay、craft、integrity、behavior
|
||||
|
|
@ -1,17 +0,0 @@
|
|||
// Package constants define constant variable
|
||||
package constants
|
||||
|
||||
import "time"
|
||||
|
||||
const (
|
||||
// FanInChanMaxSize define maximum buffer capacity by fanChannel
|
||||
FanInChanMaxSize = 10000
|
||||
// SendMaxBatchSize define maximum buffer capacity
|
||||
// TODO 后续优化批处理大小
|
||||
SendMaxBatchSize = 100
|
||||
// SendChanBufferSize define maximum buffer capacity by channel
|
||||
SendChanBufferSize = 100
|
||||
|
||||
// SendMaxBatchInterval define maximum aggregate latency
|
||||
SendMaxBatchInterval = 20 * time.Millisecond
|
||||
)
|
||||
|
|
@ -1,7 +0,0 @@
|
|||
// Package constants define constant variable
|
||||
package constants
|
||||
|
||||
type contextKey string
|
||||
|
||||
// MeasurementUUIDKey define measurement uuid key into context
|
||||
const MeasurementUUIDKey contextKey = "measurement_uuid"
|
||||
|
|
@ -1,13 +0,0 @@
|
|||
// Package constants define constant variable
|
||||
package constants
|
||||
|
||||
const (
|
||||
// NullableType 空类型类型
|
||||
NullableType = iota
|
||||
// BusbarType 母线类型
|
||||
BusbarType
|
||||
// AsyncMotorType 异步电动机类型
|
||||
AsyncMotorType
|
||||
// DemoType Demo类型
|
||||
DemoType
|
||||
)
|
||||
|
|
@ -1,57 +0,0 @@
|
|||
// Package constants define constant variable
|
||||
package constants
|
||||
|
||||
import "errors"
|
||||
|
||||
var (
|
||||
// ErrUUIDFromCheckT1 define error of check uuid from value failed in uuid from change type
|
||||
ErrUUIDFromCheckT1 = errors.New("in uuid from change type, value of new uuid_from is equal value of old uuid_from")
|
||||
// ErrUUIDToCheckT1 define error of check uuid to value failed in uuid from change type
|
||||
ErrUUIDToCheckT1 = errors.New("in uuid from change type, value of new uuid_to is not equal value of old uuid_to")
|
||||
|
||||
// ErrUUIDFromCheckT2 define error of check uuid from value failed in uuid to change type
|
||||
ErrUUIDFromCheckT2 = errors.New("in uuid to change type, value of new uuid_from is not equal value of old uuid_from")
|
||||
// ErrUUIDToCheckT2 define error of check uuid to value failed in uuid to change type
|
||||
ErrUUIDToCheckT2 = errors.New("in uuid to change type, value of new uuid_to is equal value of old uuid_to")
|
||||
|
||||
// ErrUUIDFromCheckT3 define error of check uuid from value failed in uuid add change type
|
||||
ErrUUIDFromCheckT3 = errors.New("in uuid add change type, value of old uuid_from is not empty")
|
||||
// ErrUUIDToCheckT3 define error of check uuid to value failed in uuid add change type
|
||||
ErrUUIDToCheckT3 = errors.New("in uuid add change type, value of old uuid_to is not empty")
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrInvalidAddressType define error of invalid io address type
|
||||
ErrInvalidAddressType = errors.New("invalid address type")
|
||||
// ErrUnknownDataType define error of unknown measurement data source type
|
||||
ErrUnknownDataType = errors.New("unknown data type")
|
||||
// ErrExceedsLimitType define error of channel number exceeds limit for telemetry
|
||||
ErrExceedsLimitType = errors.New("channel number exceeds limit for Telemetry")
|
||||
// ErrUnsupportedChannelPrefixType define error of unsupported channel prefix
|
||||
ErrUnsupportedChannelPrefixType = errors.New("unsupported channel prefix")
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrFormatUUID define error of format uuid string to uuid.UUID type failed
|
||||
ErrFormatUUID = errors.New("format string type to uuid.UUID type failed")
|
||||
// ErrFormatCache define error of format cache with any type to cacheItem type failed
|
||||
ErrFormatCache = errors.New("format any teype to cache item type failed")
|
||||
)
|
||||
|
||||
// ErrGetClientToken define error of can not get client_token from context
|
||||
var ErrGetClientToken = errors.New("can not get client_token from context")
|
||||
|
||||
// ErrQueryComponentByUUID define error of query component from db by uuid failed
|
||||
var ErrQueryComponentByUUID = errors.New("query component from db failed by uuid")
|
||||
|
||||
// ErrChanIsNil define error of channel is nil
|
||||
var ErrChanIsNil = errors.New("this channel is nil")
|
||||
|
||||
// ErrConcurrentModify define error of concurrent modification detected
|
||||
var ErrConcurrentModify = errors.New("existed concurrent modification risk")
|
||||
|
||||
// ErrUnsupportedSubAction define error of unsupported real time data subscription action
|
||||
var ErrUnsupportedSubAction = errors.New("unsupported real time data subscription action")
|
||||
|
||||
// ErrUnsupportedLinkAction define error of unsupported measurement link process action
|
||||
var ErrUnsupportedLinkAction = errors.New("unsupported rmeasurement link process action")
|
||||
|
|
@ -1,31 +0,0 @@
|
|||
// Package constants define constant variable
|
||||
package constants
|
||||
|
||||
const (
|
||||
// TIBreachTriggerType define out of bounds type constant
|
||||
TIBreachTriggerType = "trigger"
|
||||
)
|
||||
|
||||
const (
|
||||
// TelemetryUpLimit define telemetry upper limit
|
||||
TelemetryUpLimit = "up"
|
||||
// TelemetryUpUpLimit define telemetry upper upper limit
|
||||
TelemetryUpUpLimit = "upup"
|
||||
|
||||
// TelemetryDownLimit define telemetry limit
|
||||
TelemetryDownLimit = "down"
|
||||
// TelemetryDownDownLimit define telemetry lower lower limit
|
||||
TelemetryDownDownLimit = "downdown"
|
||||
)
|
||||
|
||||
const (
|
||||
// TelesignalRaising define telesignal raising edge
|
||||
TelesignalRaising = "raising"
|
||||
// TelesignalFalling define telesignal falling edge
|
||||
TelesignalFalling = "falling"
|
||||
)
|
||||
|
||||
const (
|
||||
// MinBreachCount define min breach count of real time data
|
||||
MinBreachCount = 10
|
||||
)
|
||||
|
|
@ -1,37 +0,0 @@
|
|||
// Package constants define constant variable
|
||||
package constants
|
||||
|
||||
const (
|
||||
// DataSourceTypeCL3611 define CL3611 type
|
||||
DataSourceTypeCL3611 = 1
|
||||
// DataSourceTypePower104 define electricity 104 protocol type
|
||||
DataSourceTypePower104 = 2
|
||||
)
|
||||
|
||||
// channel name prefix
|
||||
const (
|
||||
ChannelPrefixTelemetry = "Telemetry"
|
||||
ChannelPrefixTelesignal = "Telesignal"
|
||||
ChannelPrefixTelecommand = "Telecommand"
|
||||
ChannelPrefixTeleadjusting = "Teleadjusting"
|
||||
ChannelPrefixSetpoints = "Setpoints"
|
||||
)
|
||||
|
||||
// channel name suffix
|
||||
const (
|
||||
ChannelSuffixP = "P"
|
||||
ChannelSuffixQ = "Q"
|
||||
ChannelSuffixS = "S"
|
||||
ChannelSuffixPS = "PS"
|
||||
ChannelSuffixF = "F"
|
||||
ChannelSuffixDeltaF = "deltaF"
|
||||
ChannelSuffixUAB = "UAB"
|
||||
ChannelSuffixUBC = "UBC"
|
||||
ChannelSuffixUCA = "UCA"
|
||||
)
|
||||
|
||||
const (
|
||||
// MaxIdentifyHierarchy define max data indentify syntax hierarchy
|
||||
MaxIdentifyHierarchy = 7
|
||||
IdentifyHierarchy = 4
|
||||
)
|
||||
|
|
@ -1,104 +0,0 @@
|
|||
// Package constants define constant variable
|
||||
package constants
|
||||
|
||||
const (
|
||||
// DefaultScore define the default score for redissearch suggestion
|
||||
DefaultScore = 1.0
|
||||
)
|
||||
|
||||
const (
|
||||
// RedisAllGridSetKey define redis set key which store all grid tag keys
|
||||
RedisAllGridSetKey = "grid_tag_keys"
|
||||
|
||||
// RedisAllZoneSetKey define redis set key which store all zone tag keys
|
||||
RedisAllZoneSetKey = "zone_tag_keys"
|
||||
|
||||
// RedisAllStationSetKey define redis set key which store all station tag keys
|
||||
RedisAllStationSetKey = "station_tag_keys"
|
||||
|
||||
// RedisAllCompNSPathSetKey define redis set key which store all component nspath keys
|
||||
RedisAllCompNSPathSetKey = "component_nspath_keys"
|
||||
|
||||
// RedisAllCompTagSetKey define redis set key which store all component tag keys
|
||||
RedisAllCompTagSetKey = "component_tag_keys"
|
||||
|
||||
// RedisAllConfigSetKey define redis set key which store all config keys
|
||||
RedisAllConfigSetKey = "config_keys"
|
||||
|
||||
// RedisAllMeasTagSetKey define redis set key which store all measurement tag keys
|
||||
RedisAllMeasTagSetKey = "measurement_tag_keys"
|
||||
|
||||
// RedisSpecGridZoneSetKey define redis set key which store all zone tag keys under specific grid
|
||||
RedisSpecGridZoneSetKey = "%s_zone_tag_keys"
|
||||
|
||||
// RedisSpecZoneStationSetKey define redis set key which store all station tag keys under specific zone
|
||||
RedisSpecZoneStationSetKey = "%s_station_tag_keys"
|
||||
|
||||
// RedisSpecStationCompNSPATHSetKey define redis set key which store all component nspath keys under specific station
|
||||
RedisSpecStationCompNSPATHSetKey = "%s_component_nspath_keys"
|
||||
|
||||
// RedisSpecCompNSPathCompTagSetKey define redis set key which store all component tag keys under specific component nspath
|
||||
RedisSpecCompNSPathCompTagSetKey = "%s_component_tag_keys"
|
||||
|
||||
// RedisSpecCompTagMeasSetKey define redis set key which store all measurement tag keys under specific component tag
|
||||
RedisSpecCompTagMeasSetKey = "%s_measurement_tag_keys"
|
||||
)
|
||||
|
||||
const (
|
||||
// SearchLinkAddAction define search link add action
|
||||
SearchLinkAddAction = "add"
|
||||
// SearchLinkDelAction define search link del action
|
||||
SearchLinkDelAction = "del"
|
||||
)
|
||||
|
||||
// RecommendHierarchyType define the hierarchy levels used for redis recommend search
|
||||
type RecommendHierarchyType int
|
||||
|
||||
const (
|
||||
// GridRecommendHierarchyType define grid hierarch for redis recommend search
|
||||
GridRecommendHierarchyType RecommendHierarchyType = iota + 1
|
||||
// ZoneRecommendHierarchyType define zone hierarch for redis recommend search
|
||||
ZoneRecommendHierarchyType
|
||||
// StationRecommendHierarchyType define station hierarch for redis recommend search
|
||||
StationRecommendHierarchyType
|
||||
// CompNSPathRecommendHierarchyType define component nspath hierarch for redis recommend search
|
||||
CompNSPathRecommendHierarchyType
|
||||
// CompTagRecommendHierarchyType define component tag hierarch for redis recommend search
|
||||
CompTagRecommendHierarchyType
|
||||
// ConfigRecommendHierarchyType define config hierarch for redis recommend search
|
||||
ConfigRecommendHierarchyType
|
||||
// MeasTagRecommendHierarchyType define measurement tag hierarch for redis recommend search
|
||||
MeasTagRecommendHierarchyType
|
||||
)
|
||||
|
||||
// String implements fmt.Stringer interface and returns the string representation of the type.
|
||||
func (r RecommendHierarchyType) String() string {
|
||||
switch r {
|
||||
case GridRecommendHierarchyType:
|
||||
return "grid_tag"
|
||||
case ZoneRecommendHierarchyType:
|
||||
return "zone_tag"
|
||||
case StationRecommendHierarchyType:
|
||||
return "station_tag"
|
||||
case CompNSPathRecommendHierarchyType:
|
||||
return "comp_nspath"
|
||||
case CompTagRecommendHierarchyType:
|
||||
return "comp_tag"
|
||||
case ConfigRecommendHierarchyType:
|
||||
return "config"
|
||||
case MeasTagRecommendHierarchyType:
|
||||
return "meas_tag"
|
||||
default:
|
||||
// 返回一个包含原始数值的默认字符串,以便于调试
|
||||
return "unknown_recommend_type(" + string(rune(r)) + ")"
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
// FullRecommendLength define full recommend length with all tokens
|
||||
FullRecommendLength = "t1.t2.t3.t4.t5.t6.t7"
|
||||
// IsLocalRecommendLength define is local recommend length with specific tokens
|
||||
IsLocalRecommendLength = "t4.t5.t6.t7"
|
||||
// token1.token2.token3.token4.token7
|
||||
// token4.token7
|
||||
)
|
||||
|
|
@ -1,7 +0,0 @@
|
|||
// Package constants define constant variable
|
||||
package constants
|
||||
|
||||
const (
|
||||
// RedisSearchDictName define redis search dictionary name
|
||||
RedisSearchDictName = "search_suggestions_dict"
|
||||
)
|
||||
|
|
@ -1,22 +0,0 @@
|
|||
// Package constants define constant variable
|
||||
package constants
|
||||
|
||||
const (
|
||||
// RespCodeSuccess define constant to indicates that the API was processed success
|
||||
RespCodeSuccess = 2000
|
||||
|
||||
// RespCodeSuccessWithNoSub define constant to ndicates that the request was processed successfully, with all subscriptions removed for the given client_id.
|
||||
RespCodeSuccessWithNoSub = 2101
|
||||
|
||||
// RespCodeFailed define constant to indicates that the API was processed failed
|
||||
RespCodeFailed = 3000
|
||||
|
||||
// RespCodeInvalidParams define constant to indicates that the request parameters failed to validate, parsing failed, or the action is invalid
|
||||
RespCodeInvalidParams = 4001
|
||||
|
||||
// RespCodeUnauthorized define constant to indicates insufficient permissions or an invalid ClientID
|
||||
RespCodeUnauthorized = 4002
|
||||
|
||||
// RespCodeServerError define constants to indicates a serious internal server error (such as database disconnection or code panic)
|
||||
RespCodeServerError = 5000
|
||||
)
|
||||
|
|
@ -1,85 +0,0 @@
|
|||
// Package constants define constant variable
|
||||
package constants
|
||||
|
||||
const (
|
||||
// SubStartAction define the real time subscription start action
|
||||
SubStartAction string = "start"
|
||||
// SubStopAction define the real time subscription stop action
|
||||
SubStopAction string = "stop"
|
||||
// SubAppendAction define the real time subscription append action
|
||||
SubAppendAction string = "append"
|
||||
// SubUpdateAction define the real time subscription update action
|
||||
SubUpdateAction string = "update"
|
||||
)
|
||||
|
||||
// 定义状态常量
|
||||
// TODO 从4位格式修改为5位格式
|
||||
const (
|
||||
// SubSuccessCode define subscription success code
|
||||
SubSuccessCode = "1001"
|
||||
// SubFailedCode define subscription failed code
|
||||
SubFailedCode = "1002"
|
||||
// RTDSuccessCode define real time data return success code
|
||||
RTDSuccessCode = "1003"
|
||||
// RTDFailedCode define real time data return failed code
|
||||
RTDFailedCode = "1004"
|
||||
// CancelSubSuccessCode define cancel subscription success code
|
||||
CancelSubSuccessCode = "1005"
|
||||
// CancelSubFailedCode define cancel subscription failed code
|
||||
CancelSubFailedCode = "1006"
|
||||
// SubRepeatCode define subscription repeat code
|
||||
SubRepeatCode = "1007"
|
||||
// UpdateSubSuccessCode define update subscription success code
|
||||
UpdateSubSuccessCode = "1008"
|
||||
// UpdateSubFailedCode define update subscription failed code
|
||||
UpdateSubFailedCode = "1009"
|
||||
)
|
||||
|
||||
const (
|
||||
// SysCtrlPrefix define to indicates the prefix for all system control directives,facilitating unified parsing within the sendDataStream goroutine
|
||||
SysCtrlPrefix = "SYS_CTRL_"
|
||||
|
||||
// SysCtrlAllRemoved define to indicates that all active polling targets have been removed for the current client, and no further data streams are active
|
||||
SysCtrlAllRemoved = "SYS_CTRL_ALL_REMOVED"
|
||||
|
||||
// SysCtrlSessionExpired define to indicates reserved for indicating that the current websocket session has timed out or is no longer valid
|
||||
SysCtrlSessionExpired = "SYS_CTRL_SESSION_EXPIRED"
|
||||
)
|
||||
|
||||
const (
|
||||
// SubSuccessMsg define subscription success message
|
||||
SubSuccessMsg = "subscription success"
|
||||
// SubFailedMsg define subscription failed message
|
||||
SubFailedMsg = "subscription failed"
|
||||
// RTDSuccessMsg define real time data return success message
|
||||
RTDSuccessMsg = "real time data return success"
|
||||
// RTDFailedMsg define real time data return failed message
|
||||
RTDFailedMsg = "real time data return failed"
|
||||
// CancelSubSuccessMsg define cancel subscription success message
|
||||
CancelSubSuccessMsg = "cancel subscription success"
|
||||
// CancelSubFailedMsg define cancel subscription failed message
|
||||
CancelSubFailedMsg = "cancel subscription failed"
|
||||
// SubRepeatMsg define subscription repeat message
|
||||
SubRepeatMsg = "subscription repeat in target interval"
|
||||
// UpdateSubSuccessMsg define update subscription success message
|
||||
UpdateSubSuccessMsg = "update subscription success"
|
||||
// UpdateSubFailedMsg define update subscription failed message
|
||||
UpdateSubFailedMsg = "update subscription failed"
|
||||
)
|
||||
|
||||
// TargetOperationType define constant to the target operation type
|
||||
type TargetOperationType int
|
||||
|
||||
const (
|
||||
// OpAppend define append new target to the subscription list
|
||||
OpAppend TargetOperationType = iota
|
||||
// OpRemove define remove exist target from the subscription list
|
||||
OpRemove
|
||||
// OpUpdate define update exist target from the subscription list
|
||||
OpUpdate
|
||||
)
|
||||
|
||||
const (
|
||||
// NoticeChanCap define real time data notice channel capacity
|
||||
NoticeChanCap = 10000
|
||||
)
|
||||
|
|
@ -1,23 +0,0 @@
|
|||
// Package constants define constant variable
|
||||
package constants
|
||||
|
||||
import "github.com/gofrs/uuid"
|
||||
|
||||
const (
|
||||
// UUIDErrChangeType 拓扑信息错误改变类型
|
||||
UUIDErrChangeType = iota
|
||||
// UUIDFromChangeType 拓扑信息父节点改变类型
|
||||
UUIDFromChangeType
|
||||
// UUIDToChangeType 拓扑信息子节点改变类型
|
||||
UUIDToChangeType
|
||||
// UUIDAddChangeType 拓扑信息新增类型
|
||||
UUIDAddChangeType
|
||||
)
|
||||
|
||||
const (
|
||||
// UUIDNilStr 拓扑信息中开始节点与结束节点字符串形式
|
||||
UUIDNilStr = "00000000-0000-0000-0000-000000000000"
|
||||
)
|
||||
|
||||
// UUIDNil 拓扑信息中开始节点与结束节点 UUID 格式
|
||||
var UUIDNil = uuid.FromStringOrNil(UUIDNilStr)
|
||||
|
|
@ -1,9 +0,0 @@
|
|||
// Package constants define constant variable
|
||||
package constants
|
||||
|
||||
// Assuming the B3 specification
|
||||
const (
|
||||
HeaderTraceID = "X-B3-TraceId"
|
||||
HeaderSpanID = "X-B3-SpanId"
|
||||
HeaderParentSpanID = "X-B3-ParentSpanId"
|
||||
)
|
||||
|
|
@ -1,48 +0,0 @@
|
|||
// Package database define database operation functions
|
||||
package database
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"modelRT/common/errcode"
|
||||
"modelRT/network"
|
||||
"modelRT/orm"
|
||||
|
||||
"github.com/gofrs/uuid"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// CreateComponentIntoDB define create component info of the circuit diagram into DB
|
||||
func CreateComponentIntoDB(ctx context.Context, tx *gorm.DB, componentInfo network.ComponentCreateInfo) (string, error) {
|
||||
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
globalUUID, err := uuid.FromString(componentInfo.UUID)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("format uuid from string type failed:%w", err)
|
||||
}
|
||||
|
||||
component := orm.Component{
|
||||
GlobalUUID: globalUUID,
|
||||
GridName: componentInfo.GridName,
|
||||
ZoneName: componentInfo.ZoneName,
|
||||
StationName: componentInfo.StationName,
|
||||
Tag: componentInfo.Tag,
|
||||
Name: componentInfo.Name,
|
||||
Context: componentInfo.Context,
|
||||
Op: componentInfo.Op,
|
||||
Ts: time.Now(),
|
||||
}
|
||||
|
||||
result := tx.WithContext(cancelCtx).Create(&component)
|
||||
if result.Error != nil || result.RowsAffected == 0 {
|
||||
err := result.Error
|
||||
if result.RowsAffected == 0 {
|
||||
err = fmt.Errorf("%w:please check insert component slice", errcode.ErrInsertRowUnexpected)
|
||||
}
|
||||
return "", fmt.Errorf("insert component info failed:%w", err)
|
||||
}
|
||||
return component.GlobalUUID.String(), nil
|
||||
}
|
||||
|
|
@ -1,50 +0,0 @@
|
|||
// Package database define database operation functions
|
||||
package database
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"modelRT/common/errcode"
|
||||
"modelRT/network"
|
||||
"modelRT/orm"
|
||||
|
||||
"github.com/gofrs/uuid"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// CreateMeasurement define create measurement info of the circuit diagram into DB
|
||||
func CreateMeasurement(ctx context.Context, tx *gorm.DB, measurementInfo network.MeasurementCreateInfo) (string, error) {
|
||||
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
globalUUID, err := uuid.FromString(measurementInfo.UUID)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("format uuid from string type failed:%w", err)
|
||||
}
|
||||
|
||||
measurement := orm.Measurement{
|
||||
Tag: "",
|
||||
Name: "",
|
||||
Type: -1,
|
||||
Size: -1,
|
||||
DataSource: nil,
|
||||
EventPlan: nil,
|
||||
BayUUID: globalUUID,
|
||||
ComponentUUID: globalUUID,
|
||||
Op: -1,
|
||||
Ts: time.Now(),
|
||||
}
|
||||
|
||||
result := tx.WithContext(cancelCtx).Create(&measurement)
|
||||
if result.Error != nil || result.RowsAffected == 0 {
|
||||
err := result.Error
|
||||
if result.RowsAffected == 0 {
|
||||
err = fmt.Errorf("%w:please check insert component slice", errcode.ErrInsertRowUnexpected)
|
||||
}
|
||||
return "", fmt.Errorf("insert component info failed:%w", err)
|
||||
}
|
||||
return strconv.FormatInt(measurement.ID, 10), nil
|
||||
}
|
||||
|
|
@ -1,36 +0,0 @@
|
|||
// Package database define database operation functions
|
||||
package database
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"modelRT/common/errcode"
|
||||
"modelRT/model"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// CreateModelIntoDB define create component model params of the circuit diagram into DB
|
||||
func CreateModelIntoDB(ctx context.Context, tx *gorm.DB, componentID int64, componentType int, modelParas string) error {
|
||||
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
modelStruct := model.SelectModelByType(componentType)
|
||||
modelStruct.SetComponentID(componentID)
|
||||
err := jsoniter.Unmarshal([]byte(modelParas), modelStruct)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unmarshal component model params failed:%w", err)
|
||||
}
|
||||
|
||||
result := tx.Model(modelStruct).WithContext(cancelCtx).Create(modelStruct)
|
||||
if result.Error != nil || result.RowsAffected == 0 {
|
||||
err := result.Error
|
||||
if result.RowsAffected == 0 {
|
||||
err = fmt.Errorf("%w:please check insert model params", errcode.ErrInsertRowUnexpected)
|
||||
}
|
||||
return fmt.Errorf("insert component model params into table %s failed:%w", modelStruct.ReturnTableName(), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,41 +0,0 @@
|
|||
// Package database define database operation functions
|
||||
package database
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"modelRT/common/errcode"
|
||||
"modelRT/network"
|
||||
"modelRT/orm"
|
||||
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// CreateTopologicIntoDB define create topologic info of the circuit diagram query by pageID and topologic info
|
||||
func CreateTopologicIntoDB(ctx context.Context, tx *gorm.DB, pageID int64, topologicInfos []network.TopologicUUIDCreateInfo) error {
|
||||
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
var topologicSlice []orm.Topologic
|
||||
for _, info := range topologicInfos {
|
||||
topologicInfo := orm.Topologic{
|
||||
UUIDFrom: info.UUIDFrom,
|
||||
UUIDTo: info.UUIDTo,
|
||||
Flag: info.Flag,
|
||||
}
|
||||
topologicSlice = append(topologicSlice, topologicInfo)
|
||||
}
|
||||
|
||||
result := tx.WithContext(cancelCtx).Create(&topologicSlice)
|
||||
|
||||
if result.Error != nil || result.RowsAffected != int64(len(topologicSlice)) {
|
||||
err := result.Error
|
||||
if result.RowsAffected != int64(len(topologicSlice)) {
|
||||
err = fmt.Errorf("%w:please check insert topologic slice", errcode.ErrInsertRowUnexpected)
|
||||
}
|
||||
return fmt.Errorf("insert topologic link failed:%w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,31 +0,0 @@
|
|||
// Package database define database operation functions
|
||||
package database
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"modelRT/common/errcode"
|
||||
"modelRT/network"
|
||||
"modelRT/orm"
|
||||
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// DeleteTopologicIntoDB define delete topologic info of the circuit diagram query by pageID and topologic info
|
||||
func DeleteTopologicIntoDB(ctx context.Context, tx *gorm.DB, pageID int64, delInfo network.TopologicUUIDDelInfo) error {
|
||||
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
result := tx.Model(&orm.Topologic{}).WithContext(cancelCtx).Where("page_id = ? and uuid_from = ? and uuid_to = ?", pageID, delInfo.UUIDFrom, delInfo.UUIDTo).Delete(&orm.Topologic{})
|
||||
|
||||
if result.Error != nil || result.RowsAffected == 0 {
|
||||
err := result.Error
|
||||
if result.RowsAffected == 0 {
|
||||
err = fmt.Errorf("%w:please check delete topologic where conditions", errcode.ErrDeleteRowZero)
|
||||
}
|
||||
return fmt.Errorf("delete topologic link failed:%w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,88 +0,0 @@
|
|||
// Package database define database operation functions
|
||||
package database
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"modelRT/logger"
|
||||
"modelRT/model"
|
||||
"modelRT/orm"
|
||||
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// FillingShortTokenModel define filling short token model info
|
||||
func FillingShortTokenModel(ctx context.Context, tx *gorm.DB, identModel *model.ShortIdentityTokenModel) error {
|
||||
filterComponent := &orm.Component{
|
||||
GridName: identModel.GetGridName(),
|
||||
ZoneName: identModel.GetZoneName(),
|
||||
StationName: identModel.GetStationName(),
|
||||
}
|
||||
|
||||
component, measurement, err := QueryLongIdentModelInfoByToken(ctx, tx, identModel.MeasurementTag, filterComponent)
|
||||
if err != nil {
|
||||
logger.Error(ctx, "query long identity token model info failed", "error", err)
|
||||
return err
|
||||
}
|
||||
identModel.ComponentInfo = component
|
||||
identModel.MeasurementInfo = measurement
|
||||
return nil
|
||||
}
|
||||
|
||||
// FillingLongTokenModel define filling long token model info
|
||||
func FillingLongTokenModel(ctx context.Context, tx *gorm.DB, identModel *model.LongIdentityTokenModel) error {
|
||||
filterComponent := &orm.Component{
|
||||
GridName: identModel.GetGridName(),
|
||||
ZoneName: identModel.GetZoneName(),
|
||||
StationName: identModel.GetStationName(),
|
||||
Tag: identModel.GetComponentTag(),
|
||||
}
|
||||
component, measurement, err := QueryLongIdentModelInfoByToken(ctx, tx, identModel.MeasurementTag, filterComponent)
|
||||
if err != nil {
|
||||
logger.Error(ctx, "query long identity token model info failed", "error", err)
|
||||
return err
|
||||
}
|
||||
identModel.ComponentInfo = component
|
||||
identModel.MeasurementInfo = measurement
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParseDataIdentifierToken define function to parse data identifier token function
|
||||
func ParseDataIdentifierToken(ctx context.Context, tx *gorm.DB, identToken string) (model.IndentityTokenModelInterface, error) {
|
||||
identSlice := strings.Split(identToken, ".")
|
||||
identSliceLen := len(identSlice)
|
||||
if identSliceLen == 4 {
|
||||
// token1.token2.token3.token4.token7
|
||||
shortIndentModel := &model.ShortIdentityTokenModel{
|
||||
GridTag: identSlice[0],
|
||||
ZoneTag: identSlice[1],
|
||||
StationTag: identSlice[2],
|
||||
NamespacePath: identSlice[3],
|
||||
MeasurementTag: identSlice[6],
|
||||
}
|
||||
err := FillingShortTokenModel(ctx, tx, shortIndentModel)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return shortIndentModel, nil
|
||||
} else if identSliceLen == 7 {
|
||||
// token1.token2.token3.token4.token5.token6.token7
|
||||
longIndentModel := &model.LongIdentityTokenModel{
|
||||
GridTag: identSlice[0],
|
||||
ZoneTag: identSlice[1],
|
||||
StationTag: identSlice[2],
|
||||
NamespacePath: identSlice[3],
|
||||
ComponentTag: identSlice[4],
|
||||
AttributeGroup: identSlice[5],
|
||||
MeasurementTag: identSlice[6],
|
||||
}
|
||||
err := FillingLongTokenModel(ctx, tx, longIndentModel)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return longIndentModel, nil
|
||||
}
|
||||
return nil, fmt.Errorf("invalid identity token format: %s", identToken)
|
||||
}
|
||||
|
|
@ -1,97 +0,0 @@
|
|||
// Package database define database operation functions
|
||||
package database
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"modelRT/diagram"
|
||||
"modelRT/model"
|
||||
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// ParseAttrToken define return the attribute model interface based on the input attribute token. doc addr http://server.baseware.net:6875/books/product-design-docs/page/d6baf
|
||||
func ParseAttrToken(ctx context.Context, tx *gorm.DB, attrToken, clientToken string) (model.AttrModelInterface, error) {
|
||||
rs := diagram.NewRedisString(ctx, attrToken, clientToken, 10, true)
|
||||
|
||||
attrSlice := strings.Split(attrToken, ".")
|
||||
attrLen := len(attrSlice)
|
||||
if attrLen == 4 {
|
||||
short := &model.ShortAttrInfo{
|
||||
AttrGroupName: attrSlice[2],
|
||||
AttrKey: attrSlice[3],
|
||||
}
|
||||
err := FillingShortAttrModel(ctx, tx, attrSlice, short)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
attrValue, err := rs.Get(attrToken)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
short.AttrValue = attrValue
|
||||
return short, nil
|
||||
} else if attrLen == 7 {
|
||||
long := &model.LongAttrInfo{
|
||||
AttrGroupName: attrSlice[5],
|
||||
AttrKey: attrSlice[6],
|
||||
}
|
||||
err := FillingLongAttrModel(ctx, tx, attrSlice, long)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
attrValue, err := rs.Get(attrToken)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
long.AttrValue = attrValue
|
||||
return long, nil
|
||||
}
|
||||
return nil, errors.New("invalid attribute token format")
|
||||
}
|
||||
|
||||
// FillingShortAttrModel define filling short attribute model info
|
||||
func FillingShortAttrModel(ctx context.Context, tx *gorm.DB, attrItems []string, attrModel *model.ShortAttrInfo) error {
|
||||
component, err := QueryComponentByNSPath(ctx, tx, attrItems[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
attrModel.ComponentInfo = &component
|
||||
return nil
|
||||
}
|
||||
|
||||
// FillingLongAttrModel define filling long attribute model info
|
||||
func FillingLongAttrModel(ctx context.Context, tx *gorm.DB, attrItems []string, attrModel *model.LongAttrInfo) error {
|
||||
grid, err := QueryGridByTagName(ctx, tx, attrItems[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
attrModel.GridInfo = &grid
|
||||
zone, err := QueryZoneByTagName(ctx, tx, attrItems[1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
attrModel.ZoneInfo = &zone
|
||||
station, err := QueryStationByTagName(ctx, tx, attrItems[2])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
attrModel.StationInfo = &station
|
||||
component, err := QueryComponentByNSPath(ctx, tx, attrItems[3])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
attrModel.ComponentInfo = &component
|
||||
return nil
|
||||
}
|
||||
|
||||
// QueryAttrValueFromRedis define query attribute value from redis by attrKey
|
||||
func QueryAttrValueFromRedis(attrKey string) string {
|
||||
fmt.Println(attrKey)
|
||||
return ""
|
||||
}
|
||||
|
|
@ -6,8 +6,6 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"modelRT/logger"
|
||||
|
||||
"gorm.io/driver/postgres"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
|
@ -18,16 +16,16 @@ var (
|
|||
_globalPostgresMu sync.RWMutex
|
||||
)
|
||||
|
||||
// GetPostgresDBClient returns the global PostgresDB client.It's safe for concurrent use.
|
||||
func GetPostgresDBClient() *gorm.DB {
|
||||
// PostgresDBClient returns the global PostgresDB client.It's safe for concurrent use.
|
||||
func PostgresDBClient() *gorm.DB {
|
||||
_globalPostgresMu.RLock()
|
||||
client := _globalPostgresClient
|
||||
_globalPostgresMu.RUnlock()
|
||||
return client
|
||||
}
|
||||
|
||||
// InitPostgresDBInstance return instance of PostgresDB client
|
||||
func InitPostgresDBInstance(ctx context.Context, PostgresDBURI string) *gorm.DB {
|
||||
// GetPostgresDBInstance return instance of PostgresDB client
|
||||
func GetPostgresDBInstance(ctx context.Context, PostgresDBURI string) *gorm.DB {
|
||||
postgresOnce.Do(func() {
|
||||
_globalPostgresClient = initPostgresDBClient(ctx, PostgresDBURI)
|
||||
})
|
||||
|
|
@ -38,7 +36,7 @@ func InitPostgresDBInstance(ctx context.Context, PostgresDBURI string) *gorm.DB
|
|||
func initPostgresDBClient(ctx context.Context, PostgresDBURI string) *gorm.DB {
|
||||
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel()
|
||||
db, err := gorm.Open(postgres.Open(PostgresDBURI), &gorm.Config{Logger: logger.NewGormLogger()})
|
||||
db, err := gorm.Open(postgres.Open(PostgresDBURI), &gorm.Config{})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,173 +4,60 @@ package database
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"modelRT/config"
|
||||
"modelRT/diagram"
|
||||
"modelRT/orm"
|
||||
|
||||
"github.com/gofrs/uuid"
|
||||
"gorm.io/gorm"
|
||||
"github.com/panjf2000/ants/v2"
|
||||
"go.uber.org/zap"
|
||||
"gorm.io/gorm/clause"
|
||||
)
|
||||
|
||||
// QueryCircuitDiagramComponentFromDB return the result of query circuit diagram component info order by page id from postgresDB
|
||||
// func QueryCircuitDiagramComponentFromDB(ctx context.Context, tx *gorm.DB, pool *ants.PoolWithFunc) (map[uuid.UUID]string, error) {
|
||||
// var components []orm.Component
|
||||
// // ctx超时判断
|
||||
// cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
// defer cancel()
|
||||
|
||||
// result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Find(&components)
|
||||
// if result.Error != nil {
|
||||
// logger.Error(ctx, "query circuit diagram component info failed", "error", result.Error)
|
||||
// return nil, result.Error
|
||||
// }
|
||||
|
||||
// componentTypeMap := make(map[uuid.UUID]string, len(components))
|
||||
// for _, component := range components {
|
||||
// pool.Invoke(config.ModelParseConfig{
|
||||
// ComponentInfo: component,
|
||||
// Ctx: ctx,
|
||||
// })
|
||||
|
||||
// componentTypeMap[component.GlobalUUID] = component.GlobalUUID.String()
|
||||
// }
|
||||
// return componentTypeMap, nil
|
||||
// }
|
||||
|
||||
// QueryComponentByUUID return the result of query circuit diagram component info by uuid from postgresDB
|
||||
func QueryComponentByUUID(ctx context.Context, tx *gorm.DB, uuid uuid.UUID) (orm.Component, error) {
|
||||
var component orm.Component
|
||||
func QueryCircuitDiagramComponentFromDB(ctx context.Context, pool *ants.PoolWithFunc, logger *zap.Logger) error {
|
||||
var Components []orm.Component
|
||||
// ctx超时判断
|
||||
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
result := tx.WithContext(cancelCtx).
|
||||
Where("global_uuid = ?", uuid).
|
||||
Clauses(clause.Locking{Strength: "UPDATE"}).
|
||||
First(&component)
|
||||
|
||||
result := _globalPostgresClient.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Find(&Components)
|
||||
if result.Error != nil {
|
||||
return orm.Component{}, result.Error
|
||||
logger.Error("query circuit diagram component info failed", zap.Error(result.Error))
|
||||
return result.Error
|
||||
}
|
||||
return component, nil
|
||||
|
||||
for _, component := range Components {
|
||||
pool.Invoke(config.ModelParseConfig{
|
||||
ComponentInfo: component,
|
||||
Context: ctx,
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// QueryComponentByCompTag return the result of query circuit diagram component info by component tag from postgresDB
|
||||
func QueryComponentByCompTag(ctx context.Context, tx *gorm.DB, tag string) (orm.Component, error) {
|
||||
var component orm.Component
|
||||
result := tx.WithContext(ctx).
|
||||
Where("tag = ?", tag).
|
||||
Clauses(clause.Locking{Strength: "UPDATE"}).
|
||||
First(&component)
|
||||
|
||||
if result.Error != nil {
|
||||
return orm.Component{}, result.Error
|
||||
}
|
||||
return component, nil
|
||||
}
|
||||
|
||||
// QueryComponentByCompTags return the result of query circuit diagram component info by components tag from postgresDB
|
||||
func QueryComponentByCompTags(ctx context.Context, tx *gorm.DB, tags []string) (map[string]orm.Component, error) {
|
||||
if len(tags) == 0 {
|
||||
return make(map[string]orm.Component), nil
|
||||
}
|
||||
|
||||
var results []orm.Component
|
||||
err := tx.WithContext(ctx).
|
||||
Model(orm.Component{}).
|
||||
Select("global_uuid,tag, model_name").
|
||||
Where("tag IN ?", tags).
|
||||
Find(&results).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
compModelMap := make(map[string]orm.Component, len(results))
|
||||
for _, result := range results {
|
||||
compModelMap[result.Tag] = result
|
||||
}
|
||||
return compModelMap, nil
|
||||
}
|
||||
|
||||
// QueryComponentByPageID return the result of query circuit diagram component info by page id from postgresDB
|
||||
func QueryComponentByPageID(ctx context.Context, tx *gorm.DB, uuid uuid.UUID) (orm.Component, error) {
|
||||
var component orm.Component
|
||||
// QueryElectricalEquipmentUUID return the result of query electrical equipment uuid from postgresDB by circuit diagram id info
|
||||
func QueryElectricalEquipmentUUID(ctx context.Context, diagramID int64, logger *zap.Logger) error {
|
||||
var uuids []string
|
||||
// ctx超时判断
|
||||
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
result := tx.WithContext(cancelCtx).Where("page_id = ? ", uuid).Clauses(clause.Locking{Strength: "UPDATE"}).Find(&component)
|
||||
tableName := "circuit_diagram_" + strconv.FormatInt(diagramID, 10)
|
||||
result := _globalPostgresClient.Table(tableName).WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Select("uuid").Find(&uuids)
|
||||
if result.Error != nil {
|
||||
return orm.Component{}, result.Error
|
||||
logger.Error("query circuit diagram overview info failed", zap.Error(result.Error))
|
||||
return result.Error
|
||||
}
|
||||
return component, nil
|
||||
}
|
||||
|
||||
// QueryComponentByNSPath return the result of query circuit diagram component info by ns path from postgresDB
|
||||
func QueryComponentByNSPath(ctx context.Context, tx *gorm.DB, nsPath string) (orm.Component, error) {
|
||||
var component orm.Component
|
||||
// ctx超时判断
|
||||
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
result := tx.WithContext(cancelCtx).Where("NAME = ? ", nsPath).Clauses(clause.Locking{Strength: "UPDATE"}).Find(&component)
|
||||
if result.Error != nil {
|
||||
return orm.Component{}, result.Error
|
||||
}
|
||||
return component, nil
|
||||
}
|
||||
|
||||
// QueryLongIdentModelInfoByToken define func to query long identity model info by long token
|
||||
func QueryLongIdentModelInfoByToken(ctx context.Context, tx *gorm.DB, measTag string, condition *orm.Component) (*orm.Component, *orm.Measurement, error) {
|
||||
var resultComp orm.Component
|
||||
var meauserment orm.Measurement
|
||||
|
||||
// ctx timeout judgment
|
||||
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).First(&resultComp, &condition)
|
||||
if result.Error != nil {
|
||||
if result.Error == gorm.ErrRecordNotFound {
|
||||
return nil, nil, fmt.Errorf("component record not found by %v:%w", condition, result.Error)
|
||||
for _, uuid := range uuids {
|
||||
diagramParamsMap, err := diagram.GetComponentMap(uuid)
|
||||
if err != nil {
|
||||
logger.Error("get electrical circuit diagram overview info failed", zap.Error(result.Error))
|
||||
return result.Error
|
||||
}
|
||||
return nil, nil, result.Error
|
||||
fmt.Println(diagramParamsMap, err)
|
||||
}
|
||||
|
||||
filterMap := map[string]any{"component_uuid": resultComp.GlobalUUID, "tag": measTag}
|
||||
result = tx.WithContext(cancelCtx).Where(filterMap).Clauses(clause.Locking{Strength: "UPDATE"}).First(&meauserment)
|
||||
if result.Error != nil {
|
||||
if result.Error == gorm.ErrRecordNotFound {
|
||||
return nil, nil, fmt.Errorf("measurement record not found by %v:%w", filterMap, result.Error)
|
||||
}
|
||||
return nil, nil, result.Error
|
||||
}
|
||||
return &resultComp, &meauserment, nil
|
||||
}
|
||||
|
||||
// QueryShortIdentModelInfoByToken define func to query short identity model info by short token
|
||||
func QueryShortIdentModelInfoByToken(ctx context.Context, tx *gorm.DB, measTag string, condition *orm.Component) (*orm.Component, *orm.Measurement, error) {
|
||||
var resultComp orm.Component
|
||||
var meauserment orm.Measurement
|
||||
// ctx timeout judgment
|
||||
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).First(&resultComp, &condition)
|
||||
if result.Error != nil {
|
||||
if result.Error == gorm.ErrRecordNotFound {
|
||||
return nil, nil, fmt.Errorf("component record not found by %v:%w", condition, result.Error)
|
||||
}
|
||||
return nil, nil, result.Error
|
||||
}
|
||||
|
||||
filterMap := map[string]any{"component_uuid": resultComp.GlobalUUID, "tag": measTag}
|
||||
result = tx.WithContext(cancelCtx).Where(filterMap).Clauses(clause.Locking{Strength: "UPDATE"}).First(&meauserment)
|
||||
if result.Error != nil {
|
||||
if result.Error == gorm.ErrRecordNotFound {
|
||||
return nil, nil, fmt.Errorf("measurement record not found by %v:%w", filterMap, result.Error)
|
||||
}
|
||||
return nil, nil, result.Error
|
||||
}
|
||||
return &resultComp, &meauserment, nil
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,27 +0,0 @@
|
|||
// Package database define database operation functions
|
||||
package database
|
||||
|
||||
import (
|
||||
"modelRT/orm"
|
||||
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// GenAllAttributeMap define func to query global_uuid、component tag、component nspath field for attribute group
|
||||
func GenAllAttributeMap(db *gorm.DB) (map[string]orm.AttributeSet, error) {
|
||||
var compResults []orm.Component
|
||||
resMap := make(map[string]orm.AttributeSet)
|
||||
|
||||
err := db.Model(&orm.Component{}).Select("global_uuid", "station_id", "tag", "nspath").Find(&compResults).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, r := range compResults {
|
||||
resMap[r.GlobalUUID.String()] = orm.AttributeSet{
|
||||
CompTag: r.Tag,
|
||||
CompNSPath: r.NSPath,
|
||||
}
|
||||
}
|
||||
return resMap, nil
|
||||
}
|
||||
|
|
@ -1,110 +0,0 @@
|
|||
// Package database define database operation functions
|
||||
package database
|
||||
|
||||
import (
|
||||
"modelRT/orm"
|
||||
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
type ZoneWithParent struct {
|
||||
orm.Zone
|
||||
GridTag string `gorm:"column:grid_tag"`
|
||||
}
|
||||
|
||||
type StationWithParent struct {
|
||||
orm.Zone
|
||||
ZoneTag string `gorm:"column:zone_tag"`
|
||||
}
|
||||
|
||||
func GetFullMeasurementSet(db *gorm.DB) (*orm.MeasurementSet, error) {
|
||||
mSet := &orm.MeasurementSet{
|
||||
GridToZoneTags: make(map[string][]string),
|
||||
ZoneToStationTags: make(map[string][]string),
|
||||
StationToCompNSPaths: make(map[string][]string),
|
||||
CompNSPathToCompTags: make(map[string][]string),
|
||||
CompTagToMeasTags: make(map[string][]string),
|
||||
}
|
||||
|
||||
var grids []orm.Grid
|
||||
if err := db.Table("grid").Select("tagname").Scan(&grids).Error; err == nil {
|
||||
for _, g := range grids {
|
||||
if g.TAGNAME != "" {
|
||||
mSet.AllGridTags = append(mSet.AllGridTags, g.TAGNAME)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var zones []struct {
|
||||
orm.Zone
|
||||
GridTag string `gorm:"column:grid_tag"`
|
||||
}
|
||||
if err := db.Table("zone").
|
||||
Select("zone.*, grid.tagname as grid_tag").
|
||||
Joins("left join grid on zone.grid_id = grid.id").
|
||||
Scan(&zones).Error; err == nil {
|
||||
for _, z := range zones {
|
||||
mSet.AllZoneTags = append(mSet.AllZoneTags, z.TAGNAME)
|
||||
if z.GridTag != "" {
|
||||
mSet.GridToZoneTags[z.GridTag] = append(mSet.GridToZoneTags[z.GridTag], z.TAGNAME)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var stations []struct {
|
||||
orm.Station
|
||||
ZoneTag string `gorm:"column:zone_tag"`
|
||||
}
|
||||
if err := db.Table("station").
|
||||
Select("station.*, zone.tagname as zone_tag").
|
||||
Joins("left join zone on station.zone_id = zone.id").
|
||||
Scan(&stations).Error; err == nil {
|
||||
for _, s := range stations {
|
||||
mSet.AllStationTags = append(mSet.AllStationTags, s.TAGNAME)
|
||||
if s.ZoneTag != "" {
|
||||
mSet.ZoneToStationTags[s.ZoneTag] = append(mSet.ZoneToStationTags[s.ZoneTag], s.TAGNAME)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var comps []struct {
|
||||
orm.Component
|
||||
StationTag string `gorm:"column:station_tag"`
|
||||
}
|
||||
if err := db.Table("component").
|
||||
Select("component.*, station.tagname as station_tag").
|
||||
Joins("left join station on component.station_id = station.id").
|
||||
Scan(&comps).Error; err == nil {
|
||||
for _, c := range comps {
|
||||
mSet.AllCompNSPaths = append(mSet.AllCompNSPaths, c.NSPath)
|
||||
mSet.AllCompTags = append(mSet.AllCompTags, c.Tag)
|
||||
|
||||
if c.StationTag != "" {
|
||||
mSet.StationToCompNSPaths[c.StationTag] = append(mSet.StationToCompNSPaths[c.StationTag], c.NSPath)
|
||||
}
|
||||
|
||||
if c.NSPath != "" {
|
||||
mSet.CompNSPathToCompTags[c.NSPath] = append(mSet.CompNSPathToCompTags[c.NSPath], c.Tag)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mSet.AllConfigTags = append(mSet.AllConfigTags, "bay")
|
||||
|
||||
var measurements []struct {
|
||||
orm.Measurement
|
||||
CompTag string `gorm:"column:comp_tag"`
|
||||
}
|
||||
if err := db.Table("measurement").
|
||||
Select("measurement.*, component.tag as comp_tag").
|
||||
Joins("left join component on measurement.component_uuid = component.global_uuid").
|
||||
Scan(&measurements).Error; err == nil {
|
||||
for _, m := range measurements {
|
||||
mSet.AllMeasTags = append(mSet.AllMeasTags, m.Tag)
|
||||
if m.CompTag != "" {
|
||||
mSet.CompTagToMeasTags[m.CompTag] = append(mSet.CompTagToMeasTags[m.CompTag], m.Tag)
|
||||
}
|
||||
}
|
||||
}
|
||||
return mSet, nil
|
||||
}
|
||||
|
|
@ -1,26 +0,0 @@
|
|||
// Package database define database operation functions
|
||||
package database
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"modelRT/orm"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
)
|
||||
|
||||
// QueryGridByTagName return the result of query circuit diagram grid info by tagName from postgresDB
|
||||
func QueryGridByTagName(ctx context.Context, tx *gorm.DB, tagName string) (orm.Grid, error) {
|
||||
var grid orm.Grid
|
||||
// ctx超时判断
|
||||
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
result := tx.WithContext(cancelCtx).Where("TAGNAME = ? ", tagName).Clauses(clause.Locking{Strength: "UPDATE"}).Find(&grid)
|
||||
if result.Error != nil {
|
||||
return orm.Grid{}, result.Error
|
||||
}
|
||||
return grid, nil
|
||||
}
|
||||
|
|
@ -1,62 +0,0 @@
|
|||
// Package database define database operation functions
|
||||
package database
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"modelRT/orm"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
)
|
||||
|
||||
// QueryMeasurementByID return the result of query circuit diagram component measurement info by id from postgresDB
|
||||
func QueryMeasurementByID(ctx context.Context, tx *gorm.DB, id int64) (orm.Measurement, error) {
|
||||
var measurement orm.Measurement
|
||||
// ctx超时判断
|
||||
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
result := tx.WithContext(cancelCtx).
|
||||
Where("id = ?", id).
|
||||
Clauses(clause.Locking{Strength: "UPDATE"}).
|
||||
First(&measurement)
|
||||
|
||||
if result.Error != nil {
|
||||
return orm.Measurement{}, result.Error
|
||||
}
|
||||
return measurement, nil
|
||||
}
|
||||
|
||||
// QueryMeasurementByToken define function query circuit diagram component measurement info by token from postgresDB
|
||||
func QueryMeasurementByToken(ctx context.Context, tx *gorm.DB, token string) (orm.Measurement, error) {
|
||||
// TODO parse token to avoid SQL injection
|
||||
|
||||
var component orm.Measurement
|
||||
// ctx超时判断
|
||||
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
result := tx.WithContext(cancelCtx).
|
||||
Where(" = ?", token).
|
||||
Clauses(clause.Locking{Strength: "UPDATE"}).
|
||||
First(&component)
|
||||
|
||||
if result.Error != nil {
|
||||
return orm.Measurement{}, result.Error
|
||||
}
|
||||
return component, nil
|
||||
}
|
||||
|
||||
// GetAllMeasurements define func to query all measurement info from postgresDB
|
||||
func GetAllMeasurements(ctx context.Context, tx *gorm.DB) ([]orm.Measurement, error) {
|
||||
var measurements []orm.Measurement
|
||||
|
||||
// ctx超时判断
|
||||
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Find(&measurements)
|
||||
if result.Error != nil {
|
||||
return nil, result.Error
|
||||
}
|
||||
return measurements, nil
|
||||
}
|
||||
|
|
@ -1,81 +0,0 @@
|
|||
// Package database define database operation functions
|
||||
package database
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"modelRT/orm"
|
||||
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
func queryFirstByID(ctx context.Context, tx *gorm.DB, id any, dest any) error {
|
||||
result := tx.WithContext(ctx).Where("id = ?", id).First(dest)
|
||||
return result.Error
|
||||
}
|
||||
|
||||
func queryFirstByTag(ctx context.Context, tx *gorm.DB, tagName any, dest any) error {
|
||||
result := tx.WithContext(ctx).Where("tagname = ?", tagName).First(dest)
|
||||
return result.Error
|
||||
}
|
||||
|
||||
// QueryNodeInfoByID return the result of query circuit diagram node info by id and level from postgresDB
|
||||
func QueryNodeInfoByID(ctx context.Context, tx *gorm.DB, id int64, level int) (orm.CircuitDiagramNodeInterface, orm.CircuitDiagramNodeInterface, error) {
|
||||
// 设置 Context 超时
|
||||
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
var currentNodeInfo orm.CircuitDiagramNodeInterface
|
||||
var previousNodeInfo orm.CircuitDiagramNodeInterface
|
||||
var err error
|
||||
|
||||
switch level {
|
||||
case 0:
|
||||
var grid orm.Grid
|
||||
err = queryFirstByID(cancelCtx, tx, id, &grid)
|
||||
currentNodeInfo = grid
|
||||
case 1:
|
||||
// current:Zone,Previous:Grid
|
||||
var zone orm.Zone
|
||||
err = queryFirstByID(cancelCtx, tx, id, &zone)
|
||||
currentNodeInfo = zone
|
||||
if err == nil {
|
||||
var grid orm.Grid
|
||||
err = queryFirstByID(cancelCtx, tx, zone.GridID, &grid)
|
||||
previousNodeInfo = grid
|
||||
}
|
||||
case 2:
|
||||
// current:Station,Previous:Zone
|
||||
var station orm.Station
|
||||
err = queryFirstByID(cancelCtx, tx, id, &station)
|
||||
currentNodeInfo = station
|
||||
if err == nil {
|
||||
var zone orm.Zone
|
||||
err = queryFirstByID(cancelCtx, tx, station.ZoneID, &zone)
|
||||
previousNodeInfo = zone
|
||||
}
|
||||
case 3, 4:
|
||||
// current:Component, Previous:Station
|
||||
var component orm.Component
|
||||
err = queryFirstByID(cancelCtx, tx, id, &component)
|
||||
currentNodeInfo = component
|
||||
if err == nil {
|
||||
var station orm.Station
|
||||
// TODO 修改staion name为通过 station id 查询
|
||||
err = queryFirstByTag(cancelCtx, tx, component.StationName, &station)
|
||||
previousNodeInfo = station
|
||||
}
|
||||
case 5:
|
||||
// TODO[NONEED-ISSUE]暂无此层级增加或删除需求 #2
|
||||
return nil, nil, nil
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("unsupported node level: %d", level)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return previousNodeInfo, currentNodeInfo, nil
|
||||
}
|
||||
|
|
@ -5,25 +5,32 @@ import (
|
|||
"context"
|
||||
"time"
|
||||
|
||||
"modelRT/logger"
|
||||
"modelRT/orm"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"go.uber.org/zap"
|
||||
"gorm.io/gorm/clause"
|
||||
)
|
||||
|
||||
// QueryAllPages return the all page info of the circuit diagram query by grid_id and zone_id and station_id
|
||||
func QueryAllPages(ctx context.Context, tx *gorm.DB, gridID, zoneID, stationID int64) ([]orm.Page, error) {
|
||||
func QueryAllPages(ctx context.Context, logger *zap.Logger, gridID, zoneID, stationID int64) ([]orm.Page, error) {
|
||||
var pages []orm.Page
|
||||
// ctx timeout judgment
|
||||
// ctx超时判断
|
||||
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
// result := _globalPostgresClient.Model(&orm.Page{}).WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Select("Page.id, Page.Name, Page.status,Page.context").InnerJoins("Station on Station.id = Page.station_id").InnerJoins("Zone on Zone.id = Station.zone_id").InnerJoins("Grid on Grid.id = Zone.grid_id").Scan(&pages)
|
||||
|
||||
result := tx.Model(&orm.Page{}).WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Select(`"page".id, "page".Name, "page".status,"page".context`).Joins(`inner join "station" on "station".id = "page".station_id`).Joins(`inner join "zone" on "zone".id = "station".zone_id`).Joins(`inner join "grid" on "grid".id = "zone".grid_id`).Where(`"grid".id = ? and "zone".id = ? and "station".id = ?`, gridID, zoneID, stationID).Scan(&pages)
|
||||
result := _globalPostgresClient.Model(&orm.Page{}).WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Select(`"Page".id, "Page".Name, "Page".status,"Page".context`).Joins(`inner join "Station" on "Station".id = "Page".station_id`).Joins(`inner join "Zone" on "Zone".id = "Station".zone_id`).Joins(`inner join "Grid" on "Grid".id = "Zone".grid_id`).Where(`"Grid".id = ? and "Zone".id = ? and "Station".id = ?`, gridID, zoneID, stationID).Scan(&pages)
|
||||
|
||||
if result.Error != nil {
|
||||
logger.Error(ctx, "query circuit diagram pages by gridID and zoneID and stationID failed", "grid_id", gridID, "zone_id", zoneID, "station_id", stationID, "error", result.Error)
|
||||
logger.Error("query circuit diagram pages by gridID and zoneID and stationID failed", zap.Int64("grid_id", gridID), zap.Int64("zone_id", zoneID), zap.Int64("station_id", stationID), zap.Error(result.Error))
|
||||
return nil, result.Error
|
||||
}
|
||||
|
||||
return pages, nil
|
||||
}
|
||||
|
||||
// select "Page".id, "Page".station_id,"Station".zone_id,"Zone".grid_id,"Page".Name, "Page".status,"Page".context from "Page" inner join "Station" on "Station".id = "Page".station_id
|
||||
// inner join "Zone" on "Zone".id = "Station".zone_id inner join "Grid" on "Grid".id = "Zone".grid_id
|
||||
// where "Grid".id = 1 and "Zone".id=1 and "Station".id=1
|
||||
|
||||
// _globalPostgresClient.Model(&orm.Page{}).Clauses(clause.Locking{Strength: "UPDATE"}).Select(`"Page".id, "Page".Name, "Page".status,"Page".context`).Joins(`inner join "Station" on "Station".id = "Page".station_id`).Joins(`inner join "Zone" on "Zone".id = "Station".zone_id`).Joins(`inner join "Grid" on "Grid".id = "Zone".grid_id`).Where(`"Grid".id = ? and "Zone".id = ? and "Station".id = ?`, 1, 1, 1).Scan(&pages)
|
||||
|
|
|
|||
|
|
@ -1,78 +0,0 @@
|
|||
// Package database define database operation functions
|
||||
package database
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"modelRT/logger"
|
||||
"modelRT/orm"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
)
|
||||
|
||||
// QueryArrtibuteRecordByUUID return the attribute table record info of the component attribute by uuid
|
||||
func QueryArrtibuteRecordByUUID(ctx context.Context, tx *gorm.DB, gridID, zoneID, stationID int64) ([]orm.Page, error) {
|
||||
var pages []orm.Page
|
||||
// ctx timeout judgment
|
||||
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
result := tx.Model(&orm.Page{}).WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Select(`"page".id, "page".Name, "page".status,"page".context`).Joins(`inner join "station" on "station".id = "page".station_id`).Joins(`inner join "zone" on "zone".id = "station".zone_id`).Joins(`inner join "grid" on "grid".id = "zone".grid_id`).Where(`"grid".id = ? and "zone".id = ? and "station".id = ?`, gridID, zoneID, stationID).Scan(&pages)
|
||||
|
||||
if result.Error != nil {
|
||||
logger.Error(ctx, "query circuit diagram pages by gridID and zoneID and stationID failed", "grid_id", gridID, "zone_id", zoneID, "station_id", stationID, "error", result.Error)
|
||||
return nil, result.Error
|
||||
}
|
||||
return pages, nil
|
||||
}
|
||||
|
||||
// GetProjectNameByTagAndGroupName 根据 tag 和 meta_model 获取项目名称
|
||||
func GetProjectNameByTagAndGroupName(db *gorm.DB, tag string, groupName string) (string, error) {
|
||||
var project orm.ProjectManager
|
||||
|
||||
// 使用 Select 只提取 name 字段,提高查询效率
|
||||
// 使用 Where 进行多列条件过滤
|
||||
err := db.Select("name").
|
||||
Where("tag = ? AND meta_model = ?", tag, groupName).
|
||||
First(&project).Error
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return "", fmt.Errorf("project not found with tag: %s and model: %s", tag, groupName)
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
return project.Name, nil
|
||||
}
|
||||
|
||||
// BatchGetProjectNames define func to batch retrieve name based on multiple tags and metaModel
|
||||
func BatchGetProjectNames(db *gorm.DB, identifiers []orm.ProjectIdentifier) (map[orm.ProjectIdentifier]string, error) {
|
||||
if len(identifiers) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var projects []orm.ProjectManager
|
||||
queryArgs := make([][]any, len(identifiers))
|
||||
for i, id := range identifiers {
|
||||
queryArgs[i] = []any{id.Tag, id.GroupName}
|
||||
}
|
||||
|
||||
err := db.Select("tag", "group_name", "name").
|
||||
Where("(tag, group_name) IN ?", queryArgs).
|
||||
Find(&projects).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resultMap := make(map[orm.ProjectIdentifier]string)
|
||||
for _, p := range projects {
|
||||
key := orm.ProjectIdentifier{Tag: p.Tag, GroupName: p.GroupName}
|
||||
resultMap[key] = p.Name
|
||||
}
|
||||
|
||||
return resultMap, nil
|
||||
}
|
||||
|
|
@ -1,26 +0,0 @@
|
|||
// Package database define database operation functions
|
||||
package database
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"modelRT/orm"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
)
|
||||
|
||||
// QueryStationByTagName return the result of query circuit diagram Station info by tagName from postgresDB
|
||||
func QueryStationByTagName(ctx context.Context, tx *gorm.DB, tagName string) (orm.Station, error) {
|
||||
var station orm.Station
|
||||
// ctx超时判断
|
||||
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
result := tx.WithContext(cancelCtx).Where("TAGNAME = ? ", tagName).Clauses(clause.Locking{Strength: "UPDATE"}).Find(&station)
|
||||
if result.Error != nil {
|
||||
return orm.Station{}, result.Error
|
||||
}
|
||||
return station, nil
|
||||
}
|
||||
|
|
@ -3,145 +3,83 @@ package database
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"modelRT/constants"
|
||||
"modelRT/diagram"
|
||||
"modelRT/logger"
|
||||
"modelRT/orm"
|
||||
"modelRT/sql"
|
||||
|
||||
"github.com/gofrs/uuid"
|
||||
"gorm.io/gorm"
|
||||
"go.uber.org/zap"
|
||||
"gorm.io/gorm/clause"
|
||||
)
|
||||
|
||||
// QueryTopologic return the topologic info of the circuit diagram
|
||||
func QueryTopologic(ctx context.Context, tx *gorm.DB) ([]orm.Topologic, error) {
|
||||
var recursiveSQL = `WITH RECURSIVE recursive_tree as (
|
||||
SELECT uuid_from,uuid_to,page_id,flag
|
||||
FROM "Topologic"
|
||||
WHERE uuid_from is null and page_id = ?
|
||||
UNION ALL
|
||||
SELECT t.uuid_from,t.uuid_to,t.page_id,t.flag
|
||||
FROM "Topologic" t
|
||||
JOIN recursive_tree rt ON t.uuid_from = rt.uuid_to
|
||||
)
|
||||
SELECT * FROM recursive_tree;`
|
||||
|
||||
// QueryTopologicByPageID return the topologic info of the circuit diagram query by pageID
|
||||
func QueryTopologicByPageID(ctx context.Context, logger *zap.Logger, pageID int64) ([]orm.Topologic, error) {
|
||||
var topologics []orm.Topologic
|
||||
// ctx超时判断
|
||||
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Raw(sql.RecursiveSQL, constants.UUIDNilStr).Scan(&topologics)
|
||||
result := _globalPostgresClient.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Raw(recursiveSQL, pageID).Scan(&topologics)
|
||||
if result.Error != nil {
|
||||
logger.Error(ctx, "query circuit diagram topologic info by start node uuid failed", "start_node_uuid", constants.UUIDNilStr, "error", result.Error)
|
||||
logger.Error("query circuit diagram topologic info by pageID failed", zap.Int64("pageID", pageID), zap.Error(result.Error))
|
||||
return nil, result.Error
|
||||
}
|
||||
return topologics, nil
|
||||
}
|
||||
|
||||
// QueryTopologicFromDB return the result of query topologic info from DB
|
||||
func QueryTopologicFromDB(ctx context.Context, tx *gorm.DB) (*diagram.MultiBranchTreeNode, error) {
|
||||
topologicInfos, err := QueryTopologic(ctx, tx)
|
||||
if err != nil {
|
||||
logger.Error(ctx, "query topologic info failed", "error", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tree, err := BuildMultiBranchTree(topologicInfos)
|
||||
if err != nil {
|
||||
logger.Error(ctx, "init topologic failed", "error", err)
|
||||
return nil, err
|
||||
}
|
||||
return tree, nil
|
||||
}
|
||||
|
||||
// InitCircuitDiagramTopologic return circuit diagram topologic info from postgres
|
||||
func InitCircuitDiagramTopologic(topologicNodes []orm.Topologic) error {
|
||||
var rootVertex *diagram.MultiBranchTreeNode
|
||||
func InitCircuitDiagramTopologic(pageID int64, topologicNodes []orm.Topologic) error {
|
||||
var rootVertex uuid.UUID
|
||||
|
||||
for _, node := range topologicNodes {
|
||||
if node.UUIDFrom == constants.UUIDNil {
|
||||
rootVertex = diagram.NewMultiBranchTree(node.UUIDFrom)
|
||||
if node.UUIDFrom.IsNil() {
|
||||
rootVertex = node.UUIDTo
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if rootVertex == nil {
|
||||
return fmt.Errorf("root vertex is nil")
|
||||
}
|
||||
topologicSet := diagram.NewGraph(rootVertex)
|
||||
|
||||
for _, node := range topologicNodes {
|
||||
if node.UUIDFrom == constants.UUIDNil {
|
||||
nodeVertex := diagram.NewMultiBranchTree(node.UUIDTo)
|
||||
rootVertex.AddChild(nodeVertex)
|
||||
if node.UUIDFrom.IsNil() {
|
||||
continue
|
||||
}
|
||||
topologicSet.AddEdge(node.UUIDFrom, node.UUIDTo)
|
||||
}
|
||||
|
||||
node := rootVertex
|
||||
for _, nodeVertex := range node.Children {
|
||||
nextVertexs := make([]*diagram.MultiBranchTreeNode, 0)
|
||||
nextVertexs = append(nextVertexs, nodeVertex)
|
||||
}
|
||||
diagram.DiagramsOverview.Store(pageID, topologicSet)
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO 电流互感器不单独划分间隔,以母线、浇筑母线、变压器为间隔原件
|
||||
func IntervalBoundaryDetermine(uuid uuid.UUID) bool {
|
||||
diagram.GetComponentMap(uuid.String())
|
||||
// TODO 判断 component 的类型是否为间隔
|
||||
// TODO 0xA1B2C3D4,高四位表示可以成为间隔的compoent类型的值为FFFF,普通 component 类型的值为 0000。低四位中前二位表示component的一级类型,例如母线 PT、母联/母分、进线等,低四位中后二位表示一级类型中包含的具体类型,例如母线 PT中包含的电压互感器、隔离开关、接地开关、避雷器、带电显示器等。
|
||||
num := uint32(0xA1B2C3D4) // 八位16进制数
|
||||
high16 := uint16(num >> 16)
|
||||
fmt.Printf("原始值: 0x%X\n", num) // 输出: 0xA1B2C3D4
|
||||
fmt.Printf("高十六位: 0x%X\n", high16) // 输出: 0xA1B2
|
||||
return true
|
||||
}
|
||||
|
||||
// BuildMultiBranchTree return the multi branch tree by topologic info and component type map
|
||||
func BuildMultiBranchTree(topologics []orm.Topologic) (*diagram.MultiBranchTreeNode, error) {
|
||||
nodeMap := make(map[uuid.UUID]*diagram.MultiBranchTreeNode, len(topologics)*2)
|
||||
|
||||
for _, topo := range topologics {
|
||||
if _, exists := nodeMap[topo.UUIDFrom]; !exists {
|
||||
// skip special uuid
|
||||
if topo.UUIDTo != constants.UUIDNil {
|
||||
nodeMap[topo.UUIDFrom] = &diagram.MultiBranchTreeNode{
|
||||
ID: topo.UUIDFrom,
|
||||
Children: make([]*diagram.MultiBranchTreeNode, 0),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if _, exists := nodeMap[topo.UUIDTo]; !exists {
|
||||
// skip special uuid
|
||||
if topo.UUIDTo != constants.UUIDNil {
|
||||
nodeMap[topo.UUIDTo] = &diagram.MultiBranchTreeNode{
|
||||
ID: topo.UUIDTo,
|
||||
Children: make([]*diagram.MultiBranchTreeNode, 0),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, topo := range topologics {
|
||||
var parent *diagram.MultiBranchTreeNode
|
||||
if topo.UUIDFrom == constants.UUIDNil {
|
||||
parent = &diagram.MultiBranchTreeNode{
|
||||
ID: constants.UUIDNil,
|
||||
}
|
||||
nodeMap[constants.UUIDNil] = parent
|
||||
} else {
|
||||
parent = nodeMap[topo.UUIDFrom]
|
||||
}
|
||||
|
||||
var child *diagram.MultiBranchTreeNode
|
||||
if topo.UUIDTo == constants.UUIDNil {
|
||||
child = &diagram.MultiBranchTreeNode{
|
||||
ID: topo.UUIDTo,
|
||||
}
|
||||
} else {
|
||||
child = nodeMap[topo.UUIDTo]
|
||||
}
|
||||
child.Parent = parent
|
||||
parent.Children = append(parent.Children, child)
|
||||
}
|
||||
|
||||
// return root vertex
|
||||
root, exists := nodeMap[constants.UUIDNil]
|
||||
if !exists {
|
||||
return nil, fmt.Errorf("root node not found")
|
||||
}
|
||||
return root, nil
|
||||
// QueryTopologicFromDB return the result of query topologic info from postgresDB
|
||||
func QueryTopologicFromDB(ctx context.Context, logger *zap.Logger, gridID, zoneID, stationID int64) error {
|
||||
allPages, err := QueryAllPages(ctx, logger, gridID, zoneID, stationID)
|
||||
if err != nil {
|
||||
logger.Error("query all pages info failed", zap.Int64("gridID", gridID), zap.Int64("zoneID", zoneID), zap.Int64("stationID", stationID), zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
for _, page := range allPages {
|
||||
topologicInfos, err := QueryTopologicByPageID(ctx, logger, page.ID)
|
||||
if err != nil {
|
||||
logger.Error("query topologic info by pageID failed", zap.Int64("pageID", page.ID), zap.Error(err))
|
||||
return err
|
||||
}
|
||||
err = InitCircuitDiagramTopologic(page.ID, topologicInfos)
|
||||
if err != nil {
|
||||
logger.Error("init topologic failed", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,26 +0,0 @@
|
|||
// Package database define database operation functions
|
||||
package database
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"modelRT/orm"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
)
|
||||
|
||||
// QueryZoneByTagName return the result of query circuit diagram Zone info by tagName from postgresDB
|
||||
func QueryZoneByTagName(ctx context.Context, tx *gorm.DB, tagName string) (orm.Zone, error) {
|
||||
var zone orm.Zone
|
||||
// ctx超时判断
|
||||
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
result := tx.WithContext(cancelCtx).Where("TAGNAME = ? ", tagName).Clauses(clause.Locking{Strength: "UPDATE"}).Find(&zone)
|
||||
if result.Error != nil {
|
||||
return orm.Zone{}, result.Error
|
||||
}
|
||||
return zone, nil
|
||||
}
|
||||
|
|
@ -1,59 +0,0 @@
|
|||
// Package database define database operation functions
|
||||
package database
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"modelRT/common/errcode"
|
||||
"modelRT/network"
|
||||
"modelRT/orm"
|
||||
|
||||
"github.com/gofrs/uuid"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// UpdateComponentIntoDB define update component info of the circuit diagram into DB
|
||||
func UpdateComponentIntoDB(ctx context.Context, tx *gorm.DB, componentInfo network.ComponentUpdateInfo) (string, error) {
|
||||
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
globalUUID, err := uuid.FromString(componentInfo.UUID)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("format uuid from string type failed:%w", err)
|
||||
}
|
||||
|
||||
var component orm.Component
|
||||
result := tx.Model(&orm.Component{}).WithContext(cancelCtx).Where("global_uuid = ?", globalUUID).Find(&component)
|
||||
if result.Error != nil || result.RowsAffected == 0 {
|
||||
err := result.Error
|
||||
if result.RowsAffected == 0 {
|
||||
err = fmt.Errorf("%w:please check update component conditions", errcode.ErrUpdateRowZero)
|
||||
}
|
||||
return "", fmt.Errorf("query component info failed:%w", err)
|
||||
}
|
||||
|
||||
updateParams := orm.Component{
|
||||
GlobalUUID: globalUUID,
|
||||
GridName: componentInfo.GridName,
|
||||
ZoneName: componentInfo.ZoneName,
|
||||
StationName: componentInfo.StationName,
|
||||
Tag: componentInfo.Tag,
|
||||
Name: componentInfo.Name,
|
||||
Context: componentInfo.Context,
|
||||
Op: componentInfo.Op,
|
||||
Ts: time.Now(),
|
||||
}
|
||||
|
||||
result = tx.Model(&orm.Component{}).WithContext(cancelCtx).Where("GLOBAL_UUID = ?", component.GlobalUUID).Updates(&updateParams)
|
||||
|
||||
if result.Error != nil || result.RowsAffected == 0 {
|
||||
err := result.Error
|
||||
if result.RowsAffected == 0 {
|
||||
err = fmt.Errorf("%w:please check update component conditions", errcode.ErrUpdateRowZero)
|
||||
}
|
||||
return "", fmt.Errorf("update component info failed:%w", err)
|
||||
}
|
||||
return component.GlobalUUID.String(), nil
|
||||
}
|
||||
|
|
@ -1,41 +0,0 @@
|
|||
// Package database define database operation functions
|
||||
package database
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"modelRT/common/errcode"
|
||||
"modelRT/model"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// UpdateModelIntoDB define update component model params of the circuit diagram into DB
|
||||
func UpdateModelIntoDB(ctx context.Context, tx *gorm.DB, componentID int64, componentType int, modelParas string) error {
|
||||
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
modelStruct := model.SelectModelByType(componentType)
|
||||
if modelStruct == nil {
|
||||
return fmt.Errorf("can not get component model by model type %d", componentType)
|
||||
}
|
||||
|
||||
err := jsoniter.Unmarshal([]byte(modelParas), modelStruct)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unmarshal component info by component struct %s,failed", model.SelectModelNameByType(componentType))
|
||||
}
|
||||
modelStruct.SetComponentID(componentID)
|
||||
|
||||
result := tx.Model(modelStruct).WithContext(cancelCtx).Where("component_id = ?", componentID).Updates(modelStruct)
|
||||
if result.Error != nil || result.RowsAffected == 0 {
|
||||
err := result.Error
|
||||
if result.RowsAffected == 0 {
|
||||
err = fmt.Errorf("%w:please check where conditions", errcode.ErrUpdateRowZero)
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,68 +0,0 @@
|
|||
// Package database define database operation functions
|
||||
package database
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"modelRT/common/errcode"
|
||||
"modelRT/constants"
|
||||
"modelRT/network"
|
||||
"modelRT/orm"
|
||||
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// UpdateTopologicIntoDB define update topologic info of the circuit diagram query by pageID and topologic info
|
||||
func UpdateTopologicIntoDB(ctx context.Context, tx *gorm.DB, pageID int64, changeInfo network.TopologicUUIDChangeInfos) error {
|
||||
var result *gorm.DB
|
||||
|
||||
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
switch changeInfo.ChangeType {
|
||||
case constants.UUIDFromChangeType:
|
||||
result = tx.WithContext(cancelCtx).Model(&orm.Topologic{}).Where("page_id = ? and uuid_from = ? and uuid_to = ?", pageID, changeInfo.OldUUIDFrom, changeInfo.OldUUIDTo).Updates(orm.Topologic{UUIDFrom: changeInfo.NewUUIDFrom})
|
||||
case constants.UUIDToChangeType:
|
||||
var delTopologic orm.Topologic
|
||||
result = tx.WithContext(cancelCtx).Model(&orm.Topologic{}).Where("page_id = ? and uuid_to = ?", pageID, changeInfo.NewUUIDTo).Find(&delTopologic)
|
||||
|
||||
if result.Error != nil {
|
||||
return fmt.Errorf("find topologic link by new_uuid_to failed:%w", result.Error)
|
||||
}
|
||||
|
||||
if result.RowsAffected == 1 {
|
||||
// delete old topologic link
|
||||
result = tx.WithContext(cancelCtx).Where("id = ?", delTopologic.ID).Delete(&delTopologic)
|
||||
|
||||
if result.Error != nil || result.RowsAffected == 0 {
|
||||
err := result.Error
|
||||
if result.RowsAffected == 0 {
|
||||
err = fmt.Errorf("%w:please check delete topologic where conditions", errcode.ErrDeleteRowZero)
|
||||
}
|
||||
return fmt.Errorf("del old topologic link by new_uuid_to failed:%w", err)
|
||||
}
|
||||
}
|
||||
|
||||
result = tx.WithContext(cancelCtx).Model(&orm.Topologic{}).Where("page_id = ? and uuid_from = ? and uuid_to = ?", pageID, changeInfo.OldUUIDFrom, changeInfo.OldUUIDTo).Updates(&orm.Topologic{UUIDTo: changeInfo.NewUUIDTo})
|
||||
case constants.UUIDAddChangeType:
|
||||
topologic := orm.Topologic{
|
||||
Flag: changeInfo.Flag,
|
||||
UUIDFrom: changeInfo.NewUUIDFrom,
|
||||
UUIDTo: changeInfo.NewUUIDTo,
|
||||
}
|
||||
result = tx.WithContext(cancelCtx).Create(&topologic)
|
||||
}
|
||||
// update 检查 result.RowsAffected 结果时,RowsAffected==0
|
||||
// 可能存在 result.Error 为 nil 的情况,谨慎使用 result.Error.Error()
|
||||
// 函数,避免造成空指针问题
|
||||
if result.Error != nil || result.RowsAffected == 0 {
|
||||
err := result.Error
|
||||
if result.RowsAffected == 0 {
|
||||
err = fmt.Errorf("%w:please check update topologic where conditions", errcode.ErrUpdateRowZero)
|
||||
}
|
||||
return fmt.Errorf("insert or update topologic link failed:%w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
351
deploy/deploy.md
351
deploy/deploy.md
|
|
@ -1,351 +0,0 @@
|
|||
# 项目依赖服务部署指南
|
||||
|
||||
本项目依赖于 $\text{PostgreSQL}$ 数据库和 $\text{Redis Stack Server}$(包含 $\text{Redisearch}$ 等模块)部署文档将使用 $\text{Docker}$ 容器化技术部署这两个依赖服务
|
||||
|
||||
## 前提条件
|
||||
|
||||
1. 已安装 $\text{Docker}$
|
||||
2. 下载相关容器镜像
|
||||
3. 确保主机的 $\text{5432}$ 端口($\text{Postgres}$)和 $\text{6379}$ 端口($\text{Redis}$)未被占用
|
||||
|
||||
### 1\. 部署 PostgreSQL 数据库
|
||||
|
||||
使用官方的 `postgres:13.16` 镜像,并设置默认的用户、密码和端口
|
||||
|
||||
#### 1.1 部署命令
|
||||
|
||||
运行以下命令启动 $\text{PostgreSQL}$ 容器
|
||||
|
||||
```bash
|
||||
docker run --name postgres \
|
||||
-e POSTGRES_USER=postgres \
|
||||
-e POSTGRES_PASSWORD=coslight \
|
||||
-p 5432:5432 \
|
||||
-d postgres:13.16
|
||||
```
|
||||
|
||||
#### 1.2 连接信息
|
||||
|
||||
| 参数 | 值 | 说明 |
|
||||
| :--- | :--- | :--- |
|
||||
| **容器名称** | `postgres` | 容器名 |
|
||||
| **镜像版本** | `postgres:13.16` | 镜像名 |
|
||||
| **主机端口** | `5432` | 外部应用连接使用的端口 |
|
||||
| **用户名** | `postgres` | 默认超级用户 |
|
||||
| **密码** | `coslight` | 配置的密码 |
|
||||
|
||||
#### 1.3 状态检查
|
||||
|
||||
要确认容器是否正在运行,请执行
|
||||
|
||||
```bash
|
||||
# 检查容器启动状态
|
||||
docker ps -a |grep postgres
|
||||
# 检查容器启动日志信息
|
||||
docker logs postgres
|
||||
```
|
||||
|
||||
### 2\. 部署 Redis Stack Server
|
||||
|
||||
我们将使用 `redis/redis-stack-server:latest` 镜像该镜像内置了 $\text{Redisearch}$ 模块,用于 $\text{ModelRT}$ 项目中补全功能
|
||||
|
||||
#### 2.1 部署命令
|
||||
|
||||
运行以下命令启动 $\text{Redis Stack Server}$ 容器
|
||||
|
||||
```bash
|
||||
docker run --name redis -p 6379:6379 \
|
||||
-d redis/redis-stack-server:latest
|
||||
```
|
||||
|
||||
#### 2.2 连接信息
|
||||
|
||||
| 参数 | 值 | 说明 |
|
||||
| :--- | :--- | :--- |
|
||||
| **容器名称** | `redis` | 容器名 |
|
||||
| **镜像版本** | `redis/redis-stack-server:latest` | 镜像名 |
|
||||
| **主机端口** | `6379` | 外部应用连接使用的端口 |
|
||||
| **地址** | `localhost:6379` | |
|
||||
| **密码** | **无** | 默认未设置密码 |
|
||||
|
||||
> **注意:** 生产环境中建议使用 `-e REDIS_PASSWORD=<your_secure_password>` 参数来设置 $\text{Redis}$ 访问密码
|
||||
|
||||
#### 2.3 状态检查
|
||||
|
||||
要确认容器是否正在运行,请执行
|
||||
|
||||
```bash
|
||||
# 检查容器启动状态
|
||||
docker ps -a |grep redis
|
||||
# 检查容器启动日志信息
|
||||
docker logs redis
|
||||
```
|
||||
|
||||
#### 2.4 数据注入
|
||||
|
||||
测试数据注入
|
||||
|
||||
##### 2.4.1 Postgres数据注入
|
||||
|
||||
```SQL
|
||||
insert into public.grid(id,tagname,name,description,op,ts) VALUES (1, 'grid1', '网格1', '测试网格1', -1,CURRENT_TIMESTAMP);
|
||||
|
||||
insert into public.zone(id,grid_id,tagname,name,description,op,ts) VALUES (1, 1,'zone1', '区域1_1', '测试区域1_1', -1,CURRENT_TIMESTAMP);
|
||||
|
||||
insert into public.station(id,zone_id,tagname,name,description,is_local,op,ts) VALUES (1, 1,'station1', '站1_1_1', '测试站1_1_1', true, -1,CURRENT_TIMESTAMP),
|
||||
(2, 1, 'station2', '站1_1_2', '测试站1_1_2', false, -1, CURRENT_TIMESTAMP);
|
||||
|
||||
INSERT INTO public.topologic(flag, uuid_from, uuid_to, context, description, op, ts)
|
||||
VALUES
|
||||
(1, '00000000-0000-0000-0000-000000000000', '70c190f2-8a60-42a9-b143-ec5f87e0aa6b', '{}', '', 1, CURRENT_TIMESTAMP),
|
||||
(1, '70c190f2-8a60-42a9-b143-ec5f87e0aa6b', '10f155cf-bd27-4557-85b2-d126b6e2657f', '{}', '', 1, CURRENT_TIMESTAMP),
|
||||
(1, '70c190f2-8a60-42a9-b143-ec5f87e0aa6b', 'e32bc0be-67f4-4d79-a5da-eaa40a5bd77d', '{}', '', 1, CURRENT_TIMESTAMP),
|
||||
(1, '70c190f2-8a60-42a9-b143-ec5f87e0aa6b', '70c190f2-8a75-42a9-b166-ec5f87e0aa6b', '{}', '', 1, CURRENT_TIMESTAMP),
|
||||
(1, 'e32bc0be-67f4-4d79-a5da-eaa40a5bd77d', '70c200f2-8a75-42a9-c166-bf5f87e0aa6b', '{}', '', 1, CURRENT_TIMESTAMP),
|
||||
(1, 'e32bc0be-67f4-4d79-a5da-eaa40a5bd77d', '968dd6e6-faec-4f78-b58a-d6e68426b09e', '{}', '', 1, CURRENT_TIMESTAMP),
|
||||
(1, 'e32bc0be-67f4-4d79-a5da-eaa40a5bd77d', '968dd6e6-faec-4f78-b58a-d6e68426b08e', '{}', '', 1, CURRENT_TIMESTAMP);
|
||||
|
||||
INSERT INTO public.bay (bay_uuid, name, tag, type, unom, fla, capacity, description, in_service, state, grid, zone, station, business, context, from_uuids, to_uuids, dev_protect, dev_fault_record, dev_status, dev_dyn_sense, dev_instruct, dev_etc, components, op, ts)
|
||||
VALUES (
|
||||
'18e71a24-694a-43fa-93a7-c4d02a27d1bc',
|
||||
'', '', '',
|
||||
-1, -1, -1,
|
||||
'',
|
||||
false,
|
||||
-1,
|
||||
'', '', '',
|
||||
'{}',
|
||||
'{}',
|
||||
'[]',
|
||||
'[]',
|
||||
'[]',
|
||||
'[]',
|
||||
'[]',
|
||||
'[]',
|
||||
'[]',
|
||||
'[]',
|
||||
ARRAY['968dd6e6-faec-4f78-b58a-d6e68426b09e', '968dd6e6-faec-4f78-b58a-d6e68426b08e']::uuid[],
|
||||
-1,
|
||||
CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
INSERT INTO public.component (global_uuid, nspath, tag, name, model_name, description, grid, zone, station, station_id, type, in_service, state, status, connection, label, context, op, ts)
|
||||
VALUES
|
||||
(
|
||||
'968dd6e6-faec-4f78-b58a-d6e68426b09e',
|
||||
'ns1', 'tag1', 'component1', 'bus_1', '',
|
||||
'grid1', 'zone1', 'station1', 1,
|
||||
-1,
|
||||
false,
|
||||
-1, -1,
|
||||
'{}',
|
||||
'{}',
|
||||
'{}',
|
||||
-1,
|
||||
CURRENT_TIMESTAMP
|
||||
),
|
||||
(
|
||||
'968dd6e6-faec-4f78-b58a-d6e68426b08e',
|
||||
'ns2', 'tag2', 'component2', 'bus_1', '',
|
||||
'grid1', 'zone1', 'station1', 1,
|
||||
-1,
|
||||
false,
|
||||
-1, -1,
|
||||
'{}',
|
||||
'{}',
|
||||
'{}',
|
||||
-1,
|
||||
CURRENT_TIMESTAMP
|
||||
),
|
||||
(
|
||||
'968dd6e6-faec-4f78-b58a-d6e88426b09e',
|
||||
'ns3', 'tag3', 'component3', 'bus_1', '',
|
||||
'grid1', 'zone1', 'station2', 2,
|
||||
-1,
|
||||
false,
|
||||
-1, -1,
|
||||
'{}',
|
||||
'{}',
|
||||
'{}',
|
||||
-1,
|
||||
CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
INSERT INTO public.measurement (id, tag, name, type, size, data_source, event_plan, bay_uuid, component_uuid, op, ts)
|
||||
VALUES
|
||||
(3, 'I11_C_rms', '45母甲侧互连电流C相1', -1, 200, '{"type": 1, "io_address": {"device": "ssu001", "channel": "TM1", "station": "001"}}', '{"cause": {"up": 55.0, "down": 45.0}, "action": {"command": "warning", "parameters": ["I段母线甲侧互连电流C相1"]}, "enable": true}', '18e71a24-694a-43fa-93a7-c4d02a27d1bc', '968dd6e6-faec-4f78-b58a-d6e68426b09e', -1, CURRENT_TIMESTAMP),
|
||||
(4, 'I11_B_rms', '45母甲侧互连电流B相1', -1, 300, '{"type": 1, "io_address": {"device": "ssu001", "channel": "TM2", "station": "001"}}', '{"cause": {"upup": 65, "downdown": 35}, "action": {"command": "warning", "parameters": ["I段母线甲侧互连电流B相1"]}, "enable": true}', '18e71a24-694a-43fa-93a7-c4d02a27d1bc', '968dd6e6-faec-4f78-b58a-d6e68426b09e', -1, CURRENT_TIMESTAMP),
|
||||
(5, 'I11_A_rms', '45母甲侧互连电流A相1', -1, 300, '{"type": 1, "io_address": {"device": "ssu001", "channel": "TM3", "station": "001"}}', '{"cause": {"up": 55, "down": 45, "upup": 65, "downdown": 35}, "action": {"command": "warning", "parameters": ["I段母线甲侧互连电流A相1"]}, "enable": true}', '18e71a24-694a-43fa-93a7-c4d02a27d1bc', '968dd6e6-faec-4f78-b58a-d6e68426b09e', -1, CURRENT_TIMESTAMP);
|
||||
|
||||
INSERT INTO public.project_manager (id, name, tag, meta_model, group_name, link_type, check_state, is_public, op, ts
|
||||
) VALUES
|
||||
(1, 'component', 'component', '', 'component', 0,
|
||||
'{"checkState": [{"name": "global_uuid", "type": "UUID", "checked": 1, "isVisible": 1, "defaultValue": "", "lengthPrecision": -1}, {"name": "nspath", "type": "VARCHAR(32)", "checked": 1, "isVisible": 1, "defaultValue": "", "lengthPrecision": 32}, {"name": "tag", "type": "VARCHAR(32)", "checked": 1, "isVisible": 1, "defaultValue": "null", "lengthPrecision": 32}, {"name": "name", "type": "VARCHAR(64)", "checked": 1, "isVisible": 1, "defaultValue": "null", "lengthPrecision": 64}, {"name": "description", "type": "VARCHAR(512)", "checked": 1, "isVisible": 1, "defaultValue": "", "lengthPrecision": 512}, {"name": "station", "type": "VARCHAR(64)", "checked": 1, "isVisible": 1, "defaultValue": "null", "lengthPrecision": 64}, {"name": "zone", "type": "VARCHAR(64)", "checked": 1, "isVisible": 1, "defaultValue": "null", "lengthPrecision": 64}, {"name": "grid", "type": "VARCHAR(64)", "checked": 1, "isVisible": 1, "defaultValue": "null", "lengthPrecision": 64}, {"name": "type", "type": "INTEGER", "checked": 1, "isVisible": 0, "defaultValue": "0", "lengthPrecision": -1}, {"name": "in_service", "type": "SMALLINT", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "state", "type": "INTEGER", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "connection", "type": "JSONB", "checked": 1, "isVisible": 1, "defaultValue": "{}", "lengthPrecision": -1}, {"name": "label", "type": "JSONB", "checked": 1, "isVisible": 1, "defaultValue": "{}", "lengthPrecision": -1}, {"name": "context", "type": "JSONB", "checked": 1, "isVisible": 0, "defaultValue": "{}", "lengthPrecision": -1}, {"name": "op", "type": "INTEGER", "checked": 1, "isVisible": 0, "defaultValue": "-1", "lengthPrecision": -1}, {"name": "ts", "type": "TIMESTAMP", "checked": 1, "isVisible": 0, "defaultValue": "null", "lengthPrecision": -1}, {"name": "model_name", "type": "VARCHAR(64)", "checked": 1, "isVisible": 0, "defaultValue": "null", "lengthPrecision": 64}, {"name": 
"status", "type": "SMALLINT", "checked": 1, "isVisible": 0, "defaultValue": "null", "lengthPrecision": -1}]}', TRUE, -1, CURRENT_TIMESTAMP
|
||||
),
|
||||
(2, 'bus_bus_1_base_extend', 'bus_1', 'bus', 'base_extend', 0,
|
||||
'{"checkState": [{"name": "bus_num", "type": "INTEGER", "checked": 1, "isVisible": 0, "defaultValue": "1", "lengthPrecision": -1}, {"name": "unom_kv", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "null", "lengthPrecision": -1}]}', FALSE, -1, CURRENT_TIMESTAMP
|
||||
),
|
||||
(3, 'bus_bus_1_model', 'bus_1', 'bus', 'model', 0,
|
||||
'{"checkState": [{"name": "ui_percent", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "100", "lengthPrecision": -1}, {"name": "ui_kv", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "35", "lengthPrecision": -1}, {"name": "ui_pa", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "stability_rated_current", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "1000", "lengthPrecision": -1}, {"name": "stability_dynamic_steady_current", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "40", "lengthPrecision": -1}, {"name": "load_adjustment_min", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "100", "lengthPrecision": -1}, {"name": "load_adjustment_max", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "100", "lengthPrecision": -1}, {"name": "bus_type", "type": "VARCHAR(10)", "checked": 1, "isVisible": 1, "defaultValue": "PQ母线", "lengthPrecision": 10}, {"name": "csc_s3_max", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "csc_s3_min", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "csc_i3_max", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "csc_i3_min", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "csc_z3s_max", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0.05", "lengthPrecision": -1}, {"name": "csc_z3s_min", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0.1", "lengthPrecision": -1}, {"name": "csc_s1_max", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "csc_s1_min", "type": 
"DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "csc_i1_max", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "csc_i1_min", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "csc_z1s_max", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0.05", "lengthPrecision": -1}, {"name": "csc_z1s_min", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0.1", "lengthPrecision": -1}, {"name": "csc_base_voltage", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "37", "lengthPrecision": -1}, {"name": "csc_base_capacity", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "100", "lengthPrecision": -1}]}', FALSE, -1, CURRENT_TIMESTAMP
|
||||
),
|
||||
(4, 'bus_bus_1_stable', 'bus_1', 'bus', 'stable', 0,
|
||||
'{"checkState": [{"name": "uvpw_threshold_percent", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "95", "lengthPrecision": -1}, {"name": "uvpw_runtime", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "10", "lengthPrecision": -1}, {"name": "uvw_threshold_percent", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "90", "lengthPrecision": -1}, {"name": "uvw_runtime", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "10", "lengthPrecision": -1}, {"name": "ovpw_threshold_percent", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "105", "lengthPrecision": -1}, {"name": "ovpw_runtime", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "60", "lengthPrecision": -1}, {"name": "ovw_threshold_percent", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "110", "lengthPrecision": -1}, {"name": "ovw_runtime", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "10", "lengthPrecision": -1}, {"name": "umargin_pmax", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "umargin_qmax", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "0", "lengthPrecision": -1}, {"name": "umargin_ulim", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "90", "lengthPrecision": -1}, {"name": "umargin_plim_percent", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "15", "lengthPrecision": -1}, {"name": "umargin_qlim_percent", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "15", "lengthPrecision": -1}, {"name": "umargin_ulim_percent", "type": "DOUBLE PRECISION", "checked": 1, "isVisible": 1, "defaultValue": "15", "lengthPrecision": -1}]}', FALSE, -1, CURRENT_TIMESTAMP);
|
||||
|
||||
INSERT INTO public.bus_bus_1_stable (id, global_uuid, attribute_group, uvpw_threshold_percent, uvpw_runtime, uvw_threshold_percent, uvw_runtime, ovpw_threshold_percent, ovpw_runtime, ovw_threshold_percent, ovw_runtime,
|
||||
umargin_pmax, umargin_qmax, umargin_ulim, umargin_plim_percent, umargin_qlim_percent, umargin_ulim_percent
|
||||
) VALUES (
|
||||
1,
|
||||
'968dd6e6-faec-4f78-b58a-d6e68426b08e',
|
||||
'stable',
|
||||
95,
|
||||
10,
|
||||
90,
|
||||
10,
|
||||
105,
|
||||
60,
|
||||
110,
|
||||
10,
|
||||
0,
|
||||
0,
|
||||
90,
|
||||
15,
|
||||
15,
|
||||
15
|
||||
);
|
||||
|
||||
INSERT INTO public.bus_bus_1_model (id, global_uuid, attribute_group,
|
||||
ui_percent, ui_kv, ui_pa, stability_rated_current, stability_dynamic_steady_current, load_adjustment_min, load_adjustment_max, bus_type, csc_s3_max, csc_s3_min, csc_i3_max, csc_i3_min, csc_z3s_max, csc_z3s_min, csc_s1_max, csc_s1_min, csc_i1_max, csc_i1_min, csc_z1s_max, csc_z1s_min, csc_base_voltage, csc_base_capacity
|
||||
) VALUES (
|
||||
1,
|
||||
'968dd6e6-faec-4f78-b58a-d6e68426b08e',
|
||||
'model',
|
||||
100,
|
||||
35,
|
||||
0,
|
||||
1000,
|
||||
40,
|
||||
100,
|
||||
100,
|
||||
'PQ母线',
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0.05,
|
||||
0.1,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0.05,
|
||||
0.1,
|
||||
37,
|
||||
100
|
||||
);
|
||||
|
||||
INSERT INTO public.bus_bus_1_base_extend (id, global_uuid, attribute_group,
|
||||
bus_num, unom_kv
|
||||
) VALUES (
|
||||
1,
|
||||
'968dd6e6-faec-4f78-b58a-d6e68426b08e',
|
||||
'base_extend',
|
||||
1,
|
||||
NULL
|
||||
);
|
||||
```
|
||||
|
||||
##### 2.4.2 Redis数据注入
|
||||
|
||||
Redis数据脚本
|
||||
|
||||
```shell
|
||||
deploy/redis-test-data/measurments-recommend/measurement_injection.go
|
||||
```
|
||||
|
||||
运行脚本向 Reids 导入数据
|
||||
|
||||
```shell
|
||||
go run deploy/redis-test-data/measurments-recommend/measurement_injection.go
|
||||
```
|
||||
|
||||
### 3\. 启动 ModelRT 服务
|
||||
|
||||
#### 3.1 配置服务配置文件
|
||||
|
||||
以下表格为配置文件参数说明表
|
||||
|
||||
| 类别 | 参数名 | 作用描述 | 示例值 |
|
||||
| :--- | :--- | :--- | :--- |
|
||||
| **Postgres** | `host` | PostgreSQL 数据库服务器的 $\text{IP}$ 地址或域名。 | `"192.168.1.101"` |
|
||||
| | `port` | PostgreSQL 数据库服务器的端口号。 | `5432` |
|
||||
| | `database` | 连接的数据库名称。 | `"demo"` |
|
||||
| | `user` | 连接数据库所使用的用户名。 | `"postgres"` |
|
||||
| | `password` | 连接数据库所使用的密码。 | `"coslight"` |
|
||||
| **Kafka** | `servers` | Kafka 集群的 $\text{Bootstrap Server}$ 地址列表(通常是 $\text{host:port}$ 形式,多个地址用逗号分隔)。 | `"localhost:9092"` |
|
||||
| | `port` | Kafka 服务器的端口号。 | `9092` |
|
||||
| | `group_id` | 消费者组 $\text{ID}$,用于标识和管理一组相关的消费者。 | `"modelRT"` |
|
||||
| | `topic` | Kafka 消息的主题名称。 | `""` |
|
||||
| | `auto_offset_reset` | 消费者首次启动或 $\text{Offset}$ 无效时,从哪个位置开始消费(如 `earliest` 或 `latest`)。 | `"earliest"` |
|
||||
| | `enable_auto_commit` | 是否自动提交 $\text{Offset}$。设为 $\text{false}$ 通常用于手动控制 $\text{Offset}$ 提交。 | `"false"` |
|
||||
| | `read_message_time_duration` | 读取消息时的超时或等待时间。 | `”0.5s"` |
|
||||
| **Logger (Zap)** | `mode` | 日志模式,通常为 `development`(开发)或 `production`(生产)。影响日志格式。 | `"development"` |
|
||||
| | `level` | 最低日志级别(如 $\text{debug, info, warn, error}$)。 | `"debug"` |
|
||||
| | `filepath` | 日志文件的输出路径和名称格式(`%s` 会被替换为日期等)。 | `"/Users/douxu/Workspace/coslight/modelRT/modelRT-%s.log"` |
|
||||
| | `maxsize` | 单个日志文件最大大小(单位:$\text{MB}$)。 | `1` |
|
||||
| | `maxbackups` | 保留旧日志文件的最大个数。 | `5` |
|
||||
| | `maxage` | 保留旧日志文件的最大天数。 | `30` |
|
||||
| | `compress` | 是否压缩备份的日志文件。 | `false` |
|
||||
| **Ants Pool** | `parse_concurrent_quantity` | 用于解析任务的协程池最大并发数量。 | `10` |
|
||||
| | `rtd_receive_concurrent_quantity` | 用于实时数据接收任务的协程池最大并发数量。 | `10` |
|
||||
| **Locker Redis** | `addr` | 分布式锁服务所使用的 $\text{Redis}$ 地址。 | `"127.0.0.1:6379"` |
|
||||
| | `password` | $\text{Locker Redis}$ 的密码。 | `""` |
|
||||
| | `db` | $\text{Locker Redis}$ 使用的数据库编号。 | `1` |
|
||||
| | `poolsize` | $\text{Locker Redis}$ 连接池的最大连接数。 | `50` |
|
||||
| | `timeout` | $\text{Locker Redis}$ 连接操作的超时时间(单位:毫秒)。 | `10` |
|
||||
| **Storage Redis** | `addr` | 数据存储服务所使用的 $\text{Redis}$ 地址(例如 $\text{Redisearch}$)。 | `"127.0.0.1:6379"` |
|
||||
| | `password` | $\text{Storage Redis}$ 的密码。 | `""` |
|
||||
| | `db` | $\text{Storage Redis}$ 使用的数据库编号。 | `0` |
|
||||
| | `poolsize` | $\text{Storage Redis}$ 连接池的最大连接数。 | `50` |
|
||||
| | `timeout` | $\text{Storage Redis}$ 连接操作的超时时间(单位:毫秒)。 | `10` |
|
||||
| **Base Config** | `grid_id` | 项目所操作的默认电网 $\text{ID}$。 | `1` |
|
||||
| | `zone_id` | 项目所操作的默认区域 $\text{ID}$。 | `1` |
|
||||
| | `station_id` | 项目所操作的默认变电站 $\text{ID}$。 | `1` |
|
||||
| **Service Config** | `service_name` | 服务名称,用于日志、监控等标识。 | `"modelRT"` |
|
||||
| | `secret_key` | 服务内部使用的秘钥,用于签名或认证。 | `"modelrt_key"` |
|
||||
| **DataRT API** | `host` | 外部 $\text{DataRT}$ 服务的主机地址。 | `"http://127.0.0.1"` |
|
||||
| | `port` | $\text{DataRT}$ 服务的端口号。 | `8888` |
|
||||
| | `polling_api` | 轮询数据的 $\text{API}$ 路径。 | `"datart/getPointData"` |
|
||||
| | `polling_api_method` | 调用该 $\text{API}$ 使用的 $\text{HTTP}$ 方法。 | `"GET"` |
|
||||
|
||||
#### 3.2 编译 ModelRT 服务
|
||||
|
||||
```bash
|
||||
go build -o model-rt main.go
|
||||
```
|
||||
|
||||
#### 3.3 启动服务
|
||||
|
||||
使用编译好的二进制文件进行启动
|
||||
|
||||
```bash
|
||||
./model-rt
|
||||
```
|
||||
|
||||
#### 3.4 检测服务启动日志
|
||||
|
||||
在发现控制台输出如下信息`starting ModelRT server`
|
||||
后即代表服务启动成功
|
||||
|
||||
### 4\. 后续操作(停止与清理)
|
||||
|
||||
#### 4.1 停止容器
|
||||
|
||||
```bash
|
||||
docker stop postgres redis
|
||||
```
|
||||
|
||||
#### 4.2 删除容器(删除后数据将丢失)
|
||||
|
||||
```bash
|
||||
docker rm postgres redis
|
||||
```
|
||||
|
|
@ -1,19 +0,0 @@
|
|||
FROM golang:1.24-alpine AS builder
|
||||
|
||||
WORKDIR /app
|
||||
COPY go.mod .
|
||||
COPY go.sum .
|
||||
RUN GOPROXY="https://goproxy.cn,direct" go mod download
|
||||
COPY . .
|
||||
RUN CGO_ENABLED=0 go build -ldflags="-s -w" -o modelrt main.go
|
||||
|
||||
FROM alpine:latest
|
||||
WORKDIR /app
|
||||
ARG USER_ID=1000
|
||||
RUN adduser -D -u ${USER_ID} modelrt
|
||||
COPY --from=builder /app/modelrt ./modelrt
|
||||
COPY configs/config.example.yaml ./configs/config.example.yaml
|
||||
RUN chown -R modelrt:modelrt /app
|
||||
RUN chmod +x /app/modelrt
|
||||
USER modelrt
|
||||
CMD ["/app/modelrt", "-modelRT_config_dir=/app/configs"]
|
||||
|
|
@ -1,327 +0,0 @@
|
|||
// Package main implement redis test data injection
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/RediSearch/redisearch-go/v2/redisearch"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
var ac *redisearch.Autocompleter
|
||||
|
||||
// InitAutocompleterWithPool define func of initialize the Autocompleter with redigo pool
|
||||
func init() {
|
||||
// ac = redisearch.NewAutocompleterFromPool(pool, redisSearchDictName)
|
||||
ac = redisearch.NewAutocompleter("localhost:6379", redisSearchDictName)
|
||||
}
|
||||
|
||||
const (
|
||||
gridKeysSet = "grid_tag_keys"
|
||||
zoneKeysSet = "zone_tag_keys"
|
||||
stationKeysSet = "station_tag_keys"
|
||||
componentNSPathKeysSet = "component_nspath_keys"
|
||||
componentTagKeysSet = "component_tag_keys"
|
||||
configKeysSet = "config_keys"
|
||||
measurementTagKeysSet = "measurement_tag_keys"
|
||||
|
||||
// Grid -> Zone (e.g., grid1_zones_keys)
|
||||
gridZoneSetKeyFormat = "grid%d_zone_tag_keys"
|
||||
// Zone -> Station (e.g., zone1_1_stations_keys)
|
||||
zoneStationSetKeyFormat = "zone%d_%d_station_tag_keys"
|
||||
// Station -> NSPath (e.g., station1_1_1_components_nspath_keys)
|
||||
stationNSPathKeyFormat = "station%d_%d_%d_component_nspath_keys"
|
||||
// NSPath -> CompTag (e.g., ns1_1_1_1_components_tag_keys)
|
||||
nsPathCompTagKeyFormat = "ns%d_%d_%d_%d_component_tag_keys"
|
||||
// CompTag -> Measurement (e.g., comptag1_1_1_1_1_measurement_keys)
|
||||
compTagMeasKeyFormat = "comptag%d_%d_%d_%d_%d_measurement_tag_keys"
|
||||
)
|
||||
|
||||
const (
|
||||
redisSearchDictName = "search_suggestions_dict"
|
||||
defaultScore = 1.0
|
||||
)
|
||||
|
||||
var configMetrics = []any{
|
||||
"component", "base_extend", "rated", "setup", "model",
|
||||
"stable", "bay", "craft", "integrity", "behavior",
|
||||
}
|
||||
|
||||
func bulkInsertAllHierarchySets(ctx context.Context, rdb *redis.Client) error {
|
||||
log.Println("starting bulk insertion of Redis hierarchy sets")
|
||||
|
||||
if err := insertStaticSets(ctx, rdb); err != nil {
|
||||
return fmt.Errorf("static set insertion failed: %w", err)
|
||||
}
|
||||
|
||||
if err := insertDynamicHierarchy(ctx, rdb); err != nil {
|
||||
return fmt.Errorf("dynamic hierarchy insertion failed: %w", err)
|
||||
}
|
||||
|
||||
if err := insertAllHierarchySuggestions(ac); err != nil {
|
||||
return fmt.Errorf("dynamic hierarchy insertion failed: %w", err)
|
||||
}
|
||||
|
||||
log.Println("bulk insertion complete")
|
||||
return nil
|
||||
}
|
||||
|
||||
func insertStaticSets(ctx context.Context, rdb *redis.Client) error {
|
||||
// grid_keys
|
||||
if err := rdb.SAdd(ctx, gridKeysSet, "grid1", "grid2", "grid3").Err(); err != nil {
|
||||
return fmt.Errorf("sadd failed for %s: %w", gridKeysSet, err)
|
||||
}
|
||||
|
||||
// zone_keys (3x3 = 9 members)
|
||||
zoneMembers := make([]any, 0, 9)
|
||||
for i := 1; i <= 3; i++ {
|
||||
for j := 1; j <= 3; j++ {
|
||||
zoneMembers = append(zoneMembers, fmt.Sprintf("zone%d_%d", i, j))
|
||||
}
|
||||
}
|
||||
if err := rdb.SAdd(ctx, zoneKeysSet, zoneMembers...).Err(); err != nil {
|
||||
return fmt.Errorf("sadd failed for %s: %w", zoneKeysSet, err)
|
||||
}
|
||||
|
||||
// config_keys
|
||||
if err := rdb.SAdd(ctx, configKeysSet, "bay").Err(); err != nil {
|
||||
return fmt.Errorf("sadd failed for %s: %w", configKeysSet, err)
|
||||
}
|
||||
|
||||
log.Println("Static sets (grid_keys, zone_keys, config_keys) inserted.")
|
||||
return nil
|
||||
}
|
||||
|
||||
func insertDynamicHierarchy(ctx context.Context, rdb *redis.Client) error {
|
||||
allStationKeys := make([]any, 0, 27)
|
||||
allNSPathKeys := make([]any, 0, 81)
|
||||
allCompTagKeys := make([]any, 0, 243)
|
||||
allMeasurementTagKeys := make([]any, 0, 729)
|
||||
|
||||
// S: Grid Prefix (1-3)
|
||||
for S := 1; S <= 3; S++ {
|
||||
// Grid-Zone Set Key: gridS_zones_keys
|
||||
gridZoneKey := fmt.Sprintf(gridZoneSetKeyFormat, S)
|
||||
gridZoneMembers := make([]any, 0, 3)
|
||||
|
||||
// Y: Zone Index (1-3)
|
||||
for Y := 1; Y <= 3; Y++ {
|
||||
zoneID := fmt.Sprintf("%d_%d", S, Y)
|
||||
zoneMember := "zone" + zoneID
|
||||
gridZoneMembers = append(gridZoneMembers, zoneMember)
|
||||
|
||||
// Zone-Station Set Key: zoneS_Y_stations_keys
|
||||
zoneStationKey := fmt.Sprintf(zoneStationSetKeyFormat, S, Y)
|
||||
zoneStationMembers := make([]any, 0, 3)
|
||||
|
||||
// Z: Station Index (1-3)
|
||||
for Z := 1; Z <= 3; Z++ {
|
||||
stationID := fmt.Sprintf("%d_%d_%d", S, Y, Z)
|
||||
stationKey := "station" + stationID
|
||||
allStationKeys = append(allStationKeys, stationKey)
|
||||
zoneStationMembers = append(zoneStationMembers, stationKey)
|
||||
|
||||
// Station-NSPath Set Key: stationS_Y_Z_components_nspath_keys
|
||||
stationNSPathKey := fmt.Sprintf(stationNSPathKeyFormat, S, Y, Z)
|
||||
stationNSMembers := make([]any, 0, 3)
|
||||
|
||||
// D: NSPath Index (1-3)
|
||||
for D := 1; D <= 3; D++ {
|
||||
nsPathID := fmt.Sprintf("%s_%d", stationID, D)
|
||||
nsPathKey := "ns" + nsPathID
|
||||
allNSPathKeys = append(allNSPathKeys, nsPathKey)
|
||||
stationNSMembers = append(stationNSMembers, nsPathKey)
|
||||
|
||||
// NSPath-CompTag Set Key: nsS_Y_Z_D_components_tag_keys
|
||||
nsCompTagKey := fmt.Sprintf(nsPathCompTagKeyFormat, S, Y, Z, D)
|
||||
nsCompTagMembers := make([]any, 0, 3)
|
||||
|
||||
// I: CompTag Index (1-3)
|
||||
for I := 1; I <= 3; I++ {
|
||||
compTagID := fmt.Sprintf("%s_%d", nsPathID, I)
|
||||
compTagKey := "comptag" + compTagID
|
||||
allCompTagKeys = append(allCompTagKeys, compTagKey)
|
||||
nsCompTagMembers = append(nsCompTagMembers, compTagKey)
|
||||
|
||||
// CompTag-Measurement Set Key: comptagS_Y_Z_D_I_measurement_keys
|
||||
compTagMeasKey := fmt.Sprintf(compTagMeasKeyFormat, S, Y, Z, D, I)
|
||||
compTagMeasMembers := make([]any, 0, 3)
|
||||
|
||||
// M: Measurement Index (1-3)
|
||||
for M := 1; M <= 3; M++ {
|
||||
measurementID := fmt.Sprintf("%s_%d", compTagID, M)
|
||||
measurementKey := "meas" + measurementID
|
||||
allMeasurementTagKeys = append(allMeasurementTagKeys, measurementKey)
|
||||
compTagMeasMembers = append(compTagMeasMembers, measurementKey)
|
||||
}
|
||||
|
||||
if err := rdb.SAdd(ctx, compTagMeasKey, compTagMeasMembers...).Err(); err != nil {
|
||||
return fmt.Errorf("sadd failed for %s: %w", compTagMeasKey, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := rdb.SAdd(ctx, nsCompTagKey, nsCompTagMembers...).Err(); err != nil {
|
||||
return fmt.Errorf("sadd failed for %s: %w", nsCompTagKey, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := rdb.SAdd(ctx, stationNSPathKey, stationNSMembers...).Err(); err != nil {
|
||||
return fmt.Errorf("sadd failed for %s: %w", stationNSPathKey, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := rdb.SAdd(ctx, zoneStationKey, zoneStationMembers...).Err(); err != nil {
|
||||
return fmt.Errorf("sadd failed for %s: %w", zoneStationKey, err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if err := rdb.SAdd(ctx, gridZoneKey, gridZoneMembers...).Err(); err != nil {
|
||||
return fmt.Errorf("sadd failed for %s: %w", gridZoneKey, err)
|
||||
}
|
||||
}
|
||||
|
||||
// 插入所有顶层动态 Set (将所有成员一次性插入到全局 Set 中)
|
||||
if err := rdb.SAdd(ctx, stationKeysSet, allStationKeys...).Err(); err != nil {
|
||||
return fmt.Errorf("sadd failed for %s: %w", stationKeysSet, err)
|
||||
}
|
||||
if err := rdb.SAdd(ctx, componentNSPathKeysSet, allNSPathKeys...).Err(); err != nil {
|
||||
return fmt.Errorf("sadd failed for %s: %w", componentNSPathKeysSet, err)
|
||||
}
|
||||
if err := rdb.SAdd(ctx, componentTagKeysSet, allCompTagKeys...).Err(); err != nil {
|
||||
return fmt.Errorf("sadd failed for %s: %w", componentTagKeysSet, err)
|
||||
}
|
||||
if err := rdb.SAdd(ctx, measurementTagKeysSet, allMeasurementTagKeys...).Err(); err != nil {
|
||||
return fmt.Errorf("sadd failed for %s: %w", measurementTagKeysSet, err)
|
||||
}
|
||||
|
||||
log.Printf("inserted %d stations, %d nspaths, %d comptags, and %d measurements.\n",
|
||||
len(allStationKeys), len(allNSPathKeys), len(allCompTagKeys), len(allMeasurementTagKeys))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func insertAllHierarchySuggestions(ac *redisearch.Autocompleter) error {
|
||||
suggestions := make([]redisearch.Suggestion, 0, 10000)
|
||||
// S: grid Index (1-3)
|
||||
for S := 1; S <= 3; S++ {
|
||||
gridStr := fmt.Sprintf("grid%d", S)
|
||||
suggestions = append(suggestions, redisearch.Suggestion{Term: gridStr, Score: defaultScore})
|
||||
|
||||
// Y: zone Index (1-3)
|
||||
for Y := 1; Y <= 3; Y++ {
|
||||
zoneStr := fmt.Sprintf("zone%d_%d", S, Y)
|
||||
gridZonePath := fmt.Sprintf("%s.%s", gridStr, zoneStr)
|
||||
suggestions = append(suggestions, redisearch.Suggestion{Term: gridZonePath, Score: defaultScore})
|
||||
|
||||
// Z: station Index (1-3)
|
||||
for Z := 1; Z <= 3; Z++ {
|
||||
stationStr := fmt.Sprintf("station%d_%d_%d", S, Y, Z)
|
||||
gridZoneStationPath := fmt.Sprintf("%s.%s", gridZonePath, stationStr)
|
||||
suggestions = append(suggestions, redisearch.Suggestion{Term: gridZoneStationPath, Score: defaultScore})
|
||||
|
||||
// D: nsPath Index (1-3)
|
||||
for D := 1; D <= 3; D++ {
|
||||
nsPathStr := fmt.Sprintf("ns%d_%d_%d_%d", S, Y, Z, D)
|
||||
gridZoneStationNSPath := fmt.Sprintf("%s.%s", gridZoneStationPath, nsPathStr)
|
||||
suggestions = append(suggestions, redisearch.Suggestion{Term: gridZoneStationNSPath, Score: defaultScore})
|
||||
|
||||
// I: compTag Index (1-3)
|
||||
for I := 1; I <= 3; I++ {
|
||||
compTagStr := fmt.Sprintf("comptag%d_%d_%d_%d_%d", S, Y, Z, D, I)
|
||||
fullCompTagPath := fmt.Sprintf("%s.%s", gridZoneStationNSPath, compTagStr)
|
||||
suggestions = append(suggestions, redisearch.Suggestion{Term: fullCompTagPath, Score: defaultScore})
|
||||
fullConfigPath := fmt.Sprintf("%s.%s", fullCompTagPath, "bay")
|
||||
suggestions = append(suggestions, redisearch.Suggestion{Term: fullConfigPath, Score: defaultScore})
|
||||
// J: measTag Index (1-3)
|
||||
for J := 1; J <= 3; J++ {
|
||||
measTagStr := fmt.Sprintf("meas%d_%d_%d_%d_%d_%d", S, Y, Z, D, I, J)
|
||||
fullMeasurementPath := fmt.Sprintf("%s.%s", fullCompTagPath, measTagStr)
|
||||
suggestions = append(suggestions, redisearch.Suggestion{Term: fullMeasurementPath, Score: defaultScore})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("generated %d suggestions. starting bulk insertion into dictionary '%s'.", len(suggestions), redisSearchDictName)
|
||||
|
||||
// del ac suggestion
|
||||
ac.Delete()
|
||||
|
||||
err := ac.AddTerms(suggestions...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to add %d suggestions: %w", len(suggestions), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func deleteAllHierarchySets(ctx context.Context, rdb *redis.Client) error {
|
||||
log.Println("starting to collect all Redis Set keys for deletion...")
|
||||
|
||||
keysToDelete := []string{
|
||||
gridKeysSet,
|
||||
zoneKeysSet,
|
||||
stationKeysSet,
|
||||
componentNSPathKeysSet,
|
||||
componentTagKeysSet,
|
||||
configKeysSet,
|
||||
measurementTagKeysSet,
|
||||
}
|
||||
|
||||
for S := 1; S <= 3; S++ {
|
||||
keysToDelete = append(keysToDelete, fmt.Sprintf(gridZoneSetKeyFormat, S))
|
||||
|
||||
for Y := 1; Y <= 3; Y++ {
|
||||
keysToDelete = append(keysToDelete, fmt.Sprintf(zoneStationSetKeyFormat, S, Y))
|
||||
|
||||
for Z := 1; Z <= 3; Z++ {
|
||||
keysToDelete = append(keysToDelete, fmt.Sprintf(stationNSPathKeyFormat, S, Y, Z))
|
||||
|
||||
for D := 1; D <= 3; D++ {
|
||||
keysToDelete = append(keysToDelete, fmt.Sprintf(nsPathCompTagKeyFormat, S, Y, Z, D))
|
||||
|
||||
for I := 1; I <= 3; I++ {
|
||||
keysToDelete = append(keysToDelete, fmt.Sprintf(compTagMeasKeyFormat, S, Y, Z, D, I))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("collected %d unique keys. Starting batch deletion...", len(keysToDelete))
|
||||
|
||||
deletedCount, err := rdb.Del(ctx, keysToDelete...).Result()
|
||||
if err != nil {
|
||||
return fmt.Errorf("batch deletion failed: %w", err)
|
||||
}
|
||||
|
||||
log.Printf("Successfully deleted %d keys (Sets) from Redis.", deletedCount)
|
||||
return nil
|
||||
}
|
||||
|
||||
// main connects to the local Redis instance, removes any hierarchy Sets left
// over from a previous run, then bulk-inserts the full test hierarchy.
func main() {
	// Local test instance: no auth, default DB.
	rdb := redis.NewClient(&redis.Options{
		Addr:     "localhost:6379",
		Password: "",
		DB:       0,
	})

	ctx := context.Background()

	// Fail fast when Redis is unreachable.
	if err := rdb.Ping(ctx).Err(); err != nil {
		log.Fatalf("could not connect to Redis: %v", err)
	}
	log.Println("connected to Redis successfully")

	// Clean slate: drop all previously inserted hierarchy Sets first.
	if err := deleteAllHierarchySets(ctx, rdb); err != nil {
		log.Fatalf("error delete exist set before bulk insertion: %v", err)
	}

	if err := bulkInsertAllHierarchySets(ctx, rdb); err != nil {
		log.Fatalf("error during bulk insertion: %v", err)
	}
}
|
||||
|
|
@ -1,224 +0,0 @@
|
|||
// Package main implement redis test data injection
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"modelRT/orm"
|
||||
|
||||
util "modelRT/deploy/redis-test-data/util"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
"gorm.io/driver/postgres"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
const (
|
||||
redisAddr = "localhost:6379"
|
||||
)
|
||||
|
||||
var globalRedisClient *redis.Client
|
||||
|
||||
var (
|
||||
highEnd, highStart, lowStart, lowEnd int
|
||||
totalLength int
|
||||
highSegmentLength int
|
||||
lowSegmentLength int
|
||||
)
|
||||
|
||||
// selectRandomInt returns 0 or 2 with equal probability. The result is used
// as a base index into the changes slice (limit pairs start at even offsets).
func selectRandomInt() int {
	// Uniform over {0, 2}: draw 0 or 1 and scale — no slice allocation needed.
	return rand.Intn(2) * 2
}
|
||||
|
||||
// generateMixedData define func to generate a set of floating-point data that meets specific conditions
|
||||
func generateMixedData(highMin, lowMin, highBase, lowBase, baseValue, normalBase float64) []float64 {
|
||||
totalLength = 500
|
||||
highSegmentLength = 20
|
||||
lowSegmentLength = 20
|
||||
|
||||
seed := time.Now().UnixNano()
|
||||
source := rand.NewSource(seed)
|
||||
r := rand.New(source)
|
||||
|
||||
data := make([]float64, totalLength)
|
||||
highStart = rand.Intn(totalLength - highSegmentLength - lowSegmentLength - 1)
|
||||
highEnd = highStart + highSegmentLength
|
||||
lowStart = rand.Intn(totalLength-lowSegmentLength-highEnd) + highEnd
|
||||
lowEnd = lowStart + lowSegmentLength
|
||||
|
||||
for i := 0; i < totalLength; i++ {
|
||||
if i >= highStart && i < highStart+highSegmentLength {
|
||||
// 数据值均大于 55.0,在 [55.5, 60.0] 范围内随机
|
||||
// rand.Float64() 生成 [0.0, 1.0) 范围的浮点数
|
||||
data[i] = highMin + r.Float64()*(highBase)
|
||||
} else if i >= lowStart && i < lowStart+lowSegmentLength {
|
||||
// 数据值均小于 45.0,在 [40.0, 44.5] 范围内随机
|
||||
data[i] = lowMin + r.Float64()*(lowBase)
|
||||
} else {
|
||||
// 数据在 [45.0, 55.0] 范围内随机 (baseValue ± 5)
|
||||
// 50 + rand.Float64() * 10 - 5
|
||||
change := normalBase - r.Float64()*normalBase*2
|
||||
data[i] = baseValue + change
|
||||
}
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
func generateNormalData(baseValue, normalBase float64) []float64 {
|
||||
totalLength = 500
|
||||
seed := time.Now().UnixNano()
|
||||
source := rand.NewSource(seed)
|
||||
r := rand.New(source)
|
||||
|
||||
data := make([]float64, totalLength)
|
||||
for i := 0; i < totalLength; i++ {
|
||||
change := normalBase - r.Float64()*normalBase*2
|
||||
data[i] = baseValue + change
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
func main() {
|
||||
rootCtx := context.Background()
|
||||
|
||||
pgURI := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s", "192.168.1.101", 5432, "postgres", "coslight", "demo")
|
||||
|
||||
postgresDBClient, err := gorm.Open(postgres.Open(pgURI))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer func() {
|
||||
sqlDB, err := postgresDBClient.DB()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
sqlDB.Close()
|
||||
}()
|
||||
|
||||
cancelCtx, cancel := context.WithTimeout(rootCtx, 5*time.Second)
|
||||
defer cancel()
|
||||
var measurements []orm.Measurement
|
||||
result := postgresDBClient.WithContext(cancelCtx).Find(&measurements)
|
||||
if result.Error != nil {
|
||||
panic(result.Error)
|
||||
}
|
||||
log.Println("总共读取到测量点数量:", len(measurements))
|
||||
measInfos := util.ProcessMeasurements(measurements)
|
||||
|
||||
globalRedisClient = util.InitRedisClient(redisAddr)
|
||||
rCancelCtx, cancel := context.WithCancel(rootCtx)
|
||||
defer cancel()
|
||||
|
||||
for key, measInfo := range measInfos {
|
||||
randomType := selectRandomType()
|
||||
var datas []float64
|
||||
if randomType {
|
||||
// 生成正常数据
|
||||
log.Printf("key:%s generate normal data\n", key)
|
||||
baseValue := measInfo.BaseValue
|
||||
changes := measInfo.Changes
|
||||
normalBase := changes[0]
|
||||
noramlMin := baseValue - normalBase
|
||||
normalMax := baseValue + normalBase
|
||||
datas = generateNormalData(baseValue, normalBase)
|
||||
allTrue := true
|
||||
|
||||
for i := 0; i < totalLength-1; i++ {
|
||||
value := datas[i]
|
||||
// log.Printf("index:%d, value:%.2f\n", i, value)
|
||||
if value < noramlMin && value > normalMax {
|
||||
allTrue = false
|
||||
}
|
||||
}
|
||||
log.Printf("// 验证结果: 所有值是否 >= %.2f或 <= %.2f %t\n", noramlMin, normalMax, allTrue)
|
||||
} else {
|
||||
// 生成异常数据
|
||||
log.Printf("key:%s generate abnormal data\n", key)
|
||||
var highMin, highBase float64
|
||||
var lowMin, lowBase float64
|
||||
var normalBase float64
|
||||
|
||||
// TODO 生成一次测试数据
|
||||
changes := measInfo.Changes
|
||||
baseValue := measInfo.BaseValue
|
||||
if len(changes) == 2 {
|
||||
highMin = baseValue + changes[0]
|
||||
lowMin = baseValue + changes[1]
|
||||
highBase = changes[0]
|
||||
lowBase = changes[1]
|
||||
normalBase = changes[0]
|
||||
} else {
|
||||
randomIndex := selectRandomInt()
|
||||
highMin = baseValue + changes[randomIndex]
|
||||
lowMin = baseValue + changes[randomIndex+1]
|
||||
highBase = changes[randomIndex]
|
||||
lowBase = changes[randomIndex+1]
|
||||
normalBase = changes[0]
|
||||
}
|
||||
|
||||
datas = generateMixedData(highMin, lowMin, highBase, lowBase, baseValue, normalBase)
|
||||
// log.Printf("key:%s\n datas:%v\n", key, datas)
|
||||
|
||||
allHigh := true
|
||||
for i := highStart; i < highEnd; i++ {
|
||||
if datas[i] <= highMin {
|
||||
allHigh = false
|
||||
break
|
||||
}
|
||||
}
|
||||
log.Printf("// 验证结果 (高值段在 %d-%d): 所有值是否 > %.2f? %t\n", highStart, highEnd-1, highMin, allHigh)
|
||||
|
||||
allLow := true
|
||||
for i := lowStart; i < lowEnd; i++ {
|
||||
if datas[i] >= lowMin {
|
||||
allLow = false
|
||||
break
|
||||
}
|
||||
}
|
||||
log.Printf("// 验证结果 (低值段在 %d-%d): 所有值是否 < %.2f? %t\n", lowStart, lowEnd-1, lowMin, allLow)
|
||||
|
||||
allTrue := true
|
||||
for i := 0; i < totalLength-1; i++ {
|
||||
value := datas[i]
|
||||
if i < highStart || (i >= highEnd && i < lowStart) || i >= lowEnd {
|
||||
// log.Printf("index:%d, value:%.2f\n", i, value)
|
||||
if value >= highMin && value <= lowMin {
|
||||
allTrue = false
|
||||
}
|
||||
}
|
||||
}
|
||||
log.Printf("// 验证结果 (正常段在 %d-%d): 所有值是否 <= %.2f或>= %.2f %t\n", 0, totalLength-1, highMin, lowMin, allTrue)
|
||||
}
|
||||
log.Printf("启动数据写入程序, Redis Key: %s, 基准值: %.4f, 变化范围: %+v\n", key, measInfo.BaseValue, measInfo.Changes)
|
||||
pipe := globalRedisClient.Pipeline()
|
||||
redisZs := make([]redis.Z, 0, totalLength)
|
||||
currentTime := time.Now().UnixNano()
|
||||
for i := range totalLength {
|
||||
sequentialTime := currentTime + int64(i)
|
||||
z := redis.Z{
|
||||
Score: datas[i],
|
||||
Member: strconv.FormatInt(sequentialTime, 10),
|
||||
}
|
||||
redisZs = append(redisZs, z)
|
||||
}
|
||||
log.Printf("启动数据写入程序, Redis Key: %s, 写入数据量: %d\n", key, len(redisZs))
|
||||
pipe.ZAdd(rCancelCtx, key, redisZs...)
|
||||
_, err = pipe.Exec(rCancelCtx)
|
||||
if err != nil {
|
||||
log.Printf("redis pipeline execution failed: %v\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// selectRandomType decides, with 50/50 probability, whether a measurement
// receives normal data (true) or anomaly-injected data (false).
func selectRandomType() bool {
	// The original built an options slice whose values were never read —
	// only the random index mattered. This is the same coin flip.
	return rand.Intn(2) != 0
}
|
||||
|
|
@ -1,449 +0,0 @@
|
|||
// Package main implement redis test data injection
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strconv"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"modelRT/deploy/redis-test-data/util"
|
||||
"modelRT/orm"
|
||||
|
||||
redis "github.com/redis/go-redis/v9"
|
||||
"gorm.io/driver/postgres"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// Redis配置
|
||||
const (
|
||||
redisAddr = "localhost:6379"
|
||||
)
|
||||
|
||||
var globalRedisClient *redis.Client
|
||||
|
||||
// outlierConfig controls how anomaly ("outlier") segments are injected into
// generated sample data.
type outlierConfig struct {
	Enabled      bool    // whether to inject outlier segments at all
	Count        int     // number of segments (0 = random 1-3, otherwise the requested count, capped to what fits)
	MinLength    int     // minimum length of a contiguous outlier segment
	MaxLength    int     // maximum length of a contiguous outlier segment
	Intensity    float64 // how far beyond the bounds values go (1.0 = slight, 2.0 = significant)
	Distribution string  // "both" = above and below, "upper" = only above, "lower" = only below
}
|
||||
|
||||
// GenerateFloatSliceWithOutliers 生成包含连续异常段的数据
|
||||
// baseValue: 基准值
|
||||
// changes: 变化范围,每2个元素为一组 [minChange1, maxChange1, minChange2, maxChange2, ...]
|
||||
// size: 生成的切片长度
|
||||
// variationType: 变化类型
|
||||
// outlierConfig: 异常段配置
|
||||
func generateFloatSliceWithOutliers(baseValue float64, changes []float64, size int, variationType string, outlierConfig outlierConfig) ([]float64, error) {
|
||||
// 先生成正常数据
|
||||
data, err := generateFloatSlice(baseValue, changes, size, variationType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// 插入异常段
|
||||
if outlierConfig.Enabled {
|
||||
data = insertOutliers(data, baseValue, changes, outlierConfig)
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// 插入异常段
|
||||
func insertOutliers(data []float64, baseValue float64, changes []float64, config outlierConfig) []float64 {
|
||||
if len(data) == 0 || !config.Enabled {
|
||||
return data
|
||||
}
|
||||
|
||||
// 获取变化范围的边界
|
||||
minBound, maxBound := getChangeBounds(baseValue, changes)
|
||||
// TODO delete
|
||||
log.Printf("获取变化范围的边界,min:%.4f,max:%.4f\n", minBound, maxBound)
|
||||
|
||||
// 确定异常段数量
|
||||
outlierCount := config.Count
|
||||
if outlierCount == 0 {
|
||||
// 随机生成1-3个异常段
|
||||
outlierCount = rand.Intn(3) + 1
|
||||
}
|
||||
|
||||
// 计算最大可能的异常段数量
|
||||
maxPossibleOutliers := len(data) / (config.MinLength + 10)
|
||||
if outlierCount > maxPossibleOutliers {
|
||||
outlierCount = maxPossibleOutliers
|
||||
}
|
||||
|
||||
// 生成异常段位置
|
||||
segments := generateOutlierSegments(len(data), config.MinLength, config.MaxLength, outlierCount, config.Distribution)
|
||||
// TODO 调试信息待删除
|
||||
log.Printf("生成异常段位置:%+v\n", segments)
|
||||
// 插入异常数据
|
||||
for _, segment := range segments {
|
||||
data = insertOutlierSegment(data, segment, minBound, maxBound, config)
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
// 获取变化范围的边界
|
||||
func getChangeBounds(baseValue float64, changes []float64) (minBound, maxBound float64) {
|
||||
if len(changes) == 0 {
|
||||
return baseValue - 10, baseValue + 10
|
||||
}
|
||||
|
||||
ranges := normalizeRanges(changes)
|
||||
minBound, maxBound = baseValue+ranges[0][0], baseValue+ranges[0][1]
|
||||
|
||||
for _, r := range ranges {
|
||||
if baseValue+r[0] < minBound {
|
||||
minBound = baseValue + r[0]
|
||||
}
|
||||
if baseValue+r[1] > maxBound {
|
||||
maxBound = baseValue + r[1]
|
||||
}
|
||||
}
|
||||
|
||||
return minBound, maxBound
|
||||
}
|
||||
|
||||
// OutlierSegment describes one contiguous run of out-of-bounds values inside
// a generated data slice.
type OutlierSegment struct {
	Start  int    // index of the first outlier sample
	Length int    // number of consecutive outlier samples
	Type   string // "upper" = above the upper bound, "lower" = below the lower bound
}
|
||||
|
||||
func generateOutlierSegments(totalSize, minLength, maxLength, count int, distribution string) []OutlierSegment {
|
||||
if count == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
segments := make([]OutlierSegment, 0, count)
|
||||
usedPositions := make(map[int]bool)
|
||||
|
||||
for i := 0; i < count; i++ {
|
||||
// 尝试多次寻找合适的位置
|
||||
for attempt := 0; attempt < 10; attempt++ {
|
||||
length := rand.Intn(maxLength-minLength+1) + minLength
|
||||
start := rand.Intn(totalSize - length)
|
||||
|
||||
// 检查是否与已有段重叠
|
||||
overlap := false
|
||||
for pos := start; pos < start+length; pos++ {
|
||||
if usedPositions[pos] {
|
||||
overlap = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !overlap {
|
||||
// 标记已使用的位置
|
||||
for pos := start; pos < start+length; pos++ {
|
||||
usedPositions[pos] = true
|
||||
}
|
||||
|
||||
// 根据 distribution 配置决定异常类型
|
||||
var outlierType string
|
||||
switch distribution {
|
||||
case "upper":
|
||||
outlierType = "upper"
|
||||
case "lower":
|
||||
outlierType = "lower"
|
||||
case "both":
|
||||
fallthrough
|
||||
default:
|
||||
if rand.Float64() < 0.5 {
|
||||
outlierType = "upper"
|
||||
} else {
|
||||
outlierType = "lower"
|
||||
}
|
||||
}
|
||||
|
||||
segments = append(segments, OutlierSegment{
|
||||
Start: start,
|
||||
Length: length,
|
||||
Type: outlierType,
|
||||
})
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return segments
|
||||
}
|
||||
|
||||
func insertOutlierSegment(data []float64, segment OutlierSegment, minBound, maxBound float64, config outlierConfig) []float64 {
|
||||
rangeWidth := maxBound - minBound
|
||||
|
||||
// 确定整个异常段的方向
|
||||
outlierType := segment.Type
|
||||
if outlierType == "" {
|
||||
switch config.Distribution {
|
||||
case "upper":
|
||||
outlierType = "upper"
|
||||
case "lower":
|
||||
outlierType = "lower"
|
||||
default:
|
||||
if rand.Float64() < 0.5 {
|
||||
outlierType = "upper"
|
||||
} else {
|
||||
outlierType = "lower"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 为整个段生成同方向异常值
|
||||
for i := segment.Start; i < segment.Start+segment.Length && i < len(data); i++ {
|
||||
excess := rangeWidth * (0.3 + rand.Float64()*config.Intensity)
|
||||
|
||||
if outlierType == "upper" {
|
||||
data[i] = maxBound + excess
|
||||
} else {
|
||||
data[i] = minBound - excess
|
||||
}
|
||||
}
|
||||
|
||||
return data
|
||||
}
|
||||
|
||||
func detectOutlierSegments(data []float64, baseValue float64, changes []float64, minSegmentLength int) []OutlierSegment {
|
||||
if len(data) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
minBound, maxBound := getChangeBounds(baseValue, changes)
|
||||
var segments []OutlierSegment
|
||||
currentStart := -1
|
||||
currentType := ""
|
||||
|
||||
for i, value := range data {
|
||||
isOutlier := value > maxBound || value < minBound
|
||||
|
||||
if isOutlier {
|
||||
outlierType := "upper"
|
||||
if value < minBound {
|
||||
outlierType = "lower"
|
||||
}
|
||||
|
||||
if currentStart == -1 {
|
||||
// 开始新的异常段
|
||||
currentStart = i
|
||||
currentType = outlierType
|
||||
} else if currentType != outlierType {
|
||||
// 类型变化,结束当前段
|
||||
if i-currentStart >= minSegmentLength {
|
||||
segments = append(segments, OutlierSegment{
|
||||
Start: currentStart,
|
||||
Length: i - currentStart,
|
||||
Type: currentType,
|
||||
})
|
||||
}
|
||||
currentStart = i
|
||||
currentType = outlierType
|
||||
}
|
||||
} else {
|
||||
if currentStart != -1 {
|
||||
// 结束当前异常段
|
||||
if i-currentStart >= minSegmentLength {
|
||||
segments = append(segments, OutlierSegment{
|
||||
Start: currentStart,
|
||||
Length: i - currentStart,
|
||||
Type: currentType,
|
||||
})
|
||||
}
|
||||
currentStart = -1
|
||||
currentType = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 处理最后的异常段
|
||||
if currentStart != -1 && len(data)-currentStart >= minSegmentLength {
|
||||
segments = append(segments, OutlierSegment{
|
||||
Start: currentStart,
|
||||
Length: len(data) - currentStart,
|
||||
Type: currentType,
|
||||
})
|
||||
}
|
||||
|
||||
return segments
|
||||
}
|
||||
|
||||
func generateFloatSlice(baseValue float64, changes []float64, size int, variationType string) ([]float64, error) {
|
||||
return generateRandomData(baseValue, changes, size), nil
|
||||
}
|
||||
|
||||
// normalizeRanges groups changes into [low, high] pairs, swapping any pair
// given in descending order. A trailing unpaired element (odd-length input)
// is ignored instead of causing an index-out-of-range panic, which the
// original did via changes[i+1].
func normalizeRanges(changes []float64) [][2]float64 {
	ranges := make([][2]float64, 0, len(changes)/2)
	for i := 0; i+1 < len(changes); i += 2 {
		// Avoid shadowing the min/max builtins.
		lo, hi := changes[i], changes[i+1]
		if lo > hi {
			lo, hi = hi, lo
		}
		ranges = append(ranges, [2]float64{lo, hi})
	}
	return ranges
}
|
||||
|
||||
func generateRandomData(baseValue float64, changes []float64, size int) []float64 {
|
||||
data := make([]float64, size)
|
||||
ranges := normalizeRanges(changes)
|
||||
for i := range data {
|
||||
rangeIdx := rand.Intn(len(ranges))
|
||||
minChange := ranges[rangeIdx][0]
|
||||
maxChange := ranges[rangeIdx][1]
|
||||
change := minChange + rand.Float64()*(maxChange-minChange)
|
||||
data[i] = baseValue + change
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
// simulateDataWrite 定时生成并写入模拟数据到 Redis ZSet
|
||||
func simulateDataWrite(ctx context.Context, rdb *redis.Client, redisKey string, config outlierConfig, measInfo util.CalculationResult) {
|
||||
log.Printf("启动数据写入程序, Redis Key: %s, 基准值: %.4f, 变化范围: %+v\n", redisKey, measInfo.BaseValue, measInfo.Changes)
|
||||
ticker := time.NewTicker(3 * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
pipe := rdb.Pipeline()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
log.Printf("\n[%s] 写入程序已停止\n", redisKey)
|
||||
return
|
||||
case <-ticker.C:
|
||||
minBound, maxBound := getChangeBounds(measInfo.BaseValue, measInfo.Changes)
|
||||
log.Printf("计算边界: [%.4f, %.4f]\n", minBound, maxBound)
|
||||
|
||||
// 根据基准值类型决定如何处理
|
||||
switch measInfo.BaseType {
|
||||
case "TI":
|
||||
// 边沿触发类型,生成特殊处理的数据
|
||||
log.Printf("边沿触发类型,跳过异常数据生成\n")
|
||||
return
|
||||
case "TE":
|
||||
// 正常上下限类型,生成包含异常的数据
|
||||
if len(measInfo.Changes) == 0 {
|
||||
log.Printf("无变化范围数据,跳过\n")
|
||||
return
|
||||
}
|
||||
|
||||
// 根据变化范围数量调整异常配置
|
||||
if len(measInfo.Changes) == 2 {
|
||||
// 只有上下限
|
||||
config.Distribution = "both"
|
||||
} else if len(measInfo.Changes) == 4 {
|
||||
// 有上下限和预警上下限
|
||||
config.Distribution = "both"
|
||||
config.Intensity = 2.0 // 增强异常强度
|
||||
}
|
||||
|
||||
// 生成包含异常的数据
|
||||
data, err := generateFloatSliceWithOutliers(
|
||||
measInfo.BaseValue,
|
||||
measInfo.Changes,
|
||||
measInfo.Size,
|
||||
"random",
|
||||
config,
|
||||
)
|
||||
if err != nil {
|
||||
log.Printf("生成异常数据失败:%v\n", err)
|
||||
continue
|
||||
}
|
||||
|
||||
segments := detectOutlierSegments(data, measInfo.BaseValue, measInfo.Changes, config.MinLength)
|
||||
log.Printf("检测到异常段数量:%d\n", len(segments))
|
||||
for i, segment := range segments {
|
||||
log.Printf("异常段%d: 位置[%d-%d], 长度=%d, 类型=%s\n",
|
||||
i+1, segment.Start, segment.Start+segment.Length-1, segment.Length, segment.Type)
|
||||
}
|
||||
|
||||
redisZs := make([]redis.Z, 0, len(data))
|
||||
for i := range len(data) {
|
||||
z := redis.Z{
|
||||
Score: data[i],
|
||||
Member: strconv.FormatInt(time.Now().UnixNano(), 10),
|
||||
}
|
||||
redisZs = append(redisZs, z)
|
||||
}
|
||||
pipe.ZAdd(ctx, redisKey, redisZs...)
|
||||
_, err = pipe.Exec(ctx)
|
||||
if err != nil {
|
||||
log.Printf("redis pipeline execution failed: %v", err)
|
||||
}
|
||||
log.Printf("生成 redis 实时数据成功\n")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func gracefulShutdown() {
|
||||
if globalRedisClient != nil {
|
||||
if err := globalRedisClient.Close(); err != nil {
|
||||
log.Printf("关闭 Redis 客户端失败:%v", err)
|
||||
} else {
|
||||
log.Println("关闭 Redis 客户端成功")
|
||||
}
|
||||
}
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
func main() {
|
||||
rootCtx := context.Background()
|
||||
|
||||
pgURI := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s", "192.168.1.101", 5432, "postgres", "coslight", "demo")
|
||||
|
||||
postgresDBClient, err := gorm.Open(postgres.Open(pgURI))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer func() {
|
||||
sqlDB, err := postgresDBClient.DB()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
sqlDB.Close()
|
||||
}()
|
||||
|
||||
cancelCtx, cancel := context.WithTimeout(rootCtx, 5*time.Second)
|
||||
defer cancel()
|
||||
var measurements []orm.Measurement
|
||||
result := postgresDBClient.WithContext(cancelCtx).Find(&measurements)
|
||||
if result.Error != nil {
|
||||
panic(result.Error)
|
||||
}
|
||||
log.Println("总共读取到测量点数量:", len(measurements))
|
||||
measInfos := util.ProcessMeasurements(measurements)
|
||||
|
||||
// 测量点数据生成(包含异常数据)
|
||||
// 配置异常段参数
|
||||
outlierConfig := outlierConfig{
|
||||
Enabled: true, // 是否产生异常段数据
|
||||
Count: 2, // 异常段数量
|
||||
MinLength: 10, // 异常段最小连续长度
|
||||
MaxLength: 15, // 异常段最大连续长度
|
||||
Intensity: 1.5, // 异常强度
|
||||
Distribution: "both", // 分布类型
|
||||
}
|
||||
|
||||
globalRedisClient = util.InitRedisClient(redisAddr)
|
||||
rCancelCtx, cancel := context.WithCancel(rootCtx)
|
||||
defer cancel()
|
||||
|
||||
for key, measInfo := range measInfos {
|
||||
go simulateDataWrite(rCancelCtx, globalRedisClient, key, outlierConfig, measInfo)
|
||||
}
|
||||
|
||||
sigChan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
|
||||
<-sigChan
|
||||
gracefulShutdown()
|
||||
}
|
||||
|
|
@ -1,266 +0,0 @@
|
|||
// Package util provide some utility fun
|
||||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"modelRT/orm"
|
||||
)
|
||||
|
||||
// CalculationResult holds the per-measurement parameters derived from an
// event plan, used to drive simulated data generation.
type CalculationResult struct {
	BaseValue float64   // center value the simulated samples vary around
	Changes   []float64 // offsets from BaseValue: limit deltas (plus warning deltas when present)
	Size      int       // number of samples to generate per batch
	BaseType  string    // trigger kind assigned by the calculator: "TI" (edge) or "TE" (threshold); NOTE(review): an earlier comment listed "normal"/"warning"/"edge" — the code only ever assigns "TI"/"TE"
	Message   string    // human-readable note describing which limits produced BaseValue
}
|
||||
|
||||
// ProcessMeasurements filters measurements down to those with a type-1 data
// source and a usable event plan, and derives generation parameters for
// each. The returned map is keyed by the Redis key
// "station:device:phasor:channel" built from the measurement's io_address.
// Measurements missing any required piece are silently skipped.
func ProcessMeasurements(measurements []orm.Measurement) map[string]CalculationResult {
	results := make(map[string]CalculationResult, len(measurements))
	for _, measurement := range measurements {
		// Skip measurements without a DataSource.
		if measurement.DataSource == nil {
			continue
		}

		// The data source must declare a type.
		dataType, typeExists := measurement.DataSource["type"]
		if !typeExists {
			continue
		}

		// Type assertion: accept the numeric representations decoding may
		// produce (JSON numbers arrive as float64).
		var typeValue int
		switch v := dataType.(type) {
		case int:
			typeValue = v
		case float64:
			typeValue = int(v)
		case int64:
			typeValue = int(v)
		default:
			continue
		}

		// Only type-1 data sources are simulated.
		if typeValue != 1 {
			continue
		}

		// io_address supplies the parts of the Redis key.
		ioAddressRaw, ioExists := measurement.DataSource["io_address"]
		if !ioExists {
			continue
		}

		ioAddress, ok := ioAddressRaw.(map[string]any)
		if !ok {
			continue
		}

		station, _ := ioAddress["station"].(string)
		device, _ := ioAddress["device"].(string)
		channel, _ := ioAddress["channel"].(string)

		result := fmt.Sprintf("%s:%s:phasor:%s", station, device, channel)
		if measurement.EventPlan == nil {
			continue
		}

		// The event plan's "cause" object carries the limit definitions.
		causeValue, causeExist := measurement.EventPlan["cause"]
		if !causeExist {
			continue
		}
		causeMap, ok := causeValue.(map[string]any)
		if !ok {
			continue
		}
		calResult, err := calculateBaseValueEnhanced(causeMap)
		if err != nil {
			continue
		}
		calResult.Size = measurement.Size
		results[result] = calResult
	}
	return results
}
|
||||
|
||||
// calculateBaseValueEnhanced derives a CalculationResult from an event-plan
// "cause" object. An "edge" key produces an edge-trigger ("TI") result;
// otherwise the up/down and/or upup/downdown limit pairs produce a threshold
// ("TE") result whose BaseValue is the midpoint of the chosen pair and whose
// Changes are the limit offsets from that midpoint. A cause with neither
// form yields an error.
func calculateBaseValueEnhanced(data map[string]any) (CalculationResult, error) {
	result := CalculationResult{}
	// Edge-trigger plans carry an "edge" key instead of numeric limits.
	if edge, exists := data["edge"]; exists {
		value, err := calculateEdgeValue(edge)
		if err != nil {
			return result, err
		}
		// "raising" edges toggle toward 1.0, everything else toward 0.0.
		// NOTE(review): "raising" looks like a typo for "rising", but it is a
		// runtime key the upstream data must match — do not change casually.
		if edge == "raising" {
			result.Changes = []float64{1.0}
		} else {
			result.Changes = []float64{0.0}
		}

		result.BaseValue = value
		result.BaseType = "TI"
		result.Message = "边沿触发基准值"
		return result, nil
	}

	hasUpDown := HasKeys(data, "up", "down")
	hasUpUpDownDown := HasKeys(data, "upup", "downdown")
	result.BaseType = "TE"
	switch {
	case hasUpDown && hasUpUpDownDown:
		// Both limit pairs present: base on up/down, report all four offsets.
		value, err := calculateAverage(data, "up", "down")
		if err != nil {
			return result, err
		}
		result.BaseValue = value
		result.Changes, err = calculateChanges(data, value, false, 4)
		if err != nil {
			return result, err
		}
		result.Message = "上下限基准值(忽略预警上上下下限)"
		return result, nil

	case hasUpDown:
		// Only the up/down pair present.
		value, err := calculateAverage(data, "up", "down")
		if err != nil {
			return result, err
		}
		result.BaseValue = value
		result.Changes, err = calculateChanges(data, value, false, 2)
		if err != nil {
			return result, err
		}
		result.Message = "上下限基准值"
		return result, nil

	case hasUpUpDownDown:
		// Only the warning upup/downdown pair present.
		value, err := calculateAverage(data, "upup", "downdown")
		if err != nil {
			return result, err
		}
		result.BaseValue = value
		result.Changes, err = calculateChanges(data, value, true, 2)
		if err != nil {
			return result, err
		}
		result.Message = "上上下下限基准值"
		return result, nil

	default:
		return result, fmt.Errorf("不支持的数据结构: %v", data)
	}
}
|
||||
|
||||
func calculateAverage(data map[string]any, key1, key2 string) (float64, error) {
|
||||
val1, err := getFloatValue(data, key1)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
val2, err := getFloatValue(data, key2)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return (val1 + val2) / 2.0, nil
|
||||
}
|
||||
|
||||
func calculateChanges(data map[string]any, baseValue float64, maxLimt bool, limitNum int) ([]float64, error) {
|
||||
results := make([]float64, 0, limitNum)
|
||||
switch limitNum {
|
||||
case 2:
|
||||
var key1, key2 string
|
||||
if maxLimt {
|
||||
key1 = "upup"
|
||||
key2 = "downdown"
|
||||
} else {
|
||||
key1 = "up"
|
||||
key2 = "down"
|
||||
}
|
||||
val1, err := getFloatValue(data, key1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
results = append(results, val1-baseValue)
|
||||
|
||||
val2, err := getFloatValue(data, key2)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
results = append(results, val2-baseValue)
|
||||
case 4:
|
||||
key1 := "up"
|
||||
key2 := "down"
|
||||
key3 := "upup"
|
||||
key4 := "downdown"
|
||||
|
||||
val1, err := getFloatValue(data, key1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
results = append(results, val1-baseValue)
|
||||
|
||||
val2, err := getFloatValue(data, key2)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
results = append(results, val2-baseValue)
|
||||
|
||||
val3, err := getFloatValue(data, key3)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
results = append(results, val3-baseValue)
|
||||
|
||||
val4, err := getFloatValue(data, key4)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
results = append(results, val4-baseValue)
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
func getFloatValue(data map[string]any, key string) (float64, error) {
|
||||
value, exists := data[key]
|
||||
if !exists {
|
||||
return 0, fmt.Errorf("缺少必需的键:%s", key)
|
||||
}
|
||||
|
||||
switch v := value.(type) {
|
||||
case float64:
|
||||
return v, nil
|
||||
case int:
|
||||
return float64(v), nil
|
||||
case float32:
|
||||
return float64(v), nil
|
||||
default:
|
||||
return 0, fmt.Errorf("键 %s 的值类型错误,期望数字类型,得到 %T", key, value)
|
||||
}
|
||||
}
|
||||
|
||||
func HasKeys(data map[string]any, keys ...string) bool {
|
||||
for _, key := range keys {
|
||||
if _, exists := data[key]; !exists {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func calculateEdgeValue(edge any) (float64, error) {
|
||||
edgeStr, ok := edge.(string)
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("edge 字段类型错误,期望 string,得到 %T", edge)
|
||||
}
|
||||
|
||||
switch edgeStr {
|
||||
case "raising":
|
||||
return 1.0, nil
|
||||
case "falling":
|
||||
return 0.0, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("不支持的 edge 值: %s", edgeStr)
|
||||
}
|
||||
}
|
||||
|
|
@ -1,27 +0,0 @@
|
|||
// Package util provide some utility fun
|
||||
package util
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
// InitRedisClient define func to initialize and return a redis client
|
||||
func InitRedisClient(redisAddr string) *redis.Client {
|
||||
rdb := redis.NewClient(&redis.Options{
|
||||
Addr: redisAddr,
|
||||
Password: "",
|
||||
DB: 0,
|
||||
})
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
_, err := rdb.Ping(ctx).Result()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return rdb
|
||||
}
|
||||
|
|
@ -1,41 +0,0 @@
|
|||
package diagram
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// anchorValueOverview define struct of storage all anchor value
|
||||
var anchorValueOverview sync.Map
|
||||
|
||||
// GetAnchorValue define func of get circuit diagram data by componentID
|
||||
func GetAnchorValue(componentUUID string) (string, error) {
|
||||
value, ok := diagramsOverview.Load(componentUUID)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("can not find anchor value by componentUUID:%s", componentUUID)
|
||||
}
|
||||
anchorValue, ok := value.(string)
|
||||
if !ok {
|
||||
return "", errors.New("convert to string failed")
|
||||
}
|
||||
return anchorValue, nil
|
||||
}
|
||||
|
||||
// UpdateAnchorValue define func of update anchor value by componentUUID and anchor name
|
||||
func UpdateAnchorValue(componentUUID string, anchorValue string) bool {
|
||||
_, result := anchorValueOverview.Swap(componentUUID, anchorValue)
|
||||
return result
|
||||
}
|
||||
|
||||
// StoreAnchorValue define func of store anchor value with componentUUID and anchor name
|
||||
func StoreAnchorValue(componentUUID string, anchorValue string) {
|
||||
anchorValueOverview.Store(componentUUID, anchorValue)
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteAnchorValue define func of delete anchor value with componentUUID
|
||||
func DeleteAnchorValue(componentUUID string) {
|
||||
anchorValueOverview.Delete(componentUUID)
|
||||
return
|
||||
}
|
||||
|
|
@ -0,0 +1,25 @@
|
|||
package diagram
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
cmap "github.com/orcaman/concurrent-map/v2"
|
||||
)
|
||||
|
||||
// DiagramsOverview define struct of storage all circuit diagram data
|
||||
var DiagramsOverview sync.Map
|
||||
|
||||
// GetComponentMap define func of get circuit diagram data by global uuid
|
||||
func GetComponentMap(uuid string) (*cmap.ConcurrentMap[string, any], error) {
|
||||
value, ok := DiagramsOverview.Load(uuid)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("can not find graph by global uuid:%s", uuid)
|
||||
}
|
||||
paramsMap, ok := value.(*cmap.ConcurrentMap[string, any])
|
||||
if !ok {
|
||||
return nil, errors.New("convert to component map struct failed")
|
||||
}
|
||||
return paramsMap, nil
|
||||
}
|
||||
|
|
@ -1,43 +0,0 @@
|
|||
package diagram
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"modelRT/orm"
|
||||
)
|
||||
|
||||
// diagramsOverview define struct of storage all circuit diagram data
|
||||
var diagramsOverview sync.Map
|
||||
|
||||
// GetComponentMap define func of get circuit diagram data by component uuid
|
||||
func GetComponentMap(componentUUID string) (*orm.Component, error) {
|
||||
value, ok := diagramsOverview.Load(componentUUID)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("can not find graph by global uuid:%s", componentUUID)
|
||||
}
|
||||
componentInfo, ok := value.(*orm.Component)
|
||||
if !ok {
|
||||
return nil, errors.New("convert to component map struct failed")
|
||||
}
|
||||
return componentInfo, nil
|
||||
}
|
||||
|
||||
// UpdateComponentMap define func of update circuit diagram data by component uuid and component info
|
||||
func UpdateComponentMap(componentID int64, componentInfo *orm.Component) bool {
|
||||
_, result := diagramsOverview.Swap(componentID, componentInfo)
|
||||
return result
|
||||
}
|
||||
|
||||
// StoreComponentMap define func of store circuit diagram data with component uuid and component info
|
||||
func StoreComponentMap(componentUUID string, componentInfo *orm.Component) {
|
||||
diagramsOverview.Store(componentUUID, componentInfo)
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteComponentMap define func of delete circuit diagram data with component uuid
|
||||
func DeleteComponentMap(componentUUID string) {
|
||||
diagramsOverview.Delete(componentUUID)
|
||||
return
|
||||
}
|
||||
|
|
@ -5,9 +5,6 @@ import (
|
|||
"fmt"
|
||||
"sync"
|
||||
|
||||
"modelRT/constants"
|
||||
"modelRT/network"
|
||||
|
||||
"github.com/gofrs/uuid"
|
||||
)
|
||||
|
||||
|
|
@ -47,8 +44,7 @@ func (g *Graph) CreateBackLink(vertex string) {
|
|||
}
|
||||
}
|
||||
|
||||
// AddEdge adds an edge between two verticeLinks
|
||||
// TODO 在添加拓扑信息时是否考虑过滤重复节点
|
||||
// AddEdge adds an edge between two VerticeLinks
|
||||
func (g *Graph) AddEdge(from, to uuid.UUID) {
|
||||
g.Lock()
|
||||
defer g.Unlock()
|
||||
|
|
@ -92,7 +88,7 @@ func (g *Graph) DelNode(vertex string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// DelEdge delete an edge between two verticeLinks
|
||||
// DelEdge delete an edge between two VerticeLinks
|
||||
func (g *Graph) DelEdge(from, to uuid.UUID) error {
|
||||
g.Lock()
|
||||
defer g.Unlock()
|
||||
|
|
@ -105,14 +101,12 @@ func (g *Graph) DelEdge(from, to uuid.UUID) error {
|
|||
fromKeys = []uuid.UUID{from}
|
||||
}
|
||||
|
||||
// Process the situation where the to node is taken as the parent node while deleting edges
|
||||
childvertex := g.VerticeLinks[toKey]
|
||||
err := g.DelNode(toKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("delete edge failed: %w", err)
|
||||
}
|
||||
|
||||
fmt.Println("fromKeys:", fromKeys)
|
||||
for _, fromUUID := range fromKeys {
|
||||
fromKey := fromUUID.String()
|
||||
var delIndex int
|
||||
|
|
@ -121,15 +115,10 @@ func (g *Graph) DelEdge(from, to uuid.UUID) error {
|
|||
delIndex = index
|
||||
}
|
||||
}
|
||||
|
||||
vertex := g.VerticeLinks[fromKey]
|
||||
if len(vertex) == 1 {
|
||||
g.DelNode(fromKey)
|
||||
} else {
|
||||
copy(vertex[delIndex:], vertex[delIndex+1:])
|
||||
vertex = vertex[:len(vertex)-1]
|
||||
g.VerticeLinks[fromKey] = vertex
|
||||
}
|
||||
copy(vertex[delIndex:], vertex[delIndex+1:])
|
||||
vertex = vertex[:len(vertex)-1]
|
||||
g.VerticeLinks[fromKey] = vertex
|
||||
}
|
||||
|
||||
fromKey := from.String()
|
||||
|
|
@ -145,14 +134,3 @@ func (g *Graph) PrintGraph() {
|
|||
fmt.Printf("%s -> %v\n", vertex, edges)
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateEdge update edge link info between two verticeLinks
|
||||
func (g *Graph) UpdateEdge(changeInfo network.TopologicUUIDChangeInfos) error {
|
||||
if changeInfo.ChangeType == constants.UUIDFromChangeType || changeInfo.ChangeType == constants.UUIDToChangeType {
|
||||
g.DelEdge(changeInfo.OldUUIDFrom, changeInfo.OldUUIDTo)
|
||||
g.AddEdge(changeInfo.NewUUIDFrom, changeInfo.NewUUIDTo)
|
||||
} else {
|
||||
g.AddEdge(changeInfo.NewUUIDFrom, changeInfo.NewUUIDTo)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,33 +0,0 @@
|
|||
package diagram
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
func TestHMSet(t *testing.T) {
|
||||
rdb := redis.NewClient(&redis.Options{
|
||||
Network: "tcp",
|
||||
Addr: "192.168.2.104:6379",
|
||||
Password: "cnstar",
|
||||
PoolSize: 50,
|
||||
DialTimeout: 10 * time.Second,
|
||||
})
|
||||
params := map[string]interface{}{
|
||||
"field1": "Hello1",
|
||||
"field2": "World1",
|
||||
"field3": 11,
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
res, err := rdb.HSet(ctx, "myhash", params).Result()
|
||||
if err != nil {
|
||||
fmt.Printf("err:%v\n", err)
|
||||
}
|
||||
fmt.Printf("res:%v\n", res)
|
||||
return
|
||||
}
|
||||
|
|
@ -1,64 +0,0 @@
|
|||
package diagram
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/gofrs/uuid"
|
||||
)
|
||||
|
||||
var GlobalTree *MultiBranchTreeNode
|
||||
|
||||
// MultiBranchTreeNode represents a topological structure using an multi branch tree
|
||||
type MultiBranchTreeNode struct {
|
||||
ID uuid.UUID // 节点唯一标识
|
||||
Parent *MultiBranchTreeNode // 指向父节点的指针
|
||||
Children []*MultiBranchTreeNode // 指向所有子节点的指针切片
|
||||
}
|
||||
|
||||
func NewMultiBranchTree(id uuid.UUID) *MultiBranchTreeNode {
|
||||
return &MultiBranchTreeNode{
|
||||
ID: id,
|
||||
Children: make([]*MultiBranchTreeNode, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func (n *MultiBranchTreeNode) AddChild(child *MultiBranchTreeNode) {
|
||||
child.Parent = n
|
||||
n.Children = append(n.Children, child)
|
||||
}
|
||||
|
||||
func (n *MultiBranchTreeNode) RemoveChild(childID uuid.UUID) bool {
|
||||
for i, child := range n.Children {
|
||||
if child.ID == childID {
|
||||
n.Children = append(n.Children[:i], n.Children[i+1:]...)
|
||||
child.Parent = nil
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (n *MultiBranchTreeNode) FindNodeByID(id uuid.UUID) *MultiBranchTreeNode {
|
||||
if n.ID == id {
|
||||
return n
|
||||
}
|
||||
|
||||
for _, child := range n.Children {
|
||||
if found := child.FindNodeByID(id); found != nil {
|
||||
return found
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *MultiBranchTreeNode) PrintTree(level int) {
|
||||
for i := 0; i < level; i++ {
|
||||
fmt.Print(" ")
|
||||
}
|
||||
|
||||
fmt.Printf("-ID: %s\n", n.ID)
|
||||
|
||||
for _, child := range n.Children {
|
||||
child.PrintTree(level + 1)
|
||||
}
|
||||
}
|
||||
|
|
@ -1,36 +0,0 @@
|
|||
// Package diagram provide diagram data structure and operation
|
||||
package diagram
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
// RedisClient define struct to accessing redis data that does not require the use of distributed locks
|
||||
type RedisClient struct {
|
||||
Client *redis.Client
|
||||
}
|
||||
|
||||
// NewRedisClient define func of new redis client instance
|
||||
func NewRedisClient() *RedisClient {
|
||||
return &RedisClient{
|
||||
Client: GetRedisClientInstance(),
|
||||
}
|
||||
}
|
||||
|
||||
// QueryByZRangeByLex define func to query real time data from redis zset
|
||||
func (rc *RedisClient) QueryByZRangeByLex(ctx context.Context, key string, size int64) ([]redis.Z, error) {
|
||||
client := rc.Client
|
||||
args := redis.ZRangeArgs{
|
||||
Key: key,
|
||||
Start: 0,
|
||||
Stop: size,
|
||||
ByScore: false,
|
||||
ByLex: false,
|
||||
Rev: false,
|
||||
Offset: 0,
|
||||
Count: 0,
|
||||
}
|
||||
return client.ZRangeArgsWithScores(ctx, args).Result()
|
||||
}
|
||||
|
|
@ -1,97 +0,0 @@
|
|||
package diagram
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
locker "modelRT/distributedlock"
|
||||
"modelRT/logger"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
// RedisHash defines the encapsulation struct of redis hash type
|
||||
type RedisHash struct {
|
||||
ctx context.Context
|
||||
hashKey string
|
||||
rwLocker *locker.RedissionRWLocker
|
||||
storageClient *redis.Client
|
||||
}
|
||||
|
||||
// NewRedisHash define func of new redis hash instance
|
||||
func NewRedisHash(ctx context.Context, hashKey string, lockLeaseTime uint64, needRefresh bool) *RedisHash {
|
||||
token := ctx.Value("client_token").(string)
|
||||
return &RedisHash{
|
||||
ctx: ctx,
|
||||
hashKey: hashKey,
|
||||
rwLocker: locker.InitRWLocker(hashKey, token, lockLeaseTime, needRefresh),
|
||||
storageClient: GetRedisClientInstance(),
|
||||
}
|
||||
}
|
||||
|
||||
// SetRedisHashByMap define func of set redis hash by map struct
|
||||
func (rh *RedisHash) SetRedisHashByMap(fields map[string]interface{}) error {
|
||||
err := rh.rwLocker.WLock(rh.ctx)
|
||||
if err != nil {
|
||||
logger.Error(rh.ctx, "lock wLock by hash_key failed", "hash_key", rh.hashKey, "error", err)
|
||||
return err
|
||||
}
|
||||
defer rh.rwLocker.UnWLock(rh.ctx)
|
||||
|
||||
err = rh.storageClient.HSet(rh.ctx, rh.hashKey, fields).Err()
|
||||
if err != nil {
|
||||
logger.Error(rh.ctx, "set hash by map failed", "hash_key", rh.hashKey, "fields", fields, "error", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetRedisHashByKV define func of set redis hash by kv struct
|
||||
func (rh *RedisHash) SetRedisHashByKV(field string, value interface{}) error {
|
||||
err := rh.rwLocker.WLock(rh.ctx)
|
||||
if err != nil {
|
||||
logger.Error(rh.ctx, "lock wLock by hash_key failed", "hash_key", rh.hashKey, "error", err)
|
||||
return err
|
||||
}
|
||||
defer rh.rwLocker.UnWLock(rh.ctx)
|
||||
|
||||
err = rh.storageClient.HSet(rh.ctx, rh.hashKey, field, value).Err()
|
||||
if err != nil {
|
||||
logger.Error(rh.ctx, "set hash by kv failed", "hash_key", rh.hashKey, "field", field, "value", value, "error", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// HGet define func of get specified field value from redis hash by key and field name
|
||||
func (rh *RedisHash) HGet(field string) (string, error) {
|
||||
err := rh.rwLocker.RLock(rh.ctx)
|
||||
if err != nil {
|
||||
logger.Error(rh.ctx, "lock rLock by hash_key failed", "hash_key", rh.hashKey, "error", err)
|
||||
return "", err
|
||||
}
|
||||
defer rh.rwLocker.UnRLock(rh.ctx)
|
||||
|
||||
result, err := rh.storageClient.HGet(rh.ctx, rh.hashKey, field).Result()
|
||||
if err != nil {
|
||||
logger.Error(rh.ctx, "set hash by kv failed", "hash_key", rh.hashKey, "field", field, "error", err)
|
||||
return "", err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// HGetAll define func of get all filelds from redis hash by key
|
||||
func (rh *RedisHash) HGetAll() (map[string]string, error) {
|
||||
err := rh.rwLocker.RLock(rh.ctx)
|
||||
if err != nil {
|
||||
logger.Error(rh.ctx, "lock rLock by hash_key failed", "hash_key", rh.hashKey, "error", err)
|
||||
return nil, err
|
||||
}
|
||||
defer rh.rwLocker.UnRLock(rh.ctx)
|
||||
|
||||
result, err := rh.storageClient.HGetAll(rh.ctx, rh.hashKey).Result()
|
||||
if err != nil {
|
||||
logger.Error(rh.ctx, "get all hash field by hash key failed", "hash_key", rh.hashKey, "error", err)
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
|
@ -1,45 +0,0 @@
|
|||
package diagram
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"modelRT/config"
|
||||
"modelRT/util"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
var (
|
||||
_globalStorageClient *redis.Client
|
||||
once sync.Once
|
||||
)
|
||||
|
||||
// initClient define func of return successfully initialized redis client
|
||||
func initClient(rCfg config.RedisConfig) *redis.Client {
|
||||
client, err := util.NewRedisClient(
|
||||
rCfg.Addr,
|
||||
util.WithPassword(rCfg.Password),
|
||||
util.WithDB(rCfg.DB),
|
||||
util.WithPoolSize(rCfg.PoolSize),
|
||||
util.WithTimeout(time.Duration(rCfg.Timeout)*time.Second),
|
||||
)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return client
|
||||
}
|
||||
|
||||
// InitRedisClientInstance define func of return instance of redis client
|
||||
func InitRedisClientInstance(rCfg config.RedisConfig) *redis.Client {
|
||||
once.Do(func() {
|
||||
_globalStorageClient = initClient(rCfg)
|
||||
})
|
||||
return _globalStorageClient
|
||||
}
|
||||
|
||||
// GetRedisClientInstance define func of get redis client instance
|
||||
func GetRedisClientInstance() *redis.Client {
|
||||
client := _globalStorageClient
|
||||
return client
|
||||
}
|
||||
|
|
@ -1,95 +0,0 @@
|
|||
package diagram
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
locker "modelRT/distributedlock"
|
||||
"modelRT/logger"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// RedisSet defines the encapsulation struct of redis hash type
|
||||
type RedisSet struct {
|
||||
ctx context.Context
|
||||
key string
|
||||
rwLocker *locker.RedissionRWLocker
|
||||
storageClient *redis.Client
|
||||
logger *zap.Logger
|
||||
}
|
||||
|
||||
// NewRedisSet define func of new redis set instance
|
||||
func NewRedisSet(ctx context.Context, setKey string, lockLeaseTime uint64, needRefresh bool) *RedisSet {
|
||||
token := ctx.Value("client_token").(string)
|
||||
return &RedisSet{
|
||||
ctx: ctx,
|
||||
key: setKey,
|
||||
rwLocker: locker.InitRWLocker(setKey, token, lockLeaseTime, needRefresh),
|
||||
storageClient: GetRedisClientInstance(),
|
||||
logger: logger.GetLoggerInstance(),
|
||||
}
|
||||
}
|
||||
|
||||
// SADD define func of add redis set by members
|
||||
func (rs *RedisSet) SADD(members ...any) error {
|
||||
err := rs.rwLocker.WLock(rs.ctx)
|
||||
if err != nil {
|
||||
logger.Error(rs.ctx, "lock wLock by setKey failed", "set_key", rs.key, "error", err)
|
||||
return err
|
||||
}
|
||||
defer rs.rwLocker.UnWLock(rs.ctx)
|
||||
|
||||
err = rs.storageClient.SAdd(rs.ctx, rs.key, members).Err()
|
||||
if err != nil {
|
||||
logger.Error(rs.ctx, "add set by memebers failed", "set_key", rs.key, "members", members, "error", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SREM define func of remove the specified members from redis set by key
|
||||
func (rs *RedisSet) SREM(members ...any) error {
|
||||
err := rs.rwLocker.WLock(rs.ctx)
|
||||
if err != nil {
|
||||
logger.Error(rs.ctx, "lock wLock by setKey failed", "set_key", rs.key, "error", err)
|
||||
return err
|
||||
}
|
||||
defer rs.rwLocker.UnWLock(rs.ctx)
|
||||
|
||||
count, err := rs.storageClient.SRem(rs.ctx, rs.key, members).Result()
|
||||
if err != nil || count != int64(len(members)) {
|
||||
logger.Error(rs.ctx, "rem members from set failed", "set_key", rs.key, "members", members, "error", err)
|
||||
|
||||
return fmt.Errorf("rem members from set failed:%w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SMembers define func of get all memebers from redis set by key
|
||||
func (rs *RedisSet) SMembers() ([]string, error) {
|
||||
err := rs.rwLocker.RLock(rs.ctx)
|
||||
if err != nil {
|
||||
logger.Error(rs.ctx, "lock rLock by setKey failed", "set_key", rs.key, "error", err)
|
||||
return nil, err
|
||||
}
|
||||
defer rs.rwLocker.UnRLock(rs.ctx)
|
||||
|
||||
result, err := rs.storageClient.SMembers(rs.ctx, rs.key).Result()
|
||||
if err != nil {
|
||||
logger.Error(rs.ctx, "get all set field by hash key failed", "set_key", rs.key, "error", err)
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// SIsMember define func of determine whether an member is in set by key
|
||||
func (rs *RedisSet) SIsMember(member any) (bool, error) {
|
||||
result, err := rs.storageClient.SIsMember(rs.ctx, rs.key, member).Result()
|
||||
if err != nil {
|
||||
logger.Error(rs.ctx, "get all set field by hash key failed", "set_key", rs.key, "error", err)
|
||||
return false, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
|
@ -1,114 +0,0 @@
|
|||
package diagram
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
locker "modelRT/distributedlock"
|
||||
"modelRT/logger"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// RedisString defines the encapsulation struct of redis string type
|
||||
type RedisString struct {
|
||||
ctx context.Context
|
||||
rwLocker *locker.RedissionRWLocker
|
||||
storageClient *redis.Client
|
||||
logger *zap.Logger
|
||||
}
|
||||
|
||||
// NewRedisString define func of new redis string instance
|
||||
func NewRedisString(ctx context.Context, stringKey string, token string, lockLeaseTime uint64, needRefresh bool) *RedisString {
|
||||
return &RedisString{
|
||||
ctx: ctx,
|
||||
rwLocker: locker.InitRWLocker(stringKey, token, lockLeaseTime, needRefresh),
|
||||
storageClient: GetRedisClientInstance(),
|
||||
logger: logger.GetLoggerInstance(),
|
||||
}
|
||||
}
|
||||
|
||||
// Get define func of get the value of key
|
||||
func (rs *RedisString) Get(stringKey string) (string, error) {
|
||||
err := rs.rwLocker.RLock(rs.ctx)
|
||||
if err != nil {
|
||||
logger.Error(rs.ctx, "lock rLock by stringKey failed", "string_key", stringKey, "error", err)
|
||||
return "", err
|
||||
}
|
||||
defer rs.rwLocker.UnRLock(rs.ctx)
|
||||
|
||||
value, err := rs.storageClient.Get(rs.ctx, stringKey).Result()
|
||||
if err != nil {
|
||||
logger.Error(rs.ctx, "get string value by key failed", "string_key", stringKey, "error", err)
|
||||
return "", err
|
||||
}
|
||||
return value, nil
|
||||
}
|
||||
|
||||
// Set define func of set the value of key
|
||||
func (rs *RedisString) Set(stringKey string, value interface{}) error {
|
||||
err := rs.rwLocker.WLock(rs.ctx)
|
||||
if err != nil {
|
||||
logger.Error(rs.ctx, "lock wLock by stringKey failed", "string_key", stringKey, "error", err)
|
||||
return err
|
||||
}
|
||||
defer rs.rwLocker.UnWLock(rs.ctx)
|
||||
|
||||
err = rs.storageClient.Set(rs.ctx, stringKey, value, redis.KeepTTL).Err()
|
||||
if err != nil {
|
||||
logger.Error(rs.ctx, "get string value by key failed", "string_key", stringKey, "error", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Incr define func of increments the number stored at key by one
|
||||
func (rs *RedisString) Incr(stringKey string) error {
|
||||
err := rs.rwLocker.WLock(rs.ctx)
|
||||
if err != nil {
|
||||
logger.Error(rs.ctx, "lock wLock by stringKey failed", "string_key", stringKey, "error", err)
|
||||
return err
|
||||
}
|
||||
defer rs.rwLocker.UnWLock(rs.ctx)
|
||||
|
||||
err = rs.storageClient.Incr(rs.ctx, stringKey).Err()
|
||||
if err != nil {
|
||||
logger.Error(rs.ctx, "incr the number stored at key by one failed", "string_key", stringKey, "error", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IncrBy define func of increments the number stored at key by increment
|
||||
func (rs *RedisString) IncrBy(stringKey string, value int64) error {
|
||||
err := rs.rwLocker.WLock(rs.ctx)
|
||||
if err != nil {
|
||||
logger.Error(rs.ctx, "lock wLock by stringKey failed", "string_key", stringKey, "error", err)
|
||||
return err
|
||||
}
|
||||
defer rs.rwLocker.UnWLock(rs.ctx)
|
||||
|
||||
err = rs.storageClient.IncrBy(rs.ctx, stringKey, value).Err()
|
||||
if err != nil {
|
||||
logger.Error(rs.ctx, "incr the number stored at key by increment", "string_key", stringKey, "error", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GETDEL define func of get the value of key and delete the key
|
||||
func (rs *RedisString) GETDEL(stringKey string) error {
|
||||
err := rs.rwLocker.WLock(rs.ctx)
|
||||
if err != nil {
|
||||
logger.Error(rs.ctx, "lock wLock by stringKey failed", "string_key", stringKey, "error", err)
|
||||
return err
|
||||
}
|
||||
defer rs.rwLocker.UnWLock(rs.ctx)
|
||||
|
||||
err = rs.storageClient.GetDel(rs.ctx, stringKey).Err()
|
||||
if err != nil {
|
||||
logger.Error(rs.ctx, "del the key failed", "string_key", stringKey, "error", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,124 +0,0 @@
|
|||
// Package diagram provide diagram data structure and operation
|
||||
package diagram
|
||||
|
||||
import (
|
||||
"context"
|
||||
"iter"
|
||||
"maps"
|
||||
|
||||
locker "modelRT/distributedlock"
|
||||
"modelRT/logger"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
// RedisZSet defines the encapsulation struct of redis zset type
|
||||
type RedisZSet struct {
|
||||
ctx context.Context
|
||||
rwLocker *locker.RedissionRWLocker
|
||||
storageClient *redis.Client
|
||||
}
|
||||
|
||||
// NewRedisZSet define func of new redis zset instance
|
||||
func NewRedisZSet(ctx context.Context, key string, lockLeaseTime uint64, needRefresh bool) *RedisZSet {
|
||||
token := ctx.Value("client_token").(string)
|
||||
return &RedisZSet{
|
||||
ctx: ctx,
|
||||
rwLocker: locker.InitRWLocker(key, token, lockLeaseTime, needRefresh),
|
||||
storageClient: GetRedisClientInstance(),
|
||||
}
|
||||
}
|
||||
|
||||
// ZADD define func of add redis zset by members
|
||||
func (rs *RedisZSet) ZADD(setKey string, score float64, member interface{}) error {
|
||||
err := rs.rwLocker.WLock(rs.ctx)
|
||||
if err != nil {
|
||||
logger.Error(rs.ctx, "lock wLock by setKey failed", "set_key", setKey, "error", err)
|
||||
return err
|
||||
}
|
||||
defer rs.rwLocker.UnWLock(rs.ctx)
|
||||
|
||||
err = rs.storageClient.ZAdd(rs.ctx, setKey, redis.Z{Score: score, Member: member}).Err()
|
||||
if err != nil {
|
||||
logger.Error(rs.ctx, "add set by score and memebers failed", "set_key", setKey, "members", member, "error", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ZRANGE define func of returns the specified range of elements in the sorted set stored by key
|
||||
func (rs *RedisZSet) ZRANGE(setKey string, start, stop int64) ([]string, error) {
|
||||
var results []string
|
||||
|
||||
err := rs.rwLocker.RLock(rs.ctx)
|
||||
if err != nil {
|
||||
logger.Error(rs.ctx, "lock RLock by setKey failed", "set_key", setKey, "error", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
err = rs.rwLocker.UnRLock(rs.ctx)
|
||||
if err != nil {
|
||||
logger.Error(rs.ctx, "unlock RLock by setKey failed", "set_key", setKey, "error", err)
|
||||
}
|
||||
}()
|
||||
|
||||
results, err = rs.storageClient.ZRange(rs.ctx, setKey, start, stop).Result()
|
||||
if err != nil {
|
||||
logger.Error(rs.ctx, "range set by key failed", "set_key", setKey, "start", start, "stop", stop, "error", err)
|
||||
return nil, err
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
type Comparer[T any] interface {
|
||||
Compare(T) int
|
||||
}
|
||||
|
||||
type ComparableComparer[T any] interface {
|
||||
Compare(T) int
|
||||
comparable // 直接嵌入 comparable 约束
|
||||
}
|
||||
|
||||
type methodNode[E Comparer[E]] struct {
|
||||
value E
|
||||
left *methodNode[E]
|
||||
right *methodNode[E]
|
||||
}
|
||||
|
||||
type MethodTree[E Comparer[E]] struct {
|
||||
root *methodNode[E]
|
||||
}
|
||||
|
||||
type OrderedSet[E interface {
|
||||
comparable
|
||||
Comparer[E]
|
||||
}] struct {
|
||||
tree MethodTree[E]
|
||||
elements map[E]bool
|
||||
}
|
||||
|
||||
type ComparableOrderedSet[E ComparableComparer[E]] struct {
|
||||
tree MethodTree[E]
|
||||
elements map[E]bool
|
||||
}
|
||||
|
||||
type Set[E any] interface {
|
||||
Insert(E)
|
||||
Delete(E)
|
||||
Has(E) bool
|
||||
All() iter.Seq[E]
|
||||
}
|
||||
|
||||
func InsertAll[E any](set Set[E], seq iter.Seq[E]) {
|
||||
for v := range seq {
|
||||
set.Insert(v)
|
||||
}
|
||||
}
|
||||
|
||||
type HashSet[E comparable] map[E]bool
|
||||
|
||||
func (s HashSet[E]) Insert(v E) { s[v] = true }
|
||||
func (s HashSet[E]) Delete(v E) { delete(s, v) }
|
||||
func (s HashSet[E]) Has(v E) bool { return s[v] }
|
||||
func (s HashSet[E]) All() iter.Seq[E] { return maps.Keys(s) }
|
||||
|
|
@ -6,20 +6,12 @@ import (
|
|||
"sync"
|
||||
)
|
||||
|
||||
// graphOverview define struct of storage all circuit diagram topologic data
|
||||
var graphOverview sync.Map
|
||||
|
||||
// PrintGrapMap define func of print circuit diagram topologic info data
|
||||
func PrintGrapMap() {
|
||||
graphOverview.Range(func(key, value interface{}) bool {
|
||||
fmt.Println(key, value)
|
||||
return true
|
||||
})
|
||||
}
|
||||
// GraphOverview define struct of storage all circuit diagram topologic data
|
||||
var GraphOverview sync.Map
|
||||
|
||||
// GetGraphMap define func of get circuit diagram topologic data by pageID
|
||||
func GetGraphMap(pageID int64) (*Graph, error) {
|
||||
value, ok := graphOverview.Load(pageID)
|
||||
value, ok := GraphOverview.Load(pageID)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("can not find graph by pageID:%d", pageID)
|
||||
}
|
||||
|
|
@ -29,21 +21,3 @@ func GetGraphMap(pageID int64) (*Graph, error) {
|
|||
}
|
||||
return graph, nil
|
||||
}
|
||||
|
||||
// UpdateGrapMap define func of update circuit diagram data by pageID and topologic info
|
||||
func UpdateGrapMap(pageID int64, graphInfo *Graph) bool {
|
||||
_, result := graphOverview.Swap(pageID, graphInfo)
|
||||
return result
|
||||
}
|
||||
|
||||
// StoreGraphMap define func of store circuit diagram topologic data with pageID and topologic info
|
||||
func StoreGraphMap(pageID int64, graphInfo *Graph) {
|
||||
graphOverview.Store(pageID, graphInfo)
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteGraphMap define func of delete circuit diagram topologic data with pageID
|
||||
func DeleteGraphMap(pageID int64) {
|
||||
graphOverview.Delete(pageID)
|
||||
return
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,6 +0,0 @@
|
|||
package constants
|
||||
|
||||
import "errors"
|
||||
|
||||
// AcquireTimeoutErr define error of get lock timeout
|
||||
var AcquireTimeoutErr = errors.New("the waiting time for obtaining the lock operation has timed out")
|
||||
|
|
@ -1,136 +0,0 @@
|
|||
package constants
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type RedisCode int
|
||||
|
||||
const (
|
||||
LockSuccess = RedisCode(1)
|
||||
UnLockSuccess = RedisCode(1)
|
||||
RefreshLockSuccess = RedisCode(1)
|
||||
UnRLockSuccess = RedisCode(0)
|
||||
UnWLockSuccess = RedisCode(0)
|
||||
RLockFailureWithWLockOccupancy = RedisCode(-1)
|
||||
UnRLockFailureWithWLockOccupancy = RedisCode(-2)
|
||||
WLockFailureWithRLockOccupancy = RedisCode(-3)
|
||||
WLockFailureWithWLockOccupancy = RedisCode(-4)
|
||||
UnWLockFailureWithRLockOccupancy = RedisCode(-5)
|
||||
UnWLockFailureWithWLockOccupancy = RedisCode(-6)
|
||||
WLockFailureWithNotFirstPriority = RedisCode(-7)
|
||||
RefreshLockFailure = RedisCode(-8)
|
||||
LockFailure = RedisCode(-9)
|
||||
UnLocakFailureWithLockOccupancy = RedisCode(-10)
|
||||
UnknownInternalError = RedisCode(-99)
|
||||
)
|
||||
|
||||
type RedisLockType int
|
||||
|
||||
const (
|
||||
LockType = RedisLockType(iota)
|
||||
UnRLockType
|
||||
UnWLockType
|
||||
UnLockType
|
||||
RefreshLockType
|
||||
)
|
||||
|
||||
type RedisResult struct {
|
||||
Code RedisCode
|
||||
Message string
|
||||
}
|
||||
|
||||
func (e *RedisResult) Error() string {
|
||||
return fmt.Sprintf("redis execution code:%d,message:%s\n", e.Code, e.Message)
|
||||
}
|
||||
|
||||
func (e *RedisResult) OutputResultMessage() string {
|
||||
return e.Message
|
||||
}
|
||||
|
||||
func (e *RedisResult) OutputResultCode() int {
|
||||
return int(e.Code)
|
||||
}
|
||||
|
||||
func NewRedisResult(res RedisCode, lockType RedisLockType, redisMsg string) error {
|
||||
resInt := int(res)
|
||||
switch resInt {
|
||||
case 1:
|
||||
if lockType == LockType {
|
||||
return &RedisResult{Code: res, Message: "redis lock success"}
|
||||
} else if (lockType == UnRLockType) || (lockType == UnWLockType) || (lockType == UnLockType) {
|
||||
return &RedisResult{Code: res, Message: "redis unlock success"}
|
||||
} else {
|
||||
return &RedisResult{Code: res, Message: "redis refresh lock success"}
|
||||
}
|
||||
case 0:
|
||||
if lockType == UnRLockType {
|
||||
return &RedisResult{Code: res, Message: "redis unlock read lock success, the lock is still occupied by other processes read lock"}
|
||||
} else {
|
||||
return &RedisResult{Code: res, Message: "redis unlock write lock success, the lock is still occupied by other processes write lock"}
|
||||
}
|
||||
case -1:
|
||||
return &RedisResult{Code: res, Message: "redis lock read lock failure,the lock is already occupied by another processes write lock"}
|
||||
case -2:
|
||||
return &RedisResult{Code: res, Message: "redis un lock read lock failure,the lock is already occupied by another processes write lock"}
|
||||
case -3:
|
||||
return &RedisResult{Code: res, Message: "redis lock write lock failure,the lock is already occupied by anthor processes read lock"}
|
||||
case -4:
|
||||
return &RedisResult{Code: res, Message: "redis lock write lock failure,the lock is already occupied by anthor processes write lock"}
|
||||
case -5:
|
||||
return &RedisResult{Code: res, Message: "redis unlock write lock failure,the lock is already occupied by another processes read lock"}
|
||||
case -6:
|
||||
return &RedisResult{Code: res, Message: "redis unlock write lock failure,the lock is already occupied by another processes write lock"}
|
||||
case -7:
|
||||
return &RedisResult{Code: res, Message: "redis lock write lock failure,the first priority in the current process non-waiting queue"}
|
||||
case -8:
|
||||
return &RedisResult{Code: res, Message: "redis refresh lock failure,the lock not exist"}
|
||||
case -9:
|
||||
return &RedisResult{Code: res, Message: "redis lock failure,the lock is already occupied by another processes lock"}
|
||||
case -99:
|
||||
return &RedisResult{Code: res, Message: fmt.Sprintf("redis internal execution error:%v\n", redisMsg)}
|
||||
default:
|
||||
msg := "unkown redis execution result"
|
||||
if redisMsg != "" {
|
||||
msg = fmt.Sprintf("%s:%s\n", msg, redisMsg)
|
||||
}
|
||||
return &RedisResult{Code: res, Message: msg}
|
||||
}
|
||||
}
|
||||
|
||||
func TranslateResultToStr(res RedisCode, lockType RedisLockType) string {
|
||||
resInt := int(res)
|
||||
switch resInt {
|
||||
case 1:
|
||||
if lockType == LockType {
|
||||
return "redis lock success"
|
||||
} else if (lockType == UnRLockType) || (lockType == UnWLockType) || (lockType == UnLockType) {
|
||||
return "redis unlock success"
|
||||
} else {
|
||||
return "redis refresh lock success"
|
||||
}
|
||||
case 0:
|
||||
if lockType == UnRLockType {
|
||||
return "redis unlock read lock success, the lock is still occupied by other processes read lock"
|
||||
} else {
|
||||
return "redis unlock write lock success, the lock is still occupied by other processes write lock"
|
||||
}
|
||||
case -1:
|
||||
return "redis lock read lock failure,the lock is already occupied by another processes write lock"
|
||||
case -2:
|
||||
return "redis un lock read lock failure,the lock is already occupied by another processes write lock"
|
||||
case -3:
|
||||
return "redis lock write lock failure,the lock is already occupied by anthor processes read lock"
|
||||
case -4:
|
||||
return "redis lock write lock failure,the lock is already occupied by anthor processes write lock"
|
||||
case -5:
|
||||
return "redis un lock write lock failure,the lock is already occupied by another processes read lock"
|
||||
case -6:
|
||||
return "redis un lock write lock failure,the lock is already occupied by another processes write lock"
|
||||
case -7:
|
||||
return "redis lock write lock failure,the first priority in the current process non-waiting queue"
|
||||
case -8:
|
||||
return "redis refresh lock failure,the lock not exist"
|
||||
}
|
||||
return "unkown redis execution result"
|
||||
}
|
||||
|
|
@ -1,45 +0,0 @@
|
|||
package distributedlock
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"modelRT/config"
|
||||
"modelRT/util"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
var (
|
||||
_globalLockerClient *redis.Client
|
||||
once sync.Once
|
||||
)
|
||||
|
||||
// initClient define func of return successfully initialized redis client
|
||||
func initClient(rCfg config.RedisConfig) *redis.Client {
|
||||
client, err := util.NewRedisClient(
|
||||
rCfg.Addr,
|
||||
util.WithPassword(rCfg.Password),
|
||||
util.WithDB(rCfg.DB),
|
||||
util.WithPoolSize(rCfg.PoolSize),
|
||||
util.WithTimeout(time.Duration(rCfg.Timeout)*time.Second),
|
||||
)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return client
|
||||
}
|
||||
|
||||
// InitClientInstance define func of return instance of redis client
|
||||
func InitClientInstance(rCfg config.RedisConfig) *redis.Client {
|
||||
once.Do(func() {
|
||||
_globalLockerClient = initClient(rCfg)
|
||||
})
|
||||
return _globalLockerClient
|
||||
}
|
||||
|
||||
// GetRedisClientInstance define func of get redis client instance
|
||||
func GetRedisClientInstance() *redis.Client {
|
||||
client := _globalLockerClient
|
||||
return client
|
||||
}
|
||||
|
|
@ -1,62 +0,0 @@
|
|||
package luascript
|
||||
|
||||
/*
|
||||
KEYS[1]:锁的键名(key),通常是锁的唯一标识。
|
||||
ARGV[1]:锁的过期时间(lockLeaseTime),单位为秒。
|
||||
ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。
|
||||
*/
|
||||
var LockScript = `
|
||||
-- 锁不存在的情况下加锁
|
||||
if (redis.call('exists', KEYS[1]) == 0) then
|
||||
redis.call('hset', KEYS[1], ARGV[2], 1);
|
||||
redis.call('expire', KEYS[1], ARGV[1]);
|
||||
return 1;
|
||||
end;
|
||||
-- 重入锁逻辑
|
||||
if (redis.call('hexists', KEYS[1], ARGV[2]) == 1) then
|
||||
redis.call('hincrby', KEYS[1], ARGV[2], 1);
|
||||
redis.call('expire', KEYS[1], ARGV[1]);
|
||||
return 1;
|
||||
end;
|
||||
-- 持有锁的 token 不是当前客户端的 token,返回加锁失败
|
||||
return -9;
|
||||
`
|
||||
|
||||
/*
|
||||
KEYS[1]:锁的键名(key),通常是锁的唯一标识。
|
||||
ARGV[1]:锁的过期时间(lockLeaseTime),单位为秒。
|
||||
ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。
|
||||
*/
|
||||
var RefreshLockScript = `
|
||||
if (redis.call('hexists', KEYS[1], ARGV[2]) == 1) then
|
||||
redis.call('expire', KEYS[1], ARGV[1]);
|
||||
return 1;
|
||||
end;
|
||||
return -8;
|
||||
`
|
||||
|
||||
/*
|
||||
KEYS[1]:锁的键名(key),通常是锁的唯一标识。
|
||||
KEYS[2]:锁的释放通知频道(chankey),用于通知其他客户端锁已释放。
|
||||
ARGV[1]:解锁消息(unlockMessage),用于通知其他客户端锁已释放。
|
||||
ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。
|
||||
*/
|
||||
var UnLockScript = `
|
||||
if (redis.call('exists', KEYS[1]) == 0) then
|
||||
redis.call('publish', KEYS[2], ARGV[1]);
|
||||
return 1;
|
||||
end;
|
||||
if (redis.call('hexists', KEYS[1], ARGV[2]) == 0) then
|
||||
return 1;
|
||||
end;
|
||||
local counter = redis.call('hincrby', KEYS[1], ARGV[2], -1);
|
||||
if (counter > 0) then
|
||||
return 1;
|
||||
else
|
||||
redis.call('del', KEYS[1]);
|
||||
redis.call('publish', KEYS[2], ARGV[1]);
|
||||
return 1;
|
||||
end;
|
||||
-- 持有锁的 token 不是当前客户端的 token,返回解锁失败
|
||||
return -10;
|
||||
`
|
||||
|
|
@ -1,263 +0,0 @@
|
|||
// Package luascript defines the lua script used for redis distributed lock
|
||||
package luascript
|
||||
|
||||
// RLockScript is the lua script for the lock read lock command
|
||||
/*
|
||||
KEYS[1]:锁的键名(key),通常是锁的唯一标识。
|
||||
KEYS[2]:锁的超时键名前缀(rwTimeoutPrefix),用于存储每个读锁的超时键。
|
||||
ARGV[1]:锁的过期时间(lockLeaseTime),单位为秒。
|
||||
ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。
|
||||
*/
|
||||
var RLockScript = `
|
||||
local mode = redis.call('hget', KEYS[1], 'mode');
|
||||
local lockKey = KEYS[2] .. ':' .. ARGV[2];
|
||||
if (mode == false) then
|
||||
redis.call('hset', KEYS[1], 'mode', 'read');
|
||||
redis.call('hset', KEYS[1], lockKey, '1');
|
||||
redis.call('hpexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey);
|
||||
redis.call('pexpire', KEYS[1], ARGV[1]);
|
||||
return 1;
|
||||
end;
|
||||
|
||||
if (mode == 'write') then
|
||||
-- 放到 list 中等待写锁释放后再次尝试加锁并且订阅写锁释放的消息
|
||||
local waitKey = KEYS[1] .. ':read';
|
||||
redis.call('rpush', waitKey, ARGV[2]);
|
||||
return -1;
|
||||
end;
|
||||
|
||||
if (mode == 'read') then
|
||||
if (redis.call('exists', KEYS[1], ARGV[2]) == 1) then
|
||||
redis.call('hincrby', KEYS[1], lockKey, '1');
|
||||
local remainTime = redis.call('hpttl', KEYS[1], 'fields', '1', lockKey);
|
||||
redis.call('hpexpire', KEYS[1], math.max(tonumber(remainTime[1]), ARGV[1]), 'fields', '1', lockKey);
|
||||
else
|
||||
redis.call('hset', KEYS[1], lockKey, '1');
|
||||
redis.call('hpexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey);
|
||||
end;
|
||||
local cursor = 0;
|
||||
local maxRemainTime = tonumber(ARGV[1]);
|
||||
local pattern = KEYS[2] .. ':*';
|
||||
repeat
|
||||
local hscanResult = redis.call('hscan', KEYS[1], cursor, 'match', pattern, 'count', '100');
|
||||
cursor = tonumber(hscanResult[1]);
|
||||
local fields = hscanResult[2];
|
||||
|
||||
for i = 1, #fields,2 do
|
||||
local field = fields[i];
|
||||
local remainTime = redis.call('hpttl', KEYS[1], 'fields', '1', field);
|
||||
maxRemainTime = math.max(tonumber(remainTime[1]), maxRemainTime);
|
||||
end;
|
||||
until cursor == 0;
|
||||
|
||||
local remainTime = redis.call('pttl', KEYS[1]);
|
||||
redis.call('pexpire', KEYS[1], math.max(tonumber(remainTime),maxRemainTime));
|
||||
return 1;
|
||||
end;
|
||||
`
|
||||
|
||||
// UnRLockScript is the lua script for the unlock read lock command
|
||||
/*
|
||||
KEYS[1]:锁的键名(key),通常是锁的唯一标识。
|
||||
KEYS[2]:锁的超时键名前缀(rwTimeoutPrefix),用于存储每个读锁的超时键。
|
||||
KEYS[3]:锁的释放通知写频道(chankey),用于通知其他写等待客户端锁已释放。
|
||||
ARGV[1]:解锁消息(unlockMessage),用于通知其他客户端锁已释放。
|
||||
ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。
|
||||
*/
|
||||
var UnRLockScript = `
|
||||
local lockKey = KEYS[2] .. ':' .. ARGV[2];
|
||||
local mode = redis.call('hget', KEYS[1], 'mode');
|
||||
if (mode == false) then
|
||||
local writeWait = KEYS[1] .. ':write';
|
||||
-- 优先写锁加锁
|
||||
local counter = redis.call('llen',writeWait);
|
||||
if (counter >= 1) then
|
||||
redis.call('publish', KEYS[3], ARGV[1]);
|
||||
end;
|
||||
return 1;
|
||||
elseif (mode == 'write') then
|
||||
return -2;
|
||||
end;
|
||||
|
||||
-- 判断当前的确是读模式但是当前 token 并没有加读锁的情况,返回 0
|
||||
local lockExists = redis.call('hexists', KEYS[1], lockKey);
|
||||
if ((mode == 'read') and (lockExists == 0)) then
|
||||
return 0;
|
||||
end;
|
||||
|
||||
local counter = redis.call('hincrby', KEYS[1], lockKey, -1);
|
||||
local delTTLs = redis.call('hpttl', KEYS[1], 'fields', '1', lockKey);
|
||||
local delTTL = tonumber(delTTLs[1]);
|
||||
if (counter == 0) then
|
||||
redis.call('hdel', KEYS[1], lockKey);
|
||||
end;
|
||||
|
||||
if (redis.call('hlen', KEYS[1]) > 1) then
|
||||
local cursor = 0;
|
||||
local maxRemainTime = 0;
|
||||
local pattern = KEYS[2] .. ':*';
|
||||
repeat
|
||||
local hscanResult = redis.call('hscan', KEYS[1], cursor, 'match', pattern, 'count', '100');
|
||||
cursor = tonumber(hscanResult[1]);
|
||||
local fields = hscanResult[2];
|
||||
|
||||
for i = 1, #fields,2 do
|
||||
local field = fields[i];
|
||||
local remainTime = redis.call('hpttl', KEYS[1], 'fields', '1', field);
|
||||
maxRemainTime = math.max(tonumber(remainTime[1]), maxRemainTime);
|
||||
end;
|
||||
until cursor == 0;
|
||||
|
||||
if (maxRemainTime > 0) then
|
||||
if (delTTL > maxRemainTime) then
|
||||
redis.call('pexpire', KEYS[1], maxRemainTime);
|
||||
else
|
||||
local remainTime = redis.call('pttl', KEYS[1]);
|
||||
redis.call('pexpire', KEYS[1], math.max(tonumber(remainTime),maxRemainTime));
|
||||
end;
|
||||
end;
|
||||
else
|
||||
redis.call('del', KEYS[1]);
|
||||
local writeWait = KEYS[1] .. ':write';
|
||||
-- 优先写锁加锁
|
||||
local counter = redis.call('llen',writeWait);
|
||||
if (counter >= 1) then
|
||||
redis.call('publish', KEYS[3], ARGV[1]);
|
||||
end;
|
||||
return 1;
|
||||
end;
|
||||
`
|
||||
|
||||
// WLockScript is the lua script for the lock write lock command
|
||||
/*
|
||||
KEYS[1]:锁的键名(key),通常是锁的唯一标识。
|
||||
KEYS[2]:锁的超时键名前缀(rwTimeoutPrefix),用于存储每个读锁的超时键。
|
||||
ARGV[1]:锁的过期时间(lockLeaseTime),单位为秒。
|
||||
ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。
|
||||
*/
|
||||
var WLockScript = `
|
||||
local mode = redis.call('hget', KEYS[1], 'mode');
|
||||
local lockKey = KEYS[2] .. ':' .. ARGV[2];
|
||||
local waitKey = KEYS[1] .. ':write';
|
||||
if (mode == false) then
|
||||
local waitListLen = redis.call('llen', waitKey);
|
||||
if (waitListLen > 0) then
|
||||
local firstToken = redis.call('lindex', waitKey,'0');
|
||||
if (firstToken ~= ARGV[2]) then
|
||||
return -7;
|
||||
end;
|
||||
end;
|
||||
redis.call('hset', KEYS[1], 'mode', 'write');
|
||||
redis.call('hset', KEYS[1], lockKey, 1);
|
||||
redis.call('hpexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey);
|
||||
redis.call('pexpire', KEYS[1], ARGV[1]);
|
||||
redis.call('lpop', waitKey, '1');
|
||||
return 1;
|
||||
elseif (mode == 'read') then
|
||||
-- 放到 list 中等待读锁释放后再次尝试加锁并且订阅读锁释放的消息
|
||||
redis.call('rpush', waitKey, ARGV[2]);
|
||||
return -3;
|
||||
else
|
||||
-- 可重入写锁逻辑
|
||||
local lockKey = KEYS[2] .. ':' .. ARGV[2];
|
||||
local lockExists = redis.call('hexists', KEYS[1], lockKey);
|
||||
if (lockExists == 1) then
|
||||
redis.call('hincrby', KEYS[1], lockKey, 1);
|
||||
redis.call('hpexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey);
|
||||
redis.call('pexpire', KEYS[1], ARGV[1]);
|
||||
return 1;
|
||||
end;
|
||||
-- 放到 list 中等待写锁释放后再次尝试加锁并且订阅写锁释放的消息
|
||||
local key = KEYS[1] .. ':write';
|
||||
redis.call('rpush', key, ARGV[2]);
|
||||
return -4;
|
||||
end;
|
||||
`
|
||||
|
||||
// UnWLockScript is the lua script for the unlock write lock command
|
||||
/*
|
||||
KEYS[1]:锁的键名(key),通常是锁的唯一标识。
|
||||
KEYS[2]:锁的超时键名前缀(rwTimeoutPrefix),用于存储每个读锁的超时键。
|
||||
KEYS[3]:锁的释放通知写频道(writeChankey),用于通知其他写等待客户端锁已释放。
|
||||
KEYS[4]:锁的释放通知读频道(readChankey),用于通知其他读等待客户端锁已释放。
|
||||
ARGV[1]:解锁消息(unlockMessage),用于通知其他客户端锁已释放。
|
||||
ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。
|
||||
*/
|
||||
var UnWLockScript = `
|
||||
local mode = redis.call('hget', KEYS[1], 'mode');
|
||||
local writeWait = KEYS[1] .. ':write';
|
||||
if (mode == false) then
|
||||
-- 优先写锁加锁,无写锁的情况通知读锁加锁
|
||||
local counter = redis.call('llen',writeWait);
|
||||
if (counter >= 1) then
|
||||
redis.call('publish', KEYS[3], ARGV[1]);
|
||||
else
|
||||
redis.call('publish', KEYS[4], ARGV[1]);
|
||||
end;
|
||||
return 1;
|
||||
elseif (mode == 'read') then
|
||||
return -5;
|
||||
else
|
||||
local lockKey = KEYS[2] .. ':' .. ARGV[2];
|
||||
local lockExists = redis.call('hexists', KEYS[1], lockKey);
|
||||
if (lockExists >= 1) then
|
||||
-- 可重入写锁逻辑
|
||||
local incrRes = redis.call('hincrby', KEYS[1], lockKey, -1);
|
||||
if (incrRes == 0) then
|
||||
redis.call('del', KEYS[1]);
|
||||
local counter = redis.call('llen',writeWait);
|
||||
if (counter >= 1) then
|
||||
redis.call('publish', KEYS[3], ARGV[1]);
|
||||
else
|
||||
redis.call('publish', KEYS[4], ARGV[1]);
|
||||
end;
|
||||
return 1;
|
||||
end;
|
||||
return 0;
|
||||
else
|
||||
return -6;
|
||||
end;
|
||||
end;
|
||||
`
|
||||
|
||||
// RefreshRWLockScript is the lua script for the refresh lock command
|
||||
/*
|
||||
KEYS[1]:锁的键名(key),通常是锁的唯一标识。
|
||||
KEYS[2]:锁的超时键名前缀(rwTimeoutPrefix),用于存储每个读锁的超时键。
|
||||
ARGV[1]:锁的过期时间(lockLeaseTime),单位为秒。
|
||||
ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。
|
||||
*/
|
||||
var RefreshRWLockScript = `
|
||||
local lockKey = KEYS[2] .. ':' .. ARGV[2];
|
||||
local lockExists = redis.call('hexists', KEYS[1], lockKey);
|
||||
local mode = redis.call('hget', KEYS[1], 'mode');
|
||||
local maxRemainTime = tonumber(ARGV[1]);
|
||||
if (lockExists == 1) then
|
||||
redis.call('hpexpire', KEYS[1], ARGV[1], 'fields', '1', lockKey);
|
||||
if (mode == 'read') then
|
||||
local cursor = 0;
|
||||
local pattern = KEYS[2] .. ':*';
|
||||
repeat
|
||||
local hscanResult = redis.call('hscan', KEYS[1], cursor, 'match', pattern, 'count', '100');
|
||||
cursor = tonumber(hscanResult[1]);
|
||||
local fields = hscanResult[2];
|
||||
|
||||
for i = 1, #fields,2 do
|
||||
local field = fields[i];
|
||||
local remainTime = redis.call('hpttl', KEYS[1], 'fields', '1', field);
|
||||
maxRemainTime = math.max(tonumber(remainTime[1]), maxRemainTime);
|
||||
end;
|
||||
until cursor == 0;
|
||||
|
||||
if (maxRemainTime > 0) then
|
||||
local remainTime = redis.call('pttl', KEYS[1]);
|
||||
redis.call('pexpire', KEYS[1], math.max(tonumber(remainTime),maxRemainTime));
|
||||
end;
|
||||
elseif (mode == 'write') then
|
||||
redis.call('pexpire', KEYS[1], ARGV[1]);
|
||||
end;
|
||||
-- return redis.call('pttl',KEYS[1]);
|
||||
return 1;
|
||||
end;
|
||||
return -8;
|
||||
`
|
||||
|
|
@ -1,256 +0,0 @@
|
|||
package distributedlock
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
constants "modelRT/distributedlock/constant"
|
||||
luascript "modelRT/distributedlock/luascript"
|
||||
"modelRT/logger"
|
||||
|
||||
uuid "github.com/gofrs/uuid"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const (
|
||||
internalLockLeaseTime = uint64(30 * 1000)
|
||||
unlockMessage = 0
|
||||
)
|
||||
|
||||
// RedissionLockConfig define redission lock config
|
||||
type RedissionLockConfig struct {
|
||||
LockLeaseTime uint64
|
||||
Token string
|
||||
Prefix string
|
||||
ChanPrefix string
|
||||
TimeoutPrefix string
|
||||
Key string
|
||||
NeedRefresh bool
|
||||
}
|
||||
|
||||
type redissionLocker struct {
|
||||
lockLeaseTime uint64
|
||||
Token string
|
||||
Key string
|
||||
waitChanKey string
|
||||
needRefresh bool
|
||||
refreshExitChan chan struct{}
|
||||
subExitChan chan struct{}
|
||||
client *redis.Client
|
||||
refreshOnce *sync.Once
|
||||
}
|
||||
|
||||
func (rl *redissionLocker) Lock(ctx context.Context, timeout ...time.Duration) error {
|
||||
if rl.refreshExitChan == nil {
|
||||
rl.refreshExitChan = make(chan struct{})
|
||||
}
|
||||
result := rl.tryLock(ctx).(*constants.RedisResult)
|
||||
if result.Code == constants.UnknownInternalError {
|
||||
logger.Error(ctx, result.OutputResultMessage())
|
||||
return fmt.Errorf("get lock failed:%w", result)
|
||||
}
|
||||
|
||||
if (result.Code == constants.LockSuccess) && rl.needRefresh {
|
||||
rl.refreshOnce.Do(func() {
|
||||
// async refresh lock timeout unitl receive exit singal
|
||||
go rl.refreshLockTimeout(ctx)
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
subMsg := make(chan struct{}, 1)
|
||||
defer close(subMsg)
|
||||
sub := rl.client.Subscribe(ctx, rl.waitChanKey)
|
||||
defer sub.Close()
|
||||
go rl.subscribeLock(ctx, sub, subMsg)
|
||||
|
||||
if len(timeout) > 0 && timeout[0] > 0 {
|
||||
acquireTimer := time.NewTimer(timeout[0])
|
||||
for {
|
||||
select {
|
||||
case _, ok := <-subMsg:
|
||||
if !ok {
|
||||
err := errors.New("failed to read the lock waiting for for the channel message")
|
||||
logger.Error(ctx, "failed to read the lock waiting for for the channel message")
|
||||
return err
|
||||
}
|
||||
|
||||
resultErr := rl.tryLock(ctx).(*constants.RedisResult)
|
||||
if (resultErr.Code == constants.LockFailure) || (resultErr.Code == constants.UnknownInternalError) {
|
||||
logger.Info(ctx, resultErr.OutputResultMessage())
|
||||
continue
|
||||
}
|
||||
|
||||
if resultErr.Code == constants.LockSuccess {
|
||||
logger.Info(ctx, resultErr.OutputResultMessage())
|
||||
return nil
|
||||
}
|
||||
case <-acquireTimer.C:
|
||||
err := errors.New("the waiting time for obtaining the lock operation has timed out")
|
||||
logger.Info(ctx, "the waiting time for obtaining the lock operation has timed out")
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("lock the redis lock failed:%w", result)
|
||||
}
|
||||
|
||||
func (rl *redissionLocker) subscribeLock(ctx context.Context, sub *redis.PubSub, subMsgChan chan struct{}) {
|
||||
if sub == nil || subMsgChan == nil {
|
||||
return
|
||||
}
|
||||
logger.Info(ctx, "lock: enter sub routine", zap.String("token", rl.Token))
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-rl.subExitChan:
|
||||
close(subMsgChan)
|
||||
return
|
||||
case <-sub.Channel():
|
||||
// 这里只会收到真正的数据消息
|
||||
subMsgChan <- struct{}{}
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
KEYS[1]:锁的键名(key),通常是锁的唯一标识。
|
||||
ARGV[1]:锁的过期时间(lockLeaseTime),单位为秒。
|
||||
ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。
|
||||
*/
|
||||
func (rl *redissionLocker) refreshLockTimeout(ctx context.Context) {
|
||||
logger.Info(ctx, "lock refresh by key and token", zap.String("token", rl.Token), zap.String("key", rl.Key))
|
||||
|
||||
lockTime := time.Duration(rl.lockLeaseTime/3) * time.Millisecond
|
||||
timer := time.NewTimer(lockTime)
|
||||
defer timer.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-timer.C:
|
||||
// extend key lease time
|
||||
res := rl.client.Eval(ctx, luascript.RefreshLockScript, []string{rl.Key}, rl.lockLeaseTime, rl.Token)
|
||||
val, err := res.Int()
|
||||
if err != redis.Nil && err != nil {
|
||||
logger.Info(ctx, "lock refresh failed", "token", rl.Token, "key", rl.Key, "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
if constants.RedisCode(val) == constants.RefreshLockFailure {
|
||||
logger.Error(ctx, "lock refreash failed,can not find the lock by key and token", "token", rl.Token, "key", rl.Key)
|
||||
break
|
||||
}
|
||||
|
||||
if constants.RedisCode(val) == constants.RefreshLockSuccess {
|
||||
logger.Info(ctx, "lock refresh success by key and token", "token", rl.Token, "key", rl.Key)
|
||||
}
|
||||
timer.Reset(lockTime)
|
||||
case <-rl.refreshExitChan:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (rl *redissionLocker) cancelRefreshLockTime() {
|
||||
if rl.refreshExitChan != nil {
|
||||
close(rl.refreshExitChan)
|
||||
rl.refreshOnce = &sync.Once{}
|
||||
}
|
||||
}
|
||||
|
||||
func (rl *redissionLocker) closeSub(ctx context.Context, sub *redis.PubSub, noticeChan chan struct{}) {
|
||||
if sub != nil {
|
||||
err := sub.Close()
|
||||
if err != nil {
|
||||
logger.Error(ctx, "close sub failed", "token", rl.Token, "key", rl.Key, "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
if noticeChan != nil {
|
||||
close(noticeChan)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
KEYS[1]:锁的键名(key),通常是锁的唯一标识。
|
||||
ARGV[1]:锁的过期时间(lockLeaseTime),单位为秒。
|
||||
ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。
|
||||
*/
|
||||
func (rl *redissionLocker) tryLock(ctx context.Context) error {
|
||||
lockType := constants.LockType
|
||||
res := rl.client.Eval(ctx, luascript.LockScript, []string{rl.Key}, rl.lockLeaseTime, rl.Token)
|
||||
val, err := res.Int()
|
||||
if err != redis.Nil && err != nil {
|
||||
return constants.NewRedisResult(constants.UnknownInternalError, lockType, err.Error())
|
||||
}
|
||||
return constants.NewRedisResult(constants.RedisCode(val), lockType, "")
|
||||
}
|
||||
|
||||
/*
|
||||
KEYS[1]:锁的键名(key),通常是锁的唯一标识。
|
||||
KEYS[2]:锁的释放通知频道(chankey),用于通知其他客户端锁已释放。
|
||||
ARGV[1]:解锁消息(unlockMessage),用于通知其他客户端锁已释放。
|
||||
ARGV[2]:当前客户端的唯一标识(token),用于区分不同的客户端。
|
||||
*/
|
||||
func (rl *redissionLocker) UnLock(ctx context.Context) error {
|
||||
res := rl.client.Eval(ctx, luascript.UnLockScript, []string{rl.Key, rl.waitChanKey}, unlockMessage, rl.Token)
|
||||
val, err := res.Int()
|
||||
if err != redis.Nil && err != nil {
|
||||
logger.Info(ctx, "unlock lock failed", zap.String("token", rl.Token), zap.String("key", rl.Key), zap.Error(err))
|
||||
return fmt.Errorf("unlock lock failed:%w", constants.NewRedisResult(constants.UnknownInternalError, constants.UnLockType, err.Error()))
|
||||
}
|
||||
|
||||
if constants.RedisCode(val) == constants.UnLockSuccess {
|
||||
if rl.needRefresh {
|
||||
rl.cancelRefreshLockTime()
|
||||
}
|
||||
|
||||
logger.Info(ctx, "unlock lock success", zap.String("token", rl.Token), zap.String("key", rl.Key))
|
||||
return nil
|
||||
}
|
||||
|
||||
if constants.RedisCode(val) == constants.UnLocakFailureWithLockOccupancy {
|
||||
logger.Info(ctx, "unlock lock failed", zap.String("token", rl.Token), zap.String("key", rl.Key))
|
||||
return fmt.Errorf("unlock lock failed:%w", constants.NewRedisResult(constants.UnLocakFailureWithLockOccupancy, constants.UnLockType, ""))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO 优化 panic
|
||||
func GetLocker(client *redis.Client, ops *RedissionLockConfig) *redissionLocker {
|
||||
if ops.Token == "" {
|
||||
token, err := uuid.NewV4()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
ops.Token = token.String()
|
||||
}
|
||||
|
||||
if len(ops.Prefix) <= 0 {
|
||||
ops.Prefix = "redission-lock"
|
||||
}
|
||||
|
||||
if len(ops.ChanPrefix) <= 0 {
|
||||
ops.ChanPrefix = "redission-lock-channel"
|
||||
}
|
||||
|
||||
if ops.LockLeaseTime == 0 {
|
||||
ops.LockLeaseTime = internalLockLeaseTime
|
||||
}
|
||||
|
||||
r := &redissionLocker{
|
||||
Token: ops.Token,
|
||||
Key: strings.Join([]string{ops.Prefix, ops.Key}, ":"),
|
||||
waitChanKey: strings.Join([]string{ops.ChanPrefix, ops.Key, "wait"}, ":"),
|
||||
needRefresh: ops.NeedRefresh,
|
||||
client: client,
|
||||
refreshExitChan: make(chan struct{}),
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
|
@ -1,329 +0,0 @@
|
|||
package distributedlock
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
constants "modelRT/distributedlock/constant"
|
||||
"modelRT/distributedlock/luascript"
|
||||
"modelRT/logger"
|
||||
|
||||
uuid "github.com/gofrs/uuid"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
type RedissionRWLocker struct {
|
||||
redissionLocker
|
||||
writeWaitChanKey string
|
||||
readWaitChanKey string
|
||||
RWTokenTimeoutPrefix string
|
||||
}
|
||||
|
||||
func (rl *RedissionRWLocker) RLock(ctx context.Context, timeout ...time.Duration) error {
|
||||
result := rl.tryRLock(ctx).(*constants.RedisResult)
|
||||
if result.Code == constants.UnknownInternalError {
|
||||
logger.Error(ctx, result.OutputResultMessage())
|
||||
return fmt.Errorf("get read lock failed:%w", result)
|
||||
}
|
||||
|
||||
if result.Code == constants.LockSuccess {
|
||||
if rl.needRefresh {
|
||||
rl.refreshOnce.Do(func() {
|
||||
if rl.refreshExitChan == nil {
|
||||
rl.refreshExitChan = make(chan struct{})
|
||||
}
|
||||
|
||||
// async refresh lock timeout unitl receive exit singal
|
||||
go rl.refreshLockTimeout(ctx)
|
||||
})
|
||||
}
|
||||
logger.Info(ctx, "success get the read lock by key and token", "key", rl.Key, "token", rl.Token)
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(timeout) > 0 && timeout[0] > 0 {
|
||||
if rl.subExitChan == nil {
|
||||
rl.subExitChan = make(chan struct{})
|
||||
}
|
||||
|
||||
subMsgChan := make(chan struct{}, 1)
|
||||
sub := rl.client.Subscribe(ctx, rl.readWaitChanKey)
|
||||
go rl.subscribeLock(ctx, sub, subMsgChan)
|
||||
|
||||
acquireTimer := time.NewTimer(timeout[0])
|
||||
for {
|
||||
select {
|
||||
case _, ok := <-subMsgChan:
|
||||
if !ok {
|
||||
err := errors.New("failed to read the read lock waiting for for the channel message")
|
||||
logger.Error(ctx, "failed to read the read lock waiting for for the channel message")
|
||||
return err
|
||||
}
|
||||
|
||||
result := rl.tryRLock(ctx).(*constants.RedisResult)
|
||||
if (result.Code == constants.RLockFailureWithWLockOccupancy) || (result.Code == constants.UnknownInternalError) {
|
||||
logger.Info(ctx, result.OutputResultMessage())
|
||||
continue
|
||||
}
|
||||
|
||||
if result.Code == constants.LockSuccess {
|
||||
logger.Info(ctx, result.OutputResultMessage())
|
||||
rl.closeSub(ctx, sub, rl.subExitChan)
|
||||
|
||||
if rl.needRefresh {
|
||||
rl.refreshOnce.Do(func() {
|
||||
if rl.refreshExitChan == nil {
|
||||
rl.refreshExitChan = make(chan struct{})
|
||||
}
|
||||
|
||||
// async refresh lock timeout unitl receive exit singal
|
||||
go rl.refreshLockTimeout(ctx)
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
case <-acquireTimer.C:
|
||||
logger.Info(ctx, "the waiting time for obtaining the read lock operation has timed out")
|
||||
rl.closeSub(ctx, sub, rl.subExitChan)
|
||||
// after acquire lock timeout,notice the sub channel to close
|
||||
return constants.AcquireTimeoutErr
|
||||
}
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("lock the redis read lock failed:%w", result)
|
||||
}
|
||||
|
||||
func (rl *RedissionRWLocker) tryRLock(ctx context.Context) error {
|
||||
lockType := constants.LockType
|
||||
|
||||
res := rl.client.Eval(ctx, luascript.RLockScript, []string{rl.Key, rl.RWTokenTimeoutPrefix}, rl.lockLeaseTime, rl.Token)
|
||||
val, err := res.Int()
|
||||
if err != redis.Nil && err != nil {
|
||||
return constants.NewRedisResult(constants.UnknownInternalError, lockType, err.Error())
|
||||
}
|
||||
return constants.NewRedisResult(constants.RedisCode(val), lockType, "")
|
||||
}
|
||||
|
||||
func (rl *RedissionRWLocker) refreshLockTimeout(ctx context.Context) {
|
||||
logger.Info(ctx, "lock refresh by key and token", "token", rl.Token, "key", rl.Key)
|
||||
|
||||
lockTime := time.Duration(rl.lockLeaseTime/3) * time.Millisecond
|
||||
timer := time.NewTimer(lockTime)
|
||||
defer timer.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-timer.C:
|
||||
// extend key lease time
|
||||
res := rl.client.Eval(ctx, luascript.RefreshRWLockScript, []string{rl.Key, rl.RWTokenTimeoutPrefix}, rl.lockLeaseTime, rl.Token)
|
||||
val, err := res.Int()
|
||||
if err != redis.Nil && err != nil {
|
||||
logger.Info(ctx, "lock refresh failed", "token", rl.Token, "key", rl.Key, "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
if constants.RedisCode(val) == constants.RefreshLockFailure {
|
||||
logger.Error(ctx, "lock refreash failed,can not find the read lock by key and token", "rwTokenPrefix", rl.RWTokenTimeoutPrefix, "token", rl.Token, "key", rl.Key)
|
||||
return
|
||||
}
|
||||
|
||||
if constants.RedisCode(val) == constants.RefreshLockSuccess {
|
||||
logger.Info(ctx, "lock refresh success by key and token", "token", rl.Token, "key", rl.Key)
|
||||
}
|
||||
timer.Reset(lockTime)
|
||||
case <-rl.refreshExitChan:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (rl *RedissionRWLocker) UnRLock(ctx context.Context) error {
|
||||
logger.Info(ctx, "unlock RLock by key and token", "key", rl.Key, "token", rl.Token)
|
||||
res := rl.client.Eval(ctx, luascript.UnRLockScript, []string{rl.Key, rl.RWTokenTimeoutPrefix, rl.writeWaitChanKey}, unlockMessage, rl.Token)
|
||||
val, err := res.Int()
|
||||
if err != redis.Nil && err != nil {
|
||||
logger.Info(ctx, "unlock read lock failed", "token", rl.Token, "key", rl.Key, "error", err)
|
||||
return fmt.Errorf("unlock read lock failed:%w", constants.NewRedisResult(constants.UnknownInternalError, constants.UnRLockType, err.Error()))
|
||||
}
|
||||
|
||||
if (constants.RedisCode(val) == constants.UnLockSuccess) || (constants.RedisCode(val) == constants.UnRLockSuccess) {
|
||||
if rl.needRefresh && (constants.RedisCode(val) == constants.UnLockSuccess) {
|
||||
rl.cancelRefreshLockTime()
|
||||
}
|
||||
|
||||
logger.Info(ctx, "unlock read lock success", "token", rl.Token, "key", rl.Key)
|
||||
return nil
|
||||
}
|
||||
|
||||
if constants.RedisCode(val) == constants.UnRLockFailureWithWLockOccupancy {
|
||||
logger.Info(ctx, "unlock read lock failed", "token", rl.Token, "key", rl.Key)
|
||||
return fmt.Errorf("unlock read lock failed:%w", constants.NewRedisResult(constants.UnRLockFailureWithWLockOccupancy, constants.UnRLockType, ""))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rl *RedissionRWLocker) WLock(ctx context.Context, timeout ...time.Duration) error {
|
||||
result := rl.tryWLock(ctx).(*constants.RedisResult)
|
||||
if result.Code == constants.UnknownInternalError {
|
||||
logger.Error(ctx, result.OutputResultMessage())
|
||||
return fmt.Errorf("get write lock failed:%w", result)
|
||||
}
|
||||
|
||||
if result.Code == constants.LockSuccess {
|
||||
if rl.needRefresh {
|
||||
rl.refreshOnce.Do(func() {
|
||||
if rl.refreshExitChan == nil {
|
||||
rl.refreshExitChan = make(chan struct{})
|
||||
}
|
||||
|
||||
// async refresh lock timeout unitl receive exit singal
|
||||
go rl.refreshLockTimeout(ctx)
|
||||
})
|
||||
}
|
||||
logger.Info(ctx, "success get the write lock by key and token", "key", rl.Key, "token", rl.Token)
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(timeout) > 0 && timeout[0] > 0 {
|
||||
if rl.subExitChan == nil {
|
||||
rl.subExitChan = make(chan struct{})
|
||||
}
|
||||
|
||||
subMsgChan := make(chan struct{}, 1)
|
||||
sub := rl.client.Subscribe(ctx, rl.writeWaitChanKey)
|
||||
go rl.subscribeLock(ctx, sub, subMsgChan)
|
||||
|
||||
acquireTimer := time.NewTimer(timeout[0])
|
||||
for {
|
||||
select {
|
||||
case _, ok := <-subMsgChan:
|
||||
if !ok {
|
||||
err := errors.New("failed to read the write lock waiting for for the channel message")
|
||||
logger.Error(ctx, "failed to read the read lock waiting for for the channel message")
|
||||
return err
|
||||
}
|
||||
|
||||
result := rl.tryWLock(ctx).(*constants.RedisResult)
|
||||
if (result.Code == constants.UnknownInternalError) || (result.Code == constants.WLockFailureWithRLockOccupancy) || (result.Code == constants.WLockFailureWithWLockOccupancy) || (result.Code == constants.WLockFailureWithNotFirstPriority) {
|
||||
logger.Info(ctx, result.OutputResultMessage())
|
||||
continue
|
||||
}
|
||||
|
||||
if result.Code == constants.LockSuccess {
|
||||
logger.Info(ctx, result.OutputResultMessage())
|
||||
rl.closeSub(ctx, sub, rl.subExitChan)
|
||||
|
||||
if rl.needRefresh {
|
||||
rl.refreshOnce.Do(func() {
|
||||
if rl.refreshExitChan == nil {
|
||||
rl.refreshExitChan = make(chan struct{})
|
||||
}
|
||||
|
||||
// async refresh lock timeout unitl receive exit singal
|
||||
go rl.refreshLockTimeout(ctx)
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
case <-acquireTimer.C:
|
||||
logger.Info(ctx, "the waiting time for obtaining the write lock operation has timed out")
|
||||
rl.closeSub(ctx, sub, rl.subExitChan)
|
||||
// after acquire lock timeout,notice the sub channel to close
|
||||
return constants.AcquireTimeoutErr
|
||||
}
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("lock write lock failed:%w", result)
|
||||
}
|
||||
|
||||
func (rl *RedissionRWLocker) tryWLock(ctx context.Context) error {
|
||||
lockType := constants.LockType
|
||||
|
||||
res := rl.client.Eval(ctx, luascript.WLockScript, []string{rl.Key, rl.RWTokenTimeoutPrefix}, rl.lockLeaseTime, rl.Token)
|
||||
val, err := res.Int()
|
||||
if err != redis.Nil && err != nil {
|
||||
return constants.NewRedisResult(constants.UnknownInternalError, lockType, err.Error())
|
||||
}
|
||||
return constants.NewRedisResult(constants.RedisCode(val), lockType, "")
|
||||
}
|
||||
|
||||
func (rl *RedissionRWLocker) UnWLock(ctx context.Context) error {
|
||||
res := rl.client.Eval(ctx, luascript.UnWLockScript, []string{rl.Key, rl.RWTokenTimeoutPrefix, rl.writeWaitChanKey, rl.readWaitChanKey}, unlockMessage, rl.Token)
|
||||
val, err := res.Int()
|
||||
if err != redis.Nil && err != nil {
|
||||
logger.Error(ctx, "unlock write lock failed", "token", rl.Token, "key", rl.Key, "error", err)
|
||||
return fmt.Errorf("unlock write lock failed:%w", constants.NewRedisResult(constants.UnknownInternalError, constants.UnWLockType, err.Error()))
|
||||
}
|
||||
|
||||
if (constants.RedisCode(val) == constants.UnLockSuccess) || constants.RedisCode(val) == constants.UnWLockSuccess {
|
||||
if rl.needRefresh && (constants.RedisCode(val) == constants.UnLockSuccess) {
|
||||
rl.cancelRefreshLockTime()
|
||||
}
|
||||
logger.Info(ctx, "unlock write lock success", "token", rl.Token, "key", rl.Key)
|
||||
return nil
|
||||
}
|
||||
|
||||
if (constants.RedisCode(val) == constants.UnWLockFailureWithRLockOccupancy) || (constants.RedisCode(val) == constants.UnWLockFailureWithWLockOccupancy) {
|
||||
logger.Info(ctx, "unlock write lock failed", "token", rl.Token, "key", rl.Key)
|
||||
return fmt.Errorf("unlock write lock failed:%w", constants.NewRedisResult(constants.RedisCode(val), constants.UnWLockType, ""))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO 优化 panic
|
||||
func GetRWLocker(client *redis.Client, conf *RedissionLockConfig) *RedissionRWLocker {
|
||||
if conf.Token == "" {
|
||||
token, err := uuid.NewV4()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
conf.Token = token.String()
|
||||
}
|
||||
|
||||
if conf.Prefix == "" {
|
||||
conf.Prefix = "redission-rwlock"
|
||||
}
|
||||
|
||||
if conf.TimeoutPrefix == "" {
|
||||
conf.TimeoutPrefix = "rwlock_timeout"
|
||||
}
|
||||
|
||||
if conf.ChanPrefix == "" {
|
||||
conf.ChanPrefix = "redission-rwlock-channel"
|
||||
}
|
||||
|
||||
if conf.LockLeaseTime == 0 {
|
||||
conf.LockLeaseTime = internalLockLeaseTime
|
||||
}
|
||||
|
||||
r := &redissionLocker{
|
||||
Token: conf.Token,
|
||||
Key: strings.Join([]string{conf.Prefix, conf.Key}, ":"),
|
||||
needRefresh: conf.NeedRefresh,
|
||||
lockLeaseTime: conf.LockLeaseTime,
|
||||
client: client,
|
||||
refreshOnce: &sync.Once{},
|
||||
}
|
||||
|
||||
rwLocker := &RedissionRWLocker{
|
||||
redissionLocker: *r,
|
||||
writeWaitChanKey: strings.Join([]string{conf.ChanPrefix, conf.Key, "write"}, ":"),
|
||||
readWaitChanKey: strings.Join([]string{conf.ChanPrefix, conf.Key, "read"}, ":"),
|
||||
RWTokenTimeoutPrefix: conf.TimeoutPrefix,
|
||||
}
|
||||
return rwLocker
|
||||
}
|
||||
|
||||
func InitRWLocker(key string, token string, lockLeaseTime uint64, needRefresh bool) *RedissionRWLocker {
|
||||
conf := &RedissionLockConfig{
|
||||
Key: key,
|
||||
Token: token,
|
||||
LockLeaseTime: lockLeaseTime,
|
||||
NeedRefresh: needRefresh,
|
||||
}
|
||||
return GetRWLocker(GetRedisClientInstance(), conf)
|
||||
}
|
||||
275
docs/docs.go
275
docs/docs.go
|
|
@ -1,275 +0,0 @@
|
|||
// Package docs Code generated by swaggo/swag. DO NOT EDIT
|
||||
package docs
|
||||
|
||||
import "github.com/swaggo/swag"
|
||||
|
||||
const docTemplate = `{
|
||||
"schemes": {{ marshal .Schemes }},
|
||||
"swagger": "2.0",
|
||||
"info": {
|
||||
"description": "{{escape .Description}}",
|
||||
"title": "{{.Title}}",
|
||||
"contact": {
|
||||
"name": "douxu",
|
||||
"url": "http://www.swagger.io/support",
|
||||
"email": "douxu@clea.com.cn"
|
||||
},
|
||||
"license": {
|
||||
"name": "Apache 2.0",
|
||||
"url": "http://www.apache.org/licenses/LICENSE-2.0.html"
|
||||
},
|
||||
"version": "{{.Version}}"
|
||||
},
|
||||
"host": "{{.Host}}",
|
||||
"basePath": "{{.BasePath}}",
|
||||
"paths": {
|
||||
"/data/realtime": {
|
||||
"get": {
|
||||
"description": "根据用户输入的组件token,从 dataRT 服务中持续获取测点实时数据",
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"RealTime Component"
|
||||
],
|
||||
"summary": "获取实时测点数据",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "测量点唯一标识符 (e.g.grid_1:zone_1:station_1:transformfeeder1_220.I_A_rms)",
|
||||
"name": "token",
|
||||
"in": "query",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "integer",
|
||||
"description": "查询起始时间 (Unix时间戳, e.g., 1761008266)",
|
||||
"name": "begin",
|
||||
"in": "query",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "integer",
|
||||
"description": "查询结束时间 (Unix时间戳, e.g., 1761526675)",
|
||||
"name": "end",
|
||||
"in": "query",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "返回实时数据成功",
|
||||
"schema": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/network.SuccessResponse"
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"payload": {
|
||||
"$ref": "#/definitions/network.RealTimeDataPayload"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"400": {
|
||||
"description": "返回实时数据失败",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/network.FailureResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/measurement/recommend": {
|
||||
"get": {
|
||||
"description": "根据用户输入的字符串,从 Redis 中查询可能的测量点或结构路径,并提供推荐列表。",
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Measurement Recommend"
|
||||
],
|
||||
"summary": "测量点推荐(搜索框自动补全)",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "查询输入参数,例如 'trans' 或 'transformfeeder1_220.'",
|
||||
"name": "request",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/network.MeasurementRecommendRequest"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "返回推荐列表成功",
|
||||
"schema": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/network.SuccessResponse"
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"payload": {
|
||||
"$ref": "#/definitions/network.MeasurementRecommendPayload"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"400": {
|
||||
"description": "返回推荐列表失败",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/network.FailureResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/model/diagram_load/{page_id}": {
|
||||
"get": {
|
||||
"description": "load circuit diagram info by page id",
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"load circuit_diagram"
|
||||
],
|
||||
"summary": "load circuit diagram info",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "integer",
|
||||
"description": "page ID",
|
||||
"name": "page_id",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "request process success",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/network.SuccessResponse"
|
||||
}
|
||||
},
|
||||
"400": {
|
||||
"description": "request process failed",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/network.FailureResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"definitions": {
|
||||
"network.FailureResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "integer",
|
||||
"example": 500
|
||||
},
|
||||
"msg": {
|
||||
"type": "string",
|
||||
"example": "failed to get recommend data from redis"
|
||||
},
|
||||
"payload": {
|
||||
"type": "object"
|
||||
}
|
||||
}
|
||||
},
|
||||
"network.MeasurementRecommendPayload": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"input": {
|
||||
"type": "string",
|
||||
"example": "transformfeeder1_220."
|
||||
},
|
||||
"offset": {
|
||||
"type": "integer",
|
||||
"example": 21
|
||||
},
|
||||
"recommended_list": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"example": [
|
||||
"[\"I_A_rms\"",
|
||||
" \"I_B_rms\"",
|
||||
"\"I_C_rms\"]"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"network.MeasurementRecommendRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"input": {
|
||||
"type": "string",
|
||||
"example": "trans"
|
||||
}
|
||||
}
|
||||
},
|
||||
"network.RealTimeDataPayload": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"sub_pos": {
|
||||
"description": "TODO 增加example tag",
|
||||
"type": "object"
|
||||
}
|
||||
}
|
||||
},
|
||||
"network.SuccessResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "integer",
|
||||
"example": 200
|
||||
},
|
||||
"msg": {
|
||||
"type": "string",
|
||||
"example": "success"
|
||||
},
|
||||
"payload": {
|
||||
"type": "object"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
// SwaggerInfo holds exported Swagger Info so clients can modify it
|
||||
var SwaggerInfo = &swag.Spec{
|
||||
Version: "1.0",
|
||||
Host: "localhost:8080",
|
||||
BasePath: "/api/v1",
|
||||
Schemes: []string{},
|
||||
Title: "ModelRT 实时模型服务 API 文档",
|
||||
Description: "实时数据计算和模型运行服务的 API 服务",
|
||||
InfoInstanceName: "swagger",
|
||||
SwaggerTemplate: docTemplate,
|
||||
LeftDelim: "{{",
|
||||
RightDelim: "}}",
|
||||
}
|
||||
|
||||
func init() {
|
||||
swag.Register(SwaggerInfo.InstanceName(), SwaggerInfo)
|
||||
}
|
||||
|
|
@ -1,251 +0,0 @@
|
|||
{
|
||||
"swagger": "2.0",
|
||||
"info": {
|
||||
"description": "实时数据计算和模型运行服务的 API 服务",
|
||||
"title": "ModelRT 实时模型服务 API 文档",
|
||||
"contact": {
|
||||
"name": "douxu",
|
||||
"url": "http://www.swagger.io/support",
|
||||
"email": "douxu@clea.com.cn"
|
||||
},
|
||||
"license": {
|
||||
"name": "Apache 2.0",
|
||||
"url": "http://www.apache.org/licenses/LICENSE-2.0.html"
|
||||
},
|
||||
"version": "1.0"
|
||||
},
|
||||
"host": "localhost:8080",
|
||||
"basePath": "/api/v1",
|
||||
"paths": {
|
||||
"/data/realtime": {
|
||||
"get": {
|
||||
"description": "根据用户输入的组件token,从 dataRT 服务中持续获取测点实时数据",
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"RealTime Component"
|
||||
],
|
||||
"summary": "获取实时测点数据",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "测量点唯一标识符 (e.g.grid_1:zone_1:station_1:transformfeeder1_220.I_A_rms)",
|
||||
"name": "token",
|
||||
"in": "query",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "integer",
|
||||
"description": "查询起始时间 (Unix时间戳, e.g., 1761008266)",
|
||||
"name": "begin",
|
||||
"in": "query",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "integer",
|
||||
"description": "查询结束时间 (Unix时间戳, e.g., 1761526675)",
|
||||
"name": "end",
|
||||
"in": "query",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "返回实时数据成功",
|
||||
"schema": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/network.SuccessResponse"
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"payload": {
|
||||
"$ref": "#/definitions/network.RealTimeDataPayload"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"400": {
|
||||
"description": "返回实时数据失败",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/network.FailureResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/measurement/recommend": {
|
||||
"get": {
|
||||
"description": "根据用户输入的字符串,从 Redis 中查询可能的测量点或结构路径,并提供推荐列表。",
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Measurement Recommend"
|
||||
],
|
||||
"summary": "测量点推荐(搜索框自动补全)",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "查询输入参数,例如 'trans' 或 'transformfeeder1_220.'",
|
||||
"name": "request",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/network.MeasurementRecommendRequest"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "返回推荐列表成功",
|
||||
"schema": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/network.SuccessResponse"
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"payload": {
|
||||
"$ref": "#/definitions/network.MeasurementRecommendPayload"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"400": {
|
||||
"description": "返回推荐列表失败",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/network.FailureResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/model/diagram_load/{page_id}": {
|
||||
"get": {
|
||||
"description": "load circuit diagram info by page id",
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"load circuit_diagram"
|
||||
],
|
||||
"summary": "load circuit diagram info",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "integer",
|
||||
"description": "page ID",
|
||||
"name": "page_id",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "request process success",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/network.SuccessResponse"
|
||||
}
|
||||
},
|
||||
"400": {
|
||||
"description": "request process failed",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/network.FailureResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"definitions": {
|
||||
"network.FailureResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "integer",
|
||||
"example": 500
|
||||
},
|
||||
"msg": {
|
||||
"type": "string",
|
||||
"example": "failed to get recommend data from redis"
|
||||
},
|
||||
"payload": {
|
||||
"type": "object"
|
||||
}
|
||||
}
|
||||
},
|
||||
"network.MeasurementRecommendPayload": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"input": {
|
||||
"type": "string",
|
||||
"example": "transformfeeder1_220."
|
||||
},
|
||||
"offset": {
|
||||
"type": "integer",
|
||||
"example": 21
|
||||
},
|
||||
"recommended_list": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"example": [
|
||||
"[\"I_A_rms\"",
|
||||
" \"I_B_rms\"",
|
||||
"\"I_C_rms\"]"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"network.MeasurementRecommendRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"input": {
|
||||
"type": "string",
|
||||
"example": "trans"
|
||||
}
|
||||
}
|
||||
},
|
||||
"network.RealTimeDataPayload": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"sub_pos": {
|
||||
"description": "TODO 增加example tag",
|
||||
"type": "object"
|
||||
}
|
||||
}
|
||||
},
|
||||
"network.SuccessResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "integer",
|
||||
"example": 200
|
||||
},
|
||||
"msg": {
|
||||
"type": "string",
|
||||
"example": "success"
|
||||
},
|
||||
"payload": {
|
||||
"type": "object"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1,163 +0,0 @@
|
|||
basePath: /api/v1
|
||||
definitions:
|
||||
network.FailureResponse:
|
||||
properties:
|
||||
code:
|
||||
example: 500
|
||||
type: integer
|
||||
msg:
|
||||
example: failed to get recommend data from redis
|
||||
type: string
|
||||
payload:
|
||||
type: object
|
||||
type: object
|
||||
network.MeasurementRecommendPayload:
|
||||
properties:
|
||||
input:
|
||||
example: transformfeeder1_220.
|
||||
type: string
|
||||
offset:
|
||||
example: 21
|
||||
type: integer
|
||||
recommended_list:
|
||||
example:
|
||||
- '["I_A_rms"'
|
||||
- ' "I_B_rms"'
|
||||
- '"I_C_rms"]'
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
type: object
|
||||
network.MeasurementRecommendRequest:
|
||||
properties:
|
||||
input:
|
||||
example: trans
|
||||
type: string
|
||||
type: object
|
||||
network.RealTimeDataPayload:
|
||||
properties:
|
||||
sub_pos:
|
||||
description: TODO 增加example tag
|
||||
type: object
|
||||
type: object
|
||||
network.SuccessResponse:
|
||||
properties:
|
||||
code:
|
||||
example: 200
|
||||
type: integer
|
||||
msg:
|
||||
example: success
|
||||
type: string
|
||||
payload:
|
||||
type: object
|
||||
type: object
|
||||
host: localhost:8080
|
||||
info:
|
||||
contact:
|
||||
email: douxu@clea.com.cn
|
||||
name: douxu
|
||||
url: http://www.swagger.io/support
|
||||
description: 实时数据计算和模型运行服务的 API 服务
|
||||
license:
|
||||
name: Apache 2.0
|
||||
url: http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
title: ModelRT 实时模型服务 API 文档
|
||||
version: "1.0"
|
||||
paths:
|
||||
/data/realtime:
|
||||
get:
|
||||
consumes:
|
||||
- application/json
|
||||
description: 根据用户输入的组件token,从 dataRT 服务中持续获取测点实时数据
|
||||
parameters:
|
||||
- description: 测量点唯一标识符 (e.g.grid_1:zone_1:station_1:transformfeeder1_220.I_A_rms)
|
||||
in: query
|
||||
name: token
|
||||
required: true
|
||||
type: string
|
||||
- description: 查询起始时间 (Unix时间戳, e.g., 1761008266)
|
||||
in: query
|
||||
name: begin
|
||||
required: true
|
||||
type: integer
|
||||
- description: 查询结束时间 (Unix时间戳, e.g., 1761526675)
|
||||
in: query
|
||||
name: end
|
||||
required: true
|
||||
type: integer
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
"200":
|
||||
description: 返回实时数据成功
|
||||
schema:
|
||||
allOf:
|
||||
- $ref: '#/definitions/network.SuccessResponse'
|
||||
- properties:
|
||||
payload:
|
||||
$ref: '#/definitions/network.RealTimeDataPayload'
|
||||
type: object
|
||||
"400":
|
||||
description: 返回实时数据失败
|
||||
schema:
|
||||
$ref: '#/definitions/network.FailureResponse'
|
||||
summary: 获取实时测点数据
|
||||
tags:
|
||||
- RealTime Component
|
||||
/measurement/recommend:
|
||||
get:
|
||||
consumes:
|
||||
- application/json
|
||||
description: 根据用户输入的字符串,从 Redis 中查询可能的测量点或结构路径,并提供推荐列表。
|
||||
parameters:
|
||||
- description: 查询输入参数,例如 'trans' 或 'transformfeeder1_220.'
|
||||
in: body
|
||||
name: request
|
||||
required: true
|
||||
schema:
|
||||
$ref: '#/definitions/network.MeasurementRecommendRequest'
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
"200":
|
||||
description: 返回推荐列表成功
|
||||
schema:
|
||||
allOf:
|
||||
- $ref: '#/definitions/network.SuccessResponse'
|
||||
- properties:
|
||||
payload:
|
||||
$ref: '#/definitions/network.MeasurementRecommendPayload'
|
||||
type: object
|
||||
"400":
|
||||
description: 返回推荐列表失败
|
||||
schema:
|
||||
$ref: '#/definitions/network.FailureResponse'
|
||||
summary: 测量点推荐(搜索框自动补全)
|
||||
tags:
|
||||
- Measurement Recommend
|
||||
/model/diagram_load/{page_id}:
|
||||
get:
|
||||
consumes:
|
||||
- application/json
|
||||
description: load circuit diagram info by page id
|
||||
parameters:
|
||||
- description: page ID
|
||||
in: path
|
||||
name: page_id
|
||||
required: true
|
||||
type: integer
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
"200":
|
||||
description: request process success
|
||||
schema:
|
||||
$ref: '#/definitions/network.SuccessResponse'
|
||||
"400":
|
||||
description: request process failed
|
||||
schema:
|
||||
$ref: '#/definitions/network.FailureResponse'
|
||||
summary: load circuit diagram info
|
||||
tags:
|
||||
- load circuit_diagram
|
||||
swagger: "2.0"
|
||||
|
|
@ -1,13 +0,0 @@
|
|||
{
|
||||
"params_list": [{
|
||||
"anchor_name": "voltage",
|
||||
"func_type": "1",
|
||||
"upper_limit": 23,
|
||||
"lower_limit": 0.5
|
||||
}, {
|
||||
"anchor_name": "current",
|
||||
"func_type": "2",
|
||||
"upper_limit": 23,
|
||||
"lower_limit": 0.5
|
||||
}]
|
||||
}
|
||||
|
|
@ -1,37 +0,0 @@
|
|||
{
|
||||
"page_id": 1,
|
||||
"topologics": [
|
||||
{
|
||||
"uuid_from": "70c190f2-8a60-42a9-b143-ec5f87e0aa6b",
|
||||
"uuid_to": "70c190f2-8a75-42a9-b166-ec5f87e0aa6b"
|
||||
},
|
||||
{
|
||||
"uuid_from": "70c190f2-8a75-42a9-b166-ec5f87e0aa6b",
|
||||
"uuid_to": "70c200f2-8a75-42a9-c166-bf5f87e0aa6b"
|
||||
}
|
||||
],
|
||||
"component_infos": [
|
||||
{
|
||||
"component_type": 3,
|
||||
"grid_id": 1,
|
||||
"name": "demo21",
|
||||
"page_id": 1,
|
||||
"station_id": 1,
|
||||
"uuid": "70c190f2-8a75-42a9-b166-ec5f87e0aa6b",
|
||||
"zone_id": 1,
|
||||
"context":"{\"top\":\"7.4\",\"left\":\"3.2\",\"right\":\"3.5\",\"buttom\":\"1.2\"}",
|
||||
"params": "{\"anchor_i\":false,\"anchor_v\":true,\"context\":\"{\\\"top\\\": \\\"7.4\\\", \\\"left\\\": \\\"3.2\\\", \\\"right\\\": \\\"3.5\\\", \\\"buttom\\\": \\\"1.2\\\"}\",\"name\":\"demo21\",\"oi_alarm\":110,\"op\":1,\"ov_alarm\":110,\"resistance\":100,\"ts\":\"2021-01-01T08:00:00+08:00\",\"ui_alarm\":90,\"uv_alarm\":90}"
|
||||
},
|
||||
{
|
||||
"component_type": 3,
|
||||
"grid_id": 1,
|
||||
"name": "demo22",
|
||||
"page_id": 1,
|
||||
"station_id": 1,
|
||||
"uuid": "70c200f2-8a75-42a9-c166-bf5f87e0aa6b",
|
||||
"zone_id": 1,
|
||||
"context":"{\"top\":\"7.4\",\"left\":\"3.2\",\"right\":\"3.5\",\"buttom\":\"1.2\"}",
|
||||
"params": "{\"anchor_i\":false,\"anchor_v\":true,\"context\":\"{\\\"top\\\": \\\"7.4\\\", \\\"left\\\": \\\"3.2\\\", \\\"right\\\": \\\"3.5\\\", \\\"buttom\\\": \\\"1.2\\\"}\",\"name\":\"demo22\",\"oi_alarm\":110,\"op\":1,\"ov_alarm\":110,\"resistance\":100,\"ts\":\"2021-01-01T08:00:00+08:00\",\"ui_alarm\":90,\"uv_alarm\":90}"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -1,23 +0,0 @@
|
|||
{
|
||||
"page_id":1,
|
||||
"topologics":[
|
||||
{
|
||||
"uuid_from": "70c190f2-8a75-42a9-b166-ec5f87e0aa6b",
|
||||
"uuid_to": "70c200f2-8a75-42a9-c166-bf5f87e0aa6b"
|
||||
},
|
||||
{
|
||||
"uuid_from": "70c190f2-8a60-42a9-b143-ec5f87e0aa6b",
|
||||
"uuid_to": "70c190f2-8a75-42a9-b166-ec5f87e0aa6b"
|
||||
}
|
||||
],
|
||||
"component_infos":[
|
||||
{
|
||||
"uuid":"70c200f2-8a75-42a9-c166-bf5f87e0aa6b",
|
||||
"component_type":3
|
||||
},
|
||||
{
|
||||
"uuid":"70c190f2-8a75-42a9-b166-ec5f87e0aa6b",
|
||||
"component_type":3
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -1,39 +0,0 @@
|
|||
{
|
||||
"page_id":1,
|
||||
"topologics":[
|
||||
{
|
||||
"change_type":1,
|
||||
"old_uuid_from":"e32bc0be-67f4-4d79-a5da-eaa40a5bd77d",
|
||||
"old_uuid_to":"70c200f2-8a75-42a9-c166-bf5f87e0aa6b",
|
||||
"new_uuid_from":"70c190f2-8a75-42a9-b166-ec5f87e0aa6b",
|
||||
"new_uuid_to":"70c200f2-8a75-42a9-c166-bf5f87e0aa6b"
|
||||
},
|
||||
{
|
||||
"change_type":2,
|
||||
"old_uuid_from":"70c190f2-8a60-42a9-b143-ec5f87e0aa6b",
|
||||
"old_uuid_to":"10f155cf-bd27-4557-85b2-d126b6e2657f",
|
||||
"new_uuid_from":"70c190f2-8a60-42a9-b143-ec5f87e0aa6b",
|
||||
"new_uuid_to":"70c200f2-8a75-42a9-c166-bf5f87e0aa6b"
|
||||
},
|
||||
{
|
||||
"change_type":3,
|
||||
"old_uuid_from":"",
|
||||
"old_uuid_to":"",
|
||||
"new_uuid_from":"70c200f2-8a75-42a9-c166-bf5f87e0aa6b",
|
||||
"new_uuid_to":"10f155cf-bd27-4557-85b2-d126b6e2657f"
|
||||
}
|
||||
],
|
||||
"component_infos": [
|
||||
{
|
||||
"component_type": 3,
|
||||
"grid_id": 1,
|
||||
"name": "demo23",
|
||||
"page_id": 1,
|
||||
"station_id": 1,
|
||||
"uuid": "70c200f2-8a75-42a9-c166-bf5f87e0aa6b",
|
||||
"zone_id": 1,
|
||||
"context":"{\"top\":\"7.4\",\"left\":\"3.2\",\"right\":\"3.5\",\"buttom\":\"1.2\"}",
|
||||
"params": "{\"anchor_i\":false,\"anchor_v\":true,\"context\":\"{\\\"top\\\": \\\"7.4\\\", \\\"left\\\": \\\"3.2\\\", \\\"right\\\": \\\"3.5\\\", \\\"buttom\\\": \\\"1.2\\\"}\",\"name\":\"demo23\",\"oi_alarm\":110,\"op\":1,\"ov_alarm\":110,\"resistance\":100,\"ts\":\"2021-01-01T08:00:00+08:00\",\"ui_alarm\":90,\"uv_alarm\":90}"
|
||||
}
|
||||
]
|
||||
}
|
||||
59
go.mod
59
go.mod
|
|
@ -1,53 +1,33 @@
|
|||
module modelRT
|
||||
|
||||
go 1.24
|
||||
go 1.22.5
|
||||
|
||||
require (
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.2
|
||||
github.com/RediSearch/redisearch-go/v2 v2.1.1
|
||||
github.com/bitly/go-simplejson v0.5.1
|
||||
github.com/confluentinc/confluent-kafka-go v1.9.2
|
||||
github.com/gin-gonic/gin v1.10.0
|
||||
github.com/gofrs/uuid v4.4.0+incompatible
|
||||
github.com/gomodule/redigo v1.8.9
|
||||
github.com/gorilla/websocket v1.5.3
|
||||
github.com/json-iterator/go v1.1.12
|
||||
github.com/natefinch/lumberjack v2.0.0+incompatible
|
||||
github.com/orcaman/concurrent-map/v2 v2.0.1
|
||||
github.com/panjf2000/ants/v2 v2.10.0
|
||||
github.com/redis/go-redis/v9 v9.7.3
|
||||
github.com/spf13/viper v1.19.0
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/swaggo/files v1.0.1
|
||||
github.com/swaggo/gin-swagger v1.6.0
|
||||
github.com/swaggo/swag v1.16.4
|
||||
go.uber.org/zap v1.27.0
|
||||
golang.org/x/sys v0.28.0
|
||||
gorm.io/driver/mysql v1.5.7
|
||||
gorm.io/driver/postgres v1.5.9
|
||||
gorm.io/gorm v1.25.12
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/BurntSushi/toml v1.4.0 // indirect
|
||||
github.com/KyleBanks/depth v1.2.1 // indirect
|
||||
github.com/bytedance/sonic v1.12.5 // indirect
|
||||
github.com/bytedance/sonic/loader v0.2.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/bytedance/sonic v1.11.6 // indirect
|
||||
github.com/bytedance/sonic/loader v0.1.1 // indirect
|
||||
github.com/cloudwego/base64x v0.1.4 // indirect
|
||||
github.com/cloudwego/iasm v0.2.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.7 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
|
||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
||||
github.com/go-openapi/spec v0.21.0 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
github.com/go-playground/validator/v10 v10.23.0 // indirect
|
||||
github.com/go-sql-driver/mysql v1.7.0 // indirect
|
||||
github.com/goccy/go-json v0.10.3 // indirect
|
||||
github.com/go-playground/validator/v10 v10.20.0 // indirect
|
||||
github.com/goccy/go-json v0.10.2 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
|
||||
|
|
@ -55,17 +35,15 @@ require (
|
|||
github.com/jackc/puddle/v2 v2.2.1 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.9 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
|
||||
github.com/leodido/go-urn v1.4.0 // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
|
||||
github.com/sagikazarmark/locafero v0.4.0 // indirect
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
|
|
@ -76,15 +54,16 @@ require (
|
|||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||
github.com/ugorji/go/codec v1.2.12 // indirect
|
||||
go.uber.org/multierr v1.10.0 // indirect
|
||||
golang.org/x/arch v0.12.0 // indirect
|
||||
golang.org/x/crypto v0.30.0 // indirect
|
||||
golang.org/x/arch v0.8.0 // indirect
|
||||
golang.org/x/crypto v0.23.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
|
||||
golang.org/x/net v0.32.0 // indirect
|
||||
golang.org/x/sync v0.10.0 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
golang.org/x/tools v0.28.0 // indirect
|
||||
google.golang.org/protobuf v1.35.2 // indirect
|
||||
golang.org/x/net v0.25.0 // indirect
|
||||
golang.org/x/sync v0.6.0 // indirect
|
||||
golang.org/x/sys v0.20.0 // indirect
|
||||
golang.org/x/text v0.15.0 // indirect
|
||||
google.golang.org/protobuf v1.34.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
|
|
|||
323
go.sum
323
go.sum
|
|
@ -1,77 +1,113 @@
|
|||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
|
||||
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
|
||||
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
|
||||
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
|
||||
github.com/RediSearch/redisearch-go/v2 v2.1.1 h1:cCn3i40uLsVD8cxwrdrGfhdAgbR5Cld9q11eYyVOwpM=
|
||||
github.com/RediSearch/redisearch-go/v2 v2.1.1/go.mod h1:Uw93Wi97QqAsw1DwbQrhVd88dBorGTfSuCS42zfh1iA=
|
||||
github.com/bitly/go-simplejson v0.5.1 h1:xgwPbetQScXt1gh9BmoJ6j9JMr3TElvuIyjR8pgdoow=
|
||||
github.com/bitly/go-simplejson v0.5.1/go.mod h1:YOPVLzCfwK14b4Sff3oP1AmGhI9T9Vsg84etUnlyp+Q=
|
||||
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
|
||||
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
|
||||
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
|
||||
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
|
||||
github.com/bytedance/sonic v1.12.5 h1:hoZxY8uW+mT+OpkcUWw4k0fDINtOcVavEsGfzwzFU/w=
|
||||
github.com/bytedance/sonic v1.12.5/go.mod h1:B8Gt/XvtZ3Fqj+iSKMypzymZxw/FVwgIGKzMzT9r/rk=
|
||||
github.com/actgardner/gogen-avro/v10 v10.1.0/go.mod h1:o+ybmVjEa27AAr35FRqU98DJu1fXES56uXniYFv4yDA=
|
||||
github.com/actgardner/gogen-avro/v10 v10.2.1/go.mod h1:QUhjeHPchheYmMDni/Nx7VB0RsT/ee8YIgGY/xpEQgQ=
|
||||
github.com/actgardner/gogen-avro/v9 v9.1.0/go.mod h1:nyTj6wPqDJoxM3qdnjcLv+EnMDSDFqE0qDpva2QRmKc=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
|
||||
github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
|
||||
github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM=
|
||||
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
|
||||
github.com/bytedance/sonic/loader v0.2.1 h1:1GgorWTqf12TA8mma4DDSbaQigE2wOgQo7iCjjJv3+E=
|
||||
github.com/bytedance/sonic/loader v0.2.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
|
||||
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
|
||||
github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
|
||||
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
|
||||
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/confluentinc/confluent-kafka-go v1.9.2 h1:gV/GxhMBUb03tFWkN+7kdhg+zf+QUM+wVkI9zwh770Q=
|
||||
github.com/confluentinc/confluent-kafka-go v1.9.2/go.mod h1:ptXNqsuDfYbAE/LBW6pnwWZElUoWxHoV8E43DCrliyo=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/frankban/quicktest v1.2.2/go.mod h1:Qh/WofXFeiAFII1aEBu529AtJo6Zg2VHscnEsbBnJ20=
|
||||
github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
|
||||
github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y=
|
||||
github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
|
||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/gabriel-vasile/mimetype v1.4.7 h1:SKFKl7kD0RiPdbht0s7hFtjl489WcQ1VyPW8ZzUMYCA=
|
||||
github.com/gabriel-vasile/mimetype v1.4.7/go.mod h1:GDlAgAyIRT27BhFl53XNAFtfjzOkLaF35JdEG0P7LtU=
|
||||
github.com/gin-contrib/gzip v0.0.6 h1:NjcunTcGAj5CO1gn4N8jHOSIeRFHIbn51z6K+xaN4d4=
|
||||
github.com/gin-contrib/gzip v0.0.6/go.mod h1:QOJlmV2xmayAjkNS2Y8NQsMneuRShOU/kjovCXNuzzk=
|
||||
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
|
||||
github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
|
||||
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
|
||||
github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU=
|
||||
github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
|
||||
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
|
||||
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
|
||||
github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
|
||||
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
|
||||
github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY=
|
||||
github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
|
||||
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
|
||||
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
|
||||
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
|
||||
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
|
||||
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
|
||||
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
|
||||
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
|
||||
github.com/go-playground/validator/v10 v10.23.0 h1:/PwmTwZhS0dPkav3cdK9kV1FsAmrL8sThn8IHr/sO+o=
|
||||
github.com/go-playground/validator/v10 v10.23.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
|
||||
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
|
||||
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
|
||||
github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
|
||||
github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
|
||||
github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8=
|
||||
github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
|
||||
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
|
||||
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA=
|
||||
github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||
github.com/gomodule/redigo v1.8.9 h1:Sl3u+2BI/kk+VEatbj0scLdrFhjPmbxOc1myhDP41ws=
|
||||
github.com/gomodule/redigo v1.8.9/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.2.1-0.20190312032427-6f77996f0c42/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/google/pprof v0.0.0-20211008130755-947d60d73cc0/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/hamba/avro v1.5.6/go.mod h1:3vNT0RLXXpFm2Tb/5KC71ZRJlOroggq1Rcitb6k4Fr8=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/heetch/avro v0.3.1/go.mod h1:4xn38Oz/+hiEUTpbVfGVLfvOg0yKLlRP7Q9+gJJILgA=
|
||||
github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
|
||||
github.com/invopop/jsonschema v0.4.0/go.mod h1:O9uiLokuu0+MGFlyiaqtWxwqJm41/+8Nj0lD7A36YH0=
|
||||
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
|
||||
|
|
@ -80,29 +116,41 @@ github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw=
|
|||
github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
|
||||
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
|
||||
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||
github.com/jhump/gopoet v0.0.0-20190322174617-17282ff210b3/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI=
|
||||
github.com/jhump/gopoet v0.1.0/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI=
|
||||
github.com/jhump/goprotoc v0.5.0/go.mod h1:VrbvcYrQOrTi3i0Vf+m+oqQWk9l72mjkJCYo7UvLHRQ=
|
||||
github.com/jhump/protoreflect v1.11.0/go.mod h1:U7aMIjN0NWq9swDP7xDdoMfRHb35uiuTd3Z9nFXJf5E=
|
||||
github.com/jhump/protoreflect v1.12.0/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI=
|
||||
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
|
||||
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
|
||||
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
|
||||
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
|
||||
github.com/juju/qthttptest v0.1.1/go.mod h1:aTlAv8TYaflIiTDIQYzxnl1QdPjAg8Q8qJMErpKy6A4=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY=
|
||||
github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8=
|
||||
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
|
||||
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
|
||||
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
|
||||
github.com/linkedin/goavro v2.1.0+incompatible/go.mod h1:bBCwI2eGYpUI/4820s67MElg9tdeLbINjLjiM2xZFYM=
|
||||
github.com/linkedin/goavro/v2 v2.10.0/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA=
|
||||
github.com/linkedin/goavro/v2 v2.10.1/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA=
|
||||
github.com/linkedin/goavro/v2 v2.11.1/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA=
|
||||
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
|
||||
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
|
|
@ -110,25 +158,35 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR
|
|||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM=
|
||||
github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk=
|
||||
github.com/nrwiersma/avro-benchmarks v0.0.0-20210913175520-21aec48c8f76/go.mod h1:iKyFMidsk/sVYONJRE372sJuX/QTRPacU7imPqqsu7g=
|
||||
github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c=
|
||||
github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM=
|
||||
github.com/panjf2000/ants/v2 v2.10.0 h1:zhRg1pQUtkyRiOFo2Sbqwjp0GfBNo9cUY2/Grpx1p+8=
|
||||
github.com/panjf2000/ants/v2 v2.10.0/go.mod h1:7ZxyxsqE4vvW0M7LSD8aI3cKwgFhBHbxnlN8mDqHa1I=
|
||||
github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
|
||||
github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
|
||||
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
|
||||
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM=
|
||||
github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA=
|
||||
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
||||
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/rogpeppe/clock v0.0.0-20190514195947-2896927a307a/go.mod h1:4r5QyqhjIWCcK8DO4KMclc5Iknq5qVBAlbYYzAbUScQ=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
|
||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
|
||||
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
|
||||
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
|
||||
github.com/santhosh-tekuri/jsonschema/v5 v5.0.0/go.mod h1:FKdcjfQW6rpZSnxxUvEA5H/cDPdvJ/SZJQLWWXWGrZ0=
|
||||
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
|
||||
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
|
||||
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
|
||||
|
|
@ -142,98 +200,165 @@ github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+
|
|||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||
github.com/swaggo/files v1.0.1 h1:J1bVJ4XHZNq0I46UU90611i9/YzdrF7x92oX1ig5IdE=
|
||||
github.com/swaggo/files v1.0.1/go.mod h1:0qXmMNH6sXNf+73t65aKeB+ApmgxdnkQzVTAj2uaMUg=
|
||||
github.com/swaggo/gin-swagger v1.6.0 h1:y8sxvQ3E20/RCyrXeFfg60r6H0Z+SwpTjMYsMm+zy8M=
|
||||
github.com/swaggo/gin-swagger v1.6.0/go.mod h1:BG00cCEy294xtVpyIAHG6+e2Qzj/xKlRdOqDkvq0uzo=
|
||||
github.com/swaggo/swag v1.16.4 h1:clWJtd9LStiG3VeijiCfOVODP6VpHtKdQy9ELFG3s1A=
|
||||
github.com/swaggo/swag v1.16.4/go.mod h1:VBsHJRsDvfYvqoiMKnsdwhNV9LEMHgEDZcyVYX0sxPg=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
|
||||
github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
|
||||
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
|
||||
go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
golang.org/x/arch v0.12.0 h1:UsYJhbzPYGsT0HbEdmYcqtCv8UNGvnaL561NnIUvaKg=
|
||||
golang.org/x/arch v0.12.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
|
||||
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc=
|
||||
golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY=
|
||||
golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
|
||||
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
|
||||
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI=
|
||||
golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200505041828-1ed23360d12c/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
|
||||
golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
|
||||
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
|
||||
google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20220503193339-ba3ae3f07e29/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
|
||||
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/avro.v0 v0.0.0-20171217001914-a730b5802183/go.mod h1:FvqrFXt+jCsyQibeRv4xxEJBL5iG2DDW5aeJwzDiq4A=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v1 v1.0.0/go.mod h1:CxwszS/Xz1C49Ucd2i6Zil5UToP1EmyrFhKaMVbg1mk=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/httprequest.v1 v1.2.1/go.mod h1:x2Otw96yda5+8+6ZeWwHIJTFkEHWP/qP8pJOzqEtWPM=
|
||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
|
||||
gopkg.in/retry.v1 v1.0.3/go.mod h1:FJkXmWiMaAo7xB+xhvDF59zhfjDWyzmyAxiT4dB688g=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gorm.io/driver/mysql v1.5.7 h1:MndhOPYOfEp2rHKgkZIhJ16eVUIRf2HmzgoPmh7FCWo=
|
||||
gorm.io/driver/mysql v1.5.7/go.mod h1:sEtPWMiqiN1N1cMXoXmBbd8C6/l+TESwriotuRRpkDM=
|
||||
gorm.io/driver/postgres v1.5.9 h1:DkegyItji119OlcaLjqN11kHoUgZ/j13E0jkJZgD6A8=
|
||||
gorm.io/driver/postgres v1.5.9/go.mod h1:DX3GReXH+3FPWGrrgffdvCk3DQ1dwDPdmbenSkweRGI=
|
||||
gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
|
||||
gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8=
|
||||
gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
|
||||
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
|
||||
|
|
|
|||
|
|
@ -1,43 +0,0 @@
|
|||
// Package handler provides HTTP handlers for various endpoints.
|
||||
package handler
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"modelRT/alert"
|
||||
"modelRT/constants"
|
||||
"modelRT/logger"
|
||||
"modelRT/network"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// QueryAlertEventHandler define query alert event process API
|
||||
func QueryAlertEventHandler(c *gin.Context) {
|
||||
var targetLevel constants.AlertLevel
|
||||
|
||||
alertManger := alert.GetAlertMangerInstance()
|
||||
levelStr := c.Query("level")
|
||||
level, err := strconv.Atoi(levelStr)
|
||||
if err != nil {
|
||||
logger.Error(c, "convert alert level string to int failed", "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: -1,
|
||||
Msg: err.Error(),
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
}
|
||||
targetLevel = constants.AlertLevel(level)
|
||||
events := alertManger.GetRangeEventsByLevel(targetLevel)
|
||||
|
||||
resp := network.SuccessResponse{
|
||||
Code: 0,
|
||||
Msg: "success",
|
||||
Payload: map[string]any{
|
||||
"events": events,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
}
|
||||
|
|
@ -1,76 +0,0 @@
|
|||
// Package handler provides HTTP handlers for various endpoints.
|
||||
package handler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"modelRT/common/errcode"
|
||||
"modelRT/database"
|
||||
"modelRT/diagram"
|
||||
"modelRT/logger"
|
||||
"modelRT/network"
|
||||
"modelRT/orm"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// ComponentAnchorReplaceHandler define component anchor point replace process API
|
||||
func ComponentAnchorReplaceHandler(c *gin.Context) {
|
||||
var uuid, anchorName string
|
||||
|
||||
pgClient := database.GetPostgresDBClient()
|
||||
cancelCtx, cancel := context.WithTimeout(c, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
var request network.ComponetAnchorReplaceRequest
|
||||
if err := c.ShouldBindJSON(&request); err != nil {
|
||||
logger.Error(c, "unmarshal component anchor point replace info failed", "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
uuid = request.UUID
|
||||
anchorName = request.AnchorName
|
||||
|
||||
var componentInfo orm.Component
|
||||
result := pgClient.WithContext(cancelCtx).Model(&orm.Component{}).Where("global_uuid = ?", uuid).Find(&componentInfo)
|
||||
if result.Error != nil {
|
||||
logger.Error(c, "query component detail info failed", "error", result.Error)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: result.Error.Error(),
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
|
||||
if result.RowsAffected == 0 {
|
||||
err := fmt.Errorf("query component detail info by uuid failed:%w", errcode.ErrQueryRowZero)
|
||||
logger.Error(c, "query component detail info from table is empty", "table_name", "component")
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
diagram.UpdateAnchorValue(componentInfo.GlobalUUID.String(), anchorName)
|
||||
|
||||
resp := network.SuccessResponse{
|
||||
Code: http.StatusOK,
|
||||
Msg: "success",
|
||||
Payload: map[string]interface{}{
|
||||
"uuid": request.UUID,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
}
|
||||
|
|
@ -1,56 +0,0 @@
|
|||
package handler
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"modelRT/constants"
|
||||
"modelRT/diagram"
|
||||
"modelRT/logger"
|
||||
"modelRT/network"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// AttrDeleteHandler deletes a data attribute
|
||||
func AttrDeleteHandler(c *gin.Context) {
|
||||
var request network.AttrDeleteRequest
|
||||
clientToken := c.GetString("client_token")
|
||||
if clientToken == "" {
|
||||
err := constants.ErrGetClientToken
|
||||
|
||||
logger.Error(c, "failed to get client token from context", "error", err)
|
||||
c.JSON(http.StatusOK, network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
if err := c.ShouldBindJSON(&request); err != nil {
|
||||
logger.Error(c, "failed to unmarshal attribute delete request", "error", err)
|
||||
c.JSON(http.StatusOK, network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
rs := diagram.NewRedisString(c, request.AttrToken, clientToken, 10, true)
|
||||
if err := rs.GETDEL(request.AttrToken); err != nil {
|
||||
logger.Error(c, "failed to delete attribute from Redis", "attr_token", request.AttrToken, "error", err)
|
||||
c.JSON(http.StatusOK, network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{"attr_token": request.AttrToken},
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, network.SuccessResponse{
|
||||
Code: http.StatusOK,
|
||||
Msg: "success",
|
||||
Payload: map[string]interface{}{
|
||||
"attr_token": request.AttrToken,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
|
@ -1,67 +0,0 @@
|
|||
package handler
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"modelRT/constants"
|
||||
"modelRT/database"
|
||||
"modelRT/logger"
|
||||
"modelRT/network"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// AttrGetHandler retrieves the value of a data attribute
|
||||
func AttrGetHandler(c *gin.Context) {
|
||||
var request network.AttrGetRequest
|
||||
|
||||
clientToken := c.GetString("client_token")
|
||||
if clientToken == "" {
|
||||
err := constants.ErrGetClientToken
|
||||
|
||||
logger.Error(c, "failed to get client token from context", "error", err)
|
||||
c.JSON(http.StatusOK, network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
if err := c.ShouldBindJSON(&request); err != nil {
|
||||
logger.Error(c, "failed to unmarshal attribute get request", "error", err)
|
||||
c.JSON(http.StatusOK, network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
pgClient := database.GetPostgresDBClient()
|
||||
tx := pgClient.Begin()
|
||||
|
||||
attrModel, err := database.ParseAttrToken(c, tx, request.AttrToken, clientToken)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
logger.Error(c, "failed to parse attribute token", "attr_token", request.AttrToken, "error", err)
|
||||
c.JSON(http.StatusOK, network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{"attr_token": request.AttrToken},
|
||||
})
|
||||
return
|
||||
}
|
||||
tx.Commit()
|
||||
|
||||
// The GetAttrValue method is assumed to exist on the AttrModelInterface.
|
||||
// You need to add this method to your attribute_model.go interface definition.
|
||||
attrValue := attrModel.GetAttrValue()
|
||||
|
||||
c.JSON(http.StatusOK, network.SuccessResponse{
|
||||
Code: http.StatusOK,
|
||||
Msg: "success",
|
||||
Payload: map[string]interface{}{
|
||||
"attr_token": request.AttrToken,
|
||||
"attr_value": attrValue,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
|
@ -1,58 +0,0 @@
|
|||
package handler
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"modelRT/constants"
|
||||
"modelRT/diagram"
|
||||
"modelRT/logger"
|
||||
"modelRT/network"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// AttrSetHandler sets the value of a data attribute
|
||||
func AttrSetHandler(c *gin.Context) {
|
||||
var request network.AttrSetRequest
|
||||
|
||||
clientToken := c.GetString("client_token")
|
||||
if clientToken == "" {
|
||||
err := constants.ErrGetClientToken
|
||||
|
||||
logger.Error(c, "failed to get client token from context", "error", err)
|
||||
c.JSON(http.StatusOK, network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
if err := c.ShouldBindJSON(&request); err != nil {
|
||||
logger.Error(c, "failed to unmarshal attribute set request", "error", err)
|
||||
c.JSON(http.StatusOK, network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// The logic for handling Redis operations directly from the handler
|
||||
rs := diagram.NewRedisString(c, request.AttrToken, clientToken, 10, true)
|
||||
if err := rs.Set(request.AttrToken, request.AttrValue); err != nil {
|
||||
logger.Error(c, "failed to set attribute value in Redis", "attr_token", request.AttrToken, "error", err)
|
||||
c.JSON(http.StatusOK, network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{"attr_token": request.AttrToken},
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, network.SuccessResponse{
|
||||
Code: http.StatusOK,
|
||||
Msg: "success",
|
||||
Payload: map[string]interface{}{
|
||||
"attr_token": request.AttrToken,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
|
@ -1,160 +0,0 @@
|
|||
// Package handler provides HTTP handlers for various endpoints.
|
||||
package handler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"modelRT/database"
|
||||
"modelRT/diagram"
|
||||
"modelRT/logger"
|
||||
"modelRT/network"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/gofrs/uuid"
|
||||
)
|
||||
|
||||
// CircuitDiagramCreateHandler define circuit diagram create process API
|
||||
func CircuitDiagramCreateHandler(c *gin.Context) {
|
||||
pgClient := database.GetPostgresDBClient()
|
||||
|
||||
var request network.CircuitDiagramCreateRequest
|
||||
if err := c.ShouldBindJSON(&request); err != nil {
|
||||
logger.Error(c, "unmarshal circuit diagram create info failed", "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
|
||||
graph, err := diagram.GetGraphMap(request.PageID)
|
||||
if err != nil {
|
||||
logger.Error(c, "get topologic data from set by pageID failed", "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{
|
||||
"page_id": request.PageID,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
|
||||
var topologicCreateInfos []network.TopologicUUIDCreateInfo
|
||||
for _, topologicLink := range request.TopologicLinks {
|
||||
var createInfo network.TopologicUUIDCreateInfo
|
||||
UUIDFrom, err1 := uuid.FromString(topologicLink.UUIDFrom)
|
||||
UUIDTo, err2 := uuid.FromString(topologicLink.UUIDTo)
|
||||
if err1 != nil || err2 != nil {
|
||||
var err error
|
||||
if err1 != nil && err2 == nil {
|
||||
err = fmt.Errorf("convert uuid from string failed:%w", err1)
|
||||
} else if err1 == nil && err2 != nil {
|
||||
err = fmt.Errorf("convert uuid from string failed:%w", err2)
|
||||
} else {
|
||||
err = fmt.Errorf("convert uuid from string failed:%w:%w", err1, err2)
|
||||
}
|
||||
|
||||
logger.Error(c, "format uuid from string failed", "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{
|
||||
"topologic_info": topologicLink,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
createInfo.UUIDFrom = UUIDFrom
|
||||
createInfo.UUIDTo = UUIDTo
|
||||
topologicCreateInfos = append(topologicCreateInfos, createInfo)
|
||||
}
|
||||
|
||||
// open transaction
|
||||
tx := pgClient.Begin()
|
||||
|
||||
err = database.CreateTopologicIntoDB(c, tx, request.PageID, topologicCreateInfos)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
|
||||
logger.Error(c, "create topologic info into DB failed", "topologic_info", topologicCreateInfos, "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{
|
||||
"topologic_infos": topologicCreateInfos,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
|
||||
for _, topologicCreateInfo := range topologicCreateInfos {
|
||||
graph.AddEdge(topologicCreateInfo.UUIDFrom, topologicCreateInfo.UUIDTo)
|
||||
}
|
||||
|
||||
for index, info := range request.ComponentInfos {
|
||||
componentUUID, err := database.CreateComponentIntoDB(c, tx, info)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
|
||||
logger.Error(c, "insert component info into DB failed", "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{
|
||||
"component_infos": request.ComponentInfos,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
request.ComponentInfos[index].UUID = componentUUID
|
||||
}
|
||||
|
||||
for _, info := range request.ComponentInfos {
|
||||
// TODO 修复赋值问题
|
||||
component, err := network.ConvertComponentCreateInfosToComponents(info)
|
||||
if err != nil {
|
||||
logger.Error(c, "convert component params info failed", "component_info", info, "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{
|
||||
"uuid": info.UUID,
|
||||
"component_params": info.Params,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
diagram.StoreComponentMap(info.UUID, component)
|
||||
}
|
||||
|
||||
if len(request.FreeVertexs) > 0 {
|
||||
for _, freeVertex := range request.FreeVertexs {
|
||||
graph.FreeVertexs[freeVertex] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// commit transaction
|
||||
tx.Commit()
|
||||
resp := network.SuccessResponse{
|
||||
Code: http.StatusOK,
|
||||
Msg: "success",
|
||||
Payload: map[string]interface{}{
|
||||
"page_id": request.PageID,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
}
|
||||
|
|
@ -1,213 +0,0 @@
|
|||
// Package handler provides HTTP handlers for various endpoints.
|
||||
package handler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"modelRT/common/errcode"
|
||||
"modelRT/database"
|
||||
"modelRT/diagram"
|
||||
"modelRT/logger"
|
||||
"modelRT/network"
|
||||
"modelRT/orm"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/gofrs/uuid"
|
||||
"gorm.io/gorm/clause"
|
||||
)
|
||||
|
||||
// CircuitDiagramDeleteHandler define circuit diagram delete process API
|
||||
func CircuitDiagramDeleteHandler(c *gin.Context) {
|
||||
pgClient := database.GetPostgresDBClient()
|
||||
|
||||
var request network.CircuitDiagramDeleteRequest
|
||||
if err := c.ShouldBindJSON(&request); err != nil {
|
||||
logger.Error(c, "unmarshal circuit diagram del info failed", "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
|
||||
graph, err := diagram.GetGraphMap(request.PageID)
|
||||
if err != nil {
|
||||
logger.Error(c, "get topologic data from set by pageID failed", "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{
|
||||
"page_id": request.PageID,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
|
||||
var topologicDelInfos []network.TopologicUUIDDelInfo
|
||||
for _, topologicLink := range request.TopologicLinks {
|
||||
var delInfo network.TopologicUUIDDelInfo
|
||||
UUIDFrom, err1 := uuid.FromString(topologicLink.UUIDFrom)
|
||||
UUIDTo, err2 := uuid.FromString(topologicLink.UUIDTo)
|
||||
if err1 != nil || err2 != nil {
|
||||
var err error
|
||||
if err1 != nil && err2 == nil {
|
||||
err = fmt.Errorf("convert uuid from string failed:%w", err1)
|
||||
} else if err1 == nil && err2 != nil {
|
||||
err = fmt.Errorf("convert uuid from string failed:%w", err2)
|
||||
} else {
|
||||
err = fmt.Errorf("convert uuid from string failed:%w:%w", err1, err2)
|
||||
}
|
||||
|
||||
logger.Error(c, "format uuid from string failed", "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{
|
||||
"topologic_info": topologicLink,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
delInfo.UUIDFrom = UUIDFrom
|
||||
delInfo.UUIDTo = UUIDTo
|
||||
topologicDelInfos = append(topologicDelInfos, delInfo)
|
||||
}
|
||||
|
||||
// open transaction
|
||||
tx := pgClient.Begin()
|
||||
|
||||
for _, topologicDelInfo := range topologicDelInfos {
|
||||
err = database.DeleteTopologicIntoDB(c, tx, request.PageID, topologicDelInfo)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
|
||||
logger.Error(c, "delete topologic info into DB failed", "topologic_info", topologicDelInfo, "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{
|
||||
"topologic_info": topologicDelInfo,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
|
||||
err = graph.DelEdge(topologicDelInfo.UUIDFrom, topologicDelInfo.UUIDTo)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
|
||||
logger.Error(c, "delete topologic info failed", "topologic_info", topologicDelInfo, "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{
|
||||
"topologic_info": topologicDelInfo,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if len(graph.VerticeLinks) == 0 && len(graph.FreeVertexs) == 0 {
|
||||
diagram.DeleteGraphMap(request.PageID)
|
||||
}
|
||||
|
||||
for _, componentInfo := range request.ComponentInfos {
|
||||
cancelCtx, cancel := context.WithTimeout(c, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
globalUUID, err := uuid.FromString(componentInfo.UUID)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
|
||||
logger.Error(c, "format uuid from string failed", "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{
|
||||
"uuid": componentInfo.UUID,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
|
||||
}
|
||||
|
||||
var component orm.Component
|
||||
result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Where("global_uuid = ?", globalUUID).Find(&component)
|
||||
if result.Error != nil || result.RowsAffected == 0 {
|
||||
tx.Rollback()
|
||||
|
||||
err := result.Error
|
||||
if result.RowsAffected == 0 {
|
||||
err = fmt.Errorf("%w:please check uuid conditions", errcode.ErrDeleteRowZero)
|
||||
}
|
||||
|
||||
logger.Error(c, "query component info into postgresDB failed", "component_global_uuid", componentInfo.UUID, "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{
|
||||
"uuid": componentInfo.UUID,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
|
||||
result = tx.WithContext(cancelCtx).Delete(component)
|
||||
if result.Error != nil || result.RowsAffected == 0 {
|
||||
tx.Rollback()
|
||||
|
||||
err := result.Error
|
||||
if result.RowsAffected == 0 {
|
||||
err = fmt.Errorf("%w:please check uuid conditions", errcode.ErrDeleteRowZero)
|
||||
}
|
||||
|
||||
logger.Error(c, "delete component info into postgresDB failed", "component_global_uuid", componentInfo.UUID, "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{
|
||||
"uuid": componentInfo.UUID,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
diagram.DeleteComponentMap(component.GlobalUUID.String())
|
||||
}
|
||||
|
||||
if len(request.FreeVertexs) > 0 {
|
||||
for _, freeVertex := range request.FreeVertexs {
|
||||
delete(graph.FreeVertexs, freeVertex)
|
||||
}
|
||||
}
|
||||
|
||||
// commit transaction
|
||||
tx.Commit()
|
||||
resp := network.SuccessResponse{
|
||||
Code: http.StatusOK,
|
||||
Msg: "success",
|
||||
Payload: map[string]interface{}{
|
||||
"page_id": request.PageID,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
}
|
||||
|
|
@ -1,138 +1,84 @@
|
|||
// Package handler provides HTTP handlers for various endpoints.
|
||||
package handler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"modelRT/database"
|
||||
"modelRT/diagram"
|
||||
"modelRT/logger"
|
||||
"modelRT/log"
|
||||
"modelRT/network"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// CircuitDiagramLoadHandler define circuit diagram load process API
|
||||
// @Summary load circuit diagram info
|
||||
// @Description load circuit diagram info by page id
|
||||
// @Tags load circuit_diagram
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param page_id path int64 true "page ID"
|
||||
// @Success 200 {object} network.SuccessResponse "request process success"
|
||||
// @Failure 400 {object} network.FailureResponse "request process failed"
|
||||
// @Router /model/diagram_load/{page_id} [get]
|
||||
func CircuitDiagramLoadHandler(c *gin.Context) {
|
||||
pgClient := database.GetPostgresDBClient()
|
||||
|
||||
logger := log.LoggerInstance()
|
||||
pageID, err := strconv.ParseInt(c.Query("page_id"), 10, 64)
|
||||
if err != nil {
|
||||
logger.Error(c, "get pageID from url param failed", "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{
|
||||
logger.Error("get pageID from url param failed", zap.Error(err))
|
||||
header := network.ResponseHeader{Status: http.StatusBadRequest, ErrMsg: err.Error()}
|
||||
resp := network.BasicResponse{
|
||||
ResponseHeader: header,
|
||||
PayLoad: map[string]interface{}{
|
||||
"page_id": pageID,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
|
||||
topologicInfo, err := diagram.GetGraphMap(pageID)
|
||||
if err != nil {
|
||||
logger.Error(c, "get topologic data from set by pageID failed", "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{
|
||||
logger.Error("get topologic data from set by pageID failed", zap.Error(err))
|
||||
header := network.ResponseHeader{Status: http.StatusBadRequest, ErrMsg: err.Error()}
|
||||
resp := network.BasicResponse{
|
||||
ResponseHeader: header,
|
||||
PayLoad: map[string]interface{}{
|
||||
"page_id": pageID,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
payload := make(map[string]interface{})
|
||||
payload["root_vertex"] = topologicInfo.RootVertex
|
||||
payload["topologic"] = topologicInfo.VerticeLinks
|
||||
payLoad := make(map[string]interface{})
|
||||
payLoad["root_vertex"] = topologicInfo.RootVertex
|
||||
payLoad["topologic"] = topologicInfo.VerticeLinks
|
||||
|
||||
componentParamMap := make(map[string]any)
|
||||
componentParamMap := make(map[string][]byte)
|
||||
for _, VerticeLink := range topologicInfo.VerticeLinks {
|
||||
fmt.Println(VerticeLink)
|
||||
for _, componentUUID := range VerticeLink {
|
||||
component, err := database.QueryComponentByUUID(c, pgClient, componentUUID)
|
||||
UUIDStr := componentUUID.String()
|
||||
componentParams, err := diagram.GetComponentMap(UUIDStr)
|
||||
if err != nil {
|
||||
logger.Error(c, "get component id info from DB by uuid failed", "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{
|
||||
"uuid": componentUUID,
|
||||
logger.Error("get component data from set by uuid failed", zap.Error(err))
|
||||
header := network.ResponseHeader{Status: http.StatusBadRequest, ErrMsg: err.Error()}
|
||||
resp := network.BasicResponse{
|
||||
ResponseHeader: header,
|
||||
PayLoad: map[string]interface{}{
|
||||
"uuid": UUIDStr,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
|
||||
componentParams, err := diagram.GetComponentMap(component.GlobalUUID.String())
|
||||
byteSlice, err := componentParams.MarshalJSON()
|
||||
if err != nil {
|
||||
logger.Error(c, "get component data from set by uuid failed", "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{
|
||||
"uuid": componentUUID,
|
||||
},
|
||||
logger.Error("marshal component data failed", zap.Error(err))
|
||||
header := network.ResponseHeader{Status: http.StatusBadRequest, ErrMsg: err.Error()}
|
||||
resp := network.BasicResponse{
|
||||
ResponseHeader: header,
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
componentParamMap[componentUUID.String()] = componentParams
|
||||
componentParamMap[UUIDStr] = byteSlice
|
||||
}
|
||||
}
|
||||
payLoad["component_params"] = componentParamMap
|
||||
|
||||
rootVertexUUID := topologicInfo.RootVertex.String()
|
||||
rootComponent, err := database.QueryComponentByUUID(c, pgClient, topologicInfo.RootVertex)
|
||||
if err != nil {
|
||||
logger.Error(c, "get component id info from DB by uuid failed", "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{
|
||||
"uuid": topologicInfo.RootVertex,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
|
||||
rootComponentParam, err := diagram.GetComponentMap(rootComponent.GlobalUUID.String())
|
||||
if err != nil {
|
||||
logger.Error(c, "get component data from set by uuid failed", "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{
|
||||
"uuid": rootVertexUUID,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
componentParamMap[rootVertexUUID] = rootComponentParam
|
||||
|
||||
payload["component_params"] = componentParamMap
|
||||
|
||||
resp := network.SuccessResponse{
|
||||
Code: http.StatusOK,
|
||||
Msg: "success",
|
||||
Payload: payload,
|
||||
resp := network.DiagramLoadResponse{
|
||||
ResponseHeader: network.ResponseHeader{Status: http.StatusOK, ErrMsg: ""},
|
||||
PayLoad: payLoad,
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,160 +0,0 @@
|
|||
// Package handler provides HTTP handlers for various endpoints.
|
||||
package handler
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"modelRT/database"
|
||||
"modelRT/diagram"
|
||||
"modelRT/logger"
|
||||
"modelRT/network"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// CircuitDiagramUpdateHandler define circuit diagram update process API
|
||||
func CircuitDiagramUpdateHandler(c *gin.Context) {
|
||||
pgClient := database.GetPostgresDBClient()
|
||||
|
||||
var request network.CircuitDiagramUpdateRequest
|
||||
if err := c.ShouldBindJSON(&request); err != nil {
|
||||
logger.Error(c, "unmarshal circuit diagram update info failed", "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
|
||||
graph, err := diagram.GetGraphMap(request.PageID)
|
||||
if err != nil {
|
||||
logger.Error(c, "get topologic data from set by pageID failed", "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{
|
||||
"page_id": request.PageID,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
|
||||
var topologicChangeInfos []network.TopologicUUIDChangeInfos
|
||||
for _, topologicLink := range request.TopologicLinks {
|
||||
changeInfo, err := network.ParseUUID(topologicLink)
|
||||
if err != nil {
|
||||
logger.Error(c, "format uuid from string failed", "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{
|
||||
"topologic_info": topologicLink,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
topologicChangeInfos = append(topologicChangeInfos, changeInfo)
|
||||
}
|
||||
|
||||
// open transaction
|
||||
tx := pgClient.Begin()
|
||||
|
||||
for _, topologicChangeInfo := range topologicChangeInfos {
|
||||
err = database.UpdateTopologicIntoDB(c, tx, request.PageID, topologicChangeInfo)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
|
||||
logger.Error(c, "update topologic info into DB failed", "topologic_info", topologicChangeInfo, "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{
|
||||
"topologic_info": topologicChangeInfo,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
|
||||
err = graph.UpdateEdge(topologicChangeInfo)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
|
||||
logger.Error(c, "update topologic info failed", "topologic_info", topologicChangeInfo, "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{
|
||||
"topologic_info": topologicChangeInfo,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
for index, componentInfo := range request.ComponentInfos {
|
||||
componentUUID, err := database.UpdateComponentIntoDB(c, tx, componentInfo)
|
||||
if err != nil {
|
||||
logger.Error(c, "udpate component info into DB failed", "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{
|
||||
"page_id": request.PageID,
|
||||
"component_info": request.ComponentInfos,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
request.ComponentInfos[index].UUID = componentUUID
|
||||
}
|
||||
|
||||
for _, info := range request.ComponentInfos {
|
||||
// TODO 修复赋值问题
|
||||
component, err := network.ConvertComponentUpdateInfosToComponents(info)
|
||||
if err != nil {
|
||||
logger.Error(c, "convert component params info failed", "component_info", info, "error", err)
|
||||
|
||||
resp := network.FailureResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
Msg: err.Error(),
|
||||
Payload: map[string]interface{}{
|
||||
"uuid": info.UUID,
|
||||
"component_params": info.Params,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
return
|
||||
}
|
||||
diagram.UpdateComponentMap(info.ID, component)
|
||||
}
|
||||
|
||||
if len(request.FreeVertexs) > 0 {
|
||||
for _, freeVertex := range request.FreeVertexs {
|
||||
graph.FreeVertexs[freeVertex] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// commit transaction
|
||||
tx.Commit()
|
||||
|
||||
resp := network.SuccessResponse{
|
||||
Code: http.StatusOK,
|
||||
Msg: "success",
|
||||
Payload: map[string]interface{}{
|
||||
"page_id": request.PageID,
|
||||
},
|
||||
}
|
||||
c.JSON(http.StatusOK, resp)
|
||||
}
|
||||
|
|
@ -1,253 +0,0 @@
|
|||
// Package handler provides HTTP handlers for various endpoints.
|
||||
package handler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"maps"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/gofrs/uuid"
|
||||
|
||||
"modelRT/common/errcode"
|
||||
"modelRT/constants"
|
||||
"modelRT/database"
|
||||
"modelRT/diagram"
|
||||
"modelRT/logger"
|
||||
"modelRT/orm"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// ComponentAttributeQueryHandler define circuit diagram component attribute value query process API
|
||||
func ComponentAttributeQueryHandler(c *gin.Context) {
|
||||
pgClient := database.GetPostgresDBClient()
|
||||
|
||||
tokens := c.Param("tokens")
|
||||
if tokens == "" {
|
||||
err := fmt.Errorf("tokens is missing from the path")
|
||||
logger.Error(c, "query tokens from path failed", "error", err, "url", c.Request.RequestURI)
|
||||
renderRespFailure(c, constants.RespCodeInvalidParams, err.Error(), nil)
|
||||
return
|
||||
}
|
||||
|
||||
tokenSlice := strings.Split(tokens, ",")
|
||||
queryResults := make(map[string]queryResult)
|
||||
cacheQueryMap := make(map[string][]cacheQueryItem)
|
||||
|
||||
for _, token := range tokenSlice {
|
||||
slices := strings.Split(token, ".")
|
||||
if len(slices) < 7 {
|
||||
queryResults[token] = queryResult{err: errcode.ErrInvalidToken}
|
||||
continue
|
||||
}
|
||||
hSetKey := fmt.Sprintf("%s_%s", slices[4], slices[5])
|
||||
cacheQueryMap[hSetKey] = append(cacheQueryMap[hSetKey], cacheQueryItem{
|
||||
token: token,
|
||||
attributeCompTag: slices[4],
|
||||
attributeExtendType: slices[5],
|
||||
attributeName: slices[6],
|
||||
})
|
||||
}
|
||||
|
||||
dbQueryMap := make(map[string][]cacheQueryItem)
|
||||
var secondaryQueryCount int
|
||||
for hSetKey, items := range cacheQueryMap {
|
||||
hset := diagram.NewRedisHash(c, hSetKey, 5000, false)
|
||||
cacheData, err := hset.HGetAll()
|
||||
if err != nil {
|
||||
logger.Warn(c, "redis hgetall failed", "key", hSetKey, "err", err)
|
||||
}
|
||||
for _, item := range items {
|
||||
if val, ok := cacheData[item.attributeName]; ok {
|
||||
queryResults[item.token] = queryResult{err: errcode.ErrProcessSuccess, value: val}
|
||||
} else {
|
||||
dbQueryMap[item.attributeCompTag] = append(dbQueryMap[item.attributeCompTag], item)
|
||||
secondaryQueryCount++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if secondaryQueryCount == 0 {
|
||||
payload := genQueryRespPayload(queryResults, tokenSlice)
|
||||
renderRespSuccess(c, constants.RespCodeSuccess, "query dynamic parameter values success", payload)
|
||||
return
|
||||
}
|
||||
|
||||
tx := pgClient.WithContext(c).Begin()
|
||||
if tx.Error != nil {
|
||||
logger.Error(c, "begin postgres transaction failed", "error", tx.Error)
|
||||
fillRemainingErrors(queryResults, tokenSlice, errcode.ErrBeginTxFailed)
|
||||
payload := genQueryRespPayload(queryResults, tokenSlice)
|
||||
renderRespFailure(c, constants.RespCodeServerError, "begin postgres database transaction failed", payload)
|
||||
return
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
allCompTags := slices.Collect(maps.Keys(dbQueryMap))
|
||||
compModelMap, err := database.QueryComponentByCompTags(c, tx, allCompTags)
|
||||
if err != nil {
|
||||
logger.Error(c, "query component info from postgres database failed", "error", err)
|
||||
fillRemainingErrors(queryResults, tokenSlice, errcode.ErrDBQueryFailed)
|
||||
payload := genQueryRespPayload(queryResults, tokenSlice)
|
||||
renderRespFailure(c, constants.RespCodeServerError, "query component meta failed", payload)
|
||||
return
|
||||
}
|
||||
|
||||
// batch retrieve component metadata
|
||||
identifiers := make([]orm.ProjectIdentifier, 0, secondaryQueryCount)
|
||||
for tag, items := range dbQueryMap {
|
||||
comp, ok := compModelMap[tag]
|
||||
if !ok {
|
||||
for _, it := range items {
|
||||
queryResults[it.token] = queryResult{err: errcode.ErrFoundTargetFailed}
|
||||
}
|
||||
continue
|
||||
}
|
||||
for i := range items {
|
||||
items[i].attributeModelName = comp.ModelName
|
||||
items[i].globalUUID = comp.GlobalUUID
|
||||
identifiers = append(identifiers, orm.ProjectIdentifier{
|
||||
Token: items[i].token, Tag: comp.ModelName, GroupName: items[i].attributeExtendType,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
tableNameMap, err := database.BatchGetProjectNames(tx, identifiers)
|
||||
if err != nil {
|
||||
logger.Error(c, "batch get table names from postgres database failed", "error", err)
|
||||
fillRemainingErrors(queryResults, tokenSlice, errcode.ErrRetrieveFailed)
|
||||
payload := genQueryRespPayload(queryResults, tokenSlice)
|
||||
renderRespFailure(c, constants.RespCodeServerError, "batch get table names from postgres database failed", payload)
|
||||
return
|
||||
}
|
||||
|
||||
redisSyncMap := make(map[string][]cacheQueryItem)
|
||||
for _, items := range dbQueryMap {
|
||||
for _, item := range items {
|
||||
if _, exists := queryResults[item.token]; exists {
|
||||
continue
|
||||
}
|
||||
|
||||
tbl, ok := tableNameMap[orm.ProjectIdentifier{Tag: item.attributeModelName, GroupName: item.attributeExtendType}]
|
||||
if !ok {
|
||||
queryResults[item.token] = queryResult{err: errcode.ErrFoundTargetFailed}
|
||||
continue
|
||||
}
|
||||
|
||||
var dbVal string
|
||||
res := tx.Table(tbl).Select(item.attributeName).Where("global_uuid = ?", item.globalUUID).Scan(&dbVal)
|
||||
if res.Error != nil || res.RowsAffected == 0 {
|
||||
queryResults[item.token] = queryResult{err: errcode.ErrDBQueryFailed}
|
||||
continue
|
||||
}
|
||||
|
||||
queryResults[item.token] = queryResult{err: errcode.ErrProcessSuccess, value: dbVal}
|
||||
item.attributeVal = dbVal
|
||||
hKey := fmt.Sprintf("%s_%s", item.attributeCompTag, item.attributeExtendType)
|
||||
redisSyncMap[hKey] = append(redisSyncMap[hKey], item)
|
||||
}
|
||||
}
|
||||
|
||||
if err := tx.Commit().Error; err != nil {
|
||||
logger.Warn(c, "postgres transaction commit failed, but returning scanned data", "error", err)
|
||||
} else {
|
||||
for hKey, items := range redisSyncMap {
|
||||
go backfillRedis(c.Copy(), hKey, items)
|
||||
}
|
||||
}
|
||||
|
||||
payload := genQueryRespPayload(queryResults, tokenSlice)
|
||||
if hasAnyError(queryResults) {
|
||||
renderRespFailure(c, constants.RespCodeFailed, "query completed with partial failures", payload)
|
||||
} else {
|
||||
renderRespSuccess(c, constants.RespCodeSuccess, "query completed successfully", payload)
|
||||
}
|
||||
}
|
||||
|
||||
func hasAnyError(results map[string]queryResult) bool {
|
||||
for _, res := range results {
|
||||
if res.err != nil && res.err.Code() != constants.RespCodeSuccess {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func fillRemainingErrors(results map[string]queryResult, tokens []string, err *errcode.AppError) {
|
||||
for _, token := range tokens {
|
||||
if _, exists := results[token]; !exists {
|
||||
results[token] = queryResult{err: err}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func backfillRedis(ctx context.Context, hSetKey string, items []cacheQueryItem) {
|
||||
hset := diagram.NewRedisHash(ctx, hSetKey, 5000, false)
|
||||
fields := make(map[string]any, len(items))
|
||||
for _, item := range items {
|
||||
if item.attributeVal != "" {
|
||||
fields[item.attributeName] = item.attributeVal
|
||||
}
|
||||
}
|
||||
|
||||
if len(fields) > 0 {
|
||||
if err := hset.SetRedisHashByMap(fields); err != nil {
|
||||
logger.Error(ctx, "async backfill redis failed", "hash_key", hSetKey, "error", err)
|
||||
} else {
|
||||
logger.Info(ctx, "async backfill redis success", "hash_key", hSetKey, "count", len(fields))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func genQueryRespPayload(queryResults map[string]queryResult, requestTokens []string) map[string]any {
|
||||
attributes := make([]attributeQueryResult, 0, len(requestTokens))
|
||||
|
||||
for _, token := range requestTokens {
|
||||
if queryResult, exists := queryResults[token]; exists {
|
||||
attributes = append(attributes, attributeQueryResult{
|
||||
Token: token,
|
||||
Code: queryResult.err.Code(),
|
||||
Msg: queryResult.err.Msg(),
|
||||
Value: queryResult.value,
|
||||
})
|
||||
} else {
|
||||
err := errcode.ErrCacheQueryFailed
|
||||
attributes = append(attributes, attributeQueryResult{
|
||||
Token: token,
|
||||
Code: err.Code(),
|
||||
Msg: err.Msg(),
|
||||
Value: "",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
payload := map[string]any{
|
||||
"attributes": attributes,
|
||||
}
|
||||
|
||||
return payload
|
||||
}
|
||||
|
||||
type cacheQueryItem struct {
|
||||
globalUUID uuid.UUID
|
||||
token string
|
||||
attributeCompTag string
|
||||
attributeModelName string
|
||||
attributeExtendType string
|
||||
attributeName string
|
||||
attributeVal string
|
||||
}
|
||||
|
||||
type attributeQueryResult struct {
|
||||
Token string `json:"token"`
|
||||
Msg string `json:"msg"`
|
||||
Value string `json:"value"`
|
||||
Code int `json:"code"`
|
||||
}
|
||||
|
||||
type queryResult struct {
|
||||
err *errcode.AppError
|
||||
value string
|
||||
}
|
||||
|
|
@ -1,214 +0,0 @@
|
|||
// Package handler provides HTTP handlers for various endpoints.
|
||||
package handler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"modelRT/common/errcode"
|
||||
"modelRT/constants"
|
||||
"modelRT/database"
|
||||
"modelRT/diagram"
|
||||
"modelRT/logger"
|
||||
"modelRT/network"
|
||||
"modelRT/orm"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// ComponentAttributeUpdateHandler define circuit diagram component attribute value update process API
|
||||
func ComponentAttributeUpdateHandler(c *gin.Context) {
|
||||
pgClient := database.GetPostgresDBClient()
|
||||
var request network.ComponentAttributeUpdateInfo
|
||||
if err := c.ShouldBindJSON(&request); err != nil {
|
||||
logger.Error(c, "unmarshal request params failed", "error", err)
|
||||
renderRespFailure(c, constants.RespCodeInvalidParams, err.Error(), nil)
|
||||
return
|
||||
}
|
||||
|
||||
updateResults := make(map[string]*errcode.AppError)
|
||||
attriModifyConfs := make([]attributeModifyConfig, 0, len(request.AttributeConfigs))
|
||||
var attributeComponentTag string
|
||||
for index, attribute := range request.AttributeConfigs {
|
||||
slices := strings.Split(attribute.AttributeToken, ".")
|
||||
if len(slices) < 7 {
|
||||
updateResults[attribute.AttributeToken] = errcode.ErrInvalidToken
|
||||
continue
|
||||
}
|
||||
|
||||
componentTag := slices[4]
|
||||
if index == 0 {
|
||||
attributeComponentTag = componentTag
|
||||
} else if componentTag != attributeComponentTag {
|
||||
updateResults[attribute.AttributeToken] = errcode.ErrCrossToken
|
||||
continue
|
||||
}
|
||||
|
||||
attriModifyConfs = append(attriModifyConfs, attributeModifyConfig{
|
||||
attributeToken: attribute.AttributeToken,
|
||||
attributeExtendType: slices[5],
|
||||
attributeName: slices[6],
|
||||
attributeOldVal: attribute.AttributeOldVal,
|
||||
attributeNewVal: attribute.AttributeNewVal,
|
||||
})
|
||||
}
|
||||
|
||||
// open transaction
|
||||
tx := pgClient.WithContext(c).Begin()
|
||||
if tx.Error != nil {
|
||||
logger.Error(c, "begin postgres transaction failed", "error", tx.Error)
|
||||
renderRespFailure(c, constants.RespCodeServerError, "begin postgres transaction failed", nil)
|
||||
return
|
||||
}
|
||||
|
||||
compInfo, err := database.QueryComponentByCompTag(c, tx, attributeComponentTag)
|
||||
if err != nil {
|
||||
logger.Error(c, "query component info by component tag failed", "error", err, "tag", attributeComponentTag)
|
||||
|
||||
for _, attribute := range request.AttributeConfigs {
|
||||
if _, exists := updateResults[attribute.AttributeToken]; !exists {
|
||||
updateResults[attribute.AttributeToken] = errcode.ErrDBQueryFailed.WithCause(err)
|
||||
}
|
||||
}
|
||||
|
||||
tx.Rollback()
|
||||
|
||||
payload := genUpdateRespPayload(updateResults, request.AttributeConfigs)
|
||||
renderRespFailure(c, constants.RespCodeFailed, "query component metadata failed", payload)
|
||||
return
|
||||
}
|
||||
|
||||
identifiers := make([]orm.ProjectIdentifier, len(attriModifyConfs))
|
||||
for i, mod := range attriModifyConfs {
|
||||
identifiers[i] = orm.ProjectIdentifier{
|
||||
Token: mod.attributeToken,
|
||||
Tag: compInfo.ModelName,
|
||||
GroupName: mod.attributeExtendType,
|
||||
}
|
||||
}
|
||||
tableNameMap, err := database.BatchGetProjectNames(tx, identifiers)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
|
||||
for _, id := range identifiers {
|
||||
if _, exists := updateResults[id.Token]; !exists {
|
||||
updateResults[id.Token] = errcode.ErrRetrieveFailed.WithCause(err)
|
||||
}
|
||||
}
|
||||
|
||||
payload := genUpdateRespPayload(updateResults, request.AttributeConfigs)
|
||||
renderRespFailure(c, constants.RespCodeFailed, "batch retrieve table names failed", payload)
|
||||
return
|
||||
}
|
||||
|
||||
redisUpdateMap := make(map[string][]cacheUpdateItem)
|
||||
for _, mod := range attriModifyConfs {
|
||||
id := orm.ProjectIdentifier{Tag: compInfo.ModelName, GroupName: mod.attributeExtendType}
|
||||
tableName, exists := tableNameMap[id]
|
||||
if !exists {
|
||||
updateResults[mod.attributeToken] = errcode.ErrFoundTargetFailed
|
||||
continue
|
||||
}
|
||||
|
||||
result := tx.Table(tableName).
|
||||
Where(fmt.Sprintf("%s = ? AND global_uuid = ?", mod.attributeName), mod.attributeOldVal, compInfo.GlobalUUID).
|
||||
Updates(map[string]any{mod.attributeName: mod.attributeNewVal})
|
||||
|
||||
if result.Error != nil {
|
||||
updateResults[mod.attributeToken] = errcode.ErrDBUpdateFailed
|
||||
continue
|
||||
}
|
||||
if result.RowsAffected == 0 {
|
||||
updateResults[mod.attributeToken] = errcode.ErrDBzeroAffectedRows
|
||||
continue
|
||||
}
|
||||
|
||||
cacheKey := fmt.Sprintf("%s_%s", attributeComponentTag, mod.attributeExtendType)
|
||||
redisUpdateMap[cacheKey] = append(redisUpdateMap[cacheKey],
|
||||
cacheUpdateItem{
|
||||
token: mod.attributeToken,
|
||||
name: mod.attributeName,
|
||||
newVal: mod.attributeNewVal,
|
||||
})
|
||||
}
|
||||
|
||||
// commit transaction
|
||||
if err := tx.Commit().Error; err != nil {
|
||||
renderRespFailure(c, constants.RespCodeServerError, "transaction commit failed", nil)
|
||||
return
|
||||
}
|
||||
|
||||
for key, items := range redisUpdateMap {
|
||||
hset := diagram.NewRedisHash(c, key, 5000, false)
|
||||
|
||||
fields := make(map[string]any, len(items))
|
||||
for _, item := range items {
|
||||
fields[item.name] = item.newVal
|
||||
}
|
||||
|
||||
if err := hset.SetRedisHashByMap(fields); err != nil {
|
||||
logger.Error(c, "batch sync redis failed", "hash_key", key, "error", err)
|
||||
|
||||
for _, item := range items {
|
||||
if _, exists := updateResults[item.token]; exists {
|
||||
updateResults[item.token] = errcode.ErrCacheSyncWarn.WithCause(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
payload := genUpdateRespPayload(updateResults, request.AttributeConfigs)
|
||||
if len(updateResults) > 0 {
|
||||
renderRespFailure(c, constants.RespCodeFailed, "process completed with partial failures", payload)
|
||||
return
|
||||
}
|
||||
renderRespSuccess(c, constants.RespCodeSuccess, "process completed successfully", payload)
|
||||
}
|
||||
|
||||
type attributeModifyConfig struct {
|
||||
attributeToken string
|
||||
attributeExtendType string
|
||||
attributeName string
|
||||
attributeOldVal string
|
||||
attributeNewVal string
|
||||
}
|
||||
|
||||
type cacheUpdateItem struct {
|
||||
token string
|
||||
name string
|
||||
newVal string
|
||||
}
|
||||
|
||||
type attributeUpdateResult struct {
|
||||
Token string `json:"token"`
|
||||
Code int `json:"code"`
|
||||
Msg string `json:"msg"`
|
||||
}
|
||||
|
||||
func genUpdateRespPayload(updateResults map[string]*errcode.AppError, originalRequests []network.ComponentAttributeConfig) map[string]any {
|
||||
attributes := make([]attributeUpdateResult, 0, len(originalRequests))
|
||||
|
||||
for _, req := range originalRequests {
|
||||
token := req.AttributeToken
|
||||
|
||||
if appErr, exists := updateResults[token]; exists {
|
||||
attributes = append(attributes, attributeUpdateResult{
|
||||
Token: token,
|
||||
Code: appErr.Code(),
|
||||
Msg: appErr.Msg(),
|
||||
})
|
||||
} else {
|
||||
attributes = append(attributes, attributeUpdateResult{
|
||||
Token: token,
|
||||
Code: constants.CodeSuccess,
|
||||
Msg: "token value update success",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
payload := map[string]any{
|
||||
"attributes": attributes,
|
||||
}
|
||||
|
||||
return payload
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue