Compare commits

..

No commits in common. "feature-jointDebuggingDemo" and "develop" have entirely different histories.

93 changed files with 1014 additions and 6385 deletions

1
.gitignore vendored
View File

@ -22,7 +22,6 @@
go.work go.work
.vscode .vscode
.idea
# Shield all log files in the log folder # Shield all log files in the log folder
/log/ /log/
# Shield config files in the configs folder # Shield config files in the configs folder

View File

@ -16,7 +16,6 @@ type BaseConfig struct {
// ServiceConfig define config struct of service config // ServiceConfig define config struct of service config
type ServiceConfig struct { type ServiceConfig struct {
ServiceAddr string `mapstructure:"service_addr"`
ServiceName string `mapstructure:"service_name"` ServiceName string `mapstructure:"service_name"`
SecretKey string `mapstructure:"secret_key"` SecretKey string `mapstructure:"secret_key"`
} }
@ -28,7 +27,7 @@ type KafkaConfig struct {
Topic string `mapstructure:"topic"` Topic string `mapstructure:"topic"`
AutoOffsetReset string `mapstructure:"auto_offset_reset"` AutoOffsetReset string `mapstructure:"auto_offset_reset"`
EnableAutoCommit string `mapstructure:"enable_auto_commit"` EnableAutoCommit string `mapstructure:"enable_auto_commit"`
ReadMessageTimeDuration float32 `mapstructure:"read_message_time_duration"` ReadMessageTimeDuration string `mapstructure:"read_message_time_duration"`
} }
// PostgresConfig define config struct of postgres config // PostgresConfig define config struct of postgres config

View File

@ -1,17 +0,0 @@
// Package constants define constant variable
package constants
import "time"
const (
// FanInChanMaxSize define maximum buffer capacity by fanChannel
FanInChanMaxSize = 10000
// SendMaxBatchSize define maximum buffer capacity
// TODO 后续优化批处理大小
SendMaxBatchSize = 100
// SendChanBufferSize define maximum buffer capacity by channel
SendChanBufferSize = 100
// SendMaxBatchInterval define maximum aggregate latency
SendMaxBatchInterval = 20 * time.Millisecond
)

View File

@ -1,7 +0,0 @@
// Package constants define constant variable
package constants
type contextKey string
// MeasurementUUIDKey define measurement uuid key into context
const MeasurementUUIDKey contextKey = "measurement_uuid"

View File

@ -49,9 +49,3 @@ var ErrChanIsNil = errors.New("this channel is nil")
// ErrConcurrentModify define error of concurrent modification detected // ErrConcurrentModify define error of concurrent modification detected
var ErrConcurrentModify = errors.New("existed concurrent modification risk") var ErrConcurrentModify = errors.New("existed concurrent modification risk")
// ErrUnsupportedSubAction define error of unsupported real time data subscription action
var ErrUnsupportedSubAction = errors.New("unsupported real time data subscription action")
// ErrUnsupportedLinkAction define error of unsupported measurement link process action
var ErrUnsupportedLinkAction = errors.New("unsupported rmeasurement link process action")

View File

@ -1,31 +0,0 @@
// Package constants define constant variable
package constants
const (
// TIBreachTriggerType define out of bounds type constant
TIBreachTriggerType = "trigger"
)
const (
// TelemetryUpLimit define telemetry upper limit
TelemetryUpLimit = "up"
// TelemetryUpUpLimit define telemetry upper upper limit
TelemetryUpUpLimit = "upup"
// TelemetryDownLimit define telemetry limit
TelemetryDownLimit = "down"
// TelemetryDownDownLimit define telemetry lower lower limit
TelemetryDownDownLimit = "downdown"
)
const (
// TelesignalRaising define telesignal raising edge
TelesignalRaising = "raising"
// TelesignalFalling define telesignal falling edge
TelesignalFalling = "falling"
)
const (
// MinBreachCount define min breach count of real time data
MinBreachCount = 10
)

24
constants/keys.go Normal file
View File

@ -0,0 +1,24 @@
// Package constants define constant variable
package constants
const (
// RedisAllGridSetKey define redis set key which store all grid keys
RedisAllGridSetKey = "grid_keys"
// RedisSpecGridZoneSetKey define redis set key which store all zone keys under specific grid
RedisSpecGridZoneSetKey = "grid_%s_zones_keys"
// RedisAllZoneSetKey define redis set key which store all zone keys
RedisAllZoneSetKey = "zone_keys"
// RedisSpecZoneStationSetKey define redis set key which store all station keys under specific zone
RedisSpecZoneStationSetKey = "zone_%s_stations_keys"
// RedisAllStationSetKey define redis set key which store all station keys
RedisAllStationSetKey = "station_keys"
// RedisSpecStationComponentSetKey define redis set key which store all component keys under specific station
RedisSpecStationComponentSetKey = "station_%s_components_keys"
// RedisAllComponentSetKey define redis set key which store all component keys
RedisAllComponentSetKey = "component_keys"
// RedisSpecComponentSetKey define redis set key which store all component keys under specific zone
RedisSpecComponentSetKey = "zone_%s_components_keys"
)

View File

@ -4,8 +4,6 @@ package constants
const ( const (
// DevelopmentLogMode define development operator environment for modelRT project // DevelopmentLogMode define development operator environment for modelRT project
DevelopmentLogMode = "development" DevelopmentLogMode = "development"
// DebugLogMode define debug operator environment for modelRT project
DebugLogMode = "debug"
// ProductionLogMode define production operator environment for modelRT project // ProductionLogMode define production operator environment for modelRT project
ProductionLogMode = "production" ProductionLogMode = "production"
) )

View File

@ -29,8 +29,3 @@ const (
ChannelSuffixUBC = "UBC" ChannelSuffixUBC = "UBC"
ChannelSuffixUCA = "UCA" ChannelSuffixUCA = "UCA"
) )
const (
// MaxIdentifyHierarchy define max data indentify syntax hierarchy
MaxIdentifyHierarchy = 7
)

View File

@ -1,47 +0,0 @@
// Package constants define constant variable
package constants
const (
// RedisAllGridSetKey define redis set key which store all grid keys
RedisAllGridSetKey = "grid_keys"
// RedisAllZoneSetKey define redis set key which store all zone keys
RedisAllZoneSetKey = "zone_keys"
// RedisAllStationSetKey define redis set key which store all station keys
RedisAllStationSetKey = "station_keys"
// RedisAllCompNSPathSetKey define redis set key which store all component nspath keys
RedisAllCompNSPathSetKey = "component_nspath_keys"
// RedisAllCompTagSetKey define redis set key which store all component tag keys
RedisAllCompTagSetKey = "component_tag_keys"
// RedisAllConfigSetKey define redis set key which store all config keys
RedisAllConfigSetKey = "config_keys"
// RedisAllMeasTagSetKey define redis set key which store all measurement tag keys
RedisAllMeasTagSetKey = "measurement_tag_keys"
// RedisSpecGridZoneSetKey define redis set key which store all zone keys under specific grid
RedisSpecGridZoneSetKey = "%s_zones_keys"
// RedisSpecZoneStationSetKey define redis set key which store all station keys under specific zone
RedisSpecZoneStationSetKey = "%s_stations_keys"
// RedisSpecStationCompNSPATHSetKey define redis set key which store all component nspath keys under specific station
RedisSpecStationCompNSPATHSetKey = "%s_components_nspath_keys"
// RedisSpecStationCompTagSetKey define redis set key which store all component tag keys under specific station
RedisSpecStationCompTagSetKey = "%s_components_tag_keys"
// RedisSpecCompTagMeasSetKey define redis set key which store all measurement keys under specific component tag
RedisSpecCompTagMeasSetKey = "%s_measurement_keys"
)
const (
// SearchLinkAddAction define search link add action
SearchLinkAddAction = "add"
// SearchLinkDelAction define search link del action
SearchLinkDelAction = "del"
)

View File

@ -1,73 +0,0 @@
// Package constants define constant variable
package constants
const (
// SubStartAction define the real time subscription start action
SubStartAction string = "start"
// SubStopAction define the real time subscription stop action
SubStopAction string = "stop"
// SubAppendAction define the real time subscription append action
SubAppendAction string = "append"
// SubUpdateAction define the real time subscription update action
SubUpdateAction string = "update"
)
// 定义状态常量
const (
// SubSuccessCode define subscription success code
SubSuccessCode = "1001"
// SubFailedCode define subscription failed code
SubFailedCode = "1002"
// RTDSuccessCode define real time data return success code
RTDSuccessCode = "1003"
// RTDFailedCode define real time data return failed code
RTDFailedCode = "1004"
// CancelSubSuccessCode define cancel subscription success code
CancelSubSuccessCode = "1005"
// CancelSubFailedCode define cancel subscription failed code
CancelSubFailedCode = "1006"
// SubRepeatCode define subscription repeat code
SubRepeatCode = "1007"
// UpdateSubSuccessCode define update subscription success code
UpdateSubSuccessCode = "1008"
// UpdateSubFailedCode define update subscription failed code
UpdateSubFailedCode = "1009"
)
const (
// SubSuccessMsg define subscription success message
SubSuccessMsg = "subscription success"
// SubFailedMsg define subscription failed message
SubFailedMsg = "subscription failed"
// RTDSuccessMsg define real time data return success message
RTDSuccessMsg = "real time data return success"
// RTDFailedMsg define real time data return failed message
RTDFailedMsg = "real time data return failed"
// CancelSubSuccessMsg define cancel subscription success message
CancelSubSuccessMsg = "cancel subscription success"
// CancelSubFailedMsg define cancel subscription failed message
CancelSubFailedMsg = "cancel subscription failed"
// SubRepeatMsg define subscription repeat message
SubRepeatMsg = "subscription repeat in target interval"
// UpdateSubSuccessMsg define update subscription success message
UpdateSubSuccessMsg = "update subscription success"
// UpdateSubFailedMsg define update subscription failed message
UpdateSubFailedMsg = "update subscription failed"
)
// TargetOperationType define constant to the target operation type
type TargetOperationType int
const (
// OpAppend define append new target to the subscription list
OpAppend TargetOperationType = iota
// OpRemove define remove exist target from the subscription list
OpRemove
// OpUpdate define update exist target from the subscription list
OpUpdate
)
const (
// NoticeChanCap define real time data notice channel capacity
NoticeChanCap = 10000
)

View File

@ -27,9 +27,9 @@ func CreateComponentIntoDB(ctx context.Context, tx *gorm.DB, componentInfo netwo
component := orm.Component{ component := orm.Component{
GlobalUUID: globalUUID, GlobalUUID: globalUUID,
GridTag: strconv.FormatInt(componentInfo.GridID, 10), GridID: strconv.FormatInt(componentInfo.GridID, 10),
ZoneTag: strconv.FormatInt(componentInfo.ZoneID, 10), ZoneID: strconv.FormatInt(componentInfo.ZoneID, 10),
StationTag: strconv.FormatInt(componentInfo.StationID, 10), StationID: strconv.FormatInt(componentInfo.StationID, 10),
Tag: componentInfo.Tag, Tag: componentInfo.Tag,
Name: componentInfo.Name, Name: componentInfo.Name,
Context: componentInfo.Context, Context: componentInfo.Context,

View File

@ -1,50 +0,0 @@
// Package database define database operation functions
package database
import (
"context"
"fmt"
"strconv"
"time"
"modelRT/common/errcode"
"modelRT/network"
"modelRT/orm"
"github.com/gofrs/uuid"
"gorm.io/gorm"
)
// CreateMeasurement define create measurement info of the circuit diagram into DB
func CreateMeasurement(ctx context.Context, tx *gorm.DB, measurementInfo network.MeasurementCreateInfo) (string, error) {
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
globalUUID, err := uuid.FromString(measurementInfo.UUID)
if err != nil {
return "", fmt.Errorf("format uuid from string type failed:%w", err)
}
measurement := orm.Measurement{
Tag: "",
Name: "",
Type: -1,
Size: -1,
DataSource: nil,
EventPlan: nil,
BayUUID: globalUUID,
ComponentUUID: globalUUID,
Op: -1,
Ts: time.Now(),
}
result := tx.WithContext(cancelCtx).Create(&measurement)
if result.Error != nil || result.RowsAffected == 0 {
err := result.Error
if result.RowsAffected == 0 {
err = fmt.Errorf("%w:please check insert component slice", errcode.ErrInsertRowUnexpected)
}
return "", fmt.Errorf("insert component info failed:%w", err)
}
return strconv.FormatInt(measurement.ID, 10), nil
}

View File

@ -24,6 +24,7 @@ func CreateTopologicIntoDB(ctx context.Context, tx *gorm.DB, pageID int64, topol
UUIDFrom: info.UUIDFrom, UUIDFrom: info.UUIDFrom,
UUIDTo: info.UUIDTo, UUIDTo: info.UUIDTo,
Flag: info.Flag, Flag: info.Flag,
Comment: info.Comment,
} }
topologicSlice = append(topologicSlice, topologicInfo) topologicSlice = append(topologicSlice, topologicInfo)
} }

View File

@ -1,88 +0,0 @@
// Package database define database operation functions
package database
import (
"context"
"fmt"
"strings"
"modelRT/logger"
"modelRT/model"
"modelRT/orm"
"gorm.io/gorm"
)
// FillingShortTokenModel define filling short token model info
func FillingShortTokenModel(ctx context.Context, tx *gorm.DB, identModel *model.ShortIdentityTokenModel) error {
filterComponent := &orm.Component{
GridTag: identModel.GetGridTag(),
ZoneTag: identModel.GetZoneTag(),
StationTag: identModel.GetStationTag(),
}
component, measurement, err := QueryLongIdentModelInfoByToken(ctx, tx, identModel.MeasurementTag, filterComponent)
if err != nil {
logger.Error(ctx, "query long identity token model info failed", "error", err)
return err
}
identModel.ComponentInfo = component
identModel.MeasurementInfo = measurement
return nil
}
// FillingLongTokenModel define filling long token model info
func FillingLongTokenModel(ctx context.Context, tx *gorm.DB, identModel *model.LongIdentityTokenModel) error {
filterComponent := &orm.Component{
GridTag: identModel.GetGridTag(),
ZoneTag: identModel.GetZoneTag(),
StationTag: identModel.GetStationTag(),
Tag: identModel.GetComponentTag(),
}
component, measurement, err := QueryLongIdentModelInfoByToken(ctx, tx, identModel.MeasurementTag, filterComponent)
if err != nil {
logger.Error(ctx, "query long identity token model info failed", "error", err)
return err
}
identModel.ComponentInfo = component
identModel.MeasurementInfo = measurement
return nil
}
// ParseDataIdentifierToken define function to parse data identifier token function
func ParseDataIdentifierToken(ctx context.Context, tx *gorm.DB, identToken string) (model.IndentityTokenModelInterface, error) {
identSlice := strings.Split(identToken, ".")
identSliceLen := len(identSlice)
if identSliceLen == 4 {
// token1.token2.token3.token4.token7
shortIndentModel := &model.ShortIdentityTokenModel{
GridTag: identSlice[0],
ZoneTag: identSlice[1],
StationTag: identSlice[2],
NamespacePath: identSlice[3],
MeasurementTag: identSlice[6],
}
err := FillingShortTokenModel(ctx, tx, shortIndentModel)
if err != nil {
return nil, err
}
return shortIndentModel, nil
} else if identSliceLen == 7 {
// token1.token2.token3.token4.token5.token6.token7
longIndentModel := &model.LongIdentityTokenModel{
GridTag: identSlice[0],
ZoneTag: identSlice[1],
StationTag: identSlice[2],
NamespacePath: identSlice[3],
ComponentTag: identSlice[4],
AttributeGroup: identSlice[5],
MeasurementTag: identSlice[6],
}
err := FillingLongTokenModel(ctx, tx, longIndentModel)
if err != nil {
return nil, err
}
return longIndentModel, nil
}
return nil, fmt.Errorf("invalid identity token format: %s", identToken)
}

View File

@ -3,7 +3,6 @@ package database
import ( import (
"context" "context"
"fmt"
"time" "time"
"modelRT/orm" "modelRT/orm"
@ -82,58 +81,3 @@ func QueryComponentByNsPath(ctx context.Context, tx *gorm.DB, nsPath string) (or
} }
return component, nil return component, nil
} }
// QueryLongIdentModelInfoByToken define func to query long identity model info by long token
func QueryLongIdentModelInfoByToken(ctx context.Context, tx *gorm.DB, measTag string, condition *orm.Component) (*orm.Component, *orm.Measurement, error) {
var resultComp orm.Component
var meauserment orm.Measurement
// ctx timeout judgment
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).First(&resultComp, &condition)
if result.Error != nil {
if result.Error == gorm.ErrRecordNotFound {
return nil, nil, fmt.Errorf("component record not found by %v:%w", condition, result.Error)
}
return nil, nil, result.Error
}
filterMap := map[string]any{"component_uuid": resultComp.GlobalUUID, "tag": measTag}
result = tx.WithContext(cancelCtx).Where(filterMap).Clauses(clause.Locking{Strength: "UPDATE"}).First(&meauserment)
if result.Error != nil {
if result.Error == gorm.ErrRecordNotFound {
return nil, nil, fmt.Errorf("measurement record not found by %v:%w", filterMap, result.Error)
}
return nil, nil, result.Error
}
return &resultComp, &meauserment, nil
}
// QueryShortIdentModelInfoByToken define func to query short identity model info by short token
func QueryShortIdentModelInfoByToken(ctx context.Context, tx *gorm.DB, measTag string, condition *orm.Component) (*orm.Component, *orm.Measurement, error) {
var resultComp orm.Component
var meauserment orm.Measurement
// ctx timeout judgment
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).First(&resultComp, &condition)
if result.Error != nil {
if result.Error == gorm.ErrRecordNotFound {
return nil, nil, fmt.Errorf("component record not found by %v:%w", condition, result.Error)
}
return nil, nil, result.Error
}
filterMap := map[string]any{"component_uuid": resultComp.GlobalUUID, "tag": measTag}
result = tx.WithContext(cancelCtx).Where(filterMap).Clauses(clause.Locking{Strength: "UPDATE"}).First(&meauserment)
if result.Error != nil {
if result.Error == gorm.ErrRecordNotFound {
return nil, nil, fmt.Errorf("measurement record not found by %v:%w", filterMap, result.Error)
}
return nil, nil, result.Error
}
return &resultComp, &meauserment, nil
}

View File

@ -13,31 +13,12 @@ import (
// QueryMeasurementByID return the result of query circuit diagram component measurement info by id from postgresDB // QueryMeasurementByID return the result of query circuit diagram component measurement info by id from postgresDB
func QueryMeasurementByID(ctx context.Context, tx *gorm.DB, id int64) (orm.Measurement, error) { func QueryMeasurementByID(ctx context.Context, tx *gorm.DB, id int64) (orm.Measurement, error) {
var measurement orm.Measurement
// ctx超时判断
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
result := tx.WithContext(cancelCtx).
Where("id = ?", id).
Clauses(clause.Locking{Strength: "UPDATE"}).
First(&measurement)
if result.Error != nil {
return orm.Measurement{}, result.Error
}
return measurement, nil
}
// QueryMeasurementByToken define function query circuit diagram component measurement info by token from postgresDB
func QueryMeasurementByToken(ctx context.Context, tx *gorm.DB, token string) (orm.Measurement, error) {
// TODO parse token to avoid SQL injection
var component orm.Measurement var component orm.Measurement
// ctx超时判断 // ctx超时判断
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second) cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel() defer cancel()
result := tx.WithContext(cancelCtx). result := tx.WithContext(cancelCtx).
Where(" = ?", token). Where("id = ?", id).
Clauses(clause.Locking{Strength: "UPDATE"}). Clauses(clause.Locking{Strength: "UPDATE"}).
First(&component) First(&component)
@ -46,17 +27,3 @@ func QueryMeasurementByToken(ctx context.Context, tx *gorm.DB, token string) (or
} }
return component, nil return component, nil
} }
// GetAllMeasurements define func to query all measurement info from postgresDB
func GetAllMeasurements(ctx context.Context, tx *gorm.DB) ([]orm.Measurement, error) {
var measurements []orm.Measurement
// ctx超时判断
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Find(&measurements)
if result.Error != nil {
return nil, result.Error
}
return measurements, nil
}

View File

@ -1,80 +0,0 @@
// Package database define database operation functions
package database
import (
"context"
"fmt"
"time"
"modelRT/orm"
"gorm.io/gorm"
)
func queryFirstByID(ctx context.Context, tx *gorm.DB, id any, dest any) error {
result := tx.WithContext(ctx).Where("id = ?", id).First(dest)
return result.Error
}
func queryFirstByTag(ctx context.Context, tx *gorm.DB, tagName any, dest any) error {
result := tx.WithContext(ctx).Where("tagname = ?", tagName).First(dest)
return result.Error
}
// QueryNodeInfoByID return the result of query circuit diagram node info by id and level from postgresDB
func QueryNodeInfoByID(ctx context.Context, tx *gorm.DB, id int64, level int) (orm.CircuitDiagramNodeInterface, orm.CircuitDiagramNodeInterface, error) {
// 设置 Context 超时
cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
var currentNodeInfo orm.CircuitDiagramNodeInterface
var previousNodeInfo orm.CircuitDiagramNodeInterface
var err error
switch level {
case 0:
var grid orm.Grid
err = queryFirstByID(cancelCtx, tx, id, &grid)
currentNodeInfo = grid
case 1:
// current:Zone,Previous:Grid
var zone orm.Zone
err = queryFirstByID(cancelCtx, tx, id, &zone)
currentNodeInfo = zone
if err == nil {
var grid orm.Grid
err = queryFirstByID(cancelCtx, tx, zone.GridID, &grid)
previousNodeInfo = grid
}
case 2:
// current:Station,Previous:Zone
var station orm.Station
err = queryFirstByID(cancelCtx, tx, id, &station)
currentNodeInfo = station
if err == nil {
var zone orm.Zone
err = queryFirstByID(cancelCtx, tx, station.ZoneID, &zone)
previousNodeInfo = zone
}
case 3, 4:
// current:Component, Previous:Station
var component orm.Component
err = queryFirstByID(cancelCtx, tx, id, &component)
currentNodeInfo = component
if err == nil {
var station orm.Station
err = queryFirstByTag(cancelCtx, tx, component.StationTag, &station)
previousNodeInfo = station
}
case 5:
// TODO[NONEED-ISSUE]暂无此层级增加或删除需求 #2
return nil, nil, nil
default:
return nil, nil, fmt.Errorf("unsupported node level: %d", level)
}
if err != nil {
return nil, nil, err
}
return previousNodeInfo, currentNodeInfo, nil
}

View File

@ -37,9 +37,9 @@ func UpdateComponentIntoDB(ctx context.Context, tx *gorm.DB, componentInfo netwo
updateParams := orm.Component{ updateParams := orm.Component{
GlobalUUID: globalUUID, GlobalUUID: globalUUID,
GridTag: strconv.FormatInt(componentInfo.GridID, 10), GridID: strconv.FormatInt(componentInfo.GridID, 10),
ZoneTag: strconv.FormatInt(componentInfo.ZoneID, 10), ZoneID: strconv.FormatInt(componentInfo.ZoneID, 10),
StationTag: strconv.FormatInt(componentInfo.StationID, 10), StationID: strconv.FormatInt(componentInfo.StationID, 10),
Tag: componentInfo.Tag, Tag: componentInfo.Tag,
Name: componentInfo.Name, Name: componentInfo.Name,
Context: componentInfo.Context, Context: componentInfo.Context,

View File

@ -51,6 +51,7 @@ func UpdateTopologicIntoDB(ctx context.Context, tx *gorm.DB, pageID int64, chang
Flag: changeInfo.Flag, Flag: changeInfo.Flag,
UUIDFrom: changeInfo.NewUUIDFrom, UUIDFrom: changeInfo.NewUUIDFrom,
UUIDTo: changeInfo.NewUUIDTo, UUIDTo: changeInfo.NewUUIDTo,
Comment: changeInfo.Comment,
} }
result = tx.WithContext(cancelCtx).Create(&topologic) result = tx.WithContext(cancelCtx).Create(&topologic)
} }

View File

@ -88,95 +88,45 @@ docker logs redis
##### 2.4.1 Postgres数据注入 ##### 2.4.1 Postgres数据注入
```SQL ```SQL
insert into public.grid(id,tagname,name,description,op,ts) VALUES (1, 'grid1', '网格1', '测试网格1', -1,CURRENT_TIMESTAMP); INSERT INTO public."Topologic" VALUES (2, 1, '70c190f2-8a60-42a9-b143-ec5f87e0aa6b', '10f155cf-bd27-4557-85b2-d126b6e2657f', 1, NULL);
INSERT INTO public."Topologic" VALUES (3, 1, '70c190f2-8a60-42a9-b143-ec5f87e0aa6b', 'e32bc0be-67f4-4d79-a5da-eaa40a5bd77d', 1, NULL);
insert into public.zone(id,grid_id,tagname,name,description,op,ts) VALUES (1, 1,'zone1', '区域1_1', '测试区域1_1', -1,CURRENT_TIMESTAMP); INSERT INTO public."Topologic" VALUES (4, 1, '70c190f2-8a60-42a9-b143-ec5f87e0aa6b', '70c190f2-8a75-42a9-b166-ec5f87e0aa6b', 1, NULL);
INSERT INTO public."Topologic" VALUES (5, 1, 'e32bc0be-67f4-4d79-a5da-eaa40a5bd77d', '70c200f2-8a75-42a9-c166-bf5f87e0aa6b', 1, NULL);
insert into public.station(id,zone_id,tagname,name,description,is_local,op,ts) VALUES (1, 1,'station1', '站1_1_1', '测试站1_1_1', true, -1,CURRENT_TIMESTAMP); INSERT INTO public."Topologic" VALUES (1, 1, '00000000-0000-0000-0000-000000000000', '70c190f2-8a60-42a9-b143-ec5f87e0aa6b', 1, NULL);
INSERT INTO public.topologic(flag, uuid_from, uuid_to, context, description, op, ts)
VALUES
(1, '00000000-0000-0000-0000-000000000000', '70c190f2-8a60-42a9-b143-ec5f87e0aa6b', '{}', '', 1, CURRENT_TIMESTAMP),
(1, '70c190f2-8a60-42a9-b143-ec5f87e0aa6b', '10f155cf-bd27-4557-85b2-d126b6e2657f', '{}', '', 1, CURRENT_TIMESTAMP),
(1, '70c190f2-8a60-42a9-b143-ec5f87e0aa6b', 'e32bc0be-67f4-4d79-a5da-eaa40a5bd77d', '{}', '', 1, CURRENT_TIMESTAMP),
(1, '70c190f2-8a60-42a9-b143-ec5f87e0aa6b', '70c190f2-8a75-42a9-b166-ec5f87e0aa6b', '{}', '', 1, CURRENT_TIMESTAMP),
(1, 'e32bc0be-67f4-4d79-a5da-eaa40a5bd77d', '70c200f2-8a75-42a9-c166-bf5f87e0aa6b', '{}', '', 1, CURRENT_TIMESTAMP),
(1, 'e32bc0be-67f4-4d79-a5da-eaa40a5bd77d', '968dd6e6-faec-4f78-b58a-d6e68426b09e', '{}', '', 1, CURRENT_TIMESTAMP),
(1, 'e32bc0be-67f4-4d79-a5da-eaa40a5bd77d', '968dd6e6-faec-4f78-b58a-d6e68426b08e', '{}', '', 1, CURRENT_TIMESTAMP);
INSERT INTO public.bay (bay_uuid, name, tag, type, unom, fla, capacity, description, in_service, state, grid, zone, station, business, context, from_uuids, to_uuids, dev_protect, dev_fault_record, dev_status, dev_dyn_sense, dev_instruct, dev_etc, components, op, ts)
VALUES (
'18e71a24-694a-43fa-93a7-c4d02a27d1bc',
'', '', '',
-1, -1, -1,
'',
false,
-1,
'', '', '',
'{}',
'{}',
'[]',
'[]',
'[]',
'[]',
'[]',
'[]',
'[]',
'[]',
ARRAY['968dd6e6-faec-4f78-b58a-d6e68426b09e', '968dd6e6-faec-4f78-b58a-d6e68426b08e']::uuid[],
-1,
CURRENT_TIMESTAMP
);
INSERT INTO public.component (global_uuid, nspath, tag, name, model_name, description, grid, zone, station, type, in_service, state, status, connection, label, context, op, ts)
VALUES
(
'968dd6e6-faec-4f78-b58a-d6e68426b09e',
'ns1', 'tag1', 'component1', '', '',
'grid1', 'zone1', 'station1',
-1,
false,
-1, -1,
'{}',
'{}',
'{}',
-1,
CURRENT_TIMESTAMP
),
(
'968dd6e6-faec-4f78-b58a-d6e68426b08e',
'ns1', 'tag2', 'component2', '', '',
'grid1', 'zone1', 'station1',
-1,
false,
-1, -1,
'{}',
'{}',
'{}',
-1,
CURRENT_TIMESTAMP
);
INSERT INTO public.measurement (id, tag, name, type, size, data_source, event_plan, bay_uuid, component_uuid, op, ts)
VALUES
(3, 'I11_C_rms', '45母甲侧互连电流C相1', -1, 200, '{"type": 1, "io_address": {"device": "ssu001", "channel": "TM1", "station": "001"}}', '{"cause": {"up": 55.0, "down": 45.0}, "action": {"command": "warning", "parameters": ["I段母线甲侧互连电流C相1"]}, "enable": true}', '18e71a24-694a-43fa-93a7-c4d02a27d1bc', '968dd6e6-faec-4f78-b58a-d6e68426b09e', -1, CURRENT_TIMESTAMP),
(4, 'I11_B_rms', '45母甲侧互连电流B相1', -1, 300, '{"type": 1, "io_address": {"device": "ssu001", "channel": "TM2", "station": "001"}}', '{"cause": {"upup": 65, "downdown": 35}, "action": {"command": "warning", "parameters": ["I段母线甲侧互连电流B相1"]}, "enable": true}', '18e71a24-694a-43fa-93a7-c4d02a27d1bc', '968dd6e6-faec-4f78-b58a-d6e68426b09e', -1, CURRENT_TIMESTAMP),
(5, 'I11_A_rms', '45母甲侧互连电流A相1', -1, 300, '{"type": 1, "io_address": {"device": "ssu001", "channel": "TM3", "station": "001"}}', '{"cause": {"up": 55, "down": 45, "upup": 65, "downdown": 35}, "action": {"command": "warning", "parameters": ["I段母线甲侧互连电流A相1"]}, "enable": true}', '18e71a24-694a-43fa-93a7-c4d02a27d1bc', '968dd6e6-faec-4f78-b58a-d6e68426b09e', -1, CURRENT_TIMESTAMP);
``` ```
##### 2.4.2 Redis数据注入 ##### 2.4.2 Redis数据注入
Redis数据脚本 Redis数据脚本
```shell ```Lua
deploy/redis-test-data/measurments-recommend/measurement_injection.go redis.call('SADD', 'grid_keys', 'transformfeeder1_220', 'transformfeeder1_220_35', 'transformfeeder1_220_36')
redis.call('SADD', 'grid_transformfeeder1_220_zones_keys', 'I_A_rms', 'I_B_rms', 'I_C_rms')
redis.call('SADD', 'grid_transformfeeder1_220_35_zones_keys', 'I_A_rms', 'I_B_rms', 'I_C_rms')
redis.call('SADD', 'grid_transformfeeder1_220_36_zones_keys', 'I_A_rms', 'I_B_rms', 'I_C_rms')
local dict_key = 'search_suggestions_dict'
redis.call('FT.SUGADD', dict_key, 'transformfeeder1_220', 1)
redis.call('FT.SUGADD', dict_key, 'transformfeeder1_220_35', 1)
redis.call('FT.SUGADD', dict_key, 'transformfeeder1_220_36', 1)
redis.call('FT.SUGADD', dict_key, 'transformfeeder1_220.I_A_rms', 1)
redis.call('FT.SUGADD', dict_key, 'transformfeeder1_220.I_B_rms', 1)
redis.call('FT.SUGADD', dict_key, 'transformfeeder1_220.I_C_rms', 1)
redis.call('FT.SUGADD', dict_key, 'transformfeeder1_220_35.I_A_rms', 1)
redis.call('FT.SUGADD', dict_key, 'transformfeeder1_220_35.I_B_rms', 1)
redis.call('FT.SUGADD', dict_key, 'transformfeeder1_220_35.I_C_rms', 1)
redis.call('FT.SUGADD', dict_key, 'transformfeeder1_220_36.I_A_rms', 1)
redis.call('FT.SUGADD', dict_key, 'transformfeeder1_220_36.I_B_rms', 1)
redis.call('FT.SUGADD', dict_key, 'transformfeeder1_220_36.I_C_rms', 1)
return 'OK'
``` ```
运行脚本向 Reids 导入数据 在Redis CLI 中导入命令
```shell 1. 使用 `EVAL "lua脚本" 0`即可成功导入数据
go run deploy/redis-test-data/measurments-recommend/measurement_injection.go 2. 使用 `SCRIPT LOAD "lua脚本"`加载脚本,然后使用 `EVAL SHA1值 0` 命令执行上一步存储命令返回的哈希值即可
```
### 3\. 启动 ModelRT 服务 ### 3\. 启动 ModelRT 服务

View File

@ -1,15 +0,0 @@
FROM golang:1.24-alpine AS builder
WORKDIR /app
COPY go.mod .
COPY go.sum .
RUN GOPROXY="https://goproxy.cn,direct" go mod download
COPY . .
RUN CGO_ENABLED=0 go build -ldflags="-s -w" -o modelrt main.go
FROM alpine:latest
WORKDIR /app
COPY --from=builder /app/modelrt ./modelrt
COPY configs/config.example.yaml ./configs/config.example.yaml
RUN chmod +x /app/modelrt
CMD ["/app/modelrt"]

View File

@ -1,330 +0,0 @@
// Package main implement redis test data injection
package main
import (
"context"
"fmt"
"log"
"github.com/RediSearch/redisearch-go/v2/redisearch"
"github.com/redis/go-redis/v9"
)
var ac *redisearch.Autocompleter
// InitAutocompleterWithPool define func of initialize the Autocompleter with redigo pool
func init() {
// ac = redisearch.NewAutocompleterFromPool(pool, redisSearchDictName)
ac = redisearch.NewAutocompleter("localhost:6379", redisSearchDictName)
}
const (
gridKeysSet = "grid_keys"
zoneKeysSet = "zone_keys"
stationKeysSet = "station_keys"
componentNSPathKeysSet = "component_nspath_keys"
componentTagKeysSet = "component_tag_keys"
configKeysSet = "config_keys"
measurementTagKeysSet = "measurement_tag_keys"
// Grid -> Zone (e.g., grid1_zones_keys)
gridZoneSetKeyFormat = "grid%d_zones_keys"
// Zone -> Station (e.g., zone1_1_stations_keys)
zoneStationSetKeyFormat = "zone%d_%d_stations_keys"
// Station -> NSPath (e.g., station1_1_1_components_nspath_keys)
stationNSPathKeyFormat = "station%d_%d_%d_components_nspath_keys"
// NSPath -> CompTag (e.g., ns1_1_1_1_components_tag_keys)
nsPathCompTagKeyFormat = "ns%d_%d_%d_%d_components_tag_keys"
// CompTag -> Measurement (e.g., comptag1_1_1_1_1_measurement_keys)
compTagMeasKeyFormat = "comptag%d_%d_%d_%d_%d_measurement_keys"
)
const (
redisSearchDictName = "search_suggestions_dict"
defaultScore = 1.0
)
var configMetrics = []any{
"component", "base_extend", "rated", "setup", "model",
"stable", "bay", "craft", "integrity", "behavior",
}
func bulkInsertAllHierarchySets(ctx context.Context, rdb *redis.Client) error {
log.Println("starting bulk insertion of Redis hierarchy sets")
if err := insertStaticSets(ctx, rdb); err != nil {
return fmt.Errorf("static set insertion failed: %w", err)
}
if err := insertDynamicHierarchy(ctx, rdb); err != nil {
return fmt.Errorf("dynamic hierarchy insertion failed: %w", err)
}
if err := insertAllHierarchySuggestions(ac); err != nil {
return fmt.Errorf("dynamic hierarchy insertion failed: %w", err)
}
log.Println("bulk insertion complete")
return nil
}
// insertStaticSets seeds the fixed top-level sets: the three grids, their
// nine zones, and the configuration metric names.
func insertStaticSets(ctx context.Context, rdb *redis.Client) error {
	// grid_keys
	if err := rdb.SAdd(ctx, gridKeysSet, "grid1", "grid2", "grid3").Err(); err != nil {
		return fmt.Errorf("sadd failed for %s: %w", gridKeysSet, err)
	}
	// zone_keys: zone<grid>_<zone> for every grid/zone pair (3x3 = 9 members)
	zones := make([]any, 0, 9)
	for grid := 1; grid <= 3; grid++ {
		for zone := 1; zone <= 3; zone++ {
			zones = append(zones, fmt.Sprintf("zone%d_%d", grid, zone))
		}
	}
	if err := rdb.SAdd(ctx, zoneKeysSet, zones...).Err(); err != nil {
		return fmt.Errorf("sadd failed for %s: %w", zoneKeysSet, err)
	}
	// config_keys
	if err := rdb.SAdd(ctx, configKeysSet, configMetrics...).Err(); err != nil {
		return fmt.Errorf("sadd failed for %s: %w", configKeysSet, err)
	}
	log.Println("Static sets (grid_keys, zone_keys, config_keys) inserted.")
	return nil
}
// insertDynamicHierarchy materializes the full 3x3x3x3x3x3 demo hierarchy:
// one per-parent child set at every level (grid->zone->station->nspath->
// comptag->measurement) plus the flat global set for each level.
func insertDynamicHierarchy(ctx context.Context, rdb *redis.Client) error {
	allStationKeys := make([]any, 0, 27)
	allNSPathKeys := make([]any, 0, 81)
	allCompTagKeys := make([]any, 0, 243)
	allMeasurementTagKeys := make([]any, 0, 729)
	// S: Grid Prefix (1-3)
	for S := 1; S <= 3; S++ {
		// Grid-Zone Set Key: gridS_zones_keys
		gridZoneKey := fmt.Sprintf(gridZoneSetKeyFormat, S)
		gridZoneMembers := make([]any, 0, 3)
		// Y: Zone Index (1-3)
		for Y := 1; Y <= 3; Y++ {
			zoneID := fmt.Sprintf("%d_%d", S, Y)
			zoneMember := "zone" + zoneID
			gridZoneMembers = append(gridZoneMembers, zoneMember)
			// Zone-Station Set Key: zoneS_Y_stations_keys
			zoneStationKey := fmt.Sprintf(zoneStationSetKeyFormat, S, Y)
			zoneStationMembers := make([]any, 0, 3)
			// Z: Station Index (1-3)
			for Z := 1; Z <= 3; Z++ {
				stationID := fmt.Sprintf("%d_%d_%d", S, Y, Z)
				stationKey := "station" + stationID
				allStationKeys = append(allStationKeys, stationKey)
				zoneStationMembers = append(zoneStationMembers, stationKey)
				// Station-NSPath Set Key: stationS_Y_Z_components_nspath_keys
				stationNSPathKey := fmt.Sprintf(stationNSPathKeyFormat, S, Y, Z)
				stationNSMembers := make([]any, 0, 3)
				// D: NSPath Index (1-3)
				for D := 1; D <= 3; D++ {
					nsPathID := fmt.Sprintf("%s_%d", stationID, D)
					nsPathKey := "ns" + nsPathID
					allNSPathKeys = append(allNSPathKeys, nsPathKey)
					stationNSMembers = append(stationNSMembers, nsPathKey)
					// NSPath-CompTag Set Key: nsS_Y_Z_D_components_tag_keys
					nsCompTagKey := fmt.Sprintf(nsPathCompTagKeyFormat, S, Y, Z, D)
					nsCompTagMembers := make([]any, 0, 3)
					// I: CompTag Index (1-3)
					for I := 1; I <= 3; I++ {
						compTagID := fmt.Sprintf("%s_%d", nsPathID, I)
						// BUG FIX: use the "comptag" prefix so members line up
						// with compTagMeasKeyFormat and the autocomplete
						// suggestions, which both spell it "comptag"; the
						// previous "cmptag" spelling never matched either.
						compTagKey := "comptag" + compTagID
						allCompTagKeys = append(allCompTagKeys, compTagKey)
						nsCompTagMembers = append(nsCompTagMembers, compTagKey)
						// CompTag-Measurement Set Key: comptagS_Y_Z_D_I_measurement_keys
						compTagMeasKey := fmt.Sprintf(compTagMeasKeyFormat, S, Y, Z, D, I)
						compTagMeasMembers := make([]any, 0, 3)
						// M: Measurement Index (1-3)
						for M := 1; M <= 3; M++ {
							measurementID := fmt.Sprintf("%s_%d", compTagID, M)
							measurementKey := "meas" + measurementID
							allMeasurementTagKeys = append(allMeasurementTagKeys, measurementKey)
							compTagMeasMembers = append(compTagMeasMembers, measurementKey)
						}
						if err := rdb.SAdd(ctx, compTagMeasKey, compTagMeasMembers...).Err(); err != nil {
							return fmt.Errorf("sadd failed for %s: %w", compTagMeasKey, err)
						}
					}
					if err := rdb.SAdd(ctx, nsCompTagKey, nsCompTagMembers...).Err(); err != nil {
						return fmt.Errorf("sadd failed for %s: %w", nsCompTagKey, err)
					}
				}
				if err := rdb.SAdd(ctx, stationNSPathKey, stationNSMembers...).Err(); err != nil {
					return fmt.Errorf("sadd failed for %s: %w", stationNSPathKey, err)
				}
			}
			if err := rdb.SAdd(ctx, zoneStationKey, zoneStationMembers...).Err(); err != nil {
				return fmt.Errorf("sadd failed for %s: %w", zoneStationKey, err)
			}
		}
		if err := rdb.SAdd(ctx, gridZoneKey, gridZoneMembers...).Err(); err != nil {
			return fmt.Errorf("sadd failed for %s: %w", gridZoneKey, err)
		}
	}
	// Insert all global level sets in one SAdd per level.
	if err := rdb.SAdd(ctx, stationKeysSet, allStationKeys...).Err(); err != nil {
		return fmt.Errorf("sadd failed for %s: %w", stationKeysSet, err)
	}
	if err := rdb.SAdd(ctx, componentNSPathKeysSet, allNSPathKeys...).Err(); err != nil {
		return fmt.Errorf("sadd failed for %s: %w", componentNSPathKeysSet, err)
	}
	if err := rdb.SAdd(ctx, componentTagKeysSet, allCompTagKeys...).Err(); err != nil {
		return fmt.Errorf("sadd failed for %s: %w", componentTagKeysSet, err)
	}
	if err := rdb.SAdd(ctx, measurementTagKeysSet, allMeasurementTagKeys...).Err(); err != nil {
		return fmt.Errorf("sadd failed for %s: %w", measurementTagKeysSet, err)
	}
	log.Printf("inserted %d stations, %d nspaths, %d comptags, and %d measurements.\n",
		len(allStationKeys), len(allNSPathKeys), len(allCompTagKeys), len(allMeasurementTagKeys))
	return nil
}
// insertAllHierarchySuggestions rebuilds the RediSearch autocomplete
// dictionary with one dotted-path suggestion per hierarchy node, from the
// grid level down to individual measurements under each config metric.
func insertAllHierarchySuggestions(ac *redisearch.Autocompleter) error {
	suggestions := make([]redisearch.Suggestion, 0, 10000)
	// S: grid Index (1-3)
	for S := 1; S <= 3; S++ {
		gridStr := fmt.Sprintf("grid%d", S)
		suggestions = append(suggestions, redisearch.Suggestion{Term: gridStr, Score: defaultScore})
		// Y: zone Index (1-3)
		for Y := 1; Y <= 3; Y++ {
			zoneStr := fmt.Sprintf("zone%d_%d", S, Y)
			gridZonePath := fmt.Sprintf("%s.%s", gridStr, zoneStr)
			suggestions = append(suggestions, redisearch.Suggestion{Term: gridZonePath, Score: defaultScore})
			// Z: station Index (1-3)
			for Z := 1; Z <= 3; Z++ {
				stationStr := fmt.Sprintf("station%d_%d_%d", S, Y, Z)
				gridZoneStationPath := fmt.Sprintf("%s.%s", gridZonePath, stationStr)
				suggestions = append(suggestions, redisearch.Suggestion{Term: gridZoneStationPath, Score: defaultScore})
				// D: nsPath Index (1-3)
				for D := 1; D <= 3; D++ {
					nsPathStr := fmt.Sprintf("ns%d_%d_%d_%d", S, Y, Z, D)
					gridZoneStationNSPath := fmt.Sprintf("%s.%s", gridZoneStationPath, nsPathStr)
					suggestions = append(suggestions, redisearch.Suggestion{Term: gridZoneStationNSPath, Score: defaultScore})
					// I: compTag Index (1-3)
					for I := 1; I <= 3; I++ {
						compTagStr := fmt.Sprintf("comptag%d_%d_%d_%d_%d", S, Y, Z, D, I)
						fullCompTagPath := fmt.Sprintf("%s.%s", gridZoneStationNSPath, compTagStr)
						suggestions = append(suggestions, redisearch.Suggestion{Term: fullCompTagPath, Score: defaultScore})
						for _, metric := range configMetrics {
							fullMetricPath := fmt.Sprintf("%s.%s", fullCompTagPath, metric)
							suggestions = append(suggestions, redisearch.Suggestion{Term: fullMetricPath, Score: defaultScore})
							// J: measTag Index (1-3)
							for J := 1; J <= 3; J++ {
								// BUG FIX: measurement suggestions previously
								// reused the "comptag" prefix; use "meas" so
								// the leaf of the path matches the measurement
								// keys written by insertDynamicHierarchy
								// ("meas" + id).
								measTagStr := fmt.Sprintf("meas%d_%d_%d_%d_%d_%d", S, Y, Z, D, I, J)
								fullMeasurementPath := fmt.Sprintf("%s.%s", fullMetricPath, measTagStr)
								suggestions = append(suggestions, redisearch.Suggestion{Term: fullMeasurementPath, Score: defaultScore})
							}
						}
					}
				}
			}
		}
	}
	log.Printf("generated %d suggestions. starting bulk insertion into dictionary '%s'.", len(suggestions), redisSearchDictName)
	// Drop any stale suggestions before repopulating. A delete failure is
	// non-fatal (the dictionary may simply not exist yet), so only log it
	// instead of silently discarding the error as before.
	if err := ac.Delete(); err != nil {
		log.Printf("failed to delete existing suggestion dictionary: %v", err)
	}
	err := ac.AddTerms(suggestions...)
	if err != nil {
		return fmt.Errorf("failed to add %d suggestions: %w", len(suggestions), err)
	}
	return nil
}
// deleteAllHierarchySets removes every set created for the demo hierarchy:
// the seven global level sets plus every per-parent child set at all levels.
func deleteAllHierarchySets(ctx context.Context, rdb *redis.Client) error {
	log.Println("starting to collect all Redis Set keys for deletion...")
	keysToDelete := []string{
		gridKeysSet,
		zoneKeysSet,
		stationKeysSet,
		componentNSPathKeysSet,
		componentTagKeysSet,
		configKeysSet,
		measurementTagKeysSet,
	}
	// Enumerate every per-parent child set key using the same 3x3x3x3x3
	// index space the insertion code uses.
	for g := 1; g <= 3; g++ {
		keysToDelete = append(keysToDelete, fmt.Sprintf(gridZoneSetKeyFormat, g))
		for z := 1; z <= 3; z++ {
			keysToDelete = append(keysToDelete, fmt.Sprintf(zoneStationSetKeyFormat, g, z))
			for s := 1; s <= 3; s++ {
				keysToDelete = append(keysToDelete, fmt.Sprintf(stationNSPathKeyFormat, g, z, s))
				for n := 1; n <= 3; n++ {
					keysToDelete = append(keysToDelete, fmt.Sprintf(nsPathCompTagKeyFormat, g, z, s, n))
					for c := 1; c <= 3; c++ {
						keysToDelete = append(keysToDelete, fmt.Sprintf(compTagMeasKeyFormat, g, z, s, n, c))
					}
				}
			}
		}
	}
	log.Printf("collected %d unique keys. Starting batch deletion...", len(keysToDelete))
	deletedCount, err := rdb.Del(ctx, keysToDelete...).Result()
	if err != nil {
		return fmt.Errorf("batch deletion failed: %w", err)
	}
	log.Printf("Successfully deleted %d keys (Sets) from Redis.", deletedCount)
	return nil
}
// main connects to a local Redis instance, clears any hierarchy sets left
// over from a previous run, and repopulates the full demo hierarchy.
func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{
		Addr:     "localhost:6379",
		Password: "",
		DB:       0,
	})
	if err := rdb.Ping(ctx).Err(); err != nil {
		log.Fatalf("could not connect to Redis: %v", err)
	}
	log.Println("connected to Redis successfully")
	if err := deleteAllHierarchySets(ctx, rdb); err != nil {
		log.Fatalf("error delete exist set before bulk insertion: %v", err)
	}
	if err := bulkInsertAllHierarchySets(ctx, rdb); err != nil {
		log.Fatalf("error during bulk insertion: %v", err)
	}
}

View File

@ -1,224 +0,0 @@
// Package main implement redis test data injection
package main
import (
"context"
"fmt"
"log"
"math/rand"
"strconv"
"time"
"modelRT/orm"
util "modelRT/deploy/redis-test-data/util"
"github.com/redis/go-redis/v9"
"gorm.io/driver/postgres"
"gorm.io/gorm"
)
// Redis connection settings for the one-shot injector.
const (
	redisAddr = "localhost:6379"
)

// globalRedisClient is the shared Redis client used for all writes.
var globalRedisClient *redis.Client

// Package-level bookkeeping for the most recently generated batch: the
// high/low anomaly segment boundaries and lengths, consumed by the
// validation loops in main after generateMixedData/generateNormalData run.
var (
	highEnd, highStart, lowStart, lowEnd int
	totalLength                          int
	highSegmentLength                    int
	lowSegmentLength                     int
)
// selectRandomInt returns one of the change-slice base offsets 0 or 2, each
// with equal probability.
func selectRandomInt() int {
	if rand.Intn(2) == 0 {
		return 0
	}
	return 2
}
// generateMixedData builds a totalLength-sample series around baseValue that
// contains one forced-high segment and one forced-low segment at random,
// non-overlapping positions (high segment always before the low one).
//
// Side effects: it (re)assigns the package globals totalLength,
// highSegmentLength, lowSegmentLength, highStart, highEnd, lowStart and
// lowEnd so the validation loops in main can locate the segments afterwards.
//
// NOTE(review): segment positions are drawn from the global rand source
// while sample values use the locally seeded generator r — presumably
// unintentional, though harmless for a test-data tool.
func generateMixedData(highMin, lowMin, highBase, lowBase, baseValue, normalBase float64) []float64 {
	totalLength = 500
	highSegmentLength = 20
	lowSegmentLength = 20
	seed := time.Now().UnixNano()
	source := rand.NewSource(seed)
	r := rand.New(source)
	data := make([]float64, totalLength)
	// pick the high segment first, leaving room for the low segment after it
	highStart = rand.Intn(totalLength - highSegmentLength - lowSegmentLength - 1)
	highEnd = highStart + highSegmentLength
	// the low segment starts anywhere after the high segment ends
	lowStart = rand.Intn(totalLength-lowSegmentLength-highEnd) + highEnd
	lowEnd = lowStart + lowSegmentLength
	for i := 0; i < totalLength; i++ {
		if i >= highStart && i < highStart+highSegmentLength {
			// forced-high sample: highMin shifted by up to highBase
			data[i] = highMin + r.Float64()*(highBase)
		} else if i >= lowStart && i < lowStart+lowSegmentLength {
			// forced-low sample: lowMin shifted by up to lowBase
			// (lowBase is expected to be negative — TODO confirm)
			data[i] = lowMin + r.Float64()*(lowBase)
		} else {
			// in-range sample: baseValue ± normalBase
			change := normalBase - r.Float64()*normalBase*2
			data[i] = baseValue + change
		}
	}
	return data
}
// generateNormalData produces totalLength samples uniformly spread in
// (baseValue-normalBase, baseValue+normalBase]. As a side effect it resets
// the package global totalLength to 500, matching the mixed generator.
func generateNormalData(baseValue, normalBase float64) []float64 {
	totalLength = 500
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	data := make([]float64, totalLength)
	for i := range data {
		offset := normalBase - rng.Float64()*normalBase*2
		data[i] = baseValue + offset
	}
	return data
}
// main loads measurement definitions from Postgres, generates one batch of
// simulated samples per measurement point — randomly either all-in-range or
// containing forced high/low anomaly segments — self-checks the batch, and
// bulk-writes it into the point's Redis ZSet (member = nanosecond timestamp,
// score = sample value).
func main() {
	rootCtx := context.Background()
	pgURI := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s", "192.168.1.101", 5432, "postgres", "coslight", "demo")
	postgresDBClient, err := gorm.Open(postgres.Open(pgURI))
	if err != nil {
		panic(err)
	}
	defer func() {
		sqlDB, err := postgresDBClient.DB()
		if err != nil {
			panic(err)
		}
		sqlDB.Close()
	}()
	cancelCtx, cancel := context.WithTimeout(rootCtx, 5*time.Second)
	defer cancel()
	var measurements []orm.Measurement
	result := postgresDBClient.WithContext(cancelCtx).Find(&measurements)
	if result.Error != nil {
		panic(result.Error)
	}
	log.Println("总共读取到测量点数量:", len(measurements))
	measInfos := util.ProcessMeasurements(measurements)
	globalRedisClient = util.InitRedisClient(redisAddr)
	rCancelCtx, cancel := context.WithCancel(rootCtx)
	defer cancel()
	for key, measInfo := range measInfos {
		randomType := selectRandomType()
		var datas []float64
		if randomType {
			// generate a purely in-range batch
			log.Printf("key:%s generate normal data\n", key)
			baseValue := measInfo.BaseValue
			changes := measInfo.Changes
			if len(changes) == 0 {
				// robustness: no variation envelope configured for this point
				log.Printf("key:%s has no change bounds, skip\n", key)
				continue
			}
			normalBase := changes[0]
			normalMin := baseValue - normalBase
			normalMax := baseValue + normalBase
			datas = generateNormalData(baseValue, normalBase)
			allTrue := true
			for i := 0; i < totalLength; i++ {
				value := datas[i]
				// BUG FIX: was `value < normalMin && value > normalMax`, which
				// can never hold, so the check was vacuous; a sample is out of
				// range when it crosses either bound. The loop also now covers
				// the last sample (it previously stopped at totalLength-1).
				if value < normalMin || value > normalMax {
					allTrue = false
				}
			}
			log.Printf("// 验证结果: 所有值是否 >= %.2f或 <= %.2f %t\n", normalMin, normalMax, allTrue)
		} else {
			// generate a batch containing one high and one low anomaly segment
			log.Printf("key:%s generate abnormal data\n", key)
			var highMin, highBase float64
			var lowMin, lowBase float64
			var normalBase float64
			// TODO 生成一次测试数据
			changes := measInfo.Changes
			baseValue := measInfo.BaseValue
			if len(changes) == 2 {
				highMin = baseValue + changes[0]
				lowMin = baseValue + changes[1]
				highBase = changes[0]
				lowBase = changes[1]
				normalBase = changes[0]
			} else if len(changes) >= 4 {
				randomIndex := selectRandomInt()
				highMin = baseValue + changes[randomIndex]
				lowMin = baseValue + changes[randomIndex+1]
				highBase = changes[randomIndex]
				lowBase = changes[randomIndex+1]
				normalBase = changes[0]
			} else {
				// BUG FIX: edge-trigger points carry a single change entry;
				// the original path indexed changes[randomIndex+1] here and
				// panicked on such points.
				log.Printf("key:%s has insufficient change bounds, skip abnormal generation\n", key)
				continue
			}
			datas = generateMixedData(highMin, lowMin, highBase, lowBase, baseValue, normalBase)
			// log.Printf("key:%s\n datas:%v\n", key, datas)
			// verify the forced-high segment
			allHigh := true
			for i := highStart; i < highEnd; i++ {
				if datas[i] <= highMin {
					allHigh = false
					break
				}
			}
			log.Printf("// 验证结果 (高值段在 %d-%d): 所有值是否 > %.2f? %t\n", highStart, highEnd-1, highMin, allHigh)
			// verify the forced-low segment
			allLow := true
			for i := lowStart; i < lowEnd; i++ {
				if datas[i] >= lowMin {
					allLow = false
					break
				}
			}
			log.Printf("// 验证结果 (低值段在 %d-%d): 所有值是否 < %.2f? %t\n", lowStart, lowEnd-1, lowMin, allLow)
			// verify that everything outside the anomaly segments stays in range
			allTrue := true
			for i := 0; i < totalLength; i++ {
				value := datas[i]
				if i < highStart || (i >= highEnd && i < lowStart) || i >= lowEnd {
					// BUG FIX: was `value >= highMin && value <= lowMin`, which
					// is impossible when highMin > lowMin, making the check
					// vacuous; either bound being crossed is a violation.
					if value >= highMin || value <= lowMin {
						allTrue = false
					}
				}
			}
			log.Printf("// 验证结果 (正常段在 %d-%d): 所有值是否 <= %.2f或>= %.2f %t\n", 0, totalLength-1, highMin, lowMin, allTrue)
		}
		log.Printf("启动数据写入程序, Redis Key: %s, 基准值: %.4f, 变化范围: %+v\n", key, measInfo.BaseValue, measInfo.Changes)
		pipe := globalRedisClient.Pipeline()
		redisZs := make([]redis.Z, 0, totalLength)
		currentTime := time.Now().UnixNano()
		for i := range totalLength {
			sequentialTime := currentTime + int64(i)
			z := redis.Z{
				Score:  datas[i],
				Member: strconv.FormatInt(sequentialTime, 10),
			}
			redisZs = append(redisZs, z)
		}
		log.Printf("启动数据写入程序, Redis Key: %s, 写入数据量: %d\n", key, len(redisZs))
		pipe.ZAdd(rCancelCtx, key, redisZs...)
		_, err = pipe.Exec(rCancelCtx)
		if err != nil {
			log.Printf("redis pipeline execution failed: %v\n", err)
		}
	}
}
// selectRandomType randomly decides (50/50) whether a measurement point gets
// an all-in-range batch (true) or one with anomaly segments (false).
func selectRandomType() bool {
	return rand.Intn(2) != 0
}

View File

@ -1,449 +0,0 @@
// Package main implement redis test data injection
package main
import (
"context"
"fmt"
"log"
"math/rand"
"os"
"os/signal"
"strconv"
"syscall"
"time"
"modelRT/deploy/redis-test-data/util"
"modelRT/orm"
redis "github.com/redis/go-redis/v9"
"gorm.io/driver/postgres"
"gorm.io/gorm"
)
// Redis connection settings for the continuous simulator.
const (
	redisAddr = "localhost:6379"
)

// globalRedisClient is the shared Redis client, closed on shutdown.
var globalRedisClient *redis.Client
// outlierConfig controls how anomaly segments are injected into generated data.
type outlierConfig struct {
	Enabled      bool    // whether to inject anomaly segments at all
	Count        int     // number of segments (0 = random 1-3)
	MinLength    int     // minimum contiguous segment length
	MaxLength    int     // maximum contiguous segment length
	Intensity    float64 // excursion strength (1.0 = slight overshoot, 2.0 = pronounced)
	Distribution string  // "both" above and below, "upper" only above, "lower" only below
}
// generateFloatSliceWithOutliers builds a slice of simulated values around
// baseValue and, when enabled in cfg, overwrites randomly chosen segments
// with out-of-range values.
//
//	baseValue: center value of the series
//	changes: flat pairs [minChange1, maxChange1, minChange2, maxChange2, ...]
//	size: number of samples to generate
//	variationType: variation mode forwarded to the base generator
//	cfg: outlier-segment configuration
func generateFloatSliceWithOutliers(baseValue float64, changes []float64, size int, variationType string, cfg outlierConfig) ([]float64, error) {
	data, err := generateFloatSlice(baseValue, changes, size, variationType)
	if err != nil {
		return nil, err
	}
	if !cfg.Enabled {
		return data, nil
	}
	return insertOutliers(data, baseValue, changes, cfg), nil
}
// insertOutliers overwrites randomly placed segments of data with values
// outside the [minBound, maxBound] envelope derived from baseValue and
// changes. Returns data unchanged when it is empty or outliers are disabled.
func insertOutliers(data []float64, baseValue float64, changes []float64, config outlierConfig) []float64 {
	if len(data) == 0 || !config.Enabled {
		return data
	}
	// overall bounds implied by the configured change ranges
	minBound, maxBound := getChangeBounds(baseValue, changes)
	// TODO delete
	log.Printf("获取变化范围的边界,min:%.4f,max:%.4f\n", minBound, maxBound)
	// resolve segment count: 0 means "random 1-3 segments"
	outlierCount := config.Count
	if outlierCount == 0 {
		outlierCount = rand.Intn(3) + 1
	}
	// cap the count so the segments (plus spacing) can actually fit
	maxPossibleOutliers := len(data) / (config.MinLength + 10)
	if outlierCount > maxPossibleOutliers {
		outlierCount = maxPossibleOutliers
	}
	// choose non-overlapping segment positions
	segments := generateOutlierSegments(len(data), config.MinLength, config.MaxLength, outlierCount, config.Distribution)
	// TODO remove debug output
	log.Printf("生成异常段位置:%+v\n", segments)
	// overwrite each chosen segment with out-of-bounds values
	for _, segment := range segments {
		data = insertOutlierSegment(data, segment, minBound, maxBound, config)
	}
	return data
}
// getChangeBounds returns the overall [min, max] envelope implied by
// baseValue plus every normalized change range; with no changes configured
// it falls back to baseValue ± 10.
func getChangeBounds(baseValue float64, changes []float64) (minBound, maxBound float64) {
	if len(changes) == 0 {
		return baseValue - 10, baseValue + 10
	}
	ranges := normalizeRanges(changes)
	minBound = baseValue + ranges[0][0]
	maxBound = baseValue + ranges[0][1]
	for _, r := range ranges[1:] {
		minBound = min(minBound, baseValue+r[0])
		maxBound = max(maxBound, baseValue+r[1])
	}
	return minBound, maxBound
}
// OutlierSegment describes one contiguous run of out-of-bounds samples.
type OutlierSegment struct {
	Start  int    // index of the first sample in the run
	Length int    // number of samples in the run
	Type   string // "upper" for above-range values, "lower" for below-range
}
// generateOutlierSegments picks up to count non-overlapping segments inside
// [0, totalSize) with lengths in [minLength, maxLength]. Each segment's
// direction follows distribution ("upper", "lower", or random for "both").
// Placement is best-effort: after 10 failed attempts a segment is dropped,
// so fewer than count segments may be returned.
func generateOutlierSegments(totalSize, minLength, maxLength, count int, distribution string) []OutlierSegment {
	if count == 0 {
		return nil
	}
	segments := make([]OutlierSegment, 0, count)
	usedPositions := make(map[int]bool)
	for i := 0; i < count; i++ {
		// retry a few times to find a spot that does not overlap prior segments
		for attempt := 0; attempt < 10; attempt++ {
			length := rand.Intn(maxLength-minLength+1) + minLength
			start := rand.Intn(totalSize - length)
			// reject positions overlapping an already-placed segment
			overlap := false
			for pos := start; pos < start+length; pos++ {
				if usedPositions[pos] {
					overlap = true
					break
				}
			}
			if !overlap {
				// claim the chosen positions
				for pos := start; pos < start+length; pos++ {
					usedPositions[pos] = true
				}
				// choose the excursion direction per the distribution setting
				var outlierType string
				switch distribution {
				case "upper":
					outlierType = "upper"
				case "lower":
					outlierType = "lower"
				case "both":
					fallthrough
				default:
					if rand.Float64() < 0.5 {
						outlierType = "upper"
					} else {
						outlierType = "lower"
					}
				}
				segments = append(segments, OutlierSegment{
					Start:  start,
					Length: length,
					Type:   outlierType,
				})
				break
			}
		}
	}
	return segments
}
// insertOutlierSegment overwrites data[segment.Start : Start+Length] with
// values pushed beyond the [minBound, maxBound] envelope. The whole segment
// shares one direction: segment.Type when set, otherwise derived from
// config.Distribution (random for "both"/unknown).
func insertOutlierSegment(data []float64, segment OutlierSegment, minBound, maxBound float64, config outlierConfig) []float64 {
	rangeWidth := maxBound - minBound
	// resolve the excursion direction for the whole segment
	outlierType := segment.Type
	if outlierType == "" {
		switch config.Distribution {
		case "upper":
			outlierType = "upper"
		case "lower":
			outlierType = "lower"
		default:
			if rand.Float64() < 0.5 {
				outlierType = "upper"
			} else {
				outlierType = "lower"
			}
		}
	}
	// push each sample outside the envelope by a fraction of its width:
	// rangeWidth * (0.3 .. 0.3+Intensity)
	for i := segment.Start; i < segment.Start+segment.Length && i < len(data); i++ {
		excess := rangeWidth * (0.3 + rand.Float64()*config.Intensity)
		if outlierType == "upper" {
			data[i] = maxBound + excess
		} else {
			data[i] = minBound - excess
		}
	}
	return data
}
// detectOutlierSegments scans data for maximal runs of same-direction
// out-of-bounds values (relative to the envelope from getChangeBounds) and
// returns those at least minSegmentLength long. A run that switches
// direction is split at the switch point.
func detectOutlierSegments(data []float64, baseValue float64, changes []float64, minSegmentLength int) []OutlierSegment {
	if len(data) == 0 {
		return nil
	}
	minBound, maxBound := getChangeBounds(baseValue, changes)
	var segments []OutlierSegment
	// currentStart == -1 means "not currently inside an outlier run"
	currentStart := -1
	currentType := ""
	for i, value := range data {
		isOutlier := value > maxBound || value < minBound
		if isOutlier {
			outlierType := "upper"
			if value < minBound {
				outlierType = "lower"
			}
			if currentStart == -1 {
				// open a new run
				currentStart = i
				currentType = outlierType
			} else if currentType != outlierType {
				// direction flipped: close the current run, start another
				if i-currentStart >= minSegmentLength {
					segments = append(segments, OutlierSegment{
						Start:  currentStart,
						Length: i - currentStart,
						Type:   currentType,
					})
				}
				currentStart = i
				currentType = outlierType
			}
		} else {
			if currentStart != -1 {
				// back in range: close the run if it is long enough
				if i-currentStart >= minSegmentLength {
					segments = append(segments, OutlierSegment{
						Start:  currentStart,
						Length: i - currentStart,
						Type:   currentType,
					})
				}
				currentStart = -1
				currentType = ""
			}
		}
	}
	// a run may extend to the very end of the data
	if currentStart != -1 && len(data)-currentStart >= minSegmentLength {
		segments = append(segments, OutlierSegment{
			Start:  currentStart,
			Length: len(data) - currentStart,
			Type:   currentType,
		})
	}
	return segments
}
// generateFloatSlice produces size random samples around baseValue using the
// given change ranges. The variationType parameter is currently unused and
// the returned error is always nil; both are kept for interface stability.
func generateFloatSlice(baseValue float64, changes []float64, size int, variationType string) ([]float64, error) {
	return generateRandomData(baseValue, changes, size), nil
}
// normalizeRanges converts a flat [lo1, hi1, lo2, hi2, ...] slice into
// ordered [2]float64 pairs, swapping any pair given in reverse order.
//
// BUG FIX: the original indexed changes[i+1] unconditionally, panicking with
// an out-of-range error on odd-length input (e.g. the single-entry Changes
// of an edge-trigger point); a trailing unpaired element is now ignored.
func normalizeRanges(changes []float64) [][2]float64 {
	ranges := make([][2]float64, 0, len(changes)/2)
	for i := 0; i+1 < len(changes); i += 2 {
		lo, hi := changes[i], changes[i+1]
		if lo > hi {
			lo, hi = hi, lo
		}
		ranges = append(ranges, [2]float64{lo, hi})
	}
	return ranges
}
// generateRandomData returns size samples; each sample picks one change
// range uniformly at random and offsets baseValue by a uniform value inside
// that range.
func generateRandomData(baseValue float64, changes []float64, size int) []float64 {
	ranges := normalizeRanges(changes)
	data := make([]float64, size)
	for i := range data {
		r := ranges[rand.Intn(len(ranges))]
		data[i] = baseValue + r[0] + rand.Float64()*(r[1]-r[0])
	}
	return data
}
// simulateDataWrite periodically (every 3s) generates a batch of simulated
// samples for redisKey and appends them to its Redis ZSet (member =
// nanosecond timestamp, score = value) until ctx is cancelled.
//
// NOTE(review): the TI (edge-trigger) and empty-changes cases `return`
// rather than `continue`, permanently stopping this writer goroutine —
// confirm that is intended.
func simulateDataWrite(ctx context.Context, rdb *redis.Client, redisKey string, config outlierConfig, measInfo util.CalculationResult) {
	log.Printf("启动数据写入程序, Redis Key: %s, 基准值: %.4f, 变化范围: %+v\n", redisKey, measInfo.BaseValue, measInfo.Changes)
	ticker := time.NewTicker(3 * time.Second)
	defer ticker.Stop()
	// one pipeline instance is reused across ticks
	pipe := rdb.Pipeline()
	for {
		select {
		case <-ctx.Done():
			log.Printf("\n[%s] 写入程序已停止\n", redisKey)
			return
		case <-ticker.C:
			minBound, maxBound := getChangeBounds(measInfo.BaseValue, measInfo.Changes)
			log.Printf("计算边界: [%.4f, %.4f]\n", minBound, maxBound)
			// dispatch on the baseline type derived from the event plan
			switch measInfo.BaseType {
			case "TI":
				// edge-trigger point: no anomaly data is generated
				log.Printf("边沿触发类型,跳过异常数据生成\n")
				return
			case "TE":
				// threshold point: generate data containing anomaly segments
				if len(measInfo.Changes) == 0 {
					log.Printf("无变化范围数据,跳过\n")
					return
				}
				// tune the outlier config to the number of configured limits
				if len(measInfo.Changes) == 2 {
					// up/down limits only
					config.Distribution = "both"
				} else if len(measInfo.Changes) == 4 {
					// up/down plus warning upup/downdown limits
					config.Distribution = "both"
					config.Intensity = 2.0 // stronger excursions
				}
				// generate a batch that includes anomaly segments
				data, err := generateFloatSliceWithOutliers(
					measInfo.BaseValue,
					measInfo.Changes,
					measInfo.Size,
					"random",
					config,
				)
				if err != nil {
					log.Printf("生成异常数据失败:%v\n", err)
					continue
				}
				segments := detectOutlierSegments(data, measInfo.BaseValue, measInfo.Changes, config.MinLength)
				log.Printf("检测到异常段数量:%d\n", len(segments))
				for i, segment := range segments {
					log.Printf("异常段%d: 位置[%d-%d], 长度=%d, 类型=%s\n",
						i+1, segment.Start, segment.Start+segment.Length-1, segment.Length, segment.Type)
				}
				redisZs := make([]redis.Z, 0, len(data))
				for i := range len(data) {
					z := redis.Z{
						Score:  data[i],
						Member: strconv.FormatInt(time.Now().UnixNano(), 10),
					}
					redisZs = append(redisZs, z)
				}
				pipe.ZAdd(ctx, redisKey, redisZs...)
				_, err = pipe.Exec(ctx)
				if err != nil {
					log.Printf("redis pipeline execution failed: %v", err)
				}
				log.Printf("生成 redis 实时数据成功\n")
			}
		}
	}
}
// gracefulShutdown closes the shared Redis client when one exists, waits
// briefly so pending log output can flush, then terminates the process.
func gracefulShutdown() {
	if globalRedisClient != nil {
		err := globalRedisClient.Close()
		if err != nil {
			log.Printf("关闭 Redis 客户端失败:%v", err)
		} else {
			log.Println("关闭 Redis 客户端成功")
		}
	}
	time.Sleep(500 * time.Millisecond)
	os.Exit(0)
}
// main loads measurement definitions from Postgres, then starts one
// simulateDataWrite goroutine per measurement point that streams simulated
// data (with injected anomaly segments) into Redis until SIGINT/SIGTERM.
func main() {
	rootCtx := context.Background()
	pgURI := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s", "192.168.1.101", 5432, "postgres", "coslight", "demo")
	postgresDBClient, err := gorm.Open(postgres.Open(pgURI))
	if err != nil {
		panic(err)
	}
	defer func() {
		sqlDB, err := postgresDBClient.DB()
		if err != nil {
			panic(err)
		}
		sqlDB.Close()
	}()
	cancelCtx, cancel := context.WithTimeout(rootCtx, 5*time.Second)
	defer cancel()
	var measurements []orm.Measurement
	result := postgresDBClient.WithContext(cancelCtx).Find(&measurements)
	if result.Error != nil {
		panic(result.Error)
	}
	log.Println("总共读取到测量点数量:", len(measurements))
	measInfos := util.ProcessMeasurements(measurements)
	// outlier-segment generation parameters shared by every writer goroutine
	outlierConfig := outlierConfig{
		Enabled:      true,   // inject anomaly segments
		Count:        2,      // number of anomaly segments
		MinLength:    10,     // minimum contiguous segment length
		MaxLength:    15,     // maximum contiguous segment length
		Intensity:    1.5,    // how far outside the bounds values go
		Distribution: "both", // anomalies both above and below the envelope
	}
	globalRedisClient = util.InitRedisClient(redisAddr)
	rCancelCtx, cancel := context.WithCancel(rootCtx)
	defer cancel()
	for key, measInfo := range measInfos {
		go simulateDataWrite(rCancelCtx, globalRedisClient, key, outlierConfig, measInfo)
	}
	// block until an interrupt/terminate signal, then shut down cleanly
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
	<-sigChan
	gracefulShutdown()
}

View File

@ -1,266 +0,0 @@
// Package util provide some utility fun
package util
import (
"fmt"
"modelRT/orm"
)
// CalculationResult holds the derived baseline and variation envelope for a
// single measurement point.
type CalculationResult struct {
	BaseValue float64   // baseline (midpoint of the configured limits, or the edge level)
	Changes   []float64 // offsets from BaseValue for each configured limit
	Size      int       // number of samples to generate for this point
	BaseType  string    // "TI" for edge-triggered points, "TE" for threshold points
	Message   string    // human-readable description of how BaseValue was derived
}
// ProcessMeasurements filters measurements down to those whose DataSource is
// of type 1, builds the Redis key "station:device:phasor:channel" from the
// io_address, and derives the baseline/variation envelope from the event
// plan's cause section. Entries with missing or malformed fields are
// silently skipped.
func ProcessMeasurements(measurements []orm.Measurement) map[string]CalculationResult {
	results := make(map[string]CalculationResult, len(measurements))
	for _, measurement := range measurements {
		// DataSource must be present...
		if measurement.DataSource == nil {
			continue
		}
		// ...and declare a type field
		dataType, typeExists := measurement.DataSource["type"]
		if !typeExists {
			continue
		}
		// accept the numeric representations JSON decoding may produce
		var typeValue int
		switch v := dataType.(type) {
		case int:
			typeValue = v
		case float64:
			typeValue = int(v)
		case int64:
			typeValue = int(v)
		default:
			continue
		}
		if typeValue != 1 {
			continue
		}
		// io_address carries the station/device/channel triple for the key
		ioAddressRaw, ioExists := measurement.DataSource["io_address"]
		if !ioExists {
			continue
		}
		ioAddress, ok := ioAddressRaw.(map[string]any)
		if !ok {
			continue
		}
		station, _ := ioAddress["station"].(string)
		device, _ := ioAddress["device"].(string)
		channel, _ := ioAddress["channel"].(string)
		result := fmt.Sprintf("%s:%s:phasor:%s", station, device, channel)
		if measurement.EventPlan == nil {
			continue
		}
		causeValue, causeExist := measurement.EventPlan["cause"]
		if !causeExist {
			continue
		}
		causeMap, ok := causeValue.(map[string]any)
		if !ok {
			continue
		}
		calResult, err := calculateBaseValueEnhanced(causeMap)
		if err != nil {
			continue
		}
		calResult.Size = measurement.Size
		results[result] = calResult
	}
	return results
}
// calculateBaseValueEnhanced derives a CalculationResult from an event-plan
// cause map. An "edge" entry produces an edge-trigger (TI) result; otherwise
// the presence of up/down and/or upup/downdown limits produces a threshold
// (TE) result whose BaseValue is the midpoint of the chosen limit pair and
// whose Changes are per-limit offsets from that midpoint.
func calculateBaseValueEnhanced(data map[string]any) (CalculationResult, error) {
	result := CalculationResult{}
	if edge, exists := data["edge"]; exists {
		value, err := calculateEdgeValue(edge)
		if err != nil {
			return result, err
		}
		// edge-trigger: Changes holds the single trigger level
		if edge == "raising" {
			result.Changes = []float64{1.0}
		} else {
			result.Changes = []float64{0.0}
		}
		result.BaseValue = value
		result.BaseType = "TI"
		result.Message = "边沿触发基准值"
		return result, nil
	}
	hasUpDown := HasKeys(data, "up", "down")
	hasUpUpDownDown := HasKeys(data, "upup", "downdown")
	result.BaseType = "TE"
	switch {
	case hasUpDown && hasUpUpDownDown:
		// both limit pairs configured: baseline from up/down, offsets for all four
		value, err := calculateAverage(data, "up", "down")
		if err != nil {
			return result, err
		}
		result.BaseValue = value
		result.Changes, err = calculateChanges(data, value, false, 4)
		if err != nil {
			return result, err
		}
		result.Message = "上下限基准值(忽略预警上上下下限)"
		return result, nil
	case hasUpDown:
		// only up/down limits configured
		value, err := calculateAverage(data, "up", "down")
		if err != nil {
			return result, err
		}
		result.BaseValue = value
		result.Changes, err = calculateChanges(data, value, false, 2)
		if err != nil {
			return result, err
		}
		result.Message = "上下限基准值"
		return result, nil
	case hasUpUpDownDown:
		// only warning upup/downdown limits configured
		value, err := calculateAverage(data, "upup", "downdown")
		if err != nil {
			return result, err
		}
		result.BaseValue = value
		result.Changes, err = calculateChanges(data, value, true, 2)
		if err != nil {
			return result, err
		}
		result.Message = "上上下下限基准值"
		return result, nil
	default:
		return result, fmt.Errorf("不支持的数据结构: %v", data)
	}
}
// calculateAverage returns the midpoint of the numeric values stored under
// key1 and key2 in data.
func calculateAverage(data map[string]any, key1, key2 string) (float64, error) {
	first, err := getFloatValue(data, key1)
	if err != nil {
		return 0, err
	}
	second, err := getFloatValue(data, key2)
	if err != nil {
		return 0, err
	}
	return (first + second) / 2.0, nil
}
// calculateChanges returns the offsets of the configured limit values from
// baseValue. limitNum selects which limits are read: 2 reads up/down (or
// upup/downdown when maxLimt is true), 4 reads all four; any other value
// yields an empty slice.
func calculateChanges(data map[string]any, baseValue float64, maxLimt bool, limitNum int) ([]float64, error) {
	var keys []string
	switch limitNum {
	case 2:
		if maxLimt {
			keys = []string{"upup", "downdown"}
		} else {
			keys = []string{"up", "down"}
		}
	case 4:
		keys = []string{"up", "down", "upup", "downdown"}
	}
	results := make([]float64, 0, limitNum)
	for _, key := range keys {
		val, err := getFloatValue(data, key)
		if err != nil {
			return nil, err
		}
		results = append(results, val-baseValue)
	}
	return results, nil
}
// getFloatValue reads key from data and coerces float64/int/float32 values
// to float64; a missing key or any other value type yields an error.
func getFloatValue(data map[string]any, key string) (float64, error) {
	raw, exists := data[key]
	if !exists {
		return 0, fmt.Errorf("缺少必需的键:%s", key)
	}
	switch num := raw.(type) {
	case int:
		return float64(num), nil
	case float32:
		return float64(num), nil
	case float64:
		return num, nil
	}
	return 0, fmt.Errorf("键 %s 的值类型错误,期望数字类型,得到 %T", key, raw)
}
// HasKeys reports whether every key in keys is present in data.
func HasKeys(data map[string]any, keys ...string) bool {
	for _, k := range keys {
		_, present := data[k]
		if !present {
			return false
		}
	}
	return true
}
// calculateEdgeValue maps an edge-trigger descriptor to its numeric level:
// "raising" -> 1.0, "falling" -> 0.0. Any other value, or a non-string
// type, is an error.
func calculateEdgeValue(edge any) (float64, error) {
	edgeStr, ok := edge.(string)
	if !ok {
		return 0, fmt.Errorf("edge 字段类型错误,期望 string,得到 %T", edge)
	}
	switch edgeStr {
	case "falling":
		return 0.0, nil
	case "raising":
		return 1.0, nil
	}
	return 0, fmt.Errorf("不支持的 edge 值: %s", edgeStr)
}

View File

@ -1,27 +0,0 @@
// Package util provide some utility fun
package util
import (
"context"
"time"
"github.com/redis/go-redis/v9"
)
// InitRedisClient creates a Redis client for redisAddr and verifies
// connectivity with a PING bounded by a 5-second timeout.
//
// NOTE(review): on a failed PING the error is discarded and nil is
// returned; callers that do not nil-check will panic on first use.
// Consider returning (client, error) instead — TODO confirm with callers.
func InitRedisClient(redisAddr string) *redis.Client {
	rdb := redis.NewClient(&redis.Options{
		Addr:     redisAddr,
		Password: "",
		DB:       0,
	})
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	_, err := rdb.Ping(ctx).Result()
	if err != nil {
		// failure is signaled only via nil (see NOTE above)
		return nil
	}
	return rdb
}

View File

@ -1,36 +0,0 @@
// Package diagram provide diagram data structure and operation
package diagram
import (
"context"
"github.com/redis/go-redis/v9"
)
// RedisClient wraps a plain Redis client for read paths that do not require
// distributed locking.
type RedisClient struct {
	Client *redis.Client
}
// NewRedisClient returns a RedisClient backed by the process-wide Redis
// client instance.
func NewRedisClient() *RedisClient {
	return &RedisClient{
		Client: GetRedisClientInstance(),
	}
}
// QueryByZRangeByLex returns entries with scores from the sorted set stored
// at key, by ascending rank from 0 through size inclusive (so up to size+1
// elements when available).
//
// NOTE(review): despite the name, ByLex is false here, so this issues a
// plain rank-based ZRANGE, not ZRANGEBYLEX — confirm whether the name or
// the flag is the intended one.
func (rc *RedisClient) QueryByZRangeByLex(ctx context.Context, key string, size int64) ([]redis.Z, error) {
	client := rc.Client
	args := redis.ZRangeArgs{
		Key:     key,
		Start:   0,
		Stop:    size, // rank range is inclusive at both ends
		ByScore: false,
		ByLex:   false,
		Rev:     false,
		Offset:  0,
		Count:   0,
	}
	return client.ZRangeArgsWithScores(ctx, args).Result()
}

View File

@ -14,7 +14,6 @@ import (
// RedisSet defines the encapsulation struct of redis hash type // RedisSet defines the encapsulation struct of redis hash type
type RedisSet struct { type RedisSet struct {
ctx context.Context ctx context.Context
key string
rwLocker *locker.RedissionRWLocker rwLocker *locker.RedissionRWLocker
storageClient *redis.Client storageClient *redis.Client
logger *zap.Logger logger *zap.Logger
@ -25,7 +24,6 @@ func NewRedisSet(ctx context.Context, setKey string, lockLeaseTime uint64, needR
token := ctx.Value("client_token").(string) token := ctx.Value("client_token").(string)
return &RedisSet{ return &RedisSet{
ctx: ctx, ctx: ctx,
key: setKey,
rwLocker: locker.InitRWLocker(setKey, token, lockLeaseTime, needRefresh), rwLocker: locker.InitRWLocker(setKey, token, lockLeaseTime, needRefresh),
storageClient: GetRedisClientInstance(), storageClient: GetRedisClientInstance(),
logger: logger.GetLoggerInstance(), logger: logger.GetLoggerInstance(),
@ -33,34 +31,34 @@ func NewRedisSet(ctx context.Context, setKey string, lockLeaseTime uint64, needR
} }
// SADD define func of add redis set by members // SADD define func of add redis set by members
func (rs *RedisSet) SADD(members ...any) error { func (rs *RedisSet) SADD(setKey string, members ...interface{}) error {
err := rs.rwLocker.WLock(rs.ctx) err := rs.rwLocker.WLock(rs.ctx)
if err != nil { if err != nil {
logger.Error(rs.ctx, "lock wLock by setKey failed", "set_key", rs.key, "error", err) logger.Error(rs.ctx, "lock wLock by setKey failed", "set_key", setKey, "error", err)
return err return err
} }
defer rs.rwLocker.UnWLock(rs.ctx) defer rs.rwLocker.UnWLock(rs.ctx)
err = rs.storageClient.SAdd(rs.ctx, rs.key, members).Err() err = rs.storageClient.SAdd(rs.ctx, setKey, members).Err()
if err != nil { if err != nil {
logger.Error(rs.ctx, "add set by memebers failed", "set_key", rs.key, "members", members, "error", err) logger.Error(rs.ctx, "add set by memebers failed", "set_key", setKey, "members", members, "error", err)
return err return err
} }
return nil return nil
} }
// SREM define func of remove the specified members from redis set by key // SREM define func of remove the specified members from redis set by key
func (rs *RedisSet) SREM(members ...any) error { func (rs *RedisSet) SREM(setKey string, members ...interface{}) error {
err := rs.rwLocker.WLock(rs.ctx) err := rs.rwLocker.WLock(rs.ctx)
if err != nil { if err != nil {
logger.Error(rs.ctx, "lock wLock by setKey failed", "set_key", rs.key, "error", err) logger.Error(rs.ctx, "lock wLock by setKey failed", "set_key", setKey, "error", err)
return err return err
} }
defer rs.rwLocker.UnWLock(rs.ctx) defer rs.rwLocker.UnWLock(rs.ctx)
count, err := rs.storageClient.SRem(rs.ctx, rs.key, members).Result() count, err := rs.storageClient.SRem(rs.ctx, setKey, members).Result()
if err != nil || count != int64(len(members)) { if err != nil || count != int64(len(members)) {
logger.Error(rs.ctx, "rem members from set failed", "set_key", rs.key, "members", members, "error", err) logger.Error(rs.ctx, "rem members from set failed", "set_key", setKey, "members", members, "error", err)
return fmt.Errorf("rem members from set failed:%w", err) return fmt.Errorf("rem members from set failed:%w", err)
} }
@ -68,27 +66,27 @@ func (rs *RedisSet) SREM(members ...any) error {
} }
// SMembers define func of get all memebers from redis set by key // SMembers define func of get all memebers from redis set by key
func (rs *RedisSet) SMembers() ([]string, error) { func (rs *RedisSet) SMembers(setKey string) ([]string, error) {
err := rs.rwLocker.RLock(rs.ctx) err := rs.rwLocker.RLock(rs.ctx)
if err != nil { if err != nil {
logger.Error(rs.ctx, "lock rLock by setKey failed", "set_key", rs.key, "error", err) logger.Error(rs.ctx, "lock rLock by setKey failed", "set_key", setKey, "error", err)
return nil, err return nil, err
} }
defer rs.rwLocker.UnRLock(rs.ctx) defer rs.rwLocker.UnRLock(rs.ctx)
result, err := rs.storageClient.SMembers(rs.ctx, rs.key).Result() result, err := rs.storageClient.SMembers(rs.ctx, setKey).Result()
if err != nil { if err != nil {
logger.Error(rs.ctx, "get all set field by hash key failed", "set_key", rs.key, "error", err) logger.Error(rs.ctx, "get all set field by hash key failed", "set_key", setKey, "error", err)
return nil, err return nil, err
} }
return result, nil return result, nil
} }
// SIsMember define func of determine whether an member is in set by key // SIsMember define func of determine whether an member is in set by key
func (rs *RedisSet) SIsMember(member any) (bool, error) { func (rs *RedisSet) SIsMember(setKey string, member interface{}) (bool, error) {
result, err := rs.storageClient.SIsMember(rs.ctx, rs.key, member).Result() result, err := rs.storageClient.SIsMember(rs.ctx, setKey, member).Result()
if err != nil { if err != nil {
logger.Error(rs.ctx, "get all set field by hash key failed", "set_key", rs.key, "error", err) logger.Error(rs.ctx, "get all set field by hash key failed", "set_key", setKey, "error", err)
return false, err return false, err
} }
return result, nil return result, nil

View File

@ -20,8 +20,7 @@ type RedisZSet struct {
} }
// NewRedisZSet define func of new redis zset instance // NewRedisZSet define func of new redis zset instance
func NewRedisZSet(ctx context.Context, key string, lockLeaseTime uint64, needRefresh bool) *RedisZSet { func NewRedisZSet(ctx context.Context, key string, token string, lockLeaseTime uint64, needRefresh bool) *RedisZSet {
token := ctx.Value("client_token").(string)
return &RedisZSet{ return &RedisZSet{
ctx: ctx, ctx: ctx,
rwLocker: locker.InitRWLocker(key, token, lockLeaseTime, needRefresh), rwLocker: locker.InitRWLocker(key, token, lockLeaseTime, needRefresh),

View File

@ -23,70 +23,6 @@ const docTemplate = `{
"host": "{{.Host}}", "host": "{{.Host}}",
"basePath": "{{.BasePath}}", "basePath": "{{.BasePath}}",
"paths": { "paths": {
"/data/realtime": {
"get": {
"description": "根据用户输入的组件token,从 dataRT 服务中持续获取测点实时数据",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"RealTime Component"
],
"summary": "获取实时测点数据",
"parameters": [
{
"type": "string",
"description": "测量点唯一标识符 (e.g.grid_1:zone_1:station_1:transformfeeder1_220.I_A_rms)",
"name": "token",
"in": "query",
"required": true
},
{
"type": "integer",
"description": "查询起始时间 (Unix时间戳, e.g., 1761008266)",
"name": "begin",
"in": "query",
"required": true
},
{
"type": "integer",
"description": "查询结束时间 (Unix时间戳, e.g., 1761526675)",
"name": "end",
"in": "query",
"required": true
}
],
"responses": {
"200": {
"description": "返回实时数据成功",
"schema": {
"allOf": [
{
"$ref": "#/definitions/network.SuccessResponse"
},
{
"type": "object",
"properties": {
"payload": {
"$ref": "#/definitions/network.RealTimeDataPayload"
}
}
}
]
}
},
"400": {
"description": "返回实时数据失败",
"schema": {
"$ref": "#/definitions/network.FailureResponse"
}
}
}
}
},
"/measurement/recommend": { "/measurement/recommend": {
"get": { "get": {
"description": "根据用户输入的字符串,从 Redis 中查询可能的测量点或结构路径,并提供推荐列表。", "description": "根据用户输入的字符串,从 Redis 中查询可能的测量点或结构路径,并提供推荐列表。",
@ -228,15 +164,6 @@ const docTemplate = `{
} }
} }
}, },
"network.RealTimeDataPayload": {
"type": "object",
"properties": {
"sub_pos": {
"description": "TODO 增加example tag",
"type": "object"
}
}
},
"network.SuccessResponse": { "network.SuccessResponse": {
"type": "object", "type": "object",
"properties": { "properties": {

View File

@ -17,70 +17,6 @@
"host": "localhost:8080", "host": "localhost:8080",
"basePath": "/api/v1", "basePath": "/api/v1",
"paths": { "paths": {
"/data/realtime": {
"get": {
"description": "根据用户输入的组件token,从 dataRT 服务中持续获取测点实时数据",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"RealTime Component"
],
"summary": "获取实时测点数据",
"parameters": [
{
"type": "string",
"description": "测量点唯一标识符 (e.g.grid_1:zone_1:station_1:transformfeeder1_220.I_A_rms)",
"name": "token",
"in": "query",
"required": true
},
{
"type": "integer",
"description": "查询起始时间 (Unix时间戳, e.g., 1761008266)",
"name": "begin",
"in": "query",
"required": true
},
{
"type": "integer",
"description": "查询结束时间 (Unix时间戳, e.g., 1761526675)",
"name": "end",
"in": "query",
"required": true
}
],
"responses": {
"200": {
"description": "返回实时数据成功",
"schema": {
"allOf": [
{
"$ref": "#/definitions/network.SuccessResponse"
},
{
"type": "object",
"properties": {
"payload": {
"$ref": "#/definitions/network.RealTimeDataPayload"
}
}
}
]
}
},
"400": {
"description": "返回实时数据失败",
"schema": {
"$ref": "#/definitions/network.FailureResponse"
}
}
}
}
},
"/measurement/recommend": { "/measurement/recommend": {
"get": { "get": {
"description": "根据用户输入的字符串,从 Redis 中查询可能的测量点或结构路径,并提供推荐列表。", "description": "根据用户输入的字符串,从 Redis 中查询可能的测量点或结构路径,并提供推荐列表。",
@ -222,15 +158,6 @@
} }
} }
}, },
"network.RealTimeDataPayload": {
"type": "object",
"properties": {
"sub_pos": {
"description": "TODO 增加example tag",
"type": "object"
}
}
},
"network.SuccessResponse": { "network.SuccessResponse": {
"type": "object", "type": "object",
"properties": { "properties": {

View File

@ -34,12 +34,6 @@ definitions:
example: trans example: trans
type: string type: string
type: object type: object
network.RealTimeDataPayload:
properties:
sub_pos:
description: TODO 增加example tag
type: object
type: object
network.SuccessResponse: network.SuccessResponse:
properties: properties:
code: code:
@ -64,46 +58,6 @@ info:
title: ModelRT 实时模型服务 API 文档 title: ModelRT 实时模型服务 API 文档
version: "1.0" version: "1.0"
paths: paths:
/data/realtime:
get:
consumes:
- application/json
description: 根据用户输入的组件token,从 dataRT 服务中持续获取测点实时数据
parameters:
- description: 测量点唯一标识符 (e.g.grid_1:zone_1:station_1:transformfeeder1_220.I_A_rms)
in: query
name: token
required: true
type: string
- description: 查询起始时间 (Unix时间戳, e.g., 1761008266)
in: query
name: begin
required: true
type: integer
- description: 查询结束时间 (Unix时间戳, e.g., 1761526675)
in: query
name: end
required: true
type: integer
produces:
- application/json
responses:
"200":
description: 返回实时数据成功
schema:
allOf:
- $ref: '#/definitions/network.SuccessResponse'
- properties:
payload:
$ref: '#/definitions/network.RealTimeDataPayload'
type: object
"400":
description: 返回实时数据失败
schema:
$ref: '#/definitions/network.FailureResponse'
summary: 获取实时测点数据
tags:
- RealTime Component
/measurement/recommend: /measurement/recommend:
get: get:
consumes: consumes:

3
go.mod
View File

@ -1,11 +1,12 @@
module modelRT module modelRT
go 1.24 go 1.24.1
require ( require (
github.com/DATA-DOG/go-sqlmock v1.5.2 github.com/DATA-DOG/go-sqlmock v1.5.2
github.com/RediSearch/redisearch-go/v2 v2.1.1 github.com/RediSearch/redisearch-go/v2 v2.1.1
github.com/bitly/go-simplejson v0.5.1 github.com/bitly/go-simplejson v0.5.1
github.com/confluentinc/confluent-kafka-go v1.9.2
github.com/gin-gonic/gin v1.10.0 github.com/gin-gonic/gin v1.10.0
github.com/gofrs/uuid v4.4.0+incompatible github.com/gofrs/uuid v4.4.0+incompatible
github.com/gomodule/redigo v1.8.9 github.com/gomodule/redigo v1.8.9

187
go.sum
View File

@ -1,3 +1,6 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
@ -6,6 +9,10 @@ github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
github.com/RediSearch/redisearch-go/v2 v2.1.1 h1:cCn3i40uLsVD8cxwrdrGfhdAgbR5Cld9q11eYyVOwpM= github.com/RediSearch/redisearch-go/v2 v2.1.1 h1:cCn3i40uLsVD8cxwrdrGfhdAgbR5Cld9q11eYyVOwpM=
github.com/RediSearch/redisearch-go/v2 v2.1.1/go.mod h1:Uw93Wi97QqAsw1DwbQrhVd88dBorGTfSuCS42zfh1iA= github.com/RediSearch/redisearch-go/v2 v2.1.1/go.mod h1:Uw93Wi97QqAsw1DwbQrhVd88dBorGTfSuCS42zfh1iA=
github.com/actgardner/gogen-avro/v10 v10.1.0/go.mod h1:o+ybmVjEa27AAr35FRqU98DJu1fXES56uXniYFv4yDA=
github.com/actgardner/gogen-avro/v10 v10.2.1/go.mod h1:QUhjeHPchheYmMDni/Nx7VB0RsT/ee8YIgGY/xpEQgQ=
github.com/actgardner/gogen-avro/v9 v9.1.0/go.mod h1:nyTj6wPqDJoxM3qdnjcLv+EnMDSDFqE0qDpva2QRmKc=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/bitly/go-simplejson v0.5.1 h1:xgwPbetQScXt1gh9BmoJ6j9JMr3TElvuIyjR8pgdoow= github.com/bitly/go-simplejson v0.5.1 h1:xgwPbetQScXt1gh9BmoJ6j9JMr3TElvuIyjR8pgdoow=
github.com/bitly/go-simplejson v0.5.1/go.mod h1:YOPVLzCfwK14b4Sff3oP1AmGhI9T9Vsg84etUnlyp+Q= github.com/bitly/go-simplejson v0.5.1/go.mod h1:YOPVLzCfwK14b4Sff3oP1AmGhI9T9Vsg84etUnlyp+Q=
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
@ -17,24 +24,51 @@ github.com/bytedance/sonic v1.12.5/go.mod h1:B8Gt/XvtZ3Fqj+iSKMypzymZxw/FVwgIGKz
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/bytedance/sonic/loader v0.2.1 h1:1GgorWTqf12TA8mma4DDSbaQigE2wOgQo7iCjjJv3+E= github.com/bytedance/sonic/loader v0.2.1 h1:1GgorWTqf12TA8mma4DDSbaQigE2wOgQo7iCjjJv3+E=
github.com/bytedance/sonic/loader v0.2.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= github.com/bytedance/sonic/loader v0.2.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y= github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg= github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/confluentinc/confluent-kafka-go v1.9.2 h1:gV/GxhMBUb03tFWkN+7kdhg+zf+QUM+wVkI9zwh770Q=
github.com/confluentinc/confluent-kafka-go v1.9.2/go.mod h1:ptXNqsuDfYbAE/LBW6pnwWZElUoWxHoV8E43DCrliyo=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/frankban/quicktest v1.2.2/go.mod h1:Qh/WofXFeiAFII1aEBu529AtJo6Zg2VHscnEsbBnJ20=
github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y=
github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/gabriel-vasile/mimetype v1.4.7 h1:SKFKl7kD0RiPdbht0s7hFtjl489WcQ1VyPW8ZzUMYCA= github.com/gabriel-vasile/mimetype v1.4.7 h1:SKFKl7kD0RiPdbht0s7hFtjl489WcQ1VyPW8ZzUMYCA=
github.com/gabriel-vasile/mimetype v1.4.7/go.mod h1:GDlAgAyIRT27BhFl53XNAFtfjzOkLaF35JdEG0P7LtU= github.com/gabriel-vasile/mimetype v1.4.7/go.mod h1:GDlAgAyIRT27BhFl53XNAFtfjzOkLaF35JdEG0P7LtU=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/gzip v0.0.6 h1:NjcunTcGAj5CO1gn4N8jHOSIeRFHIbn51z6K+xaN4d4= github.com/gin-contrib/gzip v0.0.6 h1:NjcunTcGAj5CO1gn4N8jHOSIeRFHIbn51z6K+xaN4d4=
github.com/gin-contrib/gzip v0.0.6/go.mod h1:QOJlmV2xmayAjkNS2Y8NQsMneuRShOU/kjovCXNuzzk= github.com/gin-contrib/gzip v0.0.6/go.mod h1:QOJlmV2xmayAjkNS2Y8NQsMneuRShOU/kjovCXNuzzk=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
@ -63,15 +97,50 @@ github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA=
github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomodule/redigo v1.8.9 h1:Sl3u+2BI/kk+VEatbj0scLdrFhjPmbxOc1myhDP41ws= github.com/gomodule/redigo v1.8.9 h1:Sl3u+2BI/kk+VEatbj0scLdrFhjPmbxOc1myhDP41ws=
github.com/gomodule/redigo v1.8.9/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE= github.com/gomodule/redigo v1.8.9/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.2.1-0.20190312032427-6f77996f0c42/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20211008130755-947d60d73cc0/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hamba/avro v1.5.6/go.mod h1:3vNT0RLXXpFm2Tb/5KC71ZRJlOroggq1Rcitb6k4Fr8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/heetch/avro v0.3.1/go.mod h1:4xn38Oz/+hiEUTpbVfGVLfvOg0yKLlRP7Q9+gJJILgA=
github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/invopop/jsonschema v0.4.0/go.mod h1:O9uiLokuu0+MGFlyiaqtWxwqJm41/+8Nj0lD7A36YH0=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
@ -80,25 +149,42 @@ github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw=
github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A= github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jhump/gopoet v0.0.0-20190322174617-17282ff210b3/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI=
github.com/jhump/gopoet v0.1.0/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI=
github.com/jhump/goprotoc v0.5.0/go.mod h1:VrbvcYrQOrTi3i0Vf+m+oqQWk9l72mjkJCYo7UvLHRQ=
github.com/jhump/protoreflect v1.11.0/go.mod h1:U7aMIjN0NWq9swDP7xDdoMfRHb35uiuTd3Z9nFXJf5E=
github.com/jhump/protoreflect v1.12.0/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/juju/qthttptest v0.1.1/go.mod h1:aTlAv8TYaflIiTDIQYzxnl1QdPjAg8Q8qJMErpKy6A4=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY=
github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/linkedin/goavro v2.1.0+incompatible/go.mod h1:bBCwI2eGYpUI/4820s67MElg9tdeLbINjLjiM2xZFYM=
github.com/linkedin/goavro/v2 v2.10.0/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA=
github.com/linkedin/goavro/v2 v2.10.1/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA=
github.com/linkedin/goavro/v2 v2.11.1/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
@ -110,25 +196,35 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM= github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM=
github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk=
github.com/nrwiersma/avro-benchmarks v0.0.0-20210913175520-21aec48c8f76/go.mod h1:iKyFMidsk/sVYONJRE372sJuX/QTRPacU7imPqqsu7g=
github.com/panjf2000/ants/v2 v2.10.0 h1:zhRg1pQUtkyRiOFo2Sbqwjp0GfBNo9cUY2/Grpx1p+8= github.com/panjf2000/ants/v2 v2.10.0 h1:zhRg1pQUtkyRiOFo2Sbqwjp0GfBNo9cUY2/Grpx1p+8=
github.com/panjf2000/ants/v2 v2.10.0/go.mod h1:7ZxyxsqE4vvW0M7LSD8aI3cKwgFhBHbxnlN8mDqHa1I= github.com/panjf2000/ants/v2 v2.10.0/go.mod h1:7ZxyxsqE4vvW0M7LSD8aI3cKwgFhBHbxnlN8mDqHa1I=
github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM= github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM=
github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA= github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA=
github.com/rogpeppe/clock v0.0.0-20190514195947-2896927a307a/go.mod h1:4r5QyqhjIWCcK8DO4KMclc5Iknq5qVBAlbYYzAbUScQ=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/santhosh-tekuri/jsonschema/v5 v5.0.0/go.mod h1:FKdcjfQW6rpZSnxxUvEA5H/cDPdvJ/SZJQLWWXWGrZ0=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
@ -143,6 +239,8 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
@ -162,7 +260,9 @@ github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
@ -172,28 +272,62 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/arch v0.12.0 h1:UsYJhbzPYGsT0HbEdmYcqtCv8UNGvnaL561NnIUvaKg= golang.org/x/arch v0.12.0 h1:UsYJhbzPYGsT0HbEdmYcqtCv8UNGvnaL561NnIUvaKg=
golang.org/x/arch v0.12.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/arch v0.12.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY= golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY=
golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200505041828-1ed23360d12c/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI=
golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -204,29 +338,80 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20220503193339-ba3ae3f07e29/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/avro.v0 v0.0.0-20171217001914-a730b5802183/go.mod h1:FvqrFXt+jCsyQibeRv4xxEJBL5iG2DDW5aeJwzDiq4A=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v1 v1.0.0/go.mod h1:CxwszS/Xz1C49Ucd2i6Zil5UToP1EmyrFhKaMVbg1mk=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/httprequest.v1 v1.2.1/go.mod h1:x2Otw96yda5+8+6ZeWwHIJTFkEHWP/qP8pJOzqEtWPM=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/retry.v1 v1.0.3/go.mod h1:FJkXmWiMaAo7xB+xhvDF59zhfjDWyzmyAxiT4dB688g=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/mysql v1.5.7 h1:MndhOPYOfEp2rHKgkZIhJ16eVUIRf2HmzgoPmh7FCWo= gorm.io/driver/mysql v1.5.7 h1:MndhOPYOfEp2rHKgkZIhJ16eVUIRf2HmzgoPmh7FCWo=
@ -236,4 +421,6 @@ gorm.io/driver/postgres v1.5.9/go.mod h1:DX3GReXH+3FPWGrrgffdvCk3DQ1dwDPdmbenSkw
gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8= gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8=
gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ= gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=

View File

@ -35,7 +35,7 @@ func QueryAlertEventHandler(c *gin.Context) {
resp := network.SuccessResponse{ resp := network.SuccessResponse{
Code: 0, Code: 0,
Msg: "success", Msg: "success",
Payload: map[string]any{ PayLoad: map[string]interface{}{
"events": events, "events": events,
}, },
} }

View File

@ -68,7 +68,7 @@ func ComponentAnchorReplaceHandler(c *gin.Context) {
resp := network.SuccessResponse{ resp := network.SuccessResponse{
Code: http.StatusOK, Code: http.StatusOK,
Msg: "success", Msg: "success",
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"uuid": request.UUID, "uuid": request.UUID,
}, },
} }

View File

@ -41,7 +41,7 @@ func AttrDeleteHandler(c *gin.Context) {
c.JSON(http.StatusOK, network.FailureResponse{ c.JSON(http.StatusOK, network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{"attr_token": request.AttrToken}, PayLoad: map[string]interface{}{"attr_token": request.AttrToken},
}) })
return return
} }
@ -49,7 +49,7 @@ func AttrDeleteHandler(c *gin.Context) {
c.JSON(http.StatusOK, network.SuccessResponse{ c.JSON(http.StatusOK, network.SuccessResponse{
Code: http.StatusOK, Code: http.StatusOK,
Msg: "success", Msg: "success",
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"attr_token": request.AttrToken, "attr_token": request.AttrToken,
}, },
}) })

View File

@ -46,7 +46,7 @@ func AttrGetHandler(c *gin.Context) {
c.JSON(http.StatusOK, network.FailureResponse{ c.JSON(http.StatusOK, network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{"attr_token": request.AttrToken}, PayLoad: map[string]interface{}{"attr_token": request.AttrToken},
}) })
return return
} }
@ -59,7 +59,7 @@ func AttrGetHandler(c *gin.Context) {
c.JSON(http.StatusOK, network.SuccessResponse{ c.JSON(http.StatusOK, network.SuccessResponse{
Code: http.StatusOK, Code: http.StatusOK,
Msg: "success", Msg: "success",
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"attr_token": request.AttrToken, "attr_token": request.AttrToken,
"attr_value": attrValue, "attr_value": attrValue,
}, },

View File

@ -43,7 +43,7 @@ func AttrSetHandler(c *gin.Context) {
c.JSON(http.StatusOK, network.FailureResponse{ c.JSON(http.StatusOK, network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{"attr_token": request.AttrToken}, PayLoad: map[string]interface{}{"attr_token": request.AttrToken},
}) })
return return
} }
@ -51,7 +51,7 @@ func AttrSetHandler(c *gin.Context) {
c.JSON(http.StatusOK, network.SuccessResponse{ c.JSON(http.StatusOK, network.SuccessResponse{
Code: http.StatusOK, Code: http.StatusOK,
Msg: "success", Msg: "success",
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"attr_token": request.AttrToken, "attr_token": request.AttrToken,
}, },
}) })

View File

@ -37,7 +37,7 @@ func CircuitDiagramCreateHandler(c *gin.Context) {
resp := network.FailureResponse{ resp := network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"page_id": request.PageID, "page_id": request.PageID,
}, },
} }
@ -65,7 +65,7 @@ func CircuitDiagramCreateHandler(c *gin.Context) {
resp := network.FailureResponse{ resp := network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"topologic_info": topologicLink, "topologic_info": topologicLink,
}, },
} }
@ -89,7 +89,7 @@ func CircuitDiagramCreateHandler(c *gin.Context) {
resp := network.FailureResponse{ resp := network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"topologic_infos": topologicCreateInfos, "topologic_infos": topologicCreateInfos,
}, },
} }
@ -111,7 +111,7 @@ func CircuitDiagramCreateHandler(c *gin.Context) {
resp := network.FailureResponse{ resp := network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"component_infos": request.ComponentInfos, "component_infos": request.ComponentInfos,
}, },
} }
@ -130,7 +130,7 @@ func CircuitDiagramCreateHandler(c *gin.Context) {
resp := network.FailureResponse{ resp := network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"uuid": info.UUID, "uuid": info.UUID,
"component_params": info.Params, "component_params": info.Params,
}, },
@ -152,7 +152,7 @@ func CircuitDiagramCreateHandler(c *gin.Context) {
resp := network.SuccessResponse{ resp := network.SuccessResponse{
Code: http.StatusOK, Code: http.StatusOK,
Msg: "success", Msg: "success",
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"page_id": request.PageID, "page_id": request.PageID,
}, },
} }

View File

@ -42,7 +42,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) {
resp := network.FailureResponse{ resp := network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"page_id": request.PageID, "page_id": request.PageID,
}, },
} }
@ -70,7 +70,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) {
resp := network.FailureResponse{ resp := network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"topologic_info": topologicLink, "topologic_info": topologicLink,
}, },
} }
@ -95,7 +95,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) {
resp := network.FailureResponse{ resp := network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"topologic_info": topologicDelInfo, "topologic_info": topologicDelInfo,
}, },
} }
@ -112,7 +112,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) {
resp := network.FailureResponse{ resp := network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"topologic_info": topologicDelInfo, "topologic_info": topologicDelInfo,
}, },
} }
@ -138,7 +138,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) {
resp := network.FailureResponse{ resp := network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"uuid": componentInfo.UUID, "uuid": componentInfo.UUID,
}, },
} }
@ -162,7 +162,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) {
resp := network.FailureResponse{ resp := network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"uuid": componentInfo.UUID, "uuid": componentInfo.UUID,
}, },
} }
@ -184,7 +184,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) {
resp := network.FailureResponse{ resp := network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"uuid": componentInfo.UUID, "uuid": componentInfo.UUID,
}, },
} }
@ -205,7 +205,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) {
resp := network.SuccessResponse{ resp := network.SuccessResponse{
Code: http.StatusOK, Code: http.StatusOK,
Msg: "success", Msg: "success",
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"page_id": request.PageID, "page_id": request.PageID,
}, },
} }

View File

@ -33,7 +33,7 @@ func CircuitDiagramLoadHandler(c *gin.Context) {
resp := network.FailureResponse{ resp := network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"page_id": pageID, "page_id": pageID,
}, },
} }
@ -48,16 +48,16 @@ func CircuitDiagramLoadHandler(c *gin.Context) {
resp := network.FailureResponse{ resp := network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"page_id": pageID, "page_id": pageID,
}, },
} }
c.JSON(http.StatusOK, resp) c.JSON(http.StatusOK, resp)
return return
} }
payload := make(map[string]interface{}) payLoad := make(map[string]interface{})
payload["root_vertex"] = topologicInfo.RootVertex payLoad["root_vertex"] = topologicInfo.RootVertex
payload["topologic"] = topologicInfo.VerticeLinks payLoad["topologic"] = topologicInfo.VerticeLinks
componentParamMap := make(map[string]any) componentParamMap := make(map[string]any)
for _, VerticeLink := range topologicInfo.VerticeLinks { for _, VerticeLink := range topologicInfo.VerticeLinks {
@ -69,7 +69,7 @@ func CircuitDiagramLoadHandler(c *gin.Context) {
resp := network.FailureResponse{ resp := network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"uuid": componentUUID, "uuid": componentUUID,
}, },
} }
@ -84,7 +84,7 @@ func CircuitDiagramLoadHandler(c *gin.Context) {
resp := network.FailureResponse{ resp := network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"uuid": componentUUID, "uuid": componentUUID,
}, },
} }
@ -103,7 +103,7 @@ func CircuitDiagramLoadHandler(c *gin.Context) {
resp := network.FailureResponse{ resp := network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"uuid": topologicInfo.RootVertex, "uuid": topologicInfo.RootVertex,
}, },
} }
@ -118,7 +118,7 @@ func CircuitDiagramLoadHandler(c *gin.Context) {
resp := network.FailureResponse{ resp := network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"uuid": rootVertexUUID, "uuid": rootVertexUUID,
}, },
} }
@ -127,12 +127,12 @@ func CircuitDiagramLoadHandler(c *gin.Context) {
} }
componentParamMap[rootVertexUUID] = rootComponentParam componentParamMap[rootVertexUUID] = rootComponentParam
payload["component_params"] = componentParamMap payLoad["component_params"] = componentParamMap
resp := network.SuccessResponse{ resp := network.SuccessResponse{
Code: http.StatusOK, Code: http.StatusOK,
Msg: "success", Msg: "success",
Payload: payload, PayLoad: payLoad,
} }
c.JSON(http.StatusOK, resp) c.JSON(http.StatusOK, resp)
} }

View File

@ -35,7 +35,7 @@ func CircuitDiagramUpdateHandler(c *gin.Context) {
resp := network.FailureResponse{ resp := network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"page_id": request.PageID, "page_id": request.PageID,
}, },
} }
@ -52,7 +52,7 @@ func CircuitDiagramUpdateHandler(c *gin.Context) {
resp := network.FailureResponse{ resp := network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"topologic_info": topologicLink, "topologic_info": topologicLink,
}, },
} }
@ -75,7 +75,7 @@ func CircuitDiagramUpdateHandler(c *gin.Context) {
resp := network.FailureResponse{ resp := network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"topologic_info": topologicChangeInfo, "topologic_info": topologicChangeInfo,
}, },
} }
@ -92,7 +92,7 @@ func CircuitDiagramUpdateHandler(c *gin.Context) {
resp := network.FailureResponse{ resp := network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"topologic_info": topologicChangeInfo, "topologic_info": topologicChangeInfo,
}, },
} }
@ -109,7 +109,7 @@ func CircuitDiagramUpdateHandler(c *gin.Context) {
resp := network.FailureResponse{ resp := network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"page_id": request.PageID, "page_id": request.PageID,
"component_info": request.ComponentInfos, "component_info": request.ComponentInfos,
}, },
@ -129,7 +129,7 @@ func CircuitDiagramUpdateHandler(c *gin.Context) {
resp := network.FailureResponse{ resp := network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"uuid": info.UUID, "uuid": info.UUID,
"component_params": info.Params, "component_params": info.Params,
}, },
@ -152,7 +152,7 @@ func CircuitDiagramUpdateHandler(c *gin.Context) {
resp := network.SuccessResponse{ resp := network.SuccessResponse{
Code: http.StatusOK, Code: http.StatusOK,
Msg: "success", Msg: "success",
Payload: map[string]interface{}{ PayLoad: map[string]interface{}{
"page_id": request.PageID, "page_id": request.PageID,
}, },
} }

View File

@ -1,188 +0,0 @@
// Package handler provides HTTP handlers for various endpoints.
package handler
import (
"context"
"errors"
"fmt"
"net/http"
"modelRT/constants"
"modelRT/database"
"modelRT/diagram"
"modelRT/logger"
"modelRT/network"
"modelRT/orm"
"github.com/gin-gonic/gin"
)
// linkSetConfig describes how the Redis link sets are built for one diagram
// hierarchy level: the key of the set holding every member of the level, an
// optional fmt template for the parent-scoped set key, and a flag for levels
// that have no parent set at all.
type linkSetConfig struct {
	CurrKey         string // Redis key of the set containing all members of this level
	PrevKeyTemplate string // fmt template for the parent-scoped set key (filled with the parent node's tag name)
	PrevIsNil       bool   // true when the level has no parent-scoped set (top-level hierarchies)
}

// linkSetConfigs maps a diagram node level (0-5) to its Redis set
// configuration; levels absent from this map are unsupported (see
// generateLinkSet, which returns nil sets for them).
var linkSetConfigs = map[int]linkSetConfig{
	// grid hierarchy
	0: {CurrKey: constants.RedisAllGridSetKey, PrevIsNil: true},
	// zone hierarchy
	1: {CurrKey: constants.RedisAllZoneSetKey, PrevKeyTemplate: constants.RedisSpecGridZoneSetKey},
	// station hierarchy
	2: {CurrKey: constants.RedisAllStationSetKey, PrevKeyTemplate: constants.RedisSpecZoneStationSetKey},
	// component nspath hierarchy
	3: {CurrKey: constants.RedisAllCompNSPathSetKey, PrevKeyTemplate: constants.RedisSpecStationCompNSPATHSetKey},
	// component tag hierarchy
	4: {CurrKey: constants.RedisAllCompTagSetKey, PrevKeyTemplate: constants.RedisSpecStationCompTagSetKey},
	// config hierarchy
	5: {CurrKey: constants.RedisAllConfigSetKey, PrevIsNil: true},
}
// DiagramNodeLinkHandler defines the diagram node link process api.
//
// It validates the client token injected by upstream middleware, binds the
// JSON request (node id, node level, action), loads the node and its parent
// from Postgres, builds the level-specific Redis link sets, and applies the
// requested add/delete action to them. Following the convention of the other
// handlers in this package, all failures are reported as HTTP 200 with a
// FailureResponse body carrying the real status code.
func DiagramNodeLinkHandler(c *gin.Context) {
	var request network.DiagramNodeLinkRequest
	// the client token is expected to be stored in the gin context by
	// upstream middleware; an empty value means the request is unauthenticated
	clientToken := c.GetString("client_token")
	if clientToken == "" {
		err := constants.ErrGetClientToken
		logger.Error(c, "failed to get client token from context", "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
		})
		return
	}
	if err := c.ShouldBindJSON(&request); err != nil {
		logger.Error(c, "failed to unmarshal diagram node process request", "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  "invalid request body format: " + err.Error(),
		})
		return
	}
	var err error
	pgClient := database.GetPostgresDBClient()
	nodeID := request.NodeID
	nodeLevel := request.NodeLevel
	action := request.Action
	// both the node and its parent are fetched: the parent's tag is needed
	// by generateLinkSet to build the parent-scoped Redis set key
	prevNodeInfo, currNodeInfo, err := database.QueryNodeInfoByID(c, pgClient, nodeID, nodeLevel)
	if err != nil {
		logger.Error(c, "failed to query diagram node info by nodeID and level from postgres", "node_id", nodeID, "level", nodeLevel, "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  "failed to query measurement info record: " + err.Error(),
			Payload: map[string]any{
				"node_id":    nodeID,
				"node_level": nodeLevel,
				"action":     action,
			},
		})
		return
	}
	// prevLinkSet may be nil for top-level hierarchies (or unsupported
	// levels); processLinkSetData handles the nil case
	prevLinkSet, currLinkSet := generateLinkSet(c, nodeLevel, prevNodeInfo)
	err = processLinkSetData(c, action, nodeLevel, prevLinkSet, currLinkSet, prevNodeInfo, currNodeInfo)
	if err != nil {
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
			Payload: map[string]any{
				"node_id":    nodeID,
				"node_level": nodeLevel,
				"action":     action,
			},
		})
		return
	}
	logger.Info(c, "process diagram node link success", "node_id", nodeID, "level", nodeLevel, "action", request.Action)
	c.JSON(http.StatusOK, network.SuccessResponse{
		Code: http.StatusOK,
		Msg:  "diagram node link process success",
		Payload: map[string]any{
			"node_id":    nodeID,
			"node_level": nodeLevel,
			"action":     action,
		},
	})
}
// generateLinkSet resolves the (parent, current) Redis link sets for the
// given hierarchy level. It returns (nil, nil) for levels missing from
// linkSetConfigs, and a nil parent set for levels that have no parent scope.
func generateLinkSet(ctx context.Context, level int, prevNodeInfo orm.CircuitDiagramNodeInterface) (*diagram.RedisSet, *diagram.RedisSet) {
	cfg, supported := linkSetConfigs[level]
	if !supported {
		// level not supported
		return nil, nil
	}
	current := diagram.NewRedisSet(ctx, cfg.CurrKey, 0, false)
	if cfg.PrevIsNil {
		// top-level hierarchy: no parent-scoped set exists
		return nil, current
	}
	// parent-scoped set key is derived from the parent node's tag name
	parentKey := fmt.Sprintf(cfg.PrevKeyTemplate, prevNodeInfo.GetTagName())
	parent := diagram.NewRedisSet(ctx, parentKey, 0, false)
	return parent, current
}
// processLinkSetData applies the requested add/delete action to the current
// and (optional) parent Redis link sets for a diagram node. Which member
// value is written depends on the hierarchy level; level 5 is deliberately a
// no-op (logged, nil returned) and unknown levels/actions are errors.
func processLinkSetData(ctx context.Context, action string, level int, prevLinkSet, currLinkSet *diagram.RedisSet, prevNodeInfo, currNodeInfo orm.CircuitDiagramNodeInterface) error {
	var currMember, prevMember string
	var currErr, prevErr error
	// pick the set members for this level
	switch level {
	case 0, 1, 2, 4:
		// grid、zone、station、component tag hierarchy: both sets keyed by tag name
		currMember = currNodeInfo.GetTagName()
		if prevLinkSet != nil {
			prevMember = prevNodeInfo.GetTagName()
		}
	case 3:
		// component NSPath hierarchy: current set keyed by NSPath, parent by tag
		currMember = currNodeInfo.GetNSPath()
		prevMember = prevNodeInfo.GetTagName()
	case 5:
		// TODO[NONEED-ISSUE]暂无此层级增加或删除需求 #2
		// intentionally a no-op: log and report success
		err := fmt.Errorf("currently hierarchy no need to add or delete this level: %d", level)
		logger.Error(ctx, "no need level for link process", "level", level, "action", action, "error", err)
		return nil
	default:
		err := fmt.Errorf("unsupported diagram node level: %d", level)
		logger.Error(ctx, "unsupport diagram node level for link process", "level", level, "action", action, "error", err)
		return err
	}
	// apply the action to both sets (parent set only when present)
	switch action {
	case constants.SearchLinkAddAction:
		currErr = currLinkSet.SADD(currMember)
		if prevLinkSet != nil {
			prevErr = prevLinkSet.SADD(prevMember)
		}
	case constants.SearchLinkDelAction:
		currErr = currLinkSet.SREM(currMember)
		if prevLinkSet != nil {
			prevErr = prevLinkSet.SREM(prevMember)
		}
	default:
		err := constants.ErrUnsupportedLinkAction
		logger.Error(ctx, "unsupport diagram node link process action", "action", action, "error", err)
		return err
	}
	return processDiagramLinkError(currErr, prevErr, action)
}
// processDiagramLinkError combines the outcomes of the current-set and
// parent-set Redis operations into a single error for the caller.
//
// It returns nil when both operations succeeded. BUG FIX: the original
// version had no success branch — with err1 == nil and err2 == nil it fell
// into the final branch and wrapped a nil err2, returning a non-nil error,
// so every successful link operation was reported as a failure.
func processDiagramLinkError(err1, err2 error, action string) error {
	switch {
	case err1 != nil && err2 != nil:
		// both sets failed: join them so errors.Is still matches each one
		joined := errors.Join(err1, err2)
		return fmt.Errorf("process diagram node link failed, currLinkSet %s operation and prevLinkSet %s operation failed: %w", action, action, joined)
	case err1 != nil:
		return fmt.Errorf("process diagram node currLinkSet link failed: currLinkSet %s operation failed: %w", action, err1)
	case err2 != nil:
		return fmt.Errorf("process diagram node prevLinkSet link failed: prevLinkSet %s operation: %w", action, err2)
	default:
		// both operations succeeded
		return nil
	}
}

View File

@ -1,58 +0,0 @@
// Package handler provides HTTP handlers for various endpoints.
package handler
import (
"fmt"
"net/http"
"strconv"
"modelRT/alert"
"modelRT/constants"
"modelRT/logger"
"modelRT/network"
"github.com/gin-gonic/gin"
)
// QueryHistoryDataHandler define query history data process API.
//
// It parses the "token", "begin" and "end" query parameters and responds
// with the alert events for the requested level. The begin/end range and
// the token are not yet wired into the query (see TODO below). Following
// the package convention, failures are reported as HTTP 200 with a
// FailureResponse body.
func QueryHistoryDataHandler(c *gin.Context) {
	token := c.Query("token")
	beginStr := c.Query("begin")
	begin, err := strconv.Atoi(beginStr)
	if err != nil {
		logger.Error(c, "convert begin param from string to int failed", "error", err)
		resp := network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
		}
		c.JSON(http.StatusOK, resp)
		// BUG FIX: abort here — the original fell through and also wrote the
		// success response, emitting two bodies for one invalid request
		return
	}
	endStr := c.Query("end")
	end, err := strconv.Atoi(endStr)
	if err != nil {
		logger.Error(c, "convert end param from string to int failed", "error", err)
		resp := network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
		}
		c.JSON(http.StatusOK, resp)
		// BUG FIX: abort on invalid input instead of continuing (see above)
		return
	}
	// TODO parse token to dataRT query params; begin/end are currently
	// validated but unused — the Println below only keeps them referenced
	fmt.Println(token, begin, end)
	var level int
	var targetLevel constants.AlertLevel
	alertManger := alert.GetAlertMangerInstance()
	targetLevel = constants.AlertLevel(level)
	events := alertManger.GetRangeEventsByLevel(targetLevel)
	resp := network.SuccessResponse{
		Code: http.StatusOK,
		Msg:  "success",
		Payload: map[string]interface{}{
			"events": events,
		},
	}
	c.JSON(http.StatusOK, resp)
}

View File

@ -38,14 +38,14 @@ func MeasurementGetHandler(c *gin.Context) {
return return
} }
zset := diagram.NewRedisZSet(c, request.MeasurementToken, 0, false) zset := diagram.NewRedisZSet(c, request.MeasurementToken, clientToken, 0, false)
points, err := zset.ZRANGE(request.MeasurementToken, 0, -1) points, err := zset.ZRANGE(request.MeasurementToken, 0, -1)
if err != nil { if err != nil {
logger.Error(c, "failed to get measurement data from redis", "measurement_token", request.MeasurementToken, "error", err) logger.Error(c, "failed to get measurement data from redis", "measurement_token", request.MeasurementToken, "error", err)
c.JSON(http.StatusOK, network.FailureResponse{ c.JSON(http.StatusOK, network.FailureResponse{
Code: http.StatusInternalServerError, Code: http.StatusInternalServerError,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]any{ PayLoad: map[string]interface{}{
"measurement_id": request.MeasurementID, "measurement_id": request.MeasurementID,
"measurement_token": request.MeasurementToken, "measurement_token": request.MeasurementToken,
}, },
@ -60,7 +60,7 @@ func MeasurementGetHandler(c *gin.Context) {
c.JSON(http.StatusOK, network.FailureResponse{ c.JSON(http.StatusOK, network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]any{ PayLoad: map[string]interface{}{
"measurement_id": request.MeasurementID, "measurement_id": request.MeasurementID,
"measurement_token": request.MeasurementToken, "measurement_token": request.MeasurementToken,
"measurement_value": points, "measurement_value": points,
@ -72,7 +72,7 @@ func MeasurementGetHandler(c *gin.Context) {
c.JSON(http.StatusOK, network.SuccessResponse{ c.JSON(http.StatusOK, network.SuccessResponse{
Code: http.StatusOK, Code: http.StatusOK,
Msg: "success", Msg: "success",
Payload: map[string]any{ PayLoad: map[string]interface{}{
"measurement_id": request.MeasurementID, "measurement_id": request.MeasurementID,
"measurement_token": request.MeasurementToken, "measurement_token": request.MeasurementToken,
"measurement_info": measurementInfo, "measurement_info": measurementInfo,

View File

@ -7,7 +7,6 @@ import (
"modelRT/logger" "modelRT/logger"
"modelRT/model" "modelRT/model"
"modelRT/network" "modelRT/network"
"modelRT/util"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
) )
@ -18,36 +17,34 @@ import (
// @Tags Measurement Recommend // @Tags Measurement Recommend
// @Accept json // @Accept json
// @Produce json // @Produce json
// @Param input query string true "推荐关键词,例如 'grid1' 或 'grid1.'" Example("grid1") // @Param request body network.MeasurementRecommendRequest true "查询输入参数,例如 'trans' 或 'transformfeeder1_220.'"
// @Success 200 {object} network.SuccessResponse{payload=network.MeasurementRecommendPayload} "返回推荐列表成功" // @Success 200 {object} network.SuccessResponse{payload=network.MeasurementRecommendPayload} "返回推荐列表成功"
// //
// @Example 200 { // @Example 200 {
// "code": 200, // "code": 200,
// "msg": "success", // "msg": "success",
// "payload": { // "payload": {
// "input": "grid1.zone1.station1.ns1.tag1.bay.", // "input": "transformfeeder1_220.",
// "offset": 21, // "offset": 21,
// "recommended_list": [ // "recommended_list": [
// "I11_A_rms", // "I_A_rms",
// "I11_B_rms.", // "I_B_rms",
// "I11_C_rms.", // "I_C_rms",
// ] // ]
// } // }
// } // }
// //
// @Failure 400 {object} network.FailureResponse "返回推荐列表失败" // @Failure 400 {object} network.FailureResponse "返回推荐列表失败"
//
// @Example 400 { // @Example 400 {
// "code": 400, // "code": 400,
// "msg": "failed to get recommend data from redis", // "msg": "failed to get recommend data from redis",
// } // }
//
// @Router /measurement/recommend [get] // @Router /measurement/recommend [get]
func MeasurementRecommendHandler(c *gin.Context) { func MeasurementRecommendHandler(c *gin.Context) {
var request network.MeasurementRecommendRequest var request network.MeasurementRecommendRequest
if err := c.ShouldBindQuery(&request); err != nil { if err := c.ShouldBindJSON(&request); err != nil {
logger.Error(c, "failed to bind measurement recommend request", "error", err) logger.Error(c, "failed to unmarshal measurement recommend request", "error", err)
c.JSON(http.StatusOK, network.FailureResponse{ c.JSON(http.StatusOK, network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
@ -61,7 +58,7 @@ func MeasurementRecommendHandler(c *gin.Context) {
c.JSON(http.StatusOK, network.FailureResponse{ c.JSON(http.StatusOK, network.FailureResponse{
Code: http.StatusInternalServerError, Code: http.StatusInternalServerError,
Msg: err.Error(), Msg: err.Error(),
Payload: map[string]any{ PayLoad: map[string]any{
"input": request.Input, "input": request.Input,
}, },
}) })
@ -72,7 +69,7 @@ func MeasurementRecommendHandler(c *gin.Context) {
if isFuzzy { if isFuzzy {
var maxOffset int var maxOffset int
for index, recommend := range recommends { for index, recommend := range recommends {
offset := util.GetLongestCommonPrefixLength(request.Input, recommend) offset := model.GetLongestCommonPrefixLength(request.Input, recommend)
if index == 0 || offset > maxOffset { if index == 0 || offset > maxOffset {
maxOffset = offset maxOffset = offset
} }
@ -81,7 +78,7 @@ func MeasurementRecommendHandler(c *gin.Context) {
} else { } else {
var minOffset int var minOffset int
for index, recommend := range recommends { for index, recommend := range recommends {
offset := util.GetLongestCommonPrefixLength(request.Input, recommend) offset := model.GetLongestCommonPrefixLength(request.Input, recommend)
if index == 0 || offset < minOffset { if index == 0 || offset < minOffset {
minOffset = offset minOffset = offset
} }
@ -105,7 +102,12 @@ func MeasurementRecommendHandler(c *gin.Context) {
c.JSON(http.StatusOK, network.SuccessResponse{ c.JSON(http.StatusOK, network.SuccessResponse{
Code: http.StatusOK, Code: http.StatusOK,
Msg: "success", Msg: "success",
Payload: &network.MeasurementRecommendPayload{ // PayLoad: map[string]any{
// "input": request.Input,
// "offset": finalOffset,
// "recommended_list": resultRecommends,
// },
PayLoad: &network.MeasurementRecommendPayload{
Input: request.Input, Input: request.Input,
Offset: finalOffset, Offset: finalOffset,
RecommendedList: resultRecommends, RecommendedList: resultRecommends,

View File

@ -1,135 +0,0 @@
// Package handler provides HTTP handlers for various endpoints.
package handler
import (
"errors"
"fmt"
"net/http"
"modelRT/constants"
"modelRT/database"
"modelRT/diagram"
"modelRT/logger"
"modelRT/network"
"github.com/gin-gonic/gin"
)
// MeasurementLinkHandler defines the measurement link process api.
//
// It resolves the measurement (by id) and its owning component (by UUID)
// from Postgres, then adds or removes the measurement tag in two Redis
// sets: the global measurement-tag set and the per-component measurement
// set keyed by the component's tag. The action comes from the request body.
// Following the package convention, failures are reported as HTTP 200 with
// a FailureResponse body carrying the real status code.
func MeasurementLinkHandler(c *gin.Context) {
	var request network.MeasurementLinkRequest
	// the client token is expected to be stored by upstream middleware;
	// an empty value means the request is unauthenticated
	clientToken := c.GetString("client_token")
	if clientToken == "" {
		err := constants.ErrGetClientToken
		logger.Error(c, "failed to get client token from context", "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
		})
		return
	}
	if err := c.ShouldBindJSON(&request); err != nil {
		logger.Error(c, "failed to unmarshal measurement process request", "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  "invalid request body format: " + err.Error(),
		})
		return
	}
	var err error
	pgClient := database.GetPostgresDBClient()
	measurementID := request.MeasurementID
	action := request.Action
	// NOTE(review): the log key "meauserement_id" below is misspelled —
	// left unchanged here to preserve behavior; worth a follow-up fix
	measurementInfo, err := database.QueryMeasurementByID(c, pgClient, measurementID)
	if err != nil {
		logger.Error(c, "failed to query measurement info by measurement id from postgres", "meauserement_id", measurementID, "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  "failed to query measurement info record: " + err.Error(),
			Payload: map[string]any{
				"id":     measurementID,
				"action": action,
			},
		})
		return
	}
	// the owning component supplies the tag used to build the
	// per-component measurement set key
	componentInfo, err := database.QueryComponentByUUID(c, pgClient, measurementInfo.ComponentUUID)
	if err != nil {
		logger.Error(c, "failed to query component info by component uuid from postgres", "component_uuid", measurementInfo.ComponentUUID, "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  "failed to query component info record: " + err.Error(),
			Payload: map[string]any{
				"id":     measurementID,
				"action": action,
			},
		})
		return
	}
	allMeasSet := diagram.NewRedisSet(c, constants.RedisAllMeasTagSetKey, 0, false)
	compMeasLinkKey := fmt.Sprintf(constants.RedisSpecCompTagMeasSetKey, componentInfo.Tag)
	compMeasLinkSet := diagram.NewRedisSet(c, compMeasLinkKey, 0, false)
	// apply the action to both sets; processActionError merges the two
	// possible Redis errors into one
	switch action {
	case constants.SearchLinkAddAction:
		err1 := allMeasSet.SADD(measurementInfo.Tag)
		err2 := compMeasLinkSet.SADD(measurementInfo.Tag)
		err = processActionError(err1, err2, action)
		if err != nil {
			logger.Error(c, "add measurement link process operation failed", "measurement_id", measurementID, "action", action, "error", err)
		}
	case constants.SearchLinkDelAction:
		err1 := allMeasSet.SREM(measurementInfo.Tag)
		err2 := compMeasLinkSet.SREM(measurementInfo.Tag)
		err = processActionError(err1, err2, action)
		if err != nil {
			logger.Error(c, "del measurement link process operation failed", "measurement_id", measurementID, "action", action, "error", err)
		}
	default:
		err = constants.ErrUnsupportedLinkAction
		logger.Error(c, "unsupport measurement link process action", "measurement_id", measurementID, "action", action, "error", err)
	}
	if err != nil {
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
			Payload: map[string]any{
				"measurement_id": request.MeasurementID,
				"action":         request.Action,
			},
		})
		return
	}
	logger.Info(c, "process measurement link success", "measurement_id", measurementID, "action", request.Action)
	c.JSON(http.StatusOK, network.SuccessResponse{
		Code: http.StatusOK,
		Msg:  "measurement link process success",
		Payload: map[string]any{
			"measurement_id": measurementID,
			"action":         request.Action,
		},
	})
}
// processActionError combines the outcomes of the global measurement-set and
// per-component-set Redis operations into a single error for the caller.
//
// It returns nil when both operations succeeded. BUG FIX: the original
// version had no success branch — with err1 == nil and err2 == nil it fell
// into the final branch and wrapped a nil err2, returning a non-nil error,
// which made MeasurementLinkHandler report failure on every success.
func processActionError(err1, err2 error, action string) error {
	switch {
	case err1 != nil && err2 != nil:
		// both sets failed: join them so errors.Is still matches each one
		joined := errors.Join(err1, err2)
		return fmt.Errorf("process measurement link failed, allMeasSet %s operation and compMeasLinkSet %s operation failed: %w", action, action, joined)
	case err1 != nil:
		return fmt.Errorf("process measurement link failed: allMeasSet %s operation failed: %w", action, err1)
	case err2 != nil:
		return fmt.Errorf("process measurement link failed: compMeasLinkSet %s operation: %w", action, err2)
	default:
		// both operations succeeded
		return nil
	}
}

View File

@ -1,483 +0,0 @@
// Package handler provides HTTP handlers for various endpoints.
package handler
import (
"context"
"fmt"
"maps"
"net/http"
"slices"
"sort"
"strconv"
"time"
"modelRT/constants"
"modelRT/diagram"
"modelRT/logger"
"modelRT/model"
"modelRT/network"
"github.com/gin-gonic/gin"
"github.com/gorilla/websocket"
)
// pullUpgrader upgrades inbound HTTP requests to WebSocket connections for
// the real-time data pull endpoint.
//
// NOTE(review): CheckOrigin unconditionally returns true, which disables the
// browser Origin check and permits cross-site WebSocket connections from any
// page — confirm this is intended before exposing the endpoint publicly.
var pullUpgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
	CheckOrigin: func(_ *http.Request) bool {
		return true
	},
}
// PullRealTimeDataHandler define real time data pull API
// @Summary real-time data pull websocket api
// @Description pulls the matching real-time data stream according to the clientID supplied by the caller
// @Tags RealTime Component Websocket
// @Router /monitors/data/realtime/stream/:clientID [get]
//
// The handler upgrades the request to a WebSocket and spawns three
// goroutines: a poller that fans target data into fanInChan, a reader that
// cancels ctx when the client disconnects, and a sender that drains
// sendChan. The loop below aggregates fan-in items into batches and flushes
// a batch when it is full or when the ticker fires. Only this goroutine
// writes to (and closes) sendChan; the deferred close runs before cancel()
// and conn.Close() per LIFO defer order.
func PullRealTimeDataHandler(c *gin.Context) {
	clientID := c.Param("clientID")
	if clientID == "" {
		err := fmt.Errorf("clientID is missing from the path")
		logger.Error(c, "query clientID from path failed", "error", err, "url", c.Request.RequestURI)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
		})
		return
	}
	conn, err := pullUpgrader.Upgrade(c.Writer, c.Request, nil)
	if err != nil {
		logger.Error(c, "upgrade http protocol to websocket protocal failed", "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
		})
		return
	}
	defer conn.Close()
	// ctx cancellation is the shared shutdown signal for all three goroutines
	ctx, cancel := context.WithCancel(c.Request.Context())
	defer cancel()
	// TODO[BACKPRESSURE-ISSUE] the fan-in model uses a fixed large capacity for now #1
	fanInChan := make(chan network.RealTimePullTarget, constants.FanInChanMaxSize)
	sendChan := make(chan []network.RealTimePullTarget, constants.SendChanBufferSize)
	go processTargetPolling(ctx, globalSubState, clientID, fanInChan)
	go readClientMessages(ctx, conn, clientID, cancel)
	go sendDataStream(ctx, conn, clientID, sendChan, cancel)
	defer close(sendChan)
	bufferMaxSize := constants.SendMaxBatchSize
	sendMaxInterval := constants.SendMaxBatchInterval
	buffer := make([]network.RealTimePullTarget, 0, bufferMaxSize)
	ticker := time.NewTicker(sendMaxInterval)
	defer ticker.Stop()
	for {
		select {
		case targetData, ok := <-fanInChan:
			if !ok {
				logger.Error(ctx, "fanInChan closed unexpectedly", "client_id", clientID)
				return
			}
			buffer = append(buffer, targetData)
			if len(buffer) >= bufferMaxSize {
				// buffer is full, send immediately; non-blocking send drops
				// the batch if the sender goroutine is behind
				select {
				case sendChan <- buffer:
				default:
					logger.Warn(ctx, "sendChan is full, dropping aggregated data batch (buffer is full)", "client_id", clientID)
				}
				// reset buffer
				buffer = make([]network.RealTimePullTarget, 0, bufferMaxSize)
				// reset the ticker to prevent it from triggering immediately after the ticker is sent
				ticker.Reset(sendMaxInterval)
			}
		case <-ticker.C:
			if len(buffer) > 0 {
				// when the ticker is triggered, all data in the send buffer is sent
				select {
				case sendChan <- buffer:
				default:
					logger.Warn(ctx, "sendChan is full, dropping aggregated data batch (ticker is triggered)", "client_id", clientID)
				}
				// reset buffer
				buffer = make([]network.RealTimePullTarget, 0, bufferMaxSize)
			}
		case <-ctx.Done():
			// send the last remaining data (best effort, non-blocking)
			if len(buffer) > 0 {
				select {
				case sendChan <- buffer:
				default:
					logger.Warn(ctx, "sendChan is full, cannot send last remaining data during shutdown.", "client_id", clientID)
				}
			}
			logger.Info(ctx, "pullRealTimeDataHandler exiting as context is done.", "client_id", clientID)
			return
		}
	}
}
// readClientMessages continuously drains frames sent by the client (e.g.
// Ping/Pong, close frames, or control commands). On any read error it
// classifies and logs the cause, cancels the shared context so the sibling
// goroutines shut down, and exits.
func readClientMessages(ctx context.Context, conn *websocket.Conn, clientID string, cancel context.CancelFunc) {
	// conn.SetReadLimit(512)
	for {
		frameType, payload, readErr := conn.ReadMessage()
		if readErr != nil {
			switch {
			case websocket.IsCloseError(readErr, websocket.CloseNormalClosure):
				logger.Info(ctx, "client actively and normally closed the connection", "client_id", clientID)
			case websocket.IsUnexpectedCloseError(readErr, websocket.CloseGoingAway, websocket.CloseAbnormalClosure):
				logger.Error(ctx, "an unexpected error occurred while reading the webSocket connection", "client_id", clientID, "error", readErr)
			default:
				// handle other read errors (eg, I/O errors)
				logger.Error(ctx, "an error occurred while reading the webSocket connection", "client_id", clientID, "error", readErr)
			}
			cancel()
			return
		}
		// process normal message from client
		if frameType != websocket.TextMessage && frameType != websocket.BinaryMessage {
			continue
		}
		logger.Info(ctx, "read normal message from client", "client_id", clientID, "content", string(payload))
	}
}
// sendAggregateRealTimeDataStream writes one aggregated batch of real-time
// targets to the websocket connection wrapped in the standard success
// envelope. An empty batch is a no-op and returns nil.
func sendAggregateRealTimeDataStream(conn *websocket.Conn, targetsData []network.RealTimePullTarget) error {
	if len(targetsData) == 0 {
		return nil
	}
	payload := network.RealTimePullPayload{Targets: targetsData}
	resp := network.SuccessResponse{
		Code:    200,
		Msg:     "success",
		Payload: payload,
	}
	return conn.WriteJSON(resp)
}
// sendDataStream is the dedicated websocket sender goroutine: it drains
// aggregated batches from sendChan and pushes them to the client. It exits
// when sendChan is closed or ctx is cancelled; on a write failure it cancels
// the shared context so the other goroutines of this connection shut down.
func sendDataStream(ctx context.Context, conn *websocket.Conn, clientID string, sendChan <-chan []network.RealTimePullTarget, cancel context.CancelFunc) {
	logger.Info(ctx, "start dedicated websocket sender goroutine", "client_id", clientID)
	for {
		select {
		case <-ctx.Done():
			logger.Info(ctx, "sender goroutine exiting as context is done", "client_id", clientID)
			return
		case batch, open := <-sendChan:
			if !open {
				logger.Info(ctx, "send channel closed, sender goroutine exiting", "client_id", clientID)
				return
			}
			if err := sendAggregateRealTimeDataStream(conn, batch); err != nil {
				logger.Error(ctx, "send the real time aggregate data failed in sender goroutine", "client_id", clientID, "error", err)
				cancel()
				return
			}
		}
	}
}
// processTargetPolling define function to process target in subscription map and data is continuously retrieved from redis based on the target
//
// It looks up the client's subscription config, launches one redis-polling
// goroutine per subscribed target, then serves append/remove/update
// notifications from config.noticeChan until ctx is cancelled. Every polling
// goroutine is tracked in stopChanMap (target -> stop channel) so goroutines
// can be stopped individually or all at once on shutdown.
func processTargetPolling(ctx context.Context, s *SharedSubState, clientID string, fanInChan chan network.RealTimePullTarget) {
	// ensure the fanInChan will not leak
	defer close(fanInChan)
	logger.Info(ctx, fmt.Sprintf("start processing real time data polling for clientID:%s", clientID))
	// target -> stop channel for the goroutine currently polling that target
	stopChanMap := make(map[string]chan struct{})
	s.globalMutex.RLock()
	config, confExist := s.subMap[clientID]
	if !confExist {
		logger.Error(ctx, "can not found config into local stored map by clientID", "clientID", clientID)
		s.globalMutex.RUnlock()
		return
	}
	s.globalMutex.RUnlock()
	logger.Info(ctx, fmt.Sprintf("found subscription config for clientID:%s, start initial polling goroutines", clientID), "components len", config.measurements)
	// initial launch: one polling goroutine per target, grouped by interval;
	// only a read lock is needed because stopChanMap is local to this goroutine
	config.mutex.RLock()
	for interval, measurementTargets := range config.measurements {
		for _, target := range measurementTargets {
			// add a secondary check to prevent the target from already existing in the stopChanMap
			if _, exists := stopChanMap[target]; exists {
				logger.Warn(ctx, "target already exists in polling map, skipping start-up", "target", target)
				continue
			}
			targetContext, exist := config.targetContext[target]
			if !exist {
				logger.Error(ctx, "can not found subscription node param into param map", "target", target)
				continue
			}
			measurementInfo := targetContext.measurement
			queryGStopChan := make(chan struct{})
			// store stop channel with target into map
			stopChanMap[target] = queryGStopChan
			queryKey, err := model.GenerateMeasureIdentifier(measurementInfo.DataSource)
			if err != nil {
				// NOTE(review): on this failure a stop channel stays registered
				// in stopChanMap although no goroutine was started — confirm
				// this is acceptable (later append for the same target is skipped)
				logger.Error(ctx, "generate measurement indentifier by data_source field failed", "data_source", measurementInfo.DataSource, "error", err)
				continue
			}
			pollingConfig := redisPollingConfig{
				targetID: target,
				queryKey: queryKey,
				interval: interval,
				dataSize: int64(measurementInfo.Size),
			}
			go realTimeDataQueryFromRedis(ctx, pollingConfig, fanInChan, queryGStopChan)
		}
	}
	config.mutex.RUnlock()
	// event loop: react to subscription changes until the client disconnects
	for {
		select {
		case transportTargets, ok := <-config.noticeChan:
			if !ok {
				logger.Error(ctx, "notice channel was closed unexpectedly", "clientID", clientID)
				stopAllPolling(ctx, stopChanMap)
				return
			}
			// hold the config write lock while the helpers read/mutate
			// config.measurements and config.targetContext
			config.mutex.Lock()
			switch transportTargets.OperationType {
			case constants.OpAppend:
				appendTargets(ctx, config, stopChanMap, fanInChan, transportTargets.Targets)
			case constants.OpRemove:
				removeTargets(ctx, stopChanMap, transportTargets.Targets)
			case constants.OpUpdate:
				updateTargets(ctx, config, stopChanMap, fanInChan, transportTargets.Targets)
			}
			config.mutex.Unlock()
		case <-ctx.Done():
			logger.Info(ctx, fmt.Sprintf("stop all data retrieval goroutines under this clientID:%s", clientID))
			stopAllPolling(ctx, stopChanMap)
			return
		}
	}
}
// appendTargets starts new polling goroutines for targets that were just added.
//
// Fixes two defects of the previous version: the stop channel was registered
// in stopChanMap BEFORE the interval/queryKey validation, leaving a stale
// entry with no goroutine behind it on failure; and the target was deleted
// from the "not started" report set before the queryKey check, so those
// failures were never reported in the final warning. All validation now runs
// first, and bookkeeping happens only when the goroutine is actually started.
func appendTargets(ctx context.Context, config *RealTimeSubConfig, stopChanMap map[string]chan struct{}, fanInChan chan network.RealTimePullTarget, appendTargets []string) {
	// set of targets whose polling goroutine has not (yet) been started
	appendTargetsSet := make(map[string]struct{}, len(appendTargets))
	for _, target := range appendTargets {
		appendTargetsSet[target] = struct{}{}
	}
	for _, target := range appendTargets {
		targetContext, exists := config.targetContext[target]
		if !exists {
			logger.Error(ctx, "the append target does not exist in the real time data config context map,skipping the startup step", "target", target)
			continue
		}
		if _, exists := stopChanMap[target]; exists {
			logger.Error(ctx, "the append target already has a stop channel, skipping the startup step", "target", target)
			continue
		}
		interval := targetContext.interval
		if _, exists = config.measurements[interval]; !exists {
			logger.Error(ctx, "targetContext exist but measurements is missing, cannot update config", "target", target, "interval", interval)
			continue
		}
		queryKey, err := model.GenerateMeasureIdentifier(targetContext.measurement.DataSource)
		if err != nil {
			logger.Error(ctx, "the append target generate redis query key identifier failed", "target", target, "error", err)
			continue
		}
		// all validation passed: register the stop channel, mark the target
		// as started, and launch its polling goroutine
		queryGStopChan := make(chan struct{})
		stopChanMap[target] = queryGStopChan
		delete(appendTargetsSet, target)
		pollingConfig := redisPollingConfig{
			targetID: target,
			queryKey: queryKey,
			interval: targetContext.interval,
			dataSize: int64(targetContext.measurement.Size),
		}
		go realTimeDataQueryFromRedis(ctx, pollingConfig, fanInChan, queryGStopChan)
		logger.Info(ctx, "started new polling goroutine for appended target", "target", target, "interval", targetContext.interval)
	}
	allKeys := slices.Sorted(maps.Keys(appendTargetsSet))
	if len(allKeys) > 0 {
		logger.Warn(ctx, fmt.Sprintf("the following targets:%v start up fetch real time data process goroutine not started", allKeys))
		clear(appendTargetsSet)
	}
}
// updateTargets restarts polling goroutines for targets whose subscription
// parameters changed.
//
// Fixes a defect of the previous version: the OLD goroutine was stopped
// before the new parameters were validated, so a validation or queryKey
// failure left the target with no running goroutine and a dead stop channel
// registered in stopChanMap. The old goroutine is now only stopped once the
// replacement is guaranteed to start.
func updateTargets(ctx context.Context, config *RealTimeSubConfig, stopChanMap map[string]chan struct{}, fanInChan chan network.RealTimePullTarget, updateTargets []string) {
	// set of targets whose replacement goroutine has not (yet) been started
	updateTargetsSet := make(map[string]struct{}, len(updateTargets))
	for _, target := range updateTargets {
		updateTargetsSet[target] = struct{}{}
	}
	for _, target := range updateTargets {
		targetContext, exists := config.targetContext[target]
		if !exists {
			logger.Error(ctx, "the update target does not exist in the real time data config context map,skipping the startup step", "target", target)
			continue
		}
		oldQueryGStopChan, exist := stopChanMap[target]
		if !exist {
			logger.Error(ctx, "the update target does not has a stop channel, skipping the startup step", "target", target)
			continue
		}
		interval := targetContext.interval
		if _, exists = config.measurements[interval]; !exists {
			logger.Error(ctx, "targetContext exist but measurements is missing, cannot update config", "target", target, "interval", interval)
			continue
		}
		queryKey, err := model.GenerateMeasureIdentifier(targetContext.measurement.DataSource)
		if err != nil {
			logger.Error(ctx, "the update target generate redis query key identifier failed", "target", target, "error", err)
			continue
		}
		// validation passed: stop the old goroutine and start its replacement
		logger.Info(ctx, "stopped old polling goroutine for updated target", "target", target)
		close(oldQueryGStopChan)
		newQueryGStopChan := make(chan struct{})
		stopChanMap[target] = newQueryGStopChan
		delete(updateTargetsSet, target)
		pollingConfig := redisPollingConfig{
			targetID: target,
			queryKey: queryKey,
			interval: targetContext.interval,
			dataSize: int64(targetContext.measurement.Size),
		}
		go realTimeDataQueryFromRedis(ctx, pollingConfig, fanInChan, newQueryGStopChan)
		logger.Info(ctx, "started new polling goroutine for update target", "target", target, "interval", targetContext.interval)
	}
	allKeys := slices.Sorted(maps.Keys(updateTargetsSet))
	if len(allKeys) > 0 {
		logger.Warn(ctx, fmt.Sprintf("the following targets:%v start up fetch real time data process goroutine not started", allKeys))
		clear(updateTargetsSet)
	}
}
// removeTargets stops the polling goroutine of every listed target by closing
// its stop channel and dropping it from stopChanMap. Targets that are not
// currently polling are skipped with a warning.
func removeTargets(ctx context.Context, stopChanMap map[string]chan struct{}, removeTargets []string) {
	for _, targetID := range removeTargets {
		ch, running := stopChanMap[targetID]
		if !running {
			logger.Warn(ctx, "removeTarget was not running, skipping remove operation", "target", targetID)
			continue
		}
		close(ch)
		delete(stopChanMap, targetID)
		logger.Info(ctx, "stopped polling goroutine for removed target", "target", targetID)
	}
}
// stopAllPolling stops all running query goroutines for a specific client by
// closing every stop channel in stopChanMap, then empties the map so it can
// be reused safely. (The redundant bare return was removed — staticcheck S1023.)
func stopAllPolling(ctx context.Context, stopChanMap map[string]chan struct{}) {
	for target, stopChan := range stopChanMap {
		logger.Info(ctx, fmt.Sprintf("stop the data fetching behavior for the corresponding target:%s", target))
		close(stopChan)
	}
	clear(stopChanMap)
}
// redisPollingConfig define struct for param which query real time data from redis
type redisPollingConfig struct {
	targetID string // subscription target identifier echoed back in each pushed frame
	queryKey string // redis key derived from the measurement's data source (model.GenerateMeasureIdentifier)
	interval string // polling period as a time.ParseDuration-compatible string (e.g. "500ms")
	dataSize int64  // count passed to QueryByZRangeByLex — presumably the max members per query; confirm against diagram package
}
// realTimeDataQueryFromRedis polls redis on config.interval and pushes each
// query result into fanInChan until stopChan is closed. It performs one query
// immediately on start-up instead of waiting a full interval. The goroutine
// exits if the interval string cannot be parsed.
// (Also fixes the "singal" typo in the shutdown log message and replaces the
// needPerformQuery flag with an equivalent, simpler query-then-tick loop.)
func realTimeDataQueryFromRedis(ctx context.Context, config redisPollingConfig, fanInChan chan network.RealTimePullTarget, stopChan chan struct{}) {
	logger.Info(ctx, "start a redis query goroutine for real time data pulling", "targetID", config.targetID, "queryKey", config.queryKey, "interval", config.interval, "dataSize", config.dataSize)
	duration, err := time.ParseDuration(config.interval)
	if err != nil {
		logger.Error(ctx, "failed to parse the time string", "interval", config.interval, "error", err)
		return
	}
	ticker := time.NewTicker(duration)
	defer ticker.Stop()
	client := diagram.NewRedisClient()
	// first query runs immediately; subsequent queries follow the ticker
	performQuery(ctx, client, config, fanInChan)
	for {
		select {
		case <-ticker.C:
			performQuery(ctx, client, config, fanInChan)
		case <-stopChan:
			logger.Info(ctx, "stop the redis query goroutine via a signal")
			return
		}
	}
}
// performQuery runs one redis range query for config.queryKey, converts the
// members into time-sorted RealTimePullData, and pushes the resulting frame
// into fanInChan non-blockingly (the frame is dropped with a warning when the
// channel is full).
//
// BUG FIX: the previous unchecked assertion member.Member.(string) panicked
// the whole polling goroutine when redis returned an unexpected member type;
// such members are now logged and skipped.
func performQuery(ctx context.Context, client *diagram.RedisClient, config redisPollingConfig, fanInChan chan network.RealTimePullTarget) {
	members, err := client.QueryByZRangeByLex(ctx, config.queryKey, config.dataSize)
	if err != nil {
		logger.Error(ctx, "query real time data from redis failed", "key", config.queryKey, "error", err)
		return
	}
	pullDatas := make([]network.RealTimePullData, 0, len(members))
	for _, member := range members {
		timeStr, ok := member.Member.(string)
		if !ok {
			logger.Error(ctx, "unexpected redis member type, expected string", "key", config.queryKey, "member", member.Member)
			continue
		}
		pullDatas = append(pullDatas, network.RealTimePullData{
			Time:  timeStr,
			Value: member.Score,
		})
	}
	sortPullDataByTimeAscending(ctx, pullDatas)
	targetData := network.RealTimePullTarget{
		ID:    config.targetID,
		Datas: pullDatas,
	}
	select {
	case fanInChan <- targetData:
	default:
		// TODO[BACKPRESSURE-ISSUE] 考虑 fanInChan 阻塞,当出现大量数据阻塞查询循环并丢弃时,采取背压方式解决问题 #1
		logger.Warn(ctx, "fanInChan is full, dropping real-time data frame", "key", config.queryKey, "data_size", len(members))
	}
}
// sortPullDataByTimeAscending sorts data in place by its numeric (Unix-nano
// string) timestamp, ascending. Entries whose timestamp fails to parse are
// logged once and sorted after all parsable entries — the same relative order
// the old comparator produced.
//
// The previous version parsed (and on failure logged) timestamps inside the
// sort comparator, i.e. O(n log n) parses and duplicated error logs; each
// timestamp is now parsed exactly once.
func sortPullDataByTimeAscending(ctx context.Context, data []network.RealTimePullData) {
	type timedDatum struct {
		key   int64
		valid bool
		datum network.RealTimePullData
	}
	decorated := make([]timedDatum, len(data))
	for i, d := range data {
		ts, err := strconv.ParseInt(d.Time, 10, 64)
		if err != nil {
			logger.Error(ctx, "parsing real time data timestamp failed", "index", i, "time", d.Time, "error", err)
			decorated[i] = timedDatum{valid: false, datum: d}
			continue
		}
		decorated[i] = timedDatum{key: ts, valid: true, datum: d}
	}
	sort.SliceStable(decorated, func(i, j int) bool {
		if decorated[i].valid != decorated[j].valid {
			// invalid timestamps sort last
			return decorated[i].valid
		}
		return decorated[i].key < decorated[j].key
	})
	for i := range decorated {
		data[i] = decorated[i].datum
	}
}

View File

@ -2,189 +2,43 @@
package handler package handler
import ( import (
"context"
"net/http" "net/http"
"net/url" "strconv"
"time"
"modelRT/alert"
"modelRT/constants"
"modelRT/logger" "modelRT/logger"
"modelRT/network" "modelRT/network"
"github.com/bitly/go-simplejson"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/gorilla/websocket"
jsoniter "github.com/json-iterator/go"
) )
var wsUpgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
// CheckOrigin 必须返回 true否则浏览器会拒绝连接
CheckOrigin: func(_ *http.Request) bool {
// 在生产环境中,应该更严格地检查 Origin 头部
return true
},
}
// QueryRealTimeDataHandler define query real time data process API // QueryRealTimeDataHandler define query real time data process API
// @Summary 获取实时测点数据
// @Description 根据用户输入的组件token,从 dataRT 服务中持续获取测点实时数据
// @Tags RealTime Component
// @Accept json
// @Produce json
// @Param token query string true "测量点唯一标识符 (e.g.grid_1:zone_1:station_1:transformfeeder1_220.I_A_rms)"
// @Param begin query int true "查询起始时间 (Unix时间戳, e.g., 1761008266)"
// @Param end query int true "查询结束时间 (Unix时间戳, e.g., 1761526675)"
// @Success 200 {object} network.SuccessResponse{payload=network.RealTimeDataPayload} "返回实时数据成功"
//
// @Example 200 {
// "code": 200,
// "msg": "success",
// "payload": {
// "input": "grid1.zone1.station1.ns1.tag1.transformfeeder1_220.I_A_rms",
// "sub_pos": [
// {
// "time": 1736305467506000000,
// "value": 1
// }
// ]
// }
// }
//
// @Failure 400 {object} network.FailureResponse "返回实时数据失败"
//
// @Example 400 {
// "code": 400,
// "msg": "failed to get real time data from dataRT",
// }
//
// @Router /data/realtime [get]
func QueryRealTimeDataHandler(c *gin.Context) { func QueryRealTimeDataHandler(c *gin.Context) {
var request network.RealTimeQueryRequest var targetLevel constants.AlertLevel
if err := c.ShouldBindJSON(&request); err != nil { alertManger := alert.GetAlertMangerInstance()
logger.Error(c, "failed to unmarshal real time query request", "error", err)
c.JSON(http.StatusOK, network.FailureResponse{ levelStr := c.Query("level")
level, err := strconv.Atoi(levelStr)
if err != nil {
logger.Error(c, "convert alert level string to int failed", "error", err)
resp := network.FailureResponse{
Code: http.StatusBadRequest, Code: http.StatusBadRequest,
Msg: err.Error(), Msg: err.Error(),
})
return
} }
c.JSON(http.StatusOK, resp)
}
targetLevel = constants.AlertLevel(level)
events := alertManger.GetRangeEventsByLevel(targetLevel)
conn, err := wsUpgrader.Upgrade(c.Writer, c.Request, nil) resp := network.SuccessResponse{
if err != nil { Code: http.StatusOK,
logger.Error(c, "upgrade http protocol to websocket protocal failed", "error", err) Msg: "success",
return PayLoad: map[string]interface{}{
} "events": events,
defer conn.Close() },
// start a goroutine to open a websocket service with the dataRT service and use the channel to pass data back. Start and maintain the websocket connection with the front-end UI in the local api
transportChannel := make(chan []any, 100)
closeChannel := make(chan struct{})
for {
select {
case data := <-transportChannel:
respByte, err := jsoniter.Marshal(data)
if err != nil {
logger.Error(c, "marshal real time data to bytes failed", "error", err)
continue
}
err = conn.WriteMessage(websocket.TextMessage, respByte)
if err != nil {
logger.Error(c, "write message to websocket connection failed", "error", err)
continue
}
case <-closeChannel:
logger.Info(c, "data receiving goroutine has been closed")
// TODO 优化时间控制
deadline := time.Now().Add(5 * time.Second)
err := conn.WriteControl(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "the session ended normally"), deadline)
if err != nil {
logger.Error(c, "sending close control message failed", "error", err)
}
// gracefully close session processing
err = conn.Close()
if err != nil {
logger.Error(c, "websocket conn closed failed", "error", err)
}
logger.Info(c, "websocket connection closed successfully.")
}
}
}
// receiveRealTimeDataByWebSocket define func of receive real time data by websocket
func receiveRealTimeDataByWebSocket(ctx context.Context, params url.Values, transportChannel chan []any, closeChannel chan struct{}) {
serverURL := "ws://127.0.0.1:8888/ws/points"
u, err := url.Parse(serverURL)
if err != nil {
logger.Error(ctx, "parse url failed", "error", err)
}
q := u.Query()
for key, values := range params {
for _, value := range values {
q.Add(key, value)
}
}
u.RawQuery = q.Encode()
finalServerURL := u.String()
conn, resp, err := websocket.DefaultDialer.Dial(finalServerURL, nil)
if err != nil {
logger.Error(ctx, "dialing websocket server failed", "error", err)
if resp != nil {
logger.Error(ctx, "websocket server response", "status", resp.Status)
}
return
}
defer conn.Close()
for {
msgType, message, err := conn.ReadMessage()
if err != nil {
// check if it is an expected shutdown error
if websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) {
logger.Info(ctx, "connection closed normally")
} else {
logger.Error(ctx, "abnormal disconnection from websocket server", "err", err)
}
close(closeChannel)
break
}
logger.Info(ctx, "received info from dataRT server", "msg_type", messageTypeToString(msgType), "message", string(message))
js, err := simplejson.NewJson(message)
if err != nil {
logger.Error(ctx, "parse real time data from message failed", "message", string(message), "err", err)
continue
}
subPoss, err := js.Get("sub_pos").Array()
if err != nil {
logger.Error(ctx, "parse sub_pos struct from message json info", "sub_pos", js.Get("sub_pos"), "err", err)
continue
}
transportChannel <- subPoss
}
return
}
// messageTypeToString define func of auxiliary to convert message type to string
func messageTypeToString(t int) string {
switch t {
case websocket.TextMessage:
return "TEXT"
case websocket.BinaryMessage:
return "BINARY"
case websocket.PingMessage:
return "PING"
case websocket.PongMessage:
return "PONG"
case websocket.CloseMessage:
return "CLOSE"
default:
return "UNKNOWN"
} }
c.JSON(http.StatusOK, resp)
} }

View File

@ -1,741 +0,0 @@
// Package handler provides HTTP handlers for various endpoints.
package handler
import (
"context"
"fmt"
"maps"
"net/http"
"sync"
"modelRT/constants"
"modelRT/database"
"modelRT/logger"
"modelRT/network"
"modelRT/orm"
"modelRT/util"
"github.com/gin-gonic/gin"
"github.com/gofrs/uuid"
"gorm.io/gorm"
)
// globalSubState is the process-wide registry of real-time subscription
// configs, keyed by client ID; it is shared by every handler in this package.
var globalSubState *SharedSubState

// init eagerly creates the shared subscription state so handlers can use it
// without nil checks. NOTE(review): package-level mutable state initialised
// in init() complicates testing; consider explicit injection.
func init() {
	globalSubState = NewSharedSubState()
}
// RealTimeSubHandler define real time data subscriptions process API
// @Summary Start or stop real-time data subscriptions
// @Description Start or stop subscribing to measurement-node real-time data in the modelRT service, based on the component tokens supplied by the user
// @Tags RealTime Component
// @Accept json
// @Produce json
// @Param request body network.RealTimeSubRequest true "real-time data subscription for measurement nodes"
// @Success 200 {object} network.SuccessResponse{payload=network.RealTimeSubPayload} "per-target subscription result list"
//
//	@Example 200 {
//	  "code": 200,
//	  "msg": "success",
//	  "payload": {
//	    "targets": [
//	      {
//	        "id": "grid1.zone1.station1.ns1.tag1.bay.I11_C_rms",
//	        "code": "1001",
//	        "msg": "subscription success"
//	      },
//	      {
//	        "id": "grid1.zone1.station1.ns1.tag1.bay.I11_B_rms",
//	        "code": "1002",
//	        "msg": "subscription failed"
//	      }
//	    ]
//	  }
//	}
//
// @Failure 400 {object} network.FailureResponse{payload=network.RealTimeSubPayload} "per-target subscription result list"
//
//	@Example 400 {
//	  "code": 400,
//	  "msg": "failed to get recommend data from redis",
//	  "payload": {
//	    "targets": [
//	      {
//	        "id": "grid1.zone1.station1.ns1.tag1.bay.I11_A_rms",
//	        "code": "1002",
//	        "msg": "subscription failed"
//	      },
//	      {
//	        "id": "grid1.zone1.station1.ns1.tag1.bay.I11_B_rms",
//	        "code": "1002",
//	        "msg": "subscription failed"
//	      }
//	    ]
//	  }
//	}
//
// @Router /monitors/data/subscriptions [post]
func RealTimeSubHandler(c *gin.Context) {
	var request network.RealTimeSubRequest
	if err := c.ShouldBindJSON(&request); err != nil {
		logger.Error(c, "failed to unmarshal real time query request", "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
		})
		return
	}

	subAction, clientID, err := resolveSubAction(c, &request)
	if err != nil {
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
		})
		return
	}

	pgClient := database.GetPostgresDBClient()
	// open transaction; it is committed only when the selected operation
	// succeeds. BUG FIX: the previous `defer tx.Commit()` committed even when
	// the operation failed, persisting partially applied work.
	tx := pgClient.Begin()

	var results []network.TargetResult
	var opErr error
	switch subAction {
	case constants.SubStartAction:
		results, opErr = globalSubState.CreateConfig(c, tx, clientID, request.Measurements)
		if opErr != nil {
			logger.Error(c, "create real time data subscription config failed", "error", opErr)
		}
	case constants.SubStopAction:
		results, opErr = globalSubState.RemoveTargets(c, clientID, request.Measurements)
		if opErr != nil {
			logger.Error(c, "remove target to real time data subscription config failed", "error", opErr)
		}
	case constants.SubAppendAction:
		results, opErr = globalSubState.AppendTargets(c, tx, clientID, request.Measurements)
		if opErr != nil {
			logger.Error(c, "append target to real time data subscription config failed", "error", opErr)
		}
	case constants.SubUpdateAction:
		results, opErr = globalSubState.UpdateTargets(c, tx, clientID, request.Measurements)
		if opErr != nil {
			logger.Error(c, "update target to real time data subscription config failed", "error", opErr)
		}
	default:
		opErr = fmt.Errorf("%w: request action is %s", constants.ErrUnsupportedSubAction, request.Action)
		logger.Error(c, "unsupported action of real time data subscription request", "error", opErr)
		requestTargetsCount := processRealTimeRequestCount(request.Measurements)
		results = processRealTimeRequestTargets(request.Measurements, requestTargetsCount, opErr)
	}

	if opErr != nil {
		tx.Rollback()
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  opErr.Error(),
			Payload: network.RealTimeSubPayload{
				ClientID:      clientID,
				TargetResults: results,
			},
		})
		return
	}
	tx.Commit()
	c.JSON(http.StatusOK, network.SuccessResponse{
		Code: http.StatusOK,
		Msg:  "success",
		Payload: network.RealTimeSubPayload{
			ClientID:      clientID,
			TargetResults: results,
		},
	})
}

// resolveSubAction maps the requested action/clientID combination onto the
// internal sub-action and the effective client ID. A start request without a
// client ID begins a brand-new subscription and gets a fresh UUID; a start
// request WITH a client ID is treated as an append. An unknown combination
// yields an empty action, which the caller routes to its default branch.
func resolveSubAction(c *gin.Context, request *network.RealTimeSubRequest) (string, string, error) {
	switch {
	case request.Action == constants.SubStartAction && request.ClientID == "":
		id, err := uuid.NewV4()
		if err != nil {
			logger.Error(c, "failed to generate client id", "error", err)
			return "", "", err
		}
		return request.Action, id.String(), nil
	case request.Action == constants.SubStartAction && request.ClientID != "":
		return constants.SubAppendAction, request.ClientID, nil
	case request.Action == constants.SubStopAction && request.ClientID != "":
		return request.Action, request.ClientID, nil
	case request.Action == constants.SubUpdateAction && request.ClientID != "":
		return request.Action, request.ClientID, nil
	}
	return "", "", nil
}
// RealTimeSubMeasurement define struct of real time subscription measurement
// NOTE(review): targets is not referenced anywhere in this file — confirm the
// type is still used elsewhere, otherwise it can be removed.
type RealTimeSubMeasurement struct {
	targets []string // subscribed target identifier tokens
}
// TargetPollingContext define struct of real time pulling reverse context
// It carries everything the polling loop needs to (re)start a query
// goroutine for one target.
type TargetPollingContext struct {
	interval    string           // polling period string, parsed with time.ParseDuration
	measurement *orm.Measurement // measurement metadata resolved from the target token
}
// RealTimeSubConfig define struct of real time subscription config
// It holds a single client's subscription state. mutex guards measurements
// and targetContext; noticeChan delivers append/remove/update notifications
// to the client's polling loop. Contains a mutex, so never copy — always use
// via pointer.
type RealTimeSubConfig struct {
	noticeChan    chan *transportTargets // subscription-change notifications consumed by processTargetPolling
	mutex         sync.RWMutex           // guards measurements and targetContext
	measurements  map[string][]string    // interval -> targets subscribed at that interval
	targetContext map[string]*TargetPollingContext // target -> polling context (interval + measurement)
}
// SharedSubState define struct of shared subscription state with mutex
// globalMutex guards subMap; the per-client configs carry their own locks.
// Contains a mutex, so never copy — always use via pointer.
type SharedSubState struct {
	subMap      map[string]*RealTimeSubConfig // clientID -> subscription config
	globalMutex sync.RWMutex                  // guards subMap
}
// NewSharedSubState returns an empty SharedSubState whose subscription map is
// ready for use; the zero value of the embedded RWMutex needs no setup.
func NewSharedSubState() *SharedSubState {
	state := &SharedSubState{
		subMap: map[string]*RealTimeSubConfig{},
	}
	return state
}
// processAndValidateTargetsForStart define func to perform all database I/O operations in a lock-free state for start action
//
// It validates every requested target against the database and returns:
//   - a per-target result list (success or failure), in request order
//   - the list of targets that validated successfully
//   - a map of interval -> successfully validated targets
//   - a map of target -> polling context (interval + measurement metadata)
func processAndValidateTargetsForStart(ctx context.Context, tx *gorm.DB, measurements []network.RealTimeMeasurementItem, allReqTargetNum int) (
	[]network.TargetResult, []string,
	map[string][]string,
	map[string]*TargetPollingContext,
) {
	targetProcessResults := make([]network.TargetResult, 0, allReqTargetNum)
	newMeasMap := make(map[string][]string)
	successfulTargets := make([]string, 0, allReqTargetNum)
	newMeasContextMap := make(map[string]*TargetPollingContext)
	for _, measurementItem := range measurements {
		interval := measurementItem.Interval
		for _, target := range measurementItem.Targets {
			targetResult := network.TargetResult{ID: target}
			targetModel, err := database.ParseDataIdentifierToken(ctx, tx, target)
			if err != nil {
				logger.Error(ctx, "parse data indentity token failed", "error", err, "identity_token", target)
				targetResult.Code = constants.SubFailedCode
				targetResult.Msg = fmt.Sprintf("%s: %s", constants.SubFailedMsg, err.Error())
				targetProcessResults = append(targetProcessResults, targetResult)
				continue
			}
			targetResult.Code = constants.SubSuccessCode
			targetResult.Msg = constants.SubSuccessMsg
			targetProcessResults = append(targetProcessResults, targetResult)
			successfulTargets = append(successfulTargets, target)
			// idiomatic map-append replaces the exists-check / copy-out /
			// write-back sequence of the previous version
			newMeasMap[interval] = append(newMeasMap[interval], target)
			newMeasContextMap[target] = &TargetPollingContext{
				interval:    interval,
				measurement: targetModel.GetMeasurementInfo(),
			}
		}
	}
	return targetProcessResults, successfulTargets, newMeasMap, newMeasContextMap
}
// processAndValidateTargetsForUpdate define func to perform all database I/O operations in a lock-free state for update action
//
// In addition to the database validation done for the start action, each
// target must already be present in the client's subscription (targetContext)
// to be eligible for an update. Returns the per-target results, the list of
// valid targets, a map of new interval -> valid targets, and a map of
// target -> new polling context.
func processAndValidateTargetsForUpdate(ctx context.Context, tx *gorm.DB, config *RealTimeSubConfig, measurements []network.RealTimeMeasurementItem, allReqTargetNum int) (
	[]network.TargetResult, []string,
	map[string][]string,
	map[string]*TargetPollingContext,
) {
	targetProcessResults := make([]network.TargetResult, 0, allReqTargetNum)
	newMeasMap := make(map[string][]string)
	successfulTargets := make([]string, 0, allReqTargetNum)
	newMeasContextMap := make(map[string]*TargetPollingContext)
	for _, measurementItem := range measurements {
		interval := measurementItem.Interval
		for _, target := range measurementItem.Targets {
			targetResult := network.TargetResult{ID: target}
			// an update is only valid for a target that is already subscribed
			if _, exist := config.targetContext[target]; !exist {
				err := fmt.Errorf("target %s does not exists in subscription list", target)
				logger.Error(ctx, "update target does not exist in subscription list", "error", err, "target", target)
				targetResult.Code = constants.UpdateSubFailedCode
				targetResult.Msg = fmt.Sprintf("%s: %s", constants.UpdateSubFailedMsg, err.Error())
				targetProcessResults = append(targetProcessResults, targetResult)
				continue
			}
			targetModel, err := database.ParseDataIdentifierToken(ctx, tx, target)
			if err != nil {
				logger.Error(ctx, "parse data indentity token failed", "error", err, "identity_token", target)
				targetResult.Code = constants.UpdateSubFailedCode
				targetResult.Msg = fmt.Sprintf("%s: %s", constants.UpdateSubFailedMsg, err.Error())
				targetProcessResults = append(targetProcessResults, targetResult)
				continue
			}
			targetResult.Code = constants.UpdateSubSuccessCode
			targetResult.Msg = constants.UpdateSubSuccessMsg
			targetProcessResults = append(targetProcessResults, targetResult)
			successfulTargets = append(successfulTargets, target)
			// idiomatic map-append replaces the exists-check / copy-out /
			// write-back sequence of the previous version
			newMeasMap[interval] = append(newMeasMap[interval], target)
			newMeasContextMap[target] = &TargetPollingContext{
				interval:    interval,
				measurement: targetModel.GetMeasurementInfo(),
			}
		}
	}
	return targetProcessResults, successfulTargets, newMeasMap, newMeasContextMap
}
// mergeMeasurementsForStart define func to merge newMeasurementsMap into existingMeasurementsMap for start action
//
// New targets are deduplicated per interval against the targets the client
// already subscribed; duplicates are reported back and their polling contexts
// discarded. Fixes two defects of the previous version:
//  1. targets for an interval the client had never used before were silently
//     dropped (the merge only ran when the interval already existed);
//  2. the whole context map was re-copied on every iteration, which could
//     resurrect contexts of duplicates deleted in a later iteration.
func mergeMeasurementsForStart(config *RealTimeSubConfig, newMeasurements map[string][]string, newMeasurementsContextMap map[string]*TargetPollingContext) []string {
	allDuplicates := make([]string, 0)
	for interval, newMeas := range newMeasurements {
		existingMeas, exists := config.measurements[interval]
		if !exists {
			// brand-new interval: adopt the validated targets wholesale
			config.measurements[interval] = newMeas
			continue
		}
		// deduplication operations prevent duplicate subscriptions to the same measurement node
		deduplicated, duplicates := util.DeduplicateAndReportDuplicates(existingMeas, newMeas)
		for _, duplicate := range duplicates {
			delete(newMeasurementsContextMap, duplicate)
		}
		allDuplicates = append(allDuplicates, duplicates...)
		if len(deduplicated) > 0 {
			config.measurements[interval] = append(existingMeas, deduplicated...)
		}
	}
	// copy the surviving (non-duplicate) polling contexts exactly once
	maps.Copy(config.targetContext, newMeasurementsContextMap)
	return allDuplicates
}
// mergeMeasurementsForUpdate define func to merge newMeasurementsMap into existingMeasurementsMap for update action
//
// Three phases:
//  1. group every updated target by the interval it is currently stored under
//     (its old interval, read from targetContext);
//  2. remove those targets from their old interval buckets;
//  3. re-insert them under their new intervals, deduplicating against targets
//     already present; duplicates are reported and their new contexts dropped.
//
// NOTE(review): config.targetContext[measurement] is dereferenced without an
// existence check — a target missing from targetContext would panic here;
// callers validate this beforehand, confirm the invariant holds.
// NOTE(review): in phase 3 a target moved to an interval with no existing
// bucket is silently dropped from config.measurements (the `exists` guard),
// even though phase 2 already removed it from its old bucket.
// NOTE(review): maps.Copy re-copies the whole context map on every iteration
// that adds targets; a single copy after the loop would avoid re-adding
// contexts deleted as duplicates in later iterations.
func mergeMeasurementsForUpdate(config *RealTimeSubConfig, newMeasurements map[string][]string, newMeasurementsContextMap map[string]*TargetPollingContext) ([]string, error) {
	allDuplicates := make([]string, 0)
	// phase 1: old interval -> targets that must be removed from that bucket
	delMeasMap := make(map[string][]string)
	for _, newMeas := range newMeasurements {
		for _, measurement := range newMeas {
			oldInterval := config.targetContext[measurement].interval
			if _, exists := delMeasMap[oldInterval]; !exists {
				delMeasurements := []string{measurement}
				delMeasMap[oldInterval] = delMeasurements
			} else {
				delMeasurements := delMeasMap[oldInterval]
				delMeasurements = append(delMeasurements, measurement)
				delMeasMap[oldInterval] = delMeasurements
			}
		}
	}
	// phase 2: strip the targets out of their old interval buckets
	for interval, delMeas := range delMeasMap {
		existingMeas, exist := config.measurements[interval]
		if !exist {
			return nil, fmt.Errorf("can not find exist measurements in %s interval", interval)
		}
		measurements := util.RemoveTargetsFromSliceSimple(existingMeas, delMeas)
		config.measurements[interval] = measurements
	}
	// phase 3: insert under the new intervals, reporting duplicates
	for interval, newMeas := range newMeasurements {
		if existingMeas, exists := config.measurements[interval]; exists {
			deduplicated, duplicates := util.DeduplicateAndReportDuplicates(existingMeas, newMeas)
			if len(duplicates) > 0 {
				for _, duplicate := range duplicates {
					delete(newMeasurementsContextMap, duplicate)
				}
				allDuplicates = append(allDuplicates, duplicates...)
			}
			if len(deduplicated) > 0 {
				existingMeas = append(existingMeas, deduplicated...)
				config.measurements[interval] = existingMeas
				maps.Copy(config.targetContext, newMeasurementsContextMap)
			}
		}
	}
	return allDuplicates, nil
}
// CreateConfig define function to create config in SharedSubState.
// It validates the requested targets, then registers a fresh RealTimeSubConfig
// under clientID. Registering an already-known clientID is rejected; callers
// should use AppendTargets to extend an existing config.
func (s *SharedSubState) CreateConfig(ctx context.Context, tx *gorm.DB, clientID string, measurements []network.RealTimeMeasurementItem) ([]network.TargetResult, error) {
	totalTargets := processRealTimeRequestCount(measurements)
	results, _, measMap, measContexts := processAndValidateTargetsForStart(ctx, tx, measurements, totalTargets)
	s.globalMutex.Lock()
	defer s.globalMutex.Unlock()
	if _, found := s.subMap[clientID]; found {
		err := fmt.Errorf("clientID %s already exists. use AppendTargets to modify existing config", clientID)
		logger.Error(ctx, "clientID already exists. use AppendTargets to modify existing config", "error", err)
		return results, err
	}
	s.subMap[clientID] = &RealTimeSubConfig{
		noticeChan:    make(chan *transportTargets, constants.NoticeChanCap),
		measurements:  measMap,
		targetContext: measContexts,
	}
	return results, nil
}
// AppendTargets define function to append targets in SharedSubState.
// The clientID must already have a config (see CreateConfig). Validated targets
// are merged into the existing config; duplicates are flagged in the results and
// excluded from the OpAppend notification pushed onto the config's notice channel.
func (s *SharedSubState) AppendTargets(ctx context.Context, tx *gorm.DB, clientID string, measurements []network.RealTimeMeasurementItem) ([]network.TargetResult, error) {
	totalTargets := processRealTimeRequestCount(measurements)
	s.globalMutex.RLock()
	config, found := s.subMap[clientID]
	s.globalMutex.RUnlock()
	if !found {
		err := fmt.Errorf("clientID %s not found. use CreateConfig to start a new config", clientID)
		logger.Error(ctx, "clientID not found. use CreateConfig to start a new config", "error", err)
		return processRealTimeRequestTargets(measurements, totalTargets, err), err
	}
	results, okTargets, measMap, measContexts := processAndValidateTargetsForStart(ctx, tx, measurements, totalTargets)
	config.mutex.Lock()
	duplicates := mergeMeasurementsForStart(config, measMap, measContexts)
	if len(duplicates) > 0 {
		logger.Warn(ctx, "some targets are duplicate and have been ignored in append operation", "clientID", clientID, "duplicates", duplicates)
		// process repeat target in results and okTargets
		results, okTargets = filterAndDeduplicateRepeatTargets(results, okTargets, duplicates)
	}
	config.mutex.Unlock()
	if len(okTargets) > 0 {
		config.noticeChan <- &transportTargets{
			OperationType: constants.OpAppend,
			Targets:       okTargets,
		}
	}
	return results, nil
}
// filterAndDeduplicateRepeatTargets marks every result whose ID is in duplicates
// with the "repeat subscription" code/message, and returns idsSlice with those
// duplicate IDs filtered out. resultsSlice is mutated in place and returned.
func filterAndDeduplicateRepeatTargets(resultsSlice []network.TargetResult, idsSlice []string, duplicates []string) ([]network.TargetResult, []string) {
	dupSet := make(map[string]struct{}, len(duplicates))
	for _, d := range duplicates {
		dupSet[d] = struct{}{}
	}
	for i := range resultsSlice {
		if _, repeated := dupSet[resultsSlice[i].ID]; repeated {
			resultsSlice[i].Code = constants.SubRepeatCode
			resultsSlice[i].Msg = constants.SubRepeatMsg
		}
	}
	kept := make([]string, 0, len(idsSlice))
	for _, id := range idsSlice {
		if _, repeated := dupSet[id]; !repeated {
			kept = append(kept, id)
		}
	}
	return resultsSlice, kept
}
// UpsertTargets define function to upsert targets in SharedSubState.
// If a config already exists for clientID the validated targets are merged into
// it and notified as OpUpdate; otherwise a new config is created and the targets
// are notified as OpAppend. The existence check is re-done under the write lock
// to close the race between the RLock check and config creation.
func (s *SharedSubState) UpsertTargets(ctx context.Context, tx *gorm.DB, clientID string, measurements []network.RealTimeMeasurementItem) ([]network.TargetResult, error) {
	requestTargetsCount := processRealTimeRequestCount(measurements)
	targetProcessResults, successfulTargets, newMeasMap, newMeasContextMap := processAndValidateTargetsForStart(ctx, tx, measurements, requestTargetsCount)
	s.globalMutex.RLock()
	config, exist := s.subMap[clientID]
	s.globalMutex.RUnlock()
	var opType constants.TargetOperationType
	if exist {
		opType = constants.OpUpdate
		config.mutex.Lock()
		mergeMeasurementsForStart(config, newMeasMap, newMeasContextMap)
		config.mutex.Unlock()
	} else {
		s.globalMutex.Lock()
		if config, exist = s.subMap[clientID]; !exist {
			opType = constants.OpAppend
			config = &RealTimeSubConfig{
				noticeChan:   make(chan *transportTargets, constants.NoticeChanCap),
				measurements: newMeasMap,
				// record the polling contexts as well: CreateConfig does this, and
				// the original omission left targetContext nil, which later panics
				// in mergeMeasurementsForUpdate
				targetContext: newMeasContextMap,
			}
			s.subMap[clientID] = config
			s.globalMutex.Unlock()
		} else {
			// another goroutine created the config between the RLock check and
			// acquiring the write lock: release the global lock exactly once here
			// (the original unlocked both here and after the if/else, panicking
			// with "unlock of unlocked mutex"), then merge as an update
			s.globalMutex.Unlock()
			opType = constants.OpUpdate
			config.mutex.Lock()
			mergeMeasurementsForStart(config, newMeasMap, newMeasContextMap)
			config.mutex.Unlock()
		}
	}
	if len(successfulTargets) > 0 {
		config.noticeChan <- &transportTargets{
			OperationType: opType,
			Targets:       successfulTargets,
		}
	}
	return targetProcessResults, nil
}
// RemoveTargets define function to remove targets in SharedSubState.
// For each requested interval it removes the listed targets from the client's
// config, reports per-target success/failure, notifies listeners via OpRemove,
// and drops the whole client entry when no measurements remain.
func (s *SharedSubState) RemoveTargets(ctx context.Context, clientID string, measurements []network.RealTimeMeasurementItem) ([]network.TargetResult, error) {
	requestTargetsCount := processRealTimeRequestCount(measurements)
	targetProcessResults := make([]network.TargetResult, 0, requestTargetsCount)
	s.globalMutex.RLock()
	config, exist := s.subMap[clientID]
	s.globalMutex.RUnlock()
	if !exist {
		err := fmt.Errorf("clientID %s not found", clientID)
		logger.Error(ctx, "clientID not found in remove targets operation", "error", err)
		return processRealTimeRequestTargets(measurements, requestTargetsCount, err), err
	}
	// measurements is the list of items to be removed passed in the request
	transportTargets := &transportTargets{
		OperationType: constants.OpRemove,
		Targets:       make([]string, 0, requestTargetsCount),
	}
	config.mutex.Lock()
	for _, measurement := range measurements {
		interval := measurement.Interval
		// existingTargets is the locally running listener configuration
		existingTargets, measExist := config.measurements[interval]
		if !measExist {
			logger.Error(ctx, fmt.Sprintf("measurement with interval %s not found under clientID %s", interval, clientID), "clientID", clientID, "interval", interval)
			// report a failure for every REQUESTED target of the missing interval
			// (the original ranged over the empty local slice and reported nothing)
			for _, target := range measurement.Targets {
				targetProcessResults = append(targetProcessResults, network.TargetResult{
					ID:   target,
					Code: constants.CancelSubFailedCode,
					Msg:  constants.CancelSubFailedMsg,
				})
			}
			continue
		}
		targetsToRemoveMap := make(map[string]struct{}, len(measurement.Targets))
		for _, target := range measurement.Targets {
			targetsToRemoveMap[target] = struct{}{}
		}
		remaining := make([]string, 0, len(existingTargets))
		for _, existingTarget := range existingTargets {
			if _, found := targetsToRemoveMap[existingTarget]; !found {
				remaining = append(remaining, existingTarget)
				continue
			}
			transportTargets.Targets = append(transportTargets.Targets, existingTarget)
			targetProcessResults = append(targetProcessResults, network.TargetResult{
				ID:   existingTarget,
				Code: constants.CancelSubSuccessCode,
				Msg:  constants.CancelSubSuccessMsg,
			})
			delete(targetsToRemoveMap, existingTarget)
			delete(config.targetContext, existingTarget)
		}
		if len(remaining) == 0 {
			delete(config.measurements, interval)
		} else {
			// persist the shrunken slice; the original computed newTargets but
			// never wrote it back, so removed targets stayed subscribed
			config.measurements[interval] = remaining
		}
		// anything left in the map was requested but not currently subscribed
		if len(targetsToRemoveMap) > 0 {
			err := fmt.Errorf("target remove were not found under clientID %s and interval %s", clientID, interval)
			for target := range targetsToRemoveMap {
				targetProcessResults = append(targetProcessResults, network.TargetResult{
					ID:   target,
					Code: constants.CancelSubFailedCode,
					Msg:  fmt.Sprintf("%s: %s", constants.SubFailedMsg, err.Error()),
				})
			}
		}
	}
	shouldRemoveClient := len(config.measurements) == 0
	config.mutex.Unlock()
	// pass the removed subscription configuration to the notice channel
	config.noticeChan <- transportTargets
	if shouldRemoveClient {
		s.globalMutex.Lock()
		// re-check under the write lock: another goroutine may have re-added targets
		if currentConfig, ok := s.subMap[clientID]; ok && len(currentConfig.measurements) == 0 {
			delete(s.subMap, clientID)
		}
		s.globalMutex.Unlock()
	}
	return targetProcessResults, nil
}
// UpdateTargets define function to update targets in SharedSubState.
// The clientID must already have a config. Validated targets are moved to their
// new intervals via mergeMeasurementsForUpdate; duplicates are flagged in the
// results and excluded from the OpUpdate notification.
func (s *SharedSubState) UpdateTargets(ctx context.Context, tx *gorm.DB, clientID string, measurements []network.RealTimeMeasurementItem) ([]network.TargetResult, error) {
	requestTargetsCount := processRealTimeRequestCount(measurements)
	s.globalMutex.RLock()
	config, exist := s.subMap[clientID]
	s.globalMutex.RUnlock()
	if !exist {
		// the original called RUnlock a second time here, panicking with
		// "RUnlock of unlocked RWMutex"; the lock is already released above
		err := fmt.Errorf("clientID %s not found", clientID)
		logger.Error(ctx, "clientID not found in update targets operation", "error", err)
		return processRealTimeRequestTargets(measurements, requestTargetsCount, err), err
	}
	targetProcessResults, successfulTargets, newMeasMap, newMeasContextMap := processAndValidateTargetsForUpdate(ctx, tx, config, measurements, requestTargetsCount)
	config.mutex.Lock()
	allDuplicates, err := mergeMeasurementsForUpdate(config, newMeasMap, newMeasContextMap)
	if err != nil {
		logger.Warn(ctx, "can not find exist measurements in target interval", "clientID", clientID, "duplicates", allDuplicates, "error", err)
	}
	if len(allDuplicates) > 0 {
		logger.Warn(ctx, "some targets are duplicate and have been ignored in update operation", "clientID", clientID, "duplicates", allDuplicates)
		// process repeat target in targetProcessResults and successfulTargets
		targetProcessResults, successfulTargets = filterAndDeduplicateRepeatTargets(targetProcessResults, successfulTargets, allDuplicates)
	}
	config.mutex.Unlock()
	if len(successfulTargets) > 0 {
		config.noticeChan <- &transportTargets{
			OperationType: constants.OpUpdate,
			Targets:       successfulTargets,
		}
	}
	return targetProcessResults, nil
}
// Get define function to get subscriptions config from SharedSubState.
// It returns the config registered for clientID and whether one exists;
// a missing clientID yields (nil, false). Safe for concurrent use.
func (s *SharedSubState) Get(clientID string) (*RealTimeSubConfig, bool) {
	s.globalMutex.RLock()
	defer s.globalMutex.RUnlock()
	// a missing key already yields the nil zero value, so no explicit branch needed
	config, ok := s.subMap[clientID]
	return config, ok
}
// processRealTimeRequestCount returns the total number of targets contained in
// all measurement items of the request.
func processRealTimeRequestCount(measurements []network.RealTimeMeasurementItem) int {
	count := 0
	for i := range measurements {
		count += len(measurements[i].Targets)
	}
	return count
}
// processRealTimeRequestTargets builds a failure TargetResult for every target in
// the request, stamping each with SubFailedCode and the given error's message.
// targetCount pre-sizes the result slice.
func processRealTimeRequestTargets(measurements []network.RealTimeMeasurementItem, targetCount int, err error) []network.TargetResult {
	results := make([]network.TargetResult, 0, targetCount)
	// the message is identical for every target, so format it once up front
	msg := fmt.Sprintf("%s: %s", constants.SubFailedMsg, err.Error())
	for _, item := range measurements {
		for _, target := range item.Targets {
			results = append(results, network.TargetResult{
				ID:   target,
				Code: constants.SubFailedCode,
				Msg:  msg,
			})
		}
	}
	return results
}
// transportTargets define struct to transport update or remove target to real
// time pull api
type transportTargets struct {
	OperationType constants.TargetOperationType // which change happened: OpAppend, OpUpdate, or OpRemove
	Targets []string // IDs of the targets affected by the operation
}

View File

@ -2,10 +2,8 @@
package logger package logger
import ( import (
"fmt"
"os" "os"
"sync" "sync"
"time"
"modelRT/config" "modelRT/config"
"modelRT/constants" "modelRT/constants"
@ -34,10 +32,8 @@ func getEncoder() zapcore.Encoder {
// getLogWriter responsible for setting the location of log storage // getLogWriter responsible for setting the location of log storage
func getLogWriter(mode, filename string, maxsize, maxBackup, maxAge int, compress bool) zapcore.WriteSyncer { func getLogWriter(mode, filename string, maxsize, maxBackup, maxAge int, compress bool) zapcore.WriteSyncer {
dateStr := time.Now().Format("2006-01-02 15:04:05")
finalFilename := fmt.Sprintf(filename, dateStr)
lumberJackLogger := &lumberjack.Logger{ lumberJackLogger := &lumberjack.Logger{
Filename: finalFilename, // log file position Filename: filename, // log file position
MaxSize: maxsize, // log file maxsize MaxSize: maxsize, // log file maxsize
MaxAge: maxAge, // maximum number of day files retained MaxAge: maxAge, // maximum number of day files retained
MaxBackups: maxBackup, // maximum number of old files retained MaxBackups: maxBackup, // maximum number of old files retained
@ -77,6 +73,7 @@ func InitLoggerInstance(lCfg config.LoggerConfig) {
once.Do(func() { once.Do(func() {
_globalLogger = initLogger(lCfg) _globalLogger = initLogger(lCfg)
}) })
defer _globalLogger.Sync()
} }
// GetLoggerInstance define func of returns the global logger instance It's safe for concurrent use. // GetLoggerInstance define func of returns the global logger instance It's safe for concurrent use.

51
main.go
View File

@ -3,10 +3,7 @@ package main
import ( import (
"context" "context"
"errors"
"flag" "flag"
"fmt"
"log"
"net/http" "net/http"
"os" "os"
"os/signal" "os/signal"
@ -25,10 +22,11 @@ import (
"modelRT/middleware" "modelRT/middleware"
"modelRT/model" "modelRT/model"
"modelRT/pool" "modelRT/pool"
realtimedata "modelRT/real-time-data"
"modelRT/router" "modelRT/router"
"modelRT/util" "modelRT/util"
realtimedata "modelRT/real-time-data"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/panjf2000/ants/v2" "github.com/panjf2000/ants/v2"
swaggerFiles "github.com/swaggo/files" swaggerFiles "github.com/swaggo/files"
@ -51,7 +49,7 @@ var (
var ( var (
modelRTConfig config.ModelRTConfig modelRTConfig config.ModelRTConfig
postgresDBClient *gorm.DB postgresDBClient *gorm.DB
// alertManager *alert.EventManager alertManager *alert.EventManager
) )
// TODO 使用 wire 依赖注入管理 DVIE 面板注册的 panel // TODO 使用 wire 依赖注入管理 DVIE 面板注册的 panel
@ -75,28 +73,27 @@ func main() {
flag.Parse() flag.Parse()
ctx := context.TODO() ctx := context.TODO()
// init logger
logger.InitLoggerInstance(modelRTConfig.LoggerConfig)
configPath := filepath.Join(*modelRTConfigDir, *modelRTConfigName+"."+*modelRTConfigType) configPath := filepath.Join(*modelRTConfigDir, *modelRTConfigName+"."+*modelRTConfigType)
if _, err := os.Stat(configPath); os.IsNotExist(err) { if _, err := os.Stat(configPath); os.IsNotExist(err) {
log.Println("configuration file not found,checking for example file") logger.Info(ctx, "configuration file not found,checking for example file")
exampleConfigPath := filepath.Join(*modelRTConfigDir, *modelRTConfigName+".example."+*modelRTConfigType) exampleConfigPath := filepath.Join(*modelRTConfigDir, *modelRTConfigName+".example."+*modelRTConfigType)
if _, err := os.Stat(exampleConfigPath); err == nil { if _, err := os.Stat(exampleConfigPath); err == nil {
if err := util.CopyFile(exampleConfigPath, configPath); err != nil { if err := util.CopyFile(exampleConfigPath, configPath); err != nil {
panicErr := fmt.Errorf("failed to copy example config file:%w", err) logger.Error(ctx, "failed to copy example config file", "error", err)
panic(panicErr) panic(err)
} }
logger.Info(ctx, "successfully copied example config to actual config file")
} else { } else {
panicErr := errors.New("no config file and no config example file found") logger.Error(ctx, "No config file and no config example file found.")
panic(panicErr)
} }
} }
modelRTConfig = config.ReadAndInitConfig(*modelRTConfigDir, *modelRTConfigName, *modelRTConfigType) modelRTConfig = config.ReadAndInitConfig(*modelRTConfigDir, *modelRTConfigName, *modelRTConfigType)
// init logger
logger.InitLoggerInstance(modelRTConfig.LoggerConfig)
defer logger.GetLoggerInstance().Sync()
hostName, err := os.Hostname() hostName, err := os.Hostname()
if err != nil { if err != nil {
logger.Error(ctx, "get host name failed", "error", err) logger.Error(ctx, "get host name failed", "error", err)
@ -149,6 +146,12 @@ func main() {
} }
defer anchorRealTimePool.Release() defer anchorRealTimePool.Release()
// init cancel context
cancelCtx, cancel := context.WithCancel(ctx)
defer cancel()
// init real time data receive channel
go realtimedata.ReceiveChan(cancelCtx)
postgresDBClient.Transaction(func(tx *gorm.DB) error { postgresDBClient.Transaction(func(tx *gorm.DB) error {
// load circuit diagram from postgres // load circuit diagram from postgres
// componentTypeMap, err := database.QueryCircuitDiagramComponentFromDB(cancelCtx, tx, parsePool) // componentTypeMap, err := database.QueryCircuitDiagramComponentFromDB(cancelCtx, tx, parsePool)
@ -157,13 +160,7 @@ func main() {
// panic(err) // panic(err)
// } // }
allMeasurement, err := database.GetAllMeasurements(ctx, tx) // TODO 暂时屏蔽完成 swagger 启动测试
if err != nil {
logger.Error(ctx, "load topologic info from postgres failed", "error", err)
panic(err)
}
go realtimedata.StartRealTimeDataComputing(ctx, allMeasurement)
tree, err := database.QueryTopologicFromDB(ctx, tx) tree, err := database.QueryTopologicFromDB(ctx, tx)
if err != nil { if err != nil {
logger.Error(ctx, "load topologic info from postgres failed", "error", err) logger.Error(ctx, "load topologic info from postgres failed", "error", err)
@ -173,6 +170,10 @@ func main() {
return nil return nil
}) })
// TODO 完成订阅数据分析
// TODO 暂时屏蔽完成 swagger 启动测试
// go realtimedata.RealTimeDataComputer(ctx, nil, []string{}, "")
// use release mode in productio // use release mode in productio
// gin.SetMode(gin.ReleaseMode) // gin.SetMode(gin.ReleaseMode)
engine := gin.New() engine := gin.New()
@ -214,7 +215,7 @@ func main() {
// } // }
server := http.Server{ server := http.Server{
Addr: modelRTConfig.ServiceConfig.ServiceAddr, Addr: ":8080",
Handler: engine, Handler: engine,
} }
@ -224,7 +225,7 @@ func main() {
go func() { go func() {
<-done <-done
if err := server.Shutdown(context.Background()); err != nil { if err := server.Shutdown(context.Background()); err != nil {
logger.Error(ctx, "shutdown serverError", "err", err) logger.Error(ctx, "ShutdownServerError", "err", err)
} }
}() }()
@ -233,10 +234,10 @@ func main() {
if err != nil { if err != nil {
if err == http.ErrServerClosed { if err == http.ErrServerClosed {
// the service receives the shutdown signal normally and then closes // the service receives the shutdown signal normally and then closes
logger.Info(ctx, "server closed under request") logger.Info(ctx, "Server closed under request")
} else { } else {
// abnormal shutdown of service // abnormal shutdown of service
logger.Error(ctx, "server closed unexpected", "err", err) logger.Error(ctx, "Server closed unexpected", "err", err)
} }
} }
} }

View File

@ -1,47 +0,0 @@
package middleware
import (
"net"
"net/http"
"net/http/httputil"
"os"
"runtime/debug"
"strings"
"modelRT/logger"
"github.com/gin-gonic/gin"
)
// GinPanicRecovery define func of customizing gin recover output
func GinPanicRecovery() gin.HandlerFunc {
return func(c *gin.Context) {
defer func() {
if err := recover(); err != nil {
// Check for a broken connection, as it is not really a
// condition that warrants a panic stack trace.
var brokenPipe bool
if ne, ok := err.(*net.OpError); ok {
if se, ok := ne.Err.(*os.SyscallError); ok {
if strings.Contains(strings.ToLower(se.Error()), "broken pipe") || strings.Contains(strings.ToLower(se.Error()), "connection reset by peer") {
brokenPipe = true
}
}
}
httpRequest, _ := httputil.DumpRequest(c.Request, false)
if brokenPipe {
logger.Error(c, "http request broken pipe", "path", c.Request.URL.Path, "error", err, "request", string(httpRequest))
// If the connection is dead, we can't write a status to it.
c.Error(err.(error))
c.Abort()
return
}
logger.Error(c, "http_request_panic", "path", c.Request.URL.Path, "error", err, "request", string(httpRequest), "stack", string(debug.Stack()))
c.AbortWithError(http.StatusInternalServerError, err.(error))
}
}()
c.Next()
}
}

View File

@ -19,7 +19,7 @@ type AttrModelInterface interface {
type LongAttrInfo struct { type LongAttrInfo struct {
AttrGroupName string AttrGroupName string
AttrKey string AttrKey string
AttrValue any AttrValue interface{}
GridInfo *orm.Grid GridInfo *orm.Grid
ZoneInfo *orm.Zone ZoneInfo *orm.Zone
StationInfo *orm.Station StationInfo *orm.Station
@ -52,7 +52,7 @@ func (l *LongAttrInfo) IsLocal() bool {
} }
// GetAttrValue define return the attribute value // GetAttrValue define return the attribute value
func (l *LongAttrInfo) GetAttrValue() any { func (l *LongAttrInfo) GetAttrValue() interface{} {
return l.AttrValue return l.AttrValue
} }
@ -60,7 +60,7 @@ func (l *LongAttrInfo) GetAttrValue() any {
type ShortAttrInfo struct { type ShortAttrInfo struct {
AttrGroupName string AttrGroupName string
AttrKey string AttrKey string
AttrValue any AttrValue interface{}
ComponentInfo *orm.Component ComponentInfo *orm.Component
} }
@ -90,6 +90,6 @@ func (s *ShortAttrInfo) IsLocal() bool {
} }
// GetAttrValue define return the attribute value // GetAttrValue define return the attribute value
func (l *ShortAttrInfo) GetAttrValue() any { func (l *ShortAttrInfo) GetAttrValue() interface{} {
return l.AttrValue return l.AttrValue
} }

View File

@ -1,119 +0,0 @@
// Package model define model struct of model runtime service
package model
import "modelRT/orm"
// IndentityTokenModelInterface define basic identity token model type interface
type IndentityTokenModelInterface interface {
GetComponentInfo() *orm.Component
GetMeasurementInfo() *orm.Measurement
GetGridTag() string // token1
GetZoneTag() string // token2
GetStationTag() string // token3
GetNamespacePath() string // token4(COMPONENT TABLE NSPATH)
GetComponentTag() string // token5(COMPONENT TABLE TAG)
GetAttributeGroup() string // token6(component attribute group information)
GetMeasurementTag() string // token7(measurement value or attribute field)
IsLocal() bool
}
// LongIdentityTokenModel define struct to long identity token info
type LongIdentityTokenModel struct {
ComponentInfo *orm.Component
MeasurementInfo *orm.Measurement
GridTag string
ZoneTag string
StationTag string
NamespacePath string
ComponentTag string
AttributeGroup string
MeasurementTag string
}
// GetComponentInfo define return the component information in the long identity token
func (l *LongIdentityTokenModel) GetComponentInfo() *orm.Component {
return l.ComponentInfo
}
// GetMeasurementInfo define return the measurement information in the long identity token
func (l *LongIdentityTokenModel) GetMeasurementInfo() *orm.Measurement {
return l.MeasurementInfo
}
// GetGridTag define function to return the grid tag information in the long identity token
func (l *LongIdentityTokenModel) GetGridTag() string { return l.GridTag }
// GetZoneTag define function to return the zone tag information in the long identity token
func (l *LongIdentityTokenModel) GetZoneTag() string { return l.ZoneTag }
// GetStationTag define function to return the station tag information in the long identity token
func (l *LongIdentityTokenModel) GetStationTag() string { return l.StationTag }
// GetNamespacePath define function to return the namespace path information in the long identity token
func (l *LongIdentityTokenModel) GetNamespacePath() string { return l.NamespacePath }
// GetComponentTag define function to return the component tag information in the long identity token
func (l *LongIdentityTokenModel) GetComponentTag() string { return l.ComponentTag }
// GetAttributeGroup define function to return the attribute group information in the long identity token
func (l *LongIdentityTokenModel) GetAttributeGroup() string { return l.AttributeGroup }
// GetMeasurementTag define function to return the measurement tag information in the long identity token
func (l *LongIdentityTokenModel) GetMeasurementTag() string {
return l.MeasurementTag
}
// IsLocal define return the is_local information in the long identity token
func (l *LongIdentityTokenModel) IsLocal() bool {
return false
}
// ShortIdentityTokenModel define struct to short identity token info
type ShortIdentityTokenModel struct {
ComponentInfo *orm.Component
MeasurementInfo *orm.Measurement
GridTag string // token1
ZoneTag string // token2
StationTag string // token3
NamespacePath string // token4
MeasurementTag string // token7
}
// GetComponentInfo define return the component information in the short identity token
func (s *ShortIdentityTokenModel) GetComponentInfo() *orm.Component {
return s.ComponentInfo
}
// GetMeasurementInfo define return the measurement information in the long identity token
func (s *ShortIdentityTokenModel) GetMeasurementInfo() *orm.Measurement {
return s.MeasurementInfo
}
// GetGridTag define function to return the grid tag information in the short identity token
func (s *ShortIdentityTokenModel) GetGridTag() string { return "" }
// GetZoneTag define function to return the zone tag information in the short identity token
func (s *ShortIdentityTokenModel) GetZoneTag() string { return "" }
// GetStationTag define function to return the station tag information in the short identity token
func (s *ShortIdentityTokenModel) GetStationTag() string { return "" }
// GetNamespacePath define function to return the namespace path information in the short identity token
func (s *ShortIdentityTokenModel) GetNamespacePath() string { return s.NamespacePath }
// GetComponentTag define function to return the component tag information in the short identity token
func (s *ShortIdentityTokenModel) GetComponentTag() string { return "" }
// GetAttributeGroup define function to return the attribute group information in the short identity token
func (s *ShortIdentityTokenModel) GetAttributeGroup() string { return "" }
// GetMeasurementTag define function to return the measurement tag information in the short identity token
func (s *ShortIdentityTokenModel) GetMeasurementTag() string {
return ""
}
// IsLocal define return the is_local information in the short identity token
func (s *ShortIdentityTokenModel) IsLocal() bool {
return true
}

View File

@ -17,7 +17,7 @@ type MeasurementDataSource struct {
} }
// IOAddress define interface of IO address // IOAddress define interface of IO address
type IOAddress any type IOAddress interface{}
// CL3611Address define CL3611 protol struct // CL3611Address define CL3611 protol struct
type CL3611Address struct { type CL3611Address struct {
@ -174,98 +174,3 @@ func (m MeasurementDataSource) GetIOAddress() (IOAddress, error) {
return nil, constants.ErrUnknownDataType return nil, constants.ErrUnknownDataType
} }
} }
// GenerateMeasureIdentifier define func of generate measurement identifier
func GenerateMeasureIdentifier(source map[string]any) (string, error) {
regTypeVal, ok := source["type"]
if !ok {
return "", fmt.Errorf("can not find type in datasource field")
}
var regType int
switch v := regTypeVal.(type) {
case int:
regType = v
case float32:
if v != float32(int(v)) {
return "", fmt.Errorf("invalid type format in datasource field, expected integer value, got float: %f", v)
}
regType = int(v)
case float64:
if v != float64(int(v)) {
return "", fmt.Errorf("invalid type format in datasource field, expected integer value, got float: %f", v)
}
regType = int(v)
default:
return "", fmt.Errorf("invalid type format in datasource field,%T", v)
}
ioAddrVal, ok := source["io_address"]
if !ok {
return "", fmt.Errorf("can not find io_address from datasource field")
}
ioAddress, ok := ioAddrVal.(map[string]any)
if !ok {
return "", fmt.Errorf("io_address field is not a valid map")
}
switch regType {
case constants.DataSourceTypeCL3611:
station, ok := ioAddress["station"].(string)
if !ok {
return "", fmt.Errorf("CL3611:invalid or missing station field")
}
device, ok := ioAddress["device"].(string)
if !ok {
return "", fmt.Errorf("CL3611:invalid or missing device field")
}
// 提取 channel (string)
channel, ok := ioAddress["channel"].(string)
if !ok {
return "", fmt.Errorf("CL3611:invalid or missing channel field")
}
return concatCL361WithPlus(station, device, channel), nil
case constants.DataSourceTypePower104:
station, ok := ioAddress["station"].(string)
if !ok {
return "", fmt.Errorf("Power104:invalid or missing station field")
}
packetVal, ok := ioAddress["packet"]
if !ok {
return "", fmt.Errorf("Power104: missing packet field")
}
var packet int
switch v := packetVal.(type) {
case int:
packet = v
default:
return "", fmt.Errorf("Power104:invalid packet format")
}
offsetVal, ok := ioAddress["offset"]
if !ok {
return "", fmt.Errorf("Power104:missing offset field")
}
var offset int
switch v := offsetVal.(type) {
case int:
offset = v
default:
return "", fmt.Errorf("Power104:invalid offset format")
}
return concatP104WithPlus(station, packet, offset), nil
default:
return "", fmt.Errorf("unsupport regulation type %d into datasource field", regType)
}
}
func concatP104WithPlus(station string, packet int, offset int) string {
packetStr := strconv.Itoa(packet)
offsetStr := strconv.Itoa(offset)
return station + ":" + packetStr + ":" + offsetStr
}
func concatCL361WithPlus(station, device, channel string) string {
return station + ":" + device + ":" + "phasor" + ":" + channel
}

View File

@ -13,7 +13,6 @@ import (
"github.com/RediSearch/redisearch-go/v2/redisearch" "github.com/RediSearch/redisearch-go/v2/redisearch"
redigo "github.com/gomodule/redigo/redis" redigo "github.com/gomodule/redigo/redis"
"github.com/redis/go-redis/v9"
) )
var ac *redisearch.Autocompleter var ac *redisearch.Autocompleter
@ -28,594 +27,187 @@ func RedisSearchRecommend(ctx context.Context, input string) ([]string, bool, er
rdb := diagram.GetRedisClientInstance() rdb := diagram.GetRedisClientInstance()
if input == "" { if input == "" {
// return all grid tagname // 返回所有 grid 名
return getKeyBySpecificsLevel(ctx, rdb, 1, input) return getAllGridKeys(ctx, constants.RedisAllGridSetKey)
} }
inputSlice := strings.Split(input, ".") inputSlice := strings.Split(input, ".")
inputSliceLen := len(inputSlice) inputSliceLen := len(inputSlice)
originInputLen := len(inputSlice)
switch inputSliceLen { switch inputSliceLen {
case 1: case 1:
// grid tagname search // TODO 优化成NewSet的形式
gridSearchInput := inputSlice[0] gridExist, err := rdb.SIsMember(ctx, constants.RedisAllGridSetKey, input).Result()
gridExists, err := rdb.SIsMember(ctx, constants.RedisAllGridSetKey, gridSearchInput).Result()
if err != nil { if err != nil {
logger.Error(ctx, "check grid key exist failed ", "grid_key", input, "error", err) logger.Error(ctx, "check grid key exist failed ", "grid_key", input, "error", err)
return []string{}, false, err return []string{}, false, err
} }
if gridExists { searchInput := input
return []string{"."}, false, nil inputLen := inputSliceLen
} for inputLen != 0 && !gridExist {
results, err := ac.SuggestOpts(searchInput, redisearch.SuggestOptions{
// start grid tagname fuzzy search Num: math.MaxInt16,
recommends, err := runFuzzySearch(ctx, gridSearchInput, "", inputSliceLen) Fuzzy: true,
WithScores: false,
WithPayloads: false,
})
if err != nil { if err != nil {
logger.Error(ctx, "fuzzy search failed for level 1", "search_input", gridSearchInput, "error", err) logger.Error(ctx, "query info by fuzzy failed", "query_key", input, "error", err)
return []string{}, false, err return []string{}, false, err
} }
if len(recommends) > 0 { if len(results) == 0 {
// TODO 构建 for 循环返回所有可能的补全
searchInput = searchInput[:len(searchInput)-1]
inputLen = len(searchInput)
continue
}
var recommends []string
for _, result := range results {
termSlice := strings.Split(result.Term, ".")
if len(termSlice) <= originInputLen {
recommends = append(recommends, result.Term)
}
}
// 返回模糊查询结果
return recommends, true, nil return recommends, true, nil
} }
return []string{}, true, nil
case 2:
return handleLevelFuzzySearch(ctx, rdb, 2, constants.RedisAllZoneSetKey, inputSlice)
case 3:
return handleLevelFuzzySearch(ctx, rdb, 3, constants.RedisAllStationSetKey, inputSlice)
case 4:
return handleLevelFuzzySearch(ctx, rdb, 4, constants.RedisAllCompNSPathSetKey, inputSlice)
case 5:
return handleLevelFuzzySearch(ctx, rdb, 5, constants.RedisAllCompTagSetKey, inputSlice)
case 6:
return handleLevelFuzzySearch(ctx, rdb, 6, constants.RedisAllConfigSetKey, inputSlice)
case 7:
return handleLevelFuzzySearch(ctx, rdb, 7, constants.RedisAllMeasTagSetKey, inputSlice)
// 处理 input 不为空、不含有.并且 input 是一个完整的 grid key 的情况
if strings.HasSuffix(input, ".") == false {
recommend := input + "."
return []string{recommend}, false, nil
}
default: default:
logger.Error(ctx, "unsupport length of search input", "input_len", inputSliceLen) lastInput := inputSlice[inputSliceLen-1]
return []string{}, false, nil // 判断 queryKey 是否是空值空值则返回上一级别下的所有key
} if lastInput == "" {
setKey := getCombinedConstantsKeyByLength(inputSlice[inputSliceLen-2], inputSliceLen)
targetSet := diagram.NewRedisSet(ctx, setKey, 10, true)
keys, err := targetSet.SMembers(setKey)
if err != nil {
logger.Error(ctx, "get all recommend key by setKey failed", "set_key", setKey, "error", err)
return []string{}, false, fmt.Errorf("get all recommend key by setKey failed,%w", err)
} }
func getKeyBySpecificsLevel(ctx context.Context, rdb *redis.Client, inputLen int, input string) ([]string, bool, error) { var results []string
queryKey := getSpecificKeyByLength(inputLen, input) for _, key := range keys {
results, err := rdb.SMembers(ctx, queryKey).Result() result := input + key
if err != nil { results = append(results, result)
return []string{}, false, fmt.Errorf("get all keys failed, error: %w", err)
} }
return results, false, nil return results, false, nil
} }
func combineQueryResultByInput(inputSliceLen int, inputSlice []string, queryResults []string) []string { setKey := getCombinedConstantsKeyByLength(inputSlice[inputSliceLen-2], inputSliceLen)
prefixs := make([]string, 0, len(inputSlice)) targetSet := diagram.NewRedisSet(ctx, setKey, 10, true)
recommandResults := make([]string, 0, len(queryResults)) exist, err := targetSet.SIsMember(setKey, lastInput)
switch inputSliceLen { if err != nil {
case 2: logger.Error(ctx, "check keys exist failed", "set_key", setKey, "query_key", lastInput, "error", err)
prefixs = []string{inputSlice[0]} return []string{}, false, fmt.Errorf("check keys failed,%w", err)
case 3:
prefixs = inputSlice[0:2]
case 4:
prefixs = inputSlice[0:3]
case 5:
prefixs = inputSlice[0:4]
case 6:
prefixs = inputSlice[0:5]
case 7:
prefixs = inputSlice[0:6]
default:
return []string{}
} }
for _, queryResult := range queryResults { searchInput := input
combineStrs := make([]string, 0, len(inputSlice)) inputLen := len(searchInput)
combineStrs = append(combineStrs, prefixs...) for inputLen != 0 && !exist {
combineStrs = append(combineStrs, queryResult) logger.Info(ctx, "use fuzzy query", "input", input)
recommandResult := strings.Join(combineStrs, ".") results, err := ac.SuggestOpts(searchInput, redisearch.SuggestOptions{
recommandResults = append(recommandResults, recommandResult) Num: math.MaxInt16,
Fuzzy: true,
WithScores: false,
WithPayloads: false,
})
if err != nil {
logger.Error(ctx, "query info by fuzzy failed", "query_key", input, "error", err)
return []string{}, false, err
} }
return recommandResults if len(results) == 0 {
// TODO 构建 for 循环返回所有可能的补全
searchInput = input[:inputLen-1]
inputLen = len(searchInput)
continue
} }
func getSpecificKeyByLength(inputLen int, input string) string { var terms []string
for _, result := range results {
terms = append(terms, result.Term)
}
// 返回模糊查询结果
return terms, true, nil
}
return []string{input}, false, nil
}
return []string{}, false, nil
}
func getAllGridKeys(ctx context.Context, setKey string) ([]string, bool, error) {
// 从redis set 中获取所有的 grid key
gridSets := diagram.NewRedisSet(ctx, setKey, 10, true)
keys, err := gridSets.SMembers("grid_keys")
if err != nil {
return []string{}, false, fmt.Errorf("get all root keys failed, error: %v", err)
}
return keys, false, nil
}
func getSpecificZoneKeys(ctx context.Context, input string) ([]string, bool, error) {
setKey := fmt.Sprintf(constants.RedisSpecGridZoneSetKey, input)
// TODO 从redis set 中获取指定 grid 下的 zone key
zoneSets := diagram.NewRedisSet(ctx, setKey, 10, true)
keys, err := zoneSets.SMembers(setKey)
if err != nil {
return []string{}, false, fmt.Errorf("get all root keys failed, error: %v", err)
}
var results []string
for _, key := range keys {
result := input + "." + key
results = append(results, result)
}
return results, false, nil
}
func getConstantsKeyByLength(inputLen int) string {
switch inputLen { switch inputLen {
case 1: case 1:
return constants.RedisAllGridSetKey return constants.RedisAllGridSetKey
case 2: case 2:
return fmt.Sprintf(constants.RedisSpecGridZoneSetKey, input) return constants.RedisAllZoneSetKey
case 3: case 3:
return fmt.Sprintf(constants.RedisSpecZoneStationSetKey, input) return constants.RedisAllStationSetKey
case 4: case 4:
return fmt.Sprintf(constants.RedisSpecStationCompNSPATHSetKey, input) return constants.RedisAllComponentSetKey
case 5:
return fmt.Sprintf(constants.RedisSpecStationCompTagSetKey, input)
case 6:
return constants.RedisAllConfigSetKey
case 7:
return fmt.Sprintf(constants.RedisSpecCompTagMeasSetKey, input)
default: default:
return constants.RedisAllGridSetKey return constants.RedisAllGridSetKey
} }
} }
// handleLevelFuzzySearch define func to process recommendation logic for specific levels(level >= 2) func getCombinedConstantsKeyByLength(key string, inputLen int) string {
func handleLevelFuzzySearch(ctx context.Context, rdb *redis.Client, hierarchy int, keySetKey string, inputSlice []string) ([]string, bool, error) { switch inputLen {
searchInputIndex := hierarchy - 1
searchInput := inputSlice[searchInputIndex]
searchPrefix := strings.Join(inputSlice[0:searchInputIndex], ".")
if searchInput == "" {
var specificalKey string
specificalKeyIndex := searchInputIndex - 1
if specificalKeyIndex >= 0 {
specificalKey = inputSlice[specificalKeyIndex]
}
allResults, isFuzzy, err := getKeyBySpecificsLevel(ctx, rdb, hierarchy, specificalKey)
if err != nil {
return []string{}, false, err
}
recommandResults := combineQueryResultByInput(hierarchy, inputSlice, allResults)
return recommandResults, isFuzzy, nil
}
keyExists, err := rdb.SIsMember(ctx, keySetKey, searchInput).Result()
if err != nil {
logger.Error(ctx, "check key exist failed ", "key", searchInput, "error", err)
return []string{}, false, err
}
if keyExists {
if hierarchy == constants.MaxIdentifyHierarchy {
return []string{""}, false, nil
}
return []string{"."}, false, nil
}
// start redis fuzzy search
recommends, err := runFuzzySearch(ctx, searchInput, searchPrefix, hierarchy)
if err != nil {
logger.Error(ctx, "fuzzy search failed by hierarchy", "hierarchy", hierarchy, "search_input", searchInput, "error", err)
return []string{}, false, err
}
if len(recommends) == 0 {
logger.Error(ctx, "fuzzy search without result", "hierarchy", hierarchy, "search_input", searchInput, "error", err)
return []string{}, true, nil
}
return recommends, true, nil
}
// runFuzzySearch define func to process redis fuzzy search
func runFuzzySearch(ctx context.Context, searchInput string, searchPrefix string, inputSliceLen int) ([]string, error) {
searchInputLen := len(searchInput)
for searchInputLen != 0 {
results, err := ac.SuggestOpts(searchInput, redisearch.SuggestOptions{
Num: math.MaxInt16,
Fuzzy: true,
WithScores: false,
WithPayloads: false,
})
if err != nil {
logger.Error(ctx, "query key by redis fuzzy search failed", "query_key", searchInput, "error", err)
return nil, fmt.Errorf("redisearch suggest failed: %w", err)
}
if len(results) == 0 {
// 如果没有结果,退一步(删除最后一个字节)并继续循环
// TODO 考虑使用其他方式代替 for 循环退一字节的查询方式
searchInput = searchInput[:len(searchInput)-1]
searchInputLen = len(searchInput)
continue
}
var recommends []string
for _, result := range results {
term := result.Term
var termSliceLen int
var termPrefix string
lastDotIndex := strings.LastIndex(term, ".")
if lastDotIndex == -1 {
termPrefix = ""
} else {
termPrefix = term[:lastDotIndex]
}
if result.Term == "" {
termSliceLen = 1
} else {
termSliceLen = strings.Count(result.Term, ".") + 1
}
if termSliceLen == inputSliceLen && termPrefix == searchPrefix {
recommends = append(recommends, result.Term)
}
}
return recommends, nil
}
return []string{}, nil
}
// RedisSearchRecommend1 define func of redis search by input string and return recommend results
func RedisSearchRecommend1(ctx context.Context, input string) ([]string, bool, error) {
rdb := diagram.GetRedisClientInstance()
if input == "" {
// 返回所有 grid 名
return getKeyBySpecificsLevel(ctx, rdb, 1, input)
}
inputSlice := strings.Split(input, ".")
inputSliceLen := len(inputSlice)
switch inputSliceLen {
case 1:
// grid tagname search
gridSearchInput := inputSlice[0]
gridExists, err := rdb.SIsMember(ctx, constants.RedisAllGridSetKey, gridSearchInput).Result()
if err != nil {
logger.Error(ctx, "check grid key exist failed ", "grid_key", input, "error", err)
return []string{}, false, err
}
if gridExists {
return []string{"."}, false, err
}
// start grid tagname fuzzy search
searchInput := gridSearchInput
searchInputLen := len(searchInput)
for searchInputLen != 0 && !gridExists {
results, err := ac.SuggestOpts(searchInput, redisearch.SuggestOptions{
Num: math.MaxInt16,
Fuzzy: true,
WithScores: false,
WithPayloads: false,
})
if err != nil {
logger.Error(ctx, "query grid key by redis fuzzy search failed", "query_key", searchInput, "error", err)
return []string{}, false, err
}
if len(results) == 0 {
// TODO 考虑使用其他方式代替 for 循环退一字节的查询方式
searchInput = searchInput[:len(searchInput)-1]
searchInputLen = len(searchInput)
continue
}
var recommends []string
for _, result := range results {
termSlice := strings.Split(result.Term, ".")
if len(termSlice) <= inputSliceLen {
recommends = append(recommends, result.Term)
}
}
// return fuzzy search results
return recommends, true, nil
}
case 2: case 2:
// zone tagname search return fmt.Sprintf(constants.RedisSpecGridZoneSetKey, key)
zoneSearchInput := inputSlice[1]
if zoneSearchInput == "" {
specificalGrid := inputSlice[0]
allZones, isFuzzy, err := getKeyBySpecificsLevel(ctx, rdb, inputSliceLen, specificalGrid)
recommandResults := combineQueryResultByInput(inputSliceLen, inputSlice, allZones)
return recommandResults, isFuzzy, err
}
zoneExists, err := rdb.SIsMember(ctx, constants.RedisAllZoneSetKey, zoneSearchInput).Result()
if err != nil {
logger.Error(ctx, "check zone key exist failed ", "zone_key", zoneSearchInput, "error", err)
return []string{}, false, err
}
if zoneExists {
return []string{"."}, false, err
}
// start zone tagname fuzzy search
searchInput := zoneSearchInput
searchInputLen := len(searchInput)
for searchInputLen != 0 && !zoneExists {
results, err := ac.SuggestOpts(searchInput, redisearch.SuggestOptions{
Num: math.MaxInt16,
Fuzzy: true,
WithScores: false,
WithPayloads: false,
})
if err != nil {
logger.Error(ctx, "query zone key by redis fuzzy search failed", "query_key", searchInput, "error", err)
return []string{}, false, err
}
if len(results) == 0 {
// TODO 考虑使用其他方式代替 for 循环退一字节的查询方式
searchInput = searchInput[:len(searchInput)-1]
searchInputLen = len(searchInput)
continue
}
var recommends []string
for _, result := range results {
termSlice := strings.Split(result.Term, ".")
if len(termSlice) <= inputSliceLen {
recommends = append(recommends, result.Term)
}
}
// return fuzzy search results
return combineQueryResultByInput(inputSliceLen, inputSlice, recommends), true, nil
}
case 3: case 3:
// station tagname search return fmt.Sprintf(constants.RedisSpecZoneStationSetKey, key)
stationSearchInput := inputSlice[2]
if stationSearchInput == "" {
specificalZone := inputSlice[1]
allStations, isFuzzy, err := getKeyBySpecificsLevel(ctx, rdb, inputSliceLen, specificalZone)
recommandResults := combineQueryResultByInput(inputSliceLen, inputSlice, allStations)
return recommandResults, isFuzzy, err
}
stationExists, err := rdb.SIsMember(ctx, constants.RedisAllStationSetKey, stationSearchInput).Result()
if err != nil {
logger.Error(ctx, "check station key exist failed ", "station_key", stationSearchInput, "error", err)
return []string{}, false, err
}
if stationExists {
return []string{"."}, false, err
}
// start station tagname fuzzy search
searchInput := stationSearchInput
searchInputLen := len(searchInput)
for searchInputLen != 0 && !stationExists {
results, err := ac.SuggestOpts(searchInput, redisearch.SuggestOptions{
Num: math.MaxInt16,
Fuzzy: true,
WithScores: false,
WithPayloads: false,
})
if err != nil {
logger.Error(ctx, "query station key by redis fuzzy search failed", "query_key", searchInput, "error", err)
return []string{}, false, err
}
if len(results) == 0 {
// TODO 考虑使用其他方式代替 for 循环退一字节的查询方式
searchInput = searchInput[:len(searchInput)-1]
searchInputLen = len(searchInput)
continue
}
var recommends []string
for _, result := range results {
termSlice := strings.Split(result.Term, ".")
if len(termSlice) <= inputSliceLen {
recommends = append(recommends, result.Term)
}
}
// return fuzzy search results
return combineQueryResultByInput(inputSliceLen, inputSlice, recommends), true, nil
}
case 4: case 4:
// component nspath search return fmt.Sprintf(constants.RedisSpecStationComponentSetKey, key)
compNSPSearchInput := inputSlice[3]
if compNSPSearchInput == "" {
specificalStation := inputSlice[2]
allCompNSPaths, isFuzzy, err := getKeyBySpecificsLevel(ctx, rdb, inputSliceLen, specificalStation)
recommandResults := combineQueryResultByInput(inputSliceLen, inputSlice, allCompNSPaths)
return recommandResults, isFuzzy, err
}
compNSPathExists, err := rdb.SIsMember(ctx, constants.RedisAllCompNSPathSetKey, compNSPSearchInput).Result()
if err != nil {
logger.Error(ctx, "check component nspath key exist failed ", "component_nspath_key", compNSPSearchInput, "error", err)
return []string{}, false, err
}
if compNSPathExists {
return []string{"."}, false, err
}
// start grid fuzzy search
searchInput := compNSPSearchInput
searchInputLen := len(searchInput)
for searchInputLen != 0 && !compNSPathExists {
results, err := ac.SuggestOpts(searchInput, redisearch.SuggestOptions{
Num: math.MaxInt16,
Fuzzy: true,
WithScores: false,
WithPayloads: false,
})
if err != nil {
logger.Error(ctx, "query component nspath key by redis fuzzy search failed", "query_key", searchInput, "error", err)
return []string{}, false, err
}
if len(results) == 0 {
// TODO 考虑使用其他方式代替 for 循环退一字节的查询方式
searchInput = searchInput[:len(searchInput)-1]
searchInputLen = len(searchInput)
continue
}
var recommends []string
for _, result := range results {
termSlice := strings.Split(result.Term, ".")
if len(termSlice) <= inputSliceLen {
recommends = append(recommends, result.Term)
}
}
// return fuzzy search results
return combineQueryResultByInput(inputSliceLen, inputSlice, recommends), true, nil
}
case 5:
// component tag search
compTagSearchInput := inputSlice[4]
if compTagSearchInput == "" {
// TODO 优化考虑是否使用 station 作为 key 的一部分
specificalStation := inputSlice[2]
allCompNSPaths, isFuzzy, err := getKeyBySpecificsLevel(ctx, rdb, inputSliceLen, specificalStation)
recommandResults := combineQueryResultByInput(inputSliceLen, inputSlice, allCompNSPaths)
return recommandResults, isFuzzy, err
}
compTagExists, err := rdb.SIsMember(ctx, constants.RedisAllCompTagSetKey, compTagSearchInput).Result()
if err != nil {
logger.Error(ctx, "check component tag key exist failed ", "component_tag_key", compTagSearchInput, "error", err)
return []string{}, false, err
}
if compTagExists {
return []string{"."}, false, err
}
// start grid fuzzy search
searchInput := compTagSearchInput
searchInputLen := len(searchInput)
for searchInputLen != 0 && !compTagExists {
results, err := ac.SuggestOpts(searchInput, redisearch.SuggestOptions{
Num: math.MaxInt16,
Fuzzy: true,
WithScores: false,
WithPayloads: false,
})
if err != nil {
logger.Error(ctx, "query component tag key by redis fuzzy search failed", "query_key", searchInput, "error", err)
return []string{}, false, err
}
if len(results) == 0 {
// TODO 考虑使用其他方式代替 for 循环退一字节的查询方式
searchInput = searchInput[:len(searchInput)-1]
searchInputLen = len(searchInput)
continue
}
var recommends []string
for _, result := range results {
termSlice := strings.Split(result.Term, ".")
if len(termSlice) <= inputSliceLen {
recommends = append(recommends, result.Term)
}
}
// return fuzzy search results
return combineQueryResultByInput(inputSliceLen, inputSlice, recommends), true, nil
}
case 6:
// configuration search
// TODO 优化
configSearchInput := inputSlice[5]
if configSearchInput == "" {
allCompNSPaths, isFuzzy, err := getKeyBySpecificsLevel(ctx, rdb, inputSliceLen, "")
recommandResults := combineQueryResultByInput(inputSliceLen, inputSlice, allCompNSPaths)
return recommandResults, isFuzzy, err
}
configExists, err := rdb.SIsMember(ctx, constants.RedisAllConfigSetKey, configSearchInput).Result()
if err != nil {
logger.Error(ctx, "check config key exist failed ", "config_key", configSearchInput, "error", err)
return []string{}, false, err
}
if configExists {
return []string{"."}, false, err
}
// start grid fuzzy search
searchInput := configSearchInput
searchInputLen := len(searchInput)
for searchInputLen != 0 && !configExists {
results, err := ac.SuggestOpts(searchInput, redisearch.SuggestOptions{
Num: math.MaxInt16,
Fuzzy: true,
WithScores: false,
WithPayloads: false,
})
if err != nil {
logger.Error(ctx, "query config key by redis fuzzy search failed", "query_key", searchInput, "error", err)
return []string{}, false, err
}
if len(results) == 0 {
// TODO 考虑使用其他方式代替 for 循环退一字节的查询方式
searchInput = searchInput[:len(searchInput)-1]
searchInputLen = len(searchInput)
continue
}
var recommends []string
for _, result := range results {
termSlice := strings.Split(result.Term, ".")
if len(termSlice) <= inputSliceLen {
recommends = append(recommends, result.Term)
}
}
// return fuzzy search results
return combineQueryResultByInput(inputSliceLen, inputSlice, recommends), true, nil
}
case 7:
// measurement search
measSearchInput := inputSlice[6]
if measSearchInput == "" {
// use compoent tag for redis unique key prefix
specificalCompTag := inputSlice[4]
allMeasTags, isFuzzy, err := getKeyBySpecificsLevel(ctx, rdb, inputSliceLen, specificalCompTag)
recommandResults := combineQueryResultByInput(inputSliceLen, inputSlice, allMeasTags)
return recommandResults, isFuzzy, err
}
measTagExists, err := rdb.SIsMember(ctx, constants.RedisAllMeasTagSetKey, measSearchInput).Result()
if err != nil {
logger.Error(ctx, "check component tag key exist failed ", "component_tag_key", measSearchInput, "error", err)
return []string{}, false, err
}
if measTagExists {
return []string{"."}, false, err
}
// start measurement tag fuzzy search
searchInput := measSearchInput
searchInputLen := len(searchInput)
for searchInputLen != 0 && !measTagExists {
results, err := ac.SuggestOpts(searchInput, redisearch.SuggestOptions{
Num: math.MaxInt16,
Fuzzy: true,
WithScores: false,
WithPayloads: false,
})
if err != nil {
logger.Error(ctx, "query measurement tag key by redis fuzzy search failed", "query_key", searchInput, "error", err)
return []string{}, false, err
}
if len(results) == 0 {
// TODO 考虑使用其他方式代替 for 循环退一字节的查询方式
searchInput = searchInput[:len(searchInput)-1]
searchInputLen = len(searchInput)
continue
}
var recommends []string
for _, result := range results {
termSlice := strings.Split(result.Term, ".")
if len(termSlice) <= inputSliceLen {
recommends = append(recommends, result.Term)
}
}
// return fuzzy search results
return combineQueryResultByInput(inputSliceLen, inputSlice, recommends), true, nil
}
default: default:
logger.Error(ctx, "unsupport length of search input", "input_len", inputSliceLen) return constants.RedisAllGridSetKey
return []string{}, false, nil
} }
return []string{}, false, nil }
// GetLongestCommonPrefixLength define func of get longest common prefix length between two strings
func GetLongestCommonPrefixLength(input string, recommendResult string) int {
if input == "" {
return 0
}
minLen := min(len(input), len(recommendResult))
for i := range minLen {
if input[i] != recommendResult[i] {
return i
}
}
return minLen
} }

View File

@ -23,25 +23,11 @@ type TopologicUUIDCreateInfo struct {
Comment string `json:"comment"` Comment string `json:"comment"`
} }
// ComponentCreateInfo defines circuit diagram component create info // ComponentCreateInfo defines circuit diagram component create index info
type ComponentCreateInfo struct { type ComponentCreateInfo struct {
UUID string `json:"uuid"` UUID string `json:"uuid"`
Name string `json:"name"` Name string `json:"name"`
Context map[string]any `json:"context"` Context string `json:"context"`
GridID int64 `json:"grid_id"`
ZoneID int64 `json:"zone_id"`
StationID int64 `json:"station_id"`
PageID int64 `json:"page_id"`
Tag string `json:"tag"`
Params string `json:"params"`
Op int `json:"op"`
}
// MeasurementCreateInfo defines circuit diagram measurement create info
type MeasurementCreateInfo struct {
UUID string `json:"uuid"`
Name string `json:"name"`
Context map[string]any `json:"context"`
GridID int64 `json:"grid_id"` GridID int64 `json:"grid_id"`
ZoneID int64 `json:"zone_id"` ZoneID int64 `json:"zone_id"`
StationID int64 `json:"station_id"` StationID int64 `json:"station_id"`

View File

@ -1,22 +0,0 @@
// Package network define struct of network operation
package network
// MeasurementLinkRequest defines the request payload for process an measurement link
type MeasurementLinkRequest struct {
// required: true
MeasurementID int64 `json:"measurement_id" example:"1001"`
// required: true
// enum: [add, del]
Action string `json:"action" example:"add"`
}
// DiagramNodeLinkRequest defines the request payload for process an diagram node link
type DiagramNodeLinkRequest struct {
// required: true
NodeID int64 `json:"node_id" example:"1001"`
// required: true
NodeLevel int `json:"node_level" example:"1"`
// required: true
// enum: [add, del]
Action string `json:"action" example:"add"`
}

View File

@ -39,7 +39,7 @@ type ComponentUpdateInfo struct {
ID int64 `json:"id"` ID int64 `json:"id"`
UUID string `json:"uuid"` UUID string `json:"uuid"`
Name string `json:"name"` Name string `json:"name"`
Context map[string]any `json:"context"` Context string `json:"context"`
GridID int64 `json:"grid_id"` GridID int64 `json:"grid_id"`
ZoneID int64 `json:"zone_id"` ZoneID int64 `json:"zone_id"`
StationID int64 `json:"station_id"` StationID int64 `json:"station_id"`

View File

@ -9,5 +9,5 @@ type MeasurementGetRequest struct {
// MeasurementRecommendRequest defines the request payload for an measurement recommend // MeasurementRecommendRequest defines the request payload for an measurement recommend
type MeasurementRecommendRequest struct { type MeasurementRecommendRequest struct {
Input string `form:"input,omitempty" example:"grid1"` Input string `json:"input" example:"trans"`
} }

View File

@ -1,26 +0,0 @@
// Package network define struct of network operation
package network
// RealTimeDataReceiveRequest defines request params of real time data receive api
type RealTimeDataReceiveRequest struct {
PayLoad RealTimeDataReceivePayload `json:"payload"`
}
// RealTimeDataReceivePayload defines request payload of real time data receive api
type RealTimeDataReceivePayload struct {
ComponentUUID string `json:"component_uuid"`
Point string `json:"point"`
Values []RealTimeDataPoint `json:"values"`
}
// RealTimeDataPoint define struct of real time data point
type RealTimeDataPoint struct {
Time int64 `json:"time" example:"1678886400"`
Value float64 `json:"value" example:"123.1"`
}
// RealTimeDataPayload define struct of real time data payload
type RealTimeDataPayload struct {
// TODO 增加example tag
RealTimeDataPoints []RealTimeDataPoint `json:"sub_pos" swaggertype:"object"`
}

View File

@ -1,48 +1,20 @@
// Package network define struct of network operation // Package network define struct of network operation
package network package network
// RealTimeQueryRequest define struct of real time data query request // RealTimeDataReceiveRequest defines request params of real time data receive api
type RealTimeQueryRequest struct { type RealTimeDataReceiveRequest struct {
// required: true PayLoad RealTimeDataReceivePayload `json:"payload"`
// enum: [start, stop]
Action string `json:"action" example:"start" description:"请求的操作,例如 start/stop"`
// TODO 增加monitorID的example值说明
ClientID string `json:"client_id" example:"xxxx" description:"用于标识不同client的监控请求ID"`
// required: true
Measurements []RealTimeMeasurementItem `json:"measurements" description:"定义不同的数据采集策略和目标"`
} }
// RealTimeSubRequest define struct of real time data subscription request // RealTimeDataReceivePayload defines request payload of real time data receive api
type RealTimeSubRequest struct { type RealTimeDataReceivePayload struct {
// required: true ComponentUUID string `json:"component_uuid"`
// enum: [start, stop] Point string `json:"point"`
Action string `json:"action" example:"start" description:"请求的操作,例如 start/stop"` Values []RealTimeDataReceiveParam `json:"values"`
ClientID string `json:"client_id" example:"5d72f2d9-e33a-4f1b-9c76-88a44b9a953e" description:"用于标识不同client的监控请求ID"`
// required: true
Measurements []RealTimeMeasurementItem `json:"measurements" description:"定义不同的数据采集策略和目标"`
} }
// RealTimeMeasurementItem define struct of real time measurement item // RealTimeDataReceiveParam defines request param of real time data receive api
type RealTimeMeasurementItem struct { type RealTimeDataReceiveParam struct {
Interval string `json:"interval" example:"1" description:"数据采集的时间间隔(秒)"` Time int64 `json:"time"`
Targets []string `json:"targets" example:"[\"grid1.zone1.station1.ns1.tag1.bay.I11_A_rms\",\"grid1.zone1.station1.ns1.tag1.tag1.bay.I11_B_rms\"]" description:"需要采集数据的测点或标签名称列表"` Value float64 `json:"value"`
}
// RealTimePullPayload define struct of pull real time data payload
type RealTimePullPayload struct {
// required: true
Targets []RealTimePullTarget `json:"targets" example:"{\"targets\":[{\"id\":\"grid1.zone1.station1.ns1.tag1.bay.I11_A_rms\",\"datas\":[{\"time\":1736305467506000000,\"value\":1},{\"time\":1736305467506000000,\"value\":1}]},{\"id\":\"grid1.zone1.station1.ns1.tag1.bay.I11_B_rms\",\"datas\":[{\"time\":1736305467506000000,\"value\":1},{\"time\":1736305467506000000,\"value\":1}]}]}" description:"实时数据"`
}
// RealTimePullTarget define struct of pull real time data target
type RealTimePullTarget struct {
ID string `json:"id" example:"grid1.zone1.station1.ns1.tag1.bay.I11_A_rms" description:"实时数据ID值"`
Datas []RealTimePullData `json:"datas" example:"[{\"time\":1736305467506000000,\"value\":220},{\"time\":1736305467506000000,\"value\":220}]" description:"实时数据值数组"`
}
// RealTimePullData define struct of pull real time data param
type RealTimePullData struct {
Time string `json:"time" example:"1736305467506000000" description:"实时数据时间戳"`
Value float64 `json:"value" example:"220" description:"实时数据值"`
} }

View File

@ -5,14 +5,14 @@ package network
type FailureResponse struct { type FailureResponse struct {
Code int `json:"code" example:"500"` Code int `json:"code" example:"500"`
Msg string `json:"msg" example:"failed to get recommend data from redis"` Msg string `json:"msg" example:"failed to get recommend data from redis"`
Payload any `json:"payload" swaggertype:"object"` PayLoad any `json:"payload" swaggertype:"object"`
} }
// SuccessResponse define struct of standard successful API response format // SuccessResponse define struct of standard successful API response format
type SuccessResponse struct { type SuccessResponse struct {
Code int `json:"code" example:"200"` Code int `json:"code" example:"200"`
Msg string `json:"msg" example:"success"` Msg string `json:"msg" example:"success"`
Payload any `json:"payload" swaggertype:"object"` PayLoad any `json:"payload" swaggertype:"object"`
} }
// MeasurementRecommendPayload define struct of represents the data payload for the successful recommendation response. // MeasurementRecommendPayload define struct of represents the data payload for the successful recommendation response.
@ -20,17 +20,5 @@ type MeasurementRecommendPayload struct {
Input string `json:"input" example:"transformfeeder1_220."` Input string `json:"input" example:"transformfeeder1_220."`
Offset int `json:"offset" example:"21"` Offset int `json:"offset" example:"21"`
RecommendedList []string `json:"recommended_list" example:"[\"I_A_rms\", \"I_B_rms\",\"I_C_rms\"]"` RecommendedList []string `json:"recommended_list" example:"[\"I_A_rms\", \"I_B_rms\",\"I_C_rms\"]"`
} // RecommendedList []string `json:"recommended_list"`
// TargetResult define struct of target item in real time data subscription response payload
type TargetResult struct {
ID string `json:"id" example:"grid1.zone1.station1.ns1.tag1.transformfeeder1_220.I_A_rms"`
Code string `json:"code" example:"1001"`
Msg string `json:"msg" example:"subscription success"`
}
// RealTimeSubPayload define struct of real time data subscription request
type RealTimeSubPayload struct {
ClientID string `json:"client_id" example:"5d72f2d9-e33a-4f1b-9c76-88a44b9a953e" description:"用于标识不同client的监控请求ID"`
TargetResults []TargetResult `json:"targets"`
} }

View File

@ -9,35 +9,62 @@ import (
// Bay structure define abstracted info set of electrical bay // Bay structure define abstracted info set of electrical bay
type Bay struct { type Bay struct {
BayUUID uuid.UUID `gorm:"column:bay_uuid;type:uuid;primaryKey;default:gen_random_uuid()"` BayUUID uuid.UUID `gorm:"column:BAY_UUID;type:uuid;primaryKey;default:gen_random_uuid()"`
Name string `gorm:"column:name;size:64;not null;default:''"` Name string `gorm:"column:NAME;size:64;not null;default:''"`
Tag string `gorm:"column:tag;size:32;not null;default:''"` Type string `gorm:"column:TYPE;size:64;not null;default:''"`
Type string `gorm:"column:type;size:64;not null;default:''"` Unom float64 `gorm:"column:UNOM;not null;default:-1"`
Unom float64 `gorm:"column:unom;not null;default:-1"` Fla float64 `gorm:"column:FLA;not null;default:-1"`
Fla float64 `gorm:"column:fla;not null;default:-1"` Capacity float64 `gorm:"column:CAPACITY;not null;default:-1"`
Capacity float64 `gorm:"column:capacity;not null;default:-1"` Description string `gorm:"column:DESCRIPTION;size:512;not null;default:''"`
Description string `gorm:"column:description;size:512;not null;default:''"` InService bool `gorm:"column:IN_SERVICE;not null;default:false"`
InService bool `gorm:"column:in_service;not null;default:false"` State int `gorm:"column:STATE;not null;default:-1"`
State int `gorm:"column:state;not null;default:-1"` Grid string `gorm:"column:GRID;size:64;not null;default:''"`
Grid string `gorm:"column:grid;size:64;not null;default:''"` Zone string `gorm:"column:ZONE;size:64;not null;default:''"`
Zone string `gorm:"column:zone;size:64;not null;default:''"` Station string `gorm:"column:STATION;size:64;not null;default:''"`
Station string `gorm:"column:station;size:64;not null;default:''"` Business map[string]interface{} `gorm:"column:BUSINESS;type:jsonb;not null;default:'{}'"`
Business JSONMap `gorm:"column:business;type:jsonb;not null;default:'{}'"` Context map[string]interface{} `gorm:"column:CONTEXT;type:jsonb;not null;default:'{}'"`
Context JSONMap `gorm:"column:context;type:jsonb;not null;default:'{}'"` FromUUIDs []uuid.UUID `gorm:"column:FROM_UUIDS;type:jsonb;not null;default:'[]'"`
FromUUIDs []uuid.UUID `gorm:"column:from_uuids;type:jsonb;not null;default:'[]'"` ToUUIDs []uuid.UUID `gorm:"column:TO_UUIDS;type:jsonb;not null;default:'[]'"`
ToUUIDs []uuid.UUID `gorm:"column:to_uuids;type:jsonb;not null;default:'[]'"` DevProtect []interface{} `gorm:"column:DEV_PROTECT;type:jsonb;not null;default:'[]'"`
DevProtect JSONMap `gorm:"column:dev_protect;type:jsonb;not null;default:'[]'"` DevFaultRecord []interface{} `gorm:"column:DEV_FAULT_RECORD;type:jsonb;not null;default:'[]'"`
DevFaultRecord JSONMap `gorm:"column:dev_fault_record;type:jsonb;not null;default:'[]'"` DevStatus []interface{} `gorm:"column:DEV_STATUS;type:jsonb;not null;default:'[]'"`
DevStatus JSONMap `gorm:"column:dev_status;type:jsonb;not null;default:'[]'"` DevDynSense []interface{} `gorm:"column:DEV_DYN_SENSE;type:jsonb;not null;default:'[]'"`
DevDynSense JSONMap `gorm:"column:dev_dyn_sense;type:jsonb;not null;default:'[]'"` DevInstruct []interface{} `gorm:"column:DEV_INSTRUCT;type:jsonb;not null;default:'[]'"`
DevInstruct JSONMap `gorm:"column:dev_instruct;type:jsonb;not null;default:'[]'"` DevEtc []interface{} `gorm:"column:DEV_ETC;type:jsonb;not null;default:'[]'"`
DevEtc JSONMap `gorm:"column:dev_etc;type:jsonb;not null;default:'[]'"` Components []uuid.UUID `gorm:"column:COMPONENTS;type:uuid[];not null;default:'{}'"`
Components []uuid.UUID `gorm:"column:components;type:uuid[];not null;default:'{}'"` Op int `gorm:"column:OP;not null;default:-1"`
Op int `gorm:"column:op;not null;default:-1"` Ts time.Time `gorm:"column:TS;type:timestamptz;not null;default:CURRENT_TIMESTAMP"`
Ts time.Time `gorm:"column:ts;type:timestamptz;not null;default:CURRENT_TIMESTAMP"`
} }
// TableName func respresent return table name of Bay // TableName func respresent return table name of Bay
func (b *Bay) TableName() string { func (Bay) TableName() string {
return "bay" return "BAY"
} }
// CREATE TABLE PUBLIC.BAY (
// BAY_UUID UUID PRIMARY KEY DEFAULT GEN_RANDOM_UUID(),
// NAME VARCHAR(64) NOT NULL DEFAULT '',
// TYPE VARCHAR(64) NOT NULL DEFAULT '',
// UNOM DOUBLE PRECISION NOT NULL DEFAULT -1,
// FLA DOUBLE PRECISION NOT NULL DEFAULT -1,
// CAPACITY DOUBLE PRECISION NOT NULL DEFAULT -1,
// DESCRIPTION VARCHAR(512) NOT NULL DEFAULT '',
// IN_SERVICE BOOLEAN NOT NULL DEFAULT FALSE,
// STATE INTEGER NOT NULL DEFAULT -1,
// GRID VARCHAR(64) NOT NULL DEFAULT '',
// ZONE VARCHAR(64) NOT NULL DEFAULT '',
// STATION VARCHAR(64) NOT NULL DEFAULT '',
// BUSINESS JSONB NOT NULL DEFAULT '{}', -- for Server
// CONTEXT JSONB NOT NULL DEFAULT '{}', -- for UI
// FROM_UUIDS JSONB NOT NULL DEFAULT '[]', -- uuids
// TO_UUIDS JSONB NOT NULL DEFAULT '[]', -- uuids
// DEV_PROTECT JSONB NOT NULL DEFAULT '[]', -- devices
// DEV_FAULT_RECORD JSONB NOT NULL DEFAULT '[]', -- devices
// DEV_STATUS JSONB NOT NULL DEFAULT '[]', -- devices
// DEV_DYN_SENSE JSONB NOT NULL DEFAULT '[]', -- devices
// DEV_INSTRUCT JSONB NOT NULL DEFAULT '[]', -- devices
// DEV_ETC JSONB NOT NULL DEFAULT '[]', -- devices
// COMPONENTS UUID[] NOT NULL DEFAULT '{}',
// OP INTEGER NOT NULL DEFAULT -1,
// TS TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP
// );

View File

@ -9,37 +9,27 @@ import (
// Component structure define abstracted info set of electrical component // Component structure define abstracted info set of electrical component
type Component struct { type Component struct {
GlobalUUID uuid.UUID `gorm:"column:global_uuid;primaryKey"` GlobalUUID uuid.UUID `gorm:"column:GLOBAL_UUID;primaryKey"`
NsPath string `gorm:"column:nspath"` NsPath string `gorm:"column:NSPATH"`
Tag string `gorm:"column:tag"` Tag string `gorm:"column:TAG"`
Name string `gorm:"column:name"` Name string `gorm:"column:NAME"`
ModelName string `gorm:"column:model_name"` ModelName string `gorm:"column:MODEL_NAME"`
Description string `gorm:"column:description"` Description string `gorm:"column:DESCRIPTION"`
GridTag string `gorm:"column:grid"` GridID string `gorm:"column:GRID"`
ZoneTag string `gorm:"column:zone"` ZoneID string `gorm:"column:ZONE"`
StationTag string `gorm:"column:station"` StationID string `gorm:"column:STATION"`
Type int `gorm:"column:type"` Type int `gorm:"column:TYPE"`
InService bool `gorm:"column:in_service"` InService bool `gorm:"column:IN_SERVICE"`
State int `gorm:"column:state"` State int `gorm:"column:STATE"`
Status int `gorm:"column:status"` Status int `gorm:"column:STATUS"`
Connection JSONMap `gorm:"column:connection;type:jsonb;default:'{}'"` Connection map[string]interface{} `gorm:"column:CONNECTION;type:jsonb;default:'{}'"`
Label JSONMap `gorm:"column:label;type:jsonb;default:'{}'"` Label map[string]interface{} `gorm:"column:LABEL;type:jsonb;default:'{}'"`
Context JSONMap `gorm:"column:context;type:jsonb;default:'{}'"` Context string `gorm:"column:CONTEXT"`
Op int `gorm:"column:op"` Op int `gorm:"column:OP"`
Ts time.Time `gorm:"column:ts"` Ts time.Time `gorm:"column:TS"`
} }
// TableName func respresent return table name of Component // TableName func respresent return table name of Component
func (c *Component) TableName() string { func (c *Component) TableName() string {
return "component" return "COMPONENT"
}
// GetTagName define func to inplement CircuitDiagramNodeInterface interface
func (c Component) GetTagName() string {
return c.Tag
}
// GetNSPath define func to inplement CircuitDiagramNodeInterface interface
func (c Component) GetNSPath() string {
return c.NsPath
} }

View File

@ -7,25 +7,15 @@ import (
// Grid structure define abstracted info set of electrical grid // Grid structure define abstracted info set of electrical grid
type Grid struct { type Grid struct {
ID int64 `gorm:"column:id;primaryKey"` ID int64 `gorm:"column:ID;primaryKey"`
TAGNAME string `gorm:"column:tagname"` TAGNAME string `gorm:"column:TAGNAME"`
Name string `gorm:"column:name"` Name string `gorm:"column:NAME"`
Description string `gorm:"column:description"` Description string `gorm:"column:DESCRIPTION"`
Op int `gorm:"column:op"` Op int `gorm:"column:OP"`
Ts time.Time `gorm:"column:ts"` Ts time.Time `gorm:"column:TS"`
} }
// TableName func respresent return table name of Grid // TableName func respresent return table name of Grid
func (g *Grid) TableName() string { func (g *Grid) TableName() string {
return "grid" return "GRID"
}
// GetTagName define func to inplement CircuitDiagramNodeInterface interface
func (g Grid) GetTagName() string {
return g.TAGNAME
}
// GetNSPath define func to inplement CircuitDiagramNodeInterface interface
func (g Grid) GetNSPath() string {
return ""
} }

View File

@ -9,20 +9,20 @@ import (
// Measurement structure define abstracted info set of electrical measurement // Measurement structure define abstracted info set of electrical measurement
type Measurement struct { type Measurement struct {
ID int64 `gorm:"column:id;primaryKey;autoIncrement"` ID int64 `gorm:"column:ID;primaryKey;autoIncrement"`
Tag string `gorm:"column:tag;size:64;not null;default:''"` Tag string `gorm:"column:TAG;size:64;not null;default:''"`
Name string `gorm:"column:name;size:64;not null;default:''"` Name string `gorm:"column:NAME;size:64;not null;default:''"`
Type int16 `gorm:"column:type;not null;default:-1"` Type int16 `gorm:"column:TYPE;not null;default:-1"`
Size int `gorm:"column:size;not null;default:-1"` Size int `gorm:"column:SIZE;not null;default:-1"`
DataSource JSONMap `gorm:"column:data_source;type:jsonb;not null;default:'{}'"` DataSource map[string]interface{} `gorm:"column:DATA_SOURCE;type:jsonb;not null;default:'{}'"`
EventPlan JSONMap `gorm:"column:event_plan;type:jsonb;not null;default:'{}'"` EventPlan map[string]interface{} `gorm:"column:EVENT_PLAN;type:jsonb;not null;default:'{}'"`
BayUUID uuid.UUID `gorm:"column:bay_uuid;type:uuid;not null"` BayUUID uuid.UUID `gorm:"column:BAY_UUID;type:uuid;not null"`
ComponentUUID uuid.UUID `gorm:"column:component_uuid;type:uuid;not null"` ComponentUUID uuid.UUID `gorm:"column:COMPONENT_UUID;type:uuid;not null"`
Op int `gorm:"column:op;not null;default:-1"` Op int `gorm:"column:OP;not null;default:-1"`
Ts time.Time `gorm:"column:ts;type:timestamptz;not null;default:CURRENT_TIMESTAMP"` Ts time.Time `gorm:"column:TS;type:timestamptz;not null;default:CURRENT_TIMESTAMP"`
} }
// TableName func respresent return table name of Measurement // TableName func respresent return table name of Measurement
func (Measurement) TableName() string { func (Measurement) TableName() string {
return "measurement" return "MEASUREMENT"
} }

View File

@ -1,8 +0,0 @@
// Package orm define database data struct
package orm
// CircuitDiagramNodeInterface define general node type interface
type CircuitDiagramNodeInterface interface {
	// GetTagName returns the node's tag name; presumably empty for node
	// kinds without a tag — TODO confirm against implementers.
	GetTagName() string
	// GetNSPath returns the node's namespace path; implementations that
	// have no namespace return "".
	GetNSPath() string
}

View File

@ -5,17 +5,17 @@ import "time"
// Page structure define circuit diagram page info set // Page structure define circuit diagram page info set
type Page struct { type Page struct {
ID int64 `gorm:"column:id;primaryKey"` ID int64 `gorm:"column:ID;primaryKey"`
Tag string `gorm:"column:tag"` Tag string `gorm:"column:TAG"`
Name string `gorm:"column:name"` Name string `gorm:"column:NAME"`
Label JSONMap `gorm:"column:label;type:jsonb;default:'{}'"` Label map[string]interface{} `gorm:"column:LABEL;type:jsonb;default:'{}'"`
Context JSONMap `gorm:"column:context;type:jsonb;default:'{}'"` Context map[string]interface{} `gorm:"column:CONTEXT;type:jsonb;default:'{}'"`
Description string `gorm:"column:description"` Description string `gorm:"column:DESCRIPTION"`
Op int `gorm:"column:op"` Op int `gorm:"column:OP"`
Ts time.Time `gorm:"column:ts"` Ts time.Time `gorm:"column:TS"`
} }
// TableName func respresent return table name of Page // TableName func respresent return table name of Page
func (p *Page) TableName() string { func (p *Page) TableName() string {
return "page" return "PAGE"
} }

View File

@ -7,27 +7,17 @@ import (
// Station structure define abstracted info set of electrical Station // Station structure define abstracted info set of electrical Station
type Station struct { type Station struct {
ID int64 `gorm:"column:id;primaryKey"` ID int64 `gorm:"column:ID;primaryKey"`
ZoneID int64 `gorm:"column:zone_id"` ZoneID int64 `gorm:"column:ZONE_ID"`
TAGNAME string `gorm:"column:tagname"` TAGNAME string `gorm:"column:TAGNAME"`
Name string `gorm:"column:name"` Name string `gorm:"column:NAME"`
Description string `gorm:"column:description"` Description string `gorm:"column:DESCRIPTION"`
IsLocal bool `gorm:"column:is_local"` IsLocal bool `gorm:"column:IS_LOCAL"`
Op int `gorm:"column:op"` Op int `gorm:"column:OP"`
Ts time.Time `gorm:"column:ts"` Ts time.Time `gorm:"column:TS"`
} }
// TableName func respresent return table name of Station // TableName func respresent return table name of Station
func (s *Station) TableName() string { func (s *Station) TableName() string {
return "station" return "STATION"
}
// GetTagName define func to inplement CircuitDiagramNodeInterface interface
func (s Station) GetTagName() string {
return s.TAGNAME
}
// GetNSPath define func to inplement CircuitDiagramNodeInterface interface
func (s Station) GetNSPath() string {
return ""
} }

View File

@ -1,25 +1,18 @@
// Package orm define database data struct // Package orm define database data struct
package orm package orm
import ( import "github.com/gofrs/uuid"
"time"
"github.com/gofrs/uuid"
)
// Topologic structure define topologic info set of circuit diagram // Topologic structure define topologic info set of circuit diagram
type Topologic struct { type Topologic struct {
ID int64 `gorm:"column:id"` ID int64 `gorm:"column:id"`
Flag int `gorm:"column:flag"`
UUIDFrom uuid.UUID `gorm:"column:uuid_from"` UUIDFrom uuid.UUID `gorm:"column:uuid_from"`
UUIDTo uuid.UUID `gorm:"column:uuid_to"` UUIDTo uuid.UUID `gorm:"column:uuid_to"`
Context JSONMap `gorm:"column:context;type:jsonb;default:'{}'"` Comment string `gorm:"column:comment"`
Flag int `gorm:"column:flag"`
Description string `gorm:"column:description;size:512;not null;default:''"`
Op int `gorm:"column:op;not null;default:-1"`
Ts time.Time `gorm:"column:ts;type:timestamptz;not null;default:CURRENT_TIMESTAMP"`
} }
// TableName func respresent return table name of Page // TableName func respresent return table name of Page
func (t *Topologic) TableName() string { func (t *Topologic) TableName() string {
return "topologic" return "Topologic"
} }

View File

@ -7,26 +7,16 @@ import (
// Zone structure define abstracted info set of electrical zone // Zone structure define abstracted info set of electrical zone
type Zone struct { type Zone struct {
ID int64 `gorm:"column:id;primaryKey"` ID int64 `gorm:"column:ID;primaryKey"`
GridID int64 `gorm:"column:grid_id"` GridID int64 `gorm:"column:GRID_ID"`
TAGNAME string `gorm:"column:tagname"` TAGNAME string `gorm:"column:TAGNAME"`
Name string `gorm:"column:name"` Name string `gorm:"column:NAME"`
Description string `gorm:"column:description"` Description string `gorm:"column:DESCRIPTION"`
Op int `gorm:"column:op"` Op int `gorm:"column:OP"`
Ts time.Time `gorm:"column:ts"` Ts time.Time `gorm:"column:TS"`
} }
// TableName func respresent return table name of Zone // TableName func respresent return table name of Zone
func (z *Zone) TableName() string { func (z *Zone) TableName() string {
return "zone" return "ZONE"
}
// GetTagName define func to inplement CircuitDiagramNodeInterface interface
func (z Zone) GetTagName() string {
return z.TAGNAME
}
// GetNSPath define func to inplement CircuitDiagramNodeInterface interface
func (z Zone) GetNSPath() string {
return ""
} }

View File

@ -1,38 +0,0 @@
// Package orm define database data struct
package orm
import (
"database/sql/driver"
"encoding/json"
"errors"
)
// JSONMap implements the sql.Scanner and driver.Valuer interfaces so a
// map[string]any can be written to and read from a JSONB column.
type JSONMap map[string]any

// Value converts the JSONMap into a driver.Value (JSON-encoded []byte)
// for writing to the database; a nil map is stored as SQL NULL.
func (j JSONMap) Value() (driver.Value, error) {
	if j != nil {
		return json.Marshal(j)
	}
	return nil, nil
}

// Scan parses a database value (JSON as []byte or string) into the
// receiver for data retrieval; a SQL NULL yields a nil map.
func (j *JSONMap) Scan(value any) error {
	if value == nil {
		*j = nil
		return nil
	}
	var raw []byte
	switch data := value.(type) {
	case []byte:
		raw = data
	case string:
		raw = []byte(data)
	default:
		return errors.New("unsupported data type for JSONMap Scan")
	}
	return json.Unmarshal(raw, j)
}

View File

@ -1,200 +1,200 @@
// Package realtimedata define real time data operation functions // Package realtimedata define real time data operation functions
package realtimedata package realtimedata
// import ( import (
// "context" "context"
// "encoding/json" "encoding/json"
// "sync" "sync"
// "time" "time"
// "modelRT/constants" "modelRT/constants"
// "modelRT/database" "modelRT/database"
// "modelRT/logger" "modelRT/logger"
// "modelRT/network" "modelRT/network"
// "github.com/confluentinc/confluent-kafka-go/kafka" "github.com/confluentinc/confluent-kafka-go/kafka"
// "github.com/gofrs/uuid" "github.com/gofrs/uuid"
// "gorm.io/gorm" "gorm.io/gorm"
// ) )
// // CacheManager define structure for managing cache items // CacheManager define structure for managing cache items
// type CacheManager struct { type CacheManager struct {
// mutex sync.RWMutex mutex sync.RWMutex
// store sync.Map store sync.Map
// pool sync.Pool pool sync.Pool
// dbClient *gorm.DB dbClient *gorm.DB
// kafkaConsumer *kafka.Consumer kafkaConsumer *kafka.Consumer
// ttl time.Duration ttl time.Duration
// } }
// // NewCacheManager define func to create new cache manager // NewCacheManager define func to create new cache manager
// func NewCacheManager(db *gorm.DB, kf *kafka.Consumer, ttl time.Duration) *CacheManager { func NewCacheManager(db *gorm.DB, kf *kafka.Consumer, ttl time.Duration) *CacheManager {
// cm := &CacheManager{ cm := &CacheManager{
// dbClient: db, dbClient: db,
// kafkaConsumer: kf, kafkaConsumer: kf,
// ttl: ttl, ttl: ttl,
// } }
// cm.pool.New = func() any { cm.pool.New = func() any {
// item := &CacheItem{} item := &CacheItem{}
// return item return item
// } }
// return cm return cm
// } }
// // RealTimeComponentMonitor define func to continuously processing component info and build real time component data monitor from kafka specified topic // RealTimeComponentMonitor define func to continuously processing component info and build real time component data monitor from kafka specified topic
// func (cm *CacheManager) RealTimeComponentMonitor(ctx context.Context, topic string, duration string) { func (cm *CacheManager) RealTimeComponentMonitor(ctx context.Context, topic string, duration string) {
// // context for graceful shutdown // context for graceful shutdown
// cancelCtx, cancel := context.WithCancel(ctx) cancelCtx, cancel := context.WithCancel(ctx)
// defer cancel() defer cancel()
// monitorConsumer := cm.kafkaConsumer monitorConsumer := cm.kafkaConsumer
// // subscribe the monitor topic // subscribe the monitor topic
// err := monitorConsumer.SubscribeTopics([]string{topic}, nil) err := monitorConsumer.SubscribeTopics([]string{topic}, nil)
// if err != nil { if err != nil {
// logger.Error(ctx, "subscribe to the monitor topic failed", "topic", topic, "error", err) logger.Error(ctx, "subscribe to the monitor topic failed", "topic", topic, "error", err)
// return return
// } }
// defer monitorConsumer.Close() defer monitorConsumer.Close()
// timeoutDuration, err := time.ParseDuration(duration) timeoutDuration, err := time.ParseDuration(duration)
// if err != nil { if err != nil {
// logger.Error(ctx, "parse duration failed", "duration", duration, "error", err) logger.Error(ctx, "parse duration failed", "duration", duration, "error", err)
// return return
// } }
// // continuously read messages from kafka // continuously read messages from kafka
// for { for {
// select { select {
// case <-cancelCtx.Done(): case <-cancelCtx.Done():
// logger.Info(ctx, "context canceled, stopping read loop") logger.Info(ctx, "context canceled, stopping read loop")
// return return
// default: default:
// msg, err := monitorConsumer.ReadMessage(timeoutDuration) msg, err := monitorConsumer.ReadMessage(timeoutDuration)
// if err != nil { if err != nil {
// if err.(kafka.Error).Code() == kafka.ErrTimedOut { if err.(kafka.Error).Code() == kafka.ErrTimedOut {
// continue continue
// } }
// logger.Error(ctx, "consumer read message failed", "error", err) logger.Error(ctx, "consumer read message failed", "error", err)
// continue continue
// } }
// var realTimeData network.RealTimeDataReceiveRequest var realTimeData network.RealTimeDataReceiveRequest
// if err := json.Unmarshal(msg.Value, &realTimeData); err != nil { if err := json.Unmarshal(msg.Value, &realTimeData); err != nil {
// logger.Error(ctx, "unmarshal kafka message failed", "message", string(msg.Value), "error", err) logger.Error(ctx, "unmarshal kafka message failed", "message", string(msg.Value), "error", err)
// continue continue
// } }
// key := realTimeData.PayLoad.ComponentUUID key := realTimeData.PayLoad.ComponentUUID
// _, err = cm.StoreIntoCache(ctx, key) _, err = cm.StoreIntoCache(ctx, key)
// if err != nil { if err != nil {
// logger.Error(ctx, "store into cache failed", "key", key, "error", err) logger.Error(ctx, "store into cache failed", "key", key, "error", err)
// continue continue
// } }
// _, err = monitorConsumer.CommitMessage(msg) _, err = monitorConsumer.CommitMessage(msg)
// if err != nil { if err != nil {
// logger.Error(ctx, "manual submission information failed", "message", msg, "error", err) logger.Error(ctx, "manual submission information failed", "message", msg, "error", err)
// } }
// } }
// } }
// } }
// // GetCacheItemFromPool define func to get a cache item from the pool // GetCacheItemFromPool define func to get a cache item from the pool
// func (cm *CacheManager) GetCacheItemFromPool() *CacheItem { func (cm *CacheManager) GetCacheItemFromPool() *CacheItem {
// item := cm.pool.Get().(*CacheItem) item := cm.pool.Get().(*CacheItem)
// item.Reset() item.Reset()
// return item return item
// } }
// // PutCacheItemToPool define func to put a cache item back to the pool // PutCacheItemToPool define func to put a cache item back to the pool
// func (cm *CacheManager) PutCacheItemToPool(item *CacheItem) { func (cm *CacheManager) PutCacheItemToPool(item *CacheItem) {
// if item != nil { if item != nil {
// item.Reset() item.Reset()
// cm.pool.Put(item) cm.pool.Put(item)
// } }
// } }
// // StoreIntoCache define func to store data into cache, if the key already exists and is not expired, return the existing item // StoreIntoCache define func to store data into cache, if the key already exists and is not expired, return the existing item
// func (cm *CacheManager) StoreIntoCache(ctx context.Context, key string) (*CacheItem, error) { func (cm *CacheManager) StoreIntoCache(ctx context.Context, key string) (*CacheItem, error) {
// cm.mutex.Lock() cm.mutex.Lock()
// defer cm.mutex.Unlock() defer cm.mutex.Unlock()
// if cachedItemRaw, ok := cm.store.Load(key); ok { if cachedItemRaw, ok := cm.store.Load(key); ok {
// cachedItem := cachedItemRaw.(*CacheItem) cachedItem := cachedItemRaw.(*CacheItem)
// if !cachedItem.IsExpired(cm.ttl) { if !cachedItem.IsExpired(cm.ttl) {
// cachedItem.lastAccessed = time.Now() cachedItem.lastAccessed = time.Now()
// return cachedItem, nil return cachedItem, nil
// } }
// cm.PutCacheItemToPool(cachedItem) cm.PutCacheItemToPool(cachedItem)
// cm.store.Delete(key) cm.store.Delete(key)
// } }
// uuid, err := uuid.FromString(key) uuid, err := uuid.FromString(key)
// if err != nil { if err != nil {
// logger.Error(ctx, "format key to UUID failed", "key", key, "error", err) logger.Error(ctx, "format key to UUID failed", "key", key, "error", err)
// return nil, constants.ErrFormatUUID return nil, constants.ErrFormatUUID
// } }
// componentInfo, err := database.QueryComponentByUUID(ctx, cm.dbClient, uuid) componentInfo, err := database.QueryComponentByUUID(ctx, cm.dbClient, uuid)
// if err != nil { if err != nil {
// logger.Error(ctx, "query component from db failed by uuid", "component_uuid", key, "error", err) logger.Error(ctx, "query component from db failed by uuid", "component_uuid", key, "error", err)
// return nil, constants.ErrQueryComponentByUUID return nil, constants.ErrQueryComponentByUUID
// } }
// newItem := cm.GetCacheItemFromPool() newItem := cm.GetCacheItemFromPool()
// newItem.key = key newItem.key = key
// newItem.value = []any{componentInfo.Context} newItem.value = []any{componentInfo.Context}
// // newItem.calculatorFunc = componentInfo.CalculatorFunc // newItem.calculatorFunc = componentInfo.CalculatorFunc
// newItem.lastAccessed = time.Now() newItem.lastAccessed = time.Now()
// // 在存储前启动goroutine // 在存储前启动goroutine
// if newItem.calculatorFunc != nil { if newItem.calculatorFunc != nil {
// newCtx, newCancel := context.WithCancel(ctx) newCtx, newCancel := context.WithCancel(ctx)
// newItem.cancelFunc = newCancel newItem.cancelFunc = newCancel
// go newItem.calculatorFunc(newCtx, newItem.topic, newItem.value) go newItem.calculatorFunc(newCtx, newItem.topic, newItem.value)
// } }
// cm.store.Store(key, newItem) cm.store.Store(key, newItem)
// return newItem, nil return newItem, nil
// } }
// // UpdateCacheItem define func to update cache item with new data and trigger new calculation // UpdateCacheItem define func to update cache item with new data and trigger new calculation
// func (cm *CacheManager) UpdateCacheItem(ctx context.Context, key string, newData any) error { func (cm *CacheManager) UpdateCacheItem(ctx context.Context, key string, newData any) error {
// cm.mutex.Lock() cm.mutex.Lock()
// defer cm.mutex.Unlock() defer cm.mutex.Unlock()
// cache, existed := cm.store.Load(key) cache, existed := cm.store.Load(key)
// if !existed { if !existed {
// return nil return nil
// } }
// cacheItem, ok := cache.(*CacheItem) cacheItem, ok := cache.(*CacheItem)
// if !ok { if !ok {
// return constants.ErrFormatCache return constants.ErrFormatCache
// } }
// // stop old calculation goroutine // stop old calculation goroutine
// if cacheItem.cancelFunc != nil { if cacheItem.cancelFunc != nil {
// cacheItem.cancelFunc() cacheItem.cancelFunc()
// } }
// oldValue := cacheItem.value oldValue := cacheItem.value
// cacheItem.value = []any{newData} cacheItem.value = []any{newData}
// cacheItem.lastAccessed = time.Now() cacheItem.lastAccessed = time.Now()
// newCtx, newCancel := context.WithCancel(ctx) newCtx, newCancel := context.WithCancel(ctx)
// cacheItem.cancelFunc = newCancel cacheItem.cancelFunc = newCancel
// swapped := cm.store.CompareAndSwap(key, oldValue, cacheItem) swapped := cm.store.CompareAndSwap(key, oldValue, cacheItem)
// if !swapped { if !swapped {
// cacheValue, _ := cm.store.Load(key) cacheValue, _ := cm.store.Load(key)
// logger.Error(ctx, "store new value into cache failed,existed concurrent modification risk", "key", key, "old_value", oldValue, "cache_value", cacheValue, "store_value", cacheItem.value) logger.Error(ctx, "store new value into cache failed,existed concurrent modification risk", "key", key, "old_value", oldValue, "cache_value", cacheValue, "store_value", cacheItem.value)
// return constants.ErrConcurrentModify return constants.ErrConcurrentModify
// } }
// // start new calculation goroutine // start new calculation goroutine
// if cacheItem.calculatorFunc != nil { if cacheItem.calculatorFunc != nil {
// go cacheItem.calculatorFunc(newCtx, cacheItem.topic, cacheItem.value) go cacheItem.calculatorFunc(newCtx, cacheItem.topic, cacheItem.value)
// } }
// return nil return nil
// } }

View File

@ -1,342 +0,0 @@
// Package realtimedata define real time data operation functions
package realtimedata
import (
"context"
"errors"
"fmt"
"strings"
"modelRT/constants"
"modelRT/logger"
"modelRT/real-time-data/event"
)
// RealTimeAnalyzer define interface general methods for real-time data analysis and event triggering
type RealTimeAnalyzer interface {
	// AnalyzeAndTriggerEvent inspects one batch of real-time samples
	// against conf and fires the configured event action on a breach.
	AnalyzeAndTriggerEvent(ctx context.Context, conf *ComputeConfig, realTimeValues []float64)
}
// teEventThresholds stores the telemetry floating-point limits parsed
// from a compute config's "cause" map.
type teEventThresholds struct {
	up           float64 // upper limit
	upup         float64 // upper-upper (severe) limit
	down         float64 // lower limit
	downdown     float64 // lower-lower (severe) limit
	isFloatCause bool    // true when at least one float limit key was present
}

// parseTEThresholds extracts the telemetry limit values from the cause
// map. Each of "upup"/"up"/"down"/"downdown" must be a float64 when
// present, and those float keys are mutually exclusive with the
// telesignal "edge" key.
func parseTEThresholds(cause map[string]any) (teEventThresholds, error) {
	var parsed teEventThresholds
	targets := []struct {
		key string
		dst *float64
	}{
		{"upup", &parsed.upup},
		{"up", &parsed.up},
		{"down", &parsed.down},
		{"downdown", &parsed.downdown},
	}
	for _, target := range targets {
		raw, found := cause[target.key]
		if !found {
			continue
		}
		number, ok := raw.(float64)
		if !ok {
			return teEventThresholds{}, fmt.Errorf("key:%s type is incorrect. expected float64, actual %T", target.key, raw)
		}
		*target.dst = number
		parsed.isFloatCause = true
	}
	// quickly check mutual exclusion
	if _, exists := cause["edge"]; exists && parsed.isFloatCause {
		return teEventThresholds{}, errors.New("cause config error: 'up/down' keys and 'edge' key are mutually exclusive, but both found")
	}
	return parsed, nil
}
// getTEBreachType classifies a telemetry sample against the configured
// limits and returns the matching breach-type constant, or "" when no
// limit is breached. Precedence is upup > up > downdown > down, and a
// limit value <= 0 is treated as disabled (NOTE(review): limits that
// legitimately sit at or below zero can therefore never trigger —
// confirm this is intended).
func getTEBreachType(value float64, t teEventThresholds) string {
	switch {
	case t.upup > 0 && value > t.upup:
		return constants.TelemetryUpUpLimit
	case t.up > 0 && value > t.up:
		return constants.TelemetryUpLimit
	case t.downdown > 0 && value < t.downdown:
		return constants.TelemetryDownDownLimit
	case t.down > 0 && value < t.down:
		return constants.TelemetryDownLimit
	default:
		return ""
	}
}
// TEAnalyzer define struct of store the thresholds required for telemetry and implements the analysis logic.
type TEAnalyzer struct {
	// Thresholds holds the parsed telemetry limits (up/upup/down/downdown).
	Thresholds teEventThresholds
}

// AnalyzeAndTriggerEvent define func to implement the RealTimeAnalyzer interface
// for telemetry data by delegating to the sliding-window analysis logic.
func (t *TEAnalyzer) AnalyzeAndTriggerEvent(ctx context.Context, conf *ComputeConfig, realTimeValues []float64) {
	analyzeTEDataLogic(ctx, conf, t.Thresholds, realTimeValues)
}
// analyzeTEDataLogic define func to processing telemetry data and event triggering.
// It slides a window of conf.minBreachCount consecutive samples over
// realTimeValues; when every sample in a window breaches the same limit type,
// that breach type fires at most one event per batch.
func analyzeTEDataLogic(ctx context.Context, conf *ComputeConfig, thresholds teEventThresholds, realTimeValues []float64) {
	windowSize := conf.minBreachCount
	if windowSize <= 0 {
		logger.Error(ctx, "variable minBreachCount is invalid or zero, analysis skipped", "minBreachCount", windowSize)
		return
	}
	// mark whether any events have been triggered in this batch
	var eventTriggered bool
	// one flag per breach type so each type triggers at most once per batch.
	// NOTE(review): keys assume the constants.Telemetry*Limit values equal
	// "up"/"upup"/"down"/"downdown" — TODO confirm; a mismatch is harmless
	// here because missing map keys read as false anyway.
	breachTriggers := map[string]bool{
		"up": false, "upup": false, "down": false, "downdown": false,
	}
	// implement slide window to determine breach counts
	for i := 0; i <= len(realTimeValues)-windowSize; i++ {
		window := realTimeValues[i : i+windowSize]
		// the first sample's breach type anchors the window; an in-limit
		// sample means this window cannot be a continuous breach
		firstValueBreachType := getTEBreachType(window[0], thresholds)
		if firstValueBreachType == "" {
			continue
		}
		// the window matches only if every remaining sample breaches the
		// same limit type as the first
		allMatch := true
		for j := 1; j < windowSize; j++ {
			currentValueBreachType := getTEBreachType(window[j], thresholds)
			if currentValueBreachType != firstValueBreachType {
				allMatch = false
				break
			}
		}
		if allMatch {
			// in the case of a continuous sequence of out-of-limit events, check whether this type of event has already been triggered in the current batch of data
			if !breachTriggers[firstValueBreachType] {
				// trigger event
				logger.Warn(ctx, "event triggered by sliding window", "breach_type", firstValueBreachType, "value", window[windowSize-1])
				breachTriggers[firstValueBreachType] = true
				eventTriggered = true
			}
		}
	}
	if eventTriggered {
		command, content := genTEEventCommandAndContent(ctx, conf.Action)
		// TODO consider whether content may be empty; disallowed for now
		if command == "" || content == "" {
			// NOTE(review): "evnet" is a typo in this log message
			logger.Error(ctx, "generate telemetry evnet command or content failed", "action", conf.Action, "command", command, "content", content)
			return
		}
		event.TriggerEventAction(ctx, command, content)
		return
	}
}
// genTEEventCommandAndContent builds the event command and the
// comma-joined parameter string from a telemetry action map. On a
// malformed field it logs the problem and returns zero values for
// whatever part it could not build.
func genTEEventCommandAndContent(ctx context.Context, action map[string]any) (command string, content string) {
	rawCommand, found := action["command"]
	if !found {
		logger.Error(ctx, "can not find command variable into action map", "action", action)
		return "", ""
	}
	name, ok := rawCommand.(string)
	if !ok {
		logger.Error(ctx, "convert command to string type failed", "command", rawCommand, "type", fmt.Sprintf("%T", rawCommand))
		return "", ""
	}
	command = name

	rawParams, found := action["parameters"]
	if !found {
		logger.Error(ctx, "can not find parameters variable into action map", "action", action)
		return command, ""
	}
	params, ok := rawParams.([]any)
	if !ok {
		logger.Error(ctx, "convert parameters to []any type failed", "parameters", rawParams, "type", fmt.Sprintf("%T", rawParams))
		return command, ""
	}

	var joined strings.Builder
	for index, raw := range params {
		// the separator is written before the type check, so a skipped
		// non-string parameter still leaves an empty slot (",,")
		if index > 0 {
			joined.WriteString(",")
		}
		text, ok := raw.(string)
		if !ok {
			logger.Warn(ctx, "parameter type is incorrect, skip this parameter", "parameter", raw, "type", fmt.Sprintf("%T", raw))
			continue
		}
		joined.WriteString(text)
	}
	return command, joined.String()
}
// tiEventThresholds define struct of store the telesignal edge threshold parsed from conf field cause
type tiEventThresholds struct {
	edge string // "raising" or "falling"
	// isFloatCause mirrors the telemetry struct; always false for telesignal
	isFloatCause bool
}

// parseTIThresholds define func to parse telesignal thresholds by cause map.
// (The doc comment previously misnamed this parseTEThresholds.) The cause
// map must contain key "edge" with the string value "raising" or "falling";
// any other shape is an error.
func parseTIThresholds(cause map[string]any) (tiEventThresholds, error) {
	const edgeKey = "edge"
	value, exists := cause[edgeKey]
	if !exists {
		return tiEventThresholds{}, fmt.Errorf("cause map is invalid for telesignal: missing required key '%s'", edgeKey)
	}
	strVal, ok := value.(string)
	if !ok {
		return tiEventThresholds{}, fmt.Errorf("key:%s already exists but type is incorrect. expected string, actual %T", edgeKey, value)
	}
	switch strVal {
	case "raising", "falling":
		return tiEventThresholds{edge: strVal, isFloatCause: false}, nil
	default:
		return tiEventThresholds{}, fmt.Errorf("key:%s value is incorrect, actual value %s. expected 'raising' or 'falling'", edgeKey, strVal)
	}
}
// getTIBreachType classifies one telesignal transition: it returns the
// trigger-type constant when the previous->current pair matches the
// configured edge (0->1 for raising, 1->0 for falling), "" otherwise.
func getTIBreachType(currentValue float64, previousValue float64, t tiEventThresholds) string {
	switch t.edge {
	case constants.TelesignalRaising:
		if previousValue == 0.0 && currentValue == 1.0 {
			return constants.TIBreachTriggerType
		}
	case constants.TelesignalFalling:
		if previousValue == 1.0 && currentValue == 0.0 {
			return constants.TIBreachTriggerType
		}
	}
	return ""
}
// TIAnalyzer define struct of store the thresholds required for remote signaling and implements the analysis logic
type TIAnalyzer struct {
	// Thresholds holds the parsed telesignal edge configuration.
	Thresholds tiEventThresholds
}

// AnalyzeAndTriggerEvent define func to implement the RealTimeAnalyzer interface
// for telesignal data by delegating to the edge-transition analysis logic.
func (t *TIAnalyzer) AnalyzeAndTriggerEvent(ctx context.Context, conf *ComputeConfig, realTimeValues []float64) {
	analyzeTIDataLogic(ctx, conf, t.Thresholds, realTimeValues)
}
// analyzeTIDataLogic define func to processing telesignal data and event triggering.
// It converts the sample series into per-pair edge transitions, then slides a
// window of conf.minBreachCount consecutive transitions; a window of identical
// matching transitions fires at most one event per batch.
func analyzeTIDataLogic(ctx context.Context, conf *ComputeConfig, thresholds tiEventThresholds, realTimeValues []float64) {
	windowSize := conf.minBreachCount
	if windowSize <= 0 {
		logger.Error(ctx, "variable minBreachCount is invalid or zero, analysis skipped", "minBreachCount", windowSize)
		return
	}
	// an edge transition needs at least two samples
	numDataPoints := len(realTimeValues)
	if numDataPoints < 2 {
		logger.Info(ctx, "data points less than 2, no change event possible, analysis skipped", "data_points", numDataPoints)
		return
	}
	// pre calculate the change event type for all adjacent point pairs
	numChanges := numDataPoints - 1
	changeBreachTypes := make([]string, numChanges)
	for i := range numChanges {
		previousValue := realTimeValues[i]
		currentValue := realTimeValues[i+1]
		changeBreachTypes[i] = getTIBreachType(currentValue, previousValue, thresholds)
	}
	if numChanges < windowSize {
		logger.Error(ctx, "number of change events is less than window size, analysis skipped", "num_changes", numChanges, "window_size", windowSize)
		return
	}
	// mark whether any events have been triggered in this batch
	var eventTriggered bool
	breachTriggers := map[string]bool{
		constants.TIBreachTriggerType: false,
	}
	// slide the window over the precomputed transitions
	for i := 0; i <= numChanges-windowSize; i++ {
		windowBreachTypes := changeBreachTypes[i : i+windowSize]
		// the first transition anchors the window; "" means no edge here
		firstBreachType := windowBreachTypes[0]
		if firstBreachType == "" {
			continue
		}
		allMatch := true
		for j := 1; j < windowSize; j++ {
			if windowBreachTypes[j] != firstBreachType {
				allMatch = false
				break
			}
		}
		if allMatch {
			// trigger each breach type at most once per batch
			if !breachTriggers[firstBreachType] {
				// index of the sample that completed the matching window
				finalValueIndex := i + windowSize
				logger.Warn(ctx, "event triggered by sliding window", "breach_type", firstBreachType, "value", realTimeValues[finalValueIndex])
				breachTriggers[firstBreachType] = true
				eventTriggered = true
			}
		}
	}
	if eventTriggered {
		command, content := genTIEventCommandAndContent(conf.Action)
		// TODO consider whether content may be empty; disallowed for now
		if command == "" || content == "" {
			// NOTE(review): message says "telemetry evnet" but this is the
			// telesignal path — copy-paste typo in the log text
			logger.Error(ctx, "generate telemetry evnet command or content failed", "action", conf.Action, "command", command, "content", content)
			return
		}
		event.TriggerEventAction(ctx, command, content)
		return
	}
}
// genTIEventCommandAndContent extracts the event command and its comma-joined
// parameter string from the action map of a compute config.
// A missing or mistyped "command" entry yields ("", ""); a missing or mistyped
// parameter list yields the command together with an empty content string.
func genTIEventCommandAndContent(action map[string]any) (command string, content string) {
	rawCommand, found := action["command"]
	if !found {
		return "", ""
	}
	name, valid := rawCommand.(string)
	if !valid {
		return "", ""
	}
	rawParams, found := action["parametes"]
	if !found {
		return name, ""
	}
	params, valid := rawParams.([]string)
	if !valid {
		return name, ""
	}
	return name, strings.Join(params, ",")
}

View File

@ -1,72 +0,0 @@
// Package realtimedata define real time data operation functions
package realtimedata
import (
"sync"
)
// ComputeConfig define struct of measurement computation
type ComputeConfig struct {
	// Cause holds the parsed trigger conditions (float thresholds or edge) from the event plan.
	Cause map[string]any
	// Action holds the raw event action description (command and parameters) from the event plan.
	Action map[string]any
	// TODO: reserved entry point for tuning this value freely later
	// min consecutive breach count
	minBreachCount int
	// Duration is the period, in seconds, between two analysis rounds (ticker interval).
	Duration int
	// DataSize is passed as the range size when querying real time samples from redis.
	DataSize int64
	// QueryKey is the redis key the measurement's real time data is read from.
	QueryKey string
	// StopGchan signals the per-measurement computing goroutine to stop.
	StopGchan chan struct{}
	// Analyzer implements the analysis strategy for this measurement (telemetry or telesignal).
	Analyzer RealTimeAnalyzer
}
// MeasComputeState define struct of manages the state of measurement computations using sync.Map
type MeasComputeState struct {
	// measMap maps a measurement key (component UUID string) to its *ComputeConfig.
	measMap sync.Map
}
// NewMeasComputeState define func to create and returns a new instance of MeasComputeState
// The zero value of the embedded sync.Map is ready to use, so no field setup is needed.
func NewMeasComputeState() *MeasComputeState {
	return new(MeasComputeState)
}
// Store define func to store a compute configuration for the specified key
// The key is the measurement's component UUID string (see StartRealTimeDataComputing).
func (m *MeasComputeState) Store(key string, config *ComputeConfig) {
	m.measMap.Store(key, config)
}
// Load define func to retrieve the compute configuration for the specified key
// The boolean result reports whether the key was present.
func (m *MeasComputeState) Load(key string) (*ComputeConfig, bool) {
	raw, found := m.measMap.Load(key)
	if found {
		return raw.(*ComputeConfig), true
	}
	return nil, false
}
// Delete define func to remove the compute configuration for the specified key
// Deleting an absent key is a no-op.
func (m *MeasComputeState) Delete(key string) {
	m.measMap.Delete(key)
}
// LoadOrStore define func to returns the existing compute configuration for the key if present,otherwise stores and returns the given configuration
// The boolean result is true when an existing value was loaded, false when config was stored.
func (m *MeasComputeState) LoadOrStore(key string, config *ComputeConfig) (*ComputeConfig, bool) {
	value, loaded := m.measMap.LoadOrStore(key, config)
	return value.(*ComputeConfig), loaded
}
// Range define func to iterate over all key-configuration pairs in the map
// Iteration stops early when f returns false, mirroring sync.Map.Range semantics.
func (m *MeasComputeState) Range(f func(key string, config *ComputeConfig) bool) {
	visit := func(rawKey, rawValue any) bool {
		return f(rawKey.(string), rawValue.(*ComputeConfig))
	}
	m.measMap.Range(visit)
}
// Len define func to return the number of compute configurations in the map
// The count is a snapshot: concurrent mutation may invalidate it immediately.
func (m *MeasComputeState) Len() int {
	var total int
	m.measMap.Range(func(_, _ any) bool {
		total++
		return true
	})
	return total
}

View File

@ -1,74 +0,0 @@
// Package event define real time data event operation functions
package event
import (
"context"
"modelRT/logger"
)
type actionHandler func(ctx context.Context, content string) error
// actionDispatchMap define variable to store all action handler into map
// Keys are the command strings accepted by TriggerEventAction.
var actionDispatchMap = map[string]actionHandler{
	"info":      handleInfoAction,
	"warning":   handleWarningAction,
	"error":     handleErrorAction,
	"critical":  handleCriticalAction,
	"exception": handleExceptionAction,
}
// TriggerEventAction define func to trigger event by action in compute config
// It resolves the command in actionDispatchMap, runs the matching handler and
// logs the outcome either way; unknown commands are logged and dropped.
func TriggerEventAction(ctx context.Context, command string, content string) {
	handler, ok := actionDispatchMap[command]
	if !ok {
		logger.Error(ctx, "unknown action command", "command", command)
		return
	}
	if err := handler(ctx, content); err != nil {
		logger.Error(ctx, "action handler failed", "command", command, "content", content, "error", err)
		return
	}
	logger.Info(ctx, "action handler success", "command", command, "content", content)
}
// handleInfoAction handles the "info" action command.
func handleInfoAction(ctx context.Context, content string) error {
	// actually perform sending alerts, logging, etc. here
	actionParams := content
	// ... logic to send info level event using actionParams ...
	// fixed: info events were logged at Warn level; log at Info to match the action severity
	logger.Info(ctx, "trigger info event", "message", actionParams)
	return nil
}
// handleWarningAction handles the "warning" action command.
func handleWarningAction(ctx context.Context, content string) error {
	// actually perform sending alerts, logging, etc. here
	params := content
	// ... logic to send warning level event using params ...
	logger.Warn(ctx, "trigger warning event", "message", params)
	return nil
}
// handleErrorAction handles the "error" action command.
func handleErrorAction(ctx context.Context, content string) error {
	// actually perform sending alerts, logging, etc. here
	actionParams := content
	// ... logic to send error level event using actionParams ...
	// fixed: error events were logged at Warn level; log at Error to match the action severity
	logger.Error(ctx, "trigger error event", "message", actionParams)
	return nil
}
// handleCriticalAction handles the "critical" action command.
func handleCriticalAction(ctx context.Context, content string) error {
	// actually perform sending alerts, logging, etc. here
	actionParams := content
	// ... logic to send critical level event using actionParams ...
	// fixed: critical events were logged at Warn level; Error is the highest level
	// the project logger exposes here (Info/Warn/Error are used elsewhere in this package)
	logger.Error(ctx, "trigger critical event", "message", actionParams)
	return nil
}
// handleExceptionAction handles the "exception" action command.
func handleExceptionAction(ctx context.Context, content string) error {
	// actually perform sending alerts, logging, etc. here
	actionParams := content
	// ... logic to send exception level event using actionParams ...
	// fixed: "except" typo in the log message, and exception events were logged at Warn level
	logger.Error(ctx, "trigger exception event", "message", actionParams)
	return nil
}

63
real-time-data/kafka.go Normal file
View File

@ -0,0 +1,63 @@
// Package realtimedata define real time data operation functions
package realtimedata
import (
"context"
"time"
"modelRT/logger"
"github.com/confluentinc/confluent-kafka-go/kafka"
)
// RealTimeDataComputer continuously processing real-time data from Kafka specified topics
//
// duration is a time.ParseDuration string controlling how long each ReadMessage
// call waits for the next record. The function returns when the parent context
// is canceled or the interrupt channel fires.
func RealTimeDataComputer(ctx context.Context, consumerConfig kafka.ConfigMap, topics []string, duration string) {
	// context for graceful shutdown
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	// setup a channel to listen for interrupt signals
	// TODO: pass the interrupt signal in as a parameter
	interrupt := make(chan struct{}, 1)
	// read message timeout (fixed: the ParseDuration error was previously ignored)
	timeoutDuration, err := time.ParseDuration(duration)
	if err != nil {
		logger.Error(ctx, "parse read message time duration failed", "duration", duration, "error", err)
		return
	}
	// create a new consumer
	consumer, err := kafka.NewConsumer(&consumerConfig)
	if err != nil {
		logger.Error(ctx, "init kafka consume by config failed", "config", consumerConfig, "error", err)
		// fixed: previously continued with an unusable consumer after a failed init
		return
	}
	// subscribe to the topic
	err = consumer.SubscribeTopics(topics, nil)
	if err != nil {
		logger.Error(ctx, "subscribe to the topic failed", "topic", topics, "error", err)
		// fixed: previously fell through into the read loop after a failed subscribe
		consumer.Close()
		return
	}
	// start a goroutine to handle shutdown
	go func() {
		<-interrupt
		cancel()
		consumer.Close()
	}()
	// continuously read messages from Kafka
	for {
		msg, err := consumer.ReadMessage(timeoutDuration)
		if err != nil {
			if ctx.Err() == context.Canceled {
				logger.Info(ctx, "context canceled, stopping read loop")
				break
			}
			logger.Error(ctx, "consumer read message failed", "error", err)
			continue
		}
		// TODO: process the kafka subscription data with an ants.Pool
		_, err = consumer.CommitMessage(msg)
		if err != nil {
			logger.Error(ctx, "manual submission information failed", "message", msg, "error", err)
		}
	}
}

View File

@ -1,400 +0,0 @@
// Package realtimedata define real time data operation functions
package realtimedata
import (
"context"
"errors"
"fmt"
"time"
"modelRT/constants"
"modelRT/diagram"
"modelRT/logger"
"modelRT/model"
"modelRT/network"
"modelRT/orm"
"modelRT/util"
)
var (
	// RealTimeDataChan define channel of real time data receive
	RealTimeDataChan chan network.RealTimeDataReceiveRequest
	// globalComputeState tracks per-measurement compute configs keyed by component UUID string.
	globalComputeState *MeasComputeState
)
// init allocates the shared receive channel (buffer of 100) and the global compute state.
func init() {
	RealTimeDataChan = make(chan network.RealTimeDataReceiveRequest, 100)
	globalComputeState = NewMeasComputeState()
}
// StartRealTimeDataComputing define func to start real time data process goroutines by measurement info
// For every measurement whose event plan enables computing, it builds a ComputeConfig,
// registers it in globalComputeState and spawns a continuousComputation goroutine with
// the measurement UUID attached to the context.
func StartRealTimeDataComputing(ctx context.Context, measurements []orm.Measurement) {
	for _, measurement := range measurements {
		enableValue, exist := measurement.EventPlan["enable"]
		if !exist {
			logger.Info(ctx, "measurement object do not need real time data computing", "measurement_uuid", measurement.ComponentUUID)
			continue
		}
		enable, ok := enableValue.(bool)
		// fixed: this branch was unreachable before — a failed type assertion yields
		// enable == false, so the old "!exist || !enable" check swallowed it first
		if !ok {
			logger.Error(ctx, "covert enable variable to boolean type failed", "measurement_uuid", measurement.ComponentUUID, "enable", enableValue)
			continue
		}
		if !enable {
			logger.Info(ctx, "measurement object do not need real time data computing", "measurement_uuid", measurement.ComponentUUID)
			continue
		}
		conf, err := initComputeConfig(measurement)
		if err != nil {
			logger.Error(ctx, "failed to initialize real time compute config", "measurement_uuid", measurement.ComponentUUID, "error", err)
			continue
		}
		if conf == nil {
			logger.Info(ctx, "measurement object is disabled or does not require real time computing", "measurement_uuid", measurement.ComponentUUID)
			continue
		}
		uuidStr := measurement.ComponentUUID.String()
		enrichedCtx := context.WithValue(ctx, constants.MeasurementUUIDKey, uuidStr)
		conf.StopGchan = make(chan struct{})
		globalComputeState.Store(uuidStr, conf)
		logger.Info(ctx, "starting real time data computing for measurement", "measurement_uuid", measurement.ComponentUUID)
		go continuousComputation(enrichedCtx, conf)
	}
}
// initComputeConfig builds the ComputeConfig for a measurement from its event plan.
// It returns (nil, nil) when the plan has no "enable" switch or is disabled, an
// error when the plan is malformed, and a populated config (with an analyzer
// chosen from the cause keys) otherwise.
func initComputeConfig(measurement orm.Measurement) (*ComputeConfig, error) {
	var err error
	enableValue, exist := measurement.EventPlan["enable"]
	enable, ok := enableValue.(bool)
	if !exist {
		// no "enable" switch at all: real time computing was not requested
		return nil, nil
	}
	if !ok {
		return nil, fmt.Errorf("field enable can not be converted to boolean, found type: %T", enableValue)
	}
	if !enable {
		return nil, nil
	}
	conf := &ComputeConfig{}
	causeValue, exist := measurement.EventPlan["cause"]
	if !exist {
		return nil, errors.New("missing required field cause")
	}
	cause, ok := causeValue.(map[string]any)
	if !ok {
		return nil, fmt.Errorf("field cause can not be converted to map[string]any, found type: %T", causeValue)
	}
	conf.Cause, err = processCauseMap(cause)
	if err != nil {
		return nil, fmt.Errorf("parse content of field cause failed:%w", err)
	}
	actionValue, exist := measurement.EventPlan["action"]
	if !exist {
		return nil, errors.New("missing required field action")
	}
	action, ok := actionValue.(map[string]any)
	if !ok {
		return nil, fmt.Errorf("field action can not be converted to map[string]any, found type: %T", actionValue)
	}
	conf.Action = action
	queryKey, err := model.GenerateMeasureIdentifier(measurement.DataSource)
	if err != nil {
		return nil, fmt.Errorf("generate redis query key by datasource failed: %w", err)
	}
	conf.QueryKey = queryKey
	conf.DataSize = int64(measurement.Size)
	// TODO use constant values for temporary settings
	conf.minBreachCount = constants.MinBreachCount
	// TODO: optimize how the duration is created later (hard-coded to 10 seconds for now)
	conf.Duration = 10
	// any float threshold key marks a telemetry (TE) cause; otherwise it is a telesignal (TI) edge cause
	isFloatCause := false
	if _, exists := conf.Cause["up"]; exists {
		isFloatCause = true
	} else if _, exists := conf.Cause["down"]; exists {
		isFloatCause = true
	} else if _, exists := conf.Cause["upup"]; exists {
		isFloatCause = true
	} else if _, exists := conf.Cause["downdown"]; exists {
		isFloatCause = true
	}
	if isFloatCause {
		// te config
		teThresholds, err := parseTEThresholds(conf.Cause)
		if err != nil {
			return nil, fmt.Errorf("failed to parse telemetry thresholds: %w", err)
		}
		conf.Analyzer = &TEAnalyzer{Thresholds: teThresholds}
	} else {
		// ti config
		tiThresholds, err := parseTIThresholds(conf.Cause)
		if err != nil {
			return nil, fmt.Errorf("failed to parse telesignal thresholds: %w", err)
		}
		conf.Analyzer = &TIAnalyzer{Thresholds: tiThresholds}
	}
	return conf, nil
}
// processCauseMap validates and extracts the event cause settings from the raw
// cause map. It returns either the float threshold keys (up/down/upup/downdown)
// or the telesignal edge key ("raising"/"falling"); an error is returned when
// neither form is present or a value has the wrong type.
func processCauseMap(data map[string]any) (map[string]any, error) {
	causeResult := make(map[string]any)
	keysToExtract := []string{"up", "down", "upup", "downdown"}
	var foundFloatKey bool
	for _, key := range keysToExtract {
		if value, exists := data[key]; exists {
			foundFloatKey = true
			// check value type
			if floatVal, ok := value.(float64); ok {
				causeResult[key] = floatVal
			} else {
				return nil, fmt.Errorf("key:%s already exists but type is incorrect.expected float64, actual %T", key, value)
			}
		}
	}
	if foundFloatKey {
		return causeResult, nil
	}
	edgeKey := "edge"
	value, exists := data[edgeKey]
	if !exists {
		return nil, fmt.Errorf("key:%s do not exists", edgeKey)
	}
	stringVal, ok := value.(string)
	if !ok {
		return nil, fmt.Errorf("key:%s already exists but type is incorrect.expected string, actual %T", edgeKey, value)
	}
	switch stringVal {
	case "raising", "falling":
		causeResult[edgeKey] = stringVal
	default:
		return nil, fmt.Errorf("key:%s value is incorrect,actual value %s", edgeKey, value)
	}
	// fixed: a valid edge cause previously fell through to an unconditional error
	// return, so telesignal configurations could never be parsed successfully
	return causeResult, nil
}
// continuousComputation periodically queries a measurement's real time data
// from redis and runs the configured analyzer on every tick. It exits when the
// per-measurement StopGchan fires or the parent context is done.
func continuousComputation(ctx context.Context, conf *ComputeConfig) {
	client := diagram.NewRedisClient()
	// measurement UUID was attached to the context by StartRealTimeDataComputing
	uuid, _ := ctx.Value(constants.MeasurementUUIDKey).(string)
	duration := util.SecondsToDuration(conf.Duration)
	ticker := time.NewTicker(duration)
	defer ticker.Stop()
	for {
		select {
		case <-conf.StopGchan:
			logger.Info(ctx, "continuous computing groutine stopped by local StopGchan", "uuid", uuid)
			return
		case <-ctx.Done():
			logger.Info(ctx, "continuous computing goroutine stopped by parent context done signal")
			return
		case <-ticker.C:
			members, err := client.QueryByZRangeByLex(ctx, conf.QueryKey, conf.DataSize)
			if err != nil {
				logger.Error(ctx, "query real time data from redis failed", "key", conf.QueryKey, "error", err)
				continue
			}
			// convert the zset members into a time-ordered float slice for analysis
			realTimedatas := util.ConvertZSetMembersToFloat64(members)
			if conf.Analyzer != nil {
				conf.Analyzer.AnalyzeAndTriggerEvent(ctx, conf, realTimedatas)
			} else {
				logger.Error(ctx, "analyzer is not initialized for this measurement", "uuid", uuid)
			}
		}
	}
}
// // ReceiveChan define func to real time data receive and process
// func ReceiveChan(ctx context.Context, consumerConfig *kafka.ConfigMap, topics []string, duration float32) {
// consumer, err := kafka.NewConsumer(consumerConfig)
// if err != nil {
// logger.Error(ctx, "create kafka consumer failed", "error", err)
// return
// }
// defer consumer.Close()
// err = consumer.SubscribeTopics(topics, nil)
// if err != nil {
// logger.Error(ctx, "subscribe kafka topics failed", "topic", topics, "error", err)
// return
// }
// batchSize := 100
// batchTimeout := util.SecondsToDuration(duration)
// messages := make([]*kafka.Message, 0, batchSize)
// lastCommit := time.Now()
// logger.Info(ctx, "start consuming from kafka", "topic", topics)
// for {
// select {
// case <-ctx.Done():
// logger.Info(ctx, "stop real time data computing by context cancel")
// return
// case realTimeData := <-RealTimeDataChan:
// componentUUID := realTimeData.PayLoad.ComponentUUID
// component, err := diagram.GetComponentMap(componentUUID)
// if err != nil {
// logger.Error(ctx, "query component info from diagram map by componet id failed", "component_uuid", componentUUID, "error", err)
// continue
// }
// componentType := component.Type
// if componentType != constants.DemoType {
// logger.Error(ctx, "can not process real time data of component type not equal DemoType", "component_uuid", componentUUID)
// continue
// }
// var anchorName string
// var compareValUpperLimit, compareValLowerLimit float64
// var anchorRealTimeData []float64
// var calculateFunc func(archorValue float64, args ...float64) float64
// // calculateFunc, params := config.SelectAnchorCalculateFuncAndParams(componentType, anchorName, componentData)
// for _, param := range realTimeData.PayLoad.Values {
// anchorRealTimeData = append(anchorRealTimeData, param.Value)
// }
// anchorConfig := config.AnchorParamConfig{
// AnchorParamBaseConfig: config.AnchorParamBaseConfig{
// ComponentUUID: componentUUID,
// AnchorName: anchorName,
// CompareValUpperLimit: compareValUpperLimit,
// CompareValLowerLimit: compareValLowerLimit,
// AnchorRealTimeData: anchorRealTimeData,
// },
// CalculateFunc: calculateFunc,
// CalculateParams: []float64{},
// }
// anchorChan, err := pool.GetAnchorParamChan(ctx, componentUUID)
// if err != nil {
// logger.Error(ctx, "get anchor param chan failed", "component_uuid", componentUUID, "error", err)
// continue
// }
// anchorChan <- anchorConfig
// default:
// msg, err := consumer.ReadMessage(batchTimeout)
// if err != nil {
// if err.(kafka.Error).Code() == kafka.ErrTimedOut {
// // process accumulated messages when timeout
// if len(messages) > 0 {
// processMessageBatch(ctx, messages)
// consumer.Commit()
// messages = messages[:0]
// }
// continue
// }
// logger.Error(ctx, "read message from kafka failed", "error", err, "msg", msg)
// continue
// }
// messages = append(messages, msg)
// // process messages when batch size or timeout period is reached
// if len(messages) >= batchSize || time.Since(lastCommit) >= batchTimeout {
// processMessageBatch(ctx, messages)
// consumer.Commit()
// messages = messages[:0]
// lastCommit = time.Now()
// }
// }
// }
// }
// type realTimeDataPayload struct {
// ComponentUUID string
// Values []float64
// }
// type realTimeData struct {
// Payload realTimeDataPayload
// }
// func parseKafkaMessage(msgValue []byte) (*realTimeData, error) {
// var realTimeData realTimeData
// err := json.Unmarshal(msgValue, &realTimeData)
// if err != nil {
// return nil, fmt.Errorf("unmarshal real time data failed: %w", err)
// }
// return &realTimeData, nil
// }
// func processRealTimeData(ctx context.Context, realTimeData *realTimeData) {
// componentUUID := realTimeData.Payload.ComponentUUID
// component, err := diagram.GetComponentMap(componentUUID)
// if err != nil {
// logger.Error(ctx, "query component info from diagram map by component id failed",
// "component_uuid", componentUUID, "error", err)
// return
// }
// componentType := component.Type
// if componentType != constants.DemoType {
// logger.Error(ctx, "can not process real time data of component type not equal DemoType",
// "component_uuid", componentUUID)
// return
// }
// var anchorName string
// var compareValUpperLimit, compareValLowerLimit float64
// var anchorRealTimeData []float64
// var calculateFunc func(archorValue float64, args ...float64) float64
// for _, param := range realTimeData.Payload.Values {
// anchorRealTimeData = append(anchorRealTimeData, param)
// }
// anchorConfig := config.AnchorParamConfig{
// AnchorParamBaseConfig: config.AnchorParamBaseConfig{
// ComponentUUID: componentUUID,
// AnchorName: anchorName,
// CompareValUpperLimit: compareValUpperLimit,
// CompareValLowerLimit: compareValLowerLimit,
// AnchorRealTimeData: anchorRealTimeData,
// },
// CalculateFunc: calculateFunc,
// CalculateParams: []float64{},
// }
// anchorChan, err := pool.GetAnchorParamChan(ctx, componentUUID)
// if err != nil {
// logger.Error(ctx, "get anchor param chan failed",
// "component_uuid", componentUUID, "error", err)
// return
// }
// select {
// case anchorChan <- anchorConfig:
// case <-ctx.Done():
// logger.Info(ctx, "context done while sending to anchor chan")
// case <-time.After(5 * time.Second):
// logger.Error(ctx, "timeout sending to anchor chan", "component_uuid", componentUUID)
// }
// }
// // processMessageBatch define func to bathc process kafka message
// func processMessageBatch(ctx context.Context, messages []*kafka.Message) {
// for _, msg := range messages {
// realTimeData, err := parseKafkaMessage(msg.Value)
// if err != nil {
// logger.Error(ctx, "parse kafka message failed", "error", err, "msg", msg)
// continue
// }
// go processRealTimeData(ctx, realTimeData)
// }
// }

View File

@ -0,0 +1,73 @@
// Package realtimedata define real time data operation functions
package realtimedata
import (
"context"
"modelRT/config"
"modelRT/constants"
"modelRT/diagram"
"modelRT/logger"
"modelRT/network"
"modelRT/pool"
)
// RealTimeDataChan define channel of real time data receive
var RealTimeDataChan chan network.RealTimeDataReceiveRequest

// init allocates the shared receive channel with a buffer of 100 requests.
func init() {
	RealTimeDataChan = make(chan network.RealTimeDataReceiveRequest, 100)
}
// ReceiveChan define func of real time data receive and process
// It consumes RealTimeDataChan until the context is canceled, converting each
// request into an AnchorParamConfig and forwarding it to the component's anchor channel.
func ReceiveChan(ctx context.Context) {
	for {
		// fixed: the select previously had an empty "default:" case, turning this
		// loop into a busy-wait that pinned a CPU core; it now blocks until either
		// the context is done or a request arrives
		select {
		case <-ctx.Done():
			return
		case realTimeData := <-RealTimeDataChan:
			componentUUID := realTimeData.PayLoad.ComponentUUID
			component, err := diagram.GetComponentMap(componentUUID)
			if err != nil {
				logger.Error(ctx, "query component info from diagram map by componet id failed", "component_uuid", componentUUID, "error", err)
				continue
			}
			componentType := component.Type
			if componentType != constants.DemoType {
				logger.Error(ctx, "can not process real time data of component type not equal DemoType", "component_uuid", componentUUID)
				continue
			}
			var anchorName string
			var compareValUpperLimit, compareValLowerLimit float64
			var anchorRealTimeData []float64
			var calculateFunc func(archorValue float64, args ...float64) float64
			// calculateFunc, params := config.SelectAnchorCalculateFuncAndParams(componentType, anchorName, componentData)
			for _, param := range realTimeData.PayLoad.Values {
				anchorRealTimeData = append(anchorRealTimeData, param.Value)
			}
			anchorConfig := config.AnchorParamConfig{
				AnchorParamBaseConfig: config.AnchorParamBaseConfig{
					ComponentUUID:        componentUUID,
					AnchorName:           anchorName,
					CompareValUpperLimit: compareValUpperLimit,
					CompareValLowerLimit: compareValLowerLimit,
					AnchorRealTimeData:   anchorRealTimeData,
				},
				CalculateFunc:   calculateFunc,
				CalculateParams: []float64{},
			}
			anchorChan, err := pool.GetAnchorParamChan(ctx, componentUUID)
			if err != nil {
				logger.Error(ctx, "get anchor param chan failed", "component_uuid", componentUUID, "error", err)
				continue
			}
			anchorChan <- anchorConfig
		}
	}
}

View File

@ -1,16 +0,0 @@
// Package router provides router config
package router
import (
"modelRT/handler"
"github.com/gin-gonic/gin"
)
// registerDataRoutes define func of register data routes
// Routes are mounted under the "/data/" group of the supplied router group.
func registerDataRoutes(rg *gin.RouterGroup) {
	dataGroup := rg.Group("/data/")
	// TODO: switch the realtime endpoint to a websocket path
	// dataGroup.GET("realtime", handler.QueryRealTimeMonitorHandler)
	dataGroup.GET("history", handler.QueryHistoryDataHandler)
}

View File

@ -1,15 +0,0 @@
// Package router provides router config
package router
import (
"modelRT/handler"
"github.com/gin-gonic/gin"
)
// registerMonitorRoutes define func of register monitor data routes
// Routes are mounted under the "/monitors/" group of the supplied router group.
func registerMonitorRoutes(rg *gin.RouterGroup) {
	monitorGroup := rg.Group("/monitors/")
	monitorGroup.POST("data/subscriptions", handler.RealTimeSubHandler)
	monitorGroup.GET("data/realtime/stream/:clientID", handler.PullRealTimeDataHandler)
}

View File

@ -23,6 +23,4 @@ func RegisterRoutes(engine *gin.Engine, clientToken string) {
registerDiagramRoutes(routeGroup) registerDiagramRoutes(routeGroup)
registerAttrRoutes(routeGroup) registerAttrRoutes(routeGroup)
registerMeasurementRoutes(routeGroup, clientToken) registerMeasurementRoutes(routeGroup, clientToken)
registerDataRoutes(routeGroup)
registerMonitorRoutes(routeGroup)
} }

View File

@ -1,14 +1,14 @@
// Package sql define database sql statement // Package sql define database sql statement
package sql package sql
// RecursiveSQL define topologic table recursive query statement // RecursiveSQL define Topologic table recursive query statement
var RecursiveSQL = `WITH RECURSIVE recursive_tree as ( var RecursiveSQL = `WITH RECURSIVE recursive_tree as (
SELECT uuid_from,uuid_to,flag SELECT uuid_from,uuid_to,flag
FROM "topologic" FROM "Topologic"
WHERE uuid_from = ? WHERE uuid_from = ?
UNION ALL UNION ALL
SELECT t.uuid_from,t.uuid_to,t.flag SELECT t.uuid_from,t.uuid_to,t.flag
FROM "topologic" t FROM "Topologic" t
JOIN recursive_tree rt ON t.uuid_from = rt.uuid_to JOIN recursive_tree rt ON t.uuid_from = rt.uuid_to
) )
SELECT * FROM recursive_tree;` SELECT * FROM recursive_tree;`

View File

@ -43,13 +43,14 @@ func TestUserDao_CreateUser(t *testing.T) {
topologicInfo := &orm.Topologic{ topologicInfo := &orm.Topologic{
UUIDFrom: uuid.FromStringOrNil("70c190f2-8a60-42a9-b143-ec5f87e0aa6b"), UUIDFrom: uuid.FromStringOrNil("70c190f2-8a60-42a9-b143-ec5f87e0aa6b"),
UUIDTo: uuid.FromStringOrNil("70c190f2-8a75-42a9-b166-ec5f87e0aa6b"), UUIDTo: uuid.FromStringOrNil("70c190f2-8a75-42a9-b166-ec5f87e0aa6b"),
Comment: "test",
Flag: 1, Flag: 1,
} }
// ud := dao2.NewUserDao(context.TODO()) // ud := dao2.NewUserDao(context.TODO())
mock.ExpectBegin() mock.ExpectBegin()
mock.ExpectExec(regexp.QuoteMeta("INSERT INTO `Topologic`")). mock.ExpectExec(regexp.QuoteMeta("INSERT INTO `Topologic`")).
WithArgs(topologicInfo.Flag, topologicInfo.UUIDFrom, topologicInfo.UUIDTo). WithArgs(topologicInfo.Flag, topologicInfo.UUIDFrom, topologicInfo.UUIDTo, topologicInfo.Comment).
WillReturnResult(sqlmock.NewResult(1, 1)) WillReturnResult(sqlmock.NewResult(1, 1))
mock.ExpectCommit() mock.ExpectCommit()

View File

@ -1,27 +0,0 @@
// Package util provide some utility fun
package util
import (
"sort"
"github.com/redis/go-redis/v9"
)
// ConvertZSetMembersToFloat64 define func to conver zset member type to float64
// The entries are first sorted by member (timestamp) ascending, then the scores
// are collected in that order.
func ConvertZSetMembersToFloat64(members []redis.Z) []float64 {
	// recovery time sorted in ascending order
	sortRedisZByTimeMemberAscending(members)
	scores := make([]float64, len(members))
	for i := range members {
		scores[i] = members[i].Score
	}
	return scores
}
// sortRedisZByTimeMemberAscending sorts zset entries in place by their member
// string in ascending lexicographic order.
// NOTE(review): the type assertion panics if a Member is not a string, and
// lexicographic order only matches chronological order when the members are
// equal-length timestamp strings — confirm both against the writer side.
func sortRedisZByTimeMemberAscending(data []redis.Z) {
	sort.Slice(data, func(i, j int) bool {
		memberI := data[i].Member.(string)
		memberJ := data[j].Member.(string)
		return memberI < memberJ
	})
}

View File

@ -1,11 +0,0 @@
// Package util provide some utility functions
package util
// GetKeysFromSet define func to get all keys from a map[string]struct{}
// The order of the returned keys is unspecified (map iteration order).
func GetKeysFromSet(set map[string]struct{}) []string {
	result := make([]string, 0, len(set))
	for k := range set {
		result = append(result, k)
	}
	return result
}

View File

@ -1,60 +0,0 @@
// Package util provide some utility functions
package util
// RemoveTargetsFromSliceSimple define func to remove targets from a slice of strings
// Removal uses swap-with-last while scanning backwards, so the relative order of
// the survivors is not preserved; the input slice's backing array is modified in place.
func RemoveTargetsFromSliceSimple(targetsSlice []string, targetsToRemove []string) []string {
	removeSet := make(map[string]struct{}, len(targetsToRemove))
	for _, candidate := range targetsToRemove {
		removeSet[candidate] = struct{}{}
	}
	for idx := len(targetsSlice) - 1; idx >= 0; idx-- {
		if _, hit := removeSet[targetsSlice[idx]]; !hit {
			continue
		}
		last := len(targetsSlice) - 1
		targetsSlice[idx] = targetsSlice[last]
		targetsSlice = targetsSlice[:last]
	}
	return targetsSlice
}
// SliceToSet define func to convert string slice to set
// Duplicate items collapse into a single set entry.
func SliceToSet(targetsSlice []string) map[string]struct{} {
	result := make(map[string]struct{}, len(targetsSlice))
	for _, item := range targetsSlice {
		result[item] = struct{}{}
	}
	return result
}
// DeduplicateAndReportDuplicates define func to deduplicate a slice of strings and report duplicates
// Items of sourceSlice already present in targetsSlice are collected into
// duplicates; the remainder go into deduplicated. Both keep sourceSlice order.
func DeduplicateAndReportDuplicates(targetsSlice []string, sourceSlice []string) (deduplicated []string, duplicates []string) {
	known := SliceToSet(targetsSlice)
	deduplicated = make([]string, 0, len(sourceSlice))
	// duplicate items slice
	duplicates = make([]string, 0, len(sourceSlice))
	for _, candidate := range sourceSlice {
		if _, seen := known[candidate]; seen {
			duplicates = append(duplicates, candidate)
		} else {
			deduplicated = append(deduplicated, candidate)
		}
	}
	return deduplicated, duplicates
}
// GetLongestCommonPrefixLength define func of get longest common prefix length between two strings
// Comparison is byte-wise; an empty query always yields 0.
func GetLongestCommonPrefixLength(query string, result string) int {
	if query == "" {
		return 0
	}
	limit := len(query)
	if len(result) < limit {
		limit = len(result)
	}
	for i := 0; i < limit; i++ {
		if query[i] != result[i] {
			return i
		}
	}
	return limit
}

View File

@ -1,25 +0,0 @@
// Package util provide some utility functions
package util
import (
"strconv"
"time"
)
// GenNanoTsStr define func to generate nanosecond timestamp string by current time
// The result is the base-10 representation of time.Now().UnixNano().
func GenNanoTsStr() string {
	return strconv.FormatInt(time.Now().UnixNano(), 10)
}
// Numeric define interface to constraints supporting integer and floating-point types
type Numeric interface {
int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64 | float32 | float64
}
// SecondsToDuration define func to convert Numeric type param to time duration
func SecondsToDuration[T Numeric](seconds T) time.Duration {
return time.Duration(seconds) * time.Second
}

View File

@ -7,7 +7,6 @@ import (
"encoding/base64" "encoding/base64"
"fmt" "fmt"
"os" "os"
"strconv"
"strings" "strings"
"time" "time"
) )
@ -23,7 +22,7 @@ func GenerateClientToken(host string, serviceName string, secretKey string) (str
return "", fmt.Errorf("TOKEN_SECRET_KEY environment variable not set and no key provided in parameters") return "", fmt.Errorf("TOKEN_SECRET_KEY environment variable not set and no key provided in parameters")
} }
uniqueID := strconv.FormatInt(time.Now().UnixNano(), 10) uniqueID := fmt.Sprintf("%d", time.Now().UnixNano())
clientInfo := fmt.Sprintf("host=%s;service=%s;id=%s", host, serviceName, uniqueID) clientInfo := fmt.Sprintf("host=%s;service=%s;id=%s", host, serviceName, uniqueID)
mac := hmac.New(sha256.New, []byte(finalSecretKey)) mac := hmac.New(sha256.New, []byte(finalSecretKey))