Compare commits
develop...feature-jo — 36 commits

| SHA1 |
|---|
| 2a3852a246 |
| f48807e5e5 |
| 3f70be0d1c |
| a21a423624 |
| 666e1a9289 |
| 46e72ce588 |
| b99c03296a |
| 8a4116879b |
| 10b91abee9 |
| 329b4827f8 |
| a7d894d2de |
| fca6905d74 |
| 6f3134b5e9 |
| b6e47177fb |
| 5e311a7071 |
| 36f267aec7 |
| 357d06868e |
| 46ee2a39f4 |
| dff74222c6 |
| 9593c77c18 |
| 8cbbfbd695 |
| d434a7737d |
| 984ee3003d |
| 041d7e5788 |
| b43adf9b67 |
| a82e02126d |
| 93d1eea61f |
| 8d6efe8bb1 |
| 6de3c5955b |
| 8090751914 |
| b75358e676 |
| f5ea909120 |
| 594dc68ab1 |
| 2584f6dacb |
| 09700a86ee |
| 954203b84d |
```diff
@@ -22,6 +22,7 @@
 go.work

 .vscode
+.idea
 # Shield all log files in the log folder
 /log/
 # Shield config files in the configs folder
```
```diff
@@ -22,12 +22,12 @@ type ServiceConfig struct {

 // KafkaConfig define config struct of kafka config
 type KafkaConfig struct {
 	Servers                 string `mapstructure:"Servers"`
 	GroupID                 string `mapstructure:"group_id"`
 	Topic                   string `mapstructure:"topic"`
 	AutoOffsetReset         string `mapstructure:"auto_offset_reset"`
 	EnableAutoCommit        string `mapstructure:"enable_auto_commit"`
-	ReadMessageTimeDuration string  `mapstructure:"read_message_time_duration"`
+	ReadMessageTimeDuration float32 `mapstructure:"read_message_time_duration"`
 }

 // PostgresConfig define config struct of postgres config
```
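The `mapstructure` tags suggest this struct is decoded with Viper, though the loader itself is not part of this diff. A minimal sketch of how the type change plays out at the call site: once `read_message_time_duration` is numeric, it feeds a `time.Duration` without string parsing. The `kafka` config key and file path are assumptions.

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/viper"
)

// KafkaConfig mirrors the struct in the diff above.
type KafkaConfig struct {
	Servers                 string  `mapstructure:"Servers"`
	GroupID                 string  `mapstructure:"group_id"`
	Topic                   string  `mapstructure:"topic"`
	AutoOffsetReset         string  `mapstructure:"auto_offset_reset"`
	EnableAutoCommit        string  `mapstructure:"enable_auto_commit"`
	ReadMessageTimeDuration float32 `mapstructure:"read_message_time_duration"`
}

func main() {
	viper.SetConfigFile("configs/config.yaml") // hypothetical path
	if err := viper.ReadInConfig(); err != nil {
		panic(err)
	}
	var cfg KafkaConfig
	if err := viper.UnmarshalKey("kafka", &cfg); err != nil { // "kafka" key is assumed
		panic(err)
	}
	// the value is now numeric seconds, so no strconv step is needed
	timeout := time.Duration(cfg.ReadMessageTimeDuration * float32(time.Second))
	fmt.Println(cfg.Topic, timeout)
}
```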
```diff
@@ -0,0 +1,11 @@
+// Package constants define constant variable
+package constants
+
+import "time"
+
+const (
+	// SendMaxBatchSize define maximum buffer capacity
+	SendMaxBatchSize = 100
+	// SendMaxBatchInterval define maximum aggregate latency
+	SendMaxBatchInterval = 200 * time.Millisecond
+)
```
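The two constants pair naturally into a size-or-timeout flush policy: emit a batch when the buffer fills or when the interval elapses, whichever comes first. A minimal self-contained sketch of that pattern; the channel, message type, and `flush` callback are hypothetical, since the consuming code is not part of this diff.

```go
package main

import (
	"fmt"
	"time"
)

const (
	SendMaxBatchSize     = 100
	SendMaxBatchInterval = 200 * time.Millisecond
)

// batchSender buffers messages and flushes on size or interval.
func batchSender(in <-chan string, flush func([]string)) {
	buf := make([]string, 0, SendMaxBatchSize)
	ticker := time.NewTicker(SendMaxBatchInterval)
	defer ticker.Stop()
	for {
		select {
		case msg, ok := <-in:
			if !ok {
				if len(buf) > 0 {
					flush(buf) // drain the remainder on shutdown
				}
				return
			}
			buf = append(buf, msg)
			if len(buf) >= SendMaxBatchSize {
				flush(buf)
				buf = buf[:0]
			}
		case <-ticker.C:
			if len(buf) > 0 {
				flush(buf)
				buf = buf[:0]
			}
		}
	}
}

func main() {
	in := make(chan string)
	go batchSender(in, func(b []string) { fmt.Println("flushed", len(b)) })
	in <- "sample"
	close(in)
	time.Sleep(50 * time.Millisecond) // let the final flush print
}
```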
```diff
@@ -0,0 +1,7 @@
+// Package constants define constant variable
+package constants
+
+type contextKey string
+
+// MeasurementUUIDKey define measurement uuid key into context
+const MeasurementUUIDKey contextKey = "measurement_uuid"
```
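The unexported `contextKey` type is the standard guard against key collisions in `context.Context`: another package cannot construct the same key even with an identical string. A small sketch of the intended put/get round trip (the UUID value is borrowed from the sample data later in this diff):

```go
package main

import (
	"context"
	"fmt"
)

type contextKey string

const MeasurementUUIDKey contextKey = "measurement_uuid"

func main() {
	// attach the measurement uuid to the request context
	ctx := context.WithValue(context.Background(), MeasurementUUIDKey, "968dd6e6-faec-4f78-b58a-d6e68426b09e")
	// read it back with a checked type assertion
	if uuid, ok := ctx.Value(MeasurementUUIDKey).(string); ok {
		fmt.Println("measurement uuid:", uuid)
	}
}
```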
```diff
@@ -49,3 +49,9 @@ var ErrChanIsNil = errors.New("this channel is nil")

 // ErrConcurrentModify define error of concurrent modification detected
 var ErrConcurrentModify = errors.New("existed concurrent modification risk")
+
+// ErrUnsupportedSubAction define error of unsupported real time data subscription action
+var ErrUnsupportedSubAction = errors.New("unsupported real time data subscription action")
+
+// ErrUnsupportedLinkAction define error of unsupported measurement link process action
+var ErrUnsupportedLinkAction = errors.New("unsupported measurement link process action")
```
```diff
@@ -0,0 +1,31 @@
+// Package constants define constant variable
+package constants
+
+const (
+	// TIBreachTriggerType define out of bounds type constant
+	TIBreachTriggerType = "trigger"
+)
+
+const (
+	// TelemetryUpLimit define telemetry upper limit
+	TelemetryUpLimit = "up"
+	// TelemetryUpUpLimit define telemetry upper upper limit
+	TelemetryUpUpLimit = "upup"
+
+	// TelemetryDownLimit define telemetry lower limit
+	TelemetryDownLimit = "down"
+	// TelemetryDownDownLimit define telemetry lower lower limit
+	TelemetryDownDownLimit = "downdown"
+)
+
+const (
+	// TelesignalRaising define telesignal raising edge
+	TelesignalRaising = "raising"
+	// TelesignalFalling define telesignal falling edge
+	TelesignalFalling = "falling"
+)
+
+const (
+	// MinBreachCount define min breach count of real time data
+	MinBreachCount = 10
+)
```
```diff
@@ -1,24 +0,0 @@
-// Package constants define constant variable
-package constants
-
-const (
-	// RedisAllGridSetKey define redis set key which store all grid keys
-	RedisAllGridSetKey = "grid_keys"
-	// RedisSpecGridZoneSetKey define redis set key which store all zone keys under specific grid
-	RedisSpecGridZoneSetKey = "grid_%s_zones_keys"
-
-	// RedisAllZoneSetKey define redis set key which store all zone keys
-	RedisAllZoneSetKey = "zone_keys"
-	// RedisSpecZoneStationSetKey define redis set key which store all station keys under specific zone
-	RedisSpecZoneStationSetKey = "zone_%s_stations_keys"
-
-	// RedisAllStationSetKey define redis set key which store all station keys
-	RedisAllStationSetKey = "station_keys"
-	// RedisSpecStationComponentSetKey define redis set key which store all component keys under specific station
-	RedisSpecStationComponentSetKey = "station_%s_components_keys"
-
-	// RedisAllComponentSetKey define redis set key which store all component keys
-	RedisAllComponentSetKey = "component_keys"
-	// RedisSpecComponentSetKey define redis set key which store all component keys under specific zone
-	RedisSpecComponentSetKey = "zone_%s_components_keys"
-)
```
```diff
@@ -29,3 +29,8 @@ const (
 	ChannelSuffixUBC = "UBC"
 	ChannelSuffixUCA = "UCA"
 )
+
+const (
+	// MaxIdentifyHierarchy define max data identify syntax hierarchy
+	MaxIdentifyHierarchy = 7
+)
```
```diff
@@ -0,0 +1,47 @@
+// Package constants define constant variable
+package constants
+
+const (
+	// RedisAllGridSetKey define redis set key which store all grid keys
+	RedisAllGridSetKey = "grid_keys"
+
+	// RedisAllZoneSetKey define redis set key which store all zone keys
+	RedisAllZoneSetKey = "zone_keys"
+
+	// RedisAllStationSetKey define redis set key which store all station keys
+	RedisAllStationSetKey = "station_keys"
+
+	// RedisAllCompNSPathSetKey define redis set key which store all component nspath keys
+	RedisAllCompNSPathSetKey = "component_nspath_keys"
+
+	// RedisAllCompTagSetKey define redis set key which store all component tag keys
+	RedisAllCompTagSetKey = "component_tag_keys"
+
+	// RedisAllConfigSetKey define redis set key which store all config keys
+	RedisAllConfigSetKey = "config_keys"
+
+	// RedisAllMeasTagSetKey define redis set key which store all measurement tag keys
+	RedisAllMeasTagSetKey = "measurement_tag_keys"
+
+	// RedisSpecGridZoneSetKey define redis set key which store all zone keys under specific grid
+	RedisSpecGridZoneSetKey = "%s_zones_keys"
+
+	// RedisSpecZoneStationSetKey define redis set key which store all station keys under specific zone
+	RedisSpecZoneStationSetKey = "%s_stations_keys"
+
+	// RedisSpecStationCompNSPATHSetKey define redis set key which store all component nspath keys under specific station
+	RedisSpecStationCompNSPATHSetKey = "%s_components_nspath_keys"
+
+	// RedisSpecStationCompTagSetKey define redis set key which store all component tag keys under specific station
+	RedisSpecStationCompTagSetKey = "%s_components_tag_keys"
+
+	// RedisSpecCompTagMeasSetKey define redis set key which store all measurement keys under specific component tag
+	RedisSpecCompTagMeasSetKey = "%s_measurement_keys"
+)
+
+const (
+	// SearchLinkAddAction define search link add action
+	SearchLinkAddAction = "add"
+	// SearchLinkDelAction define search link del action
+	SearchLinkDelAction = "del"
+)
```
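Each `RedisSpec...` template takes a parent key via `fmt.Sprintf` before the set lookup; only the `%s` placeholder distinguishes them from the global `RedisAll...` keys. A tiny sketch of the substitution, with illustrative key values:

```go
package main

import "fmt"

const (
	RedisSpecGridZoneSetKey    = "%s_zones_keys"
	RedisSpecZoneStationSetKey = "%s_stations_keys"
)

func main() {
	// a grid key such as "grid1" selects the set holding its zone keys
	fmt.Println(fmt.Sprintf(RedisSpecGridZoneSetKey, "grid1")) // grid1_zones_keys
	// a zone key such as "zone1_1" selects the set holding its station keys
	fmt.Println(fmt.Sprintf(RedisSpecZoneStationSetKey, "zone1_1")) // zone1_1_stations_keys
}
```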
```diff
@@ -0,0 +1,73 @@
+// Package constants define constant variable
+package constants
+
+const (
+	// SubStartAction define the real time subscription start action
+	SubStartAction string = "start"
+	// SubStopAction define the real time subscription stop action
+	SubStopAction string = "stop"
+	// SubAppendAction define the real time subscription append action
+	SubAppendAction string = "append"
+	// SubUpdateAction define the real time subscription update action
+	SubUpdateAction string = "update"
+)
+
+// define status code constants
+const (
+	// SubSuccessCode define subscription success code
+	SubSuccessCode = "1001"
+	// SubFailedCode define subscription failed code
+	SubFailedCode = "1002"
+	// RTDSuccessCode define real time data return success code
+	RTDSuccessCode = "1003"
+	// RTDFailedCode define real time data return failed code
+	RTDFailedCode = "1004"
+	// CancelSubSuccessCode define cancel subscription success code
+	CancelSubSuccessCode = "1005"
+	// CancelSubFailedCode define cancel subscription failed code
+	CancelSubFailedCode = "1006"
+	// SubRepeatCode define subscription repeat code
+	SubRepeatCode = "1007"
+	// UpdateSubSuccessCode define update subscription success code
+	UpdateSubSuccessCode = "1008"
+	// UpdateSubFailedCode define update subscription failed code
+	UpdateSubFailedCode = "1009"
+)
+
+const (
+	// SubSuccessMsg define subscription success message
+	SubSuccessMsg = "subscription success"
+	// SubFailedMsg define subscription failed message
+	SubFailedMsg = "subscription failed"
+	// RTDSuccessMsg define real time data return success message
+	RTDSuccessMsg = "real time data return success"
+	// RTDFailedMsg define real time data return failed message
+	RTDFailedMsg = "real time data return failed"
+	// CancelSubSuccessMsg define cancel subscription success message
+	CancelSubSuccessMsg = "cancel subscription success"
+	// CancelSubFailedMsg define cancel subscription failed message
+	CancelSubFailedMsg = "cancel subscription failed"
+	// SubRepeatMsg define subscription repeat message
+	SubRepeatMsg = "subscription repeat in target interval"
+	// UpdateSubSuccessMsg define update subscription success message
+	UpdateSubSuccessMsg = "update subscription success"
+	// UpdateSubFailedMsg define update subscription failed message
+	UpdateSubFailedMsg = "update subscription failed"
+)
+
+// TargetOperationType define constant to the target operation type
+type TargetOperationType int
+
+const (
+	// OpAppend define append new target to the subscription list
+	OpAppend TargetOperationType = iota
+	// OpRemove define remove exist target from the subscription list
+	OpRemove
+	// OpUpdate define update exist target from the subscription list
+	OpUpdate
+)
+
+const (
+	// NoticeChanCap define real time data notice channel capacity
+	NoticeChanCap = 10000
+)
```
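One way these pieces fit together, sketched below: incoming action strings are mapped onto the iota-based `TargetOperationType` before dispatch. The exact mapping, including treating `stop` as a removal, is an assumption for illustration and is not taken from the diff.

```go
package main

import "fmt"

type TargetOperationType int

const (
	OpAppend TargetOperationType = iota
	OpRemove
	OpUpdate
)

// opFromAction maps an incoming subscription action string onto the
// operation type; the stop -> OpRemove pairing is a hypothetical choice.
func opFromAction(action string) (TargetOperationType, error) {
	switch action {
	case "append":
		return OpAppend, nil
	case "stop":
		return OpRemove, nil
	case "update":
		return OpUpdate, nil
	default:
		return 0, fmt.Errorf("unsupported real time data subscription action: %s", action)
	}
}

func main() {
	op, err := opFromAction("append")
	fmt.Println(op, err) // 0 <nil>
}
```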
```diff
@@ -27,9 +27,9 @@ func CreateComponentIntoDB(ctx context.Context, tx *gorm.DB, componentInfo netwo

 	component := orm.Component{
 		GlobalUUID: globalUUID,
-		GridID:     strconv.FormatInt(componentInfo.GridID, 10),
-		ZoneID:     strconv.FormatInt(componentInfo.ZoneID, 10),
-		StationID:  strconv.FormatInt(componentInfo.StationID, 10),
+		GridTag:    strconv.FormatInt(componentInfo.GridID, 10),
+		ZoneTag:    strconv.FormatInt(componentInfo.ZoneID, 10),
+		StationTag: strconv.FormatInt(componentInfo.StationID, 10),
 		Tag:        componentInfo.Tag,
 		Name:       componentInfo.Name,
 		Context:    componentInfo.Context,
```
```diff
@@ -0,0 +1,50 @@
+// Package database define database operation functions
+package database
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"time"
+
+	"modelRT/common/errcode"
+	"modelRT/network"
+	"modelRT/orm"
+
+	"github.com/gofrs/uuid"
+	"gorm.io/gorm"
+)
+
+// CreateMeasurement define create measurement info of the circuit diagram into DB
+func CreateMeasurement(ctx context.Context, tx *gorm.DB, measurementInfo network.MeasurementCreateInfo) (string, error) {
+	cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
+	defer cancel()
+
+	globalUUID, err := uuid.FromString(measurementInfo.UUID)
+	if err != nil {
+		return "", fmt.Errorf("format uuid from string type failed:%w", err)
+	}
+
+	measurement := orm.Measurement{
+		Tag:           "",
+		Name:          "",
+		Type:          -1,
+		Size:          -1,
+		DataSource:    nil,
+		EventPlan:     nil,
+		BayUUID:       globalUUID,
+		ComponentUUID: globalUUID,
+		Op:            -1,
+		Ts:            time.Now(),
+	}
+
+	result := tx.WithContext(cancelCtx).Create(&measurement)
+	if result.Error != nil || result.RowsAffected == 0 {
+		err := result.Error
+		if result.RowsAffected == 0 {
+			err = fmt.Errorf("%w:please check insert measurement row", errcode.ErrInsertRowUnexpected)
+		}
+		return "", fmt.Errorf("insert measurement info failed:%w", err)
+	}
+	return strconv.FormatInt(measurement.ID, 10), nil
+}
```
```diff
@@ -24,7 +24,6 @@ func CreateTopologicIntoDB(ctx context.Context, tx *gorm.DB, pageID int64, topol
 			UUIDFrom: info.UUIDFrom,
 			UUIDTo:   info.UUIDTo,
 			Flag:     info.Flag,
-			Comment:  info.Comment,
 		}
 		topologicSlice = append(topologicSlice, topologicInfo)
 	}
```
```diff
@@ -0,0 +1,88 @@
+// Package database define database operation functions
+package database
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"modelRT/logger"
+	"modelRT/model"
+	"modelRT/orm"
+
+	"gorm.io/gorm"
+)
+
+// FillingShortTokenModel define filling short token model info
+func FillingShortTokenModel(ctx context.Context, tx *gorm.DB, identModel *model.ShortIdentityTokenModel) error {
+	filterComponent := &orm.Component{
+		GridTag:    identModel.GetGridTag(),
+		ZoneTag:    identModel.GetZoneTag(),
+		StationTag: identModel.GetStationTag(),
+	}
+
+	component, measurement, err := QueryLongIdentModelInfoByToken(ctx, tx, identModel.MeasurementTag, filterComponent)
+	if err != nil {
+		logger.Error(ctx, "query long identity token model info failed", "error", err)
+		return err
+	}
+	identModel.ComponentInfo = component
+	identModel.MeasurementInfo = measurement
+	return nil
+}
+
+// FillingLongTokenModel define filling long token model info
+func FillingLongTokenModel(ctx context.Context, tx *gorm.DB, identModel *model.LongIdentityTokenModel) error {
+	filterComponent := &orm.Component{
+		GridTag:    identModel.GetGridTag(),
+		ZoneTag:    identModel.GetZoneTag(),
+		StationTag: identModel.GetStationTag(),
+		Tag:        identModel.GetComponentTag(),
+	}
+	component, measurement, err := QueryLongIdentModelInfoByToken(ctx, tx, identModel.MeasurementTag, filterComponent)
+	if err != nil {
+		logger.Error(ctx, "query long identity token model info failed", "error", err)
+		return err
+	}
+	identModel.ComponentInfo = component
+	identModel.MeasurementInfo = measurement
+	return nil
+}
+
+// ParseDataIdentifierToken define function to parse data identifier token
+func ParseDataIdentifierToken(ctx context.Context, tx *gorm.DB, identToken string) (model.IndentityTokenModelInterface, error) {
+	identSlice := strings.Split(identToken, ".")
+	identSliceLen := len(identSlice)
+	if identSliceLen == 5 {
+		// token1.token2.token3.token4.token7
+		shortIndentModel := &model.ShortIdentityTokenModel{
+			GridTag:        identSlice[0],
+			ZoneTag:        identSlice[1],
+			StationTag:     identSlice[2],
+			NamespacePath:  identSlice[3],
+			MeasurementTag: identSlice[4],
+		}
+		err := FillingShortTokenModel(ctx, tx, shortIndentModel)
+		if err != nil {
+			return nil, err
+		}
+		return shortIndentModel, nil
+	} else if identSliceLen == 7 {
+		// token1.token2.token3.token4.token5.token6.token7
+		longIndentModel := &model.LongIdentityTokenModel{
+			GridTag:        identSlice[0],
+			ZoneTag:        identSlice[1],
+			StationTag:     identSlice[2],
+			NamespacePath:  identSlice[3],
+			ComponentTag:   identSlice[4],
+			AttributeGroup: identSlice[5],
+			MeasurementTag: identSlice[6],
+		}
+		err := FillingLongTokenModel(ctx, tx, longIndentModel)
+		if err != nil {
+			return nil, err
+		}
+		return longIndentModel, nil
+	}
+	return nil, fmt.Errorf("invalid identity token format: %s", identToken)
+}
```
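A self-contained sketch of the token-shape dispatch the function implements: five dot-separated segments select the short form (component tag and attribute group omitted), seven select the long form, and anything else is rejected. The sample tokens are illustrative.

```go
package main

import (
	"fmt"
	"strings"
)

// classify reports which token form a dot-separated identifier uses,
// mirroring the length dispatch in ParseDataIdentifierToken.
func classify(token string) (string, error) {
	switch len(strings.Split(token, ".")) {
	case 5:
		return "short", nil // grid.zone.station.nspath.measurement
	case 7:
		return "long", nil // grid.zone.station.nspath.component.attrgroup.measurement
	default:
		return "", fmt.Errorf("invalid identity token format: %s", token)
	}
}

func main() {
	fmt.Println(classify("grid1.zone1_1.station1_1_1.ns1_1_1_1.meas1"))
	fmt.Println(classify("grid1.zone1_1.station1_1_1.ns1_1_1_1.comptag1.base_extend.meas1"))
}
```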
```diff
@@ -3,6 +3,7 @@ package database

 import (
 	"context"
+	"fmt"
 	"time"

 	"modelRT/orm"
```
```diff
@@ -81,3 +82,58 @@ func QueryComponentByNsPath(ctx context.Context, tx *gorm.DB, nsPath string) (or
 	}
 	return component, nil
 }
+
+// QueryLongIdentModelInfoByToken define func to query long identity model info by long token
+func QueryLongIdentModelInfoByToken(ctx context.Context, tx *gorm.DB, measTag string, condition *orm.Component) (*orm.Component, *orm.Measurement, error) {
+	var resultComp orm.Component
+	var measurement orm.Measurement
+
+	// ctx timeout guard
+	cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
+	defer cancel()
+
+	result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).First(&resultComp, condition)
+	if result.Error != nil {
+		if result.Error == gorm.ErrRecordNotFound {
+			return nil, nil, fmt.Errorf("component record not found by %v:%w", condition, result.Error)
+		}
+		return nil, nil, result.Error
+	}
+
+	filterMap := map[string]any{"component_uuid": resultComp.GlobalUUID, "tag": measTag}
+	result = tx.WithContext(cancelCtx).Where(filterMap).Clauses(clause.Locking{Strength: "UPDATE"}).First(&measurement)
+	if result.Error != nil {
+		if result.Error == gorm.ErrRecordNotFound {
+			return nil, nil, fmt.Errorf("measurement record not found by %v:%w", filterMap, result.Error)
+		}
+		return nil, nil, result.Error
+	}
+	return &resultComp, &measurement, nil
+}
+
+// QueryShortIdentModelInfoByToken define func to query short identity model info by short token
+func QueryShortIdentModelInfoByToken(ctx context.Context, tx *gorm.DB, measTag string, condition *orm.Component) (*orm.Component, *orm.Measurement, error) {
+	var resultComp orm.Component
+	var measurement orm.Measurement
+	// ctx timeout guard
+	cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
+	defer cancel()
+
+	result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).First(&resultComp, condition)
+	if result.Error != nil {
+		if result.Error == gorm.ErrRecordNotFound {
+			return nil, nil, fmt.Errorf("component record not found by %v:%w", condition, result.Error)
+		}
+		return nil, nil, result.Error
+	}
+
+	filterMap := map[string]any{"component_uuid": resultComp.GlobalUUID, "tag": measTag}
+	result = tx.WithContext(cancelCtx).Where(filterMap).Clauses(clause.Locking{Strength: "UPDATE"}).First(&measurement)
+	if result.Error != nil {
+		if result.Error == gorm.ErrRecordNotFound {
+			return nil, nil, fmt.Errorf("measurement record not found by %v:%w", filterMap, result.Error)
+		}
+		return nil, nil, result.Error
+	}
+	return &resultComp, &measurement, nil
+}
```
```diff
@@ -13,13 +13,32 @@ import (

 // QueryMeasurementByID return the result of query circuit diagram component measurement info by id from postgresDB
 func QueryMeasurementByID(ctx context.Context, tx *gorm.DB, id int64) (orm.Measurement, error) {
-	var component orm.Measurement
+	var measurement orm.Measurement
 	// ctx timeout guard
 	cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
 	defer cancel()
 	result := tx.WithContext(cancelCtx).
 		Where("id = ?", id).
 		Clauses(clause.Locking{Strength: "UPDATE"}).
+		First(&measurement)
+
+	if result.Error != nil {
+		return orm.Measurement{}, result.Error
+	}
+	return measurement, nil
+}
+
+// QueryMeasurementByToken define function query circuit diagram component measurement info by token from postgresDB
+func QueryMeasurementByToken(ctx context.Context, tx *gorm.DB, token string) (orm.Measurement, error) {
+	// TODO parse token to avoid SQL injection
+
+	var component orm.Measurement
+	// ctx timeout guard
+	cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
+	defer cancel()
+	result := tx.WithContext(cancelCtx).
+		Where(" = ?", token).
+		Clauses(clause.Locking{Strength: "UPDATE"}).
 		First(&component)

 	if result.Error != nil {
@@ -27,3 +46,17 @@ func QueryMeasurementByID(ctx context.Context, tx *gorm.DB, id int64) (orm.Measu
 	}
 	return component, nil
 }
+
+// GetAllMeasurements define func to query all measurement info from postgresDB
+func GetAllMeasurements(ctx context.Context, tx *gorm.DB) ([]orm.Measurement, error) {
+	var measurements []orm.Measurement
+
+	// ctx timeout guard
+	cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
+	defer cancel()
+	result := tx.WithContext(cancelCtx).Clauses(clause.Locking{Strength: "UPDATE"}).Find(&measurements)
+	if result.Error != nil {
+		return nil, result.Error
+	}
+	return measurements, nil
+}
```
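All of these queries attach `clause.Locking{Strength: "UPDATE"}`, which makes gorm append `FOR UPDATE` so the selected rows stay locked until the enclosing transaction ends. A minimal sketch; the DSN is a placeholder and the struct stands in for the real `orm.Measurement`.

```go
package main

import (
	"gorm.io/driver/postgres"
	"gorm.io/gorm"
	"gorm.io/gorm/clause"
)

// Measurement is a stand-in for orm.Measurement.
type Measurement struct {
	ID  int64
	Tag string
}

func main() {
	db, err := gorm.Open(postgres.Open("host=localhost user=postgres dbname=demo")) // placeholder DSN
	if err != nil {
		panic(err)
	}
	var m Measurement
	// emits roughly: SELECT * FROM "measurements" WHERE id = 1 ... FOR UPDATE
	db.Clauses(clause.Locking{Strength: "UPDATE"}).Where("id = ?", 1).First(&m)
}
```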
```diff
@@ -0,0 +1,80 @@
+// Package database define database operation functions
+package database
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"modelRT/orm"
+
+	"gorm.io/gorm"
+)
+
+func queryFirstByID(ctx context.Context, tx *gorm.DB, id any, dest any) error {
+	result := tx.WithContext(ctx).Where("id = ?", id).First(dest)
+	return result.Error
+}
+
+func queryFirstByTag(ctx context.Context, tx *gorm.DB, tagName any, dest any) error {
+	result := tx.WithContext(ctx).Where("tagname = ?", tagName).First(dest)
+	return result.Error
+}
+
+// QueryNodeInfoByID return the result of query circuit diagram node info by id and level from postgresDB
+func QueryNodeInfoByID(ctx context.Context, tx *gorm.DB, id int64, level int) (orm.CircuitDiagramNodeInterface, orm.CircuitDiagramNodeInterface, error) {
+	// set the context timeout
+	cancelCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
+	defer cancel()
+
+	var currentNodeInfo orm.CircuitDiagramNodeInterface
+	var previousNodeInfo orm.CircuitDiagramNodeInterface
+	var err error
+
+	switch level {
+	case 0:
+		var grid orm.Grid
+		err = queryFirstByID(cancelCtx, tx, id, &grid)
+		currentNodeInfo = grid
+	case 1:
+		// current: Zone, previous: Grid
+		var zone orm.Zone
+		err = queryFirstByID(cancelCtx, tx, id, &zone)
+		currentNodeInfo = zone
+		if err == nil {
+			var grid orm.Grid
+			err = queryFirstByID(cancelCtx, tx, zone.GridID, &grid)
+			previousNodeInfo = grid
+		}
+	case 2:
+		// current: Station, previous: Zone
+		var station orm.Station
+		err = queryFirstByID(cancelCtx, tx, id, &station)
+		currentNodeInfo = station
+		if err == nil {
+			var zone orm.Zone
+			err = queryFirstByID(cancelCtx, tx, station.ZoneID, &zone)
+			previousNodeInfo = zone
+		}
+	case 3, 4:
+		// current: Component, previous: Station
+		var component orm.Component
+		err = queryFirstByID(cancelCtx, tx, id, &component)
+		currentNodeInfo = component
+		if err == nil {
+			var station orm.Station
+			err = queryFirstByTag(cancelCtx, tx, component.StationTag, &station)
+			previousNodeInfo = station
+		}
+	case 5:
+		// TODO[NONEED-ISSUE] no add/delete requirement for this level yet #2
+		return nil, nil, nil
+	default:
+		return nil, nil, fmt.Errorf("unsupported node level: %d", level)
+	}
+
+	if err != nil {
+		return nil, nil, err
+	}
+	return previousNodeInfo, currentNodeInfo, nil
+}
```
```diff
@@ -37,9 +37,9 @@ func UpdateComponentIntoDB(ctx context.Context, tx *gorm.DB, componentInfo netwo

 	updateParams := orm.Component{
 		GlobalUUID: globalUUID,
-		GridID:     strconv.FormatInt(componentInfo.GridID, 10),
-		ZoneID:     strconv.FormatInt(componentInfo.ZoneID, 10),
-		StationID:  strconv.FormatInt(componentInfo.StationID, 10),
+		GridTag:    strconv.FormatInt(componentInfo.GridID, 10),
+		ZoneTag:    strconv.FormatInt(componentInfo.ZoneID, 10),
+		StationTag: strconv.FormatInt(componentInfo.StationID, 10),
 		Tag:        componentInfo.Tag,
 		Name:       componentInfo.Name,
 		Context:    componentInfo.Context,
```
```diff
@@ -51,7 +51,6 @@ func UpdateTopologicIntoDB(ctx context.Context, tx *gorm.DB, pageID int64, chang
 			Flag:     changeInfo.Flag,
 			UUIDFrom: changeInfo.NewUUIDFrom,
 			UUIDTo:   changeInfo.NewUUIDTo,
-			Comment:  changeInfo.Comment,
 		}
 		result = tx.WithContext(cancelCtx).Create(&topologic)
 	}
```
deploy/deploy.md
````diff
@@ -88,45 +88,98 @@ docker logs redis
 ##### 2.4.1 Postgres data injection

 ```SQL
-INSERT INTO public."Topologic" VALUES (2, 1, '70c190f2-8a60-42a9-b143-ec5f87e0aa6b', '10f155cf-bd27-4557-85b2-d126b6e2657f', 1, NULL);
-INSERT INTO public."Topologic" VALUES (3, 1, '70c190f2-8a60-42a9-b143-ec5f87e0aa6b', 'e32bc0be-67f4-4d79-a5da-eaa40a5bd77d', 1, NULL);
-INSERT INTO public."Topologic" VALUES (4, 1, '70c190f2-8a60-42a9-b143-ec5f87e0aa6b', '70c190f2-8a75-42a9-b166-ec5f87e0aa6b', 1, NULL);
-INSERT INTO public."Topologic" VALUES (5, 1, 'e32bc0be-67f4-4d79-a5da-eaa40a5bd77d', '70c200f2-8a75-42a9-c166-bf5f87e0aa6b', 1, NULL);
-INSERT INTO public."Topologic" VALUES (1, 1, '00000000-0000-0000-0000-000000000000', '70c190f2-8a60-42a9-b143-ec5f87e0aa6b', 1, NULL);
+INSERT INTO public.topologic(flag, uuid_from, uuid_to, context, description, op, ts)
+VALUES
+(1, '00000000-0000-0000-0000-000000000000', '70c190f2-8a60-42a9-b143-ec5f87e0aa6b', '{}', '', 1, CURRENT_TIMESTAMP),
+(1, '70c190f2-8a60-42a9-b143-ec5f87e0aa6b', '10f155cf-bd27-4557-85b2-d126b6e2657f', '{}', '', 1, CURRENT_TIMESTAMP),
+(1, '70c190f2-8a60-42a9-b143-ec5f87e0aa6b', 'e32bc0be-67f4-4d79-a5da-eaa40a5bd77d', '{}', '', 1, CURRENT_TIMESTAMP),
+(1, '70c190f2-8a60-42a9-b143-ec5f87e0aa6b', '70c190f2-8a75-42a9-b166-ec5f87e0aa6b', '{}', '', 1, CURRENT_TIMESTAMP),
+(1, 'e32bc0be-67f4-4d79-a5da-eaa40a5bd77d', '70c200f2-8a75-42a9-c166-bf5f87e0aa6b', '{}', '', 1, CURRENT_TIMESTAMP),
+(1, 'e32bc0be-67f4-4d79-a5da-eaa40a5bd77d', '968dd6e6-faec-4f78-b58a-d6e68426b09e', '{}', '', 1, CURRENT_TIMESTAMP),
+(1, 'e32bc0be-67f4-4d79-a5da-eaa40a5bd77d', '968dd6e6-faec-4f78-b58a-d6e68426b08e', '{}', '', 1, CURRENT_TIMESTAMP);
+
+INSERT INTO public.bay (bay_uuid, name, tag, type, unom, fla, capacity, description, in_service, state, grid, zone, station, business, context, from_uuids, to_uuids, dev_protect, dev_fault_record, dev_status, dev_dyn_sense, dev_instruct, dev_etc, components, op, ts)
+VALUES (
+    '18e71a24-694a-43fa-93a7-c4d02a27d1bc',
+    '', '', '',
+    -1, -1, -1,
+    '',
+    false,
+    -1,
+    '', '', '',
+    '{}',
+    '{}',
+    '[]',
+    '[]',
+    '[]',
+    '[]',
+    '[]',
+    '[]',
+    '[]',
+    '[]',
+    ARRAY['968dd6e6-faec-4f78-b58a-d6e68426b09e', '968dd6e6-faec-4f78-b58a-d6e68426b08e']::uuid[],
+    -1,
+    CURRENT_TIMESTAMP
+);
+
+INSERT INTO public.component (global_uuid, nspath, tag, name, model_name, description, grid, zone, station, type, in_service, state, status, connection, label, context, op, ts)
+VALUES
+(
+    '968dd6e6-faec-4f78-b58a-d6e68426b09e',
+    'ns1', 'tag1', 'component1', '', '',
+    'grid1', 'zone1', 'station1',
+    -1,
+    false,
+    -1, -1,
+    '{}',
+    '{}',
+    '{}',
+    -1,
+    CURRENT_TIMESTAMP
+),
+(
+    '968dd6e6-faec-4f78-b58a-d6e68426b08e',
+    'ns1', 'tag2', 'component2', '', '',
+    'grid1', 'zone1', 'station1',
+    -1,
+    false,
+    -1, -1,
+    '{}',
+    '{}',
+    '{}',
+    -1,
+    CURRENT_TIMESTAMP
+);
+
+INSERT INTO public.measurement (tag, name, type, size, data_source, event_plan, bay_uuid, component_uuid, op, ts)
+VALUES (
+    'I11_C_rms',
+    '45母甲侧互连电流C相1',
+    -1,
+    200,
+    '{"type": 1, "io_address": {"device": "ssu001", "channel": "Telemetry1", "station": "001"}}',
+    '{"cause": {"up": 55.0, "down": 45.0}, "action": {"command": "warning", "parameters": ["I段母线甲侧互连电流C相1"]}, "enable": true}',
+    '18e71a24-694a-43fa-93a7-c4d02a27d1bc',
+    '968dd6e6-faec-4f78-b58a-d6e68426b09e',
+    -1,
+    CURRENT_TIMESTAMP
+);
 ```

 ##### 2.4.2 Redis data injection

 Redis data script

-```Lua
-redis.call('SADD', 'grid_keys', 'transformfeeder1_220', 'transformfeeder1_220_35', 'transformfeeder1_220_36')
-redis.call('SADD', 'grid_transformfeeder1_220_zones_keys', 'I_A_rms', 'I_B_rms', 'I_C_rms')
-redis.call('SADD', 'grid_transformfeeder1_220_35_zones_keys', 'I_A_rms', 'I_B_rms', 'I_C_rms')
-redis.call('SADD', 'grid_transformfeeder1_220_36_zones_keys', 'I_A_rms', 'I_B_rms', 'I_C_rms')
-
-local dict_key = 'search_suggestions_dict'
-
-redis.call('FT.SUGADD', dict_key, 'transformfeeder1_220', 1)
-redis.call('FT.SUGADD', dict_key, 'transformfeeder1_220_35', 1)
-redis.call('FT.SUGADD', dict_key, 'transformfeeder1_220_36', 1)
-redis.call('FT.SUGADD', dict_key, 'transformfeeder1_220.I_A_rms', 1)
-redis.call('FT.SUGADD', dict_key, 'transformfeeder1_220.I_B_rms', 1)
-redis.call('FT.SUGADD', dict_key, 'transformfeeder1_220.I_C_rms', 1)
-redis.call('FT.SUGADD', dict_key, 'transformfeeder1_220_35.I_A_rms', 1)
-redis.call('FT.SUGADD', dict_key, 'transformfeeder1_220_35.I_B_rms', 1)
-redis.call('FT.SUGADD', dict_key, 'transformfeeder1_220_35.I_C_rms', 1)
-redis.call('FT.SUGADD', dict_key, 'transformfeeder1_220_36.I_A_rms', 1)
-redis.call('FT.SUGADD', dict_key, 'transformfeeder1_220_36.I_B_rms', 1)
-redis.call('FT.SUGADD', dict_key, 'transformfeeder1_220_36.I_C_rms', 1)
-
-return 'OK'
-```
+```shell
+deploy/redis-test-data/measurments-recommend/measurement_injection.go
+```

-Import the commands in the Redis CLI
+Run the script to import data into Redis

-1. Run `EVAL "<lua script>" 0` to import the data
-2. Or run `SCRIPT LOAD "<lua script>"` to load the script, then run `EVALSHA <sha1> 0` with the hash returned by the previous command
+```shell
+go run deploy/redis-test-data/measurments-recommend/measurement_injection.go
+```

 ### 3\. Start the ModelRT service
````
```diff
@@ -0,0 +1,330 @@
+// Package main implement redis test data injection
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+
+	"github.com/RediSearch/redisearch-go/v2/redisearch"
+	"github.com/redis/go-redis/v9"
+)
+
+var ac *redisearch.Autocompleter
+
+// init initialize the Autocompleter against the local redis instance
+func init() {
+	// ac = redisearch.NewAutocompleterFromPool(pool, redisSearchDictName)
+	ac = redisearch.NewAutocompleter("localhost:6379", redisSearchDictName)
+}
+
+const (
+	gridKeysSet            = "grid_keys"
+	zoneKeysSet            = "zone_keys"
+	stationKeysSet         = "station_keys"
+	componentNSPathKeysSet = "component_nspath_keys"
+	componentTagKeysSet    = "component_tag_keys"
+	configKeysSet          = "config_keys"
+	measurementTagKeysSet  = "measurement_tag_keys"
+
+	// Grid -> Zone (e.g., grid1_zones_keys)
+	gridZoneSetKeyFormat = "grid%d_zones_keys"
+	// Zone -> Station (e.g., zone1_1_stations_keys)
+	zoneStationSetKeyFormat = "zone%d_%d_stations_keys"
+	// Station -> NSPath (e.g., station1_1_1_components_nspath_keys)
+	stationNSPathKeyFormat = "station%d_%d_%d_components_nspath_keys"
+	// NSPath -> CompTag (e.g., ns1_1_1_1_components_tag_keys)
+	nsPathCompTagKeyFormat = "ns%d_%d_%d_%d_components_tag_keys"
+	// CompTag -> Measurement (e.g., comptag1_1_1_1_1_measurement_keys)
+	compTagMeasKeyFormat = "comptag%d_%d_%d_%d_%d_measurement_keys"
+)
+
+const (
+	redisSearchDictName = "search_suggestions_dict"
+	defaultScore        = 1.0
+)
+
+var configMetrics = []any{
+	"component", "base_extend", "rated", "setup", "model",
+	"stable", "bay", "craft", "integrity", "behavior",
+}
+
+func bulkInsertAllHierarchySets(ctx context.Context, rdb *redis.Client) error {
+	log.Println("starting bulk insertion of Redis hierarchy sets")
+
+	if err := insertStaticSets(ctx, rdb); err != nil {
+		return fmt.Errorf("static set insertion failed: %w", err)
+	}
+
+	if err := insertDynamicHierarchy(ctx, rdb); err != nil {
+		return fmt.Errorf("dynamic hierarchy insertion failed: %w", err)
+	}
+
+	if err := insertAllHierarchySuggestions(ac); err != nil {
+		return fmt.Errorf("hierarchy suggestion insertion failed: %w", err)
+	}
+
+	log.Println("bulk insertion complete")
+	return nil
+}
+
+func insertStaticSets(ctx context.Context, rdb *redis.Client) error {
+	// grid_keys
+	if err := rdb.SAdd(ctx, gridKeysSet, "grid1", "grid2", "grid3").Err(); err != nil {
+		return fmt.Errorf("sadd failed for %s: %w", gridKeysSet, err)
+	}
+
+	// zone_keys (3x3 = 9 members)
+	zoneMembers := make([]any, 0, 9)
+	for i := 1; i <= 3; i++ {
+		for j := 1; j <= 3; j++ {
+			zoneMembers = append(zoneMembers, fmt.Sprintf("zone%d_%d", i, j))
+		}
+	}
+	if err := rdb.SAdd(ctx, zoneKeysSet, zoneMembers...).Err(); err != nil {
+		return fmt.Errorf("sadd failed for %s: %w", zoneKeysSet, err)
+	}
+
+	// config_keys
+	if err := rdb.SAdd(ctx, configKeysSet, configMetrics...).Err(); err != nil {
+		return fmt.Errorf("sadd failed for %s: %w", configKeysSet, err)
+	}
+
+	log.Println("Static sets (grid_keys, zone_keys, config_keys) inserted.")
+	return nil
+}
+
+func insertDynamicHierarchy(ctx context.Context, rdb *redis.Client) error {
+	allStationKeys := make([]any, 0, 27)
+	allNSPathKeys := make([]any, 0, 81)
+	allCompTagKeys := make([]any, 0, 243)
+	allMeasurementTagKeys := make([]any, 0, 729)
+
+	// S: Grid Prefix (1-3)
+	for S := 1; S <= 3; S++ {
+		// Grid-Zone Set Key: gridS_zones_keys
+		gridZoneKey := fmt.Sprintf(gridZoneSetKeyFormat, S)
+		gridZoneMembers := make([]any, 0, 3)
+
+		// Y: Zone Index (1-3)
+		for Y := 1; Y <= 3; Y++ {
+			zoneID := fmt.Sprintf("%d_%d", S, Y)
+			zoneMember := "zone" + zoneID
+			gridZoneMembers = append(gridZoneMembers, zoneMember)
+
+			// Zone-Station Set Key: zoneS_Y_stations_keys
+			zoneStationKey := fmt.Sprintf(zoneStationSetKeyFormat, S, Y)
+			zoneStationMembers := make([]any, 0, 3)
+
+			// Z: Station Index (1-3)
+			for Z := 1; Z <= 3; Z++ {
+				stationID := fmt.Sprintf("%d_%d_%d", S, Y, Z)
+				stationKey := "station" + stationID
+				allStationKeys = append(allStationKeys, stationKey)
+				zoneStationMembers = append(zoneStationMembers, stationKey)
+
+				// Station-NSPath Set Key: stationS_Y_Z_components_nspath_keys
+				stationNSPathKey := fmt.Sprintf(stationNSPathKeyFormat, S, Y, Z)
+				stationNSMembers := make([]any, 0, 3)
+
+				// D: NSPath Index (1-3)
+				for D := 1; D <= 3; D++ {
+					nsPathID := fmt.Sprintf("%s_%d", stationID, D)
+					nsPathKey := "ns" + nsPathID
+					allNSPathKeys = append(allNSPathKeys, nsPathKey)
+					stationNSMembers = append(stationNSMembers, nsPathKey)
+
+					// NSPath-CompTag Set Key: nsS_Y_Z_D_components_tag_keys
+					nsCompTagKey := fmt.Sprintf(nsPathCompTagKeyFormat, S, Y, Z, D)
+					nsCompTagMembers := make([]any, 0, 3)
+
+					// I: CompTag Index (1-3)
+					for I := 1; I <= 3; I++ {
+						compTagID := fmt.Sprintf("%s_%d", nsPathID, I)
+						compTagKey := "comptag" + compTagID
+						allCompTagKeys = append(allCompTagKeys, compTagKey)
+						nsCompTagMembers = append(nsCompTagMembers, compTagKey)
+
+						// CompTag-Measurement Set Key: comptagS_Y_Z_D_I_measurement_keys
+						compTagMeasKey := fmt.Sprintf(compTagMeasKeyFormat, S, Y, Z, D, I)
+						compTagMeasMembers := make([]any, 0, 3)
+
+						// M: Measurement Index (1-3)
+						for M := 1; M <= 3; M++ {
+							measurementID := fmt.Sprintf("%s_%d", compTagID, M)
+							measurementKey := "meas" + measurementID
+							allMeasurementTagKeys = append(allMeasurementTagKeys, measurementKey)
+							compTagMeasMembers = append(compTagMeasMembers, measurementKey)
+						}
+
+						if err := rdb.SAdd(ctx, compTagMeasKey, compTagMeasMembers...).Err(); err != nil {
+							return fmt.Errorf("sadd failed for %s: %w", compTagMeasKey, err)
+						}
+					}
+
+					if err := rdb.SAdd(ctx, nsCompTagKey, nsCompTagMembers...).Err(); err != nil {
+						return fmt.Errorf("sadd failed for %s: %w", nsCompTagKey, err)
+					}
+				}
+
+				if err := rdb.SAdd(ctx, stationNSPathKey, stationNSMembers...).Err(); err != nil {
+					return fmt.Errorf("sadd failed for %s: %w", stationNSPathKey, err)
+				}
+			}
+
+			if err := rdb.SAdd(ctx, zoneStationKey, zoneStationMembers...).Err(); err != nil {
+				return fmt.Errorf("sadd failed for %s: %w", zoneStationKey, err)
+			}
+		}
+
+		if err := rdb.SAdd(ctx, gridZoneKey, gridZoneMembers...).Err(); err != nil {
+			return fmt.Errorf("sadd failed for %s: %w", gridZoneKey, err)
+		}
+	}
+
+	// insert all top-level dynamic sets (all members go into the global sets in one batch)
+	if err := rdb.SAdd(ctx, stationKeysSet, allStationKeys...).Err(); err != nil {
+		return fmt.Errorf("sadd failed for %s: %w", stationKeysSet, err)
+	}
+	if err := rdb.SAdd(ctx, componentNSPathKeysSet, allNSPathKeys...).Err(); err != nil {
+		return fmt.Errorf("sadd failed for %s: %w", componentNSPathKeysSet, err)
+	}
+	if err := rdb.SAdd(ctx, componentTagKeysSet, allCompTagKeys...).Err(); err != nil {
+		return fmt.Errorf("sadd failed for %s: %w", componentTagKeysSet, err)
+	}
+	if err := rdb.SAdd(ctx, measurementTagKeysSet, allMeasurementTagKeys...).Err(); err != nil {
+		return fmt.Errorf("sadd failed for %s: %w", measurementTagKeysSet, err)
+	}
+
+	log.Printf("inserted %d stations, %d nspaths, %d comptags, and %d measurements.\n",
+		len(allStationKeys), len(allNSPathKeys), len(allCompTagKeys), len(allMeasurementTagKeys))
+
+	return nil
+}
+
+func insertAllHierarchySuggestions(ac *redisearch.Autocompleter) error {
+	suggestions := make([]redisearch.Suggestion, 0, 10000)
+	// S: grid Index (1-3)
+	for S := 1; S <= 3; S++ {
+		gridStr := fmt.Sprintf("grid%d", S)
+		suggestions = append(suggestions, redisearch.Suggestion{Term: gridStr, Score: defaultScore})
+
+		// Y: zone Index (1-3)
+		for Y := 1; Y <= 3; Y++ {
+			zoneStr := fmt.Sprintf("zone%d_%d", S, Y)
+			gridZonePath := fmt.Sprintf("%s.%s", gridStr, zoneStr)
+			suggestions = append(suggestions, redisearch.Suggestion{Term: gridZonePath, Score: defaultScore})
+
+			// Z: station Index (1-3)
+			for Z := 1; Z <= 3; Z++ {
+				stationStr := fmt.Sprintf("station%d_%d_%d", S, Y, Z)
+				gridZoneStationPath := fmt.Sprintf("%s.%s", gridZonePath, stationStr)
+				suggestions = append(suggestions, redisearch.Suggestion{Term: gridZoneStationPath, Score: defaultScore})
+
+				// D: nsPath Index (1-3)
+				for D := 1; D <= 3; D++ {
+					nsPathStr := fmt.Sprintf("ns%d_%d_%d_%d", S, Y, Z, D)
+					gridZoneStationNSPath := fmt.Sprintf("%s.%s", gridZoneStationPath, nsPathStr)
+					suggestions = append(suggestions, redisearch.Suggestion{Term: gridZoneStationNSPath, Score: defaultScore})
+
+					// I: compTag Index (1-3)
+					for I := 1; I <= 3; I++ {
+						compTagStr := fmt.Sprintf("comptag%d_%d_%d_%d_%d", S, Y, Z, D, I)
+						fullCompTagPath := fmt.Sprintf("%s.%s", gridZoneStationNSPath, compTagStr)
+						suggestions = append(suggestions, redisearch.Suggestion{Term: fullCompTagPath, Score: defaultScore})
+
+						for _, metric := range configMetrics {
+							fullMetricPath := fmt.Sprintf("%s.%s", fullCompTagPath, metric)
+							suggestions = append(suggestions, redisearch.Suggestion{Term: fullMetricPath, Score: defaultScore})
+							// J: measTag Index (1-3)
+							for J := 1; J <= 3; J++ {
+								measTagStr := fmt.Sprintf("meas%d_%d_%d_%d_%d_%d", S, Y, Z, D, I, J)
+								fullMeasurementPath := fmt.Sprintf("%s.%s", fullMetricPath, measTagStr)
+								suggestions = append(suggestions, redisearch.Suggestion{Term: fullMeasurementPath, Score: defaultScore})
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+
+	log.Printf("generated %d suggestions. starting bulk insertion into dictionary '%s'.", len(suggestions), redisSearchDictName)
+
+	// delete the existing suggestion dictionary before re-inserting
+	ac.Delete()
+
+	err := ac.AddTerms(suggestions...)
+	if err != nil {
+		return fmt.Errorf("failed to add %d suggestions: %w", len(suggestions), err)
+	}
+	return nil
+}
+
+func deleteAllHierarchySets(ctx context.Context, rdb *redis.Client) error {
+	log.Println("starting to collect all Redis Set keys for deletion...")
+
+	keysToDelete := []string{
+		gridKeysSet,
+		zoneKeysSet,
+		stationKeysSet,
+		componentNSPathKeysSet,
+		componentTagKeysSet,
+		configKeysSet,
+		measurementTagKeysSet,
+	}
+
+	for S := 1; S <= 3; S++ {
+		keysToDelete = append(keysToDelete, fmt.Sprintf(gridZoneSetKeyFormat, S))
+		for Y := 1; Y <= 3; Y++ {
+			keysToDelete = append(keysToDelete, fmt.Sprintf(zoneStationSetKeyFormat, S, Y))
+			for Z := 1; Z <= 3; Z++ {
+				keysToDelete = append(keysToDelete, fmt.Sprintf(stationNSPathKeyFormat, S, Y, Z))
+				for D := 1; D <= 3; D++ {
+					keysToDelete = append(keysToDelete, fmt.Sprintf(nsPathCompTagKeyFormat, S, Y, Z, D))
+					for I := 1; I <= 3; I++ {
+						keysToDelete = append(keysToDelete, fmt.Sprintf(compTagMeasKeyFormat, S, Y, Z, D, I))
+					}
+				}
+			}
+		}
+	}
+
+	log.Printf("collected %d unique keys. Starting batch deletion...", len(keysToDelete))
+
+	deletedCount, err := rdb.Del(ctx, keysToDelete...).Result()
+	if err != nil {
+		return fmt.Errorf("batch deletion failed: %w", err)
+	}
+
+	log.Printf("Successfully deleted %d keys (Sets) from Redis.", deletedCount)
+	return nil
+}
+
+func main() {
+	rdb := redis.NewClient(&redis.Options{
+		Addr:     "localhost:6379",
+		Password: "",
+		DB:       0,
+	})
+
+	ctx := context.Background()
+
+	if err := rdb.Ping(ctx).Err(); err != nil {
+		log.Fatalf("could not connect to Redis: %v", err)
+	}
+	log.Println("connected to Redis successfully")
+
+	if err := deleteAllHierarchySets(ctx, rdb); err != nil {
+		log.Fatalf("error deleting existing sets before bulk insertion: %v", err)
+	}
+
+	if err := bulkInsertAllHierarchySets(ctx, rdb); err != nil {
+		log.Fatalf("error during bulk insertion: %v", err)
+	}
+}
```
@ -0,0 +1,224 @@
|
||||||
|
// Package main implement redis test data injection
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"math/rand"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"modelRT/orm"
|
||||||
|
|
||||||
|
util "modelRT/deploy/redis-test-data/util"
|
||||||
|
|
||||||
|
"github.com/redis/go-redis/v9"
|
||||||
|
"gorm.io/driver/postgres"
|
||||||
|
"gorm.io/gorm"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
redisAddr = "localhost:6379"
|
||||||
|
)
|
||||||
|
|
||||||
|
var globalRedisClient *redis.Client
|
||||||
|
|
||||||
|
var (
|
||||||
|
highEnd, highStart, lowStart, lowEnd int
|
||||||
|
totalLength int
|
||||||
|
highSegmentLength int
|
||||||
|
lowSegmentLength int
|
||||||
|
)
|
||||||
|
|
||||||
|
func selectRandomInt() int {
|
||||||
|
options := []int{0, 2}
|
||||||
|
randomIndex := rand.Intn(len(options))
|
||||||
|
return options[randomIndex]
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateMixedData define func to generate a set of floating-point data that meets specific conditions
|
||||||
|
func generateMixedData(highMin, lowMin, highBase, lowBase, baseValue, normalBase float64) []float64 {
|
||||||
|
totalLength = 500
|
||||||
|
highSegmentLength = 20
|
||||||
|
lowSegmentLength = 20
|
||||||
|
|
||||||
|
seed := time.Now().UnixNano()
|
||||||
|
source := rand.NewSource(seed)
|
||||||
|
r := rand.New(source)
|
||||||
|
|
||||||
|
data := make([]float64, totalLength)
|
||||||
|
highStart = rand.Intn(totalLength - highSegmentLength - lowSegmentLength - 1)
|
||||||
|
highEnd = highStart + highSegmentLength
|
||||||
|
lowStart = rand.Intn(totalLength-lowSegmentLength-highEnd) + highEnd
|
||||||
|
lowEnd = lowStart + lowSegmentLength
|
||||||
|
|
||||||
|
for i := 0; i < totalLength; i++ {
|
||||||
|
if i >= highStart && i < highStart+highSegmentLength {
|
||||||
|
// 数据值均大于 55.0,在 [55.5, 60.0] 范围内随机
|
||||||
|
// rand.Float64() 生成 [0.0, 1.0) 范围的浮点数
|
||||||
|
data[i] = highMin + r.Float64()*(highBase)
|
||||||
|
} else if i >= lowStart && i < lowStart+lowSegmentLength {
|
||||||
|
// 数据值均小于 45.0,在 [40.0, 44.5] 范围内随机
|
||||||
|
data[i] = lowMin + r.Float64()*(lowBase)
|
||||||
|
} else {
|
||||||
|
// 数据在 [45.0, 55.0] 范围内随机 (baseValue ± 5)
|
||||||
|
// 50 + rand.Float64() * 10 - 5
|
||||||
|
change := normalBase - r.Float64()*normalBase*2
|
||||||
|
data[i] = baseValue + change
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return data
|
||||||
|
}
|
||||||
|
|
||||||
|
func generateNormalData(baseValue, normalBase float64) []float64 {
|
||||||
|
totalLength = 500
|
||||||
|
seed := time.Now().UnixNano()
|
||||||
|
source := rand.NewSource(seed)
|
||||||
|
r := rand.New(source)
|
||||||
|
|
||||||
|
data := make([]float64, totalLength)
|
||||||
|
for i := 0; i < totalLength; i++ {
|
||||||
|
change := normalBase - r.Float64()*normalBase*2
|
||||||
|
data[i] = baseValue + change
|
||||||
|
}
|
||||||
|
	return data
}

func main() {
	rootCtx := context.Background()

	pgURI := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s", "192.168.1.101", 5432, "postgres", "coslight", "demo")

	postgresDBClient, err := gorm.Open(postgres.Open(pgURI))
	if err != nil {
		panic(err)
	}
	defer func() {
		sqlDB, err := postgresDBClient.DB()
		if err != nil {
			panic(err)
		}
		sqlDB.Close()
	}()

	cancelCtx, cancel := context.WithTimeout(rootCtx, 5*time.Second)
	defer cancel()
	var measurements []orm.Measurement
	result := postgresDBClient.WithContext(cancelCtx).Find(&measurements)
	if result.Error != nil {
		panic(result.Error)
	}
	log.Println("total number of measurement points loaded:", len(measurements))
	measInfos := util.ProcessMeasurements(measurements)

	globalRedisClient = util.InitRedisClient(redisAddr)
	rCancelCtx, cancel := context.WithCancel(rootCtx)
	defer cancel()

	for key, measInfo := range measInfos {
		randomType := selectRandomType()
		var datas []float64
		if randomType {
			// generate normal data
			log.Printf("key:%s generate normal data\n", key)
			baseValue := measInfo.BaseValue
			changes := measInfo.Changes
			normalBase := changes[0]
			normalMin := baseValue - normalBase
			normalMax := baseValue + normalBase
			datas = generateNormalData(baseValue, normalBase)
			allTrue := true

			for i := 0; i < totalLength; i++ {
				value := datas[i]
				// log.Printf("index:%d, value:%.2f\n", i, value)
				// a value is out of band when it falls below the min or above the max
				if value < normalMin || value > normalMax {
					allTrue = false
				}
			}
			log.Printf("// verification: are all values within [%.2f, %.2f]? %t\n", normalMin, normalMax, allTrue)
		} else {
			// generate abnormal data
			log.Printf("key:%s generate abnormal data\n", key)
			var highMin, highBase float64
			var lowMin, lowBase float64
			var normalBase float64

			// TODO generate one round of test data
			changes := measInfo.Changes
			baseValue := measInfo.BaseValue
			if len(changes) == 2 {
				highMin = baseValue + changes[0]
				lowMin = baseValue + changes[1]
				highBase = changes[0]
				lowBase = changes[1]
				normalBase = changes[0]
			} else {
				randomIndex := selectRandomInt()
				highMin = baseValue + changes[randomIndex]
				lowMin = baseValue + changes[randomIndex+1]
				highBase = changes[randomIndex]
				lowBase = changes[randomIndex+1]
				normalBase = changes[0]
			}

			datas = generateMixedData(highMin, lowMin, highBase, lowBase, baseValue, normalBase)
			// log.Printf("key:%s\n datas:%v\n", key, datas)

			allHigh := true
			for i := highStart; i < highEnd; i++ {
				if datas[i] <= highMin {
					allHigh = false
					break
				}
			}
			log.Printf("// verification (high segment at %d-%d): are all values > %.2f? %t\n", highStart, highEnd-1, highMin, allHigh)

			allLow := true
			for i := lowStart; i < lowEnd; i++ {
				if datas[i] >= lowMin {
					allLow = false
					break
				}
			}
			log.Printf("// verification (low segment at %d-%d): are all values < %.2f? %t\n", lowStart, lowEnd-1, lowMin, allLow)

			allTrue := true
			for i := 0; i < totalLength; i++ {
				value := datas[i]
				if i < highStart || (i >= highEnd && i < lowStart) || i >= lowEnd {
					// log.Printf("index:%d, value:%.2f\n", i, value)
					// indices outside the injected segments must stay strictly inside (lowMin, highMin)
					if value >= highMin || value <= lowMin {
						allTrue = false
					}
				}
			}
			log.Printf("// verification (normal indices at %d-%d): are all values < %.2f and > %.2f? %t\n", 0, totalLength-1, highMin, lowMin, allTrue)
		}
		log.Printf("starting data writer, Redis Key: %s, base value: %.4f, change ranges: %+v\n", key, measInfo.BaseValue, measInfo.Changes)
		pipe := globalRedisClient.Pipeline()
		redisZs := make([]redis.Z, 0, totalLength)
		currentTime := time.Now().UnixNano()
		for i := range totalLength {
			sequentialTime := currentTime + int64(i)
			z := redis.Z{
				Score:  datas[i],
				Member: strconv.FormatInt(sequentialTime, 10),
			}
			redisZs = append(redisZs, z)
		}
		log.Printf("starting data writer, Redis Key: %s, records to write: %d\n", key, len(redisZs))
		pipe.ZAdd(rCancelCtx, key, redisZs...)
		_, err = pipe.Exec(rCancelCtx)
		if err != nil {
			log.Printf("redis pipeline execution failed: %v\n", err)
		}
	}
}

func selectRandomType() bool {
	// effectively a fair coin flip: rand.Intn(2) yields 0 or 1, so the
	// contents of options are never read, only its length
	options := []int{0, 2}
	randomValue := rand.Intn(len(options))
	return randomValue != 0
}
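The in-range verification above hinges on the corrected out-of-band condition (`value < normalMin || value > normalMax`; the original `&&` could never be true). A minimal table-driven test sketch of that check; `withinBand` is a hypothetical helper mirroring the corrected condition, not a function from the repository:

```go
package main

import "testing"

// withinBand reports whether v lies inside [base-delta, base+delta].
func withinBand(v, base, delta float64) bool {
	return v >= base-delta && v <= base+delta
}

func TestWithinBand(t *testing.T) {
	cases := []struct {
		v, base, delta float64
		want           bool
	}{
		{100.0, 100.0, 5.0, true},  // exactly the base value
		{104.9, 100.0, 5.0, true},  // just inside the upper bound
		{105.1, 100.0, 5.0, false}, // just outside the upper bound
		{94.9, 100.0, 5.0, false},  // just outside the lower bound
	}
	for _, c := range cases {
		if got := withinBand(c.v, c.base, c.delta); got != c.want {
			t.Errorf("withinBand(%v, %v, %v) = %v, want %v", c.v, c.base, c.delta, got, c.want)
		}
	}
}
```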
@ -0,0 +1,449 @@
// Package main implements redis test data injection
package main

import (
	"context"
	"fmt"
	"log"
	"math/rand"
	"os"
	"os/signal"
	"strconv"
	"syscall"
	"time"

	"modelRT/deploy/redis-test-data/util"
	"modelRT/orm"

	redis "github.com/redis/go-redis/v9"
	"gorm.io/driver/postgres"
	"gorm.io/gorm"
)

// Redis configuration
const (
	redisAddr = "localhost:6379"
)

var globalRedisClient *redis.Client

// outlierConfig configures the outlier segments
type outlierConfig struct {
	Enabled      bool    // whether outlier segments are enabled
	Count        int     // number of outlier segments (0 = random, 1-5 = fixed count)
	MinLength    int     // minimum outlier segment length
	MaxLength    int     // maximum outlier segment length
	Intensity    float64 // outlier intensity factor (1.0 = slight excess, 2.0 = significant excess)
	Distribution string  // distribution type: "both" = both directions, "upper" = only upward, "lower" = only downward
}

// generateFloatSliceWithOutliers generates data containing contiguous outlier segments
// baseValue: base value
// changes: change ranges, two elements per group [minChange1, maxChange1, minChange2, maxChange2, ...]
// size: length of the generated slice
// variationType: variation type
// outlierConfig: outlier segment configuration
func generateFloatSliceWithOutliers(baseValue float64, changes []float64, size int, variationType string, outlierConfig outlierConfig) ([]float64, error) {
	// generate normal data first
	data, err := generateFloatSlice(baseValue, changes, size, variationType)
	if err != nil {
		return nil, err
	}

	// insert outlier segments
	if outlierConfig.Enabled {
		data = insertOutliers(data, baseValue, changes, outlierConfig)
	}

	return data, nil
}

// insertOutliers inserts outlier segments
func insertOutliers(data []float64, baseValue float64, changes []float64, config outlierConfig) []float64 {
	if len(data) == 0 || !config.Enabled {
		return data
	}

	// get the bounds of the change ranges
	minBound, maxBound := getChangeBounds(baseValue, changes)
	// TODO delete
	log.Printf("change range bounds, min:%.4f, max:%.4f\n", minBound, maxBound)

	// determine the number of outlier segments
	outlierCount := config.Count
	if outlierCount == 0 {
		// randomly generate 1-3 outlier segments
		outlierCount = rand.Intn(3) + 1
	}

	// compute the maximum possible number of outlier segments
	maxPossibleOutliers := len(data) / (config.MinLength + 10)
	if outlierCount > maxPossibleOutliers {
		outlierCount = maxPossibleOutliers
	}

	// generate the outlier segment positions
	segments := generateOutlierSegments(len(data), config.MinLength, config.MaxLength, outlierCount, config.Distribution)
	// TODO debug output, remove later
	log.Printf("generated outlier segment positions:%+v\n", segments)
	// insert the outlier data
	for _, segment := range segments {
		data = insertOutlierSegment(data, segment, minBound, maxBound, config)
	}
	return data
}

// getChangeBounds returns the bounds of the change ranges
func getChangeBounds(baseValue float64, changes []float64) (minBound, maxBound float64) {
	if len(changes) == 0 {
		return baseValue - 10, baseValue + 10
	}

	ranges := normalizeRanges(changes)
	minBound, maxBound = baseValue+ranges[0][0], baseValue+ranges[0][1]

	for _, r := range ranges {
		if baseValue+r[0] < minBound {
			minBound = baseValue + r[0]
		}
		if baseValue+r[1] > maxBound {
			maxBound = baseValue + r[1]
		}
	}

	return minBound, maxBound
}

// OutlierSegment defines an outlier segment
type OutlierSegment struct {
	Start  int
	Length int
	Type   string // "upper" = upward outlier, "lower" = downward outlier
}

func generateOutlierSegments(totalSize, minLength, maxLength, count int, distribution string) []OutlierSegment {
	if count == 0 {
		return nil
	}

	segments := make([]OutlierSegment, 0, count)
	usedPositions := make(map[int]bool)

	for i := 0; i < count; i++ {
		// try several times to find a suitable position
		for attempt := 0; attempt < 10; attempt++ {
			length := rand.Intn(maxLength-minLength+1) + minLength
			start := rand.Intn(totalSize - length)

			// check for overlap with existing segments
			overlap := false
			for pos := start; pos < start+length; pos++ {
				if usedPositions[pos] {
					overlap = true
					break
				}
			}

			if !overlap {
				// mark the used positions
				for pos := start; pos < start+length; pos++ {
					usedPositions[pos] = true
				}

				// decide the outlier type based on the distribution config
				var outlierType string
				switch distribution {
				case "upper":
					outlierType = "upper"
				case "lower":
					outlierType = "lower"
				case "both":
					fallthrough
				default:
					if rand.Float64() < 0.5 {
						outlierType = "upper"
					} else {
						outlierType = "lower"
					}
				}

				segments = append(segments, OutlierSegment{
					Start:  start,
					Length: length,
					Type:   outlierType,
				})
				break
			}
		}
	}

	return segments
}

func insertOutlierSegment(data []float64, segment OutlierSegment, minBound, maxBound float64, config outlierConfig) []float64 {
	rangeWidth := maxBound - minBound

	// determine the direction of the whole outlier segment
	outlierType := segment.Type
	if outlierType == "" {
		switch config.Distribution {
		case "upper":
			outlierType = "upper"
		case "lower":
			outlierType = "lower"
		default:
			if rand.Float64() < 0.5 {
				outlierType = "upper"
			} else {
				outlierType = "lower"
			}
		}
	}

	// generate outliers in the same direction for the whole segment
	for i := segment.Start; i < segment.Start+segment.Length && i < len(data); i++ {
		excess := rangeWidth * (0.3 + rand.Float64()*config.Intensity)

		if outlierType == "upper" {
			data[i] = maxBound + excess
		} else {
			data[i] = minBound - excess
		}
	}

	return data
}

func detectOutlierSegments(data []float64, baseValue float64, changes []float64, minSegmentLength int) []OutlierSegment {
	if len(data) == 0 {
		return nil
	}

	minBound, maxBound := getChangeBounds(baseValue, changes)
	var segments []OutlierSegment
	currentStart := -1
	currentType := ""

	for i, value := range data {
		isOutlier := value > maxBound || value < minBound

		if isOutlier {
			outlierType := "upper"
			if value < minBound {
				outlierType = "lower"
			}

			if currentStart == -1 {
				// start a new outlier segment
				currentStart = i
				currentType = outlierType
			} else if currentType != outlierType {
				// the type changed, close the current segment
				if i-currentStart >= minSegmentLength {
					segments = append(segments, OutlierSegment{
						Start:  currentStart,
						Length: i - currentStart,
						Type:   currentType,
					})
				}
				currentStart = i
				currentType = outlierType
			}
		} else {
			if currentStart != -1 {
				// close the current outlier segment
				if i-currentStart >= minSegmentLength {
					segments = append(segments, OutlierSegment{
						Start:  currentStart,
						Length: i - currentStart,
						Type:   currentType,
					})
				}
				currentStart = -1
				currentType = ""
			}
		}
	}

	// handle a trailing outlier segment
	if currentStart != -1 && len(data)-currentStart >= minSegmentLength {
		segments = append(segments, OutlierSegment{
			Start:  currentStart,
			Length: len(data) - currentStart,
			Type:   currentType,
		})
	}

	return segments
}

func generateFloatSlice(baseValue float64, changes []float64, size int, variationType string) ([]float64, error) {
	return generateRandomData(baseValue, changes, size), nil
}

func normalizeRanges(changes []float64) [][2]float64 {
	ranges := make([][2]float64, len(changes)/2)
	for i := 0; i < len(changes); i += 2 {
		min, max := changes[i], changes[i+1]
		if min > max {
			min, max = max, min
		}
		ranges[i/2] = [2]float64{min, max}
	}
	return ranges
}

func generateRandomData(baseValue float64, changes []float64, size int) []float64 {
	data := make([]float64, size)
	ranges := normalizeRanges(changes)
	for i := range data {
		rangeIdx := rand.Intn(len(ranges))
		minChange := ranges[rangeIdx][0]
		maxChange := ranges[rangeIdx][1]
		change := minChange + rand.Float64()*(maxChange-minChange)
		data[i] = baseValue + change
	}
	return data
}

// simulateDataWrite periodically generates simulated data and writes it into a Redis ZSet
func simulateDataWrite(ctx context.Context, rdb *redis.Client, redisKey string, config outlierConfig, measInfo util.CalculationResult) {
	log.Printf("starting data writer, Redis Key: %s, base value: %.4f, change ranges: %+v\n", redisKey, measInfo.BaseValue, measInfo.Changes)
	ticker := time.NewTicker(3 * time.Second)
	defer ticker.Stop()

	pipe := rdb.Pipeline()
	for {
		select {
		case <-ctx.Done():
			log.Printf("\n[%s] writer stopped\n", redisKey)
			return
		case <-ticker.C:
			minBound, maxBound := getChangeBounds(measInfo.BaseValue, measInfo.Changes)
			log.Printf("computed bounds: [%.4f, %.4f]\n", minBound, maxBound)

			// decide how to proceed based on the base value type
			switch measInfo.BaseType {
			case "TI":
				// edge-triggered type needs special handling
				log.Printf("edge-triggered type, skip outlier data generation\n")
				return
			case "TE":
				// normal upper/lower limit type, generate data containing outliers
				if len(measInfo.Changes) == 0 {
					log.Printf("no change range data, skip\n")
					return
				}

				// adjust the outlier config based on the number of change ranges
				if len(measInfo.Changes) == 2 {
					// only upper and lower limits
					config.Distribution = "both"
				} else if len(measInfo.Changes) == 4 {
					// upper/lower limits plus warning upper/lower limits
					config.Distribution = "both"
					config.Intensity = 2.0 // strengthen the outlier intensity
				}

				// generate data containing outliers
				data, err := generateFloatSliceWithOutliers(
					measInfo.BaseValue,
					measInfo.Changes,
					measInfo.Size,
					"random",
					config,
				)
				if err != nil {
					log.Printf("failed to generate outlier data:%v\n", err)
					continue
				}

				segments := detectOutlierSegments(data, measInfo.BaseValue, measInfo.Changes, config.MinLength)
				log.Printf("detected outlier segment count:%d\n", len(segments))
				for i, segment := range segments {
					log.Printf("outlier segment %d: position [%d-%d], length=%d, type=%s\n",
						i+1, segment.Start, segment.Start+segment.Length-1, segment.Length, segment.Type)
				}

				redisZs := make([]redis.Z, 0, len(data))
				for i := range len(data) {
					z := redis.Z{
						Score:  data[i],
						Member: strconv.FormatInt(time.Now().UnixNano(), 10),
					}
					redisZs = append(redisZs, z)
				}
				pipe.ZAdd(ctx, redisKey, redisZs...)
				_, err = pipe.Exec(ctx)
				if err != nil {
					log.Printf("redis pipeline execution failed: %v", err)
				}
				log.Printf("generated redis real-time data successfully\n")
			}
		}
	}
}

func gracefulShutdown() {
	if globalRedisClient != nil {
		if err := globalRedisClient.Close(); err != nil {
			log.Printf("failed to close the Redis client:%v", err)
		} else {
			log.Println("closed the Redis client successfully")
		}
	}
	time.Sleep(500 * time.Millisecond)
	os.Exit(0)
}

func main() {
	rootCtx := context.Background()

	pgURI := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s", "192.168.1.101", 5432, "postgres", "coslight", "demo")

	postgresDBClient, err := gorm.Open(postgres.Open(pgURI))
	if err != nil {
		panic(err)
	}
	defer func() {
		sqlDB, err := postgresDBClient.DB()
		if err != nil {
			panic(err)
		}
		sqlDB.Close()
	}()

	cancelCtx, cancel := context.WithTimeout(rootCtx, 5*time.Second)
	defer cancel()
	var measurements []orm.Measurement
	result := postgresDBClient.WithContext(cancelCtx).Find(&measurements)
	if result.Error != nil {
		panic(result.Error)
	}
	log.Println("total number of measurement points loaded:", len(measurements))
	measInfos := util.ProcessMeasurements(measurements)

	// measurement data generation (including outlier data)
	// configure the outlier segment parameters
	outlierConfig := outlierConfig{
		Enabled:      true,   // whether to produce outlier segment data
		Count:        2,      // number of outlier segments
		MinLength:    10,     // minimum contiguous outlier segment length
		MaxLength:    15,     // maximum contiguous outlier segment length
		Intensity:    1.5,    // outlier intensity
		Distribution: "both", // distribution type
	}

	globalRedisClient = util.InitRedisClient(redisAddr)
	rCancelCtx, cancel := context.WithCancel(rootCtx)
	defer cancel()

	for key, measInfo := range measInfos {
		go simulateDataWrite(rCancelCtx, globalRedisClient, key, outlierConfig, measInfo)
	}

	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
	<-sigChan
	gracefulShutdown()
}
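A small driver makes the flow above concrete: build an `outlierConfig`, generate a slice with injected segments, then let `detectOutlierSegments` rediscover them. This is a sketch that assumes it sits in the same `main` package as the functions above; the config values and the change band are illustrative:

```go
package main

import "log"

// exampleOutlierRoundTrip injects outlier segments and then detects them again.
func exampleOutlierRoundTrip() {
	cfg := outlierConfig{
		Enabled:      true,
		Count:        2,
		MinLength:    5,
		MaxLength:    10,
		Intensity:    1.5,
		Distribution: "both",
	}
	// base value 100 with a change band of [-10, +10]
	data, err := generateFloatSliceWithOutliers(100.0, []float64{-10, 10}, 200, "random", cfg)
	if err != nil {
		log.Fatal(err)
	}
	// the detector should rediscover the injected segments
	for _, seg := range detectOutlierSegments(data, 100.0, []float64{-10, 10}, cfg.MinLength) {
		log.Printf("segment: start=%d length=%d type=%s", seg.Start, seg.Length, seg.Type)
	}
}
```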
@ -0,0 +1,266 @@
// Package util provides some utility functions
package util

import (
	"fmt"

	"modelRT/orm"
)

// CalculationResult holds the computed base value and change ranges for a measurement point
type CalculationResult struct {
	BaseValue float64
	Changes   []float64
	Size      int
	BaseType  string // "TE" for threshold limits, "TI" for edge-triggered
	Message   string
}

// ProcessMeasurements filters the measurements and computes a base value and
// change ranges for each real-time data source
func ProcessMeasurements(measurements []orm.Measurement) map[string]CalculationResult {
	results := make(map[string]CalculationResult, len(measurements))
	for _, measurement := range measurements {
		// check that DataSource exists and that its type is 1
		if measurement.DataSource == nil {
			continue
		}

		// check whether type is 1
		dataType, typeExists := measurement.DataSource["type"]
		if !typeExists {
			continue
		}

		// type assertion handling the different numeric types
		var typeValue int
		switch v := dataType.(type) {
		case int:
			typeValue = v
		case float64:
			typeValue = int(v)
		case int64:
			typeValue = int(v)
		default:
			continue
		}

		if typeValue != 1 {
			continue
		}

		// fetch io_address
		ioAddressRaw, ioExists := measurement.DataSource["io_address"]
		if !ioExists {
			continue
		}

		ioAddress, ok := ioAddressRaw.(map[string]any)
		if !ok {
			continue
		}

		station, _ := ioAddress["station"].(string)
		device, _ := ioAddress["device"].(string)
		channel, _ := ioAddress["channel"].(string)

		result := fmt.Sprintf("%s:%s:phasor:%s", station, device, channel)
		if measurement.EventPlan == nil {
			continue
		}

		causeValue, causeExist := measurement.EventPlan["cause"]
		if !causeExist {
			continue
		}
		causeMap, ok := causeValue.(map[string]any)
		if !ok {
			continue
		}
		calResult, err := calculateBaseValueEnhanced(causeMap)
		if err != nil {
			continue
		}
		calResult.Size = measurement.Size
		results[result] = calResult
	}
	return results
}

func calculateBaseValueEnhanced(data map[string]any) (CalculationResult, error) {
	result := CalculationResult{}
	if edge, exists := data["edge"]; exists {
		value, err := calculateEdgeValue(edge)
		if err != nil {
			return result, err
		}
		if edge == "raising" {
			result.Changes = []float64{1.0}
		} else {
			result.Changes = []float64{0.0}
		}

		result.BaseValue = value
		result.BaseType = "TI"
		result.Message = "edge-triggered base value"
		return result, nil
	}

	hasUpDown := HasKeys(data, "up", "down")
	hasUpUpDownDown := HasKeys(data, "upup", "downdown")
	result.BaseType = "TE"
	switch {
	case hasUpDown && hasUpUpDownDown:
		value, err := calculateAverage(data, "up", "down")
		if err != nil {
			return result, err
		}
		result.BaseValue = value
		result.Changes, err = calculateChanges(data, value, false, 4)
		if err != nil {
			return result, err
		}
		result.Message = "upper/lower limit base value (ignoring the upup/downdown warning limits)"
		return result, nil

	case hasUpDown:
		value, err := calculateAverage(data, "up", "down")
		if err != nil {
			return result, err
		}
		result.BaseValue = value
		result.Changes, err = calculateChanges(data, value, false, 2)
		if err != nil {
			return result, err
		}
		result.Message = "upper/lower limit base value"
		return result, nil

	case hasUpUpDownDown:
		value, err := calculateAverage(data, "upup", "downdown")
		if err != nil {
			return result, err
		}
		result.BaseValue = value
		result.Changes, err = calculateChanges(data, value, true, 2)
		if err != nil {
			return result, err
		}
		result.Message = "upup/downdown warning limit base value"
		return result, nil

	default:
		return result, fmt.Errorf("unsupported data structure: %v", data)
	}
}

func calculateAverage(data map[string]any, key1, key2 string) (float64, error) {
	val1, err := getFloatValue(data, key1)
	if err != nil {
		return 0, err
	}

	val2, err := getFloatValue(data, key2)
	if err != nil {
		return 0, err
	}

	return (val1 + val2) / 2.0, nil
}

func calculateChanges(data map[string]any, baseValue float64, maxLimit bool, limitNum int) ([]float64, error) {
	results := make([]float64, 0, limitNum)
	switch limitNum {
	case 2:
		var key1, key2 string
		if maxLimit {
			key1 = "upup"
			key2 = "downdown"
		} else {
			key1 = "up"
			key2 = "down"
		}
		val1, err := getFloatValue(data, key1)
		if err != nil {
			return nil, err
		}
		results = append(results, val1-baseValue)

		val2, err := getFloatValue(data, key2)
		if err != nil {
			return nil, err
		}
		results = append(results, val2-baseValue)
	case 4:
		key1 := "up"
		key2 := "down"
		key3 := "upup"
		key4 := "downdown"

		val1, err := getFloatValue(data, key1)
		if err != nil {
			return nil, err
		}
		results = append(results, val1-baseValue)

		val2, err := getFloatValue(data, key2)
		if err != nil {
			return nil, err
		}
		results = append(results, val2-baseValue)

		val3, err := getFloatValue(data, key3)
		if err != nil {
			return nil, err
		}
		results = append(results, val3-baseValue)

		val4, err := getFloatValue(data, key4)
		if err != nil {
			return nil, err
		}
		results = append(results, val4-baseValue)
	}

	return results, nil
}

func getFloatValue(data map[string]any, key string) (float64, error) {
	value, exists := data[key]
	if !exists {
		return 0, fmt.Errorf("missing required key: %s", key)
	}

	switch v := value.(type) {
	case float64:
		return v, nil
	case int:
		return float64(v), nil
	case float32:
		return float64(v), nil
	default:
		return 0, fmt.Errorf("wrong value type for key %s, expected a numeric type, got %T", key, value)
	}
}

// HasKeys reports whether every given key exists in the map
func HasKeys(data map[string]any, keys ...string) bool {
	for _, key := range keys {
		if _, exists := data[key]; !exists {
			return false
		}
	}
	return true
}

func calculateEdgeValue(edge any) (float64, error) {
	edgeStr, ok := edge.(string)
	if !ok {
		return 0, fmt.Errorf("wrong type for the edge field, expected string, got %T", edge)
	}

	switch edgeStr {
	case "raising":
		return 1.0, nil
	case "falling":
		return 0.0, nil
	default:
		return 0, fmt.Errorf("unsupported edge value: %s", edgeStr)
	}
}
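As a worked example of the limit math above: with `up = 110` and `down = 90`, `calculateAverage` yields a base of `(110 + 90) / 2 = 100`, and `calculateChanges` turns the absolute limits into offsets `[10, -10]`. A sketch exercising this, assuming it is dropped into the same `util` package:

```go
package util

import "fmt"

// exampleBaseValue runs the up/down case end to end.
func exampleBaseValue() {
	cause := map[string]any{"up": 110.0, "down": 90.0}
	res, err := calculateBaseValueEnhanced(cause)
	if err != nil {
		fmt.Println(err)
		return
	}
	// expected: base=100.0 changes=[10 -10] type=TE
	fmt.Printf("base=%.1f changes=%v type=%s\n", res.BaseValue, res.Changes, res.BaseType)
}
```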
@ -0,0 +1,27 @@
// Package util provides some utility functions
package util

import (
	"context"
	"time"

	"github.com/redis/go-redis/v9"
)

// InitRedisClient initializes and returns a redis client
func InitRedisClient(redisAddr string) *redis.Client {
	rdb := redis.NewClient(&redis.Options{
		Addr:     redisAddr,
		Password: "",
		DB:       0,
	})

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	_, err := rdb.Ping(ctx).Result()
	if err != nil {
		// note: the ping error is swallowed and the caller receives nil
		return nil
	}
	return rdb
}
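Because `InitRedisClient` swallows the ping error and returns `nil`, callers must nil-check before use. A minimal guard at a hypothetical call site:

```go
package main

import (
	"log"

	"modelRT/deploy/redis-test-data/util"
)

func main() {
	rdb := util.InitRedisClient("localhost:6379")
	if rdb == nil {
		// InitRedisClient returns nil when the initial ping fails
		log.Fatal("redis is unreachable, aborting")
	}
	defer rdb.Close()
}
```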
@ -0,0 +1,36 @@
// Package diagram provides diagram data structures and operations
package diagram

import (
	"context"

	"github.com/redis/go-redis/v9"
)

// RedisClient defines a struct for accessing redis data that does not require distributed locks
type RedisClient struct {
	Client *redis.Client
}

// NewRedisClient creates a new redis client instance
func NewRedisClient() *RedisClient {
	return &RedisClient{
		Client: GetRedisClientInstance(),
	}
}

// QueryByZRangeByLex queries real-time data from a redis zset; note that ByLex
// is false below, so this is effectively a plain index-range query with scores
func (rc *RedisClient) QueryByZRangeByLex(ctx context.Context, key string, size int64) ([]redis.Z, error) {
	client := rc.Client
	args := redis.ZRangeArgs{
		Key:     key,
		Start:   0,
		Stop:    size,
		ByScore: false,
		ByLex:   false,
		Rev:     false,
		Offset:  0,
		Count:   0,
	}
	return client.ZRangeArgsWithScores(ctx, args).Result()
}
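A usage sketch for the query above, assuming it sits in the same `diagram` package and that `GetRedisClientInstance` has been initialized elsewhere; the key is a placeholder following the `station:device:phasor:channel` shape built in the util package:

```go
package diagram

import (
	"context"
	"fmt"
)

// exampleQuery reads the first 100 entries of a measurement ZSet.
func exampleQuery(ctx context.Context) error {
	rc := NewRedisClient()
	// Stop is an inclusive index, so size=99 returns entries 0..99
	zs, err := rc.QueryByZRangeByLex(ctx, "station_1:device_1:phasor:channel_1", 99)
	if err != nil {
		return err
	}
	for _, z := range zs {
		// Member holds the nanosecond timestamp string, Score the sampled value
		fmt.Printf("ts=%v value=%.4f\n", z.Member, z.Score)
	}
	return nil
}
```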
@ -14,6 +14,7 @@ import (
 // RedisSet defines the encapsulation struct of redis hash type
 type RedisSet struct {
 	ctx           context.Context
+	key           string
 	rwLocker      *locker.RedissionRWLocker
 	storageClient *redis.Client
 	logger        *zap.Logger
@ -24,6 +25,7 @@ func NewRedisSet(ctx context.Context, setKey string, lockLeaseTime uint64, needR
 	token := ctx.Value("client_token").(string)
 	return &RedisSet{
 		ctx:           ctx,
+		key:           setKey,
 		rwLocker:      locker.InitRWLocker(setKey, token, lockLeaseTime, needRefresh),
 		storageClient: GetRedisClientInstance(),
 		logger:        logger.GetLoggerInstance(),
@ -31,34 +33,34 @@ func NewRedisSet(ctx context.Context, setKey string, lockLeaseTime uint64, needR
 }

 // SADD define func of add redis set by members
-func (rs *RedisSet) SADD(setKey string, members ...interface{}) error {
+func (rs *RedisSet) SADD(members ...any) error {
 	err := rs.rwLocker.WLock(rs.ctx)
 	if err != nil {
-		logger.Error(rs.ctx, "lock wLock by setKey failed", "set_key", setKey, "error", err)
+		logger.Error(rs.ctx, "lock wLock by setKey failed", "set_key", rs.key, "error", err)
 		return err
 	}
 	defer rs.rwLocker.UnWLock(rs.ctx)

-	err = rs.storageClient.SAdd(rs.ctx, setKey, members).Err()
+	err = rs.storageClient.SAdd(rs.ctx, rs.key, members).Err()
 	if err != nil {
-		logger.Error(rs.ctx, "add set by memebers failed", "set_key", setKey, "members", members, "error", err)
+		logger.Error(rs.ctx, "add set by memebers failed", "set_key", rs.key, "members", members, "error", err)
 		return err
 	}
 	return nil
 }

 // SREM define func of remove the specified members from redis set by key
-func (rs *RedisSet) SREM(setKey string, members ...interface{}) error {
+func (rs *RedisSet) SREM(members ...any) error {
 	err := rs.rwLocker.WLock(rs.ctx)
 	if err != nil {
-		logger.Error(rs.ctx, "lock wLock by setKey failed", "set_key", setKey, "error", err)
+		logger.Error(rs.ctx, "lock wLock by setKey failed", "set_key", rs.key, "error", err)
 		return err
 	}
 	defer rs.rwLocker.UnWLock(rs.ctx)

-	count, err := rs.storageClient.SRem(rs.ctx, setKey, members).Result()
+	count, err := rs.storageClient.SRem(rs.ctx, rs.key, members).Result()
 	if err != nil || count != int64(len(members)) {
-		logger.Error(rs.ctx, "rem members from set failed", "set_key", setKey, "members", members, "error", err)
+		logger.Error(rs.ctx, "rem members from set failed", "set_key", rs.key, "members", members, "error", err)

 		return fmt.Errorf("rem members from set failed:%w", err)
 	}

@ -66,27 +68,27 @@ func (rs *RedisSet) SREM(setKey string, members ...interface{}) error {
 }

 // SMembers define func of get all memebers from redis set by key
-func (rs *RedisSet) SMembers(setKey string) ([]string, error) {
+func (rs *RedisSet) SMembers() ([]string, error) {
 	err := rs.rwLocker.RLock(rs.ctx)
 	if err != nil {
-		logger.Error(rs.ctx, "lock rLock by setKey failed", "set_key", setKey, "error", err)
+		logger.Error(rs.ctx, "lock rLock by setKey failed", "set_key", rs.key, "error", err)
 		return nil, err
 	}
 	defer rs.rwLocker.UnRLock(rs.ctx)

-	result, err := rs.storageClient.SMembers(rs.ctx, setKey).Result()
+	result, err := rs.storageClient.SMembers(rs.ctx, rs.key).Result()
 	if err != nil {
-		logger.Error(rs.ctx, "get all set field by hash key failed", "set_key", setKey, "error", err)
+		logger.Error(rs.ctx, "get all set field by hash key failed", "set_key", rs.key, "error", err)
 		return nil, err
 	}
 	return result, nil
 }

 // SIsMember define func of determine whether an member is in set by key
-func (rs *RedisSet) SIsMember(setKey string, member interface{}) (bool, error) {
+func (rs *RedisSet) SIsMember(member any) (bool, error) {
-	result, err := rs.storageClient.SIsMember(rs.ctx, setKey, member).Result()
+	result, err := rs.storageClient.SIsMember(rs.ctx, rs.key, member).Result()
 	if err != nil {
-		logger.Error(rs.ctx, "get all set field by hash key failed", "set_key", setKey, "error", err)
+		logger.Error(rs.ctx, "get all set field by hash key failed", "set_key", rs.key, "error", err)
 		return false, err
 	}
 	return result, nil
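After this change the set key is bound once at construction and every method reads `rs.key`, so call sites drop the per-call key argument. A hypothetical call site under the new signatures, assuming `NewRedisSet` lives in the same package as the `RedisClient` shown earlier; the key, token, and lease time are placeholders:

```go
package diagram

import (
	"context"
	"fmt"
)

// exampleRedisSet shows the post-change call shape: the set key is fixed at
// construction time and no longer passed to each method.
func exampleRedisSet(ctx context.Context) error {
	// NewRedisSet asserts that the context carries a "client_token" string
	ctx = context.WithValue(ctx, "client_token", "demo-token")
	rs := NewRedisSet(ctx, "diagram:page:1:components", 30, true)

	if err := rs.SADD("comp-a", "comp-b"); err != nil {
		return err
	}
	members, err := rs.SMembers()
	if err != nil {
		return err
	}
	fmt.Println("members:", members)
	return rs.SREM("comp-a", "comp-b")
}
```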
@ -20,7 +20,8 @@ type RedisZSet struct {
 }

 // NewRedisZSet define func of new redis zset instance
-func NewRedisZSet(ctx context.Context, key string, token string, lockLeaseTime uint64, needRefresh bool) *RedisZSet {
+func NewRedisZSet(ctx context.Context, key string, lockLeaseTime uint64, needRefresh bool) *RedisZSet {
+	token := ctx.Value("client_token").(string)
 	return &RedisZSet{
 		ctx:      ctx,
 		rwLocker: locker.InitRWLocker(key, token, lockLeaseTime, needRefresh),
73 docs/docs.go
@ -23,6 +23,70 @@ const docTemplate = `{
     "host": "{{.Host}}",
     "basePath": "{{.BasePath}}",
     "paths": {
+        "/data/realtime": {
+            "get": {
+                "description": "Continuously fetch real-time measurement point data from the dataRT service based on the user-supplied component token",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "RealTime Component"
+                ],
+                "summary": "Fetch real-time measurement point data",
+                "parameters": [
+                    {
+                        "type": "string",
+                        "description": "Unique measurement point identifier (e.g. grid_1:zone_1:station_1:transformfeeder1_220.I_A_rms)",
+                        "name": "token",
+                        "in": "query",
+                        "required": true
+                    },
+                    {
+                        "type": "integer",
+                        "description": "Query start time (Unix timestamp, e.g., 1761008266)",
+                        "name": "begin",
+                        "in": "query",
+                        "required": true
+                    },
+                    {
+                        "type": "integer",
+                        "description": "Query end time (Unix timestamp, e.g., 1761526675)",
+                        "name": "end",
+                        "in": "query",
+                        "required": true
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "real-time data returned successfully",
+                        "schema": {
+                            "allOf": [
+                                {
+                                    "$ref": "#/definitions/network.SuccessResponse"
+                                },
+                                {
+                                    "type": "object",
+                                    "properties": {
+                                        "payload": {
+                                            "$ref": "#/definitions/network.RealTimeDataPayload"
+                                        }
+                                    }
+                                }
+                            ]
+                        }
+                    },
+                    "400": {
+                        "description": "failed to return real-time data",
+                        "schema": {
+                            "$ref": "#/definitions/network.FailureResponse"
+                        }
+                    }
+                }
+            }
+        },
         "/measurement/recommend": {
             "get": {
                 "description": "Based on the user-supplied string, query Redis for possible measurement points or structure paths and provide a recommendation list.",
@ -164,6 +228,15 @@ const docTemplate = `{
                 }
             }
         },
+        "network.RealTimeDataPayload": {
+            "type": "object",
+            "properties": {
+                "sub_pos": {
+                    "description": "TODO add example tag",
+                    "type": "object"
+                }
+            }
+        },
         "network.SuccessResponse": {
             "type": "object",
             "properties": {
@ -17,6 +17,70 @@
     "host": "localhost:8080",
     "basePath": "/api/v1",
     "paths": {
+        "/data/realtime": {
+            "get": {
+                "description": "Continuously fetch real-time measurement point data from the dataRT service based on the user-supplied component token",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "RealTime Component"
+                ],
+                "summary": "Fetch real-time measurement point data",
+                "parameters": [
+                    {
+                        "type": "string",
+                        "description": "Unique measurement point identifier (e.g. grid_1:zone_1:station_1:transformfeeder1_220.I_A_rms)",
+                        "name": "token",
+                        "in": "query",
+                        "required": true
+                    },
+                    {
+                        "type": "integer",
+                        "description": "Query start time (Unix timestamp, e.g., 1761008266)",
+                        "name": "begin",
+                        "in": "query",
+                        "required": true
+                    },
+                    {
+                        "type": "integer",
+                        "description": "Query end time (Unix timestamp, e.g., 1761526675)",
+                        "name": "end",
+                        "in": "query",
+                        "required": true
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "real-time data returned successfully",
+                        "schema": {
+                            "allOf": [
+                                {
+                                    "$ref": "#/definitions/network.SuccessResponse"
+                                },
+                                {
+                                    "type": "object",
+                                    "properties": {
+                                        "payload": {
+                                            "$ref": "#/definitions/network.RealTimeDataPayload"
+                                        }
+                                    }
+                                }
+                            ]
+                        }
+                    },
+                    "400": {
+                        "description": "failed to return real-time data",
+                        "schema": {
+                            "$ref": "#/definitions/network.FailureResponse"
+                        }
+                    }
+                }
+            }
+        },
         "/measurement/recommend": {
             "get": {
                 "description": "Based on the user-supplied string, query Redis for possible measurement points or structure paths and provide a recommendation list.",
@ -158,6 +222,15 @@
                 }
             }
         },
+        "network.RealTimeDataPayload": {
+            "type": "object",
+            "properties": {
+                "sub_pos": {
+                    "description": "TODO add example tag",
+                    "type": "object"
+                }
+            }
+        },
         "network.SuccessResponse": {
             "type": "object",
             "properties": {
@ -34,6 +34,12 @@ definitions:
       example: trans
       type: string
     type: object
+  network.RealTimeDataPayload:
+    properties:
+      sub_pos:
+        description: TODO add example tag
+        type: object
+    type: object
   network.SuccessResponse:
     properties:
       code:
@ -58,6 +64,46 @@ info:
   title: ModelRT real-time model service API documentation
   version: "1.0"
 paths:
+  /data/realtime:
+    get:
+      consumes:
+      - application/json
+      description: Continuously fetch real-time measurement point data from the dataRT service based on the user-supplied component token
+      parameters:
+      - description: Unique measurement point identifier (e.g. grid_1:zone_1:station_1:transformfeeder1_220.I_A_rms)
+        in: query
+        name: token
+        required: true
+        type: string
+      - description: Query start time (Unix timestamp, e.g., 1761008266)
+        in: query
+        name: begin
+        required: true
+        type: integer
+      - description: Query end time (Unix timestamp, e.g., 1761526675)
+        in: query
+        name: end
+        required: true
+        type: integer
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: real-time data returned successfully
+          schema:
+            allOf:
+            - $ref: '#/definitions/network.SuccessResponse'
+            - properties:
+                payload:
+                  $ref: '#/definitions/network.RealTimeDataPayload'
+              type: object
+        "400":
+          description: failed to return real-time data
+          schema:
+            $ref: '#/definitions/network.FailureResponse'
+      summary: Fetch real-time measurement point data
+      tags:
+      - RealTime Component
   /measurement/recommend:
     get:
       consumes:
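For reference, a minimal client call against the endpoint documented above. The host and the token value are placeholders, and the real service may keep the connection open to stream data rather than return a single body:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Set("token", "grid_1:zone_1:station_1:transformfeeder1_220.I_A_rms")
	q.Set("begin", "1761008266")
	q.Set("end", "1761526675")

	// basePath is /api/v1 per the swagger spec above
	resp, err := http.Get("http://localhost:8080/api/v1/data/realtime?" + q.Encode())
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Status, string(body))
}
```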
@ -35,7 +35,7 @@ func QueryAlertEventHandler(c *gin.Context) {
|
||||||
resp := network.SuccessResponse{
|
resp := network.SuccessResponse{
|
||||||
Code: 0,
|
Code: 0,
|
||||||
Msg: "success",
|
Msg: "success",
|
||||||
PayLoad: map[string]interface{}{
|
Payload: map[string]any{
|
||||||
"events": events,
|
"events": events,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -68,7 +68,7 @@ func ComponentAnchorReplaceHandler(c *gin.Context) {
|
||||||
resp := network.SuccessResponse{
|
resp := network.SuccessResponse{
|
||||||
Code: http.StatusOK,
|
Code: http.StatusOK,
|
||||||
Msg: "success",
|
Msg: "success",
|
||||||
PayLoad: map[string]interface{}{
|
Payload: map[string]interface{}{
|
||||||
"uuid": request.UUID,
|
"uuid": request.UUID,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -41,7 +41,7 @@ func AttrDeleteHandler(c *gin.Context) {
|
||||||
c.JSON(http.StatusOK, network.FailureResponse{
|
c.JSON(http.StatusOK, network.FailureResponse{
|
||||||
Code: http.StatusBadRequest,
|
Code: http.StatusBadRequest,
|
||||||
Msg: err.Error(),
|
Msg: err.Error(),
|
||||||
PayLoad: map[string]interface{}{"attr_token": request.AttrToken},
|
Payload: map[string]interface{}{"attr_token": request.AttrToken},
|
||||||
})
|
})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
@ -49,7 +49,7 @@ func AttrDeleteHandler(c *gin.Context) {
|
||||||
c.JSON(http.StatusOK, network.SuccessResponse{
|
c.JSON(http.StatusOK, network.SuccessResponse{
|
||||||
Code: http.StatusOK,
|
Code: http.StatusOK,
|
||||||
Msg: "success",
|
Msg: "success",
|
||||||
PayLoad: map[string]interface{}{
|
Payload: map[string]interface{}{
|
||||||
"attr_token": request.AttrToken,
|
"attr_token": request.AttrToken,
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
|
|
||||||
|
|
@ -46,7 +46,7 @@ func AttrGetHandler(c *gin.Context) {
|
||||||
c.JSON(http.StatusOK, network.FailureResponse{
|
c.JSON(http.StatusOK, network.FailureResponse{
|
||||||
Code: http.StatusBadRequest,
|
Code: http.StatusBadRequest,
|
||||||
Msg: err.Error(),
|
Msg: err.Error(),
|
||||||
PayLoad: map[string]interface{}{"attr_token": request.AttrToken},
|
Payload: map[string]interface{}{"attr_token": request.AttrToken},
|
||||||
})
|
})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
@ -59,7 +59,7 @@ func AttrGetHandler(c *gin.Context) {
|
||||||
c.JSON(http.StatusOK, network.SuccessResponse{
|
c.JSON(http.StatusOK, network.SuccessResponse{
|
||||||
Code: http.StatusOK,
|
Code: http.StatusOK,
|
||||||
Msg: "success",
|
Msg: "success",
|
||||||
PayLoad: map[string]interface{}{
|
Payload: map[string]interface{}{
|
||||||
"attr_token": request.AttrToken,
|
"attr_token": request.AttrToken,
|
||||||
"attr_value": attrValue,
|
"attr_value": attrValue,
|
||||||
},
|
},
|
||||||
|
|
|
||||||
|
|
@ -43,7 +43,7 @@ func AttrSetHandler(c *gin.Context) {
|
||||||
c.JSON(http.StatusOK, network.FailureResponse{
|
c.JSON(http.StatusOK, network.FailureResponse{
|
||||||
Code: http.StatusBadRequest,
|
Code: http.StatusBadRequest,
|
||||||
Msg: err.Error(),
|
Msg: err.Error(),
|
||||||
PayLoad: map[string]interface{}{"attr_token": request.AttrToken},
|
Payload: map[string]interface{}{"attr_token": request.AttrToken},
|
||||||
})
|
})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
@ -51,7 +51,7 @@ func AttrSetHandler(c *gin.Context) {
|
||||||
c.JSON(http.StatusOK, network.SuccessResponse{
|
c.JSON(http.StatusOK, network.SuccessResponse{
|
||||||
Code: http.StatusOK,
|
Code: http.StatusOK,
|
||||||
Msg: "success",
|
Msg: "success",
|
||||||
PayLoad: map[string]interface{}{
|
Payload: map[string]interface{}{
|
||||||
"attr_token": request.AttrToken,
|
"attr_token": request.AttrToken,
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
|
|
||||||
|
|
@ -37,7 +37,7 @@ func CircuitDiagramCreateHandler(c *gin.Context) {
|
||||||
resp := network.FailureResponse{
|
resp := network.FailureResponse{
|
||||||
Code: http.StatusBadRequest,
|
Code: http.StatusBadRequest,
|
||||||
Msg: err.Error(),
|
Msg: err.Error(),
|
||||||
PayLoad: map[string]interface{}{
|
Payload: map[string]interface{}{
|
||||||
"page_id": request.PageID,
|
"page_id": request.PageID,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
@ -65,7 +65,7 @@ func CircuitDiagramCreateHandler(c *gin.Context) {
|
||||||
resp := network.FailureResponse{
|
resp := network.FailureResponse{
|
||||||
Code: http.StatusBadRequest,
|
Code: http.StatusBadRequest,
|
||||||
Msg: err.Error(),
|
Msg: err.Error(),
|
||||||
PayLoad: map[string]interface{}{
|
Payload: map[string]interface{}{
|
||||||
"topologic_info": topologicLink,
|
"topologic_info": topologicLink,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
@ -89,7 +89,7 @@ func CircuitDiagramCreateHandler(c *gin.Context) {
|
||||||
resp := network.FailureResponse{
|
resp := network.FailureResponse{
|
||||||
Code: http.StatusBadRequest,
|
Code: http.StatusBadRequest,
|
||||||
Msg: err.Error(),
|
Msg: err.Error(),
|
||||||
PayLoad: map[string]interface{}{
|
Payload: map[string]interface{}{
|
||||||
"topologic_infos": topologicCreateInfos,
|
"topologic_infos": topologicCreateInfos,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
@ -111,7 +111,7 @@ func CircuitDiagramCreateHandler(c *gin.Context) {
|
||||||
resp := network.FailureResponse{
|
resp := network.FailureResponse{
|
||||||
Code: http.StatusBadRequest,
|
Code: http.StatusBadRequest,
|
||||||
Msg: err.Error(),
|
Msg: err.Error(),
|
||||||
PayLoad: map[string]interface{}{
|
Payload: map[string]interface{}{
|
||||||
"component_infos": request.ComponentInfos,
|
"component_infos": request.ComponentInfos,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
@ -130,7 +130,7 @@ func CircuitDiagramCreateHandler(c *gin.Context) {
|
||||||
resp := network.FailureResponse{
|
resp := network.FailureResponse{
|
||||||
Code: http.StatusBadRequest,
|
Code: http.StatusBadRequest,
|
||||||
Msg: err.Error(),
|
Msg: err.Error(),
|
||||||
PayLoad: map[string]interface{}{
|
Payload: map[string]interface{}{
|
||||||
"uuid": info.UUID,
|
"uuid": info.UUID,
|
||||||
"component_params": info.Params,
|
"component_params": info.Params,
|
||||||
},
|
},
|
||||||
|
|
@ -152,7 +152,7 @@ func CircuitDiagramCreateHandler(c *gin.Context) {
|
||||||
resp := network.SuccessResponse{
|
resp := network.SuccessResponse{
|
||||||
Code: http.StatusOK,
|
Code: http.StatusOK,
|
||||||
Msg: "success",
|
Msg: "success",
|
||||||
PayLoad: map[string]interface{}{
|
Payload: map[string]interface{}{
|
||||||
"page_id": request.PageID,
|
"page_id": request.PageID,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -42,7 +42,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) {
|
||||||
resp := network.FailureResponse{
|
resp := network.FailureResponse{
|
||||||
Code: http.StatusBadRequest,
|
Code: http.StatusBadRequest,
|
||||||
Msg: err.Error(),
|
Msg: err.Error(),
|
||||||
PayLoad: map[string]interface{}{
|
Payload: map[string]interface{}{
|
||||||
"page_id": request.PageID,
|
"page_id": request.PageID,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
@ -70,7 +70,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) {
|
||||||
resp := network.FailureResponse{
|
resp := network.FailureResponse{
|
||||||
Code: http.StatusBadRequest,
|
Code: http.StatusBadRequest,
|
||||||
Msg: err.Error(),
|
Msg: err.Error(),
|
||||||
PayLoad: map[string]interface{}{
|
Payload: map[string]interface{}{
|
||||||
"topologic_info": topologicLink,
|
"topologic_info": topologicLink,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
@ -95,7 +95,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) {
|
||||||
resp := network.FailureResponse{
|
resp := network.FailureResponse{
|
||||||
Code: http.StatusBadRequest,
|
Code: http.StatusBadRequest,
|
||||||
Msg: err.Error(),
|
Msg: err.Error(),
|
||||||
PayLoad: map[string]interface{}{
|
Payload: map[string]interface{}{
|
||||||
"topologic_info": topologicDelInfo,
|
"topologic_info": topologicDelInfo,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
@ -112,7 +112,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) {
|
||||||
resp := network.FailureResponse{
|
resp := network.FailureResponse{
|
||||||
Code: http.StatusBadRequest,
|
Code: http.StatusBadRequest,
|
||||||
Msg: err.Error(),
|
Msg: err.Error(),
|
||||||
PayLoad: map[string]interface{}{
|
Payload: map[string]interface{}{
|
||||||
"topologic_info": topologicDelInfo,
|
"topologic_info": topologicDelInfo,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
@ -138,7 +138,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) {
|
||||||
resp := network.FailureResponse{
|
resp := network.FailureResponse{
|
||||||
Code: http.StatusBadRequest,
|
Code: http.StatusBadRequest,
|
||||||
Msg: err.Error(),
|
Msg: err.Error(),
|
||||||
PayLoad: map[string]interface{}{
|
Payload: map[string]interface{}{
|
||||||
"uuid": componentInfo.UUID,
|
"uuid": componentInfo.UUID,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
@ -162,7 +162,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) {
|
||||||
resp := network.FailureResponse{
|
resp := network.FailureResponse{
|
||||||
Code: http.StatusBadRequest,
|
Code: http.StatusBadRequest,
|
||||||
Msg: err.Error(),
|
Msg: err.Error(),
|
||||||
PayLoad: map[string]interface{}{
|
Payload: map[string]interface{}{
|
||||||
"uuid": componentInfo.UUID,
|
"uuid": componentInfo.UUID,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
@ -184,7 +184,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) {
|
||||||
resp := network.FailureResponse{
|
resp := network.FailureResponse{
|
||||||
Code: http.StatusBadRequest,
|
Code: http.StatusBadRequest,
|
||||||
Msg: err.Error(),
|
Msg: err.Error(),
|
||||||
PayLoad: map[string]interface{}{
|
Payload: map[string]interface{}{
|
||||||
"uuid": componentInfo.UUID,
|
"uuid": componentInfo.UUID,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
@ -205,7 +205,7 @@ func CircuitDiagramDeleteHandler(c *gin.Context) {
|
||||||
resp := network.SuccessResponse{
|
resp := network.SuccessResponse{
|
||||||
Code: http.StatusOK,
|
Code: http.StatusOK,
|
||||||
Msg: "success",
|
Msg: "success",
|
||||||
PayLoad: map[string]interface{}{
|
Payload: map[string]interface{}{
|
||||||
"page_id": request.PageID,
|
"page_id": request.PageID,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -33,7 +33,7 @@ func CircuitDiagramLoadHandler(c *gin.Context) {
|
||||||
resp := network.FailureResponse{
|
resp := network.FailureResponse{
|
||||||
Code: http.StatusBadRequest,
|
Code: http.StatusBadRequest,
|
||||||
Msg: err.Error(),
|
Msg: err.Error(),
|
||||||
PayLoad: map[string]interface{}{
|
Payload: map[string]interface{}{
|
||||||
"page_id": pageID,
|
"page_id": pageID,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
@ -48,16 +48,16 @@ func CircuitDiagramLoadHandler(c *gin.Context) {
|
||||||
resp := network.FailureResponse{
|
resp := network.FailureResponse{
|
||||||
Code: http.StatusBadRequest,
|
Code: http.StatusBadRequest,
|
||||||
Msg: err.Error(),
|
Msg: err.Error(),
|
||||||
PayLoad: map[string]interface{}{
|
Payload: map[string]interface{}{
|
||||||
"page_id": pageID,
|
"page_id": pageID,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
c.JSON(http.StatusOK, resp)
|
c.JSON(http.StatusOK, resp)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
payLoad := make(map[string]interface{})
|
payload := make(map[string]interface{})
|
||||||
payLoad["root_vertex"] = topologicInfo.RootVertex
|
payload["root_vertex"] = topologicInfo.RootVertex
|
||||||
payLoad["topologic"] = topologicInfo.VerticeLinks
|
payload["topologic"] = topologicInfo.VerticeLinks
|
||||||
|
|
||||||
componentParamMap := make(map[string]any)
|
componentParamMap := make(map[string]any)
|
||||||
for _, VerticeLink := range topologicInfo.VerticeLinks {
|
for _, VerticeLink := range topologicInfo.VerticeLinks {
|
||||||
|
|
@ -69,7 +69,7 @@ func CircuitDiagramLoadHandler(c *gin.Context) {
|
||||||
resp := network.FailureResponse{
|
resp := network.FailureResponse{
|
||||||
Code: http.StatusBadRequest,
|
Code: http.StatusBadRequest,
|
||||||
Msg: err.Error(),
|
Msg: err.Error(),
|
||||||
PayLoad: map[string]interface{}{
|
Payload: map[string]interface{}{
|
||||||
"uuid": componentUUID,
|
"uuid": componentUUID,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
@ -84,7 +84,7 @@ func CircuitDiagramLoadHandler(c *gin.Context) {
|
||||||
resp := network.FailureResponse{
|
resp := network.FailureResponse{
|
||||||
Code: http.StatusBadRequest,
|
Code: http.StatusBadRequest,
|
||||||
Msg: err.Error(),
|
Msg: err.Error(),
|
||||||
PayLoad: map[string]interface{}{
|
Payload: map[string]interface{}{
|
||||||
"uuid": componentUUID,
|
"uuid": componentUUID,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
@ -103,7 +103,7 @@ func CircuitDiagramLoadHandler(c *gin.Context) {
|
||||||
resp := network.FailureResponse{
|
resp := network.FailureResponse{
|
||||||
Code: http.StatusBadRequest,
|
Code: http.StatusBadRequest,
|
||||||
Msg: err.Error(),
|
Msg: err.Error(),
|
||||||
PayLoad: map[string]interface{}{
|
Payload: map[string]interface{}{
|
||||||
"uuid": topologicInfo.RootVertex,
|
"uuid": topologicInfo.RootVertex,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@ -118,7 +118,7 @@ func CircuitDiagramLoadHandler(c *gin.Context) {
    resp := network.FailureResponse{
        Code: http.StatusBadRequest,
        Msg:  err.Error(),
-       PayLoad: map[string]interface{}{
+       Payload: map[string]interface{}{
            "uuid": rootVertexUUID,
        },
    }

@ -127,12 +127,12 @@ func CircuitDiagramLoadHandler(c *gin.Context) {
    }
    componentParamMap[rootVertexUUID] = rootComponentParam

-   payLoad["component_params"] = componentParamMap
+   payload["component_params"] = componentParamMap

    resp := network.SuccessResponse{
        Code: http.StatusOK,
        Msg:  "success",
-       PayLoad: payLoad,
+       Payload: payload,
    }
    c.JSON(http.StatusOK, resp)
}

@ -35,7 +35,7 @@ func CircuitDiagramUpdateHandler(c *gin.Context) {
    resp := network.FailureResponse{
        Code: http.StatusBadRequest,
        Msg:  err.Error(),
-       PayLoad: map[string]interface{}{
+       Payload: map[string]interface{}{
            "page_id": request.PageID,
        },
    }

@ -52,7 +52,7 @@ func CircuitDiagramUpdateHandler(c *gin.Context) {
    resp := network.FailureResponse{
        Code: http.StatusBadRequest,
        Msg:  err.Error(),
-       PayLoad: map[string]interface{}{
+       Payload: map[string]interface{}{
            "topologic_info": topologicLink,
        },
    }

@ -75,7 +75,7 @@ func CircuitDiagramUpdateHandler(c *gin.Context) {
    resp := network.FailureResponse{
        Code: http.StatusBadRequest,
        Msg:  err.Error(),
-       PayLoad: map[string]interface{}{
+       Payload: map[string]interface{}{
            "topologic_info": topologicChangeInfo,
        },
    }

@ -92,7 +92,7 @@ func CircuitDiagramUpdateHandler(c *gin.Context) {
    resp := network.FailureResponse{
        Code: http.StatusBadRequest,
        Msg:  err.Error(),
-       PayLoad: map[string]interface{}{
+       Payload: map[string]interface{}{
            "topologic_info": topologicChangeInfo,
        },
    }

@ -109,7 +109,7 @@ func CircuitDiagramUpdateHandler(c *gin.Context) {
    resp := network.FailureResponse{
        Code: http.StatusBadRequest,
        Msg:  err.Error(),
-       PayLoad: map[string]interface{}{
+       Payload: map[string]interface{}{
            "page_id":        request.PageID,
            "component_info": request.ComponentInfos,
        },

@ -129,7 +129,7 @@ func CircuitDiagramUpdateHandler(c *gin.Context) {
    resp := network.FailureResponse{
        Code: http.StatusBadRequest,
        Msg:  err.Error(),
-       PayLoad: map[string]interface{}{
+       Payload: map[string]interface{}{
            "uuid":             info.UUID,
            "component_params": info.Params,
        },

@ -152,7 +152,7 @@ func CircuitDiagramUpdateHandler(c *gin.Context) {
    resp := network.SuccessResponse{
        Code: http.StatusOK,
        Msg:  "success",
-       PayLoad: map[string]interface{}{
+       Payload: map[string]interface{}{
            "page_id": request.PageID,
        },
    }
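Note: every hunk above is the same mechanical rename of the response field from PayLoad to Payload (and of the local payLoad variable). The struct definitions are not part of this diff; the sketch below is an assumption about their shape, inferred from the call sites and the "code"/"msg"/"payload" keys in the swagger examples elsewhere in this changeset:

```go
package network

// Assumed shape of the response types after the rename; only the exported
// field name changes, the json tags (and thus the wire format) stay the same.
type SuccessResponse struct {
	Code    int    `json:"code"`    // HTTP-style status code echoed in the body
	Msg     string `json:"msg"`     // human-readable result message
	Payload any    `json:"payload"` // handler-specific payload (map or typed struct)
}

// FailureResponse mirrors SuccessResponse; Payload carries the request
// parameters that help diagnose the failure.
type FailureResponse struct {
	Code    int    `json:"code"`
	Msg     string `json:"msg"`
	Payload any    `json:"payload,omitempty"`
}
```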
@ -0,0 +1,188 @@
// Package handler provides HTTP handlers for various endpoints.
package handler

import (
	"context"
	"errors"
	"fmt"
	"net/http"

	"modelRT/constants"
	"modelRT/database"
	"modelRT/diagram"
	"modelRT/logger"
	"modelRT/network"
	"modelRT/orm"

	"github.com/gin-gonic/gin"
)

var linkSetConfigs = map[int]linkSetConfig{
	// grid hierarchy
	0: {CurrKey: constants.RedisAllGridSetKey, PrevIsNil: true},
	// zone hierarchy
	1: {CurrKey: constants.RedisAllZoneSetKey, PrevKeyTemplate: constants.RedisSpecGridZoneSetKey},
	// station hierarchy
	2: {CurrKey: constants.RedisAllStationSetKey, PrevKeyTemplate: constants.RedisSpecZoneStationSetKey},
	// component nspath hierarchy
	3: {CurrKey: constants.RedisAllCompNSPathSetKey, PrevKeyTemplate: constants.RedisSpecStationCompNSPATHSetKey},
	// component tag hierarchy
	4: {CurrKey: constants.RedisAllCompTagSetKey, PrevKeyTemplate: constants.RedisSpecStationCompTagSetKey},
	// config hierarchy
	5: {CurrKey: constants.RedisAllConfigSetKey, PrevIsNil: true},
}

// DiagramNodeLinkHandler defines the diagram node link process api
func DiagramNodeLinkHandler(c *gin.Context) {
	var request network.DiagramNodeLinkRequest
	clientToken := c.GetString("client_token")
	if clientToken == "" {
		err := constants.ErrGetClientToken
		logger.Error(c, "failed to get client token from context", "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
		})
		return
	}

	if err := c.ShouldBindJSON(&request); err != nil {
		logger.Error(c, "failed to unmarshal diagram node process request", "error", err)

		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  "invalid request body format: " + err.Error(),
		})
		return
	}

	var err error
	pgClient := database.GetPostgresDBClient()
	nodeID := request.NodeID
	nodeLevel := request.NodeLevel
	action := request.Action
	prevNodeInfo, currNodeInfo, err := database.QueryNodeInfoByID(c, pgClient, nodeID, nodeLevel)
	if err != nil {
		logger.Error(c, "failed to query diagram node info by nodeID and level from postgres", "node_id", nodeID, "level", nodeLevel, "error", err)

		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  "failed to query diagram node info record: " + err.Error(),
			Payload: map[string]any{
				"node_id":    nodeID,
				"node_level": nodeLevel,
				"action":     action,
			},
		})
		return
	}

	prevLinkSet, currLinkSet := generateLinkSet(c, nodeLevel, prevNodeInfo)
	err = processLinkSetData(c, action, nodeLevel, prevLinkSet, currLinkSet, prevNodeInfo, currNodeInfo)
	if err != nil {
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
			Payload: map[string]any{
				"node_id":    nodeID,
				"node_level": nodeLevel,
				"action":     action,
			},
		})
		return
	}

	logger.Info(c, "process diagram node link success", "node_id", nodeID, "level", nodeLevel, "action", request.Action)

	c.JSON(http.StatusOK, network.SuccessResponse{
		Code: http.StatusOK,
		Msg:  "diagram node link process success",
		Payload: map[string]any{
			"node_id":    nodeID,
			"node_level": nodeLevel,
			"action":     action,
		},
	})
}

func generateLinkSet(ctx context.Context, level int, prevNodeInfo orm.CircuitDiagramNodeInterface) (*diagram.RedisSet, *diagram.RedisSet) {
	config, ok := linkSetConfigs[level]
	// level not supported
	if !ok {
		return nil, nil
	}

	currLinkSet := diagram.NewRedisSet(ctx, config.CurrKey, 0, false)
	if config.PrevIsNil {
		return nil, currLinkSet
	}

	prevLinkSetKey := fmt.Sprintf(config.PrevKeyTemplate, prevNodeInfo.GetTagName())
	prevLinkSet := diagram.NewRedisSet(ctx, prevLinkSetKey, 0, false)
	return prevLinkSet, currLinkSet
}

func processLinkSetData(ctx context.Context, action string, level int, prevLinkSet, currLinkSet *diagram.RedisSet, prevNodeInfo, currNodeInfo orm.CircuitDiagramNodeInterface) error {
	var currMember string
	var prevMember string
	var err1, err2 error

	switch level {
	case 0, 1, 2, 4:
		// grid, zone, station, component tag hierarchy
		currMember = currNodeInfo.GetTagName()
		if prevLinkSet != nil {
			prevMember = prevNodeInfo.GetTagName()
		}
	case 3:
		// component NSPath hierarchy
		currMember = currNodeInfo.GetNSPath()
		prevMember = prevNodeInfo.GetTagName()
	case 5:
		// TODO[NONEED-ISSUE] no add or delete requirement exists for this level yet #2
		err := fmt.Errorf("no add or delete operation is currently required for this level: %d", level)
		logger.Error(ctx, "no-op level for link process", "level", level, "action", action, "error", err)
		return nil
	default:
		err := fmt.Errorf("unsupported diagram node level: %d", level)
		logger.Error(ctx, "unsupported diagram node level for link process", "level", level, "action", action, "error", err)
		return err
	}

	switch action {
	case constants.SearchLinkAddAction:
		err1 = currLinkSet.SADD(currMember)
		if prevLinkSet != nil {
			err2 = prevLinkSet.SADD(prevMember)
		}
	case constants.SearchLinkDelAction:
		err1 = currLinkSet.SREM(currMember)
		if prevLinkSet != nil {
			err2 = prevLinkSet.SREM(prevMember)
		}
	default:
		err := constants.ErrUnsupportedLinkAction
		logger.Error(ctx, "unsupported diagram node link process action", "action", action, "error", err)
		return err
	}
	return processDiagramLinkError(err1, err2, action)
}

func processDiagramLinkError(err1, err2 error, action string) error {
	if err1 == nil && err2 == nil {
		return nil
	}
	var err error
	if err1 != nil && err2 != nil {
		err = errors.Join(err1, err2)
		err = fmt.Errorf("process diagram node link failed, currLinkSet %s operation and prevLinkSet %s operation failed: %w", action, action, err)
	} else if err1 != nil {
		err = fmt.Errorf("process diagram node currLinkSet link failed: currLinkSet %s operation failed: %w", action, err1)
	} else {
		err = fmt.Errorf("process diagram node prevLinkSet link failed: prevLinkSet %s operation failed: %w", action, err2)
	}
	return err
}

type linkSetConfig struct {
	CurrKey         string
	PrevKeyTemplate string
	PrevIsNil       bool
}
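generateLinkSet above resolves its Redis set keys through the linkSetConfigs table rather than a switch. A standalone sketch of that table-driven lookup, with hypothetical key constants standing in for the ones in modelRT/constants:

```go
package main

import "fmt"

// Hypothetical stand-ins for the Redis key constants used by linkSetConfigs.
const (
	allZoneSetKey      = "diagram:zones:all"
	specGridZoneSetKey = "diagram:grid:%s:zones"
)

type levelConfig struct {
	CurrKey         string // set holding every node of this level
	PrevKeyTemplate string // per-parent set, keyed by the parent's tag
	PrevIsNil       bool   // top-level hierarchies have no parent set
}

var configs = map[int]levelConfig{
	1: {CurrKey: allZoneSetKey, PrevKeyTemplate: specGridZoneSetKey},
}

// resolveKeys mirrors generateLinkSet: unknown levels yield nothing,
// and PrevIsNil levels only touch the "all" set.
func resolveKeys(level int, parentTag string) (prev, curr string, ok bool) {
	cfg, ok := configs[level]
	if !ok {
		return "", "", false
	}
	if cfg.PrevIsNil {
		return "", cfg.CurrKey, true
	}
	return fmt.Sprintf(cfg.PrevKeyTemplate, parentTag), cfg.CurrKey, true
}

func main() {
	prev, curr, ok := resolveKeys(1, "grid1")
	fmt.Println(prev, curr, ok) // diagram:grid:grid1:zones diagram:zones:all true
}
```

Adding a new hierarchy level then only means adding a table entry, which is the design choice the handler file makes as well.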
@ -0,0 +1,58 @@
// Package handler provides HTTP handlers for various endpoints.
package handler

import (
	"fmt"
	"net/http"
	"strconv"

	"modelRT/alert"
	"modelRT/constants"
	"modelRT/logger"
	"modelRT/network"

	"github.com/gin-gonic/gin"
)

// QueryHistoryDataHandler define query history data process API
func QueryHistoryDataHandler(c *gin.Context) {
	token := c.Query("token")
	beginStr := c.Query("begin")
	begin, err := strconv.Atoi(beginStr)
	if err != nil {
		logger.Error(c, "convert begin param from string to int failed", "error", err)

		resp := network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
		}
		c.JSON(http.StatusOK, resp)
		return
	}
	endStr := c.Query("end")
	end, err := strconv.Atoi(endStr)
	if err != nil {
		logger.Error(c, "convert end param from string to int failed", "error", err)

		resp := network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
		}
		c.JSON(http.StatusOK, resp)
		return
	}
	fmt.Println(token, begin, end)
	// TODO parse token to dataRT query params
	var level int
	var targetLevel constants.AlertLevel
	alertManger := alert.GetAlertMangerInstance()
	targetLevel = constants.AlertLevel(level)
	events := alertManger.GetRangeEventsByLevel(targetLevel)

	resp := network.SuccessResponse{
		Code: http.StatusOK,
		Msg:  "success",
		Payload: map[string]interface{}{
			"events": events,
		},
	}
	c.JSON(http.StatusOK, resp)
}
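Both begin and end above follow the same parse-and-reject pattern, and the early return after each failure response is what keeps the handler from answering twice. A minimal sketch of factoring that pattern into a helper (intQuery is hypothetical, not part of this changeset, and gin.H stands in for network.FailureResponse):

```go
package handler

import (
	"net/http"
	"strconv"

	"github.com/gin-gonic/gin"
)

// intQuery parses a required integer query parameter; on failure it writes
// the failure body itself and tells the caller to stop via ok=false.
func intQuery(c *gin.Context, name string) (val int, ok bool) {
	v, err := strconv.Atoi(c.Query(name))
	if err != nil {
		c.JSON(http.StatusOK, gin.H{
			"code": http.StatusBadRequest,
			"msg":  name + ": " + err.Error(),
		})
		return 0, false
	}
	return v, true
}
```

With it, the handler body would reduce to `begin, ok := intQuery(c, "begin")` followed by `if !ok { return }`, and the same two lines for end.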
@ -38,14 +38,14 @@ func MeasurementGetHandler(c *gin.Context) {
        return
    }

-   zset := diagram.NewRedisZSet(c, request.MeasurementToken, clientToken, 0, false)
+   zset := diagram.NewRedisZSet(c, request.MeasurementToken, 0, false)
    points, err := zset.ZRANGE(request.MeasurementToken, 0, -1)
    if err != nil {
        logger.Error(c, "failed to get measurement data from redis", "measurement_token", request.MeasurementToken, "error", err)
        c.JSON(http.StatusOK, network.FailureResponse{
            Code: http.StatusInternalServerError,
            Msg:  err.Error(),
-           PayLoad: map[string]interface{}{
+           Payload: map[string]any{
                "measurement_id":    request.MeasurementID,
                "measurement_token": request.MeasurementToken,
            },

@ -60,7 +60,7 @@ func MeasurementGetHandler(c *gin.Context) {
        c.JSON(http.StatusOK, network.FailureResponse{
            Code: http.StatusBadRequest,
            Msg:  err.Error(),
-           PayLoad: map[string]interface{}{
+           Payload: map[string]any{
                "measurement_id":    request.MeasurementID,
                "measurement_token": request.MeasurementToken,
                "measurement_value": points,

@ -72,7 +72,7 @@ func MeasurementGetHandler(c *gin.Context) {
        c.JSON(http.StatusOK, network.SuccessResponse{
            Code: http.StatusOK,
            Msg:  "success",
-           PayLoad: map[string]interface{}{
+           Payload: map[string]any{
                "measurement_id":    request.MeasurementID,
                "measurement_token": request.MeasurementToken,
                "measurement_info":  measurementInfo,
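The ZRANGE call above goes through the internal diagram.NewRedisZSet wrapper, whose client is not shown in this diff. For orientation, a sketch of the equivalent raw sorted-set read, assuming go-redis v9 (the wrapper may use a different client or options):

```go
package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// ZRANGE <token> 0 -1 WITHSCORES: the full member/score range, which is
	// what zset.ZRANGE(request.MeasurementToken, 0, -1) amounts to.
	points, err := rdb.ZRangeWithScores(ctx, "grid_1:zone_1:meas_token", 0, -1).Result()
	if err != nil {
		fmt.Println("zrange failed:", err)
		return
	}
	for _, p := range points {
		fmt.Printf("member=%v score=%v\n", p.Member, p.Score)
	}
}
```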
@ -7,6 +7,7 @@ import (
    "modelRT/logger"
    "modelRT/model"
    "modelRT/network"
+   "modelRT/util"

    "github.com/gin-gonic/gin"
)

@ -17,34 +18,36 @@ import (
// @Tags Measurement Recommend
// @Accept json
// @Produce json
-// @Param request body network.MeasurementRecommendRequest true "query input parameter, e.g. 'trans' or 'transformfeeder1_220.'"
+// @Param input query string true "recommendation keyword, e.g. 'grid1' or 'grid1.'" Example("grid1")
// @Success 200 {object} network.SuccessResponse{payload=network.MeasurementRecommendPayload} "recommendation list returned successfully"
//
// @Example 200 {
//   "code": 200,
//   "msg": "success",
//   "payload": {
-//     "input": "transformfeeder1_220.",
+//     "input": "grid1.zone1.station1.ns1.tag1.bay.",
//     "offset": 21,
//     "recommended_list": [
-//       "I_A_rms",
-//       "I_B_rms",
-//       "I_C_rms",
+//       "I11_A_rms",
+//       "I11_B_rms.",
+//       "I11_C_rms.",
//     ]
//   }
// }
//
// @Failure 400 {object} network.FailureResponse "failed to return the recommendation list"
+//
// @Example 400 {
//   "code": 400,
//   "msg": "failed to get recommend data from redis",
// }
+//
// @Router /measurement/recommend [get]
func MeasurementRecommendHandler(c *gin.Context) {
    var request network.MeasurementRecommendRequest

-   if err := c.ShouldBindJSON(&request); err != nil {
-       logger.Error(c, "failed to unmarshal measurement recommend request", "error", err)
+   if err := c.ShouldBindQuery(&request); err != nil {
+       logger.Error(c, "failed to bind measurement recommend request", "error", err)
        c.JSON(http.StatusOK, network.FailureResponse{
            Code: http.StatusBadRequest,
            Msg:  err.Error(),

@ -58,7 +61,7 @@ func MeasurementRecommendHandler(c *gin.Context) {
        c.JSON(http.StatusOK, network.FailureResponse{
            Code: http.StatusInternalServerError,
            Msg:  err.Error(),
-           PayLoad: map[string]any{
+           Payload: map[string]any{
                "input": request.Input,
            },
        })

@ -69,7 +72,7 @@ func MeasurementRecommendHandler(c *gin.Context) {
    if isFuzzy {
        var maxOffset int
        for index, recommend := range recommends {
-           offset := model.GetLongestCommonPrefixLength(request.Input, recommend)
+           offset := util.GetLongestCommonPrefixLength(request.Input, recommend)
            if index == 0 || offset > maxOffset {
                maxOffset = offset
            }

@ -78,7 +81,7 @@ func MeasurementRecommendHandler(c *gin.Context) {
    } else {
        var minOffset int
        for index, recommend := range recommends {
-           offset := model.GetLongestCommonPrefixLength(request.Input, recommend)
+           offset := util.GetLongestCommonPrefixLength(request.Input, recommend)
            if index == 0 || offset < minOffset {
                minOffset = offset
            }

@ -102,12 +105,7 @@ func MeasurementRecommendHandler(c *gin.Context) {
    c.JSON(http.StatusOK, network.SuccessResponse{
        Code: http.StatusOK,
        Msg:  "success",
-       // PayLoad: map[string]any{
-       //  "input": request.Input,
-       //  "offset": finalOffset,
-       //  "recommended_list": resultRecommends,
-       // },
-       PayLoad: &network.MeasurementRecommendPayload{
+       Payload: &network.MeasurementRecommendPayload{
            Input:           request.Input,
            Offset:          finalOffset,
            RecommendedList: resultRecommends,
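The recommend handler picks its offset from the longest common prefix between the input and each candidate. util.GetLongestCommonPrefixLength is referenced but not shown in this diff; a plausible sketch of such a helper (an assumption about its behavior, comparing bytes, so multi-byte runes split at a boundary would need extra care):

```go
package util

// GetLongestCommonPrefixLength returns the length of the longest common
// prefix of a and b, in bytes.
func GetLongestCommonPrefixLength(a, b string) int {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}
	i := 0
	for i < n && a[i] == b[i] {
		i++
	}
	return i
}
```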
@ -0,0 +1,135 @@
// Package handler provides HTTP handlers for various endpoints.
package handler

import (
	"errors"
	"fmt"
	"net/http"

	"modelRT/constants"
	"modelRT/database"
	"modelRT/diagram"
	"modelRT/logger"
	"modelRT/network"

	"github.com/gin-gonic/gin"
)

// MeasurementLinkHandler defines the measurement link process api
func MeasurementLinkHandler(c *gin.Context) {
	var request network.MeasurementLinkRequest
	clientToken := c.GetString("client_token")
	if clientToken == "" {
		err := constants.ErrGetClientToken
		logger.Error(c, "failed to get client token from context", "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
		})
		return
	}

	if err := c.ShouldBindJSON(&request); err != nil {
		logger.Error(c, "failed to unmarshal measurement process request", "error", err)

		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  "invalid request body format: " + err.Error(),
		})
		return
	}

	var err error
	pgClient := database.GetPostgresDBClient()
	measurementID := request.MeasurementID
	action := request.Action
	measurementInfo, err := database.QueryMeasurementByID(c, pgClient, measurementID)
	if err != nil {
		logger.Error(c, "failed to query measurement info by measurement id from postgres", "measurement_id", measurementID, "error", err)

		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  "failed to query measurement info record: " + err.Error(),
			Payload: map[string]any{
				"id":     measurementID,
				"action": action,
			},
		})
		return
	}

	componentInfo, err := database.QueryComponentByUUID(c, pgClient, measurementInfo.ComponentUUID)
	if err != nil {
		logger.Error(c, "failed to query component info by component uuid from postgres", "component_uuid", measurementInfo.ComponentUUID, "error", err)

		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  "failed to query component info record: " + err.Error(),
			Payload: map[string]any{
				"id":     measurementID,
				"action": action,
			},
		})
		return
	}

	allMeasSet := diagram.NewRedisSet(c, constants.RedisAllMeasTagSetKey, 0, false)
	compMeasLinkKey := fmt.Sprintf(constants.RedisSpecCompTagMeasSetKey, componentInfo.Tag)
	compMeasLinkSet := diagram.NewRedisSet(c, compMeasLinkKey, 0, false)

	switch action {
	case constants.SearchLinkAddAction:
		err1 := allMeasSet.SADD(measurementInfo.Tag)
		err2 := compMeasLinkSet.SADD(measurementInfo.Tag)
		err = processActionError(err1, err2, action)
		if err != nil {
			logger.Error(c, "add measurement link process operation failed", "measurement_id", measurementID, "action", action, "error", err)
		}
	case constants.SearchLinkDelAction:
		err1 := allMeasSet.SREM(measurementInfo.Tag)
		err2 := compMeasLinkSet.SREM(measurementInfo.Tag)
		err = processActionError(err1, err2, action)
		if err != nil {
			logger.Error(c, "del measurement link process operation failed", "measurement_id", measurementID, "action", action, "error", err)
		}
	default:
		err = constants.ErrUnsupportedLinkAction
		logger.Error(c, "unsupported measurement link process action", "measurement_id", measurementID, "action", action, "error", err)
	}

	if err != nil {
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
			Payload: map[string]any{
				"measurement_id": request.MeasurementID,
				"action":         request.Action,
			},
		})
		return
	}

	logger.Info(c, "process measurement link success", "measurement_id", measurementID, "action", request.Action)

	c.JSON(http.StatusOK, network.SuccessResponse{
		Code: http.StatusOK,
		Msg:  "measurement link process success",
		Payload: map[string]any{
			"measurement_id": measurementID,
			"action":         request.Action,
		},
	})
}

func processActionError(err1, err2 error, action string) error {
	if err1 == nil && err2 == nil {
		return nil
	}
	var err error
	if err1 != nil && err2 != nil {
		err = errors.Join(err1, err2)
		err = fmt.Errorf("process measurement link failed, allMeasSet %s operation and compMeasLinkSet %s operation failed: %w", action, action, err)
	} else if err1 != nil {
		err = fmt.Errorf("process measurement link failed: allMeasSet %s operation failed: %w", action, err1)
	} else {
		err = fmt.Errorf("process measurement link failed: compMeasLinkSet %s operation failed: %w", action, err2)
	}
	return err
}
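processActionError here and processDiagramLinkError in the node-link handler combine two independent Redis errors; the nil/nil guard matters because the final else branch would otherwise wrap a nil error and report a failure that never happened. The pattern in isolation:

```go
package main

import (
	"errors"
	"fmt"
)

// joinOpErrors mirrors processActionError: nil when both operations
// succeeded, a joined error when both failed, otherwise the single failure,
// each wrapped with the action name for context.
func joinOpErrors(err1, err2 error, action string) error {
	switch {
	case err1 == nil && err2 == nil:
		return nil
	case err1 != nil && err2 != nil:
		return fmt.Errorf("%s: both operations failed: %w", action, errors.Join(err1, err2))
	case err1 != nil:
		return fmt.Errorf("%s: first operation failed: %w", action, err1)
	default:
		return fmt.Errorf("%s: second operation failed: %w", action, err2)
	}
}

func main() {
	fmt.Println(joinOpErrors(nil, nil, "SADD"))                         // <nil>
	fmt.Println(joinOpErrors(errors.New("timeout"), nil, "SADD"))       // SADD: first operation failed: timeout
	fmt.Println(joinOpErrors(errors.New("a"), errors.New("b"), "SREM")) // SREM: both operations failed: ...
}
```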
@ -0,0 +1,450 @@
// Package handler provides HTTP handlers for various endpoints.
package handler

import (
	"context"
	"fmt"
	"maps"
	"net/http"
	"slices"
	"sort"
	"strconv"
	"time"

	"modelRT/constants"
	"modelRT/diagram"
	"modelRT/logger"
	"modelRT/model"
	"modelRT/network"

	"github.com/gin-gonic/gin"
	"github.com/gorilla/websocket"
)

var pullUpgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
	CheckOrigin: func(_ *http.Request) bool {
		return true
	},
}

// PullRealTimeDataHandler define real time data pull API
// @Summary real-time data pull websocket api
// @Description pull the real-time data corresponding to the clientID supplied by the user
// @Tags RealTime Component Websocket
// @Router /monitors/data/realtime/stream/:clientID [get]
func PullRealTimeDataHandler(c *gin.Context) {
	clientID := c.Param("clientID")
	if clientID == "" {
		err := fmt.Errorf("clientID is missing from the path")
		logger.Error(c, "query clientID from path failed", "error", err, "url", c.Request.RequestURI)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
		})
		return
	}

	conn, err := pullUpgrader.Upgrade(c.Writer, c.Request, nil)
	if err != nil {
		logger.Error(c, "upgrade http protocol to websocket protocol failed", "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
		})
		return
	}
	defer conn.Close()

	ctx, cancel := context.WithCancel(c.Request.Context())
	defer cancel()

	// TODO[BACKPRESSURE-ISSUE] define the fan-in model with a fixed large capacity for now #1
	fanInChan := make(chan network.RealTimePullTarget, 10000)
	go processTargetPolling(ctx, globalSubState, clientID, fanInChan)
	go readClientMessages(ctx, conn, clientID, cancel)

	bufferMaxSize := constants.SendMaxBatchSize
	sendMaxInterval := constants.SendMaxBatchInterval
	buffer := make([]network.RealTimePullTarget, 0, bufferMaxSize)
	ticker := time.NewTicker(sendMaxInterval)
	defer ticker.Stop()

	for {
		select {
		case targetData, ok := <-fanInChan:
			if !ok {
				logger.Error(ctx, "fanInChan closed unexpectedly", "client_id", clientID)
				return
			}
			buffer = append(buffer, targetData)

			if len(buffer) >= bufferMaxSize {
				// buffer is full, send immediately
				if err := sendAggregateRealTimeDataStream(conn, buffer); err != nil {
					logger.Error(ctx, "when buffer is full, send the real time aggregate data failed", "client_id", clientID, "buffer", buffer, "error", err)
					return
				}
				// reset buffer
				buffer = make([]network.RealTimePullTarget, 0, bufferMaxSize)
				// reset the ticker to prevent it from firing immediately after a size-based send
				ticker.Reset(sendMaxInterval)
			}
		case <-ticker.C:
			if len(buffer) > 0 {
				// when the ticker fires, send everything currently buffered
				if err := sendAggregateRealTimeDataStream(conn, buffer); err != nil {
					logger.Error(ctx, "when the ticker is triggered, send the real time aggregate data failed", "client_id", clientID, "buffer", buffer, "error", err)
					return
				}
				// reset buffer
				buffer = make([]network.RealTimePullTarget, 0, bufferMaxSize)
			}
		case <-ctx.Done():
			// send the last remaining data
			if err := sendAggregateRealTimeDataStream(conn, buffer); err != nil {
				logger.Error(ctx, "send the last remaining data failed", "client_id", clientID, "buffer", buffer, "error", err)
			}
			logger.Info(ctx, "PullRealTimeDataHandler exiting as context is done.", "client_id", clientID)
			return
		}
	}
}

// readClientMessages continuously listens for messages sent by the client (e.g. Ping/Pong, Close frames, or control commands)
func readClientMessages(ctx context.Context, conn *websocket.Conn, clientID string, cancel context.CancelFunc) {
	// conn.SetReadLimit(512)
	for {
		msgType, msgBytes, err := conn.ReadMessage()
		if err != nil {
			if websocket.IsCloseError(err, websocket.CloseNormalClosure) {
				logger.Info(ctx, "client actively and normally closed the connection", "client_id", clientID)
			} else if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {
				logger.Error(ctx, "an unexpected error occurred while reading the webSocket connection", "client_id", clientID, "error", err)
			} else {
				// handle other read errors (e.g., I/O errors)
				logger.Error(ctx, "an error occurred while reading the webSocket connection", "client_id", clientID, "error", err)
			}
			cancel()
			break
		}

		// process normal message from client
		if msgType == websocket.TextMessage || msgType == websocket.BinaryMessage {
			logger.Info(ctx, "read normal message from client", "client_id", clientID, "content", string(msgBytes))
		}
	}
}

// sendAggregateRealTimeDataStream pushes aggregated real-time data to the client
func sendAggregateRealTimeDataStream(conn *websocket.Conn, targetsData []network.RealTimePullTarget) error {
	if len(targetsData) == 0 {
		return nil
	}
	response := network.SuccessResponse{
		Code: 200,
		Msg:  "success",
		Payload: network.RealTimePullPayload{
			Targets: targetsData,
		},
	}
	return conn.WriteJSON(response)
}

// processTargetPolling processes the targets in the subscription map and continuously retrieves data from redis for each target
func processTargetPolling(ctx context.Context, s *SharedSubState, clientID string, fanInChan chan network.RealTimePullTarget) {
	// ensure the fanInChan will not leak
	defer close(fanInChan)
	logger.Info(ctx, fmt.Sprintf("start processing real time data polling for clientID:%s", clientID))
	stopChanMap := make(map[string]chan struct{})
	s.globalMutex.RLock()
	config, confExist := s.subMap[clientID]
	if !confExist {
		logger.Error(ctx, "cannot find config in the local stored map by clientID", "clientID", clientID)
		s.globalMutex.RUnlock()
		return
	}
	s.globalMutex.RUnlock()

	logger.Info(ctx, fmt.Sprintf("found subscription config for clientID:%s, start initial polling goroutines", clientID), "measurements_len", len(config.measurements))

	config.mutex.RLock()
	for interval, measurementTargets := range config.measurements {
		for _, target := range measurementTargets {
			// secondary check to prevent the target from already existing in the stopChanMap
			if _, exists := stopChanMap[target]; exists {
				logger.Warn(ctx, "target already exists in polling map, skipping start-up", "target", target)
				continue
			}

			targetContext, exist := config.targetContext[target]
			if !exist {
				logger.Error(ctx, "cannot find subscription node param in the param map", "target", target)
				continue
			}
			measurementInfo := targetContext.measurement

			queryGStopChan := make(chan struct{})
			// store stop channel with target into map
			stopChanMap[target] = queryGStopChan
			queryKey, err := model.GenerateMeasureIdentifier(measurementInfo.DataSource)
			if err != nil {
				logger.Error(ctx, "generate measurement identifier by data_source field failed", "data_source", measurementInfo.DataSource, "error", err)
				continue
			}

			pollingConfig := redisPollingConfig{
				targetID: target,
				queryKey: queryKey,
				interval: interval,
				dataSize: int64(measurementInfo.Size),
			}
			go realTimeDataQueryFromRedis(ctx, pollingConfig, fanInChan, queryGStopChan)
		}
	}
	config.mutex.RUnlock()

	for {
		select {
		case transportTargets, ok := <-config.noticeChan:
			if !ok {
				logger.Error(ctx, "notice channel was closed unexpectedly", "clientID", clientID)
				stopAllPolling(ctx, stopChanMap)
				return
			}
			config.mutex.Lock()
			switch transportTargets.OperationType {
			case constants.OpAppend:
				appendTargets(ctx, config, stopChanMap, fanInChan, transportTargets.Targets)
			case constants.OpRemove:
				removeTargets(ctx, stopChanMap, transportTargets.Targets)
			case constants.OpUpdate:
				updateTargets(ctx, config, stopChanMap, fanInChan, transportTargets.Targets)
			}
			config.mutex.Unlock()
		case <-ctx.Done():
			logger.Info(ctx, fmt.Sprintf("stop all data retrieval goroutines under this clientID:%s", clientID))
			stopAllPolling(ctx, stopChanMap)
			return
		}
	}
}

// appendTargets starts new polling goroutines for targets that were just added
func appendTargets(ctx context.Context, config *RealTimeSubConfig, stopChanMap map[string]chan struct{}, fanInChan chan network.RealTimePullTarget, appendTargets []string) {
	appendTargetsSet := make(map[string]struct{}, len(appendTargets))
	for _, target := range appendTargets {
		appendTargetsSet[target] = struct{}{}
	}

	for _, target := range appendTargets {
		targetContext, exists := config.targetContext[target]
		if !exists {
			logger.Error(ctx, "the append target does not exist in the real time data config context map, skipping the startup step", "target", target)
			continue
		}

		if _, exists := stopChanMap[target]; exists {
			logger.Error(ctx, "the append target already has a stop channel, skipping the startup step", "target", target)
			continue
		}

		queryGStopChan := make(chan struct{})
		stopChanMap[target] = queryGStopChan

		interval := targetContext.interval
		_, exists = config.measurements[interval]
		if !exists {
			logger.Error(ctx, "targetContext exists but measurements is missing, cannot update config", "target", target, "interval", interval)
			continue
		}
		delete(appendTargetsSet, target)

		queryKey, err := model.GenerateMeasureIdentifier(targetContext.measurement.DataSource)
		if err != nil {
			logger.Error(ctx, "the append target generate redis query key identifier failed", "target", target, "error", err)
			continue
		}
		pollingConfig := redisPollingConfig{
			targetID: target,
			queryKey: queryKey,
			interval: targetContext.interval,
			dataSize: int64(targetContext.measurement.Size),
		}
		go realTimeDataQueryFromRedis(ctx, pollingConfig, fanInChan, queryGStopChan)

		logger.Info(ctx, "started new polling goroutine for appended target", "target", target, "interval", targetContext.interval)
	}

	// allKeys := util.GetKeysFromSet(appendTargetsSet)
	allKeys := slices.Sorted(maps.Keys(appendTargetsSet))
	if len(allKeys) > 0 {
		logger.Warn(ctx, fmt.Sprintf("the polling goroutines for the following targets:%v were not started", allKeys))
		clear(appendTargetsSet)
	}
}

// updateTargets restarts polling goroutines for targets that were just updated
func updateTargets(ctx context.Context, config *RealTimeSubConfig, stopChanMap map[string]chan struct{}, fanInChan chan network.RealTimePullTarget, updateTargets []string) {
	updateTargetsSet := make(map[string]struct{}, len(updateTargets))
	for _, target := range updateTargets {
		updateTargetsSet[target] = struct{}{}
	}

	for _, target := range updateTargets {
		targetContext, exists := config.targetContext[target]
		if !exists {
			logger.Error(ctx, "the update target does not exist in the real time data config context map, skipping the restart step", "target", target)
			continue
		}

		if _, exist := stopChanMap[target]; !exist {
			logger.Error(ctx, "the update target does not have a stop channel, skipping the restart step", "target", target)
			continue
		}

		oldQueryGStopChan := stopChanMap[target]
		logger.Info(ctx, "stopped old polling goroutine for updated target", "target", target)
		close(oldQueryGStopChan)

		newQueryGStopChan := make(chan struct{})
		stopChanMap[target] = newQueryGStopChan

		interval := targetContext.interval
		_, exists = config.measurements[interval]
		if !exists {
			logger.Error(ctx, "targetContext exists but measurements is missing, cannot update config", "target", target, "interval", interval)
			continue
		}
		delete(updateTargetsSet, target)

		queryKey, err := model.GenerateMeasureIdentifier(targetContext.measurement.DataSource)
		if err != nil {
			logger.Error(ctx, "the update target generate redis query key identifier failed", "target", target, "error", err)
			continue
		}
		pollingConfig := redisPollingConfig{
			targetID: target,
			queryKey: queryKey,
			interval: targetContext.interval,
			dataSize: int64(targetContext.measurement.Size),
		}
		go realTimeDataQueryFromRedis(ctx, pollingConfig, fanInChan, newQueryGStopChan)

		logger.Info(ctx, "started new polling goroutine for updated target", "target", target, "interval", targetContext.interval)
	}

	// allKeys := util.GetKeysFromSet(updateTargetsSet)
	allKeys := slices.Sorted(maps.Keys(updateTargetsSet))
	if len(allKeys) > 0 {
		logger.Warn(ctx, fmt.Sprintf("the polling goroutines for the following targets:%v were not restarted", allKeys))
		clear(updateTargetsSet)
	}
}

// removeTargets stops running polling goroutines for targets that were removed
func removeTargets(ctx context.Context, stopChanMap map[string]chan struct{}, removeTargets []string) {
	for _, target := range removeTargets {
		stopChan, exists := stopChanMap[target]
		if !exists {
			logger.Warn(ctx, "removeTarget was not running, skipping remove operation", "target", target)
			continue
		}

		close(stopChan)
		delete(stopChanMap, target)
		logger.Info(ctx, "stopped polling goroutine for removed target", "target", target)
	}
}

// stopAllPolling stops all running query goroutines for a specific client
func stopAllPolling(ctx context.Context, stopChanMap map[string]chan struct{}) {
	for target, stopChan := range stopChanMap {
		logger.Info(ctx, fmt.Sprintf("stop the data fetching behavior for the corresponding target:%s", target))
		close(stopChan)
	}
	clear(stopChanMap)
}

// redisPollingConfig holds the params for querying real time data from redis
type redisPollingConfig struct {
	targetID string
	queryKey string
	interval string
	dataSize int64
}

func realTimeDataQueryFromRedis(ctx context.Context, config redisPollingConfig, fanInChan chan network.RealTimePullTarget, stopChan chan struct{}) {
	logger.Info(ctx, "start a redis query goroutine for real time data pulling", "targetID", config.targetID, "queryKey", config.queryKey, "interval", config.interval, "dataSize", config.dataSize)
	duration, err := time.ParseDuration(config.interval)
	if err != nil {
		logger.Error(ctx, "failed to parse the time string", "interval", config.interval, "error", err)
		return
	}
	ticker := time.NewTicker(duration)
	defer ticker.Stop()

	client := diagram.NewRedisClient()
	needPerformQuery := true
	for {
		if needPerformQuery {
			performQuery(ctx, client, config, fanInChan)
			needPerformQuery = false
		}

		select {
		case <-ticker.C:
			needPerformQuery = true
		case <-stopChan:
			logger.Info(ctx, "stop the redis query goroutine via a signal")
			return
		}
	}
}

func performQuery(ctx context.Context, client *diagram.RedisClient, config redisPollingConfig, fanInChan chan network.RealTimePullTarget) {
	members, err := client.QueryByZRangeByLex(ctx, config.queryKey, config.dataSize)
	if err != nil {
		logger.Error(ctx, "query real time data from redis failed", "key", config.queryKey, "error", err)
		return
	}

	pullDatas := make([]network.RealTimePullData, 0, len(members))
	for _, member := range members {
		pullDatas = append(pullDatas, network.RealTimePullData{
			Time:  member.Member.(string),
			Value: member.Score,
		})
	}
	sortPullDataByTimeAscending(ctx, pullDatas)
	targetData := network.RealTimePullTarget{
		ID:    config.targetID,
		Datas: pullDatas,
	}

	select {
	case fanInChan <- targetData:
	default:
		// TODO[BACKPRESSURE-ISSUE] fanInChan may fill up; when a flood of data would block the query loop, frames are dropped here — solve with proper backpressure #1
		logger.Warn(ctx, "fanInChan is full, dropping real-time data frame", "key", config.queryKey, "data_size", len(members))
	}
}

func sortPullDataByTimeAscending(ctx context.Context, data []network.RealTimePullData) {
	sort.Slice(data, func(i, j int) bool {
		t1, err1 := strconv.ParseInt(data[i].Time, 10, 64)
		if err1 != nil {
			logger.Error(ctx, "parsing real time data timestamp failed", "index", i, "time", data[i].Time, "error", err1)
			return false
		}

		t2, err2 := strconv.ParseInt(data[j].Time, 10, 64)
		if err2 != nil {
			logger.Error(ctx, "parsing real time data timestamp failed", "index", j, "time", data[j].Time, "error", err2)
			return true
		}
		return t1 < t2
	})
}
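The send loop in PullRealTimeDataHandler batches by size or time: flush as soon as SendMaxBatchSize items accumulate, or when SendMaxBatchInterval elapses with a partial buffer, resetting the ticker after a size-based flush so the two triggers do not double-fire. The same pattern stripped to its essentials:

```go
package main

import (
	"fmt"
	"time"
)

// batchLoop drains in and flushes batches of up to maxSize items, or
// whatever has accumulated when interval expires. flush is called
// synchronously, so reusing buf between flushes is safe here.
func batchLoop(in <-chan int, maxSize int, interval time.Duration, flush func([]int)) {
	buf := make([]int, 0, maxSize)
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case v, ok := <-in:
			if !ok {
				if len(buf) > 0 {
					flush(buf) // flush the tail on close
				}
				return
			}
			buf = append(buf, v)
			if len(buf) >= maxSize {
				flush(buf)
				buf = buf[:0]
				ticker.Reset(interval) // avoid an immediate timer flush right after a size flush
			}
		case <-ticker.C:
			if len(buf) > 0 {
				flush(buf)
				buf = buf[:0]
			}
		}
	}
}

func main() {
	in := make(chan int)
	go func() {
		for i := 0; i < 7; i++ {
			in <- i
		}
		close(in)
	}()
	batchLoop(in, 3, 50*time.Millisecond, func(b []int) { fmt.Println("flush:", b) })
}
```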
@ -2,43 +2,189 @@
 package handler

 import (
+   "context"
    "net/http"
-   "strconv"
+   "net/url"
+   "time"

-   "modelRT/alert"
-   "modelRT/constants"
    "modelRT/logger"
    "modelRT/network"

+   "github.com/bitly/go-simplejson"
    "github.com/gin-gonic/gin"
+   "github.com/gorilla/websocket"
+   jsoniter "github.com/json-iterator/go"
 )

+var wsUpgrader = websocket.Upgrader{
+   ReadBufferSize:  1024,
+   WriteBufferSize: 1024,
+   // CheckOrigin must return true, otherwise browsers will refuse the connection
+   CheckOrigin: func(_ *http.Request) bool {
+       // in production the Origin header should be checked more strictly
+       return true
+   },
+}
+
 // QueryRealTimeDataHandler define query real time data process API
+// @Summary fetch real-time measurement data
+// @Description continuously fetch real-time measurement data from the dataRT service, based on the component token supplied by the user
+// @Tags RealTime Component
+// @Accept json
+// @Produce json
+// @Param token query string true "unique measurement point identifier (e.g. grid_1:zone_1:station_1:transformfeeder1_220.I_A_rms)"
+// @Param begin query int true "query start time (Unix timestamp, e.g., 1761008266)"
+// @Param end query int true "query end time (Unix timestamp, e.g., 1761526675)"
+// @Success 200 {object} network.SuccessResponse{payload=network.RealTimeDataPayload} "real-time data returned successfully"
+//
+// @Example 200 {
+//   "code": 200,
+//   "msg": "success",
+//   "payload": {
+//     "input": "grid1.zone1.station1.ns1.tag1.transformfeeder1_220.I_A_rms",
+//     "sub_pos": [
+//       {
+//         "time": 1736305467506000000,
+//         "value": 1
+//       }
+//     ]
+//   }
+// }
+//
+// @Failure 400 {object} network.FailureResponse "failed to return real-time data"
+//
+// @Example 400 {
+//   "code": 400,
+//   "msg": "failed to get real time data from dataRT",
+// }
+//
+// @Router /data/realtime [get]
 func QueryRealTimeDataHandler(c *gin.Context) {
-   var targetLevel constants.AlertLevel
-
-   alertManger := alert.GetAlertMangerInstance()
-
-   levelStr := c.Query("level")
-   level, err := strconv.Atoi(levelStr)
-   if err != nil {
-       logger.Error(c, "convert alert level string to int failed", "error", err)
-
-       resp := network.FailureResponse{
-           Code: http.StatusBadRequest,
-           Msg:  err.Error(),
-       }
-       c.JSON(http.StatusOK, resp)
-   }
-   targetLevel = constants.AlertLevel(level)
-   events := alertManger.GetRangeEventsByLevel(targetLevel)
-
-   resp := network.SuccessResponse{
-       Code: http.StatusOK,
-       Msg:  "success",
-       PayLoad: map[string]interface{}{
-           "events": events,
-       },
-   }
-   c.JSON(http.StatusOK, resp)
+   var request network.RealTimeQueryRequest
+
+   if err := c.ShouldBindJSON(&request); err != nil {
+       logger.Error(c, "failed to unmarshal real time query request", "error", err)
+       c.JSON(http.StatusOK, network.FailureResponse{
+           Code: http.StatusBadRequest,
+           Msg:  err.Error(),
+       })
+       return
+   }
+
+   conn, err := wsUpgrader.Upgrade(c.Writer, c.Request, nil)
+   if err != nil {
+       logger.Error(c, "upgrade http protocol to websocket protocol failed", "error", err)
+       return
+   }
+   defer conn.Close()
+
+   // start a goroutine that opens a websocket session with the dataRT service and passes
+   // data back over a channel, while this handler keeps the websocket connection with the
+   // front-end UI alive in the local api
+   transportChannel := make(chan []any, 100)
+   closeChannel := make(chan struct{})
+   go receiveRealTimeDataByWebSocket(c, c.Request.URL.Query(), transportChannel, closeChannel)
+
+   for {
+       select {
+       case data := <-transportChannel:
+           respByte, err := jsoniter.Marshal(data)
+           if err != nil {
+               logger.Error(c, "marshal real time data to bytes failed", "error", err)
+               continue
+           }
+
+           err = conn.WriteMessage(websocket.TextMessage, respByte)
+           if err != nil {
+               logger.Error(c, "write message to websocket connection failed", "error", err)
+               continue
+           }
+       case <-closeChannel:
+           logger.Info(c, "data receiving goroutine has been closed")
+           // TODO tune this close deadline
+           deadline := time.Now().Add(5 * time.Second)
+           err := conn.WriteControl(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "the session ended normally"), deadline)
+           if err != nil {
+               logger.Error(c, "sending close control message failed", "error", err)
+           }
+           // gracefully close session processing
+           err = conn.Close()
+           if err != nil {
+               logger.Error(c, "websocket conn closed failed", "error", err)
+           }
+           logger.Info(c, "websocket connection closed successfully.")
+           return
+       }
+   }
 }
+
+// receiveRealTimeDataByWebSocket receives real time data from the dataRT service over a websocket connection
+func receiveRealTimeDataByWebSocket(ctx context.Context, params url.Values, transportChannel chan []any, closeChannel chan struct{}) {
+   serverURL := "ws://127.0.0.1:8888/ws/points"
+   u, err := url.Parse(serverURL)
+   if err != nil {
+       logger.Error(ctx, "parse url failed", "error", err)
+       return
+   }
+
+   q := u.Query()
+   for key, values := range params {
+       for _, value := range values {
+           q.Add(key, value)
+       }
+   }
+   u.RawQuery = q.Encode()
+   finalServerURL := u.String()
+
+   conn, resp, err := websocket.DefaultDialer.Dial(finalServerURL, nil)
+   if err != nil {
+       logger.Error(ctx, "dialing websocket server failed", "error", err)
+       if resp != nil {
+           logger.Error(ctx, "websocket server response", "status", resp.Status)
+       }
+       return
+   }
+   defer conn.Close()
+
+   for {
+       msgType, message, err := conn.ReadMessage()
+       if err != nil {
+           // check if it is an expected shutdown error
+           if websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) {
+               logger.Info(ctx, "connection closed normally")
+           } else {
+               logger.Error(ctx, "abnormal disconnection from websocket server", "err", err)
+           }
+           close(closeChannel)
+           break
+       }
+       logger.Info(ctx, "received info from dataRT server", "msg_type", messageTypeToString(msgType), "message", string(message))
+
+       js, err := simplejson.NewJson(message)
+       if err != nil {
+           logger.Error(ctx, "parse real time data from message failed", "message", string(message), "err", err)
+           continue
+       }
+
+       subPoss, err := js.Get("sub_pos").Array()
+       if err != nil {
+           logger.Error(ctx, "parse sub_pos struct from message json info failed", "sub_pos", js.Get("sub_pos"), "err", err)
+           continue
+       }
+       transportChannel <- subPoss
+   }
+}
+
+// messageTypeToString converts a websocket message type to a readable string
+func messageTypeToString(t int) string {
+   switch t {
+   case websocket.TextMessage:
+       return "TEXT"
+   case websocket.BinaryMessage:
+       return "BINARY"
+   case websocket.PingMessage:
+       return "PING"
+   case websocket.PongMessage:
+       return "PONG"
+   case websocket.CloseMessage:
+       return "CLOSE"
+   default:
+       return "UNKNOWN"
+   }
+}
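For completeness, a sketch of a Go client driving this streaming endpoint; the host, port, and query parameters are assumptions based on the @Router and @Param annotations above:

```go
package main

import (
	"fmt"
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	// Hypothetical local address; path and params mirror @Router /data/realtime.
	url := "ws://localhost:8080/data/realtime?token=grid_1:zone_1:station_1:transformfeeder1_220.I_A_rms&begin=1761008266&end=1761526675"
	conn, _, err := websocket.DefaultDialer.Dial(url, nil)
	if err != nil {
		log.Fatal("dial failed: ", err)
	}
	defer conn.Close()

	// Read frames until the server closes the session.
	for {
		_, msg, err := conn.ReadMessage()
		if err != nil {
			log.Println("read stopped:", err)
			return
		}
		fmt.Println("frame:", string(msg))
	}
}
```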
@ -0,0 +1,741 @@
|
||||||
|
// Package handler provides HTTP handlers for various endpoints.
|
||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"maps"
|
||||||
|
"net/http"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"modelRT/constants"
|
||||||
|
"modelRT/database"
|
||||||
|
"modelRT/logger"
|
||||||
|
"modelRT/network"
|
||||||
|
"modelRT/orm"
|
||||||
|
"modelRT/util"
|
||||||
|
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
|
"github.com/gofrs/uuid"
|
||||||
|
"gorm.io/gorm"
|
||||||
|
)
|
||||||
|
|
||||||
|
var globalSubState *SharedSubState
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
globalSubState = NewSharedSubState()
|
||||||
|
}
|
||||||
|
|
||||||
|
// RealTimeSubHandler define real time data subscriptions process API
|
||||||
|
// @Summary 开始或结束订阅实时数据
|
||||||
|
// @Description 根据用户输入的组件token,从 modelRT 服务中开始或结束对于量测节点的实时数据的订阅
|
||||||
|
// @Tags RealTime Component
|
||||||
|
// @Accept json
|
||||||
|
// @Produce json
|
||||||
|
// @Param request body network.RealTimeSubRequest true "量测节点实时数据订阅"
|
||||||
|
// @Success 200 {object} network.SuccessResponse{payload=network.RealTimeSubPayload} "订阅实时数据结果列表"
|
||||||
|
//
|
||||||
|
// @Example 200 {
|
||||||
|
// "code": 200,
|
||||||
|
// "msg": "success",
|
||||||
|
// "payload": {
|
||||||
|
// "targets": [
|
||||||
|
// {
|
||||||
|
// "id": "grid1.zone1.station1.ns1.tag1.bay.I11_C_rms",
|
||||||
|
// "code": "1001",
|
||||||
|
// "msg": "subscription success"
|
||||||
|
// },
|
||||||
|
// {
|
||||||
|
// "id": "grid1.zone1.station1.ns1.tag1.bay.I11_B_rms",
|
||||||
|
// "code": "1002",
|
||||||
|
// "msg": "subscription failed"
|
||||||
|
// }
|
||||||
|
// ]
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// @Failure 400 {object} network.FailureResponse{payload=network.RealTimeSubPayload} "订阅实时数据结果列表"
|
||||||
|
//
|
||||||
|
// @Example 400 {
|
||||||
|
// "code": 400,
|
||||||
|
// "msg": "failed to get recommend data from redis",
|
||||||
|
// "payload": {
|
||||||
|
// "targets": [
|
||||||
|
// {
|
||||||
|
// "id": "grid1.zone1.station1.ns1.tag1.bay.I11_A_rms",
|
||||||
|
// "code": "1002",
|
||||||
|
// "msg": "subscription failed"
|
||||||
|
// },
|
||||||
|
// {
|
||||||
|
// "id": "grid1.zone1.station1.ns1.tag1.bay.I11_B_rms",
|
||||||
|
// "code": "1002",
|
||||||
|
// "msg": "subscription failed"
|
||||||
|
// }
|
||||||
|
// ]
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// @Router /monitors/data/subscriptions [post]
func RealTimeSubHandler(c *gin.Context) {
	var request network.RealTimeSubRequest
	var subAction string
	var clientID string

	if err := c.ShouldBindJSON(&request); err != nil {
		logger.Error(c, "failed to unmarshal real time subscription request", "error", err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
		})
		return
	}

	if request.Action == constants.SubStartAction && request.ClientID == "" {
		subAction = request.Action
		id, err := uuid.NewV4()
		if err != nil {
			logger.Error(c, "failed to generate client id", "error", err)
			c.JSON(http.StatusOK, network.FailureResponse{
				Code: http.StatusBadRequest,
				Msg:  err.Error(),
			})
			return
		}
		clientID = id.String()
	} else if request.Action == constants.SubStartAction && request.ClientID != "" {
		subAction = constants.SubAppendAction
		clientID = request.ClientID
	} else if request.Action == constants.SubStopAction && request.ClientID != "" {
		subAction = request.Action
		clientID = request.ClientID
	} else if request.Action == constants.SubUpdateAction && request.ClientID != "" {
		subAction = request.Action
		clientID = request.ClientID
	}

	pgClient := database.GetPostgresDBClient()
	// open transaction
	tx := pgClient.Begin()
	defer tx.Commit()

	switch subAction {
	case constants.SubStartAction:
		results, err := globalSubState.CreateConfig(c, tx, clientID, request.Measurements)
		if err != nil {
			logger.Error(c, "create real time data subscription config failed", "error", err)
			c.JSON(http.StatusOK, network.FailureResponse{
				Code: http.StatusBadRequest,
				Msg:  err.Error(),
				Payload: network.RealTimeSubPayload{
					ClientID:      clientID,
					TargetResults: results,
				},
			})
			return
		}

		c.JSON(http.StatusOK, network.SuccessResponse{
			Code: http.StatusOK,
			Msg:  "success",
			Payload: network.RealTimeSubPayload{
				ClientID:      clientID,
				TargetResults: results,
			},
		})
		return
	case constants.SubStopAction:
		results, err := globalSubState.RemoveTargets(c, clientID, request.Measurements)
		if err != nil {
			logger.Error(c, "remove targets from real time data subscription config failed", "error", err)
			c.JSON(http.StatusOK, network.FailureResponse{
				Code: http.StatusBadRequest,
				Msg:  err.Error(),
				Payload: network.RealTimeSubPayload{
					ClientID:      clientID,
					TargetResults: results,
				},
			})
			return
		}

		c.JSON(http.StatusOK, network.SuccessResponse{
			Code: http.StatusOK,
			Msg:  "success",
			Payload: network.RealTimeSubPayload{
				ClientID:      clientID,
				TargetResults: results,
			},
		})
		return
	case constants.SubAppendAction:
		results, err := globalSubState.AppendTargets(c, tx, clientID, request.Measurements)
		if err != nil {
			logger.Error(c, "append targets to real time data subscription config failed", "error", err)
			c.JSON(http.StatusOK, network.FailureResponse{
				Code: http.StatusBadRequest,
				Msg:  err.Error(),
				Payload: network.RealTimeSubPayload{
					ClientID:      clientID,
					TargetResults: results,
				},
			})
			return
		}

		c.JSON(http.StatusOK, network.SuccessResponse{
			Code: http.StatusOK,
			Msg:  "success",
			Payload: network.RealTimeSubPayload{
				ClientID:      clientID,
				TargetResults: results,
			},
		})
		return
	case constants.SubUpdateAction:
		results, err := globalSubState.UpdateTargets(c, tx, clientID, request.Measurements)
		if err != nil {
			logger.Error(c, "update targets in real time data subscription config failed", "error", err)
			c.JSON(http.StatusOK, network.FailureResponse{
				Code: http.StatusBadRequest,
				Msg:  err.Error(),
				Payload: network.RealTimeSubPayload{
					ClientID:      clientID,
					TargetResults: results,
				},
			})
			return
		}

		c.JSON(http.StatusOK, network.SuccessResponse{
			Code: http.StatusOK,
			Msg:  "success",
			Payload: network.RealTimeSubPayload{
				ClientID:      clientID,
				TargetResults: results,
			},
		})
		return
	default:
		err := fmt.Errorf("%w: request action is %s", constants.ErrUnsupportedSubAction, request.Action)
		logger.Error(c, "unsupported action of real time data subscription request", "error", err)
		requestTargetsCount := processRealTimeRequestCount(request.Measurements)
		results := processRealTimeRequestTargets(request.Measurements, requestTargetsCount, err)
		c.JSON(http.StatusOK, network.FailureResponse{
			Code: http.StatusBadRequest,
			Msg:  err.Error(),
			Payload: network.RealTimeSubPayload{
				ClientID:      clientID,
				TargetResults: results,
			},
		})
		return
	}
}
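
For quick manual testing, a minimal client-side sketch of the start action. The JSON field names (`action`, `client_id`, `measurements`, `interval`, `targets`) and the literal `"start"` are inferred from the handler logic and the payload examples above, not confirmed against the network package. Omitting `client_id` drives the CreateConfig path; sending the returned `client_id` with another start request turns it into an append.

```go
body := []byte(`{
	"action": "start",
	"measurements": [
		{"interval": "1s", "targets": ["grid1.zone1.station1.ns1.tag1.bay.I11_C_rms"]}
	]
}`)
// assumed local address; the route comes from the @Router annotation above
resp, err := http.Post("http://localhost:8080/monitors/data/subscriptions",
	"application/json", bytes.NewReader(body))
```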

// RealTimeSubMeasurement define struct of real time subscription measurement
type RealTimeSubMeasurement struct {
	targets []string
}

// TargetPollingContext define struct of real time polling context for a subscribed target
type TargetPollingContext struct {
	interval    string
	measurement *orm.Measurement
}

// RealTimeSubConfig define struct of real time subscription config
type RealTimeSubConfig struct {
	noticeChan    chan *transportTargets
	mutex         sync.RWMutex
	measurements  map[string][]string
	targetContext map[string]*TargetPollingContext
}

// SharedSubState define struct of shared subscription state with mutex
type SharedSubState struct {
	subMap      map[string]*RealTimeSubConfig
	globalMutex sync.RWMutex
}

// NewSharedSubState define function to create new SharedSubState
func NewSharedSubState() *SharedSubState {
	return &SharedSubState{
		subMap: make(map[string]*RealTimeSubConfig),
	}
}
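
Note the two-level locking scheme: `globalMutex` only guards the clientID to config map, while each `RealTimeSubConfig` carries its own `mutex` for its measurement tables, so requests for different clients do not serialize on a single lock. The acquisition order used by the methods below, sketched in isolation:

```go
// resolve the config under the global read lock, release it,
// then take the per-config lock before touching its tables
s.globalMutex.RLock()
config, ok := s.subMap[clientID]
s.globalMutex.RUnlock()
if ok {
	config.mutex.Lock()
	// mutate config.measurements / config.targetContext here
	config.mutex.Unlock()
}
```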

// processAndValidateTargetsForStart define func to perform all database I/O operations in a lock-free state for start action
func processAndValidateTargetsForStart(ctx context.Context, tx *gorm.DB, measurements []network.RealTimeMeasurementItem, allReqTargetNum int) (
	[]network.TargetResult, []string,
	map[string][]string,
	map[string]*TargetPollingContext,
) {
	targetProcessResults := make([]network.TargetResult, 0, allReqTargetNum)
	newMeasMap := make(map[string][]string)
	successfulTargets := make([]string, 0, allReqTargetNum)
	newMeasContextMap := make(map[string]*TargetPollingContext)

	for _, measurementItem := range measurements {
		interval := measurementItem.Interval
		for _, target := range measurementItem.Targets {
			var targetResult network.TargetResult
			targetResult.ID = target
			targetModel, err := database.ParseDataIdentifierToken(ctx, tx, target)
			if err != nil {
				logger.Error(ctx, "parse data identity token failed", "error", err, "identity_token", target)
				targetResult.Code = constants.SubFailedCode
				targetResult.Msg = fmt.Sprintf("%s: %s", constants.SubFailedMsg, err.Error())
				targetProcessResults = append(targetProcessResults, targetResult)
				continue
			}
			targetResult.Code = constants.SubSuccessCode
			targetResult.Msg = constants.SubSuccessMsg
			targetProcessResults = append(targetProcessResults, targetResult)
			successfulTargets = append(successfulTargets, target)

			if _, exists := newMeasMap[interval]; !exists {
				newMeasMap[interval] = make([]string, 0, len(measurementItem.Targets))
			}

			newMeasMap[interval] = append(newMeasMap[interval], target)
			newMeasContextMap[target] = &TargetPollingContext{
				interval:    interval,
				measurement: targetModel.GetMeasurementInfo(),
			}
		}
	}
	return targetProcessResults, successfulTargets, newMeasMap, newMeasContextMap
}
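
To make the four return values concrete, a sketch of what the function yields for a single-item request with two resolvable targets (the names are invented):

```go
// request: []network.RealTimeMeasurementItem{{Interval: "1s", Targets: []string{"t1", "t2"}}}
//
// targetProcessResults -> [{ID: "t1", Code: SubSuccessCode, ...}, {ID: "t2", Code: SubSuccessCode, ...}]
// successfulTargets    -> ["t1", "t2"]
// newMeasMap           -> {"1s": ["t1", "t2"]}
// newMeasContextMap    -> {"t1": &TargetPollingContext{interval: "1s", ...}, "t2": ...}
```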

// processAndValidateTargetsForUpdate define func to perform all database I/O operations in a lock-free state for update action
func processAndValidateTargetsForUpdate(ctx context.Context, tx *gorm.DB, config *RealTimeSubConfig, measurements []network.RealTimeMeasurementItem, allReqTargetNum int) (
	[]network.TargetResult, []string,
	map[string][]string,
	map[string]*TargetPollingContext,
) {
	targetProcessResults := make([]network.TargetResult, 0, allReqTargetNum)
	newMeasMap := make(map[string][]string)
	successfulTargets := make([]string, 0, allReqTargetNum)
	newMeasContextMap := make(map[string]*TargetPollingContext)

	for _, measurementItem := range measurements {
		interval := measurementItem.Interval
		for _, target := range measurementItem.Targets {
			targetResult := network.TargetResult{ID: target}
			if _, exist := config.targetContext[target]; !exist {
				err := fmt.Errorf("target %s does not exist in subscription list", target)
				logger.Error(ctx, "update target does not exist in subscription list", "error", err, "target", target)
				targetResult.Code = constants.UpdateSubFailedCode
				targetResult.Msg = fmt.Sprintf("%s: %s", constants.UpdateSubFailedMsg, err.Error())
				targetProcessResults = append(targetProcessResults, targetResult)
				continue
			}

			targetModel, err := database.ParseDataIdentifierToken(ctx, tx, target)
			if err != nil {
				logger.Error(ctx, "parse data identity token failed", "error", err, "identity_token", target)
				targetResult.Code = constants.UpdateSubFailedCode
				targetResult.Msg = fmt.Sprintf("%s: %s", constants.UpdateSubFailedMsg, err.Error())
				targetProcessResults = append(targetProcessResults, targetResult)
				continue
			}

			targetResult.Code = constants.UpdateSubSuccessCode
			targetResult.Msg = constants.UpdateSubSuccessMsg
			targetProcessResults = append(targetProcessResults, targetResult)
			successfulTargets = append(successfulTargets, target)

			if _, exists := newMeasMap[interval]; !exists {
				newMeasMap[interval] = make([]string, 0, len(measurementItem.Targets))
			}

			newMeasMap[interval] = append(newMeasMap[interval], target)
			newMeasContextMap[target] = &TargetPollingContext{
				interval:    interval,
				measurement: targetModel.GetMeasurementInfo(),
			}
		}
	}
	return targetProcessResults, successfulTargets, newMeasMap, newMeasContextMap
}

// mergeMeasurementsForStart define func to merge newMeasurementsMap into existingMeasurementsMap for start action
func mergeMeasurementsForStart(config *RealTimeSubConfig, newMeasurements map[string][]string, newMeasurementsContextMap map[string]*TargetPollingContext) []string {
	allDuplicates := make([]string, 0)
	for interval, newMeas := range newMeasurements {
		if existingMeas, exists := config.measurements[interval]; exists {
			// deduplication operations prevent duplicate subscriptions to the same measurement node
			deduplicated, duplicates := util.DeduplicateAndReportDuplicates(existingMeas, newMeas)

			if len(duplicates) > 0 {
				for _, duplicate := range duplicates {
					delete(newMeasurementsContextMap, duplicate)
				}
				allDuplicates = append(allDuplicates, duplicates...)
			}

			if len(deduplicated) > 0 {
				existingMeas = append(existingMeas, deduplicated...)
				config.measurements[interval] = existingMeas
				maps.Copy(config.targetContext, newMeasurementsContextMap)
			}
		} else {
			// an interval not seen before carries no duplicates; register it directly
			config.measurements[interval] = newMeas
			maps.Copy(config.targetContext, newMeasurementsContextMap)
		}
	}
	return allDuplicates
}

// mergeMeasurementsForUpdate define func to merge newMeasurementsMap into existingMeasurementsMap for update action
func mergeMeasurementsForUpdate(config *RealTimeSubConfig, newMeasurements map[string][]string, newMeasurementsContextMap map[string]*TargetPollingContext) ([]string, error) {
	allDuplicates := make([]string, 0)
	delMeasMap := make(map[string][]string)
	for _, newMeas := range newMeasurements {
		for _, measurement := range newMeas {
			oldInterval := config.targetContext[measurement].interval
			delMeasMap[oldInterval] = append(delMeasMap[oldInterval], measurement)
		}
	}

	for interval, delMeas := range delMeasMap {
		existingMeas, exist := config.measurements[interval]
		if !exist {
			return nil, fmt.Errorf("can not find existing measurements in %s interval", interval)
		}

		config.measurements[interval] = util.RemoveTargetsFromSliceSimple(existingMeas, delMeas)
	}

	for interval, newMeas := range newMeasurements {
		if existingMeas, exists := config.measurements[interval]; exists {
			deduplicated, duplicates := util.DeduplicateAndReportDuplicates(existingMeas, newMeas)

			if len(duplicates) > 0 {
				for _, duplicate := range duplicates {
					delete(newMeasurementsContextMap, duplicate)
				}
				allDuplicates = append(allDuplicates, duplicates...)
			}

			if len(deduplicated) > 0 {
				existingMeas = append(existingMeas, deduplicated...)
				config.measurements[interval] = existingMeas
				maps.Copy(config.targetContext, newMeasurementsContextMap)
			}
		} else {
			// the update moves targets onto an interval with no existing entry; register it directly
			config.measurements[interval] = newMeas
			maps.Copy(config.targetContext, newMeasurementsContextMap)
		}
	}
	return allDuplicates, nil
}

// CreateConfig define function to create config in SharedSubState
func (s *SharedSubState) CreateConfig(ctx context.Context, tx *gorm.DB, clientID string, measurements []network.RealTimeMeasurementItem) ([]network.TargetResult, error) {
	requestTargetsCount := processRealTimeRequestCount(measurements)
	targetProcessResults, _, newMeasurementsMap, measurementContexts := processAndValidateTargetsForStart(ctx, tx, measurements, requestTargetsCount)

	s.globalMutex.Lock()
	defer s.globalMutex.Unlock()
	if _, exist := s.subMap[clientID]; exist {
		err := fmt.Errorf("clientID %s already exists. use AppendTargets to modify existing config", clientID)
		logger.Error(ctx, "clientID already exists. use AppendTargets to modify existing config", "error", err)
		return targetProcessResults, err
	}

	config := &RealTimeSubConfig{
		noticeChan:    make(chan *transportTargets, constants.NoticeChanCap),
		measurements:  newMeasurementsMap,
		targetContext: measurementContexts,
	}
	s.subMap[clientID] = config
	return targetProcessResults, nil
}

// AppendTargets define function to append targets in SharedSubState
func (s *SharedSubState) AppendTargets(ctx context.Context, tx *gorm.DB, clientID string, measurements []network.RealTimeMeasurementItem) ([]network.TargetResult, error) {
	requestTargetsCount := processRealTimeRequestCount(measurements)

	s.globalMutex.RLock()
	config, exist := s.subMap[clientID]
	s.globalMutex.RUnlock()

	if !exist {
		err := fmt.Errorf("clientID %s not found. use CreateConfig to start a new config", clientID)
		logger.Error(ctx, "clientID not found. use CreateConfig to start a new config", "error", err)
		return processRealTimeRequestTargets(measurements, requestTargetsCount, err), err
	}

	targetProcessResults, successfulTargets, newMeasMap, newMeasContextMap := processAndValidateTargetsForStart(ctx, tx, measurements, requestTargetsCount)

	config.mutex.Lock()
	allDuplicates := mergeMeasurementsForStart(config, newMeasMap, newMeasContextMap)
	if len(allDuplicates) > 0 {
		logger.Warn(ctx, "some targets are duplicate and have been ignored in append operation", "clientID", clientID, "duplicates", allDuplicates)
		// process repeat targets in targetProcessResults and successfulTargets
		targetProcessResults, successfulTargets = filterAndDeduplicateRepeatTargets(targetProcessResults, successfulTargets, allDuplicates)
	}
	config.mutex.Unlock()

	if len(successfulTargets) > 0 {
		transport := &transportTargets{
			OperationType: constants.OpAppend,
			Targets:       successfulTargets,
		}
		config.noticeChan <- transport
	}

	return targetProcessResults, nil
}

// filterAndDeduplicateRepeatTargets marks duplicate targets as repeats in the result
// slice and drops them from the slice of successfully subscribed IDs
func filterAndDeduplicateRepeatTargets(resultsSlice []network.TargetResult, idsSlice []string, duplicates []string) ([]network.TargetResult, []string) {
	filteredIDs := make([]string, 0, len(idsSlice))
	set := make(map[string]struct{}, len(duplicates))
	for _, duplicate := range duplicates {
		set[duplicate] = struct{}{}
	}

	for index := range resultsSlice {
		if _, isTarget := set[resultsSlice[index].ID]; isTarget {
			resultsSlice[index].Code = constants.SubRepeatCode
			resultsSlice[index].Msg = constants.SubRepeatMsg
		}
	}

	for _, id := range idsSlice {
		if _, isTarget := set[id]; !isTarget {
			filteredIDs = append(filteredIDs, id)
		}
	}

	return resultsSlice, filteredIDs
}

// UpsertTargets define function to upsert targets in SharedSubState
func (s *SharedSubState) UpsertTargets(ctx context.Context, tx *gorm.DB, clientID string, measurements []network.RealTimeMeasurementItem) ([]network.TargetResult, error) {
	requestTargetsCount := processRealTimeRequestCount(measurements)
	targetProcessResults, successfulTargets, newMeasMap, newMeasContextMap := processAndValidateTargetsForStart(ctx, tx, measurements, requestTargetsCount)

	s.globalMutex.RLock()
	config, exist := s.subMap[clientID]
	s.globalMutex.RUnlock()

	var opType constants.TargetOperationType
	if exist {
		opType = constants.OpUpdate
		config.mutex.Lock()
		mergeMeasurementsForStart(config, newMeasMap, newMeasContextMap)
		config.mutex.Unlock()
	} else {
		opType = constants.OpAppend
		s.globalMutex.Lock()
		// re-check under the write lock: another goroutine may have created the config meanwhile
		if config, exist = s.subMap[clientID]; !exist {
			config = &RealTimeSubConfig{
				noticeChan:    make(chan *transportTargets, constants.NoticeChanCap),
				measurements:  newMeasMap,
				targetContext: newMeasContextMap,
			}
			s.subMap[clientID] = config
			s.globalMutex.Unlock()
		} else {
			s.globalMutex.Unlock()
			config.mutex.Lock()
			mergeMeasurementsForStart(config, newMeasMap, newMeasContextMap)
			config.mutex.Unlock()
		}
	}

	if len(successfulTargets) > 0 {
		transport := &transportTargets{
			OperationType: opType,
			Targets:       successfulTargets,
		}
		config.noticeChan <- transport
	}
	return targetProcessResults, nil
}

// RemoveTargets define function to remove targets in SharedSubState
func (s *SharedSubState) RemoveTargets(ctx context.Context, clientID string, measurements []network.RealTimeMeasurementItem) ([]network.TargetResult, error) {
	requestTargetsCount := processRealTimeRequestCount(measurements)
	targetProcessResults := make([]network.TargetResult, 0, requestTargetsCount)

	s.globalMutex.RLock()
	config, exist := s.subMap[clientID]
	s.globalMutex.RUnlock()
	if !exist {
		err := fmt.Errorf("clientID %s not found", clientID)
		logger.Error(ctx, "clientID not found in remove targets operation", "error", err)
		return processRealTimeRequestTargets(measurements, requestTargetsCount, err), err
	}

	var shouldRemoveClient bool
	// measurements is the list of items to be removed passed in the request
	transport := &transportTargets{
		OperationType: constants.OpRemove,
		Targets:       make([]string, 0, requestTargetsCount),
	}
	config.mutex.Lock()
	for _, measurement := range measurements {
		interval := measurement.Interval
		// measTargets is the locally running listener configuration
		measTargets, measExist := config.measurements[interval]
		if !measExist {
			logger.Error(ctx, fmt.Sprintf("measurement with interval %s not found under clientID %s", interval, clientID), "clientID", clientID, "interval", interval)
			for _, target := range measurement.Targets {
				targetProcessResults = append(targetProcessResults, network.TargetResult{
					ID:   target,
					Code: constants.CancelSubFailedCode,
					Msg:  constants.CancelSubFailedMsg,
				})
			}
			continue
		}

		targetsToRemoveMap := make(map[string]struct{})
		for _, target := range measurement.Targets {
			targetsToRemoveMap[target] = struct{}{}
		}

		var newTargets []string
		for _, existingTarget := range measTargets {
			if _, found := targetsToRemoveMap[existingTarget]; !found {
				newTargets = append(newTargets, existingTarget)
			} else {
				transport.Targets = append(transport.Targets, existingTarget)
				targetProcessResults = append(targetProcessResults, network.TargetResult{
					ID:   existingTarget,
					Code: constants.CancelSubSuccessCode,
					Msg:  constants.CancelSubSuccessMsg,
				})
				delete(targetsToRemoveMap, existingTarget)
				delete(config.targetContext, existingTarget)
			}
		}
		// write the trimmed slice back so the removal is persisted
		config.measurements[interval] = newTargets

		if len(newTargets) == 0 {
			delete(config.measurements, interval)
		}

		if len(config.measurements) == 0 {
			shouldRemoveClient = true
		}

		if len(targetsToRemoveMap) > 0 {
			err := fmt.Errorf("targets to remove were not found under clientID %s and interval %s", clientID, interval)
			for target := range targetsToRemoveMap {
				targetProcessResults = append(targetProcessResults, network.TargetResult{
					ID:   target,
					Code: constants.CancelSubFailedCode,
					Msg:  fmt.Sprintf("%s: %s", constants.CancelSubFailedMsg, err.Error()),
				})
			}
		}
	}
	config.mutex.Unlock()
	// pass the removed subscription configuration to the notice channel
	config.noticeChan <- transport

	if shouldRemoveClient {
		s.globalMutex.Lock()
		if currentConfig, exist := s.subMap[clientID]; exist && len(currentConfig.measurements) == 0 {
			delete(s.subMap, clientID)
		}
		s.globalMutex.Unlock()
	}
	return targetProcessResults, nil
}

// UpdateTargets define function to update targets in SharedSubState
func (s *SharedSubState) UpdateTargets(ctx context.Context, tx *gorm.DB, clientID string, measurements []network.RealTimeMeasurementItem) ([]network.TargetResult, error) {
	requestTargetsCount := processRealTimeRequestCount(measurements)

	s.globalMutex.RLock()
	config, exist := s.subMap[clientID]
	s.globalMutex.RUnlock()

	if !exist {
		err := fmt.Errorf("clientID %s not found", clientID)
		logger.Error(ctx, "clientID not found in update targets operation", "error", err)
		return processRealTimeRequestTargets(measurements, requestTargetsCount, err), err
	}

	targetProcessResults, successfulTargets, newMeasMap, newMeasContextMap := processAndValidateTargetsForUpdate(ctx, tx, config, measurements, requestTargetsCount)

	config.mutex.Lock()
	allDuplicates, err := mergeMeasurementsForUpdate(config, newMeasMap, newMeasContextMap)
	if err != nil {
		logger.Warn(ctx, "can not find existing measurements in target interval", "clientID", clientID, "duplicates", allDuplicates, "error", err)
	}

	if len(allDuplicates) > 0 {
		logger.Warn(ctx, "some targets are duplicate and have been ignored in update operation", "clientID", clientID, "duplicates", allDuplicates)
		// process repeat targets in targetProcessResults and successfulTargets
		targetProcessResults, successfulTargets = filterAndDeduplicateRepeatTargets(targetProcessResults, successfulTargets, allDuplicates)
	}
	config.mutex.Unlock()

	if len(successfulTargets) > 0 {
		transport := &transportTargets{
			OperationType: constants.OpUpdate,
			Targets:       successfulTargets,
		}
		config.noticeChan <- transport
	}

	return targetProcessResults, nil
}

// Get define function to get subscriptions config from SharedSubState
func (s *SharedSubState) Get(clientID string) (*RealTimeSubConfig, bool) {
	s.globalMutex.RLock()
	defer s.globalMutex.RUnlock()

	config, exists := s.subMap[clientID]
	return config, exists
}

// processRealTimeRequestCount counts the total number of targets across all measurement items in the request
func processRealTimeRequestCount(measurements []network.RealTimeMeasurementItem) int {
	totalTargetsCount := 0
	for _, measItem := range measurements {
		totalTargetsCount += len(measItem.Targets)
	}
	return totalTargetsCount
}

// processRealTimeRequestTargets builds a failed TargetResult for every target in the request using the given error
func processRealTimeRequestTargets(measurements []network.RealTimeMeasurementItem, targetCount int, err error) []network.TargetResult {
	targetProcessResults := make([]network.TargetResult, 0, targetCount)
	for _, measurementItem := range measurements {
		for _, target := range measurementItem.Targets {
			var targetResult network.TargetResult
			targetResult.ID = target
			targetResult.Code = constants.SubFailedCode
			targetResult.Msg = fmt.Sprintf("%s: %s", constants.SubFailedMsg, err.Error())
			targetProcessResults = append(targetProcessResults, targetResult)
		}
	}
	return targetProcessResults
}

// transportTargets define struct to transport updated or removed targets to the real time pull api
type transportTargets struct {
	OperationType constants.TargetOperationType
	Targets       []string
}
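
The noticeChan side is consumed by the real time pull loop, which is not part of this diff. Purely as a hypothetical shape of that consumer (nothing beyond transportTargets and the Op* constants appears in this change set):

```go
// hypothetical consumer loop for a single client's config
func consumeNotices(config *RealTimeSubConfig) {
	for transport := range config.noticeChan {
		switch transport.OperationType {
		case constants.OpAppend:
			// start polling the newly subscribed targets
		case constants.OpUpdate:
			// reschedule polling for the updated targets
		case constants.OpRemove:
			// stop polling the removed targets
		}
	}
}
```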

27 main.go

@ -22,11 +22,10 @@ import (
 	"modelRT/middleware"
 	"modelRT/model"
 	"modelRT/pool"
+	realtimedata "modelRT/real-time-data"
 	"modelRT/router"
 	"modelRT/util"
 
-	realtimedata "modelRT/real-time-data"
-
 	"github.com/gin-gonic/gin"
 	"github.com/panjf2000/ants/v2"
 	swaggerFiles "github.com/swaggo/files"

@ -146,12 +145,6 @@ func main() {
 	}
 	defer anchorRealTimePool.Release()
 
-	// init cancel context
-	cancelCtx, cancel := context.WithCancel(ctx)
-	defer cancel()
-	// init real time data receive channel
-	go realtimedata.ReceiveChan(cancelCtx)
-
 	postgresDBClient.Transaction(func(tx *gorm.DB) error {
 		// load circuit diagram from postgres
 		// componentTypeMap, err := database.QueryCircuitDiagramComponentFromDB(cancelCtx, tx, parsePool)

@ -160,7 +153,13 @@ func main() {
 		// 	panic(err)
 		// }
 
-		// TODO temporarily disabled to complete the swagger startup test
+		allMeasurement, err := database.GetAllMeasurements(ctx, tx)
+		if err != nil {
+			logger.Error(ctx, "load topologic info from postgres failed", "error", err)
+			panic(err)
+		}
+		go realtimedata.StartRealTimeDataComputing(ctx, allMeasurement)
+
 		tree, err := database.QueryTopologicFromDB(ctx, tx)
 		if err != nil {
 			logger.Error(ctx, "load topologic info from postgres failed", "error", err)

@ -170,10 +169,6 @@ func main() {
 		return nil
 	})
 
-	// TODO finish the subscription data analysis
-	// TODO temporarily disabled to complete the swagger startup test
-	// go realtimedata.RealTimeDataComputer(ctx, nil, []string{}, "")
-
 	// use release mode in production
 	// gin.SetMode(gin.ReleaseMode)
 	engine := gin.New()

@ -225,7 +220,7 @@ func main() {
 	go func() {
 		<-done
 		if err := server.Shutdown(context.Background()); err != nil {
-			logger.Error(ctx, "ShutdownServerError", "err", err)
+			logger.Error(ctx, "shutdown server error", "err", err)
 		}
 	}()
 

@ -234,10 +229,10 @@ func main() {
 	if err != nil {
 		if err == http.ErrServerClosed {
 			// the service receives the shutdown signal normally and then closes
-			logger.Info(ctx, "Server closed under request")
+			logger.Info(ctx, "server closed under request")
 		} else {
 			// abnormal shutdown of service
-			logger.Error(ctx, "Server closed unexpected", "err", err)
+			logger.Error(ctx, "server closed unexpectedly", "err", err)
 		}
 	}
 }

@ -0,0 +1,47 @@
package middleware

import (
	"net"
	"net/http"
	"net/http/httputil"
	"os"
	"runtime/debug"
	"strings"

	"modelRT/logger"

	"github.com/gin-gonic/gin"
)

// GinPanicRecovery define func of customizing gin recover output
func GinPanicRecovery() gin.HandlerFunc {
	return func(c *gin.Context) {
		defer func() {
			if err := recover(); err != nil {
				// Check for a broken connection, as it is not really a
				// condition that warrants a panic stack trace.
				var brokenPipe bool
				if ne, ok := err.(*net.OpError); ok {
					if se, ok := ne.Err.(*os.SyscallError); ok {
						if strings.Contains(strings.ToLower(se.Error()), "broken pipe") || strings.Contains(strings.ToLower(se.Error()), "connection reset by peer") {
							brokenPipe = true
						}
					}
				}

				httpRequest, _ := httputil.DumpRequest(c.Request, false)
				if brokenPipe {
					logger.Error(c, "http request broken pipe", "path", c.Request.URL.Path, "error", err, "request", string(httpRequest))
					// If the connection is dead, we can't write a status to it.
					c.Error(err.(error))
					c.Abort()
					return
				}

				logger.Error(c, "http_request_panic", "path", c.Request.URL.Path, "error", err, "request", string(httpRequest), "stack", string(debug.Stack()))
				c.AbortWithError(http.StatusInternalServerError, err.(error))
			}
		}()
		c.Next()
	}
}
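
Registering the recovery handler follows the usual gin pattern; a one-line sketch (the engine variable corresponds to the one built in main.go above):

```go
engine := gin.New()
engine.Use(middleware.GinPanicRecovery())
```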

@ -19,7 +19,7 @@ type AttrModelInterface interface {
 type LongAttrInfo struct {
 	AttrGroupName string
 	AttrKey       string
-	AttrValue     interface{}
+	AttrValue     any
 	GridInfo      *orm.Grid
 	ZoneInfo      *orm.Zone
 	StationInfo   *orm.Station

@ -52,7 +52,7 @@ func (l *LongAttrInfo) IsLocal() bool {
 }
 
 // GetAttrValue define return the attribute value
-func (l *LongAttrInfo) GetAttrValue() interface{} {
+func (l *LongAttrInfo) GetAttrValue() any {
 	return l.AttrValue
 }

@ -60,7 +60,7 @@ func (l *LongAttrInfo) GetAttrValue() any {
 type ShortAttrInfo struct {
 	AttrGroupName string
 	AttrKey       string
-	AttrValue     interface{}
+	AttrValue     any
 	ComponentInfo *orm.Component
 }

@ -90,6 +90,6 @@ func (s *ShortAttrInfo) IsLocal() bool {
 }
 
 // GetAttrValue define return the attribute value
-func (l *ShortAttrInfo) GetAttrValue() interface{} {
+func (l *ShortAttrInfo) GetAttrValue() any {
 	return l.AttrValue
 }

@ -0,0 +1,119 @@
// Package model define model struct of model runtime service
package model

import "modelRT/orm"

// IndentityTokenModelInterface define basic identity token model type interface
type IndentityTokenModelInterface interface {
	GetComponentInfo() *orm.Component
	GetMeasurementInfo() *orm.Measurement
	GetGridTag() string        // token1
	GetZoneTag() string        // token2
	GetStationTag() string     // token3
	GetNamespacePath() string  // token4(COMPONENT TABLE NSPATH)
	GetComponentTag() string   // token5(COMPONENT TABLE TAG)
	GetAttributeGroup() string // token6(component attribute group information)
	GetMeasurementTag() string // token7(measurement value or attribute field)
	IsLocal() bool
}

// LongIdentityTokenModel define struct to long identity token info
type LongIdentityTokenModel struct {
	ComponentInfo   *orm.Component
	MeasurementInfo *orm.Measurement
	GridTag         string
	ZoneTag         string
	StationTag      string
	NamespacePath   string
	ComponentTag    string
	AttributeGroup  string
	MeasurementTag  string
}

// GetComponentInfo define return the component information in the long identity token
func (l *LongIdentityTokenModel) GetComponentInfo() *orm.Component {
	return l.ComponentInfo
}

// GetMeasurementInfo define return the measurement information in the long identity token
func (l *LongIdentityTokenModel) GetMeasurementInfo() *orm.Measurement {
	return l.MeasurementInfo
}

// GetGridTag define function to return the grid tag information in the long identity token
func (l *LongIdentityTokenModel) GetGridTag() string { return l.GridTag }

// GetZoneTag define function to return the zone tag information in the long identity token
func (l *LongIdentityTokenModel) GetZoneTag() string { return l.ZoneTag }

// GetStationTag define function to return the station tag information in the long identity token
func (l *LongIdentityTokenModel) GetStationTag() string { return l.StationTag }

// GetNamespacePath define function to return the namespace path information in the long identity token
func (l *LongIdentityTokenModel) GetNamespacePath() string { return l.NamespacePath }

// GetComponentTag define function to return the component tag information in the long identity token
func (l *LongIdentityTokenModel) GetComponentTag() string { return l.ComponentTag }

// GetAttributeGroup define function to return the attribute group information in the long identity token
func (l *LongIdentityTokenModel) GetAttributeGroup() string { return l.AttributeGroup }

// GetMeasurementTag define function to return the measurement tag information in the long identity token
func (l *LongIdentityTokenModel) GetMeasurementTag() string {
	return l.MeasurementTag
}

// IsLocal define return the is_local information in the long identity token
func (l *LongIdentityTokenModel) IsLocal() bool {
	return false
}

// ShortIdentityTokenModel define struct to short identity token info
type ShortIdentityTokenModel struct {
	ComponentInfo   *orm.Component
	MeasurementInfo *orm.Measurement

	GridTag        string // token1
	ZoneTag        string // token2
	StationTag     string // token3
	NamespacePath  string // token4
	MeasurementTag string // token7
}

// GetComponentInfo define return the component information in the short identity token
func (s *ShortIdentityTokenModel) GetComponentInfo() *orm.Component {
	return s.ComponentInfo
}

// GetMeasurementInfo define return the measurement information in the short identity token
func (s *ShortIdentityTokenModel) GetMeasurementInfo() *orm.Measurement {
	return s.MeasurementInfo
}

// GetGridTag define function to return the grid tag information in the short identity token
func (s *ShortIdentityTokenModel) GetGridTag() string { return "" }

// GetZoneTag define function to return the zone tag information in the short identity token
func (s *ShortIdentityTokenModel) GetZoneTag() string { return "" }

// GetStationTag define function to return the station tag information in the short identity token
func (s *ShortIdentityTokenModel) GetStationTag() string { return "" }

// GetNamespacePath define function to return the namespace path information in the short identity token
func (s *ShortIdentityTokenModel) GetNamespacePath() string { return s.NamespacePath }

// GetComponentTag define function to return the component tag information in the short identity token
func (s *ShortIdentityTokenModel) GetComponentTag() string { return "" }

// GetAttributeGroup define function to return the attribute group information in the short identity token
func (s *ShortIdentityTokenModel) GetAttributeGroup() string { return "" }

// GetMeasurementTag define function to return the measurement tag information in the short identity token
func (s *ShortIdentityTokenModel) GetMeasurementTag() string {
	return ""
}

// IsLocal define return the is_local information in the short identity token
func (s *ShortIdentityTokenModel) IsLocal() bool {
	return true
}
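
Reading the token1 through token7 comments together with the dotted identifiers used in the subscription examples earlier, a long identity token decomposes as follows; the mapping is assumed from the interface comments above:

```go
// "grid1.zone1.station1.ns1.tag1.bay.I11_C_rms"
//   token1  GetGridTag()        -> "grid1"
//   token2  GetZoneTag()        -> "zone1"
//   token3  GetStationTag()     -> "station1"
//   token4  GetNamespacePath()  -> "ns1"
//   token5  GetComponentTag()   -> "tag1"
//   token6  GetAttributeGroup() -> "bay"
//   token7  GetMeasurementTag() -> "I11_C_rms"
// the short form carries only tokens 1-4 and 7 and reports IsLocal() == true
```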

@ -17,7 +17,7 @@ type MeasurementDataSource struct {
 }
 
 // IOAddress define interface of IO address
-type IOAddress interface{}
+type IOAddress any
 
 // CL3611Address define CL3611 protocol struct
 type CL3611Address struct {

@ -174,3 +174,98 @@ func (m MeasurementDataSource) GetIOAddress() (IOAddress, error) {
		return nil, constants.ErrUnknownDataType
	}
}

// GenerateMeasureIdentifier define func of generate measurement identifier
func GenerateMeasureIdentifier(source map[string]any) (string, error) {
	regTypeVal, ok := source["type"]
	if !ok {
		return "", fmt.Errorf("can not find type in datasource field")
	}

	var regType int
	switch v := regTypeVal.(type) {
	case int:
		regType = v
	case float32:
		if v != float32(int(v)) {
			return "", fmt.Errorf("invalid type format in datasource field, expected integer value, got float: %f", v)
		}
		regType = int(v)
	case float64:
		if v != float64(int(v)) {
			return "", fmt.Errorf("invalid type format in datasource field, expected integer value, got float: %f", v)
		}
		regType = int(v)
	default:
		return "", fmt.Errorf("invalid type format in datasource field, %T", v)
	}

	ioAddrVal, ok := source["io_address"]
	if !ok {
		return "", fmt.Errorf("can not find io_address in datasource field")
	}

	ioAddress, ok := ioAddrVal.(map[string]any)
	if !ok {
		return "", fmt.Errorf("io_address field is not a valid map")
	}

	switch regType {
	case constants.DataSourceTypeCL3611:
		station, ok := ioAddress["station"].(string)
		if !ok {
			return "", fmt.Errorf("CL3611: invalid or missing station field")
		}
		device, ok := ioAddress["device"].(string)
		if !ok {
			return "", fmt.Errorf("CL3611: invalid or missing device field")
		}
		// extract channel (string)
		channel, ok := ioAddress["channel"].(string)
		if !ok {
			return "", fmt.Errorf("CL3611: invalid or missing channel field")
		}
		return concatCL361WithPlus(station, device, channel), nil
	case constants.DataSourceTypePower104:
		station, ok := ioAddress["station"].(string)
		if !ok {
			return "", fmt.Errorf("Power104: invalid or missing station field")
		}
		packetVal, ok := ioAddress["packet"]
		if !ok {
			return "", fmt.Errorf("Power104: missing packet field")
		}
		var packet int
		switch v := packetVal.(type) {
		case int:
			packet = v
		default:
			return "", fmt.Errorf("Power104: invalid packet format")
		}

		offsetVal, ok := ioAddress["offset"]
		if !ok {
			return "", fmt.Errorf("Power104: missing offset field")
		}
		var offset int
		switch v := offsetVal.(type) {
		case int:
			offset = v
		default:
			return "", fmt.Errorf("Power104: invalid offset format")
		}
		return concatP104WithPlus(station, packet, offset), nil
	default:
		return "", fmt.Errorf("unsupported regulation type %d in datasource field", regType)
	}
}

func concatP104WithPlus(station string, packet int, offset int) string {
	packetStr := strconv.Itoa(packet)
	offsetStr := strconv.Itoa(offset)
	return station + ":" + packetStr + ":" + offsetStr
}

func concatCL361WithPlus(station, device, channel string) string {
	return station + ":" + device + ":" + "phasor" + ":" + channel
}
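
For orientation, the two identifier shapes the function above produces; the concrete station, device, and address values here are invented for illustration:

```go
// CL3611 layout: station:device:phasor:channel
cl := map[string]any{
	"type": constants.DataSourceTypeCL3611,
	"io_address": map[string]any{
		"station": "st1", "device": "dev1", "channel": "ch1",
	},
}
id1, _ := GenerateMeasureIdentifier(cl) // "st1:dev1:phasor:ch1"

// Power104 layout: station:packet:offset (packet and offset must be untyped int values)
p104 := map[string]any{
	"type": constants.DataSourceTypePower104,
	"io_address": map[string]any{
		"station": "st1", "packet": 3, "offset": 12,
	},
}
id2, _ := GenerateMeasureIdentifier(p104) // "st1:3:12"
```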

@ -13,6 +13,7 @@ import (
 
 	"github.com/RediSearch/redisearch-go/v2/redisearch"
 	redigo "github.com/gomodule/redigo/redis"
+	"github.com/redis/go-redis/v9"
 )
 
 var ac *redisearch.Autocompleter

@ -27,26 +28,248 @@ func RedisSearchRecommend(ctx context.Context, input string) ([]string, bool, er
 	rdb := diagram.GetRedisClientInstance()
 
 	if input == "" {
-		// return all grid names
-		return getAllGridKeys(ctx, constants.RedisAllGridSetKey)
+		// return all grid tagname
+		return getKeyBySpecificsLevel(ctx, rdb, 1, input)
 	}
 
 	inputSlice := strings.Split(input, ".")
 	inputSliceLen := len(inputSlice)
-	originInputLen := len(inputSlice)
 
 	switch inputSliceLen {
 	case 1:
-		// TODO optimize this into the NewSet form
-		gridExist, err := rdb.SIsMember(ctx, constants.RedisAllGridSetKey, input).Result()
+		// grid tagname search
+		gridSearchInput := inputSlice[0]
+		gridExists, err := rdb.SIsMember(ctx, constants.RedisAllGridSetKey, gridSearchInput).Result()
 		if err != nil {
 			logger.Error(ctx, "check grid key exist failed", "grid_key", input, "error", err)
 			return []string{}, false, err
 		}
 
-		searchInput := input
-		inputLen := inputSliceLen
-		for inputLen != 0 && !gridExist {
+		if gridExists {
+			return []string{"."}, false, nil
+		}
+
+		// start grid tagname fuzzy search
+		recommends, err := runFuzzySearch(ctx, gridSearchInput, "", inputSliceLen)
+		if err != nil {
+			logger.Error(ctx, "fuzzy search failed for level 1", "search_input", gridSearchInput, "error", err)
+			return []string{}, false, err
+		}
+
+		if len(recommends) > 0 {
+			return recommends, true, nil
+		}
+		return []string{}, true, nil
+
+	case 2:
+		return handleLevelFuzzySearch(ctx, rdb, 2, constants.RedisAllZoneSetKey, inputSlice)
+	case 3:
+		return handleLevelFuzzySearch(ctx, rdb, 3, constants.RedisAllStationSetKey, inputSlice)
+	case 4:
+		return handleLevelFuzzySearch(ctx, rdb, 4, constants.RedisAllCompNSPathSetKey, inputSlice)
+	case 5:
+		return handleLevelFuzzySearch(ctx, rdb, 5, constants.RedisAllCompTagSetKey, inputSlice)
+	case 6:
+		return handleLevelFuzzySearch(ctx, rdb, 6, constants.RedisAllConfigSetKey, inputSlice)
+	case 7:
+		return handleLevelFuzzySearch(ctx, rdb, 7, constants.RedisAllMeasTagSetKey, inputSlice)
+
+	default:
+		logger.Error(ctx, "unsupported length of search input", "input_len", inputSliceLen)
+		return []string{}, false, nil
+	}
+}
+
+func getKeyBySpecificsLevel(ctx context.Context, rdb *redis.Client, inputLen int, input string) ([]string, bool, error) {
+	queryKey := getSpecificKeyByLength(inputLen, input)
+	results, err := rdb.SMembers(ctx, queryKey).Result()
+	if err != nil {
+		return []string{}, false, fmt.Errorf("get all keys failed, error: %w", err)
+	}
+	return results, false, nil
+}
+
+func combineQueryResultByInput(inputSliceLen int, inputSlice []string, queryResults []string) []string {
+	prefixs := make([]string, 0, len(inputSlice))
+	recommandResults := make([]string, 0, len(queryResults))
+	switch inputSliceLen {
+	case 2:
+		prefixs = []string{inputSlice[0]}
+	case 3:
+		prefixs = inputSlice[0:2]
+	case 4:
+		prefixs = inputSlice[0:3]
+	case 5:
+		prefixs = inputSlice[0:4]
+	case 6:
+		prefixs = inputSlice[0:5]
+	case 7:
+		prefixs = inputSlice[0:6]
+	default:
+		return []string{}
+	}
+
+	for _, queryResult := range queryResults {
+		combineStrs := make([]string, 0, len(inputSlice))
+		combineStrs = append(combineStrs, prefixs...)
+		combineStrs = append(combineStrs, queryResult)
+		recommandResult := strings.Join(combineStrs, ".")
+		recommandResults = append(recommandResults, recommandResult)
+	}
+	return recommandResults
+}
+
+func getSpecificKeyByLength(inputLen int, input string) string {
+	switch inputLen {
+	case 1:
+		return constants.RedisAllGridSetKey
+	case 2:
+		return fmt.Sprintf(constants.RedisSpecGridZoneSetKey, input)
+	case 3:
+		return fmt.Sprintf(constants.RedisSpecZoneStationSetKey, input)
+	case 4:
+		return fmt.Sprintf(constants.RedisSpecStationCompNSPATHSetKey, input)
+	case 5:
+		return fmt.Sprintf(constants.RedisSpecStationCompTagSetKey, input)
+	case 6:
+		return constants.RedisAllConfigSetKey
+	case 7:
+		return fmt.Sprintf(constants.RedisSpecCompTagMeasSetKey, input)
+	default:
+		return constants.RedisAllGridSetKey
+	}
+}
+
+// handleLevelFuzzySearch define func to process recommendation logic for specific levels (level >= 2)
+func handleLevelFuzzySearch(ctx context.Context, rdb *redis.Client, hierarchy int, keySetKey string, inputSlice []string) ([]string, bool, error) {
+	searchInputIndex := hierarchy - 1
+	searchInput := inputSlice[searchInputIndex]
+	searchPrefix := strings.Join(inputSlice[0:searchInputIndex], ".")
+
+	if searchInput == "" {
+		var specificalKey string
+		specificalKeyIndex := searchInputIndex - 1
+		if specificalKeyIndex >= 0 {
+			specificalKey = inputSlice[specificalKeyIndex]
+		}
+
+		allResults, isFuzzy, err := getKeyBySpecificsLevel(ctx, rdb, hierarchy, specificalKey)
+		if err != nil {
+			return []string{}, false, err
+		}
+		recommandResults := combineQueryResultByInput(hierarchy, inputSlice, allResults)
+		return recommandResults, isFuzzy, nil
+	}
+
+	keyExists, err := rdb.SIsMember(ctx, keySetKey, searchInput).Result()
+	if err != nil {
+		logger.Error(ctx, "check key exist failed", "key", searchInput, "error", err)
+		return []string{}, false, err
+	}
+
+	if keyExists {
+		if hierarchy == constants.MaxIdentifyHierarchy {
+			return []string{""}, false, nil
+		}
+		return []string{"."}, false, nil
+	}
+
+	// start redis fuzzy search
+	recommends, err := runFuzzySearch(ctx, searchInput, searchPrefix, hierarchy)
+	if err != nil {
+		logger.Error(ctx, "fuzzy search failed by hierarchy", "hierarchy", hierarchy, "search_input", searchInput, "error", err)
+		return []string{}, false, err
+	}
+
+	if len(recommends) == 0 {
+		logger.Error(ctx, "fuzzy search without result", "hierarchy", hierarchy, "search_input", searchInput, "error", err)
+		return []string{}, true, nil
+	}
+	return combineQueryResultByInput(hierarchy, inputSlice, recommends), true, nil
+}
+
+// runFuzzySearch define func to process redis fuzzy search
+func runFuzzySearch(ctx context.Context, searchInput string, searchPrefix string, inputSliceLen int) ([]string, error) {
+	searchInputLen := len(searchInput)
+
+	for searchInputLen != 0 {
+		results, err := ac.SuggestOpts(searchInput, redisearch.SuggestOptions{
+			Num:          math.MaxInt16,
+			Fuzzy:        true,
+			WithScores:   false,
+			WithPayloads: false,
+		})
+		if err != nil {
+			logger.Error(ctx, "query key by redis fuzzy search failed", "query_key", searchInput, "error", err)
+			return nil, fmt.Errorf("redisearch suggest failed: %w", err)
+		}
+
+		if len(results) == 0 {
+			// no result: back off one step (drop the last byte) and continue the loop
+			// TODO consider replacing the back-off-one-byte for loop with another query strategy
+			searchInput = searchInput[:len(searchInput)-1]
+			searchInputLen = len(searchInput)
+			continue
+		}
+
+		var recommends []string
+		for _, result := range results {
+			term := result.Term
+			var termSliceLen int
+			var termPrefix string
+
+			lastDotIndex := strings.LastIndex(term, ".")
+			if lastDotIndex == -1 {
+				termPrefix = ""
+			} else {
+				termPrefix = term[:lastDotIndex]
+			}
+
+			if result.Term == "" {
+				termSliceLen = 1
+			} else {
+				termSliceLen = strings.Count(result.Term, ".") + 1
+			}
+
+			if termSliceLen == inputSliceLen && termPrefix == searchPrefix {
+				recommends = append(recommends, result.Term)
+			}
+		}
+		return recommends, nil
+	}
+	return []string{}, nil
+}
+
+// RedisSearchRecommend1 define func of redis search by input string and return recommend results
+func RedisSearchRecommend1(ctx context.Context, input string) ([]string, bool, error) {
+	rdb := diagram.GetRedisClientInstance()
+
+	if input == "" {
+		// return all grid names
+		return getKeyBySpecificsLevel(ctx, rdb, 1, input)
+	}
+
+	inputSlice := strings.Split(input, ".")
+	inputSliceLen := len(inputSlice)
+
+	switch inputSliceLen {
+	case 1:
+		// grid tagname search
+		gridSearchInput := inputSlice[0]
+		gridExists, err := rdb.SIsMember(ctx, constants.RedisAllGridSetKey, gridSearchInput).Result()
+		if err != nil {
+			logger.Error(ctx, "check grid key exist failed", "grid_key", input, "error", err)
+			return []string{}, false, err
+		}
+
+		if gridExists {
+			return []string{"."}, false, err
+		}
+
+		// start grid tagname fuzzy search
+		searchInput := gridSearchInput
+		searchInputLen := len(searchInput)
+		for searchInputLen != 0 && !gridExists {
 			results, err := ac.SuggestOpts(searchInput, redisearch.SuggestOptions{
 				Num:   math.MaxInt16,
 				Fuzzy: true,

@ -54,65 +277,51 @@ func RedisSearchRecommend(ctx context.Context, input string) ([]string, bool, er
 				WithPayloads: false,
 			})
 			if err != nil {
-				logger.Error(ctx, "query info by fuzzy failed", "query_key", input, "error", err)
+				logger.Error(ctx, "query grid key by redis fuzzy search failed", "query_key", searchInput, "error", err)
 				return []string{}, false, err
 			}
 
 			if len(results) == 0 {
-				// TODO build a for loop that returns all possible completions
+				// TODO consider replacing the back-off-one-byte for loop with another query strategy
 				searchInput = searchInput[:len(searchInput)-1]
-				inputLen = len(searchInput)
+				searchInputLen = len(searchInput)
 				continue
 			}
 
 			var recommends []string
 			for _, result := range results {
 				termSlice := strings.Split(result.Term, ".")
-				if len(termSlice) <= originInputLen {
+				if len(termSlice) <= inputSliceLen {
 					recommends = append(recommends, result.Term)
 				}
 			}
-			// return fuzzy query results
+			// return fuzzy search results
 			return recommends, true, nil
 		}
-		// handle the case where input is not empty, contains no "." and is already a complete grid key
-		if strings.HasSuffix(input, ".") == false {
-			recommend := input + "."
-			return []string{recommend}, false, nil
-		}
-	default:
-		lastInput := inputSlice[inputSliceLen-1]
-		// check whether queryKey is empty; if empty, return all keys under the previous level
-		if lastInput == "" {
-			setKey := getCombinedConstantsKeyByLength(inputSlice[inputSliceLen-2], inputSliceLen)
-			targetSet := diagram.NewRedisSet(ctx, setKey, 10, true)
-			keys, err := targetSet.SMembers(setKey)
-			if err != nil {
-				logger.Error(ctx, "get all recommend key by setKey failed", "set_key", setKey, "error", err)
-				return []string{}, false, fmt.Errorf("get all recommend key by setKey failed,%w", err)
-			}
-
-			var results []string
-			for _, key := range keys {
-				result := input + key
-				results = append(results, result)
-			}
-			return results, false, nil
-		}
+	case 2:
+		// zone tagname search
+		zoneSearchInput := inputSlice[1]
+		if zoneSearchInput == "" {
+			specificalGrid := inputSlice[0]
+			allZones, isFuzzy, err := getKeyBySpecificsLevel(ctx, rdb, inputSliceLen, specificalGrid)
+			recommandResults := combineQueryResultByInput(inputSliceLen, inputSlice, allZones)
+			return recommandResults, isFuzzy, err
+		}
 
-		setKey := getCombinedConstantsKeyByLength(inputSlice[inputSliceLen-2], inputSliceLen)
-		targetSet := diagram.NewRedisSet(ctx, setKey, 10, true)
-		exist, err := targetSet.SIsMember(setKey, lastInput)
+		zoneExists, err := rdb.SIsMember(ctx, constants.RedisAllZoneSetKey, zoneSearchInput).Result()
 		if err != nil {
-			logger.Error(ctx, "check keys exist failed", "set_key", setKey, "query_key", lastInput, "error", err)
-			return []string{}, false, fmt.Errorf("check keys failed,%w", err)
+			logger.Error(ctx, "check zone key exist failed", "zone_key", zoneSearchInput, "error", err)
return []string{}, false, fmt.Errorf("check keys failed,%w", err)
|
return []string{}, false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
searchInput := input
|
if zoneExists {
|
||||||
inputLen := len(searchInput)
|
return []string{"."}, false, err
|
||||||
for inputLen != 0 && !exist {
|
}
|
||||||
logger.Info(ctx, "use fuzzy query", "input", input)
|
|
||||||
|
// start zone tagname fuzzy search
|
||||||
|
searchInput := zoneSearchInput
|
||||||
|
searchInputLen := len(searchInput)
|
||||||
|
for searchInputLen != 0 && !zoneExists {
|
||||||
results, err := ac.SuggestOpts(searchInput, redisearch.SuggestOptions{
|
results, err := ac.SuggestOpts(searchInput, redisearch.SuggestOptions{
|
||||||
Num: math.MaxInt16,
|
Num: math.MaxInt16,
|
||||||
Fuzzy: true,
|
Fuzzy: true,
|
||||||
|
|
@ -120,94 +329,292 @@ func RedisSearchRecommend(ctx context.Context, input string) ([]string, bool, er
|
||||||
WithPayloads: false,
|
WithPayloads: false,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Error(ctx, "query info by fuzzy failed", "query_key", input, "error", err)
|
logger.Error(ctx, "query zone key by redis fuzzy search failed", "query_key", searchInput, "error", err)
|
||||||
return []string{}, false, err
|
return []string{}, false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(results) == 0 {
|
if len(results) == 0 {
|
||||||
// TODO 构建 for 循环返回所有可能的补全
|
// TODO 考虑使用其他方式代替 for 循环退一字节的查询方式
|
||||||
searchInput = input[:inputLen-1]
|
searchInput = searchInput[:len(searchInput)-1]
|
||||||
inputLen = len(searchInput)
|
searchInputLen = len(searchInput)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
var terms []string
|
var recommends []string
|
||||||
for _, result := range results {
|
for _, result := range results {
|
||||||
terms = append(terms, result.Term)
|
termSlice := strings.Split(result.Term, ".")
|
||||||
|
if len(termSlice) <= inputSliceLen {
|
||||||
|
recommends = append(recommends, result.Term)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
// 返回模糊查询结果
|
// return fuzzy search results
|
||||||
return terms, true, nil
|
return combineQueryResultByInput(inputSliceLen, inputSlice, recommends), true, nil
|
||||||
}
|
}
|
||||||
return []string{input}, false, nil
|
case 3:
|
||||||
|
// station tagname search
|
||||||
|
stationSearchInput := inputSlice[2]
|
||||||
|
if stationSearchInput == "" {
|
||||||
|
specificalZone := inputSlice[1]
|
||||||
|
allStations, isFuzzy, err := getKeyBySpecificsLevel(ctx, rdb, inputSliceLen, specificalZone)
|
||||||
|
recommandResults := combineQueryResultByInput(inputSliceLen, inputSlice, allStations)
|
||||||
|
return recommandResults, isFuzzy, err
|
||||||
|
}
|
||||||
|
|
||||||
|
stationExists, err := rdb.SIsMember(ctx, constants.RedisAllStationSetKey, stationSearchInput).Result()
|
||||||
|
if err != nil {
|
||||||
|
logger.Error(ctx, "check station key exist failed ", "station_key", stationSearchInput, "error", err)
|
||||||
|
return []string{}, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if stationExists {
|
||||||
|
return []string{"."}, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// start station tagname fuzzy search
|
||||||
|
searchInput := stationSearchInput
|
||||||
|
searchInputLen := len(searchInput)
|
||||||
|
for searchInputLen != 0 && !stationExists {
|
||||||
|
results, err := ac.SuggestOpts(searchInput, redisearch.SuggestOptions{
|
||||||
|
Num: math.MaxInt16,
|
||||||
|
Fuzzy: true,
|
||||||
|
WithScores: false,
|
||||||
|
WithPayloads: false,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
logger.Error(ctx, "query station key by redis fuzzy search failed", "query_key", searchInput, "error", err)
|
||||||
|
return []string{}, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(results) == 0 {
|
||||||
|
// TODO 考虑使用其他方式代替 for 循环退一字节的查询方式
|
||||||
|
searchInput = searchInput[:len(searchInput)-1]
|
||||||
|
searchInputLen = len(searchInput)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var recommends []string
|
||||||
|
for _, result := range results {
|
||||||
|
termSlice := strings.Split(result.Term, ".")
|
||||||
|
if len(termSlice) <= inputSliceLen {
|
||||||
|
recommends = append(recommends, result.Term)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// return fuzzy search results
|
||||||
|
return combineQueryResultByInput(inputSliceLen, inputSlice, recommends), true, nil
|
||||||
|
}
|
||||||
|
case 4:
|
||||||
|
// component nspath search
|
||||||
|
compNSPSearchInput := inputSlice[3]
|
||||||
|
if compNSPSearchInput == "" {
|
||||||
|
specificalStation := inputSlice[2]
|
||||||
|
allCompNSPaths, isFuzzy, err := getKeyBySpecificsLevel(ctx, rdb, inputSliceLen, specificalStation)
|
||||||
|
recommandResults := combineQueryResultByInput(inputSliceLen, inputSlice, allCompNSPaths)
|
||||||
|
return recommandResults, isFuzzy, err
|
||||||
|
}
|
||||||
|
|
||||||
|
compNSPathExists, err := rdb.SIsMember(ctx, constants.RedisAllCompNSPathSetKey, compNSPSearchInput).Result()
|
||||||
|
if err != nil {
|
||||||
|
logger.Error(ctx, "check component nspath key exist failed ", "component_nspath_key", compNSPSearchInput, "error", err)
|
||||||
|
return []string{}, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if compNSPathExists {
|
||||||
|
return []string{"."}, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// start grid fuzzy search
|
||||||
|
searchInput := compNSPSearchInput
|
||||||
|
searchInputLen := len(searchInput)
|
||||||
|
for searchInputLen != 0 && !compNSPathExists {
|
||||||
|
results, err := ac.SuggestOpts(searchInput, redisearch.SuggestOptions{
|
||||||
|
Num: math.MaxInt16,
|
||||||
|
Fuzzy: true,
|
||||||
|
WithScores: false,
|
||||||
|
WithPayloads: false,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
logger.Error(ctx, "query component nspath key by redis fuzzy search failed", "query_key", searchInput, "error", err)
|
||||||
|
return []string{}, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(results) == 0 {
|
||||||
|
// TODO 考虑使用其他方式代替 for 循环退一字节的查询方式
|
||||||
|
searchInput = searchInput[:len(searchInput)-1]
|
||||||
|
searchInputLen = len(searchInput)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var recommends []string
|
||||||
|
for _, result := range results {
|
||||||
|
termSlice := strings.Split(result.Term, ".")
|
||||||
|
if len(termSlice) <= inputSliceLen {
|
||||||
|
recommends = append(recommends, result.Term)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// return fuzzy search results
|
||||||
|
return combineQueryResultByInput(inputSliceLen, inputSlice, recommends), true, nil
|
||||||
|
}
|
||||||
|
case 5:
|
||||||
|
// component tag search
|
||||||
|
compTagSearchInput := inputSlice[4]
|
||||||
|
if compTagSearchInput == "" {
|
||||||
|
// TODO 优化考虑是否使用 station 作为 key 的一部分
|
||||||
|
specificalStation := inputSlice[2]
|
||||||
|
allCompNSPaths, isFuzzy, err := getKeyBySpecificsLevel(ctx, rdb, inputSliceLen, specificalStation)
|
||||||
|
recommandResults := combineQueryResultByInput(inputSliceLen, inputSlice, allCompNSPaths)
|
||||||
|
return recommandResults, isFuzzy, err
|
||||||
|
}
|
||||||
|
|
||||||
|
compTagExists, err := rdb.SIsMember(ctx, constants.RedisAllCompTagSetKey, compTagSearchInput).Result()
|
||||||
|
if err != nil {
|
||||||
|
logger.Error(ctx, "check component tag key exist failed ", "component_tag_key", compTagSearchInput, "error", err)
|
||||||
|
return []string{}, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if compTagExists {
|
||||||
|
return []string{"."}, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// start grid fuzzy search
|
||||||
|
searchInput := compTagSearchInput
|
||||||
|
searchInputLen := len(searchInput)
|
||||||
|
for searchInputLen != 0 && !compTagExists {
|
||||||
|
results, err := ac.SuggestOpts(searchInput, redisearch.SuggestOptions{
|
||||||
|
Num: math.MaxInt16,
|
||||||
|
Fuzzy: true,
|
||||||
|
WithScores: false,
|
||||||
|
WithPayloads: false,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
logger.Error(ctx, "query component tag key by redis fuzzy search failed", "query_key", searchInput, "error", err)
|
||||||
|
return []string{}, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(results) == 0 {
|
||||||
|
// TODO 考虑使用其他方式代替 for 循环退一字节的查询方式
|
||||||
|
searchInput = searchInput[:len(searchInput)-1]
|
||||||
|
searchInputLen = len(searchInput)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var recommends []string
|
||||||
|
for _, result := range results {
|
||||||
|
termSlice := strings.Split(result.Term, ".")
|
||||||
|
if len(termSlice) <= inputSliceLen {
|
||||||
|
recommends = append(recommends, result.Term)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// return fuzzy search results
|
||||||
|
return combineQueryResultByInput(inputSliceLen, inputSlice, recommends), true, nil
|
||||||
|
}
|
||||||
|
case 6:
|
||||||
|
// configuration search
|
||||||
|
// TODO 优化
|
||||||
|
configSearchInput := inputSlice[5]
|
||||||
|
if configSearchInput == "" {
|
||||||
|
allCompNSPaths, isFuzzy, err := getKeyBySpecificsLevel(ctx, rdb, inputSliceLen, "")
|
||||||
|
recommandResults := combineQueryResultByInput(inputSliceLen, inputSlice, allCompNSPaths)
|
||||||
|
return recommandResults, isFuzzy, err
|
||||||
|
}
|
||||||
|
|
||||||
|
configExists, err := rdb.SIsMember(ctx, constants.RedisAllConfigSetKey, configSearchInput).Result()
|
||||||
|
if err != nil {
|
||||||
|
logger.Error(ctx, "check config key exist failed ", "config_key", configSearchInput, "error", err)
|
||||||
|
return []string{}, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if configExists {
|
||||||
|
return []string{"."}, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// start grid fuzzy search
|
||||||
|
searchInput := configSearchInput
|
||||||
|
searchInputLen := len(searchInput)
|
||||||
|
for searchInputLen != 0 && !configExists {
|
||||||
|
results, err := ac.SuggestOpts(searchInput, redisearch.SuggestOptions{
|
||||||
|
Num: math.MaxInt16,
|
||||||
|
Fuzzy: true,
|
||||||
|
WithScores: false,
|
||||||
|
WithPayloads: false,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
logger.Error(ctx, "query config key by redis fuzzy search failed", "query_key", searchInput, "error", err)
|
||||||
|
return []string{}, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(results) == 0 {
|
||||||
|
// TODO 考虑使用其他方式代替 for 循环退一字节的查询方式
|
||||||
|
searchInput = searchInput[:len(searchInput)-1]
|
||||||
|
searchInputLen = len(searchInput)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var recommends []string
|
||||||
|
for _, result := range results {
|
||||||
|
termSlice := strings.Split(result.Term, ".")
|
||||||
|
if len(termSlice) <= inputSliceLen {
|
||||||
|
recommends = append(recommends, result.Term)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// return fuzzy search results
|
||||||
|
return combineQueryResultByInput(inputSliceLen, inputSlice, recommends), true, nil
|
||||||
|
}
|
||||||
|
case 7:
|
||||||
|
// measurement search
|
||||||
|
measSearchInput := inputSlice[6]
|
||||||
|
if measSearchInput == "" {
|
||||||
|
// use compoent tag for redis unique key prefix
|
||||||
|
specificalCompTag := inputSlice[4]
|
||||||
|
allMeasTags, isFuzzy, err := getKeyBySpecificsLevel(ctx, rdb, inputSliceLen, specificalCompTag)
|
||||||
|
recommandResults := combineQueryResultByInput(inputSliceLen, inputSlice, allMeasTags)
|
||||||
|
return recommandResults, isFuzzy, err
|
||||||
|
}
|
||||||
|
|
||||||
|
measTagExists, err := rdb.SIsMember(ctx, constants.RedisAllMeasTagSetKey, measSearchInput).Result()
|
||||||
|
if err != nil {
|
||||||
|
logger.Error(ctx, "check component tag key exist failed ", "component_tag_key", measSearchInput, "error", err)
|
||||||
|
return []string{}, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if measTagExists {
|
||||||
|
return []string{"."}, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// start measurement tag fuzzy search
|
||||||
|
searchInput := measSearchInput
|
||||||
|
searchInputLen := len(searchInput)
|
||||||
|
for searchInputLen != 0 && !measTagExists {
|
||||||
|
results, err := ac.SuggestOpts(searchInput, redisearch.SuggestOptions{
|
||||||
|
Num: math.MaxInt16,
|
||||||
|
Fuzzy: true,
|
||||||
|
WithScores: false,
|
||||||
|
WithPayloads: false,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
logger.Error(ctx, "query measurement tag key by redis fuzzy search failed", "query_key", searchInput, "error", err)
|
||||||
|
return []string{}, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(results) == 0 {
|
||||||
|
// TODO 考虑使用其他方式代替 for 循环退一字节的查询方式
|
||||||
|
searchInput = searchInput[:len(searchInput)-1]
|
||||||
|
searchInputLen = len(searchInput)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var recommends []string
|
||||||
|
for _, result := range results {
|
||||||
|
termSlice := strings.Split(result.Term, ".")
|
||||||
|
if len(termSlice) <= inputSliceLen {
|
||||||
|
recommends = append(recommends, result.Term)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// return fuzzy search results
|
||||||
|
return combineQueryResultByInput(inputSliceLen, inputSlice, recommends), true, nil
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
logger.Error(ctx, "unsupport length of search input", "input_len", inputSliceLen)
|
||||||
|
return []string{}, false, nil
|
||||||
}
|
}
|
||||||
return []string{}, false, nil
|
return []string{}, false, nil
|
||||||
}
|
}
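A minimal usage sketch (hypothetical caller; assumes a context is in scope): the boolean result reports whether the recommendations came from the fuzzy path.

// recommendHandlerSketch is a hypothetical handler-side caller.
func recommendHandlerSketch(ctx context.Context) {
    recommends, isFuzzy, err := RedisSearchRecommend1(ctx, "grid1.zo")
    if err != nil {
        logger.Error(ctx, "recommend failed", "input", "grid1.zo", "error", err)
        return
    }
    logger.Info(ctx, "recommend results", "is_fuzzy", isFuzzy, "count", len(recommends))
}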
func getAllGridKeys(ctx context.Context, setKey string) ([]string, bool, error) {
	// fetch all grid keys from the redis set
	gridSets := diagram.NewRedisSet(ctx, setKey, 10, true)
	keys, err := gridSets.SMembers("grid_keys")
	if err != nil {
		return []string{}, false, fmt.Errorf("get all root keys failed, error: %v", err)
	}
	return keys, false, nil
}

func getSpecificZoneKeys(ctx context.Context, input string) ([]string, bool, error) {
	setKey := fmt.Sprintf(constants.RedisSpecGridZoneSetKey, input)
	// TODO: fetch the zone keys under the given grid from the redis set
	zoneSets := diagram.NewRedisSet(ctx, setKey, 10, true)
	keys, err := zoneSets.SMembers(setKey)
	if err != nil {
		return []string{}, false, fmt.Errorf("get all root keys failed, error: %v", err)
	}

	var results []string
	for _, key := range keys {
		result := input + "." + key
		results = append(results, result)
	}
	return results, false, nil
}

func getConstantsKeyByLength(inputLen int) string {
	switch inputLen {
	case 1:
		return constants.RedisAllGridSetKey
	case 2:
		return constants.RedisAllZoneSetKey
	case 3:
		return constants.RedisAllStationSetKey
	case 4:
		return constants.RedisAllComponentSetKey
	default:
		return constants.RedisAllGridSetKey
	}
}

func getCombinedConstantsKeyByLength(key string, inputLen int) string {
	switch inputLen {
	case 2:
		return fmt.Sprintf(constants.RedisSpecGridZoneSetKey, key)
	case 3:
		return fmt.Sprintf(constants.RedisSpecZoneStationSetKey, key)
	case 4:
		return fmt.Sprintf(constants.RedisSpecStationComponentSetKey, key)
	default:
		return constants.RedisAllGridSetKey
	}
}

// GetLongestCommonPrefixLength define func of get longest common prefix length between two strings
func GetLongestCommonPrefixLength(input string, recommendResult string) int {
	if input == "" {
		return 0
	}

	minLen := min(len(input), len(recommendResult))

	for i := range minLen {
		if input[i] != recommendResult[i] {
			return i
		}
	}
	return minLen
}
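A worked example of the prefix computation (values are illustrative):

// offsetSketch shows how a caller could derive the completion offset.
func offsetSketch() {
    input := "grid1.zone1.stat"
    candidate := "grid1.zone1.station1"
    offset := GetLongestCommonPrefixLength(input, candidate) // 16, the length of "grid1.zone1.stat"
    _ = candidate[offset:]                                   // "ion1" is the completion suffix
}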
@ -23,18 +23,32 @@ type TopologicUUIDCreateInfo struct {
	Comment string `json:"comment"`
}

// ComponentCreateInfo defines circuit diagram component create info
type ComponentCreateInfo struct {
	UUID      string         `json:"uuid"`
	Name      string         `json:"name"`
	Context   map[string]any `json:"context"`
	GridID    int64          `json:"grid_id"`
	ZoneID    int64          `json:"zone_id"`
	StationID int64          `json:"station_id"`
	PageID    int64          `json:"page_id"`
	Tag       string         `json:"tag"`
	Params    string         `json:"params"`
	Op        int            `json:"op"`
}

// MeasurementCreateInfo defines circuit diagram measurement create info
type MeasurementCreateInfo struct {
	UUID      string         `json:"uuid"`
	Name      string         `json:"name"`
	Context   map[string]any `json:"context"`
	GridID    int64          `json:"grid_id"`
	ZoneID    int64          `json:"zone_id"`
	StationID int64          `json:"station_id"`
	PageID    int64          `json:"page_id"`
	Tag       string         `json:"tag"`
	Params    string         `json:"params"`
	Op        int            `json:"op"`
}

// CircuitDiagramCreateRequest defines request params of circuit diagram create api
@ -0,0 +1,22 @@
// Package network define struct of network operation
package network

// MeasurementLinkRequest defines the request payload for processing a measurement link
type MeasurementLinkRequest struct {
	// required: true
	MeasurementID int64 `json:"measurement_id" example:"1001"`
	// required: true
	// enum: [add, del]
	Action string `json:"action" example:"add"`
}

// DiagramNodeLinkRequest defines the request payload for processing a diagram node link
type DiagramNodeLinkRequest struct {
	// required: true
	NodeID int64 `json:"node_id" example:"1001"`
	// required: true
	NodeLevel int `json:"node_level" example:"1"`
	// required: true
	// enum: [add, del]
	Action string `json:"action" example:"add"`
}
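For reference, a client-side marshaling sketch (assumes encoding/json is imported in the caller's file):

// buildLinkBody is a hypothetical helper producing the wire format the
// handler expects: {"measurement_id":1001,"action":"add"}
func buildLinkBody() ([]byte, error) {
    return json.Marshal(MeasurementLinkRequest{MeasurementID: 1001, Action: "add"})
}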
@ -36,16 +36,16 @@ type TopologicUUIDChangeInfos struct {
// ComponentUpdateInfo defines circuit diagram component params info
type ComponentUpdateInfo struct {
	ID        int64          `json:"id"`
	UUID      string         `json:"uuid"`
	Name      string         `json:"name"`
	Context   map[string]any `json:"context"`
	GridID    int64          `json:"grid_id"`
	ZoneID    int64          `json:"zone_id"`
	StationID int64          `json:"station_id"`
	Params    string         `json:"params"`
	Op        int            `json:"op"`
	Tag       string         `json:"tag"`
}

// CircuitDiagramUpdateRequest defines request params of circuit diagram update api
@ -9,5 +9,5 @@ type MeasurementGetRequest struct {
// MeasurementRecommendRequest defines the request payload for a measurement recommend
type MeasurementRecommendRequest struct {
	Input string `form:"input,omitempty" example:"grid1"`
}
@ -0,0 +1,26 @@
// Package network define struct of network operation
package network

// RealTimeDataReceiveRequest defines request params of real time data receive api
type RealTimeDataReceiveRequest struct {
	PayLoad RealTimeDataReceivePayload `json:"payload"`
}

// RealTimeDataReceivePayload defines request payload of real time data receive api
type RealTimeDataReceivePayload struct {
	ComponentUUID string              `json:"component_uuid"`
	Point         string              `json:"point"`
	Values        []RealTimeDataPoint `json:"values"`
}

// RealTimeDataPoint define struct of real time data point
type RealTimeDataPoint struct {
	Time  int64   `json:"time" example:"1678886400"`
	Value float64 `json:"value" example:"123.1"`
}

// RealTimeDataPayload define struct of real time data payload
type RealTimeDataPayload struct {
	// TODO: add example tag
	RealTimeDataPoints []RealTimeDataPoint `json:"sub_pos" swaggertype:"object"`
}
@ -1,20 +1,48 @@
// Package network define struct of network operation
package network

// RealTimeQueryRequest define struct of real time data query request
type RealTimeQueryRequest struct {
	// required: true
	// enum: [start, stop]
	Action string `json:"action" example:"start" description:"requested action, e.g. start/stop"`
	// TODO: add an example value note for the monitorID
	ClientID string `json:"client_id" example:"xxxx" description:"monitor request ID identifying different clients"`

	// required: true
	Measurements []RealTimeMeasurementItem `json:"measurements" description:"defines the different data collection strategies and targets"`
}

// RealTimeSubRequest define struct of real time data subscription request
type RealTimeSubRequest struct {
	// required: true
	// enum: [start, stop]
	Action   string `json:"action" example:"start" description:"requested action, e.g. start/stop"`
	ClientID string `json:"client_id" example:"5d72f2d9-e33a-4f1b-9c76-88a44b9a953e" description:"monitor request ID identifying different clients"`
	// required: true
	Measurements []RealTimeMeasurementItem `json:"measurements" description:"defines the different data collection strategies and targets"`
}

// RealTimeMeasurementItem define struct of real time measurement item
type RealTimeMeasurementItem struct {
	Interval string   `json:"interval" example:"1" description:"data collection interval in seconds"`
	Targets  []string `json:"targets" example:"[\"grid1.zone1.station1.ns1.tag1.bay.I11_A_rms\",\"grid1.zone1.station1.ns1.tag1.tag1.bay.I11_B_rms\"]" description:"list of measurement points or tag names to collect data for"`
}

// RealTimePullPayload define struct of pull real time data payload
type RealTimePullPayload struct {
	// required: true
	Targets []RealTimePullTarget `json:"targets" example:"{\"targets\":[{\"id\":\"grid1.zone1.station1.ns1.tag1.bay.I11_A_rms\",\"datas\":[{\"time\":1736305467506000000,\"value\":1},{\"time\":1736305467506000000,\"value\":1}]},{\"id\":\"grid1.zone1.station1.ns1.tag1.bay.I11_B_rms\",\"datas\":[{\"time\":1736305467506000000,\"value\":1},{\"time\":1736305467506000000,\"value\":1}]}]}" description:"real time data"`
}

// RealTimePullTarget define struct of pull real time data target
type RealTimePullTarget struct {
	ID    string             `json:"id" example:"grid1.zone1.station1.ns1.tag1.bay.I11_A_rms" description:"real time data ID"`
	Datas []RealTimePullData `json:"datas" example:"[{\"time\":1736305467506000000,\"value\":220},{\"time\":1736305467506000000,\"value\":220}]" description:"array of real time data values"`
}

// RealTimePullData define struct of pull real time data param
type RealTimePullData struct {
	Time  string  `json:"time" example:"1736305467506000000" description:"real time data timestamp"`
	Value float64 `json:"value" example:"220" description:"real time data value"`
}
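A request-building sketch for the subscription API, using the example values from the struct tags (assumes encoding/json is imported in the caller's file):

// buildSubRequest is a hypothetical client-side helper.
func buildSubRequest() ([]byte, error) {
    req := RealTimeSubRequest{
        Action:   "start",
        ClientID: "5d72f2d9-e33a-4f1b-9c76-88a44b9a953e",
        Measurements: []RealTimeMeasurementItem{{
            Interval: "1",
            Targets:  []string{"grid1.zone1.station1.ns1.tag1.bay.I11_A_rms"},
        }},
    }
    return json.Marshal(req) // POST this as the subscription request body
}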
@ -5,14 +5,14 @@ package network
type FailureResponse struct {
	Code    int    `json:"code" example:"500"`
	Msg     string `json:"msg" example:"failed to get recommend data from redis"`
	Payload any    `json:"payload" swaggertype:"object"`
}

// SuccessResponse define struct of standard successful API response format
type SuccessResponse struct {
	Code    int    `json:"code" example:"200"`
	Msg     string `json:"msg" example:"success"`
	Payload any    `json:"payload" swaggertype:"object"`
}

// MeasurementRecommendPayload define struct of the data payload for the successful recommendation response
@ -20,5 +20,17 @@ type MeasurementRecommendPayload struct {
	Input           string   `json:"input" example:"transformfeeder1_220."`
	Offset          int      `json:"offset" example:"21"`
	RecommendedList []string `json:"recommended_list" example:"[\"I_A_rms\", \"I_B_rms\",\"I_C_rms\"]"`
}

// TargetResult define struct of target item in real time data subscription response payload
type TargetResult struct {
	ID   string `json:"id" example:"grid1.zone1.station1.ns1.tag1.transformfeeder1_220.I_A_rms"`
	Code string `json:"code" example:"1001"`
	Msg  string `json:"msg" example:"subscription success"`
}

// RealTimeSubPayload define struct of real time data subscription response payload
type RealTimeSubPayload struct {
	ClientID      string         `json:"client_id" example:"5d72f2d9-e33a-4f1b-9c76-88a44b9a953e" description:"monitor request ID identifying different clients"`
	TargetResults []TargetResult `json:"targets"`
}
@ -9,62 +9,35 @@ import (
// Bay structure define abstracted info set of electrical bay
type Bay struct {
	BayUUID        uuid.UUID   `gorm:"column:bay_uuid;type:uuid;primaryKey;default:gen_random_uuid()"`
	Name           string      `gorm:"column:name;size:64;not null;default:''"`
	Tag            string      `gorm:"column:tag;size:32;not null;default:''"`
	Type           string      `gorm:"column:type;size:64;not null;default:''"`
	Unom           float64     `gorm:"column:unom;not null;default:-1"`
	Fla            float64     `gorm:"column:fla;not null;default:-1"`
	Capacity       float64     `gorm:"column:capacity;not null;default:-1"`
	Description    string      `gorm:"column:description;size:512;not null;default:''"`
	InService      bool        `gorm:"column:in_service;not null;default:false"`
	State          int         `gorm:"column:state;not null;default:-1"`
	Grid           string      `gorm:"column:grid;size:64;not null;default:''"`
	Zone           string      `gorm:"column:zone;size:64;not null;default:''"`
	Station        string      `gorm:"column:station;size:64;not null;default:''"`
	Business       JSONMap     `gorm:"column:business;type:jsonb;not null;default:'{}'"`
	Context        JSONMap     `gorm:"column:context;type:jsonb;not null;default:'{}'"`
	FromUUIDs      []uuid.UUID `gorm:"column:from_uuids;type:jsonb;not null;default:'[]'"`
	ToUUIDs        []uuid.UUID `gorm:"column:to_uuids;type:jsonb;not null;default:'[]'"`
	DevProtect     JSONMap     `gorm:"column:dev_protect;type:jsonb;not null;default:'[]'"`
	DevFaultRecord JSONMap     `gorm:"column:dev_fault_record;type:jsonb;not null;default:'[]'"`
	DevStatus      JSONMap     `gorm:"column:dev_status;type:jsonb;not null;default:'[]'"`
	DevDynSense    JSONMap     `gorm:"column:dev_dyn_sense;type:jsonb;not null;default:'[]'"`
	DevInstruct    JSONMap     `gorm:"column:dev_instruct;type:jsonb;not null;default:'[]'"`
	DevEtc         JSONMap     `gorm:"column:dev_etc;type:jsonb;not null;default:'[]'"`
	Components     []uuid.UUID `gorm:"column:components;type:uuid[];not null;default:'{}'"`
	Op             int         `gorm:"column:op;not null;default:-1"`
	Ts             time.Time   `gorm:"column:ts;type:timestamptz;not null;default:CURRENT_TIMESTAMP"`
}

// TableName defines func to return the table name of Bay
func (b *Bay) TableName() string {
	return "bay"
}
@ -9,27 +9,37 @@ import (
// Component structure define abstracted info set of electrical component
type Component struct {
	GlobalUUID  uuid.UUID `gorm:"column:global_uuid;primaryKey"`
	NsPath      string    `gorm:"column:nspath"`
	Tag         string    `gorm:"column:tag"`
	Name        string    `gorm:"column:name"`
	ModelName   string    `gorm:"column:model_name"`
	Description string    `gorm:"column:description"`
	GridTag     string    `gorm:"column:grid"`
	ZoneTag     string    `gorm:"column:zone"`
	StationTag  string    `gorm:"column:station"`
	Type        int       `gorm:"column:type"`
	InService   bool      `gorm:"column:in_service"`
	State       int       `gorm:"column:state"`
	Status      int       `gorm:"column:status"`
	Connection  JSONMap   `gorm:"column:connection;type:jsonb;default:'{}'"`
	Label       JSONMap   `gorm:"column:label;type:jsonb;default:'{}'"`
	Context     JSONMap   `gorm:"column:context;type:jsonb;default:'{}'"`
	Op          int       `gorm:"column:op"`
	Ts          time.Time `gorm:"column:ts"`
}

// TableName defines func to return the table name of Component
func (c *Component) TableName() string {
	return "component"
}

// GetTagName defines func to implement the CircuitDiagramNodeInterface interface
func (c Component) GetTagName() string {
	return c.Tag
}

// GetNSPath defines func to implement the CircuitDiagramNodeInterface interface
func (c Component) GetNSPath() string {
	return c.NsPath
}
@ -7,15 +7,25 @@ import (
// Grid structure define abstracted info set of electrical grid
type Grid struct {
	ID          int64     `gorm:"column:id;primaryKey"`
	TAGNAME     string    `gorm:"column:tagname"`
	Name        string    `gorm:"column:name"`
	Description string    `gorm:"column:description"`
	Op          int       `gorm:"column:op"`
	Ts          time.Time `gorm:"column:ts"`
}

// TableName defines func to return the table name of Grid
func (g *Grid) TableName() string {
	return "grid"
}

// GetTagName defines func to implement the CircuitDiagramNodeInterface interface
func (g Grid) GetTagName() string {
	return g.TAGNAME
}

// GetNSPath defines func to implement the CircuitDiagramNodeInterface interface
func (g Grid) GetNSPath() string {
	return ""
}
@ -9,20 +9,20 @@ import (
// Measurement structure define abstracted info set of electrical measurement
type Measurement struct {
	ID            int64     `gorm:"column:id;primaryKey;autoIncrement"`
	Tag           string    `gorm:"column:tag;size:64;not null;default:''"`
	Name          string    `gorm:"column:name;size:64;not null;default:''"`
	Type          int16     `gorm:"column:type;not null;default:-1"`
	Size          int       `gorm:"column:size;not null;default:-1"`
	DataSource    JSONMap   `gorm:"column:data_source;type:jsonb;not null;default:'{}'"`
	EventPlan     JSONMap   `gorm:"column:event_plan;type:jsonb;not null;default:'{}'"`
	BayUUID       uuid.UUID `gorm:"column:bay_uuid;type:uuid;not null"`
	ComponentUUID uuid.UUID `gorm:"column:component_uuid;type:uuid;not null"`
	Op            int       `gorm:"column:op;not null;default:-1"`
	Ts            time.Time `gorm:"column:ts;type:timestamptz;not null;default:CURRENT_TIMESTAMP"`
}

// TableName defines func to return the table name of Measurement
func (Measurement) TableName() string {
	return "measurement"
}
@ -0,0 +1,8 @@
// Package orm define database data struct
package orm

// CircuitDiagramNodeInterface define general node type interface
type CircuitDiagramNodeInterface interface {
	GetTagName() string
	GetNSPath() string
}
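A small sketch of how the interface can be consumed (the helper name is hypothetical): grid, zone, and station nodes return an empty NSPath, while components return a real one, so callers can build paths uniformly.

// fullPathOf is a hypothetical helper working on any diagram node level.
func fullPathOf(node CircuitDiagramNodeInterface) string {
    if nsPath := node.GetNSPath(); nsPath != "" {
        return nsPath + "." + node.GetTagName()
    }
    return node.GetTagName()
}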
@ -5,17 +5,17 @@ import "time"
// Page structure define circuit diagram page info set
type Page struct {
	ID          int64     `gorm:"column:id;primaryKey"`
	Tag         string    `gorm:"column:tag"`
	Name        string    `gorm:"column:name"`
	Label       JSONMap   `gorm:"column:label;type:jsonb;default:'{}'"`
	Context     JSONMap   `gorm:"column:context;type:jsonb;default:'{}'"`
	Description string    `gorm:"column:description"`
	Op          int       `gorm:"column:op"`
	Ts          time.Time `gorm:"column:ts"`
}

// TableName defines func to return the table name of Page
func (p *Page) TableName() string {
	return "page"
}
@ -7,17 +7,27 @@ import (
// Station structure define abstracted info set of electrical Station
type Station struct {
	ID          int64     `gorm:"column:id;primaryKey"`
	ZoneID      int64     `gorm:"column:zone_id"`
	TAGNAME     string    `gorm:"column:tagname"`
	Name        string    `gorm:"column:name"`
	Description string    `gorm:"column:description"`
	IsLocal     bool      `gorm:"column:is_local"`
	Op          int       `gorm:"column:op"`
	Ts          time.Time `gorm:"column:ts"`
}

// TableName defines func to return the table name of Station
func (s *Station) TableName() string {
	return "station"
}

// GetTagName defines func to implement the CircuitDiagramNodeInterface interface
func (s Station) GetTagName() string {
	return s.TAGNAME
}

// GetNSPath defines func to implement the CircuitDiagramNodeInterface interface
func (s Station) GetNSPath() string {
	return ""
}
@ -1,18 +1,25 @@
// Package orm define database data struct
package orm

import (
	"time"

	"github.com/gofrs/uuid"
)

// Topologic structure define topologic info set of circuit diagram
type Topologic struct {
	ID          int64     `gorm:"column:id"`
	UUIDFrom    uuid.UUID `gorm:"column:uuid_from"`
	UUIDTo      uuid.UUID `gorm:"column:uuid_to"`
	Context     JSONMap   `gorm:"column:context;type:jsonb;default:'{}'"`
	Flag        int       `gorm:"column:flag"`
	Description string    `gorm:"column:description;size:512;not null;default:''"`
	Op          int       `gorm:"column:op;not null;default:-1"`
	Ts          time.Time `gorm:"column:ts;type:timestamptz;not null;default:CURRENT_TIMESTAMP"`
}

// TableName defines func to return the table name of Topologic
func (t *Topologic) TableName() string {
	return "topologic"
}
@ -7,16 +7,26 @@ import (
// Zone structure define abstracted info set of electrical zone
type Zone struct {
	ID          int64     `gorm:"column:id;primaryKey"`
	GridID      int64     `gorm:"column:grid_id"`
	TAGNAME     string    `gorm:"column:tagname"`
	Name        string    `gorm:"column:name"`
	Description string    `gorm:"column:description"`
	Op          int       `gorm:"column:op"`
	Ts          time.Time `gorm:"column:ts"`
}

// TableName defines func to return the table name of Zone
func (z *Zone) TableName() string {
	return "zone"
}

// GetTagName defines func to implement the CircuitDiagramNodeInterface interface
func (z Zone) GetTagName() string {
	return z.TAGNAME
}

// GetNSPath defines func to implement the CircuitDiagramNodeInterface interface
func (z Zone) GetNSPath() string {
	return ""
}
@ -0,0 +1,38 @@
// Package orm define database data struct
package orm

import (
	"database/sql/driver"
	"encoding/json"
	"errors"
)

// JSONMap define struct that implements the sql.Scanner and driver.Valuer interfaces for handling JSONB fields
type JSONMap map[string]any

// Value define func to convert the JSONMap to a driver.Value ([]byte) for writing to the database
func (j JSONMap) Value() (driver.Value, error) {
	if j == nil {
		return nil, nil
	}
	return json.Marshal(j)
}

// Scan define func to parse scanned database values ([]byte or string) into a JSONMap for data retrieval
func (j *JSONMap) Scan(value any) error {
	if value == nil {
		*j = nil
		return nil
	}

	var source []byte
	switch v := value.(type) {
	case []byte:
		source = v
	case string:
		source = []byte(v)
	default:
		return errors.New("unsupported data type for JSONMap Scan")
	}
	return json.Unmarshal(source, j)
}
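A round-trip sketch of the two interfaces (plain Go, no database needed):

// exampleJSONMapRoundTrip writes a JSONMap through Value and reads it back
// through Scan, mirroring what gorm does against a jsonb column.
func exampleJSONMapRoundTrip() error {
    m := JSONMap{"voltage": 220.0, "phase": "A"}

    // Value produces the JSON bytes handed to the postgres driver.
    v, err := m.Value()
    if err != nil {
        return err
    }

    // Scan is the reverse path when reading the jsonb column back.
    var out JSONMap
    return out.Scan(v)
}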
@ -0,0 +1,342 @@
// Package realtimedata define real time data operation functions
package realtimedata

import (
	"context"
	"errors"
	"fmt"
	"strings"

	"modelRT/constants"
	"modelRT/logger"
	"modelRT/real-time-data/event"
)

// RealTimeAnalyzer define interface of general methods for real-time data analysis and event triggering
type RealTimeAnalyzer interface {
	AnalyzeAndTriggerEvent(ctx context.Context, conf *ComputeConfig, realTimeValues []float64)
}

// teEventThresholds define struct to store the telemetry floating point thresholds parsed from the conf field cause
type teEventThresholds struct {
	up           float64
	upup         float64
	down         float64
	downdown     float64
	isFloatCause bool
}

// parseTEThresholds define func to parse telemetry thresholds from the cause map
func parseTEThresholds(cause map[string]any) (teEventThresholds, error) {
	t := teEventThresholds{}
	floatKeys := map[string]*float64{
		"upup":     &t.upup,
		"up":       &t.up,
		"down":     &t.down,
		"downdown": &t.downdown,
	}

	for key, ptr := range floatKeys {
		if value, exists := cause[key]; exists {
			if floatVal, ok := value.(float64); ok {
				*ptr = floatVal
				t.isFloatCause = true
			} else {
				return teEventThresholds{}, fmt.Errorf("key:%s type is incorrect. expected float64, actual %T", key, value)
			}
		}
	}

	// quickly check mutual exclusion
	if _, exists := cause["edge"]; exists && t.isFloatCause {
		return teEventThresholds{}, errors.New("cause config error: 'up/down' keys and 'edge' key are mutually exclusive, but both found")
	}
	return t, nil
}
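A parse example (values as they would arrive from decoded JSON, where numbers are float64):

// exampleParseTEThresholds shows the happy path of the cause parser.
func exampleParseTEThresholds() {
    cause := map[string]any{"up": 230.0, "upup": 240.0}
    t, err := parseTEThresholds(cause)
    _ = err // nil here: t.up == 230, t.upup == 240, t.isFloatCause == true
    _ = t   // down/downdown keep their zero value and are skipped by getTEBreachType below
}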
// getTEBreachType define func to determine which type of out-of-limit the telemetry real time data belongs to
func getTEBreachType(value float64, t teEventThresholds) string {
	if t.upup > 0 && value > t.upup {
		return constants.TelemetryUpUpLimit
	}
	if t.up > 0 && value > t.up {
		return constants.TelemetryUpLimit
	}
	if t.downdown > 0 && value < t.downdown {
		return constants.TelemetryDownDownLimit
	}
	if t.down > 0 && value < t.down {
		return constants.TelemetryDownLimit
	}
	return ""
}
// TEAnalyzer define struct to store the thresholds required for telemetry and implement the analysis logic
type TEAnalyzer struct {
	Thresholds teEventThresholds
}

// AnalyzeAndTriggerEvent define func to implement the RealTimeAnalyzer interface
func (t *TEAnalyzer) AnalyzeAndTriggerEvent(ctx context.Context, conf *ComputeConfig, realTimeValues []float64) {
	analyzeTEDataLogic(ctx, conf, t.Thresholds, realTimeValues)
}

// analyzeTEDataLogic define func to process telemetry data and trigger events
func analyzeTEDataLogic(ctx context.Context, conf *ComputeConfig, thresholds teEventThresholds, realTimeValues []float64) {
	windowSize := conf.minBreachCount
	if windowSize <= 0 {
		logger.Error(ctx, "variable minBreachCount is invalid or zero, analysis skipped", "minBreachCount", windowSize)
		return
	}

	// mark whether any events have been triggered in this batch
	var eventTriggered bool
	breachTriggers := map[string]bool{
		"up": false, "upup": false, "down": false, "downdown": false,
	}

	// implement sliding window to determine breach counts
	for i := 0; i <= len(realTimeValues)-windowSize; i++ {
		window := realTimeValues[i : i+windowSize]
		firstValueBreachType := getTEBreachType(window[0], thresholds)

		if firstValueBreachType == "" {
			continue
		}

		allMatch := true
		for j := 1; j < windowSize; j++ {
			currentValueBreachType := getTEBreachType(window[j], thresholds)
			if currentValueBreachType != firstValueBreachType {
				allMatch = false
				break
			}
		}

		if allMatch {
			// for a continuous out-of-limit sequence, check whether this event type has already been triggered in the current batch of data
			if !breachTriggers[firstValueBreachType] {
				// trigger event
				logger.Warn(ctx, "event triggered by sliding window", "breach_type", firstValueBreachType, "value", window[windowSize-1])

				breachTriggers[firstValueBreachType] = true
				eventTriggered = true
			}
		}
	}

	if eventTriggered {
		command, content := genTEEventCommandAndContent(ctx, conf.Action)
		// TODO: decide whether content may be empty; disallowed for now
		if command == "" || content == "" {
			logger.Error(ctx, "generate telemetry event command or content failed", "action", conf.Action, "command", command, "content", content)
			return
		}
		event.TriggerEventAction(ctx, command, content)
		return
	}
}
|
||||||
|
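A worked example of the window semantics, with invented numbers: assuming minBreachCount = 3 and an up threshold of 80, the batch below fires exactly one "up" event, since the first window is a run of three up-breaches and the dedup map suppresses any later matching windows in the same batch:

	values := []float64{85, 90, 95, 70}
	// window [85 90 95] -> every sample classifies as "up" -> event fires once
	// window [90 95 70] -> 70 is not a breach -> no further event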
func genTEEventCommandAndContent(ctx context.Context, action map[string]any) (command string, content string) {
	cmdValue, exist := action["command"]
	if !exist {
		logger.Error(ctx, "can not find command variable in action map", "action", action)
		return "", ""
	}

	commandStr, ok := cmdValue.(string)
	if !ok {
		logger.Error(ctx, "convert command to string type failed", "command", cmdValue, "type", fmt.Sprintf("%T", cmdValue))
		return "", ""
	}
	command = commandStr

	paramsValue, exist := action["parameters"]
	if !exist {
		logger.Error(ctx, "can not find parameters variable in action map", "action", action)
		return command, ""
	}

	parameterSlice, ok := paramsValue.([]any)
	if !ok {
		logger.Error(ctx, "convert parameters to []any type failed", "parameters", paramsValue, "type", fmt.Sprintf("%T", paramsValue))
		return command, ""
	}

	var builder strings.Builder
	for _, parameter := range parameterSlice {
		parameterStr, ok := parameter.(string)
		if !ok {
			logger.Warn(ctx, "parameter type is incorrect, skip this parameter", "parameter", parameter, "type", fmt.Sprintf("%T", parameter))
			continue
		}
		// write a separator only after a parameter has actually been written,
		// so skipped parameters do not leave dangling commas
		if builder.Len() > 0 {
			builder.WriteString(",")
		}
		builder.WriteString(parameterStr)
	}

	return command, builder.String()
}
// tiEventThresholds define struct to store the telesignal edge threshold parsed from the conf field cause
type tiEventThresholds struct {
	edge         string
	isFloatCause bool
}

// parseTIThresholds define func to parse telesignal thresholds from the cause map
func parseTIThresholds(cause map[string]any) (tiEventThresholds, error) {
	edgeKey := "edge"
	t := tiEventThresholds{
		isFloatCause: false,
	}

	if value, exists := cause[edgeKey]; exists {
		if strVal, ok := value.(string); ok {
			switch strVal {
			case "raising", "falling":
				t.edge = strVal
				return t, nil
			default:
				return tiEventThresholds{}, fmt.Errorf("key:%s value is incorrect, actual value %s. expected 'raising' or 'falling'", edgeKey, strVal)
			}
		}
		return tiEventThresholds{}, fmt.Errorf("key:%s already exists but type is incorrect. expected string, actual %T", edgeKey, value)
	}

	return tiEventThresholds{}, fmt.Errorf("cause map is invalid for telesignal: missing required key '%s'", edgeKey)
}
// getTIBreachType define func to determine which type of out-of-limit the telesignal real time data belongs to
func getTIBreachType(currentValue float64, previousValue float64, t tiEventThresholds) string {
	if t.edge == constants.TelesignalRaising {
		if previousValue == 0.0 && currentValue == 1.0 {
			return constants.TIBreachTriggerType
		}
	} else if t.edge == constants.TelesignalFalling {
		if previousValue == 1.0 && currentValue == 0.0 {
			return constants.TIBreachTriggerType
		}
	}

	return ""
}
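A quick sketch of the edge detection, assuming constants.TelesignalRaising and constants.TelesignalFalling hold the "raising" and "falling" strings used during parsing:

	t := tiEventThresholds{edge: "raising"}
	getTIBreachType(1.0, 0.0, t) // constants.TIBreachTriggerType: 0 -> 1 rising edge
	getTIBreachType(0.0, 1.0, t) // "": a falling edge, but we only watch raising
	getTIBreachType(1.0, 1.0, t) // "": no transition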
// TIAnalyzer define struct to store the thresholds required for remote signaling (telesignal) and implement the analysis logic
type TIAnalyzer struct {
	Thresholds tiEventThresholds
}

// AnalyzeAndTriggerEvent define func to implement the RealTimeAnalyzer interface
func (t *TIAnalyzer) AnalyzeAndTriggerEvent(ctx context.Context, conf *ComputeConfig, realTimeValues []float64) {
	analyzeTIDataLogic(ctx, conf, t.Thresholds, realTimeValues)
}
// analyzeTIDataLogic define func to process telesignal data and trigger events
func analyzeTIDataLogic(ctx context.Context, conf *ComputeConfig, thresholds tiEventThresholds, realTimeValues []float64) {
	windowSize := conf.minBreachCount
	if windowSize <= 0 {
		logger.Error(ctx, "variable minBreachCount is invalid or zero, analysis skipped", "minBreachCount", windowSize)
		return
	}

	numDataPoints := len(realTimeValues)
	if numDataPoints < 2 {
		logger.Info(ctx, "data points less than 2, no change event possible, analysis skipped", "data_points", numDataPoints)
		return
	}

	// pre-calculate the change event type for all adjacent point pairs
	numChanges := numDataPoints - 1
	changeBreachTypes := make([]string, numChanges)

	for i := range numChanges {
		previousValue := realTimeValues[i]
		currentValue := realTimeValues[i+1]

		changeBreachTypes[i] = getTIBreachType(currentValue, previousValue, thresholds)
	}

	if numChanges < windowSize {
		logger.Error(ctx, "number of change events is less than window size, analysis skipped", "num_changes", numChanges, "window_size", windowSize)
		return
	}

	var eventTriggered bool
	breachTriggers := map[string]bool{
		constants.TIBreachTriggerType: false,
	}

	for i := 0; i <= numChanges-windowSize; i++ {
		windowBreachTypes := changeBreachTypes[i : i+windowSize]
		firstBreachType := windowBreachTypes[0]

		if firstBreachType == "" {
			continue
		}

		allMatch := true
		for j := 1; j < windowSize; j++ {
			if windowBreachTypes[j] != firstBreachType {
				allMatch = false
				break
			}
		}

		if allMatch {
			if !breachTriggers[firstBreachType] {
				finalValueIndex := i + windowSize
				logger.Warn(ctx, "event triggered by sliding window", "breach_type", firstBreachType, "value", realTimeValues[finalValueIndex])

				breachTriggers[firstBreachType] = true
				eventTriggered = true
			}
		}
	}

	if eventTriggered {
		command, content := genTIEventCommandAndContent(conf.Action)
		// TODO: decide whether content may be empty; disallowed for now
		if command == "" || content == "" {
			logger.Error(ctx, "generate telesignal event command or content failed", "action", conf.Action, "command", command, "content", content)
			return
		}
		event.TriggerEventAction(ctx, command, content)
		return
	}
}
func genTIEventCommandAndContent(action map[string]any) (command string, content string) {
	cmdValue, exist := action["command"]
	if !exist {
		return "", ""
	}

	commandStr, ok := cmdValue.(string)
	if !ok {
		return "", ""
	}
	command = commandStr

	paramsValue, exist := action["parameters"]
	if !exist {
		return command, ""
	}

	// the action map comes from JSON decoding, so parameters arrive as []any
	parameterSlice, ok := paramsValue.([]any)
	if !ok {
		return command, ""
	}

	var builder strings.Builder
	for _, parameter := range parameterSlice {
		parameterStr, ok := parameter.(string)
		if !ok {
			continue
		}
		if builder.Len() > 0 {
			builder.WriteString(",")
		}
		builder.WriteString(parameterStr)
	}

	return command, builder.String()
}
@ -0,0 +1,72 @@
// Package realtimedata define real time data operation functions
package realtimedata

import (
	"sync"
)

// ComputeConfig define struct of measurement computation
type ComputeConfig struct {
	Cause  map[string]any
	Action map[string]any
	// TODO: reserve an entry point for later tuning
	// min consecutive breach count
	minBreachCount int
	Duration       int
	DataSize       int64
	QueryKey       string
	StopGchan      chan struct{}
	Analyzer       RealTimeAnalyzer
}

// MeasComputeState define struct to manage the state of measurement computations using sync.Map
type MeasComputeState struct {
	measMap sync.Map
}

// NewMeasComputeState define func to create and return a new instance of MeasComputeState
func NewMeasComputeState() *MeasComputeState {
	return &MeasComputeState{}
}

// Store define func to store a compute configuration for the specified key
func (m *MeasComputeState) Store(key string, config *ComputeConfig) {
	m.measMap.Store(key, config)
}

// Load define func to retrieve the compute configuration for the specified key
func (m *MeasComputeState) Load(key string) (*ComputeConfig, bool) {
	value, ok := m.measMap.Load(key)
	if !ok {
		return nil, false
	}
	return value.(*ComputeConfig), true
}

// Delete define func to remove the compute configuration for the specified key
func (m *MeasComputeState) Delete(key string) {
	m.measMap.Delete(key)
}

// LoadOrStore define func to return the existing compute configuration for the key if present, otherwise store and return the given configuration
func (m *MeasComputeState) LoadOrStore(key string, config *ComputeConfig) (*ComputeConfig, bool) {
	value, loaded := m.measMap.LoadOrStore(key, config)
	return value.(*ComputeConfig), loaded
}

// Range define func to iterate over all key-configuration pairs in the map
func (m *MeasComputeState) Range(f func(key string, config *ComputeConfig) bool) {
	m.measMap.Range(func(key, value any) bool {
		return f(key.(string), value.(*ComputeConfig))
	})
}

// Len define func to return the number of compute configurations in the map
func (m *MeasComputeState) Len() int {
	count := 0
	m.measMap.Range(func(_, _ any) bool {
		count++
		return true
	})
	return count
}
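A small usage sketch of the typed wrapper (keys and fields invented for illustration):

	state := NewMeasComputeState()
	state.Store("meas-uuid-1", &ComputeConfig{QueryKey: "rt:meas-uuid-1"})

	if conf, ok := state.Load("meas-uuid-1"); ok {
		_ = conf.QueryKey // typed access, no manual assertion at the call site
	}

	state.Range(func(key string, conf *ComputeConfig) bool {
		return true // return false to stop iterating early
	})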
@ -0,0 +1,74 @@
// Package event define real time data event operation functions
package event

import (
	"context"

	"modelRT/logger"
)

type actionHandler func(ctx context.Context, content string) error

// actionDispatchMap define variable to store all action handlers in a map
var actionDispatchMap = map[string]actionHandler{
	"info":      handleInfoAction,
	"warning":   handleWarningAction,
	"error":     handleErrorAction,
	"critical":  handleCriticalAction,
	"exception": handleExceptionAction,
}

// TriggerEventAction define func to trigger an event by the action in the compute config
func TriggerEventAction(ctx context.Context, command string, content string) {
	handler, exists := actionDispatchMap[command]
	if !exists {
		logger.Error(ctx, "unknown action command", "command", command)
		return
	}
	err := handler(ctx, content)
	if err != nil {
		logger.Error(ctx, "action handler failed", "command", command, "content", content, "error", err)
		return
	}
	logger.Info(ctx, "action handler success", "command", command, "content", content)
}
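Dispatch is a plain map lookup keyed on the command string, so triggering an event is a single call; the command and content here are illustrative:

	// invokes handleWarningAction with the comma-joined parameter string
	TriggerEventAction(ctx, "warning", "voltage,phase-A")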
func handleInfoAction(ctx context.Context, content string) error {
	// actually perform actions such as sending notifications and writing logs
	actionParams := content
	// ... logic to send info level event using actionParams ...
	logger.Info(ctx, "trigger info event", "message", actionParams)
	return nil
}

func handleWarningAction(ctx context.Context, content string) error {
	// actually perform actions such as sending alerts and writing logs
	actionParams := content
	// ... logic to send warning level event using actionParams ...
	logger.Warn(ctx, "trigger warning event", "message", actionParams)
	return nil
}

func handleErrorAction(ctx context.Context, content string) error {
	// actually perform actions such as sending alerts and writing logs
	actionParams := content
	// ... logic to send error level event using actionParams ...
	logger.Error(ctx, "trigger error event", "message", actionParams)
	return nil
}

func handleCriticalAction(ctx context.Context, content string) error {
	// actually perform actions such as sending alerts and writing logs
	actionParams := content
	// ... logic to send critical level event using actionParams ...
	logger.Error(ctx, "trigger critical event", "message", actionParams)
	return nil
}

func handleExceptionAction(ctx context.Context, content string) error {
	// actually perform actions such as sending alerts and writing logs
	actionParams := content
	// ... logic to send exception level event using actionParams ...
	logger.Error(ctx, "trigger exception event", "message", actionParams)
	return nil
}
@ -1,63 +0,0 @@
// Package realtimedata define real time data operation functions
package realtimedata

import (
	"context"
	"time"

	"modelRT/logger"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

// RealTimeDataComputer continuously processing real-time data from Kafka specified topics
func RealTimeDataComputer(ctx context.Context, consumerConfig kafka.ConfigMap, topics []string, duration string) {
	// context for graceful shutdown
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// setup a channel to listen for interrupt signals
	// TODO: pass the interrupt signal in as a parameter
	interrupt := make(chan struct{}, 1)

	// read message (-1 means wait indefinitely)
	timeoutDuration, err := time.ParseDuration(duration)

	// create a new consumer
	consumer, err := kafka.NewConsumer(&consumerConfig)
	if err != nil {
		logger.Error(ctx, "init kafka consume by config failed", "config", consumerConfig, "error", err)
	}

	// subscribe to the topic
	err = consumer.SubscribeTopics(topics, nil)
	if err != nil {
		logger.Error(ctx, "subscribe to the topic failed", "topic", topics, "error", err)
	}

	// start a goroutine to handle shutdown
	go func() {
		<-interrupt
		cancel()
		consumer.Close()
	}()

	// continuously read messages from Kafka
	for {
		msg, err := consumer.ReadMessage(timeoutDuration)
		if err != nil {
			if ctx.Err() == context.Canceled {
				logger.Info(ctx, "context canceled, stopping read loop")
				break
			}
			logger.Error(ctx, "consumer read message failed", "error", err)
			continue
		}

		// TODO: use an ants.Pool to process the Kafka subscription data
		_, err = consumer.CommitMessage(msg)
		if err != nil {
			logger.Error(ctx, "manual submission information failed", "message", msg, "error", err)
		}
	}
}
@ -0,0 +1,405 @@
// Package realtimedata define real time data operation functions
package realtimedata

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"time"

	"modelRT/config"
	"modelRT/constants"
	"modelRT/diagram"
	"modelRT/logger"
	"modelRT/model"
	"modelRT/network"
	"modelRT/orm"
	"modelRT/pool"
	"modelRT/util"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

var (
	// RealTimeDataChan define channel of real time data receive
	RealTimeDataChan chan network.RealTimeDataReceiveRequest

	globalComputeState *MeasComputeState
)

func init() {
	RealTimeDataChan = make(chan network.RealTimeDataReceiveRequest, 100)
	globalComputeState = NewMeasComputeState()
}
// StartRealTimeDataComputing define func to start real time data process goroutines by measurement info
func StartRealTimeDataComputing(ctx context.Context, measurements []orm.Measurement) {
	for _, measurement := range measurements {
		enableValue, exist := measurement.EventPlan["enable"]
		if !exist {
			logger.Info(ctx, "measurement object do not need real time data computing", "measurement_uuid", measurement.ComponentUUID)
			continue
		}

		// check the type before checking the value, otherwise a non-bool
		// enable field would be silently treated as disabled
		enable, ok := enableValue.(bool)
		if !ok {
			logger.Error(ctx, "convert enable variable to boolean type failed", "measurement_uuid", measurement.ComponentUUID, "enable", enableValue)
			continue
		}
		if !enable {
			logger.Info(ctx, "measurement object do not need real time data computing", "measurement_uuid", measurement.ComponentUUID)
			continue
		}

		conf, err := initComputeConfig(measurement)
		if err != nil {
			logger.Error(ctx, "failed to initialize real time compute config", "measurement_uuid", measurement.ComponentUUID, "error", err)
			continue
		}

		if conf == nil {
			logger.Info(ctx, "measurement object is disabled or does not require real time computing", "measurement_uuid", measurement.ComponentUUID)
			continue
		}

		uuidStr := measurement.ComponentUUID.String()
		enrichedCtx := context.WithValue(ctx, constants.MeasurementUUIDKey, uuidStr)
		conf.StopGchan = make(chan struct{})
		globalComputeState.Store(uuidStr, conf)
		logger.Info(ctx, "starting real time data computing for measurement", "measurement_uuid", measurement.ComponentUUID)
		go continuousComputation(enrichedCtx, conf)
	}
}
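For reference, the EventPlan shape that the loop above and initComputeConfig below expect looks roughly like this after JSON decoding into map[string]any; the keys come from the parsing code, the concrete values are invented:

	eventPlan := map[string]any{
		"enable": true,
		// telemetry form: float64 thresholds
		"cause": map[string]any{"up": 80.0, "upup": 100.0},
		"action": map[string]any{
			"command":    "warning",
			"parameters": []any{"voltage", "phase-A"},
		},
	}
	// the telesignal form instead uses: "cause": map[string]any{"edge": "raising"}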
func initComputeConfig(measurement orm.Measurement) (*ComputeConfig, error) {
	var err error

	enableValue, exist := measurement.EventPlan["enable"]
	if !exist {
		return nil, nil
	}

	enable, ok := enableValue.(bool)
	if !ok {
		return nil, fmt.Errorf("field enable can not be converted to boolean, found type: %T", enableValue)
	}

	if !enable {
		return nil, nil
	}

	conf := &ComputeConfig{}

	causeValue, exist := measurement.EventPlan["cause"]
	if !exist {
		return nil, errors.New("missing required field cause")
	}

	cause, ok := causeValue.(map[string]any)
	if !ok {
		return nil, fmt.Errorf("field cause can not be converted to map[string]any, found type: %T", causeValue)
	}
	conf.Cause, err = processCauseMap(cause)
	if err != nil {
		return nil, fmt.Errorf("parse content of field cause failed: %w", err)
	}

	actionValue, exist := measurement.EventPlan["action"]
	if !exist {
		return nil, errors.New("missing required field action")
	}
	action, ok := actionValue.(map[string]any)
	if !ok {
		return nil, fmt.Errorf("field action can not be converted to map[string]any, found type: %T", actionValue)
	}
	conf.Action = action

	queryKey, err := model.GenerateMeasureIdentifier(measurement.DataSource)
	if err != nil {
		return nil, fmt.Errorf("generate redis query key by datasource failed: %w", err)
	}
	conf.QueryKey = queryKey
	conf.DataSize = int64(measurement.Size)
	// TODO: use constant values for temporary settings
	conf.minBreachCount = constants.MinBreachCount
	// TODO: refine how the duration is constructed later
	conf.Duration = 10

	// the presence of any float threshold key marks this as a telemetry cause
	isFloatCause := false
	for _, key := range []string{"up", "down", "upup", "downdown"} {
		if _, exists := conf.Cause[key]; exists {
			isFloatCause = true
			break
		}
	}

	if isFloatCause {
		// te config
		teThresholds, err := parseTEThresholds(conf.Cause)
		if err != nil {
			return nil, fmt.Errorf("failed to parse telemetry thresholds: %w", err)
		}
		conf.Analyzer = &TEAnalyzer{Thresholds: teThresholds}
	} else {
		// ti config
		tiThresholds, err := parseTIThresholds(conf.Cause)
		if err != nil {
			return nil, fmt.Errorf("failed to parse telesignal thresholds: %w", err)
		}
		conf.Analyzer = &TIAnalyzer{Thresholds: tiThresholds}
	}

	return conf, nil
}
func processCauseMap(data map[string]any) (map[string]any, error) {
	causeResult := make(map[string]any)
	keysToExtract := []string{"up", "down", "upup", "downdown"}

	var foundFloatKey bool
	for _, key := range keysToExtract {
		if value, exists := data[key]; exists {
			foundFloatKey = true

			// check value type
			if floatVal, ok := value.(float64); ok {
				causeResult[key] = floatVal
			} else {
				return nil, fmt.Errorf("key:%s already exists but type is incorrect. expected float64, actual %T", key, value)
			}
		}
	}

	if foundFloatKey {
		return causeResult, nil
	}

	edgeKey := "edge"
	if value, exists := data[edgeKey]; exists {
		stringVal, ok := value.(string)
		if !ok {
			return nil, fmt.Errorf("key:%s already exists but type is incorrect. expected string, actual %T", edgeKey, value)
		}
		switch stringVal {
		case "raising", "falling":
			// a valid edge value must return here, not fall through to the error below
			causeResult[edgeKey] = stringVal
			return causeResult, nil
		default:
			return nil, fmt.Errorf("key:%s value is incorrect, actual value %s", edgeKey, stringVal)
		}
	}

	return nil, fmt.Errorf("cause map is invalid: missing required keys (%v) or '%s'", keysToExtract, edgeKey)
}
func continuousComputation(ctx context.Context, conf *ComputeConfig) {
	client := diagram.NewRedisClient()
	uuid, _ := ctx.Value(constants.MeasurementUUIDKey).(string)
	duration := util.SecondsToDuration(conf.Duration)
	ticker := time.NewTicker(duration)
	defer ticker.Stop()

	for {
		select {
		case <-conf.StopGchan:
			logger.Info(ctx, "continuous computing goroutine stopped by local StopGchan", "uuid", uuid)
			return
		case <-ctx.Done():
			logger.Info(ctx, "continuous computing goroutine stopped by parent context done signal")
			return
		case <-ticker.C:
			members, err := client.QueryByZRangeByLex(ctx, conf.QueryKey, conf.DataSize)
			if err != nil {
				logger.Error(ctx, "query real time data from redis failed", "key", conf.QueryKey, "error", err)
				continue
			}

			realTimeDatas := util.ConvertZSetMembersToFloat64(members)
			if conf.Analyzer != nil {
				conf.Analyzer.AnalyzeAndTriggerEvent(ctx, conf, realTimeDatas)
			} else {
				logger.Error(ctx, "analyzer is not initialized for this measurement", "uuid", uuid)
			}
		}
	}
}
// ReceiveChan define func to receive and process real time data
func ReceiveChan(ctx context.Context, consumerConfig *kafka.ConfigMap, topics []string, duration float32) {
	consumer, err := kafka.NewConsumer(consumerConfig)
	if err != nil {
		logger.Error(ctx, "create kafka consumer failed", "error", err)
		return
	}
	defer consumer.Close()

	err = consumer.SubscribeTopics(topics, nil)
	if err != nil {
		logger.Error(ctx, "subscribe kafka topics failed", "topic", topics, "error", err)
		return
	}

	batchSize := 100
	batchTimeout := util.SecondsToDuration(duration)
	messages := make([]*kafka.Message, 0, batchSize)
	lastCommit := time.Now()
	logger.Info(ctx, "start consuming from kafka", "topic", topics)
	for {
		select {
		case <-ctx.Done():
			logger.Info(ctx, "stop real time data computing by context cancel")
			return
		case realTimeData := <-RealTimeDataChan:
			componentUUID := realTimeData.PayLoad.ComponentUUID
			component, err := diagram.GetComponentMap(componentUUID)
			if err != nil {
				logger.Error(ctx, "query component info from diagram map by component id failed", "component_uuid", componentUUID, "error", err)
				continue
			}

			componentType := component.Type
			if componentType != constants.DemoType {
				logger.Error(ctx, "can not process real time data of component type not equal DemoType", "component_uuid", componentUUID)
				continue
			}

			var anchorName string
			var compareValUpperLimit, compareValLowerLimit float64
			var anchorRealTimeData []float64
			var calculateFunc func(anchorValue float64, args ...float64) float64

			// calculateFunc, params := config.SelectAnchorCalculateFuncAndParams(componentType, anchorName, componentData)

			for _, param := range realTimeData.PayLoad.Values {
				anchorRealTimeData = append(anchorRealTimeData, param.Value)
			}

			anchorConfig := config.AnchorParamConfig{
				AnchorParamBaseConfig: config.AnchorParamBaseConfig{
					ComponentUUID:        componentUUID,
					AnchorName:           anchorName,
					CompareValUpperLimit: compareValUpperLimit,
					CompareValLowerLimit: compareValLowerLimit,
					AnchorRealTimeData:   anchorRealTimeData,
				},
				CalculateFunc:   calculateFunc,
				CalculateParams: []float64{},
			}
			anchorChan, err := pool.GetAnchorParamChan(ctx, componentUUID)
			if err != nil {
				logger.Error(ctx, "get anchor param chan failed", "component_uuid", componentUUID, "error", err)
				continue
			}
			anchorChan <- anchorConfig
		default:
			msg, err := consumer.ReadMessage(batchTimeout)
			if err != nil {
				if kafkaErr, ok := err.(kafka.Error); ok && kafkaErr.Code() == kafka.ErrTimedOut {
					// process accumulated messages when the read times out
					if len(messages) > 0 {
						processMessageBatch(ctx, messages)
						consumer.Commit()
						messages = messages[:0]
					}
					continue
				}
				logger.Error(ctx, "read message from kafka failed", "error", err, "msg", msg)
				continue
			}

			messages = append(messages, msg)
			// process messages when batch size or timeout period is reached
			if len(messages) >= batchSize || time.Since(lastCommit) >= batchTimeout {
				processMessageBatch(ctx, messages)
				consumer.Commit()
				messages = messages[:0]
				lastCommit = time.Now()
			}
		}
	}
}
type realTimeDataPayload struct {
	ComponentUUID string
	Values        []float64
}

type realTimeData struct {
	Payload realTimeDataPayload
}

func parseKafkaMessage(msgValue []byte) (*realTimeData, error) {
	var data realTimeData
	err := json.Unmarshal(msgValue, &data)
	if err != nil {
		return nil, fmt.Errorf("unmarshal real time data failed: %w", err)
	}
	return &data, nil
}
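Since the struct carries no json tags, encoding/json matches the exported field names (case-insensitively), so a well-formed message looks like this; the values are illustrative:

	raw := []byte(`{"Payload": {"ComponentUUID": "component-uuid-1", "Values": [0.97, 1.02, 1.10]}}`)
	data, err := parseKafkaMessage(raw)
	if err == nil {
		_ = data.Payload.Values // [0.97 1.02 1.1]
	}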
func processRealTimeData(ctx context.Context, realTimeData *realTimeData) {
	componentUUID := realTimeData.Payload.ComponentUUID
	component, err := diagram.GetComponentMap(componentUUID)
	if err != nil {
		logger.Error(ctx, "query component info from diagram map by component id failed",
			"component_uuid", componentUUID, "error", err)
		return
	}

	componentType := component.Type
	if componentType != constants.DemoType {
		logger.Error(ctx, "can not process real time data of component type not equal DemoType",
			"component_uuid", componentUUID)
		return
	}

	var anchorName string
	var compareValUpperLimit, compareValLowerLimit float64
	var anchorRealTimeData []float64
	var calculateFunc func(anchorValue float64, args ...float64) float64

	anchorRealTimeData = append(anchorRealTimeData, realTimeData.Payload.Values...)

	anchorConfig := config.AnchorParamConfig{
		AnchorParamBaseConfig: config.AnchorParamBaseConfig{
			ComponentUUID:        componentUUID,
			AnchorName:           anchorName,
			CompareValUpperLimit: compareValUpperLimit,
			CompareValLowerLimit: compareValLowerLimit,
			AnchorRealTimeData:   anchorRealTimeData,
		},
		CalculateFunc:   calculateFunc,
		CalculateParams: []float64{},
	}

	anchorChan, err := pool.GetAnchorParamChan(ctx, componentUUID)
	if err != nil {
		logger.Error(ctx, "get anchor param chan failed",
			"component_uuid", componentUUID, "error", err)
		return
	}

	select {
	case anchorChan <- anchorConfig:
	case <-ctx.Done():
		logger.Info(ctx, "context done while sending to anchor chan")
	case <-time.After(5 * time.Second):
		logger.Error(ctx, "timeout sending to anchor chan", "component_uuid", componentUUID)
	}
}
// processMessageBatch define func to batch process kafka messages
func processMessageBatch(ctx context.Context, messages []*kafka.Message) {
	for _, msg := range messages {
		realTimeData, err := parseKafkaMessage(msg.Value)
		if err != nil {
			logger.Error(ctx, "parse kafka message failed", "error", err, "msg", msg)
			continue
		}
		go processRealTimeData(ctx, realTimeData)
	}
}
@ -1,73 +0,0 @@
// Package realtimedata define real time data operation functions
package realtimedata

import (
	"context"

	"modelRT/config"
	"modelRT/constants"
	"modelRT/diagram"
	"modelRT/logger"
	"modelRT/network"
	"modelRT/pool"
)

// RealTimeDataChan define channel of real time data receive
var RealTimeDataChan chan network.RealTimeDataReceiveRequest

func init() {
	RealTimeDataChan = make(chan network.RealTimeDataReceiveRequest, 100)
}

// ReceiveChan define func of real time data receive and process
func ReceiveChan(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case realTimeData := <-RealTimeDataChan:
			componentUUID := realTimeData.PayLoad.ComponentUUID
			component, err := diagram.GetComponentMap(componentUUID)
			if err != nil {
				logger.Error(ctx, "query component info from diagram map by component id failed", "component_uuid", componentUUID, "error", err)
				continue
			}

			componentType := component.Type
			if componentType != constants.DemoType {
				logger.Error(ctx, "can not process real time data of component type not equal DemoType", "component_uuid", componentUUID)
				continue
			}

			var anchorName string
			var compareValUpperLimit, compareValLowerLimit float64
			var anchorRealTimeData []float64
			var calculateFunc func(anchorValue float64, args ...float64) float64

			// calculateFunc, params := config.SelectAnchorCalculateFuncAndParams(componentType, anchorName, componentData)

			for _, param := range realTimeData.PayLoad.Values {
				anchorRealTimeData = append(anchorRealTimeData, param.Value)
			}

			anchorConfig := config.AnchorParamConfig{
				AnchorParamBaseConfig: config.AnchorParamBaseConfig{
					ComponentUUID:        componentUUID,
					AnchorName:           anchorName,
					CompareValUpperLimit: compareValUpperLimit,
					CompareValLowerLimit: compareValLowerLimit,
					AnchorRealTimeData:   anchorRealTimeData,
				},
				CalculateFunc:   calculateFunc,
				CalculateParams: []float64{},
			}
			anchorChan, err := pool.GetAnchorParamChan(ctx, componentUUID)
			if err != nil {
				logger.Error(ctx, "get anchor param chan failed", "component_uuid", componentUUID, "error", err)
				continue
			}
			anchorChan <- anchorConfig
		default:
		}
	}
}
@ -0,0 +1,16 @@
// Package router provides router config
package router

import (
	"modelRT/handler"

	"github.com/gin-gonic/gin"
)

// registerDataRoutes define func of register data routes
func registerDataRoutes(rg *gin.RouterGroup) {
	g := rg.Group("/data/")
	// TODO: change this to a ws path
	// g.GET("realtime", handler.QueryRealTimeMonitorHandler)
	g.GET("history", handler.QueryHistoryDataHandler)
}
@ -0,0 +1,15 @@
// Package router provides router config
package router

import (
	"modelRT/handler"

	"github.com/gin-gonic/gin"
)

// registerMonitorRoutes define func of register monitor data routes
func registerMonitorRoutes(rg *gin.RouterGroup) {
	g := rg.Group("/monitors/")
	g.POST("data/subscriptions", handler.RealTimeSubHandler)
	g.GET("data/realtime/stream/:clientID", handler.PullRealTimeDataHandler)
}
@ -23,4 +23,6 @@ func RegisterRoutes(engine *gin.Engine, clientToken string) {
	registerDiagramRoutes(routeGroup)
	registerAttrRoutes(routeGroup)
	registerMeasurementRoutes(routeGroup, clientToken)
	registerDataRoutes(routeGroup)
	registerMonitorRoutes(routeGroup)
}
@ -1,14 +1,14 @@
// Package sql define database sql statement
package sql

// RecursiveSQL define Topologic table recursive query statement
// RecursiveSQL define topologic table recursive query statement
var RecursiveSQL = `WITH RECURSIVE recursive_tree as (
SELECT uuid_from,uuid_to,flag
FROM "Topologic"
FROM "topologic"
WHERE uuid_from = ?
UNION ALL
SELECT t.uuid_from,t.uuid_to,t.flag
FROM "Topologic" t
FROM "topologic" t
JOIN recursive_tree rt ON t.uuid_from = rt.uuid_to
)
SELECT * FROM recursive_tree;`
@ -43,14 +43,13 @@ func TestUserDao_CreateUser(t *testing.T) {
	topologicInfo := &orm.Topologic{
		UUIDFrom: uuid.FromStringOrNil("70c190f2-8a60-42a9-b143-ec5f87e0aa6b"),
		UUIDTo:   uuid.FromStringOrNil("70c190f2-8a75-42a9-b166-ec5f87e0aa6b"),
		Comment:  "test",
		Flag:     1,
	}

	// ud := dao2.NewUserDao(context.TODO())
	mock.ExpectBegin()
	mock.ExpectExec(regexp.QuoteMeta("INSERT INTO `Topologic`")).
		WithArgs(topologicInfo.Flag, topologicInfo.UUIDFrom, topologicInfo.UUIDTo, topologicInfo.Comment).
		WithArgs(topologicInfo.Flag, topologicInfo.UUIDFrom, topologicInfo.UUIDTo).
		WillReturnResult(sqlmock.NewResult(1, 1))
	mock.ExpectCommit()
@ -0,0 +1,27 @@
// Package util provide some utility functions
package util

import (
	"sort"

	"github.com/redis/go-redis/v9"
)

// ConvertZSetMembersToFloat64 define func to convert zset member scores to float64
func ConvertZSetMembersToFloat64(members []redis.Z) []float64 {
	dataFloats := make([]float64, 0, len(members))
	// restore ascending time order before extracting the scores
	sortRedisZByTimeMemberAscending(members)
	for _, member := range members {
		dataFloats = append(dataFloats, member.Score)
	}
	return dataFloats
}

func sortRedisZByTimeMemberAscending(data []redis.Z) {
	sort.Slice(data, func(i, j int) bool {
		memberI := data[i].Member.(string)
		memberJ := data[j].Member.(string)
		return memberI < memberJ
	})
}
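A sketch of the conversion, assuming (as elsewhere in this change) that ZSet members are nanosecond-timestamp strings and scores carry the measured values:

	members := []redis.Z{
		{Member: "1700000000000000002", Score: 3.3},
		{Member: "1700000000000000001", Score: 2.2},
	}
	ConvertZSetMembersToFloat64(members) // [2.2, 3.3], reordered by timestamp member

Note the lexicographic member comparison matches numeric order only while the timestamp strings have equal length, which holds for the 19-digit nanosecond strings produced by GenNanoTsStr.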
@ -0,0 +1,11 @@
// Package util provide some utility functions
package util

// GetKeysFromSet define func to get all keys from a map[string]struct{}
func GetKeysFromSet(set map[string]struct{}) []string {
	keys := make([]string, 0, len(set))
	for key := range set {
		keys = append(keys, key)
	}
	return keys
}
@ -0,0 +1,60 @@
// Package util provide some utility functions
package util

// RemoveTargetsFromSliceSimple define func to remove targets from a slice of strings
func RemoveTargetsFromSliceSimple(targetsSlice []string, targetsToRemove []string) []string {
	targetsToRemoveSet := make(map[string]struct{}, len(targetsToRemove))
	for _, target := range targetsToRemove {
		targetsToRemoveSet[target] = struct{}{}
	}

	// swap-and-truncate removal; note this does not preserve element order
	for i := len(targetsSlice) - 1; i >= 0; i-- {
		if _, found := targetsToRemoveSet[targetsSlice[i]]; found {
			targetsSlice[i] = targetsSlice[len(targetsSlice)-1]
			targetsSlice = targetsSlice[:len(targetsSlice)-1]
		}
	}
	return targetsSlice
}

// SliceToSet define func to convert a string slice to a set
func SliceToSet(targetsSlice []string) map[string]struct{} {
	set := make(map[string]struct{}, len(targetsSlice))
	for _, target := range targetsSlice {
		set[target] = struct{}{}
	}
	return set
}

// DeduplicateAndReportDuplicates define func to deduplicate a slice of strings and report duplicates
func DeduplicateAndReportDuplicates(targetsSlice []string, sourceSlice []string) (deduplicated []string, duplicates []string) {
	targetSet := SliceToSet(targetsSlice)
	deduplicated = make([]string, 0, len(sourceSlice))
	// duplicate items slice
	duplicates = make([]string, 0, len(sourceSlice))

	for _, source := range sourceSlice {
		if _, found := targetSet[source]; found {
			duplicates = append(duplicates, source)
			continue
		}
		deduplicated = append(deduplicated, source)
	}
	return deduplicated, duplicates
}

// GetLongestCommonPrefixLength define func to get the longest common prefix length between two strings
func GetLongestCommonPrefixLength(query string, result string) int {
	if query == "" {
		return 0
	}

	minLen := min(len(query), len(result))

	for i := range minLen {
		if query[i] != result[i] {
			return i
		}
	}
	return minLen
}
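Behaviour sketches for two of the helpers (inputs invented). Note DeduplicateAndReportDuplicates filters sourceSlice against targetsSlice but does not collapse repeats inside sourceSlice itself:

	dedup, dups := DeduplicateAndReportDuplicates([]string{"a", "b"}, []string{"b", "c", "c"})
	// dedup = ["c", "c"], dups = ["b"]

	GetLongestCommonPrefixLength("abcde", "abcxy") // 3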
@ -0,0 +1,25 @@
// Package util provide some utility functions
package util

import (
	"strconv"
	"time"
)

// GenNanoTsStr define func to generate a nanosecond timestamp string from the current time
func GenNanoTsStr() string {
	now := time.Now()
	nanoseconds := now.UnixNano()
	timestampStr := strconv.FormatInt(nanoseconds, 10)
	return timestampStr
}

// Numeric define interface to constrain supported integer and floating-point types
type Numeric interface {
	int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64 | float32 | float64
}

// SecondsToDuration define func to convert a Numeric type param to a time duration
func SecondsToDuration[T Numeric](seconds T) time.Duration {
	// convert via float64 so fractional inputs (e.g. float32 0.5) are not truncated to zero
	return time.Duration(float64(seconds) * float64(time.Second))
}
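A quick check of the conversion for both integer and fractional inputs:

	SecondsToDuration(10)           // 10s
	SecondsToDuration(float32(0.5)) // 500ms; fractional seconds are preserved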
@ -7,6 +7,7 @@ import (
	"encoding/base64"
	"fmt"
	"os"
	"strconv"
	"strings"
	"time"
)

@ -22,7 +23,7 @@ func GenerateClientToken(host string, serviceName string, secretKey string) (str
		return "", fmt.Errorf("TOKEN_SECRET_KEY environment variable not set and no key provided in parameters")
	}

	uniqueID := fmt.Sprintf("%d", time.Now().UnixNano())
	uniqueID := strconv.FormatInt(time.Now().UnixNano(), 10)
	clientInfo := fmt.Sprintf("host=%s;service=%s;id=%s", host, serviceName, uniqueID)

	mac := hmac.New(sha256.New, []byte(finalSecretKey))