add API endpoints and update data-layer files

zhuxu 2025-11-06 21:09:50 +08:00
parent 7a9bd01b37
commit 16475fe3c1
21 changed files with 548 additions and 193 deletions

View File

@ -1,9 +1,9 @@
{
"default":{
"addrs":["192.168.46.100:27017"],
"username":"mongo",
"password":"123RTYjkl",
"authsource":"events",
"addrs":["127.0.0.1:27017"],
"username":"admin",
"password":"password",
"authsource":"admin",
"authmechanism":"SCRAM-SHA-256"
}
}
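For reference, a minimal sketch of opening a connection with these settings using the v2 Go driver (the project's actual config-loading code is not shown in this commit, so the names here are illustrative):

import (
	"go.mongodb.org/mongo-driver/v2/mongo"
	"go.mongodb.org/mongo-driver/v2/mongo/options"
)

// connect mirrors the "default" entry above; SCRAM-SHA-256 must also be
// enabled for the "admin" user on the server side.
func connect() (*mongo.Client, error) {
	return mongo.Connect(options.Client().
		SetHosts([]string{"127.0.0.1:27017"}).
		SetAuth(options.Credential{
			AuthMechanism: "SCRAM-SHA-256",
			AuthSource:    "admin",
			Username:      "admin",
			Password:      "password",
		}))
}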

View File

@ -3,6 +3,7 @@ package data
import (
"context"
"datart/data/influx"
"datart/data/postgres"
"github.com/redis/go-redis/v9"
)
@ -18,6 +19,7 @@ func NewProcess() *Process {
func (p *Process) StartDataProcessing() {
ctx, cancel := context.WithCancel(context.Background())
p.cancel = cancel
postgres.GenSSU2ChannelSizes(ctx, 500)
updatingRedisPhasor(ctx)
}
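With the cancel func now stored on Process, a matching shutdown hook follows naturally; a sketch (StopDataProcessing is a hypothetical name, not part of this diff):

func (p *Process) StopDataProcessing() {
	if p.cancel != nil {
		p.cancel() // cancels the ctx driving GenSSU2ChannelSizes and updatingRedisPhasor
	}
}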

View File

@ -264,7 +264,7 @@ func convertJsonToF2TVs(cols []string, data [][]any) (map[string][]TV, error) {
func convertCsvToTVs(data [][]string) ([]TV, error) {
ret := make([]TV, 0, len(data))
for _, row := range data {
for _, row := range data[1:] {
if len(row) > 3 {
ns, err := strconv.ParseInt(row[2], 10, 64)
if err != nil {
@ -288,7 +288,7 @@ func convertCsvToTVs(data [][]string) ([]TV, error) {
func convertCsvToF2TVs(data [][]string) (map[string][]TV, error) {
f2tvs := make(map[string][]TV)
for _, row := range data {
for _, row := range data[1:] {
if len(row) > 3 {
ns, err := strconv.ParseInt(row[2], 10, 64)
if err != nil {
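Note that ranging over data[1:] skips the CSV header row but panics when data is empty; a defensive guard (a sketch, not part of this commit) keeps both converters total:

func skipHeader(data [][]string) [][]string {
	if len(data) == 0 {
		return nil // avoid data[1:] panicking on empty input
	}
	return data[1:]
}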

View File

@ -16,25 +16,6 @@ type influxClient struct {
org string
}
type Request struct {
RespType string
Bucket string
Measure string
Station string
MainPos string
SubPos string // comma-separated list
Begin int64
End int64
Operate string
Step string
Default string
}
const (
PhasorBucket = "influxBucket"
SampleBucket = "influxBucket"
)
var client *influxClient
func init() {
@ -71,19 +52,19 @@ func NewInfluxClient(cli *http.Client, url, org, token string) *influxClient {
}
}
func GetBucket(tp string) (string, error) {
func GetDB(tp string) (string, error) {
switch tp {
case "phasor":
return PhasorBucket, nil
return dbphasor, nil
case "sample":
return SampleBucket, nil
return dbsample, nil
}
return "", errors.New("invalid type")
}
// table name depends on serverConf (SSU type)
func GetMeasurement(tp string, mainPos string) (string, error) {
func GetTable(tp string, mainPos string) (string, error) {
switch tp {
case "phasor":
ssu2Type := config.Conf().ServerConf().GetSSUType()
@ -102,39 +83,26 @@ func GetMeasurement(tp string, mainPos string) (string, error) {
return "", errors.New("invalid type")
}
func WriteLinesData(ctx context.Context, bucket string, data []byte) error {
return client.WriteLinesData(ctx, bucket, data)
func WriteLinesData(ctx context.Context, db string, data []byte) error {
return client.WriteLinesData(ctx, db, data)
}
type Request struct {
DB string
Table string
Type string
Station string
MainPos string
SubPos string // comma-separated list
Begin int64
End int64
Operate string
Step string
Default string
}
type TV struct {
Time int64 `json:"time"`
Value float64 `json:"value"`
}
func GetSSUPointLastLimit(ctx context.Context, req *Request, limit int) ([]TV, error) {
req.Begin = time.Now().UnixMilli() - int64(limit*20+20)
return client.GetSSUPointLastLimit(ctx, req, limit)
}
func GetSSUPointsLastLimit(ctx context.Context, req *Request, limit int) (map[string][]TV, error) {
req.Begin = time.Now().UnixMilli() - int64(limit*20+20)
return client.GetSSUPointsLastLimit(ctx, req, limit)
}
func GetSSUPointDurationData(ctx context.Context, req *Request) ([]TV, error) {
return client.GetSSUPointDurationData(ctx, req)
}
func GetSSUPointsDurationData(ctx context.Context, req *Request) (map[string][]TV, error) {
return client.GetSSUPointsDurationData(ctx, req)
}
func GetSSUPointsAfterLimit(ctx context.Context, req *Request, limit int) (map[string][]TV, error) {
req.End = req.Begin + int64(limit*20+20)
return client.GetSSUPointsAfterLimit(ctx, req, limit)
}
func GetSSUPointsBeforeLimit(ctx context.Context, req *Request, limit int) (map[string][]TV, error) {
req.Begin = req.End - int64(limit*20+20)
return client.GetSSUPointsBeforeLimit(ctx, req, limit)
}
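Taken together, a caller now resolves DB and Table once and reuses the same Request across these helpers; a minimal usage sketch (station/device names are placeholders, error handling elided):

db, _ := influx.GetDB("phasor")
table, _ := influx.GetTable("phasor", "dev01")
req := &influx.Request{
	DB:      db,
	Table:   table,
	Type:    "phasor",
	Station: "st01",
	MainPos: "dev01",
	SubPos:  "tm1_amp,tm1_rms", // comma-separated fields
}
f2tvs, err := influx.GetSSUPointsLastLimit(ctx, req, 10)
// keys in f2tvs are "station.device.field", e.g. "st01.dev01.tm1_amp"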

View File

@ -5,11 +5,18 @@ import (
"fmt"
"net/url"
"strings"
"time"
)
const (
FieldCPrifix string = "c"
FieldIPrefix string = "i"
dbphasor = "influxBucket"
dbsample = "influxBucket"
)
// keep consistent with telegraf
const (
FieldYCPrefix string = "tm"
FieldYXPrefix string = "ts"
// FieldP string = "p"
// FieldQ string = "q"
@ -28,20 +35,48 @@ const (
FieldSuffixRMS = "_rms"
)
func GetSSUPointLastLimit(ctx context.Context, req *Request, limit int) ([]TV, error) {
req.Begin = time.Now().UnixMilli() - int64(limit*20+20)
return client.GetSSUPointLastLimit(ctx, req, limit)
}
func GetSSUPointsLastLimit(ctx context.Context, req *Request, limit int) (map[string][]TV, error) {
req.Begin = time.Now().UnixMilli() - int64(limit*20+20)
return client.GetSSUPointsLastLimit(ctx, req, limit)
}
func GetSSUPointDurationData(ctx context.Context, req *Request) ([]TV, error) {
return client.GetSSUPointDurationData(ctx, req)
}
func GetSSUPointsDurationData(ctx context.Context, req *Request) (map[string][]TV, error) {
return client.GetSSUPointsDurationData(ctx, req)
}
func GetSSUPointsAfterLimit(ctx context.Context, req *Request, limit int) (map[string][]TV, error) {
req.End = req.Begin + int64(limit*20+20)
return client.GetSSUPointsAfterLimit(ctx, req, limit)
}
func GetSSUPointsBeforeLimit(ctx context.Context, req *Request, limit int) (map[string][]TV, error) {
req.Begin = req.End - int64(limit*20+20)
return client.GetSSUPointsBeforeLimit(ctx, req, limit)
}
func (client *influxClient) GetSSUPointLastLimit(ctx context.Context, req *Request, limit int) ([]TV, error) {
sql := fmt.Sprintf("select last(%s) as %s from %s where station='%s' and device='%s';",
req.SubPos, req.SubPos, req.Measure, req.Station, req.MainPos)
req.SubPos, req.SubPos, req.Table, req.Station, req.MainPos)
if limit > 1 {
sql = fmt.Sprintf("select %s from %s where station='%s' and device='%s' and time>%dms order by time desc limit %d;",
req.SubPos, req.Measure, req.Station, req.MainPos, req.Begin, limit) // begin = time.Now().UnixMilli()-int64(limit*20+20)
sql = fmt.Sprintf("select %s from %s where station='%s' and device='%s' and time>=%dms order by time desc limit %d;",
req.SubPos, req.Table, req.Station, req.MainPos, req.Begin, limit) // begin = time.Now().UnixMilli()-int64(limit*20+20)
}
reqData := url.Values{
"db": {req.Bucket},
"db": {req.DB},
"q": {sql},
}
return client.getTVsResp(ctx, reqData, req.RespType)
return client.getTVsResp(ctx, reqData, "csv")
}
func (client *influxClient) GetSSUPointsLastLimit(ctx context.Context, req *Request, limit int) (map[string][]TV, error) {
@ -52,40 +87,50 @@ func (client *influxClient) GetSSUPointsLastLimit(ctx context.Context, req *Requ
fields[i] = "last(" + field + ") as " + field
}
sql = fmt.Sprintf("select %s from %s where station='%s' and device='%s';",
strings.Join(fields, ","), req.Measure, req.Station, req.MainPos)
strings.Join(fields, ","), req.Table, req.Station, req.MainPos)
} else {
sql = fmt.Sprintf("select %s from %s where station='%s' and device='%s' and time>%dms order by time desc limit %d;",
req.SubPos, req.Measure, req.Station, req.MainPos, req.Begin, limit) // begin = time.Now().UnixMilli()-int64(limit*20+20)
sql = fmt.Sprintf("select %s from %s where station='%s' and device='%s' and time>=%dms order by time desc limit %d;",
req.SubPos, req.Table, req.Station, req.MainPos, req.Begin, limit) // begin = time.Now().UnixMilli()-int64(limit*20+20)
}
reqData := url.Values{
"db": {req.Bucket},
"db": {req.DB},
"q": {sql},
}
return client.getF2TVsResp(ctx, reqData, req.RespType)
f2tvs, err := client.getF2TVsResp(ctx, reqData, "csv")
if err != nil {
return nil, err
}
ret := make(map[string][]TV, len(f2tvs))
for f, tvs := range f2tvs {
ret[strings.Join([]string{req.Station, req.MainPos, f}, ".")] = tvs // only req.SubPos supports multiple fields
}
return ret, nil
}
func (client *influxClient) GetSSUPointDurationData(ctx context.Context, req *Request) ([]TV, error) {
sql := fmt.Sprintf("select %s from %s where station='%s' and device='%s' and time>=%dms and time<%dms;",
req.SubPos, req.Measure, req.Station, req.MainPos, req.Begin, req.End)
sql := fmt.Sprintf("select %s from %s where station='%s' and device='%s' and time>=%dms and time<=%dms;",
req.SubPos, req.Table, req.Station, req.MainPos, req.Begin, req.End)
if req.Operate != "" && req.Step != "" && req.Default != "" {
sql = fmt.Sprintf("select %s(%s) as %s from %s where station='%s' and device='%s' and time>=%dms and time<%dms group by time(%s) fill(%s);",
req.Operate, req.SubPos, req.SubPos, req.Measure, req.Station, req.MainPos, req.Begin, req.End, req.Step, req.Default)
sql = fmt.Sprintf("select %s(%s) as %s from %s where station='%s' and device='%s' and time>=%dms and time<=%dms group by time(%s) fill(%s);",
req.Operate, req.SubPos, req.SubPos, req.Table, req.Station, req.MainPos, req.Begin, req.End, req.Step, req.Default)
}
reqData := url.Values{
"db": {req.Bucket},
"db": {req.DB},
"q": {sql},
}
return client.getTVsResp(ctx, reqData, req.RespType)
return client.getTVsResp(ctx, reqData, "csv")
}
func (client *influxClient) GetSSUPointsDurationData(ctx context.Context, req *Request) (map[string][]TV, error) {
sql := fmt.Sprintf("select %s from %s where station='%s' and device='%s' and time>=%dms and time<%dms;",
req.SubPos, req.Measure, req.Station, req.MainPos, req.Begin, req.End)
sql := fmt.Sprintf("select %s from %s where station='%s' and device='%s' and time>=%dms and time<=%dms;",
req.SubPos, req.Table, req.Station, req.MainPos, req.Begin, req.End)
if req.Operate != "" && req.Step != "" && req.Default != "" {
subPoss := strings.Split(req.SubPos, ",")
@ -93,52 +138,82 @@ func (client *influxClient) GetSSUPointsDurationData(ctx context.Context, req *R
for i, subPos := range subPoss {
selectSections[i] = req.Operate + "(" + subPos + ")" + " as " + subPos
}
sql = fmt.Sprintf("select %s from %s where station='%s' and device='%s' and time>=%dms and time<%dms group by time(%s) fill(%s);",
strings.Join(selectSections, ", "), req.Measure, req.Station, req.MainPos, req.Begin, req.End, req.Step, req.Default)
sql = fmt.Sprintf("select %s from %s where station='%s' and device='%s' and time>=%dms and time<=%dms group by time(%s) fill(%s);",
strings.Join(selectSections, ", "), req.Table, req.Station, req.MainPos, req.Begin, req.End, req.Step, req.Default)
}
reqData := url.Values{
"db": {req.Bucket},
"db": {req.DB},
"q": {sql},
}
return client.getF2TVsResp(ctx, reqData, req.RespType)
f2tvs, err := client.getF2TVsResp(ctx, reqData, "csv")
if err != nil {
return nil, err
}
ret := make(map[string][]TV, len(f2tvs))
for f, tvs := range f2tvs {
ret[strings.Join([]string{req.Station, req.MainPos, f}, ".")] = tvs // only req.SubPos supports multiple fields
}
return ret, nil
}
func (client *influxClient) GetSSUPointAfterLimit(ctx context.Context, req *Request, limit int) ([]TV, error) {
sql := fmt.Sprintf("select %s from %s where station='%s' and device='%s' and time>=%dms and time<=%dms limit %d;",
req.SubPos, req.Measure, req.Station, req.MainPos, req.Begin, req.End, limit)
req.SubPos, req.Table, req.Station, req.MainPos, req.Begin, req.End, limit)
reqData := url.Values{
"db": {req.Bucket},
"db": {req.DB},
"q": {sql},
}
return client.getTVsResp(ctx, reqData, req.RespType)
return client.getTVsResp(ctx, reqData, "csv")
}
func (client *influxClient) GetSSUPointsAfterLimit(ctx context.Context, req *Request, limit int) (map[string][]TV, error) {
sql := fmt.Sprintf("select %s from %s where station='%s' and device='%s' and time>=%dms and time<=%dms limit %d;",
req.SubPos, req.Measure, req.Station, req.MainPos, req.Begin, req.End, limit)
req.SubPos, req.Table, req.Station, req.MainPos, req.Begin, req.End, limit)
reqData := url.Values{
"db": {req.Bucket},
"db": {req.DB},
"q": {sql},
}
return client.getF2TVsResp(ctx, reqData, req.RespType)
f2tvs, err := client.getF2TVsResp(ctx, reqData, "csv")
if err != nil {
return nil, err
}
ret := make(map[string][]TV, len(f2tvs))
for f, tvs := range f2tvs {
ret[strings.Join([]string{req.Station, req.MainPos, f}, ".")] = tvs // only req.SubPos supports multiple fields
}
return ret, nil
}
func (client *influxClient) GetSSUPointsBeforeLimit(ctx context.Context, req *Request, limit int) (map[string][]TV, error) {
reqData := url.Values{
"db": {req.Bucket},
"q": {fmt.Sprintf("select %s from %s where station='%s' and device='%s' and time>%dms and time<=%dms order by time desc limit %d;",
req.SubPos, req.Measure, req.Station, req.MainPos, req.Begin, req.End, limit)}, // begin = req.End-20-20
"db": {req.DB},
"q": {fmt.Sprintf("select %s from %s where station='%s' and device='%s' and time>=%dms and time<=%dms order by time desc limit %d;",
req.SubPos, req.Table, req.Station, req.MainPos, req.Begin, req.End, limit)}, // begin = req.End-20-20
}
return client.getF2TVsResp(ctx, reqData, req.RespType)
f2tvs, err := client.getF2TVsResp(ctx, reqData, "csv")
if err != nil {
return nil, err
}
ret := make(map[string][]TV, len(f2tvs))
for f, tvs := range f2tvs {
ret[strings.Join([]string{req.Station, req.MainPos, f}, ".")] = tvs // only req.SubPos supports multiple fields
}
return ret, nil
}
func (client *influxClient) WriteLinesData(ctx context.Context, bucket string, data []byte) error {
return client.writeLinesData(ctx, bucket, data, true)
func (client *influxClient) WriteLinesData(ctx context.Context, db string, data []byte) error {
return client.writeLinesData(ctx, db, data, true)
}

View File

@ -28,7 +28,7 @@ type Alarm struct {
AlarmStatus int `bson:"alarm_status" json:"alarm_status"` // 0 = "reset", 1 = "triggered/raised/alarm"
}
var almCode2Name = map[int]string{
var almCode2Name = []string{
almCodeCommmExcept: "通信异常", // communication exception
almCodeADFault: "AD故障", // A/D fault
almCodePPSExcept: "同步秒脉冲异常", // sync PPS (pulse-per-second) anomaly
@ -45,20 +45,33 @@ func (a *Alarm) GetName() string {
if a.AlarmCode < 0 || a.AlarmCode >= len(almCode2Name) {
return "" // guard: with the slice, an out-of-range code would otherwise panic
}
return almCode2Name[a.AlarmCode]
}
func (a *Alarm) GetType() int {
switch a.AlarmCode {
case almCodeBackup, almCodeReserve, almCodeUnitInit, almCodeStartSample:
return genEventType(0, 0)
case almCodeOverSample, almCodeUnderSample:
return genEventType(0, 1)
case almCodeCommmExcept, almCodeADFault, almCodePPSExcept, almCodeReadParamErr:
return genEventType(0, 2)
}
return -1
}
func (a *Alarm) ConvertToEvent(ip string) *Event {
e := new(Event)
if a != nil {
e.Event = a.GetName()
e.EventUUID = uuid.NewString()
e.Type = genEventType(0, 2)
e.Type = a.GetType()
e.Priority = 5
e.Status = EventStatusHappen
e.Timestamp = a.AlarmTime / 1e3 // TODO ms ?
e.Timestamp = a.AlarmTime
e.From = "station"
e.Operations = append(e.Operations, &operation{
Action: EventActionHappened, // TODO
OP: ip,
TS: a.AlarmTime / 1e3,
TS: a.AlarmTime,
})
e.Alarm = a
}

View File

@ -9,8 +9,8 @@ import (
)
const (
dbevent string = "event"
tbevent string = "event"
dbevent string = "cl"
tbevent string = "events"
)
const (
@ -59,31 +59,31 @@ func InsertEvents(ctx context.Context, docs []*Event) error {
return err
}
func DeleteOneEvent[T bson.M | *bson.D](ctx context.Context, filter T) error {
func DeleteOneEvent[T bson.M | bson.D](ctx context.Context, filter T) error {
_, err := getCollection(dbevent, tbevent).DeleteOne(ctx, filter)
return err
}
func DeleteEvents[T bson.M | *bson.D](ctx context.Context, filter T) error {
func DeleteEvents[T bson.M | bson.D](ctx context.Context, filter T) error {
_, err := getCollection(dbevent, tbevent).DeleteMany(ctx, filter)
return err
}
// upsert: insert the document if no match, otherwise update
func UpdateOneEvent[T bson.M | *bson.D](ctx context.Context, filter T, update T) error {
func UpsertOneEvent[T bson.M | bson.D](ctx context.Context, filter T, update T) error {
opts := options.UpdateOne().SetUpsert(true)
_, err := getCollection(dbevent, tbevent).UpdateOne(ctx, filter, update, opts)
return err
}
// upsert: insert the document if no match, otherwise update
func UpdateEvents[T bson.M | *bson.D](ctx context.Context, filter T, update T) error {
func UpsertEvents[T bson.M | bson.D](ctx context.Context, filter T, update T) error {
opts := options.UpdateMany().SetUpsert(true)
_, err := getCollection(dbevent, tbevent).UpdateMany(ctx, filter, update, opts)
return err
}
func FindOneEvent[T bson.M | *bson.D](ctx context.Context, filter T) (*Event, error) {
func FindOneEvent[T bson.M | bson.D](ctx context.Context, filter T) (*Event, error) {
doc := new(Event)
err := getCollection(dbevent, tbevent).FindOne(ctx, filter).Decode(doc)
// if errors.Is(err, mongo.ErrNoDocuments) {
@ -96,7 +96,7 @@ func FindOneEvent[T bson.M | *bson.D](ctx context.Context, filter T) (*Event, er
return doc, nil
}
func FindEvents[T bson.M | *bson.D](ctx context.Context, filter T) ([]*Event, error) {
func FindEvents[T bson.M | bson.D](ctx context.Context, filter T) ([]*Event, error) {
cursor, err := getCollection(dbevent, tbevent).Find(ctx, filter)
if err != nil {
return nil, err
@ -111,7 +111,7 @@ func FindEvents[T bson.M | *bson.D](ctx context.Context, filter T) ([]*Event, er
return docs, nil
}
func FindEventsInBatch[T bson.M | *bson.D](ctx context.Context, filter T,
func FindEventsInBatch[T bson.M | bson.D](ctx context.Context, filter T,
batchSize int32) ([]*Event, error) {
opt := options.Find().SetBatchSize(batchSize)
@ -129,15 +129,23 @@ func FindEventsInBatch[T bson.M | *bson.D](ctx context.Context, filter T,
}
docs = append(docs, doc)
}
if err := cursor.Err(); err != nil {
return docs, err
}
return docs, nil
}
func FindEventsWithPageLimit[T bson.M | *bson.D](ctx context.Context, filter T,
func FindEventsWithPageLimit[T bson.M | bson.D](ctx context.Context, filter T,
sort int, page int64, limit int64) ([]*Event, error) {
// TODO
opt := options.Find().SetSort(bson.D{{Key: "timestamp", Value: sort}}).
SetSkip(limit * (page - 1)).SetLimit(limit)
opt := options.Find()
if sort == 1 || sort == -1 {
opt.SetSort(bson.D{{Key: "timestamp", Value: sort}})
}
if page > 0 && limit > 0 {
opt.SetSkip(limit * (page - 1)).SetLimit(limit)
}
cursor, err := getCollection(dbevent, tbevent).Find(ctx, filter, opt)
if err != nil {
@ -153,13 +161,16 @@ func FindEventsWithPageLimit[T bson.M | *bson.D](ctx context.Context, filter T,
}
docs = append(docs, doc)
}
if err := cursor.Err(); err != nil {
return docs, err
}
return docs, nil
}
// sys: 0-hard/1-platform/2-application
//
// level:1-info/2-warn/3-error
// level:0-info/1-warn/2-error
func genEventType(sys int, level int) int {
return sys + level*3
}
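With sys in 0..2 (hardware/platform/application) and level in 0..2 (info/warn/error), the encoding is unique per pair; for example an application error is genEventType(2, 2) = 2 + 2*3 = 8. Decoding is the inverse (a sketch; decodeEventType is not part of this commit):

func decodeEventType(t int) (sys, level int) {
	return t % 3, t / 3
}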

View File

@ -45,6 +45,10 @@ func Disconnect(ctx context.Context) error {
return client.Disconnect(ctx)
}
func GetSession() (*mongo.Session, error) {
return client.StartSession()
}
func getCollection(db string, tb string) *mongo.Collection {
return client.Database(db).Collection(tb)
}
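GetSession hands callers the driver session for multi-document work; the expected call pattern is roughly (a sketch; transactions additionally require a replica set):

sess, err := mongo.GetSession()
if err != nil {
	return err
}
defer sess.EndSession(ctx)
// session-scoped reads/writes (or sess.WithTransaction) go here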

View File

@ -13,14 +13,14 @@ const (
)
const (
ChannelCPrefix string = "TM"
ChannelIPrefix string = "TS"
ChannelYCPrefix string = "TM"
ChannelYXPrefix string = "TS"
ChannelP string = "P"
ChannelQ string = "Q"
ChannelS string = "S"
ChannelPF string = "PF"
ChannelF string = "F"
ChannelDF string = "deltaF"
ChannelDF string = "dF"
ChannelUPrefix string = "U"
)
@ -114,6 +114,7 @@ func GetMeasurements(ctx context.Context, batchSize int) ([]*measurement, error)
var records []*measurement
result := client.WithContext(ctx).Table(tbmeasurement).Where("id > ?", id).
Order("id ASC").Limit(batchSize).Find(&records)
if result.Error != nil {
return totalRecords, result.Error
}
@ -137,9 +138,11 @@ func GenSSU2ChannelSizes(ctx context.Context, batchSize int) error {
var records []*measurement
result := client.WithContext(ctx).Table(tbmeasurement).
Where("id > ?", id).Order("id ASC").Limit(batchSize).Find(&records)
if result.Error != nil {
return result.Error
}
length := len(records)
if length <= 0 {
break

View File

@ -16,7 +16,7 @@ func init() {
postgresConfig.GetHost(), postgresConfig.GetUser(), postgresConfig.GetPassword(),
postgresConfig.GetDBName(), postgresConfig.GetPort(), postgresConfig.GetSSLMode(),
postgresConfig.GetTimeZone())
db, err := gorm.Open(postgres.Open(dsn), &gorm.Config{})
db, err := gorm.Open(postgres.Open(dsn), &gorm.Config{SkipDefaultTransaction: true})
if err != nil {
panic(err)
}
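SkipDefaultTransaction disables GORM's implicit per-write transaction, a sizeable win for single-statement inserts such as the batched channel-size writes. Where cross-statement atomicity is still needed, an explicit transaction remains available:

err := db.Transaction(func(tx *gorm.DB) error {
	// statements on tx run atomically
	return nil
})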

View File

@ -10,16 +10,15 @@ import (
rmq "github.com/rabbitmq/rabbitmq-amqp-go-client/pkg/rabbitmqamqp"
)
var eventXQR = rabbit.XQR{
ExchangeName: "event_produce_exchange",
QueueName: "event_produce_queue",
RoutingKey: "event_produce_route",
var eventXQK = rabbit.XQK{
Exchange: "event_produce_exchange",
// Key: "event_produce_key",
}
var eventPublisher *rmq.Publisher
func init() {
publisher, err := rabbit.NewPublisher(context.Background(), "default", &eventXQR)
publisher, err := rabbit.NewPublisher(context.Background(), "default", &eventXQK)
if err != nil {
panic(err)
}
@ -27,10 +26,6 @@ func init() {
}
func PublishEvent(ctx context.Context, event *mongo.Event) error {
if err := mongo.InsertOneEvent(ctx, event); err != nil {
return err
}
data, err := event.Marshall()
if err != nil {
return err

View File

@ -11,18 +11,22 @@ type rabbitClient struct {
conn *rmq.AmqpConnection
}
func NewClient(ctx context.Context, endpoints []rmq.Endpoint) (*rabbitClient, error) {
func NewClient(ctx context.Context, tag string) (*rabbitClient, error) {
env := rmq.NewClusterEnvironment(endpoints)
conn, err := client.env.NewConnection(ctx)
endpoints, err := genEndpoints(tag)
if err != nil {
return nil, err
}
return &rabbitClient{
env: env,
conn: conn,
}, nil
cli := new(rabbitClient)
cli.env = rmq.NewClusterEnvironment(endpoints)
conn, err := cli.env.NewConnection(ctx)
if err != nil {
return nil, err
}
cli.conn = conn
return cli, nil
}
func (c *rabbitClient) Management() *rmq.AmqpManagement {

View File

@ -7,20 +7,17 @@ import (
rmq "github.com/rabbitmq/rabbitmq-amqp-go-client/pkg/rabbitmqamqp"
)
func NewConsumer(ctx context.Context, tag string, xqr *XQR) (*rmq.Consumer, error) {
func NewConsumer(ctx context.Context, tag string, xqk *XQK) (*rmq.Consumer, error) {
cli := client
if tag != "default" {
endpoints, err := genEndpoints(tag)
if err != nil {
return nil, err
}
cli, err = NewClient(ctx, endpoints)
var err error
cli, err = NewClient(ctx, tag)
if err != nil {
return nil, err
}
}
return cli.conn.NewConsumer(ctx, xqr.QueueName, nil)
return cli.conn.NewConsumer(ctx, xqk.Queue, nil)
}
func Consuming(ctx context.Context, consumer *rmq.Consumer, msgChan chan<- []byte) {

View File

@ -6,7 +6,7 @@ import (
rmq "github.com/rabbitmq/rabbitmq-amqp-go-client/pkg/rabbitmqamqp"
)
type Management struct {
type Manage struct {
m *rmq.AmqpManagement
xName string
x rmq.IExchangeSpecification
@ -16,7 +16,7 @@ type Management struct {
b rmq.IBindingSpecification
}
func (m *Management) Init(ctx context.Context, rm *rmq.AmqpManagement,
func (m *Manage) Init(ctx context.Context, rm *rmq.AmqpManagement,
rx rmq.IExchangeSpecification, rq rmq.IQueueSpecification,
rb rmq.IBindingSpecification) {
@ -26,7 +26,7 @@ func (m *Management) Init(ctx context.Context, rm *rmq.AmqpManagement,
m.b = rb
}
func (m *Management) DeclareExchangeQueue(ctx context.Context) error {
func (m *Manage) DeclareExchangeQueue(ctx context.Context) error {
_, err := m.m.DeclareExchange(ctx, m.x)
if err != nil {
return err
@ -40,7 +40,7 @@ func (m *Management) DeclareExchangeQueue(ctx context.Context) error {
return nil
}
func (m *Management) DeclareAndBind(ctx context.Context) error {
func (m *Manage) DeclareAndBind(ctx context.Context) error {
xinfo, err := m.m.DeclareExchange(ctx, m.x)
if err != nil {
return err
@ -62,7 +62,7 @@ func (m *Management) DeclareAndBind(ctx context.Context) error {
return nil
}
func (m *Management) UnbindAndDelete(ctx context.Context) (purged int, err error) {
func (m *Manage) UnbindAndDelete(ctx context.Context) (purged int, err error) {
err = m.m.Unbind(ctx, m.bPath)
if err != nil {
return

View File

@ -8,21 +8,18 @@ import (
rmq "github.com/rabbitmq/rabbitmq-amqp-go-client/pkg/rabbitmqamqp"
)
func NewPublisher(ctx context.Context, tag string, xqr *XQR) (*rmq.Publisher, error) {
func NewPublisher(ctx context.Context, tag string, xqk *XQK) (*rmq.Publisher, error) {
cli := client
if tag != "default" {
endpoints, err := genEndpoints(tag)
if err != nil {
return nil, err
}
cli, err = NewClient(ctx, endpoints)
var err error
cli, err = NewClient(ctx, tag)
if err != nil {
return nil, err
}
}
return cli.conn.NewPublisher(ctx, &rmq.ExchangeAddress{
Exchange: xqr.ExchangeName,
Key: xqr.RoutingKey,
Exchange: xqk.Exchange,
Key: xqk.Key,
}, nil)
}

View File

@ -8,11 +8,11 @@ import (
rmq "github.com/rabbitmq/rabbitmq-amqp-go-client/pkg/rabbitmqamqp"
)
type XQR struct {
ExchangeName string `json:"exchangename" yaml:"exchangename"`
QueueName string `json:"queuename" yaml:"queuename"`
RoutingKey string `json:"routingkey" yaml:"routingkey"`
QueueLength int64 `json:"queuelength" yaml:"queuelength"`
type XQK struct {
Exchange string `json:"exchange" yaml:"exchange"`
Queue string `json:"queue" yaml:"queue"`
Key string `json:"key" yaml:"key"`
QueueCap int64 `json:"queuecap" yaml:"queuecap"`
}
var client *rabbitClient
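With the renamed tags, an XQK entry unmarshals from config shaped like this (values are illustrative):

var xqk XQK
err := json.Unmarshal([]byte(
	`{"exchange":"event_produce_exchange","queue":"event_produce_queue","key":"event_produce_key","queuecap":10000}`),
	&xqk)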

View File

@ -84,19 +84,20 @@ func sendSSUZUnit(ctx context.Context, channelSize postgres.ChannelSize, ssuChan
func queryInfluxPhasor(ctx context.Context, station string, device string,
fields []string, size int) (map[string][]influx.TV, error) {
measure, err := influx.GetMeasurement("phasor", device)
bucket, err := influx.GetDB("phasor")
if err != nil {
return nil, err
}
bucket, err := influx.GetBucket("phasor")
measure, err := influx.GetTable("phasor", device)
if err != nil {
return nil, err
}
req := &influx.Request{
RespType: "csv",
Bucket: bucket,
Measure: measure,
DB: bucket,
Table: measure,
Type: "phasor",
Station: station,
MainPos: device,
SubPos: strings.Join(fields, ","),
@ -110,27 +111,35 @@ func updateZUnitToRedis(ctx context.Context, unit zUnit) error {
func genPhasorFields(channel string) []string {
fields := make([]string, 0, 3)
switch {
case strings.HasPrefix(channel, postgres.ChannelCPrefix):
if after, ok := strings.CutPrefix(channel, postgres.ChannelCPrefix); ok {
case strings.HasPrefix(channel, postgres.ChannelYCPrefix):
fieldPrefix := strings.ToLower(channel)
fields = append(fields,
influx.FieldCPrifix+after+influx.FieldSuffixAMP,
influx.FieldCPrifix+after+influx.FieldSuffixPA,
influx.FieldCPrifix+after+influx.FieldSuffixRMS)
}
case strings.HasPrefix(channel, postgres.ChannelIPrefix):
if after, ok := strings.CutPrefix(channel, postgres.ChannelCPrefix); ok {
fields = append(fields, influx.FieldIPrefix+after)
}
fieldPrefix+influx.FieldSuffixAMP,
fieldPrefix+influx.FieldSuffixPA,
fieldPrefix+influx.FieldSuffixRMS)
case strings.HasPrefix(channel, postgres.ChannelYXPrefix):
fields = append(fields, strings.ToLower(channel))
case strings.HasPrefix(channel, postgres.ChannelUPrefix):
fieldUPrefix := strings.ToLower(channel)
fields = append(fields, fieldUPrefix+influx.FieldSuffixAMP,
fieldUPrefix+influx.FieldSuffixPA, fieldUPrefix+influx.FieldSuffixRMS)
case channel == postgres.ChannelDF:
fields = append(fields, influx.FieldDF)
case channel == postgres.ChannelP, channel == postgres.ChannelQ,
channel == postgres.ChannelS, channel == postgres.ChannelPF,
channel == postgres.ChannelF:
fields = append(fields,
fieldUPrefix+influx.FieldSuffixAMP,
fieldUPrefix+influx.FieldSuffixPA,
fieldUPrefix+influx.FieldSuffixRMS)
case channel == postgres.ChannelP,
channel == postgres.ChannelQ,
channel == postgres.ChannelS,
channel == postgres.ChannelPF,
channel == postgres.ChannelF,
channel == postgres.ChannelDF:
fields = append(fields, strings.ToLower(channel))
}
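The mapping now lower-cases the channel name directly instead of re-prefixing it; expected outputs, assuming FieldSuffixAMP and FieldSuffixPA are "_amp" and "_pa" in line with FieldSuffixRMS:

genPhasorFields("TM1") // ["tm1_amp", "tm1_pa", "tm1_rms"]
genPhasorFields("TS2") // ["ts2"]
genPhasorFields("U1")  // ["u1_amp", "u1_pa", "u1_rms"]
genPhasorFields("dF")  // ["df"]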

View File

@ -1,9 +1,11 @@
package api
import (
"datart/data"
"datart/data/mongo"
"datart/log"
"errors"
"fmt"
"regexp"
"github.com/gin-gonic/gin"
@ -12,7 +14,9 @@ import (
func (a *Api) PostInsertAlarm(ctx *gin.Context) {
alarm, ip, err := a.checkAndGenInsertAlarmRequest(ctx)
if err != nil {
log.Error(err)
ctx.JSON(200, gin.H{
"code": 1,
"msg": "invalid param",
@ -20,8 +24,30 @@ func (a *Api) PostInsertAlarm(ctx *gin.Context) {
return
}
_ = alarm
_ = ip
event := alarm.ConvertToEvent(ip)
err = mongo.InsertOneEvent(ctx.Request.Context(), event)
if err != nil {
log.Error(err, fmt.Sprintf(" params: %v", event))
ctx.JSON(200, gin.H{
"code": 2,
"msg": "insert error",
})
return
}
err = data.PublishEvent(ctx.Request.Context(), event)
if err != nil {
log.Error(err, fmt.Sprintf(" params: %v", event))
ctx.JSON(200, gin.H{
"code": 3,
"msg": "publish error",
})
return
}
ctx.JSON(200, gin.H{
"code": 0,
@ -31,6 +57,7 @@ func (a *Api) PostInsertAlarm(ctx *gin.Context) {
func (a *Api) checkAndGenInsertAlarmRequest(ctx *gin.Context) (*mongo.Alarm, string, error) {
alarm := new(mongo.Alarm)
err := ctx.ShouldBindJSON(alarm)
if err != nil {
return nil, "", err
@ -40,6 +67,7 @@ func (a *Api) checkAndGenInsertAlarmRequest(ctx *gin.Context) (*mongo.Alarm, str
if err != nil {
return nil, "", err
}
if !ok {
return nil, "", errors.New("invalid device_no")
}
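A request sketch for the new endpoint (the route path and the alarm_code/alarm_time/device_no field names are assumptions inferred from the handler; only alarm_status's tag is shown in this diff):

body := strings.NewReader(
	`{"device_no":"SSU-01","alarm_code":1,"alarm_time":1730900990000,"alarm_status":1}`)
resp, err := http.Post("http://localhost:8080/api/alarm", "application/json", body)
// reply codes: 0 = success, 1 = invalid param, 2 = insert error, 3 = publish error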

View File

@ -1 +1,131 @@
package api
import (
"datart/data/mongo"
"datart/log"
"errors"
"fmt"
"strconv"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
"go.mongodb.org/mongo-driver/v2/bson"
)
const (
pageSizeLimit = 500
)
func (a *Api) GetEvents(ctx *gin.Context) {
filter, sort, pageNo, pageSize, err := a.checkAndGenGetEventsRequest(ctx)
if err != nil {
log.Error(err)
ctx.JSON(200, gin.H{
"code": 1,
"msg": err.Error(),
})
return
}
events, err := mongo.FindEventsWithPageLimit(ctx.Request.Context(), filter, sort, pageNo, pageSize)
if err != nil {
log.Error(err, fmt.Sprintf(" params: %v, %d, %d, %d", filter, sort, pageNo, pageSize))
ctx.JSON(200, gin.H{
"code": 2,
"msg": err.Error(),
})
return
}
ctx.JSON(200, gin.H{
"code": 0,
"msg": "success",
"data": events,
})
}
func (a *Api) checkAndGenGetEventsRequest(ctx *gin.Context) (bson.M, int, int64, int64, error) {
uuidStr := ctx.Query("uuid")
if len(uuidStr) > 0 {
if uuid.Validate(uuidStr) != nil {
return nil, 0, -1, -1, errors.New("invalid uuid")
}
return bson.M{"event_uuid": uuidStr}, 0, -1, -1, nil
}
filter := bson.M{}
var err error
begin, end := int64(-1), int64(-1)
beginStr := ctx.Query("begin")
if len(beginStr) > 0 {
if begin, err = strconv.ParseInt(beginStr, 10, 64); err != nil {
return nil, 0, -1, -1, err
}
}
endStr := ctx.Query("end")
if len(endStr) > 0 {
if end, err = strconv.ParseInt(endStr, 10, 64); err != nil {
return nil, 0, -1, -1, err
}
}
if begin > 0 && end > 0 && begin > end {
return nil, 0, -1, -1, errors.New("invalid time")
}
switch {
case begin > 0 && end < 0:
filter["timestamp"] = bson.M{"$gte": begin}
case begin < 0 && end > 0:
filter["timestamp"] = bson.M{"$lte": end}
case begin > 0 && end > 0:
filter["timestamp"] = bson.M{"$gte": begin, "$lte": end}
}
var sort int
sortStr := ctx.Query("sort")
if len(sortStr) > 0 {
s, err := strconv.Atoi(sortStr)
if err != nil {
return nil, 0, -1, -1, err
}
if s != 1 && s != -1 {
return nil, 0, -1, -1, errors.New("invalid sort")
}
sort = s
}
pageNo, pageSize := -1, -1
pageNoStr := ctx.Query("page_no")
pageSizeStr := ctx.Query("page_size")
if len(pageNoStr) > 0 && len(pageSizeStr) > 0 {
pageNo, err = strconv.Atoi(pageNoStr)
if err != nil {
return nil, 0, -1, -1, err
}
pageSize, err = strconv.Atoi(pageSizeStr)
if err != nil {
return nil, 0, -1, -1, err
}
if pageNo <= 0 || pageSize <= 0 {
return nil, 0, -1, -1, errors.New("invalid page param")
}
if pageSize > pageSizeLimit {
pageSize = pageSizeLimit
}
}
return filter, sort, int64(pageNo), int64(pageSize), nil
}
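Query parameters line up with the validator above; a call sketch (the route path is hypothetical):

// by uuid, all other filters are ignored: GET .../events?uuid=<uuid>
q := url.Values{
	"begin":     {"1730900000000"},
	"end":       {"1730990000000"},
	"sort":      {"-1"}, // newest first
	"page_no":   {"1"},
	"page_size": {"100"}, // capped at pageSizeLimit (500)
}
resp, err := http.Get("http://localhost:8080/api/events?" + q.Encode())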

View File

@ -1 +1,120 @@
package api
import (
"datart/data/influx"
"datart/log"
"datart/util"
"errors"
"fmt"
"github.com/gin-gonic/gin"
)
func (a *Api) GetPointData(ctx *gin.Context) {
request, err := a.checkAndGenGetPointRequest(ctx)
if err != nil {
log.Error(err)
ctx.JSON(200, gin.H{
"code": 1,
"msg": err.Error(),
})
return
}
var data map[string][]influx.TV
switch {
case request.Begin > 0 && request.End > 0:
data, err = influx.GetSSUPointsDurationData(ctx.Request.Context(), request)
case request.Begin > 0 && request.End < 0:
data, err = influx.GetSSUPointsAfterLimit(ctx.Request.Context(), request, 1)
case request.Begin < 0 && request.End > 0:
data, err = influx.GetSSUPointsBeforeLimit(ctx.Request.Context(), request, 1)
default:
data, err = influx.GetSSUPointsLastLimit(ctx.Request.Context(), request, 1)
}
if err != nil {
log.Error(err, fmt.Sprintf(" params: %v", request))
ctx.JSON(200, gin.H{
"code": 2,
"msg": "query database error",
})
return
}
ctx.JSON(200, gin.H{
"code": 0,
"msg": "success",
"data": data,
})
}
func (a *Api) checkAndGenGetPointRequest(ctx *gin.Context) (*influx.Request, error) {
typeStr := ctx.DefaultQuery("type", "")
if len(typeStr) <= 0 {
return nil, errors.New("invalid type")
}
// tag TODO
station := ctx.DefaultQuery("station", "")
if len(station) <= 0 {
return nil, errors.New("invalid station")
}
mainPos := ctx.DefaultQuery("main_pos", "")
if len(mainPos) <= 0 {
return nil, errors.New("invalid main_pos")
}
subPos := ctx.DefaultQuery("sub_pos", "")
if len(subPos) <= 0 {
return nil, errors.New("invalid sub_pos")
}
beginStr := ctx.DefaultQuery("begin", "")
endStr := ctx.DefaultQuery("end", "")
operate := ctx.DefaultQuery("operate", "")
step := ctx.DefaultQuery("step", "")
defaultV := ctx.DefaultQuery("default", "")
begin := util.ConvertToInt64Default(beginStr, -1)
end := util.ConvertToInt64Default(endStr, -1)
bucket, err := influx.GetDB(typeStr)
if err != nil {
return nil, err
}
measure, err := influx.GetTable(typeStr, mainPos)
if err != nil {
return nil, err
}
return &influx.Request{
DB: bucket,
Table: measure,
Type: typeStr,
Station: station,
MainPos: mainPos,
SubPos: subPos,
Begin: begin,
End: end,
Operate: operate,
Step: step,
Default: defaultV,
}, nil
}
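The four Begin/End combinations map onto the influx helpers: both set = duration query, only begin = first point after, only end = last point before, neither = latest value. A call sketch (route path hypothetical, names are placeholders):

q := url.Values{
	"type":     {"phasor"},
	"station":  {"st01"},
	"main_pos": {"dev01"},
	"sub_pos":  {"tm1_amp,tm1_rms"},
	"begin":    {"1730900000000"},
	"end":      {"1730900060000"},
	"operate":  {"mean"}, // with step+default => group by time(1s) fill(0)
	"step":     {"1s"},
	"default":  {"0"},
}
resp, err := http.Get("http://localhost:8080/api/point?" + q.Encode())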

View File

@ -4,7 +4,7 @@ import (
"strconv"
)
func ConvertToTimestampDefault(tsStr string, defaultTS int64) int64 {
func ConvertToInt64Default(tsStr string, defaultTS int64) int64 {
ts, err := strconv.ParseInt(tsStr, 10, 64)
if err != nil {
return defaultTS
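Usage is unchanged by the rename; malformed or empty input falls back to the default:

util.ConvertToInt64Default("1730900000000", -1) // 1730900000000
util.ConvertToInt64Default("", -1)              // -1 (fallback on parse error)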