modelRT/handler/real_time_data_pull.go

359 lines
12 KiB
Go
Raw Normal View History

2025-11-08 17:11:07 +08:00
// Package handler provides HTTP handlers for various endpoints.
package handler
import (
"context"
"fmt"
"net/http"
"time"
2025-11-10 17:32:18 +08:00
"modelRT/constants"
"modelRT/diagram"
2025-11-08 17:11:07 +08:00
"modelRT/logger"
2025-11-10 17:32:18 +08:00
"modelRT/model"
2025-11-08 17:11:07 +08:00
"modelRT/network"
2025-11-10 17:32:18 +08:00
"modelRT/util"
2025-11-08 17:11:07 +08:00
"github.com/gin-gonic/gin"
"github.com/gorilla/websocket"
)
// pullUpgrader upgrades incoming HTTP requests to websocket connections for the
// real-time data pull endpoint.
// NOTE(review): CheckOrigin unconditionally returns true, so cross-origin
// websocket handshakes are accepted from any origin — confirm this is intended
// for production deployments.
var pullUpgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
	CheckOrigin: func(_ *http.Request) bool {
		return true
	},
}
// PullRealTimeDataHandler define real time data pull API
// @Summary 实时数据拉取 websocket api
// @Description 根据用户输入的clientID拉取对应的实时数据
// @Tags RealTime Component Websocket
// @Router /monitors/data/realtime/stream/:clientID [get]
func PullRealTimeDataHandler(c *gin.Context) {
clientID := c.Param("clientID")
if clientID == "" {
err := fmt.Errorf("clientID is missing from the path")
logger.Error(c, "query clientID from path failed", "error", err, "url", c.Request.RequestURI)
c.JSON(http.StatusOK, network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
})
return
}
conn, err := pullUpgrader.Upgrade(c.Writer, c.Request, nil)
if err != nil {
logger.Error(c, "upgrade http protocol to websocket protocal failed", "error", err)
c.JSON(http.StatusOK, network.FailureResponse{
Code: http.StatusBadRequest,
Msg: err.Error(),
})
return
}
defer conn.Close()
ctx, cancel := context.WithCancel(c.Request.Context())
defer cancel()
2025-11-10 17:32:18 +08:00
// TODO[BACKPRESSURE-ISSUE] 先期使用固定大容量对扇入模型进行定义 #1
fanInChan := make(chan network.RealTimePullTarget, 10000)
2025-11-08 17:11:07 +08:00
go processTargetPolling(ctx, globalMonitorState, clientID, fanInChan)
go readClientMessages(ctx, conn, clientID, cancel)
2025-11-08 17:11:07 +08:00
bufferMaxSize := constants.SendMaxBatchSize
sendMaxInterval := constants.SendMaxBatchInterval
buffer := make([]network.RealTimePullTarget, 0, bufferMaxSize)
ticker := time.NewTicker(sendMaxInterval)
defer ticker.Stop()
2025-11-08 17:11:07 +08:00
for {
select {
case targetData, ok := <-fanInChan:
if !ok {
logger.Error(ctx, "fanInChan closed unexpectedly", "client_id", clientID)
return
}
buffer = append(buffer, targetData)
if len(buffer) >= bufferMaxSize {
// buffer is full, send immediately
if err := sendAggregateRealTimeDataStream(conn, buffer); err != nil {
logger.Error(nil, "when buffer is full, send the real time aggregate data failed", "client_id", clientID, "buffer", buffer, "error", err)
return
}
// reset buffer
buffer = make([]network.RealTimePullTarget, 0, bufferMaxSize)
// reset the ticker to prevent it from triggering immediately after the ticker is sent
ticker.Reset(sendMaxInterval)
}
case <-ticker.C:
if len(buffer) > 0 {
// when the ticker is triggered, all data in the send buffer is sent
if err := sendAggregateRealTimeDataStream(conn, buffer); err != nil {
logger.Error(nil, "when the ticker is triggered, send the real time aggregate data failed", "client_id", clientID, "buffer", buffer, "error", err)
return
}
// reset buffer
buffer = make([]network.RealTimePullTarget, 0, bufferMaxSize)
}
case <-ctx.Done():
// send the last remaining data
if err := sendAggregateRealTimeDataStream(conn, buffer); err != nil {
logger.Error(nil, "send the last remaining data failed", "client_id", clientID, "buffer", buffer, "error", err)
}
logger.Info(ctx, "PullRealTimeDataHandler exiting as context is done.", "client_id", clientID)
return
2025-11-08 17:11:07 +08:00
}
}
}
// readClientMessages 负责持续监听客户端发送的消息(例如 Ping/Pong, Close Frame, 或控制命令)
func readClientMessages(ctx context.Context, conn *websocket.Conn, clientID string, cancel context.CancelFunc) {
2025-11-08 17:11:07 +08:00
// conn.SetReadLimit(512)
for {
msgType, msgBytes, err := conn.ReadMessage()
2025-11-08 17:11:07 +08:00
if err != nil {
if websocket.IsCloseError(err, websocket.CloseNormalClosure) {
logger.Info(ctx, "client actively and normally closed the connection", "client_id", clientID)
2025-11-08 17:11:07 +08:00
} else if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {
logger.Error(ctx, "an unexpected error occurred while reading the webSocket connection", "client_id", clientID, "error", err)
2025-11-08 17:11:07 +08:00
} else {
// handle other read errors (eg, I/O errors)
logger.Error(ctx, "an error occurred while reading the webSocket connection", "client_id", clientID, "error", err)
2025-11-08 17:11:07 +08:00
}
cancel()
break
2025-11-08 17:11:07 +08:00
}
// process normal message from client
2025-11-08 17:11:07 +08:00
if msgType == websocket.TextMessage || msgType == websocket.BinaryMessage {
logger.Info(ctx, "read normal message from client", "client_id", clientID, "msg", string(msgBytes))
2025-11-08 17:11:07 +08:00
}
}
}
// sendAggregateRealTimeDataStream define func to responsible for continuously pushing aggregate real-time data to the client
func sendAggregateRealTimeDataStream(conn *websocket.Conn, targetsData []network.RealTimePullTarget) error {
if len(targetsData) == 0 {
return nil
}
response := network.SuccessResponse{
Code: 200,
Msg: "success",
Payload: network.RealTimePullPayload{
Targets: targetsData,
},
}
return conn.WriteJSON(response)
2025-11-08 17:11:07 +08:00
}
// processTargetPolling define function to process target in monitor map and data is continuously retrieved from redis based on the target
func processTargetPolling(ctx context.Context, s *SharedMonitorState, clientID string, fanInChan chan network.RealTimePullTarget) {
// ensure the fanInChan will not leak
defer close(fanInChan)
2025-11-08 17:11:07 +08:00
stopChanMap := make(map[string]chan struct{})
s.globalMutex.RLock()
2025-11-08 17:11:07 +08:00
config, confExist := s.monitorMap[clientID]
if !confExist {
logger.Error(ctx, "can not found config into local stored map by clientID", "clientID", clientID)
s.globalMutex.RUnlock()
2025-11-08 17:11:07 +08:00
return
}
s.globalMutex.RUnlock()
2025-11-08 17:11:07 +08:00
config.mutex.RLock()
2025-11-08 17:11:07 +08:00
for interval, componentItems := range config.components {
for _, target := range componentItems.targets {
// add a secondary check to prevent the target from already existing in the stopChanMap
if _, exists := stopChanMap[target]; exists {
logger.Warn(ctx, "target already exists in polling map, skipping start-up", "target", target)
continue
}
2025-11-10 17:32:18 +08:00
measurement, exist := componentItems.targetParam[target]
2025-11-08 17:11:07 +08:00
if !exist {
logger.Error(ctx, "can not found subscription node param into param map", "target", target)
continue
}
2025-11-10 17:32:18 +08:00
queryGStopChan := make(chan struct{})
2025-11-08 17:11:07 +08:00
// store stop channel with target into map
2025-11-10 17:32:18 +08:00
stopChanMap[target] = queryGStopChan
queryKey, err := model.GenerateMeasureIdentifier(measurement.DataSource)
if err != nil {
logger.Error(ctx, "generate measurement indentifier by data_source field failed", "data_source", measurement.DataSource, "error", err)
continue
}
pollingConfig := redisPollingConfig{
targetID: target,
queryKey: queryKey,
interval: interval,
dataSize: int64(measurement.Size),
}
go realTimeDataQueryFromRedis(ctx, pollingConfig, fanInChan, queryGStopChan)
2025-11-08 17:11:07 +08:00
}
}
config.mutex.RUnlock()
2025-11-08 17:11:07 +08:00
for {
select {
2025-11-10 17:32:18 +08:00
case transportTargets, ok := <-config.noticeChan:
2025-11-08 17:11:07 +08:00
if !ok {
logger.Error(ctx, "notice channel was closed unexpectedly", "clientID", clientID)
stopAllPolling(ctx, stopChanMap)
return
}
config.mutex.Lock()
2025-11-10 17:32:18 +08:00
switch transportTargets.OperationType {
case constants.OpAppend:
appendTargets(ctx, config, stopChanMap, fanInChan, transportTargets.Targets)
2025-11-10 17:32:18 +08:00
case constants.OpRemove:
removeTargets(ctx, stopChanMap, transportTargets.Targets)
2025-11-10 17:32:18 +08:00
}
config.mutex.Unlock()
2025-11-08 17:11:07 +08:00
case <-ctx.Done():
logger.Info(ctx, fmt.Sprintf("stop all data retrieval goroutines under this clientID:%s", clientID))
stopAllPolling(ctx, stopChanMap)
return
}
}
}
// appendTargets starts new polling goroutines for targets that were just added
// via an OpAppend notification. Targets already running or missing measurement
// parameters are skipped with a log entry; any requested target that matches no
// component configuration is reported at the end.
func appendTargets(ctx context.Context, config *RealTimeMonitorConfig, stopChanMap map[string]chan struct{}, fanInChan chan network.RealTimePullTarget, appendTargets []string) {
	pending := make(map[string]struct{}, len(appendTargets))
	for _, name := range appendTargets {
		pending[name] = struct{}{}
	}

	for interval, items := range config.components {
		for _, target := range items.targets {
			if _, wanted := pending[target]; !wanted {
				continue
			}
			if _, running := stopChanMap[target]; running {
				logger.Warn(ctx, "append target already running, skipping", "target", target)
				continue
			}
			param, found := items.targetParam[target]
			if !found {
				logger.Error(ctx, "append target can not find measurement params for new target", "target", target)
				continue
			}
			key, err := model.GenerateMeasureIdentifier(param.DataSource)
			if err != nil {
				logger.Error(ctx, "append target generate measurement identifier failed", "target", target, "error", err)
				continue
			}
			stop := make(chan struct{})
			stopChanMap[target] = stop
			go realTimeDataQueryFromRedis(ctx, redisPollingConfig{
				targetID: target,
				queryKey: key,
				interval: interval,
				dataSize: int64(param.Size),
			}, fanInChan, stop)
			logger.Info(ctx, "started new polling goroutine for appended target", "target", target, "interval", interval)
			delete(pending, target)
		}
	}

	// anything still pending had no matching component config, so no goroutine started
	for target := range pending {
		logger.Error(ctx, "append target: failed to find config for target, goroutine not started", "target", target)
	}
}
// removeTargets stops the running polling goroutines for targets that were
// removed: each target's stop channel is closed and its bookkeeping entry
// deleted. Targets with no running poller are skipped with a warning.
func removeTargets(ctx context.Context, stopChanMap map[string]chan struct{}, targetsToRemove []string) {
	for _, target := range targetsToRemove {
		if stopChan, running := stopChanMap[target]; running {
			close(stopChan)
			delete(stopChanMap, target)
			logger.Info(ctx, "stopped polling goroutine for removed target", "target", target)
		} else {
			logger.Warn(ctx, "removeTarget was not running, skipping", "target", target)
		}
	}
}
// stopAllPolling stops all running query goroutines for a specific client
2025-11-08 17:11:07 +08:00
func stopAllPolling(ctx context.Context, stopChanMap map[string]chan struct{}) {
for target, stopChan := range stopChanMap {
logger.Info(ctx, fmt.Sprintf("stop the data fetching behavior for the corresponding target:%s", target))
close(stopChan)
}
clear(stopChanMap)
return
}
// redisPollingConfig define struct for param which query real time data from redis
type redisPollingConfig struct {
	targetID string // target identifier echoed back in each RealTimePullTarget frame
	queryKey string // redis key built by model.GenerateMeasureIdentifier from the measurement data source
	interval string // polling period, must be a time.ParseDuration-compatible string
	dataSize int64  // size limit passed to QueryByZRangeByLex — presumably max members per query; verify against the redis client
}
func realTimeDataQueryFromRedis(ctx context.Context, config redisPollingConfig, fanInChan chan network.RealTimePullTarget, stopChan chan struct{}) {
duration, err := time.ParseDuration(config.interval)
2025-11-08 17:11:07 +08:00
if err != nil {
logger.Error(ctx, "failed to parse the time string", "interval", config.interval, "error", err)
2025-11-08 17:11:07 +08:00
return
}
ticker := time.NewTicker(duration * time.Second)
defer ticker.Stop()
2025-11-10 17:32:18 +08:00
client := diagram.NewRedisClient()
startTimestamp := util.GenNanoTsStr()
2025-11-08 17:11:07 +08:00
for {
select {
case <-ticker.C:
2025-11-10 17:32:18 +08:00
stopTimestamp := util.GenNanoTsStr()
members, err := client.QueryByZRangeByLex(ctx, config.queryKey, config.dataSize, startTimestamp, stopTimestamp)
2025-11-10 17:32:18 +08:00
if err != nil {
logger.Error(ctx, "query real time data from redis failed", "key", config.queryKey, "error", err)
2025-11-10 17:32:18 +08:00
continue
2025-11-08 17:11:07 +08:00
}
2025-11-10 17:32:18 +08:00
// use end timestamp reset start timestamp
startTimestamp = stopTimestamp
pullDatas := make([]network.RealTimePullData, 0, len(members))
for _, member := range members {
pullDatas = append(pullDatas, network.RealTimePullData{
Time: member.Member.(string),
Value: member.Score,
})
}
targetData := network.RealTimePullTarget{
ID: config.targetID,
Datas: pullDatas,
}
select {
case fanInChan <- targetData:
default:
// TODO[BACKPRESSURE-ISSUE] 考虑 fanInChan 阻塞,当出现大量数据阻塞查询循环并丢弃时,采取背压方式解决问题 #1
logger.Warn(ctx, "fanInChan is full, dropping real-time data frame", "key", config.queryKey, "data_size", len(members))
}
2025-11-08 17:11:07 +08:00
case <-stopChan:
2025-11-10 17:32:18 +08:00
logger.Info(ctx, "stop the redis query goroutine via a singal")
2025-11-08 17:11:07 +08:00
return
}
}
}