430 lines
13 KiB
Go
430 lines
13 KiB
Go
// Package task provides asynchronous task processing with handler factory pattern
|
|
package task
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
"sync"
|
|
"time"
|
|
|
|
"modelRT/database"
|
|
"modelRT/logger"
|
|
"modelRT/orm"
|
|
|
|
"github.com/gofrs/uuid"
|
|
"gorm.io/gorm"
|
|
)
|
|
|
|
// TaskHandler defines the interface for task processors. Implementations are
// registered with a HandlerFactory keyed by TaskType and invoked when a
// matching message is consumed from the MQ.
type TaskHandler interface {
	// Execute processes a task with the given ID, type, and params from the MQ message.
	// db is used for persisting progress/results; a non-nil error marks the task failed.
	Execute(ctx context.Context, taskID uuid.UUID, taskType TaskType, params map[string]any, db *gorm.DB) error

	// CanHandle returns true if this handler can process the given task type.
	CanHandle(taskType TaskType) bool

	// Name returns the name of the handler for logging and metrics.
	Name() string
}
|
|
|
|
// HandlerFactory creates task handlers based on task type.
// Safe for concurrent use: the registry map is guarded by mu.
type HandlerFactory struct {
	handlers map[TaskType]TaskHandler // registry: one handler per task type
	mu       sync.RWMutex             // guards handlers
}
|
|
|
|
// NewHandlerFactory creates a new HandlerFactory
|
|
func NewHandlerFactory() *HandlerFactory {
|
|
return &HandlerFactory{
|
|
handlers: make(map[TaskType]TaskHandler),
|
|
}
|
|
}
|
|
|
|
// RegisterHandler registers a handler for a specific task type
|
|
func (f *HandlerFactory) RegisterHandler(ctx context.Context, taskType TaskType, handler TaskHandler) {
|
|
f.mu.Lock()
|
|
defer f.mu.Unlock()
|
|
|
|
f.handlers[taskType] = handler
|
|
logger.Info(ctx, "Handler registered",
|
|
"task_type", taskType,
|
|
"handler_name", handler.Name(),
|
|
)
|
|
}
|
|
|
|
// GetHandler returns a handler for the given task type
|
|
func (f *HandlerFactory) GetHandler(taskType TaskType) (TaskHandler, error) {
|
|
f.mu.RLock()
|
|
handler, exists := f.handlers[taskType]
|
|
f.mu.RUnlock()
|
|
|
|
if !exists {
|
|
return nil, fmt.Errorf("no handler registered for task type: %s", taskType)
|
|
}
|
|
|
|
return handler, nil
|
|
}
|
|
|
|
// CreateDefaultHandlers registers all default task handlers
|
|
func (f *HandlerFactory) CreateDefaultHandlers(ctx context.Context) {
|
|
f.RegisterHandler(ctx, TypeTopologyAnalysis, &TopologyAnalysisHandler{})
|
|
f.RegisterHandler(ctx, TypeEventAnalysis, &EventAnalysisHandler{})
|
|
f.RegisterHandler(ctx, TypeBatchImport, &BatchImportHandler{})
|
|
f.RegisterHandler(ctx, TaskType(TaskTypeTest), NewTestTaskHandler())
|
|
}
|
|
|
|
// BaseHandler provides common functionality for all task handlers.
// Concrete handlers embed it to inherit Name().
type BaseHandler struct {
	name string // display name used for logging and metrics
}
|
|
|
|
// NewBaseHandler creates a new BaseHandler
|
|
func NewBaseHandler(name string) *BaseHandler {
|
|
return &BaseHandler{name: name}
|
|
}
|
|
|
|
// Name returns the handler name
|
|
func (h *BaseHandler) Name() string {
|
|
return h.name
|
|
}
|
|
|
|
// TopologyAnalysisHandler handles topology analysis tasks: BFS reachability
// between two components, optionally skipping out-of-service nodes.
type TopologyAnalysisHandler struct {
	BaseHandler // supplies Name()
}
|
|
|
|
// NewTopologyAnalysisHandler creates a new TopologyAnalysisHandler
|
|
func NewTopologyAnalysisHandler() *TopologyAnalysisHandler {
|
|
return &TopologyAnalysisHandler{
|
|
BaseHandler: *NewBaseHandler("topology_analysis_handler"),
|
|
}
|
|
}
|
|
|
|
// Execute processes a topology analysis task.
// Params (all sourced from the MQ message, no DB lookup needed):
//   - start_component_uuid (string, required): BFS origin
//   - end_component_uuid (string, required): reachability target
//   - check_in_service (bool, optional, default true): skip out-of-service components
//
// Progress is written to the task row at 20/40/60/80 as the phases complete;
// a failed progress write is logged but never aborts the analysis. The final
// reachability verdict (and path, if any) is persisted via persistTopologyResult.
func (h *TopologyAnalysisHandler) Execute(ctx context.Context, taskID uuid.UUID, taskType TaskType, params map[string]any, db *gorm.DB) error {
	logger.Info(ctx, "topology analysis started", "task_id", taskID)

	// Phase 1: parse params from MQ message.
	startComponentUUID, endComponentUUID, checkInService, err := parseTopologyAnalysisParams(params)
	if err != nil {
		return fmt.Errorf("invalid topology analysis params: %w", err)
	}

	logger.Info(ctx, "topology params parsed",
		"task_id", taskID,
		"start", startComponentUUID,
		"end", endComponentUUID,
		"check_in_service", checkInService,
	)

	// Progress updates are best-effort: warn and continue on failure.
	if err := database.UpdateAsyncTaskProgress(ctx, db, taskID, 20); err != nil {
		logger.Warn(ctx, "update progress failed", "task_id", taskID, "progress", 20, "error", err)
	}

	// Phase 2: query topology edges from startComponentUUID, build adjacency list.
	// NOTE(review): assumes the query returns every edge of the subgraph
	// reachable from the start node, not just its direct edges — confirm
	// against QueryTopologicByStartUUID's implementation.
	topoEdges, err := database.QueryTopologicByStartUUID(ctx, db, startComponentUUID)
	if err != nil {
		return fmt.Errorf("query topology from start node: %w", err)
	}

	// adjacency list: uuid_from → []uuid_to (directed edges)
	adjMap := make(map[uuid.UUID][]uuid.UUID, len(topoEdges))
	// collect all UUIDs once so the InService status can be loaded in a single batch query
	allUUIDs := make(map[uuid.UUID]struct{}, len(topoEdges)*2)
	allUUIDs[startComponentUUID] = struct{}{}
	for _, edge := range topoEdges {
		adjMap[edge.UUIDFrom] = append(adjMap[edge.UUIDFrom], edge.UUIDTo)
		allUUIDs[edge.UUIDFrom] = struct{}{}
		allUUIDs[edge.UUIDTo] = struct{}{}
	}

	if err := database.UpdateAsyncTaskProgress(ctx, db, taskID, 40); err != nil {
		logger.Warn(ctx, "update progress failed", "task_id", taskID, "progress", 40, "error", err)
	}

	// Phase 3: batch-load InService status (only when checkInService is true).
	// Missing map entries read as false, i.e. unknown components count as out of service.
	inServiceMap := make(map[uuid.UUID]bool)
	if checkInService {
		uuidSlice := make([]uuid.UUID, 0, len(allUUIDs))
		for id := range allUUIDs {
			uuidSlice = append(uuidSlice, id)
		}
		inServiceMap, err = database.QueryComponentsInServiceByUUIDs(ctx, db, uuidSlice)
		if err != nil {
			return fmt.Errorf("query component in_service status: %w", err)
		}

		// Check the start node itself before BFS: an out-of-service origin
		// short-circuits to "unreachable, blocked by start".
		if !inServiceMap[startComponentUUID] {
			return persistTopologyResult(ctx, db, taskID, startComponentUUID, endComponentUUID,
				checkInService, false, nil, &startComponentUUID)
		}
	}

	if err := database.UpdateAsyncTaskProgress(ctx, db, taskID, 60); err != nil {
		logger.Warn(ctx, "update progress failed", "task_id", taskID, "progress", 60, "error", err)
	}

	// Phase 4: BFS reachability check over the adjacency list.
	visited := make(map[uuid.UUID]struct{})
	parent := make(map[uuid.UUID]uuid.UUID) // child → parent, for path reconstruction
	queue := []uuid.UUID{startComponentUUID}
	visited[startComponentUUID] = struct{}{}
	isReachable := false
	var blockedBy *uuid.UUID // first out-of-service node encountered, if any

	for len(queue) > 0 {
		cur := queue[0]
		queue = queue[1:]

		if cur == endComponentUUID {
			isReachable = true
			break
		}

		for _, next := range adjMap[cur] {
			if _, seen := visited[next]; seen {
				continue
			}
			if checkInService && !inServiceMap[next] {
				// record first out-of-service blocker but keep searching other branches
				if blockedBy == nil {
					id := next // copy: next is reused by the loop
					blockedBy = &id
				}
				continue
			}
			visited[next] = struct{}{}
			parent[next] = cur
			queue = append(queue, next)
		}
	}

	if err := database.UpdateAsyncTaskProgress(ctx, db, taskID, 80); err != nil {
		logger.Warn(ctx, "update progress failed", "task_id", taskID, "progress", 80, "error", err)
	}

	// Phase 5: reconstruct path (if reachable) and persist result.
	var path []uuid.UUID
	if isReachable {
		blockedBy = nil // reachable path found — clear any partial blocker
		path = reconstructPath(parent, startComponentUUID, endComponentUUID)
	}

	return persistTopologyResult(ctx, db, taskID, startComponentUUID, endComponentUUID,
		checkInService, isReachable, path, blockedBy)
}
|
|
|
|
// parseTopologyAnalysisParams extracts and validates the three required fields.
|
|
// check_in_service defaults to true when absent.
|
|
func parseTopologyAnalysisParams(params map[string]any) (startID, endID uuid.UUID, checkInService bool, err error) {
|
|
startStr, ok := params["start_component_uuid"].(string)
|
|
if !ok || startStr == "" {
|
|
err = fmt.Errorf("missing or invalid start_component_uuid")
|
|
return
|
|
}
|
|
endStr, ok := params["end_component_uuid"].(string)
|
|
if !ok || endStr == "" {
|
|
err = fmt.Errorf("missing or invalid end_component_uuid")
|
|
return
|
|
}
|
|
startID, err = uuid.FromString(startStr)
|
|
if err != nil {
|
|
err = fmt.Errorf("parse start_component_uuid %q: %w", startStr, err)
|
|
return
|
|
}
|
|
endID, err = uuid.FromString(endStr)
|
|
if err != nil {
|
|
err = fmt.Errorf("parse end_component_uuid %q: %w", endStr, err)
|
|
return
|
|
}
|
|
|
|
// check_in_service defaults to true
|
|
checkInService = true
|
|
if v, exists := params["check_in_service"]; exists {
|
|
if b, isBool := v.(bool); isBool {
|
|
checkInService = b
|
|
}
|
|
}
|
|
return
|
|
}
|
|
|
|
// reconstructPath walks the parent map backwards from end to start.
|
|
func reconstructPath(parent map[uuid.UUID]uuid.UUID, start, end uuid.UUID) []uuid.UUID {
|
|
var path []uuid.UUID
|
|
for cur := end; cur != start; cur = parent[cur] {
|
|
path = append(path, cur)
|
|
}
|
|
path = append(path, start)
|
|
// reverse: path was built end→start
|
|
for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 {
|
|
path[i], path[j] = path[j], path[i]
|
|
}
|
|
return path
|
|
}
|
|
|
|
// persistTopologyResult serialises the analysis outcome and writes it to async_task_result.
|
|
func persistTopologyResult(
|
|
ctx context.Context, db *gorm.DB, taskID uuid.UUID,
|
|
startID, endID uuid.UUID, checkInService, isReachable bool,
|
|
path []uuid.UUID, blockedBy *uuid.UUID,
|
|
) error {
|
|
pathStrs := make([]string, 0, len(path))
|
|
for _, id := range path {
|
|
pathStrs = append(pathStrs, id.String())
|
|
}
|
|
|
|
result := orm.JSONMap{
|
|
"start_component_uuid": startID.String(),
|
|
"end_component_uuid": endID.String(),
|
|
"check_in_service": checkInService,
|
|
"is_reachable": isReachable,
|
|
"path": pathStrs,
|
|
"computed_at": time.Now().Unix(),
|
|
}
|
|
if blockedBy != nil {
|
|
result["blocked_by"] = blockedBy.String()
|
|
}
|
|
|
|
if err := database.CreateAsyncTaskResult(ctx, db, taskID, result); err != nil {
|
|
return fmt.Errorf("save task result: %w", err)
|
|
}
|
|
|
|
logger.Info(ctx, "topology analysis completed",
|
|
"task_id", taskID,
|
|
"is_reachable", isReachable,
|
|
"path_length", len(path),
|
|
)
|
|
return nil
|
|
}
|
|
|
|
// CanHandle returns true for topology analysis tasks
|
|
func (h *TopologyAnalysisHandler) CanHandle(taskType TaskType) bool {
|
|
return taskType == TypeTopologyAnalysis
|
|
}
|
|
|
|
// EventAnalysisHandler handles event analysis tasks.
// Execute is currently a logging-only stub.
type EventAnalysisHandler struct {
	BaseHandler // supplies Name()
}
|
|
|
|
// NewEventAnalysisHandler creates a new EventAnalysisHandler
|
|
func NewEventAnalysisHandler() *EventAnalysisHandler {
|
|
return &EventAnalysisHandler{
|
|
BaseHandler: *NewBaseHandler("event_analysis_handler"),
|
|
}
|
|
}
|
|
|
|
// Execute processes an event analysis task
|
|
func (h *EventAnalysisHandler) Execute(ctx context.Context, taskID uuid.UUID, taskType TaskType, params map[string]any, db *gorm.DB) error {
|
|
logger.Info(ctx, "Starting event analysis",
|
|
"task_id", taskID,
|
|
"task_type", taskType,
|
|
)
|
|
|
|
// TODO: Implement actual event analysis logic
|
|
// This would typically involve:
|
|
// 1. Fetching motor and trigger information
|
|
// 2. Analyzing events within the specified duration
|
|
// 3. Generating analysis report
|
|
// 4. Storing results in database
|
|
|
|
// Simulate work
|
|
logger.Info(ctx, "Event analysis completed",
|
|
"task_id", taskID,
|
|
"task_type", taskType,
|
|
)
|
|
|
|
return nil
|
|
}
|
|
|
|
// CanHandle returns true for event analysis tasks
|
|
func (h *EventAnalysisHandler) CanHandle(taskType TaskType) bool {
|
|
return taskType == TypeEventAnalysis
|
|
}
|
|
|
|
// BatchImportHandler handles batch import tasks.
// Execute is currently a logging-only stub.
type BatchImportHandler struct {
	BaseHandler // supplies Name()
}
|
|
|
|
// NewBatchImportHandler creates a new BatchImportHandler
|
|
func NewBatchImportHandler() *BatchImportHandler {
|
|
return &BatchImportHandler{
|
|
BaseHandler: *NewBaseHandler("batch_import_handler"),
|
|
}
|
|
}
|
|
|
|
// Execute processes a batch import task
|
|
func (h *BatchImportHandler) Execute(ctx context.Context, taskID uuid.UUID, taskType TaskType, params map[string]any, db *gorm.DB) error {
|
|
logger.Info(ctx, "Starting batch import",
|
|
"task_id", taskID,
|
|
"task_type", taskType,
|
|
)
|
|
|
|
// TODO: Implement actual batch import logic
|
|
// This would typically involve:
|
|
// 1. Reading file from specified path
|
|
// 2. Parsing file content (CSV, Excel, etc.)
|
|
// 3. Validating and importing data into database
|
|
// 4. Generating import report
|
|
|
|
// Simulate work
|
|
logger.Info(ctx, "Batch import completed",
|
|
"task_id", taskID,
|
|
"task_type", taskType,
|
|
)
|
|
|
|
return nil
|
|
}
|
|
|
|
// CanHandle returns true for batch import tasks
|
|
func (h *BatchImportHandler) CanHandle(taskType TaskType) bool {
|
|
return taskType == TypeBatchImport
|
|
}
|
|
|
|
// CompositeHandler can handle multiple task types by delegating to the
// appropriate handler looked up in its factory.
type CompositeHandler struct {
	factory *HandlerFactory // registry used for per-type dispatch
}
|
|
|
|
// NewCompositeHandler creates a new CompositeHandler
|
|
func NewCompositeHandler(factory *HandlerFactory) *CompositeHandler {
|
|
return &CompositeHandler{factory: factory}
|
|
}
|
|
|
|
// Execute delegates task execution to the appropriate handler
|
|
func (h *CompositeHandler) Execute(ctx context.Context, taskID uuid.UUID, taskType TaskType, params map[string]any, db *gorm.DB) error {
|
|
handler, err := h.factory.GetHandler(taskType)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to get handler for task type %s: %w", taskType, err)
|
|
}
|
|
|
|
return handler.Execute(ctx, taskID, taskType, params, db)
|
|
}
|
|
|
|
// CanHandle returns true if any registered handler can handle the task type
|
|
func (h *CompositeHandler) CanHandle(taskType TaskType) bool {
|
|
_, err := h.factory.GetHandler(taskType)
|
|
return err == nil
|
|
}
|
|
|
|
// Name returns the composite handler name
|
|
func (h *CompositeHandler) Name() string {
|
|
return "composite_handler"
|
|
}
|
|
|
|
// DefaultHandlerFactory returns a HandlerFactory with all default handlers registered
|
|
func DefaultHandlerFactory(ctx context.Context) *HandlerFactory {
|
|
factory := NewHandlerFactory()
|
|
factory.CreateDefaultHandlers(ctx)
|
|
return factory
|
|
}
|
|
|
|
// DefaultCompositeHandler returns a CompositeHandler with all default handlers
|
|
func DefaultCompositeHandler(ctx context.Context) TaskHandler {
|
|
factory := DefaultHandlerFactory(ctx)
|
|
return NewCompositeHandler(factory)
|
|
} |