perf: resolve logging issues noted in TODO comments

This commit is contained in:
douxu 2024-07-26 16:48:31 +08:00
parent 04546ff646
commit 5f68e61ff8
8 changed files with 32 additions and 19 deletions

View File

@ -59,7 +59,6 @@ func TraverseMonitorDir(ctx context.Context, monitorDir string, dbName string, c
err := filepath.Walk(monitorDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
logger.Error("accessing a path failed", zap.Error(err))
// TODO 嵌套error
return err
}

View File

@ -95,13 +95,11 @@ func processComtradeFile(ctx context.Context, addFilePath string, monitorDir str
case constant.ConfigFileSuffix:
err := processConfigFile(ctx, dbName, addFilePath, delChan, comtradeMap, pool)
if err != nil {
// TODO 对error进行嵌套返回
return fmt.Errorf("process comtrade failed:%w", err)
}
case constant.DataFileSuffix:
err := processDataFile(monitorDir, fileName, addFilePath, comtradeMap, addChan, logger)
if err != nil {
// TODO 对error进行嵌套返回
return fmt.Errorf("process comtrade failed:%w", err)
}
default:

View File

@ -53,6 +53,7 @@ func ReadAndInitConfig(configDir, configName, configType string) (waveRecordConf
waveRecordConfig.MongoDBURI = strings.Join([]string{"mongodb://", mongoDBHost, ":", mongoDBPort}, "")
waveRecordConfig.MongoDBDataBase = config.GetString("mongodb_database")
// init zap log config from config.yaml
waveRecordConfig.LCfg.Mode = config.GetString("log_mode")
waveRecordConfig.LCfg.Level = config.GetString("log_level")
waveRecordConfig.LCfg.FileName = fmt.Sprintf(config.GetString("log_filepath"), time.Now().Format(constant.LogTimeFormate))
waveRecordConfig.LCfg.MaxSize = config.GetInt("log_maxsize")

View File

@ -5,6 +5,7 @@ mongodb_host: "localhost"
mongodb_port: "27017"
mongodb_database: "wave_record"
log_mode: "development"
log_level: "debug"
log_filepath: "/home/douxu/log/wave_record-%s.log"
log_maxsize: 1

View File

@ -0,0 +1,9 @@
// Package constant defines constant values shared across the wave record project.
package constant
const (
// DevelopmentLogMode marks the development environment; in this mode the
// logger writes to the console (stderr) only, skipping the rotating log file.
DevelopmentLogMode = "development"
// ProductionLogMode marks the production environment; the logger is expected
// to write to both the rotating log file and the console in non-development modes.
ProductionLogMode = "production"
)

View File

@ -3,5 +3,5 @@ package constant
const (
// LogTimeFormate define time format for log file name
LogTimeFormate = "2006-01-02 15-04-05"
LogTimeFormate = "2006-01-02 15:04:05"
)

View File

@ -5,6 +5,8 @@ import (
"os"
"sync"
"wave_record/constant"
"github.com/natefinch/lumberjack"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
@ -17,6 +19,7 @@ var (
// CutLogConfig define log config of wave record project
type CutLogConfig struct {
Mode string `json:"mode"` // Mode 日志模式 development、production
Level string `json:"level"` // Level 最低日志等级DEBUG<INFO<WARN<ERROR<FATAL INFO-->收集INFO等级以上的日志
FileName string `json:"file_name"` // FileName 日志文件位置
MaxSize int `json:"max_size"` // MaxSize 进行切割之前,日志文件的最大大小(MB为单位)默认为100MB
@ -26,17 +29,17 @@ type CutLogConfig struct {
// getEncoder responsible for setting the log format for encoding
func getEncoder() zapcore.Encoder {
encodeConfig := zap.NewProductionEncoderConfig()
encoderConfig := zap.NewProductionEncoderConfig()
// serialization time eg:2006-01-02 15:04:05
encodeConfig.EncodeTime = zapcore.TimeEncoderOfLayout("2006-01-02 15:04:05")
encodeConfig.TimeKey = "time"
encodeConfig.EncodeLevel = zapcore.CapitalLevelEncoder
encodeConfig.EncodeCaller = zapcore.ShortCallerEncoder
return zapcore.NewJSONEncoder(encodeConfig)
encoderConfig.EncodeTime = zapcore.TimeEncoderOfLayout("2006-01-02 15:04:05")
encoderConfig.TimeKey = "time"
encoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
encoderConfig.EncodeCaller = zapcore.ShortCallerEncoder
return zapcore.NewJSONEncoder(encoderConfig)
}
// getLogWriter responsible for setting the location of log storage
func getLogWriter(filename string, maxsize, maxBackup, maxAge int) zapcore.WriteSyncer {
func getLogWriter(mode, filename string, maxsize, maxBackup, maxAge int) zapcore.WriteSyncer {
lumberJackLogger := &lumberjack.Logger{
Filename: filename, // log file position
MaxSize: maxsize, // log file maxsize
@ -44,15 +47,19 @@ func getLogWriter(filename string, maxsize, maxBackup, maxAge int) zapcore.Write
MaxBackups: maxBackup, // maximum number of old files retained
Compress: false, // whether to compress
}
syncFile := zapcore.AddSync(lumberJackLogger)
// TODO:增加调试输出到控制台设置,其他模式无控制台数据
syncConsole := zapcore.AddSync(os.Stderr)
if mode == constant.DevelopmentLogMode {
return syncConsole
}
syncFile := zapcore.AddSync(lumberJackLogger)
return zapcore.NewMultiWriteSyncer(syncFile, syncConsole)
}
// initLogger return successfully initialized zap logger
func initLogger(lCfg CutLogConfig) *zap.Logger {
writeSyncer := getLogWriter(lCfg.FileName, lCfg.MaxSize, lCfg.MaxBackups, lCfg.MaxAge)
writeSyncer := getLogWriter(lCfg.Mode, lCfg.FileName, lCfg.MaxSize, lCfg.MaxBackups, lCfg.MaxAge)
encoder := getEncoder()
l := new(zapcore.Level)

View File

@ -45,24 +45,22 @@ func main() {
// init mongoDBClient
mongoDBClient = database.GetMongoDBInstance(ctx, waveRecordConfig.MongoDBURI)
// init logger
logger = log.GetLoggerInstance(waveRecordConfig.LCfg)
defer func() {
if err := mongoDBClient.Disconnect(ctx); err != nil {
panic(err)
}
}()
// init logger
logger = log.GetLoggerInstance(waveRecordConfig.LCfg)
defer logger.Sync()
defer ants.Release()
pool, err := ants.NewPoolWithFunc(waveRecordConfig.ParseConcurrentQuantity, comtrade.ParseFunc)
if err != nil {
logger.Error("init concurrent parse task pool failed", zap.Error(err))
panic(err)
}
defer ants.Release()
go comtrade.TraverseMonitorDir(ctx, waveRecordConfig.MonitorDir, waveRecordConfig.MongoDBDataBase, &comtradeMap, addChan, delChan, pool)