perf: resolve log issue in TODO
parent 04546ff646
commit 5f68e61ff8
@@ -59,7 +59,6 @@ func TraverseMonitorDir(ctx context.Context, monitorDir string, dbName string, c
 	err := filepath.Walk(monitorDir, func(path string, info os.FileInfo, err error) error {
 		if err != nil {
 			logger.Error("accessing a path failed", zap.Error(err))
-			// TODO wrap the error
 			return err
 		}
 
@@ -95,13 +95,11 @@ func processComtradeFile(ctx context.Context, addFilePath string, monitorDir str
 	case constant.ConfigFileSuffix:
 		err := processConfigFile(ctx, dbName, addFilePath, delChan, comtradeMap, pool)
 		if err != nil {
-			// TODO wrap the error before returning
 			return fmt.Errorf("process comtrade failed:%w", err)
 		}
 	case constant.DataFileSuffix:
 		err := processDataFile(monitorDir, fileName, addFilePath, comtradeMap, addChan, logger)
 		if err != nil {
-			// TODO wrap the error before returning
 			return fmt.Errorf("process comtrade failed:%w", err)
 		}
 	default:
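The two hunks above resolve the error-wrapping TODOs: the returned error is already wrapped with the %w verb, so the notes are simply dropped. A minimal, self-contained sketch of that wrapping pattern, where the stub processConfigFile and the errBadConfig sentinel are hypothetical stand-ins rather than the project's real code:

package main

import (
	"errors"
	"fmt"
)

// errBadConfig is a hypothetical sentinel standing in for whatever the real
// processConfigFile might return.
var errBadConfig = errors.New("bad config")

// processConfigFile is a stub used only to demonstrate the wrapping.
func processConfigFile(path string) error {
	return errBadConfig
}

func processComtradeFile(path string) error {
	if err := processConfigFile(path); err != nil {
		// %w keeps the original error in the chain, as in the diff above.
		return fmt.Errorf("process comtrade failed:%w", err)
	}
	return nil
}

func main() {
	err := processComtradeFile("example.cfg")
	// errors.Is still sees the wrapped cause through the %w chain.
	fmt.Println(err)                          // process comtrade failed:bad config
	fmt.Println(errors.Is(err, errBadConfig)) // true
}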
@@ -53,6 +53,7 @@ func ReadAndInitConfig(configDir, configName, configType string) (waveRecordConf
 	waveRecordConfig.MongoDBURI = strings.Join([]string{"mongodb://", mongoDBHost, ":", mongoDBPort}, "")
 	waveRecordConfig.MongoDBDataBase = config.GetString("mongodb_database")
 	// init zap log config from config.yaml
+	waveRecordConfig.LCfg.Mode = config.GetString("log_mode")
 	waveRecordConfig.LCfg.Level = config.GetString("log_level")
 	waveRecordConfig.LCfg.FileName = fmt.Sprintf(config.GetString("log_filepath"), time.Now().Format(constant.LogTimeFormate))
 	waveRecordConfig.LCfg.MaxSize = config.GetInt("log_maxsize")
@@ -5,6 +5,7 @@ mongodb_host: "localhost"
 mongodb_port: "27017"
 mongodb_database: "wave_record"
 
+log_mode: "development"
 log_level: "debug"
 log_filepath: "/home/douxu/log/wave_record-%s.log"
 log_maxsize: 1
@@ -0,0 +1,9 @@
+// Package constant define constant value
+package constant
+
+const (
+	// DevelopmentLogMode define development operator environment for wave record project
+	DevelopmentLogMode = "development"
+	// ProductionLogMode define production operator environment for wave record project
+	ProductionLogMode = "production"
+)
@@ -3,5 +3,5 @@ package constant
 
 const (
 	// LogTimeFormate define time format for log file name
-	LogTimeFormate = "2006-01-02 15-04-05"
+	LogTimeFormate = "2006-01-02 15:04:05"
 )
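For reference, with the log_filepath pattern from config.yaml and the updated LogTimeFormate, the log file name built in ReadAndInitConfig can be previewed with this small sketch (the printed timestamp is illustrative):

package main

import (
	"fmt"
	"time"
)

// logTimeFormate mirrors constant.LogTimeFormate after this commit.
const logTimeFormate = "2006-01-02 15:04:05"

func main() {
	// Same construction as in ReadAndInitConfig: the formatted timestamp
	// is substituted into the log_filepath pattern.
	fileName := fmt.Sprintf("/home/douxu/log/wave_record-%s.log", time.Now().Format(logTimeFormate))
	fmt.Println(fileName) // e.g. /home/douxu/log/wave_record-2023-01-02 15:04:05.log
}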
@@ -5,6 +5,8 @@ import (
 	"os"
 	"sync"
 
+	"wave_record/constant"
+
 	"github.com/natefinch/lumberjack"
 	"go.uber.org/zap"
 	"go.uber.org/zap/zapcore"
@@ -17,6 +19,7 @@ var (
 
 // CutLogConfig define log config of wave record project
 type CutLogConfig struct {
+	Mode     string `json:"mode"`      // Mode log mode: development or production
 	Level    string `json:"level"`     // Level minimum log level, DEBUG<INFO<WARN<ERROR<FATAL; INFO collects logs at INFO level and above
 	FileName string `json:"file_name"` // FileName log file location
 	MaxSize  int    `json:"max_size"`  // MaxSize maximum size of the log file (in MB) before rotation, default 100MB
@@ -26,17 +29,17 @@ type CutLogConfig struct {
 
 // getEncoder responsible for setting the log format for encoding
 func getEncoder() zapcore.Encoder {
-	encodeConfig := zap.NewProductionEncoderConfig()
+	encoderConfig := zap.NewProductionEncoderConfig()
 	// serialization time eg:2006-01-02 15:04:05
-	encodeConfig.EncodeTime = zapcore.TimeEncoderOfLayout("2006-01-02 15:04:05")
-	encodeConfig.TimeKey = "time"
-	encodeConfig.EncodeLevel = zapcore.CapitalLevelEncoder
-	encodeConfig.EncodeCaller = zapcore.ShortCallerEncoder
-	return zapcore.NewJSONEncoder(encodeConfig)
+	encoderConfig.EncodeTime = zapcore.TimeEncoderOfLayout("2006-01-02 15:04:05")
+	encoderConfig.TimeKey = "time"
+	encoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
+	encoderConfig.EncodeCaller = zapcore.ShortCallerEncoder
+	return zapcore.NewJSONEncoder(encoderConfig)
 }
 
 // getLogWriter responsible for setting the location of log storage
-func getLogWriter(filename string, maxsize, maxBackup, maxAge int) zapcore.WriteSyncer {
+func getLogWriter(mode, filename string, maxsize, maxBackup, maxAge int) zapcore.WriteSyncer {
 	lumberJackLogger := &lumberjack.Logger{
 		Filename: filename, // log file position
 		MaxSize:  maxsize,  // log file maxsize
@@ -44,15 +47,19 @@ func getLogWriter(filename string, maxsize, maxBackup, maxAge int) zapcore.Write
 		MaxBackups: maxBackup, // maximum number of old files retained
 		Compress:   false,     // whether to compress
 	}
-	syncFile := zapcore.AddSync(lumberJackLogger)
-	// TODO: add console output for debugging; other modes have no console output
 	syncConsole := zapcore.AddSync(os.Stderr)
+
+	if mode == constant.DevelopmentLogMode {
+		return syncConsole
+	}
+
+	syncFile := zapcore.AddSync(lumberJackLogger)
 	return zapcore.NewMultiWriteSyncer(syncFile, syncConsole)
 }
 
 // initLogger return successfully initialized zap logger
 func initLogger(lCfg CutLogConfig) *zap.Logger {
-	writeSyncer := getLogWriter(lCfg.FileName, lCfg.MaxSize, lCfg.MaxBackups, lCfg.MaxAge)
+	writeSyncer := getLogWriter(lCfg.Mode, lCfg.FileName, lCfg.MaxSize, lCfg.MaxBackups, lCfg.MaxAge)
 	encoder := getEncoder()
 
 	l := new(zapcore.Level)
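A self-contained sketch of the mode switch that getLogWriter now implements, wired into a zap logger. The constant name, file path, and sizes are placeholders; development mode writes to the console only, any other mode writes to the log file and the console:

package main

import (
	"os"

	"github.com/natefinch/lumberjack"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// developmentLogMode mirrors constant.DevelopmentLogMode.
const developmentLogMode = "development"

// newWriteSyncer follows the getLogWriter logic above: console only in
// development mode, log file plus console otherwise.
func newWriteSyncer(mode, filename string, maxSize int) zapcore.WriteSyncer {
	syncConsole := zapcore.AddSync(os.Stderr)
	if mode == developmentLogMode {
		return syncConsole
	}
	syncFile := zapcore.AddSync(&lumberjack.Logger{
		Filename: filename, // log file position
		MaxSize:  maxSize,  // MB before rotation
	})
	return zapcore.NewMultiWriteSyncer(syncFile, syncConsole)
}

func main() {
	encoderConfig := zap.NewProductionEncoderConfig()
	encoderConfig.EncodeTime = zapcore.TimeEncoderOfLayout("2006-01-02 15:04:05")
	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(encoderConfig),
		newWriteSyncer("development", "/tmp/wave_record.log", 1),
		zapcore.DebugLevel,
	)
	logger := zap.New(core, zap.AddCaller())
	defer logger.Sync()
	logger.Info("logger initialized", zap.String("mode", "development"))
}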
@@ -45,24 +45,22 @@ func main() {
 	// init mongoDBClient
 	mongoDBClient = database.GetMongoDBInstance(ctx, waveRecordConfig.MongoDBURI)
 
-	// init logger
-	logger = log.GetLoggerInstance(waveRecordConfig.LCfg)
-
 	defer func() {
 		if err := mongoDBClient.Disconnect(ctx); err != nil {
 			panic(err)
 		}
 	}()
 
+	// init logger
+	logger = log.GetLoggerInstance(waveRecordConfig.LCfg)
 	defer logger.Sync()
 
-	defer ants.Release()
-
 	pool, err := ants.NewPoolWithFunc(waveRecordConfig.ParseConcurrentQuantity, comtrade.ParseFunc)
 	if err != nil {
 		logger.Error("init concurrent parse task pool failed", zap.Error(err))
 		panic(err)
 	}
+	defer ants.Release()
 
 	go comtrade.TraverseMonitorDir(ctx, waveRecordConfig.MonitorDir, waveRecordConfig.MongoDBDataBase, &comtradeMap, addChan, delChan, pool)
 