chore: Fix linter findings for `revive:exported` in `plugins/inputs/f*` (#16048)
parent 75194db18b
commit 5c4ef13c66
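For context, revive's `exported` rule requires every exported Go identifier to carry a doc comment that begins with the identifier's name. The hunks below resolve such findings in two ways: identifiers that only serve the plugin internally (constructors wired up in `init()`, test hooks such as filecount's `fs` field) are unexported, while identifiers that stay exported (the hc2/hc3 `Parse` functions) gain compliant doc comments. A minimal sketch of the pattern, using made-up names rather than code from this commit:

```go
// Illustrative sketch only (hypothetical package and names, not code from this
// commit) of the two ways the revive `exported` findings are resolved.
package demo

// Counter stays exported, so it gets a doc comment that starts with its name,
// mirroring the hc2/hc3 Parse functions in the diff below.
type Counter struct{ n int }

// Add increments the counter by delta; the comment begins with "Add", which is
// exactly what revive's exported rule checks for.
func (c *Counter) Add(delta int) { c.n += delta }

// newCounter shows the other fix used throughout this commit: identifiers that
// need no public API (plugin constructors registered in init(), test-only
// fields like filecount's fs) are simply unexported.
func newCounter() *Counter { return &Counter{} }
```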
@@ -17,31 +17,30 @@ import (
 var sampleConfig string

 var (
     execCommand = exec.Command // execCommand is used to mock commands in tests.
+    metricsTargets = []struct {
+        target string
+        field  string
+    }{
+        {
+            target: "Currently failed:",
+            field:  "failed",
+        },
+        {
+            target: "Currently banned:",
+            field:  "banned",
+        },
+    }
 )

+const cmd = "fail2ban-client"
+
 type Fail2ban struct {
     UseSudo bool   `toml:"use_sudo"`
     Socket  string `toml:"socket"`
     path    string
 }

-var metricsTargets = []struct {
-    target string
-    field  string
-}{
-    {
-        target: "Currently failed:",
-        field:  "failed",
-    },
-    {
-        target: "Currently banned:",
-        field:  "banned",
-    },
-}
-
-const cmd = "fail2ban-client"
-
 func (*Fail2ban) SampleConfig() string {
     return sampleConfig
 }
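The fail2ban hunk above keeps the `execCommand = exec.Command` seam whose comment says it exists for mocking in tests. For readers unfamiliar with the idiom, here is a minimal sketch of how such a seam is commonly swapped out using the standard test-helper-process pattern; the helper names and the canned output are assumptions for illustration, not the plugin's actual test code.

```go
// Hypothetical test-file sketch (not fail2ban's real tests) of the
// exec.Command mocking idiom that the execCommand variable enables.
package fail2ban

import (
	"fmt"
	"os"
	"os/exec"
	"testing"
)

// fakeExecCommand re-invokes the test binary so TestHelperProcess can stand in
// for the real fail2ban-client; a test would assign execCommand = fakeExecCommand.
func fakeExecCommand(command string, args ...string) *exec.Cmd {
	cs := append([]string{"-test.run=TestHelperProcess", "--", command}, args...)
	cmd := exec.Command(os.Args[0], cs...)
	cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
	return cmd
}

// TestHelperProcess only does work when re-invoked by fakeExecCommand; it
// prints canned lines containing the "Currently failed:" / "Currently banned:"
// markers that metricsTargets scans for (output format simplified here).
func TestHelperProcess(_ *testing.T) {
	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
		return
	}
	fmt.Println("Currently failed:	3")
	fmt.Println("Currently banned:	1")
	os.Exit(0)
}
```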
@@ -21,7 +21,6 @@ var sampleConfig string

 const defaultTimeout = 5 * time.Second

-// Fibaro contains connection information
 type Fibaro struct {
     URL      string `toml:"url"`
     Username string `toml:"username"`
@@ -32,6 +31,55 @@ type Fibaro struct {
     client *http.Client
 }

+func (*Fibaro) SampleConfig() string {
+    return sampleConfig
+}
+
+func (f *Fibaro) Init() error {
+    switch f.DeviceType {
+    case "":
+        f.DeviceType = "HC2"
+    case "HC2", "HC3":
+    default:
+        return errors.New("invalid option for device type")
+    }
+
+    return nil
+}
+
+func (f *Fibaro) Gather(acc telegraf.Accumulator) error {
+    if f.client == nil {
+        f.client = &http.Client{
+            Transport: &http.Transport{
+                Proxy: http.ProxyFromEnvironment,
+            },
+            Timeout: time.Duration(f.Timeout),
+        }
+    }
+
+    sections, err := f.getJSON("/api/sections")
+    if err != nil {
+        return err
+    }
+    rooms, err := f.getJSON("/api/rooms")
+    if err != nil {
+        return err
+    }
+    devices, err := f.getJSON("/api/devices")
+    if err != nil {
+        return err
+    }
+
+    switch f.DeviceType {
+    case "HC2":
+        return hc2.Parse(acc, sections, rooms, devices)
+    case "HC3":
+        return hc3.Parse(acc, sections, rooms, devices)
+    }
+
+    return nil
+}
+
 // getJSON connects, authenticates and reads JSON payload returned by Fibaro box
 func (f *Fibaro) getJSON(path string) ([]byte, error) {
     var requestURL = f.URL + path
@@ -66,56 +114,6 @@ func (f *Fibaro) getJSON(path string) ([]byte, error) {
     return bodyBytes, nil
 }

-func (f *Fibaro) Init() error {
-    switch f.DeviceType {
-    case "":
-        f.DeviceType = "HC2"
-    case "HC2", "HC3":
-    default:
-        return errors.New("invalid option for device type")
-    }
-
-    return nil
-}
-
-func (*Fibaro) SampleConfig() string {
-    return sampleConfig
-}
-
-// Gather fetches all required information to output metrics
-func (f *Fibaro) Gather(acc telegraf.Accumulator) error {
-    if f.client == nil {
-        f.client = &http.Client{
-            Transport: &http.Transport{
-                Proxy: http.ProxyFromEnvironment,
-            },
-            Timeout: time.Duration(f.Timeout),
-        }
-    }
-
-    sections, err := f.getJSON("/api/sections")
-    if err != nil {
-        return err
-    }
-    rooms, err := f.getJSON("/api/rooms")
-    if err != nil {
-        return err
-    }
-    devices, err := f.getJSON("/api/devices")
-    if err != nil {
-        return err
-    }
-
-    switch f.DeviceType {
-    case "HC2":
-        return hc2.Parse(acc, sections, rooms, devices)
-    case "HC3":
-        return hc3.Parse(acc, sections, rooms, devices)
-    }
-
-    return nil
-}
-
 func init() {
     inputs.Add("fibaro", func() telegraf.Input {
         return &Fibaro{
@@ -7,7 +7,8 @@ import (
     "github.com/influxdata/telegraf"
 )

-func Parse(acc telegraf.Accumulator, sectionBytes, roomBytes, deviecsBytes []byte) error {
+// Parse parses data from sections, rooms and devices, and adds measurements containing parsed data.
+func Parse(acc telegraf.Accumulator, sectionBytes, roomBytes, devicesBytes []byte) error {
     var tmpSections []Sections
     if err := json.Unmarshal(sectionBytes, &tmpSections); err != nil {
         return err
@@ -28,7 +29,7 @@ func Parse(acc telegraf.Accumulator, sectionBytes, roomBytes, deviecsBytes []byte) error {
     }

     var devices []Devices
-    if err := json.Unmarshal(deviecsBytes, &devices); err != nil {
+    if err := json.Unmarshal(devicesBytes, &devices); err != nil {
         return err
     }

@@ -9,7 +9,8 @@ import (
     "github.com/influxdata/telegraf/internal"
 )

-func Parse(acc telegraf.Accumulator, sectionBytes, roomBytes, deviecsBytes []byte) error {
+// Parse parses data from sections, rooms and devices, and adds measurements containing parsed data.
+func Parse(acc telegraf.Accumulator, sectionBytes, roomBytes, devicesBytes []byte) error {
     var tmpSections []Sections
     if err := json.Unmarshal(sectionBytes, &tmpSections); err != nil {
         return err
@@ -29,7 +30,7 @@ func Parse(acc telegraf.Accumulator, sectionBytes, roomBytes, deviecsBytes []byte) error {
     }

     var devices []Devices
-    if err := json.Unmarshal(deviecsBytes, &devices); err != nil {
+    if err := json.Unmarshal(devicesBytes, &devices); err != nil {
         return err
     }

@@ -45,6 +45,10 @@ func (f *File) Init() error {
     return err
 }

+func (f *File) SetParserFunc(fn telegraf.ParserFunc) {
+    f.parserFunc = fn
+}
+
 func (f *File) Gather(acc telegraf.Accumulator) error {
     err := f.refreshFilePaths()
     if err != nil {
@@ -71,10 +75,6 @@ func (f *File) Gather(acc telegraf.Accumulator) error {
     return nil
 }

-func (f *File) SetParserFunc(fn telegraf.ParserFunc) {
-    f.parserFunc = fn
-}
-
 func (f *File) refreshFilePaths() error {
     var allFiles []string
     for _, file := range f.Files {
@@ -21,22 +21,41 @@ import (
 var sampleConfig string

 type FileCount struct {
     Directory      string `toml:"directory" deprecated:"1.9.0;1.35.0;use 'directories' instead"`
-    Directories    []string
-    Name           string
-    Recursive      bool
-    RegularOnly    bool
-    FollowSymlinks bool
-    Size           config.Size
+    Directories    []string `toml:"directories"`
+    Name           string   `toml:"name"`
+    Recursive      bool     `toml:"recursive"`
+    RegularOnly    bool     `toml:"regular_only"`
+    FollowSymlinks bool     `toml:"follow_symlinks"`
+    Size           config.Size     `toml:"size"`
     MTime          config.Duration `toml:"mtime"`
-    fileFilters    []fileFilterFunc
-    globPaths      []globpath.GlobPath
-    Fs             fileSystem
-    Log            telegraf.Logger
+    Log            telegraf.Logger `toml:"-"`
+
+    fs          fileSystem
+    fileFilters []fileFilterFunc
+    globPaths   []globpath.GlobPath
 }

 type fileFilterFunc func(os.FileInfo) (bool, error)

+func (*FileCount) SampleConfig() string {
+    return sampleConfig
+}
+
+func (fc *FileCount) Gather(acc telegraf.Accumulator) error {
+    if fc.globPaths == nil {
+        fc.initGlobPaths(acc)
+    }
+
+    for _, glob := range fc.globPaths {
+        for _, dir := range fc.onlyDirectories(glob.GetRoots()) {
+            fc.count(acc, dir, glob)
+        }
+    }
+
+    return nil
+}
+
 func rejectNilFilters(filters []fileFilterFunc) []fileFilterFunc {
     filtered := make([]fileFilterFunc, 0, len(filters))
     for _, f := range filters {
@@ -226,29 +245,11 @@ func (fc *FileCount) filter(file os.FileInfo) (bool, error) {
     return true, nil
 }

-func (*FileCount) SampleConfig() string {
-    return sampleConfig
-}
-
-func (fc *FileCount) Gather(acc telegraf.Accumulator) error {
-    if fc.globPaths == nil {
-        fc.initGlobPaths(acc)
-    }
-
-    for _, glob := range fc.globPaths {
-        for _, dir := range fc.onlyDirectories(glob.GetRoots()) {
-            fc.count(acc, dir, glob)
-        }
-    }
-
-    return nil
-}
-
 func (fc *FileCount) resolveLink(path string) (os.FileInfo, error) {
     if fc.FollowSymlinks {
-        return fc.Fs.Stat(path)
+        return fc.fs.stat(path)
     }
-    fi, err := fc.Fs.Lstat(path)
+    fi, err := fc.fs.lstat(path)
     if err != nil {
         return fi, err
     }
@@ -262,7 +263,7 @@ func (fc *FileCount) resolveLink(path string) (os.FileInfo, error) {
 func (fc *FileCount) onlyDirectories(directories []string) []string {
     out := make([]string, 0)
     for _, path := range directories {
-        info, err := fc.Fs.Stat(path)
+        info, err := fc.fs.stat(path)
         if err == nil && info.IsDir() {
             out = append(out, path)
         }
@@ -295,7 +296,7 @@ func (fc *FileCount) initGlobPaths(acc telegraf.Accumulator) {
     }
 }

-func NewFileCount() *FileCount {
+func newFileCount() *FileCount {
     return &FileCount{
         Directory:   "",
         Directories: []string{},
@@ -306,12 +307,12 @@ func NewFileCount() *FileCount {
         Size:        config.Size(0),
         MTime:       config.Duration(0),
         fileFilters: nil,
-        Fs:          osFS{},
+        fs:          osFS{},
     }
 }

 func init() {
     inputs.Add("filecount", func() telegraf.Input {
-        return NewFileCount()
+        return newFileCount()
     })
 }
@@ -13,10 +13,11 @@ import (
     "testing"
     "time"

+    "github.com/stretchr/testify/require"
+
     "github.com/influxdata/telegraf"
     "github.com/influxdata/telegraf/config"
     "github.com/influxdata/telegraf/testutil"
-    "github.com/stretchr/testify/require"
 )

 func TestNoFilters(t *testing.T) {
@@ -147,7 +148,7 @@ func TestDirectoryWithTrailingSlash(t *testing.T) {
         Directories: []string{getTestdataDir() + string(filepath.Separator)},
         Name:        "*",
         Recursive:   true,
-        Fs:          getFakeFileSystem(getTestdataDir()),
+        fs:          getFakeFileSystem(getTestdataDir()),
     }

     var acc testutil.Accumulator
@@ -184,7 +185,7 @@ func getNoFilterFileCount() FileCount {
         Size:        config.Size(0),
         MTime:       config.Duration(0),
         fileFilters: nil,
-        Fs:          getFakeFileSystem(getTestdataDir()),
+        fs:          getFakeFileSystem(getTestdataDir()),
     }
 }

@@ -12,9 +12,9 @@ import (
 */

 type fileSystem interface {
-    Open(name string) (file, error)
-    Stat(name string) (os.FileInfo, error)
-    Lstat(name string) (os.FileInfo, error)
+    open(name string) (file, error)
+    stat(name string) (os.FileInfo, error)
+    lstat(name string) (os.FileInfo, error)
 }

 type file interface {
@@ -28,6 +28,6 @@ type file interface {
 // osFS implements fileSystem using the local disk
 type osFS struct{}

-func (osFS) Open(name string) (file, error)         { return os.Open(name) }
-func (osFS) Stat(name string) (os.FileInfo, error)  { return os.Stat(name) }
-func (osFS) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) }
+func (osFS) open(name string) (file, error)         { return os.Open(name) }
+func (osFS) stat(name string) (os.FileInfo, error)  { return os.Stat(name) }
+func (osFS) lstat(name string) (os.FileInfo, error) { return os.Lstat(name) }
@@ -40,18 +40,18 @@ func (f fakeFileInfo) ModTime() time.Time { return f.modtime }
 func (f fakeFileInfo) IsDir() bool        { return f.isdir }
 func (f fakeFileInfo) Sys() interface{}   { return f.sys }

-func (f fakeFileSystem) Open(name string) (file, error) {
+func (f fakeFileSystem) open(name string) (file, error) {
     return nil, &os.PathError{Op: "Open", Path: name, Err: errors.New("not implemented by fake filesystem")}
 }

-func (f fakeFileSystem) Stat(name string) (os.FileInfo, error) {
+func (f fakeFileSystem) stat(name string) (os.FileInfo, error) {
     if fakeInfo, found := f.files[name]; found {
         return fakeInfo, nil
     }
     return nil, &os.PathError{Op: "Stat", Path: name, Err: errors.New("no such file or directory")}
 }

-func (f fakeFileSystem) Lstat(name string) (os.FileInfo, error) {
+func (f fakeFileSystem) lstat(name string) (os.FileInfo, error) {
     // not able to test with symlinks currently
-    return f.Stat(name)
+    return f.stat(name)
 }
@@ -17,7 +17,7 @@ func TestMTime(t *testing.T) {
     mtime := time.Date(2015, time.December, 14, 18, 25, 5, 0, time.UTC)

     fs := getTestFileSystem()
-    fileInfo, err := fs.Stat("/testdata/foo")
+    fileInfo, err := fs.stat("/testdata/foo")
     require.NoError(t, err)
     require.Equal(t, mtime, fileInfo.ModTime())
 }
@@ -26,25 +26,24 @@ func TestSize(t *testing.T) {
     // this is the time our foo file should have
     size := int64(4096)
     fs := getTestFileSystem()
-    fileInfo, err := fs.Stat("/testdata")
+    fileInfo, err := fs.stat("/testdata")
     require.NoError(t, err)
     require.Equal(t, size, fileInfo.Size())
 }

 func TestIsDir(t *testing.T) {
     // this is the time our foo file should have
-    dir := true
     fs := getTestFileSystem()
-    fileInfo, err := fs.Stat("/testdata")
+    fileInfo, err := fs.stat("/testdata")
     require.NoError(t, err)
-    require.Equal(t, dir, fileInfo.IsDir())
+    require.True(t, fileInfo.IsDir())
 }

 func TestRealFS(t *testing.T) {
     // test that the default (non-test) empty FS causes expected behaviour
     var fs fileSystem = osFS{}
     // the following file exists on disk - and not in our fake fs
-    fileInfo, err := fs.Stat(getTestdataDir() + "/qux")
+    fileInfo, err := fs.stat(getTestdataDir() + "/qux")
     require.NoError(t, err)
     require.False(t, fileInfo.IsDir())
     require.Equal(t, int64(446), fileInfo.Size())
@@ -53,10 +52,10 @@ func TestRealFS(t *testing.T) {
     fs = getTestFileSystem()
     // now, the same test as above will return an error as the file doesn't exist in our fake fs
     expectedError := "Stat " + getTestdataDir() + "/qux: No such file or directory"
-    _, err = fs.Stat(getTestdataDir() + "/qux")
+    _, err = fs.stat(getTestdataDir() + "/qux")
     require.Error(t, err, expectedError)
     // and verify that what we DO expect to find, we do
-    fileInfo, err = fs.Stat("/testdata/foo")
+    fileInfo, err = fs.stat("/testdata/foo")
     require.NoError(t, err)
     require.NotNil(t, fileInfo)
 }
@@ -17,10 +17,10 @@ import (
 var sampleConfig string

 type FileStat struct {
-    Md5   bool
-    Files []string
+    Md5   bool     `toml:"md5"`
+    Files []string `toml:"files"`

-    Log telegraf.Logger
+    Log telegraf.Logger `toml:"-"`

     // maps full file paths to globmatch obj
     globs map[string]*globpath.GlobPath
|
||||||
filesWithErrors map[string]bool
|
filesWithErrors map[string]bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFileStat() *FileStat {
|
|
||||||
return &FileStat{
|
|
||||||
globs: make(map[string]*globpath.GlobPath),
|
|
||||||
missingFiles: make(map[string]bool),
|
|
||||||
filesWithErrors: make(map[string]bool),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*FileStat) SampleConfig() string {
|
func (*FileStat) SampleConfig() string {
|
||||||
return sampleConfig
|
return sampleConfig
|
||||||
}
|
}
|
||||||
|
|
@ -134,8 +126,16 @@ func getMd5(file string) (string, error) {
|
||||||
return hex.EncodeToString(hash.Sum(nil)), nil
|
return hex.EncodeToString(hash.Sum(nil)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func newFileStat() *FileStat {
|
||||||
|
return &FileStat{
|
||||||
|
globs: make(map[string]*globpath.GlobPath),
|
||||||
|
missingFiles: make(map[string]bool),
|
||||||
|
filesWithErrors: make(map[string]bool),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
inputs.Add("filestat", func() telegraf.Input {
|
inputs.Add("filestat", func() telegraf.Input {
|
||||||
return NewFileStat()
|
return newFileStat()
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@@ -15,12 +15,10 @@ import (
     "github.com/influxdata/telegraf/testutil"
 )

-var (
-    testdataDir = getTestdataDir()
-)
+var testdataDir = getTestdataDir()

 func TestGatherNoMd5(t *testing.T) {
-    fs := NewFileStat()
+    fs := newFileStat()
     fs.Log = testutil.Logger{}
     fs.Files = []string{
         filepath.Join(testdataDir, "log1.log"),
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGatherExplicitFiles(t *testing.T) {
|
func TestGatherExplicitFiles(t *testing.T) {
|
||||||
fs := NewFileStat()
|
fs := newFileStat()
|
||||||
fs.Log = testutil.Logger{}
|
fs.Log = testutil.Logger{}
|
||||||
fs.Md5 = true
|
fs.Md5 = true
|
||||||
fs.Files = []string{
|
fs.Files = []string{
|
||||||
|
|
@ -83,7 +81,7 @@ func TestGatherExplicitFiles(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNonExistentFile(t *testing.T) {
|
func TestNonExistentFile(t *testing.T) {
|
||||||
fs := NewFileStat()
|
fs := newFileStat()
|
||||||
fs.Log = testutil.Logger{}
|
fs.Log = testutil.Logger{}
|
||||||
fs.Md5 = true
|
fs.Md5 = true
|
||||||
fs.Files = []string{
|
fs.Files = []string{
|
||||||
|
|
@ -100,7 +98,7 @@ func TestNonExistentFile(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGatherGlob(t *testing.T) {
|
func TestGatherGlob(t *testing.T) {
|
||||||
fs := NewFileStat()
|
fs := newFileStat()
|
||||||
fs.Log = testutil.Logger{}
|
fs.Log = testutil.Logger{}
|
||||||
fs.Md5 = true
|
fs.Md5 = true
|
||||||
fs.Files = []string{
|
fs.Files = []string{
|
||||||
|
|
@ -126,7 +124,7 @@ func TestGatherGlob(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGatherSuperAsterisk(t *testing.T) {
|
func TestGatherSuperAsterisk(t *testing.T) {
|
||||||
fs := NewFileStat()
|
fs := newFileStat()
|
||||||
fs.Log = testutil.Logger{}
|
fs.Log = testutil.Logger{}
|
||||||
fs.Md5 = true
|
fs.Md5 = true
|
||||||
fs.Files = []string{
|
fs.Files = []string{
|
||||||
|
|
@ -159,7 +157,7 @@ func TestGatherSuperAsterisk(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestModificationTime(t *testing.T) {
|
func TestModificationTime(t *testing.T) {
|
||||||
fs := NewFileStat()
|
fs := newFileStat()
|
||||||
fs.Log = testutil.Logger{}
|
fs.Log = testutil.Logger{}
|
||||||
fs.Files = []string{
|
fs.Files = []string{
|
||||||
filepath.Join(testdataDir, "log1.log"),
|
filepath.Join(testdataDir, "log1.log"),
|
||||||
|
|
@ -177,7 +175,7 @@ func TestModificationTime(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNoModificationTime(t *testing.T) {
|
func TestNoModificationTime(t *testing.T) {
|
||||||
fs := NewFileStat()
|
fs := newFileStat()
|
||||||
fs.Log = testutil.Logger{}
|
fs.Log = testutil.Logger{}
|
||||||
fs.Files = []string{
|
fs.Files = []string{
|
||||||
filepath.Join(testdataDir, "non_existent_file"),
|
filepath.Join(testdataDir, "non_existent_file"),
|
||||||
|
|
|
||||||
|
|
@@ -18,7 +18,6 @@ import (
 //go:embed sample.conf
 var sampleConfig string

-// Fireboard gathers statistics from the fireboard.io servers
 type Fireboard struct {
     AuthToken string `toml:"auth_token"`
     URL       string `toml:"url"`
@@ -27,35 +26,23 @@ type Fireboard struct {
     client *http.Client
 }

-// NewFireboard return a new instance of Fireboard with a default http client
-func NewFireboard() *Fireboard {
-    tr := &http.Transport{ResponseHeaderTimeout: 3 * time.Second}
-    client := &http.Client{
-        Transport: tr,
-        Timeout:   4 * time.Second,
-    }
-    return &Fireboard{client: client}
-}
-
-// RTT fireboardStats represents the data that is received from Fireboard
-type RTT struct {
+type rtt struct {
     Temp       float64 `json:"temp"`
     Channel    int64   `json:"channel"`
-    Degreetype int     `json:"degreetype"`
+    DegreeType int     `json:"degreetype"`
     Created    string  `json:"created"`
 }

 type fireboardStats struct {
     Title       string `json:"title"`
     UUID        string `json:"uuid"`
-    Latesttemps []RTT  `json:"latest_temps"`
+    LatestTemps []rtt  `json:"latest_temps"`
 }

 func (*Fireboard) SampleConfig() string {
     return sampleConfig
 }

-// Init the things
 func (r *Fireboard) Init() error {
     if len(r.AuthToken) == 0 {
         return errors.New("you must specify an authToken")
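The renamed `rtt` and `fireboardStats` structs above decode the Fireboard API response; their json tags are unchanged by this commit. Below is a self-contained sketch of that decoding: the struct definitions are copied from the diff, but the sample payload values and the assumption that the endpoint returns a list of devices are illustrative only.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local copies of the renamed structs so the sketch compiles on its own.
type rtt struct {
	Temp       float64 `json:"temp"`
	Channel    int64   `json:"channel"`
	DegreeType int     `json:"degreetype"`
	Created    string  `json:"created"`
}

type fireboardStats struct {
	Title       string `json:"title"`
	UUID        string `json:"uuid"`
	LatestTemps []rtt  `json:"latest_temps"`
}

func main() {
	// Invented sample data; only the field names mirror the json tags above.
	sample := `[{"title":"demo device","uuid":"0123abcd","latest_temps":
		[{"temp":107.8,"channel":1,"degreetype":1,"created":"2024-06-01T12:00:00Z"}]}]`

	var stats []fireboardStats
	if err := json.Unmarshal([]byte(sample), &stats); err != nil {
		panic(err)
	}
	fmt.Printf("%s channel %d: %.1f\n",
		stats[0].Title, stats[0].LatestTemps[0].Channel, stats[0].LatestTemps[0].Temp)
}
```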
@@ -73,7 +60,6 @@ func (r *Fireboard) Init() error {
     return nil
 }

-// Gather Reads stats from all configured servers.
 func (r *Fireboard) Gather(acc telegraf.Accumulator) error {
     // Perform the GET request to the fireboard servers
     req, err := http.NewRequest("GET", r.URL, nil)
@@ -122,12 +108,12 @@ func scale(n int) string {
 func (r *Fireboard) gatherTemps(s fireboardStats, acc telegraf.Accumulator) {
     // Construct lookup for scale values

-    for _, t := range s.Latesttemps {
+    for _, t := range s.LatestTemps {
         tags := map[string]string{
             "title":   s.Title,
             "uuid":    s.UUID,
             "channel": strconv.FormatInt(t.Channel, 10),
-            "scale":   scale(t.Degreetype),
+            "scale":   scale(t.DegreeType),
         }
         fields := map[string]interface{}{
             "temperature": t.Temp,
@@ -136,8 +122,17 @@ func (r *Fireboard) gatherTemps(s fireboardStats, acc telegraf.Accumulator) {
     }
 }

+func newFireboard() *Fireboard {
+    tr := &http.Transport{ResponseHeaderTimeout: 3 * time.Second}
+    client := &http.Client{
+        Transport: tr,
+        Timeout:   4 * time.Second,
+    }
+    return &Fireboard{client: client}
+}
+
 func init() {
     inputs.Add("fireboard", func() telegraf.Input {
-        return NewFireboard()
+        return newFireboard()
     })
 }
@@ -27,7 +27,7 @@ func TestFireboard(t *testing.T) {
     require.NoError(t, err)

     // Create a new fb instance with our given test server
-    fireboard := NewFireboard()
+    fireboard := newFireboard()
     fireboard.AuthToken = "b4bb6e6a7b6231acb9f71b304edb2274693d8849"
     fireboard.URL = u.String()

@@ -20,11 +20,11 @@ var sampleConfig string

 const measurement = "fluentd"

-// Fluentd - plugin main structure
 type Fluentd struct {
-    Endpoint string
-    Exclude  []string
-    client   *http.Client
+    Endpoint string   `toml:"endpoint"`
+    Exclude  []string `toml:"exclude"`
+
+    client *http.Client
 }

 type endpointInfo struct {
@@ -51,32 +51,10 @@ type pluginData struct {
     AvailBufferSpaceRatios *float64 `json:"buffer_available_buffer_space_ratios"`
 }

-// parse JSON from fluentd Endpoint
-// Parameters:
-//
-//  data: unprocessed json received from endpoint
-//
-// Returns:
-//
-//  pluginData: slice that contains parsed plugins
-//  error: error that may have occurred
-func parse(data []byte) (datapointArray []pluginData, err error) {
-    var endpointData endpointInfo
-
-    if err = json.Unmarshal(data, &endpointData); err != nil {
-        err = errors.New("processing JSON structure")
-        return nil, err
-    }
-
-    datapointArray = append(datapointArray, endpointData.Payload...)
-    return datapointArray, err
-}
-
 func (*Fluentd) SampleConfig() string {
     return sampleConfig
 }

-// Gather - Main code responsible for gathering, processing and creating metrics
 func (h *Fluentd) Gather(acc telegraf.Accumulator) error {
     _, err := url.Parse(h.Endpoint)
     if err != nil {
@@ -219,6 +197,27 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error {
     return nil
 }

+// parse JSON from fluentd Endpoint
+// Parameters:
+//
+//  data: unprocessed json received from endpoint
+//
+// Returns:
+//
+//  pluginData: slice that contains parsed plugins
+//  error: error that may have occurred
+func parse(data []byte) (datapointArray []pluginData, err error) {
+    var endpointData endpointInfo
+
+    if err = json.Unmarshal(data, &endpointData); err != nil {
+        err = errors.New("processing JSON structure")
+        return nil, err
+    }
+
+    datapointArray = append(datapointArray, endpointData.Payload...)
+    return datapointArray, err
+}
+
 func init() {
     inputs.Add("fluentd", func() telegraf.Input { return &Fluentd{} })
 }