fix(inputs): Linter issues (#11576)
parent a6367e7a07
commit e2578462a5
@@ -126,7 +126,7 @@ issues:
     - path: _test\.go
      text: "parameter.*seems to be a control flag, avoid control coupling"

-    - path: (^agent/|^cmd/|^config/|^filter/|^internal/|^logger/|^metric/|^models/|^selfstat/|^testutil/|^plugins/serializers/)
+    - path: (^agent/|^cmd/|^config/|^filter/|^internal/|^logger/|^metric/|^models/|^selfstat/|^testutil/|^plugins/serializers/|^plugins/inputs/zipkin/cmd)
      text: "imports-blacklist: should not use the following blacklisted import: \"log\""
      linters:
        - revive
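
Note: plugins/inputs/zipkin/cmd is a standalone command-line tool, so it is added to the paths excluded from revive's imports-blacklist rule that bans the stdlib "log" package. Inside regular plugins the fix goes the other way: log calls are replaced by the agent-injected logger. A minimal sketch of that pattern (the MyInput plugin is hypothetical):

    package myinput

    import "github.com/influxdata/telegraf"

    // MyInput is a made-up plugin struct, for illustration only.
    type MyInput struct {
        // Log is injected by the Telegraf agent; `toml:"-"` keeps it
        // out of the user-facing configuration.
        Log telegraf.Logger `toml:"-"`
    }

    func (m *MyInput) probe() {
        // Plugin-scoped logging instead of the blacklisted stdlib "log".
        m.Log.Warnf("probe failed, will retry")
    }
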
@@ -465,7 +465,7 @@ func TestGetDiscoveryDataAcrossRegions(t *testing.T) {
 			totalCount:          0,
 			pageSize:            0,
 			pageNumber:          0,
-			expectedErrorString: `Didn't find root key "LoadBalancers" in discovery response`,
+			expectedErrorString: `didn't find root key "LoadBalancers" in discovery response`,
 		},
 		{
 			name: "1 object discovered",
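
Note: most hunks in this commit lowercase error messages. Go's style checkers (staticcheck's ST1005 among them) require error strings to start with a lowercase letter and carry no trailing punctuation, because callers routinely wrap them mid-sentence. A small self-contained illustration (the message texts are made up):

    package main

    import (
        "errors"
        "fmt"
    )

    func main() {
        err := errors.New("didn't find root key") // lowercase, no period
        // Wrapped errors then read as one sentence:
        fmt.Println(fmt.Errorf("parsing discovery response: %w", err))
        // Output: parsing discovery response: didn't find root key
    }
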
@@ -57,7 +57,7 @@ type parsedDResp struct {
 func getRPCReqFromDiscoveryRequest(req discoveryRequest) (*requests.RpcRequest, error) {
 	if reflect.ValueOf(req).Type().Kind() != reflect.Ptr ||
 		reflect.ValueOf(req).IsNil() {
-		return nil, errors.Errorf("Not expected type of the discovery request object: %q, %q", reflect.ValueOf(req).Type(), reflect.ValueOf(req).Kind())
+		return nil, errors.Errorf("unexpected type of the discovery request object: %q, %q", reflect.ValueOf(req).Type(), reflect.ValueOf(req).Kind())
 	}

 	ptrV := reflect.Indirect(reflect.ValueOf(req))
@@ -65,19 +65,19 @@ func getRPCReqFromDiscoveryRequest(req discoveryRequest) (*requests.RpcRequest,
 	for i := 0; i < ptrV.NumField(); i++ {
 		if ptrV.Field(i).Type().String() == "*requests.RpcRequest" {
 			if !ptrV.Field(i).CanInterface() {
-				return nil, errors.Errorf("Can't get interface of %v", ptrV.Field(i))
+				return nil, errors.Errorf("can't get interface of %v", ptrV.Field(i))
 			}

 			rpcReq, ok := ptrV.Field(i).Interface().(*requests.RpcRequest)

 			if !ok {
-				return nil, errors.Errorf("Cant convert interface of %v to '*requests.RpcRequest' type", ptrV.Field(i).Interface())
+				return nil, errors.Errorf("can't convert interface of %v to '*requests.RpcRequest' type", ptrV.Field(i).Interface())
 			}

 			return rpcReq, nil
 		}
 	}
-	return nil, errors.Errorf("Didn't find *requests.RpcRequest embedded struct in %q", ptrV.Type())
+	return nil, errors.Errorf("didn't find *requests.RpcRequest embedded struct in %q", ptrV.Type())
 }

 //newDiscoveryTool function returns discovery tool object.
@@ -224,7 +224,7 @@ func newDiscoveryTool(regions []string, project string, lg telegraf.Logger, cred
 	case "acs_cds":
 		return nil, noDiscoverySupportErr
 	default:
-		return nil, errors.Errorf("project %q is not recognized by discovery...", project)
+		return nil, errors.Errorf("project %q is not recognized by discovery", project)
 	}

 	cli[region], err = sdk.NewClientWithOptions(region, sdk.NewConfig(), credential)
@@ -234,7 +234,7 @@ func newDiscoveryTool(regions []string, project string, lg telegraf.Logger, cred
 	}

 	if len(dscReq) == 0 || len(cli) == 0 {
-		return nil, errors.Errorf("Can't build discovery request for project: %q,\nregions: %v", project, regions)
+		return nil, errors.Errorf("can't build discovery request for project: %q, regions: %v", project, regions)
 	}

 	return &discoveryTool{
@@ -261,11 +261,11 @@ func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse)

 	data = resp.GetHttpContentBytes()
 	if data == nil { //No data
-		return nil, errors.Errorf("No data in response to be parsed")
+		return nil, errors.New("no data in response to be parsed")
 	}

 	if err := json.Unmarshal(data, &fullOutput); err != nil {
-		return nil, errors.Errorf("Can't parse JSON from discovery response: %v", err)
+		return nil, errors.Errorf("can't parse JSON from discovery response: %v", err)
 	}

 	for key, val := range fullOutput {
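
Note: besides lowercasing, the hunk above swaps errors.Errorf for errors.New where the message has no format verbs; printf-style wrappers around constant strings are a standard linter finding. A sketch using github.com/pkg/errors, which this plugin imports (the parse helper is illustrative):

    package sketch

    import "github.com/pkg/errors"

    func parse(data []byte, jsonErr error) (interface{}, error) {
        if data == nil {
            // No verbs, so New is the right constructor.
            return nil, errors.New("no data in response to be parsed")
        }
        if jsonErr != nil {
            // Errorf stays where formatting is actually needed.
            return nil, errors.Errorf("can't parse JSON from discovery response: %v", jsonErr)
        }
        return data, nil
    }
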
@@ -274,7 +274,7 @@ func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse)
 			foundRootKey = true
 			rootKeyVal, ok := val.(map[string]interface{})
 			if !ok {
-				return nil, errors.Errorf("Content of root key %q, is not an object: %v", key, val)
+				return nil, errors.Errorf("content of root key %q, is not an object: %v", key, val)
 			}

 			//It should contain the array with discovered data
@@ -284,7 +284,7 @@ func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse)
 				}
 			}
 			if !foundDataItem {
-				return nil, errors.Errorf("Didn't find array item in root key %q", key)
+				return nil, errors.Errorf("didn't find array item in root key %q", key)
 			}
 		case "TotalCount", "TotalRecordCount":
 			pdResp.totalCount = int(val.(float64))
@@ -295,7 +295,7 @@ func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse)
 		}
 	}
 	if !foundRootKey {
-		return nil, errors.Errorf("Didn't find root key %q in discovery response", dt.respRootKey)
+		return nil, errors.Errorf("didn't find root key %q in discovery response", dt.respRootKey)
 	}

 	return pdResp, nil
@@ -341,7 +341,7 @@ func (dt *discoveryTool) getDiscoveryData(cli aliyunSdkClient, req *requests.Com
 	for _, raw := range discoveryData {
 		elem, ok := raw.(map[string]interface{})
 		if !ok {
-			return nil, errors.Errorf("can't parse input data element, not a map[string]interface{} type")
+			return nil, errors.New("can't parse input data element, not a map[string]interface{} type")
 		}
 		if objectID, ok := elem[dt.respObjectIDKey].(string); ok {
 			preparedData[objectID] = elem
@@ -363,7 +363,7 @@ func (dt *discoveryTool) getDiscoveryDataAcrossRegions(lmtr chan bool) (map[stri
 		//which aliyun object type (project) is used
 		dscReq, ok := dt.req[region]
 		if !ok {
-			return nil, errors.Errorf("Error building common discovery request: not valid region %q", region)
+			return nil, errors.Errorf("error building common discovery request: not valid region %q", region)
 		}

 		rpcReq, err := getRPCReqFromDiscoveryRequest(dscReq)
@@ -1,13 +0,0 @@
-[agent]
-  interval="1s"
-  flush_interval="1s"
-
-[[inputs.docker]]
-  endpoint = "unix:///var/run/docker.sock"
-  timeout = "5s"
-  perdevice = true
-  total = false
-  container_names = []
-
-[[outputs.file]]
-  files = ["stdout"]
@@ -108,10 +108,6 @@ var baseClient = MockClient{
 	},
 }

-func newClient(_ string, _ *tls.Config) (Client, error) {
-	return &baseClient, nil
-}
-
 func TestDockerGatherContainerStats(t *testing.T) {
 	var acc testutil.Accumulator
 	stats := testStats()
@@ -925,7 +921,7 @@ func TestDockerGatherInfo(t *testing.T) {
 	var acc testutil.Accumulator
 	d := Docker{
 		Log:       testutil.Logger{},
-		newClient: newClient,
+		newClient: func(string, *tls.Config) (Client, error) { return &baseClient, nil },
 		TagEnvironment: []string{"ENVVAR1", "ENVVAR2", "ENVVAR3", "ENVVAR5",
 			"ENVVAR6", "ENVVAR7", "ENVVAR8", "ENVVAR9"},
 		PerDeviceInclude: []string{"cpu", "network", "blkio"},
@@ -1078,7 +1074,7 @@ func TestDockerGatherSwarmInfo(t *testing.T) {
 	var acc testutil.Accumulator
 	d := Docker{
 		Log:       testutil.Logger{},
-		newClient: newClient,
+		newClient: func(string, *tls.Config) (Client, error) { return &baseClient, nil },
 	}

 	err := acc.GatherError(d.Gather)
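
Note: the package-level newClient test helper with two ignored parameters is removed and inlined as a closure at both call sites. A generic, self-contained version of the same move (all names here are illustrative):

    package main

    import "fmt"

    // gatherer mirrors the shape of the Docker struct's newClient field:
    // a constructor the tests can swap out.
    type gatherer struct {
        newClient func(addr string) (string, error)
    }

    func main() {
        // An inline closure replaces a named helper whose parameters
        // were ignored anyway.
        g := gatherer{newClient: func(string) (string, error) { return "mock", nil }}
        c, _ := g.newClient("unused")
        fmt.Println(c)
    }
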
@@ -20,6 +20,7 @@ import (
 	gnmiLib "github.com/openconfig/gnmi/proto/gnmi"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/credentials/insecure"
 	"google.golang.org/grpc/metadata"

 	"github.com/influxdata/telegraf"
@@ -269,12 +270,13 @@ func (c *GNMI) newSubscribeRequest() (*gnmiLib.SubscribeRequest, error) {

 // SubscribeGNMI and extract telemetry data
 func (c *GNMI) subscribeGNMI(ctx context.Context, worker *Worker, tlscfg *tls.Config, request *gnmiLib.SubscribeRequest) error {
-	var opt grpc.DialOption
+	var creds credentials.TransportCredentials
 	if tlscfg != nil {
-		opt = grpc.WithTransportCredentials(credentials.NewTLS(tlscfg))
+		creds = credentials.NewTLS(tlscfg)
 	} else {
-		opt = grpc.WithInsecure()
+		creds = insecure.NewCredentials()
 	}
+	opt := grpc.WithTransportCredentials(creds)

 	client, err := grpc.DialContext(ctx, worker.address, opt)
 	if err != nil {
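
Note: both this plugin and jti_openconfig_telemetry below migrate off the deprecated grpc.WithInsecure(); insecure.NewCredentials() expresses plaintext as just another TransportCredentials, so TLS and non-TLS dials share one code path. A self-contained sketch (the address is a placeholder):

    package main

    import (
        "context"
        "crypto/tls"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials"
        "google.golang.org/grpc/credentials/insecure"
    )

    func dial(ctx context.Context, addr string, tlscfg *tls.Config) (*grpc.ClientConn, error) {
        var creds credentials.TransportCredentials
        if tlscfg != nil {
            creds = credentials.NewTLS(tlscfg)
        } else {
            creds = insecure.NewCredentials() // replaces deprecated grpc.WithInsecure()
        }
        // DialContext is lazy by default, so this succeeds without a live server.
        return grpc.DialContext(ctx, addr, grpc.WithTransportCredentials(creds))
    }

    func main() {
        conn, err := dial(context.Background(), "localhost:57400", nil)
        if err != nil {
            panic(err)
        }
        defer conn.Close()
    }
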
@@ -26,18 +26,6 @@ type Fetcher interface {
 	Fetch(address string) ([]gohddtemp.Disk, error)
 }

-var hddtempSampleConfig = `
-## By default, telegraf gathers temps data from all disks detected by the
-## hddtemp.
-##
-## Only collect temps from the selected disks.
-##
-## A * as the device name will return the temperature values of all disks.
-##
-# address = "127.0.0.1:7634"
-# devices = ["sda", "*"]
-`
-
 func (*HDDTemp) SampleConfig() string {
 	return sampleConfig
 }
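
Note: the deleted hddtempSampleConfig string was dead code; SampleConfig() already returns sampleConfig, which in this generation of Telegraf plugins is embedded from a sample.conf file. Roughly (an assumption based on the surrounding plugin code, not shown in this diff):

    import _ "embed"

    //go:embed sample.conf
    var sampleConfig string

    func (*HDDTemp) SampleConfig() string {
        return sampleConfig
    }
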
@@ -31,7 +31,6 @@ func (*InternetSpeed) SampleConfig() string {
 }

 func (is *InternetSpeed) Gather(acc telegraf.Accumulator) error {
-
 	// Get closest server
 	s := is.serverCache
 	if s == nil {
@@ -819,7 +819,7 @@ func TestSanitizeIPMICmd(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			var sanitizedArgs []string = sanitizeIPMICmd(tt.args)
+			sanitizedArgs := sanitizeIPMICmd(tt.args)
 			require.Equal(t, tt.expected, sanitizedArgs)
 		})
 	}
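
Note: `var sanitizedArgs []string = sanitizeIPMICmd(tt.args)` spells out a type the compiler already infers, which stylecheck/revive flag; the short declaration form is idiomatic. The same cleanup appears in the LVM tests below. Generic example (sanitize is a stand-in for sanitizeIPMICmd):

    package main

    import "fmt"

    func sanitize(args []string) []string { return args }

    func main() {
        // Instead of: var cleaned []string = sanitize(...)
        cleaned := sanitize([]string{"ipmitool", "-v"})
        fmt.Println(cleaned)
    }
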
@@ -14,6 +14,7 @@ import (
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/credentials/insecure"
 	"google.golang.org/grpc/status"

 	"github.com/influxdata/telegraf"
@@ -301,16 +302,17 @@ func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error {
 	}

 	// Parse TLS config
-	var opts []grpc.DialOption
+	var creds credentials.TransportCredentials
 	if m.EnableTLS {
 		tlscfg, err := m.ClientConfig.TLSConfig()
 		if err != nil {
 			return err
 		}
-		opts = append(opts, grpc.WithTransportCredentials(credentials.NewTLS(tlscfg)))
+		creds = credentials.NewTLS(tlscfg)
 	} else {
-		opts = append(opts, grpc.WithInsecure())
+		creds = insecure.NewCredentials()
 	}
+	opt := grpc.WithTransportCredentials(creds)

 	// Connect to given list of servers and start collecting data
 	var grpcClientConn *grpc.ClientConn
@@ -325,7 +327,7 @@ func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error {
 			continue
 		}

-		grpcClientConn, err = grpc.Dial(server, opts...)
+		grpcClientConn, err = grpc.Dial(server, opt)
 		if err != nil {
 			m.Log.Errorf("Failed to connect to %s: %s", server, err.Error())
 		} else {
@@ -296,8 +296,7 @@ func TestConsumerGroupHandler_Lifecycle(t *testing.T) {
 	// err = cg.ConsumeClaim(session, &claim)
 	//require.NoError(t, err)
 	// So stick with the line below for now.
-	//nolint:errcheck
-	cg.ConsumeClaim(session, &claim)
+	_ = cg.ConsumeClaim(session, &claim)

 	err = cg.Cleanup(session)
 	require.NoError(t, err)
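
Note: assigning the result to the blank identifier replaces the //nolint:errcheck directive; the "error deliberately ignored" decision now lives in the code itself, where errcheck can verify it. The same idea drives the `_ =` assignments in the phpfpm responder below. Generic example:

    package main

    import "os"

    func main() {
        f, err := os.CreateTemp("", "demo")
        if err != nil {
            panic(err)
        }
        // Consciously discard the errors; errcheck is satisfied without
        // a //nolint comment.
        _ = f.Close()
        _ = os.Remove(f.Name())
    }
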
@@ -128,8 +128,6 @@ func TestDaemonSet(t *testing.T) {

 func TestDaemonSetSelectorFilter(t *testing.T) {
 	cli := &client{}
-	now := time.Now()
-	now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())

 	responseMap := map[string]interface{}{
 		"/daemonsets/": &v1.DaemonSetList{
@@ -22,9 +22,6 @@ type SysctlFS struct {
 	path string
 }

-var sysctlFSDescription = `Provides Linux sysctl fs metrics`
-var sysctlFSSampleConfig = ``
-
 func (sfs *SysctlFS) gatherList(file string, fields map[string]interface{}, fieldNames ...string) error {
 	bs, err := os.ReadFile(sfs.path + "/" + file)
 	if err != nil {
@@ -360,7 +360,6 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping) e
 	fieldSplitter := regexp.MustCompile(`[ :]+`)

 	for _, file := range files {
-
 		/* From /proc/fs/lustre/obdfilter/<ost_name>/stats and similar
 		 * extract the object store target name,
 		 * and for per-client files under
@@ -11,9 +11,10 @@ import (
 )

 func TestGather(t *testing.T) {
-	var lvm LVM = LVM{UseSudo: false}
 	var acc testutil.Accumulator

+	lvm := LVM{UseSudo: false}
+
 	// overwriting exec commands with mock commands
 	execCommand = fakeExecCommand
 	err := lvm.Gather(&acc)
@@ -129,9 +130,10 @@ func TestHelperProcess(_ *testing.T) {

 // test when no lvm devices exist
 func TestGatherNoLVM(t *testing.T) {
-	var noLVM LVM = LVM{UseSudo: false}
 	var acc testutil.Accumulator

+	noLVM := LVM{UseSudo: false}
+
 	// overwriting exec commands with mock commands
 	execCommand = fakeExecCommandNoLVM
 	err := noLVM.Gather(&acc)
@@ -111,16 +111,16 @@ func (r *response) WriteHeader(code int) {
 		r.header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
 	}

-	fmt.Fprintf(r.w, "Status: %d %s\r\n", code, http.StatusText(code))
-	r.header.Write(r.w)
-	r.w.WriteString("\r\n")
+	_, _ = fmt.Fprintf(r.w, "Status: %d %s\r\n", code, http.StatusText(code))
+	_ = r.header.Write(r.w)
+	_, _ = r.w.WriteString("\r\n")
 }

 func (r *response) Flush() {
 	if !r.wroteHeader {
 		r.WriteHeader(http.StatusOK)
 	}
-	r.w.Flush()
+	_ = r.w.Flush()
 }

 func (r *response) Close() error {
@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"strings"

+	// Blank import required to register driver
 	_ "github.com/jackc/pgx/v4/stdlib"

 	"github.com/influxdata/telegraf"
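
Note: revive's blank-imports rule wants every underscore import explained, since its side effect (here, registering the pgx driver with database/sql) is otherwise invisible. Self-contained sketch (the DSN is a placeholder):

    package main

    import (
        "database/sql"

        // Blank import required to register the driver with database/sql.
        _ "github.com/jackc/pgx/v4/stdlib"
    )

    func main() {
        // The driver name "pgx" resolves only because of the blank import.
        db, err := sql.Open("pgx", "postgres://localhost/telegraf")
        if err != nil {
            panic(err)
        }
        defer db.Close()
    }
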
@@ -34,7 +34,7 @@ type podResponse struct {
 	Kind       string        `json:"kind"`
 	APIVersion string        `json:"apiVersion"`
 	Metadata   podMetadata   `json:"metadata"`
-	Items      []*corev1.Pod `json:"items,string,omitempty"`
+	Items      []*corev1.Pod `json:"items,omitempty"`
 }

 const cAdvisorPodListDefaultInterval = 60
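
Note: the dropped `,string` option is only defined for string, bool, integer, and floating-point fields; on a slice such as Items it does nothing, and struct-tag checks reject it. Illustration:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type resp struct {
        Count int      `json:"count,string"`    // valid: decodes the JSON string "3" into 3
        Items []string `json:"items,omitempty"` // ",string" would be meaningless here
    }

    func main() {
        var r resp
        _ = json.Unmarshal([]byte(`{"count":"3","items":["a"]}`), &r)
        fmt.Println(r.Count, r.Items) // 3 [a]
    }
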
@@ -3,7 +3,7 @@ package snmp_legacy

 import (
 	_ "embed"
-	"log"
+	"fmt"
 	"net"
 	"os"
 	"strconv"
@@ -29,7 +29,7 @@ type Snmp struct {
 	Subtable          []Subtable
 	SnmptranslateFile string

-	Log telegraf.Logger
+	Log telegraf.Logger `toml:"-"`

 	nameToOid map[string]string
 	initNode  Node
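
Note: the `toml:"-"` tag marks Log as agent-injected rather than user-configurable, so TOML decoding and config tooling skip the field; this matches how other plugins declare their logger. In outline:

    type Snmp struct {
        // ...options decoded from the user's TOML config...

        // Injected by the agent at load time; "-" hides it from TOML.
        Log telegraf.Logger `toml:"-"`
    }
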
@@ -701,7 +701,7 @@ func (h *Host) HandleResponse(
 		acc.AddFields(fieldName, fields, tags)
 	case gosnmp.NoSuchObject, gosnmp.NoSuchInstance:
 		// Oid not found
-		log.Printf("E! [inputs.snmp_legacy] oid %q not found", oidKey)
+		acc.AddError(fmt.Errorf("oid %q not found", oidKey))
 	default:
 		// delete other data
 	}
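
Note: routing the failure through acc.AddError (hence the new fmt import replacing log) means the error is counted and reported per-plugin by the agent instead of bypassing it via the global stdlib logger. The core of the pattern, as in the hunk above:

    // With acc telegraf.Accumulator in scope inside a response handler:
    acc.AddError(fmt.Errorf("oid %q not found", oidKey))
    // rather than:
    // log.Printf("E! [inputs.snmp_legacy] oid %q not found", oidKey)
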
@@ -10,5 +10,6 @@
 package sql

 import (
+	// Blank imports to register the sqlite driver
 	_ "modernc.org/sqlite"
 )
@@ -1,4 +0,0 @@
-//go:build arm || mips || mipsle || mips64 || mips64le || ppc64 || (freebsd && arm64)
-// +build arm mips mipsle mips64 mips64le ppc64 freebsd,arm64
-
-package sql
@@ -72,32 +72,41 @@ func (cf *ClientFactory) GetClient(ctx context.Context) (*Client, error) {
 			}
 		}

-		// Execute a dummy call against the server to make sure the client is
-		// still functional. If not, try to log back in. If that doesn't work,
-		// we give up.
-		ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(cf.parent.Timeout))
-		defer cancel1()
-		if _, err := methods.GetCurrentTime(ctx1, cf.client.Client); err != nil {
-			cf.parent.Log.Info("Client session seems to have time out. Reauthenticating!")
-			ctx2, cancel2 := context.WithTimeout(ctx, time.Duration(cf.parent.Timeout))
-			defer cancel2()
-			if err := cf.client.Client.SessionManager.Login(ctx2, url.UserPassword(cf.parent.Username, cf.parent.Password)); err != nil {
-				if !retrying {
-					// The client went stale. Probably because someone rebooted vCenter. Clear it to
-					// force us to create a fresh one. We only get one chance at this. If we fail a second time
-					// we will simply skip this collection round and hope things have stabilized for the next one.
-					retrying = true
-					cf.client = nil
-					continue
-				}
-				return nil, fmt.Errorf("renewing authentication failed: %s", err.Error())
-			}
+		err := cf.testClient(ctx)
+		if err != nil {
+			if !retrying {
+				// The client went stale. Probably because someone rebooted vCenter. Clear it to
+				// force us to create a fresh one. We only get one chance at this. If we fail a second time
+				// we will simply skip this collection round and hope things have stabilized for the next one.
+				retrying = true
+				cf.client = nil
+				continue
+			}
+			return nil, err
 		}

 		return cf.client, nil
 	}
 }

+func (cf *ClientFactory) testClient(ctx context.Context) error {
+	// Execute a dummy call against the server to make sure the client is
+	// still functional. If not, try to log back in. If that doesn't work,
+	// we give up.
+	ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(cf.parent.Timeout))
+	defer cancel1()
+	if _, err := methods.GetCurrentTime(ctx1, cf.client.Client); err != nil {
+		cf.parent.Log.Info("Client session seems to have time out. Reauthenticating!")
+		ctx2, cancel2 := context.WithTimeout(ctx, time.Duration(cf.parent.Timeout))
+		defer cancel2()
+		if err := cf.client.Client.SessionManager.Login(ctx2, url.UserPassword(cf.parent.Username, cf.parent.Password)); err != nil {
+			return fmt.Errorf("renewing authentication failed: %s", err.Error())
+		}
+	}
+
+	return nil
+}
+
 // NewClient creates a new vSphere client based on the url and setting passed as parameters.
 func NewClient(ctx context.Context, vSphereURL *url.URL, vs *VSphere) (*Client, error) {
 	sw := NewStopwatch("connect", vSphereURL.Host)
@@ -21,23 +21,6 @@ import (
 	"github.com/influxdata/telegraf/testutil"
 )

-var configHeader = `
-[agent]
-  interval = "10s"
-  round_interval = true
-  metric_batch_size = 1000
-  metric_buffer_limit = 10000
-  collection_jitter = "0s"
-  flush_interval = "10s"
-  flush_jitter = "0s"
-  precision = ""
-  debug = false
-  quiet = false
-  logfile = ""
-  hostname = ""
-  omit_hostname = false
-`
-
 func defaultVSphere() *VSphere {
 	return &VSphere{
 		Log: testutil.Logger{},