chore(linters): Fix remaining errcheck warnings (#15518)
Co-authored-by: Joshua Powers <powersj@fastmail.com>
commit 19737fcb52
parent a8355c74b9
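The hunks that follow repeat a small set of errcheck remediations: test code stops discarding errors and asserts them with require.NoError, plugin code propagates or wraps the error, and deliberately ignored errors get a //nolint:errcheck directive with a reason. A minimal sketch of those three patterns, with hypothetical names not taken from the diff:

package example

import (
    "fmt"
    "strconv"
    "testing"

    "github.com/stretchr/testify/require"
)

// In tests: assert the error instead of discarding it.
func TestParse(t *testing.T) {
    v, err := strconv.Atoi("42") // was: v, _ := strconv.Atoi("42")
    require.NoError(t, err)
    require.Equal(t, 42, v)
}

// In plugin code: propagate the error, optionally wrapping it for context.
func parsePort(s string) (int, error) {
    port, err := strconv.Atoi(s)
    if err != nil {
        return 0, fmt.Errorf("parsing port %q: %w", s, err)
    }
    return port, nil
}

// When ignoring the error is deliberate, say so for the linter.
func bestEffort(s string) float64 {
    //nolint:errcheck // silently treat as zero if malformed
    v, _ := strconv.ParseFloat(s, 64)
    return v
}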
@@ -73,6 +73,7 @@ linters-settings:
       - "(*hash/maphash.Hash).Write"
       - "(*hash/maphash.Hash).WriteByte"
       - "(*hash/maphash.Hash).WriteString"
+      - "(*github.com/influxdata/telegraf/plugins/outputs/postgresql/sqltemplate.Template).UnmarshalText"
     check-blank: true
   gocritic:
     # Disable all checks.
@@ -335,9 +336,6 @@ issues:
     - package comment should be of the form "(.+)...
     # EXC0015 revive: Annoying issue about not having a comment. The rare codebase has such comments
     - should have a package comment
-    # nolintlint: directive `//nolint:errcheck` is unused for linter "errcheck"
-    # temporary while these are being fixed
-    - directive `//nolint:errcheck //.*` is unused for linter "errcheck"

   # Excluding configuration per-path, per-linter, per-text and per-source
   exclude-rules:
@@ -348,11 +346,7 @@ issues:
     - path: cmd/telegraf/(main|printer|cmd_plugins).go
       text: "Error return value of `outputBuffer.Write` is not checked" #errcheck

-    # temporary disabling of errcheck as this linter is gradually being applied across the codebase
-    - path: plugins/inputs/*
-      linters:
-        - errcheck
-    - path: plugins/outputs/*
+    - path: plugins/inputs/win_perf_counters/pdh.go
       linters:
         - errcheck
@@ -302,7 +302,8 @@ func TestPluginMetricsInitialize(t *testing.T) {
 }

 func TestUpdateWindow(t *testing.T) {
-    duration, _ := time.ParseDuration("1m")
+    duration, err := time.ParseDuration("1m")
+    require.NoError(t, err)
     internalDuration := config.Duration(duration)

     plugin := &AliyunCMS{
@@ -4,6 +4,7 @@ package amd_rocm_smi
 import (
     _ "embed"
     "encoding/json"
     "fmt"
     "os"
     "os/exec"
     "strconv"
@@ -33,7 +34,10 @@ func (*ROCmSMI) SampleConfig() string {

 // Gather implements the telegraf interface
 func (rsmi *ROCmSMI) Gather(acc telegraf.Accumulator) error {
-    data := rsmi.pollROCmSMI()
+    data, err := rsmi.pollROCmSMI()
+    if err != nil {
+        return fmt.Errorf("failed to execute command in pollROCmSMI: %w", err)
+    }

     return gatherROCmSMI(data, acc)
 }
@@ -61,7 +65,7 @@ func init() {
     })
 }

-func (rsmi *ROCmSMI) pollROCmSMI() []byte {
+func (rsmi *ROCmSMI) pollROCmSMI() ([]byte, error) {
     // Construct and execute metrics query, there currently exist (ROCm v4.3.x) a "-a" option
     // that does not provide all the information, so each needed parameter is set manually
     cmd := exec.Command(rsmi.BinPath,
@@ -104,8 +108,7 @@ func (rsmi *ROCmSMI) pollROCmSMI() []byte {
         "--showtoponuma",
         "--json")

-    ret, _ := internal.StdOutputTimeout(cmd, time.Duration(rsmi.Timeout))
-    return ret
+    return internal.StdOutputTimeout(cmd, time.Duration(rsmi.Timeout))
 }

 func gatherROCmSMI(ret []byte, acc telegraf.Accumulator) error {
@@ -145,7 +148,9 @@ func genTagsFields(gpus map[string]GPU, system map[string]sysInfo) []metric {
             fields := map[string]interface{}{}

             payload := gpus[cardID]
+            //nolint:errcheck // silently treat as zero if malformed
             totVRAM, _ := strconv.ParseInt(payload.GpuVRAMTotalMemory, 10, 64)
+            //nolint:errcheck // silently treat as zero if malformed
             usdVRAM, _ := strconv.ParseInt(payload.GpuVRAMTotalUsedMemory, 10, 64)
             strFree := strconv.FormatInt(totVRAM-usdVRAM, 10)
@@ -25,10 +25,12 @@ type Bcache struct {
 }

 func getTags(bdev string) map[string]string {
+    //nolint:errcheck // unable to propagate
     backingDevFile, _ := os.Readlink(bdev)
     backingDevPath := strings.Split(backingDevFile, "/")
     backingDev := backingDevPath[len(backingDevPath)-2]

+    //nolint:errcheck // unable to propagate
     bcacheDevFile, _ := os.Readlink(bdev + "/dev")
     bcacheDevPath := strings.Split(bcacheDevFile, "/")
     bcacheDev := bcacheDevPath[len(bcacheDevPath)-1]
@@ -52,6 +54,7 @@ func prettyToBytes(v string) uint64 {
         v = v[:len(v)-1]
         factor = factors[prefix]
     }
+    //nolint:errcheck // unable to propagate
     result, _ := strconv.ParseFloat(v, 32)
     result = result * float64(factor)
@@ -88,7 +91,10 @@ func (b *Bcache) gatherBcache(bdev string, acc telegraf.Accumulator) error {
             value := prettyToBytes(rawValue)
             fields[key] = value
         } else {
-            value, _ := strconv.ParseUint(rawValue, 10, 64)
+            value, err := strconv.ParseUint(rawValue, 10, 64)
+            if err != nil {
+                return err
+            }
             fields[key] = value
         }
     }
@@ -114,8 +120,8 @@ func (b *Bcache) Gather(acc telegraf.Accumulator) error {
     if len(bcachePath) == 0 {
         bcachePath = "/sys/fs/bcache"
     }
-    bdevs, _ := filepath.Glob(bcachePath + "/*/bdev*")
-    if len(bdevs) < 1 {
+    bdevs, err := filepath.Glob(bcachePath + "/*/bdev*")
+    if len(bdevs) < 1 || err != nil {
         return errors.New("can't find any bcache device")
     }
     for _, bdev := range bdevs {
@@ -15,7 +15,8 @@ import (
 func TestBindJsonStats(t *testing.T) {
     ts := httptest.NewServer(http.FileServer(http.Dir("testdata")))
     url := ts.Listener.Addr().String()
-    host, port, _ := net.SplitHostPort(url)
+    host, port, err := net.SplitHostPort(url)
+    require.NoError(t, err)
     defer ts.Close()

     b := Bind{
@@ -28,7 +29,7 @@ func TestBindJsonStats(t *testing.T) {
     }

     var acc testutil.Accumulator
-    err := acc.GatherError(b.Gather)
+    err = acc.GatherError(b.Gather)

     require.NoError(t, err)
@@ -188,7 +189,8 @@ func TestBindJsonStats(t *testing.T) {
 func TestBindXmlStatsV2(t *testing.T) {
     ts := httptest.NewServer(http.FileServer(http.Dir("testdata")))
     url := ts.Listener.Addr().String()
-    host, port, _ := net.SplitHostPort(url)
+    host, port, err := net.SplitHostPort(url)
+    require.NoError(t, err)
     defer ts.Close()

     b := Bind{
@@ -201,7 +203,7 @@ func TestBindXmlStatsV2(t *testing.T) {
     }

     var acc testutil.Accumulator
-    err := acc.GatherError(b.Gather)
+    err = acc.GatherError(b.Gather)

     require.NoError(t, err)
@@ -393,7 +395,8 @@ func TestBindXmlStatsV2(t *testing.T) {
 func TestBindXmlStatsV3(t *testing.T) {
     ts := httptest.NewServer(http.FileServer(http.Dir("testdata")))
     url := ts.Listener.Addr().String()
-    host, port, _ := net.SplitHostPort(url)
+    host, port, err := net.SplitHostPort(url)
+    require.NoError(t, err)
     defer ts.Close()

     b := Bind{
@@ -406,7 +409,7 @@ func TestBindXmlStatsV3(t *testing.T) {
     }

     var acc testutil.Accumulator
-    err := acc.GatherError(b.Gather)
+    err = acc.GatherError(b.Gather)

     require.NoError(t, err)
@@ -72,7 +72,10 @@ func (b *Bind) addStatsJSON(stats jsonStats, acc telegraf.Accumulator, urlTag st
     grouper := metric.NewSeriesGrouper()
     ts := time.Now()
     tags := map[string]string{"url": urlTag}
-    host, port, _ := net.SplitHostPort(urlTag)
+    host, port, err := net.SplitHostPort(urlTag)
+    if err != nil {
+        acc.AddError(err)
+    }
     tags["source"] = host
     tags["port"] = port
@@ -105,7 +105,10 @@ func (b *Bind) readStatsXMLv2(addr *url.URL, acc telegraf.Accumulator) error {
     }

     tags := map[string]string{"url": addr.Host}
-    host, port, _ := net.SplitHostPort(addr.Host)
+    host, port, err := net.SplitHostPort(addr.Host)
+    if err != nil {
+        return fmt.Errorf("unable to parse address host %q: %w", addr.Host, err)
+    }
     tags["source"] = host
     tags["port"] = port
@@ -71,7 +71,10 @@ type v3CounterGroup struct {
 func (b *Bind) addStatsXMLv3(stats v3Stats, acc telegraf.Accumulator, hostPort string) {
     grouper := metric.NewSeriesGrouper()
     ts := time.Now()
-    host, port, _ := net.SplitHostPort(hostPort)
+    host, port, err := net.SplitHostPort(hostPort)
+    if err != nil {
+        acc.AddError(err)
+    }
     // Counter groups
     for _, cg := range stats.Server.CounterGroups {
         for _, c := range cg.Counters {
@@ -196,7 +196,9 @@ func (bond *Bond) gatherSysDetails(bondName string, files sysFiles, acc telegraf
            interacting with the upstream switch ports
            a failed conversion can be treated as 0 ports
         */
-        adPortCount, _ = strconv.Atoi(strings.TrimSpace(files.ADPortsFile))
+        if pc, err := strconv.Atoi(strings.TrimSpace(files.ADPortsFile)); err == nil {
+            adPortCount = pc
+        }
     } else {
         adPortCount = len(slaves)
     }
@@ -27,7 +27,10 @@ func getResponseJSON(requestURI string) ([]byte, int) {
     }

     // respond with file
-    b, _ := os.ReadFile(jsonFile)
+    b, err := os.ReadFile(jsonFile)
+    if err != nil {
+        panic(err)
+    }
     return b, code
 }
@@ -802,14 +802,14 @@ func (s *Server) serve(t *testing.T) {
                 t.Log("mock server [source stats]: successfully wrote reply")
             }
         case 44: // activity
-            _, err := s.conn.WriteTo(s.encodeActivityReply(seqno), addr)
+            _, err := s.conn.WriteTo(s.encodeActivityReply(seqno, t), addr)
             if err != nil {
                 t.Logf("mock server [activity]: writing reply failed: %v", err)
             } else {
                 t.Log("mock server [activity]: successfully wrote reply")
             }
         case 54: // server stats
-            _, err := s.conn.WriteTo(s.encodeServerStatsReply(seqno), addr)
+            _, err := s.conn.WriteTo(s.encodeServerStatsReply(seqno, t), addr)
             if err != nil {
                 t.Logf("mock server [serverstats]: writing reply failed: %v", err)
             } else {
@@ -833,13 +833,13 @@ func (s *Server) serve(t *testing.T) {
     }
 }

-func (s *Server) encodeActivityReply(sequence uint32) []byte {
+func (s *Server) encodeActivityReply(sequence uint32, t *testing.T) []byte {
     // Encode the header
     buf := encodeHeader(44, 12, 0, sequence) // activity request

     // Encode data
     b := bytes.NewBuffer(buf)
-    _ = binary.Write(b, binary.BigEndian, s.ActivityInfo)
+    require.NoError(t, binary.Write(b, binary.BigEndian, s.ActivityInfo))

     return b.Bytes()
 }
@@ -873,7 +873,7 @@ func (s *Server) encodeTrackingReply(sequence uint32) []byte {
     return buf
 }

-func (s *Server) encodeServerStatsReply(sequence uint32) []byte {
+func (s *Server) encodeServerStatsReply(sequence uint32, t *testing.T) []byte {
     var b *bytes.Buffer
     switch info := s.ServerStatInfo.(type) {
     case *fbchrony.ServerStats:
@@ -882,21 +882,21 @@ func (s *Server) encodeServerStatsReply(sequence uint32) []byte {

         // Encode data
         b = bytes.NewBuffer(buf)
-        _ = binary.Write(b, binary.BigEndian, info)
+        require.NoError(t, binary.Write(b, binary.BigEndian, info))
     case *fbchrony.ServerStats2:
         // Encode the header
         buf := encodeHeader(54, 22, 0, sequence) // activity request

         // Encode data
         b = bytes.NewBuffer(buf)
-        _ = binary.Write(b, binary.BigEndian, info)
+        require.NoError(t, binary.Write(b, binary.BigEndian, info))
     case *fbchrony.ServerStats3:
         // Encode the header
         buf := encodeHeader(54, 24, 0, sequence) // activity request

         // Encode data
         b = bytes.NewBuffer(buf)
-        _ = binary.Write(b, binary.BigEndian, info)
+        require.NoError(t, binary.Write(b, binary.BigEndian, info))
     }

     return b.Bytes()
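A number of the test-helper changes above (the chrony mock server being the clearest case) follow the same refactor: pass *testing.T into the helper so encoding errors can be asserted instead of silently discarded. A small hedged sketch of the shape of that change, with illustrative names:

package example

import (
    "bytes"
    "encoding/binary"
    "testing"

    "github.com/stretchr/testify/require"
)

// Before: the helper had no access to t, so the error was dropped:
//   func encodeReply(seq uint32) []byte { ... _ = binary.Write(buf, ...) ... }

// After: the helper takes *testing.T and fails the test on encode errors.
func encodeReply(t *testing.T, seq uint32) []byte {
    buf := bytes.NewBuffer(nil)
    require.NoError(t, binary.Write(buf, binary.BigEndian, seq))
    return buf.Bytes()
}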
@@ -529,7 +529,10 @@ func (ch *ClickHouse) execQuery(address *url.URL, query string, i interface{}) e
     q := address.Query()
     q.Set("query", query+" FORMAT JSON")
     address.RawQuery = q.Encode()
-    req, _ := http.NewRequest("GET", address.String(), nil)
+    req, err := http.NewRequest("GET", address.String(), nil)
+    if err != nil {
+        return err
+    }
     if ch.Username != "" {
         req.Header.Add("X-ClickHouse-User", ch.Username)
     }
@@ -542,6 +545,7 @@ func (ch *ClickHouse) execQuery(address *url.URL, query string, i interface{}) e
     }
     defer func() { _ = resp.Body.Close() }()
     if resp.StatusCode >= 300 {
+        //nolint:errcheck // reading body for error reporting
         body, _ := io.ReadAll(io.LimitReader(resp.Body, 200))
         return &clickhouseError{
             StatusCode: resp.StatusCode,
@@ -29,7 +29,8 @@ func TestRunParse(t *testing.T) {
     }
     sub.receiver = testMessagesReceive(sub)

-    decoder, _ := internal.NewContentDecoder("identity")
+    decoder, err := internal.NewContentDecoder("identity")
+    require.NoError(t, err)

     ps := &PubSub{
         Log: testutil.Logger{},
@@ -74,7 +75,8 @@ func TestRunBase64(t *testing.T) {
     }
     sub.receiver = testMessagesReceive(sub)

-    decoder, _ := internal.NewContentDecoder("identity")
+    decoder, err := internal.NewContentDecoder("identity")
+    require.NoError(t, err)

     ps := &PubSub{
         Log: testutil.Logger{},
@@ -163,7 +163,8 @@ func TestSnakeCase(t *testing.T) {
 }

 func TestGather(t *testing.T) {
-    duration, _ := time.ParseDuration("1m")
+    duration, err := time.ParseDuration("1m")
+    require.NoError(t, err)
     internalDuration := config.Duration(duration)
     c := &CloudWatch{
         CredentialConfig: internalaws.CredentialConfig{
@@ -199,7 +200,8 @@ func TestGather(t *testing.T) {
 }

 func TestGatherDenseMetric(t *testing.T) {
-    duration, _ := time.ParseDuration("1m")
+    duration, err := time.ParseDuration("1m")
+    require.NoError(t, err)
     internalDuration := config.Duration(duration)
     c := &CloudWatch{
         CredentialConfig: internalaws.CredentialConfig{
@@ -237,7 +239,8 @@
 }

 func TestMultiAccountGather(t *testing.T) {
-    duration, _ := time.ParseDuration("1m")
+    duration, err := time.ParseDuration("1m")
+    require.NoError(t, err)
     internalDuration := config.Duration(duration)
     c := &CloudWatch{
         CredentialConfig: internalaws.CredentialConfig{
@@ -280,7 +283,8 @@
 }

 func TestGather_MultipleNamespaces(t *testing.T) {
-    duration, _ := time.ParseDuration("1m")
+    duration, err := time.ParseDuration("1m")
+    require.NoError(t, err)
     internalDuration := config.Duration(duration)
     c := &CloudWatch{
         Namespaces: []string{"AWS/ELB", "AWS/EC2"},
@@ -363,7 +367,8 @@ func (m *mockSelectMetricsCloudWatchClient) GetMetricData(
 }

 func TestSelectMetrics(t *testing.T) {
-    duration, _ := time.ParseDuration("1m")
+    duration, err := time.ParseDuration("1m")
+    require.NoError(t, err)
     internalDuration := config.Duration(duration)
     c := &CloudWatch{
         CredentialConfig: internalaws.CredentialConfig{
@@ -413,7 +418,8 @@ func TestGenerateStatisticsInputParams(t *testing.T) {
         Namespace: aws.String(namespace),
     }

-    duration, _ := time.ParseDuration("1m")
+    duration, err := time.ParseDuration("1m")
+    require.NoError(t, err)
     internalDuration := config.Duration(duration)

     c := &CloudWatch{
@@ -430,7 +436,8 @@ func TestGenerateStatisticsInputParams(t *testing.T) {

     c.updateWindow(now)

-    statFilter, _ := filter.NewIncludeExcludeFilter(nil, nil)
+    statFilter, err := filter.NewIncludeExcludeFilter(nil, nil)
+    require.NoError(t, err)
     queries := c.getDataQueries([]filteredMetric{{metrics: []types.Metric{m}, statFilter: statFilter}})
     params := c.getDataInputs(queries[namespace])
@@ -454,7 +461,8 @@ func TestGenerateStatisticsInputParamsFiltered(t *testing.T) {
         Namespace: aws.String(namespace),
     }

-    duration, _ := time.ParseDuration("1m")
+    duration, err := time.ParseDuration("1m")
+    require.NoError(t, err)
     internalDuration := config.Duration(duration)

     c := &CloudWatch{
@@ -471,7 +479,8 @@ func TestGenerateStatisticsInputParamsFiltered(t *testing.T) {

     c.updateWindow(now)

-    statFilter, _ := filter.NewIncludeExcludeFilter([]string{"average", "sample_count"}, nil)
+    statFilter, err := filter.NewIncludeExcludeFilter([]string{"average", "sample_count"}, nil)
+    require.NoError(t, err)
     queries := c.getDataQueries([]filteredMetric{{metrics: []types.Metric{m}, statFilter: statFilter}})
     params := c.getDataInputs(queries[namespace])
@@ -495,7 +504,8 @@ func TestMetricsCacheTimeout(t *testing.T) {
 }

 func TestUpdateWindow(t *testing.T) {
-    duration, _ := time.ParseDuration("1m")
+    duration, err := time.ParseDuration("1m")
+    require.NoError(t, err)
     internalDuration := config.Duration(duration)

     c := &CloudWatch{
@@ -74,8 +74,9 @@ func TestConsulStats(t *testing.T) {
     ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
         if r.RequestURI == "/v1/agent/metrics" {
             w.WriteHeader(http.StatusOK)
-            responseKeyMetrics, _ := os.ReadFile("testdata/response_key_metrics.json")
-            _, err := fmt.Fprintln(w, string(responseKeyMetrics))
+            responseKeyMetrics, err := os.ReadFile("testdata/response_key_metrics.json")
+            require.NoError(t, err)
+            _, err = fmt.Fprintln(w, string(responseKeyMetrics))
             require.NoError(t, err)
         }
     }))
@@ -17,13 +17,17 @@ func TestGatherServer(t *testing.T) {
     bucket := "blastro-df"
     fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
         if r.URL.Path == "/pools" {
-            _, _ = w.Write(readJSON(t, "testdata/pools_response.json"))
+            _, err := w.Write(readJSON(t, "testdata/pools_response.json"))
+            require.NoError(t, err)
         } else if r.URL.Path == "/pools/default" {
-            _, _ = w.Write(readJSON(t, "testdata/pools_default_response.json"))
+            _, err := w.Write(readJSON(t, "testdata/pools_default_response.json"))
+            require.NoError(t, err)
         } else if r.URL.Path == "/pools/default/buckets" {
-            _, _ = w.Write(readJSON(t, "testdata/bucket_response.json"))
+            _, err := w.Write(readJSON(t, "testdata/bucket_response.json"))
+            require.NoError(t, err)
         } else if r.URL.Path == "/pools/default/buckets/"+bucket+"/stats" {
-            _, _ = w.Write(readJSON(t, "testdata/bucket_stats_response.json"))
+            _, err := w.Write(readJSON(t, "testdata/bucket_stats_response.json"))
+            require.NoError(t, err)
         } else {
             w.WriteHeader(http.StatusNotFound)
         }
@@ -112,7 +116,8 @@ func TestGatherDetailedBucketMetrics(t *testing.T) {
         t.Run(test.name, func(t *testing.T) {
             fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                 if r.URL.Path == "/pools/default/buckets/"+bucket+"/stats" || r.URL.Path == "/pools/default/buckets/"+bucket+"/nodes/"+node+"/stats" {
-                    _, _ = w.Write(test.response)
+                    _, err := w.Write(test.response)
+                    require.NoError(t, err)
                 } else {
                     w.WriteHeader(http.StatusNotFound)
                 }
@@ -148,11 +153,14 @@
 func TestGatherNodeOnly(t *testing.T) {
     faker := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
         if r.URL.Path == "/pools" {
-            _, _ = w.Write(readJSON(t, "testdata/pools_response.json"))
+            _, err := w.Write(readJSON(t, "testdata/pools_response.json"))
+            require.NoError(t, err)
         } else if r.URL.Path == "/pools/default" {
-            _, _ = w.Write(readJSON(t, "testdata/pools_default_response.json"))
+            _, err := w.Write(readJSON(t, "testdata/pools_default_response.json"))
+            require.NoError(t, err)
         } else if r.URL.Path == "/pools/default/buckets" {
-            _, _ = w.Write(readJSON(t, "testdata/bucket_response.json"))
+            _, err := w.Write(readJSON(t, "testdata/bucket_response.json"))
+            require.NoError(t, err)
         } else {
             w.WriteHeader(http.StatusNotFound)
         }
@@ -175,13 +183,17 @@ func TestGatherFailover(t *testing.T) {
     faker := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
         switch r.URL.Path {
         case "/pools":
-            _, _ = w.Write(readJSON(t, "testdata/pools_response.json"))
+            _, err := w.Write(readJSON(t, "testdata/pools_response.json"))
+            require.NoError(t, err)
         case "/pools/default":
-            _, _ = w.Write(readJSON(t, "testdata/pools_default_response.json"))
+            _, err := w.Write(readJSON(t, "testdata/pools_default_response.json"))
+            require.NoError(t, err)
         case "/pools/default/buckets":
-            _, _ = w.Write(readJSON(t, "testdata/bucket_response.json"))
+            _, err := w.Write(readJSON(t, "testdata/bucket_response.json"))
+            require.NoError(t, err)
         case "/settings/autoFailover":
-            _, _ = w.Write(readJSON(t, "testdata/settings_autofailover.json"))
+            _, err := w.Write(readJSON(t, "testdata/settings_autofailover.json"))
+            require.NoError(t, err)
         default:
             w.WriteHeader(http.StatusNotFound)
         }
@@ -305,7 +305,8 @@ func TestBasic(t *testing.T) {
     `
     fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
         if r.URL.Path == "/_stats" {
-            _, _ = w.Write([]byte(js))
+            _, err := w.Write([]byte(js))
+            require.NoError(t, err)
         } else {
             w.WriteHeader(http.StatusNotFound)
         }
@@ -24,15 +24,18 @@ func TestCPUStats(t *testing.T) {
     server.SetAuthHandler(func(c *rcontest.Context) {
         if c.Request().Body() == c.Server().Settings.Password {
            pkg := rcon.NewPacket(rcon.SERVERDATA_AUTH_RESPONSE, c.Request().ID, "")
-            _, _ = pkg.WriteTo(c.Conn())
+            _, err := pkg.WriteTo(c.Conn())
+            require.NoError(t, err)
         } else {
             pkg := rcon.NewPacket(rcon.SERVERDATA_AUTH_RESPONSE, -1, string([]byte{0x00}))
-            _, _ = pkg.WriteTo(c.Conn())
+            _, err := pkg.WriteTo(c.Conn())
+            require.NoError(t, err)
         }
     })
     server.SetCommandHandler(func(c *rcontest.Context) {
         pkg := rcon.NewPacket(rcon.SERVERDATA_RESPONSE_VALUE, c.Request().ID, input)
-        _, _ = pkg.WriteTo(c.Conn())
+        _, err := pkg.WriteTo(c.Conn())
+        require.NoError(t, err)
     })
     server.Start()
     defer server.Close()
@@ -517,9 +517,11 @@ func TestParseCompleteFile(t *testing.T) {
     }`

     // Write json file to process into the 'process' directory.
-    f, _ := os.CreateTemp(processDirectory, "test.json")
-    _, _ = f.WriteString(testJSON)
-    _ = f.Close()
+    f, err := os.CreateTemp(processDirectory, "test.json")
+    require.NoError(t, err)
+    _, err = f.WriteString(testJSON)
+    require.NoError(t, err)
+    f.Close()

     err = r.Start(&acc)
     require.NoError(t, err)
@@ -114,7 +114,10 @@ func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype stri
     if strings.HasPrefix(addr, "/") {
         host = addr
     } else {
-        host, _, _ = net.SplitHostPort(addr)
+        host, _, err = net.SplitHostPort(addr)
+        if err != nil {
+            return fmt.Errorf("reading address failed for dovecot server %q: %w", addr, err)
+        }
     }

     gatherStats(&buf, acc, host, qtype)
@@ -33,7 +33,7 @@ func Test_Init(t *testing.T) {

         require.Equal(t, "", dpdk.SocketPath)

-        _ = dpdk.Init()
+        require.NoError(t, dpdk.Init())

         require.Equal(t, defaultPathToSocket, dpdk.SocketPath)
     })
@@ -44,7 +44,7 @@ func Test_Init(t *testing.T) {
         }
         require.Nil(t, dpdk.MetadataFields)

-        _ = dpdk.Init()
+        require.NoError(t, dpdk.Init())
         require.Equal(t, []string{dpdkMetadataFieldPidName, dpdkMetadataFieldVersionName}, dpdk.MetadataFields)
     })
@@ -54,7 +54,7 @@ func Test_Init(t *testing.T) {
         }
         require.Nil(t, dpdk.PluginOptions)

-        _ = dpdk.Init()
+        require.NoError(t, dpdk.Init())
         require.Equal(t, []string{dpdkPluginOptionInMemory}, dpdk.PluginOptions)
     })
@@ -481,7 +481,9 @@ func Test_getCommandsAndParamsCombinations(t *testing.T) {

         dpdk.DeviceTypes = []string{"ethdev"}
         dpdk.ethdevCommands = []string{"/ethdev/stats", "/ethdev/xstats"}
-        dpdk.ethdevExcludedCommandsFilter, _ = filter.Compile([]string{})
+        var err error
+        dpdk.ethdevExcludedCommandsFilter, err = filter.Compile([]string{})
+        require.NoError(t, err)
         dpdk.AdditionalCommands = []string{}
         commands := dpdk.gatherCommands(mockAcc, dpdk.connectors[0])
@@ -514,7 +516,9 @@ func Test_getCommandsAndParamsCombinations(t *testing.T) {

         dpdk.DeviceTypes = []string{"ethdev"}
         dpdk.ethdevCommands = []string{"/ethdev/stats", "/ethdev/xstats"}
-        dpdk.ethdevExcludedCommandsFilter, _ = filter.Compile([]string{"/ethdev/xstats"})
+        var err error
+        dpdk.ethdevExcludedCommandsFilter, err = filter.Compile([]string{"/ethdev/xstats"})
+        require.NoError(t, err)
         dpdk.AdditionalCommands = []string{}
         commands := dpdk.gatherCommands(mockAcc, dpdk.connectors[0])
@@ -529,7 +533,9 @@ func Test_getCommandsAndParamsCombinations(t *testing.T) {

         dpdk.DeviceTypes = []string{"ethdev"}
         dpdk.ethdevCommands = []string{"/ethdev/stats", "/ethdev/xstats"}
-        dpdk.ethdevExcludedCommandsFilter, _ = filter.Compile([]string{})
+        var err error
+        dpdk.ethdevExcludedCommandsFilter, err = filter.Compile([]string{})
+        require.NoError(t, err)
         dpdk.AdditionalCommands = []string{}
         commands := dpdk.gatherCommands(mockAcc, dpdk.connectors[0])
@@ -604,7 +610,7 @@ func Test_Gather(t *testing.T) {
             PluginOptions: []string{},
         }

-        _ = dpdk.Init()
+        require.NoError(t, dpdk.Init())

         err := dpdk.Gather(mockAcc)
         require.Error(t, err)
@@ -618,7 +624,7 @@ func Test_Gather(t *testing.T) {
             PluginOptions: []string{},
             UnreachableSocketBehavior: unreachableSocketBehaviorIgnore,
         }
-        _ = dpdk.Init()
+        require.NoError(t, dpdk.Init())

         err := dpdk.Gather(mockAcc)
         require.NoError(t, err)
@@ -107,7 +107,10 @@ type EcsClient struct {

 // Task calls the ECS metadata endpoint and returns a populated Task
 func (c *EcsClient) Task() (*Task, error) {
-    req, _ := http.NewRequest("GET", c.taskURL, nil)
+    req, err := http.NewRequest("GET", c.taskURL, nil)
+    if err != nil {
+        return nil, err
+    }
     resp, err := c.client.Do(req)
     if err != nil {
         return nil, err
@@ -115,7 +118,7 @@ func (c *EcsClient) Task() (*Task, error) {
     defer resp.Body.Close()

     if resp.StatusCode != http.StatusOK {
-        // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors.
+        //nolint:errcheck // LimitReader returns io.EOF and we're not interested in read errors.
         body, _ := io.ReadAll(io.LimitReader(resp.Body, 200))
         return nil, fmt.Errorf("%s returned HTTP status %s: %q", c.taskURL, resp.Status, body)
     }
@@ -130,7 +133,10 @@ func (c *EcsClient) Task() (*Task, error) {

 // ContainerStats calls the ECS stats endpoint and returns a populated container stats map
 func (c *EcsClient) ContainerStats() (map[string]*types.StatsJSON, error) {
-    req, _ := http.NewRequest("GET", c.statsURL, nil)
+    req, err := http.NewRequest("GET", c.statsURL, nil)
+    if err != nil {
+        return nil, err
+    }
     resp, err := c.client.Do(req)
     if err != nil {
         return nil, err
@@ -139,7 +145,7 @@ func (c *EcsClient) ContainerStats() (map[string]*types.StatsJSON, error) {
     defer resp.Body.Close()

     if resp.StatusCode != http.StatusOK {
-        // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors.
+        //nolint:errcheck // LimitReader returns io.EOF and we're not interested in read errors.
         body, _ := io.ReadAll(io.LimitReader(resp.Body, 200))
         return nil, fmt.Errorf("%s returned HTTP status %s: %q", c.statsURL, resp.Status, body)
     }
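Several of the HTTP-client hunks (the ECS client above, and kibana, logstash, mailchimp and nginx_upstream_check below) keep ignoring the read error when they capture a bounded error body, and only replace the plain comment with a //nolint:errcheck directive. A hedged sketch of that idiom, with an illustrative wrapper function:

package example

import (
    "fmt"
    "io"
    "net/http"
)

// checkStatus returns a descriptive error for non-200 responses. The read
// error is deliberately ignored: the (at most 200 byte) body is only used
// to enrich the error message, so a failed read simply yields an empty quote.
func checkStatus(url string, resp *http.Response) error {
    if resp.StatusCode != http.StatusOK {
        //nolint:errcheck // LimitReader returns io.EOF and we're not interested in read errors
        body, _ := io.ReadAll(io.LimitReader(resp.Body, 200))
        return fmt.Errorf("%s returned HTTP status %s: %q", url, resp.Status, body)
    }
    return nil
}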
@@ -95,7 +95,6 @@ func (m mockDo) Do(*http.Request) (*http.Response, error) {
 }

 func TestEcsClient_Task(t *testing.T) {
-    rc, _ := os.Open("testdata/metadata.golden")
     tests := []struct {
         name string
         client httpClient
@@ -106,6 +105,10 @@ func TestEcsClient_Task(t *testing.T) {
             name: "happy",
             client: mockDo{
                 do: func() (*http.Response, error) {
+                    rc, err := os.Open("testdata/metadata.golden")
+                    if err != nil {
+                        return nil, err
+                    }
                     return &http.Response{
                         StatusCode: http.StatusOK,
                         Body: io.NopCloser(rc),
@@ -166,7 +169,6 @@ func TestEcsClient_Task(t *testing.T) {
 }

 func TestEcsClient_ContainerStats(t *testing.T) {
-    rc, _ := os.Open("testdata/stats.golden")
     tests := []struct {
         name string
         client httpClient
@@ -177,6 +179,10 @@ func TestEcsClient_ContainerStats(t *testing.T) {
             name: "happy",
             client: mockDo{
                 do: func() (*http.Response, error) {
+                    rc, err := os.Open("testdata/stats.golden")
+                    if err != nil {
+                        return nil, err
+                    }
                     return &http.Response{
                         StatusCode: http.StatusOK,
                         Body: io.NopCloser(rc),
@@ -14,11 +14,19 @@ import (
 const pauseStatsKey = "e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba"
 const nginxStatsKey = "fffe894e232d46c76475cfeabf4907f712e8b92618a37fca3ef0805bbbfb0299"

-var pauseStatsRead, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:40:00.936081344Z")
-var pauseStatsPreRead, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:39:59.933000984Z")
+var pauseStatsRead = mustParseNano("2018-11-19T15:40:00.936081344Z")
+var pauseStatsPreRead = mustParseNano("2018-11-19T15:39:59.933000984Z")

-var nginxStatsRead, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:40:00.93733207Z")
-var nginxStatsPreRead, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:39:59.934291009Z")
+var nginxStatsRead = mustParseNano("2018-11-19T15:40:00.93733207Z")
+var nginxStatsPreRead = mustParseNano("2018-11-19T15:39:59.934291009Z")
+
+func mustParseNano(value string) time.Time {
+    t, err := time.Parse(time.RFC3339Nano, value)
+    if err != nil {
+        panic(err)
+    }
+    return t
+}

 var validStats = map[string]*types.StatsJSON{
     pauseStatsKey: {
@@ -682,12 +690,12 @@ var validStats = map[string]*types.StatsJSON{
 }

 // meta
-var metaPauseCreated, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:31:26.641964373Z")
-var metaPauseStarted, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:31:27.035698679Z")
-var metaCreated, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:31:27.614884084Z")
-var metaStarted, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:31:27.975996351Z")
-var metaPullStart, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:31:27.197327103Z")
-var metaPullStop, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:31:27.609089471Z")
+var metaPauseCreated = mustParseNano("2018-11-19T15:31:26.641964373Z")
+var metaPauseStarted = mustParseNano("2018-11-19T15:31:27.035698679Z")
+var metaCreated = mustParseNano("2018-11-19T15:31:27.614884084Z")
+var metaStarted = mustParseNano("2018-11-19T15:31:27.975996351Z")
+var metaPullStart = mustParseNano("2018-11-19T15:31:27.197327103Z")
+var metaPullStop = mustParseNano("2018-11-19T15:31:27.609089471Z")

 var validMeta = Task{
     Cluster: "test",
@@ -564,8 +564,10 @@ func setupIntegrationTest(t *testing.T) (*testutil.Container, error) {

     for scanner.Scan() {
         parts := strings.Split(scanner.Text(), " ")
-        size, _ := strconv.Atoi(parts[9])
-        responseTime, _ := strconv.Atoi(parts[len(parts)-1])
+        size, err := strconv.Atoi(parts[9])
+        require.NoError(t, err)
+        responseTime, err := strconv.Atoi(parts[len(parts)-1])
+        require.NoError(t, err)

         logline := nginxlog{
             IPaddress: parts[0],
@@ -69,7 +69,8 @@ func TestFileTag(t *testing.T) {

 func TestJSONParserCompile(t *testing.T) {
     var acc testutil.Accumulator
-    wd, _ := os.Getwd()
+    wd, err := os.Getwd()
+    require.NoError(t, err)
     r := File{
         Files: []string{filepath.Join(wd, "dev", "testfiles", "json_a.log")},
         Log: testutil.Logger{},
@@ -88,13 +89,14 @@ func TestJSONParserCompile(t *testing.T) {
 }

 func TestGrokParser(t *testing.T) {
-    wd, _ := os.Getwd()
+    wd, err := os.Getwd()
+    require.NoError(t, err)
     var acc testutil.Accumulator
     r := File{
         Files: []string{filepath.Join(wd, "dev", "testfiles", "grok_a.log")},
         Log: testutil.Logger{},
     }
-    err := r.Init()
+    err = r.Init()
     require.NoError(t, err)

     r.SetParserFunc(func() (telegraf.Parser, error) {
@@ -33,7 +33,8 @@ func TestSplitRepositoryNameWithWorkingExample(t *testing.T) {

     for _, tt := range validRepositoryNames {
         t.Run(tt.fullName, func(t *testing.T) {
-            owner, repository, _ := splitRepositoryName(tt.fullName)
+            owner, repository, err := splitRepositoryName(tt.fullName)
+            require.NoError(t, err)

             require.Equal(t, tt.owner, owner)
             require.Equal(t, tt.repository, repository)
@@ -164,7 +164,11 @@ func (h *handler) handleSubscribeResponseUpdate(acc telegraf.Accumulator, respon
     prefix := newInfoFromPath(response.Update.Prefix)

     // Add info to the tags
-    headerTags["source"], _, _ = net.SplitHostPort(h.address)
+    var err error
+    headerTags["source"], _, err = net.SplitHostPort(h.address)
+    if err != nil {
+        h.log.Errorf("unable to parse address %s: %v", h.address, err)
+    }
     if !prefix.empty() {
         headerTags["path"] = prefix.FullPath()
     }
@@ -320,9 +320,11 @@ func stateFullGCSServer(t *testing.T) *httptest.Server {
             failPath(r.URL.Path, t, w)
         }
     case "/upload/storage/v1/b/test-iteration-bucket/o":
-        _, params, _ := mime.ParseMediaType(r.Header["Content-Type"][0])
+        _, params, err := mime.ParseMediaType(r.Header["Content-Type"][0])
+        require.NoError(t, err)
         boundary := params["boundary"]
-        currentOffSetKey, _ = fetchJSON(t, boundary, r.Body)
+        currentOffSetKey, err = fetchJSON(t, boundary, r.Body)
+        require.NoError(t, err)
     default:
         serveBlobs(t, w, r.URL.Path, currentOffSetKey)
     }
@@ -141,7 +141,10 @@ func (h *GrayLog) gatherServer(
         return fmt.Errorf("unable to parse address %q: %w", serverURL, err)
     }

-    host, port, _ := net.SplitHostPort(requestURL.Host)
+    host, port, err := net.SplitHostPort(requestURL.Host)
+    if err != nil {
+        return fmt.Errorf("unable to parse address host %q: %w", requestURL.Host, err)
+    }
     var dat ResponseMetrics
     if err := json.Unmarshal([]byte(resp), &dat); err != nil {
         return err
@@ -30,7 +30,10 @@ func (s statServer) serverSocket(l net.Listener) {
             defer c.Close()

             buf := make([]byte, 1024)
-            n, _ := c.Read(buf)
+            n, err := c.Read(buf)
+            if err != nil {
+                return
+            }

             data := buf[:n]
             if string(data) == "show stat\n" {
@@ -32,7 +32,8 @@ import (
 func TestHTTPWithJSONFormat(t *testing.T) {
     fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
         if r.URL.Path == "/endpoint" {
-            _, _ = w.Write([]byte(simpleJSON))
+            _, err := w.Write([]byte(simpleJSON))
+            require.NoError(t, err)
         } else {
             w.WriteHeader(http.StatusNotFound)
         }
@@ -72,7 +73,8 @@ func TestHTTPHeaders(t *testing.T) {
     fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
         if r.URL.Path == "/endpoint" {
             if r.Header.Get(header) == headerValue {
-                _, _ = w.Write([]byte(simpleJSON))
+                _, err := w.Write([]byte(simpleJSON))
+                require.NoError(t, err)
             } else {
                 w.WriteHeader(http.StatusForbidden)
             }
@@ -105,7 +107,8 @@ func TestHTTPContentLengthHeader(t *testing.T) {
     fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
         if r.URL.Path == "/endpoint" {
             if r.Header.Get("Content-Length") != "" {
-                _, _ = w.Write([]byte(simpleJSON))
+                _, err := w.Write([]byte(simpleJSON))
+                require.NoError(t, err)
             } else {
                 w.WriteHeader(http.StatusForbidden)
             }
@@ -405,7 +408,8 @@ func TestOAuthClientCredentialsGrant(t *testing.T) {
 func TestHTTPWithCSVFormat(t *testing.T) {
     fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
         if r.URL.Path == "/endpoint" {
-            _, _ = w.Write([]byte(simpleCSVWithHeader))
+            _, err := w.Write([]byte(simpleCSVWithHeader))
+            require.NoError(t, err)
         } else {
             w.WriteHeader(http.StatusNotFound)
         }
@@ -462,7 +466,8 @@ func TestConnectionOverUnixSocket(t *testing.T) {
     ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
         if r.URL.Path == "/data" {
             w.Header().Set("Content-Type", "text/csv")
-            _, _ = w.Write([]byte(simpleCSVWithHeader))
+            _, err := w.Write([]byte(simpleCSVWithHeader))
+            require.NoError(t, err)
         } else {
             w.WriteHeader(http.StatusNotFound)
         }
@@ -494,7 +494,10 @@ func TestHTTPHeaderTags(t *testing.T) {
 }

 func findInterface() (net.Interface, error) {
-    potential, _ := net.Interfaces()
+    potential, err := net.Interfaces()
+    if err != nil {
+        return net.Interface{}, err
+    }

     for _, i := range potential {
         // we are only interest in loopback interfaces which are up
@@ -502,8 +505,7 @@ func findInterface() (net.Interface, error) {
             continue
         }

-        if addrs, _ := i.Addrs(); len(addrs) > 0 {
-            // return interface if it has at least one unicast address
+        if addrs, err := i.Addrs(); err == nil && len(addrs) > 0 {
             return i, nil
         }
     }
@@ -221,7 +221,10 @@ func (h *InfluxDBListener) handlePing() http.HandlerFunc {
         if verbose != "" && verbose != "0" && verbose != "false" {
             res.Header().Set("Content-Type", "application/json")
             res.WriteHeader(http.StatusOK)
-            b, _ := json.Marshal(map[string]string{"version": "1.0"}) // based on header set above
+            b, err := json.Marshal(map[string]string{"version": "1.0"}) // based on header set above
+            if err != nil {
+                h.Log.Debugf("error marshalling json in handlePing: %v", err)
+            }
             if _, err := res.Write(b); err != nil {
                 h.Log.Debugf("error writing result in handlePing: %v", err)
             }
@@ -241,12 +241,15 @@ func (h *InfluxDBV2Listener) handleReady() http.HandlerFunc {
         // respond to ready requests
         res.Header().Set("Content-Type", "application/json")
         res.WriteHeader(http.StatusOK)
-        b, _ := json.Marshal(map[string]string{
+        b, err := json.Marshal(map[string]string{
             "started": h.startTime.Format(time.RFC3339Nano),
             "status": "ready",
             "up": h.timeFunc().Sub(h.startTime).String()})
+        if err != nil {
+            h.Log.Debugf("error marshalling json in handleReady: %v", err)
+        }
         if _, err := res.Write(b); err != nil {
-            h.Log.Debugf("error writing in handle-ready: %v", err)
+            h.Log.Debugf("error writing in handleReady: %v", err)
         }
     }
 }
@@ -399,11 +402,14 @@ func tooLarge(res http.ResponseWriter, maxLength int64) error {
     res.Header().Set("Content-Type", "application/json")
     res.Header().Set("X-Influxdb-Error", "http: request body too large")
     res.WriteHeader(http.StatusRequestEntityTooLarge)
-    b, _ := json.Marshal(map[string]string{
+    b, err := json.Marshal(map[string]string{
         "code": fmt.Sprint(Invalid),
         "message": "http: request body too large",
         "maxLength": strconv.FormatInt(maxLength, 10)})
-    _, err := res.Write(b)
+    if err != nil {
+        return err
+    }
+    _, err = res.Write(b)
     return err
 }

@@ -414,13 +420,16 @@ func badRequest(res http.ResponseWriter, code BadRequestCode, errString string)
     }
     res.Header().Set("X-Influxdb-Error", errString)
     res.WriteHeader(http.StatusBadRequest)
-    b, _ := json.Marshal(map[string]string{
+    b, err := json.Marshal(map[string]string{
         "code": fmt.Sprint(code),
         "message": errString,
         "op": "",
         "err": errString,
     })
-    _, err := res.Write(b)
+    if err != nil {
+        return err
+    }
+    _, err = res.Write(b)
     return err
 }
@@ -28,7 +28,8 @@ func TestDLB_Init(t *testing.T) {
         }
         require.Equal(t, "", dlb.SocketPath)

-        _ = dlb.Init()
+        //nolint:errcheck // we are just testing that socket path gets set to default, not that default is valid
+        dlb.Init()

         require.Equal(t, defaultSocketPath, dlb.SocketPath)
     })
@@ -941,11 +942,12 @@ func simulateSocketResponseForGather(socket net.Listener, t *testing.T) {
         Pid int `json:"pid"`
         MaxOutputLen uint32 `json:"max_output_len"`
     }
-    initMsg, _ := json.Marshal(initMessage{
+    initMsg, err := json.Marshal(initMessage{
         Version: "",
         Pid: 1,
         MaxOutputLen: 1024,
     })
+    require.NoError(t, err)
     _, err = conn.Write(initMsg)
     require.NoError(t, err)
@@ -145,7 +145,8 @@ func TestResolveEntities(t *testing.T) {

     t.Run("uncore event found in core entity", func(t *testing.T) {
         mQuals := []string{"config1=0x23h"}
-        mOptions, _ := ia.NewOptions().SetAttrModifiers(mQuals).Build()
+        mOptions, err := ia.NewOptions().SetAttrModifiers(mQuals).Build()
+        require.NoError(t, err)
         eventName := "uncore event 1"

         testCase := test{event: &eventWithQuals{name: eventName, qualifiers: mQuals},
@@ -156,7 +157,7 @@ func TestResolveEntities(t *testing.T) {
         mTransformer.On("Transform", nil, matcher).Return([]*ia.PerfEvent{testCase.perfEvent}, nil).Once()

         mCoreEntity := &CoreEventEntity{parsedEvents: []*eventWithQuals{testCase.event}, allEvents: false}
-        err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil)
+        err = mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil)

         require.Error(t, err)
         require.Contains(t, err.Error(), fmt.Sprintf("uncore event %q found in core entity", eventName))
@@ -165,7 +166,8 @@ func TestResolveEntities(t *testing.T) {

     t.Run("core event found in uncore entity", func(t *testing.T) {
         mQuals := []string{"config1=0x23h"}
-        mOptions, _ := ia.NewOptions().SetAttrModifiers(mQuals).Build()
+        mOptions, err := ia.NewOptions().SetAttrModifiers(mQuals).Build()
+        require.NoError(t, err)
         eventName := "core event 1"

         testCase := test{event: &eventWithQuals{name: eventName, qualifiers: mQuals},
@@ -176,7 +178,7 @@ func TestResolveEntities(t *testing.T) {
         mTransformer.On("Transform", nil, matcher).Return([]*ia.PerfEvent{testCase.perfEvent}, nil).Once()

         mUncoreEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{testCase.event}, allEvents: false}
-        err := mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity})
+        err = mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity})

         require.Error(t, err)
         require.Contains(t, err.Error(), fmt.Sprintf("core event %q found in uncore entity", eventName))
@@ -188,8 +190,10 @@ func TestResolveEntities(t *testing.T) {
         var nUncoreEvents []*eventWithQuals

         mQuals := []string{"config1=0x23h"}
-        mOptions, _ := ia.NewOptions().SetAttrModifiers(mQuals).Build()
-        emptyOptions, _ := ia.NewOptions().Build()
+        mOptions, err := ia.NewOptions().SetAttrModifiers(mQuals).Build()
+        require.NoError(t, err)
+        emptyOptions, err := ia.NewOptions().Build()
+        require.NoError(t, err)

         coreTestCases := []test{
             {event: &eventWithQuals{name: "core1", qualifiers: mQuals},
@@ -228,7 +232,7 @@ func TestResolveEntities(t *testing.T) {

         mCoreEntity := &CoreEventEntity{parsedEvents: mCoreEvents, allEvents: false}
         mUncoreEntity := &UncoreEventEntity{parsedEvents: nUncoreEvents, allEvents: false}
-        err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, []*UncoreEventEntity{mUncoreEntity})
+        err = mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, []*UncoreEventEntity{mUncoreEntity})

         require.NoError(t, err)
         for _, test := range append(coreTestCases, uncoreTestCases...) {
@@ -274,7 +278,8 @@ func TestResolveAllEvents(t *testing.T) {
     uncorePerfEvent1 := &ia.PerfEvent{Name: "mock3", Uncore: true}
     uncorePerfEvent2 := &ia.PerfEvent{Name: "mock4", Uncore: true}

-    options, _ := ia.NewOptions().Build()
+    options, err := ia.NewOptions().Build()
+    require.NoError(t, err)
     perfEvents := []*ia.PerfEvent{perfEvent1, perfEvent2, uncorePerfEvent1, uncorePerfEvent2}

     expectedCore := []*eventWithQuals{
@@ -362,7 +367,8 @@ func TestResolveEvent(t *testing.T) {
     mPerfEvent := &ia.PerfEvent{Name: event}
     mPerfEvents := []*ia.PerfEvent{mPerfEvent}

-    expectedOptions, _ := ia.NewOptions().SetAttrModifiers(qualifiers).Build()
+    expectedOptions, err := ia.NewOptions().SetAttrModifiers(qualifiers).Build()
+    require.NoError(t, err)

     mTransformer.On("Transform", nil, matcher).Once().Return(mPerfEvents, nil)
@@ -329,8 +329,8 @@ func shutDownPqos(pqos *exec.Cmd) error {
     timeout := time.Second * 2

     if pqos.Process != nil {
-        // try to send interrupt signal, ignore err for now
-        _ = pqos.Process.Signal(os.Interrupt)
+        //nolint:errcheck // try to send interrupt signal, ignore err for now
+        pqos.Process.Signal(os.Interrupt)

         // wait and constantly check if pqos is still running
         ctx, cancel := context.WithTimeout(context.Background(), timeout)
@@ -548,8 +548,8 @@ func makeRange(min, max int) []int {
 func init() {
     inputs.Add("intel_rdt", func() telegraf.Input {
         rdt := IntelRDT{}
-        pathPqos, _ := exec.LookPath("pqos")
-        if len(pathPqos) > 0 {
+        pathPqos, err := exec.LookPath("pqos")
+        if len(pathPqos) > 0 && err != nil {
            rdt.PqosPath = pathPqos
         }
         return &rdt
@@ -94,6 +94,7 @@ func (c *Connection) LocalIP() string {
         return c.Hostname
     }
     _ = conn.Close()
+    //nolint:errcheck // unable to propagate
     host, _, _ := net.SplitHostPort(conn.LocalAddr().String())
     return host
 }
@@ -627,7 +627,8 @@ func TestJolokia2_ClientAuthRequest(t *testing.T) {
     server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
         username, password, _ = r.BasicAuth()

-        body, _ := io.ReadAll(r.Body)
+        body, err := io.ReadAll(r.Body)
+        require.NoError(t, err)
         require.NoError(t, json.Unmarshal(body, &requests))

         w.WriteHeader(http.StatusOK)
@@ -84,10 +84,11 @@ func TestJolokia2_ClientProxyAuthRequest(t *testing.T) {
     server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
         username, password, _ = r.BasicAuth()

-        body, _ := io.ReadAll(r.Body)
+        body, err := io.ReadAll(r.Body)
+        require.NoError(t, err)
         require.NoError(t, json.Unmarshal(body, &requests))
         w.WriteHeader(http.StatusOK)
-        _, err := fmt.Fprintf(w, "[]")
+        _, err = fmt.Fprintf(w, "[]")
         require.NoError(t, err)
     }))
     defer server.Close()
@@ -318,7 +318,8 @@ func TestConsumerGroupHandler_Lifecycle(t *testing.T) {
     // err = cg.ConsumeClaim(session, &claim)
     //require.NoError(t, err)
     // So stick with the line below for now.
-    _ = cg.ConsumeClaim(session, &claim)
+    //nolint:errcheck // see above
+    cg.ConsumeClaim(session, &claim)

     err = cg.Cleanup(session)
     require.NoError(t, err)
@@ -18,7 +18,8 @@ func TestKapacitor(t *testing.T) {

     fakeInfluxServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
         if r.URL.Path == "/endpoint" {
-            _, _ = w.Write(kapacitorReturn)
+            _, err := w.Write(kapacitorReturn)
+            require.NoError(t, err)
         } else {
             w.WriteHeader(http.StatusNotFound)
         }
@@ -236,7 +236,7 @@ func (k *Kibana) gatherJSONData(url string, v interface{}) (host string, err err
     defer response.Body.Close()

     if response.StatusCode != http.StatusOK {
-        // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors.
+        //nolint:errcheck // LimitReader returns io.EOF and we're not interested in read errors.
         body, _ := io.ReadAll(io.LimitReader(response.Body, 200))
         return request.Host, fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body)
     }
@@ -15,16 +15,18 @@ import (
 )

 func TestKinesisConsumer_onMessage(t *testing.T) {
-    zlibBytpes, _ := base64.StdEncoding.DecodeString(
+    zlibBytpes, err := base64.StdEncoding.DecodeString(
         "eF5FjlFrgzAUhf9KuM+2aNB2zdsQ2xe3whQGW8qIeqdhaiSJK0P874u1Y4+Hc/jON0GHxoga858BgUF8fs5fzunHU5Jlj6cEPFDXHvXStGqsrsKWTapq44pW1SetxsF1a8qsRtGt0Yy" +
             "FKbUcrFT9UbYWtQH2frntkm/s7RInkNU6t9JpWNE5WBAFPo3CcHeg+9D703OziUOhCg6MQ/yakrspuZsyEjdYfsm+Jg2K1jZEfZLKQWUvFglylBobZXDLwSP8//EGpD4NNj7dUJpT6" +
             "hQY3W33h/AhCt84zDBf5l/MDl08",
     )
-    gzippedBytes, _ := base64.StdEncoding.DecodeString(
+    require.NoError(t, err)
+    gzippedBytes, err := base64.StdEncoding.DecodeString(
         "H4sIAAFXNGAAA0WOUWuDMBSF/0q4z7Zo0HbN2xDbF7fCFAZbyoh6p2FqJIkrQ/zvi7Vjj4dz+M43QYfGiBrznwGBQXx+zl/O6cdTkmWPpwQ8UNce9dK0aqyuwpZNqmrjilbVJ63GwXVr" +
             "yqxG0a3RjIUptRysVP1Rtha1AfZ+ue2Sb+ztEieQ1Tq30mlY0TlYEAU+jcJwd6D70PvTc7OJQ6EKDoxD/JqSuym5mzISN1h+yb4mDYrWNkR9kspBZS8WCXKUGhtlcMvBI/z/8QakPg02" +
             "Pt1QmlPqFBjdbfeH8CEK3zjMMF/mX0TaxZUpAQAA",
     )
+    require.NoError(t, err)
     notZippedBytes := []byte(`{
         "messageType": "CONTROL_MESSAGE",
         "owner": "CloudwatchLogs",
@@ -200,7 +202,7 @@ func TestKinesisConsumer_onMessage(t *testing.T) {
     k := &KinesisConsumer{
         ContentEncoding: "notsupported",
     }
-    err := k.Init()
+    err = k.Init()
     require.Error(t, err)

     for _, tt := range tests {
@@ -26,7 +26,8 @@ func TestKubernetesStats(t *testing.T) {
     }))
     defer ts.Close()

-    labelFilter, _ := filter.NewIncludeExcludeFilter([]string{"app", "superkey"}, nil)
+    labelFilter, err := filter.NewIncludeExcludeFilter([]string{"app", "superkey"}, nil)
+    require.NoError(t, err)

     k := &Kubernetes{
         URL: ts.URL,
@@ -35,7 +36,7 @@ func TestKubernetesStats(t *testing.T) {
     }

     var acc testutil.Accumulator
-    err := acc.GatherError(k.Gather)
+    err = acc.GatherError(k.Gather)
     require.NoError(t, err)

     fields := map[string]interface{}{
@@ -165,7 +165,7 @@ func (logstash *Logstash) gatherJSONData(address string, value interface{}) erro

     defer response.Body.Close()
     if response.StatusCode != http.StatusOK {
-        // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors.
+        //nolint:errcheck // LimitReader returns io.EOF and we're not interested in read errors.
         body, _ := io.ReadAll(io.LimitReader(response.Body, 200))
         return fmt.Errorf("%s returned HTTP status %s: %q", address, response.Status, body)
     }
@@ -148,7 +148,7 @@ func (a *ChimpAPI) runChimp(params ReportsParams) ([]byte, error) {
     defer resp.Body.Close()

     if resp.StatusCode != http.StatusOK {
-        // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors.
+        //nolint:errcheck // LimitReader returns io.EOF and we're not interested in read errors.
         body, _ := io.ReadAll(io.LimitReader(resp.Body, 200))
         return nil, fmt.Errorf("%s returned HTTP status %s: %q", a.url.String(), resp.Status, body)
     }
@@ -38,7 +38,10 @@ func (m *MailChimp) Gather(acc telegraf.Accumulator) error {
     since := ""
     if m.DaysOld > 0 {
         now := time.Now()
-        d, _ := time.ParseDuration(fmt.Sprintf("%dh", 24*m.DaysOld))
+        d, err := time.ParseDuration(fmt.Sprintf("%dh", 24*m.DaysOld))
+        if err != nil {
+            return err
+        }
         since = now.Add(-d).Format(time.RFC3339)
     }
@@ -404,6 +404,7 @@ func (d *MongodbData) AddDefaultStats() {
         for key, value := range wiredTigerStats {
             val := statLine.FieldByName(value).Interface()
             percentVal := fmt.Sprintf("%.1f", val.(float64)*100)
+            //nolint:errcheck // guaranteed to be formatted properly because of the above
             floatVal, _ := strconv.ParseFloat(percentVal, 64)
             d.add(key, floatVal)
         }
@@ -11,7 +11,8 @@ import (
 )

 func TestFileTypes(t *testing.T) {
-    wd, _ := os.Getwd()
+    wd, err := os.Getwd()
+    require.NoError(t, err)

     m := MultiFile{
         BaseDir: path.Join(wd, `testdata`),
@@ -43,7 +44,8 @@ func TestFileTypes(t *testing.T) {
 }

 func FailEarly(failEarly bool, t *testing.T) error {
-    wd, _ := os.Getwd()
+    wd, err := os.Getwd()
+    require.NoError(t, err)

     m := MultiFile{
         BaseDir: path.Join(wd, `testdata`),
@@ -57,7 +59,7 @@ func FailEarly(failEarly bool, t *testing.T) error {
     var acc testutil.Accumulator

     require.NoError(t, m.Init())
-    err := m.Gather(&acc)
+    err = m.Gather(&acc)

     if err == nil {
         require.Equal(t, map[string]interface{}{
@@ -115,6 +115,7 @@ func (n *NetIOStats) Gather(acc telegraf.Accumulator) error {
     // Get system wide stats for different network protocols
     // (ignore these stats if the call fails)
     if !n.IgnoreProtocolStats {
+        //nolint:errcheck // stats ignored on fail
         netprotos, _ := n.ps.NetProto()
         fields := make(map[string]interface{})
         for _, proto := range netprotos {
@ -272,7 +272,8 @@ func UDPServer(t *testing.T, wg *sync.WaitGroup) {
|
|||
require.NoError(t, err)
|
||||
wg.Done()
|
||||
buf := make([]byte, 1024)
|
||||
_, remoteaddr, _ := conn.ReadFromUDP(buf)
|
||||
_, remoteaddr, err := conn.ReadFromUDP(buf)
|
||||
require.NoError(t, err)
|
||||
_, err = conn.WriteToUDP(buf, remoteaddr)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, conn.Close())
|
||||
|
|
|
|||
|
|
@@ -253,8 +253,8 @@ func (n *NFSClient) processText(scanner *bufio.Scanner, acc telegraf.Accumulator
 	if len(n.IncludeMounts) > 0 {
 		skip = true
 		for _, RE := range n.IncludeMounts {
-			matched, _ := regexp.MatchString(RE, mount)
-			if matched {
+			matched, err := regexp.MatchString(RE, mount)
+			if matched && err == nil {
 				skip = false
 				break
 			}
@@ -263,8 +263,8 @@ func (n *NFSClient) processText(scanner *bufio.Scanner, acc telegraf.Accumulator

 	if !skip && len(n.ExcludeMounts) > 0 {
 		for _, RE := range n.ExcludeMounts {
-			matched, _ := regexp.MatchString(RE, mount)
-			if matched {
+			matched, err := regexp.MatchString(RE, mount)
+			if matched && err == nil {
 				skip = true
 				break
 			}
@@ -94,12 +94,13 @@ func TestNFSClientProcessStat(t *testing.T) {
 	nfsclient := NFSClient{}
 	nfsclient.Fullstat = false

-	file, _ := os.Open(getMountStatsPath())
+	file, err := os.Open(getMountStatsPath())
+	require.NoError(t, err)
 	defer file.Close()

 	scanner := bufio.NewScanner(file)

-	err := nfsclient.processText(scanner, &acc)
+	err = nfsclient.processText(scanner, &acc)
 	require.NoError(t, err)

 	fieldsReadstat := map[string]interface{}{
@@ -142,12 +143,13 @@ func TestNFSClientProcessFull(t *testing.T) {
 	nfsclient := NFSClient{}
 	nfsclient.Fullstat = true

-	file, _ := os.Open(getMountStatsPath())
+	file, err := os.Open(getMountStatsPath())
+	require.NoError(t, err)
 	defer file.Close()

 	scanner := bufio.NewScanner(file)

-	err := nfsclient.processText(scanner, &acc)
+	err = nfsclient.processText(scanner, &acc)
 	require.NoError(t, err)

 	fieldsEvents := map[string]interface{}{
@@ -29,9 +29,9 @@ Reading: 8 Writing: 125 Waiting: 946
 // Verify that nginx tags are properly parsed based on the server
 func TestNginxTags(t *testing.T) {
 	urls := []string{"http://localhost/endpoint", "http://localhost:80/endpoint"}
-	var addr *url.URL
 	for _, url1 := range urls {
-		addr, _ = url.Parse(url1)
+		addr, err := url.Parse(url1)
+		require.NoError(t, err)
 		tagMap := getTags(addr)
 		require.Contains(t, tagMap["server"], "localhost")
 	}
@@ -117,7 +117,7 @@ func (check *NginxUpstreamCheck) gatherJSONData(address string, value interface{

 	defer response.Body.Close()
 	if response.StatusCode != http.StatusOK {
-		// ignore the err here; LimitReader returns io.EOF and we're not interested in read errors.
+		//nolint:errcheck // LimitReader returns io.EOF and we're not interested in read errors.
 		body, _ := io.ReadAll(io.LimitReader(response.Body, 200))
 		return fmt.Errorf("%s returned HTTP status %s: %q", address, response.Status, body)
 	}
@@ -81,8 +81,9 @@ func TestNomadStats(t *testing.T) {
 	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		if r.RequestURI == "/v1/metrics" {
 			w.WriteHeader(http.StatusOK)
-			responseKeyMetrics, _ := os.ReadFile("testdata/response_key_metrics.json")
-			_, err := fmt.Fprintln(w, string(responseKeyMetrics))
+			responseKeyMetrics, err := os.ReadFile("testdata/response_key_metrics.json")
+			require.NoError(t, err)
+			_, err = fmt.Fprintln(w, string(responseKeyMetrics))
 			require.NoError(t, err)
 		}
 	}))
@@ -36,7 +36,8 @@ func TestReadsMetricsFromNSQ(t *testing.T) {
 		{100 * time.Millisecond, -1, []byte("exit")},
 	}

-	addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:4155")
+	addr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:4155")
+	require.NoError(t, err)
 	newMockNSQD(t, script, addr.String())

 	consumer := &NSQConsumer{
@@ -186,7 +187,11 @@ func (n *mockNSQD) handle(conn net.Conn) {
 				goto exit
 			}
 		case bytes.Equal(params[0], []byte("RDY")):
-			rdy, _ := strconv.Atoi(string(params[1]))
+			rdy, err := strconv.Atoi(string(params[1]))
+			if err != nil {
+				log.Print(err.Error())
+				goto exit
+			}
 			rdyCount = rdy
 		case bytes.Equal(params[0], []byte("FIN")):
 		case bytes.Equal(params[0], []byte("REQ")):
@@ -2,7 +2,6 @@ package opensearch_query

 import (
 	"encoding/json"
-	"fmt"
 )

 type AggregationRequest interface {
@@ -27,7 +26,7 @@ type aggregationFunction struct {
 func (a *aggregationFunction) MarshalJSON() ([]byte, error) {
 	agg := make(map[string]interface{})
 	field := map[string]interface{}{"field": a.field}
-	if t, _ := getAggregationFunctionType(a.aggType); t == "bucket" {
+	if t := getAggregationFunctionType(a.aggType); t == "bucket" {
 		// We'll use the default size of 10 if it hasn't been set; size == 0 is illegal in a bucket aggregation
 		if a.size == 0 {
 			a.size = 10
@@ -54,13 +53,13 @@ func (a *aggregationFunction) Missing(missing string) {
 	a.missing = missing
 }

-func getAggregationFunctionType(field string) (string, error) {
+func getAggregationFunctionType(field string) string {
 	switch field {
 	case "avg", "sum", "min", "max", "value_count", "stats", "extended_stats", "percentiles":
-		return "metric", nil
+		return "metric"
 	case "terms":
-		return "bucket", nil
+		return "bucket"
 	default:
-		return "", fmt.Errorf("invalid aggregation function %s", field)
+		return ""
 	}
 }
@@ -5,7 +5,7 @@ import "fmt"
 type MetricAggregationRequest map[string]*aggregationFunction

 func (m MetricAggregationRequest) AddAggregation(name, aggType, field string) error {
-	if t, _ := getAggregationFunctionType(aggType); t != "metric" {
+	if t := getAggregationFunctionType(aggType); t != "metric" {
 		return fmt.Errorf("aggregation function %q not supported", aggType)
 	}

@@ -256,7 +256,10 @@ func (aggregation *osAggregation) buildAggregationQuery() error {
 		if err != nil {
 			return err
 		}
-		_ = bucket.BucketSize(name, 1000)
+		err = bucket.BucketSize(name, 1000)
+		if err != nil {
+			return err
+		}
 		if aggregation.IncludeMissingTag && aggregation.MissingTagValue != "" {
 			bucket.Missing(name, aggregation.MissingTagValue)
 		}
@@ -593,8 +593,10 @@ func setupIntegrationTest(t *testing.T, image string) (*testutil.Container, *Ope

 	for scanner.Scan() {
 		parts := strings.Split(scanner.Text(), " ")
-		size, _ := strconv.Atoi(parts[9])
-		responseTime, _ := strconv.Atoi(parts[len(parts)-1])
+		size, err := strconv.Atoi(parts[9])
+		require.NoError(t, err)
+		responseTime, err := strconv.Atoi(parts[len(parts)-1])
+		require.NoError(t, err)

 		logline := nginxlog{
 			IPaddress: parts[0],
@@ -43,7 +43,8 @@ func TestOpenTelemetry(t *testing.T) {
 		})),
 	)
 	require.NoError(t, err)
-	t.Cleanup(func() { _ = metricExporter.Shutdown(ctx) })
+	//nolint:errcheck // test cleanup
+	t.Cleanup(func() { metricExporter.Shutdown(ctx) })

 	reader := metric.NewManualReader()
 	mp := metric.NewMeterProvider(metric.WithReader(reader))
@@ -100,7 +100,8 @@ func TestCases(t *testing.T) {
 		key := strings.TrimPrefix(r.URL.Path, "/data/2.5/")
 		if resp, found := input[key]; found {
 			w.Header()["Content-Type"] = []string{"application/json"}
-			_, _ = w.Write(resp)
+			_, err := w.Write(resp)
+			require.NoError(t, err)
 			return
 		}

@@ -114,7 +115,8 @@ func TestCases(t *testing.T) {
 			key += "_" + ids[0]
 			if resp, found := input[key]; found {
 				w.Header()["Content-Type"] = []string{"application/json"}
-				_, _ = w.Write(resp)
+				_, err := w.Write(resp)
+				require.NoError(t, err)
 				return
 			}
 		}
@@ -46,11 +46,13 @@ func createEntityCounterEntry(
 func NewTestP4RuntimeClient(
 	p4RuntimeClient *fakeP4RuntimeClient,
 	addr string,
+	t *testing.T,
 ) *P4runtime {
-	conn, _ := grpc.NewClient(
+	conn, err := grpc.NewClient(
 		addr,
 		grpc.WithTransportCredentials(insecure.NewCredentials()),
 	)
+	require.NoError(t, err)
 	return &P4runtime{
 		Endpoint: addr,
 		DeviceID: uint64(1),
@@ -100,7 +102,7 @@ func TestErrorGetP4Info(t *testing.T) {
 	listener, err := net.Listen("tcp", "127.0.0.1:0")
 	require.NoError(t, err)

-	plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String())
+	plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t)

 	var acc testutil.Accumulator
 	require.Error(t, plugin.Gather(&acc))
@@ -243,7 +245,7 @@ func TestOneCounterRead(t *testing.T) {
 	listener, err := net.Listen("tcp", "127.0.0.1:0")
 	require.NoError(t, err)

-	plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String())
+	plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t)

 	var acc testutil.Accumulator
 	require.NoError(t, plugin.Gather(&acc))
@@ -331,7 +333,7 @@ func TestMultipleEntitiesSingleCounterRead(t *testing.T) {
 	listener, err := net.Listen("tcp", "127.0.0.1:0")
 	require.NoError(t, err)

-	plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String())
+	plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t)

 	var acc testutil.Accumulator
 	require.NoError(t, plugin.Gather(&acc))
@@ -423,7 +425,7 @@ func TestSingleEntitiesMultipleCounterRead(t *testing.T) {
 	listener, err := net.Listen("tcp", "127.0.0.1:0")
 	require.NoError(t, err)

-	plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String())
+	plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t)

 	var acc testutil.Accumulator
 	require.NoError(t, plugin.Gather(&acc))
@@ -455,7 +457,7 @@ func TestNoCountersAvailable(t *testing.T) {
 	listener, err := net.Listen("tcp", "127.0.0.1:0")
 	require.NoError(t, err)

-	plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String())
+	plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t)

 	var acc testutil.Accumulator
 	require.NoError(t, plugin.Gather(&acc))
@@ -482,7 +484,7 @@ func TestFilterCounters(t *testing.T) {
 	listener, err := net.Listen("tcp", "127.0.0.1:0")
 	require.NoError(t, err)

-	plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String())
+	plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t)

 	plugin.CounterNamesInclude = []string{"oof"}

@@ -532,7 +534,7 @@ func TestFailReadCounterEntryFromEntry(t *testing.T) {
 	listener, err := net.Listen("tcp", "127.0.0.1:0")
 	require.NoError(t, err)

-	plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String())
+	plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t)

 	var acc testutil.Accumulator
 	require.NoError(t, plugin.Gather(&acc))
@@ -575,7 +577,7 @@ func TestFailReadAllEntries(t *testing.T) {
 	listener, err := net.Listen("tcp", "127.0.0.1:0")
 	require.NoError(t, err)

-	plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String())
+	plugin := NewTestP4RuntimeClient(p4RtClient, listener.Addr().String(), t)

 	var acc testutil.Accumulator
 	require.NoError(t, plugin.Gather(&acc))
@@ -112,8 +112,10 @@ func (r *response) WriteHeader(code int) {
 	}

 	fmt.Fprintf(r.w, "Status: %d %s\r\n", code, http.StatusText(code))
-	_ = r.header.Write(r.w)
-	_, _ = r.w.WriteString("\r\n")
+	//nolint:errcheck // unable to propagate
+	r.header.Write(r.w)
+	//nolint:errcheck // unable to propagate
+	r.w.WriteString("\r\n")
 }

 func (r *response) Flush() {
@@ -158,7 +158,10 @@ func (p *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error {
 		return fmt.Errorf("url does not follow required 'address:port' format: %s", u.Host)
 	}
 	fcgiIP := socketAddr[0]
-	fcgiPort, _ := strconv.Atoi(socketAddr[1])
+	fcgiPort, err := strconv.Atoi(socketAddr[1])
+	if err != nil {
+		return fmt.Errorf("unable to parse server port %q: %w", socketAddr[1], err)
+	}
 	fcgi, err = newFcgiClient(time.Duration(p.Timeout), fcgiIP, fcgiPort)
 	if err != nil {
 		return err
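The hunk above turns a silently ignored strconv.Atoi failure into a wrapped, descriptive error. A standalone sketch of the same split-and-parse flow, assuming the standard library's net.SplitHostPort (the parseHostPort helper is illustrative, not Telegraf code):

package main

import (
	"fmt"
	"net"
	"strconv"
)

// parseHostPort splits "host:port" and converts the port, returning a
// descriptive error instead of silently falling back to port 0.
func parseHostPort(hostport string) (string, int, error) {
	host, portStr, err := net.SplitHostPort(hostport)
	if err != nil {
		return "", 0, fmt.Errorf("url does not follow required 'address:port' format: %s", hostport)
	}
	port, err := strconv.Atoi(portStr)
	if err != nil {
		return "", 0, fmt.Errorf("unable to parse server port %q: %w", portStr, err)
	}
	return host, port, nil
}

func main() {
	host, port, err := parseHostPort("127.0.0.1:9000")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(host, port)
}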
@@ -24,7 +24,7 @@ func (s statServer) serverSocket(l net.Listener) {

 	go func(c net.Conn) {
 		buf := make([]byte, 1024)
-		n, _ := c.Read(buf)
+		n, _ := c.Read(buf) //nolint:errcheck // ignore the returned error as we need to close the socket anyway

 		data := buf[:n]
 		if string(data) == "show * \n" {
@@ -236,7 +236,7 @@ func (p *Procstat) gatherOld(acc telegraf.Accumulator) error {
 		// We've found a process that was not recorded before so add it
 		// to the list of processes

-		// Assumption: if a process has no name, it probably does not exist
+		//nolint:errcheck // Assumption: if a process has no name, it probably does not exist
 		if name, _ := proc.Name(); name == "" {
 			continue
 		}
@@ -327,7 +327,7 @@ func (p *Procstat) gatherNew(acc telegraf.Accumulator) error {
 		pid := PID(gp.Pid)
 		proc, found := p.processes[pid]
 		if !found {
-			// Assumption: if a process has no name, it probably does not exist
+			//nolint:errcheck // Assumption: if a process has no name, it probably does not exist
 			if name, _ := gp.Name(); name == "" {
 				continue
 			}
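Both procstat hunks above convert a plain justification comment into a //nolint:errcheck directive that keeps the same justification after a second //, placed on its own line directly above the call it excuses, as the hunks throughout this change do. A minimal illustrative sketch of that directive style (the function is hypothetical):

package main

import (
	"fmt"
	"os"
)

func nodeName() string {
	// The text after the second "//" explains why the error is safe to drop.
	//nolint:errcheck // best-effort lookup, an empty hostname is acceptable here
	name, _ := os.Hostname()
	return name
}

func main() {
	fmt.Println(nodeName())
}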
@@ -156,7 +156,8 @@ func TestAddMultipleDuplicatePods(t *testing.T) {
 	p.Name = "Pod2"
 	registerPod(p, prom)

-	urls, _ := prom.GetAllURLs()
+	urls, err := prom.GetAllURLs()
+	require.NoError(t, err)
 	require.Len(t, urls, 1)
 }

@@ -179,7 +180,8 @@ func TestDeletePods(t *testing.T) {
 	p.Annotations = map[string]string{"prometheus.io/scrape": "true"}
 	registerPod(p, prom)

-	podID, _ := cache.MetaNamespaceKeyFunc(p)
+	podID, err := cache.MetaNamespaceKeyFunc(p)
+	require.NoError(t, err)
 	unregisterPod(PodID(podID), prom)
 	require.Empty(t, prom.kubernetesPods)
 }
@@ -191,7 +193,8 @@ func TestKeepDefaultNamespaceLabelName(t *testing.T) {
 	p.Annotations = map[string]string{"prometheus.io/scrape": "true"}
 	registerPod(p, prom)

-	podID, _ := cache.MetaNamespaceKeyFunc(p)
+	podID, err := cache.MetaNamespaceKeyFunc(p)
+	require.NoError(t, err)
 	tags := prom.kubernetesPods[PodID(podID)].Tags
 	require.Equal(t, "default", tags["namespace"])
 }
@@ -203,7 +206,8 @@ func TestChangeNamespaceLabelName(t *testing.T) {
 	p.Annotations = map[string]string{"prometheus.io/scrape": "true"}
 	registerPod(p, prom)

-	podID, _ := cache.MetaNamespaceKeyFunc(p)
+	podID, err := cache.MetaNamespaceKeyFunc(p)
+	require.NoError(t, err)
 	tags := prom.kubernetesPods[PodID(podID)].Tags
 	require.Equal(t, "default", tags["pod_namespace"])
 	require.Equal(t, "", tags["namespace"])
@@ -401,7 +401,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) (map[s
 		return nil, nil, fmt.Errorf("unable to create new request %q: %w", addr, err)
 	}

-	// ignore error because it's been handled before getting here
+	//nolint:errcheck // ignore error because it's been handled before getting here
 	tlsCfg, _ := p.HTTPClientConfig.TLSConfig()
 	uClient = &http.Client{
 		Transport: &http.Transport{
@@ -152,7 +152,8 @@ func TestPrometheusGeneratesMetricsWithHostNameTag(t *testing.T) {
 	err := p.Init()
 	require.NoError(t, err)

-	u, _ := url.Parse(ts.URL)
+	u, err := url.Parse(ts.URL)
+	require.NoError(t, err)
 	tsAddress := u.Hostname()

 	var acc testutil.Accumulator
@@ -524,7 +525,8 @@ func TestUnsupportedFieldSelector(t *testing.T) {
 	fieldSelectorString := "spec.containerName=container"
 	prom := &Prometheus{Log: testutil.Logger{}, KubernetesFieldSelector: fieldSelectorString}

-	fieldSelector, _ := fields.ParseSelector(prom.KubernetesFieldSelector)
+	fieldSelector, err := fields.ParseSelector(prom.KubernetesFieldSelector)
+	require.NoError(t, err)
 	isValid, invalidSelector := fieldSelectorIsSupported(fieldSelector)
 	require.False(t, isValid)
 	require.Equal(t, "spec.containerName", invalidSelector)
@@ -724,7 +726,8 @@ go_memstats_heap_alloc_bytes 1.581062048e+09
 `
 	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
 		w.Header().Add("Content-Type", "application/openmetrics-text;version=1.0.0")
-		_, _ = w.Write([]byte(data))
+		_, err := w.Write([]byte(data))
+		require.NoError(t, err)
 	}))
 	defer ts.Close()

@@ -772,7 +775,8 @@ func TestOpenmetricsProtobuf(t *testing.T) {

 	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
 		w.Header().Add("Content-Type", "application/openmetrics-protobuf;version=1.0.0")
-		_, _ = w.Write(data)
+		_, err := w.Write(data)
+		require.NoError(t, err)
 	}))
 	defer ts.Close()

@@ -830,7 +834,8 @@ go_memstats_heap_alloc_bytes 1.581062048e+09
 	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
 		// Provide a wrong version
 		w.Header().Add("Content-Type", "application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited")
-		_, _ = w.Write([]byte(data))
+		_, err := w.Write([]byte(data))
+		require.NoError(t, err)
 	}))
 	defer ts.Close()

@@ -38,6 +38,7 @@ func (px *Proxmox) Gather(acc telegraf.Accumulator) error {
 func (px *Proxmox) Init() error {
 	// Set hostname as default node name for backwards compatibility
 	if px.NodeName == "" {
+		//nolint:errcheck // best attempt setting of NodeName
 		hostname, _ := os.Hostname()
 		px.NodeName = hostname
 	}
@@ -36,10 +36,10 @@ writing: 200
 // Verify that raindrops tags are properly parsed based on the server
 func TestRaindropsTags(t *testing.T) {
 	urls := []string{"http://localhost/_raindrops", "http://localhost:80/_raindrops"}
-	var addr *url.URL
 	r := &Raindrops{}
 	for _, url1 := range urls {
-		addr, _ = url.Parse(url1)
+		addr, err := url.Parse(url1)
+		require.NoError(t, err)
 		tagMap := r.getTags(addr)
 		require.Contains(t, tagMap["server"], "localhost")
 	}
@@ -320,6 +320,7 @@ func parseDate(date string) (time.Time, error) {

 func init() {
 	inputs.Add("ras", func() telegraf.Input {
+		//nolint:errcheck // known timestamp
 		defaultTimestamp, _ := parseDate("1970-01-01 00:00:01 -0700")
 		return &Ras{
 			DBPath: defaultDbPath,
@@ -133,6 +133,7 @@ func TestEmptyDatabase(t *testing.T) {
 }

 func newRas() *Ras {
+	//nolint:errcheck // known timestamp
 	defaultTimestamp, _ := parseDate("1970-01-01 00:00:01 -0700")
 	return &Ras{
 		DBPath: defaultDbPath,
@@ -785,8 +785,10 @@ func coerceType(value interface{}, typ reflect.Type) reflect.Value {
 	case reflect.String:
 		// types match
 	case reflect.Int64:
+		//nolint:errcheck // no way to propagate, shouldn't panic
 		value, _ = strconv.ParseInt(value.(string), 10, 64)
 	case reflect.Float64:
+		//nolint:errcheck // no way to propagate, shouldn't panic
 		value, _ = strconv.ParseFloat(value.(string), 64)
 	default:
 		panic("unhandled destination type " + typ.Kind().String())
@@ -81,7 +81,10 @@ func (s *Server) getServerStatus() error {
 	if err != nil {
 		return fmt.Errorf("unable to determine provided hostname from %s", s.URL.Host)
 	}
-	driverPort, _ := strconv.Atoi(port)
+	driverPort, err := strconv.Atoi(port)
+	if err != nil {
+		return fmt.Errorf("unable to parse port from %s: %w", port, err)
+	}
 	for _, ss := range serverStatuses {
 		for _, address := range ss.Network.Addresses {
 			if address.Host == host && ss.Network.DriverPort == driverPort {
@@ -183,7 +183,7 @@ func (s *Salesforce) login() error {
 	}
 	defer resp.Body.Close()
 	if resp.StatusCode != http.StatusOK {
-		// ignore the err here; LimitReader returns io.EOF and we're not interested in read errors.
+		//nolint:errcheck // LimitReader returns io.EOF and we're not interested in read errors.
 		body, _ := io.ReadAll(io.LimitReader(resp.Body, 200))
 		return fmt.Errorf("%s returned HTTP status %s: %q", loginEndpoint, resp.Status, body)
 	}
@@ -15,7 +15,8 @@ import (
 func Test_Gather(t *testing.T) {
 	fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
 		w.Header().Add("Content-Type", "application/json")
-		_, _ = w.Write([]byte(testJSON))
+		_, err := w.Write([]byte(testJSON))
+		require.NoError(t, err)
 	}))
 	defer fakeServer.Close()

@@ -400,11 +400,13 @@ func (m *Smart) Init() error {

 	//if `path_smartctl` is not provided in config, try to find smartctl binary in PATH
 	if len(m.PathSmartctl) == 0 {
+		//nolint:errcheck // error handled later
 		m.PathSmartctl, _ = exec.LookPath("smartctl")
 	}

 	//if `path_nvme` is not provided in config, try to find nvme binary in PATH
 	if len(m.PathNVMe) == 0 {
+		//nolint:errcheck // error handled later
 		m.PathNVMe, _ = exec.LookPath("nvme")
 	}

@@ -68,7 +68,8 @@ func TestCases(t *testing.T) {
 			w.WriteHeader(http.StatusNotFound)
 			return
 		}
-		_, _ = w.Write(page)
+		_, err := w.Write(page)
+		require.NoError(t, err)
 	}))
 	require.NotNil(t, server)
 	defer server.Close()
@@ -767,6 +767,7 @@ func (s *Statsd) parseName(bucket string) (name string, field string, tags map[s

 	if err == nil {
 		p.DefaultTags = tags
+		//nolint:errcheck // unable to propagate
 		name, tags, field, _ = p.ApplyTemplate(name)
 	}

@@ -242,7 +242,9 @@ func TestCases(t *testing.T) {

 			// Create a fake sender
 			var client net.Conn
-			if srvTLS, _ := plugin.TLSConfig(); srvTLS != nil {
+			srvTLS, err := plugin.TLSConfig()
+			require.NoError(t, err)
+			if srvTLS != nil {
 				tlscfg, err := pki.TLSClientConfig().TLSConfig()
 				require.NoError(t, err)
 				tlscfg.ServerName = "localhost"
@@ -870,6 +870,7 @@ func BenchmarkAllUnitsIntegration(b *testing.B) {
 	b.Logf("produced %d metrics", acc.NMetrics())

 	for n := 0; n < b.N; n++ {
+		//nolint:errcheck // skip check in benchmarking
 		_ = plugin.Gather(acc)
 	}
 }
@@ -887,6 +888,7 @@ func BenchmarkAllLoadedUnitsIntegration(b *testing.B) {
 	b.Logf("produced %d metrics", acc.NMetrics())

 	for n := 0; n < b.N; n++ {
+		//nolint:errcheck // skip check in benchmarking
 		_ = plugin.Gather(acc)
 	}
 }
@@ -901,6 +903,7 @@ func (c *fakeClient) fixPropertyTypes() {
 	for unit, u := range c.units {
 		for k, value := range u.properties {
 			if strings.HasPrefix(k, "Memory") {
+				//nolint:errcheck // will cause issues later in tests
 				u.properties[k], _ = internal.ToUint64(value)
 			}
 		}
@@ -163,7 +163,8 @@ func TestTailDosLineEndings(t *testing.T) {

 func TestGrokParseLogFilesWithMultiline(t *testing.T) {
 	//we make sure the timeout won't kick in
-	d, _ := time.ParseDuration("100s")
+	d, err := time.ParseDuration("100s")
+	require.NoError(t, err)
 	duration := config.Duration(d)
 	tt := NewTail()
 	tt.Log = testutil.Logger{}
@@ -177,7 +178,7 @@ func TestGrokParseLogFilesWithMultiline(t *testing.T) {
 	}
 	tt.SetParserFunc(createGrokParser)

-	err := tt.Init()
+	err = tt.Init()
 	require.NoError(t, err)

 	acc := testutil.Accumulator{}
@@ -307,6 +307,7 @@ func sensorsTemperaturesOld(syspath string) ([]host.TemperatureStat, error) {

 		// Get the label of the temperature you are reading
 		var label string
+		//nolint:errcheck // skip on error
 		c, _ := os.ReadFile(filepath.Join(filepath.Dir(file), filename[0]+"_label"))
 		if c != nil {
 			//format the label from "Core 0" to "core0_"
@@ -18,9 +18,9 @@ const tengineSampleResponse = `127.0.0.1,784,1511,2,2,1,0,1,0,0,0,0,0,0,1,0,0,0,
 // Verify that tengine tags are properly parsed based on the server
 func TestTengineTags(t *testing.T) {
 	urls := []string{"http://localhost/us", "http://localhost:80/us"}
-	var addr *url.URL
 	for _, url1 := range urls {
-		addr, _ = url.Parse(url1)
+		addr, err := url.Parse(url1)
+		require.NoError(t, err)
 		tagMap := getTags(addr, "127.0.0.1")
 		require.Contains(t, tagMap["server"], "localhost")
 	}
@@ -66,7 +66,10 @@ func mockTwemproxyServer() (net.Listener, error) {
 		return nil, err
 	}
 	go func(l net.Listener) {
-		conn, _ := l.Accept()
+		conn, err := l.Accept()
+		if err != nil {
+			return
+		}
 		if _, err := conn.Write([]byte(sampleStats)); err != nil {
 			return
 		}
@@ -203,7 +203,10 @@ func (s *mockServer) listen(ctx context.Context) (*net.TCPAddr, error) {
 			return
 		}
 		defer conn.Close()
-		_ = conn.SetReadDeadline(time.Now().Add(time.Minute))
+		err = conn.SetReadDeadline(time.Now().Add(time.Minute))
+		if err != nil {
+			return
+		}

 		in := make([]byte, 128)
 		for _, interaction := range s.protocol {
@@ -113,7 +113,8 @@ func TestBasic(t *testing.T) {

 	fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		if r.URL.Path == "/" {
-			_, _ = w.Write([]byte(js))
+			_, err := w.Write([]byte(js))
+			require.NoError(t, err)
 		} else {
 			w.WriteHeader(http.StatusNotFound)
 		}
@@ -144,7 +145,8 @@ func TestInvalidJSON(t *testing.T) {

 	fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		if r.URL.Path == "/" {
-			_, _ = w.Write([]byte(js))
+			_, err := w.Write([]byte(js))
+			require.NoError(t, err)
 		} else {
 			w.WriteHeader(http.StatusNotFound)
 		}
@@ -158,7 +158,8 @@ func TestRedirect(t *testing.T) {
 			http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)
 		case "/custom/metrics":
 			w.WriteHeader(http.StatusOK)
-			_, _ = w.Write(response)
+			_, err := w.Write(response)
+			require.NoError(t, err)
 		}
 	}))
 	defer server.Close()
@@ -6,13 +6,16 @@ import (
 	"strings"
 	"testing"

+	"github.com/stretchr/testify/require"
+
 	"github.com/influxdata/telegraf/testutil"
 )

-func ArtifactoryWebhookRequest(domain string, event string, jsonString string, t *testing.T) {
+func ArtifactoryWebhookRequest(t *testing.T, domain string, event string, jsonString string) {
 	var acc testutil.Accumulator
 	awh := &ArtifactoryWebhook{Path: "/artifactory", acc: &acc, log: testutil.Logger{}}
-	req, _ := http.NewRequest("POST", "/artifactory", strings.NewReader(jsonString))
+	req, err := http.NewRequest("POST", "/artifactory", strings.NewReader(jsonString))
+	require.NoError(t, err)
 	w := httptest.NewRecorder()
 	awh.eventHandler(w, req)
 	if w.Code != http.StatusOK {
@@ -23,7 +26,8 @@ func ArtifactoryWebhookRequest(domain string, event string, jsonString string, t
 func ArtifactoryWebhookRequestWithSignature(event string, jsonString string, t *testing.T, signature string, expectedStatus int) {
 	var acc testutil.Accumulator
 	awh := &ArtifactoryWebhook{Path: "/artifactory", acc: &acc, log: testutil.Logger{}}
-	req, _ := http.NewRequest("POST", "/artifactory", strings.NewReader(jsonString))
+	req, err := http.NewRequest("POST", "/artifactory", strings.NewReader(jsonString))
+	require.NoError(t, err)
 	req.Header.Add("x-jfrog-event-auth", signature)
 	w := httptest.NewRecorder()
 	awh.eventHandler(w, req)
@@ -35,7 +39,8 @@ func ArtifactoryWebhookRequestWithSignature(event string, jsonString string, t *
 func TestUnsupportedEvent(t *testing.T) {
 	var acc testutil.Accumulator
 	awh := &ArtifactoryWebhook{Path: "/artifactory", acc: &acc, log: testutil.Logger{}}
-	req, _ := http.NewRequest("POST", "/artifactory", strings.NewReader(UnsupportedEventJSON()))
+	req, err := http.NewRequest("POST", "/artifactory", strings.NewReader(UnsupportedEventJSON()))
+	require.NoError(t, err)
 	w := httptest.NewRecorder()
 	awh.eventHandler(w, req)
 	if w.Code != http.StatusBadRequest {
@@ -44,95 +49,95 @@ func TestUnsupportedEvent(t *testing.T) {
 }

 func TestArtifactDeployedEvent(t *testing.T) {
-	ArtifactoryWebhookRequest("artifact", "deployed", ArtifactDeployedEventJSON(), t)
+	ArtifactoryWebhookRequest(t, "artifact", "deployed", ArtifactDeployedEventJSON())
 }

 func TestArtifactDeleted(t *testing.T) {
-	ArtifactoryWebhookRequest("artifact", "deleted", ArtifactDeletedEventJSON(), t)
+	ArtifactoryWebhookRequest(t, "artifact", "deleted", ArtifactDeletedEventJSON())
 }

 func TestArtifactMovedEvent(t *testing.T) {
-	ArtifactoryWebhookRequest("artifact", "moved", ArtifactMovedEventJSON(), t)
+	ArtifactoryWebhookRequest(t, "artifact", "moved", ArtifactMovedEventJSON())
 }

 func TestArtifactCopiedEvent(t *testing.T) {
-	ArtifactoryWebhookRequest("artifact", "copied", ArtifactCopiedEventJSON(), t)
+	ArtifactoryWebhookRequest(t, "artifact", "copied", ArtifactCopiedEventJSON())
 }

 func TestArtifactPropertiesAddedEvent(t *testing.T) {
-	ArtifactoryWebhookRequest("artifact_property", "added", ArtifactPropertiesAddedEventJSON(), t)
+	ArtifactoryWebhookRequest(t, "artifact_property", "added", ArtifactPropertiesAddedEventJSON())
 }

 func TestArtifactPropertiesDeletedEvent(t *testing.T) {
-	ArtifactoryWebhookRequest("artifact_property", "deleted", ArtifactPropertiesDeletedEventJSON(), t)
+	ArtifactoryWebhookRequest(t, "artifact_property", "deleted", ArtifactPropertiesDeletedEventJSON())
 }

 func TestDockerPushedEvent(t *testing.T) {
-	ArtifactoryWebhookRequest("docker", "pushed", DockerPushedEventJSON(), t)
+	ArtifactoryWebhookRequest(t, "docker", "pushed", DockerPushedEventJSON())
 }

 func TestDockerDeletedEvent(t *testing.T) {
-	ArtifactoryWebhookRequest("docker", "deleted", DockerDeletedEventJSON(), t)
+	ArtifactoryWebhookRequest(t, "docker", "deleted", DockerDeletedEventJSON())
 }

 func TestDockerPromotedEvent(t *testing.T) {
-	ArtifactoryWebhookRequest("docker", "promoted", DockerPromotedEventJSON(), t)
+	ArtifactoryWebhookRequest(t, "docker", "promoted", DockerPromotedEventJSON())
 }

 func TestBuildUploadedEvent(t *testing.T) {
-	ArtifactoryWebhookRequest("build", "uploaded", BuildUploadedEventJSON(), t)
+	ArtifactoryWebhookRequest(t, "build", "uploaded", BuildUploadedEventJSON())
 }

 func TestBuildDeletedEvent(t *testing.T) {
-	ArtifactoryWebhookRequest("build", "deleted", BuildDeletedEventJSON(), t)
+	ArtifactoryWebhookRequest(t, "build", "deleted", BuildDeletedEventJSON())
 }

 func TestBuildPromotedEvent(t *testing.T) {
-	ArtifactoryWebhookRequest("build", "promoted", BuildPromotedEventJSON(), t)
+	ArtifactoryWebhookRequest(t, "build", "promoted", BuildPromotedEventJSON())
 }

 func TestReleaseBundleCreatedEvent(t *testing.T) {
-	ArtifactoryWebhookRequest("release_bundle", "created", ReleaseBundleCreatedEventJSON(), t)
+	ArtifactoryWebhookRequest(t, "release_bundle", "created", ReleaseBundleCreatedEventJSON())
 }

 func TestReleaseBundleSignedEvent(t *testing.T) {
-	ArtifactoryWebhookRequest("release_bundle", "signed", ReleaseBundleSignedEventJSON(), t)
+	ArtifactoryWebhookRequest(t, "release_bundle", "signed", ReleaseBundleSignedEventJSON())
 }

 func TestReleaseBundleDeletedEvent(t *testing.T) {
-	ArtifactoryWebhookRequest("release_bundle", "deleted", ReleaseBundleDeletedEventJSON(), t)
+	ArtifactoryWebhookRequest(t, "release_bundle", "deleted", ReleaseBundleDeletedEventJSON())
 }

 func TestDistributionStartedEvent(t *testing.T) {
-	ArtifactoryWebhookRequest("distribution", "distribute_started", DistributionStartedEventJSON(), t)
+	ArtifactoryWebhookRequest(t, "distribution", "distribute_started", DistributionStartedEventJSON())
 }

 func TestDistributionCompletedEvent(t *testing.T) {
-	ArtifactoryWebhookRequest("distribution", "distribute_started", DistributionCompletedEventJSON(), t)
+	ArtifactoryWebhookRequest(t, "distribution", "distribute_started", DistributionCompletedEventJSON())
 }

 func TestDistributionAbortedEvent(t *testing.T) {
-	ArtifactoryWebhookRequest("distribution", "distribute_aborted", DistributionAbortedEventJSON(), t)
+	ArtifactoryWebhookRequest(t, "distribution", "distribute_aborted", DistributionAbortedEventJSON())
 }

 func TestDistributionFailedEvent(t *testing.T) {
-	ArtifactoryWebhookRequest("distribution", "distribute_failed", DistributionFailedEventJSON(), t)
+	ArtifactoryWebhookRequest(t, "distribution", "distribute_failed", DistributionFailedEventJSON())
 }

 func TestDestinationReceivedEvent(t *testing.T) {
-	ArtifactoryWebhookRequest("destination", "received", DestinationReceivedEventJSON(), t)
+	ArtifactoryWebhookRequest(t, "destination", "received", DestinationReceivedEventJSON())
 }

 func TestDestinationDeletedStartedEvent(t *testing.T) {
-	ArtifactoryWebhookRequest("destination", "delete_started", DestinationDeleteStartedEventJSON(), t)
+	ArtifactoryWebhookRequest(t, "destination", "delete_started", DestinationDeleteStartedEventJSON())
 }

 func TestDestinationDeletedCompletedEvent(t *testing.T) {
-	ArtifactoryWebhookRequest("destination", "delete_completed", DestinationDeleteCompletedEventJSON(), t)
+	ArtifactoryWebhookRequest(t, "destination", "delete_completed", DestinationDeleteCompletedEventJSON())
 }

 func TestDestinationDeleteFailedEvent(t *testing.T) {
-	ArtifactoryWebhookRequest("destination", "delete_failed", DestinationDeleteFailedEventJSON(), t)
+	ArtifactoryWebhookRequest(t, "destination", "delete_failed", DestinationDeleteFailedEventJSON())
 }

 func TestEventWithSignatureSuccess(t *testing.T) {
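The artifactory hunks above move *testing.T to the front of the shared request helper and fail the test on request-construction errors instead of dropping them. A self-contained sketch of that helper shape (names and paths are illustrative, not the Telegraf webhook plugins):

package demo

import (
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"

	"github.com/stretchr/testify/require"
)

// postJSON builds the request, failing the calling test immediately if the
// request cannot be constructed, and returns the recorded response.
func postJSON(t *testing.T, handler http.HandlerFunc, path, body string) *httptest.ResponseRecorder {
	req, err := http.NewRequest("POST", path, strings.NewReader(body))
	require.NoError(t, err)
	w := httptest.NewRecorder()
	handler(w, req)
	return w
}

func TestPostJSON(t *testing.T) {
	ok := func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) }
	resp := postJSON(t, ok, "/webhook", `{"event":"ping"}`)
	require.Equal(t, http.StatusOK, resp.Code)
}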
@@ -13,8 +13,9 @@ import (
 	"github.com/influxdata/telegraf/testutil"
 )

-func postWebhooks(md *FilestackWebhook, eventBodyFile io.Reader) *httptest.ResponseRecorder {
-	req, _ := http.NewRequest("POST", "/filestack", eventBodyFile)
+func postWebhooks(t *testing.T, md *FilestackWebhook, eventBodyFile io.Reader) *httptest.ResponseRecorder {
+	req, err := http.NewRequest("POST", "/filestack", eventBodyFile)
+	require.NoError(t, err)
 	w := httptest.NewRecorder()

 	md.eventHandler(w, req)
@@ -25,7 +26,7 @@ func postWebhooks(md *FilestackWebhook, eventBodyFile io.Reader) *httptest.Respo
 func TestDialogEvent(t *testing.T) {
 	var acc testutil.Accumulator
 	fs := &FilestackWebhook{Path: "/filestack", acc: &acc}
-	resp := postWebhooks(fs, getFile(t, "testdata/dialog_open.json"))
+	resp := postWebhooks(t, fs, getFile(t, "testdata/dialog_open.json"))
 	if resp.Code != http.StatusOK {
 		t.Errorf("POST returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK)
 	}
@@ -43,7 +44,7 @@ func TestDialogEvent(t *testing.T) {

 func TestParseError(t *testing.T) {
 	fs := &FilestackWebhook{Path: "/filestack"}
-	resp := postWebhooks(fs, strings.NewReader(""))
+	resp := postWebhooks(t, fs, strings.NewReader(""))
 	if resp.Code != http.StatusBadRequest {
 		t.Errorf("POST returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusBadRequest)
 	}
@@ -52,7 +53,7 @@ func TestParseError(t *testing.T) {
 func TestUploadEvent(t *testing.T) {
 	var acc testutil.Accumulator
 	fs := &FilestackWebhook{Path: "/filestack", acc: &acc}
-	resp := postWebhooks(fs, getFile(t, "testdata/upload.json"))
+	resp := postWebhooks(t, fs, getFile(t, "testdata/upload.json"))
 	if resp.Code != http.StatusOK {
 		t.Errorf("POST returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK)
 	}
@@ -71,7 +72,7 @@ func TestUploadEvent(t *testing.T) {
 func TestVideoConversionEvent(t *testing.T) {
 	var acc testutil.Accumulator
 	fs := &FilestackWebhook{Path: "/filestack", acc: &acc}
-	resp := postWebhooks(fs, getFile(t, "testdata/video_conversion.json"))
+	resp := postWebhooks(t, fs, getFile(t, "testdata/video_conversion.json"))
 	if resp.Code != http.StatusBadRequest {
 		t.Errorf("POST returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusBadRequest)
 	}
@@ -6,13 +6,16 @@ import (
 	"strings"
 	"testing"

+	"github.com/stretchr/testify/require"
+
 	"github.com/influxdata/telegraf/testutil"
 )

-func GithubWebhookRequest(event string, jsonString string, t *testing.T) {
+func GithubWebhookRequest(t *testing.T, event string, jsonString string) {
 	var acc testutil.Accumulator
 	gh := &GithubWebhook{Path: "/github", acc: &acc, log: testutil.Logger{}}
-	req, _ := http.NewRequest("POST", "/github", strings.NewReader(jsonString))
+	req, err := http.NewRequest("POST", "/github", strings.NewReader(jsonString))
+	require.NoError(t, err)
 	req.Header.Add("X-Github-Event", event)
 	w := httptest.NewRecorder()
 	gh.eventHandler(w, req)
@@ -24,7 +27,8 @@ func GithubWebhookRequest(event string, jsonString string, t *testing.T) {
 func GithubWebhookRequestWithSignature(event string, jsonString string, t *testing.T, signature string, expectedStatus int) {
 	var acc testutil.Accumulator
 	gh := &GithubWebhook{Path: "/github", Secret: "signature", acc: &acc, log: testutil.Logger{}}
-	req, _ := http.NewRequest("POST", "/github", strings.NewReader(jsonString))
+	req, err := http.NewRequest("POST", "/github", strings.NewReader(jsonString))
+	require.NoError(t, err)
 	req.Header.Add("X-Github-Event", event)
 	req.Header.Add("X-Hub-Signature", signature)
 	w := httptest.NewRecorder()
@@ -35,83 +39,83 @@ func GithubWebhookRequestWithSignature(event string, jsonString string, t *testi
 }

 func TestCommitCommentEvent(t *testing.T) {
-	GithubWebhookRequest("commit_comment", CommitCommentEventJSON(), t)
+	GithubWebhookRequest(t, "commit_comment", CommitCommentEventJSON())
 }

 func TestPingEvent(t *testing.T) {
-	GithubWebhookRequest("ping", "", t)
+	GithubWebhookRequest(t, "ping", "")
 }

 func TestDeleteEvent(t *testing.T) {
-	GithubWebhookRequest("delete", DeleteEventJSON(), t)
+	GithubWebhookRequest(t, "delete", DeleteEventJSON())
 }

 func TestDeploymentEvent(t *testing.T) {
-	GithubWebhookRequest("deployment", DeploymentEventJSON(), t)
+	GithubWebhookRequest(t, "deployment", DeploymentEventJSON())
 }

 func TestDeploymentStatusEvent(t *testing.T) {
-	GithubWebhookRequest("deployment_status", DeploymentStatusEventJSON(), t)
+	GithubWebhookRequest(t, "deployment_status", DeploymentStatusEventJSON())
 }

 func TestForkEvent(t *testing.T) {
-	GithubWebhookRequest("fork", ForkEventJSON(), t)
+	GithubWebhookRequest(t, "fork", ForkEventJSON())
 }

 func TestGollumEvent(t *testing.T) {
-	GithubWebhookRequest("gollum", GollumEventJSON(), t)
+	GithubWebhookRequest(t, "gollum", GollumEventJSON())
 }

 func TestIssueCommentEvent(t *testing.T) {
-	GithubWebhookRequest("issue_comment", IssueCommentEventJSON(), t)
+	GithubWebhookRequest(t, "issue_comment", IssueCommentEventJSON())
 }

 func TestIssuesEvent(t *testing.T) {
-	GithubWebhookRequest("issues", IssuesEventJSON(), t)
+	GithubWebhookRequest(t, "issues", IssuesEventJSON())
 }

 func TestMemberEvent(t *testing.T) {
-	GithubWebhookRequest("member", MemberEventJSON(), t)
+	GithubWebhookRequest(t, "member", MemberEventJSON())
 }

 func TestMembershipEvent(t *testing.T) {
-	GithubWebhookRequest("membership", MembershipEventJSON(), t)
+	GithubWebhookRequest(t, "membership", MembershipEventJSON())
 }

 func TestPageBuildEvent(t *testing.T) {
-	GithubWebhookRequest("page_build", PageBuildEventJSON(), t)
+	GithubWebhookRequest(t, "page_build", PageBuildEventJSON())
 }

 func TestPublicEvent(t *testing.T) {
-	GithubWebhookRequest("public", PublicEventJSON(), t)
+	GithubWebhookRequest(t, "public", PublicEventJSON())
 }

 func TestPullRequestReviewCommentEvent(t *testing.T) {
-	GithubWebhookRequest("pull_request_review_comment", PullRequestReviewCommentEventJSON(), t)
+	GithubWebhookRequest(t, "pull_request_review_comment", PullRequestReviewCommentEventJSON())
 }

 func TestPushEvent(t *testing.T) {
-	GithubWebhookRequest("push", PushEventJSON(), t)
+	GithubWebhookRequest(t, "push", PushEventJSON())
 }

 func TestReleaseEvent(t *testing.T) {
-	GithubWebhookRequest("release", ReleaseEventJSON(), t)
+	GithubWebhookRequest(t, "release", ReleaseEventJSON())
 }

 func TestRepositoryEvent(t *testing.T) {
-	GithubWebhookRequest("repository", RepositoryEventJSON(), t)
+	GithubWebhookRequest(t, "repository", RepositoryEventJSON())
 }

 func TestStatusEvent(t *testing.T) {
-	GithubWebhookRequest("status", StatusEventJSON(), t)
+	GithubWebhookRequest(t, "status", StatusEventJSON())
 }

 func TestTeamAddEvent(t *testing.T) {
-	GithubWebhookRequest("team_add", TeamAddEventJSON(), t)
+	GithubWebhookRequest(t, "team_add", TeamAddEventJSON())
 }

 func TestWatchEvent(t *testing.T) {
-	GithubWebhookRequest("watch", WatchEventJSON(), t)
+	GithubWebhookRequest(t, "watch", WatchEventJSON())
 }

 func TestEventWithSignatureFail(t *testing.T) {
@@ -13,10 +13,11 @@ import (
 	"github.com/influxdata/telegraf/testutil"
 )

-func postWebhooks(md *MandrillWebhook, eventBody string) *httptest.ResponseRecorder {
+func postWebhooks(t *testing.T, md *MandrillWebhook, eventBody string) *httptest.ResponseRecorder {
 	body := url.Values{}
 	body.Set("mandrill_events", eventBody)
-	req, _ := http.NewRequest("POST", "/mandrill", strings.NewReader(body.Encode()))
+	req, err := http.NewRequest("POST", "/mandrill", strings.NewReader(body.Encode()))
+	require.NoError(t, err)
 	w := httptest.NewRecorder()

 	md.eventHandler(w, req)
@@ -24,8 +25,9 @@ func postWebhooks(md *MandrillWebhook, eventBody string) *httptest.ResponseRecor
 	return w
 }

-func headRequest(md *MandrillWebhook) *httptest.ResponseRecorder {
-	req, _ := http.NewRequest("HEAD", "/mandrill", strings.NewReader(""))
+func headRequest(md *MandrillWebhook, t *testing.T) *httptest.ResponseRecorder {
+	req, err := http.NewRequest("HEAD", "/mandrill", strings.NewReader(""))
+	require.NoError(t, err)
 	w := httptest.NewRecorder()

 	md.returnOK(w, req)
@@ -35,7 +37,7 @@ func headRequest(md *MandrillWebhook) *httptest.ResponseRecorder {

 func TestHead(t *testing.T) {
 	md := &MandrillWebhook{Path: "/mandrill"}
-	resp := headRequest(md)
+	resp := headRequest(md, t)
 	if resp.Code != http.StatusOK {
 		t.Errorf("HEAD returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK)
 	}
@@ -44,7 +46,7 @@ func TestHead(t *testing.T) {
 func TestSendEvent(t *testing.T) {
 	var acc testutil.Accumulator
 	md := &MandrillWebhook{Path: "/mandrill", acc: &acc}
-	resp := postWebhooks(md, "["+readFile(t, "testdata/send_event.json")+"]")
+	resp := postWebhooks(t, md, "["+readFile(t, "testdata/send_event.json")+"]")
 	if resp.Code != http.StatusOK {
 		t.Errorf("POST send returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK)
 	}
@@ -63,7 +65,7 @@ func TestSendEvent(t *testing.T) {
 func TestMultipleEvents(t *testing.T) {
 	var acc testutil.Accumulator
 	md := &MandrillWebhook{Path: "/mandrill", acc: &acc}
-	resp := postWebhooks(md, "["+readFile(t, "testdata/send_event.json")+","+readFile(t, "testdata/hard_bounce_event.json")+"]")
+	resp := postWebhooks(t, md, "["+readFile(t, "testdata/send_event.json")+","+readFile(t, "testdata/hard_bounce_event.json")+"]")
 	if resp.Code != http.StatusOK {
 		t.Errorf("POST send returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK)
 	}
@@ -15,8 +15,9 @@ const (
 	contentType = "application/x-www-form-urlencoded"
 )

-func post(pt *PapertrailWebhook, contentType string, body string) *httptest.ResponseRecorder {
-	req, _ := http.NewRequest("POST", "/", strings.NewReader(body))
+func post(t *testing.T, pt *PapertrailWebhook, contentType string, body string) *httptest.ResponseRecorder {
+	req, err := http.NewRequest("POST", "/", strings.NewReader(body))
+	require.NoError(t, err)
 	req.Header.Set("Content-Type", contentType)
 	w := httptest.NewRecorder()
 	pt.eventHandler(w, req)
@@ -30,7 +31,7 @@ func TestWrongContentType(t *testing.T) {
 	form.Set("payload", sampleEventPayload)
 	data := form.Encode()

-	resp := post(pt, "", data)
+	resp := post(t, pt, "", data)
 	require.Equal(t, http.StatusUnsupportedMediaType, resp.Code)
 }

@@ -38,7 +39,7 @@ func TestMissingPayload(t *testing.T) {
 	var acc testutil.Accumulator
 	pt := &PapertrailWebhook{Path: "/papertrail", acc: &acc}

-	resp := post(pt, contentType, "")
+	resp := post(t, pt, contentType, "")
 	require.Equal(t, http.StatusBadRequest, resp.Code)
 }

@@ -46,7 +47,7 @@ func TestPayloadNotJSON(t *testing.T) {
 	var acc testutil.Accumulator
 	pt := &PapertrailWebhook{Path: "/papertrail", acc: &acc}

-	resp := post(pt, contentType, "payload={asdf]")
+	resp := post(t, pt, contentType, "payload={asdf]")
 	require.Equal(t, http.StatusBadRequest, resp.Code)
 }

@@ -54,7 +55,7 @@ func TestPayloadInvalidJSON(t *testing.T) {
 	var acc testutil.Accumulator
 	pt := &PapertrailWebhook{Path: "/papertrail", acc: &acc}

-	resp := post(pt, contentType, `payload={"value": 42}`)
+	resp := post(t, pt, contentType, `payload={"value": 42}`)
 	require.Equal(t, http.StatusBadRequest, resp.Code)
 }

@@ -64,7 +65,7 @@ func TestEventPayload(t *testing.T) {

 	form := url.Values{}
 	form.Set("payload", sampleEventPayload)
-	resp := post(pt, contentType, form.Encode())
+	resp := post(t, pt, contentType, form.Encode())
 	require.Equal(t, http.StatusOK, resp.Code)

 	fields1 := map[string]interface{}{
@@ -113,7 +114,7 @@ func TestCountPayload(t *testing.T) {
 	pt := &PapertrailWebhook{Path: "/papertrail", acc: &acc}
 	form := url.Values{}
 	form.Set("payload", sampleCountPayload)
-	resp := post(pt, contentType, form.Encode())
+	resp := post(t, pt, contentType, form.Encode())
 	require.Equal(t, http.StatusOK, resp.Code)

 	fields1 := map[string]interface{}{
@@ -6,11 +6,14 @@ import (
 	"strings"
 	"testing"

+	"github.com/stretchr/testify/require"
+
 	"github.com/influxdata/telegraf/testutil"
 )

-func postWebhooks(rb *ParticleWebhook, eventBody string) *httptest.ResponseRecorder {
-	req, _ := http.NewRequest("POST", "/", strings.NewReader(eventBody))
+func postWebhooks(t *testing.T, rb *ParticleWebhook, eventBody string) *httptest.ResponseRecorder {
+	req, err := http.NewRequest("POST", "/", strings.NewReader(eventBody))
+	require.NoError(t, err)
 	w := httptest.NewRecorder()
 	w.Code = 500

@@ -23,7 +26,7 @@ func TestNewItem(t *testing.T) {
 	t.Parallel()
 	var acc testutil.Accumulator
 	rb := &ParticleWebhook{Path: "/particle", acc: &acc}
-	resp := postWebhooks(rb, NewItemJSON())
+	resp := postWebhooks(t, rb, NewItemJSON())
 	if resp.Code != http.StatusOK {
 		t.Errorf("POST new_item returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK)
 	}
@@ -51,7 +54,7 @@ func TestUnknowItem(t *testing.T) {
 	t.Parallel()
 	var acc testutil.Accumulator
 	rb := &ParticleWebhook{Path: "/particle", acc: &acc}
-	resp := postWebhooks(rb, UnknowJSON())
+	resp := postWebhooks(t, rb, UnknowJSON())
 	if resp.Code != http.StatusOK {
 		t.Errorf("POST unknown returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK)
 	}
@@ -61,7 +64,7 @@ func TestDefaultMeasurementName(t *testing.T) {
 	t.Parallel()
 	var acc testutil.Accumulator
 	rb := &ParticleWebhook{Path: "/particle", acc: &acc}
-	resp := postWebhooks(rb, BlankMeasurementJSON())
+	resp := postWebhooks(t, rb, BlankMeasurementJSON())
 	if resp.Code != http.StatusOK {
 		t.Errorf("POST new_item returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK)
 	}
@@ -6,11 +6,14 @@ import (
 	"strings"
 	"testing"

+	"github.com/stretchr/testify/require"
+
 	"github.com/influxdata/telegraf/testutil"
 )

-func postWebhooks(rb *RollbarWebhook, eventBody string) *httptest.ResponseRecorder {
-	req, _ := http.NewRequest("POST", "/", strings.NewReader(eventBody))
+func postWebhooks(t *testing.T, rb *RollbarWebhook, eventBody string) *httptest.ResponseRecorder {
+	req, err := http.NewRequest("POST", "/", strings.NewReader(eventBody))
+	require.NoError(t, err)
 	w := httptest.NewRecorder()
 	w.Code = 500

@@ -22,7 +25,7 @@ func postWebhooks(rb *RollbarWebhook, eventBody string) *httptest.ResponseRecord
 func TestNewItem(t *testing.T) {
 	var acc testutil.Accumulator
 	rb := &RollbarWebhook{Path: "/rollbar", acc: &acc}
-	resp := postWebhooks(rb, NewItemJSON())
+	resp := postWebhooks(t, rb, NewItemJSON())
 	if resp.Code != http.StatusOK {
 		t.Errorf("POST new_item returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK)
 	}
@@ -45,7 +48,7 @@ func TestNewItem(t *testing.T) {
 func TestOccurrence(t *testing.T) {
 	var acc testutil.Accumulator
 	rb := &RollbarWebhook{Path: "/rollbar", acc: &acc}
-	resp := postWebhooks(rb, OccurrenceJSON())
+	resp := postWebhooks(t, rb, OccurrenceJSON())
 	if resp.Code != http.StatusOK {
 		t.Errorf("POST occurrence returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK)
 	}
@@ -68,7 +71,7 @@ func TestOccurrence(t *testing.T) {
 func TestDeploy(t *testing.T) {
 	var acc testutil.Accumulator
 	rb := &RollbarWebhook{Path: "/rollbar", acc: &acc}
-	resp := postWebhooks(rb, DeployJSON())
+	resp := postWebhooks(t, rb, DeployJSON())
 	if resp.Code != http.StatusOK {
 		t.Errorf("POST deploy returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK)
 	}
@@ -88,7 +91,7 @@ func TestDeploy(t *testing.T) {

 func TestUnknowItem(t *testing.T) {
 	rb := &RollbarWebhook{Path: "/rollbar"}
-	resp := postWebhooks(rb, UnknowJSON())
+	resp := postWebhooks(t, rb, UnknowJSON())
 	if resp.Code != http.StatusOK {
 		t.Errorf("POST unknow returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK)
 	}