chore: Fix linter findings for `revive:max-public-structs` in `plugins/inputs/[a-n]*` (#15858)
This commit is contained in:
parent 5d996ac0a2
commit 46c056f49d
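For context, revive's `max-public-structs` rule caps how many exported struct types a single file may declare, and the diff below satisfies it by unexporting types that are only used inside their own plugin package. A minimal sketch of the pattern follows; the `widget` names are hypothetical and not taken from this commit — renaming a type to its lower-case form changes nothing for code in the same package:

```go
package widget

// Before (exported even though only this package used it):
//
//	type WidgetStats struct {
//		Size int `xml:"size,attr"`
//	}

// After: the same type, unexported. Struct tags, methods, and in-package
// callers keep working, because Go visibility only affects other packages.
type widgetStats struct {
	Size int `xml:"size,attr"`
}

// newWidgetStats shows that in-package construction is unchanged.
func newWidgetStats(size int) widgetStats {
	return widgetStats{Size: size}
}
```

The exported field names and their JSON/XML/TOML tags are left untouched throughout, so wire formats and configuration parsing are unaffected; only the type identifiers lose their capital letter.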
@@ -36,23 +36,23 @@ type ActiveMQ struct {
    baseURL *url.URL
}

-type Topics struct {
+type topics struct {
    XMLName xml.Name `xml:"topics"`
-    TopicItems []Topic `xml:"topic"`
+    TopicItems []topic `xml:"topic"`
}

-type Topic struct {
+type topic struct {
    XMLName xml.Name `xml:"topic"`
    Name string `xml:"name,attr"`
-    Stats Stats `xml:"stats"`
+    Stats stats `xml:"stats"`
}

-type Subscribers struct {
+type subscribers struct {
    XMLName xml.Name `xml:"subscribers"`
-    SubscriberItems []Subscriber `xml:"subscriber"`
+    SubscriberItems []subscriber `xml:"subscriber"`
}

-type Subscriber struct {
+type subscriber struct {
    XMLName xml.Name `xml:"subscriber"`
    ClientID string `xml:"clientId,attr"`
    SubscriptionName string `xml:"subscriptionName,attr"`
@@ -60,21 +60,21 @@ type Subscriber struct {
    DestinationName string `xml:"destinationName,attr"`
    Selector string `xml:"selector,attr"`
    Active string `xml:"active,attr"`
-    Stats Stats `xml:"stats"`
+    Stats stats `xml:"stats"`
}

-type Queues struct {
+type queues struct {
    XMLName xml.Name `xml:"queues"`
-    QueueItems []Queue `xml:"queue"`
+    QueueItems []queue `xml:"queue"`
}

-type Queue struct {
+type queue struct {
    XMLName xml.Name `xml:"queue"`
    Name string `xml:"name,attr"`
-    Stats Stats `xml:"stats"`
+    Stats stats `xml:"stats"`
}

-type Stats struct {
+type stats struct {
    XMLName xml.Name `xml:"stats"`
    Size int `xml:"size,attr"`
    ConsumerCount int `xml:"consumerCount,attr"`
@@ -161,7 +161,7 @@ func (a *ActiveMQ) GetMetrics(u string) ([]byte, error) {
    return io.ReadAll(resp.Body)
}

-func (a *ActiveMQ) GatherQueuesMetrics(acc telegraf.Accumulator, queues Queues) {
+func (a *ActiveMQ) GatherQueuesMetrics(acc telegraf.Accumulator, queues queues) {
    for _, queue := range queues.QueueItems {
        records := make(map[string]interface{})
        tags := make(map[string]string)
@@ -179,7 +179,7 @@ func (a *ActiveMQ) GatherQueuesMetrics(acc telegraf.Accumulator, queues Queues)
    }
}

-func (a *ActiveMQ) GatherTopicsMetrics(acc telegraf.Accumulator, topics Topics) {
+func (a *ActiveMQ) GatherTopicsMetrics(acc telegraf.Accumulator, topics topics) {
    for _, topic := range topics.TopicItems {
        records := make(map[string]interface{})
        tags := make(map[string]string)
@@ -197,7 +197,7 @@ func (a *ActiveMQ) GatherTopicsMetrics(acc telegraf.Accumulator, topics Topics)
    }
}

-func (a *ActiveMQ) GatherSubscribersMetrics(acc telegraf.Accumulator, subscribers Subscribers) {
+func (a *ActiveMQ) GatherSubscribersMetrics(acc telegraf.Accumulator, subscribers subscribers) {
    for _, subscriber := range subscribers.SubscriberItems {
        records := make(map[string]interface{})
        tags := make(map[string]string)
@@ -226,7 +226,7 @@ func (a *ActiveMQ) Gather(acc telegraf.Accumulator) error {
    if err != nil {
        return err
    }
-    queues := Queues{}
+    queues := queues{}
    err = xml.Unmarshal(dataQueues, &queues)
    if err != nil {
        return fmt.Errorf("queues XML unmarshal error: %w", err)
@@ -236,7 +236,7 @@ func (a *ActiveMQ) Gather(acc telegraf.Accumulator) error {
    if err != nil {
        return err
    }
-    topics := Topics{}
+    topics := topics{}
    err = xml.Unmarshal(dataTopics, &topics)
    if err != nil {
        return fmt.Errorf("topics XML unmarshal error: %w", err)
@@ -246,7 +246,7 @@ func (a *ActiveMQ) Gather(acc telegraf.Accumulator) error {
    if err != nil {
        return err
    }
-    subscribers := Subscribers{}
+    subscribers := subscribers{}
    err = xml.Unmarshal(dataSubscribers, &subscribers)
    if err != nil {
        return fmt.Errorf("subscribers XML unmarshal error: %w", err)

@@ -29,7 +29,7 @@ func TestGatherQueuesMetrics(t *testing.T) {
    </queue>
</queues>`

-    queues := Queues{}
+    queues := queues{}

    require.NoError(t, xml.Unmarshal([]byte(s), &queues))

@@ -75,7 +75,7 @@ func TestGatherTopicsMetrics(t *testing.T) {
    </topic>
</topics>`

-    topics := Topics{}
+    topics := topics{}

    require.NoError(t, xml.Unmarshal([]byte(s), &topics))

@@ -109,7 +109,7 @@ func TestGatherSubscribersMetrics(t *testing.T) {
    </subscriber>
</subscribers>`

-    subscribers := Subscribers{}
+    subscribers := subscribers{}
    require.NoError(t, xml.Unmarshal([]byte(s), &subscribers))

    records := make(map[string]interface{})

@@ -46,7 +46,7 @@ type CtrlXDataLayer struct {
    Password config.Secret `toml:"password"`

    Log telegraf.Logger `toml:"-"`
-    Subscription []Subscription
+    Subscription []subscription

    url string
    wg sync.WaitGroup
@@ -68,7 +68,7 @@ func convertTimestamp2UnixTime(t int64) time.Time {
}

// createSubscription uses the official 'ctrlX Data Layer API' to create the sse subscription.
-func (c *CtrlXDataLayer) createSubscription(sub *Subscription) (string, error) {
+func (c *CtrlXDataLayer) createSubscription(sub *subscription) (string, error) {
    sseURL := c.url + subscriptionPath

    id := "telegraf_" + uuid.New().String()
@@ -101,7 +101,7 @@ func (c *CtrlXDataLayer) createSubscription(sub *Subscription) (string, error) {

// createSubscriptionAndSseClient creates a sse subscription on the server and
// initializes a sse client to receive sse events from the server.
-func (c *CtrlXDataLayer) createSubscriptionAndSseClient(sub *Subscription) (*sseclient.SseClient, error) {
+func (c *CtrlXDataLayer) createSubscriptionAndSseClient(sub *subscription) (*sseclient.SseClient, error) {
    t, err := c.tokenManager.RequestAuthToken()
    if err != nil {
        return nil, err
@@ -118,7 +118,7 @@ func (c *CtrlXDataLayer) createSubscriptionAndSseClient(sub *Subscription) (*sse
}

// addMetric writes sse metric into accumulator.
-func (c *CtrlXDataLayer) addMetric(se *sseclient.SseEvent, sub *Subscription) {
+func (c *CtrlXDataLayer) addMetric(se *sseclient.SseEvent, sub *subscription) {
    switch se.Event {
    case "update":
        // Received an updated value, that we translate into a metric
@@ -152,7 +152,7 @@ func (c *CtrlXDataLayer) addMetric(se *sseclient.SseEvent, sub *Subscription) {
}

// createMetric - create metric depending on flag 'output_json' and data type
-func (c *CtrlXDataLayer) createMetric(em *sseEventData, sub *Subscription) (telegraf.Metric, error) {
+func (c *CtrlXDataLayer) createMetric(em *sseEventData, sub *subscription) (telegraf.Metric, error) {
    t := convertTimestamp2UnixTime(em.Timestamp)
    node := sub.node(em.Node)
    if node == nil {
@@ -314,7 +314,7 @@ func (c *CtrlXDataLayer) Start(acc telegraf.Accumulator) error {
func (c *CtrlXDataLayer) gatherLoop(ctx context.Context) {
    for _, sub := range c.Subscription {
        c.wg.Add(1)
-        go func(sub Subscription) {
+        go func(sub subscription) {
            defer c.wg.Done()
            for {
                select {

@@ -32,17 +32,17 @@ const (
    subscriptionPath = "/automation/api/v2/events"
)

-// Node contains all properties of a node configuration
-type Node struct {
+// node contains all properties of a node configuration
+type node struct {
    Name string `toml:"name"`
    Address string `toml:"address"`
    Tags map[string]string `toml:"tags"`
}

-// Subscription contains all properties of a subscription configuration
-type Subscription struct {
+// subscription contains all properties of a subscription configuration
+type subscription struct {
    index int
-    Nodes []Node `toml:"nodes"`
+    Nodes []node `toml:"nodes"`
    Tags map[string]string `toml:"tags"`
    Measurement string `toml:"measurement"`
    PublishInterval config.Duration `toml:"publish_interval"`
@@ -56,52 +56,52 @@ type Subscription struct {
    OutputJSONString bool `toml:"output_json_string"`
}

-// Rule can be used to override default rule settings.
-type Rule struct {
+// rule can be used to override default rule settings.
+type rule struct {
    RuleType string `json:"rule_type"`
    Rule interface{} `json:"rule"`
}

-// Sampling can be used to override default sampling settings.
-type Sampling struct {
+// sampling can be used to override default sampling settings.
+type sampling struct {
    SamplingInterval uint64 `json:"samplingInterval"`
}

-// Queueing can be used to override default queuing settings.
-type Queueing struct {
+// queueing can be used to override default queuing settings.
+type queueing struct {
    QueueSize uint `json:"queueSize"`
    Behaviour string `json:"behaviour"`
}

-// DataChangeFilter can be used to override default data change filter settings.
-type DataChangeFilter struct {
+// dataChangeFilter can be used to override default data change filter settings.
+type dataChangeFilter struct {
    DeadBandValue float64 `json:"deadBandValue"`
}

-// ChangeEvents can be used to override default change events settings.
-type ChangeEvents struct {
+// changeEvents can be used to override default change events settings.
+type changeEvents struct {
    ValueChange string `json:"valueChange"`
    BrowselistChange bool `json:"browselistChange"`
    MetadataChange bool `json:"metadataChange"`
}

-// SubscriptionProperties can be used to override default subscription settings.
-type SubscriptionProperties struct {
+// subscriptionProperties can be used to override default subscription settings.
+type subscriptionProperties struct {
    KeepaliveInterval int64 `json:"keepaliveInterval"`
-    Rules []Rule `json:"rules"`
+    Rules []rule `json:"rules"`
    ID string `json:"id"`
    PublishInterval int64 `json:"publishInterval"`
    ErrorInterval int64 `json:"errorInterval"`
}

-// SubscriptionRequest can be used to to create a sse subscription at the ctrlX Data Layer.
-type SubscriptionRequest struct {
-    Properties SubscriptionProperties `json:"properties"`
+// subscriptionRequest can be used to create a sse subscription at the ctrlX Data Layer.
+type subscriptionRequest struct {
+    Properties subscriptionProperties `json:"properties"`
    Nodes []string `json:"nodes"`
}

// applyDefaultSettings applies the default settings if they are not configured in the config file.
-func (s *Subscription) applyDefaultSettings() {
+func (s *subscription) applyDefaultSettings() {
    if s.Measurement == "" {
        s.Measurement = defaultMeasurementName
    }
@@ -130,14 +130,14 @@ func (s *Subscription) applyDefaultSettings() {

// createRequestBody builds the request body for the sse subscription, based on the subscription configuration.
// The request body can be send to the server to create a new subscription.
-func (s *Subscription) createRequest(id string) SubscriptionRequest {
-    pl := SubscriptionRequest{
-        Properties: SubscriptionProperties{
-            Rules: []Rule{
-                {"Sampling", Sampling{uint64(time.Duration(s.SamplingInterval).Microseconds())}},
-                {"Queueing", Queueing{s.QueueSize, s.QueueBehaviour}},
-                {"DataChangeFilter", DataChangeFilter{s.DeadBandValue}},
-                {"ChangeEvents", ChangeEvents{s.ValueChange, false, false}},
+func (s *subscription) createRequest(id string) subscriptionRequest {
+    pl := subscriptionRequest{
+        Properties: subscriptionProperties{
+            Rules: []rule{
+                {"Sampling", sampling{uint64(time.Duration(s.SamplingInterval).Microseconds())}},
+                {"Queueing", queueing{s.QueueSize, s.QueueBehaviour}},
+                {"DataChangeFilter", dataChangeFilter{s.DeadBandValue}},
+                {"ChangeEvents", changeEvents{s.ValueChange, false, false}},
            },
            ID: id,
            KeepaliveInterval: time.Duration(s.KeepaliveInterval).Milliseconds(),
@@ -151,7 +151,7 @@ func (s *Subscription) createRequest(id string) SubscriptionRequest {
}

// addressList lists all configured node addresses
-func (s *Subscription) addressList() []string {
+func (s *subscription) addressList() []string {
    addressList := []string{}
    for _, node := range s.Nodes {
        addressList = append(addressList, node.Address)
@@ -160,7 +160,7 @@ func (s *Subscription) addressList() []string {
}

// node finds the node according the node address
-func (s *Subscription) node(address string) *Node {
+func (s *subscription) node(address string) *node {
    for _, node := range s.Nodes {
        if address == node.Address {
            return &node
@@ -170,7 +170,7 @@ func (s *Subscription) node(address string) *Node {
}

// fieldKey determines the field key out of node name or address
-func (n *Node) fieldKey() string {
+func (n *node) fieldKey() string {
    if n.Name != "" {
        // return user defined node name as field key
        return n.Name

@@ -11,15 +11,15 @@ import (
func TestSubscription_createRequest(t *testing.T) {
    tests := []struct {
        name string
-        subscription Subscription
+        subscription subscription
        id string
-        wantBody SubscriptionRequest
+        wantBody subscriptionRequest
        wantErr bool
    }{
        {
            name: "Should_Return_Expected_Request",
-            subscription: Subscription{
-                Nodes: []Node{
+            subscription: subscription{
+                Nodes: []node{
                    {
                        Name: "node1",
                        Address: "path/to/node1",
@@ -44,32 +44,32 @@ func TestSubscription_createRequest(t *testing.T) {
                OutputJSONString: true,
            },
            id: "sub_id",
-            wantBody: SubscriptionRequest{
-                Properties: SubscriptionProperties{
+            wantBody: subscriptionRequest{
+                Properties: subscriptionProperties{
                    KeepaliveInterval: 10000,
-                    Rules: []Rule{
+                    Rules: []rule{
                        {
                            "Sampling",
-                            Sampling{
+                            sampling{
                                SamplingInterval: 100000,
                            },
                        },
                        {
                            "Queueing",
-                            Queueing{
+                            queueing{
                                QueueSize: 100,
                                Behaviour: "DiscardNewest",
                            },
                        },
                        {
                            "DataChangeFilter",
-                            DataChangeFilter{
+                            dataChangeFilter{
                                DeadBandValue: 1.12345,
                            },
                        },
                        {
                            "ChangeEvents",
-                            ChangeEvents{
+                            changeEvents{
                                ValueChange: "StatusValueTimestamp",
                            },
                        },
@@ -97,13 +97,13 @@ func TestSubscription_createRequest(t *testing.T) {
func TestSubscription_node(t *testing.T) {
    tests := []struct {
        name string
-        nodes []Node
+        nodes []node
        address string
-        want *Node
+        want *node
    }{
        {
            name: "Should_Return_Node_Of_Given_Address",
-            nodes: []Node{
+            nodes: []node{
                {
                    Name: "node1",
                    Address: "path/to/node1",
@@ -121,7 +121,7 @@ func TestSubscription_node(t *testing.T) {
                },
            },
            address: "path/to/node3",
-            want: &Node{
+            want: &node{
                Name: "",
                Address: "path/to/node3",
                Tags: map[string]string{},
@@ -129,7 +129,7 @@ func TestSubscription_node(t *testing.T) {
        },
        {
            name: "Should_Return_Nil_If_Node_With_Given_Address_Not_Found",
-            nodes: []Node{
+            nodes: []node{
                {
                    Name: "Node1",
                    Address: "path/to/node1",
@@ -153,7 +153,7 @@ func TestSubscription_node(t *testing.T) {

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
-            s := &Subscription{
+            s := &subscription{
                Nodes: tt.nodes,
            }
            require.Equal(t, tt.want, s.node(tt.address))
@@ -164,12 +164,12 @@ func TestSubscription_node(t *testing.T) {
func TestSubscription_addressList(t *testing.T) {
    tests := []struct {
        name string
-        nodes []Node
+        nodes []node
        want []string
    }{
        {
            name: "Should_Return_AddressArray_Of_All_Nodes",
-            nodes: []Node{
+            nodes: []node{
                {
                    Address: "framework/metrics/system/memused-mb",
                },
@@ -193,7 +193,7 @@ func TestSubscription_addressList(t *testing.T) {
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
-            s := &Subscription{
+            s := &subscription{
                Nodes: tt.nodes,
            }
            require.Equal(t, tt.want, s.addressList())
@@ -204,12 +204,12 @@ func TestSubscription_addressList(t *testing.T) {
func TestNode_fieldKey(t *testing.T) {
    tests := []struct {
        name string
-        node Node
+        node node
        want string
    }{
        {
            name: "Should_Return_Name_When_Name_Is_Not_Empty",
-            node: Node{
+            node: node{
                Name: "used",
                Address: "framework/metrics/system/memused-mb",
            },
@@ -217,7 +217,7 @@ func TestNode_fieldKey(t *testing.T) {
        },
        {
            name: "Should_Return_Address_Base_When_Name_Is_Empty_And_Address_Contains_Full_Path",
-            node: Node{
+            node: node{
                Name: "",
                Address: "framework/metrics/system/memused-mb",
            },
@@ -225,7 +225,7 @@ func TestNode_fieldKey(t *testing.T) {
        },
        {
            name: "Should_Return_Address_Base_Root_When_Name_Is_Empty_And_Address_Contains_Root_Path",
-            node: Node{
+            node: node{
                Name: "",
                Address: "root",
            },
@@ -233,7 +233,7 @@ func TestNode_fieldKey(t *testing.T) {
        },
        {
            name: "Should_Return_Empty_When_Name_and_Address_Are_Empty",
-            node: Node{
+            node: node{
                Name: "",
                Address: "",
            },

@@ -41,10 +41,10 @@ func TestCtrlXCreateSubscriptionBasic(t *testing.T) {
    }))
    defer server.Close()

-    subs := make([]Subscription, 0)
-    subs = append(subs, Subscription{
+    subs := make([]subscription, 0)
+    subs = append(subs, subscription{
        index: 0,
-        Nodes: []Node{
+        Nodes: []node{
            {Name: "counter", Address: "plc/app/Application/sym/PLC_PRG/counter"},
            {Name: "counterReverse", Address: "plc/app/Application/sym/PLC_PRG/counterReverse"},
        },
@@ -84,9 +84,9 @@ func TestCtrlXCreateSubscriptionDriven(t *testing.T) {
        require.NoError(t, err)
    }))
    defer server.Close()
-    subs := make([]Subscription, 0)
-    subs = append(subs, Subscription{
-        Nodes: []Node{
+    subs := make([]subscription, 0)
+    subs = append(subs, subscription{
+        Nodes: []node{
            {Name: "counter", Address: "plc/app/Application/sym/PLC_PRG/counter"},
            {Name: "counterReverse", Address: "plc/app/Application/sym/PLC_PRG/counterReverse"},
        },
@@ -170,10 +170,10 @@ func cleanup(server *httptest.Server) {
func initRunner(t *testing.T) (*CtrlXDataLayer, *httptest.Server) {
    server := newServer(t)

-    subs := make([]Subscription, 0)
-    subs = append(subs, Subscription{
+    subs := make([]subscription, 0)
+    subs = append(subs, subscription{
        Measurement: "ctrlx",
-        Nodes: []Node{
+        Nodes: []node{
            {Name: "counter", Address: "plc/app/Application/sym/PLC_PRG/counter"},
        },
    },
@@ -236,9 +236,9 @@ func TestCtrlXMetricsMulti(t *testing.T) {
}

func TestCtrlXCreateSseClient(t *testing.T) {
-    sub := Subscription{
+    sub := subscription{
        Measurement: "ctrlx",
-        Nodes: []Node{
+        Nodes: []node{
            {Name: "counter", Address: "plc/app/Application/sym/PLC_PRG/counter"},
            {Name: "counterReverse", Address: "plc/app/Application/sym/PLC_PRG/counterReverse"},
        },

@@ -22,76 +22,76 @@ const (
type Client interface {
    SetToken(token string)

-    Login(ctx context.Context, sa *ServiceAccount) (*AuthToken, error)
-    GetSummary(ctx context.Context) (*Summary, error)
-    GetContainers(ctx context.Context, node string) ([]Container, error)
-    GetNodeMetrics(ctx context.Context, node string) (*Metrics, error)
-    GetContainerMetrics(ctx context.Context, node, container string) (*Metrics, error)
-    GetAppMetrics(ctx context.Context, node, container string) (*Metrics, error)
+    Login(ctx context.Context, sa *ServiceAccount) (*authToken, error)
+    GetSummary(ctx context.Context) (*summary, error)
+    GetContainers(ctx context.Context, node string) ([]container, error)
+    GetNodeMetrics(ctx context.Context, node string) (*metrics, error)
+    GetContainerMetrics(ctx context.Context, node, container string) (*metrics, error)
+    GetAppMetrics(ctx context.Context, node, container string) (*metrics, error)
}

-type APIError struct {
+type apiError struct {
    URL string
    StatusCode int
    Title string
    Description string
}

-// Login is request data for logging in.
-type Login struct {
+// login is request data for logging in.
+type login struct {
    UID string `json:"uid"`
    Exp int64 `json:"exp"`
    Token string `json:"token"`
}

-// LoginError is the response when login fails.
-type LoginError struct {
+// loginError is the response when login fails.
+type loginError struct {
    Title string `json:"title"`
    Description string `json:"description"`
}

-// LoginAuth is the response to a successful login.
-type LoginAuth struct {
+// loginAuth is the response to a successful login.
+type loginAuth struct {
    Token string `json:"token"`
}

-// Slave is a node in the cluster.
-type Slave struct {
+// slave is a node in the cluster.
+type slave struct {
    ID string `json:"id"`
}

-// Summary provides high level cluster wide information.
-type Summary struct {
+// summary provides high level cluster wide information.
+type summary struct {
    Cluster string
-    Slaves []Slave
+    Slaves []slave
}

-// Container is a container on a node.
-type Container struct {
+// container is a container on a node.
+type container struct {
    ID string
}

-type DataPoint struct {
+type dataPoint struct {
    Name string `json:"name"`
    Tags map[string]string `json:"tags"`
    Unit string `json:"unit"`
    Value float64 `json:"value"`
}

-// Metrics are the DCOS metrics
-type Metrics struct {
-    Datapoints []DataPoint `json:"datapoints"`
+// metrics are the DCOS metrics
+type metrics struct {
+    Datapoints []dataPoint `json:"datapoints"`
    Dimensions map[string]interface{} `json:"dimensions"`
}

-// AuthToken is the authentication token.
-type AuthToken struct {
+// authToken is the authentication token.
+type authToken struct {
    Text string
    Expire time.Time
}

-// ClusterClient is a Client that uses the cluster URL.
-type ClusterClient struct {
+// clusterClient is a Client that uses the cluster URL.
+type clusterClient struct {
    clusterURL *url.URL
    httpClient *http.Client
    token string
@@ -103,19 +103,14 @@ type claims struct {
    jwt.RegisteredClaims
}

-func (e APIError) Error() string {
+func (e apiError) Error() string {
    if e.Description != "" {
        return fmt.Sprintf("[%s] %s: %s", e.URL, e.Title, e.Description)
    }
    return fmt.Sprintf("[%s] %s", e.URL, e.Title)
}

-func NewClusterClient(
-    clusterURL *url.URL,
-    timeout time.Duration,
-    maxConns int,
-    tlsConfig *tls.Config,
-) *ClusterClient {
+func NewClusterClient(clusterURL *url.URL, timeout time.Duration, maxConns int, tlsConfig *tls.Config) *clusterClient {
    httpClient := &http.Client{
        Transport: &http.Transport{
            MaxIdleConns: maxConns,
@@ -125,7 +120,7 @@ func NewClusterClient(
    }
    semaphore := make(chan struct{}, maxConns)

-    c := &ClusterClient{
+    c := &clusterClient{
        clusterURL: clusterURL,
        httpClient: httpClient,
        semaphore: semaphore,
@@ -133,11 +128,11 @@ func NewClusterClient(
    return c
}

-func (c *ClusterClient) SetToken(token string) {
+func (c *clusterClient) SetToken(token string) {
    c.token = token
}

-func (c *ClusterClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthToken, error) {
+func (c *clusterClient) Login(ctx context.Context, sa *ServiceAccount) (*authToken, error) {
    token, err := c.createLoginToken(sa)
    if err != nil {
        return nil, err
@@ -145,7 +140,7 @@ func (c *ClusterClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthTok

    exp := time.Now().Add(loginDuration)

-    body := &Login{
+    body := &login{
        UID: sa.AccountID,
        Exp: exp.Unix(),
        Token: token,
@@ -171,25 +166,25 @@ func (c *ClusterClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthTok
    defer resp.Body.Close()

    if resp.StatusCode == http.StatusOK {
-        auth := &LoginAuth{}
+        auth := &loginAuth{}
        dec := json.NewDecoder(resp.Body)
        err = dec.Decode(auth)
        if err != nil {
            return nil, err
        }

-        token := &AuthToken{
+        token := &authToken{
            Text: auth.Token,
            Expire: exp,
        }
        return token, nil
    }

-    loginError := &LoginError{}
+    loginError := &loginError{}
    dec := json.NewDecoder(resp.Body)
    err = dec.Decode(loginError)
    if err != nil {
-        err := &APIError{
+        err := &apiError{
            URL: loc,
            StatusCode: resp.StatusCode,
            Title: resp.Status,
@@ -197,7 +192,7 @@ func (c *ClusterClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthTok
        return nil, err
    }

-    err = &APIError{
+    err = &apiError{
        URL: loc,
        StatusCode: resp.StatusCode,
        Title: loginError.Title,
@@ -206,8 +201,8 @@ func (c *ClusterClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthTok
    return nil, err
}

-func (c *ClusterClient) GetSummary(ctx context.Context) (*Summary, error) {
-    summary := &Summary{}
+func (c *clusterClient) GetSummary(ctx context.Context) (*summary, error) {
+    summary := &summary{}
    err := c.doGet(ctx, c.toURL("/mesos/master/state-summary"), summary)
    if err != nil {
        return nil, err
@@ -216,7 +211,7 @@ func (c *ClusterClient) GetSummary(ctx context.Context) (*Summary, error) {
    return summary, nil
}

-func (c *ClusterClient) GetContainers(ctx context.Context, node string) ([]Container, error) {
+func (c *clusterClient) GetContainers(ctx context.Context, node string) ([]container, error) {
    list := []string{}

    path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/containers", node)
@@ -225,16 +220,16 @@ func (c *ClusterClient) GetContainers(ctx context.Context, node string) ([]Conta
        return nil, err
    }

-    containers := make([]Container, 0, len(list))
+    containers := make([]container, 0, len(list))
    for _, c := range list {
-        containers = append(containers, Container{ID: c})
+        containers = append(containers, container{ID: c})
    }

    return containers, nil
}

-func (c *ClusterClient) getMetrics(ctx context.Context, address string) (*Metrics, error) {
-    metrics := &Metrics{}
+func (c *clusterClient) getMetrics(ctx context.Context, address string) (*metrics, error) {
+    metrics := &metrics{}

    err := c.doGet(ctx, address, metrics)
    if err != nil {
@@ -244,17 +239,17 @@ func (c *ClusterClient) getMetrics(ctx context.Context, address string) (*Metric
    return metrics, nil
}

-func (c *ClusterClient) GetNodeMetrics(ctx context.Context, node string) (*Metrics, error) {
+func (c *clusterClient) GetNodeMetrics(ctx context.Context, node string) (*metrics, error) {
    path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/node", node)
    return c.getMetrics(ctx, c.toURL(path))
}

-func (c *ClusterClient) GetContainerMetrics(ctx context.Context, node, container string) (*Metrics, error) {
+func (c *clusterClient) GetContainerMetrics(ctx context.Context, node, container string) (*metrics, error) {
    path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/containers/%s", node, container)
    return c.getMetrics(ctx, c.toURL(path))
}

-func (c *ClusterClient) GetAppMetrics(ctx context.Context, node, container string) (*Metrics, error) {
+func (c *clusterClient) GetAppMetrics(ctx context.Context, node, container string) (*metrics, error) {
    path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/containers/%s/app", node, container)
    return c.getMetrics(ctx, c.toURL(path))
}
@@ -273,7 +268,7 @@ func createGetRequest(address string, token string) (*http.Request, error) {
    return req, nil
}

-func (c *ClusterClient) doGet(ctx context.Context, address string, v interface{}) error {
+func (c *clusterClient) doGet(ctx context.Context, address string, v interface{}) error {
    req, err := createGetRequest(address, c.token)
    if err != nil {
        return err
@@ -302,7 +297,7 @@ func (c *ClusterClient) doGet(ctx context.Context, address string, v interface{}
    }

    if resp.StatusCode < 200 || resp.StatusCode >= 300 {
-        return &APIError{
+        return &apiError{
            URL: address,
            StatusCode: resp.StatusCode,
            Title: resp.Status,
@@ -317,13 +312,13 @@ func (c *ClusterClient) doGet(ctx context.Context, address string, v interface{}
    return err
}

-func (c *ClusterClient) toURL(path string) string {
+func (c *clusterClient) toURL(path string) string {
    clusterURL := *c.clusterURL
    clusterURL.Path = path
    return clusterURL.String()
}

-func (c *ClusterClient) createLoginToken(sa *ServiceAccount) (string, error) {
+func (c *clusterClient) createLoginToken(sa *ServiceAccount) (string, error) {
    token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims{
        UID: sa.AccountID,
        RegisteredClaims: jwt.RegisteredClaims{

@@ -38,7 +38,7 @@ func TestLogin(t *testing.T) {
            name: "Unauthorized Error",
            responseCode: http.StatusUnauthorized,
            responseBody: `{"title": "x", "description": "y"}`,
-            expectedError: &APIError{
+            expectedError: &apiError{
                URL: ts.URL + "/acs/api/v1/auth/login",
                StatusCode: http.StatusUnauthorized,
                Title: "x",
@@ -88,14 +88,14 @@ func TestGetSummary(t *testing.T) {
        name string
        responseCode int
        responseBody string
-        expectedValue *Summary
+        expectedValue *summary
        expectedError error
    }{
        {
            name: "No nodes",
            responseCode: http.StatusOK,
            responseBody: `{"cluster": "a", "slaves": []}`,
-            expectedValue: &Summary{Cluster: "a", Slaves: []Slave{}},
+            expectedValue: &summary{Cluster: "a", Slaves: []slave{}},
            expectedError: nil,
        },
        {
@@ -103,7 +103,7 @@ func TestGetSummary(t *testing.T) {
            responseCode: http.StatusUnauthorized,
            responseBody: `<html></html>`,
            expectedValue: nil,
-            expectedError: &APIError{
+            expectedError: &apiError{
                URL: ts.URL + "/mesos/master/state-summary",
                StatusCode: http.StatusUnauthorized,
                Title: "401 Unauthorized",
@@ -113,9 +113,9 @@ func TestGetSummary(t *testing.T) {
            name: "Has nodes",
            responseCode: http.StatusOK,
            responseBody: `{"cluster": "a", "slaves": [{"id": "a"}, {"id": "b"}]}`,
-            expectedValue: &Summary{
+            expectedValue: &summary{
                Cluster: "a",
-                Slaves: []Slave{
+                Slaves: []slave{
                    {ID: "a"},
                    {ID: "b"},
                },
@@ -153,14 +153,14 @@ func TestGetNodeMetrics(t *testing.T) {
        name string
        responseCode int
        responseBody string
-        expectedValue *Metrics
+        expectedValue *metrics
        expectedError error
    }{
        {
            name: "Empty Body",
            responseCode: http.StatusOK,
            responseBody: `{}`,
-            expectedValue: &Metrics{},
+            expectedValue: &metrics{},
            expectedError: nil,
        },
    }
@@ -194,14 +194,14 @@ func TestGetContainerMetrics(t *testing.T) {
        name string
        responseCode int
        responseBody string
-        expectedValue *Metrics
+        expectedValue *metrics
        expectedError error
    }{
        {
            name: "204 No Content",
            responseCode: http.StatusNoContent,
            responseBody: ``,
-            expectedValue: &Metrics{},
+            expectedValue: &metrics{},
            expectedError: nil,
        },
    }

@@ -24,7 +24,7 @@ type ServiceAccount struct {
    AccountID string
    PrivateKey *rsa.PrivateKey

-    auth *AuthToken
+    auth *authToken
}

type TokenCreds struct {

@@ -147,7 +147,7 @@ func (d *DCOS) GatherContainers(ctx context.Context, acc telegraf.Accumulator, c
            defer wg.Done()
            m, err := d.client.GetContainerMetrics(ctx, node, container)
            if err != nil {
-                var apiErr APIError
+                var apiErr apiError
                if errors.As(err, &apiErr) && apiErr.StatusCode == 404 {
                    return
                }
@@ -164,7 +164,7 @@ func (d *DCOS) GatherContainers(ctx context.Context, acc telegraf.Accumulator, c
            defer wg.Done()
            m, err := d.client.GetAppMetrics(ctx, node, container)
            if err != nil {
-                var apiErr APIError
+                var apiErr apiError
                if errors.As(err, &apiErr) && apiErr.StatusCode == 404 {
                    return
                }
@@ -184,7 +184,7 @@ type point struct {
    fields map[string]interface{}
}

-func (d *DCOS) createPoints(m *Metrics) []*point {
+func (d *DCOS) createPoints(m *metrics) []*point {
    points := make(map[string]*point)
    for _, dp := range m.Datapoints {
        fieldKey := strings.ReplaceAll(dp.Name, ".", "_")
@@ -244,7 +244,7 @@ func (d *DCOS) createPoints(m *Metrics) []*point {
    return results
}

-func (d *DCOS) addMetrics(acc telegraf.Accumulator, cluster, mname string, m *Metrics, tagDimensions []string) {
+func (d *DCOS) addMetrics(acc telegraf.Accumulator, cluster, mname string, m *metrics, tagDimensions []string) {
    tm := time.Now()

    points := d.createPoints(m)
@@ -266,15 +266,15 @@ func (d *DCOS) addMetrics(acc telegraf.Accumulator, cluster, mname string, m *Me
    }
}

-func (d *DCOS) addNodeMetrics(acc telegraf.Accumulator, cluster string, m *Metrics) {
+func (d *DCOS) addNodeMetrics(acc telegraf.Accumulator, cluster string, m *metrics) {
    d.addMetrics(acc, cluster, "dcos_node", m, nodeDimensions)
}

-func (d *DCOS) addContainerMetrics(acc telegraf.Accumulator, cluster string, m *Metrics) {
+func (d *DCOS) addContainerMetrics(acc telegraf.Accumulator, cluster string, m *metrics) {
    d.addMetrics(acc, cluster, "dcos_container", m, containerDimensions)
}

-func (d *DCOS) addAppMetrics(acc telegraf.Accumulator, cluster string, m *Metrics) {
+func (d *DCOS) addAppMetrics(acc telegraf.Accumulator, cluster string, m *metrics) {
    d.addMetrics(acc, cluster, "dcos_app", m, appDimensions)
}

@@ -12,52 +12,52 @@ import (

type mockClient struct {
    SetTokenF func()
-    LoginF func(ctx context.Context, sa *ServiceAccount) (*AuthToken, error)
-    GetSummaryF func() (*Summary, error)
-    GetContainersF func() ([]Container, error)
-    GetNodeMetricsF func() (*Metrics, error)
-    GetContainerMetricsF func(ctx context.Context, node, container string) (*Metrics, error)
-    GetAppMetricsF func(ctx context.Context, node, container string) (*Metrics, error)
+    LoginF func(ctx context.Context, sa *ServiceAccount) (*authToken, error)
+    GetSummaryF func() (*summary, error)
+    GetContainersF func() ([]container, error)
+    GetNodeMetricsF func() (*metrics, error)
+    GetContainerMetricsF func(ctx context.Context, node, container string) (*metrics, error)
+    GetAppMetricsF func(ctx context.Context, node, container string) (*metrics, error)
}

func (c *mockClient) SetToken(string) {
    c.SetTokenF()
}

-func (c *mockClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthToken, error) {
+func (c *mockClient) Login(ctx context.Context, sa *ServiceAccount) (*authToken, error) {
    return c.LoginF(ctx, sa)
}

-func (c *mockClient) GetSummary(context.Context) (*Summary, error) {
+func (c *mockClient) GetSummary(context.Context) (*summary, error) {
    return c.GetSummaryF()
}

-func (c *mockClient) GetContainers(context.Context, string) ([]Container, error) {
+func (c *mockClient) GetContainers(context.Context, string) ([]container, error) {
    return c.GetContainersF()
}

-func (c *mockClient) GetNodeMetrics(context.Context, string) (*Metrics, error) {
+func (c *mockClient) GetNodeMetrics(context.Context, string) (*metrics, error) {
    return c.GetNodeMetricsF()
}

-func (c *mockClient) GetContainerMetrics(ctx context.Context, node, container string) (*Metrics, error) {
+func (c *mockClient) GetContainerMetrics(ctx context.Context, node, container string) (*metrics, error) {
    return c.GetContainerMetricsF(ctx, node, container)
}

-func (c *mockClient) GetAppMetrics(ctx context.Context, node, container string) (*Metrics, error) {
+func (c *mockClient) GetAppMetrics(ctx context.Context, node, container string) (*metrics, error) {
    return c.GetAppMetricsF(ctx, node, container)
}

func TestAddNodeMetrics(t *testing.T) {
    var tests = []struct {
        name string
-        metrics *Metrics
+        metrics *metrics
        check func(*testutil.Accumulator) []bool
    }{
        {
            name: "basic datapoint conversion",
-            metrics: &Metrics{
-                Datapoints: []DataPoint{
+            metrics: &metrics{
+                Datapoints: []dataPoint{
                    {
                        Name: "process.count",
                        Unit: "count",
@@ -77,8 +77,8 @@ func TestAddNodeMetrics(t *testing.T) {
        },
        {
            name: "path added as tag",
-            metrics: &Metrics{
-                Datapoints: []DataPoint{
+            metrics: &metrics{
+                Datapoints: []dataPoint{
                    {
                        Name: "filesystem.inode.free",
                        Tags: map[string]string{
@@ -102,8 +102,8 @@ func TestAddNodeMetrics(t *testing.T) {
        },
        {
            name: "interface added as tag",
-            metrics: &Metrics{
-                Datapoints: []DataPoint{
+            metrics: &metrics{
+                Datapoints: []dataPoint{
                    {
                        Name: "network.out.dropped",
                        Tags: map[string]string{
@@ -127,8 +127,8 @@ func TestAddNodeMetrics(t *testing.T) {
        },
        {
            name: "bytes unit appended to fieldkey",
-            metrics: &Metrics{
-                Datapoints: []DataPoint{
+            metrics: &metrics{
+                Datapoints: []dataPoint{
                    {
                        Name: "network.in",
                        Tags: map[string]string{
@@ -152,8 +152,8 @@ func TestAddNodeMetrics(t *testing.T) {
        },
        {
            name: "dimensions added as tags",
-            metrics: &Metrics{
-                Datapoints: []DataPoint{
+            metrics: &metrics{
+                Datapoints: []dataPoint{
                    {
                        Name: "process.count",
                        Tags: map[string]string{},
@@ -209,13 +209,13 @@ func TestAddNodeMetrics(t *testing.T) {
func TestAddContainerMetrics(t *testing.T) {
    var tests = []struct {
        name string
-        metrics *Metrics
+        metrics *metrics
        check func(*testutil.Accumulator) []bool
    }{
        {
            name: "container",
-            metrics: &Metrics{
-                Datapoints: []DataPoint{
+            metrics: &metrics{
+                Datapoints: []dataPoint{
                    {
                        Name: "net.rx.errors",
                        Tags: map[string]string{
@@ -280,13 +280,13 @@ func TestAddContainerMetrics(t *testing.T) {
func TestAddAppMetrics(t *testing.T) {
    var tests = []struct {
        name string
-        metrics *Metrics
+        metrics *metrics
        check func(*testutil.Accumulator) []bool
    }{
        {
            name: "tags are optional",
-            metrics: &Metrics{
-                Datapoints: []DataPoint{
+            metrics: &metrics{
+                Datapoints: []dataPoint{
                    {
                        Name: "dcos.metrics.module.container_throttled_bytes_per_sec",
                        Unit: "",
@@ -308,8 +308,8 @@ func TestAddAppMetrics(t *testing.T) {
        },
        {
            name: "dimensions are tagged",
-            metrics: &Metrics{
-                Datapoints: []DataPoint{
+            metrics: &metrics{
+                Datapoints: []dataPoint{
                    {
                        Name: "dcos.metrics.module.container_throttled_bytes_per_sec",
                        Unit: "",
@@ -363,10 +363,10 @@ func TestGatherFilterNode(t *testing.T) {
            name: "cluster without nodes has no metrics",
            client: &mockClient{
                SetTokenF: func() {},
-                GetSummaryF: func() (*Summary, error) {
-                    return &Summary{
+                GetSummaryF: func() (*summary, error) {
+                    return &summary{
                        Cluster: "a",
-                        Slaves: []Slave{},
+                        Slaves: []slave{},
                    }, nil
                },
            },
@@ -381,21 +381,21 @@ func TestGatherFilterNode(t *testing.T) {
            nodeInclude: []string{"x"},
            client: &mockClient{
                SetTokenF: func() {},
-                GetSummaryF: func() (*Summary, error) {
-                    return &Summary{
+                GetSummaryF: func() (*summary, error) {
+                    return &summary{
                        Cluster: "a",
-                        Slaves: []Slave{
+                        Slaves: []slave{
                            {ID: "x"},
                            {ID: "y"},
                        },
                    }, nil
                },
-                GetContainersF: func() ([]Container, error) {
-                    return []Container{}, nil
+                GetContainersF: func() ([]container, error) {
+                    return []container{}, nil
                },
-                GetNodeMetricsF: func() (*Metrics, error) {
-                    return &Metrics{
-                        Datapoints: []DataPoint{
+                GetNodeMetricsF: func() (*metrics, error) {
+                    return &metrics{
+                        Datapoints: []dataPoint{
                            {
                                Name: "value",
                                Value: 42.0,

@@ -24,17 +24,6 @@ import (
//go:embed sample.conf
var sampleConfig string

-type ResponseMetrics struct {
-    Metrics []Metric `json:"metrics"`
-}
-
-type Metric struct {
-    FullName string `json:"full_name"`
-    Name string `json:"name"`
-    Type string `json:"type"`
-    Fields map[string]interface{} `json:"metric"`
-}
-
type GrayLog struct {
    Servers []string `toml:"servers"`
    Metrics []string `toml:"metrics"`
@@ -46,6 +35,25 @@ type GrayLog struct {
    client HTTPClient
}

+type responseMetrics struct {
+    Metrics []metric `json:"metrics"`
+}
+
+type metric struct {
+    FullName string `json:"full_name"`
+    Name string `json:"name"`
+    Type string `json:"type"`
+    Fields map[string]interface{} `json:"metric"`
+}
+
+type messageBody struct {
+    Metrics []string `json:"metrics"`
+}
+
+type realHTTPClient struct {
+    client *http.Client
+}
+
type HTTPClient interface {
    // Returns the result of an http request
    //
@@ -61,23 +69,15 @@ type HTTPClient interface {
    HTTPClient() *http.Client
}

-type Messagebody struct {
-    Metrics []string `json:"metrics"`
-}
-
-type RealHTTPClient struct {
-    client *http.Client
-}
-
-func (c *RealHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
+func (c *realHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
    return c.client.Do(req)
}

-func (c *RealHTTPClient) SetHTTPClient(client *http.Client) {
+func (c *realHTTPClient) SetHTTPClient(client *http.Client) {
    c.client = client
}

-func (c *RealHTTPClient) HTTPClient() *http.Client {
+func (c *realHTTPClient) HTTPClient() *http.Client {
    return c.client
}

@@ -145,7 +145,7 @@ func (h *GrayLog) gatherServer(
    if err != nil {
        return fmt.Errorf("unable to parse address host %q: %w", requestURL.Host, err)
    }
-    var dat ResponseMetrics
+    var dat responseMetrics
    if err := json.Unmarshal([]byte(resp), &dat); err != nil {
        return err
    }
@@ -216,7 +216,7 @@ func (h *GrayLog) sendRequest(serverURL string) (string, float64, error) {
    headers["X-Requested-By"] = "Telegraf"

    if strings.Contains(requestURL.String(), "multiple") {
-        m := &Messagebody{Metrics: h.Metrics}
+        m := &messageBody{Metrics: h.Metrics}
        httpBody, err := json.Marshal(m)
        if err != nil {
            return "", -1, fmt.Errorf("invalid list of Metrics %s", h.Metrics)
@@ -262,7 +262,7 @@ func (h *GrayLog) sendRequest(serverURL string) (string, float64, error) {
func init() {
    inputs.Add("graylog", func() telegraf.Input {
        return &GrayLog{
-            client: &RealHTTPClient{},
+            client: &realHTTPClient{},
            Timeout: config.Duration(5 * time.Second),
        }
    })

@@ -82,9 +82,9 @@ type ConsumerGroupCreator interface {
    Create(brokers []string, group string, cfg *sarama.Config) (ConsumerGroup, error)
}

-type SaramaCreator struct{}
+type saramaCreator struct{}

-func (*SaramaCreator) Create(brokers []string, group string, cfg *sarama.Config) (ConsumerGroup, error) {
+func (*saramaCreator) Create(brokers []string, group string, cfg *sarama.Config) (ConsumerGroup, error) {
    return sarama.NewConsumerGroup(brokers, group, cfg)
}

@@ -155,7 +155,7 @@ func (k *KafkaConsumer) Init() error {
    }

    if k.ConsumerCreator == nil {
-        k.ConsumerCreator = &SaramaCreator{}
+        k.ConsumerCreator = &saramaCreator{}
    }

    cfg.Net.ResolveCanonicalBootstrapServers = k.ResolveCanonicalBootstrapServersOnly
@@ -385,26 +385,25 @@ func (k *KafkaConsumer) Stop() {
    k.wg.Wait()
}

-// Message is an aggregate type binding the Kafka message and the session so
-// that offsets can be updated.
-type Message struct {
+// message is an aggregate type binding the Kafka message and the session so that offsets can be updated.
+type message struct {
    message *sarama.ConsumerMessage
    session sarama.ConsumerGroupSession
}

-func NewConsumerGroupHandler(acc telegraf.Accumulator, maxUndelivered int, parser telegraf.Parser, log telegraf.Logger) *ConsumerGroupHandler {
-    handler := &ConsumerGroupHandler{
+func NewConsumerGroupHandler(acc telegraf.Accumulator, maxUndelivered int, parser telegraf.Parser, log telegraf.Logger) *consumerGroupHandler {
+    handler := &consumerGroupHandler{
        acc: acc.WithTracking(maxUndelivered),
        sem: make(chan empty, maxUndelivered),
-        undelivered: make(map[telegraf.TrackingID]Message, maxUndelivered),
+        undelivered: make(map[telegraf.TrackingID]message, maxUndelivered),
        parser: parser,
        log: log,
    }
    return handler
}

-// ConsumerGroupHandler is a sarama.ConsumerGroupHandler implementation.
-type ConsumerGroupHandler struct {
+// consumerGroupHandler is a sarama.ConsumerGroupHandler implementation.
+type consumerGroupHandler struct {
    MaxMessageLen int
    TopicTag string
    MsgHeadersToTags map[string]bool
@@ -418,15 +417,15 @@ type ConsumerGroupHandler struct {
    cancel context.CancelFunc

    mu sync.Mutex
-    undelivered map[telegraf.TrackingID]Message
+    undelivered map[telegraf.TrackingID]message

    log telegraf.Logger
}

// Setup is called once when a new session is opened. It setups up the handler
// and begins processing delivered messages.
-func (h *ConsumerGroupHandler) Setup(sarama.ConsumerGroupSession) error {
-    h.undelivered = make(map[telegraf.TrackingID]Message)
+func (h *consumerGroupHandler) Setup(sarama.ConsumerGroupSession) error {
+    h.undelivered = make(map[telegraf.TrackingID]message)

    ctx, cancel := context.WithCancel(context.Background())
    h.cancel = cancel
@@ -440,7 +439,7 @@ func (h *ConsumerGroupHandler) Setup(sarama.ConsumerGroupSession) error {
}

// Run processes any delivered metrics during the lifetime of the session.
-func (h *ConsumerGroupHandler) run(ctx context.Context) {
+func (h *consumerGroupHandler) run(ctx context.Context) {
    for {
        select {
        case <-ctx.Done():
@@ -451,7 +450,7 @@ func (h *ConsumerGroupHandler) run(ctx context.Context) {
    }
}

-func (h *ConsumerGroupHandler) onDelivery(track telegraf.DeliveryInfo) {
+func (h *consumerGroupHandler) onDelivery(track telegraf.DeliveryInfo) {
    h.mu.Lock()
    defer h.mu.Unlock()

@@ -470,7 +469,7 @@ func (h *ConsumerGroupHandler) onDelivery(track telegraf.DeliveryInfo) {
}

// Reserve blocks until there is an available slot for a new message.
-func (h *ConsumerGroupHandler) Reserve(ctx context.Context) error {
+func (h *consumerGroupHandler) Reserve(ctx context.Context) error {
    select {
    case <-ctx.Done():
        return ctx.Err()
@@ -479,13 +478,13 @@ func (h *ConsumerGroupHandler) Reserve(ctx context.Context) error {
    }
}

-func (h *ConsumerGroupHandler) release() {
+func (h *consumerGroupHandler) release() {
    <-h.sem
}

// Handle processes a message and if successful saves it to be acknowledged
// after delivery.
-func (h *ConsumerGroupHandler) Handle(session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) error {
+func (h *consumerGroupHandler) Handle(session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) error {
    if h.MaxMessageLen != 0 && len(msg.Value) > h.MaxMessageLen {
        session.MarkMessage(msg, "")
        h.release()
@@ -547,14 +546,14 @@ func (h *ConsumerGroupHandler) Handle(session sarama.ConsumerGroupSession, msg *

    h.mu.Lock()
    id := h.acc.AddTrackingMetricGroup(metrics)
-    h.undelivered[id] = Message{session: session, message: msg}
+    h.undelivered[id] = message{session: session, message: msg}
    h.mu.Unlock()
    return nil
}

// ConsumeClaim is called once each claim in a goroutine and must be
// thread-safe. Should run until the claim is closed.
-func (h *ConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
+func (h *consumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
    ctx := session.Context()

    for {
@@ -580,7 +579,7 @@ func (h *ConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession,

// Cleanup stops the internal goroutine and is called after all ConsumeClaim
// functions have completed.
-func (h *ConsumerGroupHandler) Cleanup(sarama.ConsumerGroupSession) error {
+func (h *consumerGroupHandler) Cleanup(sarama.ConsumerGroupSession) error {
    h.cancel()
    h.wg.Wait()
    return nil

@@ -27,6 +27,10 @@ import (
//go:embed sample.conf
var sampleConfig string

+const (
+    defaultServiceAccountPath = "/var/run/secrets/kubernetes.io/serviceaccount/token"
+)
+
// Kubernetes represents the config object for the plugin
type Kubernetes struct {
    URL string `toml:"url"`
@@ -44,10 +48,6 @@ type Kubernetes struct {
    httpClient *http.Client
}

-const (
-    defaultServiceAccountPath = "/var/run/secrets/kubernetes.io/serviceaccount/token"
-)
-
func init() {
    inputs.Add("kubernetes", func() telegraf.Input {
        return &Kubernetes{
@@ -156,7 +156,7 @@ func getNodeAddress(addresses []v1.NodeAddress) string {
}

func (k *Kubernetes) gatherSummary(baseURL string, acc telegraf.Accumulator) error {
-    summaryMetrics := &SummaryMetrics{}
+    summaryMetrics := &summaryMetrics{}
    err := k.LoadJSON(baseURL+"/stats/summary", summaryMetrics)
    if err != nil {
        return err
@@ -172,7 +172,7 @@ func (k *Kubernetes) gatherSummary(baseURL string, acc telegraf.Accumulator) err
    return nil
}

-func buildSystemContainerMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator) {
+func buildSystemContainerMetrics(summaryMetrics *summaryMetrics, acc telegraf.Accumulator) {
    for _, container := range summaryMetrics.Node.SystemContainers {
        tags := map[string]string{
            "node_name": summaryMetrics.Node.NodeName,
@@ -194,7 +194,7 @@ func buildSystemContainerMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Ac
    }
}

-func buildNodeMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator, metricName string) {
+func buildNodeMetrics(summaryMetrics *summaryMetrics, acc telegraf.Accumulator, metricName string) {
    tags := map[string]string{
        "node_name": summaryMetrics.Node.NodeName,
    }
@@ -283,7 +283,7 @@ func (k *Kubernetes) LoadJSON(url string, v interface{}) error {
    return nil
}

-func buildPodMetrics(summaryMetrics *SummaryMetrics, podInfo []Item, labelFilter filter.Filter, acc telegraf.Accumulator) {
+func buildPodMetrics(summaryMetrics *summaryMetrics, podInfo []Item, labelFilter filter.Filter, acc telegraf.Accumulator) {
    for _, pod := range summaryMetrics.Pods {
        podLabels := make(map[string]string)
        containerImages := make(map[string]string)

@ -2,63 +2,63 @@ package kubernetes
|
|||
|
||||
import "time"
|
||||
|
||||
// SummaryMetrics represents all the summary data about a particular node retrieved from a kubelet
|
||||
type SummaryMetrics struct {
|
||||
Node NodeMetrics `json:"node"`
|
||||
Pods []PodMetrics `json:"pods"`
|
||||
// summaryMetrics represents all the summary data about a particular node retrieved from a kubelet
|
||||
type summaryMetrics struct {
|
||||
Node nodeMetrics `json:"node"`
|
||||
Pods []podMetrics `json:"pods"`
|
||||
}
|
||||
|
||||
// NodeMetrics represents detailed information about a node
|
||||
type NodeMetrics struct {
|
||||
// nodeMetrics represents detailed information about a node
|
||||
type nodeMetrics struct {
|
||||
NodeName string `json:"nodeName"`
|
||||
SystemContainers []ContainerMetrics `json:"systemContainers"`
|
||||
SystemContainers []containerMetrics `json:"systemContainers"`
|
||||
StartTime time.Time `json:"startTime"`
|
||||
CPU CPUMetrics `json:"cpu"`
|
||||
Memory MemoryMetrics `json:"memory"`
|
||||
Network NetworkMetrics `json:"network"`
|
||||
FileSystem FileSystemMetrics `json:"fs"`
|
||||
Runtime RuntimeMetrics `json:"runtime"`
|
||||
CPU cpuMetrics `json:"cpu"`
|
||||
Memory memoryMetrics `json:"memory"`
|
||||
Network networkMetrics `json:"network"`
|
||||
FileSystem fileSystemMetrics `json:"fs"`
|
||||
Runtime runtimeMetrics `json:"runtime"`
|
||||
}
|
||||
|
||||
// ContainerMetrics represents the metric data collected about a container from the kubelet
|
||||
type ContainerMetrics struct {
|
||||
// containerMetrics represents the metric data collected about a container from the kubelet
|
||||
type containerMetrics struct {
|
||||
Name string `json:"name"`
|
||||
StartTime time.Time `json:"startTime"`
|
||||
CPU CPUMetrics `json:"cpu"`
|
||||
Memory MemoryMetrics `json:"memory"`
|
||||
RootFS FileSystemMetrics `json:"rootfs"`
|
||||
LogsFS FileSystemMetrics `json:"logs"`
|
||||
CPU cpuMetrics `json:"cpu"`
|
||||
Memory memoryMetrics `json:"memory"`
|
||||
RootFS fileSystemMetrics `json:"rootfs"`
|
||||
LogsFS fileSystemMetrics `json:"logs"`
|
||||
}
|
||||
|
||||
// RuntimeMetrics contains metric data on the runtime of the system
|
||||
type RuntimeMetrics struct {
|
||||
ImageFileSystem FileSystemMetrics `json:"imageFs"`
|
||||
// runtimeMetrics contains metric data on the runtime of the system
|
||||
type runtimeMetrics struct {
|
||||
ImageFileSystem fileSystemMetrics `json:"imageFs"`
|
||||
}
|
||||
|
||||
// CPUMetrics represents the cpu usage data of a pod or node
|
||||
type CPUMetrics struct {
|
||||
// cpuMetrics represents the cpu usage data of a pod or node
|
||||
type cpuMetrics struct {
|
||||
Time time.Time `json:"time"`
|
||||
UsageNanoCores int64 `json:"usageNanoCores"`
|
||||
UsageCoreNanoSeconds int64 `json:"usageCoreNanoSeconds"`
|
||||
}
|
||||
|
||||
// PodMetrics contains metric data on a given pod
|
||||
type PodMetrics struct {
|
||||
PodRef PodReference `json:"podRef"`
|
||||
// podMetrics contains metric data on a given pod
|
||||
type podMetrics struct {
|
||||
PodRef podReference `json:"podRef"`
|
||||
StartTime *time.Time `json:"startTime"`
|
||||
Containers []ContainerMetrics `json:"containers"`
|
||||
Network NetworkMetrics `json:"network"`
|
||||
Volumes []VolumeMetrics `json:"volume"`
|
||||
Containers []containerMetrics `json:"containers"`
|
||||
Network networkMetrics `json:"network"`
|
||||
Volumes []volumeMetrics `json:"volume"`
|
||||
}
|
||||
|
||||
// PodReference is how a pod is identified
|
||||
type PodReference struct {
|
||||
// podReference is how a pod is identified
|
||||
type podReference struct {
|
||||
Name string `json:"name"`
|
||||
Namespace string `json:"namespace"`
|
||||
}
|
||||
|
||||
// MemoryMetrics represents the memory metrics for a pod or node
|
||||
type MemoryMetrics struct {
|
||||
// memoryMetrics represents the memory metrics for a pod or node
|
||||
type memoryMetrics struct {
|
||||
Time time.Time `json:"time"`
|
||||
AvailableBytes int64 `json:"availableBytes"`
|
||||
UsageBytes int64 `json:"usageBytes"`
|
||||
|
|
@ -68,15 +68,15 @@ type MemoryMetrics struct {
|
|||
MajorPageFaults int64 `json:"majorPageFaults"`
|
||||
}
|
||||
|
||||
// FileSystemMetrics represents disk usage metrics for a pod or node
|
||||
type FileSystemMetrics struct {
|
||||
// fileSystemMetrics represents disk usage metrics for a pod or node
|
||||
type fileSystemMetrics struct {
|
||||
AvailableBytes int64 `json:"availableBytes"`
|
||||
CapacityBytes int64 `json:"capacityBytes"`
|
||||
UsedBytes int64 `json:"usedBytes"`
|
||||
}
|
||||
|
||||
// NetworkMetrics represents network usage data for a pod or node
|
||||
type NetworkMetrics struct {
|
||||
// networkMetrics represents network usage data for a pod or node
|
||||
type networkMetrics struct {
|
||||
Time time.Time `json:"time"`
|
||||
RXBytes int64 `json:"rxBytes"`
|
||||
RXErrors int64 `json:"rxErrors"`
|
||||
|
|
@ -84,8 +84,8 @@ type NetworkMetrics struct {
|
|||
TXErrors int64 `json:"txErrors"`
|
||||
}
|
||||
|
||||
// VolumeMetrics represents the disk usage data for a given volume
|
||||
type VolumeMetrics struct {
|
||||
// volumeMetrics represents the disk usage data for a given volume
|
||||
type volumeMetrics struct {
|
||||
Name string `json:"name"`
|
||||
AvailableBytes int64 `json:"availableBytes"`
|
||||
CapacityBytes int64 `json:"capacityBytes"`
|
||||
|
|
|
|||
|
|
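Worth noting for this whole rename: unexporting the type names does not break JSON decoding, because encoding/json only requires the fields (or their tags) to be exported; the type identifier itself can stay package-private. A quick self-contained check:

package example

import (
	"encoding/json"
	"fmt"
)

// memory is unexported, but its field is exported and tagged,
// so json.Unmarshal can still populate it.
type memory struct {
	UsageBytes int64 `json:"usageBytes"`
}

func demo() {
	var m memory
	if err := json.Unmarshal([]byte(`{"usageBytes":1024}`), &m); err != nil {
		panic(err)
	}
	fmt.Println(m.UsageBytes) // prints 1024
}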
@ -23,6 +23,13 @@ import (
|
|||
//go:embed sample.conf
|
||||
var sampleConfig string
|
||||
|
||||
const (
|
||||
jvmStatsNode = "/_node/stats/jvm"
|
||||
processStatsNode = "/_node/stats/process"
|
||||
pipelinesStatsNode = "/_node/stats/pipelines"
|
||||
pipelineStatsNode = "/_node/stats/pipeline"
|
||||
)
|
||||
|
||||
type Logstash struct {
|
||||
URL string `toml:"url"`
|
||||
|
||||
|
|
@ -39,20 +46,7 @@ type Logstash struct {
|
|||
httpconfig.HTTPClientConfig
|
||||
}
|
||||
|
||||
// NewLogstash creates an instance of the plugin with default settings
|
||||
func NewLogstash() *Logstash {
|
||||
return &Logstash{
|
||||
URL: "http://127.0.0.1:9600",
|
||||
SinglePipeline: false,
|
||||
Collect: []string{"pipelines", "process", "jvm"},
|
||||
Headers: make(map[string]string),
|
||||
HTTPClientConfig: httpconfig.HTTPClientConfig{
|
||||
Timeout: config.Duration(5 * time.Second),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type ProcessStats struct {
|
||||
type processStats struct {
|
||||
ID string `json:"id"`
|
||||
Process interface{} `json:"process"`
|
||||
Name string `json:"name"`
|
||||
|
|
@ -60,7 +54,7 @@ type ProcessStats struct {
|
|||
Version string `json:"version"`
|
||||
}
|
||||
|
||||
type JVMStats struct {
|
||||
type jvmStats struct {
|
||||
ID string `json:"id"`
|
||||
JVM interface{} `json:"jvm"`
|
||||
Name string `json:"name"`
|
||||
|
|
@ -68,30 +62,30 @@ type JVMStats struct {
|
|||
Version string `json:"version"`
|
||||
}
|
||||
|
||||
type PipelinesStats struct {
|
||||
type pipelinesStats struct {
|
||||
ID string `json:"id"`
|
||||
Pipelines map[string]Pipeline `json:"pipelines"`
|
||||
Pipelines map[string]pipeline `json:"pipelines"`
|
||||
Name string `json:"name"`
|
||||
Host string `json:"host"`
|
||||
Version string `json:"version"`
|
||||
}
|
||||
|
||||
type PipelineStats struct {
|
||||
type pipelineStats struct {
|
||||
ID string `json:"id"`
|
||||
Pipeline Pipeline `json:"pipeline"`
|
||||
Pipeline pipeline `json:"pipeline"`
|
||||
Name string `json:"name"`
|
||||
Host string `json:"host"`
|
||||
Version string `json:"version"`
|
||||
}
|
||||
|
||||
type Pipeline struct {
|
||||
type pipeline struct {
|
||||
Events interface{} `json:"events"`
|
||||
Plugins PipelinePlugins `json:"plugins"`
|
||||
Plugins pipelinePlugins `json:"plugins"`
|
||||
Reloads interface{} `json:"reloads"`
|
||||
Queue PipelineQueue `json:"queue"`
|
||||
Queue pipelineQueue `json:"queue"`
|
||||
}
|
||||
|
||||
type Plugin struct {
|
||||
type plugin struct {
|
||||
ID string `json:"id"`
|
||||
Events interface{} `json:"events"`
|
||||
Name string `json:"name"`
|
||||
|
|
@ -100,13 +94,13 @@ type Plugin struct {
|
|||
Documents map[string]interface{} `json:"documents"`
|
||||
}
|
||||
|
||||
type PipelinePlugins struct {
|
||||
Inputs []Plugin `json:"inputs"`
|
||||
Filters []Plugin `json:"filters"`
|
||||
Outputs []Plugin `json:"outputs"`
|
||||
type pipelinePlugins struct {
|
||||
Inputs []plugin `json:"inputs"`
|
||||
Filters []plugin `json:"filters"`
|
||||
Outputs []plugin `json:"outputs"`
|
||||
}
|
||||
|
||||
type PipelineQueue struct {
|
||||
type pipelineQueue struct {
|
||||
Events float64 `json:"events"`
|
||||
EventsCount *float64 `json:"events_count"`
|
||||
Type string `json:"type"`
|
||||
|
|
@ -116,11 +110,6 @@ type PipelineQueue struct {
|
|||
MaxQueueSizeInBytes *float64 `json:"max_queue_size_in_bytes"`
|
||||
}
|
||||
|
||||
const jvmStats = "/_node/stats/jvm"
|
||||
const processStats = "/_node/stats/process"
|
||||
const pipelinesStats = "/_node/stats/pipelines"
|
||||
const pipelineStats = "/_node/stats/pipeline"
|
||||
|
||||
func (*Logstash) SampleConfig() string {
|
||||
return sampleConfig
|
||||
}
|
||||
|
|
@ -180,7 +169,7 @@ func (logstash *Logstash) gatherJSONData(address string, value interface{}) erro
|
|||
|
||||
// gatherJVMStats gathers the JVM metrics and adds the results to the accumulator
|
||||
func (logstash *Logstash) gatherJVMStats(address string, accumulator telegraf.Accumulator) error {
|
||||
jvmStats := &JVMStats{}
|
||||
jvmStats := &jvmStats{}
|
||||
|
||||
err := logstash.gatherJSONData(address, jvmStats)
|
||||
if err != nil {
|
||||
|
|
@ -206,7 +195,7 @@ func (logstash *Logstash) gatherJVMStats(address string, accumulator telegraf.Ac
|
|||
|
||||
// gatherProcessStats gathers the process metrics and adds the results to the accumulator
|
||||
func (logstash *Logstash) gatherProcessStats(address string, accumulator telegraf.Accumulator) error {
|
||||
processStats := &ProcessStats{}
|
||||
processStats := &processStats{}
|
||||
|
||||
err := logstash.gatherJSONData(address, processStats)
|
||||
if err != nil {
|
||||
|
|
@ -232,7 +221,7 @@ func (logstash *Logstash) gatherProcessStats(address string, accumulator telegra
|
|||
|
||||
// gatherPluginsStats goes through a list of plugins and adds their metrics to the accumulator
|
||||
func (logstash *Logstash) gatherPluginsStats(
|
||||
plugins []Plugin,
|
||||
plugins []plugin,
|
||||
pluginType string,
|
||||
tags map[string]string,
|
||||
accumulator telegraf.Accumulator,
|
||||
|
|
@ -318,7 +307,7 @@ func (logstash *Logstash) gatherPluginsStats(
|
|||
return nil
|
||||
}
|
||||
|
||||
func (logstash *Logstash) gatherQueueStats(queue PipelineQueue, tags map[string]string, acc telegraf.Accumulator) error {
|
||||
func (logstash *Logstash) gatherQueueStats(queue pipelineQueue, tags map[string]string, acc telegraf.Accumulator) error {
|
||||
queueTags := map[string]string{
|
||||
"queue_type": queue.Type,
|
||||
}
|
||||
|
|
@ -365,7 +354,7 @@ func (logstash *Logstash) gatherQueueStats(queue PipelineQueue, tags map[string]
|
|||
|
||||
// gatherPipelineStats gathers the pipeline metrics and adds the results to the accumulator (for Logstash < 6)
|
||||
func (logstash *Logstash) gatherPipelineStats(address string, accumulator telegraf.Accumulator) error {
|
||||
pipelineStats := &PipelineStats{}
|
||||
pipelineStats := &pipelineStats{}
|
||||
|
||||
err := logstash.gatherJSONData(address, pipelineStats)
|
||||
if err != nil {
|
||||
|
|
@ -409,7 +398,7 @@ func (logstash *Logstash) gatherPipelineStats(address string, accumulator telegr
|
|||
|
||||
// gatherPipelinesStats gathers the pipelines metrics and adds the results to the accumulator (for Logstash >= 6)
|
||||
func (logstash *Logstash) gatherPipelinesStats(address string, accumulator telegraf.Accumulator) error {
|
||||
pipelinesStats := &PipelinesStats{}
|
||||
pipelinesStats := &pipelinesStats{}
|
||||
|
||||
err := logstash.gatherJSONData(address, pipelinesStats)
|
||||
if err != nil {
|
||||
|
|
@ -470,7 +459,7 @@ func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error {
|
|||
}
|
||||
|
||||
if choice.Contains("jvm", logstash.Collect) {
|
||||
jvmURL, err := url.Parse(logstash.URL + jvmStats)
|
||||
jvmURL, err := url.Parse(logstash.URL + jvmStatsNode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -480,7 +469,7 @@ func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error {
|
|||
}
|
||||
|
||||
if choice.Contains("process", logstash.Collect) {
|
||||
processURL, err := url.Parse(logstash.URL + processStats)
|
||||
processURL, err := url.Parse(logstash.URL + processStatsNode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -491,7 +480,7 @@ func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error {
|
|||
|
||||
if choice.Contains("pipelines", logstash.Collect) {
|
||||
if logstash.SinglePipeline {
|
||||
pipelineURL, err := url.Parse(logstash.URL + pipelineStats)
|
||||
pipelineURL, err := url.Parse(logstash.URL + pipelineStatsNode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -499,7 +488,7 @@ func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error {
|
|||
return err
|
||||
}
|
||||
} else {
|
||||
pipelinesURL, err := url.Parse(logstash.URL + pipelinesStats)
|
||||
pipelinesURL, err := url.Parse(logstash.URL + pipelinesStatsNode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -521,6 +510,18 @@ func (logstash *Logstash) Stop() {
|
|||
// init registers this plugin instance
|
||||
func init() {
|
||||
inputs.Add("logstash", func() telegraf.Input {
|
||||
return NewLogstash()
|
||||
return newLogstash()
|
||||
})
|
||||
}
|
||||
|
||||
// newLogstash creates an instance of the plugin with default settings
|
||||
func newLogstash() *Logstash {
|
||||
return &Logstash{
|
||||
URL: "http://127.0.0.1:9600",
|
||||
Collect: []string{"pipelines", "process", "jvm"},
|
||||
Headers: make(map[string]string),
|
||||
HTTPClientConfig: httpconfig.HTTPClientConfig{
|
||||
Timeout: config.Duration(5 * time.Second),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
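The Gather path above just appends one of the *StatsNode constants to the configured base URL and parses the result, so moving those paths into a single const block is purely cosmetic. A small sketch of that step:

package example

import (
	"fmt"
	"net/url"
)

const jvmStatsNode = "/_node/stats/jvm" // same path constant as above

// statsURL builds the endpoint queried for JVM stats, e.g.
// statsURL("http://127.0.0.1:9600") -> "http://127.0.0.1:9600/_node/stats/jvm".
func statsURL(base string) (string, error) {
	u, err := url.Parse(base + jvmStatsNode)
	if err != nil {
		return "", fmt.Errorf("invalid logstash URL: %w", err)
	}
	return u.String(), nil
}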
@ -13,7 +13,7 @@ import (
|
|||
"github.com/influxdata/telegraf/testutil"
|
||||
)
|
||||
|
||||
var logstashTest = NewLogstash()
|
||||
var logstashTest = newLogstash()
|
||||
|
||||
var (
|
||||
logstash5accPipelineStats testutil.Accumulator
|
||||
|
|
@ -44,7 +44,7 @@ func Test_Logstash5GatherProcessStats(test *testing.T) {
|
|||
logstashTest.client = client
|
||||
}
|
||||
|
||||
err = logstashTest.gatherProcessStats(logstashTest.URL+processStats, &logstash5accProcessStats)
|
||||
err = logstashTest.gatherProcessStats(logstashTest.URL+processStatsNode, &logstash5accProcessStats)
|
||||
require.NoError(test, err, "Can't gather Process stats")
|
||||
|
||||
logstash5accProcessStats.AssertContainsTaggedFields(
|
||||
|
|
@ -89,7 +89,7 @@ func Test_Logstash6GatherProcessStats(test *testing.T) {
|
|||
logstashTest.client = client
|
||||
}
|
||||
|
||||
err = logstashTest.gatherProcessStats(logstashTest.URL+processStats, &logstash6accProcessStats)
|
||||
err = logstashTest.gatherProcessStats(logstashTest.URL+processStatsNode, &logstash6accProcessStats)
|
||||
require.NoError(test, err, "Can't gather Process stats")
|
||||
|
||||
logstash6accProcessStats.AssertContainsTaggedFields(
|
||||
|
|
@ -135,7 +135,7 @@ func Test_Logstash5GatherPipelineStats(test *testing.T) {
|
|||
logstashTest.client = client
|
||||
}
|
||||
|
||||
err = logstashTest.gatherPipelineStats(logstashTest.URL+pipelineStats, &logstash5accPipelineStats)
|
||||
err = logstashTest.gatherPipelineStats(logstashTest.URL+pipelineStatsNode, &logstash5accPipelineStats)
|
||||
require.NoError(test, err, "Can't gather Pipeline stats")
|
||||
|
||||
logstash5accPipelineStats.AssertContainsTaggedFields(
|
||||
|
|
@ -233,7 +233,7 @@ func Test_Logstash6GatherPipelinesStats(test *testing.T) {
|
|||
logstashTest.client = client
|
||||
}
|
||||
|
||||
err = logstashTest.gatherPipelinesStats(logstashTest.URL+pipelineStats, &logstash6accPipelinesStats)
|
||||
err = logstashTest.gatherPipelinesStats(logstashTest.URL+pipelineStatsNode, &logstash6accPipelinesStats)
|
||||
require.NoError(test, err, "Can't gather Pipeline stats")
|
||||
|
||||
fields := make(map[string]interface{})
|
||||
|
|
@ -575,7 +575,7 @@ func Test_Logstash5GatherJVMStats(test *testing.T) {
|
|||
logstashTest.client = client
|
||||
}
|
||||
|
||||
err = logstashTest.gatherJVMStats(logstashTest.URL+jvmStats, &logstash5accJVMStats)
|
||||
err = logstashTest.gatherJVMStats(logstashTest.URL+jvmStatsNode, &logstash5accJVMStats)
|
||||
require.NoError(test, err, "Can't gather JVM stats")
|
||||
|
||||
logstash5accJVMStats.AssertContainsTaggedFields(
|
||||
|
|
@ -639,7 +639,7 @@ func Test_Logstash6GatherJVMStats(test *testing.T) {
|
|||
logstashTest.client = client
|
||||
}
|
||||
|
||||
err = logstashTest.gatherJVMStats(logstashTest.URL+jvmStats, &logstash6accJVMStats)
|
||||
err = logstashTest.gatherJVMStats(logstashTest.URL+jvmStatsNode, &logstash6accJVMStats)
|
||||
require.NoError(test, err, "Can't gather JVM stats")
|
||||
|
||||
logstash6accJVMStats.AssertContainsTaggedFields(
|
||||
|
|
@ -710,7 +710,7 @@ func Test_Logstash7GatherPipelinesQueueStats(test *testing.T) {
|
|||
logstashTest.client = client
|
||||
}
|
||||
|
||||
if err := logstashTest.gatherPipelinesStats(logstashTest.URL+pipelineStats, &logstash7accPipelinesStats); err != nil {
|
||||
if err := logstashTest.gatherPipelinesStats(logstashTest.URL+pipelineStatsNode, &logstash7accPipelinesStats); err != nil {
|
||||
test.Logf("Can't gather Pipeline stats")
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
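The tests above run the gather functions against a stubbed HTTP endpoint rather than a live Logstash. A condensed sketch of that setup with net/http/httptest; the canned JSON body here is a placeholder, not the fixture the tests actually use:

package example

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func stubbedFetch() (string, error) {
	// Serve a canned payload, as the Logstash tests do for each endpoint.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, `{"id":"abc","host":"node-1"}`) // placeholder body
	}))
	defer ts.Close()

	resp, err := http.Get(ts.URL + "/_node/stats/jvm")
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	return string(body), err
}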
@ -21,7 +21,7 @@ const (
|
|||
|
||||
var mailchimpDatacenter = regexp.MustCompile("[a-z]+[0-9]+$")
|
||||
|
||||
type ChimpAPI struct {
|
||||
type chimpAPI struct {
|
||||
Transport http.RoundTripper
|
||||
debug bool
|
||||
|
||||
|
|
@ -31,14 +31,14 @@ type ChimpAPI struct {
|
|||
log telegraf.Logger
|
||||
}
|
||||
|
||||
type ReportsParams struct {
|
||||
type reportsParams struct {
|
||||
Count string
|
||||
Offset string
|
||||
SinceSendTime string
|
||||
BeforeSendTime string
|
||||
}
|
||||
|
||||
func (p *ReportsParams) String() string {
|
||||
func (p *reportsParams) String() string {
|
||||
v := url.Values{}
|
||||
if p.Count != "" {
|
||||
v.Set("count", p.Count)
|
||||
|
|
@ -55,15 +55,15 @@ func (p *ReportsParams) String() string {
|
|||
return v.Encode()
|
||||
}
|
||||
|
||||
func NewChimpAPI(apiKey string, log telegraf.Logger) *ChimpAPI {
|
||||
func NewChimpAPI(apiKey string, log telegraf.Logger) *chimpAPI {
|
||||
u := &url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = mailchimpDatacenter.FindString(apiKey) + ".api.mailchimp.com"
|
||||
u.User = url.UserPassword("", apiKey)
|
||||
return &ChimpAPI{url: u, log: log}
|
||||
return &chimpAPI{url: u, log: log}
|
||||
}
|
||||
|
||||
type APIError struct {
|
||||
type apiError struct {
|
||||
Status int `json:"status"`
|
||||
Type string `json:"type"`
|
||||
Title string `json:"title"`
|
||||
|
|
@ -71,12 +71,12 @@ type APIError struct {
|
|||
Instance string `json:"instance"`
|
||||
}
|
||||
|
||||
func (e APIError) Error() string {
|
||||
func (e apiError) Error() string {
|
||||
return fmt.Sprintf("ERROR %v: %v. See %v", e.Status, e.Title, e.Type)
|
||||
}
|
||||
|
||||
func chimpErrorCheck(body []byte) error {
|
||||
var e APIError
|
||||
var e apiError
|
||||
if err := json.Unmarshal(body, &e); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -86,12 +86,12 @@ func chimpErrorCheck(body []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (a *ChimpAPI) GetReports(params ReportsParams) (ReportsResponse, error) {
|
||||
func (a *chimpAPI) GetReports(params reportsParams) (reportsResponse, error) {
|
||||
a.Lock()
|
||||
defer a.Unlock()
|
||||
a.url.Path = reportsEndpoint
|
||||
|
||||
var response ReportsResponse
|
||||
var response reportsResponse
|
||||
rawjson, err := a.runChimp(params)
|
||||
if err != nil {
|
||||
return response, err
|
||||
|
|
@ -105,13 +105,13 @@ func (a *ChimpAPI) GetReports(params ReportsParams) (ReportsResponse, error) {
|
|||
return response, nil
|
||||
}
|
||||
|
||||
func (a *ChimpAPI) GetReport(campaignID string) (Report, error) {
|
||||
func (a *chimpAPI) GetReport(campaignID string) (report, error) {
|
||||
a.Lock()
|
||||
defer a.Unlock()
|
||||
a.url.Path = fmt.Sprintf(reportsEndpointCampaign, campaignID)
|
||||
|
||||
var response Report
|
||||
rawjson, err := a.runChimp(ReportsParams{})
|
||||
var response report
|
||||
rawjson, err := a.runChimp(reportsParams{})
|
||||
if err != nil {
|
||||
return response, err
|
||||
}
|
||||
|
|
@ -124,7 +124,7 @@ func (a *ChimpAPI) GetReport(campaignID string) (Report, error) {
|
|||
return response, nil
|
||||
}
|
||||
|
||||
func (a *ChimpAPI) runChimp(params ReportsParams) ([]byte, error) {
|
||||
func (a *chimpAPI) runChimp(params reportsParams) ([]byte, error) {
|
||||
client := &http.Client{
|
||||
Transport: a.Transport,
|
||||
Timeout: 4 * time.Second,
|
||||
|
|
@ -167,12 +167,12 @@ func (a *ChimpAPI) runChimp(params ReportsParams) ([]byte, error) {
|
|||
return body, nil
|
||||
}
|
||||
|
||||
type ReportsResponse struct {
|
||||
Reports []Report `json:"reports"`
|
||||
type reportsResponse struct {
|
||||
Reports []report `json:"reports"`
|
||||
TotalItems int `json:"total_items"`
|
||||
}
|
||||
|
||||
type Report struct {
|
||||
type report struct {
|
||||
ID string `json:"id"`
|
||||
CampaignTitle string `json:"campaign_title"`
|
||||
Type string `json:"type"`
|
||||
|
|
@ -181,35 +181,35 @@ type Report struct {
|
|||
Unsubscribed int `json:"unsubscribed"`
|
||||
SendTime string `json:"send_time"`
|
||||
|
||||
TimeSeries []TimeSeries
|
||||
Bounces Bounces `json:"bounces"`
|
||||
Forwards Forwards `json:"forwards"`
|
||||
Opens Opens `json:"opens"`
|
||||
Clicks Clicks `json:"clicks"`
|
||||
FacebookLikes FacebookLikes `json:"facebook_likes"`
|
||||
IndustryStats IndustryStats `json:"industry_stats"`
|
||||
ListStats ListStats `json:"list_stats"`
|
||||
TimeSeries []timeSeries
|
||||
Bounces bounces `json:"bounces"`
|
||||
Forwards forwards `json:"forwards"`
|
||||
Opens opens `json:"opens"`
|
||||
Clicks clicks `json:"clicks"`
|
||||
FacebookLikes facebookLikes `json:"facebook_likes"`
|
||||
IndustryStats industryStats `json:"industry_stats"`
|
||||
ListStats listStats `json:"list_stats"`
|
||||
}
|
||||
|
||||
type Bounces struct {
|
||||
type bounces struct {
|
||||
HardBounces int `json:"hard_bounces"`
|
||||
SoftBounces int `json:"soft_bounces"`
|
||||
SyntaxErrors int `json:"syntax_errors"`
|
||||
}
|
||||
|
||||
type Forwards struct {
|
||||
type forwards struct {
|
||||
ForwardsCount int `json:"forwards_count"`
|
||||
ForwardsOpens int `json:"forwards_opens"`
|
||||
}
|
||||
|
||||
type Opens struct {
|
||||
type opens struct {
|
||||
OpensTotal int `json:"opens_total"`
|
||||
UniqueOpens int `json:"unique_opens"`
|
||||
OpenRate float64 `json:"open_rate"`
|
||||
LastOpen string `json:"last_open"`
|
||||
}
|
||||
|
||||
type Clicks struct {
|
||||
type clicks struct {
|
||||
ClicksTotal int `json:"clicks_total"`
|
||||
UniqueClicks int `json:"unique_clicks"`
|
||||
UniqueSubscriberClicks int `json:"unique_subscriber_clicks"`
|
||||
|
|
@ -217,13 +217,13 @@ type Clicks struct {
|
|||
LastClick string `json:"last_click"`
|
||||
}
|
||||
|
||||
type FacebookLikes struct {
|
||||
type facebookLikes struct {
|
||||
RecipientLikes int `json:"recipient_likes"`
|
||||
UniqueLikes int `json:"unique_likes"`
|
||||
FacebookLikes int `json:"facebook_likes"`
|
||||
}
|
||||
|
||||
type IndustryStats struct {
|
||||
type industryStats struct {
|
||||
Type string `json:"type"`
|
||||
OpenRate float64 `json:"open_rate"`
|
||||
ClickRate float64 `json:"click_rate"`
|
||||
|
|
@ -233,14 +233,14 @@ type IndustryStats struct {
|
|||
AbuseRate float64 `json:"abuse_rate"`
|
||||
}
|
||||
|
||||
type ListStats struct {
|
||||
type listStats struct {
|
||||
SubRate float64 `json:"sub_rate"`
|
||||
UnsubRate float64 `json:"unsub_rate"`
|
||||
OpenRate float64 `json:"open_rate"`
|
||||
ClickRate float64 `json:"click_rate"`
|
||||
}
|
||||
|
||||
type TimeSeries struct {
|
||||
type timeSeries struct {
|
||||
TimeStamp string `json:"timestamp"`
|
||||
EmailsSent int `json:"emails_sent"`
|
||||
UniqueOpens int `json:"unique_opens"`
|
||||
|
|
|
|||
|
|
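The APIError -> apiError rename is safe because the error interface only cares about the exported Error() method, not the concrete type's visibility. The sketch below shows that, plus a guess at the check chimpErrorCheck performs after unmarshalling (the actual condition is elided in the hunk above, so treat it as an assumption):

package example

import (
	"encoding/json"
	"fmt"
)

type apiError struct {
	Status int    `json:"status"`
	Type   string `json:"type"`
	Title  string `json:"title"`
}

func (e apiError) Error() string {
	return fmt.Sprintf("ERROR %v: %v. See %v", e.Status, e.Title, e.Type)
}

// check decodes the response body and, assuming a non-empty title plus a
// non-zero status marks an API-level failure, returns the value as an error.
func check(body []byte) error {
	var e apiError
	if err := json.Unmarshal(body, &e); err != nil {
		return err
	}
	if e.Title != "" && e.Status != 0 {
		return e
	}
	return nil
}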
@ -14,7 +14,7 @@ import (
|
|||
var sampleConfig string
|
||||
|
||||
type MailChimp struct {
|
||||
api *ChimpAPI
|
||||
api *chimpAPI
|
||||
|
||||
APIKey string `toml:"api_key"`
|
||||
DaysOld int `toml:"days_old"`
|
||||
|
|
@ -45,7 +45,7 @@ func (m *MailChimp) Gather(acc telegraf.Accumulator) error {
|
|||
since = now.Add(-d).Format(time.RFC3339)
|
||||
}
|
||||
|
||||
reports, err := m.api.GetReports(ReportsParams{
|
||||
reports, err := m.api.GetReports(reportsParams{
|
||||
SinceSendTime: since,
|
||||
})
|
||||
if err != nil {
|
||||
|
|
@ -68,7 +68,7 @@ func (m *MailChimp) Gather(acc telegraf.Accumulator) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func gatherReport(acc telegraf.Accumulator, report Report, now time.Time) {
|
||||
func gatherReport(acc telegraf.Accumulator, report report, now time.Time) {
|
||||
tags := make(map[string]string)
|
||||
tags["id"] = report.ID
|
||||
tags["campaign_title"] = report.CampaignTitle
|
||||
|
|
|
|||
|
|
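In the Gather hunk above, since is just "now minus days_old" rendered as RFC 3339 for the reports query. A tiny sketch of that calculation (deriving the duration from DaysOld this way is an assumption, since that line sits outside the hunk):

package example

import "time"

// sinceParam formats the start of the reporting window: the current time
// shifted back by the configured number of days, in RFC 3339.
func sinceParam(daysOld int, now time.Time) string {
	d := time.Duration(daysOld) * 24 * time.Hour
	return now.Add(-d).Format(time.RFC3339)
}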
@ -26,7 +26,7 @@ func TestMailChimpGatherReports(t *testing.T) {
|
|||
u, err := url.ParseRequestURI(ts.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
api := &ChimpAPI{
|
||||
api := &chimpAPI{
|
||||
url: u,
|
||||
debug: true,
|
||||
log: testutil.Logger{},
|
||||
|
|
@ -91,7 +91,7 @@ func TestMailChimpGatherReport(t *testing.T) {
|
|||
u, err := url.ParseRequestURI(ts.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
api := &ChimpAPI{
|
||||
api := &chimpAPI{
|
||||
url: u,
|
||||
debug: true,
|
||||
log: testutil.Logger{},
|
||||
|
|
@ -157,7 +157,7 @@ func TestMailChimpGatherError(t *testing.T) {
|
|||
u, err := url.ParseRequestURI(ts.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
api := &ChimpAPI{
|
||||
api := &chimpAPI{
|
||||
url: u,
|
||||
debug: true,
|
||||
log: testutil.Logger{},
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@ import (
|
|||
)
|
||||
|
||||
type MongodbData struct {
|
||||
StatLine *StatLine
|
||||
StatLine *statLine
|
||||
Fields map[string]interface{}
|
||||
Tags map[string]string
|
||||
DbData []DbData
|
||||
|
|
@ -29,7 +29,7 @@ type ColData struct {
|
|||
Fields map[string]interface{}
|
||||
}
|
||||
|
||||
func NewMongodbData(statLine *StatLine, tags map[string]string) *MongodbData {
|
||||
func NewMongodbData(statLine *statLine, tags map[string]string) *MongodbData {
|
||||
return &MongodbData{
|
||||
StatLine: statLine,
|
||||
Tags: tags,
|
||||
|
|
|
|||
|
|
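One side effect of NewMongodbData keeping its exported name while statLine goes unexported: the constructor is still callable from anywhere, but only code in the same package (including the tests below) can actually build the *statLine argument. A minimal illustration of that shape, with made-up field names:

package example

// statLine is package-private; Data and its constructor stay exported.
type statLine struct {
	Insert int64
}

type Data struct {
	line *statLine
	tags map[string]string
}

// NewData compiles fine with an unexported parameter type, but external
// packages have no way to construct a *statLine to pass in.
func NewData(line *statLine, tags map[string]string) *Data {
	return &Data{line: line, tags: tags}
}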
@ -14,7 +14,7 @@ var tags = make(map[string]string)
|
|||
|
||||
func TestAddNonReplStats(t *testing.T) {
|
||||
d := NewMongodbData(
|
||||
&StatLine{
|
||||
&statLine{
|
||||
StorageEngine: "",
|
||||
Time: time.Now(),
|
||||
UptimeNanos: 0,
|
||||
|
|
@ -72,7 +72,7 @@ func TestAddNonReplStats(t *testing.T) {
|
|||
|
||||
func TestAddReplStats(t *testing.T) {
|
||||
d := NewMongodbData(
|
||||
&StatLine{
|
||||
&statLine{
|
||||
StorageEngine: "mmapv1",
|
||||
Mapped: 0,
|
||||
NonMapped: 0,
|
||||
|
|
@ -93,7 +93,7 @@ func TestAddReplStats(t *testing.T) {
|
|||
|
||||
func TestAddWiredTigerStats(t *testing.T) {
|
||||
d := NewMongodbData(
|
||||
&StatLine{
|
||||
&statLine{
|
||||
StorageEngine: "wiredTiger",
|
||||
CacheDirtyPercent: 0,
|
||||
CacheUsedPercent: 0,
|
||||
|
|
@ -140,7 +140,7 @@ func TestAddWiredTigerStats(t *testing.T) {
|
|||
|
||||
func TestAddShardStats(t *testing.T) {
|
||||
d := NewMongodbData(
|
||||
&StatLine{
|
||||
&statLine{
|
||||
TotalInUse: 0,
|
||||
TotalAvailable: 0,
|
||||
TotalCreated: 0,
|
||||
|
|
@ -161,7 +161,7 @@ func TestAddShardStats(t *testing.T) {
|
|||
|
||||
func TestAddLatencyStats(t *testing.T) {
|
||||
d := NewMongodbData(
|
||||
&StatLine{
|
||||
&statLine{
|
||||
CommandOpsCnt: 73,
|
||||
CommandLatency: 364,
|
||||
ReadOpsCnt: 113,
|
||||
|
|
@ -184,7 +184,7 @@ func TestAddLatencyStats(t *testing.T) {
|
|||
|
||||
func TestAddAssertsStats(t *testing.T) {
|
||||
d := NewMongodbData(
|
||||
&StatLine{
|
||||
&statLine{
|
||||
Regular: 3,
|
||||
Warning: 9,
|
||||
Msg: 2,
|
||||
|
|
@ -206,7 +206,7 @@ func TestAddAssertsStats(t *testing.T) {
|
|||
|
||||
func TestAddCommandsStats(t *testing.T) {
|
||||
d := NewMongodbData(
|
||||
&StatLine{
|
||||
&statLine{
|
||||
AggregateCommandTotal: 12,
|
||||
AggregateCommandFailed: 2,
|
||||
CountCommandTotal: 18,
|
||||
|
|
@ -241,7 +241,7 @@ func TestAddCommandsStats(t *testing.T) {
|
|||
|
||||
func TestAddTCMallocStats(t *testing.T) {
|
||||
d := NewMongodbData(
|
||||
&StatLine{
|
||||
&statLine{
|
||||
TCMallocCurrentAllocatedBytes: 5877253096,
|
||||
TCMallocHeapSize: 8067108864,
|
||||
TCMallocPageheapFreeBytes: 1054994432,
|
||||
|
|
@ -277,7 +277,7 @@ func TestAddTCMallocStats(t *testing.T) {
|
|||
|
||||
func TestAddStorageStats(t *testing.T) {
|
||||
d := NewMongodbData(
|
||||
&StatLine{
|
||||
&statLine{
|
||||
StorageFreelistSearchBucketExhausted: 0,
|
||||
StorageFreelistSearchRequests: 0,
|
||||
StorageFreelistSearchScanned: 0,
|
||||
|
|
@ -297,9 +297,9 @@ func TestAddStorageStats(t *testing.T) {
|
|||
|
||||
func TestAddShardHostStats(t *testing.T) {
|
||||
expectedHosts := []string{"hostA", "hostB"}
|
||||
hostStatLines := map[string]ShardHostStatLine{}
|
||||
hostStatLines := map[string]shardHostStatLine{}
|
||||
for _, host := range expectedHosts {
|
||||
hostStatLines[host] = ShardHostStatLine{
|
||||
hostStatLines[host] = shardHostStatLine{
|
||||
InUse: 0,
|
||||
Available: 0,
|
||||
Created: 0,
|
||||
|
|
@ -308,7 +308,7 @@ func TestAddShardHostStats(t *testing.T) {
|
|||
}
|
||||
|
||||
d := NewMongodbData(
|
||||
&StatLine{
|
||||
&statLine{
|
||||
ShardHostStatsLines: hostStatLines,
|
||||
},
|
||||
map[string]string{}, // Use empty tags, so we don't break existing tests
|
||||
|
|
@ -334,7 +334,7 @@ func TestAddShardHostStats(t *testing.T) {
|
|||
|
||||
func TestStateTag(t *testing.T) {
|
||||
d := NewMongodbData(
|
||||
&StatLine{
|
||||
&statLine{
|
||||
StorageEngine: "",
|
||||
Time: time.Now(),
|
||||
Insert: 0,
|
||||
|
|
@ -498,9 +498,9 @@ func TestStateTag(t *testing.T) {
|
|||
|
||||
func TestAddTopStats(t *testing.T) {
|
||||
collections := []string{"collectionOne", "collectionTwo"}
|
||||
topStatLines := make([]TopStatLine, 0, len(collections))
|
||||
topStatLines := make([]topStatLine, 0, len(collections))
|
||||
for _, collection := range collections {
|
||||
topStatLine := TopStatLine{
|
||||
topStatLine := topStatLine{
|
||||
CollectionName: collection,
|
||||
TotalTime: 0,
|
||||
TotalCount: 0,
|
||||
|
|
@ -525,7 +525,7 @@ func TestAddTopStats(t *testing.T) {
|
|||
}
|
||||
|
||||
d := NewMongodbData(
|
||||
&StatLine{
|
||||
&statLine{
|
||||
TopStatLines: topStatLines,
|
||||
},
|
||||
tags,
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ import (
|
|||
type Server struct {
|
||||
client *mongo.Client
|
||||
hostname string
|
||||
lastResult *MongoStatus
|
||||
lastResult *mongoStatus
|
||||
|
||||
Log telegraf.Logger
|
||||
}
|
||||
|
|
@ -61,8 +61,8 @@ func (s *Server) runCommand(database string, cmd interface{}, result interface{}
|
|||
return r.Decode(result)
|
||||
}
|
||||
|
||||
func (s *Server) gatherServerStatus() (*ServerStatus, error) {
|
||||
serverStatus := &ServerStatus{}
|
||||
func (s *Server) gatherServerStatus() (*serverStatus, error) {
|
||||
serverStatus := &serverStatus{}
|
||||
err := s.runCommand("admin", bson.D{
|
||||
{
|
||||
Key: "serverStatus",
|
||||
|
|
@ -79,8 +79,8 @@ func (s *Server) gatherServerStatus() (*ServerStatus, error) {
|
|||
return serverStatus, nil
|
||||
}
|
||||
|
||||
func (s *Server) gatherReplSetStatus() (*ReplSetStatus, error) {
|
||||
replSetStatus := &ReplSetStatus{}
|
||||
func (s *Server) gatherReplSetStatus() (*replSetStatus, error) {
|
||||
replSetStatus := &replSetStatus{}
|
||||
err := s.runCommand("admin", bson.D{
|
||||
{
|
||||
Key: "replSetGetStatus",
|
||||
|
|
@ -93,7 +93,7 @@ func (s *Server) gatherReplSetStatus() (*ReplSetStatus, error) {
|
|||
return replSetStatus, nil
|
||||
}
|
||||
|
||||
func (s *Server) gatherTopStatData() (*TopStats, error) {
|
||||
func (s *Server) gatherTopStatData() (*topStats, error) {
|
||||
var dest map[string]interface{}
|
||||
err := s.runCommand("admin", bson.D{
|
||||
{
|
||||
|
|
@ -116,21 +116,21 @@ func (s *Server) gatherTopStatData() (*TopStats, error) {
|
|||
return nil, errors.New("unable to marshal totals")
|
||||
}
|
||||
|
||||
topInfo := make(map[string]TopStatCollection)
|
||||
topInfo := make(map[string]topStatCollection)
|
||||
if err := bson.Unmarshal(recorded, &topInfo); err != nil {
|
||||
return nil, fmt.Errorf("failed unmarshalling records: %w", err)
|
||||
}
|
||||
|
||||
return &TopStats{Totals: topInfo}, nil
|
||||
return &topStats{Totals: topInfo}, nil
|
||||
}
|
||||
|
||||
func (s *Server) gatherClusterStatus() (*ClusterStatus, error) {
|
||||
func (s *Server) gatherClusterStatus() (*clusterStatus, error) {
|
||||
chunkCount, err := s.client.Database("config").Collection("chunks").CountDocuments(context.Background(), bson.M{"jumbo": true})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &ClusterStatus{
|
||||
return &clusterStatus{
|
||||
JumboChunksCount: chunkCount,
|
||||
}, nil
|
||||
}
|
||||
|
|
@ -148,13 +148,13 @@ func poolStatsCommand(version string) (string, error) {
|
|||
return "shardConnPoolStats", nil
|
||||
}
|
||||
|
||||
func (s *Server) gatherShardConnPoolStats(version string) (*ShardStats, error) {
|
||||
func (s *Server) gatherShardConnPoolStats(version string) (*shardStats, error) {
|
||||
command, err := poolStatsCommand(version)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
shardStats := &ShardStats{}
|
||||
shardStats := &shardStats{}
|
||||
err = s.runCommand("admin", bson.D{
|
||||
{
|
||||
Key: command,
|
||||
|
|
@ -167,8 +167,8 @@ func (s *Server) gatherShardConnPoolStats(version string) (*ShardStats, error) {
|
|||
return shardStats, nil
|
||||
}
|
||||
|
||||
func (s *Server) gatherDBStats(name string) (*Db, error) {
|
||||
stats := &DbStatsData{}
|
||||
func (s *Server) gatherDBStats(name string) (*db, error) {
|
||||
stats := &dbStatsData{}
|
||||
err := s.runCommand(name, bson.D{
|
||||
{
|
||||
Key: "dbStats",
|
||||
|
|
@ -179,13 +179,13 @@ func (s *Server) gatherDBStats(name string) (*Db, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
return &Db{
|
||||
return &db{
|
||||
Name: name,
|
||||
DbStatsData: stats,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Server) getOplogReplLag(collection string) (*OplogStats, error) {
|
||||
func (s *Server) getOplogReplLag(collection string) (*oplogStats, error) {
|
||||
query := bson.M{"ts": bson.M{"$exists": true}}
|
||||
|
||||
var first oplogEntry
|
||||
|
|
@ -208,7 +208,7 @@ func (s *Server) getOplogReplLag(collection string) (*OplogStats, error) {
|
|||
|
||||
firstTime := time.Unix(int64(first.Timestamp.T), 0)
|
||||
lastTime := time.Unix(int64(last.Timestamp.T), 0)
|
||||
stats := &OplogStats{
|
||||
stats := &oplogStats{
|
||||
TimeDiff: int64(lastTime.Sub(firstTime).Seconds()),
|
||||
}
|
||||
return stats, nil
|
||||
|
|
@ -219,7 +219,7 @@ func (s *Server) getOplogReplLag(collection string) (*OplogStats, error) {
|
|||
// The "oplog.$main" collection is created on the master node of a
|
||||
// master-slave replicated deployment. As of MongoDB 3.2, master-slave
|
||||
// replication has been deprecated.
|
||||
func (s *Server) gatherOplogStats() (*OplogStats, error) {
|
||||
func (s *Server) gatherOplogStats() (*oplogStats, error) {
|
||||
stats, err := s.getOplogReplLag("oplog.rs")
|
||||
if err == nil {
|
||||
return stats, nil
|
||||
|
|
@ -228,13 +228,13 @@ func (s *Server) gatherOplogStats() (*OplogStats, error) {
|
|||
return s.getOplogReplLag("oplog.$main")
|
||||
}
|
||||
|
||||
func (s *Server) gatherCollectionStats(colStatsDbs []string) (*ColStats, error) {
|
||||
func (s *Server) gatherCollectionStats(colStatsDbs []string) (*colStats, error) {
|
||||
names, err := s.client.ListDatabaseNames(context.Background(), bson.D{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
results := &ColStats{}
|
||||
results := &colStats{}
|
||||
for _, dbName := range names {
|
||||
if stringInSlice(dbName, colStatsDbs) || len(colStatsDbs) == 0 {
|
||||
// skip views as they fail on collStats below
|
||||
|
|
@ -247,7 +247,7 @@ func (s *Server) gatherCollectionStats(colStatsDbs []string) (*ColStats, error)
|
|||
continue
|
||||
}
|
||||
for _, colName := range colls {
|
||||
colStatLine := &ColStatsData{}
|
||||
colStatLine := &colStatsData{}
|
||||
err = s.runCommand(dbName, bson.D{
|
||||
{
|
||||
Key: "collStats",
|
||||
|
|
@ -258,7 +258,7 @@ func (s *Server) gatherCollectionStats(colStatsDbs []string) (*ColStats, error)
|
|||
s.authLog(fmt.Errorf("error getting col stats from %q: %w", colName, err))
|
||||
continue
|
||||
}
|
||||
collection := &Collection{
|
||||
collection := &collection{
|
||||
Name: colName,
|
||||
DbName: dbName,
|
||||
ColStatsData: colStatLine,
|
||||
|
|
@ -292,7 +292,7 @@ func (s *Server) gatherData(
|
|||
|
||||
// Gather the oplog if we are a member of a replica set. Non-replica set
|
||||
// members do not have the oplog collections.
|
||||
var oplogStats *OplogStats
|
||||
var oplogStats *oplogStats
|
||||
if replSetStatus != nil {
|
||||
oplogStats, err = s.gatherOplogStats()
|
||||
if err != nil {
|
||||
|
|
@ -300,7 +300,7 @@ func (s *Server) gatherData(
|
|||
}
|
||||
}
|
||||
|
||||
var clusterStatus *ClusterStatus
|
||||
var clusterStatus *clusterStatus
|
||||
if gatherClusterStatus {
|
||||
status, err := s.gatherClusterStatus()
|
||||
if err != nil {
|
||||
|
|
@ -314,7 +314,7 @@ func (s *Server) gatherData(
|
|||
s.authLog(fmt.Errorf("unable to gather shard connection pool stats: %w", err))
|
||||
}
|
||||
|
||||
var collectionStats *ColStats
|
||||
var collectionStats *colStats
|
||||
if gatherColStats {
|
||||
stats, err := s.gatherCollectionStats(colStatsDbs)
|
||||
if err != nil {
|
||||
|
|
@ -323,7 +323,7 @@ func (s *Server) gatherData(
|
|||
collectionStats = stats
|
||||
}
|
||||
|
||||
dbStats := &DbStats{}
|
||||
dbStats := &dbStats{}
|
||||
if gatherDbStats {
|
||||
names, err := s.client.ListDatabaseNames(context.Background(), bson.D{})
|
||||
if err != nil {
|
||||
|
|
@ -339,7 +339,7 @@ func (s *Server) gatherData(
|
|||
}
|
||||
}
|
||||
|
||||
topStatData := &TopStats{}
|
||||
topStatData := &topStats{}
|
||||
if gatherTopStat {
|
||||
topStats, err := s.gatherTopStatData()
|
||||
if err != nil {
|
||||
|
|
@ -349,7 +349,7 @@ func (s *Server) gatherData(
|
|||
topStatData = topStats
|
||||
}
|
||||
|
||||
result := &MongoStatus{
|
||||
result := &mongoStatus{
|
||||
ServerStatus: serverStatus,
|
||||
ReplSetStatus: replSetStatus,
|
||||
ClusterStatus: clusterStatus,
|
||||
|
|
|
|||
|
|
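All of the gather helpers above funnel through runCommand, which wraps the driver's Database.RunCommand plus a Decode into the target struct; bson keys off the struct tags, so the unexported type names decode just as before. A rough sketch with the official mongo-driver (trimmed-down struct, assumed field set):

package example

import (
	"context"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
)

// serverStatus carries only two of the many fields the real plugin reads.
type serverStatus struct {
	Host   string `bson:"host"`
	Uptime int64  `bson:"uptime"`
}

func gatherServerStatus(ctx context.Context, client *mongo.Client) (*serverStatus, error) {
	out := &serverStatus{}
	res := client.Database("admin").RunCommand(ctx, bson.D{{Key: "serverStatus", Value: 1}})
	if err := res.Decode(out); err != nil {
		return nil, err
	}
	return out, nil
}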
@ -28,19 +28,19 @@ const (
|
|||
WTOnly // only active if node has wiredtiger-specific fields
|
||||
)
|
||||
|
||||
type MongoStatus struct {
|
||||
type mongoStatus struct {
|
||||
SampleTime time.Time
|
||||
ServerStatus *ServerStatus
|
||||
ReplSetStatus *ReplSetStatus
|
||||
ClusterStatus *ClusterStatus
|
||||
DbStats *DbStats
|
||||
ColStats *ColStats
|
||||
ShardStats *ShardStats
|
||||
OplogStats *OplogStats
|
||||
TopStats *TopStats
|
||||
ServerStatus *serverStatus
|
||||
ReplSetStatus *replSetStatus
|
||||
ClusterStatus *clusterStatus
|
||||
DbStats *dbStats
|
||||
ColStats *colStats
|
||||
ShardStats *shardStats
|
||||
OplogStats *oplogStats
|
||||
TopStats *topStats
|
||||
}
|
||||
|
||||
type ServerStatus struct {
|
||||
type serverStatus struct {
|
||||
SampleTime time.Time `bson:""`
|
||||
Flattened map[string]interface{} `bson:""`
|
||||
Host string `bson:"host"`
|
||||
|
|
@ -51,40 +51,40 @@ type ServerStatus struct {
|
|||
UptimeMillis int64 `bson:"uptimeMillis"`
|
||||
UptimeEstimate int64 `bson:"uptimeEstimate"`
|
||||
LocalTime time.Time `bson:"localTime"`
|
||||
Asserts *AssertsStats `bson:"asserts"`
|
||||
BackgroundFlushing *FlushStats `bson:"backgroundFlushing"`
|
||||
ExtraInfo *ExtraInfo `bson:"extra_info"`
|
||||
Connections *ConnectionStats `bson:"connections"`
|
||||
Dur *DurStats `bson:"dur"`
|
||||
GlobalLock *GlobalLockStats `bson:"globalLock"`
|
||||
Locks map[string]LockStats `bson:"locks,omitempty"`
|
||||
Network *NetworkStats `bson:"network"`
|
||||
Opcounters *OpcountStats `bson:"opcounters"`
|
||||
OpcountersRepl *OpcountStats `bson:"opcountersRepl"`
|
||||
OpLatencies *OpLatenciesStats `bson:"opLatencies"`
|
||||
RecordStats *DBRecordStats `bson:"recordStats"`
|
||||
Mem *MemStats `bson:"mem"`
|
||||
Repl *ReplStatus `bson:"repl"`
|
||||
Asserts *assertsStats `bson:"asserts"`
|
||||
BackgroundFlushing *flushStats `bson:"backgroundFlushing"`
|
||||
ExtraInfo *extraInfo `bson:"extra_info"`
|
||||
Connections *connectionStats `bson:"connections"`
|
||||
Dur *durStats `bson:"dur"`
|
||||
GlobalLock *globalLockStats `bson:"globalLock"`
|
||||
Locks map[string]lockStats `bson:"locks,omitempty"`
|
||||
Network *networkStats `bson:"network"`
|
||||
Opcounters *opcountStats `bson:"opcounters"`
|
||||
OpcountersRepl *opcountStats `bson:"opcountersRepl"`
|
||||
OpLatencies *opLatenciesStats `bson:"opLatencies"`
|
||||
RecordStats *dbRecordStats `bson:"recordStats"`
|
||||
Mem *memStats `bson:"mem"`
|
||||
Repl *replStatus `bson:"repl"`
|
||||
ShardCursorType map[string]interface{} `bson:"shardCursorType"`
|
||||
StorageEngine *StorageEngine `bson:"storageEngine"`
|
||||
WiredTiger *WiredTiger `bson:"wiredTiger"`
|
||||
Metrics *MetricsStats `bson:"metrics"`
|
||||
TCMallocStats *TCMallocStats `bson:"tcmalloc"`
|
||||
StorageEngine *storageEngine `bson:"storageEngine"`
|
||||
WiredTiger *wiredTiger `bson:"wiredTiger"`
|
||||
Metrics *metricsStats `bson:"metrics"`
|
||||
TCMallocStats *tcMallocStats `bson:"tcmalloc"`
|
||||
}
|
||||
|
||||
// DbStats stores stats from all dbs
|
||||
type DbStats struct {
|
||||
Dbs []Db
|
||||
// dbStats stores stats from all dbs
|
||||
type dbStats struct {
|
||||
Dbs []db
|
||||
}
|
||||
|
||||
// Db represents a single DB
|
||||
type Db struct {
|
||||
// db represents a single DB
|
||||
type db struct {
|
||||
Name string
|
||||
DbStatsData *DbStatsData
|
||||
DbStatsData *dbStatsData
|
||||
}
|
||||
|
||||
// DbStatsData stores stats from a db
|
||||
type DbStatsData struct {
|
||||
// dbStatsData stores stats from a db
|
||||
type dbStatsData struct {
|
||||
Db string `bson:"db"`
|
||||
Collections int64 `bson:"collections"`
|
||||
Objects int64 `bson:"objects"`
|
||||
|
|
@ -100,17 +100,17 @@ type DbStatsData struct {
|
|||
FsTotalSize int64 `bson:"fsTotalSize"`
|
||||
}
|
||||
|
||||
type ColStats struct {
|
||||
Collections []Collection
|
||||
type colStats struct {
|
||||
Collections []collection
|
||||
}
|
||||
|
||||
type Collection struct {
|
||||
type collection struct {
|
||||
Name string
|
||||
DbName string
|
||||
ColStatsData *ColStatsData
|
||||
ColStatsData *colStatsData
|
||||
}
|
||||
|
||||
type ColStatsData struct {
|
||||
type colStatsData struct {
|
||||
Collection string `bson:"ns"`
|
||||
Count int64 `bson:"count"`
|
||||
Size int64 `bson:"size"`
|
||||
|
|
@ -120,24 +120,24 @@ type ColStatsData struct {
|
|||
Ok int64 `bson:"ok"`
|
||||
}
|
||||
|
||||
// ClusterStatus stores information related to the whole cluster
|
||||
type ClusterStatus struct {
|
||||
// clusterStatus stores information related to the whole cluster
|
||||
type clusterStatus struct {
|
||||
JumboChunksCount int64
|
||||
}
|
||||
|
||||
// ReplSetStatus stores information from replSetGetStatus
|
||||
type ReplSetStatus struct {
|
||||
Members []ReplSetMember `bson:"members"`
|
||||
// replSetStatus stores information from replSetGetStatus
|
||||
type replSetStatus struct {
|
||||
Members []replSetMember `bson:"members"`
|
||||
MyState int64 `bson:"myState"`
|
||||
}
|
||||
|
||||
// OplogStatus stores information from getReplicationInfo
|
||||
type OplogStats struct {
|
||||
// oplogStats stores information from getReplicationInfo
|
||||
type oplogStats struct {
|
||||
TimeDiff int64
|
||||
}
|
||||
|
||||
// ReplSetMember stores information related to a replica set member
|
||||
type ReplSetMember struct {
|
||||
// replSetMember stores information related to a replica set member
|
||||
type replSetMember struct {
|
||||
Name string `bson:"name"`
|
||||
Health int64 `bson:"health"`
|
||||
State int64 `bson:"state"`
|
||||
|
|
@ -145,72 +145,71 @@ type ReplSetMember struct {
|
|||
OptimeDate time.Time `bson:"optimeDate"`
|
||||
}
|
||||
|
||||
// WiredTiger stores information related to the WiredTiger storage engine.
|
||||
type WiredTiger struct {
|
||||
Transaction TransactionStats `bson:"transaction"`
|
||||
Concurrent ConcurrentTransactions `bson:"concurrentTransactions"`
|
||||
Cache CacheStats `bson:"cache"`
|
||||
Connection WTConnectionStats `bson:"connection"`
|
||||
DataHandle DataHandleStats `bson:"data-handle"`
|
||||
// wiredTiger stores information related to the wiredTiger storage engine.
|
||||
type wiredTiger struct {
|
||||
Transaction transactionStats `bson:"transaction"`
|
||||
Concurrent concurrentTransactions `bson:"concurrentTransactions"`
|
||||
Cache cacheStats `bson:"cache"`
|
||||
Connection wtConnectionStats `bson:"connection"`
|
||||
DataHandle dataHandleStats `bson:"data-handle"`
|
||||
}
|
||||
|
||||
// ShardStats stores information from shardConnPoolStats.
|
||||
type ShardStats struct {
|
||||
ShardStatsData `bson:",inline"`
|
||||
Hosts map[string]ShardHostStatsData `bson:"hosts"`
|
||||
// shardStats stores information from shardConnPoolStats.
|
||||
type shardStats struct {
|
||||
shardStatsData `bson:",inline"`
|
||||
Hosts map[string]shardHostStatsData `bson:"hosts"`
|
||||
}
|
||||
|
||||
// ShardStatsData is the total Shard Stats from shardConnPoolStats database command.
|
||||
type ShardStatsData struct {
|
||||
// shardStatsData is the total Shard Stats from shardConnPoolStats database command.
|
||||
type shardStatsData struct {
|
||||
TotalInUse int64 `bson:"totalInUse"`
|
||||
TotalAvailable int64 `bson:"totalAvailable"`
|
||||
TotalCreated int64 `bson:"totalCreated"`
|
||||
TotalRefreshing int64 `bson:"totalRefreshing"`
|
||||
}
|
||||
|
||||
// ShardHostStatsData is the host-specific stats
|
||||
// from shardConnPoolStats database command.
|
||||
type ShardHostStatsData struct {
|
||||
// shardHostStatsData is the host-specific stats from shardConnPoolStats database command.
|
||||
type shardHostStatsData struct {
|
||||
InUse int64 `bson:"inUse"`
|
||||
Available int64 `bson:"available"`
|
||||
Created int64 `bson:"created"`
|
||||
Refreshing int64 `bson:"refreshing"`
|
||||
}
|
||||
|
||||
type TopStats struct {
|
||||
Totals map[string]TopStatCollection `bson:"totals"`
|
||||
type topStats struct {
|
||||
Totals map[string]topStatCollection `bson:"totals"`
|
||||
}
|
||||
|
||||
type TopStatCollection struct {
|
||||
Total TopStatCollectionData `bson:"total"`
|
||||
ReadLock TopStatCollectionData `bson:"readLock"`
|
||||
WriteLock TopStatCollectionData `bson:"writeLock"`
|
||||
Queries TopStatCollectionData `bson:"queries"`
|
||||
GetMore TopStatCollectionData `bson:"getmore"`
|
||||
Insert TopStatCollectionData `bson:"insert"`
|
||||
Update TopStatCollectionData `bson:"update"`
|
||||
Remove TopStatCollectionData `bson:"remove"`
|
||||
Commands TopStatCollectionData `bson:"commands"`
|
||||
type topStatCollection struct {
|
||||
Total topStatCollectionData `bson:"total"`
|
||||
ReadLock topStatCollectionData `bson:"readLock"`
|
||||
WriteLock topStatCollectionData `bson:"writeLock"`
|
||||
Queries topStatCollectionData `bson:"queries"`
|
||||
GetMore topStatCollectionData `bson:"getmore"`
|
||||
Insert topStatCollectionData `bson:"insert"`
|
||||
Update topStatCollectionData `bson:"update"`
|
||||
Remove topStatCollectionData `bson:"remove"`
|
||||
Commands topStatCollectionData `bson:"commands"`
|
||||
}
|
||||
|
||||
type TopStatCollectionData struct {
|
||||
type topStatCollectionData struct {
|
||||
Time int64 `bson:"time"`
|
||||
Count int64 `bson:"count"`
|
||||
}
|
||||
|
||||
type ConcurrentTransactions struct {
|
||||
Write ConcurrentTransStats `bson:"write"`
|
||||
Read ConcurrentTransStats `bson:"read"`
|
||||
type concurrentTransactions struct {
|
||||
Write concurrentTransStats `bson:"write"`
|
||||
Read concurrentTransStats `bson:"read"`
|
||||
}
|
||||
|
||||
type ConcurrentTransStats struct {
|
||||
type concurrentTransStats struct {
|
||||
Out int64 `bson:"out"`
|
||||
Available int64 `bson:"available"`
|
||||
TotalTickets int64 `bson:"totalTickets"`
|
||||
}
|
||||
|
||||
// AssertsStats stores information related to assertions raised since the MongoDB process started
|
||||
type AssertsStats struct {
|
||||
// assertsStats stores information related to assertions raised since the MongoDB process started
|
||||
type assertsStats struct {
|
||||
Regular int64 `bson:"regular"`
|
||||
Warning int64 `bson:"warning"`
|
||||
Msg int64 `bson:"msg"`
|
||||
|
|
@ -218,8 +217,8 @@ type AssertsStats struct {
|
|||
Rollovers int64 `bson:"rollovers"`
|
||||
}
|
||||
|
||||
// CacheStats stores cache statistics for WiredTiger.
|
||||
type CacheStats struct {
|
||||
// cacheStats stores cache statistics for wiredTiger.
|
||||
type cacheStats struct {
|
||||
TrackedDirtyBytes int64 `bson:"tracked dirty bytes in the cache"`
|
||||
CurrentCachedBytes int64 `bson:"bytes currently in the cache"`
|
||||
MaxBytesConfigured int64 `bson:"maximum bytes configured"`
|
||||
|
|
@ -241,28 +240,28 @@ type CacheStats struct {
|
|||
UnmodifiedPagesEvicted int64 `bson:"unmodified pages evicted"`
|
||||
}
|
||||
|
||||
type StorageEngine struct {
|
||||
type storageEngine struct {
|
||||
Name string `bson:"name"`
|
||||
}
|
||||
|
||||
// TransactionStats stores transaction checkpoints in WiredTiger.
|
||||
type TransactionStats struct {
|
||||
// transactionStats stores transaction checkpoints in wiredTiger.
|
||||
type transactionStats struct {
|
||||
TransCheckpointsTotalTimeMsecs int64 `bson:"transaction checkpoint total time (msecs)"`
|
||||
TransCheckpoints int64 `bson:"transaction checkpoints"`
|
||||
}
|
||||
|
||||
// WTConnectionStats stores statistics on wiredTiger connections
|
||||
type WTConnectionStats struct {
|
||||
// wtConnectionStats stores statistics on wiredTiger connections
|
||||
type wtConnectionStats struct {
|
||||
FilesCurrentlyOpen int64 `bson:"files currently open"`
|
||||
}
|
||||
|
||||
// DataHandleStats stores statistics for wiredTiger data-handles
|
||||
type DataHandleStats struct {
|
||||
// dataHandleStats stores statistics for wiredTiger data-handles
|
||||
type dataHandleStats struct {
|
||||
DataHandlesCurrentlyActive int64 `bson:"connection data handles currently active"`
|
||||
}
|
||||
|
||||
// ReplStatus stores data related to replica sets.
|
||||
type ReplStatus struct {
|
||||
// replStatus stores data related to replica sets.
|
||||
type replStatus struct {
|
||||
SetName string `bson:"setName"`
|
||||
IsWritablePrimary interface{} `bson:"isWritablePrimary"` // mongodb 5.x
|
||||
IsMaster interface{} `bson:"ismaster"`
|
||||
|
|
@ -274,21 +273,21 @@ type ReplStatus struct {
|
|||
Me string `bson:"me"`
|
||||
}
|
||||
|
||||
// DBRecordStats stores data related to memory operations across databases.
|
||||
type DBRecordStats struct {
|
||||
// dbRecordStats stores data related to memory operations across databases.
|
||||
type dbRecordStats struct {
|
||||
AccessesNotInMemory int64 `bson:"accessesNotInMemory"`
|
||||
PageFaultExceptionsThrown int64 `bson:"pageFaultExceptionsThrown"`
|
||||
DBRecordAccesses map[string]RecordAccesses `bson:",inline"`
|
||||
DBRecordAccesses map[string]recordAccesses `bson:",inline"`
|
||||
}
|
||||
|
||||
// RecordAccesses stores data related to memory operations scoped to a database.
|
||||
type RecordAccesses struct {
|
||||
// recordAccesses stores data related to memory operations scoped to a database.
|
||||
type recordAccesses struct {
|
||||
AccessesNotInMemory int64 `bson:"accessesNotInMemory"`
|
||||
PageFaultExceptionsThrown int64 `bson:"pageFaultExceptionsThrown"`
|
||||
}
|
||||
|
||||
// MemStats stores data related to memory statistics.
|
||||
type MemStats struct {
|
||||
// memStats stores data related to memory statistics.
|
||||
type memStats struct {
|
||||
Bits int64 `bson:"bits"`
|
||||
Resident int64 `bson:"resident"`
|
||||
Virtual int64 `bson:"virtual"`
|
||||
|
|
@ -297,8 +296,8 @@ type MemStats struct {
|
|||
MappedWithJournal int64 `bson:"mappedWithJournal"`
|
||||
}
|
||||
|
||||
// FlushStats stores information about memory flushes.
|
||||
type FlushStats struct {
|
||||
// flushStats stores information about memory flushes.
|
||||
type flushStats struct {
|
||||
Flushes int64 `bson:"flushes"`
|
||||
TotalMs int64 `bson:"total_ms"`
|
||||
AverageMs float64 `bson:"average_ms"`
|
||||
|
|
@ -306,15 +305,15 @@ type FlushStats struct {
|
|||
LastFinished time.Time `bson:"last_finished"`
|
||||
}
|
||||
|
||||
// ConnectionStats stores information related to incoming database connections.
|
||||
type ConnectionStats struct {
|
||||
// connectionStats stores information related to incoming database connections.
|
||||
type connectionStats struct {
|
||||
Current int64 `bson:"current"`
|
||||
Available int64 `bson:"available"`
|
||||
TotalCreated int64 `bson:"totalCreated"`
|
||||
}
|
||||
|
||||
// DurTiming stores information related to journaling.
|
||||
type DurTiming struct {
|
||||
// durTiming stores information related to journaling.
|
||||
type durTiming struct {
|
||||
Dt int64 `bson:"dt"`
|
||||
PrepLogBuffer int64 `bson:"prepLogBuffer"`
|
||||
WriteToJournal int64 `bson:"writeToJournal"`
|
||||
|
|
@ -322,48 +321,48 @@ type DurTiming struct {
|
|||
RemapPrivateView int64 `bson:"remapPrivateView"`
|
||||
}
|
||||
|
||||
// DurStats stores information related to journaling statistics.
|
||||
type DurStats struct {
|
||||
// durStats stores information related to journaling statistics.
|
||||
type durStats struct {
|
||||
Commits float64 `bson:"commits"`
|
||||
JournaledMB float64 `bson:"journaledMB"`
|
||||
WriteToDataFilesMB float64 `bson:"writeToDataFilesMB"`
|
||||
Compression float64 `bson:"compression"`
|
||||
CommitsInWriteLock float64 `bson:"commitsInWriteLock"`
|
||||
EarlyCommits float64 `bson:"earlyCommits"`
|
||||
TimeMs DurTiming
|
||||
TimeMs durTiming
|
||||
}
|
||||
|
||||
// QueueStats stores the number of queued read/write operations.
|
||||
type QueueStats struct {
|
||||
// queueStats stores the number of queued read/write operations.
|
||||
type queueStats struct {
|
||||
Total int64 `bson:"total"`
|
||||
Readers int64 `bson:"readers"`
|
||||
Writers int64 `bson:"writers"`
|
||||
}
|
||||
|
||||
// ClientStats stores the number of active read/write operations.
|
||||
type ClientStats struct {
|
||||
// clientStats stores the number of active read/write operations.
|
||||
type clientStats struct {
|
||||
Total int64 `bson:"total"`
|
||||
Readers int64 `bson:"readers"`
|
||||
Writers int64 `bson:"writers"`
|
||||
}
|
||||
|
||||
// GlobalLockStats stores information related to locks in the MMAP storage engine.
|
||||
type GlobalLockStats struct {
|
||||
// globalLockStats stores information related to locks in the MMAP storage engine.
|
||||
type globalLockStats struct {
|
||||
TotalTime int64 `bson:"totalTime"`
|
||||
LockTime int64 `bson:"lockTime"`
|
||||
CurrentQueue *QueueStats `bson:"currentQueue"`
|
||||
ActiveClients *ClientStats `bson:"activeClients"`
|
||||
CurrentQueue *queueStats `bson:"currentQueue"`
|
||||
ActiveClients *clientStats `bson:"activeClients"`
|
||||
}
|
||||
|
||||
// NetworkStats stores information related to network traffic.
|
||||
type NetworkStats struct {
|
||||
// networkStats stores information related to network traffic.
|
||||
type networkStats struct {
|
||||
BytesIn int64 `bson:"bytesIn"`
|
||||
BytesOut int64 `bson:"bytesOut"`
|
||||
NumRequests int64 `bson:"numRequests"`
|
||||
}
|
||||
|
||||
// OpcountStats stores information related to commands and basic CRUD operations.
|
||||
type OpcountStats struct {
|
||||
// opcountStats stores information related to commands and basic CRUD operations.
|
||||
type opcountStats struct {
|
||||
Insert int64 `bson:"insert"`
|
||||
Query int64 `bson:"query"`
|
||||
Update int64 `bson:"update"`
|
||||
|
|
@ -372,169 +371,168 @@ type OpcountStats struct {
|
|||
Command int64 `bson:"command"`
|
||||
}
|
||||
|
||||
// OpLatenciesStats stores information related to operation latencies for the database as a whole
|
||||
type OpLatenciesStats struct {
|
||||
Reads *LatencyStats `bson:"reads"`
|
||||
Writes *LatencyStats `bson:"writes"`
|
||||
Commands *LatencyStats `bson:"commands"`
|
||||
// opLatenciesStats stores information related to operation latencies for the database as a whole
|
||||
type opLatenciesStats struct {
|
||||
Reads *latencyStats `bson:"reads"`
|
||||
Writes *latencyStats `bson:"writes"`
|
||||
Commands *latencyStats `bson:"commands"`
|
||||
}
|
||||
|
||||
// LatencyStats lists total latency in microseconds and count of operations, enabling you to obtain an average
|
||||
type LatencyStats struct {
|
||||
// latencyStats lists total latency in microseconds and count of operations, enabling you to obtain an average
|
||||
type latencyStats struct {
|
||||
Latency int64 `bson:"latency"`
|
||||
Ops int64 `bson:"ops"`
|
||||
}
|
||||
|
||||
// MetricsStats stores information related to metrics
|
||||
type MetricsStats struct {
|
||||
TTL *TTLStats `bson:"ttl"`
|
||||
Cursor *CursorStats `bson:"cursor"`
|
||||
Document *DocumentStats `bson:"document"`
|
||||
Commands *CommandsStats `bson:"commands"`
|
||||
Operation *OperationStats `bson:"operation"`
|
||||
QueryExecutor *QueryExecutorStats `bson:"queryExecutor"`
|
||||
Repl *ReplStats `bson:"repl"`
|
||||
Storage *StorageStats `bson:"storage"`
|
||||
// metricsStats stores information related to metrics
|
||||
type metricsStats struct {
|
||||
TTL *ttlStats `bson:"ttl"`
|
||||
Cursor *cursorStats `bson:"cursor"`
|
||||
Document *documentStats `bson:"document"`
|
||||
Commands *commandsStats `bson:"commands"`
|
||||
Operation *operationStats `bson:"operation"`
|
||||
QueryExecutor *queryExecutorStats `bson:"queryExecutor"`
|
||||
Repl *replStats `bson:"repl"`
|
||||
Storage *storageStats `bson:"storage"`
|
||||
}
|
||||
|
||||
// TTLStats stores information related to documents with a ttl index.
|
||||
type TTLStats struct {
|
||||
// ttlStats stores information related to documents with a ttl index.
|
||||
type ttlStats struct {
|
||||
DeletedDocuments int64 `bson:"deletedDocuments"`
|
||||
Passes int64 `bson:"passes"`
|
||||
}
|
||||
|
||||
// CursorStats stores information related to cursor metrics.
|
||||
type CursorStats struct {
|
||||
// cursorStats stores information related to cursor metrics.
|
||||
type cursorStats struct {
|
||||
TimedOut int64 `bson:"timedOut"`
|
||||
Open *OpenCursorStats `bson:"open"`
|
||||
Open *openCursorStats `bson:"open"`
|
||||
}
|
||||
|
||||
// DocumentStats stores information related to document metrics.
|
||||
type DocumentStats struct {
|
||||
// documentStats stores information related to document metrics.
|
||||
type documentStats struct {
|
||||
Deleted int64 `bson:"deleted"`
|
||||
Inserted int64 `bson:"inserted"`
|
||||
Returned int64 `bson:"returned"`
|
||||
Updated int64 `bson:"updated"`
|
||||
}
|
||||
|
||||
// CommandsStats stores information related to document metrics.
|
||||
type CommandsStats struct {
|
||||
Aggregate *CommandsStatsValue `bson:"aggregate"`
|
||||
Count *CommandsStatsValue `bson:"count"`
|
||||
Delete *CommandsStatsValue `bson:"delete"`
|
||||
Distinct *CommandsStatsValue `bson:"distinct"`
|
||||
Find *CommandsStatsValue `bson:"find"`
|
||||
FindAndModify *CommandsStatsValue `bson:"findAndModify"`
|
||||
GetMore *CommandsStatsValue `bson:"getMore"`
|
||||
Insert *CommandsStatsValue `bson:"insert"`
|
||||
Update *CommandsStatsValue `bson:"update"`
|
||||
// commandsStats stores information related to document metrics.
|
||||
type commandsStats struct {
|
||||
Aggregate *commandsStatsValue `bson:"aggregate"`
|
||||
Count *commandsStatsValue `bson:"count"`
|
||||
Delete *commandsStatsValue `bson:"delete"`
|
||||
Distinct *commandsStatsValue `bson:"distinct"`
|
||||
Find *commandsStatsValue `bson:"find"`
|
||||
FindAndModify *commandsStatsValue `bson:"findAndModify"`
|
||||
GetMore *commandsStatsValue `bson:"getMore"`
|
||||
Insert *commandsStatsValue `bson:"insert"`
|
||||
Update *commandsStatsValue `bson:"update"`
|
||||
}
|
||||
|
||||
type CommandsStatsValue struct {
|
||||
type commandsStatsValue struct {
|
||||
Failed int64 `bson:"failed"`
|
||||
Total int64 `bson:"total"`
|
||||
}
|
||||
|
||||
// OpenCursorStats stores information related to open cursor metrics
|
||||
type OpenCursorStats struct {
|
||||
// openCursorStats stores information related to open cursor metrics
|
||||
type openCursorStats struct {
|
||||
NoTimeout int64 `bson:"noTimeout"`
|
||||
Pinned int64 `bson:"pinned"`
|
||||
Total int64 `bson:"total"`
|
||||
}
|
||||
|
||||
// OperationStats stores information related to query operations
|
||||
// operationStats stores information related to query operations
|
||||
// using special operation types
|
||||
type OperationStats struct {
|
||||
type operationStats struct {
|
||||
ScanAndOrder int64 `bson:"scanAndOrder"`
|
||||
WriteConflicts int64 `bson:"writeConflicts"`
|
||||
}
|
||||
|
||||
// QueryExecutorStats stores information related to query execution
|
||||
type QueryExecutorStats struct {
|
||||
// queryExecutorStats stores information related to query execution
|
||||
type queryExecutorStats struct {
|
||||
Scanned int64 `bson:"scanned"`
|
||||
ScannedObjects int64 `bson:"scannedObjects"`
|
||||
}
|
||||
|
||||
// ReplStats stores information related to replication process
|
||||
type ReplStats struct {
|
||||
Apply *ReplApplyStats `bson:"apply"`
|
||||
Buffer *ReplBufferStats `bson:"buffer"`
|
||||
Executor *ReplExecutorStats `bson:"executor,omitempty"`
|
||||
Network *ReplNetworkStats `bson:"network"`
|
||||
// replStats stores information related to replication process
|
||||
type replStats struct {
|
||||
Apply *replApplyStats `bson:"apply"`
|
||||
Buffer *replBufferStats `bson:"buffer"`
|
||||
Executor *replExecutorStats `bson:"executor,omitempty"`
|
||||
Network *replNetworkStats `bson:"network"`
|
||||
}
|
||||
|
||||
// ReplApplyStats stores information related to oplog application process
|
||||
type ReplApplyStats struct {
|
||||
Batches *BasicStats `bson:"batches"`
|
||||
// replApplyStats stores information related to oplog application process
|
||||
type replApplyStats struct {
|
||||
Batches *basicStats `bson:"batches"`
|
||||
Ops int64 `bson:"ops"`
|
||||
}
|
||||
|
||||
// ReplBufferStats stores information related to oplog buffer
|
||||
type ReplBufferStats struct {
|
||||
// replBufferStats stores information related to oplog buffer
|
||||
type replBufferStats struct {
|
||||
Count int64 `bson:"count"`
|
||||
SizeBytes int64 `bson:"sizeBytes"`
|
||||
}
|
||||
|
||||
// ReplExecutorStats stores information related to replication executor
|
||||
type ReplExecutorStats struct {
|
||||
// replExecutorStats stores information related to replication executor
|
||||
type replExecutorStats struct {
|
||||
Pool map[string]int64 `bson:"pool"`
|
||||
Queues map[string]int64 `bson:"queues"`
|
||||
UnsignaledEvents int64 `bson:"unsignaledEvents"`
|
||||
}
|
||||
|
||||
// ReplNetworkStats stores information related to network usage by replication process
|
||||
type ReplNetworkStats struct {
|
||||
// replNetworkStats stores information related to network usage by replication process
|
||||
type replNetworkStats struct {
|
||||
Bytes int64 `bson:"bytes"`
|
||||
GetMores *BasicStats `bson:"getmores"`
|
||||
GetMores *basicStats `bson:"getmores"`
|
||||
Ops int64 `bson:"ops"`
|
||||
}
|
||||
|
||||
// BasicStats stores information about an operation
|
||||
type BasicStats struct {
|
||||
// basicStats stores information about an operation
|
||||
type basicStats struct {
|
||||
Num int64 `bson:"num"`
|
||||
TotalMillis int64 `bson:"totalMillis"`
|
||||
}
|
||||
|
||||
// ReadWriteLockTimes stores time spent holding read/write locks.
|
||||
type ReadWriteLockTimes struct {
|
||||
// readWriteLockTimes stores time spent holding read/write locks.
|
||||
type readWriteLockTimes struct {
|
||||
Read int64 `bson:"R"`
|
||||
Write int64 `bson:"W"`
|
||||
ReadLower int64 `bson:"r"`
|
||||
WriteLower int64 `bson:"w"`
|
||||
}
|
||||
|
||||
// LockStats stores information related to time spent acquiring/holding locks
|
||||
// for a given database.
|
||||
type LockStats struct {
|
||||
TimeLockedMicros ReadWriteLockTimes `bson:"timeLockedMicros"`
|
||||
TimeAcquiringMicros ReadWriteLockTimes `bson:"timeAcquiringMicros"`
|
||||
// lockStats stores information related to time spent acquiring/holding locks for a given database.
|
||||
type lockStats struct {
|
||||
TimeLockedMicros readWriteLockTimes `bson:"timeLockedMicros"`
|
||||
TimeAcquiringMicros readWriteLockTimes `bson:"timeAcquiringMicros"`
|
||||
|
||||
// AcquireCount and AcquireWaitCount are new fields of the lock stats only populated on 3.0 or newer.
|
||||
// Typed as a pointer so that if it is nil, mongostat can assume the field is not populated
|
||||
// with real namespace data.
|
||||
AcquireCount *ReadWriteLockTimes `bson:"acquireCount,omitempty"`
|
||||
AcquireWaitCount *ReadWriteLockTimes `bson:"acquireWaitCount,omitempty"`
|
||||
AcquireCount *readWriteLockTimes `bson:"acquireCount,omitempty"`
|
||||
AcquireWaitCount *readWriteLockTimes `bson:"acquireWaitCount,omitempty"`
|
||||
}
|
||||
|
||||
// ExtraInfo stores additional platform specific information.
|
||||
type ExtraInfo struct {
|
||||
// extraInfo stores additional platform specific information.
|
||||
type extraInfo struct {
|
||||
PageFaults *int64 `bson:"page_faults"`
|
||||
}
|
||||
|
||||
// TCMallocStats stores information related to TCMalloc memory allocator metrics
|
||||
type TCMallocStats struct {
|
||||
Generic *GenericTCMAllocStats `bson:"generic"`
|
||||
TCMalloc *DetailedTCMallocStats `bson:"tcmalloc"`
|
||||
// tcMallocStats stores information related to TCMalloc memory allocator metrics
|
||||
type tcMallocStats struct {
|
||||
Generic *genericTCMAllocStats `bson:"generic"`
|
||||
TCMalloc *detailedTCMallocStats `bson:"tcmalloc"`
|
||||
}
|
||||
|
||||
// GenericTCMAllocStats stores generic TCMalloc memory allocator metrics
|
||||
type GenericTCMAllocStats struct {
|
||||
// genericTCMAllocStats stores generic TCMalloc memory allocator metrics
|
||||
type genericTCMAllocStats struct {
|
||||
CurrentAllocatedBytes int64 `bson:"current_allocated_bytes"`
|
||||
HeapSize int64 `bson:"heap_size"`
|
||||
}
|
||||
|
||||
// DetailedTCMallocStats stores detailed TCMalloc memory allocator metrics
|
||||
type DetailedTCMallocStats struct {
|
||||
// detailedTCMallocStats stores detailed TCMalloc memory allocator metrics
|
||||
type detailedTCMallocStats struct {
|
||||
PageheapFreeBytes int64 `bson:"pageheap_free_bytes"`
|
||||
PageheapUnmappedBytes int64 `bson:"pageheap_unmapped_bytes"`
|
||||
MaxTotalThreadCacheBytes int64 `bson:"max_total_thread_cache_bytes"`
|
||||
|
|
@ -554,16 +552,15 @@ type DetailedTCMallocStats struct {
|
|||
SpinLockTotalDelayNanos int64 `bson:"spinlock_total_delay_ns"`
|
||||
}
|
||||
|
||||
// StorageStats stores information related to record allocations
|
||||
type StorageStats struct {
|
||||
// storageStats stores information related to record allocations
|
||||
type storageStats struct {
|
||||
FreelistSearchBucketExhausted int64 `bson:"freelist.search.bucketExhausted"`
|
||||
FreelistSearchRequests int64 `bson:"freelist.search.requests"`
|
||||
FreelistSearchScanned int64 `bson:"freelist.search.scanned"`
|
||||
}
|
||||
|
||||
// StatHeader describes a single column for mongostat's terminal output,
|
||||
// its formatting, and in which modes it should be displayed.
|
||||
type StatHeader struct {
|
||||
// statHeader describes a single column for mongostat's terminal output, its formatting, and in which modes it should be displayed.
|
||||
type statHeader struct {
|
||||
// The text to appear in the column's header cell
|
||||
HeaderText string
|
||||
|
||||
|
|
@ -572,7 +569,7 @@ type StatHeader struct {
|
|||
}
|
||||
|
||||
// StatHeaders are the complete set of data metrics supported by mongostat.
|
||||
var StatHeaders = []StatHeader{
|
||||
var StatHeaders = []statHeader{
|
||||
{"", Always}, // placeholder for hostname column (blank header text)
|
||||
{"insert", Always},
|
||||
{"query", Always},
|
||||
|
|
@ -601,17 +598,17 @@ var StatHeaders = []StatHeader{
|
|||
{"time", Always},
|
||||
}
|
||||
|
||||
// NamespacedLocks stores information on the LockStatus of namespaces.
|
||||
type NamespacedLocks map[string]LockStatus
|
||||
// NamespacedLocks stores information on the lockStatus of namespaces.
|
||||
type NamespacedLocks map[string]lockStatus
|
||||
|
||||
// LockUsage stores information related to a namespace's lock usage.
|
||||
type LockUsage struct {
|
||||
// lockUsage stores information related to a namespace's lock usage.
|
||||
type lockUsage struct {
|
||||
Namespace string
|
||||
Reads int64
|
||||
Writes int64
|
||||
}
|
||||
|
||||
type lockUsages []LockUsage
|
||||
type lockUsages []lockUsage
|
||||
|
||||
func percentageInt64(value, outOf int64) float64 {
|
||||
if value == 0 || outOf == 0 {
|
||||
|
|
@ -639,23 +636,23 @@ func (slice lockUsages) Swap(i, j int) {
|
|||
slice[i], slice[j] = slice[j], slice[i]
|
||||
}
|
||||
|
||||
// CollectionLockStatus stores a collection's lock statistics.
|
||||
type CollectionLockStatus struct {
|
||||
// collectionLockStatus stores a collection's lock statistics.
|
||||
type collectionLockStatus struct {
|
||||
ReadAcquireWaitsPercentage float64
|
||||
WriteAcquireWaitsPercentage float64
|
||||
ReadAcquireTimeMicros int64
|
||||
WriteAcquireTimeMicros int64
|
||||
}
|
||||
|
||||
// LockStatus stores a database's lock statistics.
|
||||
type LockStatus struct {
|
||||
// lockStatus stores a database's lock statistics.
|
||||
type lockStatus struct {
|
||||
DBName string
|
||||
Percentage float64
|
||||
Global bool
|
||||
}
|
||||
|
||||
// StatLine is a wrapper for all metrics reported by mongostat for monitored hosts.
|
||||
type StatLine struct {
|
||||
// statLine is a wrapper for all metrics reported by mongostat for monitored hosts.
|
||||
type statLine struct {
|
||||
Key string
|
||||
// What storage engine is being used for the node with this stat line
|
||||
StorageEngine string
|
||||
|
|
@ -667,10 +664,10 @@ type StatLine struct {
|
|||
|
||||
UptimeNanos int64
|
||||
|
||||
// The time at which this StatLine was generated.
|
||||
// The time at which this statLine was generated.
|
||||
Time time.Time
|
||||
|
||||
// The last time at which this StatLine was printed to output.
|
||||
// The last time at which this statLine was printed to output.
|
||||
LastPrinted time.Time
|
||||
|
||||
// Opcounter fields
|
||||
|
|
@ -730,7 +727,7 @@ type StatLine struct {
|
|||
CurrentC, AvailableC, TotalCreatedC int64
|
||||
|
||||
// Collection locks (3.0 mmap only)
|
||||
CollectionLocks *CollectionLockStatus
|
||||
CollectionLocks *collectionLockStatus
|
||||
|
||||
// Cache utilization (wiredtiger only)
|
||||
CacheDirtyPercent float64
|
||||
|
|
@ -770,12 +767,12 @@ type StatLine struct {
|
|||
GetMoreR, GetMoreRCnt int64
|
||||
CommandR, CommandRCnt int64
|
||||
ReplLag int64
|
||||
OplogStats *OplogStats
|
||||
OplogStats *oplogStats
|
||||
Flushes, FlushesCnt int64
|
||||
FlushesTotalTime int64
|
||||
Mapped, Virtual, Resident, NonMapped int64
|
||||
Faults, FaultsCnt int64
|
||||
HighestLocked *LockStatus
|
||||
HighestLocked *lockStatus
|
||||
QueuedReaders, QueuedWriters int64
|
||||
ActiveReaders, ActiveWriters int64
|
||||
AvailableReaders, AvailableWriters int64
|
||||
|
|
@ -809,18 +806,18 @@ type StatLine struct {
|
|||
JumboChunksCount int64
|
||||
|
||||
// DB stats field
|
||||
DbStatsLines []DbStatLine
|
||||
DbStatsLines []dbStatLine
|
||||
|
||||
// Col Stats field
|
||||
ColStatsLines []ColStatLine
|
||||
ColStatsLines []colStatLine
|
||||
|
||||
// Shard stats
|
||||
TotalInUse, TotalAvailable, TotalCreated, TotalRefreshing int64
|
||||
|
||||
// Shard Hosts stats field
|
||||
ShardHostStatsLines map[string]ShardHostStatLine
|
||||
ShardHostStatsLines map[string]shardHostStatLine
|
||||
|
||||
TopStatLines []TopStatLine
|
||||
TopStatLines []topStatLine
|
||||
|
||||
// TCMalloc stats field
|
||||
TCMallocCurrentAllocatedBytes int64
|
||||
|
|
@ -849,7 +846,7 @@ type StatLine struct {
|
|||
StorageFreelistSearchScanned int64
|
||||
}
|
||||
|
||||
type DbStatLine struct {
|
||||
type dbStatLine struct {
|
||||
Name string
|
||||
Collections int64
|
||||
Objects int64
|
||||
|
|
@ -863,7 +860,7 @@ type DbStatLine struct {
|
|||
FsUsedSize int64
|
||||
FsTotalSize int64
|
||||
}
|
||||
type ColStatLine struct {
|
||||
type colStatLine struct {
|
||||
Name string
|
||||
DbName string
|
||||
Count int64
|
||||
|
|
@ -874,14 +871,14 @@ type ColStatLine struct {
|
|||
Ok int64
|
||||
}
|
||||
|
||||
type ShardHostStatLine struct {
|
||||
type shardHostStatLine struct {
|
||||
InUse int64
|
||||
Available int64
|
||||
Created int64
|
||||
Refreshing int64
|
||||
}
|
||||
|
||||
type TopStatLine struct {
|
||||
type topStatLine struct {
|
||||
CollectionName string
|
||||
TotalTime, TotalCount int64
|
||||
ReadLockTime, ReadLockCount int64
|
||||
|
|
@ -894,10 +891,10 @@ type TopStatLine struct {
|
|||
CommandsTime, CommandsCount int64
|
||||
}
|
||||
|
||||
func parseLocks(stat ServerStatus) map[string]LockUsage {
|
||||
returnVal := map[string]LockUsage{}
|
||||
func parseLocks(stat serverStatus) map[string]lockUsage {
|
||||
returnVal := map[string]lockUsage{}
|
||||
for namespace, lockInfo := range stat.Locks {
|
||||
returnVal[namespace] = LockUsage{
|
||||
returnVal[namespace] = lockUsage{
|
||||
namespace,
|
||||
lockInfo.TimeLockedMicros.Read + lockInfo.TimeLockedMicros.ReadLower,
|
||||
lockInfo.TimeLockedMicros.Write + lockInfo.TimeLockedMicros.WriteLower,
|
||||
|
|
@ -906,8 +903,8 @@ func parseLocks(stat ServerStatus) map[string]LockUsage {
|
|||
return returnVal
|
||||
}
|
||||
|
||||
func computeLockDiffs(prevLocks, curLocks map[string]LockUsage) []LockUsage {
|
||||
lockUsages := lockUsages(make([]LockUsage, 0, len(curLocks)))
|
||||
func computeLockDiffs(prevLocks, curLocks map[string]lockUsage) []lockUsage {
|
||||
lockUsages := lockUsages(make([]lockUsage, 0, len(curLocks)))
|
||||
for namespace, curUsage := range curLocks {
|
||||
prevUsage, hasKey := prevLocks[namespace]
|
||||
if !hasKey {
|
||||
|
|
@ -917,7 +914,7 @@ func computeLockDiffs(prevLocks, curLocks map[string]LockUsage) []LockUsage {
|
|||
}
|
||||
// Calculate diff of lock usage for this namespace and add to the list
|
||||
lockUsages = append(lockUsages,
|
||||
LockUsage{
|
||||
lockUsage{
|
||||
namespace,
|
||||
curUsage.Reads - prevUsage.Reads,
|
||||
curUsage.Writes - prevUsage.Writes,
|
||||
|
|
@ -936,12 +933,12 @@ func diff(newVal, oldVal, sampleTime int64) (avg int64, newValue int64) {
|
|||
return d / sampleTime, newVal
|
||||
}
|
||||
|
||||
// NewStatLine constructs a StatLine object from two MongoStatus objects.
|
||||
func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSecs int64) *StatLine {
|
||||
// NewStatLine constructs a statLine object from two mongoStatus objects.
|
||||
func NewStatLine(oldMongo, newMongo mongoStatus, key string, all bool, sampleSecs int64) *statLine {
|
||||
oldStat := *oldMongo.ServerStatus
|
||||
newStat := *newMongo.ServerStatus
|
||||
|
||||
returnVal := &StatLine{
|
||||
returnVal := &statLine{
|
||||
Key: key,
|
||||
Host: newStat.Host,
|
||||
Version: newStat.Version,
|
||||
|
|
@ -1240,7 +1237,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
|
|||
writeTotalCountDiff := newStat.Locks["Collection"].AcquireCount.Write - oldStat.Locks["Collection"].AcquireCount.Write
|
||||
readAcquireTimeDiff := newStat.Locks["Collection"].TimeAcquiringMicros.Read - oldStat.Locks["Collection"].TimeAcquiringMicros.Read
|
||||
writeAcquireTimeDiff := newStat.Locks["Collection"].TimeAcquiringMicros.Write - oldStat.Locks["Collection"].TimeAcquiringMicros.Write
|
||||
returnVal.CollectionLocks = &CollectionLockStatus{
|
||||
returnVal.CollectionLocks = &collectionLockStatus{
|
||||
ReadAcquireWaitsPercentage: percentageInt64(readWaitCountDiff, readTotalCountDiff),
|
||||
WriteAcquireWaitsPercentage: percentageInt64(writeWaitCountDiff, writeTotalCountDiff),
|
||||
ReadAcquireTimeMicros: averageInt64(readAcquireTimeDiff, readWaitCountDiff),
|
||||
|
|
@ -1253,7 +1250,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
|
|||
lockdiffs := computeLockDiffs(prevLocks, curLocks)
|
||||
if len(lockdiffs) == 0 {
|
||||
if newStat.GlobalLock != nil {
|
||||
returnVal.HighestLocked = &LockStatus{
|
||||
returnVal.HighestLocked = &lockStatus{
|
||||
DBName: "",
|
||||
Percentage: percentageInt64(newStat.GlobalLock.LockTime, newStat.GlobalLock.TotalTime),
|
||||
Global: true,
|
||||
|
|
@ -1279,7 +1276,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
|
|||
// divide by 1000 so that they units match
|
||||
lockToReport /= 1000
|
||||
|
||||
returnVal.HighestLocked = &LockStatus{
|
||||
returnVal.HighestLocked = &lockStatus{
|
||||
DBName: highestLocked.Namespace,
|
||||
Percentage: percentageInt64(lockToReport, timeDiffMillis),
|
||||
Global: false,
|
||||
|
|
@ -1339,8 +1336,8 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
|
|||
if newReplStat.Members != nil {
|
||||
myName := newStat.Repl.Me
|
||||
// Find the master and myself
|
||||
master := ReplSetMember{}
|
||||
me := ReplSetMember{}
|
||||
master := replSetMember{}
|
||||
me := replSetMember{}
|
||||
for _, member := range newReplStat.Members {
|
||||
if member.Name == myName {
|
||||
// Store my state string
|
||||
|
|
@ -1413,7 +1410,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
|
|||
if dbStatsData.Db == "" {
|
||||
dbStatsData.Db = db.Name
|
||||
}
|
||||
dbStatLine := &DbStatLine{
|
||||
dbStatLine := &dbStatLine{
|
||||
Name: dbStatsData.Db,
|
||||
Collections: dbStatsData.Collections,
|
||||
Objects: dbStatsData.Objects,
|
||||
|
|
@ -1438,7 +1435,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
|
|||
if colStatsData.Collection == "" {
|
||||
colStatsData.Collection = col.Name
|
||||
}
|
||||
colStatLine := &ColStatLine{
|
||||
colStatLine := &colStatLine{
|
||||
Name: colStatsData.Collection,
|
||||
DbName: col.DbName,
|
||||
Count: colStatsData.Count,
|
||||
|
|
@ -1459,9 +1456,9 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
|
|||
returnVal.TotalAvailable = newShardStats.TotalAvailable
|
||||
returnVal.TotalCreated = newShardStats.TotalCreated
|
||||
returnVal.TotalRefreshing = newShardStats.TotalRefreshing
|
||||
returnVal.ShardHostStatsLines = map[string]ShardHostStatLine{}
|
||||
returnVal.ShardHostStatsLines = map[string]shardHostStatLine{}
|
||||
for host, stats := range newShardStats.Hosts {
|
||||
shardStatLine := &ShardHostStatLine{
|
||||
shardStatLine := &shardHostStatLine{
|
||||
InUse: stats.InUse,
|
||||
Available: stats.Available,
|
||||
Created: stats.Created,
|
||||
|
|
@ -1474,7 +1471,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
|
|||
|
||||
if newMongo.TopStats != nil {
|
||||
for collection, data := range newMongo.TopStats.Totals {
|
||||
topStatDataLine := &TopStatLine{
|
||||
topStatDataLine := &topStatLine{
|
||||
CollectionName: collection,
|
||||
TotalTime: data.Total.Time,
|
||||
TotalCount: data.Total.Count,
|
||||
|
|
|
|||
|
|
@@ -8,10 +8,10 @@ import (
func TestLatencyStats(t *testing.T) {
sl := NewStatLine(
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Bits: 0,
Resident: 0,
Virtual: 0,
@@ -21,10 +21,10 @@ func TestLatencyStats(t *testing.T) {
},
},
},
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Bits: 0,
Resident: 0,
Virtual: 0,
@@ -32,16 +32,16 @@ func TestLatencyStats(t *testing.T) {
Mapped: 0,
MappedWithJournal: 0,
},
OpLatencies: &OpLatenciesStats{
Reads: &LatencyStats{
OpLatencies: &opLatenciesStats{
Reads: &latencyStats{
Ops: 0,
Latency: 0,
},
Writes: &LatencyStats{
Writes: &latencyStats{
Ops: 0,
Latency: 0,
},
Commands: &LatencyStats{
Commands: &latencyStats{
Ops: 0,
Latency: 0,
},
@@ -63,10 +63,10 @@ func TestLatencyStats(t *testing.T) {
func TestLatencyStatsDiffZero(t *testing.T) {
sl := NewStatLine(
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Bits: 0,
Resident: 0,
Virtual: 0,
@@ -74,26 +74,26 @@ func TestLatencyStatsDiffZero(t *testing.T) {
Mapped: 0,
MappedWithJournal: 0,
},
OpLatencies: &OpLatenciesStats{
Reads: &LatencyStats{
OpLatencies: &opLatenciesStats{
Reads: &latencyStats{
Ops: 0,
Latency: 0,
},
Writes: &LatencyStats{
Writes: &latencyStats{
Ops: 0,
Latency: 0,
},
Commands: &LatencyStats{
Commands: &latencyStats{
Ops: 0,
Latency: 0,
},
},
},
},
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Bits: 0,
Resident: 0,
Virtual: 0,
@@ -101,16 +101,16 @@ func TestLatencyStatsDiffZero(t *testing.T) {
Mapped: 0,
MappedWithJournal: 0,
},
OpLatencies: &OpLatenciesStats{
Reads: &LatencyStats{
OpLatencies: &opLatenciesStats{
Reads: &latencyStats{
Ops: 0,
Latency: 0,
},
Writes: &LatencyStats{
Writes: &latencyStats{
Ops: 0,
Latency: 0,
},
Commands: &LatencyStats{
Commands: &latencyStats{
Ops: 0,
Latency: 0,
},
@@ -132,10 +132,10 @@ func TestLatencyStatsDiffZero(t *testing.T) {
func TestLatencyStatsDiff(t *testing.T) {
sl := NewStatLine(
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Bits: 0,
Resident: 0,
Virtual: 0,
@@ -143,26 +143,26 @@ func TestLatencyStatsDiff(t *testing.T) {
Mapped: 0,
MappedWithJournal: 0,
},
OpLatencies: &OpLatenciesStats{
Reads: &LatencyStats{
OpLatencies: &opLatenciesStats{
Reads: &latencyStats{
Ops: 4189041956,
Latency: 2255922322753,
},
Writes: &LatencyStats{
Writes: &latencyStats{
Ops: 1691019457,
Latency: 494478256915,
},
Commands: &LatencyStats{
Commands: &latencyStats{
Ops: 1019150402,
Latency: 59177710371,
},
},
},
},
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Bits: 0,
Resident: 0,
Virtual: 0,
@@ -170,16 +170,16 @@ func TestLatencyStatsDiff(t *testing.T) {
Mapped: 0,
MappedWithJournal: 0,
},
OpLatencies: &OpLatenciesStats{
Reads: &LatencyStats{
OpLatencies: &opLatenciesStats{
Reads: &latencyStats{
Ops: 4189049884,
Latency: 2255946760057,
},
Writes: &LatencyStats{
Writes: &latencyStats{
Ops: 1691021287,
Latency: 494479456987,
},
Commands: &LatencyStats{
Commands: &latencyStats{
Ops: 1019152861,
Latency: 59177981552,
},
@@ -201,23 +201,23 @@ func TestLatencyStatsDiff(t *testing.T) {
func TestLocksStatsNilWhenLocksMissingInOldStat(t *testing.T) {
sl := NewStatLine(
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Supported: false,
},
},
},
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Supported: false,
},
Locks: map[string]LockStats{
Locks: map[string]lockStats{
"Global": {
AcquireCount: &ReadWriteLockTimes{},
AcquireCount: &readWriteLockTimes{},
},
},
},
@@ -232,24 +232,24 @@ func TestLocksStatsNilWhenLocksMissingInOldStat(t *testing.T) {
func TestLocksStatsNilWhenGlobalLockStatsMissingInOldStat(t *testing.T) {
sl := NewStatLine(
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Supported: false,
},
Locks: map[string]LockStats{},
Locks: map[string]lockStats{},
},
},
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Supported: false,
},
Locks: map[string]LockStats{
Locks: map[string]lockStats{
"Global": {
AcquireCount: &ReadWriteLockTimes{},
AcquireCount: &readWriteLockTimes{},
},
},
},
@@ -264,26 +264,26 @@ func TestLocksStatsNilWhenGlobalLockStatsMissingInOldStat(t *testing.T) {
func TestLocksStatsNilWhenGlobalLockStatsEmptyInOldStat(t *testing.T) {
sl := NewStatLine(
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Supported: false,
},
Locks: map[string]LockStats{
Locks: map[string]lockStats{
"Global": {},
},
},
},
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Supported: false,
},
Locks: map[string]LockStats{
Locks: map[string]lockStats{
"Global": {
AcquireCount: &ReadWriteLockTimes{},
AcquireCount: &readWriteLockTimes{},
},
},
},
@@ -298,28 +298,28 @@ func TestLocksStatsNilWhenGlobalLockStatsEmptyInOldStat(t *testing.T) {
func TestLocksStatsNilWhenCollectionLockStatsMissingInOldStat(t *testing.T) {
sl := NewStatLine(
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Supported: false,
},
Locks: map[string]LockStats{
Locks: map[string]lockStats{
"Global": {
AcquireCount: &ReadWriteLockTimes{},
AcquireCount: &readWriteLockTimes{},
},
},
},
},
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Supported: false,
},
Locks: map[string]LockStats{
Locks: map[string]lockStats{
"Global": {
AcquireCount: &ReadWriteLockTimes{},
AcquireCount: &readWriteLockTimes{},
},
},
},
@@ -334,29 +334,29 @@ func TestLocksStatsNilWhenCollectionLockStatsMissingInOldStat(t *testing.T) {
func TestLocksStatsNilWhenCollectionLockStatsEmptyInOldStat(t *testing.T) {
sl := NewStatLine(
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Supported: false,
},
Locks: map[string]LockStats{
Locks: map[string]lockStats{
"Global": {
AcquireCount: &ReadWriteLockTimes{},
AcquireCount: &readWriteLockTimes{},
},
"Collection": {},
},
},
},
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Supported: false,
},
Locks: map[string]LockStats{
Locks: map[string]lockStats{
"Global": {
AcquireCount: &ReadWriteLockTimes{},
AcquireCount: &readWriteLockTimes{},
},
},
},
@@ -371,23 +371,23 @@ func TestLocksStatsNilWhenCollectionLockStatsEmptyInOldStat(t *testing.T) {
func TestLocksStatsNilWhenLocksMissingInNewStat(t *testing.T) {
sl := NewStatLine(
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Supported: false,
},
Locks: map[string]LockStats{
Locks: map[string]lockStats{
"Global": {
AcquireCount: &ReadWriteLockTimes{},
AcquireCount: &readWriteLockTimes{},
},
},
},
},
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Supported: false,
},
},
@@ -402,26 +402,26 @@ func TestLocksStatsNilWhenLocksMissingInNewStat(t *testing.T) {
func TestLocksStatsNilWhenGlobalLockStatsMissingInNewStat(t *testing.T) {
sl := NewStatLine(
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Supported: false,
},
Locks: map[string]LockStats{
Locks: map[string]lockStats{
"Global": {
AcquireCount: &ReadWriteLockTimes{},
AcquireCount: &readWriteLockTimes{},
},
},
},
},
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Supported: false,
},
Locks: map[string]LockStats{},
Locks: map[string]lockStats{},
},
},
"foo",
@@ -434,26 +434,26 @@ func TestLocksStatsNilWhenGlobalLockStatsMissingInNewStat(t *testing.T) {
func TestLocksStatsNilWhenGlobalLockStatsEmptyInNewStat(t *testing.T) {
sl := NewStatLine(
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Supported: false,
},
Locks: map[string]LockStats{
Locks: map[string]lockStats{
"Global": {
AcquireCount: &ReadWriteLockTimes{},
AcquireCount: &readWriteLockTimes{},
},
},
},
},
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Supported: false,
},
Locks: map[string]LockStats{
Locks: map[string]lockStats{
"Global": {},
},
},
@@ -468,28 +468,28 @@ func TestLocksStatsNilWhenGlobalLockStatsEmptyInNewStat(t *testing.T) {
func TestLocksStatsNilWhenCollectionLockStatsMissingInNewStat(t *testing.T) {
sl := NewStatLine(
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Supported: false,
},
Locks: map[string]LockStats{
Locks: map[string]lockStats{
"Global": {
AcquireCount: &ReadWriteLockTimes{},
AcquireCount: &readWriteLockTimes{},
},
},
},
},
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Supported: false,
},
Locks: map[string]LockStats{
Locks: map[string]lockStats{
"Global": {
AcquireCount: &ReadWriteLockTimes{},
AcquireCount: &readWriteLockTimes{},
},
},
},
@@ -504,28 +504,28 @@ func TestLocksStatsNilWhenCollectionLockStatsMissingInNewStat(t *testing.T) {
func TestLocksStatsNilWhenCollectionLockStatsEmptyInNewStat(t *testing.T) {
sl := NewStatLine(
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Supported: false,
},
Locks: map[string]LockStats{
Locks: map[string]lockStats{
"Global": {
AcquireCount: &ReadWriteLockTimes{},
AcquireCount: &readWriteLockTimes{},
},
},
},
},
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Supported: false,
},
Locks: map[string]LockStats{
Locks: map[string]lockStats{
"Global": {
AcquireCount: &ReadWriteLockTimes{},
AcquireCount: &readWriteLockTimes{},
},
"Collection": {},
},
@@ -541,26 +541,26 @@ func TestLocksStatsNilWhenCollectionLockStatsEmptyInNewStat(t *testing.T) {
func TestLocksStatsPopulated(t *testing.T) {
sl := NewStatLine(
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Supported: false,
},
Locks: map[string]LockStats{
Locks: map[string]lockStats{
"Global": {
AcquireCount: &ReadWriteLockTimes{},
AcquireCount: &readWriteLockTimes{},
},
"Collection": {
AcquireWaitCount: &ReadWriteLockTimes{
AcquireWaitCount: &readWriteLockTimes{
Read: 1,
Write: 2,
},
AcquireCount: &ReadWriteLockTimes{
AcquireCount: &readWriteLockTimes{
Read: 5,
Write: 10,
},
TimeAcquiringMicros: ReadWriteLockTimes{
TimeAcquiringMicros: readWriteLockTimes{
Read: 100,
Write: 200,
},
@@ -568,26 +568,26 @@ func TestLocksStatsPopulated(t *testing.T) {
},
},
},
MongoStatus{
ServerStatus: &ServerStatus{
Connections: &ConnectionStats{},
Mem: &MemStats{
mongoStatus{
ServerStatus: &serverStatus{
Connections: &connectionStats{},
Mem: &memStats{
Supported: false,
},
Locks: map[string]LockStats{
Locks: map[string]lockStats{
"Global": {
AcquireCount: &ReadWriteLockTimes{},
AcquireCount: &readWriteLockTimes{},
},
"Collection": {
AcquireWaitCount: &ReadWriteLockTimes{
AcquireWaitCount: &readWriteLockTimes{
Read: 2,
Write: 4,
},
AcquireCount: &ReadWriteLockTimes{
AcquireCount: &readWriteLockTimes{
Read: 10,
Write: 30,
},
TimeAcquiringMicros: ReadWriteLockTimes{
TimeAcquiringMicros: readWriteLockTimes{
Read: 250,
Write: 310,
},
@@ -600,7 +600,7 @@ func TestLocksStatsPopulated(t *testing.T) {
60,
)

expected := &CollectionLockStatus{
expected := &collectionLockStatus{
ReadAcquireWaitsPercentage: 20,
WriteAcquireWaitsPercentage: 10,
ReadAcquireTimeMicros: 150,
@@ -25,21 +25,21 @@ const (
file = "2"
process = "3"
remoteHost = "4"
system = "5"
sstm = "5"
fifo = "6"
program = "7"
prgrm = "7"
network = "8"
)

var pendingActions = []string{"ignore", "alert", "restart", "stop", "exec", "unmonitor", "start", "monitor"}

type Status struct {
Server Server `xml:"server"`
Platform Platform `xml:"platform"`
Services []Service `xml:"service"`
type status struct {
Server server `xml:"server"`
Platform platform `xml:"platform"`
Services []service `xml:"service"`
}

type Server struct {
type server struct {
ID string `xml:"id"`
Version string `xml:"version"`
Uptime int64 `xml:"uptime"`
@@ -49,7 +49,7 @@ type Server struct {
ControlFile string `xml:"controlfile"`
}

type Platform struct {
type platform struct {
Name string `xml:"name"`
Release string `xml:"release"`
Version string `xml:"version"`
@@ -59,38 +59,38 @@ type Platform struct {
Swap int `xml:"swap"`
}

type Service struct {
type service struct {
Type string `xml:"type,attr"`
Name string `xml:"name"`
Status int `xml:"status"`
MonitoringStatus int `xml:"monitor"`
MonitorMode int `xml:"monitormode"`
PendingAction int `xml:"pendingaction"`
Memory Memory `xml:"memory"`
CPU CPU `xml:"cpu"`
System System `xml:"system"`
Memory memory `xml:"memory"`
CPU cpu `xml:"cpu"`
System system `xml:"system"`
Size int64 `xml:"size"`
Mode int `xml:"mode"`
Program Program `xml:"program"`
Block Block `xml:"block"`
Inode Inode `xml:"inode"`
Program program `xml:"program"`
Block block `xml:"block"`
Inode inode `xml:"inode"`
Pid int64 `xml:"pid"`
ParentPid int64 `xml:"ppid"`
Threads int `xml:"threads"`
Children int `xml:"children"`
Port Port `xml:"port"`
Link Link `xml:"link"`
Port port `xml:"port"`
Link link `xml:"link"`
}

type Link struct {
type link struct {
State int `xml:"state"`
Speed int64 `xml:"speed"`
Duplex int `xml:"duplex"`
Download Download `xml:"download"`
Upload Upload `xml:"upload"`
Download download `xml:"download"`
Upload upload `xml:"upload"`
}

type Download struct {
type download struct {
Packets struct {
Now int64 `xml:"now"`
Total int64 `xml:"total"`
@@ -105,7 +105,7 @@ type Download struct {
} `xml:"errors"`
}

type Upload struct {
type upload struct {
Packets struct {
Now int64 `xml:"now"`
Total int64 `xml:"total"`
@@ -120,7 +120,7 @@ type Upload struct {
} `xml:"errors"`
}

type Port struct {
type port struct {
Hostname string `xml:"hostname"`
PortNumber int64 `xml:"portnumber"`
Request string `xml:"request"`
@@ -129,36 +129,36 @@ type Port struct {
Type string `xml:"type"`
}

type Block struct {
type block struct {
Percent float64 `xml:"percent"`
Usage float64 `xml:"usage"`
Total float64 `xml:"total"`
}

type Inode struct {
type inode struct {
Percent float64 `xml:"percent"`
Usage float64 `xml:"usage"`
Total float64 `xml:"total"`
}

type Program struct {
type program struct {
Started int64 `xml:"started"`
Status int `xml:"status"`
}

type Memory struct {
type memory struct {
Percent float64 `xml:"percent"`
PercentTotal float64 `xml:"percenttotal"`
Kilobyte int64 `xml:"kilobyte"`
KilobyteTotal int64 `xml:"kilobytetotal"`
}

type CPU struct {
type cpu struct {
Percent float64 `xml:"percent"`
PercentTotal float64 `xml:"percenttotal"`
}

type System struct {
type system struct {
Load struct {
Avg01 float64 `xml:"avg01"`
Avg05 float64 `xml:"avg05"`
@@ -188,10 +188,6 @@ type Monit struct {
Timeout config.Duration `toml:"timeout"`
}

type Messagebody struct {
Metrics []string `json:"metrics"`
}

func (*Monit) SampleConfig() string {
return sampleConfig
}
@@ -231,7 +227,7 @@ func (m *Monit) Gather(acc telegraf.Accumulator) error {
return fmt.Errorf("received status code %d (%s), expected 200", resp.StatusCode, http.StatusText(resp.StatusCode))
}

var status Status
var status status
decoder := xml.NewDecoder(resp.Body)
decoder.CharsetReader = charset.NewReaderLabel
if err := decoder.Decode(&status); err != nil {
@@ -291,7 +287,7 @@ func (m *Monit) Gather(acc telegraf.Accumulator) error {
fields["protocol"] = service.Port.Protocol
fields["type"] = service.Port.Type
acc.AddFields("monit_remote_host", fields, tags)
} else if service.Type == system {
} else if service.Type == sstm {
fields["cpu_system"] = service.System.CPU.System
fields["cpu_user"] = service.System.CPU.User
fields["cpu_wait"] = service.System.CPU.Wait
@@ -306,7 +302,7 @@ func (m *Monit) Gather(acc telegraf.Accumulator) error {
} else if service.Type == fifo {
fields["mode"] = service.Mode
acc.AddFields("monit_fifo", fields, tags)
} else if service.Type == program {
} else if service.Type == prgrm {
fields["program_started"] = service.Program.Started * 10000000
fields["program_status"] = service.Program.Status
acc.AddFields("monit_program", fields, tags)
@@ -333,7 +329,7 @@ func (m *Monit) Gather(acc telegraf.Accumulator) error {
return nil
}

func linkMode(s Service) string {
func linkMode(s service) string {
if s.Link.Duplex == 1 {
return "duplex"
} else if s.Link.Duplex == 0 {
@@ -342,14 +338,14 @@ func linkMode(s Service) string {
return "unknown"
}

func serviceStatus(s Service) string {
func serviceStatus(s service) string {
if s.Status == 0 {
return "running"
}
return "failure"
}

func pendingAction(s Service) string {
func pendingAction(s service) string {
if s.PendingAction > 0 {
if s.PendingAction >= len(pendingActions) {
return "unknown"
@@ -359,7 +355,7 @@ func pendingAction(s Service) string {
return "none"
}

func monitoringMode(s Service) string {
func monitoringMode(s service) string {
switch s.MonitorMode {
case 0:
return "active"
@@ -369,7 +365,7 @@ func monitoringMode(s Service) string {
return "unknown"
}

func monitoringStatus(s Service) string {
func monitoringStatus(s service) string {
switch s.MonitoringStatus {
case 1:
return "monitored"