docs: remove trailing whitespace (#12660)

parent 200044a495
commit fa26b6498b
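
Every hunk below is mechanical: the removed and added lines differ only in trailing spaces or tabs, so most `-`/`+` pairs look identical once rendered. As a minimal sketch of the cleanup (an illustration, not code from this commit), the transformation applied to each documentation file is equivalent to:

```go
// trimtrailing reads text on stdin and writes it back with trailing
// spaces and tabs removed from every line.
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	sc := bufio.NewScanner(os.Stdin)
	for sc.Scan() {
		// Trim only the right-hand side; leading indentation stays intact.
		fmt.Println(strings.TrimRight(sc.Text(), " \t"))
	}
	if err := sc.Err(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```

Run as, for example, `go run trimtrailing.go < README.md > README.md.clean`.
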
@@ -144,7 +144,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 ## Allow metrics without discovery data, if discovery is enabled.
 ## If set to true, then metric without discovery data would be emitted, otherwise dropped.
 ## This cane be of help, in case debugging dimension filters, or partial coverage of
-## discovery scope vs monitoring scope
+## discovery scope vs monitoring scope
 # allow_dps_without_discovery = false
 ```
 

@@ -107,5 +107,5 @@
 ## Allow metrics without discovery data, if discovery is enabled.
 ## If set to true, then metric without discovery data would be emitted, otherwise dropped.
 ## This cane be of help, in case debugging dimension filters, or partial coverage of
-## discovery scope vs monitoring scope
+## discovery scope vs monitoring scope
 # allow_dps_without_discovery = false

@@ -49,7 +49,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 
 ## Include the delete field in every telemetry message.
 # include_delete_field = false
-
+
 ## Define aliases to map telemetry encoding paths to simple measurement names
 [inputs.cisco_telemetry_mdt.aliases]
 ifstats = "ietf-interfaces:interfaces-state/interface/statistics"

@@ -76,7 +76,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 ## removed, and adding subsequent subscriptions does not keep a stable session.
 # permit_keepalive_without_calls = false
 
-## GRPC minimum timeout between successive pings, decreasing this value may
+## GRPC minimum timeout between successive pings, decreasing this value may
 ## help if this plugin is closing connections with ENHANCE_YOUR_CALM (too_many_pings).
 # keepalive_minimum_time = "5m"
 ```

@@ -23,7 +23,7 @@
 
 ## Include the delete field in every telemetry message.
 # include_delete_field = false
-
+
 ## Define aliases to map telemetry encoding paths to simple measurement names
 [inputs.cisco_telemetry_mdt.aliases]
 ifstats = "ietf-interfaces:interfaces-state/interface/statistics"

@@ -50,6 +50,6 @@
 ## removed, and adding subsequent subscriptions does not keep a stable session.
 # permit_keepalive_without_calls = false
 
-## GRPC minimum timeout between successive pings, decreasing this value may
+## GRPC minimum timeout between successive pings, decreasing this value may
 ## help if this plugin is closing connections with ENHANCE_YOUR_CALM (too_many_pings).
 # keepalive_minimum_time = "5m"

@@ -116,7 +116,7 @@ const sampleJSON = `
 "buffer_stage_byte_size": 0,
 "buffer_queue_byte_size": 0,
 "buffer_available_buffer_space_ratios": 0
-},
+},
 {
 "plugin_id": "object:output_td_2",
 "plugin_category": "output",

@@ -34,7 +34,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 ## Read more about them here:
 ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
 data_format = "influx"
-
+
 ## Optional. Filepath for GCP credentials JSON file to authorize calls to
 ## Google Cloud Storage APIs. If not set explicitly, Telegraf will attempt to use
 ## Application Default Credentials, which is preferred.

@@ -74,8 +74,8 @@ The example output
 2022-09-17T17:52:19Z I! Starting Telegraf 1.25.0-a93ec9a0
 2022-09-17T17:52:19Z I! Available plugins: 209 inputs, 9 aggregators, 26 processors, 20 parsers, 57 outputs
 2022-09-17T17:52:19Z I! Loaded inputs: google_cloud_storage
-2022-09-17T17:52:19Z I! Loaded aggregators:
-2022-09-17T17:52:19Z I! Loaded processors:
+2022-09-17T17:52:19Z I! Loaded aggregators:
+2022-09-17T17:52:19Z I! Loaded processors:
 2022-09-17T17:52:19Z I! Loaded outputs: influxdb
 2022-09-17T17:52:19Z I! Tags enabled: host=user-N9RXNWKWY3
 2022-09-17T17:52:19Z I! [agent] Config: Interval:10s, Quiet:false, Hostname:"user-N9RXNWKWY3", Flush Interval:10s

@@ -17,7 +17,7 @@
 ## Read more about them here:
 ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
 data_format = "influx"
-
+
 ## Optional. Filepath for GCP credentials JSON file to authorize calls to
 ## Google Cloud Storage APIs. If not set explicitly, Telegraf will attempt to use
 ## Application Default Credentials, which is preferred.

@@ -93,7 +93,7 @@ func TestIptables_Gather(t *testing.T) {
 `Chain FORWARD (policy ACCEPT 58 packets, 5096 bytes)
 pkts bytes target prot opt in out source destination
 300 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* bar */
-400 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0
+400 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0
 500 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* foobar */
 `,
 },

@@ -14,7 +14,7 @@
 name = "FSNamesystemState"
 mbean = "Hadoop:name=FSNamesystemState,service=NameNode"
 paths = ["VolumeFailuresTotal", "UnderReplicatedBlocks", "BlocksTotal"]
-
+
 [[inputs.jolokia2_agent.metric]]
 name = "OperatingSystem"
 mbean = "java.lang:type=OperatingSystem"

@@ -1,7 +1,7 @@
 
 [[inputs.jolokia2_agent]]
 name_prefix = "kafka_"
-
+
 ## If you intend to use "non_negative_derivative(1s)" with "*.count" fields, you don't need precalculated fields.
 # fielddrop = [
 # "*.EventType",

@@ -22,7 +22,7 @@
 # "*.Max",
 # "*.StdDev"
 # ]
-
+
 ## jolokia_agent_url tag is not needed if you have only one instance of Kafka on the server.
 # tagexclude = ["jolokia_agent_url"]
 

@@ -91,7 +91,7 @@
 # [[processors.rename.replace]]
 # tag = "jolokia_agent_url"
 # dest = "instance"
-#
+#
 # [[processors.regex]]
 # namepass = ["kafka_*"]
 # order = 2

@@ -7,13 +7,13 @@ const logstash7PipelinesJSON = `
 "http_address" : "127.0.0.1:9600",
 "id" : "28580380-ad2c-4032-934b-76359125edca",
 "name" : "HOST01.local",
-"ephemeral_id" : "bd95ff6b-3fa8-42ae-be32-098a4e4ea1ec",
-"status" : "green",
-"snapshot" : true,
-"pipeline" : {
-"workers" : 8,
-"batch_size" : 125,
-"batch_delay" : 50
+"ephemeral_id" : "bd95ff6b-3fa8-42ae-be32-098a4e4ea1ec",
+"status" : "green",
+"snapshot" : true,
+"pipeline" : {
+"workers" : 8,
+"batch_size" : 125,
+"batch_delay" : 50
 },
 "pipelines" : {
 "infra" : {

@@ -31,22 +31,22 @@ const logstash7PipelinesJSON = `
 "out" : 2665549,
 "queue_push_duration_in_millis" : 13300
 },
-"peak_connections" : 1,
-"name" : "beats",
-"current_connections" : 1
+"peak_connections" : 1,
+"name" : "beats",
+"current_connections" : 1
 } ],
-"codecs" : [ {
-"id" : "plain_7312c097-1e7f-41db-983b-4f5a87a9eba2",
-"encode" : {
-"duration_in_millis" : 0,
-"writes_in" : 0
-},
-"name" : "plain",
-"decode" : {
-"out" : 0,
-"duration_in_millis" : 0,
-"writes_in" : 0
-}
+"codecs" : [ {
+"id" : "plain_7312c097-1e7f-41db-983b-4f5a87a9eba2",
+"encode" : {
+"duration_in_millis" : 0,
+"writes_in" : 0
+},
+"name" : "plain",
+"decode" : {
+"out" : 0,
+"duration_in_millis" : 0,
+"writes_in" : 0
+}
 }, {
 "id" : "rubydebug_e958e3dc-10f6-4dd6-b7c5-ae3de2892afb",
 "encode" : {

@@ -80,7 +80,7 @@ const logstash7PipelinesJSON = `
 "duration_in_millis" : 8648
 },
 "name" : "date",
-"matches" : 2665549
+"matches" : 2665549
 }, {
 "id" : "bec0c77b3f53a78c7878449c72ec59f97be31c1f12f9621f61ed2d4563bad869",
 "events" : {

@@ -133,8 +133,8 @@ const logstash7PipelinesJSON = `
 "queue_size_in_bytes" : 32028566,
 "max_queue_size_in_bytes" : 4294967296
 },
-"hash" : "5bc589ae4b02cb3e436626429b50928b9d99360639c84dc7fc69268ac01a9fd0",
-"ephemeral_id" : "4bcacefa-6cbf-461e-b14e-184edd9ebdf3"
+"hash" : "5bc589ae4b02cb3e436626429b50928b9d99360639c84dc7fc69268ac01a9fd0",
+"ephemeral_id" : "4bcacefa-6cbf-461e-b14e-184edd9ebdf3"
 }
 }
 }`

@@ -30,9 +30,9 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 
 ## jetstream subjects
 ## jetstream is a streaming technology inside of nats.
-## With jetstream the nats-server persists messages and
+## With jetstream the nats-server persists messages and
 ## a consumer can consume historical messages. This is
-## useful when telegraf needs to restart it don't miss a
+## useful when telegraf needs to restart it don't miss a
 ## message. You need to configure the nats-server.
 ## https://docs.nats.io/nats-concepts/jetstream.
 jetstream_subjects = ["js_telegraf"]

@@ -10,9 +10,9 @@
 
 ## jetstream subjects
 ## jetstream is a streaming technology inside of nats.
-## With jetstream the nats-server persists messages and
+## With jetstream the nats-server persists messages and
 ## a consumer can consume historical messages. This is
-## useful when telegraf needs to restart it don't miss a
+## useful when telegraf needs to restart it don't miss a
 ## message. You need to configure the nats-server.
 ## https://docs.nats.io/nats-concepts/jetstream.
 jetstream_subjects = ["js_telegraf"]

@@ -19,7 +19,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 # Pulls statistics from nvidia GPUs attached to the host
 [[inputs.nvidia_smi]]
 ## Optional: path to nvidia-smi binary, defaults "/usr/bin/nvidia-smi"
-## We will first try to locate the nvidia-smi binary with the explicitly specified value (or default value),
+## We will first try to locate the nvidia-smi binary with the explicitly specified value (or default value),
 ## if it is not found, we will try to locate it on PATH(exec.LookPath), if it is still not found, an error will be returned
 # bin_path = "/usr/bin/nvidia-smi"
 

@@ -1,7 +1,7 @@
 # Pulls statistics from nvidia GPUs attached to the host
 [[inputs.nvidia_smi]]
 ## Optional: path to nvidia-smi binary, defaults "/usr/bin/nvidia-smi"
-## We will first try to locate the nvidia-smi binary with the explicitly specified value (or default value),
+## We will first try to locate the nvidia-smi binary with the explicitly specified value (or default value),
 ## if it is not found, we will try to locate it on PATH(exec.LookPath), if it is still not found, an error will be returned
 # bin_path = "/usr/bin/nvidia-smi"
 

@@ -200,7 +200,7 @@ This example group configuration has three groups with two nodes each:
 name = "name"
 identifier = "1002"
 default_tags = {node1_tag = "val3"}
-
+
 # Group 2
 [[inputs.opcua.group]]
 name = "group2_metric_name"

@@ -214,7 +214,7 @@ This example group configuration has three groups with two nodes each:
 [[inputs.opcua.group.nodes]]
 name = "sin"
 identifier = "1004"
-
+
 # Group 3
 [[inputs.opcua.group]]
 name = "group3_metric_name"

@@ -208,7 +208,7 @@ This example group configuration has three groups with two nodes each:
 name = "name"
 identifier = "1002"
 default_tags = {node1_tag = "val3"}
-
+
 # Group 2
 [[inputs.opcua_listener.group]]
 name = "group2_metric_name"

@@ -222,7 +222,7 @@
 [[inputs.opcua.group.nodes]]
 name = "sin"
 identifier = "1004"
-
+
 # Group 3
 [[inputs.opcua_listener.group]]
 name = "group3_metric_name"

@@ -32,7 +32,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 ## Define the endpoint of P4Runtime gRPC server to collect metrics.
 # endpoint = "127.0.0.1:9559"
 ## Set DeviceID required for Client Arbitration.
-## https://p4.org/p4-spec/p4runtime/main/P4Runtime-Spec.html#sec-client-arbitration-and-controller-replication
+## https://p4.org/p4-spec/p4runtime/main/P4Runtime-Spec.html#sec-client-arbitration-and-controller-replication
 # device_id = 1
 ## Filter counters by their names that should be observed.
 ## Example: counter_names_include=["ingressCounter", "egressCounter"]

@@ -3,7 +3,7 @@
 ## Define the endpoint of P4Runtime gRPC server to collect metrics.
 # endpoint = "127.0.0.1:9559"
 ## Set DeviceID required for Client Arbitration.
-## https://p4.org/p4-spec/p4runtime/main/P4Runtime-Spec.html#sec-client-arbitration-and-controller-replication
+## https://p4.org/p4-spec/p4runtime/main/P4Runtime-Spec.html#sec-client-arbitration-and-controller-replication
 # device_id = 1
 ## Filter counters by their names that should be observed.
 ## Example: counter_names_include=["ingressCounter", "egressCounter"]

@@ -102,7 +102,7 @@ Counters
 {TestInput: `Status: Enabled for 0 days 00:26:05 Debug: Urgent
 
 State Table Total Rate
-current entries -23
+current entries -23
 searches 11325 7.2/s
 inserts 5 0.0/s
 removals 3 0.0/s

@@ -128,7 +128,7 @@ Counters
 {TestInput: `Status: Enabled for 0 days 00:26:05 Debug: Urgent
 
 State Table Total Rate
-current entries 2
+current entries 2
 searches 11325 7.2/s
 inserts 5 0.0/s
 removals 3 0.0/s

@@ -33,7 +33,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 ## Version 3: PowerDNS >=4.6.0
 ## By default this is set to 1.
 # control_protocol_version = 1
-
+
 ```
 
 ### Newer PowerDNS Recursor versions

@@ -16,4 +16,4 @@
 ## Version 3: PowerDNS >=4.6.0
 ## By default this is set to 1.
 # control_protocol_version = 1
-
+

@@ -96,21 +96,21 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 [[inputs.smart]]
 ## Optionally specify the path to the smartctl executable
 # path_smartctl = "/usr/bin/smartctl"
-
+
 ## Optionally specify the path to the nvme-cli executable
 # path_nvme = "/usr/bin/nvme"
-
+
 ## Optionally specify if vendor specific attributes should be propagated for NVMe disk case
 ## ["auto-on"] - automatically find and enable additional vendor specific disk info
 ## ["vendor1", "vendor2", ...] - e.g. "Intel" enable additional Intel specific disk info
 # enable_extensions = ["auto-on"]
-
+
 ## On most platforms used cli utilities requires root access.
 ## Setting 'use_sudo' to true will make use of sudo to run smartctl or nvme-cli.
 ## Sudo must be configured to allow the telegraf user to run smartctl or nvme-cli
 ## without a password.
 # use_sudo = false
-
+
 ## Skip checking disks in this power mode. Defaults to
 ## "standby" to not wake up disks that have stopped rotating.
 ## See --nocheck in the man pages for smartctl.

@@ -118,22 +118,22 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.
 ## power mode and might require changing this value to
 ## "never" depending on your disks.
 # nocheck = "standby"
-
+
 ## Gather all returned S.M.A.R.T. attribute metrics and the detailed
 ## information from each drive into the 'smart_attribute' measurement.
 # attributes = false
-
+
 ## Optionally specify devices to exclude from reporting if disks auto-discovery is performed.
 # excludes = [ "/dev/pass6" ]
-
+
 ## Optionally specify devices and device type, if unset
 ## a scan (smartctl --scan and smartctl --scan -d nvme) for S.M.A.R.T. devices will be done
 ## and all found will be included except for the excluded in excludes.
 # devices = [ "/dev/ada0 -d atacam", "/dev/nvme0"]
-
+
 ## Timeout for the cli command to complete.
 # timeout = "30s"
-
+
 ## Optionally call smartctl and nvme-cli with a specific concurrency policy.
 ## By default, smartctl and nvme-cli are called in separate threads (goroutines) to gather disk attributes.
 ## Some devices (e.g. disks in RAID arrays) may have access limitations that require sequential reading of

@@ -2,21 +2,21 @@
 [[inputs.smart]]
 ## Optionally specify the path to the smartctl executable
 # path_smartctl = "/usr/bin/smartctl"
-
+
 ## Optionally specify the path to the nvme-cli executable
 # path_nvme = "/usr/bin/nvme"
-
+
 ## Optionally specify if vendor specific attributes should be propagated for NVMe disk case
 ## ["auto-on"] - automatically find and enable additional vendor specific disk info
 ## ["vendor1", "vendor2", ...] - e.g. "Intel" enable additional Intel specific disk info
 # enable_extensions = ["auto-on"]
-
+
 ## On most platforms used cli utilities requires root access.
 ## Setting 'use_sudo' to true will make use of sudo to run smartctl or nvme-cli.
 ## Sudo must be configured to allow the telegraf user to run smartctl or nvme-cli
 ## without a password.
 # use_sudo = false
-
+
 ## Skip checking disks in this power mode. Defaults to
 ## "standby" to not wake up disks that have stopped rotating.
 ## See --nocheck in the man pages for smartctl.

@@ -24,22 +24,22 @@
 ## power mode and might require changing this value to
 ## "never" depending on your disks.
 # nocheck = "standby"
-
+
 ## Gather all returned S.M.A.R.T. attribute metrics and the detailed
 ## information from each drive into the 'smart_attribute' measurement.
 # attributes = false
-
+
 ## Optionally specify devices to exclude from reporting if disks auto-discovery is performed.
 # excludes = [ "/dev/pass6" ]
-
+
 ## Optionally specify devices and device type, if unset
 ## a scan (smartctl --scan and smartctl --scan -d nvme) for S.M.A.R.T. devices will be done
 ## and all found will be included except for the excluded in excludes.
 # devices = [ "/dev/ada0 -d atacam", "/dev/nvme0"]
-
+
 ## Timeout for the cli command to complete.
 # timeout = "30s"
-
+
 ## Optionally call smartctl and nvme-cli with a specific concurrency policy.
 ## By default, smartctl and nvme-cli are called in separate threads (goroutines) to gather disk attributes.
 ## Some devices (e.g. disks in RAID arrays) may have access limitations that require sequential reading of

@@ -15,7 +15,7 @@ IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/
 RETURN
 END
 
-SELECT TOP 1
+SELECT TOP 1
 'sqlserver_server_properties' AS [measurement]
 ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance]
 ,[virtual_core_count] AS [cpu_count]

@@ -36,7 +36,7 @@ SELECT TOP 1
 ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability
 FROM sys.server_resource_stats
 CROSS APPLY (
-SELECT
+SELECT
 SUM( CASE WHEN [state] = 0 THEN 1 ELSE 0 END ) AS [db_online]
 ,SUM( CASE WHEN [state] = 1 THEN 1 ELSE 0 END ) AS [db_restoring]
 ,SUM( CASE WHEN [state] = 2 THEN 1 ELSE 0 END ) AS [db_recovering]

@@ -44,8 +44,8 @@ CROSS APPLY (
 ,SUM( CASE WHEN [state] = 4 THEN 1 ELSE 0 END ) AS [db_suspect]
 ,SUM( CASE WHEN [state] IN (6,10) THEN 1 ELSE 0 END ) AS [db_offline]
 FROM sys.databases
-) AS dbs
-ORDER BY
+) AS dbs
+ORDER BY
 [start_time] DESC;
 `
 

@@ -105,7 +105,7 @@ SELECT
 ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance]
 ,DB_NAME(mf.[database_id]) AS [database_name]
 ,COALESCE(mf.[physical_name],'RBPEX') AS [physical_filename] --RPBEX = Resilient Buffer Pool Extension
-,COALESCE(mf.[name],'RBPEX') AS [logical_filename] --RPBEX = Resilient Buffer Pool Extension
+,COALESCE(mf.[name],'RBPEX') AS [logical_filename] --RPBEX = Resilient Buffer Pool Extension
 ,mf.[type_desc] AS [file_type]
 ,vfs.[io_stall_read_ms] AS [read_latency_ms]
 ,vfs.[num_of_reads] AS [reads]

@@ -113,12 +113,12 @@ SELECT
 ,vfs.[io_stall_write_ms] AS [write_latency_ms]
 ,vfs.[num_of_writes] AS [writes]
 ,vfs.[num_of_bytes_written] AS [write_bytes]
-,vfs.io_stall_queued_read_ms AS [rg_read_stall_ms]
+,vfs.io_stall_queued_read_ms AS [rg_read_stall_ms]
 ,vfs.io_stall_queued_write_ms AS [rg_write_stall_ms]
 ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability
 FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS vfs
 LEFT OUTER JOIN sys.master_files AS mf WITH (NOLOCK)
-ON vfs.[database_id] = mf.[database_id]
+ON vfs.[database_id] = mf.[database_id]
 AND vfs.[file_id] = mf.[file_id]
 WHERE
 vfs.[database_id] < 32760

@@ -161,7 +161,7 @@ SELECT
 ,[signal_wait_time_ms]
 ,[max_wait_time_ms]
 ,[waiting_tasks_count]
-,CASE
+,CASE
 WHEN ws.[wait_type] LIKE 'SOS_SCHEDULER_YIELD' then 'CPU'
 WHEN ws.[wait_type] = 'THREADPOOL' THEN 'Worker Thread'
 WHEN ws.[wait_type] LIKE 'LCK[_]%' THEN 'Lock'

@@ -171,7 +171,7 @@ SELECT
 WHEN ws.[wait_type] LIKE 'RESOURCE_SEMAPHORE_QUERY_COMPILE%' THEN 'Compilation'
 WHEN ws.[wait_type] LIKE 'CLR[_]%' or ws.[wait_type] like 'SQLCLR%' THEN 'SQL CLR'
 WHEN ws.[wait_type] LIKE 'DBMIRROR_%' THEN 'Mirroring'
-WHEN ws.[wait_type] LIKE 'DTC[_]%' or ws.[wait_type] LIKE 'DTCNEW%' or ws.[wait_type] LIKE 'TRAN_%'
+WHEN ws.[wait_type] LIKE 'DTC[_]%' or ws.[wait_type] LIKE 'DTCNEW%' or ws.[wait_type] LIKE 'TRAN_%'
 or ws.[wait_type] LIKE 'XACT%' or ws.[wait_type] like 'MSQL_XACT%' THEN 'Transaction'
 WHEN ws.[wait_type] LIKE 'SLEEP[_]%'
 or ws.[wait_type] IN (

@@ -188,7 +188,7 @@ SELECT
 'WRITELOG','LOGBUFFER','LOGMGR_RESERVE_APPEND',
 'LOGMGR_FLUSH', 'LOGMGR_PMM_LOG') THEN 'Tran Log IO'
 WHEN ws.[wait_type] LIKE 'LOG_RATE%' then 'Log Rate Governor'
-WHEN ws.[wait_type] LIKE 'HADR_THROTTLE[_]%'
+WHEN ws.[wait_type] LIKE 'HADR_THROTTLE[_]%'
 or ws.[wait_type] = 'THROTTLE_LOG_RATE_LOG_STORAGE' THEN 'HADR Log Rate Governor'
 WHEN ws.[wait_type] LIKE 'RBIO_RG%' or ws.[wait_type] like 'WAIT_RBIO_RG%' then 'VLDB Log Rate Governor'
 WHEN ws.[wait_type] LIKE 'RBIO[_]%' or ws.[wait_type] like 'WAIT_RBIO[_]%' then 'VLDB RBIO'

@@ -204,16 +204,16 @@ SELECT
 'RESERVED_MEMORY_ALLOCATION_EXT', 'MEMORY_GRANT_UPDATE') THEN 'Memory'
 WHEN ws.[wait_type] IN ('WAITFOR','WAIT_FOR_RESULTS') THEN 'User Wait'
 WHEN ws.[wait_type] LIKE 'HADR[_]%' or ws.[wait_type] LIKE 'PWAIT_HADR%'
-or ws.[wait_type] LIKE 'REPLICA[_]%' or ws.[wait_type] LIKE 'REPL_%'
+or ws.[wait_type] LIKE 'REPLICA[_]%' or ws.[wait_type] LIKE 'REPL_%'
 or ws.[wait_type] LIKE 'SE_REPL[_]%'
-or ws.[wait_type] LIKE 'FCB_REPLICA%' THEN 'Replication'
-WHEN ws.[wait_type] LIKE 'SQLTRACE[_]%'
+or ws.[wait_type] LIKE 'FCB_REPLICA%' THEN 'Replication'
+WHEN ws.[wait_type] LIKE 'SQLTRACE[_]%'
 or ws.[wait_type] IN (
 'TRACEWRITE', 'SQLTRACE_LOCK', 'SQLTRACE_FILE_BUFFER', 'SQLTRACE_FILE_WRITE_IO_COMPLETION',
 'SQLTRACE_FILE_READ_IO_COMPLETION', 'SQLTRACE_PENDING_BUFFER_WRITERS', 'SQLTRACE_SHUTDOWN',
 'QUERY_TRACEOUT', 'TRACE_EVTNOTIF') THEN 'Tracing'
 WHEN ws.[wait_type] IN (
-'FT_RESTART_CRAWL', 'FULLTEXT GATHERER', 'MSSEARCH', 'FT_METADATA_MUTEX',
+'FT_RESTART_CRAWL', 'FULLTEXT GATHERER', 'MSSEARCH', 'FT_METADATA_MUTEX',
 'FT_IFTSHC_MUTEX', 'FT_IFTSISM_MUTEX', 'FT_IFTS_RWLOCK', 'FT_COMPROWSET_RWLOCK',
 'FT_MASTER_MERGE', 'FT_PROPERTYLIST_CACHE', 'FT_MASTER_MERGE_COORDINATOR',
 'PWAIT_RESOURCE_SEMAPHORE_FT_PARALLEL_QUERY_SYNC') THEN 'Full Text Search'

@@ -292,7 +292,7 @@ WITH PerfCounters AS (
 OR RTRIM(spi.[object_name]) LIKE '%:Advanced Analytics')
 AND TRY_CONVERT([uniqueidentifier], spi.[instance_name]) IS NOT NULL -- for cloud only
 THEN ISNULL(d.[name],RTRIM(spi.instance_name)) -- Elastic Pools counters exist for all databases but sys.databases only has current DB value
-WHEN
+WHEN
 RTRIM([object_name]) LIKE '%:Availability Replica'
 AND TRY_CONVERT([uniqueidentifier], spi.[instance_name]) IS NOT NULL -- for cloud only
 THEN ISNULL(d.[name],RTRIM(spi.[instance_name])) + RTRIM(SUBSTRING(spi.[instance_name], 37, LEN(spi.[instance_name])))

@@ -300,7 +300,7 @@ WITH PerfCounters AS (
 END AS [instance_name]
 ,CAST(spi.[cntr_value] AS BIGINT) AS [cntr_value]
 ,spi.[cntr_type]
-FROM sys.dm_os_performance_counters AS spi
+FROM sys.dm_os_performance_counters AS spi
 LEFT JOIN sys.databases AS d
 ON LEFT(spi.[instance_name], 36) -- some instance_name values have an additional identifier appended after the GUID
 = CASE

@@ -418,14 +418,14 @@ WITH PerfCounters AS (
 
 INSERT INTO @PCounters select * from PerfCounters
 
-SELECT
+SELECT
 'sqlserver_performance' AS [measurement]
 ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance]
 ,pc.[object_name] AS [object]
 ,pc.[counter_name] AS [counter]
-,CASE pc.[instance_name]
-WHEN '_Total' THEN 'Total'
-ELSE ISNULL(pc.[instance_name],'')
+,CASE pc.[instance_name]
+WHEN '_Total' THEN 'Total'
+ELSE ISNULL(pc.[instance_name],'')
 END AS [instance]
 ,CAST(CASE WHEN pc.[cntr_type] = 537003264 AND pc1.[cntr_value] > 0 THEN (pc.[cntr_value] * 1.0) / (pc1.[cntr_value] * 1.0) * 100 ELSE pc.[cntr_value] END AS float(10)) AS [value]
 ,cast(pc.[cntr_type] as varchar(25)) as [counter_type]

@@ -468,7 +468,7 @@ BEGIN TRY
 ,replica_updateability
 ,[session_db_name],[open_transaction]
 FROM (
-SELECT
+SELECT
 'sqlserver_requests' AS [measurement]
 ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance]
 ,DB_NAME() as [database_name]

@@ -491,18 +491,18 @@ BEGIN TRY
 ,s.[login_name]
 ,COALESCE(r.[open_transaction_count], s.[open_transaction_count]) AS [open_transaction]
 ,LEFT (CASE COALESCE(r.[transaction_isolation_level], s.[transaction_isolation_level])
-WHEN 0 THEN '0-Read Committed'
-WHEN 1 THEN '1-Read Uncommitted (NOLOCK)'
-WHEN 2 THEN '2-Read Committed'
-WHEN 3 THEN '3-Repeatable Read'
-WHEN 4 THEN '4-Serializable'
-WHEN 5 THEN '5-Snapshot'
-ELSE CONVERT (varchar(30), r.[transaction_isolation_level]) + '-UNKNOWN'
+WHEN 0 THEN '0-Read Committed'
+WHEN 1 THEN '1-Read Uncommitted (NOLOCK)'
+WHEN 2 THEN '2-Read Committed'
+WHEN 3 THEN '3-Repeatable Read'
+WHEN 4 THEN '4-Serializable'
+WHEN 5 THEN '5-Snapshot'
+ELSE CONVERT (varchar(30), r.[transaction_isolation_level]) + '-UNKNOWN'
 END, 30) AS [transaction_isolation_level]
 ,r.[granted_query_memory] AS [granted_query_memory_pages]
 ,r.[percent_complete]
 ,SUBSTRING(
-qt.[text],
+qt.[text],
 r.[statement_start_offset] / 2 + 1,
 (CASE WHEN r.[statement_end_offset] = -1
 THEN DATALENGTH(qt.[text])

@@ -518,7 +518,7 @@ BEGIN TRY
 ,s.[is_user_process]
 ,[blocking_or_blocked] = COUNT(*) OVER(PARTITION BY ISNULL(NULLIF(r.[blocking_session_id], 0),s.[session_id]))
 FROM sys.dm_exec_sessions AS s
-LEFT OUTER JOIN sys.dm_exec_requests AS r
+LEFT OUTER JOIN sys.dm_exec_requests AS r
 ON s.[session_id] = r.[session_id]
 OUTER APPLY sys.dm_exec_sql_text(r.[sql_handle]) AS qt
 ) AS data

@@ -532,12 +532,12 @@ BEGIN TRY
 OR [status] COLLATE Latin1_General_BIN NOT IN ('background', 'sleeping')
 )
 AND [session_id] <> @@SPID
-)
+)
 OPTION(MAXDOP 1);
 END TRY
 BEGIN CATCH
 IF (ERROR_NUMBER() <> 976) --Avoid possible errors from secondary replica
-THROW;
+THROW;
 END CATCH
 `
 

@@ -9,7 +9,7 @@ import (
 // ------------------ Azure Sql Elastic Pool ------------------------------------------------------
 // ------------------------------------------------------------------------------------------------
 const sqlAzurePoolResourceStats = `
-IF SERVERPROPERTY('EngineEdition') <> 5
+IF SERVERPROPERTY('EngineEdition') <> 5
 OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN
 DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. Check the database_type parameter in the telegraf configuration.';
 RAISERROR (@ErrorMessage,11,1)

@@ -30,16 +30,16 @@ SELECT TOP(1)
 ,cast([max_data_space_kb]/1024. as int) AS [storage_limit_mb]
 ,cast([avg_instance_cpu_percent] as float) AS [avg_instance_cpu_percent]
 ,cast([avg_allocated_storage_percent] as float) AS [avg_allocated_storage_percent]
-FROM
+FROM
 sys.dm_resource_governor_resource_pools_history_ex
-WHERE
+WHERE
 [name] = 'SloSharedPool1'
 ORDER BY
 [snapshot_time] DESC;
 `
 
 const sqlAzurePoolResourceGovernance = `
-IF SERVERPROPERTY('EngineEdition') <> 5
+IF SERVERPROPERTY('EngineEdition') <> 5
 OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN
 DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. Check the database_type parameter in the telegraf configuration.';
 RAISERROR (@ErrorMessage,11,1)

@@ -81,13 +81,13 @@ SELECT
 ,[volume_type_external_xstore_iops]
 ,[volume_pfs_iops]
 ,[volume_type_pfs_iops]
-FROM
+FROM
 sys.dm_user_db_resource_governance
 WHERE database_id = DB_ID();
 `
 
 const sqlAzurePoolDatabaseIO = `
-IF SERVERPROPERTY('EngineEdition') <> 5
+IF SERVERPROPERTY('EngineEdition') <> 5
 OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN
 DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. Check the database_type parameter in the telegraf configuration.';
 RAISERROR (@ErrorMessage,11,1)

@@ -107,9 +107,9 @@ SELECT
 END AS [database_name]
 ,vfs.[database_id]
 ,vfs.[file_id]
-,CASE
+,CASE
 WHEN vfs.[file_id] = 2 THEN 'LOG'
-ELSE 'ROWS'
+ELSE 'ROWS'
 END AS [file_type]
 ,vfs.[num_of_reads] AS [reads]
 ,vfs.[num_of_bytes_read] AS [read_bytes]

@@ -121,15 +121,15 @@ SELECT
 ,vfs.[io_stall_queued_write_ms] AS [rg_write_stall_ms]
 ,[size_on_disk_bytes]
 ,ISNULL([size_on_disk_bytes],0)/(1024*1024) AS [size_on_disk_mb]
-FROM
+FROM
 sys.dm_io_virtual_file_stats(NULL,NULL) AS vfs
-LEFT OUTER JOIN
+LEFT OUTER JOIN
 sys.dm_user_db_resource_governance AS gov
 ON vfs.[database_id] = gov.[database_id];
 `
 
 const sqlAzurePoolOsWaitStats = `
-IF SERVERPROPERTY('EngineEdition') <> 5
+IF SERVERPROPERTY('EngineEdition') <> 5
 OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN
 DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. Check the database_type parameter in the telegraf configuration.';
 RAISERROR (@ErrorMessage,11,1)

@@ -156,7 +156,7 @@ SELECT
 WHEN ws.[wait_type] LIKE 'RESOURCE_SEMAPHORE_QUERY_COMPILE%' THEN 'Compilation'
 WHEN ws.[wait_type] LIKE 'CLR[_]%' OR ws.[wait_type] LIKE 'SQLCLR%' THEN 'SQL CLR'
 WHEN ws.[wait_type] LIKE 'DBMIRROR_%' THEN 'Mirroring'
-WHEN ws.[wait_type] LIKE 'DTC[_]%' OR ws.[wait_type] LIKE 'DTCNEW%' OR ws.[wait_type] LIKE 'TRAN_%'
+WHEN ws.[wait_type] LIKE 'DTC[_]%' OR ws.[wait_type] LIKE 'DTCNEW%' OR ws.[wait_type] LIKE 'TRAN_%'
 OR ws.[wait_type] LIKE 'XACT%' OR ws.[wait_type] LIKE 'MSQL_XACT%' THEN 'Transaction'
 WHEN ws.[wait_type] LIKE 'SLEEP[_]%' OR ws.[wait_type] IN (
 'LAZYWRITER_SLEEP', 'SQLTRACE_BUFFER_FLUSH', 'SQLTRACE_INCREMENTAL_FLUSH_SLEEP',

@@ -172,7 +172,7 @@ SELECT
 'WRITELOG','LOGBUFFER','LOGMGR_RESERVE_APPEND',
 'LOGMGR_FLUSH', 'LOGMGR_PMM_LOG') THEN 'Tran Log IO'
 WHEN ws.[wait_type] LIKE 'LOG_RATE%' then 'Log Rate Governor'
-WHEN ws.[wait_type] LIKE 'HADR_THROTTLE[_]%'
+WHEN ws.[wait_type] LIKE 'HADR_THROTTLE[_]%'
 OR ws.[wait_type] = 'THROTTLE_LOG_RATE_LOG_STORAGE' THEN 'HADR Log Rate Governor'
 WHEN ws.[wait_type] LIKE 'RBIO_RG%' OR ws.[wait_type] LIKE 'WAIT_RBIO_RG%' THEN 'VLDB Log Rate Governor'
 WHEN ws.[wait_type] LIKE 'RBIO[_]%' OR ws.[wait_type] LIKE 'WAIT_RBIO[_]%' THEN 'VLDB RBIO'

@@ -188,16 +188,16 @@ SELECT
 'RESERVED_MEMORY_ALLOCATION_EXT', 'MEMORY_GRANT_UPDATE') THEN 'Memory'
 WHEN ws.[wait_type] IN ('WAITFOR','WAIT_FOR_RESULTS') THEN 'User Wait'
 WHEN ws.[wait_type] LIKE 'HADR[_]%' or ws.[wait_type] LIKE 'PWAIT_HADR%'
-OR ws.[wait_type] LIKE 'REPLICA[_]%' or ws.[wait_type] LIKE 'REPL_%'
+OR ws.[wait_type] LIKE 'REPLICA[_]%' or ws.[wait_type] LIKE 'REPL_%'
 OR ws.[wait_type] LIKE 'SE_REPL[_]%'
-OR ws.[wait_type] LIKE 'FCB_REPLICA%' THEN 'Replication'
-WHEN ws.[wait_type] LIKE 'SQLTRACE[_]%'
+OR ws.[wait_type] LIKE 'FCB_REPLICA%' THEN 'Replication'
+WHEN ws.[wait_type] LIKE 'SQLTRACE[_]%'
 OR ws.[wait_type] IN (
 'TRACEWRITE', 'SQLTRACE_LOCK', 'SQLTRACE_FILE_BUFFER', 'SQLTRACE_FILE_WRITE_IO_COMPLETION',
 'SQLTRACE_FILE_READ_IO_COMPLETION', 'SQLTRACE_PENDING_BUFFER_WRITERS', 'SQLTRACE_SHUTDOWN',
 'QUERY_TRACEOUT', 'TRACE_EVTNOTIF') THEN 'Tracing'
 WHEN ws.[wait_type] IN (
-'FT_RESTART_CRAWL', 'FULLTEXT GATHERER', 'MSSEARCH', 'FT_METADATA_MUTEX',
+'FT_RESTART_CRAWL', 'FULLTEXT GATHERER', 'MSSEARCH', 'FT_METADATA_MUTEX',
 'FT_IFTSHC_MUTEX', 'FT_IFTSISM_MUTEX', 'FT_IFTS_RWLOCK', 'FT_COMPROWSET_RWLOCK',
 'FT_MASTER_MERGE', 'FT_PROPERTYLIST_CACHE', 'FT_MASTER_MERGE_COORDINATOR',
 'PWAIT_RESOURCE_SEMAPHORE_FT_PARALLEL_QUERY_SYNC') THEN 'Full Text Search'

@@ -231,7 +231,7 @@ WHERE
 N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY',
 N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK',
 N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP',
-N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP',
+N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP',
 N'SQLTRACE_WAIT_ENTRIES', N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN',
 N'WAIT_XTP_HOST_WAIT', N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE',
 N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN',

@@ -243,7 +243,7 @@ AND [wait_time_ms] > 100;
 `
 
 const sqlAzurePoolMemoryClerks = `
-IF SERVERPROPERTY('EngineEdition') <> 5
+IF SERVERPROPERTY('EngineEdition') <> 5
 OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN
 DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. Check the database_type parameter in the telegraf configuration.';
 RAISERROR (@ErrorMessage,11,1)

@@ -256,11 +256,11 @@ SELECT
 ,(SELECT [elastic_pool_name] FROM sys.database_service_objectives WHERE database_id = DB_ID()) AS [elastic_pool_name]
 ,mc.[type] AS [clerk_type]
 ,SUM(mc.[pages_kb]) AS [size_kb]
-FROM
+FROM
 sys.dm_os_memory_clerks AS mc
 GROUP BY
 mc.[type]
-HAVING
+HAVING
 SUM(mc.[pages_kb]) >= 1024
 OPTION(RECOMPILE);
 `

@@ -274,7 +274,7 @@ OPTION(RECOMPILE);
 // The corresponding base value is the performance counter Buffer Manager:Buffer cache hit ratio base where the cntr_type column value is 1073939712.
 const sqlAzurePoolPerformanceCounters = `
 SET DEADLOCK_PRIORITY -10;
-IF SERVERPROPERTY('EngineEdition') <> 5
+IF SERVERPROPERTY('EngineEdition') <> 5
 OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN
 DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. Check the database_type parameter in the telegraf configuration.';
 RAISERROR (@ErrorMessage,11,1)

@@ -292,7 +292,7 @@ DECLARE @PCounters TABLE
 );
 
 WITH PerfCounters AS (
-SELECT DISTINCT
+SELECT DISTINCT
 RTRIM(pc.[object_name]) AS [object_name]
 ,RTRIM(pc.[counter_name]) AS [counter_name]
 ,ISNULL(gov.[database_name], RTRIM(pc.instance_name)) AS [instance_name]

@@ -300,7 +300,7 @@ WITH PerfCounters AS (
 ,pc.[cntr_type] AS [cntr_type]
 FROM sys.dm_os_performance_counters AS pc
 LEFT JOIN sys.dm_user_db_resource_governance AS gov
-ON
+ON
 TRY_CONVERT([uniqueidentifier], pc.[instance_name]) = gov.[physical_database_guid]
 WHERE
 /*filter out unnecessary SQL DB system database counters, other than master and tempdb*/

@@ -421,15 +421,15 @@ SELECT
 ,pc.[counter_name] AS [counter]
 ,CASE pc.[instance_name] WHEN '_Total' THEN 'Total' ELSE ISNULL(pc.[instance_name],'') END AS [instance]
 ,CAST(
-CASE WHEN pc.[cntr_type] = 537003264 AND base.[cntr_value] > 0
-THEN (pc.[cntr_value] * 1.0) / (base.[cntr_value] * 1.0) * 100
-ELSE pc.[cntr_value]
-END
+CASE WHEN pc.[cntr_type] = 537003264 AND base.[cntr_value] > 0
+THEN (pc.[cntr_value] * 1.0) / (base.[cntr_value] * 1.0) * 100
+ELSE pc.[cntr_value]
+END
 AS float) AS [value]
 ,CAST(pc.[cntr_type] AS varchar(25)) AS [counter_type]
 FROM @PCounters AS pc
 LEFT OUTER JOIN @PCounters AS base
-ON
+ON
 pc.[counter_name] = REPLACE(base.[counter_name],' base','')
 AND pc.[object_name] = base.[object_name]
 AND pc.[instance_name] = base.[instance_name]

@@ -440,7 +440,7 @@ OPTION(RECOMPILE)
 `
 
 const sqlAzurePoolSchedulers = `
-IF SERVERPROPERTY('EngineEdition') <> 5
+IF SERVERPROPERTY('EngineEdition') <> 5
 OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN
 DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. Check the database_type parameter in the telegraf configuration.';
 RAISERROR (@ErrorMessage,11,1)

@@ -473,6 +473,6 @@ SELECT
 ,[total_cpu_idle_capped_ms]
 ,[total_scheduler_delay_ms]
 ,[ideal_workers_limit]
-FROM
+FROM
 sys.dm_os_schedulers;
 `

@@ -130,9 +130,9 @@ LEFT OUTER JOIN ( VALUES
 ,(''MEMORYCLERK_QUERYDISKSTORE_HASHMAP'',''QDS Query/Plan Hash Table'')
 ) AS clerk_names([system_name],[name])
 ON mc.[type] = clerk_names.[system_name]
-GROUP BY
+GROUP BY
 ISNULL(clerk_names.[name], mc.[type])
-HAVING
+HAVING
 SUM(' + @Columns + N') >= 1024
 OPTION(RECOMPILE);
 '

@@ -144,7 +144,7 @@ EXEC(@SqlStatement)
 // EngineEdition=5 is Azure SQL DB
 const sqlDatabaseIOV2 = `
 SET DEADLOCK_PRIORITY -10;
-DECLARE
+DECLARE
 @SqlStatement AS nvarchar(max)
 ,@EngineEdition AS tinyint = CAST(SERVERPROPERTY('EngineEdition') AS int)
 

@@ -200,25 +200,25 @@ BEGIN
 DECLARE @Columns as nvarchar(max) = ''
 DECLARE @Tables as nvarchar(max) = ''
 
-IF @MajorMinorVersion >= 1050 BEGIN
+IF @MajorMinorVersion >= 1050 BEGIN
 /*in [volume_mount_point] any trailing "\" char will be removed by telegraf */
 SET @Columns += N',[volume_mount_point]'
 SET @Tables += N'CROSS APPLY sys.dm_os_volume_stats(vfs.[database_id], vfs.[file_id]) AS vs'
 END
-
+
 IF @MajorMinorVersion > 1100 BEGIN
 SET @Columns += N'
-,vfs.io_stall_queued_read_ms AS [rg_read_stall_ms]
+,vfs.io_stall_queued_read_ms AS [rg_read_stall_ms]
 ,vfs.io_stall_queued_write_ms AS [rg_write_stall_ms]'
 END
-
+
 SET @SqlStatement = N'
 SELECT
 ''sqlserver_database_io'' AS [measurement]
 ,REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance]
 ,DB_NAME(vfs.[database_id]) AS [database_name]
 ,COALESCE(mf.[physical_name],''RBPEX'') AS [physical_filename] --RPBEX = Resilient Buffer Pool Extension
-,COALESCE(mf.[name],''RBPEX'') AS [logical_filename] --RPBEX = Resilient Buffer Pool Extension
+,COALESCE(mf.[name],''RBPEX'') AS [logical_filename] --RPBEX = Resilient Buffer Pool Extension
 ,mf.[type_desc] AS [file_type]
 ,vfs.[io_stall_read_ms] AS [read_latency_ms]
 ,vfs.[num_of_reads] AS [reads]

@@ -232,7 +232,7 @@ BEGIN
 ON vfs.[database_id] = mf.[database_id] AND vfs.[file_id] = mf.[file_id]
 '
 + @Tables;
-
+
 EXEC sp_executesql @SqlStatement
 
 END

@@ -246,7 +246,7 @@ SET DEADLOCK_PRIORITY -10;
 DECLARE
 @SqlStatement AS nvarchar(max) = ''
 ,@EngineEdition AS tinyint = CAST(SERVERPROPERTY('EngineEdition') AS int)
-
+
 IF @EngineEdition = 8 /*Managed Instance*/
 SET @SqlStatement = 'SELECT TOP 1 ''sqlserver_server_properties'' AS [measurement],
 REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance],

@@ -274,7 +274,7 @@ IF @EngineEdition = 8 /*Managed Instance*/
 SUM( CASE WHEN state = 4 THEN 1 ELSE 0 END ) AS db_suspect,
 SUM( CASE WHEN state = 6 or state = 10 THEN 1 ELSE 0 END ) AS db_offline
 FROM sys.databases
-) AS dbs
+) AS dbs
 ORDER BY start_time DESC';
 
 IF @EngineEdition = 5 /*Azure SQL DB*/

@@ -286,17 +286,17 @@ IF @EngineEdition = 5 /*Azure SQL DB*/
 slo.edition as sku,
 @EngineEdition AS engine_edition,
 slo.service_objective AS hardware_type,
-CASE
-WHEN slo.edition = ''Hyperscale'' then NULL
-ELSE cast(DATABASEPROPERTYEX(DB_NAME(),''MaxSizeInBytes'') as bigint)/(1024*1024)
+CASE
+WHEN slo.edition = ''Hyperscale'' then NULL
+ELSE cast(DATABASEPROPERTYEX(DB_NAME(),''MaxSizeInBytes'') as bigint)/(1024*1024)
 END AS total_storage_mb,
 CASE
 WHEN slo.edition = ''Hyperscale'' then NULL
 ELSE
 (cast(DATABASEPROPERTYEX(DB_NAME(),''MaxSizeInBytes'') as bigint)/(1024*1024)-
 (select SUM(size/128 - CAST(FILEPROPERTY(name, ''SpaceUsed'') AS int)/128) FROM sys.database_files )
-)
-END AS available_storage_mb,
+)
+END AS available_storage_mb,
 (select DATEDIFF(MINUTE,sqlserver_start_time,GETDATE()) from sys.dm_os_sys_info) as uptime
 FROM sys.databases d
 -- sys.databases.database_id may not match current DB_ID on Azure SQL DB

@@ -316,7 +316,7 @@ BEGIN
 END AS [hardware_type]';
 ELSE /*data not available*/
 SET @Columns = N',''<n/a>'' AS [hardware_type]';
-
+
 SET @SqlStatement = 'SELECT ''sqlserver_server_properties'' AS [measurement],
 REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance],
 DB_NAME() as [database_name],

@@ -344,7 +344,7 @@ BEGIN
 SUM( CASE WHEN state = 6 or state = 10 THEN 1 ELSE 0 END ) AS db_offline
 FROM sys.databases
 ) AS dbs';
-
+
 END
 EXEC sp_executesql @SqlStatement , N'@EngineEdition smallint', @EngineEdition = @EngineEdition;
 

@@ -365,7 +365,7 @@ DECLARE
 END
 
 SET @SqlStatement = N'
-SELECT
+SELECT
 ''sqlserver_schedulers'' AS [measurement]
 ,REPLACE(@@SERVERNAME, ''\'', '':'') AS [sql_instance]
 ,DB_NAME() AS [database_name]

@@ -436,7 +436,7 @@ SET @SqlStatement = N'SELECT DISTINCT
 OR RTRIM(spi.object_name) LIKE ''%:Query Store''
 OR RTRIM(spi.object_name) LIKE ''%:Columnstore''
 OR RTRIM(spi.object_name) LIKE ''%:Advanced Analytics'')
-AND TRY_CONVERT(uniqueidentifier, spi.instance_name)
+AND TRY_CONVERT(uniqueidentifier, spi.instance_name)
 IS NOT NULL -- for cloud only
 THEN ISNULL(d.name,RTRIM(spi.instance_name)) -- Elastic Pools counters exist for all databases but sys.databases only has current DB value
 WHEN RTRIM(object_name) LIKE ''%:Availability Replica''

@@ -451,7 +451,7 @@ SET @SqlStatement = N'SELECT DISTINCT
 spi.cntr_type
 FROM sys.dm_os_performance_counters AS spi '
 +
-CASE
+CASE
 WHEN @EngineEdition IN (5,8) --- Join is ONLY for managed instance and SQL DB, not for on-prem
 THEN CAST(N'LEFT JOIN sys.databases AS d
 ON LEFT(spi.instance_name, 36) -- some instance_name values have an additional identifier appended after the GUID

@@ -1251,7 +1251,7 @@ const sqlServerRequestsV2 string = `
 SET DEADLOCK_PRIORITY -10;
 SET NOCOUNT ON;
 
-DECLARE
+DECLARE
 @SqlStatement AS nvarchar(max)
 ,@EngineEdition AS tinyint = CAST(SERVERPROPERTY('EngineEdition') AS int)
 ,@MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),4) AS int) * 100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),3) AS int)

@@ -1273,7 +1273,7 @@ ELSE
 SET @SqlStatement = N'
 SELECT blocking_session_id into #blockingSessions FROM sys.dm_exec_requests WHERE blocking_session_id != 0
 create index ix_blockingSessions_1 on #blockingSessions (blocking_session_id)
-SELECT
+SELECT
 ''sqlserver_requests'' AS [measurement]
 , REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance]
 , DB_NAME() as [database_name]

@@ -1293,20 +1293,20 @@ N' , COALESCE(r.status,s.status) AS status
 , s.program_name
 , s.host_name
 , s.nt_user_name '
-+ @Columns +
++ @Columns +
 N', LEFT (CASE COALESCE(r.transaction_isolation_level, s.transaction_isolation_level)
-WHEN 0 THEN ''0-Read Committed''
-WHEN 1 THEN ''1-Read Uncommitted (NOLOCK)''
-WHEN 2 THEN ''2-Read Committed''
-WHEN 3 THEN ''3-Repeatable Read''
-WHEN 4 THEN ''4-Serializable''
-WHEN 5 THEN ''5-Snapshot''
-ELSE CONVERT (varchar(30), r.transaction_isolation_level) + ''-UNKNOWN''
+WHEN 0 THEN ''0-Read Committed''
+WHEN 1 THEN ''1-Read Uncommitted (NOLOCK)''
+WHEN 2 THEN ''2-Read Committed''
+WHEN 3 THEN ''3-Repeatable Read''
+WHEN 4 THEN ''4-Serializable''
+WHEN 5 THEN ''5-Snapshot''
+ELSE CONVERT (varchar(30), r.transaction_isolation_level) + ''-UNKNOWN''
 END, 30) AS transaction_isolation_level
 , r.granted_query_memory as granted_query_memory_pages
 , r.percent_complete
 , SUBSTRING(
-qt.text,
+qt.text,
 r.statement_start_offset / 2 + 1,
 (CASE WHEN r.statement_end_offset = -1
 THEN DATALENGTH(qt.text)

@@ -1319,11 +1319,11 @@ N', LEFT (CASE COALESCE(r.transaction_isolation_level, s.transaction_isolation_l
 , CONVERT(varchar(20),[query_hash],1) as [query_hash]
 , CONVERT(varchar(20),[query_plan_hash],1) as [query_plan_hash]
 FROM sys.dm_exec_sessions AS s
-LEFT OUTER JOIN sys.dm_exec_requests AS r
+LEFT OUTER JOIN sys.dm_exec_requests AS r
 ON s.session_id = r.session_id
 OUTER APPLY sys.dm_exec_sql_text(r.sql_handle) AS qt
 WHERE 1 = 1
-AND (r.session_id IS NOT NULL AND (s.is_user_process = 1
+AND (r.session_id IS NOT NULL AND (s.is_user_process = 1
 OR r.status COLLATE Latin1_General_BIN NOT IN (''background'', ''sleeping'')) AND r.session_id <> @@SPID)
 OR (s.session_id IN (SELECT blocking_session_id FROM #blockingSessions))
 OPTION(MAXDOP 1)'

@@ -1333,7 +1333,7 @@ BEGIN TRY
 END TRY
 BEGIN CATCH
 IF (ERROR_NUMBER() <> 976) --Avoid possible errors from secondary replica
-THROW;
+THROW;
 END CATCH
 `
 

@@ -1346,7 +1346,7 @@ Gets data about disk space, only for volumes used by SQL Server (data available
 DECLARE
 @EngineEdition AS tinyint = CAST(SERVERPROPERTY('EngineEdition') AS int)
 ,@MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),4) AS int)*100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') as nvarchar),3) AS int)
-
+
 IF @EngineEdition IN (2,3,4) AND @MajorMinorVersion >= 1050
 BEGIN
 SELECT DISTINCT

@@ -203,7 +203,7 @@ DECLARE
 ,@MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),4) AS int)*100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),3) AS int)
 ,@Columns AS nvarchar(MAX) = ''
 
-IF CAST(SERVERPROPERTY('ProductVersion') AS varchar(50)) >= '10.50.2500.0'
+IF CAST(SERVERPROPERTY('ProductVersion') AS varchar(50)) >= '10.50.2500.0'
 SET @Columns = N'
 ,CASE [virtual_machine_type_desc]
 WHEN ''NONE'' THEN ''PHYSICAL Machine''

@@ -1144,7 +1144,7 @@ BEGIN TRY
 END TRY
 BEGIN CATCH
 IF (ERROR_NUMBER() <> 976) --Avoid possible errors from secondary replica
-THROW;
+THROW;
 END CATCH
 `
 

@@ -1,21 +1,32 @@
 # github webhooks
 
-You should configure your Organization's Webhooks to point at the `webhooks` service. To do this go to `github.com/{my_organization}` and click `Settings > Webhooks > Add webhook`. In the resulting menu set `Payload URL` to `http://<my_ip>:1619/github`, `Content type` to `application/json` and under the section `Which events would you like to trigger this webhook?` select 'Send me **everything**'. By default all of the events will write to the `github_webhooks` measurement, this is configurable by setting the `measurement_name` in the config file.
+You should configure your Organization's Webhooks to point at the `webhooks`
+service. To do this go to `github.com/{my_organization}` and click
+`Settings > Webhooks > Add webhook`. In the resulting menu set `Payload URL` to
+`http://<my_ip>:1619/github`, `Content type` to `application/json` and under
+the section `Which events would you like to trigger this webhook?` select
+'Send me **everything**'. By default all of the events will write to the
+`github_webhooks` measurement, this is configurable by setting the
+`measurement_name` in the config file.
 
-You can also add a secret that will be used by telegraf to verify the authenticity of the requests.
+You can also add a secret that will be used by telegraf to verify the
+authenticity of the requests.
 
-## Events
+## Metrics
 
-The titles of the following sections are links to the full payloads and details for each event. The body contains what information from the event is persisted. The format is as follows:
+The titles of the following sections are links to the full payloads and details
+for each event. The body contains what information from the event is persisted.
+The format is as follows:
 
 ```toml
 # TAGS
 * 'tagKey' = `tagValue` type
-# FIELDS
+# FIELDS
 * 'fieldKey' = `fieldValue` type
 ```
 
-The tag values and field values show the place on the incoming JSON object where the data is sourced from.
+The tag values and field values show the place on the incoming JSON object
+where the data is sourced from.
 
 ### [`commit_comment` event](https://developer.github.com/v3/activity/events/types/#commitcommentevent)
 

@@ -1,6 +1,10 @@
 # particle webhooks
 
-You should configure your Particle.io's Webhooks to point at the `webhooks` service. To do this go to [https://console.particle.io](https://console.particle.io/) and click `Integrations > New Integration > Webhook`. In the resulting page set `URL` to `http://<my_ip>:1619/particle`, and under `Advanced Settings` click on `JSON` and add:
+You should configure your Particle.io's Webhooks to point at the `webhooks`
+service. To do this go to [https://console.particle.io][particle.io]
+and click `Integrations > New Integration > Webhook`. In the resulting page set
+`URL` to `http://<my_ip>:1619/particle`, and under `Advanced Settings` click
+on `JSON` and add:
 
 ```json
 {

@@ -10,18 +14,21 @@ You should configure your Particle.io's Webhooks to point at the `webhooks` serv
 
 If required, enter your username and password, etc. and then click `Save`
 
+[particle.io]: https://console.particle.io/
+
 ## Events
 
-Your Particle device should publish an event that contains a JSON in the form of:
+Your Particle device should publish an event that contains a JSON in the form
+of:
 
 ```json
 String data = String::format("{ \"tags\" : {
-\"tag_name\": \"tag_value\",
+\"tag_name\": \"tag_value\",
 \"other_tag\": \"other_value\"
-},
+},
 \"values\": {
-\"value_name\": %f,
-\"other_value\": %f,
+\"value_name\": %f,
+\"other_value\": %f,
 }
 }", value_value, other_value
 );

@@ -29,8 +36,10 @@ String data = String::format("{ \"tags\" : {
 ```
 
 Escaping the "" is required in the source file.
-The number of tag values and field values is not restricted so you can send as many values per webhook call as you'd like.
+The number of tag values and field values is not restricted so you can send as
+many values per webhook call as you'd like.
 
-You will need to enable JSON messages in the Webhooks setup of Particle.io, and make sure to check the "include default data" box as well.
+You will need to enable JSON messages in the Webhooks setup of Particle.io, and
+make sure to check the "include default data" box as well.
 
 See [webhook doc](https://docs.particle.io/reference/webhooks/)

@@ -66,7 +66,7 @@ func TestDecodeUTF16(t *testing.T) {
 
 var xmlbroken = `
 <BrokenXML>
-<Data/>qq</Data>
+<Data/>qq</Data>
 </BrokenXML>
 `
 

@@ -79,9 +79,9 @@ var xmldata = `
 </CbsPackageChangeState>
 </UserData>
 <EventData>
-<Data>2120-07-26T15:24:25Z</Data>
-<Data>RulesEngine</Data>
-<Data Name="Engine">RulesEngine</Data>
+<Data>2120-07-26T15:24:25Z</Data>
+<Data>RulesEngine</Data>
+<Data Name="Engine">RulesEngine</Data>
 </EventData>
 </Event>
 `

@@ -393,7 +393,7 @@ func TestTOMLConfig(t *testing.T) {
 url = "https://localhost:3000"
 data_format = "carbon2"
 [outputs.sumologic.headers]
-X-Sumo-Name = "dummy"
+X-Sumo-Name = "dummy"
 X-Sumo-Host = "dummy"
 X-Sumo-Category = "dummy"
 X-Sumo-Dimensions = "dummy"

@@ -35,20 +35,20 @@ values.
 
 ## Indicates the number of rows to skip before looking for metadata and header information.
 csv_skip_rows = 0
-
-## Indicates the number of rows to parse as metadata before looking for header information.
-## By default, the parser assumes there are no metadata rows to parse.
+
+## Indicates the number of rows to parse as metadata before looking for header information.
+## By default, the parser assumes there are no metadata rows to parse.
 ## If set, the parser would use the provided separators in the csv_metadata_separators to look for metadata.
-## Please note that by default, the (key, value) pairs will be added as tags.
+## Please note that by default, the (key, value) pairs will be added as tags.
 ## If fields are required, use the converter processor.
 csv_metadata_rows = 0
-
+
 ## A list of metadata separators. If csv_metadata_rows is set,
 ## csv_metadata_separators must contain at least one separator.
 ## Please note that separators are case sensitive and the sequence of the seperators are respected.
 csv_metadata_separators = [":", "="]
-
-## A set of metadata trim characters.
+
+## A set of metadata trim characters.
 ## If csv_metadata_trim_set is not set, no trimming is performed.
 ## Please note that the trim cutset is case sensitive.
 csv_metadata_trim_set = ""

@@ -39,7 +39,7 @@ func TestParseValidEmptyJSON(t *testing.T) {
 const validCounterJSON = `
 {
 "version": "3.0.0",
-"counters" : {
+"counters" : {
 "measurement" : {
 "count" : 1
 }

@@ -75,7 +75,7 @@ const validEmbeddedCounterJSON = `
 "tag3 space,comma=equals" : "red ,="
 },
 "metrics" : {
-"counters" : {
+"counters" : {
 "measurement" : {
 "count" : 1
 }

@@ -130,7 +130,7 @@ const validMeterJSON1 = `
 {
 "version": "3.0.0",
 "counters" : {},
-"meters" : {
+"meters" : {
 "measurement1" : {
 "count" : 1,
 "m15_rate" : 1.0,

@@ -171,7 +171,7 @@ const validMeterJSON2 = `
 {
 "version": "3.0.0",
 "counters" : {},
-"meters" : {
+"meters" : {
 "measurement2,key=value" : {
 "count" : 2,
 "m15_rate" : 2.0,

@@ -6,7 +6,7 @@
 
 [[processors.execd]]
 command = ["ruby", "plugins/processors/execd/examples/multiplier_line_protocol/multiplier_line_protocol.rb"]
-
+
 
 [[outputs.file]]
 files = ["stdout"]

@@ -10,7 +10,7 @@ metrics.{
 "time": timestamp,
 "platform": platform,
 "key": tags.key,
-"events": [
+"events": [
 {
 "time": timestamp,
 "flag": tags.flagname,

@@ -10,7 +10,7 @@ json_transformation = '''
 "time": timestamp,
 "platform": platform,
 "key": tags.key,
-"events": [
+"events": [
 {
 "time": timestamp,
 "flag": tags.flagname,