feat(outputs.postgresql): add Postgresql output (#11672)

This commit is contained in:
Joshua Powers 2022-08-25 13:33:23 -06:00 committed by GitHub
parent dd85fc03a9
commit 49cd0a8131
20 changed files with 4450 additions and 5 deletions


@@ -21,6 +21,9 @@ following works:
- github.com/Azure/go-autorest [Apache License 2.0](https://github.com/Azure/go-autorest/blob/master/LICENSE)
- github.com/Azure/go-ntlmssp [MIT License](https://github.com/Azure/go-ntlmssp/blob/master/LICENSE)
- github.com/ClickHouse/clickhouse-go [MIT License](https://github.com/ClickHouse/clickhouse-go/blob/master/LICENSE)
- github.com/Masterminds/goutils [Apache License 2.0](https://github.com/Masterminds/goutils/blob/master/LICENSE.txt)
- github.com/Masterminds/semver [MIT License](https://github.com/Masterminds/semver/blob/master/LICENSE.txt)
- github.com/Masterminds/sprig [MIT License](https://github.com/Masterminds/sprig/blob/master/LICENSE.txt)
- github.com/Mellanox/rdmamap [Apache License 2.0](https://github.com/Mellanox/rdmamap/blob/master/LICENSE)
- github.com/Microsoft/go-winio [MIT License](https://github.com/Microsoft/go-winio/blob/master/LICENSE)
- github.com/Microsoft/hcsshim [MIT License](https://github.com/microsoft/hcsshim/blob/master/LICENSE)
@@ -76,6 +79,7 @@ following works:
- github.com/cespare/xxhash [MIT License](https://github.com/cespare/xxhash/blob/master/LICENSE.txt)
- github.com/cisco-ie/nx-telemetry-proto [Apache License 2.0](https://github.com/cisco-ie/nx-telemetry-proto/blob/master/LICENSE)
- github.com/containerd/containerd [Apache License 2.0](https://github.com/containerd/containerd/blob/master/LICENSE)
- github.com/coocood/freecache [MIT License](https://github.com/coocood/freecache/blob/master/LICENSE)
- github.com/coreos/go-semver [Apache License 2.0](https://github.com/coreos/go-semver/blob/main/LICENSE)
- github.com/coreos/go-systemd [Apache License 2.0](https://github.com/coreos/go-systemd/blob/main/LICENSE)
- github.com/couchbase/go-couchbase [MIT License](https://github.com/couchbase/go-couchbase/blob/master/LICENSE)
@@ -155,6 +159,7 @@ following works:
- github.com/hashicorp/go-uuid [Mozilla Public License 2.0](https://github.com/hashicorp/go-uuid/blob/master/LICENSE)
- github.com/hashicorp/golang-lru [Mozilla Public License 2.0](https://github.com/hashicorp/golang-lru/blob/master/LICENSE)
- github.com/hashicorp/serf [Mozilla Public License 2.0](https://github.com/hashicorp/serf/blob/master/LICENSE)
- github.com/huandu/xstrings [MIT License](https://github.com/huandu/xstrings/blob/master/LICENSE)
- github.com/imdario/mergo [BSD 3-Clause "New" or "Revised" License](https://github.com/imdario/mergo/blob/master/LICENSE)
- github.com/influxdata/go-syslog [MIT License](https://github.com/influxdata/go-syslog/blob/develop/LICENSE)
- github.com/influxdata/influxdb-observability/common [MIT License](https://github.com/influxdata/influxdb-observability/blob/main/LICENSE)
@@ -173,6 +178,7 @@ following works:
- github.com/jackc/pgservicefile [MIT License](https://github.com/jackc/pgservicefile/blob/master/LICENSE)
- github.com/jackc/pgtype [MIT License](https://github.com/jackc/pgtype/blob/master/LICENSE)
- github.com/jackc/pgx [MIT License](https://github.com/jackc/pgx/blob/master/LICENSE)
- github.com/jackc/puddle [MIT License](https://github.com/jackc/puddle/blob/master/LICENSE)
- github.com/jaegertracing/jaeger [Apache License 2.0](https://github.com/jaegertracing/jaeger/blob/master/LICENSE)
- github.com/james4k/rcon [MIT License](https://github.com/james4k/rcon/blob/master/LICENSE)
- github.com/jcmturner/aescts [Apache License 2.0](https://github.com/jcmturner/aescts/blob/master/LICENSE)
@@ -206,8 +212,10 @@ following works:
- github.com/microsoft/ApplicationInsights-Go [MIT License](https://github.com/microsoft/ApplicationInsights-Go/blob/master/LICENSE)
- github.com/miekg/dns [BSD 3-Clause Clear License](https://github.com/miekg/dns/blob/master/LICENSE)
- github.com/minio/highwayhash [Apache License 2.0](https://github.com/minio/highwayhash/blob/master/LICENSE)
- github.com/mitchellh/copystructure [MIT License](https://github.com/mitchellh/copystructure/blob/master/LICENSE)
- github.com/mitchellh/go-homedir [MIT License](https://github.com/mitchellh/go-homedir/blob/master/LICENSE)
- github.com/mitchellh/mapstructure [MIT License](https://github.com/mitchellh/mapstructure/blob/master/LICENSE)
- github.com/mitchellh/reflectwalk [MIT License](https://github.com/mitchellh/reflectwalk/blob/master/LICENSE)
- github.com/moby/ipvs [Apache License 2.0](https://github.com/moby/ipvs/blob/master/LICENSE)
- github.com/moby/sys/mount [Apache License 2.0](https://github.com/moby/sys/blob/main/LICENSE)
- github.com/moby/sys/mountinfo [Apache License 2.0](https://github.com/moby/sys/blob/main/LICENSE)

go.mod

@@ -19,6 +19,7 @@ require (
github.com/BurntSushi/toml v1.2.0
github.com/ClickHouse/clickhouse-go v1.5.4
github.com/DATA-DOG/go-sqlmock v1.5.0
github.com/Masterminds/sprig v2.22.0+incompatible
github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee
github.com/Shopify/sarama v1.35.0
github.com/aerospike/aerospike-client-go/v5 v5.9.0
@@ -28,6 +29,7 @@ require (
github.com/antchfx/jsonquery v1.3.0
github.com/antchfx/xmlquery v1.3.12
github.com/antchfx/xpath v1.2.1
github.com/apache/iotdb-client-go v0.12.2-0.20220722111104-cd17da295b46
github.com/apache/thrift v0.16.0
github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5
@@ -48,6 +50,7 @@ require (
github.com/bmatcuk/doublestar/v3 v3.0.0
github.com/caio/go-tdigest v3.1.0+incompatible
github.com/cisco-ie/nx-telemetry-proto v0.0.0-20220628142927-f4160bcb943c
github.com/coocood/freecache v1.2.2
github.com/coreos/go-semver v0.3.0
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
github.com/couchbase/go-couchbase v0.1.1
@@ -97,6 +100,9 @@ require (
github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65
github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8
github.com/intel/iaevents v1.0.0
github.com/jackc/pgconn v1.13.0
github.com/jackc/pgio v1.0.0
github.com/jackc/pgtype v1.12.0
github.com/jackc/pgx/v4 v4.17.0
github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a
github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca
@@ -104,6 +110,7 @@ require (
github.com/kardianos/service v1.2.1
github.com/karrick/godirwalk v1.17.0
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b
github.com/lxc/lxd v0.0.0-20220809104211-1aaea4d7159b
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369
github.com/mdlayher/apcupsd v0.0.0-20220319200143-473c7b5f3c6a
@@ -202,13 +209,14 @@ require (
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/Azure/go-ntlmssp v0.0.0-20220621081337-cb9428e4ac1e // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver v1.5.0 // indirect
github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/Microsoft/hcsshim v0.9.4 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/alecthomas/participle v0.4.1 // indirect
github.com/apache/arrow/go/arrow v0.0.0-20211006091945-a69884db78f4 // indirect
github.com/apache/iotdb-client-go v0.12.2-0.20220722111104-cd17da295b46
github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect
github.com/armon/go-metrics v0.3.10 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.3 // indirect
@@ -282,14 +290,13 @@ require (
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/serf v0.9.7 // indirect
github.com/huandu/xstrings v1.3.2 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
github.com/jackc/pgconn v1.13.0 // indirect
github.com/jackc/pgio v1.0.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgproto3/v2 v2.3.1 // indirect
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect
github.com/jackc/pgtype v1.12.0 // indirect
github.com/jackc/puddle v1.2.1 // indirect
github.com/jaegertracing/jaeger v1.26.0 // indirect
github.com/jcmturner/aescts/v2 v2.0.0 // indirect
github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
@@ -303,7 +310,6 @@ require (
github.com/juju/webbrowser v1.0.0 // indirect
github.com/julienschmidt/httprouter v1.3.0 // indirect
github.com/klauspost/compress v1.15.9 // indirect
github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b
github.com/kr/fs v0.1.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 // indirect
@@ -317,8 +323,10 @@ require (
github.com/mdlayher/netlink v1.6.0 // indirect
github.com/mdlayher/socket v0.2.3 // indirect
github.com/minio/highwayhash v1.0.2 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/sys/mount v0.2.0 // indirect
github.com/moby/sys/mountinfo v0.6.2 // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect

go.sum

@@ -186,10 +186,14 @@ github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5H
github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4=
github.com/HdrHistogram/hdrhistogram-go v1.0.1/go.mod h1:BWJ+nMSHY3L41Zj7CA3uXnloDp7xxV0YvstAE7nKTaM=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60=
github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee h1:atI/FFjXh6hIVlPE1Jup9m8N4B9q/OSbMUe2EBahs+w=
github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee/go.mod h1:jDA6v0TUYrFEIAE5uGJ29LQOeONIgMdP4Rkqb8HUnPM=
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
@@ -627,6 +631,8 @@ github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRD
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/coocood/freecache v1.2.2 h1:UPkJCxhRujykq1jXuwxAPgDHnm6lKGrLZPnuHzgWRtE=
github.com/coocood/freecache v1.2.2/go.mod h1:RBUWa/Cy+OHdfTGFEhEuE1pMCMX51Ncizj7rthiQ3vk=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -1341,6 +1347,8 @@ github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKe
github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hetznercloud/hcloud-go v1.24.0/go.mod h1:3YmyK8yaZZ48syie6xpm3dt26rtB6s65AisBHylXYFA=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw=
github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -1430,6 +1438,7 @@ github.com/jackc/pgx/v4 v4.17.0/go.mod h1:Gd6RmOhtFLTu8cp/Fhq4kP195KrshxYJH3oW8A
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.2.1 h1:gI8os0wpRXFd4FiAY2dWiqRK037tjj3t7rKFeO4X5iw=
github.com/jackc/puddle v1.2.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jaegertracing/jaeger v1.22.0/go.mod h1:WnwW68MjJEViSLRQhe0nkIsBDaF3CzfFd8wJcpJv24k=
github.com/jaegertracing/jaeger v1.23.0/go.mod h1:gB6Qc+Kjd/IX1G82oGTArbHI3ZRO//iUkaMW+gzL9uw=
@@ -1713,6 +1722,8 @@ github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLT
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@@ -1732,6 +1743,8 @@ github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mjibson/esc v0.2.0/go.mod h1:9Hw9gxxfHulMF5OJKCyhYD7PzlSdhzXyaGEBRPH1OPs=
github.com/moby/ipvs v1.0.2 h1:NSbzuRTvfneftLU3VwPU5QuA6NZ0IUmqq9+VHcQxqHw=
github.com/moby/ipvs v1.0.2/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ=


@@ -0,0 +1,5 @@
//go:build !custom || outputs || outputs.postgresql

package all

import _ "github.com/influxdata/telegraf/plugins/outputs/postgresql" // register plugin


@@ -0,0 +1,26 @@
# This Dockerfile can be used to build an image including the pguint extension.
#
# docker build -t postgres:pguint .
# docker run -d --name postgres -p 5432:5432 -e POSTGRES_HOST_AUTH_METHOD=trust postgres:pguint
# docker logs -f postgres 2>&1 | grep -q 'listening on IPv4 address "0.0.0.0", port 5432'
# go test
# Tag from https://hub.docker.com/_/postgres?tab=tags
ARG POSTGRES_TAG=latest
FROM postgres:${POSTGRES_TAG}
# Declare the build args after FROM so they are in scope for the ENV defaults below.
ARG PGUINT_REPO
ARG PGUINT_REF
RUN apt-get update && apt-get install -y build-essential curl postgresql-server-dev-${PG_MAJOR}=${PG_VERSION}
ENV PGUINT_REPO=${PGUINT_REPO:-phemmer/pguint}
ENV PGUINT_REF=${PGUINT_REF:-fix-getmsgint64}
RUN mkdir /pguint && cd /pguint && \
curl -L https://github.com/${PGUINT_REPO}/tarball/${PGUINT_REF} | tar -zx --strip-components=1 && \
make && make install && \
echo 'CREATE EXTENSION uint;' > /docker-entrypoint-initdb.d/uint.sql && \
echo '\\c template1' >> /docker-entrypoint-initdb.d/uint.sql && \
echo 'CREATE EXTENSION uint;' >> /docker-entrypoint-initdb.d/uint.sql


@@ -0,0 +1,251 @@
# PostgreSQL Output Plugin

This output plugin writes metrics to a PostgreSQL (or PostgreSQL-compatible) database. The plugin manages the schema, automatically adding missing columns.

## Configuration
```toml @sample.conf
# Publishes metrics to a postgresql database
[[outputs.postgresql]]
## Specify connection address via the standard libpq connection string:
## host=... user=... password=... sslmode=... dbname=...
## Or a URL:
## postgres://[user[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
## See https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
##
## All connection parameters are optional. Environment vars are also supported.
## e.g. PGPASSWORD, PGHOST, PGUSER, PGDATABASE
## All supported vars can be found here:
## https://www.postgresql.org/docs/current/libpq-envars.html
##
## Non-standard parameters:
## pool_max_conns (default: 1) - Maximum size of connection pool for parallel (per-batch per-table) inserts.
## pool_min_conns (default: 0) - Minimum size of connection pool.
## pool_max_conn_lifetime (default: 0s) - Maximum age of a connection before closing.
## pool_max_conn_idle_time (default: 0s) - Maximum idle time of a connection before closing.
## pool_health_check_period (default: 0s) - Duration between health checks on idle connections.
# connection = ""
## Postgres schema to use.
# schema = "public"
## Store tags as foreign keys in the metrics table. Default is false.
# tags_as_foreign_keys = false
## Suffix to append to table name (measurement name) for the foreign tag table.
# tag_table_suffix = "_tag"
## Deny inserting metrics if the foreign tag can't be inserted.
# foreign_tag_constraint = false
## Store all tags as a JSONB object in a single 'tags' column.
# tags_as_jsonb = false
## Store all fields as a JSONB object in a single 'fields' column.
# fields_as_jsonb = false
## Templated statements to execute when creating a new table.
# create_templates = [
# '''CREATE TABLE {{ .table }} ({{ .columns }})''',
# ]
## Templated statements to execute when adding columns to a table.
## Set to an empty list to disable. Points containing tags for which there is no column will be skipped. Points
## containing fields for which there is no column will have the field omitted.
# add_column_templates = [
# '''ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}''',
# ]
## Templated statements to execute when creating a new tag table.
# tag_table_create_templates = [
# '''CREATE TABLE {{ .table }} ({{ .columns }}, PRIMARY KEY (tag_id))''',
# ]
## Templated statements to execute when adding columns to a tag table.
## Set to an empty list to disable. Points containing tags for which there is no column will be skipped.
# tag_table_add_column_templates = [
# '''ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}''',
# ]
## The postgres data type to use for storing unsigned 64-bit integer values (Postgres does not have a native
## unsigned 64-bit integer type).
## The value can be one of:
## numeric - Uses the PostgreSQL "numeric" data type.
## uint8 - Requires pguint extension (https://github.com/petere/pguint)
# uint64_type = "numeric"
## When using pool_max_conns>1, and a temporary error occurs, the query is retried with an incremental backoff. This
## controls the maximum backoff duration.
# retry_max_backoff = "15s"
## Approximate number of tag IDs to store in in-memory cache (when using tags_as_foreign_keys).
## This is an optimization to skip inserting known tag IDs.
## Each entry consumes approximately 34 bytes of memory.
# tag_cache_size = 100000
## Enable & set the log level for the Postgres driver.
# log_level = "warn" # trace, debug, info, warn, error, none
```
### Concurrency

By default the postgresql plugin does not utilize any concurrency. However, it can use concurrency for increased throughput. When concurrency is off, telegraf core handles things like retrying on failure, buffering, etc. When concurrency is used, these aspects have to be handled by the plugin.

To enable concurrent writes to the database, set the `pool_max_conns` connection parameter to a value greater than 1. When enabled, incoming batches will be split by measurement/table name. In addition, if a batch comes in and the previous batch has not completed, concurrency will be used for the new batch as well.

If all connections are utilized and the pool is exhausted, further incoming batches will be buffered within telegraf core.
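For illustration, concurrency is enabled entirely through the connection string; a minimal sketch (host, user, and database names are hypothetical):

```toml
[[outputs.postgresql]]
  ## pool_max_conns > 1 enables concurrent writes
  connection = "host=localhost user=telegraf dbname=telegraf pool_max_conns=4"
```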
### Foreign tags

When using `tags_as_foreign_keys`, tags will be written to a separate table with a `tag_id` column used for joins. Each series (unique combination of tag values) gets its own entry in the tags table and a unique `tag_id`.
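For illustration, assuming a measurement `cpu` with a `host` tag and a `usage_idle` field, and the default `tag_table_suffix` of `_tag`, the two tables could be rejoined like this (table and column names are hypothetical):

```sql
-- rejoin metrics with their tag values via tag_id
SELECT t.time, tt.host, t.usage_idle
FROM cpu t
JOIN cpu_tag tt ON t.tag_id = tt.tag_id;
```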
## Data types

By default the postgresql plugin maps Influx data types to the following PostgreSQL types:

| Influx | PostgreSQL |
|--------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------|
| [float](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/#float) | [double precision](https://www.postgresql.org/docs/current/datatype-numeric.html#DATATYPE-FLOAT) |
| [integer](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/#integer) | [bigint](https://www.postgresql.org/docs/current/datatype-numeric.html#DATATYPE-INT) |
| [uinteger](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/#uinteger) | [numeric](https://www.postgresql.org/docs/current/datatype-numeric.html#DATATYPE-NUMERIC-DECIMAL)* |
| [string](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/#string) | [text](https://www.postgresql.org/docs/current/datatype-character.html) |
| [boolean](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/#boolean) | [boolean](https://www.postgresql.org/docs/current/datatype-boolean.html) |
| [unix timestamp](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/#unix-timestamp) | [timestamp](https://www.postgresql.org/docs/current/datatype-datetime.html) |
It is important to note that `uinteger` (unsigned 64-bit integer) is mapped to
the `numeric` PostgreSQL data type. The `numeric` data type is an arbitrary
precision decimal data type that is less efficient than `bigint`. This is
necessary as the range of values for the Influx `uinteger` data type can
exceed `bigint`, and thus cause errors when inserting data.
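A quick demonstration of why `bigint` is insufficient; the first statement succeeds while the second fails, since the maximum `uint64` value (2^64-1) exceeds the `bigint` maximum of 2^63-1:

```sql
SELECT 18446744073709551615::numeric; -- max uint64, fits in numeric
SELECT 18446744073709551615::bigint;  -- ERROR: bigint out of range
```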
### pguint

As a solution to the `uinteger`/`numeric` data type problem, there is a PostgreSQL extension that offers unsigned 64-bit integer support: [https://github.com/petere/pguint](https://github.com/petere/pguint).

If this extension is installed, you can set the `uint64_type` config option to `"uint8"`, which will cause the plugin to use the `uint8` datatype instead of `numeric`.
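For example (assuming the extension has been created in the target database, e.g. via the Dockerfile shipped with this plugin):

```toml
[[outputs.postgresql]]
  ## requires the pguint extension
  uint64_type = "uint8"
```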
## Templating

The postgresql plugin uses templates for the schema modification SQL statements. This allows for complete control of the schema by the user.

Documentation on how to write templates can be found in the [sqltemplate docs][1].

[1]: https://pkg.go.dev/github.com/influxdata/telegraf/plugins/outputs/postgresql/sqltemplate
### Samples
#### TimescaleDB
```toml
tags_as_foreign_keys = true
create_templates = [
'''CREATE TABLE {{ .table }} ({{ .columns }})''',
'''SELECT create_hypertable({{ .table|quoteLiteral }}, 'time', chunk_time_interval => INTERVAL '1h')''',
'''ALTER TABLE {{ .table }} SET (timescaledb.compress, timescaledb.compress_segmentby = 'tag_id')''',
]
```
##### Multi-node
```toml
tags_as_foreign_keys = true
create_templates = [
'''CREATE TABLE {{ .table }} ({{ .columns }})''',
'''SELECT create_distributed_hypertable({{ .table|quoteLiteral }}, 'time', partitioning_column => 'tag_id', number_partitions => (SELECT count(*) FROM timescaledb_information.data_nodes)::integer, replication_factor => 2, chunk_time_interval => INTERVAL '1h')''',
'''ALTER TABLE {{ .table }} SET (timescaledb.compress, timescaledb.compress_segmentby = 'tag_id')''',
]
```
#### Tag table with view
This example enables `tags_as_foreign_keys`, but creates a postgres view to
automatically join the metric & tag tables. The metric & tag tables are stored
in a "telegraf" schema, with the view in the "public" schema.
```toml
tags_as_foreign_keys = true
schema = "telegraf"
create_templates = [
'''CREATE TABLE {{ .table }} ({{ .columns }})''',
'''CREATE VIEW {{ .table.WithSchema "public" }} AS SELECT time, {{ (.tagTable.Columns.Tags.Concat .allColumns.Fields).Identifiers | join "," }} FROM {{ .table }} t, {{ .tagTable }} tt WHERE t.tag_id = tt.tag_id''',
]
add_column_templates = [
'''ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}''',
'''DROP VIEW IF EXISTS {{ .table.WithSchema "public" }}''',
'''CREATE VIEW {{ .table.WithSchema "public" }} AS SELECT time, {{ (.tagTable.Columns.Tags.Concat .allColumns.Fields).Identifiers | join "," }} FROM {{ .table }} t, {{ .tagTable }} tt WHERE t.tag_id = tt.tag_id''',
]
tag_table_add_column_templates = [
'''ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}''',
'''DROP VIEW IF EXISTS {{ .metricTable.WithSchema "public" }}''',
'''CREATE VIEW {{ .metricTable.WithSchema "public" }} AS SELECT time, {{ (.allColumns.Tags.Concat .metricTable.Columns.Fields).Identifiers | join "," }} FROM {{ .metricTable }} t, {{ .tagTable }} tt WHERE t.tag_id = tt.tag_id''',
]
```
#### Immutable data table

Some PostgreSQL-compatible databases don't allow modification of the table schema after initial creation. This example works around the limitation by renaming the existing table whenever columns must be added, creating a new table with the updated schema, and using a view to union the old and new tables together.
```toml
tags_as_foreign_keys = true
schema = 'telegraf'
create_templates = [
'''CREATE TABLE {{ .table }} ({{ .allColumns }})''',
'''SELECT create_hypertable({{ .table|quoteLiteral }}, 'time', chunk_time_interval => INTERVAL '1h')''',
'''ALTER TABLE {{ .table }} SET (timescaledb.compress, timescaledb.compress_segmentby = 'tag_id')''',
'''SELECT add_compression_policy({{ .table|quoteLiteral }}, INTERVAL '2h')''',
'''CREATE VIEW {{ .table.WithSuffix "_data" }} AS SELECT {{ .allColumns.Selectors | join "," }} FROM {{ .table }}''',
'''CREATE VIEW {{ .table.WithSchema "public" }} AS SELECT time, {{ (.tagTable.Columns.Tags.Concat .allColumns.Fields).Identifiers | join "," }} FROM {{ .table.WithSuffix "_data" }} t, {{ .tagTable }} tt WHERE t.tag_id = tt.tag_id''',
]
add_column_templates = [
'''ALTER TABLE {{ .table }} RENAME TO {{ (.table.WithSuffix "_" .table.Columns.Hash).WithSchema "" }}''',
'''ALTER VIEW {{ .table.WithSuffix "_data" }} RENAME TO {{ (.table.WithSuffix "_" .table.Columns.Hash "_data").WithSchema "" }}''',
'''DROP VIEW {{ .table.WithSchema "public" }}''',
'''CREATE TABLE {{ .table }} ({{ .allColumns }})''',
'''SELECT create_hypertable({{ .table|quoteLiteral }}, 'time', chunk_time_interval => INTERVAL '1h')''',
'''ALTER TABLE {{ .table }} SET (timescaledb.compress, timescaledb.compress_segmentby = 'tag_id')''',
'''SELECT add_compression_policy({{ .table|quoteLiteral }}, INTERVAL '2h')''',
'''CREATE VIEW {{ .table.WithSuffix "_data" }} AS SELECT {{ .allColumns.Selectors | join "," }} FROM {{ .table }} UNION ALL SELECT {{ (.allColumns.Union .table.Columns).Selectors | join "," }} FROM {{ .table.WithSuffix "_" .table.Columns.Hash "_data" }}''',
'''CREATE VIEW {{ .table.WithSchema "public" }} AS SELECT time, {{ (.tagTable.Columns.Tags.Concat .allColumns.Fields).Identifiers | join "," }} FROM {{ .table.WithSuffix "_data" }} t, {{ .tagTable }} tt WHERE t.tag_id = tt.tag_id''',
]
tag_table_add_column_templates = [
'''ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}''',
'''DROP VIEW {{ .metricTable.WithSchema "public" }}''',
'''CREATE VIEW {{ .metricTable.WithSchema "public" }} AS SELECT time, {{ (.allColumns.Tags.Concat .metricTable.Columns.Fields).Identifiers | join "," }} FROM {{ .metricTable.WithSuffix "_data" }} t, {{ .table }} tt WHERE t.tag_id = tt.tag_id''',
]
```
## Error handling

When the plugin encounters an error writing to the database, it attempts to determine whether the error is temporary or permanent. An error is considered temporary if it's possible that retrying the write will succeed. Some examples of temporary errors are connection interruptions, deadlocks, etc. Permanent errors are things like an invalid data type, insufficient permissions, etc.

When an error is determined to be temporary, the plugin will retry the write with an incremental backoff.

When an error is determined to be permanent, the plugin will discard the sub-batch. The "sub-batch" is the portion of the input batch that is being written to the same table.
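The retry policy roughly resembles the following sketch (function shape, names, and the 250ms increment are illustrative, not the plugin's exact implementation; assumes the standard `context` and `time` packages):

```go
// writeRetrySketch retries a sub-batch write with incremental backoff on
// temporary errors, and gives up immediately on permanent ones (the
// sub-batch is then dropped by the caller).
func writeRetrySketch(ctx context.Context, write func(context.Context) error,
	isTemp func(error) bool, maxBackoff time.Duration) error {
	var backoff time.Duration
	for {
		err := write(ctx)
		if err == nil || !isTemp(err) {
			return err // success, or a permanent error
		}
		if backoff += 250 * time.Millisecond; backoff > maxBackoff {
			backoff = maxBackoff // cap at retry_max_backoff
		}
		select {
		case <-time.After(backoff):
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
```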


@@ -0,0 +1,26 @@
package postgresql
import "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils"
// Column names and data types for standard fields (time, tag_id, tags, and fields)
const (
timeColumnName = "time"
timeColumnDataType = PgTimestampWithoutTimeZone
tagIDColumnName = "tag_id"
tagIDColumnDataType = PgBigInt
tagsJSONColumnName = "tags"
fieldsJSONColumnName = "fields"
jsonColumnDataType = PgJSONb
)
var timeColumn = utils.Column{Name: timeColumnName, Type: timeColumnDataType, Role: utils.TimeColType}
var tagIDColumn = utils.Column{Name: tagIDColumnName, Type: tagIDColumnDataType, Role: utils.TagsIDColType}
var fieldsJSONColumn = utils.Column{Name: fieldsJSONColumnName, Type: jsonColumnDataType, Role: utils.FieldColType}
var tagsJSONColumn = utils.Column{Name: tagsJSONColumnName, Type: jsonColumnDataType, Role: utils.TagColType}
func (p *Postgresql) columnFromTag(key string, value interface{}) utils.Column {
return utils.Column{Name: key, Type: p.derivePgDatatype(value), Role: utils.TagColType}
}
func (p *Postgresql) columnFromField(key string, value interface{}) utils.Column {
return utils.Column{Name: key, Type: p.derivePgDatatype(value), Role: utils.FieldColType}
}


@@ -0,0 +1,356 @@
//nolint
package postgresql
// Copied from https://github.com/jackc/pgtype/blob/master/int8.go and tweaked for uint64
/*
Copyright (c) 2013-2021 Jack Christensen
MIT License
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
import (
"database/sql"
"database/sql/driver"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
. "github.com/jackc/pgtype"
"math"
"strconv"
"github.com/jackc/pgio"
)
var errUndefined = errors.New("cannot encode status undefined")
var errBadStatus = errors.New("invalid status")
type Uint8 struct {
Int uint64
Status Status
}
func (dst *Uint8) Set(src interface{}) error {
if src == nil {
*dst = Uint8{Status: Null}
return nil
}
if value, ok := src.(interface{ Get() interface{} }); ok {
value2 := value.Get()
if value2 != value {
return dst.Set(value2)
}
}
switch value := src.(type) {
case int8:
*dst = Uint8{Int: uint64(value), Status: Present}
case uint8:
*dst = Uint8{Int: uint64(value), Status: Present}
case int16:
*dst = Uint8{Int: uint64(value), Status: Present}
case uint16:
*dst = Uint8{Int: uint64(value), Status: Present}
case int32:
*dst = Uint8{Int: uint64(value), Status: Present}
case uint32:
*dst = Uint8{Int: uint64(value), Status: Present}
case int64:
*dst = Uint8{Int: uint64(value), Status: Present}
case uint64:
*dst = Uint8{Int: value, Status: Present}
case int:
if value < 0 {
return fmt.Errorf("%d is less than maximum value for Uint8", value)
}
*dst = Uint8{Int: uint64(value), Status: Present}
case uint:
if uint64(value) > math.MaxInt64 {
return fmt.Errorf("%d is greater than maximum value for Uint8", value)
}
*dst = Uint8{Int: uint64(value), Status: Present}
case string:
num, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return err
}
*dst = Uint8{Int: num, Status: Present}
case float32:
if value > math.MaxInt64 {
return fmt.Errorf("%f is greater than maximum value for Uint8", value)
}
*dst = Uint8{Int: uint64(value), Status: Present}
case float64:
if value > math.MaxInt64 {
return fmt.Errorf("%f is greater than maximum value for Uint8", value)
}
*dst = Uint8{Int: uint64(value), Status: Present}
case *int8:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
}
case *uint8:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
}
case *int16:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
}
case *uint16:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
}
case *int32:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
}
case *uint32:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
}
case *int64:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
}
case *uint64:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
}
case *int:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
}
case *uint:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
}
case *string:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
}
case *float32:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
}
case *float64:
if value == nil {
*dst = Uint8{Status: Null}
} else {
return dst.Set(*value)
}
default:
return fmt.Errorf("cannot convert %v to Uint8", value)
}
return nil
}
func (dst Uint8) Get() interface{} {
switch dst.Status {
case Present:
return dst.Int
case Null:
return nil
default:
return dst.Status
}
}
func (src *Uint8) AssignTo(dst interface{}) error {
switch v := dst.(type) {
case *int:
*v = int(src.Int)
case *int8:
*v = int8(src.Int)
case *int16:
*v = int16(src.Int)
case *int32:
*v = int32(src.Int)
case *int64:
*v = int64(src.Int)
case *uint:
*v = uint(src.Int)
case *uint8:
*v = uint8(src.Int)
case *uint16:
*v = uint16(src.Int)
case *uint32:
*v = uint32(src.Int)
case *uint64:
*v = src.Int
case *float32:
*v = float32(src.Int)
case *float64:
*v = float64(src.Int)
case *string:
*v = strconv.FormatUint(src.Int, 10)
case sql.Scanner:
return v.Scan(src.Int)
case interface{ Set(interface{}) error }:
return v.Set(src.Int)
default:
return fmt.Errorf("cannot assign %v into %T", src.Int, dst)
}
return nil
}
func (dst *Uint8) DecodeText(ci *ConnInfo, src []byte) error {
if src == nil {
*dst = Uint8{Status: Null}
return nil
}
n, err := strconv.ParseUint(string(src), 10, 64)
if err != nil {
return err
}
*dst = Uint8{Int: n, Status: Present}
return nil
}
func (dst *Uint8) DecodeBinary(ci *ConnInfo, src []byte) error {
if src == nil {
*dst = Uint8{Status: Null}
return nil
}
if len(src) != 8 {
return fmt.Errorf("invalid length for int8: %v", len(src))
}
n := binary.BigEndian.Uint64(src)
*dst = Uint8{Int: n, Status: Present}
return nil
}
func (src Uint8) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
switch src.Status {
case Null:
return nil, nil
case Undefined:
return nil, errUndefined
}
return append(buf, strconv.FormatUint(src.Int, 10)...), nil
}
func (src Uint8) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
switch src.Status {
case Null:
return nil, nil
case Undefined:
return nil, errUndefined
}
return pgio.AppendUint64(buf, src.Int), nil
}
// Scan implements the database/sql Scanner interface.
func (dst *Uint8) Scan(src interface{}) error {
if src == nil {
*dst = Uint8{Status: Null}
return nil
}
switch src := src.(type) {
case uint64:
*dst = Uint8{Int: src, Status: Present}
return nil
case string:
return dst.DecodeText(nil, []byte(src))
case []byte:
srcCopy := make([]byte, len(src))
copy(srcCopy, src)
return dst.DecodeText(nil, srcCopy)
}
return fmt.Errorf("cannot scan %T", src)
}
// Value implements the database/sql/driver Valuer interface.
func (src Uint8) Value() (driver.Value, error) {
switch src.Status {
case Present:
return int64(src.Int), nil
case Null:
return nil, nil
default:
return nil, errUndefined
}
}
func (src Uint8) MarshalJSON() ([]byte, error) {
switch src.Status {
case Present:
return []byte(strconv.FormatUint(src.Int, 10)), nil
case Null:
return []byte("null"), nil
case Undefined:
return nil, errUndefined
}
return nil, errBadStatus
}
func (dst *Uint8) UnmarshalJSON(b []byte) error {
var n *uint64
err := json.Unmarshal(b, &n)
if err != nil {
return err
}
if n == nil {
*dst = Uint8{Status: Null}
} else {
*dst = Uint8{Int: *n, Status: Present}
}
return nil
}


@@ -0,0 +1,60 @@
package postgresql
import (
"time"
)
// Constants for naming PostgreSQL data types both in
// their short and long versions.
const (
PgBool = "boolean"
PgSmallInt = "smallint"
PgInteger = "integer"
PgBigInt = "bigint"
PgReal = "real"
PgDoublePrecision = "double precision"
PgNumeric = "numeric"
PgText = "text"
PgTimestampWithTimeZone = "timestamp with time zone"
PgTimestampWithoutTimeZone = "timestamp without time zone"
PgSerial = "serial"
PgJSONb = "jsonb"
)
// Types from pguint
const (
PgUint8 = "uint8"
)
// derivePgDatatype returns the appropriate PostgreSQL data type
// that could hold the value.
func (p *Postgresql) derivePgDatatype(value interface{}) string {
if p.Uint64Type == PgUint8 {
if _, ok := value.(uint64); ok {
return PgUint8
}
}
switch value.(type) {
case bool:
return PgBool
case uint64:
return PgNumeric
case int64, int, uint, uint32:
return PgBigInt
case int32:
return PgInteger
case int16, int8:
return PgSmallInt
case float64:
return PgDoublePrecision
case float32:
return PgReal
case string:
return PgText
case time.Time:
return PgTimestampWithoutTimeZone
default:
return PgText
}
}


@@ -0,0 +1,460 @@
//go:generate ../../../tools/readme_config_includer/generator
package postgresql
import (
"context"
_ "embed"
"errors"
"fmt"
"strings"
"time"
"github.com/coocood/freecache"
"github.com/jackc/pgconn"
"github.com/jackc/pgtype"
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v4/pgxpool"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/models"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/outputs/postgresql/sqltemplate"
"github.com/influxdata/telegraf/plugins/outputs/postgresql/utils"
)
type dbh interface {
Begin(ctx context.Context) (pgx.Tx, error)
CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error)
Exec(ctx context.Context, sql string, arguments ...interface{}) (commandTag pgconn.CommandTag, err error)
Query(ctx context.Context, sql string, args ...interface{}) (pgx.Rows, error)
}
// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
//
//go:embed sample.conf
var sampleConfig string
type Postgresql struct {
Connection string `toml:"connection"`
Schema string `toml:"schema"`
TagsAsForeignKeys bool `toml:"tags_as_foreign_keys"`
TagTableSuffix string `toml:"tag_table_suffix"`
ForeignTagConstraint bool `toml:"foreign_tag_constraint"`
TagsAsJsonb bool `toml:"tags_as_jsonb"`
FieldsAsJsonb bool `toml:"fields_as_jsonb"`
CreateTemplates []*sqltemplate.Template `toml:"create_templates"`
AddColumnTemplates []*sqltemplate.Template `toml:"add_column_templates"`
TagTableCreateTemplates []*sqltemplate.Template `toml:"tag_table_create_templates"`
TagTableAddColumnTemplates []*sqltemplate.Template `toml:"tag_table_add_column_templates"`
Uint64Type string `toml:"uint64_type"`
RetryMaxBackoff config.Duration `toml:"retry_max_backoff"`
TagCacheSize int `toml:"tag_cache_size"`
LogLevel string `toml:"log_level"`
dbContext context.Context
dbContextCancel func()
dbConfig *pgxpool.Config
db *pgxpool.Pool
tableManager *TableManager
tagsCache *freecache.Cache
pguint8 *pgtype.DataType
writeChan chan *TableSource
writeWaitGroup *utils.WaitGroup
Logger telegraf.Logger `toml:"-"`
}
func init() {
outputs.Add("postgresql", func() telegraf.Output { return newPostgresql() })
}
func newPostgresql() *Postgresql {
p := &Postgresql{
Schema: "public",
TagTableSuffix: "_tag",
TagCacheSize: 100000,
Uint64Type: PgNumeric,
CreateTemplates: []*sqltemplate.Template{{}},
AddColumnTemplates: []*sqltemplate.Template{{}},
TagTableCreateTemplates: []*sqltemplate.Template{{}},
TagTableAddColumnTemplates: []*sqltemplate.Template{{}},
RetryMaxBackoff: config.Duration(time.Second * 15),
Logger: models.NewLogger("outputs", "postgresql", ""),
LogLevel: "warn",
}
_ = p.CreateTemplates[0].UnmarshalText([]byte(`CREATE TABLE {{ .table }} ({{ .columns }})`))
_ = p.AddColumnTemplates[0].UnmarshalText([]byte(`ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}`))
_ = p.TagTableCreateTemplates[0].UnmarshalText([]byte(`CREATE TABLE {{ .table }} ({{ .columns }}, PRIMARY KEY (tag_id))`))
_ = p.TagTableAddColumnTemplates[0].UnmarshalText([]byte(`ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}`))
return p
}
func (p *Postgresql) Init() error {
if p.TagCacheSize < 0 {
return fmt.Errorf("invalid tag_cache_size")
}
var err error
if p.dbConfig, err = pgxpool.ParseConfig(p.Connection); err != nil {
return err
}
parsedConfig, _ := pgx.ParseConfig(p.Connection)
if _, ok := parsedConfig.Config.RuntimeParams["pool_max_conns"]; !ok {
// The pgx default for pool_max_conns is 4. However we want to default to 1.
p.dbConfig.MaxConns = 1
}
if _, ok := p.dbConfig.ConnConfig.RuntimeParams["application_name"]; !ok {
p.dbConfig.ConnConfig.RuntimeParams["application_name"] = "telegraf"
}
if p.LogLevel != "" {
p.dbConfig.ConnConfig.Logger = utils.PGXLogger{Logger: p.Logger}
p.dbConfig.ConnConfig.LogLevel, err = pgx.LogLevelFromString(p.LogLevel)
if err != nil {
return fmt.Errorf("invalid log level")
}
}
switch p.Uint64Type {
case PgNumeric:
case PgUint8:
p.dbConfig.AfterConnect = p.registerUint8
default:
return fmt.Errorf("invalid uint64_type")
}
return nil
}
func (p *Postgresql) SampleConfig() string { return sampleConfig }
// Connect establishes a connection to the target database and prepares the cache
func (p *Postgresql) Connect() error {
// Yes, we're not supposed to store the context. However since we don't receive a context, we have to.
p.dbContext, p.dbContextCancel = context.WithCancel(context.Background())
var err error
p.db, err = pgxpool.ConnectConfig(p.dbContext, p.dbConfig)
if err != nil {
p.Logger.Errorf("Couldn't connect to server\n%v", err)
return err
}
p.tableManager = NewTableManager(p)
if p.TagsAsForeignKeys {
p.tagsCache = freecache.NewCache(p.TagCacheSize * 34) // from testing, each entry consumes approx 34 bytes
}
maxConns := int(p.db.Stat().MaxConns())
if maxConns > 1 {
p.writeChan = make(chan *TableSource)
p.writeWaitGroup = utils.NewWaitGroup()
for i := 0; i < maxConns; i++ {
p.writeWaitGroup.Add(1)
go p.writeWorker(p.dbContext)
}
}
return nil
}
func (p *Postgresql) registerUint8(ctx context.Context, conn *pgx.Conn) error {
if p.pguint8 == nil {
dt := pgtype.DataType{
// Use 'numeric' type for encoding/decoding across the wire
// It might be more efficient to create a native pgtype.Type, but would involve a lot of code. So this is
// probably good enough.
Value: &Uint8{},
Name: "uint8",
}
row := conn.QueryRow(p.dbContext, "SELECT oid FROM pg_type WHERE typname=$1", dt.Name)
if err := row.Scan(&dt.OID); err != nil {
return fmt.Errorf("retreiving OID for uint8 data type: %w", err)
}
p.pguint8 = &dt
}
conn.ConnInfo().RegisterDataType(*p.pguint8)
return nil
}
// Close closes the connection(s) to the database.
func (p *Postgresql) Close() error {
if p.writeChan != nil {
// We're using async mode. Gracefully close with timeout.
close(p.writeChan)
select {
case <-p.writeWaitGroup.C():
case <-time.NewTimer(time.Second * 5).C:
p.Logger.Warnf("Shutdown timeout expired while waiting for metrics to flush. Some metrics may not be written to database.")
}
}
// Die!
p.dbContextCancel()
p.db.Close()
p.tableManager = nil
return nil
}
func (p *Postgresql) Write(metrics []telegraf.Metric) error {
if p.tagsCache != nil {
// gather at the start of write so there's less chance of any async operations ongoing
p.Logger.Debugf("cache: size=%d hit=%d miss=%d full=%d\n",
p.tagsCache.EntryCount(),
p.tagsCache.HitCount(),
p.tagsCache.MissCount(),
p.tagsCache.EvacuateCount(),
)
p.tagsCache.ResetStatistics()
}
tableSources := NewTableSources(p, metrics)
var err error
if p.db.Stat().MaxConns() > 1 {
err = p.writeConcurrent(tableSources)
} else {
err = p.writeSequential(tableSources)
}
if err != nil {
var pgErr *pgconn.PgError
if errors.As(err, &pgErr) {
// PgError doesn't include .Detail in Error(), so we concat it onto .Message.
if pgErr.Detail != "" {
pgErr.Message += "; " + pgErr.Detail
}
}
}
return err
}
func (p *Postgresql) writeSequential(tableSources map[string]*TableSource) error {
tx, err := p.db.Begin(p.dbContext)
if err != nil {
return fmt.Errorf("starting transaction: %w", err)
}
defer tx.Rollback(p.dbContext) //nolint:errcheck
for _, tableSource := range tableSources {
sp := tx
if len(tableSources) > 1 {
// Wrap each sub-batch in a savepoint so that if a permanent error is received, we can drop just that one sub-batch and insert everything else.
sp, err = tx.Begin(p.dbContext)
if err != nil {
return fmt.Errorf("starting savepoint: %w", err)
}
}
err := p.writeMetricsFromMeasure(p.dbContext, sp, tableSource)
if err != nil {
if isTempError(err) {
// return so that telegraf will retry the whole batch
return err
}
p.Logger.Errorf("write error (permanent, dropping sub-batch): %v", err)
if len(tableSources) == 1 {
return nil
}
// drop this one sub-batch and continue trying the rest
if err := sp.Rollback(p.dbContext); err != nil {
return err
}
}
// savepoints do not need to be committed (released), so save the round trip and skip it
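// For a batch spanning tables _a and _b, the resulting statement flow is thus roughly:
// BEGIN; SAVEPOINT; COPY _a; SAVEPOINT; COPY _b; COMMIT.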
}
if err := tx.Commit(p.dbContext); err != nil {
return fmt.Errorf("committing transaction: %w", err)
}
return nil
}
func (p *Postgresql) writeConcurrent(tableSources map[string]*TableSource) error {
for _, tableSource := range tableSources {
select {
case p.writeChan <- tableSource:
case <-p.dbContext.Done():
return nil
}
}
return nil
}
func (p *Postgresql) writeWorker(ctx context.Context) {
defer p.writeWaitGroup.Done()
for {
select {
case tableSource, ok := <-p.writeChan:
if !ok {
return
}
if err := p.writeRetry(ctx, tableSource); err != nil {
p.Logger.Errorf("write error (permanent, dropping sub-batch): %v", err)
}
case <-p.dbContext.Done():
return
}
}
}
// isTempError reports whether the error received during a metric write operation is temporary or permanent.
// A temporary error is one where a later retry of the write might succeed.
// Note however that this applies to the transaction as a whole, not the individual operation. For example, a
// write might come in that needs a new table created, but another worker creates the table between when we
// check for it and when we try to create it. The operation error is then permanent, as retrying `CREATE TABLE`
// will keep failing. But if we retry the transaction from scratch, the table check will see that the table
// exists, so we consider the error temporary.
func isTempError(err error) bool {
var pgErr *pgconn.PgError
if errors.As(err, &pgErr); pgErr != nil {
// https://www.postgresql.org/docs/12/errcodes-appendix.html
errClass := pgErr.Code[:2]
switch errClass {
case "23": // Integrity Constraint Violation
switch pgErr.Code { //nolint:revive
case "23505": // unique_violation
if strings.Contains(err.Error(), "pg_type_typname_nsp_index") {
// Happens when you try to create 2 tables simultaneously.
return true
}
}
case "25": // Invalid Transaction State
// If we're here, this is a bug, but recoverable
return true
case "40": // Transaction Rollback
switch pgErr.Code { //nolint:revive
case "40P01": // deadlock_detected
return true
}
case "42": // Syntax Error or Access Rule Violation
switch pgErr.Code {
case "42701": // duplicate_column
return true
case "42P07": // duplicate_table
return true
}
case "53": // Insufficient Resources
return true
case "57": // Operator Intervention
switch pgErr.Code { //nolint:revive
case "57014": // query_cancelled
// This one is a bit of a mess. This code comes back when PGX cancels the query. Such as when PGX can't
// convert to the column's type. So even though the error was originally generated by PGX, we get the
// error from Postgres.
return false
case "57P04": // database_dropped
return false
}
return true
}
// Assume that any other error that comes from postgres is a permanent error
return false
}
if err, ok := err.(interface{ Temporary() bool }); ok {
return err.Temporary()
}
// Assume that any other error is permanent.
// This may mean that we incorrectly discard data that could have been retried, but the alternative is that we get
// stuck retrying data that will never succeed, causing good data to be dropped because the buffer fills up.
return false
}
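// Illustrative sketch (not part of the plugin): how isTempError classifies driver errors. The PgError values
// below are hypothetical, constructed only for demonstration.
func exampleIsTempError() {
deadlock := &pgconn.PgError{Code: "40P01"} // deadlock_detected
fmt.Println(isTempError(deadlock)) // true: retry the transaction
undefCol := &pgconn.PgError{Code: "42703"} // undefined_column
fmt.Println(isTempError(undefCol)) // false: drop the sub-batch
}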
func (p *Postgresql) writeRetry(ctx context.Context, tableSource *TableSource) error {
backoff := time.Duration(0)
for {
err := p.writeMetricsFromMeasure(ctx, p.db, tableSource)
if err == nil {
return nil
}
if !isTempError(err) {
return err
}
p.Logger.Errorf("write error (retry in %s): %v", backoff, err)
tableSource.Reset()
time.Sleep(backoff)
if backoff == 0 {
backoff = time.Millisecond * 250
} else {
backoff *= 2
if backoff > time.Duration(p.RetryMaxBackoff) {
backoff = time.Duration(p.RetryMaxBackoff)
}
}
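// With the default retry_max_backoff of 15s, the resulting delays are roughly:
// 0, 250ms, 500ms, 1s, 2s, 4s, 8s, 15s, 15s, ... (doubling, then capped).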
}
}
// Writes the metrics from a specified measure. All the provided metrics must belong to the same measurement.
func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, db dbh, tableSource *TableSource) error {
err := p.tableManager.MatchSource(ctx, db, tableSource)
if err != nil {
return err
}
if p.TagsAsForeignKeys {
if err := p.writeTagTable(ctx, db, tableSource); err != nil {
if p.ForeignTagConstraint {
return fmt.Errorf("writing to tag table '%s': %s", tableSource.Name()+p.TagTableSuffix, err)
}
// log and continue. As the admin can correct the issue, and tags don't change over time, they can be
// added from future metrics after issue is corrected.
p.Logger.Errorf("writing to tag table '%s': %s", tableSource.Name()+p.TagTableSuffix, err)
}
}
fullTableName := utils.FullTableName(p.Schema, tableSource.Name())
if _, err := db.CopyFrom(ctx, fullTableName, tableSource.ColumnNames(), tableSource); err != nil {
return err
}
return nil
}
func (p *Postgresql) writeTagTable(ctx context.Context, db dbh, tableSource *TableSource) error {
ttsrc := NewTagTableSource(tableSource)
// Check whether we have any tags to insert
if !ttsrc.Next() {
return nil
}
ttsrc.Reset()
// need a transaction so that if it errors, we don't roll back the parent transaction, just the tags
tx, err := db.Begin(ctx)
if err != nil {
return err
}
defer tx.Rollback(ctx) //nolint:errcheck
ident := pgx.Identifier{ttsrc.postgresql.Schema, ttsrc.Name()}
identTemp := pgx.Identifier{ttsrc.Name() + "_temp"}
sql := fmt.Sprintf("CREATE TEMP TABLE %s (LIKE %s) ON COMMIT DROP", identTemp.Sanitize(), ident.Sanitize())
if _, err := tx.Exec(ctx, sql); err != nil {
return fmt.Errorf("creating tags temp table: %w", err)
}
if _, err := tx.CopyFrom(ctx, identTemp, ttsrc.ColumnNames(), ttsrc); err != nil {
return fmt.Errorf("copying into tags temp table: %w", err)
}
if _, err := tx.Exec(ctx, fmt.Sprintf("INSERT INTO %s SELECT * FROM %s ORDER BY tag_id ON CONFLICT (tag_id) DO NOTHING", ident.Sanitize(), identTemp.Sanitize())); err != nil {
return fmt.Errorf("inserting into tags table: %w", err)
}
if err := tx.Commit(ctx); err != nil {
return err
}
ttsrc.UpdateCache()
return nil
}
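// For a hypothetical measurement "weather" with TagsAsForeignKeys enabled, writeTagTable issues roughly the
// following (sketch; table and column names here are assumptions, not produced by the plugin verbatim):
// CREATE TEMP TABLE "weather_tag_temp" (LIKE "public"."weather_tag") ON COMMIT DROP
// COPY "weather_tag_temp" ("tag_id", "host", ...) FROM STDIN -- via CopyFrom
// INSERT INTO "public"."weather_tag" SELECT * FROM "weather_tag_temp" ORDER BY tag_id ON CONFLICT (tag_id) DO NOTHING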

View File

@ -0,0 +1,103 @@
package postgresql
import (
"context"
"fmt"
"math/rand"
"strconv"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
)
func BenchmarkPostgresql_sequential(b *testing.B) {
gen := batchGenerator(batchGeneratorArgs{ctx, b, 1000, 3, 8, 12, 100, 2})
benchmarkPostgresql(b, gen, 1, true)
}
func BenchmarkPostgresql_concurrent(b *testing.B) {
gen := batchGenerator(batchGeneratorArgs{ctx, b, 1000, 3, 8, 12, 100, 2})
benchmarkPostgresql(b, gen, 10, true)
}
func benchmarkPostgresql(b *testing.B, gen <-chan []telegraf.Metric, concurrency int, foreignTags bool) {
p := newPostgresqlTest(b)
p.Connection += fmt.Sprintf(" pool_max_conns=%d", concurrency)
p.TagsAsForeignKeys = foreignTags
p.LogLevel = ""
_ = p.Init()
if err := p.Connect(); err != nil {
b.Fatalf("Error: %s", err)
}
metricCount := 0
b.ResetTimer()
tStart := time.Now()
for i := 0; i < b.N; i++ {
batch := <-gen
if err := p.Write(batch); err != nil {
b.Fatalf("Error: %s", err)
}
metricCount += len(batch)
}
_ = p.Close()
b.StopTimer()
tStop := time.Now()
b.ReportMetric(float64(metricCount)/tStop.Sub(tStart).Seconds(), "metrics/s")
}
type batchGeneratorArgs struct {
ctx context.Context
b *testing.B
batchSize int
numTables int
numTags int
numFields int
tagCardinality int
fieldCardinality int
}
// tagCardinality counts all the tag keys & values as one element. fieldCardinality counts all the field keys (not values) as one element.
func batchGenerator(args batchGeneratorArgs) <-chan []telegraf.Metric {
tagSets := make([]MSS, args.tagCardinality)
for i := 0; i < args.tagCardinality; i++ {
tags := MSS{}
for j := 0; j < args.numTags; j++ {
tags[fmt.Sprintf("tag_%d", j)] = fmt.Sprintf("%d", rand.Int())
}
tagSets[i] = tags
}
metricChan := make(chan []telegraf.Metric, 32)
go func() {
for {
batch := make([]telegraf.Metric, args.batchSize)
for i := 0; i < args.batchSize; i++ {
tableName := args.b.Name() + "_" + strconv.Itoa(rand.Intn(args.numTables))
tags := tagSets[rand.Intn(len(tagSets))]
m := metric.New(tableName, tags, nil, time.Now())
m.AddTag("tableName", tableName) // ensure the tag set is unique to this table. Just in case...
// We do field cardinality by randomizing the name of the final field to an integer < cardinality.
for j := 0; j < args.numFields-1; j++ { // use -1 to reserve the last field for cardinality
m.AddField("f"+strconv.Itoa(j), rand.Int())
}
m.AddField("f"+strconv.Itoa(rand.Intn(args.fieldCardinality)), rand.Int())
batch[i] = m
}
select {
case metricChan <- batch:
case <-ctx.Done():
return
}
}
}()
return metricChan
}

View File

@ -0,0 +1,805 @@
package postgresql
import (
"context"
"fmt"
"math"
"math/rand"
"strings"
"sync"
"testing"
"time"
"github.com/docker/go-connections/nat"
"github.com/testcontainers/testcontainers-go/wait"
"github.com/influxdata/telegraf/testutil"
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v4/pgxpool"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/outputs/postgresql/utils"
)
type Log struct {
level pgx.LogLevel
format string
args []interface{}
}
func (l Log) String() string {
// We have to use Errorf() as Sprintf() doesn't allow usage of %w.
return fmt.Errorf("%s: "+l.format, append([]interface{}{l.level}, l.args...)...).Error()
}
// LogAccumulator is a log collector that satisfies telegraf.Logger.
type LogAccumulator struct {
logs []Log
cond *sync.Cond
tb testing.TB
emitLevel pgx.LogLevel
}
func NewLogAccumulator(tb testing.TB) *LogAccumulator {
return &LogAccumulator{
cond: sync.NewCond(&sync.Mutex{}),
tb: tb,
}
}
func (la *LogAccumulator) append(level pgx.LogLevel, format string, args []interface{}) {
la.tb.Helper()
la.cond.L.Lock()
log := Log{level, format, args}
la.logs = append(la.logs, log)
if la.emitLevel == 0 || log.level <= la.emitLevel {
la.tb.Log(log.String())
}
la.cond.Broadcast()
la.cond.L.Unlock()
}
func (la *LogAccumulator) HasLevel(level pgx.LogLevel) bool {
la.cond.L.Lock()
defer la.cond.L.Unlock()
for _, log := range la.logs {
if log.level > 0 && log.level <= level {
return true
}
}
return false
}
func (la *LogAccumulator) WaitLen(n int) []Log {
la.cond.L.Lock()
defer la.cond.L.Unlock()
for len(la.logs) < n {
la.cond.Wait()
}
return la.logs[:]
}
// WaitFor blocks until a log entry matching f is seen. If waitCommit is true, it also waits for the matched
// entry's transaction (tracked by connection pid) to commit, restarting the search if it rolls back or errors.
func (la *LogAccumulator) WaitFor(f func(l Log) bool, waitCommit bool) {
la.cond.L.Lock()
defer la.cond.L.Unlock()
i := 0
var commitPid uint32
for {
for ; i < len(la.logs); i++ {
log := la.logs[i]
if commitPid == 0 {
if f(log) {
if !waitCommit {
return
}
commitPid = log.args[1].(MSI)["pid"].(uint32)
}
} else {
if len(log.args) < 2 {
continue
}
data, ok := log.args[1].(MSI)
if !ok || data["pid"] != commitPid {
continue
}
if log.args[0] == "Exec" && data["sql"] == "commit" {
return
} else if log.args[0] == "Exec" && data["sql"] == "rollback" {
// transaction aborted, start looking for another match
commitPid = 0
} else if log.level == pgx.LogLevelError {
commitPid = 0
}
}
}
la.cond.Wait()
}
}
func (la *LogAccumulator) WaitForQuery(str string, waitCommit bool) {
la.WaitFor(func(log Log) bool {
return log.format == "PG %s - %+v" &&
(log.args[0].(string) == "Query" || log.args[0].(string) == "Exec") &&
strings.Contains(log.args[1].(MSI)["sql"].(string), str)
}, waitCommit)
}
func (la *LogAccumulator) WaitForCopy(tableName string, waitCommit bool) {
la.WaitFor(func(log Log) bool {
return log.format == "PG %s - %+v" &&
log.args[0].(string) == "CopyFrom" &&
log.args[1].(MSI)["tableName"].(pgx.Identifier)[1] == tableName
}, waitCommit)
}
// Clear any stored logs.
// Do not run this while any WaitFor* operations are in progress.
func (la *LogAccumulator) Clear() {
la.cond.L.Lock()
if len(la.logs) > 0 {
la.logs = nil
}
la.cond.L.Unlock()
}
func (la *LogAccumulator) Logs() []Log {
la.cond.L.Lock()
defer la.cond.L.Unlock()
return la.logs[:]
}
func (la *LogAccumulator) Errorf(format string, args ...interface{}) {
la.tb.Helper()
la.append(pgx.LogLevelError, format, args)
}
func (la *LogAccumulator) Error(args ...interface{}) {
la.tb.Helper()
la.append(pgx.LogLevelError, "%v", args)
}
func (la *LogAccumulator) Debugf(format string, args ...interface{}) {
la.tb.Helper()
la.append(pgx.LogLevelDebug, format, args)
}
func (la *LogAccumulator) Debug(args ...interface{}) {
la.tb.Helper()
la.append(pgx.LogLevelDebug, "%v", args)
}
func (la *LogAccumulator) Warnf(format string, args ...interface{}) {
la.tb.Helper()
la.append(pgx.LogLevelWarn, format, args)
}
func (la *LogAccumulator) Warn(args ...interface{}) {
la.tb.Helper()
la.append(pgx.LogLevelWarn, "%v", args)
}
func (la *LogAccumulator) Infof(format string, args ...interface{}) {
la.tb.Helper()
la.append(pgx.LogLevelInfo, format, args)
}
func (la *LogAccumulator) Info(args ...interface{}) {
la.tb.Helper()
la.append(pgx.LogLevelInfo, "%v", args)
}
var ctx = context.Background()
type PostgresqlTest struct {
*Postgresql
Logger *LogAccumulator
}
func newPostgresqlTest(tb testing.TB) *PostgresqlTest {
if testing.Short() {
tb.Skip("Skipping integration test in short mode")
}
servicePort := "5432"
username := "postgres"
password := "postgres"
testDatabaseName := "telegraf_test"
container := testutil.Container{
Image: "postgres:alpine",
ExposedPorts: []string{servicePort},
Env: map[string]string{
"POSTGRES_USER": username,
"POSTGRES_PASSWORD": password,
"POSTGRES_DB": "telegraf_test",
},
WaitingFor: wait.ForAll(
// the database comes up twice, once right away, then again a second
// time after the docker entrypoint starts configuration
wait.ForLog("database system is ready to accept connections").WithOccurrence(2),
wait.ForListeningPort(nat.Port(servicePort)),
),
}
tb.Cleanup(func() {
require.NoError(tb, container.Terminate(), "terminating container failed")
})
err := container.Start()
require.NoError(tb, err, "failed to start container")
p := newPostgresql()
p.Connection = fmt.Sprintf(
"host=%s port=%s user=%s password=%s dbname=%s",
container.Address,
container.Ports[servicePort],
username,
password,
testDatabaseName,
)
logger := NewLogAccumulator(tb)
p.Logger = logger
p.LogLevel = "debug"
require.NoError(tb, p.Init())
pt := &PostgresqlTest{Postgresql: p}
pt.Logger = logger
return pt
}
func TestPostgresqlConnectIntegration(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
require.NoError(t, p.Connect())
assert.EqualValues(t, 1, p.db.Stat().MaxConns())
p = newPostgresqlTest(t)
p.Connection += " pool_max_conns=2"
_ = p.Init()
require.NoError(t, p.Connect())
assert.EqualValues(t, 2, p.db.Stat().MaxConns())
}
func newMetric(
t *testing.T,
suffix string,
tags map[string]string,
fields map[string]interface{},
) telegraf.Metric {
return testutil.MustMetric(t.Name()+suffix, tags, fields, time.Now())
}
type MSS = map[string]string
type MSI = map[string]interface{}
func dbTableDump(t *testing.T, db *pgxpool.Pool, suffix string) []MSI {
rows, err := db.Query(ctx, "SELECT * FROM "+pgx.Identifier{t.Name() + suffix}.Sanitize())
require.NoError(t, err)
defer rows.Close()
var dump []MSI
for rows.Next() {
msi := MSI{}
vals, err := rows.Values()
require.NoError(t, err)
for i, fd := range rows.FieldDescriptions() {
msi[string(fd.Name)] = vals[i]
}
dump = append(dump, msi)
}
require.NoError(t, rows.Err())
return dump
}
func TestWriteIntegration_sequential(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
require.NoError(t, p.Connect())
metrics := []telegraf.Metric{
newMetric(t, "_a", MSS{}, MSI{"v": 1}),
newMetric(t, "_b", MSS{}, MSI{"v": 2}),
newMetric(t, "_a", MSS{}, MSI{"v": 3}),
}
require.NoError(t, p.Write(metrics))
dumpA := dbTableDump(t, p.db, "_a")
dumpB := dbTableDump(t, p.db, "_b")
if assert.Len(t, dumpA, 2) {
assert.EqualValues(t, 1, dumpA[0]["v"])
assert.EqualValues(t, 3, dumpA[1]["v"])
}
if assert.Len(t, dumpB, 1) {
assert.EqualValues(t, 2, dumpB[0]["v"])
}
p.Logger.Clear()
require.NoError(t, p.Write(metrics))
stmtCount := 0
for _, log := range p.Logger.Logs() {
if strings.Contains(log.String(), "info: PG ") {
stmtCount++
}
}
assert.Equal(t, 6, stmtCount) // BEGIN, SAVEPOINT, COPY table _a, SAVEPOINT, COPY table _b, COMMIT
}
func TestWriteIntegration_concurrent(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
p.dbConfig.MaxConns = 3
require.NoError(t, p.Connect())
// Write a metric so it creates a table we can lock.
metrics := []telegraf.Metric{
newMetric(t, "_a", MSS{}, MSI{"v": 1}),
}
require.NoError(t, p.Write(metrics))
p.Logger.WaitForCopy(t.Name()+"_a", false)
// clear so that the WaitForCopy calls below don't pick up this one
p.Logger.Clear()
// Lock the table so that we ensure the writes hangs and the plugin has to open another connection.
tx, err := p.db.Begin(ctx)
require.NoError(t, err)
defer tx.Rollback(ctx) //nolint:errcheck
_, err = tx.Exec(ctx, "LOCK TABLE "+utils.QuoteIdentifier(t.Name()+"_a"))
require.NoError(t, err)
metrics = []telegraf.Metric{
newMetric(t, "_a", MSS{}, MSI{"v": 2}),
}
require.NoError(t, p.Write(metrics))
// Note, there is technically a possible race here, where it doesn't try to insert into _a until after _b. However
// this should be practically impossible, and trying to engineer a solution to account for it would be even more
// complex than we already are.
metrics = []telegraf.Metric{
newMetric(t, "_b", MSS{}, MSI{"v": 3}),
}
require.NoError(t, p.Write(metrics))
p.Logger.WaitForCopy(t.Name()+"_b", false)
// release the lock on table _a
_ = tx.Rollback(ctx)
p.Logger.WaitForCopy(t.Name()+"_a", false)
dumpA := dbTableDump(t, p.db, "_a")
dumpB := dbTableDump(t, p.db, "_b")
if assert.Len(t, dumpA, 2) {
assert.EqualValues(t, 1, dumpA[0]["v"])
assert.EqualValues(t, 2, dumpA[1]["v"])
}
if assert.Len(t, dumpB, 1) {
assert.EqualValues(t, 3, dumpB[0]["v"])
}
// We should have had 3 connections. One for the lock, and one for each table.
assert.EqualValues(t, 3, p.db.Stat().TotalConns())
}
// Test that the bad metric is dropped, and the rest of the batch succeeds.
func TestWriteIntegration_sequentialPermError(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
require.NoError(t, p.Connect())
metrics := []telegraf.Metric{
newMetric(t, "_a", MSS{}, MSI{"v": 1}),
newMetric(t, "_b", MSS{}, MSI{"v": 2}),
}
require.NoError(t, p.Write(metrics))
metrics = []telegraf.Metric{
newMetric(t, "_a", MSS{}, MSI{"v": "a"}),
newMetric(t, "_b", MSS{}, MSI{"v": 3}),
}
require.NoError(t, p.Write(metrics))
dumpA := dbTableDump(t, p.db, "_a")
dumpB := dbTableDump(t, p.db, "_b")
assert.Len(t, dumpA, 1)
assert.Len(t, dumpB, 2)
haveError := false
for _, l := range p.Logger.Logs() {
if strings.Contains(l.String(), "write error") {
haveError = true
break
}
}
assert.True(t, haveError, "write error not found in log")
}
// Test that a batch with only 1 sub-batch does not return an error.
func TestWriteIntegration_sequentialSinglePermError(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
require.NoError(t, p.Connect())
metrics := []telegraf.Metric{
newMetric(t, "", MSS{}, MSI{"v": 1}),
}
require.NoError(t, p.Write(metrics))
metrics = []telegraf.Metric{
newMetric(t, "", MSS{}, MSI{"v": "a"}),
}
require.NoError(t, p.Write(metrics))
}
// Test that the bad metric is dropped, and the rest of the batch succeeds.
func TestWriteIntegration_concurrentPermError(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
p.dbConfig.MaxConns = 2
require.NoError(t, p.Connect())
metrics := []telegraf.Metric{
newMetric(t, "_a", MSS{}, MSI{"v": 1}),
}
require.NoError(t, p.Write(metrics))
p.Logger.WaitForCopy(t.Name()+"_a", false)
metrics = []telegraf.Metric{
newMetric(t, "_a", MSS{}, MSI{"v": "a"}),
newMetric(t, "_b", MSS{}, MSI{"v": 2}),
}
require.NoError(t, p.Write(metrics))
p.Logger.WaitFor(func(l Log) bool {
return strings.Contains(l.String(), "write error")
}, false)
p.Logger.WaitForCopy(t.Name()+"_b", false)
dumpA := dbTableDump(t, p.db, "_a")
dumpB := dbTableDump(t, p.db, "_b")
assert.Len(t, dumpA, 1)
assert.Len(t, dumpB, 1)
}
// Verify that in sequential mode, errors are returned allowing telegraf agent to handle & retry
func TestWriteIntegration_sequentialTempError(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
require.NoError(t, p.Connect())
// To avoid a race condition, we need to know when our goroutine has started listening to the log.
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
// Wait for the CREATE TABLE, and then kill the connection.
// The WaitFor callback holds a lock on the log. Meaning it will block logging of the next action. So we trigger
// on CREATE TABLE so that there's a few statements to go before the COMMIT.
p.Logger.WaitFor(func(log Log) bool {
if strings.Contains(log.String(), "release wg") {
wg.Done()
}
if !strings.Contains(log.String(), "CREATE TABLE") {
return false
}
pid := log.args[1].(MSI)["pid"].(uint32)
conf := p.db.Config().ConnConfig
conf.Logger = nil
c, err := pgx.ConnectConfig(context.Background(), conf)
if !assert.NoError(t, err) {
return true
}
_, err = c.Exec(context.Background(), "SELECT pg_terminate_backend($1)", pid)
assert.NoError(t, err)
return true
}, false)
}()
p.Logger.Infof("release wg")
wg.Wait()
metrics := []telegraf.Metric{
newMetric(t, "_a", MSS{}, MSI{"v": 1}),
}
require.Error(t, p.Write(metrics))
}
// Verify that when using concurrency, errors are not returned, but instead logged and automatically retried
func TestWriteIntegration_concurrentTempError(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
p.dbConfig.MaxConns = 2
require.NoError(t, p.Connect())
// To avoid a race condition, we need to know when our goroutine has started listening to the log.
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
// Wait for the CREATE TABLE, and then kill the connection.
// The WaitFor callback holds a lock on the log. Meaning it will block logging of the next action. So we trigger
// on CREATE TABLE so that there's a few statements to go before the COMMIT.
p.Logger.WaitFor(func(log Log) bool {
if strings.Contains(log.String(), "release wg") {
wg.Done()
}
if !strings.Contains(log.String(), "CREATE TABLE") {
return false
}
pid := log.args[1].(MSI)["pid"].(uint32)
conf := p.db.Config().ConnConfig
conf.Logger = nil
c, err := pgx.ConnectConfig(context.Background(), conf)
if !assert.NoError(t, err) {
return true
}
_, err = c.Exec(context.Background(), "SELECT pg_terminate_backend($1)", pid)
assert.NoError(t, err)
return true
}, false)
}()
p.Logger.Infof("release wg")
wg.Wait()
metrics := []telegraf.Metric{
newMetric(t, "_a", MSS{}, MSI{"v": 1}),
}
require.NoError(t, p.Write(metrics))
p.Logger.WaitForCopy(t.Name()+"_a", false)
dumpA := dbTableDump(t, p.db, "_a")
assert.Len(t, dumpA, 1)
haveError := false
for _, l := range p.Logger.Logs() {
if strings.Contains(l.String(), "write error") {
haveError = true
break
}
}
assert.True(t, haveError, "write error not found in log")
}
func TestWriteTagTableIntegration(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
p.TagsAsForeignKeys = true
require.NoError(t, p.Connect())
metrics := []telegraf.Metric{
newMetric(t, "", MSS{"tag": "foo"}, MSI{"v": 1}),
}
require.NoError(t, p.Write(metrics))
dump := dbTableDump(t, p.db, "")
require.Len(t, dump, 1)
assert.EqualValues(t, 1, dump[0]["v"])
dumpTags := dbTableDump(t, p.db, p.TagTableSuffix)
require.Len(t, dumpTags, 1)
assert.EqualValues(t, dump[0]["tag_id"], dumpTags[0]["tag_id"])
assert.EqualValues(t, "foo", dumpTags[0]["tag"])
p.Logger.Clear()
require.NoError(t, p.Write(metrics))
stmtCount := 0
for _, log := range p.Logger.Logs() {
if strings.Contains(log.String(), "info: PG ") {
stmtCount++
}
}
assert.Equal(t, 3, stmtCount) // BEGIN, COPY metrics table, COMMIT
}
// Verify that when using TagsAsForeignKeys and a tag can't be written, we still add the metrics.
func TestWriteIntegration_tagError(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
p.TagsAsForeignKeys = true
require.NoError(t, p.Connect())
metrics := []telegraf.Metric{
newMetric(t, "", MSS{"tag": "foo"}, MSI{"v": 1}),
}
require.NoError(t, p.Write(metrics))
// It'll have the table cached, so it won't know we dropped it; it will try the insert and get an error.
_, err := p.db.Exec(ctx, "DROP TABLE \""+t.Name()+"_tag\"")
require.NoError(t, err)
metrics = []telegraf.Metric{
newMetric(t, "", MSS{"tag": "foo"}, MSI{"v": 2}),
}
require.NoError(t, p.Write(metrics))
dump := dbTableDump(t, p.db, "")
require.Len(t, dump, 2)
assert.EqualValues(t, 1, dump[0]["v"])
assert.EqualValues(t, 2, dump[1]["v"])
}
// Verify that when using TagsAsForeignKeys and ForeignTagConstraint, and a tag can't be written, we drop the metrics.
func TestWriteIntegration_tagError_foreignConstraint(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
p.TagsAsForeignKeys = true
p.ForeignTagConstraint = true
require.NoError(t, p.Connect())
metrics := []telegraf.Metric{
newMetric(t, "", MSS{"tag": "foo"}, MSI{"v": 1}),
}
require.NoError(t, p.Write(metrics))
// It'll have the table cached, so it won't know we dropped it; it will try the insert and get an error.
_, err := p.db.Exec(ctx, "DROP TABLE \""+t.Name()+"_tag\"")
require.NoError(t, err)
metrics = []telegraf.Metric{
newMetric(t, "", MSS{"tag": "bar"}, MSI{"v": 2}),
}
assert.NoError(t, p.Write(metrics))
haveError := false
for _, l := range p.Logger.Logs() {
if strings.Contains(l.String(), "write error") {
haveError = true
break
}
}
assert.True(t, haveError, "write error not found in log")
dump := dbTableDump(t, p.db, "")
require.Len(t, dump, 1)
assert.EqualValues(t, 1, dump[0]["v"])
}
func TestWriteIntegration_utf8(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
p.TagsAsForeignKeys = true
require.NoError(t, p.Connect())
metrics := []telegraf.Metric{
newMetric(t, "Ѧ𝙱Ƈᗞ",
MSS{"ăѣ𝔠ծ": "𝘈Ḇ𝖢𝕯٤ḞԍНǏ𝙅ƘԸⲘ𝙉০Ρ𝗤Ɍ𝓢ȚЦ𝒱Ѡ𝓧ƳȤ"},
MSI{"АḂⲤ𝗗": "𝘢ƀ𝖼ḋếᵮℊ𝙝Ꭵ𝕛кιṃդⱺ𝓅𝘲𝕣𝖘ŧ𝑢ṽẉ𝘅ყž𝜡"},
),
}
assert.NoError(t, p.Write(metrics))
dump := dbTableDump(t, p.db, "Ѧ𝙱Ƈᗞ")
require.Len(t, dump, 1)
assert.EqualValues(t, "𝘢ƀ𝖼ḋếᵮℊ𝙝Ꭵ𝕛кιṃդⱺ𝓅𝘲𝕣𝖘ŧ𝑢ṽẉ𝘅ყž𝜡", dump[0]["АḂⲤ𝗗"])
dumpTags := dbTableDump(t, p.db, "Ѧ𝙱Ƈᗞ"+p.TagTableSuffix)
require.Len(t, dumpTags, 1)
assert.EqualValues(t, dump[0]["tag_id"], dumpTags[0]["tag_id"])
assert.EqualValues(t, "𝘈Ḇ𝖢𝕯٤ḞԍНǏ𝙅ƘԸⲘ𝙉০Ρ𝗤Ɍ𝓢ȚЦ𝒱Ѡ𝓧ƳȤ", dumpTags[0]["ăѣ𝔠ծ"])
}
func TestWriteIntegration_UnsignedIntegers(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
p.Uint64Type = PgUint8
_ = p.Init()
if err := p.Connect(); err != nil {
if strings.Contains(err.Error(), "retreiving OID for uint8 data type") {
t.Skipf("pguint extension is not installed")
t.SkipNow()
}
require.NoError(t, err)
}
metrics := []telegraf.Metric{
newMetric(t, "", MSS{}, MSI{"v": uint64(math.MaxUint64)}),
}
require.NoError(t, p.Write(metrics))
dump := dbTableDump(t, p.db, "")
if assert.Len(t, dump, 1) {
assert.EqualValues(t, uint64(math.MaxUint64), dump[0]["v"])
}
}
// Last-ditch effort to find any concurrency issues.
func TestStressConcurrencyIntegration(t *testing.T) {
t.Skip("Skipping very long test - run locally with no timeout")
metrics := []telegraf.Metric{
newMetric(t, "", MSS{"foo": "bar"}, MSI{"a": 1}),
newMetric(t, "", MSS{"pop": "tart"}, MSI{"b": 1}),
newMetric(t, "", MSS{"foo": "bar", "pop": "tart"}, MSI{"a": 2, "b": 2}),
newMetric(t, "_b", MSS{"foo": "bar"}, MSI{"a": 1}),
}
concurrency := 4
loops := 100
pctl := newPostgresqlTest(t)
pctl.Logger.emitLevel = pgx.LogLevelWarn
require.NoError(t, pctl.Connect())
for i := 0; i < loops; i++ {
var wgStart, wgDone sync.WaitGroup
wgStart.Add(concurrency)
wgDone.Add(concurrency)
for j := 0; j < concurrency; j++ {
go func() {
mShuf := make([]telegraf.Metric, len(metrics))
copy(mShuf, metrics)
rand.Shuffle(len(mShuf), func(a, b int) { mShuf[a], mShuf[b] = mShuf[b], mShuf[a] })
p := newPostgresqlTest(t)
p.TagsAsForeignKeys = true
p.Logger.emitLevel = pgx.LogLevelWarn
p.dbConfig.MaxConns = int32(rand.Intn(3) + 1)
require.NoError(t, p.Connect())
wgStart.Done()
wgStart.Wait()
err := p.Write(mShuf)
assert.NoError(t, err)
assert.NoError(t, p.Close())
assert.False(t, p.Logger.HasLevel(pgx.LogLevelWarn))
wgDone.Done()
}()
}
wgDone.Wait()
if t.Failed() {
break
}
}
}

View File

@ -0,0 +1,80 @@
# Publishes metrics to a PostgreSQL database
[[outputs.postgresql]]
## Specify connection address via the standard libpq connection string:
## host=... user=... password=... sslmode=... dbname=...
## Or a URL:
## postgres://[user[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
## See https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
##
## All connection parameters are optional. Environment vars are also supported.
## e.g. PGPASSWORD, PGHOST, PGUSER, PGDATABASE
## All supported vars can be found here:
## https://www.postgresql.org/docs/current/libpq-envars.html
##
## Non-standard parameters:
## pool_max_conns (default: 1) - Maximum size of connection pool for parallel (per-batch per-table) inserts.
## pool_min_conns (default: 0) - Minimum size of connection pool.
## pool_max_conn_lifetime (default: 0s) - Maximum age of a connection before closing.
## pool_max_conn_idle_time (default: 0s) - Maximum idle time of a connection before closing.
## pool_health_check_period (default: 0s) - Duration between health checks on idle connections.
# connection = ""
## Postgres schema to use.
# schema = "public"
## Store tags as foreign keys in the metrics table. Default is false.
# tags_as_foreign_keys = false
## Suffix to append to table name (measurement name) for the foreign tag table.
# tag_table_suffix = "_tag"
## Deny inserting metrics if the foreign tag can't be inserted.
# foreign_tag_constraint = false
## Store all tags as a JSONB object in a single 'tags' column.
# tags_as_jsonb = false
## Store all fields as a JSONB object in a single 'fields' column.
# fields_as_jsonb = false
## Templated statements to execute when creating a new table.
# create_templates = [
# '''CREATE TABLE {{ .table }} ({{ .columns }})''',
# ]
## Templated statements to execute when adding columns to a table.
## Set to an empty list to disable. Points containing tags for which there is no column will be skipped. Points
## containing fields for which there is no column will have the field omitted.
# add_column_templates = [
# '''ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}''',
# ]
## Templated statements to execute when creating a new tag table.
# tag_table_create_templates = [
# '''CREATE TABLE {{ .table }} ({{ .columns }}, PRIMARY KEY (tag_id))''',
# ]
## Templated statements to execute when adding columns to a tag table.
## Set to an empty list to disable. Points containing tags for which there is no column will be skipped.
# tag_table_add_column_templates = [
# '''ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}''',
# ]
## The postgres data type to use for storing unsigned 64-bit integer values (Postgres does not have a native
## unsigned 64-bit integer type).
## The value can be one of:
## numeric - Uses the PostgreSQL "numeric" data type.
## uint8 - Requires pguint extension (https://github.com/petere/pguint)
# uint64_type = "numeric"
## When using pool_max_conns>1, and a temporary error occurs, the query is retried with an incremental backoff. This
## controls the maximum backoff duration.
# retry_max_backoff = "15s"
## Approximate number of tag IDs to store in the in-memory cache (when using tags_as_foreign_keys).
## This is an optimization to skip inserting known tag IDs.
## Each entry consumes approximately 34 bytes of memory.
# tag_cache_size = 100000
## Enable & set the log level for the Postgres driver.
# log_level = "warn" # trace, debug, info, warn, error, none

View File

@ -0,0 +1,418 @@
/*
Templates are used for creation of the SQL used when creating and modifying tables. These templates are specified within
the configuration as the parameters 'create_templates', 'add_column_templates', 'tag_table_create_templates', and
'tag_table_add_column_templates'.
The templating functionality behaves the same in all cases; however, the available variables differ.
# Variables
The following variables are available within all template executions:
- table - A Table object referring to the current table being
created/modified.
- columns - A Columns object of the new columns being added to the
table (all columns in the case of a new table, and only the new columns in the
case of an existing table).
- allColumns - A Columns object of all the columns (both old and new)
of the table. In the case of a new table, this is the same as `columns`.
- metricTable - A Table object referring to the table containing the
fields. When TagsAsForeignKeys is enabled and `table` is the tag table,
`metricTable` is the table using this one for its tags.
- tagTable - A Table object referring to the table containing the
tags. When TagsAsForeignKeys is enabled and `table` is the metrics table,
`tagTable` is the table containing the tags for it.
Each object has helper methods that may be used within the template. See the documentation for the appropriate type.
When the object is interpolated without a helper, it is automatically converted to a string through its String() method.
# Functions
All the functions provided by the Sprig library (http://masterminds.github.io/sprig/) are available within template executions.
In addition, the following functions are also available:
- quoteIdentifier - Quotes the input string as a Postgres identifier.
- quoteLiteral - Quotes the input string as a Postgres literal.
# Examples
The default templates show basic usage. When left unconfigured, it is the equivalent of:
[outputs.postgresql]
create_templates = [
'''CREATE TABLE {{.table}} ({{.columns}})''',
]
add_column_templates = [
'''ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}''',
]
tag_table_create_templates = [
'''CREATE TABLE {{.table}} ({{.columns}}, PRIMARY KEY (tag_id))'''
]
tag_table_add_column_templates = [
'''ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}''',
]
A simple example for usage with TimescaleDB would be:
[outputs.postgresql]
create_templates = [
'''CREATE TABLE {{ .table }} ({{ .allColumns }})''',
'''SELECT create_hypertable({{ .table|quoteLiteral }}, 'time', chunk_time_interval => INTERVAL '1d')''',
'''ALTER TABLE {{ .table }} SET (timescaledb.compress, timescaledb.compress_segmentby = 'tag_id')''',
'''SELECT add_compression_policy({{ .table|quoteLiteral }}, INTERVAL '2h')''',
]
...where the defaults for the other templates would be automatically applied.
A more complex example, for versions of TimescaleDB that don't support adding columns to compressed hypertables (v<2.1.0), using views and unions to emulate the functionality, would be:
[outputs.postgresql]
schema = "telegraf"
create_templates = [
'''CREATE TABLE {{ .table }} ({{ .allColumns }})''',
'''SELECT create_hypertable({{ .table|quoteLiteral }}, 'time', chunk_time_interval => INTERVAL '1d')''',
'''ALTER TABLE {{ .table }} SET (timescaledb.compress, timescaledb.compress_segmentby = 'tag_id')''',
'''SELECT add_compression_policy({{ .table|quoteLiteral }}, INTERVAL '2d')''',
'''CREATE VIEW {{ .table.WithSuffix "_data" }} AS
SELECT {{ .allColumns.Selectors | join "," }} FROM {{ .table }}''',
'''CREATE VIEW {{ .table.WithSchema "public" }} AS
SELECT time, {{ (.tagTable.Columns.Tags.Concat .allColumns.Fields).Identifiers | join "," }}
FROM {{ .table.WithSuffix "_data" }} t, {{ .tagTable }} tt
WHERE t.tag_id = tt.tag_id''',
]
add_column_templates = [
'''ALTER TABLE {{ .table }} RENAME TO {{ (.table.WithSuffix "_" .table.Columns.Hash).WithSchema "" }}''',
'''ALTER VIEW {{ .table.WithSuffix "_data" }} RENAME TO {{ (.table.WithSuffix "_" .table.Columns.Hash "_data").WithSchema "" }}''',
'''DROP VIEW {{ .table.WithSchema "public" }}''',
'''CREATE TABLE {{ .table }} ({{ .allColumns }})''',
'''SELECT create_hypertable({{ .table|quoteLiteral }}, 'time', chunk_time_interval => INTERVAL '1d')''',
'''ALTER TABLE {{ .table }} SET (timescaledb.compress, timescaledb.compress_segmentby = 'tag_id')''',
'''SELECT add_compression_policy({{ .table|quoteLiteral }}, INTERVAL '2d')''',
'''CREATE VIEW {{ .table.WithSuffix "_data" }} AS
SELECT {{ .allColumns.Selectors | join "," }}
FROM {{ .table }}
UNION ALL
SELECT {{ (.allColumns.Union .table.Columns).Selectors | join "," }}
FROM {{ .table.WithSuffix "_" .table.Columns.Hash "_data" }}''',
'''CREATE VIEW {{ .table.WithSchema "public" }}
AS SELECT time, {{ (.tagTable.Columns.Tags.Concat .allColumns.Fields).Identifiers | join "," }}
FROM {{ .table.WithSuffix "_data" }} t, {{ .tagTable }} tt
WHERE t.tag_id = tt.tag_id''',
]
*/
package sqltemplate
import (
"bytes"
"encoding/base32"
"fmt"
"hash/fnv"
"strings"
"text/template"
"unsafe"
"github.com/influxdata/telegraf/plugins/outputs/postgresql/utils"
"github.com/Masterminds/sprig"
)
var templateFuncs = map[string]interface{}{
"quoteIdentifier": QuoteIdentifier,
"quoteLiteral": QuoteLiteral,
}
func asString(obj interface{}) string {
switch obj := obj.(type) {
case string:
return obj
case []byte:
return string(obj)
case fmt.Stringer:
return obj.String()
default:
return fmt.Sprintf("%v", obj)
}
}
// QuoteIdentifier quotes the given string as a Postgres identifier (double-quotes the value).
//
// QuoteIdentifier is accessible within templates as 'quoteIdentifier'.
func QuoteIdentifier(name interface{}) string {
return utils.QuoteIdentifier(asString(name))
}
// QuoteLiteral quotes the given string as a Postgres literal (single-quotes the value).
//
// QuoteLiteral is accessible within templates as 'quoteLiteral'.
func QuoteLiteral(str interface{}) string {
return utils.QuoteLiteral(asString(str))
}
// Table is an object which represents a Postgres table.
type Table struct {
Schema string
Name string
Columns Columns
}
func NewTable(schemaName, tableName string, columns []utils.Column) *Table {
if tableName == "" {
return nil
}
return &Table{
Schema: schemaName,
Name: tableName,
Columns: NewColumns(columns),
}
}
// String returns the table's fully qualified & quoted identifier (schema+table).
func (tbl *Table) String() string {
return tbl.Identifier()
}
// Identifier returns the table's fully qualified & quoted identifier (schema+table).
//
// If schema is empty, it is omitted from the result.
func (tbl *Table) Identifier() string {
if tbl.Schema == "" {
return QuoteIdentifier(tbl.Name)
}
return QuoteIdentifier(tbl.Schema) + "." + QuoteIdentifier(tbl.Name)
}
// WithSchema returns a copy of the Table object, but with the schema replaced by the given value.
func (tbl *Table) WithSchema(name string) *Table {
tblNew := &Table{}
*tblNew = *tbl
tblNew.Schema = name
return tblNew
}
// WithName returns a copy of the Table object, but with the name replaced by the given value.
func (tbl *Table) WithName(name string) *Table {
tblNew := &Table{}
*tblNew = *tbl
tblNew.Name = name
return tblNew
}
// WithSuffix returns a copy of the Table object, but with the name suffixed with the given value.
func (tbl *Table) WithSuffix(suffixes ...string) *Table {
tblNew := &Table{}
*tblNew = *tbl
tblNew.Name += strings.Join(suffixes, "")
return tblNew
}
// A Column is an object which represents a Postgres column.
type Column utils.Column
// String returns the column's definition (as used in a CREATE TABLE statement), e.g.:
//
// "my_column" bigint
func (tc Column) String() string {
return tc.Definition()
}
// Definition returns the column's definition (as used in a CREATE TABLE statement), e.g.:
//
// "my_column" bigint
func (tc Column) Definition() string {
return tc.Identifier() + " " + tc.Type
}
// Identifier returns the column's quoted identifier.
func (tc Column) Identifier() string {
return QuoteIdentifier(tc.Name)
}
// Selector returns the selector for the column. For most cases this is the same as Identifier. However in some
// cases, such as a UNION, this may return a statement such as `NULL AS "foo"`.
func (tc Column) Selector() string {
if tc.Type != "" {
return tc.Identifier()
}
return "NULL AS " + tc.Identifier()
}
// IsTag returns true if the column is a tag column. Otherwise false.
func (tc Column) IsTag() bool {
return tc.Role == utils.TagColType
}
// IsField returns true if the column is a field column. Otherwise false.
func (tc Column) IsField() bool {
return tc.Role == utils.FieldColType
}
// Columns represents an ordered list of Column objects, with convenience methods for operating on the
// list.
type Columns []Column
func NewColumns(cols []utils.Column) Columns {
tcols := make(Columns, len(cols))
for i, col := range cols {
tcols[i] = Column(col)
}
return tcols
}
// List returns the Columns object as a slice of Column.
func (cols Columns) List() []Column {
return cols
}
// Definitions returns the list of column definitions.
func (cols Columns) Definitions() []string {
defs := make([]string, len(cols))
for i, tc := range cols {
defs[i] = tc.Definition()
}
return defs
}
// Identifiers returns the list of quoted column identifiers.
func (cols Columns) Identifiers() []string {
idents := make([]string, len(cols))
for i, tc := range cols {
idents[i] = tc.Identifier()
}
return idents
}
// Selectors returns the list of column selectors.
func (cols Columns) Selectors() []string {
selectors := make([]string, len(cols))
for i, tc := range cols {
selectors[i] = tc.Selector()
}
return selectors
}
// String returns the comma-delimited list of column definitions.
func (cols Columns) String() string {
colStrs := make([]string, len(cols))
for i, tc := range cols {
colStrs[i] = tc.String()
}
return strings.Join(colStrs, ", ")
}
// Keys returns a Columns list of the columns which are not fields (e.g. time, tag_id, & tags).
func (cols Columns) Keys() Columns {
var newCols []Column
for _, tc := range cols {
if tc.Role != utils.FieldColType {
newCols = append(newCols, tc)
}
}
return newCols
}
// Sorted returns a sorted copy of Columns.
//
// Columns are sorted so that they are in order as: [Time, Tags, Fields], with the columns within each group sorted
// alphabetically.
func (cols Columns) Sorted() Columns {
newCols := append([]Column{}, cols...)
(*utils.ColumnList)(unsafe.Pointer(&newCols)).Sort()
return newCols
}
// Concat returns a copy of Columns with the given tcsList appended to the end.
func (cols Columns) Concat(tcsList ...Columns) Columns {
tcsNew := append(Columns{}, cols...)
for _, tcs := range tcsList {
tcsNew = append(tcsNew, tcs...)
}
return tcsNew
}
// Union generates a list of SQL selectors against the given columns.
//
// For each column in cols, if the column also exists in tcsFrom, it will be selected; if it does not exist, NULL
// will be selected.
func (cols Columns) Union(tcsFrom Columns) Columns {
tcsNew := append(Columns{}, cols...)
TCS:
for i, tc := range cols {
for _, tcFrom := range tcsFrom {
if tc.Name == tcFrom.Name {
continue TCS
}
}
tcsNew[i].Type = ""
}
return tcsNew
}
// Tags returns a Columns list of the columns which are tags.
func (cols Columns) Tags() Columns {
var newCols []Column
for _, tc := range cols {
if tc.Role == utils.TagColType {
newCols = append(newCols, tc)
}
}
return newCols
}
// Fields returns a Columns list of the columns which are fields.
func (cols Columns) Fields() Columns {
var newCols []Column
for _, tc := range cols {
if tc.Role == utils.FieldColType {
newCols = append(newCols, tc)
}
}
return newCols
}
// Hash returns a hash of the column names. The hash is a base-32 encoded string, up to 7 characters long, with no padding.
//
// This can be useful as an identifier for supporting table renaming + unions in the case of non-modifiable tables.
func (cols Columns) Hash() string {
hash := fnv.New32a()
for _, tc := range cols.Sorted() {
_, _ = hash.Write([]byte(tc.Name))
_, _ = hash.Write([]byte{0})
}
return strings.ToLower(base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(hash.Sum(nil)))
}
type Template template.Template
func (t *Template) UnmarshalText(text []byte) error {
tmpl := template.New("")
tmpl.Option("missingkey=error")
tmpl.Funcs(templateFuncs)
tmpl.Funcs(sprig.TxtFuncMap())
tt, err := tmpl.Parse(string(text))
if err != nil {
return err
}
*t = Template(*tt)
return nil
}
func (t *Template) Render(table *Table, newColumns []utils.Column, metricTable *Table, tagTable *Table) ([]byte, error) {
tcs := NewColumns(newColumns).Sorted()
data := map[string]interface{}{
"table": table,
"columns": tcs,
"allColumns": tcs.Concat(table.Columns).Sorted(),
"metricTable": metricTable,
"tagTable": tagTable,
}
buf := bytes.NewBuffer(nil)
err := (*template.Template)(t).Execute(buf, data)
return buf.Bytes(), err
}
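// Minimal usage sketch (illustrative; "mytable" and its columns are assumptions, not part of the plugin):
func exampleRenderTemplate() {
var tmpl Template
_ = tmpl.UnmarshalText([]byte(`CREATE TABLE {{ .table }} ({{ .columns }})`))
cols := []utils.Column{
{Name: "time", Type: "timestamptz", Role: utils.TimeColType},
{Name: "value", Type: "bigint", Role: utils.FieldColType},
}
tbl := NewTable("public", "mytable", nil)
sql, _ := tmpl.Render(tbl, cols, tbl, nil)
fmt.Println(string(sql)) // CREATE TABLE "public"."mytable" ("time" timestamptz, "value" bigint)
}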

View File

@ -0,0 +1,432 @@
package postgresql
import (
"context"
"fmt"
"strings"
"sync"
"github.com/jackc/pgx/v4"
"github.com/influxdata/telegraf/plugins/outputs/postgresql/sqltemplate"
"github.com/influxdata/telegraf/plugins/outputs/postgresql/utils"
)
// This is an arbitrary constant value, shared between multiple telegraf processes, used for locking schema updates.
const schemaAdvisoryLockID int64 = 5705450890675909945
type tableState struct {
name string
columns map[string]utils.Column
sync.RWMutex
}
type TableManager struct {
*Postgresql
// map[tableName]map[columnName]utils.Column
tables map[string]*tableState
tablesMutex sync.Mutex
}
// NewTableManager returns a TableManager that handles checking and updating the state of tables in the PG
// database.
func NewTableManager(postgresql *Postgresql) *TableManager {
return &TableManager{
Postgresql: postgresql,
tables: make(map[string]*tableState),
}
}
// ClearTableCache clears the table structure cache.
func (tm *TableManager) ClearTableCache() {
tm.tablesMutex.Lock()
for _, tbl := range tm.tables {
tbl.Lock()
tbl.columns = nil
tbl.Unlock()
}
tm.tablesMutex.Unlock()
if tm.tagsCache != nil {
tm.tagsCache.Clear()
}
}
func (tm *TableManager) table(name string) *tableState {
tm.tablesMutex.Lock()
tbl := tm.tables[name]
if tbl == nil {
tbl = &tableState{name: name}
tm.tables[name] = tbl
}
tm.tablesMutex.Unlock()
return tbl
}
// MatchSource scans through the metrics, determining what columns are needed for inserting, and ensuring the DB schema matches.
//
// If the schema does not match and schema updates are disabled:
// If a field is missing from the DB, the field is omitted.
// If a tag is missing from the DB, the metric is dropped.
func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *TableSource) error {
metricTable := tm.table(rowSource.Name())
var tagTable *tableState
if tm.TagsAsForeignKeys {
tagTable = tm.table(metricTable.name + tm.TagTableSuffix)
missingCols, err := tm.EnsureStructure(
ctx,
db,
tagTable,
rowSource.TagTableColumns(),
tm.TagTableCreateTemplates,
tm.TagTableAddColumnTemplates,
metricTable,
tagTable,
)
if err != nil {
if isTempError(err) {
return err
}
tm.Postgresql.Logger.Errorf("permanent error updating schema for %s: %w", tagTable.name, err)
}
if len(missingCols) > 0 {
colDefs := make([]string, len(missingCols))
for i, col := range missingCols {
if err := rowSource.DropColumn(col); err != nil {
return fmt.Errorf("metric/table mismatch: Unable to omit field/column from \"%s\": %w", tagTable.name, err)
}
colDefs[i] = col.Name + " " + col.Type
}
tm.Logger.Errorf("table '%s' is missing tag columns (dropping metrics): %s",
tagTable.name,
strings.Join(colDefs, ", "))
}
}
missingCols, err := tm.EnsureStructure(
ctx,
db,
metricTable,
rowSource.MetricTableColumns(),
tm.CreateTemplates,
tm.AddColumnTemplates,
metricTable,
tagTable,
)
if err != nil {
if isTempError(err) {
return err
}
tm.Postgresql.Logger.Errorf("permanent error updating schema for %s: %w", metricTable.name, err)
}
if len(missingCols) > 0 {
colDefs := make([]string, len(missingCols))
for i, col := range missingCols {
if err := rowSource.DropColumn(col); err != nil {
return fmt.Errorf("metric/table mismatch: Unable to omit field/column from \"%s\": %w", metricTable.name, err)
}
colDefs[i] = col.Name + " " + col.Type
}
tm.Logger.Errorf("table '%s' is missing columns (omitting fields): %s",
metricTable.name,
strings.Join(colDefs, ", "))
}
return nil
}
// EnsureStructure ensures that the table identified by tableName contains the provided columns.
//
// createTemplates and addColumnTemplates are the templates which are executed in the event of table create or alter
// (respectively).
// metricsTable and tagsTable are passed to the templates.
//
// If the table cannot be modified, the returned column list is the columns which are missing from the table. This
// includes when an error is returned.
//nolint:revive
func (tm *TableManager) EnsureStructure(
ctx context.Context,
db dbh,
tbl *tableState,
columns []utils.Column,
createTemplates []*sqltemplate.Template,
addColumnsTemplates []*sqltemplate.Template,
metricsTable *tableState,
tagsTable *tableState,
) ([]utils.Column, error) {
// Sort so that:
// * When we create/alter the table the columns are in a sane order (telegraf gives us the fields in random order)
// * When we display errors about missing columns, the order is also sane, and consistent
utils.ColumnList(columns).Sort()
// The lock/read sequence below is:
// rlock, read, runlock, wlock, read, read_db, wlock_db, read_db, write_db, wunlock_db, wunlock
// rlock
tbl.RLock()
// read
currCols := tbl.columns
// runlock
tbl.RUnlock()
missingCols := diffMissingColumns(currCols, columns)
if len(missingCols) == 0 {
return nil, nil
}
// check that the missing columns are columns that can be added
addColumns := make([]utils.Column, 0, len(missingCols))
var invalidColumns []utils.Column
for _, col := range missingCols {
if tm.validateColumnName(col.Name) {
addColumns = append(addColumns, col)
continue
}
if col.Role == utils.TagColType {
return nil, fmt.Errorf("column name too long: \"%s\"", col.Name)
}
tm.Postgresql.Logger.Errorf("column name too long: \"%s\"", col.Name)
invalidColumns = append(invalidColumns, col)
}
// wlock
// We also need to lock the other table as it may be referenced by a template.
// To prevent deadlock, the metric & tag table must always be locked in the same order: 1) Tag, 2) Metric
if tbl == tagsTable {
tagsTable.Lock()
defer tagsTable.Unlock()
metricsTable.RLock()
defer metricsTable.RUnlock()
} else {
if tagsTable != nil {
tagsTable.RLock()
defer tagsTable.RUnlock()
}
metricsTable.Lock()
defer metricsTable.Unlock()
}
// read
currCols = tbl.columns
addColumns = diffMissingColumns(currCols, addColumns)
if len(addColumns) == 0 {
return invalidColumns, nil
}
// read_db
var err error
if currCols, err = tm.getColumns(ctx, db, tbl.name); err != nil {
return nil, err
}
tbl.columns = currCols
addColumns = diffMissingColumns(currCols, addColumns)
if len(addColumns) == 0 {
tbl.columns = currCols
return invalidColumns, nil
}
if len(currCols) == 0 && len(createTemplates) == 0 {
// can't create
return columns, nil
}
if len(currCols) != 0 && len(addColumnsTemplates) == 0 {
// can't add
return append(addColumns, invalidColumns...), nil
}
if len(currCols) == 0 && !tm.validateTableName(tbl.name) {
return nil, fmt.Errorf("table name too long: %s", tbl.name)
}
// wlock_db
tx, err := db.Begin(ctx)
if err != nil {
return append(addColumns, invalidColumns...), err
}
defer tx.Rollback(ctx) //nolint:errcheck
// It's possible to have multiple telegraf processes, in which case we can't ensure they all lock tables in the
// same order. So to prevent possible deadlocks, we use a single advisory lock for all schema modifications.
if _, err := tx.Exec(ctx, "SELECT pg_advisory_xact_lock($1)", schemaAdvisoryLockID); err != nil {
return append(addColumns, invalidColumns...), err
}
// read_db
if currCols, err = tm.getColumns(ctx, tx, tbl.name); err != nil {
return nil, err
}
tbl.columns = currCols
if currCols != nil {
addColumns = diffMissingColumns(currCols, addColumns)
if len(addColumns) == 0 {
return invalidColumns, nil
}
}
// write_db
var tmpls []*sqltemplate.Template
if len(currCols) == 0 {
tmpls = createTemplates
} else {
tmpls = addColumnsTemplates
}
if err := tm.update(ctx, tx, tbl, tmpls, addColumns, metricsTable, tagsTable); err != nil {
return append(addColumns, invalidColumns...), err
}
if currCols, err = tm.getColumns(ctx, tx, tbl.name); err != nil {
return append(addColumns, invalidColumns...), err
}
if err := tx.Commit(ctx); err != nil {
return append(addColumns, invalidColumns...), err
}
tbl.columns = currCols
// wunlock_db (deferred)
// wunlock (deferred)
return invalidColumns, nil
}
func (tm *TableManager) getColumns(ctx context.Context, db dbh, name string) (map[string]utils.Column, error) {
rows, err := db.Query(ctx, `
SELECT
column_name,
CASE WHEN data_type='USER-DEFINED' THEN udt_name ELSE data_type END,
col_description(format('%I.%I', table_schema, table_name)::regclass::oid, ordinal_position)
FROM information_schema.columns
WHERE table_schema = $1 and table_name = $2`, tm.Schema, name)
if err != nil {
return nil, err
}
defer rows.Close()
cols := make(map[string]utils.Column)
for rows.Next() {
var colName, colType string
desc := new(string)
err := rows.Scan(&colName, &colType, &desc)
if err != nil {
return nil, err
}
role := utils.FieldColType
switch colName {
case timeColumnName:
role = utils.TimeColType
case tagIDColumnName:
role = utils.TagsIDColType
case tagsJSONColumnName:
role = utils.TagColType
case fieldsJSONColumnName:
role = utils.FieldColType
default:
// We don't want to monopolize the column comment (preventing the user from storing other information there), so just look at the first word
if desc != nil {
descWords := strings.Split(*desc, " ")
if descWords[0] == "tag" {
role = utils.TagColType
}
}
}
cols[colName] = utils.Column{
Name: colName,
Type: colType,
Role: role,
}
}
return cols, rows.Err()
}
//nolint:revive
func (tm *TableManager) update(ctx context.Context,
tx pgx.Tx,
state *tableState,
tmpls []*sqltemplate.Template,
missingCols []utils.Column,
metricsTable *tableState,
tagsTable *tableState,
) error {
tmplTable := sqltemplate.NewTable(tm.Schema, state.name, colMapToSlice(state.columns))
metricsTmplTable := sqltemplate.NewTable(tm.Schema, metricsTable.name, colMapToSlice(metricsTable.columns))
var tagsTmplTable *sqltemplate.Table
if tagsTable != nil {
tagsTmplTable = sqltemplate.NewTable(tm.Schema, tagsTable.name, colMapToSlice(tagsTable.columns))
} else {
tagsTmplTable = sqltemplate.NewTable("", "", nil)
}
for _, tmpl := range tmpls {
sql, err := tmpl.Render(tmplTable, missingCols, metricsTmplTable, tagsTmplTable)
if err != nil {
return err
}
if _, err := tx.Exec(ctx, string(sql)); err != nil {
return fmt.Errorf("executing `%s`: %w", sql, err)
}
}
// We need to be able to determine the role of the column when reading the structure back (because of the
// templates). For some columns we can determine this by the column name (time, tag_id, etc). However tags and
// fields can have any name, and look the same. So we add a comment to tag columns, and by process of
// elimination, what remains are field columns.
for _, col := range missingCols {
if col.Role != utils.TagColType {
continue
}
stmt := fmt.Sprintf("COMMENT ON COLUMN %s.%s IS 'tag'",
tmplTable.String(), sqltemplate.QuoteIdentifier(col.Name))
if _, err := tx.Exec(ctx, stmt); err != nil {
return fmt.Errorf("setting column role comment: %s", err)
}
}
return nil
}
const maxIdentifierLength = 63
func (tm *TableManager) validateTableName(name string) bool {
if tm.Postgresql.TagsAsForeignKeys {
return len([]byte(name))+len([]byte(tm.Postgresql.TagTableSuffix)) <= maxIdentifierLength
}
return len([]byte(name)) <= maxIdentifierLength
}
func (tm *TableManager) validateColumnName(name string) bool {
return len([]byte(name)) <= maxIdentifierLength
}
// diffMissingColumns filters srcColumns to the ones not present in dbColumns.
func diffMissingColumns(dbColumns map[string]utils.Column, srcColumns []utils.Column) []utils.Column {
if len(dbColumns) == 0 {
return srcColumns
}
var missingColumns []utils.Column
for _, srcCol := range srcColumns {
if _, ok := dbColumns[srcCol.Name]; !ok {
missingColumns = append(missingColumns, srcCol)
continue
}
}
return missingColumns
}
func colMapToSlice(colMap map[string]utils.Column) []utils.Column {
if colMap == nil {
return nil
}
cols := make([]utils.Column, 0, len(colMap))
for _, col := range colMap {
cols = append(cols, col)
}
return cols
}

View File

@ -0,0 +1,472 @@
package postgresql
import (
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/outputs/postgresql/sqltemplate"
"github.com/influxdata/telegraf/plugins/outputs/postgresql/utils"
)
func TestTableManagerIntegration_EnsureStructure(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
require.NoError(t, p.Connect())
cols := []utils.Column{
p.columnFromTag("foo", ""),
p.columnFromField("baz", 0),
}
missingCols, err := p.tableManager.EnsureStructure(
ctx,
p.db,
p.tableManager.table(t.Name()),
cols,
p.CreateTemplates,
p.AddColumnTemplates,
p.tableManager.table(t.Name()),
nil,
)
require.NoError(t, err)
assert.Empty(t, missingCols)
tblCols := p.tableManager.table(t.Name()).columns
assert.EqualValues(t, cols[0], tblCols["foo"])
assert.EqualValues(t, cols[1], tblCols["baz"])
}
func TestTableManagerIntegration_EnsureStructure_alter(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
require.NoError(t, p.Connect())
cols := []utils.Column{
p.columnFromTag("foo", ""),
p.columnFromField("bar", 0),
}
_, err := p.tableManager.EnsureStructure(
ctx,
p.db,
p.tableManager.table(t.Name()),
cols,
p.CreateTemplates,
p.AddColumnTemplates,
p.tableManager.table(t.Name()),
nil,
)
require.NoError(t, err)
cols = append(cols, p.columnFromField("baz", 0))
missingCols, err := p.tableManager.EnsureStructure(
ctx,
p.db,
p.tableManager.table(t.Name()),
cols,
p.CreateTemplates,
p.AddColumnTemplates,
p.tableManager.table(t.Name()),
nil,
)
require.NoError(t, err)
assert.Empty(t, missingCols)
tblCols := p.tableManager.table(t.Name()).columns
assert.EqualValues(t, cols[0], tblCols["foo"])
assert.EqualValues(t, cols[1], tblCols["bar"])
assert.EqualValues(t, cols[2], tblCols["baz"])
}
func TestTableManagerIntegration_EnsureStructure_overflowTableName(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
require.NoError(t, p.Connect())
tbl := p.tableManager.table("ăăăăăăăăăăăăăăăăăăăăăăăăăăăăăăăă") // 32 2-byte unicode characters = 64 bytes
cols := []utils.Column{
p.columnFromField("foo", 0),
}
_, err := p.tableManager.EnsureStructure(
ctx,
p.db,
tbl,
cols,
p.CreateTemplates,
p.AddColumnTemplates,
tbl,
nil,
)
require.Error(t, err)
assert.Contains(t, err.Error(), "table name too long")
assert.False(t, isTempError(err))
}
func TestTableManagerIntegration_EnsureStructure_overflowTagName(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
require.NoError(t, p.Connect())
tbl := p.tableManager.table(t.Name())
cols := []utils.Column{
p.columnFromTag("ăăăăăăăăăăăăăăăăăăăăăăăăăăăăăăăă", "a"), // 32 2-byte unicode characters = 64 bytes
p.columnFromField("foo", 0),
}
_, err := p.tableManager.EnsureStructure(
ctx,
p.db,
tbl,
cols,
p.CreateTemplates,
p.AddColumnTemplates,
tbl,
nil,
)
require.Error(t, err)
assert.False(t, isTempError(err))
}
func TestTableManagerIntegration_EnsureStructure_overflowFieldName(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
require.NoError(t, p.Connect())
tbl := p.tableManager.table(t.Name())
cols := []utils.Column{
p.columnFromField("foo", 0),
p.columnFromField("ăăăăăăăăăăăăăăăăăăăăăăăăăăăăăăăă", 0),
}
missingCols, err := p.tableManager.EnsureStructure(
ctx,
p.db,
tbl,
cols,
p.CreateTemplates,
p.AddColumnTemplates,
tbl,
nil,
)
require.NoError(t, err)
assert.Len(t, missingCols, 1)
assert.Equal(t, cols[1], missingCols[0])
}
func TestTableManagerIntegration_getColumns(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
require.NoError(t, p.Connect())
cols := []utils.Column{
p.columnFromTag("foo", ""),
p.columnFromField("baz", 0),
}
_, err := p.tableManager.EnsureStructure(
ctx,
p.db,
p.tableManager.table(t.Name()),
cols,
p.CreateTemplates,
p.AddColumnTemplates,
p.tableManager.table(t.Name()),
nil,
)
require.NoError(t, err)
p.tableManager.ClearTableCache()
require.Empty(t, p.tableManager.table(t.Name()).columns)
curCols, err := p.tableManager.getColumns(ctx, p.db, t.Name())
require.NoError(t, err)
assert.EqualValues(t, cols[0], curCols["foo"])
assert.EqualValues(t, cols[1], curCols["baz"])
}
func TestTableManagerIntegration_MatchSource(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
p.TagsAsForeignKeys = true
require.NoError(t, p.Connect())
metrics := []telegraf.Metric{
newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}),
}
tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
assert.Contains(t, p.tableManager.table(t.Name()+p.TagTableSuffix).columns, "tag")
assert.Contains(t, p.tableManager.table(t.Name()).columns, "a")
}
func TestTableManagerIntegration_MatchSource_UnsignedIntegers(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
p.Uint64Type = PgUint8
_ = p.Init()
if err := p.Connect(); err != nil {
if strings.Contains(err.Error(), "retreiving OID for uint8 data type") {
t.Skip("pguint extension is not installed")
}
require.NoError(t, err)
}
metrics := []telegraf.Metric{
newMetric(t, "", nil, MSI{"a": uint64(1)}),
}
tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
assert.Equal(t, PgUint8, p.tableManager.table(t.Name()).columns["a"].Type)
}
func TestTableManagerIntegration_noCreateTable(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
p.CreateTemplates = nil
require.NoError(t, p.Connect())
metrics := []telegraf.Metric{
newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}),
}
tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
require.Error(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
}
func TestTableManagerIntegration_noCreateTagTable(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
p.TagTableCreateTemplates = nil
p.TagsAsForeignKeys = true
require.NoError(t, p.Connect())
metrics := []telegraf.Metric{
newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}),
}
tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
require.Error(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
}
// Verify that TableManager updates and caches the DB table structure unless the incoming metric can't fit.
func TestTableManagerIntegration_cache(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
p.TagsAsForeignKeys = true
require.NoError(t, p.Connect())
metrics := []telegraf.Metric{
newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}),
}
tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
}
// Verify that when alter statements are disabled and a metric comes in with a new tag key, the tag is omitted.
func TestTableManagerIntegration_noAlterMissingTag(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
p.AddColumnTemplates = []*sqltemplate.Template{}
require.NoError(t, p.Connect())
metrics := []telegraf.Metric{
newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}),
}
tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
metrics = []telegraf.Metric{
newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 2}),
newMetric(t, "", MSS{"tag": "foo", "bar": "baz"}, MSI{"a": 3}),
}
tsrc = NewTableSources(p.Postgresql, metrics)[t.Name()]
require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
assert.NotContains(t, tsrc.ColumnNames(), "bar")
}
// Verify that when tags are stored in a foreign table, alter statements are disabled, and a metric comes in with a
// new tag key, the tag is omitted.
func TestTableManagerIntegration_noAlterMissingTagTableTag(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
p.TagsAsForeignKeys = true
p.TagTableAddColumnTemplates = []*sqltemplate.Template{}
require.NoError(t, p.Connect())
metrics := []telegraf.Metric{
newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}),
}
tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
metrics = []telegraf.Metric{
newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 2}),
newMetric(t, "", MSS{"tag": "foo", "bar": "baz"}, MSI{"a": 3}),
}
tsrc = NewTableSources(p.Postgresql, metrics)[t.Name()]
ttsrc := NewTagTableSource(tsrc)
require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
assert.NotContains(t, ttsrc.ColumnNames(), "bar")
}
// Verify that when tags are stored in a foreign table, alter statements generate a permanent error, and a metric
// comes in with a new tag key, the tag is omitted.
func TestTableManagerIntegration_badAlterTagTable(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
p.TagsAsForeignKeys = true
tmpl := &sqltemplate.Template{}
_ = tmpl.UnmarshalText([]byte("bad"))
p.TagTableAddColumnTemplates = []*sqltemplate.Template{tmpl}
require.NoError(t, p.Connect())
metrics := []telegraf.Metric{
newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}),
}
tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
metrics = []telegraf.Metric{
newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 2}),
newMetric(t, "", MSS{"tag": "foo", "bar": "baz"}, MSI{"a": 3}),
}
tsrc = NewTableSources(p.Postgresql, metrics)[t.Name()]
ttsrc := NewTagTableSource(tsrc)
require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
assert.NotContains(t, ttsrc.ColumnNames(), "bar")
}
// Verify that when alter statements are disabled and a metric comes in with a new field key, the field is omitted.
func TestTableManagerIntegration_noAlterMissingField(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
p.AddColumnTemplates = []*sqltemplate.Template{}
require.NoError(t, p.Connect())
metrics := []telegraf.Metric{
newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}),
}
tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
metrics = []telegraf.Metric{
newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 2}),
newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 3, "b": 3}),
}
tsrc = NewTableSources(p.Postgresql, metrics)[t.Name()]
require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
assert.NotContains(t, tsrc.ColumnNames(), "b")
}
// Verify that when alter statements generate a permanent error and a metric comes in with a new field key, the
// field is omitted.
func TestTableManagerIntegration_badAlterField(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
tmpl := &sqltemplate.Template{}
_ = tmpl.UnmarshalText([]byte("bad"))
p.AddColumnTemplates = []*sqltemplate.Template{tmpl}
require.NoError(t, p.Connect())
metrics := []telegraf.Metric{
newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}),
}
tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
metrics = []telegraf.Metric{
newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 2}),
newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 3, "b": 3}),
}
tsrc = NewTableSources(p.Postgresql, metrics)[t.Name()]
require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
assert.NotContains(t, tsrc.ColumnNames(), "b")
}
func TestTableManager_addColumnTemplates(t *testing.T) {
p := newPostgresqlTest(t)
p.TagsAsForeignKeys = true
require.NoError(t, p.Connect())
metrics := []telegraf.Metric{
newMetric(t, "", MSS{"foo": "bar"}, MSI{"a": 1}),
}
tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
p = newPostgresqlTest(t)
p.TagsAsForeignKeys = true
tmpl := &sqltemplate.Template{}
require.NoError(t, tmpl.UnmarshalText([]byte(`-- addColumnTemplate: {{ . }}`)))
p.AddColumnTemplates = []*sqltemplate.Template{tmpl}
require.NoError(t, p.Connect())
metrics = []telegraf.Metric{
newMetric(t, "", MSS{"pop": "tart"}, MSI{"a": 1, "b": 2}),
}
tsrc = NewTableSources(p.Postgresql, metrics)[t.Name()]
require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
p.Logger.Info("ok")
expected := `CREATE TABLE "public"."TestTableManager_addColumnTemplates" ("time" timestamp without time zone, "tag_id" bigint, "a" bigint, "b" bigint)`
stmtCount := 0
for _, log := range p.Logger.Logs() {
if strings.Contains(log.String(), expected) {
stmtCount++
}
}
assert.Equal(t, 1, stmtCount)
}

View File

@ -0,0 +1,436 @@
package postgresql
import (
"fmt"
"hash/fnv"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/outputs/postgresql/utils"
)
type columnList struct {
columns []utils.Column
indices map[string]int
}
func newColumnList() *columnList {
return &columnList{
indices: map[string]int{},
}
}
func (cl *columnList) Add(column utils.Column) bool {
if _, ok := cl.indices[column.Name]; ok {
return false
}
cl.columns = append(cl.columns, column)
cl.indices[column.Name] = len(cl.columns) - 1
return true
}
func (cl *columnList) Remove(name string) bool {
idx, ok := cl.indices[name]
if !ok {
return false
}
cl.columns = append(cl.columns[:idx], cl.columns[idx+1:]...)
delete(cl.indices, name)
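// reindex the columns that shifted left after the removal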
for i, col := range cl.columns[idx:] {
cl.indices[col.Name] = idx + i
}
return true
}
// TableSource satisfies pgx.CopyFromSource
type TableSource struct {
postgresql *Postgresql
metrics []telegraf.Metric
cursor int
cursorValues []interface{}
cursorError error
// tagHashSalt is so that we can use a global tag cache for all tables. The salt is unique per table, and combined
// with the tag ID when looked up in the cache.
tagHashSalt int64
tagColumns *columnList
// tagSets maps tag IDs to the tag key/value sets in use within the TableSource.
// This data is used to build out the foreign tag table when enabled.
tagSets map[int64][]*telegraf.Tag
fieldColumns *columnList
droppedTagColumns []string
}
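// NewTableSources groups metrics by measurement name, returning one TableSource per target table.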
func NewTableSources(p *Postgresql, metrics []telegraf.Metric) map[string]*TableSource {
tableSources := map[string]*TableSource{}
for _, m := range metrics {
tsrc := tableSources[m.Name()]
if tsrc == nil {
tsrc = NewTableSource(p, m.Name())
tableSources[m.Name()] = tsrc
}
tsrc.AddMetric(m)
}
return tableSources
}
func NewTableSource(postgresql *Postgresql, name string) *TableSource {
h := fnv.New64a()
_, _ = h.Write([]byte(name))
tsrc := &TableSource{
postgresql: postgresql,
cursor: -1,
tagSets: make(map[int64][]*telegraf.Tag),
tagHashSalt: int64(h.Sum64()),
}
if !postgresql.TagsAsJsonb {
tsrc.tagColumns = newColumnList()
}
if !postgresql.FieldsAsJsonb {
tsrc.fieldColumns = newColumnList()
}
return tsrc
}
func (tsrc *TableSource) AddMetric(metric telegraf.Metric) {
if tsrc.postgresql.TagsAsForeignKeys {
tagID := utils.GetTagID(metric)
if _, ok := tsrc.tagSets[tagID]; !ok {
tsrc.tagSets[tagID] = metric.TagList()
}
}
if !tsrc.postgresql.TagsAsJsonb {
for _, t := range metric.TagList() {
tsrc.tagColumns.Add(tsrc.postgresql.columnFromTag(t.Key, t.Value))
}
}
if !tsrc.postgresql.FieldsAsJsonb {
for _, f := range metric.FieldList() {
tsrc.fieldColumns.Add(tsrc.postgresql.columnFromField(f.Key, f.Value))
}
}
tsrc.metrics = append(tsrc.metrics, metric)
}
func (tsrc *TableSource) Name() string {
if len(tsrc.metrics) == 0 {
return ""
}
return tsrc.metrics[0].Name()
}
// TagColumns returns the superset of all tags of all metrics.
func (tsrc *TableSource) TagColumns() []utils.Column {
var cols []utils.Column
if tsrc.postgresql.TagsAsJsonb {
cols = append(cols, tagsJSONColumn)
} else {
cols = append(cols, tsrc.tagColumns.columns...)
}
return cols
}
// FieldColumns returns the superset of all fields of all metrics.
func (tsrc *TableSource) FieldColumns() []utils.Column {
return tsrc.fieldColumns.columns
}
// MetricTableColumns returns the full column list, including time, tag ID or tags, and fields.
func (tsrc *TableSource) MetricTableColumns() []utils.Column {
cols := []utils.Column{
timeColumn,
}
if tsrc.postgresql.TagsAsForeignKeys {
cols = append(cols, tagIDColumn)
} else {
cols = append(cols, tsrc.TagColumns()...)
}
if tsrc.postgresql.FieldsAsJsonb {
cols = append(cols, fieldsJSONColumn)
} else {
cols = append(cols, tsrc.FieldColumns()...)
}
return cols
}
func (tsrc *TableSource) TagTableColumns() []utils.Column {
cols := []utils.Column{
tagIDColumn,
}
cols = append(cols, tsrc.TagColumns()...)
return cols
}
func (tsrc *TableSource) ColumnNames() []string {
cols := tsrc.MetricTableColumns()
names := make([]string, len(cols))
for i, col := range cols {
names[i] = col.Name
}
return names
}
// DropColumn drops the specified column.
// If the column is a tag column, any metrics containing the tag will be skipped.
// If the column is a field column, any metrics containing the field will have it omitted.
func (tsrc *TableSource) DropColumn(col utils.Column) error {
switch col.Role {
case utils.TagColType:
return tsrc.dropTagColumn(col)
case utils.FieldColType:
return tsrc.dropFieldColumn(col)
case utils.TimeColType, utils.TagsIDColType:
return fmt.Errorf("cannot drop critical column %q", col.Name)
default:
return fmt.Errorf("internal error: unknown role for column %q", col.Name)
}
}
// dropTagColumn drops the tag column from conversion. Any metrics containing this tag will be skipped.
func (tsrc *TableSource) dropTagColumn(col utils.Column) error {
if col.Role != utils.TagColType || tsrc.postgresql.TagsAsJsonb {
return fmt.Errorf("internal error: Tried to perform an invalid tag drop. measurement=%s tag=%s", tsrc.Name(), col.Name)
}
tsrc.droppedTagColumns = append(tsrc.droppedTagColumns, col.Name)
if !tsrc.tagColumns.Remove(col.Name) {
return nil
}
for setID, set := range tsrc.tagSets {
for _, tag := range set {
if tag.Key == col.Name {
// The tag is defined, so drop the whole set
delete(tsrc.tagSets, setID)
break
}
}
}
return nil
}
// dropFieldColumn drops the field column from conversion. Any metrics containing this field will have the field omitted.
func (tsrc *TableSource) dropFieldColumn(col utils.Column) error {
if col.Role != utils.FieldColType || tsrc.postgresql.FieldsAsJsonb {
return fmt.Errorf("internal error: Tried to perform an invalid field drop. measurement=%s field=%s", tsrc.Name(), col.Name)
}
tsrc.fieldColumns.Remove(col.Name)
return nil
}
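// Next advances the cursor to the next metric that can be emitted, skipping metrics whose values cannot be
// generated (e.g. because a tag was dropped). It also returns true when getValues errors, so that Values() can
// surface the error.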
func (tsrc *TableSource) Next() bool {
for {
if tsrc.cursor+1 >= len(tsrc.metrics) {
tsrc.cursorValues = nil
tsrc.cursorError = nil
return false
}
tsrc.cursor++
tsrc.cursorValues, tsrc.cursorError = tsrc.getValues()
if tsrc.cursorValues != nil || tsrc.cursorError != nil {
return true
}
}
}
func (tsrc *TableSource) Reset() {
tsrc.cursor = -1
}
// getValues calculates the values for the metric at the cursor position.
// If the metric cannot be emitted, such as due to dropped tags, or all fields dropped, the return value is nil.
func (tsrc *TableSource) getValues() ([]interface{}, error) {
metric := tsrc.metrics[tsrc.cursor]
values := []interface{}{
metric.Time().UTC(),
}
if !tsrc.postgresql.TagsAsForeignKeys {
if !tsrc.postgresql.TagsAsJsonb {
// tags_as_foreignkey=false, tags_as_json=false
tagValues := make([]interface{}, len(tsrc.tagColumns.columns))
for _, tag := range metric.TagList() {
tagPos, ok := tsrc.tagColumns.indices[tag.Key]
if !ok {
// tag has been dropped, we can't emit or we risk collision with another metric
return nil, nil
}
tagValues[tagPos] = tag.Value
}
values = append(values, tagValues...)
} else {
// tags_as_foreign_key=false, tags_as_json=true
values = append(values, utils.TagListToJSON(metric.TagList()))
}
} else {
// tags_as_foreignkey=true
tagID := utils.GetTagID(metric)
if tsrc.postgresql.ForeignTagConstraint {
if _, ok := tsrc.tagSets[tagID]; !ok {
// tag has been dropped
return nil, nil
}
}
values = append(values, tagID)
}
if !tsrc.postgresql.FieldsAsJsonb {
// fields_as_json=false
fieldValues := make([]interface{}, len(tsrc.fieldColumns.columns))
fieldsEmpty := true
for _, field := range metric.FieldList() {
// we might have dropped the field due to the table missing the column & schema updates being turned off
if fPos, ok := tsrc.fieldColumns.indices[field.Key]; ok {
fieldValues[fPos] = field.Value
fieldsEmpty = false
}
}
if fieldsEmpty {
// all fields have been dropped. Don't emit a metric with just tags and no fields.
return nil, nil
}
values = append(values, fieldValues...)
} else {
// fields_as_json=true
value, err := utils.FieldListToJSON(metric.FieldList())
if err != nil {
return nil, err
}
values = append(values, value)
}
return values, nil
}
func (tsrc *TableSource) Values() ([]interface{}, error) {
return tsrc.cursorValues, tsrc.cursorError
}
func (tsrc *TableSource) Err() error {
return nil
}
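// Illustrative sketch (not part of this commit): because TableSource implements pgx.CopyFromSource, it is
// consumed via COPY, along the lines of:
//
//	tsrc := NewTableSources(p, metrics)["cpu"]
//	rowCount, err := conn.CopyFrom(ctx, pgx.Identifier{"public", tsrc.Name()}, tsrc.ColumnNames(), tsrc)
//
// where conn is any pgx connection; the plugin's actual write path may differ.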
type TagTableSource struct {
*TableSource
tagIDs []int64
cursor int
cursorValues []interface{}
cursorError error
}
func NewTagTableSource(tsrc *TableSource) *TagTableSource {
ttsrc := &TagTableSource{
TableSource: tsrc,
cursor: -1,
}
ttsrc.tagIDs = make([]int64, 0, len(tsrc.tagSets))
for tagID := range tsrc.tagSets {
ttsrc.tagIDs = append(ttsrc.tagIDs, tagID)
}
return ttsrc
}
func (ttsrc *TagTableSource) Name() string {
return ttsrc.TableSource.Name() + ttsrc.postgresql.TagTableSuffix
}
func (ttsrc *TagTableSource) cacheCheck(tagID int64) bool {
// Adding the two hashes (table salt + tag ID) is good enough. It's not a perfect solution, but given that we're
// operating in an int64 space, the risk of collision is extremely small.
key := ttsrc.tagHashSalt + tagID
_, err := ttsrc.postgresql.tagsCache.GetInt(key)
return err == nil
}
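// cacheTouch records the tag ID as present in the tag table. A zero TTL means the entry never expires; freecache
// evicts old entries on its own when full.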
func (ttsrc *TagTableSource) cacheTouch(tagID int64) {
key := ttsrc.tagHashSalt + tagID
_ = ttsrc.postgresql.tagsCache.SetInt(key, nil, 0)
}
func (ttsrc *TagTableSource) ColumnNames() []string {
cols := ttsrc.TagTableColumns()
names := make([]string, len(cols))
for i, col := range cols {
names[i] = col.Name
}
return names
}
func (ttsrc *TagTableSource) Next() bool {
for {
if ttsrc.cursor+1 >= len(ttsrc.tagIDs) {
ttsrc.cursorValues = nil
return false
}
ttsrc.cursor++
if ttsrc.cacheCheck(ttsrc.tagIDs[ttsrc.cursor]) {
// tag ID already inserted
continue
}
ttsrc.cursorValues = ttsrc.getValues()
if ttsrc.cursorValues != nil {
return true
}
}
}
func (ttsrc *TagTableSource) Reset() {
ttsrc.cursor = -1
}
func (ttsrc *TagTableSource) getValues() []interface{} {
tagID := ttsrc.tagIDs[ttsrc.cursor]
tagSet := ttsrc.tagSets[tagID]
var values []interface{}
if !ttsrc.postgresql.TagsAsJsonb {
values = make([]interface{}, len(ttsrc.TableSource.tagColumns.indices)+1)
for _, tag := range tagSet {
values[ttsrc.TableSource.tagColumns.indices[tag.Key]+1] = tag.Value // +1 to account for tag_id column
}
} else {
values = make([]interface{}, 2)
values[1] = utils.TagListToJSON(tagSet)
}
values[0] = tagID
return values
}
func (ttsrc *TagTableSource) Values() ([]interface{}, error) {
return ttsrc.cursorValues, ttsrc.cursorError
}
func (ttsrc *TagTableSource) UpdateCache() {
for _, tagID := range ttsrc.tagIDs {
ttsrc.cacheTouch(tagID)
}
}
func (ttsrc *TagTableSource) Err() error {
return nil
}

View File

@ -0,0 +1,317 @@
package postgresql
import (
"encoding/json"
"testing"
"time"
"github.com/coocood/freecache"
"github.com/jackc/pgx/v4"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/outputs/postgresql/utils"
)
func TestTableSource(t *testing.T) {
}
type source interface {
pgx.CopyFromSource
ColumnNames() []string
}
func nextSrcRow(src source) MSI {
if !src.Next() {
return nil
}
row := MSI{}
vals, err := src.Values()
if err != nil {
panic(err)
}
for i, name := range src.ColumnNames() {
row[name] = vals[i]
}
return row
}
func TestTableSourceIntegration_tagJSONB(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
p.TagsAsJsonb = true
metrics := []telegraf.Metric{
newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}),
}
tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
row := nextSrcRow(tsrc)
require.NoError(t, tsrc.Err())
assert.IsType(t, time.Time{}, row["time"])
var tags MSI
require.NoError(t, json.Unmarshal(row["tags"].([]byte), &tags))
assert.EqualValues(t, MSI{"a": "one", "b": "two"}, tags)
assert.EqualValues(t, 1, row["v"])
}
func TestTableSourceIntegration_tagTable(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
p.TagsAsForeignKeys = true
p.tagsCache = freecache.NewCache(5 * 1024 * 1024)
metrics := []telegraf.Metric{
newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}),
}
tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
ttsrc := NewTagTableSource(tsrc)
ttrow := nextSrcRow(ttsrc)
assert.EqualValues(t, "one", ttrow["a"])
assert.EqualValues(t, "two", ttrow["b"])
row := nextSrcRow(tsrc)
assert.Equal(t, row["tag_id"], ttrow["tag_id"])
}
func TestTableSourceIntegration_tagTableJSONB(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
p.TagsAsForeignKeys = true
p.TagsAsJsonb = true
p.tagsCache = freecache.NewCache(5 * 1024 * 1024)
metrics := []telegraf.Metric{
newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}),
}
tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
ttsrc := NewTagTableSource(tsrc)
ttrow := nextSrcRow(ttsrc)
var tags MSI
require.NoError(t, json.Unmarshal(ttrow["tags"].([]byte), &tags))
assert.EqualValues(t, MSI{"a": "one", "b": "two"}, tags)
}
func TestTableSourceIntegration_fieldsJSONB(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
p.FieldsAsJsonb = true
metrics := []telegraf.Metric{
newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1, "b": 2}),
}
tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
row := nextSrcRow(tsrc)
var fields MSI
require.NoError(t, json.Unmarshal(row["fields"].([]byte), &fields))
// encoding/json unmarshals numbers into interface{} as float64
assert.EqualValues(t, MSI{"a": 1.0, "b": 2.0}, fields)
}
// TagsAsForeignKeys=false
// Test that when a tag column is dropped, all metrics containing that tag are dropped.
func TestTableSourceIntegration_DropColumn_tag(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
metrics := []telegraf.Metric{
newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}),
newMetric(t, "", MSS{"a": "one"}, MSI{"v": 2}),
}
tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
// Drop column "b"
var col utils.Column
for _, c := range tsrc.TagColumns() {
if c.Name == "b" {
col = c
break
}
}
_ = tsrc.DropColumn(col)
row := nextSrcRow(tsrc)
assert.EqualValues(t, "one", row["a"])
assert.EqualValues(t, 2, row["v"])
assert.False(t, tsrc.Next())
}
// TagsAsForeignKeys=true, ForeignTagConstraint=true
// Test that when a tag column is dropped, all metrics containing that tag are dropped.
func TestTableSourceIntegration_DropColumn_tag_fkTrue_fcTrue(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
p.TagsAsForeignKeys = true
p.ForeignTagConstraint = true
p.tagsCache = freecache.NewCache(5 * 1024 * 1024)
metrics := []telegraf.Metric{
newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}),
newMetric(t, "", MSS{"a": "one"}, MSI{"v": 2}),
}
tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
// Drop column "b"
var col utils.Column
for _, c := range tsrc.TagColumns() {
if c.Name == "b" {
col = c
break
}
}
_ = tsrc.DropColumn(col)
ttsrc := NewTagTableSource(tsrc)
row := nextSrcRow(ttsrc)
assert.EqualValues(t, "one", row["a"])
assert.False(t, ttsrc.Next())
row = nextSrcRow(tsrc)
assert.EqualValues(t, 2, row["v"])
assert.False(t, tsrc.Next())
}
// TagsAsForeignKeys=true, ForeignTagConstraint=false
// Test that when a tag column is dropped, metrics are still added while the tag is not.
func TestTableSourceIntegration_DropColumn_tag_fkTrue_fcFalse(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
p.TagsAsForeignKeys = true
p.ForeignTagConstraint = false
p.tagsCache = freecache.NewCache(5 * 1024 * 1024)
metrics := []telegraf.Metric{
newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}),
newMetric(t, "", MSS{"a": "one"}, MSI{"v": 2}),
}
tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
// Drop column "b"
var col utils.Column
for _, c := range tsrc.TagColumns() {
if c.Name == "b" {
col = c
break
}
}
_ = tsrc.DropColumn(col)
ttsrc := NewTagTableSource(tsrc)
row := nextSrcRow(ttsrc)
assert.EqualValues(t, "one", row["a"])
assert.False(t, ttsrc.Next())
row = nextSrcRow(tsrc)
assert.EqualValues(t, 1, row["v"])
row = nextSrcRow(tsrc)
assert.EqualValues(t, 2, row["v"])
}
// Test that when a field is dropped, only the field is dropped, and all rows remain, unless it was the only field.
func TestTableSourceIntegration_DropColumn_field(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
metrics := []telegraf.Metric{
newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}),
newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 2, "b": 3}),
}
tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
// Drop column "a"
var col utils.Column
for _, c := range tsrc.FieldColumns() {
if c.Name == "a" {
col = c
break
}
}
_ = tsrc.DropColumn(col)
row := nextSrcRow(tsrc)
assert.EqualValues(t, "foo", row["tag"])
assert.EqualValues(t, 3, row["b"])
assert.False(t, tsrc.Next())
}
func TestTableSourceIntegration_InconsistentTags(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
metrics := []telegraf.Metric{
newMetric(t, "", MSS{"a": "1"}, MSI{"b": 2}),
newMetric(t, "", MSS{"c": "3"}, MSI{"d": 4}),
}
tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
trow := nextSrcRow(tsrc)
assert.EqualValues(t, "1", trow["a"])
assert.EqualValues(t, nil, trow["c"])
trow = nextSrcRow(tsrc)
assert.EqualValues(t, nil, trow["a"])
assert.EqualValues(t, "3", trow["c"])
}
func TestTagTableSourceIntegration_InconsistentTags(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
p := newPostgresqlTest(t)
p.TagsAsForeignKeys = true
p.tagsCache = freecache.NewCache(5 * 1024 * 1024)
metrics := []telegraf.Metric{
newMetric(t, "", MSS{"a": "1"}, MSI{"b": 2}),
newMetric(t, "", MSS{"c": "3"}, MSI{"d": 4}),
}
tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()]
ttsrc := NewTagTableSource(tsrc)
// ttsrc is in non-deterministic order
expected := []MSI{
{"a": "1", "c": nil},
{"a": nil, "c": "3"},
}
var actual []MSI
for row := nextSrcRow(ttsrc); row != nil; row = nextSrcRow(ttsrc) {
delete(row, "tag_id")
actual = append(actual, row)
}
assert.ElementsMatch(t, expected, actual)
}

View File

@ -0,0 +1,53 @@
package utils
// This is split out from the 'postgresql' package as it's depended upon by both the 'postgresql' and
// 'postgresql/sqltemplate' packages.
import (
"sort"
"strings"
)
// ColumnRole specifies the role of a column in a metric.
// It helps map the columns to the DB.
type ColumnRole int
const (
TimeColType ColumnRole = iota + 1
TagsIDColType
TagColType
FieldColType
)
type Column struct {
Name string
// Type is the data type the column should have in the DB. Used when checking
// whether the schema matches or needs updates.
Type string
// Role is the role the column plays, which helps properly map the metric to the DB.
Role ColumnRole
}
// ColumnList implements sort.Interface.
// Columns are sorted first into groups of time,tag_id,tags,fields, and then alphabetically within
// each group.
type ColumnList []Column
func (cl ColumnList) Len() int {
return len(cl)
}
func (cl ColumnList) Less(i, j int) bool {
if cl[i].Role != cl[j].Role {
return cl[i].Role < cl[j].Role
}
return strings.ToLower(cl[i].Name) < strings.ToLower(cl[j].Name)
}
func (cl ColumnList) Swap(i, j int) {
cl[i], cl[j] = cl[j], cl[i]
}
func (cl ColumnList) Sort() {
sort.Sort(cl)
}
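// Illustrative sketch (not part of this commit): sorting groups columns by role first, then alphabetically
// within each group:
//
//	cols := utils.ColumnList{
//		{Name: "b", Role: utils.FieldColType},
//		{Name: "host", Role: utils.TagColType},
//		{Name: "time", Role: utils.TimeColType},
//	}
//	cols.Sort() // => time, host, b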

View File

@ -0,0 +1,116 @@
package utils
import (
"context"
"encoding/json"
"hash/fnv"
"strings"
"sync/atomic"
"github.com/jackc/pgx/v4"
"github.com/influxdata/telegraf"
)
func TagListToJSON(tagList []*telegraf.Tag) []byte {
tags := make(map[string]string, len(tagList))
for _, tag := range tagList {
tags[tag.Key] = tag.Value
}
bs, _ := json.Marshal(tags)
return bs
}
func FieldListToJSON(fieldList []*telegraf.Field) ([]byte, error) {
fields := make(map[string]interface{}, len(fieldList))
for _, field := range fieldList {
fields[field.Key] = field.Value
}
return json.Marshal(fields)
}
// QuoteIdentifier returns a sanitized string safe to use in SQL as an identifier
func QuoteIdentifier(name string) string {
return pgx.Identifier{name}.Sanitize()
}
// QuoteLiteral returns a sanitized string safe to use in sql as a string literal
func QuoteLiteral(name string) string {
return "'" + strings.Replace(name, "'", "''", -1) + "'"
}
// FullTableName returns a sanitized table name with its schema (if supplied)
func FullTableName(schema, name string) pgx.Identifier {
if schema != "" {
return pgx.Identifier{schema, name}
}
return pgx.Identifier{name}
}
// PGXLogger makes telegraf.Logger compatible with pgx.Logger
type PGXLogger struct {
telegraf.Logger
}
func (l PGXLogger) Log(_ context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) {
switch level {
case pgx.LogLevelError:
l.Errorf("PG %s - %+v", msg, data)
case pgx.LogLevelWarn:
l.Warnf("PG %s - %+v", msg, data)
case pgx.LogLevelInfo, pgx.LogLevelNone:
l.Infof("PG %s - %+v", msg, data)
case pgx.LogLevelDebug, pgx.LogLevelTrace:
l.Debugf("PG %s - %+v", msg, data)
default:
l.Debugf("PG %s - %+v", msg, data)
}
}
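// GetTagID returns a deterministic ID for the metric's tag set by hashing each tag key and value with FNV-1a.
// The zero-byte separators keep key/value boundaries from colliding (e.g. tag "ab"="c" vs "a"="bc").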
func GetTagID(metric telegraf.Metric) int64 {
hash := fnv.New64a()
for _, tag := range metric.TagList() {
_, _ = hash.Write([]byte(tag.Key))
_, _ = hash.Write([]byte{0})
_, _ = hash.Write([]byte(tag.Value))
_, _ = hash.Write([]byte{0})
}
// Convert to int64 as postgres does not support uint64
return int64(hash.Sum64())
}
// WaitGroup is similar to sync.WaitGroup, but allows interruptible waiting (e.g. with a timeout).
type WaitGroup struct {
count int32
done chan struct{}
}
func NewWaitGroup() *WaitGroup {
return &WaitGroup{
done: make(chan struct{}),
}
}
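// Add increments the wait count. It panics if the WaitGroup has already been released (i.e. the count previously
// reached zero and closed the done channel).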
func (wg *WaitGroup) Add(i int32) {
select {
case <-wg.done:
panic("use of an already-done WaitGroup")
default:
}
atomic.AddInt32(&wg.count, i)
}
func (wg *WaitGroup) Done() {
i := atomic.AddInt32(&wg.count, -1)
if i == 0 {
close(wg.done)
}
if i < 0 {
panic("too many Done() calls")
}
}
func (wg *WaitGroup) C() <-chan struct{} {
return wg.done
}
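// Illustrative sketch (not part of this commit): waiting with a timeout, the kind of interruptible wait that a
// plain sync.WaitGroup cannot do. doWork is a hypothetical placeholder and "time" is assumed imported:
//
//	wg := utils.NewWaitGroup()
//	wg.Add(1)
//	go func() { defer wg.Done(); doWork() }()
//	select {
//	case <-wg.C():
//		// all work finished
//	case <-time.After(5 * time.Second):
//		// timed out; the worker goroutine is still running
//	}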