chore(inputs): Fix line-length in READMEs for `t` to `z` (#16485)

Parent: cf599121e5
Commit: c38792949d

@@ -66,7 +66,8 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.

- http_503 (integer, total number of 503 requests)
- http_504 (integer, total number of 504 requests)
- http_508 (integer, total number of 508 requests)
- http_other_detail_status (integer, total number of requests of other
  status codes)
- http_ups_4xx (integer, total number of requests of upstream 4xx)
- http_ups_5xx (integer, total number of requests of upstream 5xx)

## Example Output

@@ -64,299 +64,299 @@ their capitalized prefix (eg MAIN, MEMPOOL, etc). In the output, the prefix will

be used as a tag, and removed from field names.

- varnish
  - MAIN.uptime (uint64, count, Child process uptime)
  - MAIN.sess_conn (uint64, count, Sessions accepted)
  - MAIN.sess_drop (uint64, count, Sessions dropped)
  - MAIN.sess_fail (uint64, count, Session accept failures)
  - MAIN.sess_pipe_overflow (uint64, count, Session pipe overflow)
  - MAIN.client_req_400 (uint64, count, Client requests received,)
  - MAIN.client_req_411 (uint64, count, Client requests received,)
  - MAIN.client_req_413 (uint64, count, Client requests received,)
  - MAIN.client_req_417 (uint64, count, Client requests received,)
  - MAIN.client_req (uint64, count, Good client requests)
  - MAIN.cache_hit (uint64, count, Cache hits)
  - MAIN.cache_hitpass (uint64, count, Cache hits for)
  - MAIN.cache_miss (uint64, count, Cache misses)
  - MAIN.backend_conn (uint64, count, Backend conn. success)
  - MAIN.backend_unhealthy (uint64, count, Backend conn. not)
  - MAIN.backend_busy (uint64, count, Backend conn. too)
  - MAIN.backend_fail (uint64, count, Backend conn. failures)
  - MAIN.backend_reuse (uint64, count, Backend conn. reuses)
  - MAIN.backend_toolate (uint64, count, Backend conn. was)
  - MAIN.backend_recycle (uint64, count, Backend conn. recycles)
  - MAIN.backend_retry (uint64, count, Backend conn. retry)
  - MAIN.fetch_head (uint64, count, Fetch no body)
  - MAIN.fetch_length (uint64, count, Fetch with Length)
  - MAIN.fetch_chunked (uint64, count, Fetch chunked)
  - MAIN.fetch_eof (uint64, count, Fetch EOF)
  - MAIN.fetch_bad (uint64, count, Fetch bad T- E)
  - MAIN.fetch_close (uint64, count, Fetch wanted close)
  - MAIN.fetch_oldhttp (uint64, count, Fetch pre HTTP/1.1)
  - MAIN.fetch_zero (uint64, count, Fetch zero len)
  - MAIN.fetch_1xx (uint64, count, Fetch no body)
  - MAIN.fetch_204 (uint64, count, Fetch no body)
  - MAIN.fetch_304 (uint64, count, Fetch no body)
  - MAIN.fetch_failed (uint64, count, Fetch failed (all)
  - MAIN.fetch_no_thread (uint64, count, Fetch failed (no)
  - MAIN.pools (uint64, count, Number of thread)
  - MAIN.threads (uint64, count, Total number of)
  - MAIN.threads_limited (uint64, count, Threads hit max)
  - MAIN.threads_created (uint64, count, Threads created)
  - MAIN.threads_destroyed (uint64, count, Threads destroyed)
  - MAIN.threads_failed (uint64, count, Thread creation failed)
  - MAIN.thread_queue_len (uint64, count, Length of session)
  - MAIN.busy_sleep (uint64, count, Number of requests)
  - MAIN.busy_wakeup (uint64, count, Number of requests)
  - MAIN.sess_queued (uint64, count, Sessions queued for)
  - MAIN.sess_dropped (uint64, count, Sessions dropped for)
  - MAIN.n_object (uint64, count, object structs made)
  - MAIN.n_vampireobject (uint64, count, unresurrected objects)
  - MAIN.n_objectcore (uint64, count, objectcore structs made)
  - MAIN.n_objecthead (uint64, count, objecthead structs made)
  - MAIN.n_waitinglist (uint64, count, waitinglist structs made)
  - MAIN.n_backend (uint64, count, Number of backends)
  - MAIN.n_expired (uint64, count, Number of expired)
  - MAIN.n_lru_nuked (uint64, count, Number of LRU)
  - MAIN.n_lru_moved (uint64, count, Number of LRU)
  - MAIN.losthdr (uint64, count, HTTP header overflows)
  - MAIN.s_sess (uint64, count, Total sessions seen)
  - MAIN.s_req (uint64, count, Total requests seen)
  - MAIN.s_pipe (uint64, count, Total pipe sessions)
  - MAIN.s_pass (uint64, count, Total pass- ed requests)
  - MAIN.s_fetch (uint64, count, Total backend fetches)
  - MAIN.s_synth (uint64, count, Total synthetic responses)
  - MAIN.s_req_hdrbytes (uint64, count, Request header bytes)
  - MAIN.s_req_bodybytes (uint64, count, Request body bytes)
  - MAIN.s_resp_hdrbytes (uint64, count, Response header bytes)
  - MAIN.s_resp_bodybytes (uint64, count, Response body bytes)
  - MAIN.s_pipe_hdrbytes (uint64, count, Pipe request header)
  - MAIN.s_pipe_in (uint64, count, Piped bytes from)
  - MAIN.s_pipe_out (uint64, count, Piped bytes to)
  - MAIN.sess_closed (uint64, count, Session Closed)
  - MAIN.sess_pipeline (uint64, count, Session Pipeline)
  - MAIN.sess_readahead (uint64, count, Session Read Ahead)
  - MAIN.sess_herd (uint64, count, Session herd)
  - MAIN.shm_records (uint64, count, SHM records)
  - MAIN.shm_writes (uint64, count, SHM writes)
  - MAIN.shm_flushes (uint64, count, SHM flushes due)
  - MAIN.shm_cont (uint64, count, SHM MTX contention)
  - MAIN.shm_cycles (uint64, count, SHM cycles through)
  - MAIN.sms_nreq (uint64, count, SMS allocator requests)
  - MAIN.sms_nobj (uint64, count, SMS outstanding allocations)
  - MAIN.sms_nbytes (uint64, count, SMS outstanding bytes)
  - MAIN.sms_balloc (uint64, count, SMS bytes allocated)
  - MAIN.sms_bfree (uint64, count, SMS bytes freed)
  - MAIN.backend_req (uint64, count, Backend requests made)
  - MAIN.n_vcl (uint64, count, Number of loaded)
  - MAIN.n_vcl_avail (uint64, count, Number of VCLs)
  - MAIN.n_vcl_discard (uint64, count, Number of discarded)
  - MAIN.bans (uint64, count, Count of bans)
  - MAIN.bans_completed (uint64, count, Number of bans)
  - MAIN.bans_obj (uint64, count, Number of bans)
  - MAIN.bans_req (uint64, count, Number of bans)
  - MAIN.bans_added (uint64, count, Bans added)
  - MAIN.bans_deleted (uint64, count, Bans deleted)
  - MAIN.bans_tested (uint64, count, Bans tested against)
  - MAIN.bans_obj_killed (uint64, count, Objects killed by)
  - MAIN.bans_lurker_tested (uint64, count, Bans tested against)
  - MAIN.bans_tests_tested (uint64, count, Ban tests tested)
  - MAIN.bans_lurker_tests_tested (uint64, count, Ban tests tested)
  - MAIN.bans_lurker_obj_killed (uint64, count, Objects killed by)
  - MAIN.bans_dups (uint64, count, Bans superseded by)
  - MAIN.bans_lurker_contention (uint64, count, Lurker gave way)
  - MAIN.bans_persisted_bytes (uint64, count, Bytes used by)
  - MAIN.bans_persisted_fragmentation (uint64, count, Extra bytes in)
  - MAIN.n_purges (uint64, count, Number of purge)
  - MAIN.n_obj_purged (uint64, count, Number of purged)
  - MAIN.exp_mailed (uint64, count, Number of objects)
  - MAIN.exp_received (uint64, count, Number of objects)
  - MAIN.hcb_nolock (uint64, count, HCB Lookups without)
  - MAIN.hcb_lock (uint64, count, HCB Lookups with)
  - MAIN.hcb_insert (uint64, count, HCB Inserts)
  - MAIN.esi_errors (uint64, count, ESI parse errors)
  - MAIN.esi_warnings (uint64, count, ESI parse warnings)
  - MAIN.vmods (uint64, count, Loaded VMODs)
  - MAIN.n_gzip (uint64, count, Gzip operations)
  - MAIN.n_gunzip (uint64, count, Gunzip operations)
  - MAIN.vsm_free (uint64, count, Free VSM space)
  - MAIN.vsm_used (uint64, count, Used VSM space)
  - MAIN.vsm_cooling (uint64, count, Cooling VSM space)
  - MAIN.vsm_overflow (uint64, count, Overflow VSM space)
  - MAIN.vsm_overflowed (uint64, count, Overflowed VSM space)
  - MGT.uptime (uint64, count, Management process uptime)
  - MGT.child_start (uint64, count, Child process started)
  - MGT.child_exit (uint64, count, Child process normal)
  - MGT.child_stop (uint64, count, Child process unexpected)
  - MGT.child_died (uint64, count, Child process died)
  - MGT.child_dump (uint64, count, Child process core)
  - MGT.child_panic (uint64, count, Child process panic)
  - MEMPOOL.vbc.live (uint64, count, In use)
  - MEMPOOL.vbc.pool (uint64, count, In Pool)
  - MEMPOOL.vbc.sz_wanted (uint64, count, Size requested)
  - MEMPOOL.vbc.sz_needed (uint64, count, Size allocated)
  - MEMPOOL.vbc.allocs (uint64, count, Allocations)
  - MEMPOOL.vbc.frees (uint64, count, Frees)
  - MEMPOOL.vbc.recycle (uint64, count, Recycled from pool)
  - MEMPOOL.vbc.timeout (uint64, count, Timed out from)
  - MEMPOOL.vbc.toosmall (uint64, count, Too small to)
  - MEMPOOL.vbc.surplus (uint64, count, Too many for)
  - MEMPOOL.vbc.randry (uint64, count, Pool ran dry)
  - MEMPOOL.busyobj.live (uint64, count, In use)
  - MEMPOOL.busyobj.pool (uint64, count, In Pool)
  - MEMPOOL.busyobj.sz_wanted (uint64, count, Size requested)
  - MEMPOOL.busyobj.sz_needed (uint64, count, Size allocated)
  - MEMPOOL.busyobj.allocs (uint64, count, Allocations)
  - MEMPOOL.busyobj.frees (uint64, count, Frees)
  - MEMPOOL.busyobj.recycle (uint64, count, Recycled from pool)
  - MEMPOOL.busyobj.timeout (uint64, count, Timed out from)
  - MEMPOOL.busyobj.toosmall (uint64, count, Too small to)
  - MEMPOOL.busyobj.surplus (uint64, count, Too many for)
  - MEMPOOL.busyobj.randry (uint64, count, Pool ran dry)
  - MEMPOOL.req0.live (uint64, count, In use)
  - MEMPOOL.req0.pool (uint64, count, In Pool)
  - MEMPOOL.req0.sz_wanted (uint64, count, Size requested)
  - MEMPOOL.req0.sz_needed (uint64, count, Size allocated)
  - MEMPOOL.req0.allocs (uint64, count, Allocations)
  - MEMPOOL.req0.frees (uint64, count, Frees)
  - MEMPOOL.req0.recycle (uint64, count, Recycled from pool)
  - MEMPOOL.req0.timeout (uint64, count, Timed out from)
  - MEMPOOL.req0.toosmall (uint64, count, Too small to)
  - MEMPOOL.req0.surplus (uint64, count, Too many for)
  - MEMPOOL.req0.randry (uint64, count, Pool ran dry)
  - MEMPOOL.sess0.live (uint64, count, In use)
  - MEMPOOL.sess0.pool (uint64, count, In Pool)
  - MEMPOOL.sess0.sz_wanted (uint64, count, Size requested)
  - MEMPOOL.sess0.sz_needed (uint64, count, Size allocated)
  - MEMPOOL.sess0.allocs (uint64, count, Allocations)
  - MEMPOOL.sess0.frees (uint64, count, Frees)
  - MEMPOOL.sess0.recycle (uint64, count, Recycled from pool)
  - MEMPOOL.sess0.timeout (uint64, count, Timed out from)
  - MEMPOOL.sess0.toosmall (uint64, count, Too small to)
  - MEMPOOL.sess0.surplus (uint64, count, Too many for)
  - MEMPOOL.sess0.randry (uint64, count, Pool ran dry)
  - MEMPOOL.req1.live (uint64, count, In use)
  - MEMPOOL.req1.pool (uint64, count, In Pool)
  - MEMPOOL.req1.sz_wanted (uint64, count, Size requested)
  - MEMPOOL.req1.sz_needed (uint64, count, Size allocated)
  - MEMPOOL.req1.allocs (uint64, count, Allocations)
  - MEMPOOL.req1.frees (uint64, count, Frees)
  - MEMPOOL.req1.recycle (uint64, count, Recycled from pool)
  - MEMPOOL.req1.timeout (uint64, count, Timed out from)
  - MEMPOOL.req1.toosmall (uint64, count, Too small to)
  - MEMPOOL.req1.surplus (uint64, count, Too many for)
  - MEMPOOL.req1.randry (uint64, count, Pool ran dry)
  - MEMPOOL.sess1.live (uint64, count, In use)
  - MEMPOOL.sess1.pool (uint64, count, In Pool)
  - MEMPOOL.sess1.sz_wanted (uint64, count, Size requested)
  - MEMPOOL.sess1.sz_needed (uint64, count, Size allocated)
  - MEMPOOL.sess1.allocs (uint64, count, Allocations)
  - MEMPOOL.sess1.frees (uint64, count, Frees)
  - MEMPOOL.sess1.recycle (uint64, count, Recycled from pool)
  - MEMPOOL.sess1.timeout (uint64, count, Timed out from)
  - MEMPOOL.sess1.toosmall (uint64, count, Too small to)
  - MEMPOOL.sess1.surplus (uint64, count, Too many for)
  - MEMPOOL.sess1.randry (uint64, count, Pool ran dry)
  - SMA.s0.c_req (uint64, count, Allocator requests)
  - SMA.s0.c_fail (uint64, count, Allocator failures)
  - SMA.s0.c_bytes (uint64, count, Bytes allocated)
  - SMA.s0.c_freed (uint64, count, Bytes freed)
  - SMA.s0.g_alloc (uint64, count, Allocations outstanding)
  - SMA.s0.g_bytes (uint64, count, Bytes outstanding)
  - SMA.s0.g_space (uint64, count, Bytes available)
  - SMA.Transient.c_req (uint64, count, Allocator requests)
  - SMA.Transient.c_fail (uint64, count, Allocator failures)
  - SMA.Transient.c_bytes (uint64, count, Bytes allocated)
  - SMA.Transient.c_freed (uint64, count, Bytes freed)
  - SMA.Transient.g_alloc (uint64, count, Allocations outstanding)
  - SMA.Transient.g_bytes (uint64, count, Bytes outstanding)
  - SMA.Transient.g_space (uint64, count, Bytes available)
  - VBE.default(127.0.0.1,,8080).vcls (uint64, count, VCL references)
  - VBE.default(127.0.0.1,,8080).happy (uint64, count, Happy health probes)
  - VBE.default(127.0.0.1,,8080).bereq_hdrbytes (uint64, count, Req. header bytes)
  - VBE.default(127.0.0.1,,8080).bereq_bodybytes (uint64, count, Request body bytes)
  - VBE.default(127.0.0.1,,8080).beresp_hdrbytes (uint64, count, Resp. header bytes)
  - VBE.default(127.0.0.1,,8080).beresp_bodybytes (uint64, count, Response body bytes)
  - VBE.default(127.0.0.1,,8080).pipe_hdrbytes (uint64, count, Pipe request header)
  - VBE.default(127.0.0.1,,8080).pipe_out (uint64, count, Piped bytes to)
  - VBE.default(127.0.0.1,,8080).pipe_in (uint64, count, Piped bytes from)
  - LCK.sms.creat (uint64, count, Created locks)
  - LCK.sms.destroy (uint64, count, Destroyed locks)
  - LCK.sms.locks (uint64, count, Lock Operations)
  - LCK.smp.creat (uint64, count, Created locks)
  - LCK.smp.destroy (uint64, count, Destroyed locks)
  - LCK.smp.locks (uint64, count, Lock Operations)
  - LCK.sma.creat (uint64, count, Created locks)
  - LCK.sma.destroy (uint64, count, Destroyed locks)
  - LCK.sma.locks (uint64, count, Lock Operations)
  - LCK.smf.creat (uint64, count, Created locks)
  - LCK.smf.destroy (uint64, count, Destroyed locks)
  - LCK.smf.locks (uint64, count, Lock Operations)
  - LCK.hsl.creat (uint64, count, Created locks)
  - LCK.hsl.destroy (uint64, count, Destroyed locks)
  - LCK.hsl.locks (uint64, count, Lock Operations)
  - LCK.hcb.creat (uint64, count, Created locks)
  - LCK.hcb.destroy (uint64, count, Destroyed locks)
  - LCK.hcb.locks (uint64, count, Lock Operations)
  - LCK.hcl.creat (uint64, count, Created locks)
  - LCK.hcl.destroy (uint64, count, Destroyed locks)
  - LCK.hcl.locks (uint64, count, Lock Operations)
  - LCK.vcl.creat (uint64, count, Created locks)
  - LCK.vcl.destroy (uint64, count, Destroyed locks)
  - LCK.vcl.locks (uint64, count, Lock Operations)
  - LCK.sessmem.creat (uint64, count, Created locks)
  - LCK.sessmem.destroy (uint64, count, Destroyed locks)
  - LCK.sessmem.locks (uint64, count, Lock Operations)
  - LCK.sess.creat (uint64, count, Created locks)
  - LCK.sess.destroy (uint64, count, Destroyed locks)
  - LCK.sess.locks (uint64, count, Lock Operations)
  - LCK.wstat.creat (uint64, count, Created locks)
  - LCK.wstat.destroy (uint64, count, Destroyed locks)
  - LCK.wstat.locks (uint64, count, Lock Operations)
  - LCK.herder.creat (uint64, count, Created locks)
  - LCK.herder.destroy (uint64, count, Destroyed locks)
  - LCK.herder.locks (uint64, count, Lock Operations)
  - LCK.wq.creat (uint64, count, Created locks)
  - LCK.wq.destroy (uint64, count, Destroyed locks)
  - LCK.wq.locks (uint64, count, Lock Operations)
  - LCK.objhdr.creat (uint64, count, Created locks)
  - LCK.objhdr.destroy (uint64, count, Destroyed locks)
  - LCK.objhdr.locks (uint64, count, Lock Operations)
  - LCK.exp.creat (uint64, count, Created locks)
  - LCK.exp.destroy (uint64, count, Destroyed locks)
  - LCK.exp.locks (uint64, count, Lock Operations)
  - LCK.lru.creat (uint64, count, Created locks)
  - LCK.lru.destroy (uint64, count, Destroyed locks)
  - LCK.lru.locks (uint64, count, Lock Operations)
  - LCK.cli.creat (uint64, count, Created locks)
  - LCK.cli.destroy (uint64, count, Destroyed locks)
  - LCK.cli.locks (uint64, count, Lock Operations)
  - LCK.ban.creat (uint64, count, Created locks)
  - LCK.ban.destroy (uint64, count, Destroyed locks)
  - LCK.ban.locks (uint64, count, Lock Operations)
  - LCK.vbp.creat (uint64, count, Created locks)
  - LCK.vbp.destroy (uint64, count, Destroyed locks)
  - LCK.vbp.locks (uint64, count, Lock Operations)
  - LCK.backend.creat (uint64, count, Created locks)
  - LCK.backend.destroy (uint64, count, Destroyed locks)
  - LCK.backend.locks (uint64, count, Lock Operations)
  - LCK.vcapace.creat (uint64, count, Created locks)
  - LCK.vcapace.destroy (uint64, count, Destroyed locks)
  - LCK.vcapace.locks (uint64, count, Lock Operations)
  - LCK.nbusyobj.creat (uint64, count, Created locks)
  - LCK.nbusyobj.destroy (uint64, count, Destroyed locks)
  - LCK.nbusyobj.locks (uint64, count, Lock Operations)
  - LCK.busyobj.creat (uint64, count, Created locks)
  - LCK.busyobj.destroy (uint64, count, Destroyed locks)
  - LCK.busyobj.locks (uint64, count, Lock Operations)
  - LCK.mempool.creat (uint64, count, Created locks)
  - LCK.mempool.destroy (uint64, count, Destroyed locks)
  - LCK.mempool.locks (uint64, count, Lock Operations)
  - LCK.vxid.creat (uint64, count, Created locks)
  - LCK.vxid.destroy (uint64, count, Destroyed locks)
  - LCK.vxid.locks (uint64, count, Lock Operations)
  - LCK.pipestat.creat (uint64, count, Created locks)
  - LCK.pipestat.destroy (uint64, count, Destroyed locks)
  - LCK.pipestat.locks (uint64, count, Lock Operations)

### Tags

@@ -381,7 +381,8 @@ that are related to the nonactive VCL are excluded from monitoring.

## Requirements

- Varnish 6.0.2+ is required (older versions do not support JSON output from
  CLI tools)

## Examples

@@ -415,23 +416,91 @@ _Tip: It is useful to verify regexps using online tools like

By default, the plugin has a builtin list of regexps for the following VMODs:

### Dynamic Backends (goto)

```regex
^VBE\.(?P<_vcl>[\w\-]*)\.goto\.[[:alnum:]]+\.\((?P<backend>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\)\.\((?P<server>.*)\)\.\(ttl:\d*\.\d*.*\)
```

with data

```text
VBE.VCL12323.goto.000007c8.(123.123.123.123).(http://aaa.xxcc:80).(ttl:3600.000000).cache_hit
```

results in

```text
varnish,section=VBE,backend="123.123.123.123",server="http://aaa.xxcc:80" cache_hit=51i 1462765437090957980
```

### Key value storage (kvstore)

```regex
^KVSTORE\.(?P<id>[\w\-]*)\.(?P<_vcl>[\w\-]*)\.([\w\-]*)
```

with data

```text
KVSTORE.object_name.vcl_name.key
```

results in

```text
varnish,section=KVSTORE,id=object_name key=5i
```

### XCNT (libvmod-xcounter)

```regex
^XCNT\.(?P<_vcl>[\w\-]*)(\.)*(?P<group>[\w\-.+]*)\.(?P<_field>[\w\-.+]*)\.val
```

with data

```text
XCNT.abc1234.XXX+_YYYY.cr.pass.val
```

results in

```text
varnish,section=XCNT,group="XXX+_YYYY.cr" pass=5i
```

### Standard VBE metrics

```regex
^VBE\.(?P<_vcl>[\w\-]*)\.(?P<backend>[\w\-]*)\.([\w\-]*)
```

with data

```text
VBE.reload_20210622_153544_23757.default.unhealthy
```

results in

```text
varnish,section=VBE,backend="default" unhealthy=51i 1462765437090957980
```

### Default generic metric

```regex
([\w\-]*)\.(?P<_field>[\w\-.]*)
```

with data

```text
MSE_STORE.store-1-1.g_aio_running_bytes_write
```

results in

```text
varnish,section=MSE_STORE store-1-1.g_aio_running_bytes_write=5i
```
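
These builtin patterns are plain Go regular expressions with named capture
groups; going by the examples above, groups whose names start with an
underscore (`_vcl`, `_field`) receive special handling from the plugin, while
the remaining named groups become tags. The following standalone Go sketch
(illustrative only, not plugin code) shows how the standard VBE pattern carves
up a stat name:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The "standard VBE metrics" pattern from above. The unnamed trailing
	// group captures the part that becomes the field name ("unhealthy").
	re := regexp.MustCompile(`^VBE\.(?P<_vcl>[\w\-]*)\.(?P<backend>[\w\-]*)\.([\w\-]*)`)

	stat := "VBE.reload_20210622_153544_23757.default.unhealthy"
	m := re.FindStringSubmatch(stat)
	if m == nil {
		fmt.Println("no match")
		return
	}
	for i, name := range re.SubexpNames() {
		if i == 0 {
			continue // index 0 is the whole match
		}
		fmt.Printf("group %d (%q) = %q\n", i, name, m[i])
	}
	// group 1 ("_vcl") = "reload_20210622_153544_23757"
	// group 2 ("backend") = "default"
	// group 3 ("") = "unhealthy"
}
```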

The default regexps list can be extended in the telegraf config. The following
example shows a config with a custom regexp for parsing of `accounting` VMOD
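
As a hypothetical sketch of such an extension (the `ACCG` prefix and the group
names here are assumptions about the `accounting` VMOD's counter layout, not
taken from this diff):

```toml
[[inputs.varnish]]
  ## Illustrative only: appends a custom pattern to the builtin regexps list.
  regexps = ['^ACCG\.(?P<namespace>[\w\-]*)\.(?P<key>[\w\-]*)\.(?P<_field>[\w\-.]*)$']
```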

@@ -1,13 +1,17 @@

# Common vSphere Performance Metrics

The set of performance metrics in vSphere is open ended. Metrics may be added
or removed in new releases and the set of available metrics may vary depending
on hardware, as well as what plugins and add-on products are installed.
Therefore, providing a definitive list of available metrics is difficult. The
metrics listed below are the most commonly available as of vSphere 6.5.

For a complete list of metrics available from vSphere and the units they
measure in, please reference the [VMWare Product Documentation][product_doc] or
the [VMWare Performance Manager Documentation][perf_manager_doc].

To list the exact set in your environment, please use the govc tool available
[here](https://github.com/vmware/govmomi/tree/master/govc).

To obtain the set of metrics for e.g. a VM, you may use the following command:

@@ -15,6 +19,9 @@ To obtain the set of metrics for e.g. a VM, you may use the following command:

```
govc metric.ls vm/*
```

[product_doc]: https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.monitoring.doc/GUID-FF7F87C7-91E7-4A2D-88B5-E3E04A76F51B.html
[perf_manager_doc]: https://vdc-repo.vmware.com/vmwb-repository/dcr-public/eda658cb-b729-480e-99bc-d3c961055a38/dc769ba5-3cfa-44b1-a5f9-ad807521af19/doc/vim.PerformanceManager.html

## Virtual Machine Metrics

```metrics

@@ -375,13 +375,13 @@ wildcards may be slow in very large environments.

If your datacenter is in a folder, you have two options:

1. Explicitly include the folder in the path. For example, if your datacenter is
   in a folder named `F1` you could use the path `/F1/MyDatacenter/host/**`
   to get to your hosts.
2. Use a recursive wildcard to search an arbitrarily long chain of nested
   folders. To get to the hosts, you could use the path `/**/host/**`.
   Note: This may run slowly in very large environments, since a large number of
   nodes will be traversed.
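
A minimal sketch of how these two options could look in the plugin
configuration, assuming the `host_include` inventory-path setting:

```toml
[[inputs.vsphere]]
  ## Option 1: explicitly include the folder in the path.
  host_include = [ "/F1/MyDatacenter/host/**" ]

  ## Option 2: recursive wildcard (may be slow in large environments).
  # host_include = [ "/**/host/**" ]
```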

## Performance Considerations

@@ -390,8 +390,19 @@ be traversed.

vCenter keeps two different kinds of metrics, known as realtime and historical
metrics.

* Realtime metrics: Available at a 20 second granularity. These metrics are
  stored in memory and are very fast and cheap to query. Our tests have shown
  that a complete set of realtime metrics for 7000 virtual machines can be
  obtained in less than 20 seconds. Realtime metrics are only available on
  **ESXi hosts** and **virtual machine** resources. Realtime metrics are only
  stored for 1 hour in vCenter.
* Historical metrics: Available at (default) 5 minute, 30 minute, 2 hour and
  24 hour rollup levels. The vSphere Telegraf plugin only uses the most
  granular rollup, which defaults to 5 minutes but can be changed in vCenter to
  other interval durations. These metrics are stored in the vCenter database
  and can be expensive and slow to query. Historical metrics are the only type
  of metrics available for **clusters**, **datastores**, **resource pools** and
  **datacenters**.

This distinction has an impact on how Telegraf collects metrics. A single
instance of an input plugin can have one and only one collection interval,
which in practice means running separate plugin instances for realtime and
historical metrics, as sketched below.
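
A minimal two-instance sketch (the vCenter URL and the exclude selections here
are illustrative assumptions, not part of this diff):

```toml
## Instance 1: realtime metrics (hosts and VMs) on a short interval.
[[inputs.vsphere]]
  interval = "60s"
  vcenters = [ "https://vcenter.local/sdk" ]
  datastore_metric_exclude = [ "*" ]
  cluster_metric_exclude = [ "*" ]

## Instance 2: historical metrics (clusters, datastores) on the 5 minute rollup.
[[inputs.vsphere]]
  interval = "300s"
  vcenters = [ "https://vcenter.local/sdk" ]
  host_metric_exclude = [ "*" ]
  vm_metric_exclude = [ "*" ]
```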

@@ -489,15 +500,21 @@ may result in an error message similar to this:

There are two ways of addressing this:

* Ask your vCenter administrator to set `config.vpxd.stats.maxQueryMetrics` to
  a number that's higher than the total number of virtual machines managed by a
  vCenter instance.
* Exclude the cluster metrics and either use the basicstats aggregator to
  calculate sums and averages per cluster (see the sketch below) or use queries
  in the visualization tool to obtain the same result.
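
A minimal sketch of the aggregator route (the period and stats chosen here are
assumptions):

```toml
[[aggregators.basicstats]]
  ## Roll up the per-host metrics over each period.
  period = "300s"
  drop_original = false
  stats = [ "sum", "mean" ]
```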

### Concurrency Settings

The vSphere plugin allows you to specify two concurrency settings:

* `collect_concurrency`: The maximum number of simultaneous queries for
  performance metrics allowed per resource type.
* `discover_concurrency`: The maximum number of simultaneous queries for
  resource discovery allowed.
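
For example, a sketch sized with the rule of thumb given later in this document
(number of VMs divided by 1500, rounded up):

```toml
[[inputs.vsphere]]
  ## e.g. roughly 6000 VMs: 6000 / 1500 = 4
  collect_concurrency = 4
  discover_concurrency = 1
```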

While a higher level of concurrency typically has a positive impact on
performance, increasing these numbers too much can cause performance issues at
the vCenter server.

@@ -511,7 +528,8 @@ statistics that exist at a specific interval. The default historical interval

duration is 5 minutes but if this interval has been changed then you must
override the default query interval in the vSphere plugin.

* `historical_interval`: The interval of the most granular statistics
  configured in vSphere, represented in seconds.
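
For instance, if the most granular rollup in vCenter has been changed to
1 minute, a sketch of the override would be (the duration-string form shown is
an assumption):

```toml
[[inputs.vsphere]]
  ## Match the most granular rollup configured in vCenter (default 5 minutes).
  historical_interval = "60s"
```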

## Metrics

@@ -519,7 +537,8 @@ override the default query interval in the vSphere plugin.

  * Cluster services: CPU, memory, failover
  * CPU: total, usage
  * Memory: consumed, total, vmmemctl
  * VM operations: # changes, clone, create, deploy, destroy, power, reboot,
    reconfigure, register, reset, shutdown, standby, vmotion
* Host Stats:
  * CPU: total, usage, cost, mhz
  * Datastore: iops, latency, read/write bytes, # reads/writes

@@ -530,14 +549,16 @@ override the default query interval in the vSphere plugin.

  * Res CPU: active, max, running
  * Storage Adapter: commands, latency, # reads/writes
  * Storage Path: commands, latency, # reads/writes
  * System Resources: cpu active, cpu max, cpu running, cpu usage,
    mem allocated, mem consumed, mem shared, swap
  * System: uptime
  * Flash Module: active VMDKs
* VM Stats:
  * CPU: demand, usage, readiness, cost, mhz
  * Datastore: latency, # reads/writes
  * Disk: commands, latency, # reads/writes, provisioned, usage
  * Memory: granted, usage, active, swap, vmmemctl, memorySizeMB (allocated),
    memoryReservation
  * Network: broadcast, bytes, dropped, multicast, packets, usage
  * Power: energy, usage
  * Res CPU: active, max, running

@@ -599,9 +620,11 @@ configuration of hosts, VMs, and other resources.

* vSphere 6.5 and later
* Clusters with vSAN enabled
* [Turn on Virtual SAN performance service][vsan_perf_service]: When you create
  a vSAN cluster, the performance service is disabled. To monitor the
  performance metrics, you must turn on vSAN performance service.

[vsan_perf_service]: https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.virtualsan.doc/GUID-02F67DC3-3D5A-48A4-A445-D2BD6AF2862C.html

### vSAN Configuration

@@ -667,22 +690,42 @@ you must turn on vSAN performance service.

  # insecure_skip_verify = false
```

Use `vsan_metric_include = [...]` to define the vSAN metrics that you want to
collect. For example:

```toml
vsan_metric_include = ["summary.*", "performance.host-domclient", "performance.cache-disk", "performance.disk-group", "performance.capacity-disk"]
```

To include all supported vSAN metrics, use `vsan_metric_include = [ "*" ]`.
To disable all the vSAN metrics, use `vsan_metric_exclude = [ "*" ]`.

`vsan_metric_skip_verify` defines whether to skip verifying vSAN metrics
against the ones from the [GetSupportedEntityTypes API][supported_entity_types].
This option exists because some performance entities are not returned by the
API, but we want to offer the flexibility in case you really need those stats.
When set to false, anything not in the supported entity list will be filtered
out. When set to true, queried metrics will be identical to
`vsan_metric_include`, and the exclude list will not be used in this case. By
default the value is false.

`vsan_cluster_include` defines a list of inventory paths that will be used to
select a portion of vSAN clusters. vSAN metrics are only collected on the
cluster level, so these paths work the same way as the inventory paths for
[vSphere clusters](#inventory-paths).
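
Taken together, a sketch of these two options (the values shown are
illustrative assumptions):

```toml
[[inputs.vsphere]]
  ## Trust vsan_metric_include as-is, skipping the API verification step.
  vsan_metric_skip_verify = true
  ## Only collect vSAN metrics from clusters under this inventory path.
  vsan_cluster_include = [ "/*/host/**" ]
```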

Many vCenter environments use self-signed certificates. Update the bottom
portion of the above configuration and provide proper values for all applicable
SSL Config settings that apply in your vSphere environment. In some
environments, setting `insecure_skip_verify = true` will be necessary when the
SSL certificates are not available.

To ensure consistent collection in larger vSphere environments, you must
increase concurrency for the plugin. Use the `collect_concurrency` setting to
control concurrency. Set `collect_concurrency` to the number of virtual
machines divided by 1500, rounded up to the nearest integer. For example, for
1200 VMs use 1, and for 2300 VMs use 2.

[supported_entity_types]: https://code.vmware.com/apis/48/vsan#/doc/vim.cluster.VsanPerformanceManager.html#getSupportedEntityTypes

### Measurements & Fields
@ -696,37 +739,85 @@ and fields may vary.
|
|||
|
||||
* vSAN Performance
  * cluster-domclient
    * iops_read, throughput_read, latency_avg_read, iops_write,
      throughput_write, latency_avg_write, congestion, oio
  * cluster-domcompmgr
    * iops_read, throughput_read, latency_avg_read, iops_write,
      throughput_write, latency_avg_write, iops_rec_write, throughput_rec_write,
      latency_avg_rec_write, congestion, oio, iops_resync_read, tput_resync_read,
      lat_avg_resyncread
  * host-domclient
    * iops_read, throughput_read, latency_avg_read, read_count, iops_write,
      throughput_write, latency_avg_write, write_count, congestion, oio,
      client_cache_hits, client_cache_hit_rate
  * host-domcompmgr
    * iops_read, throughput_read, latency_avg_read, read_count, iops_write,
      throughput_write, latency_avg_write, write_count, iops_rec_write,
      throughput_rec_write, latency_avg_rec_write, rec_write_count, congestion,
      oio, iops_resync_read, tput_resync_read, lat_avg_resync_read
  * cache-disk
    * iops_dev_read, throughput_dev_read, latency_dev_read, io_count_dev_read,
      iops_dev_write, throughput_dev_write, latency_dev_write,
      io_count_dev_write, latency_dev_d_avg, latency_dev_g_avg
  * capacity-disk
    * iops_dev_read, throughput_dev_read, latency_dev_read, io_count_dev_read,
      iops_dev_write, throughput_dev_write, latency_dev_write,
      io_count_dev_write, latency_dev_d_avg, latency_dev_g_avg, iops_read,
      latency_read, io_count_read, iops_write, latency_write, io_count_write
  * disk-group
    * iops_sched, latency_sched, outstanding_bytes_sched, iops_sched_queue_rec,
      throughput_sched_queue_rec, latency_sched_queue_rec, iops_sched_queue_vm,
      throughput_sched_queue_vm, latency_sched_queue_vm, iops_sched_queue_meta,
      throughput_sched_queue_meta, latency_sched_queue_meta,
      iops_delay_pct_sched, latency_delay_sched, rc_hit_rate, wb_free_pct,
      war_evictions, quota_evictions, iops_rc_read, latency_rc_read,
      io_count_rc_read, iops_wb_read, latency_wb_read, io_count_wb_read,
      iops_rc_write, latency_rc_write, io_count_rc_write, iops_wb_write,
      latency_wb_write, io_count_wb_write, ssd_bytes_drained, zero_bytes_drained,
      mem_congestion, slab_congestion, ssd_congestion, iops_congestion,
      log_congestion, comp_congestion, iops_direct_sched, iops_read,
      throughput_read, latency_avg_read, read_count, iops_write,
      throughput_write, latency_avg_write, write_count, oio_write,
      oio_rec_write, oio_write_size, oio_rec_write_size, rc_size, wb_size,
      capacity, capacity_used, capacity_reserved, throughput_sched,
      iops_resync_read_policy, iops_resync_read_decom,
      iops_resync_read_rebalance, iops_resync_read_fix_comp,
      iops_resync_write_policy, iops_resync_write_decom,
      iops_resync_write_rebalance, iops_resync_write_fix_comp,
      tput_resync_read_policy, tput_resync_read_decom,
      tput_resync_read_rebalance, tput_resync_read_fix_comp,
      tput_resync_write_policy, tput_resync_write_decom,
      tput_resync_write_rebalance, tput_resync_write_fix_comp,
      lat_resync_read_policy, lat_resync_read_decom, lat_resync_read_rebalance,
      lat_resync_read_fix_comp, lat_resync_write_policy, lat_resync_write_decom,
      lat_resync_write_rebalance, lat_resync_write_fix_comp
  * virtual-machine
    * iops_read, throughput_read, latency_read_avg, latency_read_stddev,
      read_count, iops_write, throughput_write, latency_write_avg,
      latency_write_stddev, write_count
  * vscsi
    * iops_read, throughput_read, latency_read, read_count, iops_write,
      throughput_write, latency_write, write_count
  * virtual-disk
    * iops_limit, niops, niops_delayed
  * vsan-host-net
    * rx_throughput, rx_packets, rx_packets_loss_rate, tx_throughput,
      tx_packets, tx_packets_loss_rate
  * vsan-vnic-net
    * rx_throughput, rx_packets, rx_packets_loss_rate, tx_throughput,
      tx_packets, tx_packets_loss_rate
  * vsan-pnic-net
    * rx_throughput, rx_packets, rx_packets_loss_rate, tx_throughput,
      tx_packets, tx_packets_loss_rate
  * vsan-iscsi-host
    * iops_read, iops_write, iops_total, bandwidth_read, bandwidth_write,
      bandwidth_total, latency_read, latency_write, latency_total, queue_depth
  * vsan-iscsi-target
    * iops_read, iops_write, iops_total, bandwidth_read, bandwidth_write,
      bandwidth_total, latency_read, latency_write, latency_total, queue_depth
  * vsan-iscsi-lun
    * iops_read, iops_write, iops_total, bandwidth_read, bandwidth_write,
      bandwidth_total, latency_read, latency_write, latency_total, queue_depth

### vSAN Tags

@ -754,8 +845,11 @@ and fields may vary.

vSAN also keeps two different kinds of metrics - realtime and historical
metrics.

* Realtime metrics are metrics with the prefix 'summary'. These metrics are
  available in realtime.
* Historical metrics are metrics with the prefix 'performance'. These are
  metrics queried from the vSAN performance API, which is available at a
  5-minute rollup level.

For performance reasons, it is better to specify two instances of the
plugin, one for the realtime metrics with a short collection interval,

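A hedged sketch of such a two-instance setup (the intervals and glob-style
metric selectors are assumptions, not recommendations from this README):

```toml
## realtime vSAN metrics ('summary' prefix), short interval
[[inputs.vsphere]]
  interval = "20s"
  vsan_metric_include = ["summary.*"]

## historical vSAN metrics ('performance' prefix), matching the 5-minute rollup
[[inputs.vsphere]]
  interval = "300s"
  vsan_metric_include = ["performance.*"]
```
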
@ -1,6 +1,9 @@

# Filestack webhook

You should configure Filestack's webhooks to point at the `webhooks`
service. To do this go to [filestack.com](https://www.filestack.com/), select
your app and click `Credentials > Webhooks`. In the resulting page, set the
`URL` to `http://<my_ip>:1619/filestack`, and click on `Add`.

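A minimal sketch of the matching `webhooks` plugin configuration, assuming the
default path shown above:

```toml
[[inputs.webhooks]]
  service_address = ":1619"

  [inputs.webhooks.filestack]
    path = "/filestack"
```
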
## Events

@ -1,12 +1,17 @@

# mandrill webhook

You should configure Mandrill's webhooks to point at the `webhooks`
service. To do this go to [mandrillapp.com](https://mandrillapp.com) and click
`Settings > Webhooks`. In the resulting page, click on `Add a Webhook`, select
all events, set the `URL` to `http://<my_ip>:1619/mandrill`, and click on
`Create Webhook`.

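The corresponding listener configuration might look like this minimal sketch:

```toml
[[inputs.webhooks]]
  service_address = ":1619"

  [inputs.webhooks.mandrill]
    path = "/mandrill"
```
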
## Events

See the [webhook doc](https://mandrill.zendesk.com/hc/en-us/articles/205583307-Message-Event-Webhook-format).

All events log the original timestamp, the event name and the unique
identifier of the message that generated the event.

**Tags:**

@ -11,12 +11,13 @@ Events from Papertrail come in two forms:

* The [event-based callback](http://help.papertrailapp.com/kb/how-it-works/web-hooks/#callback):

  * A point is created per event, with the timestamp as `received_at`
  * Each point has a field counter (`count`), which is set to `1` (signifying
    the event occurred)
  * Each event "hostname" object is converted to a `host` tag
  * The "saved_search" name in the payload is added as an `event` tag
  * The "saved_search" id in the payload is added as a `search_id` field
  * The papertrail url to view the event is built and added as a `url` field
  * The rest of the event data is converted directly to fields on the point:
    * `id`
    * `source_ip`
    * `source_name`

@ -34,8 +35,10 @@ papertrail,host=myserver.example.com,event=saved_search_name count=1i,source_nam

* The [count-based callback](http://help.papertrailapp.com/kb/how-it-works/web-hooks/#count-only-webhooks)

  * A point is created per timeseries object per count, with the timestamp as
    the "timeseries" key (the unix epoch of the event)
  * Each point has a field counter (`count`), which is set to the value of each
    "timeseries" object
  * Each count "source_name" object is converted to a `host` tag
  * The "saved_search" name in the payload is added as an `event` tag

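For either callback style, the receiving side is the same `webhooks` service;
a minimal sketch:

```toml
[[inputs.webhooks]]
  service_address = ":1619"

  [inputs.webhooks.papertrail]
    path = "/papertrail"
```
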
@ -1,10 +1,15 @@

# rollbar webhooks

You should configure Rollbar's webhooks to point at the `webhooks` service.
To do this go to [rollbar.com](https://rollbar.com/) and click
`Settings > Notifications > Webhook`. In the resulting page set `URL` to
`http://<my_ip>:1619/rollbar`, and click on `Enable Webhook Integration`.

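And the matching listener, as a minimal sketch:

```toml
[[inputs.webhooks]]
  service_address = ":1619"

  [inputs.webhooks.rollbar]
    path = "/rollbar"
```
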
## Events

The titles of the following sections are links to the full payloads and details
for each event. The body contains what information from the event is persisted.
The format is as follows:

```toml
# TAGS
@ -13,7 +18,8 @@ The titles of the following sections are links to the full payloads and details
* 'fieldKey' = `fieldValue` type
```

The tag values and field values show the place on the incoming JSON object where
the data is sourced from.

See [webhook doc](https://rollbar.com/docs/webhooks/)

@ -33,9 +33,11 @@ or `win_perf_counters` by default.

*Tags:*

- source - computer name, as specified in the `Sources` parameter. The name
  `localhost` is translated into the host name
- objectname - normalized name of the performance object
- instance - instance name, if the performance object supports multiple
  instances, otherwise omitted

*Fields* are counters of the performance object.
The field name is the normalized counter name.

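A hedged example of a configuration that would produce such tags (the
`Sources` entry is only needed for remote collection and is an assumption
here):

```toml
[[inputs.win_perf_counters]]
  [[inputs.win_perf_counters.object]]
    ObjectName = "Processor"
    Instances = ["*"]
    Counters = ["% Processor Time"]
    Measurement = "win_cpu"
    ## assumed remote-collection option; "localhost" becomes the host name tag
    # Sources = ["localhost", "server2"]
```
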
@ -39,10 +39,12 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.

  - `device` (associated interface device name, e.g. `wg0`)
  - `public_key` (peer public key, e.g. `NZTRIrv/ClTcQoNAnChEot+WL7OH7uEGQmx8oAN9rWE=`)
- fields:
  - `persistent_keepalive_interval_ns` (int, keepalive interval in
    nanoseconds; 0 if unset)
  - `protocol_version` (int, Wireguard protocol version number)
  - `allowed_ips` (int, number of allowed IPs for this peer)
  - `last_handshake_time_ns` (int, Unix timestamp of the last handshake for
    this peer in nanoseconds)
  - `rx_bytes` (int, number of bytes received from this peer)
  - `tx_bytes` (int, number of bytes transmitted to this peer)
  - `allowed_peer_cidr` (string, comma separated list of allowed peer CIDRs)

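A minimal plugin sketch; the device restriction is optional and the interface
name is illustrative:

```toml
[[inputs.wireguard]]
  ## limit collection to specific interfaces; all devices are collected if unset
  # devices = ["wg0"]
```
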
@ -29,11 +29,13 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.

- tags:
  - interface (wireless interface)
- fields:
  - status (int64, gauge) - its current state; this is device-dependent
    information
  - link (int64, percentage, gauge) - general quality of the reception
  - level (int64, dBm, gauge) - signal strength at the receiver
  - noise (int64, dBm, gauge) - silence level (no packet) at the receiver
  - nwid (int64, packets, counter) - number of discarded packets due to
    invalid network id
  - crypt (int64, packets, counter) - number of packets that could not be
    decrypted
  - frag (int64, packets, counter) - fragmented packets
  - retry (int64, packets, counter) - cumulative retry counts

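A minimal sketch of the corresponding input, assuming the usual `host_proc`
override for containerized deployments:

```toml
[[inputs.wireless]]
  ## path to the kernel's /proc mount, useful when running inside a container
  # host_proc = "/proc"
```
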
@ -77,7 +77,8 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.

- fields:
  - verification_code (int)
  - verification_error (string)
  - expiry (int, seconds) - Time when the certificate will expire, in seconds
    since the Unix epoch. `SELECT (expiry / 60 / 60 / 24) as "expiry_in_days"`
  - age (int, seconds)
  - startdate (int, seconds)
  - enddate (int, seconds)

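A hedged example of how certificates might be selected for these fields (the
URL and file path are illustrative):

```toml
[[inputs.x509_cert]]
  ## remote endpoints and local PEM files can be mixed
  sources = ["https://example.org:443", "/etc/ssl/certs/mycert.pem"]
```
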
@ -7,6 +7,10 @@ __Please Note:__ This plugin is experimental; Its data schema may be subject to

change based on its main usage cases and the evolution of the OpenTracing
standard.

> [!IMPORTANT]
> This plugin will create high cardinality data, so please take this into
> account when sending data to your output!

## Service Input <!-- @/docs/includes/service_input.md -->

This plugin is a service input. Normal plugins gather metrics determined by the

@ -30,7 +34,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details.

## Configuration

```toml @sample.conf
# Gather data from a Zipkin server including trace and timing data
[[inputs.zipkin]]
  ## URL path for span data
  # path = "/api/v1/spans"

@ -52,167 +56,160 @@ is not set, then the plugin assumes it is `JSON` format.

This plugin uses Annotations tags and fields to track data from spans.

- `TRACE` is a set of spans that share a single root span. Traces are built by
  collecting all Spans that share a traceId.
- `SPAN` is a set of Annotations and BinaryAnnotations that correspond to a
  particular RPC.
- `Annotations` create a metric for each annotation & binary annotation of a
  span. This records an occurrence in time at the beginning and end of each
  request.

Annotations may have the following values:

- `CS` (client start) marks the beginning of the span, a request is made.
- `SR` (server receive) marks the point in time the server receives the request
  and starts processing it. Network latency & clock jitters distinguish this
  from `CS`.
- `SS` (server send) marks the point in time the server is finished processing
  and sends the response back to the client. The difference to `SR` denotes
  the amount of time it took to process the request.
- `CR` (client receive) marks the end of the span, with the client receiving
  the response from the server. RPC is considered complete with this
  annotation.

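Schematically, the four annotations order themselves along a request like this
(a sketch derived from the definitions above, not plugin output):

```text
CS ──network──▶ SR ──server processing──▶ SS ──network──▶ CR
client-observed span duration = CR - CS
server processing time        = SS - SR
```
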
## Metrics

- `duration_ns` the time in nanoseconds between the end and beginning of a span

### Tags

- __"id":__ The 64-bit ID of the span.
|
||||
- __"parent_id":__ An ID associated with a particular child span. If there is no child span, the parent ID is set to ID.
|
||||
- __"trace_id":__ The 64 or 128-bit ID of a particular trace. Every span in a trace shares this ID. Concatenation of high and low and converted to hexadecimal.
|
||||
- __"name":__ Defines a span
|
||||
- `id` the 64-bit ID of the span.
|
||||
- `parent_id` an ID associated with a particular child span. If there is no
|
||||
child span, `parent_id` is equal to `id`
|
||||
- `trace_id` the 64-bit or 128-bit ID of a particular trace. Every span in a
|
||||
trace uses this ID.
|
||||
- `name` defines a span
|
||||
|
||||
#### Annotations have these additional tags

- `service_name` defines a service
- `annotation` the value of an annotation
- `endpoint_host` listening IPv4 address and, if present, port

#### Binary Annotations have these additional tags

- `service_name` defines a service
- `annotation` the value of an annotation
- `endpoint_host` listening IPv4 address and, if present, port
- `annotation_key` label describing the annotation

## Sample Queries

__Get All Span Names for Service__ `my_web_server`

```sql
SHOW TAG VALUES FROM "zipkin" WITH KEY = "name" WHERE "service_name" = 'my_web_server'
```

- __Description:__ returns a list containing the names of the spans which have
  annotations with the given `service_name` of `my_web_server`.

__Get All Service Names__

```sql
SHOW TAG VALUES FROM "zipkin" WITH KEY = "service_name"
```

- __Description:__ returns a list of all `distinct` endpoint service names.

__Find spans with the longest duration__

```sql
SELECT max("duration_ns") FROM "zipkin" WHERE "service_name" = 'my_service' AND "name" = 'my_span_name' AND time > now() - 20m GROUP BY "trace_id",time(30s) LIMIT 5
```

- __Description:__ in the last 20 minutes, find the top 5 longest span
  durations for service `my_service` and span name `my_span_name`.

### Recommended InfluxDB setup

This plugin will create high cardinality data so we recommend using the
[tsi InfluxDB engine][1].

[1]: https://www.influxdata.com/path-1-billion-time-series-influxdb-high-cardinality-indexing-ready-testing/

#### How To Set Up InfluxDB to Work with Zipkin

##### Steps

1. ___Update___ InfluxDB to >= 1.3, in order to use the new tsi engine.

2. ___Generate___ a config file with the following command:

   ```sh
   influxd config > /path/for/config/file
   ```

3. ___Add___ the following to your config file, under the `[data]` table:

   ```toml
   [data]
   index-version = "tsi1"
   ```

4. ___Start___ `influxd` with your new config file:

   ```sh
   influxd -config=/path/to/your/config/file
   ```

5. ___Update___ your retention policy:

   ```sql
   ALTER RETENTION POLICY "autogen" ON "telegraf" DURATION 1d SHARD DURATION 30m
   ```

### Example Input Trace

- [Cli microservice with two services Test](https://github.com/openzipkin/zipkin-go-opentracing/tree/master/examples/cli_with_2_services)
- [Test data from distributed trace repo sample json](https://github.com/mattkanwisher/distributedtrace/blob/master/testclient/sample.json)

#### [Trace Example from Zipkin model](http://zipkin.io/pages/data_model.html)

```json
{
  "traceId": "bd7a977555f6b982",
  "name": "query",
  "id": "be2d01e33cc78d97",
  "parentId": "ebf33e1a81dc6f71",
  "timestamp": 1458702548786000,
  "duration": 13000,
  "annotations": [
    {
      "endpoint": {
        "serviceName": "zipkin-query",
        "ipv4": "192.168.1.2",
        "port": 9411
      },
      "timestamp": 1458702548786000,
      "value": "cs"
    },
    {
      "endpoint": {
        "serviceName": "zipkin-query",
        "ipv4": "192.168.1.2",
        "port": 9411
      },
      "timestamp": 1458702548799000,
      "value": "cr"
    }
  ],
  "binaryAnnotations": [
    {
      "key": "jdbc.query",
      "value": "select distinct `zipkin_spans`.`trace_id` from `zipkin_spans` join `zipkin_annotations` on (`zipkin_spans`.`trace_id` = `zipkin_annotations`.`trace_id` and `zipkin_spans`.`id` = `zipkin_annotations`.`span_id`) where (`zipkin_annotations`.`endpoint_service_name` = ? and `zipkin_spans`.`start_ts` between ? and ?) order by `zipkin_spans`.`start_ts` desc limit ?",
      "endpoint": {
        "serviceName": "zipkin-query",
        "ipv4": "192.168.1.2",
        "port": 9411
      }
    },
    {
      "key": "sa",
      "value": true,
      "endpoint": {
        "serviceName": "spanstore-jdbc",
        "ipv4": "127.0.0.1",
        "port": 3306
      }
    }
  ]
}
```

## Example Output

The Zipkin data below

```json
[
  {
    "trace_id": 2505404965370368069,
    "name": "Child",
    "id": 8090652509916334619,
    "parent_id": 22964302721410078,
    "annotations": [],
    "binary_annotations": [
      {
        "key": "lc",
        "value": "dHJpdmlhbA==",
        "annotation_type": "STRING",
        "host": {
          "ipv4": 2130706433,
          "port": 0,
          "service_name": "trivial"
        }
      }
    ],
    "timestamp": 1498688360851331,
    "duration": 53106
  },
  {
    "trace_id": 2505404965370368069,
    "name": "Child",
    "id": 103618986556047333,
    "parent_id": 22964302721410078,
    "annotations": [],
    "binary_annotations": [
      {
        "key": "lc",
        "value": "dHJpdmlhbA==",
        "annotation_type": "STRING",
        "host": {
          "ipv4": 2130706433,
          "port": 0,
          "service_name": "trivial"
        }
      }
    ],
    "timestamp": 1498688360904552,
    "duration": 50410
  },
  {
    "trace_id": 2505404965370368069,
    "name": "Parent",
    "id": 22964302721410078,
    "annotations": [
      {
        "timestamp": 1498688360851325,
        "value": "Starting child #0",
        "host": {
          "ipv4": 2130706433,
          "port": 0,
          "service_name": "trivial"
        }
      },
      {
        "timestamp": 1498688360904545,
        "value": "Starting child #1",
        "host": {
          "ipv4": 2130706433,
          "port": 0,
          "service_name": "trivial"
        }
      },
      {
        "timestamp": 1498688360954992,
        "value": "A Log",
        "host": {
          "ipv4": 2130706433,
          "port": 0,
          "service_name": "trivial"
        }
      }
    ],
    "binary_annotations": [
      {
        "key": "lc",
        "value": "dHJpdmlhbA==",
        "annotation_type": "STRING",
        "host": {
          "ipv4": 2130706433,
          "port": 0,
          "service_name": "trivial"
        }
      }
    ],
    "timestamp": 1498688360851318,
    "duration": 103680
  }
]
```

generates the following metrics:

```text
zipkin,id=7047c59776af8a1b,name=child,parent_id=5195e96239641e,service_name=trivial,trace_id=22c4fc8ab3669045 duration_ns=53106000i 1498688360851331000
zipkin,annotation=trivial,annotation_key=lc,endpoint_host=127.0.0.1,id=7047c59776af8a1b,name=child,parent_id=5195e96239641e,service_name=trivial,trace_id=22c4fc8ab3669045 duration_ns=53106000i 1498688360851331000
zipkin,id=17020eb55a8bfe5,name=child,parent_id=5195e96239641e,service_name=trivial,trace_id=22c4fc8ab3669045 duration_ns=50410000i 1498688360904552000
zipkin,annotation=trivial,annotation_key=lc,endpoint_host=127.0.0.1,id=17020eb55a8bfe5,name=child,parent_id=5195e96239641e,service_name=trivial,trace_id=22c4fc8ab3669045 duration_ns=50410000i 1498688360904552000
zipkin,id=5195e96239641e,name=parent,parent_id=5195e96239641e,service_name=trivial,trace_id=22c4fc8ab3669045 duration_ns=103680000i 1498688360851318000
zipkin,annotation=Starting\ child\ #0,endpoint_host=127.0.0.1,id=5195e96239641e,name=parent,parent_id=5195e96239641e,service_name=trivial,trace_id=22c4fc8ab3669045 duration_ns=103680000i 1498688360851318000
zipkin,annotation=Starting\ child\ #1,endpoint_host=127.0.0.1,id=5195e96239641e,name=parent,parent_id=5195e96239641e,service_name=trivial,trace_id=22c4fc8ab3669045 duration_ns=103680000i 1498688360851318000
zipkin,annotation=A\ Log,endpoint_host=127.0.0.1,id=5195e96239641e,name=parent,parent_id=5195e96239641e,service_name=trivial,trace_id=22c4fc8ab3669045 duration_ns=103680000i 1498688360851318000
zipkin,annotation=trivial,annotation_key=lc,endpoint_host=127.0.0.1,id=5195e96239641e,name=parent,parent_id=5195e96239641e,service_name=trivial,trace_id=22c4fc8ab3669045 duration_ns=103680000i 1498688360851318000
```

@ -1,4 +1,4 @@

# Gather data from a Zipkin server including trace and timing data
[[inputs.zipkin]]
  ## URL path for span data
  # path = "/api/v1/spans"