# HELP go_gc_cycles_automatic_gc_cycles_total Count of completed GC cycles generated by the Go runtime. Sourced from /gc/cycles/automatic:gc-cycles.
# TYPE go_gc_cycles_automatic_gc_cycles_total counter
go_gc_cycles_automatic_gc_cycles_total 648
# HELP go_gc_cycles_forced_gc_cycles_total Count of completed GC cycles forced by the application. Sourced from /gc/cycles/forced:gc-cycles.
# TYPE go_gc_cycles_forced_gc_cycles_total counter
go_gc_cycles_forced_gc_cycles_total 0
# HELP go_gc_cycles_total_gc_cycles_total Count of all completed GC cycles. Sourced from /gc/cycles/total:gc-cycles.
# TYPE go_gc_cycles_total_gc_cycles_total counter
go_gc_cycles_total_gc_cycles_total 648
# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds{quantile="0"} 0.000131974
go_gc_duration_seconds{quantile="0.25"} 0.00014999
go_gc_duration_seconds{quantile="0.5"} 0.000173371
go_gc_duration_seconds{quantile="0.75"} 0.000200793
go_gc_duration_seconds{quantile="1"} 0.007358768
go_gc_duration_seconds_sum 0.182771913
go_gc_duration_seconds_count 648
# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent.
# TYPE go_gc_gogc_percent gauge
go_gc_gogc_percent 75
# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes.
# TYPE go_gc_gomemlimit_bytes gauge
go_gc_gomemlimit_bytes 1.683228672e+10
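
The two gauges above simply echo runtime tunables: go_gc_gogc_percent 75 means GOGC was lowered from its default of 100, and go_gc_gomemlimit_bytes (~16.8 GB) means a soft memory limit is in effect. A minimal sketch of setting the same knobs programmatically with the runtime/debug functions named in the HELP text; the literal values are just the ones observed in these gauges, not recommendations:

package main

import "runtime/debug"

func main() {
	// Equivalent to GOGC=75: run the next GC when the heap grows 75%
	// beyond the live heap left by the previous cycle.
	debug.SetGCPercent(75)

	// Equivalent to GOMEMLIMIT: a soft heap limit in bytes
	// (math.MaxInt64, the default, means no limit). This value matches
	// the go_gc_gomemlimit_bytes gauge above.
	debug.SetMemoryLimit(16_832_286_720)
}
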
# HELP go_gc_heap_allocs_by_size_bytes Distribution of heap allocations by approximate size. Bucket counts increase monotonically. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. Sourced from /gc/heap/allocs-by-size:bytes.
# TYPE go_gc_heap_allocs_by_size_bytes histogram
go_gc_heap_allocs_by_size_bytes_bucket{le="8.999999999999998"} 7.660624e+06
go_gc_heap_allocs_by_size_bytes_bucket{le="24.999999999999996"} 8.8939843e+07
go_gc_heap_allocs_by_size_bytes_bucket{le="64.99999999999999"} 2.23268095e+08
go_gc_heap_allocs_by_size_bytes_bucket{le="144.99999999999997"} 2.8529473e+08
go_gc_heap_allocs_by_size_bytes_bucket{le="320.99999999999994"} 3.03952249e+08
go_gc_heap_allocs_by_size_bytes_bucket{le="704.9999999999999"} 3.07636208e+08
go_gc_heap_allocs_by_size_bytes_bucket{le="1536.9999999999998"} 3.1135437e+08
go_gc_heap_allocs_by_size_bytes_bucket{le="3200.9999999999995"} 3.12567895e+08
go_gc_heap_allocs_by_size_bytes_bucket{le="6528.999999999999"} 3.12963e+08
go_gc_heap_allocs_by_size_bytes_bucket{le="13568.999999999998"} 3.13134105e+08
go_gc_heap_allocs_by_size_bytes_bucket{le="27264.999999999996"} 3.13261665e+08
go_gc_heap_allocs_by_size_bytes_bucket{le="+Inf"} 3.13332659e+08
go_gc_heap_allocs_by_size_bytes_sum 9.232557672e+10
go_gc_heap_allocs_by_size_bytes_count 3.13332659e+08
# HELP go_gc_heap_allocs_bytes_total Cumulative sum of memory allocated to the heap by the application. Sourced from /gc/heap/allocs:bytes.
# TYPE go_gc_heap_allocs_bytes_total counter
go_gc_heap_allocs_bytes_total 9.232557672e+10
# HELP go_gc_heap_allocs_objects_total Cumulative count of heap allocations triggered by the application. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. Sourced from /gc/heap/allocs:objects.
# TYPE go_gc_heap_allocs_objects_total counter
go_gc_heap_allocs_objects_total 3.13332659e+08
# HELP go_gc_heap_frees_by_size_bytes Distribution of freed heap allocations by approximate size. Bucket counts increase monotonically. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. Sourced from /gc/heap/frees-by-size:bytes.
# TYPE go_gc_heap_frees_by_size_bytes histogram
go_gc_heap_frees_by_size_bytes_bucket{le="8.999999999999998"} 7.642561e+06
go_gc_heap_frees_by_size_bytes_bucket{le="24.999999999999996"} 8.8785336e+07
go_gc_heap_frees_by_size_bytes_bucket{le="64.99999999999999"} 2.21969761e+08
go_gc_heap_frees_by_size_bytes_bucket{le="144.99999999999997"} 2.83680272e+08
go_gc_heap_frees_by_size_bytes_bucket{le="320.99999999999994"} 3.02134079e+08
go_gc_heap_frees_by_size_bytes_bucket{le="704.9999999999999"} 3.05802076e+08
go_gc_heap_frees_by_size_bytes_bucket{le="1536.9999999999998"} 3.09513355e+08
go_gc_heap_frees_by_size_bytes_bucket{le="3200.9999999999995"} 3.10724636e+08
go_gc_heap_frees_by_size_bytes_bucket{le="6528.999999999999"} 3.11098549e+08
go_gc_heap_frees_by_size_bytes_bucket{le="13568.999999999998"} 3.11269232e+08
go_gc_heap_frees_by_size_bytes_bucket{le="27264.999999999996"} 3.11396194e+08
go_gc_heap_frees_by_size_bytes_bucket{le="+Inf"} 3.1146697e+08
go_gc_heap_frees_by_size_bytes_sum 9.1680148904e+10
go_gc_heap_frees_by_size_bytes_count 3.1146697e+08
# HELP go_gc_heap_frees_bytes_total Cumulative sum of heap memory freed by the garbage collector. Sourced from /gc/heap/frees:bytes.
# TYPE go_gc_heap_frees_bytes_total counter
go_gc_heap_frees_bytes_total 9.1680148904e+10
# HELP go_gc_heap_frees_objects_total Cumulative count of heap allocations whose storage was freed by the garbage collector. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. Sourced from /gc/heap/frees:objects.
# TYPE go_gc_heap_frees_objects_total counter
go_gc_heap_frees_objects_total 3.1146697e+08
# HELP go_gc_heap_goal_bytes Heap size target for the end of the GC cycle. Sourced from /gc/heap/goal:bytes.
# TYPE go_gc_heap_goal_bytes gauge
go_gc_heap_goal_bytes 9.10854631e+08
# HELP go_gc_heap_live_bytes Heap memory occupied by live objects that were marked by the previous GC. Sourced from /gc/heap/live:bytes.
# TYPE go_gc_heap_live_bytes gauge
go_gc_heap_live_bytes 5.19755232e+08
# HELP go_gc_heap_objects_objects Number of objects, live or unswept, occupying heap memory. Sourced from /gc/heap/objects:objects.
# TYPE go_gc_heap_objects_objects gauge
go_gc_heap_objects_objects 1.865689e+06
# HELP go_gc_heap_tiny_allocs_objects_total Count of small allocations that are packed together into blocks. These allocations are counted separately from other allocations because each individual allocation is not tracked by the runtime, only their block. Each block is already accounted for in allocs-by-size and frees-by-size. Sourced from /gc/heap/tiny/allocs:objects.
# TYPE go_gc_heap_tiny_allocs_objects_total counter
go_gc_heap_tiny_allocs_objects_total 1.2850795e+07
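
The "Sourced from ..." notes in the HELP text are runtime/metrics keys, so the same data can be read in-process without Prometheus in the loop. A minimal sketch using two of the keys cited above:

package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	// Names come straight from the "Sourced from ..." annotations above.
	samples := []metrics.Sample{
		{Name: "/gc/heap/allocs-by-size:bytes"},
		{Name: "/gc/heap/tiny/allocs:objects"},
	}
	metrics.Read(samples)

	// The size distribution is a runtime histogram; tiny allocs are a plain counter.
	if v := samples[0].Value; v.Kind() == metrics.KindFloat64Histogram {
		fmt.Println("alloc-size buckets:", len(v.Float64Histogram().Counts))
	}
	if v := samples[1].Value; v.Kind() == metrics.KindUint64 {
		fmt.Println("tiny allocs:", v.Uint64())
	}
}
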
# HELP go_gc_limiter_last_enabled_gc_cycle GC cycle the last time the GC CPU limiter was enabled. This metric is useful for diagnosing the root cause of an out-of-memory error, because the limiter trades memory for CPU time when the GC's CPU time gets too high. This is most likely to occur with use of SetMemoryLimit. The first GC cycle is cycle 1, so a value of 0 indicates that it was never enabled. Sourced from /gc/limiter/last-enabled:gc-cycle.
# TYPE go_gc_limiter_last_enabled_gc_cycle gauge
go_gc_limiter_last_enabled_gc_cycle 0
# HELP go_gc_pauses_seconds Deprecated. Prefer the identical /sched/pauses/total/gc:seconds. Sourced from /gc/pauses:seconds.
# TYPE go_gc_pauses_seconds histogram
go_gc_pauses_seconds_bucket{le="6.399999999999999e-08"} 0
go_gc_pauses_seconds_bucket{le="6.399999999999999e-07"} 0
go_gc_pauses_seconds_bucket{le="7.167999999999999e-06"} 2
go_gc_pauses_seconds_bucket{le="8.191999999999999e-05"} 640
go_gc_pauses_seconds_bucket{le="0.0009175039999999999"} 1277
go_gc_pauses_seconds_bucket{le="0.010485759999999998"} 1295
go_gc_pauses_seconds_bucket{le="0.11744051199999998"} 1296
go_gc_pauses_seconds_bucket{le="+Inf"} 1296
go_gc_pauses_seconds_sum 0.083758336
go_gc_pauses_seconds_count 1296
# HELP go_gc_scan_globals_bytes The total amount of global variable space that is scannable. Sourced from /gc/scan/globals:bytes.
# TYPE go_gc_scan_globals_bytes gauge
go_gc_scan_globals_bytes 1.611386e+06
# HELP go_gc_scan_heap_bytes The total amount of heap space that is scannable. Sourced from /gc/scan/heap:bytes.
# TYPE go_gc_scan_heap_bytes gauge
go_gc_scan_heap_bytes 8.2323784e+07
# HELP go_gc_scan_stack_bytes The number of bytes of stack that were scanned last GC cycle. Sourced from /gc/scan/stack:bytes.
# TYPE go_gc_scan_stack_bytes gauge
go_gc_scan_stack_bytes 99248
# HELP go_gc_scan_total_bytes The total amount space that is scannable. Sum of all metrics in /gc/scan. Sourced from /gc/scan/total:bytes.
# TYPE go_gc_scan_total_bytes gauge
go_gc_scan_total_bytes 8.4034418e+07
# HELP go_gc_stack_starting_size_bytes The stack size of new goroutines. Sourced from /gc/stack/starting-size:bytes.
# TYPE go_gc_stack_starting_size_bytes gauge
go_gc_stack_starting_size_bytes 2048
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
go_goroutines 105
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
go_info{version="go1.25.3"} 1
# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes 6.45427816e+08
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.
# TYPE go_memstats_alloc_bytes_total counter
go_memstats_alloc_bytes_total 9.232557672e+10
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.
# TYPE go_memstats_buck_hash_sys_bytes gauge
go_memstats_buck_hash_sys_bytes 3.184165e+06
# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_frees_total counter
go_memstats_frees_total 3.24317765e+08
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.
# TYPE go_memstats_gc_sys_bytes gauge
go_memstats_gc_sys_bytes 9.704512e+06
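
Families like go_gc_*, go_sched_*, and go_sync_* alongside the MemStats-compatible go_memstats_* set are what github.com/prometheus/client_golang emits when its Go collector is registered with the runtime/metrics rules enabled. A minimal sketch, assuming client_golang; the :2112 address is an arbitrary placeholder:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()

	// MetricsAll maps every runtime/metrics key ("/gc/...", "/sched/...",
	// "/sync/...") into go_* series like the ones shown above.
	reg.MustRegister(collectors.NewGoCollector(
		collectors.WithGoCollectorRuntimeMetrics(collectors.MetricsAll),
	))

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":2112", nil))
}
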
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_heap_alloc_bytes gauge
go_memstats_heap_alloc_bytes 6.45427816e+08
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_idle_bytes gauge
go_memstats_heap_idle_bytes 5.14891776e+08
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes
# TYPE go_memstats_heap_inuse_bytes gauge
go_memstats_heap_inuse_bytes 7.98973952e+08
# HELP go_memstats_heap_objects Number of currently allocated objects. Equals to /gc/heap/objects:objects.
# TYPE go_memstats_heap_objects gauge
go_memstats_heap_objects 1.865689e+06
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.
# TYPE go_memstats_heap_released_bytes gauge
go_memstats_heap_released_bytes 4.34225152e+08
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_sys_bytes gauge
go_memstats_heap_sys_bytes 1.313865728e+09
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
go_memstats_last_gc_time_seconds 1.7649917615387464e+09
# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_mallocs_total counter
go_memstats_mallocs_total 3.26183454e+08
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.
# TYPE go_memstats_mcache_inuse_bytes gauge
go_memstats_mcache_inuse_bytes 4832
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.
# TYPE go_memstats_mcache_sys_bytes gauge
go_memstats_mcache_sys_bytes 15704
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.
# TYPE go_memstats_mspan_inuse_bytes gauge
go_memstats_mspan_inuse_bytes 5.27888e+06
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.
# TYPE go_memstats_mspan_sys_bytes gauge
go_memstats_mspan_sys_bytes 1.031424e+07
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.
# TYPE go_memstats_next_gc_bytes gauge
go_memstats_next_gc_bytes 9.10854631e+08
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.
# TYPE go_memstats_other_sys_bytes gauge
go_memstats_other_sys_bytes 1.746043e+06
# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.
# TYPE go_memstats_stack_inuse_bytes gauge
go_memstats_stack_inuse_bytes 3.145728e+06
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.
# TYPE go_memstats_stack_sys_bytes gauge
go_memstats_stack_sys_bytes 3.145728e+06
# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte.
# TYPE go_memstats_sys_bytes gauge
go_memstats_sys_bytes 1.34197612e+09
# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads.
# TYPE go_sched_gomaxprocs_threads gauge
go_sched_gomaxprocs_threads 4
# HELP go_sched_goroutines_goroutines Count of live goroutines. Sourced from /sched/goroutines:goroutines.
# TYPE go_sched_goroutines_goroutines gauge
go_sched_goroutines_goroutines 105
# HELP go_sched_latencies_seconds Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running. Bucket counts increase monotonically. Sourced from /sched/latencies:seconds.
# TYPE go_sched_latencies_seconds histogram
go_sched_latencies_seconds_bucket{le="6.399999999999999e-08"} 352546
go_sched_latencies_seconds_bucket{le="6.399999999999999e-07"} 385457
go_sched_latencies_seconds_bucket{le="7.167999999999999e-06"} 456377
go_sched_latencies_seconds_bucket{le="8.191999999999999e-05"} 527517
go_sched_latencies_seconds_bucket{le="0.0009175039999999999"} 532705
go_sched_latencies_seconds_bucket{le="0.010485759999999998"} 534270
go_sched_latencies_seconds_bucket{le="0.11744051199999998"} 534336
go_sched_latencies_seconds_bucket{le="+Inf"} 534336
go_sched_latencies_seconds_sum 3.110381504
go_sched_latencies_seconds_count 534336
# HELP go_sched_pauses_stopping_gc_seconds Distribution of individual GC-related stop-the-world stopping latencies. This is the time it takes from deciding to stop the world until all Ps are stopped. This is a subset of the total GC-related stop-the-world time (/sched/pauses/total/gc:seconds). During this time, some threads may be executing. Bucket counts increase monotonically. Sourced from /sched/pauses/stopping/gc:seconds.
# TYPE go_sched_pauses_stopping_gc_seconds histogram
go_sched_pauses_stopping_gc_seconds_bucket{le="6.399999999999999e-08"} 0
go_sched_pauses_stopping_gc_seconds_bucket{le="6.399999999999999e-07"} 219
go_sched_pauses_stopping_gc_seconds_bucket{le="7.167999999999999e-06"} 870
go_sched_pauses_stopping_gc_seconds_bucket{le="8.191999999999999e-05"} 1232
go_sched_pauses_stopping_gc_seconds_bucket{le="0.0009175039999999999"} 1280
go_sched_pauses_stopping_gc_seconds_bucket{le="0.010485759999999998"} 1295
go_sched_pauses_stopping_gc_seconds_bucket{le="0.11744051199999998"} 1296
go_sched_pauses_stopping_gc_seconds_bucket{le="+Inf"} 1296
go_sched_pauses_stopping_gc_seconds_sum 0.031205952
go_sched_pauses_stopping_gc_seconds_count 1296
# HELP go_sched_pauses_stopping_other_seconds Distribution of individual non-GC-related stop-the-world stopping latencies. This is the time it takes from deciding to stop the world until all Ps are stopped. This is a subset of the total non-GC-related stop-the-world time (/sched/pauses/total/other:seconds). During this time, some threads may be executing. Bucket counts increase monotonically. Sourced from /sched/pauses/stopping/other:seconds.
# TYPE go_sched_pauses_stopping_other_seconds histogram
go_sched_pauses_stopping_other_seconds_bucket{le="6.399999999999999e-08"} 0
go_sched_pauses_stopping_other_seconds_bucket{le="6.399999999999999e-07"} 0
go_sched_pauses_stopping_other_seconds_bucket{le="7.167999999999999e-06"} 0
go_sched_pauses_stopping_other_seconds_bucket{le="8.191999999999999e-05"} 0
go_sched_pauses_stopping_other_seconds_bucket{le="0.0009175039999999999"} 0
go_sched_pauses_stopping_other_seconds_bucket{le="0.010485759999999998"} 0
go_sched_pauses_stopping_other_seconds_bucket{le="0.11744051199999998"} 0
go_sched_pauses_stopping_other_seconds_bucket{le="+Inf"} 0
go_sched_pauses_stopping_other_seconds_sum 0
go_sched_pauses_stopping_other_seconds_count 0
# HELP go_sched_pauses_total_gc_seconds Distribution of individual GC-related stop-the-world pause latencies. This is the time from deciding to stop the world until the world is started again. Some of this time is spent getting all threads to stop (this is measured directly in /sched/pauses/stopping/gc:seconds), during which some threads may still be running. Bucket counts increase monotonically. Sourced from /sched/pauses/total/gc:seconds.
# TYPE go_sched_pauses_total_gc_seconds histogram
go_sched_pauses_total_gc_seconds_bucket{le="6.399999999999999e-08"} 0
go_sched_pauses_total_gc_seconds_bucket{le="6.399999999999999e-07"} 0
go_sched_pauses_total_gc_seconds_bucket{le="7.167999999999999e-06"} 2
go_sched_pauses_total_gc_seconds_bucket{le="8.191999999999999e-05"} 640
go_sched_pauses_total_gc_seconds_bucket{le="0.0009175039999999999"} 1277
go_sched_pauses_total_gc_seconds_bucket{le="0.010485759999999998"} 1295
go_sched_pauses_total_gc_seconds_bucket{le="0.11744051199999998"} 1296
go_sched_pauses_total_gc_seconds_bucket{le="+Inf"} 1296
go_sched_pauses_total_gc_seconds_sum 0.083758336
go_sched_pauses_total_gc_seconds_count 1296
# HELP go_sched_pauses_total_other_seconds Distribution of individual non-GC-related stop-the-world pause latencies. This is the time from deciding to stop the world until the world is started again. Some of this time is spent getting all threads to stop (measured directly in /sched/pauses/stopping/other:seconds). Bucket counts increase monotonically. Sourced from /sched/pauses/total/other:seconds.
# TYPE go_sched_pauses_total_other_seconds histogram
go_sched_pauses_total_other_seconds_bucket{le="6.399999999999999e-08"} 0
go_sched_pauses_total_other_seconds_bucket{le="6.399999999999999e-07"} 0
go_sched_pauses_total_other_seconds_bucket{le="7.167999999999999e-06"} 0
go_sched_pauses_total_other_seconds_bucket{le="8.191999999999999e-05"} 0
go_sched_pauses_total_other_seconds_bucket{le="0.0009175039999999999"} 0
go_sched_pauses_total_other_seconds_bucket{le="0.010485759999999998"} 0
go_sched_pauses_total_other_seconds_bucket{le="0.11744051199999998"} 0
go_sched_pauses_total_other_seconds_bucket{le="+Inf"} 0
go_sched_pauses_total_other_seconds_sum 0
go_sched_pauses_total_other_seconds_count 0
# HELP go_sync_mutex_wait_total_seconds_total Approximate cumulative time goroutines have spent blocked on a sync.Mutex, sync.RWMutex, or runtime-internal lock. This metric is useful for identifying global changes in lock contention. Collect a mutex or block profile using the runtime/pprof package for more detailed contention data. Sourced from /sync/mutex/wait/total:seconds.
# TYPE go_sync_mutex_wait_total_seconds_total counter
go_sync_mutex_wait_total_seconds_total 1.11890924
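
The HELP text above points to runtime/pprof for detailed contention data. A minimal sketch of enabling mutex profiling and serving it over HTTP; the localhost:6060 address is the conventional pprof placeholder:

package main

import (
	"log"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof/ handlers, including /debug/pprof/mutex
	"runtime"
)

func main() {
	// Mutex contention events are not sampled by default; record roughly
	// one in every five events (passing 0 turns sampling back off).
	runtime.SetMutexProfileFraction(5)

	// Inspect with: go tool pprof http://localhost:6060/debug/pprof/mutex
	log.Fatal(http.ListenAndServe("localhost:6060", nil))
}
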
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
go_threads 10
# HELP net_conntrack_dialer_conn_attempted_total Total number of connections attempted by the given dialer a given name.
# TYPE net_conntrack_dialer_conn_attempted_total counter
net_conntrack_dialer_conn_attempted_total{dialer_name="alertmanager"} 2
net_conntrack_dialer_conn_attempted_total{dialer_name="cadvisor"} 1
net_conntrack_dialer_conn_attempted_total{dialer_name="default"} 0
net_conntrack_dialer_conn_attempted_total{dialer_name="grafana"} 5
net_conntrack_dialer_conn_attempted_total{dialer_name="loki"} 1
net_conntrack_dialer_conn_attempted_total{dialer_name="node-exporter"} 1
net_conntrack_dialer_conn_attempted_total{dialer_name="postgres"} 160
net_conntrack_dialer_conn_attempted_total{dialer_name="prometheus"} 1
net_conntrack_dialer_conn_attempted_total{dialer_name="redis"} 14
net_conntrack_dialer_conn_attempted_total{dialer_name="tempo"} 1
net_conntrack_dialer_conn_attempted_total{dialer_name="traefik"} 1
# HELP net_conntrack_dialer_conn_closed_total Total number of connections closed which originated from the dialer of a given name.
# TYPE net_conntrack_dialer_conn_closed_total counter
net_conntrack_dialer_conn_closed_total{dialer_name="alertmanager"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="cadvisor"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="default"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="grafana"} 2
net_conntrack_dialer_conn_closed_total{dialer_name="loki"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="node-exporter"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="postgres"} 12
net_conntrack_dialer_conn_closed_total{dialer_name="prometheus"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="redis"} 11
net_conntrack_dialer_conn_closed_total{dialer_name="tempo"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="traefik"} 0
# HELP net_conntrack_dialer_conn_established_total Total number of connections successfully established by the given dialer a given name.
# TYPE net_conntrack_dialer_conn_established_total counter
net_conntrack_dialer_conn_established_total{dialer_name="alertmanager"} 2
net_conntrack_dialer_conn_established_total{dialer_name="cadvisor"} 1
net_conntrack_dialer_conn_established_total{dialer_name="default"} 0
net_conntrack_dialer_conn_established_total{dialer_name="grafana"} 3
net_conntrack_dialer_conn_established_total{dialer_name="loki"} 1
net_conntrack_dialer_conn_established_total{dialer_name="node-exporter"} 1
net_conntrack_dialer_conn_established_total{dialer_name="postgres"} 15
net_conntrack_dialer_conn_established_total{dialer_name="prometheus"} 1
net_conntrack_dialer_conn_established_total{dialer_name="redis"} 14
net_conntrack_dialer_conn_established_total{dialer_name="tempo"} 1
net_conntrack_dialer_conn_established_total{dialer_name="traefik"} 1
# HELP net_conntrack_dialer_conn_failed_total Total number of connections failed to dial by the dialer a given name.
# TYPE net_conntrack_dialer_conn_failed_total counter
net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="cadvisor",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="cadvisor",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="cadvisor",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="cadvisor",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="grafana",reason="refused"} 1
net_conntrack_dialer_conn_failed_total{dialer_name="grafana",reason="resolution"} 1
net_conntrack_dialer_conn_failed_total{dialer_name="grafana",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="grafana",reason="unknown"} 1
net_conntrack_dialer_conn_failed_total{dialer_name="loki",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="loki",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="loki",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="loki",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="node-exporter",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="node-exporter",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="node-exporter",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="node-exporter",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="postgres",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="postgres",reason="resolution"} 145
net_conntrack_dialer_conn_failed_total{dialer_name="postgres",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="postgres",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="redis",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="redis",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="redis",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="redis",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="tempo",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="tempo",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="tempo",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="tempo",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="traefik",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="traefik",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="traefik",reason="timeout"} 0 net_conntrack_dialer_conn_failed_total{dialer_name="traefik",reason="unknown"} 0 # HELP net_conntrack_listener_conn_accepted_total Total number of connections opened to the listener of a given name. # TYPE net_conntrack_listener_conn_accepted_total counter net_conntrack_listener_conn_accepted_total{listener_name="http"} 232 # HELP net_conntrack_listener_conn_closed_total Total number of connections closed that were made to the listener of a given name. # TYPE net_conntrack_listener_conn_closed_total counter net_conntrack_listener_conn_closed_total{listener_name="http"} 229 # HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. # TYPE process_cpu_seconds_total counter process_cpu_seconds_total 3435.9 # HELP process_max_fds Maximum number of open file descriptors. # TYPE process_max_fds gauge process_max_fds 1.048576e+06 # HELP process_network_receive_bytes_total Number of bytes received by the process over the network. # TYPE process_network_receive_bytes_total counter process_network_receive_bytes_total 5.788586374e+09 # HELP process_network_transmit_bytes_total Number of bytes sent by the process over the network. # TYPE process_network_transmit_bytes_total counter process_network_transmit_bytes_total 2.296052e+08 # HELP process_open_fds Number of open file descriptors. # TYPE process_open_fds gauge process_open_fds 54 # HELP process_resident_memory_bytes Resident memory size in bytes. # TYPE process_resident_memory_bytes gauge process_resident_memory_bytes 9.56932096e+08 # HELP process_start_time_seconds Start time of the process since unix epoch in seconds. # TYPE process_start_time_seconds gauge process_start_time_seconds 1.76491840345e+09 # HELP process_virtual_memory_bytes Virtual memory size in bytes. # TYPE process_virtual_memory_bytes gauge process_virtual_memory_bytes 5.819895808e+09 # HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. # TYPE process_virtual_memory_max_bytes gauge process_virtual_memory_max_bytes 1.8446744073709552e+19 # HELP prometheus_api_notification_active_subscribers The current number of active notification subscribers. # TYPE prometheus_api_notification_active_subscribers gauge prometheus_api_notification_active_subscribers 0 # HELP prometheus_api_notification_updates_dropped_total Total number of notification updates dropped. # TYPE prometheus_api_notification_updates_dropped_total counter prometheus_api_notification_updates_dropped_total 0 # HELP prometheus_api_notification_updates_sent_total Total number of notification updates sent. # TYPE prometheus_api_notification_updates_sent_total counter prometheus_api_notification_updates_sent_total 0 # HELP prometheus_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which prometheus was built, and the goos and goarch for the build. # TYPE prometheus_build_info gauge prometheus_build_info{branch="HEAD",goarch="amd64",goos="linux",goversion="go1.25.3",revision="0a41f0000705c69ab8e0f9a723fc73e39ed62b07",tags="netgo,builtinassets",version="3.7.3"} 1 # HELP prometheus_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload. # TYPE prometheus_config_last_reload_success_timestamp_seconds gauge prometheus_config_last_reload_success_timestamp_seconds 1.7649184209374745e+09 # HELP prometheus_config_last_reload_successful Whether the last configuration reload attempt was successful. 
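
The net_conntrack_* families with dialer_name and listener_name labels follow the conventions of the mwitkow/go-conntrack instrumentation. A sketch of how such series are typically produced, assuming that library; the "upstream" dialer name and the :8080 address are placeholders, not taken from the output above:

package main

import (
	"net"
	"net/http"

	conntrack "github.com/mwitkow/go-conntrack"
)

func main() {
	// Outbound: dials through this transport are counted under
	// net_conntrack_dialer_conn_attempted_total{dialer_name="upstream"} and friends.
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: conntrack.NewDialContextFunc(
				conntrack.DialWithName("upstream"),
			),
		},
	}
	_ = client

	// Inbound: accepted/closed connections appear under listener_name="http".
	ln, err := net.Listen("tcp", ":8080")
	if err != nil {
		panic(err)
	}
	_ = http.Serve(conntrack.NewListener(ln, conntrack.TrackWithName("http")), nil)
}
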
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
# TYPE process_cpu_seconds_total counter
process_cpu_seconds_total 3435.9
# HELP process_max_fds Maximum number of open file descriptors.
# TYPE process_max_fds gauge
process_max_fds 1.048576e+06
# HELP process_network_receive_bytes_total Number of bytes received by the process over the network.
# TYPE process_network_receive_bytes_total counter
process_network_receive_bytes_total 5.788586374e+09
# HELP process_network_transmit_bytes_total Number of bytes sent by the process over the network.
# TYPE process_network_transmit_bytes_total counter
process_network_transmit_bytes_total 2.296052e+08
# HELP process_open_fds Number of open file descriptors.
# TYPE process_open_fds gauge
process_open_fds 54
# HELP process_resident_memory_bytes Resident memory size in bytes.
# TYPE process_resident_memory_bytes gauge
process_resident_memory_bytes 9.56932096e+08
# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
# TYPE process_start_time_seconds gauge
process_start_time_seconds 1.76491840345e+09
# HELP process_virtual_memory_bytes Virtual memory size in bytes.
# TYPE process_virtual_memory_bytes gauge
process_virtual_memory_bytes 5.819895808e+09
# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.
# TYPE process_virtual_memory_max_bytes gauge
process_virtual_memory_max_bytes 1.8446744073709552e+19
# HELP prometheus_api_notification_active_subscribers The current number of active notification subscribers.
# TYPE prometheus_api_notification_active_subscribers gauge
prometheus_api_notification_active_subscribers 0
# HELP prometheus_api_notification_updates_dropped_total Total number of notification updates dropped.
# TYPE prometheus_api_notification_updates_dropped_total counter
prometheus_api_notification_updates_dropped_total 0
# HELP prometheus_api_notification_updates_sent_total Total number of notification updates sent.
# TYPE prometheus_api_notification_updates_sent_total counter
prometheus_api_notification_updates_sent_total 0
# HELP prometheus_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which prometheus was built, and the goos and goarch for the build.
# TYPE prometheus_build_info gauge
prometheus_build_info{branch="HEAD",goarch="amd64",goos="linux",goversion="go1.25.3",revision="0a41f0000705c69ab8e0f9a723fc73e39ed62b07",tags="netgo,builtinassets",version="3.7.3"} 1
# HELP prometheus_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload.
# TYPE prometheus_config_last_reload_success_timestamp_seconds gauge
prometheus_config_last_reload_success_timestamp_seconds 1.7649184209374745e+09
# HELP prometheus_config_last_reload_successful Whether the last configuration reload attempt was successful.
# TYPE prometheus_config_last_reload_successful gauge
prometheus_config_last_reload_successful 1
# HELP prometheus_engine_queries The current number of queries being executed or waiting.
# TYPE prometheus_engine_queries gauge
prometheus_engine_queries 0
# HELP prometheus_engine_queries_concurrent_max The max number of concurrent queries.
# TYPE prometheus_engine_queries_concurrent_max gauge
prometheus_engine_queries_concurrent_max 20
# HELP prometheus_engine_query_duration_seconds Query timings
# TYPE prometheus_engine_query_duration_seconds summary
prometheus_engine_query_duration_seconds{slice="inner_eval",quantile="0.5"} 3.2154e-05
prometheus_engine_query_duration_seconds{slice="inner_eval",quantile="0.9"} 0.00095677
prometheus_engine_query_duration_seconds{slice="inner_eval",quantile="0.99"} 0.005984941
prometheus_engine_query_duration_seconds_sum{slice="inner_eval"} 132.8875740009993
prometheus_engine_query_duration_seconds_count{slice="inner_eval"} 346721
prometheus_engine_query_duration_seconds{slice="prepare_time",quantile="0.5"} 1.2251e-05
prometheus_engine_query_duration_seconds{slice="prepare_time",quantile="0.9"} 4.3801e-05
prometheus_engine_query_duration_seconds{slice="prepare_time",quantile="0.99"} 9.0464e-05
prometheus_engine_query_duration_seconds_sum{slice="prepare_time"} 7.469906166000303
prometheus_engine_query_duration_seconds_count{slice="prepare_time"} 346721
prometheus_engine_query_duration_seconds{slice="queue_time",quantile="0.5"} 4.274e-06
prometheus_engine_query_duration_seconds{slice="queue_time",quantile="0.9"} 1.0492e-05
prometheus_engine_query_duration_seconds{slice="queue_time",quantile="0.99"} 2.316e-05
prometheus_engine_query_duration_seconds_sum{slice="queue_time"} 4.79980005500008
prometheus_engine_query_duration_seconds_count{slice="queue_time"} 693442
prometheus_engine_query_duration_seconds{slice="result_sort",quantile="0.5"} 8.15e-07
prometheus_engine_query_duration_seconds{slice="result_sort",quantile="0.9"} 1.064e-06
prometheus_engine_query_duration_seconds{slice="result_sort",quantile="0.99"} 1.064e-06
prometheus_engine_query_duration_seconds_sum{slice="result_sort"} 0.0011243730000000012
prometheus_engine_query_duration_seconds_count{slice="result_sort"} 1344
# HELP prometheus_engine_query_log_enabled State of the query log.
# TYPE prometheus_engine_query_log_enabled gauge
prometheus_engine_query_log_enabled 0
# HELP prometheus_engine_query_log_failures_total The number of query log failures.
# TYPE prometheus_engine_query_log_failures_total counter
prometheus_engine_query_log_failures_total 0
# HELP prometheus_engine_query_samples_total The total number of samples loaded by all queries.
# TYPE prometheus_engine_query_samples_total counter
prometheus_engine_query_samples_total 1.89524519e+08
# HELP prometheus_http_request_duration_seconds Histogram of latencies for HTTP requests.
# TYPE prometheus_http_request_duration_seconds histogram
prometheus_http_request_duration_seconds_bucket{handler="/",le="0.1"} 10
prometheus_http_request_duration_seconds_bucket{handler="/",le="0.2"} 10
prometheus_http_request_duration_seconds_bucket{handler="/",le="0.4"} 10
prometheus_http_request_duration_seconds_bucket{handler="/",le="1"} 10
prometheus_http_request_duration_seconds_bucket{handler="/",le="3"} 10
prometheus_http_request_duration_seconds_bucket{handler="/",le="8"} 10
prometheus_http_request_duration_seconds_bucket{handler="/",le="20"} 10
prometheus_http_request_duration_seconds_bucket{handler="/",le="60"} 10
prometheus_http_request_duration_seconds_bucket{handler="/",le="120"} 10
prometheus_http_request_duration_seconds_bucket{handler="/",le="+Inf"} 10
prometheus_http_request_duration_seconds_sum{handler="/"} 0.00037476
prometheus_http_request_duration_seconds_count{handler="/"} 10
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="0.1"} 10480
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="0.2"} 10480
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="0.4"} 10480
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="1"} 10480
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="3"} 10480
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="8"} 10480
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="20"} 10480
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="60"} 10480
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="120"} 10480
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="+Inf"} 10480
prometheus_http_request_duration_seconds_sum{handler="/-/healthy"} 0.3043636770000003
prometheus_http_request_duration_seconds_count{handler="/-/healthy"} 10480
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="0.1"} 2
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="0.2"} 2
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="0.4"} 2
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="1"} 2
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="3"} 2
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="8"} 2
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="20"} 2
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="60"} 2
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="120"} 2
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="+Inf"} 2
prometheus_http_request_duration_seconds_sum{handler="/-/ready"} 7.1846e-05
prometheus_http_request_duration_seconds_count{handler="/-/ready"} 2
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="0.1"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="0.2"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="0.4"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="1"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="3"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="8"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="20"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="60"} 3
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="120"} 3 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="+Inf"} 3 prometheus_http_request_duration_seconds_sum{handler="/api/v1/alerts"} 0.0035931850000000005 prometheus_http_request_duration_seconds_count{handler="/api/v1/alerts"} 3 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="0.1"} 2 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="0.2"} 3 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="0.4"} 3 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="1"} 3 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="3"} 3 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="8"} 3 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="20"} 3 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="60"} 3 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="120"} 3 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="+Inf"} 3 prometheus_http_request_duration_seconds_sum{handler="/api/v1/label/:name/values"} 0.13154909899999997 prometheus_http_request_duration_seconds_count{handler="/api/v1/label/:name/values"} 3 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="0.1"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="0.2"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="0.4"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="1"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="3"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="8"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="20"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="60"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="120"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="+Inf"} 1 prometheus_http_request_duration_seconds_sum{handler="/api/v1/labels"} 0.074275066 prometheus_http_request_duration_seconds_count{handler="/api/v1/labels"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="0.1"} 2 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="0.2"} 2 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="0.4"} 2 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="1"} 2 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="3"} 2 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="8"} 2 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="20"} 2 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="60"} 2 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="120"} 2 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="+Inf"} 2 prometheus_http_request_duration_seconds_sum{handler="/api/v1/metadata"} 0.034806368 prometheus_http_request_duration_seconds_count{handler="/api/v1/metadata"} 2 
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="0.1"} 1187 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="0.2"} 1187 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="0.4"} 1187 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="1"} 1187 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="3"} 1187 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="8"} 1187 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="20"} 1187 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="60"} 1187 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="120"} 1187 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="+Inf"} 1187 prometheus_http_request_duration_seconds_sum{handler="/api/v1/query"} 2.0611046739999974 prometheus_http_request_duration_seconds_count{handler="/api/v1/query"} 1187 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="0.1"} 2 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="0.2"} 2 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="0.4"} 2 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="1"} 2 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="3"} 2 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="8"} 2 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="20"} 2 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="60"} 2 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="120"} 2 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="+Inf"} 2 prometheus_http_request_duration_seconds_sum{handler="/api/v1/query_exemplars"} 0.036359624 prometheus_http_request_duration_seconds_count{handler="/api/v1/query_exemplars"} 2 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="0.1"} 1342 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="0.2"} 1344 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="0.4"} 1344 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="1"} 1344 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="3"} 1344 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="8"} 1344 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="20"} 1344 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="60"} 1344 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="120"} 1344 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="+Inf"} 1344 prometheus_http_request_duration_seconds_sum{handler="/api/v1/query_range"} 2.5160097189999986 prometheus_http_request_duration_seconds_count{handler="/api/v1/query_range"} 1344 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="0.1"} 3 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="0.2"} 3 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="0.4"} 3 
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="1"} 3 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="3"} 3 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="8"} 3 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="20"} 3 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="60"} 3 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="120"} 3 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="+Inf"} 3 prometheus_http_request_duration_seconds_sum{handler="/api/v1/rules"} 0.07336162300000001 prometheus_http_request_duration_seconds_count{handler="/api/v1/rules"} 3 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="0.1"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="0.2"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="0.4"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="1"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="3"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="8"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="20"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="60"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="120"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="+Inf"} 1 prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/buildinfo"} 0.008973117 prometheus_http_request_duration_seconds_count{handler="/api/v1/status/buildinfo"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="0.1"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="0.2"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="0.4"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="1"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="3"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="8"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="20"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="60"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="120"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="+Inf"} 1 prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/runtimeinfo"} 0.002386944 prometheus_http_request_duration_seconds_count{handler="/api/v1/status/runtimeinfo"} 1 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="0.1"} 74 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="0.2"} 74 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="0.4"} 74 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="1"} 74 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="3"} 74 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="8"} 74 
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="20"} 74 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="60"} 74 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="120"} 74 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="+Inf"} 74 prometheus_http_request_duration_seconds_sum{handler="/api/v1/targets"} 0.129125119 prometheus_http_request_duration_seconds_count{handler="/api/v1/targets"} 74 prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="0.1"} 5 prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="0.2"} 5 prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="0.4"} 5 prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="1"} 5 prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="3"} 5 prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="8"} 5 prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="20"} 5 prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="60"} 5 prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="120"} 5 prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="+Inf"} 5 prometheus_http_request_duration_seconds_sum{handler="/assets/*filepath"} 0.256858572 prometheus_http_request_duration_seconds_count{handler="/assets/*filepath"} 5 prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="0.1"} 1 prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="0.2"} 1 prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="0.4"} 1 prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="1"} 1 prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="3"} 1 prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="8"} 1 prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="20"} 1 prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="60"} 1 prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="120"} 1 prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="+Inf"} 1 prometheus_http_request_duration_seconds_sum{handler="/favicon.svg"} 0.033740939 prometheus_http_request_duration_seconds_count{handler="/favicon.svg"} 1 prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.1"} 4896 prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.2"} 4896 prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.4"} 4896 prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="1"} 4896 prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="3"} 4896 prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="8"} 4896 prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="20"} 4896 prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="60"} 4896 prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="120"} 4896 prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="+Inf"} 4896 prometheus_http_request_duration_seconds_sum{handler="/metrics"} 53.4796296140002 prometheus_http_request_duration_seconds_count{handler="/metrics"} 4896 
prometheus_http_request_duration_seconds_bucket{handler="/query",le="0.1"} 8 prometheus_http_request_duration_seconds_bucket{handler="/query",le="0.2"} 8 prometheus_http_request_duration_seconds_bucket{handler="/query",le="0.4"} 8 prometheus_http_request_duration_seconds_bucket{handler="/query",le="1"} 8 prometheus_http_request_duration_seconds_bucket{handler="/query",le="3"} 8 prometheus_http_request_duration_seconds_bucket{handler="/query",le="8"} 8 prometheus_http_request_duration_seconds_bucket{handler="/query",le="20"} 8 prometheus_http_request_duration_seconds_bucket{handler="/query",le="60"} 8 prometheus_http_request_duration_seconds_bucket{handler="/query",le="120"} 8 prometheus_http_request_duration_seconds_bucket{handler="/query",le="+Inf"} 8 prometheus_http_request_duration_seconds_sum{handler="/query"} 0.055080703999999994 prometheus_http_request_duration_seconds_count{handler="/query"} 8 # HELP prometheus_http_requests_total Counter of HTTP requests. # TYPE prometheus_http_requests_total counter prometheus_http_requests_total{code="200",handler="/"} 0 prometheus_http_requests_total{code="200",handler="/-/healthy"} 10480 prometheus_http_requests_total{code="200",handler="/-/quit"} 0 prometheus_http_requests_total{code="200",handler="/-/ready"} 2 prometheus_http_requests_total{code="200",handler="/-/reload"} 0 prometheus_http_requests_total{code="200",handler="/alertmanager-discovery"} 0 prometheus_http_requests_total{code="200",handler="/alerts"} 0 prometheus_http_requests_total{code="200",handler="/api/v1/*path"} 0 prometheus_http_requests_total{code="200",handler="/api/v1/admin/tsdb/clean_tombstones"} 0 prometheus_http_requests_total{code="200",handler="/api/v1/admin/tsdb/delete_series"} 0 prometheus_http_requests_total{code="200",handler="/api/v1/admin/tsdb/snapshot"} 0 prometheus_http_requests_total{code="200",handler="/api/v1/alertmanagers"} 0 prometheus_http_requests_total{code="200",handler="/api/v1/alerts"} 3 prometheus_http_requests_total{code="200",handler="/api/v1/format_query"} 0 prometheus_http_requests_total{code="200",handler="/api/v1/label/:name/values"} 3 prometheus_http_requests_total{code="200",handler="/api/v1/labels"} 1 prometheus_http_requests_total{code="200",handler="/api/v1/metadata"} 2 prometheus_http_requests_total{code="200",handler="/api/v1/notifications"} 0 prometheus_http_requests_total{code="200",handler="/api/v1/notifications/live"} 0 prometheus_http_requests_total{code="200",handler="/api/v1/otlp/v1/metrics"} 0 prometheus_http_requests_total{code="200",handler="/api/v1/parse_query"} 0 prometheus_http_requests_total{code="200",handler="/api/v1/query"} 1187 prometheus_http_requests_total{code="200",handler="/api/v1/query_exemplars"} 2 prometheus_http_requests_total{code="200",handler="/api/v1/query_range"} 1344 prometheus_http_requests_total{code="200",handler="/api/v1/read"} 0 prometheus_http_requests_total{code="200",handler="/api/v1/rules"} 3 prometheus_http_requests_total{code="200",handler="/api/v1/scrape_pools"} 0 prometheus_http_requests_total{code="200",handler="/api/v1/series"} 0 prometheus_http_requests_total{code="200",handler="/api/v1/status/buildinfo"} 1 prometheus_http_requests_total{code="200",handler="/api/v1/status/config"} 0 prometheus_http_requests_total{code="200",handler="/api/v1/status/flags"} 0 prometheus_http_requests_total{code="200",handler="/api/v1/status/runtimeinfo"} 1 prometheus_http_requests_total{code="200",handler="/api/v1/status/tsdb"} 0 
prometheus_http_requests_total{code="200",handler="/api/v1/status/tsdb/blocks"} 0 prometheus_http_requests_total{code="200",handler="/api/v1/status/walreplay"} 0 prometheus_http_requests_total{code="200",handler="/api/v1/targets"} 74 prometheus_http_requests_total{code="200",handler="/api/v1/targets/metadata"} 0 prometheus_http_requests_total{code="200",handler="/api/v1/write"} 0 prometheus_http_requests_total{code="200",handler="/assets/*filepath"} 5 prometheus_http_requests_total{code="200",handler="/classic/static/*filepath"} 0 prometheus_http_requests_total{code="200",handler="/config"} 0 prometheus_http_requests_total{code="200",handler="/consoles/*filepath"} 0 prometheus_http_requests_total{code="200",handler="/debug/*subpath"} 0 prometheus_http_requests_total{code="200",handler="/favicon.ico"} 0 prometheus_http_requests_total{code="200",handler="/favicon.svg"} 1 prometheus_http_requests_total{code="200",handler="/federate"} 0 prometheus_http_requests_total{code="200",handler="/flags"} 0 prometheus_http_requests_total{code="200",handler="/graph"} 0 prometheus_http_requests_total{code="200",handler="/manifest.json"} 0 prometheus_http_requests_total{code="200",handler="/metrics"} 4896 prometheus_http_requests_total{code="200",handler="/query"} 8 prometheus_http_requests_total{code="200",handler="/rules"} 0 prometheus_http_requests_total{code="200",handler="/service-discovery"} 0 prometheus_http_requests_total{code="200",handler="/status"} 0 prometheus_http_requests_total{code="200",handler="/targets"} 0 prometheus_http_requests_total{code="200",handler="/tsdb-status"} 0 prometheus_http_requests_total{code="200",handler="/version"} 0 prometheus_http_requests_total{code="302",handler="/"} 10 # HELP prometheus_http_response_size_bytes Histogram of response size for HTTP requests. 
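
Histogram families like prometheus_http_request_duration_seconds are normally consumed through PromQL rather than read raw. A minimal sketch, assuming the client_golang API client and a Prometheus at the placeholder address http://localhost:9090, deriving a per-handler p99 latency from the _bucket series above:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

func main() {
	client, err := api.NewClient(api.Config{Address: "http://localhost:9090"})
	if err != nil {
		log.Fatal(err)
	}
	promAPI := v1.NewAPI(client)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// 99th-percentile request latency per handler over the last 5m.
	const q = `histogram_quantile(0.99, sum by (handler, le) (rate(prometheus_http_request_duration_seconds_bucket[5m])))`
	result, warnings, err := promAPI.Query(ctx, q, time.Now())
	if err != nil {
		log.Fatal(err)
	}
	if len(warnings) > 0 {
		fmt.Println("warnings:", warnings)
	}
	fmt.Println(result)
}
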
# HELP prometheus_http_response_size_bytes Histogram of response size for HTTP requests.
# TYPE prometheus_http_response_size_bytes histogram
prometheus_http_response_size_bytes_bucket{handler="/",le="100"} 10
prometheus_http_response_size_bytes_bucket{handler="/",le="1000"} 10
prometheus_http_response_size_bytes_bucket{handler="/",le="10000"} 10
prometheus_http_response_size_bytes_bucket{handler="/",le="100000"} 10
prometheus_http_response_size_bytes_bucket{handler="/",le="1e+06"} 10
prometheus_http_response_size_bytes_bucket{handler="/",le="1e+07"} 10
prometheus_http_response_size_bytes_bucket{handler="/",le="1e+08"} 10
prometheus_http_response_size_bytes_bucket{handler="/",le="1e+09"} 10
prometheus_http_response_size_bytes_bucket{handler="/",le="+Inf"} 10
prometheus_http_response_size_bytes_sum{handler="/"} 290
prometheus_http_response_size_bytes_count{handler="/"} 10
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="100"} 10480
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1000"} 10480
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="10000"} 10480
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="100000"} 10480
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+06"} 10480
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+07"} 10480
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+08"} 10480
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+09"} 10480
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="+Inf"} 10480
prometheus_http_response_size_bytes_sum{handler="/-/healthy"} 314400
prometheus_http_response_size_bytes_count{handler="/-/healthy"} 10480
prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="100"} 2
prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1000"} 2
prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="10000"} 2
prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="100000"} 2
prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+06"} 2
prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+07"} 2
prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+08"} 2
prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+09"} 2
prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="+Inf"} 2
prometheus_http_response_size_bytes_sum{handler="/-/ready"} 56
prometheus_http_response_size_bytes_count{handler="/-/ready"} 2
prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="100"} 0
prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="1000"} 0
prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="10000"} 3
prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="100000"} 3
prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="1e+06"} 3
prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="1e+07"} 3
prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="1e+08"} 3
prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="1e+09"} 3
prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="+Inf"} 3
prometheus_http_response_size_bytes_sum{handler="/api/v1/alerts"} 18564
prometheus_http_response_size_bytes_count{handler="/api/v1/alerts"} 3
prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="100"} 0
prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1000"} 1
prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="10000"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="100000"} 3 prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+06"} 3 prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+07"} 3 prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+08"} 3 prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+09"} 3 prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="+Inf"} 3 prometheus_http_response_size_bytes_sum{handler="/api/v1/label/:name/values"} 31779 prometheus_http_response_size_bytes_count{handler="/api/v1/label/:name/values"} 3 prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="100"} 0 prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1000"} 0 prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="10000"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="100000"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1e+06"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1e+07"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1e+08"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1e+09"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="+Inf"} 1 prometheus_http_response_size_bytes_sum{handler="/api/v1/labels"} 1649 prometheus_http_response_size_bytes_count{handler="/api/v1/labels"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="100"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1000"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="10000"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="100000"} 2 prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1e+06"} 2 prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1e+07"} 2 prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1e+08"} 2 prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1e+09"} 2 prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="+Inf"} 2 prometheus_http_response_size_bytes_sum{handler="/api/v1/metadata"} 38958 prometheus_http_response_size_bytes_count{handler="/api/v1/metadata"} 2 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="100"} 15 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1000"} 1187 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="10000"} 1187 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="100000"} 1187 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+06"} 1187 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+07"} 1187 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+08"} 1187 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+09"} 1187 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="+Inf"} 1187 prometheus_http_response_size_bytes_sum{handler="/api/v1/query"} 330352 prometheus_http_response_size_bytes_count{handler="/api/v1/query"} 1187 
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="100"} 2 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="1000"} 2 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="10000"} 2 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="100000"} 2 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="1e+06"} 2 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="1e+07"} 2 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="1e+08"} 2 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="1e+09"} 2 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="+Inf"} 2 prometheus_http_response_size_bytes_sum{handler="/api/v1/query_exemplars"} 120 prometheus_http_response_size_bytes_count{handler="/api/v1/query_exemplars"} 2 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="100"} 1211 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1000"} 1317 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="10000"} 1344 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="100000"} 1344 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+06"} 1344 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+07"} 1344 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+08"} 1344 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+09"} 1344 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="+Inf"} 1344 prometheus_http_response_size_bytes_sum{handler="/api/v1/query_range"} 230635 prometheus_http_response_size_bytes_count{handler="/api/v1/query_range"} 1344 prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="100"} 0 prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1000"} 0 prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="10000"} 0 prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="100000"} 3 prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1e+06"} 3 prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1e+07"} 3 prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1e+08"} 3 prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1e+09"} 3 prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="+Inf"} 3 prometheus_http_response_size_bytes_sum{handler="/api/v1/rules"} 48072 prometheus_http_response_size_bytes_count{handler="/api/v1/rules"} 3 prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="100"} 0 prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="1000"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="10000"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="100000"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="1e+06"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="1e+07"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="1e+08"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="1e+09"} 1 
prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="+Inf"} 1 prometheus_http_response_size_bytes_sum{handler="/api/v1/status/buildinfo"} 183 prometheus_http_response_size_bytes_count{handler="/api/v1/status/buildinfo"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="100"} 0 prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="1000"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="10000"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="100000"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="1e+06"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="1e+07"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="1e+08"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="1e+09"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="+Inf"} 1 prometheus_http_response_size_bytes_sum{handler="/api/v1/status/runtimeinfo"} 270 prometheus_http_response_size_bytes_count{handler="/api/v1/status/runtimeinfo"} 1 prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="100"} 0 prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="1000"} 0 prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="10000"} 74 prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="100000"} 74 prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="1e+06"} 74 prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="1e+07"} 74 prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="1e+08"} 74 prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="1e+09"} 74 prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="+Inf"} 74 prometheus_http_response_size_bytes_sum{handler="/api/v1/targets"} 93628 prometheus_http_response_size_bytes_count{handler="/api/v1/targets"} 74 prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="100"} 0 prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="1000"} 0 prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="10000"} 0 prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="100000"} 0 prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="1e+06"} 1 prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="1e+07"} 5 prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="1e+08"} 5 prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="1e+09"} 5 prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="+Inf"} 5 prometheus_http_response_size_bytes_sum{handler="/assets/*filepath"} 1.1013274e+07 prometheus_http_response_size_bytes_count{handler="/assets/*filepath"} 5 prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="100"} 0 prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="1000"} 0 prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="10000"} 1 prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="100000"} 1 prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="1e+06"} 1 prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="1e+07"} 1 
prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="1e+08"} 1 prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="1e+09"} 1 prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="+Inf"} 1 prometheus_http_response_size_bytes_sum{handler="/favicon.svg"} 2777 prometheus_http_response_size_bytes_count{handler="/favicon.svg"} 1 prometheus_http_response_size_bytes_bucket{handler="/metrics",le="100"} 0 prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1000"} 0 prometheus_http_response_size_bytes_bucket{handler="/metrics",le="10000"} 0 prometheus_http_response_size_bytes_bucket{handler="/metrics",le="100000"} 4896 prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+06"} 4896 prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+07"} 4896 prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+08"} 4896 prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+09"} 4896 prometheus_http_response_size_bytes_bucket{handler="/metrics",le="+Inf"} 4896 prometheus_http_response_size_bytes_sum{handler="/metrics"} 8.7424531e+07 prometheus_http_response_size_bytes_count{handler="/metrics"} 4896 prometheus_http_response_size_bytes_bucket{handler="/query",le="100"} 0 prometheus_http_response_size_bytes_bucket{handler="/query",le="1000"} 0 prometheus_http_response_size_bytes_bucket{handler="/query",le="10000"} 8 prometheus_http_response_size_bytes_bucket{handler="/query",le="100000"} 8 prometheus_http_response_size_bytes_bucket{handler="/query",le="1e+06"} 8 prometheus_http_response_size_bytes_bucket{handler="/query",le="1e+07"} 8 prometheus_http_response_size_bytes_bucket{handler="/query",le="1e+08"} 8 prometheus_http_response_size_bytes_bucket{handler="/query",le="1e+09"} 8 prometheus_http_response_size_bytes_bucket{handler="/query",le="+Inf"} 8 prometheus_http_response_size_bytes_sum{handler="/query"} 14016 prometheus_http_response_size_bytes_count{handler="/query"} 8 # HELP prometheus_notifications_alertmanagers_discovered The number of alertmanagers discovered and active. # TYPE prometheus_notifications_alertmanagers_discovered gauge prometheus_notifications_alertmanagers_discovered 1 # HELP prometheus_notifications_dropped_total Total number of alerts dropped due to errors when sending to Alertmanager. # TYPE prometheus_notifications_dropped_total counter prometheus_notifications_dropped_total 0 # HELP prometheus_notifications_errors_total Total number of sent alerts affected by errors. # TYPE prometheus_notifications_errors_total counter prometheus_notifications_errors_total{alertmanager="http://alertmanager:9093/api/v2/alerts"} 0 # HELP prometheus_notifications_latency_seconds Latency quantiles for sending alert notifications. # TYPE prometheus_notifications_latency_seconds summary prometheus_notifications_latency_seconds{alertmanager="http://alertmanager:9093/api/v2/alerts",quantile="0.5"} 0.001120705 prometheus_notifications_latency_seconds{alertmanager="http://alertmanager:9093/api/v2/alerts",quantile="0.9"} 0.006215019 prometheus_notifications_latency_seconds{alertmanager="http://alertmanager:9093/api/v2/alerts",quantile="0.99"} 0.006905213 prometheus_notifications_latency_seconds_sum{alertmanager="http://alertmanager:9093/api/v2/alerts"} 7.744771278000008 prometheus_notifications_latency_seconds_count{alertmanager="http://alertmanager:9093/api/v2/alerts"} 3222 # HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue. 
# HELP prometheus_notifications_alertmanagers_discovered The number of alertmanagers discovered and active.
# TYPE prometheus_notifications_alertmanagers_discovered gauge
prometheus_notifications_alertmanagers_discovered 1
# HELP prometheus_notifications_dropped_total Total number of alerts dropped due to errors when sending to Alertmanager.
# TYPE prometheus_notifications_dropped_total counter
prometheus_notifications_dropped_total 0
# HELP prometheus_notifications_errors_total Total number of sent alerts affected by errors.
# TYPE prometheus_notifications_errors_total counter
prometheus_notifications_errors_total{alertmanager="http://alertmanager:9093/api/v2/alerts"} 0
# HELP prometheus_notifications_latency_seconds Latency quantiles for sending alert notifications.
# TYPE prometheus_notifications_latency_seconds summary
prometheus_notifications_latency_seconds{alertmanager="http://alertmanager:9093/api/v2/alerts",quantile="0.5"} 0.001120705
prometheus_notifications_latency_seconds{alertmanager="http://alertmanager:9093/api/v2/alerts",quantile="0.9"} 0.006215019
prometheus_notifications_latency_seconds{alertmanager="http://alertmanager:9093/api/v2/alerts",quantile="0.99"} 0.006905213
prometheus_notifications_latency_seconds_sum{alertmanager="http://alertmanager:9093/api/v2/alerts"} 7.744771278000008
prometheus_notifications_latency_seconds_count{alertmanager="http://alertmanager:9093/api/v2/alerts"} 3222
# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue.
# TYPE prometheus_notifications_queue_capacity gauge
prometheus_notifications_queue_capacity 10000
# HELP prometheus_notifications_queue_length The number of alert notifications in the queue.
# TYPE prometheus_notifications_queue_length gauge
prometheus_notifications_queue_length 0
# HELP prometheus_notifications_sent_total Total number of alerts sent.
# TYPE prometheus_notifications_sent_total counter
prometheus_notifications_sent_total{alertmanager="http://alertmanager:9093/api/v2/alerts"} 18736
# HELP prometheus_ready Whether Prometheus startup was fully completed and the server is ready for normal operation.
# TYPE prometheus_ready gauge
prometheus_ready 1
# HELP prometheus_remote_read_handler_queries The current number of remote read queries that are either in execution or queued on the handler.
# TYPE prometheus_remote_read_handler_queries gauge
prometheus_remote_read_handler_queries 0
# HELP prometheus_remote_storage_exemplars_in_total Exemplars into remote storage, compare to exemplars out for queue managers. Deprecated, check prometheus_wal_watcher_records_read_total and prometheus_remote_storage_exemplars_dropped_total.
# TYPE prometheus_remote_storage_exemplars_in_total counter
prometheus_remote_storage_exemplars_in_total 0
# HELP prometheus_remote_storage_highest_timestamp_in_seconds Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch. Initialized to 0 when no data has been received yet. Deprecated, check prometheus_remote_storage_queue_highest_timestamp_seconds which is more accurate.
# TYPE prometheus_remote_storage_highest_timestamp_in_seconds gauge
prometheus_remote_storage_highest_timestamp_in_seconds 1.76499186e+09
# HELP prometheus_remote_storage_histograms_in_total HistogramSamples into remote storage, compare to histograms out for queue managers. Deprecated, check prometheus_wal_watcher_records_read_total and prometheus_remote_storage_histograms_dropped_total.
# TYPE prometheus_remote_storage_histograms_in_total counter
prometheus_remote_storage_histograms_in_total 0
# HELP prometheus_remote_storage_samples_in_total Samples into remote storage, compare to samples out for queue managers. Deprecated, check prometheus_wal_watcher_records_read_total and prometheus_remote_storage_samples_dropped_total.
# TYPE prometheus_remote_storage_samples_in_total counter
prometheus_remote_storage_samples_in_total 3.20241938e+08
# HELP prometheus_remote_storage_string_interner_zero_reference_releases_total The number of times release has been called for strings that are not interned.
# TYPE prometheus_remote_storage_string_interner_zero_reference_releases_total counter
prometheus_remote_storage_string_interner_zero_reference_releases_total 0
# HELP prometheus_rule_evaluation_duration_seconds The duration for a rule to execute.
# TYPE prometheus_rule_evaluation_duration_seconds summary
prometheus_rule_evaluation_duration_seconds{quantile="0.5"} 0.000167706
prometheus_rule_evaluation_duration_seconds{quantile="0.9"} 0.001240657
prometheus_rule_evaluation_duration_seconds{quantile="0.99"} 0.006253567
prometheus_rule_evaluation_duration_seconds_sum 196.59821772900068
prometheus_rule_evaluation_duration_seconds_count 344190
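# NOTE (editorial): the summary above exposes pre-computed quantiles as plain series; for
# example, the 99th-percentile single-rule evaluation time can be read directly with:
#   prometheus_rule_evaluation_duration_seconds{quantile="0.99"}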
# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures.
# TYPE prometheus_rule_evaluation_failures_total counter
prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/claude-code-alerts.yml;claude_code_health"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/container-alerts.yml;container_health"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/container-alerts.yml;container_resources"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/container-alerts.yml;container_restarts"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/container-alerts.yml;critical_containers"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/infrastructure-alerts.yml;infrastructure_health"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/red-metrics-recording.yml;red_metrics_recording"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/resume-worker-alerts.yml;resume_worker_health"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/security-monitoring.yml;security_monitoring"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/slo-sli.yml;error_budget_tracking"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/slo-sli.yml;slo_alerts"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/slo-sli.yml;slo_sli_recording"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_availability"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_configuration"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_performance"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_security"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/vault-monitoring.yml;vault_health"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/vault-monitoring.yml;vault_sli_recording"} 0
# HELP prometheus_rule_evaluations_total The total number of rule evaluations.
# TYPE prometheus_rule_evaluations_total counter
prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/claude-code-alerts.yml;claude_code_health"} 19584
prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/container-alerts.yml;container_health"} 4896
prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/container-alerts.yml;container_resources"} 12240
prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/container-alerts.yml;container_restarts"} 2448
prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/container-alerts.yml;critical_containers"} 9792
prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/infrastructure-alerts.yml;infrastructure_health"} 31824
prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/red-metrics-recording.yml;red_metrics_recording"} 112608
prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/resume-worker-alerts.yml;resume_worker_health"} 14688
prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/security-monitoring.yml;security_monitoring"} 29376
prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/slo-sli.yml;error_budget_tracking"} 1470
prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/slo-sli.yml;slo_alerts"} 7344
prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/slo-sli.yml;slo_sli_recording"} 26928
prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_availability"} 4896
prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_configuration"} 2448
prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_performance"} 9792
prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_security"} 7344
prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/vault-monitoring.yml;vault_health"} 34272
prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/vault-monitoring.yml;vault_sli_recording"} 12240
# HELP prometheus_rule_group_duration_seconds The duration of rule group evaluations.
# TYPE prometheus_rule_group_duration_seconds summary
prometheus_rule_group_duration_seconds{quantile="0.01"} 0.000385446
prometheus_rule_group_duration_seconds{quantile="0.05"} 0.000529409
prometheus_rule_group_duration_seconds{quantile="0.5"} 0.00159288
prometheus_rule_group_duration_seconds{quantile="0.9"} 0.015698959
prometheus_rule_group_duration_seconds{quantile="0.99"} 0.022786938
prometheus_rule_group_duration_seconds_sum 201.9463863170017
prometheus_rule_group_duration_seconds_count 41861
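# NOTE (editorial): example PromQL — per-group failure ratio from the two counters above;
# the 15m window is an assumption, and the ratio is 0 for every group in this dump:
#   rate(prometheus_rule_evaluation_failures_total[15m]) / rate(prometheus_rule_evaluations_total[15m])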
# HELP prometheus_rule_group_interval_seconds The interval of a rule group.
# TYPE prometheus_rule_group_interval_seconds gauge
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/claude-code-alerts.yml;claude_code_health"} 30
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/container-alerts.yml;container_health"} 30
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/container-alerts.yml;container_resources"} 30
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/container-alerts.yml;container_restarts"} 30
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/container-alerts.yml;critical_containers"} 30
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/infrastructure-alerts.yml;infrastructure_health"} 30
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/red-metrics-recording.yml;red_metrics_recording"} 15
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/resume-worker-alerts.yml;resume_worker_health"} 30
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/security-monitoring.yml;security_monitoring"} 30
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/slo-sli.yml;error_budget_tracking"} 300
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/slo-sli.yml;slo_alerts"} 60
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/slo-sli.yml;slo_sli_recording"} 30
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_availability"} 30
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_configuration"} 60
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_performance"} 30
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_security"} 30
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/vault-monitoring.yml;vault_health"} 30
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/vault-monitoring.yml;vault_sli_recording"} 30
# HELP prometheus_rule_group_iterations_missed_total The total number of rule group evaluations missed due to slow rule group evaluation.
# TYPE prometheus_rule_group_iterations_missed_total counter
prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/claude-code-alerts.yml;claude_code_health"} 0
prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/container-alerts.yml;container_health"} 0
prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/container-alerts.yml;container_resources"} 0
prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/container-alerts.yml;container_restarts"} 0
prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/container-alerts.yml;critical_containers"} 0
prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/infrastructure-alerts.yml;infrastructure_health"} 0
prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/red-metrics-recording.yml;red_metrics_recording"} 0
prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/resume-worker-alerts.yml;resume_worker_health"} 0
prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/security-monitoring.yml;security_monitoring"} 0
prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/slo-sli.yml;error_budget_tracking"} 0
prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/slo-sli.yml;slo_alerts"} 0
prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/slo-sli.yml;slo_sli_recording"} 0
prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_availability"} 0
prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_configuration"} 0
prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_performance"} 0
prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_security"} 0
prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/vault-monitoring.yml;vault_health"} 0
prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/vault-monitoring.yml;vault_sli_recording"} 0
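# NOTE (editorial): example PromQL — detect rule groups whose evaluations are being
# skipped because the previous run overran its interval; the 1h window is an assumption:
#   increase(prometheus_rule_group_iterations_missed_total[1h]) > 0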
# HELP prometheus_rule_group_iterations_total The total number of scheduled rule group evaluations, whether executed or missed.
# TYPE prometheus_rule_group_iterations_total counter
prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/claude-code-alerts.yml;claude_code_health"} 2448
prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/container-alerts.yml;container_health"} 2448
prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/container-alerts.yml;container_resources"} 2448
prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/container-alerts.yml;container_restarts"} 2448
prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/container-alerts.yml;critical_containers"} 2448
prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/infrastructure-alerts.yml;infrastructure_health"} 2448
prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/red-metrics-recording.yml;red_metrics_recording"} 4896
prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/resume-worker-alerts.yml;resume_worker_health"} 2448
prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/security-monitoring.yml;security_monitoring"} 2448
prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/slo-sli.yml;error_budget_tracking"} 245
prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/slo-sli.yml;slo_alerts"} 1224
prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/slo-sli.yml;slo_sli_recording"} 2448
prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_availability"} 2448
prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_configuration"} 1224
prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_performance"} 2448
prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_security"} 2448
prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/vault-monitoring.yml;vault_health"} 2448
prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/vault-monitoring.yml;vault_sli_recording"} 2448
# HELP prometheus_rule_group_last_duration_seconds The duration of the last rule group evaluation.
# TYPE prometheus_rule_group_last_duration_seconds gauge
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/claude-code-alerts.yml;claude_code_health"} 0.000906259
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/container-alerts.yml;container_health"} 0.000887623
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/container-alerts.yml;container_resources"} 0.004082061
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/container-alerts.yml;container_restarts"} 0.000821326
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/container-alerts.yml;critical_containers"} 0.001761024
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/infrastructure-alerts.yml;infrastructure_health"} 0.00900194
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/red-metrics-recording.yml;red_metrics_recording"} 0.01756005
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/resume-worker-alerts.yml;resume_worker_health"} 0.000840226
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/security-monitoring.yml;security_monitoring"} 0.001972054
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/slo-sli.yml;error_budget_tracking"} 0.001813674
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/slo-sli.yml;slo_alerts"} 0.000647925
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/slo-sli.yml;slo_sli_recording"} 0.014348997
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_availability"} 0.000389024
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_configuration"} 0.001871864
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_performance"} 0.012735671
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_security"} 0.001076122
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/vault-monitoring.yml;vault_health"} 0.001645723
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/vault-monitoring.yml;vault_sli_recording"} 0.000693908
# HELP prometheus_rule_group_last_evaluation_samples The number of samples returned during the last rule group evaluation.
# TYPE prometheus_rule_group_last_evaluation_samples gauge
prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/claude-code-alerts.yml;claude_code_health"} 0
prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/container-alerts.yml;container_health"} 0
prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/container-alerts.yml;container_resources"} 0
prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/container-alerts.yml;container_restarts"} 0
prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/container-alerts.yml;critical_containers"} 2
prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/infrastructure-alerts.yml;infrastructure_health"} 42
prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/red-metrics-recording.yml;red_metrics_recording"} 187
prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/resume-worker-alerts.yml;resume_worker_health"} 0
prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/security-monitoring.yml;security_monitoring"} 2
prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/slo-sli.yml;error_budget_tracking"} 2
prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/slo-sli.yml;slo_alerts"} 0
prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/slo-sli.yml;slo_sli_recording"} 78
prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_availability"} 0
prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_configuration"} 0
prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_performance"} 0
prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_security"} 0
prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/vault-monitoring.yml;vault_health"} 0
prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/vault-monitoring.yml;vault_sli_recording"} 0
# HELP prometheus_rule_group_last_evaluation_timestamp_seconds The timestamp of the last rule group evaluation in seconds.
# TYPE prometheus_rule_group_last_evaluation_timestamp_seconds gauge
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/claude-code-alerts.yml;claude_code_health"} 1.7649918446173387e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/container-alerts.yml;container_health"} 1.7649918313048677e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/container-alerts.yml;container_resources"} 1.7649918333289847e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/container-alerts.yml;container_restarts"} 1.7649918542417018e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/container-alerts.yml;critical_containers"} 1.7649918468791761e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/infrastructure-alerts.yml;infrastructure_health"} 1.7649918541481736e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/red-metrics-recording.yml;red_metrics_recording"} 1.7649918555416584e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/resume-worker-alerts.yml;resume_worker_health"} 1.7649918472817755e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/security-monitoring.yml;security_monitoring"} 1.7649918475270977e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/slo-sli.yml;error_budget_tracking"} 1.764991796013139e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/slo-sli.yml;slo_alerts"} 1.7649918547813036e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/slo-sli.yml;slo_sli_recording"} 1.7649918571211536e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_availability"} 1.7649918540789719e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_configuration"} 1.7649918073910303e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_performance"} 1.764991844743513e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_security"} 1.764991831232629e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/vault-monitoring.yml;vault_health"} 1.7649918594218333e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/vault-monitoring.yml;vault_sli_recording"} 1.764991849669981e+09
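# NOTE (editorial): example PromQL — flag rule groups that have not evaluated within twice
# their configured interval (a common staleness heuristic, not something in this dump);
# both gauges carry only the rule_group label, so the binary operation matches one-to-one:
#   time() - prometheus_rule_group_last_evaluation_timestamp_seconds > 2 * prometheus_rule_group_interval_seconds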
# HELP prometheus_rule_group_last_restore_duration_seconds The duration of the last restoration of alert rules using the `ALERTS_FOR_STATE` series.
# TYPE prometheus_rule_group_last_restore_duration_seconds gauge
prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/claude-code-alerts.yml;claude_code_health"} 3.4729e-05
prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/container-alerts.yml;container_health"} 6.652e-06
prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/container-alerts.yml;container_resources"} 3.793e-05
prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/container-alerts.yml;container_restarts"} 6.625e-06
prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/container-alerts.yml;critical_containers"} 2.3642e-05
prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/infrastructure-alerts.yml;infrastructure_health"} 1.9555e-05
prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/red-metrics-recording.yml;red_metrics_recording"} 3.769e-06
prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/resume-worker-alerts.yml;resume_worker_health"} 2.1633e-05
prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/security-monitoring.yml;security_monitoring"} 4.1407e-05
prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/slo-sli.yml;error_budget_tracking"} 2.3596e-05
prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/slo-sli.yml;slo_alerts"} 3.304e-06
prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/slo-sli.yml;slo_sli_recording"} 4.084e-06
prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_availability"} 3.07e-06
prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_configuration"} 4.204e-06
prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_performance"} 1.9906e-05
prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_security"} 1.6267e-05
prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/vault-monitoring.yml;vault_health"} 2.0181e-05
prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/vault-monitoring.yml;vault_sli_recording"} 3.224e-06
# HELP prometheus_rule_group_last_rule_duration_sum_seconds The sum of time in seconds it took to evaluate each rule in the group regardless of concurrency. This should be higher than the group duration if rules are evaluated concurrently.
# TYPE prometheus_rule_group_last_rule_duration_sum_seconds gauge
prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/claude-code-alerts.yml;claude_code_health"} 0.000780571
prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/container-alerts.yml;container_health"} 0.000836365
prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/container-alerts.yml;container_resources"} 0.003980694
prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/container-alerts.yml;container_restarts"} 0.000773365
prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/container-alerts.yml;critical_containers"} 0.001692319
prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/infrastructure-alerts.yml;infrastructure_health"} 0.008840337
prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/red-metrics-recording.yml;red_metrics_recording"} 0.017326418
prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/resume-worker-alerts.yml;resume_worker_health"} 0.000767139
prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/security-monitoring.yml;security_monitoring"} 0.001856685
prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/slo-sli.yml;error_budget_tracking"} 0.001718363
prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/slo-sli.yml;slo_alerts"} 0.00057446
prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/slo-sli.yml;slo_sli_recording"} 0.014174801
prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_availability"} 0.00032779
prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_configuration"} 0.001817335
prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_performance"} 0.012656097
prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_security"} 0.001011536
prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/vault-monitoring.yml;vault_health"} 0.00153003
prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/vault-monitoring.yml;vault_sli_recording"} 0.00062041
# HELP prometheus_rule_group_rules The number of rules.
# TYPE prometheus_rule_group_rules gauge
prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/claude-code-alerts.yml;claude_code_health"} 8
prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/container-alerts.yml;container_health"} 2
prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/container-alerts.yml;container_resources"} 5
prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/container-alerts.yml;container_restarts"} 1
prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/container-alerts.yml;critical_containers"} 4
prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/infrastructure-alerts.yml;infrastructure_health"} 13
prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/red-metrics-recording.yml;red_metrics_recording"} 23
prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/resume-worker-alerts.yml;resume_worker_health"} 6
prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/security-monitoring.yml;security_monitoring"} 12
prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/slo-sli.yml;error_budget_tracking"} 6
prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/slo-sli.yml;slo_alerts"} 6
prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/slo-sli.yml;slo_sli_recording"} 11
prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_availability"} 2
prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_configuration"} 2
prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_performance"} 4
prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/traefik-alerts.yml;traefik_security"} 3
prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/vault-monitoring.yml;vault_health"} 14
prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/vault-monitoring.yml;vault_sli_recording"} 5
# HELP prometheus_sd_azure_cache_hit_total Number of cache hits during refresh.
# TYPE prometheus_sd_azure_cache_hit_total counter
prometheus_sd_azure_cache_hit_total 0
# HELP prometheus_sd_azure_failures_total Number of Azure service discovery refresh failures.
# TYPE prometheus_sd_azure_failures_total counter
prometheus_sd_azure_failures_total 0
# HELP prometheus_sd_consul_rpc_duration_seconds The duration of a Consul RPC call in seconds.
# TYPE prometheus_sd_consul_rpc_duration_seconds summary
prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN
prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN
prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN
prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0
prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0
prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN
prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN
prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN
prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0
prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0
# HELP prometheus_sd_consul_rpc_failures_total The number of Consul RPC call failures.
# TYPE prometheus_sd_consul_rpc_failures_total counter
prometheus_sd_consul_rpc_failures_total 0
# HELP prometheus_sd_discovered_targets Current number of discovered targets.
# TYPE prometheus_sd_discovered_targets gauge
prometheus_sd_discovered_targets{config="alertmanager",name="scrape"} 1
prometheus_sd_discovered_targets{config="cadvisor",name="scrape"} 1
prometheus_sd_discovered_targets{config="config-0",name="notify"} 1
prometheus_sd_discovered_targets{config="grafana",name="scrape"} 1
prometheus_sd_discovered_targets{config="loki",name="scrape"} 1
prometheus_sd_discovered_targets{config="node-exporter",name="scrape"} 1
prometheus_sd_discovered_targets{config="postgres",name="scrape"} 3
prometheus_sd_discovered_targets{config="prometheus",name="scrape"} 1
prometheus_sd_discovered_targets{config="redis",name="scrape"} 3
prometheus_sd_discovered_targets{config="tempo",name="scrape"} 1
prometheus_sd_discovered_targets{config="traefik",name="scrape"} 1
# HELP prometheus_sd_dns_lookup_failures_total The number of DNS-SD lookup failures.
# TYPE prometheus_sd_dns_lookup_failures_total counter
prometheus_sd_dns_lookup_failures_total 0
# HELP prometheus_sd_dns_lookups_total The number of DNS-SD lookups.
# TYPE prometheus_sd_dns_lookups_total counter
prometheus_sd_dns_lookups_total 0
# HELP prometheus_sd_failed_configs Current number of service discovery configurations that failed to load.
# TYPE prometheus_sd_failed_configs gauge
prometheus_sd_failed_configs{name="notify"} 0
prometheus_sd_failed_configs{name="scrape"} 0
# HELP prometheus_sd_file_read_errors_total The number of File-SD read errors.
# TYPE prometheus_sd_file_read_errors_total counter
prometheus_sd_file_read_errors_total 0
# HELP prometheus_sd_file_scan_duration_seconds The duration of the File-SD scan in seconds.
# TYPE prometheus_sd_file_scan_duration_seconds summary
prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN
prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN
prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN
prometheus_sd_file_scan_duration_seconds_sum 0
prometheus_sd_file_scan_duration_seconds_count 0
# HELP prometheus_sd_file_watcher_errors_total The number of File-SD errors caused by filesystem watch failures.
# TYPE prometheus_sd_file_watcher_errors_total counter
prometheus_sd_file_watcher_errors_total 0
# HELP prometheus_sd_http_failures_total Number of HTTP service discovery refresh failures.
# TYPE prometheus_sd_http_failures_total counter
prometheus_sd_http_failures_total 0
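# NOTE (editorial): a quick PromQL view of service discovery state — targets discovered
# per scrape configuration, using the label names on the gauge above:
#   sum by (config) (prometheus_sd_discovered_targets{name="scrape"})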
# HELP prometheus_sd_kubernetes_events_total The number of Kubernetes events handled.
# TYPE prometheus_sd_kubernetes_events_total counter
prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0
prometheus_sd_kubernetes_events_total{event="add",role="endpointslice"} 0
prometheus_sd_kubernetes_events_total{event="add",role="ingress"} 0
prometheus_sd_kubernetes_events_total{event="add",role="node"} 0
prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0
prometheus_sd_kubernetes_events_total{event="add",role="service"} 0
prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0
prometheus_sd_kubernetes_events_total{event="delete",role="endpointslice"} 0
prometheus_sd_kubernetes_events_total{event="delete",role="ingress"} 0
prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0
prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0
prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0
prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0
prometheus_sd_kubernetes_events_total{event="update",role="endpointslice"} 0
prometheus_sd_kubernetes_events_total{event="update",role="ingress"} 0
prometheus_sd_kubernetes_events_total{event="update",role="node"} 0
prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0
prometheus_sd_kubernetes_events_total{event="update",role="service"} 0
# HELP prometheus_sd_kubernetes_failures_total The number of failed WATCH/LIST requests.
# TYPE prometheus_sd_kubernetes_failures_total counter
prometheus_sd_kubernetes_failures_total 0
# HELP prometheus_sd_kuma_fetch_duration_seconds The duration of a Kuma MADS fetch call.
# TYPE prometheus_sd_kuma_fetch_duration_seconds summary
prometheus_sd_kuma_fetch_duration_seconds{quantile="0.5"} NaN
prometheus_sd_kuma_fetch_duration_seconds{quantile="0.9"} NaN
prometheus_sd_kuma_fetch_duration_seconds{quantile="0.99"} NaN
prometheus_sd_kuma_fetch_duration_seconds_sum 0
prometheus_sd_kuma_fetch_duration_seconds_count 0
# HELP prometheus_sd_kuma_fetch_failures_total The number of Kuma MADS fetch call failures.
# TYPE prometheus_sd_kuma_fetch_failures_total counter
prometheus_sd_kuma_fetch_failures_total 0
# HELP prometheus_sd_kuma_fetch_skipped_updates_total The number of Kuma MADS fetch calls that result in no updates to the targets.
# TYPE prometheus_sd_kuma_fetch_skipped_updates_total counter
prometheus_sd_kuma_fetch_skipped_updates_total 0
# HELP prometheus_sd_linode_failures_total Number of Linode service discovery refresh failures.
# TYPE prometheus_sd_linode_failures_total counter
prometheus_sd_linode_failures_total 0
# HELP prometheus_sd_nomad_failures_total Number of Nomad service discovery refresh failures.
# TYPE prometheus_sd_nomad_failures_total counter
prometheus_sd_nomad_failures_total 0
# HELP prometheus_sd_received_updates_total Total number of update events received from the SD providers.
# TYPE prometheus_sd_received_updates_total counter
prometheus_sd_received_updates_total{name="notify"} 2
prometheus_sd_received_updates_total{name="scrape"} 20
# HELP prometheus_sd_updates_delayed_total Total number of update events that couldn't be sent immediately.
# TYPE prometheus_sd_updates_delayed_total counter
prometheus_sd_updates_delayed_total{name="notify"} 0
prometheus_sd_updates_delayed_total{name="scrape"} 0
# HELP prometheus_sd_updates_total Total number of update events sent to the SD consumers.
# TYPE prometheus_sd_updates_total counter
prometheus_sd_updates_total{name="notify"} 1
prometheus_sd_updates_total{name="scrape"} 1
# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes.
# TYPE prometheus_target_interval_length_seconds summary
prometheus_target_interval_length_seconds{interval="15s",quantile="0.01"} 14.998936577
prometheus_target_interval_length_seconds{interval="15s",quantile="0.05"} 14.999143714
prometheus_target_interval_length_seconds{interval="15s",quantile="0.5"} 15.000053019
prometheus_target_interval_length_seconds{interval="15s",quantile="0.9"} 15.000655142
prometheus_target_interval_length_seconds{interval="15s",quantile="0.99"} 15.001227666
prometheus_target_interval_length_seconds_sum{interval="15s"} 1.0279509794124637e+06
prometheus_target_interval_length_seconds_count{interval="15s"} 68530
# HELP prometheus_target_metadata_cache_bytes The number of bytes that are currently used for storing metric metadata in the cache.
# TYPE prometheus_target_metadata_cache_bytes gauge
prometheus_target_metadata_cache_bytes{scrape_job="alertmanager"} 5642
prometheus_target_metadata_cache_bytes{scrape_job="cadvisor"} 4589
prometheus_target_metadata_cache_bytes{scrape_job="grafana"} 43191
prometheus_target_metadata_cache_bytes{scrape_job="loki"} 37316
prometheus_target_metadata_cache_bytes{scrape_job="node-exporter"} 13587
prometheus_target_metadata_cache_bytes{scrape_job="postgres"} 63044
prometheus_target_metadata_cache_bytes{scrape_job="prometheus"} 21034
prometheus_target_metadata_cache_bytes{scrape_job="redis"} 20102
prometheus_target_metadata_cache_bytes{scrape_job="tempo"} 10872
prometheus_target_metadata_cache_bytes{scrape_job="traefik"} 5858
# HELP prometheus_target_metadata_cache_entries Total number of metric metadata entries in the cache.
# TYPE prometheus_target_metadata_cache_entries gauge
prometheus_target_metadata_cache_entries{scrape_job="alertmanager"} 100
prometheus_target_metadata_cache_entries{scrape_job="cadvisor"} 93
prometheus_target_metadata_cache_entries{scrape_job="grafana"} 443
prometheus_target_metadata_cache_entries{scrape_job="loki"} 355
prometheus_target_metadata_cache_entries{scrape_job="node-exporter"} 292
prometheus_target_metadata_cache_entries{scrape_job="postgres"} 1103
prometheus_target_metadata_cache_entries{scrape_job="prometheus"} 240
prometheus_target_metadata_cache_entries{scrape_job="redis"} 523
prometheus_target_metadata_cache_entries{scrape_job="tempo"} 152
prometheus_target_metadata_cache_entries{scrape_job="traefik"} 58
# HELP prometheus_target_scrape_pool_exceeded_label_limits_total Total number of times scrape pools hit the label limits, during sync or config reload.
# TYPE prometheus_target_scrape_pool_exceeded_label_limits_total counter
prometheus_target_scrape_pool_exceeded_label_limits_total 0
# HELP prometheus_target_scrape_pool_exceeded_target_limit_total Total number of times scrape pools hit the target limit, during sync or config reload.
# TYPE prometheus_target_scrape_pool_exceeded_target_limit_total counter
prometheus_target_scrape_pool_exceeded_target_limit_total 0
# HELP prometheus_target_scrape_pool_reloads_failed_total Total number of failed scrape pool reloads.
# TYPE prometheus_target_scrape_pool_reloads_failed_total counter
prometheus_target_scrape_pool_reloads_failed_total 0
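# NOTE (editorial): the interval summary above measures scrape-timing jitter directly; in
# this dump, the spread between the 1st and 99th percentiles for the 15s interval is
# 15.001227666 - 14.998936577 ≈ 0.0023s, i.e. roughly 2.3ms of jitter around the target.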
# HELP prometheus_target_scrape_pool_reloads_total Total number of scrape pool reloads.
# TYPE prometheus_target_scrape_pool_reloads_total counter
prometheus_target_scrape_pool_reloads_total 0
# HELP prometheus_target_scrape_pool_symboltable_items Current number of symbols in table for this scrape pool.
# TYPE prometheus_target_scrape_pool_symboltable_items gauge
prometheus_target_scrape_pool_symboltable_items{scrape_job="alertmanager"} 0
prometheus_target_scrape_pool_symboltable_items{scrape_job="cadvisor"} 0
prometheus_target_scrape_pool_symboltable_items{scrape_job="grafana"} 0
prometheus_target_scrape_pool_symboltable_items{scrape_job="loki"} 0
prometheus_target_scrape_pool_symboltable_items{scrape_job="node-exporter"} 0
prometheus_target_scrape_pool_symboltable_items{scrape_job="postgres"} 0
prometheus_target_scrape_pool_symboltable_items{scrape_job="prometheus"} 0
prometheus_target_scrape_pool_symboltable_items{scrape_job="redis"} 0
prometheus_target_scrape_pool_symboltable_items{scrape_job="tempo"} 0
prometheus_target_scrape_pool_symboltable_items{scrape_job="traefik"} 0
# HELP prometheus_target_scrape_pool_sync_total Total number of syncs that were executed on a scrape pool.
# TYPE prometheus_target_scrape_pool_sync_total counter
prometheus_target_scrape_pool_sync_total{scrape_job="alertmanager"} 1
prometheus_target_scrape_pool_sync_total{scrape_job="cadvisor"} 1
prometheus_target_scrape_pool_sync_total{scrape_job="grafana"} 1
prometheus_target_scrape_pool_sync_total{scrape_job="loki"} 1
prometheus_target_scrape_pool_sync_total{scrape_job="node-exporter"} 1
prometheus_target_scrape_pool_sync_total{scrape_job="postgres"} 1
prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1
prometheus_target_scrape_pool_sync_total{scrape_job="redis"} 1
prometheus_target_scrape_pool_sync_total{scrape_job="tempo"} 1
prometheus_target_scrape_pool_sync_total{scrape_job="traefik"} 1
# HELP prometheus_target_scrape_pool_target_limit Maximum number of targets allowed in this scrape pool.
# TYPE prometheus_target_scrape_pool_target_limit gauge
prometheus_target_scrape_pool_target_limit{scrape_job="alertmanager"} 0
prometheus_target_scrape_pool_target_limit{scrape_job="cadvisor"} 0
prometheus_target_scrape_pool_target_limit{scrape_job="grafana"} 0
prometheus_target_scrape_pool_target_limit{scrape_job="loki"} 0
prometheus_target_scrape_pool_target_limit{scrape_job="node-exporter"} 0
prometheus_target_scrape_pool_target_limit{scrape_job="postgres"} 0
prometheus_target_scrape_pool_target_limit{scrape_job="prometheus"} 0
prometheus_target_scrape_pool_target_limit{scrape_job="redis"} 0
prometheus_target_scrape_pool_target_limit{scrape_job="tempo"} 0
prometheus_target_scrape_pool_target_limit{scrape_job="traefik"} 0
# HELP prometheus_target_scrape_pool_targets Current number of targets in this scrape pool.
# TYPE prometheus_target_scrape_pool_targets gauge
prometheus_target_scrape_pool_targets{scrape_job="alertmanager"} 1
prometheus_target_scrape_pool_targets{scrape_job="cadvisor"} 1
prometheus_target_scrape_pool_targets{scrape_job="grafana"} 1
prometheus_target_scrape_pool_targets{scrape_job="loki"} 1
prometheus_target_scrape_pool_targets{scrape_job="node-exporter"} 1
prometheus_target_scrape_pool_targets{scrape_job="postgres"} 3
prometheus_target_scrape_pool_targets{scrape_job="prometheus"} 1
prometheus_target_scrape_pool_targets{scrape_job="redis"} 3
prometheus_target_scrape_pool_targets{scrape_job="tempo"} 1
prometheus_target_scrape_pool_targets{scrape_job="traefik"} 1
# HELP prometheus_target_scrape_pools_failed_total Total number of scrape pool creations that failed.
# HELP prometheus_target_scrape_pools_failed_total Total number of scrape pool creations that failed.
# TYPE prometheus_target_scrape_pools_failed_total counter
prometheus_target_scrape_pools_failed_total 0
# HELP prometheus_target_scrape_pools_total Total number of scrape pool creation attempts.
# TYPE prometheus_target_scrape_pools_total counter
prometheus_target_scrape_pools_total 10
# HELP prometheus_target_scrapes_cache_flush_forced_total How many times a scrape cache was flushed because it grew too large while scrapes were failing.
# TYPE prometheus_target_scrapes_cache_flush_forced_total counter
prometheus_target_scrapes_cache_flush_forced_total 0
# HELP prometheus_target_scrapes_exceeded_body_size_limit_total Total number of scrapes that hit the body size limit.
# TYPE prometheus_target_scrapes_exceeded_body_size_limit_total counter
prometheus_target_scrapes_exceeded_body_size_limit_total 0
# HELP prometheus_target_scrapes_exceeded_native_histogram_bucket_limit_total Total number of scrapes that hit the native histogram bucket limit and were rejected.
# TYPE prometheus_target_scrapes_exceeded_native_histogram_bucket_limit_total counter
prometheus_target_scrapes_exceeded_native_histogram_bucket_limit_total 0
# HELP prometheus_target_scrapes_exceeded_sample_limit_total Total number of scrapes that hit the sample limit and were rejected.
# TYPE prometheus_target_scrapes_exceeded_sample_limit_total counter
prometheus_target_scrapes_exceeded_sample_limit_total 0
# HELP prometheus_target_scrapes_exemplar_out_of_order_total Total number of exemplars rejected due to being out of the expected order.
# TYPE prometheus_target_scrapes_exemplar_out_of_order_total counter
prometheus_target_scrapes_exemplar_out_of_order_total 0
# HELP prometheus_target_scrapes_sample_duplicate_timestamp_total Total number of samples rejected due to duplicate timestamps but different values.
# TYPE prometheus_target_scrapes_sample_duplicate_timestamp_total counter
prometheus_target_scrapes_sample_duplicate_timestamp_total 0
# HELP prometheus_target_scrapes_sample_out_of_bounds_total Total number of samples rejected due to timestamps falling outside of the time bounds.
# TYPE prometheus_target_scrapes_sample_out_of_bounds_total counter
prometheus_target_scrapes_sample_out_of_bounds_total 0
# HELP prometheus_target_scrapes_sample_out_of_order_total Total number of samples rejected due to being out of the expected order.
# TYPE prometheus_target_scrapes_sample_out_of_order_total counter
prometheus_target_scrapes_sample_out_of_order_total 0
# HELP prometheus_target_sync_failed_total Total number of target sync failures.
# TYPE prometheus_target_sync_failed_total counter
prometheus_target_sync_failed_total{scrape_job="alertmanager"} 0
prometheus_target_sync_failed_total{scrape_job="cadvisor"} 0
prometheus_target_sync_failed_total{scrape_job="grafana"} 0
prometheus_target_sync_failed_total{scrape_job="loki"} 0
prometheus_target_sync_failed_total{scrape_job="node-exporter"} 0
prometheus_target_sync_failed_total{scrape_job="postgres"} 0
prometheus_target_sync_failed_total{scrape_job="prometheus"} 0
prometheus_target_sync_failed_total{scrape_job="redis"} 0
prometheus_target_sync_failed_total{scrape_job="tempo"} 0
prometheus_target_sync_failed_total{scrape_job="traefik"} 0
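# Note: every rejection counter above is 0, which is the healthy state. A hedged
# alerting sketch (the 5m window and zero threshold are assumptions, not config
# from this server):
#   sum(rate(prometheus_target_scrapes_sample_out_of_order_total[5m])) > 0
#   sum(rate(prometheus_target_scrapes_sample_duplicate_timestamp_total[5m])) > 0
# A sustained non-zero rate usually points at a misbehaving target or a clock
# problem rather than at Prometheus itself.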
# HELP prometheus_target_sync_length_seconds Actual interval to sync the scrape pool.
# TYPE prometheus_target_sync_length_seconds summary
prometheus_target_sync_length_seconds{scrape_job="alertmanager",quantile="0.01"} NaN
prometheus_target_sync_length_seconds{scrape_job="alertmanager",quantile="0.05"} NaN
prometheus_target_sync_length_seconds{scrape_job="alertmanager",quantile="0.5"} NaN
prometheus_target_sync_length_seconds{scrape_job="alertmanager",quantile="0.9"} NaN
prometheus_target_sync_length_seconds{scrape_job="alertmanager",quantile="0.99"} NaN
prometheus_target_sync_length_seconds_sum{scrape_job="alertmanager"} 0.000155771
prometheus_target_sync_length_seconds_count{scrape_job="alertmanager"} 1
prometheus_target_sync_length_seconds{scrape_job="cadvisor",quantile="0.01"} NaN
prometheus_target_sync_length_seconds{scrape_job="cadvisor",quantile="0.05"} NaN
prometheus_target_sync_length_seconds{scrape_job="cadvisor",quantile="0.5"} NaN
prometheus_target_sync_length_seconds{scrape_job="cadvisor",quantile="0.9"} NaN
prometheus_target_sync_length_seconds{scrape_job="cadvisor",quantile="0.99"} NaN
prometheus_target_sync_length_seconds_sum{scrape_job="cadvisor"} 0.000154351
prometheus_target_sync_length_seconds_count{scrape_job="cadvisor"} 1
prometheus_target_sync_length_seconds{scrape_job="grafana",quantile="0.01"} NaN
prometheus_target_sync_length_seconds{scrape_job="grafana",quantile="0.05"} NaN
prometheus_target_sync_length_seconds{scrape_job="grafana",quantile="0.5"} NaN
prometheus_target_sync_length_seconds{scrape_job="grafana",quantile="0.9"} NaN
prometheus_target_sync_length_seconds{scrape_job="grafana",quantile="0.99"} NaN
prometheus_target_sync_length_seconds_sum{scrape_job="grafana"} 9.1223e-05
prometheus_target_sync_length_seconds_count{scrape_job="grafana"} 1
prometheus_target_sync_length_seconds{scrape_job="loki",quantile="0.01"} NaN
prometheus_target_sync_length_seconds{scrape_job="loki",quantile="0.05"} NaN
prometheus_target_sync_length_seconds{scrape_job="loki",quantile="0.5"} NaN
prometheus_target_sync_length_seconds{scrape_job="loki",quantile="0.9"} NaN
prometheus_target_sync_length_seconds{scrape_job="loki",quantile="0.99"} NaN
prometheus_target_sync_length_seconds_sum{scrape_job="loki"} 0.000135374
prometheus_target_sync_length_seconds_count{scrape_job="loki"} 1
prometheus_target_sync_length_seconds{scrape_job="node-exporter",quantile="0.01"} NaN
prometheus_target_sync_length_seconds{scrape_job="node-exporter",quantile="0.05"} NaN
prometheus_target_sync_length_seconds{scrape_job="node-exporter",quantile="0.5"} NaN
prometheus_target_sync_length_seconds{scrape_job="node-exporter",quantile="0.9"} NaN
prometheus_target_sync_length_seconds{scrape_job="node-exporter",quantile="0.99"} NaN
prometheus_target_sync_length_seconds_sum{scrape_job="node-exporter"} 0.000112032
prometheus_target_sync_length_seconds_count{scrape_job="node-exporter"} 1
prometheus_target_sync_length_seconds{scrape_job="postgres",quantile="0.01"} NaN
prometheus_target_sync_length_seconds{scrape_job="postgres",quantile="0.05"} NaN
prometheus_target_sync_length_seconds{scrape_job="postgres",quantile="0.5"} NaN
prometheus_target_sync_length_seconds{scrape_job="postgres",quantile="0.9"} NaN
prometheus_target_sync_length_seconds{scrape_job="postgres",quantile="0.99"} NaN
prometheus_target_sync_length_seconds_sum{scrape_job="postgres"} 0.000136602
prometheus_target_sync_length_seconds_count{scrape_job="postgres"} 1
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} NaN
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} NaN
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} NaN
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} NaN
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} NaN
prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.000200236
prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1
prometheus_target_sync_length_seconds{scrape_job="redis",quantile="0.01"} NaN
prometheus_target_sync_length_seconds{scrape_job="redis",quantile="0.05"} NaN
prometheus_target_sync_length_seconds{scrape_job="redis",quantile="0.5"} NaN
prometheus_target_sync_length_seconds{scrape_job="redis",quantile="0.9"} NaN
prometheus_target_sync_length_seconds{scrape_job="redis",quantile="0.99"} NaN
prometheus_target_sync_length_seconds_sum{scrape_job="redis"} 0.000179249
prometheus_target_sync_length_seconds_count{scrape_job="redis"} 1
prometheus_target_sync_length_seconds{scrape_job="tempo",quantile="0.01"} NaN
prometheus_target_sync_length_seconds{scrape_job="tempo",quantile="0.05"} NaN
prometheus_target_sync_length_seconds{scrape_job="tempo",quantile="0.5"} NaN
prometheus_target_sync_length_seconds{scrape_job="tempo",quantile="0.9"} NaN
prometheus_target_sync_length_seconds{scrape_job="tempo",quantile="0.99"} NaN
prometheus_target_sync_length_seconds_sum{scrape_job="tempo"} 9.5871e-05
prometheus_target_sync_length_seconds_count{scrape_job="tempo"} 1
prometheus_target_sync_length_seconds{scrape_job="traefik",quantile="0.01"} NaN
prometheus_target_sync_length_seconds{scrape_job="traefik",quantile="0.05"} NaN
prometheus_target_sync_length_seconds{scrape_job="traefik",quantile="0.5"} NaN
prometheus_target_sync_length_seconds{scrape_job="traefik",quantile="0.9"} NaN
prometheus_target_sync_length_seconds{scrape_job="traefik",quantile="0.99"} NaN
prometheus_target_sync_length_seconds_sum{scrape_job="traefik"} 0.000159196
prometheus_target_sync_length_seconds_count{scrape_job="traefik"} 1
# HELP prometheus_template_text_expansion_failures_total The total number of template text expansion failures.
# TYPE prometheus_template_text_expansion_failures_total counter
prometheus_template_text_expansion_failures_total 0
# HELP prometheus_template_text_expansions_total The total number of template text expansions.
# TYPE prometheus_template_text_expansions_total counter
prometheus_template_text_expansions_total 226058
# HELP prometheus_treecache_watcher_goroutines The current number of watcher goroutines.
# TYPE prometheus_treecache_watcher_goroutines gauge
prometheus_treecache_watcher_goroutines 0
# HELP prometheus_treecache_zookeeper_failures_total The total number of ZooKeeper failures.
# TYPE prometheus_treecache_zookeeper_failures_total counter
prometheus_treecache_zookeeper_failures_total 0
# HELP prometheus_tsdb_blocks_loaded Number of currently loaded data blocks.
# TYPE prometheus_tsdb_blocks_loaded gauge
prometheus_tsdb_blocks_loaded 9
# HELP prometheus_tsdb_checkpoint_creations_failed_total Total number of checkpoint creations that failed.
# TYPE prometheus_tsdb_checkpoint_creations_failed_total counter
prometheus_tsdb_checkpoint_creations_failed_total 0
# HELP prometheus_tsdb_checkpoint_creations_total Total number of checkpoint creations attempted.
# TYPE prometheus_tsdb_checkpoint_creations_total counter
prometheus_tsdb_checkpoint_creations_total 11
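# Note: a hedged health check over the checkpoint counters above (the 1h window
# is an assumption); 0 failures out of 11 creation attempts is the expected
# steady state:
#   rate(prometheus_tsdb_checkpoint_creations_failed_total[1h]) > 0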
# HELP prometheus_tsdb_checkpoint_deletions_failed_total Total number of checkpoint deletions that failed.
# TYPE prometheus_tsdb_checkpoint_deletions_failed_total counter
prometheus_tsdb_checkpoint_deletions_failed_total 0
# HELP prometheus_tsdb_checkpoint_deletions_total Total number of checkpoint deletions attempted.
# TYPE prometheus_tsdb_checkpoint_deletions_total counter
prometheus_tsdb_checkpoint_deletions_total 11
# HELP prometheus_tsdb_clean_start -1: lockfile is disabled. 0: a lockfile from a previous execution was replaced. 1: lockfile creation was clean.
# TYPE prometheus_tsdb_clean_start gauge
prometheus_tsdb_clean_start 0
# HELP prometheus_tsdb_compaction_chunk_range_seconds Final time range of chunks on their first compaction.
# TYPE prometheus_tsdb_compaction_chunk_range_seconds histogram
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="100"} 129
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="400"} 129
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="1600"} 129
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="6400"} 129
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="25600"} 400
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="102400"} 1052
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="409600"} 2602
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="1.6384e+06"} 18058
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="6.5536e+06"} 2.571142e+06
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="2.62144e+07"} 2.633581e+06
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="+Inf"} 2.633581e+06
prometheus_tsdb_compaction_chunk_range_seconds_sum 5.10334263105e+12
prometheus_tsdb_compaction_chunk_range_seconds_count 2.633581e+06
# HELP prometheus_tsdb_compaction_chunk_samples Final number of samples on their first compaction.
# TYPE prometheus_tsdb_compaction_chunk_samples histogram
prometheus_tsdb_compaction_chunk_samples_bucket{le="4"} 757
prometheus_tsdb_compaction_chunk_samples_bucket{le="6"} 1076
prometheus_tsdb_compaction_chunk_samples_bucket{le="9"} 1621
prometheus_tsdb_compaction_chunk_samples_bucket{le="13.5"} 2135
prometheus_tsdb_compaction_chunk_samples_bucket{le="20.25"} 2536
prometheus_tsdb_compaction_chunk_samples_bucket{le="30.375"} 3133
prometheus_tsdb_compaction_chunk_samples_bucket{le="45.5625"} 3684
prometheus_tsdb_compaction_chunk_samples_bucket{le="68.34375"} 13239
prometheus_tsdb_compaction_chunk_samples_bucket{le="102.515625"} 17499
prometheus_tsdb_compaction_chunk_samples_bucket{le="153.7734375"} 2.510565e+06
prometheus_tsdb_compaction_chunk_samples_bucket{le="230.66015625"} 2.633233e+06
prometheus_tsdb_compaction_chunk_samples_bucket{le="345.990234375"} 2.633581e+06
prometheus_tsdb_compaction_chunk_samples_bucket{le="+Inf"} 2.633581e+06
prometheus_tsdb_compaction_chunk_samples_sum 3.23337268e+08
prometheus_tsdb_compaction_chunk_samples_count 2.633581e+06
# HELP prometheus_tsdb_compaction_chunk_size_bytes Final size of chunks on their first compaction.
# TYPE prometheus_tsdb_compaction_chunk_size_bytes histogram
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="32"} 1800
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="48"} 1.438678e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="72"} 2.170346e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="108"} 2.288637e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="162"} 2.338081e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="243"} 2.399041e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="364.5"} 2.505014e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="546.75"} 2.60039e+06 prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="820.125"} 2.617437e+06 prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="1230.1875"} 2.633581e+06 prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="1845.28125"} 2.633581e+06 prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="2767.921875"} 2.633581e+06 prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="+Inf"} 2.633581e+06 prometheus_tsdb_compaction_chunk_size_bytes_sum 2.40538144e+08 prometheus_tsdb_compaction_chunk_size_bytes_count 2.633581e+06 # HELP prometheus_tsdb_compaction_duration_seconds Duration of compaction runs # TYPE prometheus_tsdb_compaction_duration_seconds histogram prometheus_tsdb_compaction_duration_seconds_bucket{le="1"} 0 prometheus_tsdb_compaction_duration_seconds_bucket{le="2"} 9 prometheus_tsdb_compaction_duration_seconds_bucket{le="4"} 11 prometheus_tsdb_compaction_duration_seconds_bucket{le="8"} 15 prometheus_tsdb_compaction_duration_seconds_bucket{le="16"} 16 prometheus_tsdb_compaction_duration_seconds_bucket{le="32"} 16 prometheus_tsdb_compaction_duration_seconds_bucket{le="64"} 16 prometheus_tsdb_compaction_duration_seconds_bucket{le="128"} 16 prometheus_tsdb_compaction_duration_seconds_bucket{le="256"} 16 prometheus_tsdb_compaction_duration_seconds_bucket{le="512"} 16 prometheus_tsdb_compaction_duration_seconds_bucket{le="1024"} 16 prometheus_tsdb_compaction_duration_seconds_bucket{le="2048"} 16 prometheus_tsdb_compaction_duration_seconds_bucket{le="4096"} 16 prometheus_tsdb_compaction_duration_seconds_bucket{le="8192"} 16 prometheus_tsdb_compaction_duration_seconds_bucket{le="+Inf"} 16 prometheus_tsdb_compaction_duration_seconds_sum 53.574930482999996 prometheus_tsdb_compaction_duration_seconds_count 16 # HELP prometheus_tsdb_compaction_populating_block Set to 1 when a block is currently being written to the disk. # TYPE prometheus_tsdb_compaction_populating_block gauge prometheus_tsdb_compaction_populating_block 0 # HELP prometheus_tsdb_compactions_failed_total Total number of compactions that failed for the partition. # TYPE prometheus_tsdb_compactions_failed_total counter prometheus_tsdb_compactions_failed_total 0 # HELP prometheus_tsdb_compactions_skipped_total Total number of skipped compactions due to disabled auto compaction. # TYPE prometheus_tsdb_compactions_skipped_total counter prometheus_tsdb_compactions_skipped_total 0 # HELP prometheus_tsdb_compactions_total Total number of compactions that were executed for the partition. # TYPE prometheus_tsdb_compactions_total counter prometheus_tsdb_compactions_total 16 # HELP prometheus_tsdb_compactions_triggered_total Total number of triggered compactions for the partition. # TYPE prometheus_tsdb_compactions_triggered_total counter prometheus_tsdb_compactions_triggered_total 1236 # HELP prometheus_tsdb_data_replay_duration_seconds Time taken to replay the data on disk. # TYPE prometheus_tsdb_data_replay_duration_seconds gauge prometheus_tsdb_data_replay_duration_seconds 13.607558657 # HELP prometheus_tsdb_exemplar_exemplars_appended_total Total number of appended exemplars. # TYPE prometheus_tsdb_exemplar_exemplars_appended_total counter prometheus_tsdb_exemplar_exemplars_appended_total 0 # HELP prometheus_tsdb_exemplar_exemplars_in_storage Number of exemplars currently in circular storage. 
# HELP prometheus_tsdb_exemplar_exemplars_in_storage Number of exemplars currently in circular storage.
# TYPE prometheus_tsdb_exemplar_exemplars_in_storage gauge
prometheus_tsdb_exemplar_exemplars_in_storage 0
# HELP prometheus_tsdb_exemplar_last_exemplars_timestamp_seconds The timestamp of the oldest exemplar stored in circular storage. Useful to check what time range the current exemplar buffer limit allows. For a typical setup this is effectively the oldest timestamp across all exemplars; this does not hold, though, if one series' timestamps are in the future relative to the rest.
# TYPE prometheus_tsdb_exemplar_last_exemplars_timestamp_seconds gauge
prometheus_tsdb_exemplar_last_exemplars_timestamp_seconds 0
# HELP prometheus_tsdb_exemplar_max_exemplars Total number of exemplars the exemplar storage can store; resizable.
# TYPE prometheus_tsdb_exemplar_max_exemplars gauge
prometheus_tsdb_exemplar_max_exemplars 0
# HELP prometheus_tsdb_exemplar_out_of_order_exemplars_total Total number of failed ingestion attempts for out-of-order exemplars.
# TYPE prometheus_tsdb_exemplar_out_of_order_exemplars_total counter
prometheus_tsdb_exemplar_out_of_order_exemplars_total 0
# HELP prometheus_tsdb_exemplar_series_with_exemplars_in_storage Number of series with exemplars currently in circular storage.
# TYPE prometheus_tsdb_exemplar_series_with_exemplars_in_storage gauge
prometheus_tsdb_exemplar_series_with_exemplars_in_storage 0
# HELP prometheus_tsdb_head_active_appenders Number of currently active appender transactions.
# TYPE prometheus_tsdb_head_active_appenders gauge
prometheus_tsdb_head_active_appenders 0
# HELP prometheus_tsdb_head_chunks Total number of chunks in the head block.
# TYPE prometheus_tsdb_head_chunks gauge
prometheus_tsdb_head_chunks 248354
# HELP prometheus_tsdb_head_chunks_created_total Total number of chunks created in the head.
# TYPE prometheus_tsdb_head_chunks_created_total counter
prometheus_tsdb_head_chunks_created_total 2.881935e+06
# HELP prometheus_tsdb_head_chunks_removed_total Total number of chunks removed in the head.
# TYPE prometheus_tsdb_head_chunks_removed_total counter
prometheus_tsdb_head_chunks_removed_total 2.633581e+06
# HELP prometheus_tsdb_head_chunks_storage_size_bytes Size of the chunks_head directory.
# TYPE prometheus_tsdb_head_chunks_storage_size_bytes gauge
prometheus_tsdb_head_chunks_storage_size_bytes 3.4095508e+07
# HELP prometheus_tsdb_head_gc_duration_seconds Runtime of garbage collection in the head block.
# TYPE prometheus_tsdb_head_gc_duration_seconds summary
prometheus_tsdb_head_gc_duration_seconds_sum 1.024033215
prometheus_tsdb_head_gc_duration_seconds_count 11
# HELP prometheus_tsdb_head_max_time Maximum timestamp of the head block. The unit is decided by the library consumer.
# TYPE prometheus_tsdb_head_max_time gauge
prometheus_tsdb_head_max_time 1.764991860513e+12
# HELP prometheus_tsdb_head_max_time_seconds Maximum timestamp of the head block.
# TYPE prometheus_tsdb_head_max_time_seconds gauge
prometheus_tsdb_head_max_time_seconds 1.76499186e+09
# HELP prometheus_tsdb_head_min_time Minimum time bound of the head block. The unit is decided by the library consumer.
# TYPE prometheus_tsdb_head_min_time gauge
prometheus_tsdb_head_min_time 1.764986400083e+12
# HELP prometheus_tsdb_head_min_time_seconds Minimum time bound of the head block.
# TYPE prometheus_tsdb_head_min_time_seconds gauge
prometheus_tsdb_head_min_time_seconds 1.7649864e+09
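# Note: the head-block bounds above imply a span of
# (1.764991860513e+12 - 1.764986400083e+12) ms ~= 5460 s, about 91 minutes,
# consistent with a 2h block range plus periodic head truncation. In PromQL:
#   (prometheus_tsdb_head_max_time - prometheus_tsdb_head_min_time) / 1000 / 60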
# HELP prometheus_tsdb_head_out_of_order_samples_appended_total Total number of appended out-of-order samples.
# TYPE prometheus_tsdb_head_out_of_order_samples_appended_total counter
prometheus_tsdb_head_out_of_order_samples_appended_total{type="float"} 0
prometheus_tsdb_head_out_of_order_samples_appended_total{type="histogram"} 0
# HELP prometheus_tsdb_head_samples_appended_total Total number of appended samples.
# TYPE prometheus_tsdb_head_samples_appended_total counter
prometheus_tsdb_head_samples_appended_total{type="float"} 3.02514864e+08
prometheus_tsdb_head_samples_appended_total{type="histogram"} 0
# HELP prometheus_tsdb_head_series Total number of series in the head block.
# TYPE prometheus_tsdb_head_series gauge
prometheus_tsdb_head_series 66543
# HELP prometheus_tsdb_head_series_created_total Total number of series created in the head.
# TYPE prometheus_tsdb_head_series_created_total counter
prometheus_tsdb_head_series_created_total 78021
# HELP prometheus_tsdb_head_series_not_found_total Total number of requests for series that were not found.
# TYPE prometheus_tsdb_head_series_not_found_total counter
prometheus_tsdb_head_series_not_found_total 0
# HELP prometheus_tsdb_head_series_removed_total Total number of series removed in the head.
# TYPE prometheus_tsdb_head_series_removed_total counter
prometheus_tsdb_head_series_removed_total 11478
# HELP prometheus_tsdb_head_stale_series Total number of stale series in the head block.
# TYPE prometheus_tsdb_head_stale_series gauge
prometheus_tsdb_head_stale_series 35
# HELP prometheus_tsdb_head_truncations_failed_total Total number of head truncations that failed.
# TYPE prometheus_tsdb_head_truncations_failed_total counter
prometheus_tsdb_head_truncations_failed_total 0
# HELP prometheus_tsdb_head_truncations_total Total number of head truncations attempted.
# TYPE prometheus_tsdb_head_truncations_total counter
prometheus_tsdb_head_truncations_total 11
# HELP prometheus_tsdb_isolation_high_watermark The highest TSDB append ID that has been given out.
# TYPE prometheus_tsdb_isolation_high_watermark gauge
prometheus_tsdb_isolation_high_watermark 412734
# HELP prometheus_tsdb_isolation_low_watermark The lowest TSDB append ID that is still referenced.
# TYPE prometheus_tsdb_isolation_low_watermark gauge
prometheus_tsdb_isolation_low_watermark 412734
# HELP prometheus_tsdb_lowest_timestamp Lowest timestamp value stored in the database. The unit is decided by the library consumer.
# TYPE prometheus_tsdb_lowest_timestamp gauge
prometheus_tsdb_lowest_timestamp 1.764126470287e+12
# HELP prometheus_tsdb_lowest_timestamp_seconds Lowest timestamp value stored in the database.
# TYPE prometheus_tsdb_lowest_timestamp_seconds gauge
prometheus_tsdb_lowest_timestamp_seconds 1.76412647e+09
# HELP prometheus_tsdb_mmap_chunk_corruptions_total Total number of memory-mapped chunk corruptions.
# TYPE prometheus_tsdb_mmap_chunk_corruptions_total counter
prometheus_tsdb_mmap_chunk_corruptions_total 0
# HELP prometheus_tsdb_mmap_chunks_total Total number of chunks that were memory-mapped.
# TYPE prometheus_tsdb_mmap_chunks_total counter
prometheus_tsdb_mmap_chunks_total 2.441323e+06
# HELP prometheus_tsdb_out_of_bound_samples_total Total number of failed ingestion attempts for out-of-bounds samples while out-of-order support is disabled.
# TYPE prometheus_tsdb_out_of_bound_samples_total counter
prometheus_tsdb_out_of_bound_samples_total{type="float"} 0
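# Note: the head-series counters above are self-consistent:
# 78021 created - 11478 removed = 66543, matching prometheus_tsdb_head_series.
# A hedged series-churn view (the 1h window is an assumption):
#   rate(prometheus_tsdb_head_series_created_total[1h])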
# HELP prometheus_tsdb_out_of_order_samples_total Total number of failed ingestion attempts for out-of-order samples while out-of-order support is disabled.
# TYPE prometheus_tsdb_out_of_order_samples_total counter
prometheus_tsdb_out_of_order_samples_total{type="float"} 0
prometheus_tsdb_out_of_order_samples_total{type="histogram"} 0
# HELP prometheus_tsdb_reloads_failures_total Number of times the database failed to reload block data from disk.
# TYPE prometheus_tsdb_reloads_failures_total counter
prometheus_tsdb_reloads_failures_total 0
# HELP prometheus_tsdb_reloads_total Number of times the database reloaded block data from disk.
# TYPE prometheus_tsdb_reloads_total counter
prometheus_tsdb_reloads_total 1231
# HELP prometheus_tsdb_retention_limit_bytes Maximum number of bytes to be retained in the TSDB blocks; a configured value of 0 means disabled.
# TYPE prometheus_tsdb_retention_limit_bytes gauge
prometheus_tsdb_retention_limit_bytes 0
# HELP prometheus_tsdb_retention_limit_seconds How long to retain samples in storage.
# TYPE prometheus_tsdb_retention_limit_seconds gauge
prometheus_tsdb_retention_limit_seconds 2.592e+06
# HELP prometheus_tsdb_size_retentions_total The number of times that blocks were deleted because the maximum number of bytes was exceeded.
# TYPE prometheus_tsdb_size_retentions_total counter
prometheus_tsdb_size_retentions_total 0
# HELP prometheus_tsdb_snapshot_replay_error_total Total number of snapshot replays that failed.
# TYPE prometheus_tsdb_snapshot_replay_error_total counter
prometheus_tsdb_snapshot_replay_error_total 0
# HELP prometheus_tsdb_storage_blocks_bytes The number of bytes that are currently used for local storage by all blocks.
# TYPE prometheus_tsdb_storage_blocks_bytes gauge
prometheus_tsdb_storage_blocks_bytes 2.862793438e+09
# HELP prometheus_tsdb_symbol_table_size_bytes Size of symbol table in memory for loaded blocks.
# TYPE prometheus_tsdb_symbol_table_size_bytes gauge
prometheus_tsdb_symbol_table_size_bytes 14656
# HELP prometheus_tsdb_time_retentions_total The number of times that blocks were deleted because the maximum time limit was exceeded.
# TYPE prometheus_tsdb_time_retentions_total counter
prometheus_tsdb_time_retentions_total 0
# HELP prometheus_tsdb_tombstone_cleanup_seconds The time taken to recompact blocks to remove tombstones.
# TYPE prometheus_tsdb_tombstone_cleanup_seconds histogram
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="+Inf"} 0
prometheus_tsdb_tombstone_cleanup_seconds_sum 0
prometheus_tsdb_tombstone_cleanup_seconds_count 0
# HELP prometheus_tsdb_too_old_samples_total Total number of failed ingestion attempts for out-of-order samples with out-of-order support enabled, but with the sample outside the time window.
# TYPE prometheus_tsdb_too_old_samples_total counter
prometheus_tsdb_too_old_samples_total{type="float"} 0
# HELP prometheus_tsdb_vertical_compactions_total Total number of compactions done on overlapping blocks.
# TYPE prometheus_tsdb_vertical_compactions_total counter
prometheus_tsdb_vertical_compactions_total 0
# HELP prometheus_tsdb_wal_completed_pages_total Total number of completed pages.
# TYPE prometheus_tsdb_wal_completed_pages_total counter
prometheus_tsdb_wal_completed_pages_total 49034
# HELP prometheus_tsdb_wal_corruptions_total Total number of WAL corruptions.
# TYPE prometheus_tsdb_wal_corruptions_total counter
prometheus_tsdb_wal_corruptions_total 0
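# Note: retention here is time-based only: 2.592e+06 s = 30 days, with the size
# limit disabled (0). A hedged disk-footprint query combining the block size above
# with the WAL directory size reported below:
#   prometheus_tsdb_storage_blocks_bytes + prometheus_tsdb_wal_storage_size_bytes
# which comes to roughly 2.86 GB + 0.21 GB ~= 3.07 GB on this server.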
# HELP prometheus_tsdb_wal_fsync_duration_seconds Duration of write log fsync.
# TYPE prometheus_tsdb_wal_fsync_duration_seconds summary
prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.5"} NaN
prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.9"} NaN
prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.99"} NaN
prometheus_tsdb_wal_fsync_duration_seconds_sum 0.680921172
prometheus_tsdb_wal_fsync_duration_seconds_count 21
# HELP prometheus_tsdb_wal_page_flushes_total Total number of page flushes.
# TYPE prometheus_tsdb_wal_page_flushes_total counter
prometheus_tsdb_wal_page_flushes_total 186900
# HELP prometheus_tsdb_wal_record_bytes_saved_total Total number of bytes saved by the optional record compression. Use this metric to learn about the effectiveness of the record compression.
# TYPE prometheus_tsdb_wal_record_bytes_saved_total counter
prometheus_tsdb_wal_record_bytes_saved_total{compression="snappy"} 2.145355601e+09
# HELP prometheus_tsdb_wal_record_part_writes_total Total number of record parts written before flushing.
# TYPE prometheus_tsdb_wal_record_part_writes_total counter
prometheus_tsdb_wal_record_part_writes_total 186879
# HELP prometheus_tsdb_wal_record_parts_bytes_written_total Total number of record part bytes written before flushing, including CRC and compression headers.
# TYPE prometheus_tsdb_wal_record_parts_bytes_written_total counter
prometheus_tsdb_wal_record_parts_bytes_written_total 1.606484634e+09
# HELP prometheus_tsdb_wal_segment_current Write log segment index that TSDB is currently writing to.
# TYPE prometheus_tsdb_wal_segment_current gauge
prometheus_tsdb_wal_segment_current 235
# HELP prometheus_tsdb_wal_storage_size_bytes Size of the write log directory.
# TYPE prometheus_tsdb_wal_storage_size_bytes gauge
prometheus_tsdb_wal_storage_size_bytes 2.06911604e+08
# HELP prometheus_tsdb_wal_truncate_duration_seconds Duration of WAL truncation.
# TYPE prometheus_tsdb_wal_truncate_duration_seconds summary
prometheus_tsdb_wal_truncate_duration_seconds_sum 25.838256746000006
prometheus_tsdb_wal_truncate_duration_seconds_count 11
# HELP prometheus_tsdb_wal_truncations_failed_total Total number of write log truncations that failed.
# TYPE prometheus_tsdb_wal_truncations_failed_total counter
prometheus_tsdb_wal_truncations_failed_total 0
# HELP prometheus_tsdb_wal_truncations_total Total number of write log truncations attempted.
# TYPE prometheus_tsdb_wal_truncations_total counter
prometheus_tsdb_wal_truncations_total 11
# HELP prometheus_tsdb_wal_writes_failed_total Total number of write log writes that failed.
# TYPE prometheus_tsdb_wal_writes_failed_total counter
prometheus_tsdb_wal_writes_failed_total 0
# HELP prometheus_web_federation_errors_total Total number of errors that occurred while sending federation responses.
# TYPE prometheus_web_federation_errors_total counter
prometheus_web_federation_errors_total 0
# HELP prometheus_web_federation_warnings_total Total number of warnings that occurred while sending federation responses.
# TYPE prometheus_web_federation_warnings_total counter
prometheus_web_federation_warnings_total 0
# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
# TYPE promhttp_metric_handler_requests_in_flight gauge
promhttp_metric_handler_requests_in_flight 1
# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
# TYPE promhttp_metric_handler_requests_total counter
promhttp_metric_handler_requests_total{code="200"} 4896
promhttp_metric_handler_requests_total{code="500"} 0
promhttp_metric_handler_requests_total{code="503"} 0
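# Note: a rough effectiveness figure for the snappy WAL record compression from the
# counters above: 2.145355601e+09 bytes saved against 1.606484634e+09 bytes actually
# written, i.e. saved / (saved + written) ~= 57% of the pre-compression volume. As PromQL:
#   prometheus_tsdb_wal_record_bytes_saved_total
#     / (prometheus_tsdb_wal_record_bytes_saved_total
#        + prometheus_tsdb_wal_record_parts_bytes_written_total)
# The promhttp counters also show that all 4896 /metrics scrapes returned HTTP 200.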