From 37733b2dc88a2a7ad79ed87d689c93afaccb33b0 Mon Sep 17 00:00:00 2001
From: nyyu
Date: Tue, 28 Jan 2025 22:17:57 +0100
Subject: [PATCH] chore: upd alerts

---
 conf/prometheus/alerts/embedded-exporter.yml  |   4 +-
 conf/prometheus/alerts/kubestate-exporter.yml |   2 +-
 conf/prometheus/alerts/node-exporter.yml      | 159 ++++++++----------
 conf/prometheus/alerts/postgres-exporter.yml  |  14 +-
 conf/prometheus/alerts/smartctl-exporter.yml  |  77 +++++++++
 5 files changed, 156 insertions(+), 100 deletions(-)
 create mode 100644 conf/prometheus/alerts/smartctl-exporter.yml

diff --git a/conf/prometheus/alerts/embedded-exporter.yml b/conf/prometheus/alerts/embedded-exporter.yml
index 65bfd82..8a2e402 100644
--- a/conf/prometheus/alerts/embedded-exporter.yml
+++ b/conf/prometheus/alerts/embedded-exporter.yml
@@ -32,7 +32,7 @@ groups:
         description: "A Prometheus job does not have living target anymore.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: PrometheusTargetMissingWithWarmupTime
-      expr: 'sum by (instance, job) ((up == 0) * on (instance) group_right(job) (node_time_seconds - node_boot_time_seconds > 600))'
+      expr: 'sum by (instance, job) ((up == 0) * on (instance) group_left (__name__) (node_time_seconds - node_boot_time_seconds > 600))'
       for: 0m
       labels:
         severity: critical
@@ -248,7 +248,7 @@ groups:
         description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: PrometheusTimeseriesCardinality
-      expr: 'label_replace(count by(__name__) ({__name__=~".+"}), "name", "$1", "__name__", "(.+)") > 10000'
+      expr: '(label_replace(count by (__name__) ({__name__=~".+"}), "name", "$1", "__name__", "(.+)") unless on (__name__) ({__name__=~"node_cpu.*|node_systemd_unit_state"})) > 10000'
       for: 0m
       labels:
         severity: warning
diff --git a/conf/prometheus/alerts/kubestate-exporter.yml b/conf/prometheus/alerts/kubestate-exporter.yml
index 7e32694..99e6b0b 100644
--- a/conf/prometheus/alerts/kubestate-exporter.yml
+++ b/conf/prometheus/alerts/kubestate-exporter.yml
@@ -257,7 +257,7 @@ groups:
         description: "Some Pods of DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} are running where they are not supposed to run\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: KubernetesCronjobTooLong
-      expr: 'time() - kube_cronjob_next_schedule_time > 3600'
+      expr: 'time() - kube_cronjob_next_schedule_time > 604800'
       for: 0m
       labels:
         severity: warning
diff --git a/conf/prometheus/alerts/node-exporter.yml b/conf/prometheus/alerts/node-exporter.yml
index 6a465d9..d52b34d 100644
--- a/conf/prometheus/alerts/node-exporter.yml
+++ b/conf/prometheus/alerts/node-exporter.yml
@@ -5,7 +5,7 @@ groups:
   rules:

     - alert: HostOutOfMemory
-      expr: '(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+      expr: '(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < .10)'
       for: 2m
       labels:
         severity: warning
@@ -14,97 +14,88 @@ groups:
         description: "Node memory is filling up (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostMemoryUnderMemoryPressure
-      expr: '(rate(node_vmstat_pgmajfault[1m]) > 1000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
-      for: 2m
+      expr: '(rate(node_vmstat_pgmajfault[5m]) > 1000)'
+      for: 0m
       labels:
         severity: warning
       annotations:
         summary: Host memory under memory pressure (instance {{ $labels.instance }})
-        description: "The node is under heavy memory pressure. High rate of major page faults\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+        description: "The node is under heavy memory pressure. High rate of major page faults (memory pages being read back from disk).\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
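+        # node_vmstat_pgmajfault counts major page faults, i.e. pages that had to be
+        # fetched back from disk; a sustained rate above 1000/s usually means the
+        # working set no longer fits in RAM.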

     - alert: HostMemoryIsUnderutilized
-      expr: '(100 - (avg_over_time(node_memory_MemAvailable_bytes[30m]) / node_memory_MemTotal_bytes * 100) < 20) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
-      for: 1w
+      expr: 'min_over_time(node_memory_MemFree_bytes[1w]) > node_memory_MemTotal_bytes * .8'
+      for: 0m
       labels:
         severity: info
       annotations:
         summary: Host Memory is underutilized (instance {{ $labels.instance }})
-        description: "Node memory is < 20% for 1 week. Consider reducing memory space. (instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+        description: "Node memory usage has been < 20% for 1 week. Consider reducing the allocated memory. (instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostUnusualNetworkThroughputIn
-      expr: '(sum by (instance) (rate(node_network_receive_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
-      for: 5m
+      expr: '((rate(node_network_receive_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80)'
+      for: 0m
       labels:
         severity: warning
       annotations:
         summary: Host unusual network throughput in (instance {{ $labels.instance }})
-        description: "Host network interfaces are probably receiving too much data (> 100 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+        description: "Host receive bandwidth is high (> 80% of link speed)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostUnusualNetworkThroughputOut
-      expr: '(sum by (instance) (rate(node_network_transmit_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
-      for: 5m
+      expr: '((rate(node_network_transmit_bytes_total[5m]) / on(instance, device) node_network_speed_bytes) > .80)'
+      for: 0m
       labels:
         severity: warning
       annotations:
         summary: Host unusual network throughput out (instance {{ $labels.instance }})
-        description: "Host network interfaces are probably sending too much data (> 100 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+        description: "Host transmit bandwidth is high (> 80% of link speed)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostUnusualDiskReadRate
-      expr: '(sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
-      for: 5m
+      expr: '(rate(node_disk_io_time_seconds_total[5m]) > .80)'
+      for: 0m
       labels:
         severity: warning
       annotations:
         summary: Host unusual disk read rate (instance {{ $labels.instance }})
-        description: "Disk is probably reading too much data (> 50 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
-    - alert: HostUnusualDiskWriteRate
-      expr: '(sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
-      for: 2m
-      labels:
-        severity: warning
-      annotations:
-        summary: Host unusual disk write rate (instance {{ $labels.instance }})
-        description: "Disk is probably writing too much data (> 50 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+        description: "Disk is too busy (I/O utilization > 80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostOutOfDiskSpace
-      expr: '((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+      expr: '(node_filesystem_avail_bytes{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"} / node_filesystem_size_bytes < .10 and on (instance, device, mountpoint) node_filesystem_readonly == 0)'
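+      # fuse.*, tmpfs, cifs and nfs are excluded because pseudo and network
+      # filesystems report capacity numbers that are not meaningful for local
+      # disk alerting.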
       for: 2m
       labels:
-        severity: warning
+        severity: critical
       annotations:
         summary: Host out of disk space (instance {{ $labels.instance }})
         description: "Disk is almost full (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

-    - alert: HostDiskWillFillIn24Hours
-      expr: '((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    - alert: HostDiskMayFillIn24Hours
+      expr: 'predict_linear(node_filesystem_avail_bytes{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"}[1h], 86400) <= 0 and node_filesystem_avail_bytes > 0'
       for: 2m
       labels:
         severity: warning
       annotations:
-        summary: Host disk will fill in 24 hours (instance {{ $labels.instance }})
-        description: "Filesystem is predicted to run out of space within the next 24 hours at current write rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+        summary: Host disk may fill in 24 hours (instance {{ $labels.instance }})
+        description: "Filesystem will likely run out of space within the next 24 hours\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostOutOfInodes
-      expr: '(node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+      expr: '(node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} < .10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0)'
       for: 2m
       labels:
-        severity: warning
+        severity: critical
       annotations:
         summary: Host out of inodes (instance {{ $labels.instance }})
         description: "Disk is almost running out of available inodes (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostFilesystemDeviceError
-      expr: 'node_filesystem_device_error == 1'
+      expr: 'node_filesystem_device_error{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"} == 1'
       for: 2m
       labels:
         severity: critical
       annotations:
         summary: Host filesystem device error (instance {{ $labels.instance }})
-        description: "{{ $labels.instance }}: Device error with the {{ $labels.mountpoint }} filesystem\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+        description: "Error stat-ing the {{ $labels.mountpoint }} filesystem\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostInodesWillFillIn24Hours
-      expr: '(node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and predict_linear(node_filesystem_files_free{fstype!="msdosfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly{fstype!="msdosfs"} == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+      expr: 'predict_linear(node_filesystem_files_free{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"}[1h], 86400) <= 0 and node_filesystem_files_free > 0'
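+      # predict_linear() fits a linear regression over the last 1h of samples and
+      # extrapolates it 86400s (24h) ahead; the "and ... > 0" guard keeps the alert
+      # from firing on filesystems that are already out of free inodes.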
       for: 2m
       labels:
         severity: warning
@@ -113,7 +104,7 @@ groups:
         description: "Filesystem is predicted to run out of inodes within the next 24 hours at current write rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostUnusualDiskReadLatency
-      expr: '(rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+      expr: '(rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0)'
       for: 2m
       labels:
         severity: warning
@@ -122,7 +113,7 @@ groups:
         description: "Disk latency is growing (read operations > 100ms)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostUnusualDiskWriteLatency
-      expr: '(rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.1 and rate(node_disk_writes_completed_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+      expr: '(rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.1 and rate(node_disk_writes_completed_total[1m]) > 0)'
       for: 2m
       labels:
         severity: warning
@@ -131,7 +122,7 @@ groups:
         description: "Disk latency is growing (write operations > 100ms)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostHighCpuLoad
-      expr: '(sum by (instance) (avg by (mode, instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+      expr: '(avg by (instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > .80'
       for: 10m
       labels:
         severity: warning
@@ -140,16 +131,16 @@ groups:
         description: "CPU load is > 80%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostCpuIsUnderutilized
-      expr: '(100 - (rate(node_cpu_seconds_total{mode="idle"}[30m]) * 100) < 20) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+      expr: '(min by (instance) (rate(node_cpu_seconds_total{mode="idle"}[1h]))) > 0.8'
       for: 1w
       labels:
         severity: info
       annotations:
         summary: Host CPU is underutilized (instance {{ $labels.instance }})
-        description: "CPU load is < 20% for 1 week. Consider reducing the number of CPUs.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+        description: "CPU load has been < 20% for 1 week. Consider reducing the number of CPUs.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostCpuStealNoisyNeighbor
-      expr: '(avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+      expr: 'avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10'
       for: 0m
       labels:
         severity: warning
@@ -158,28 +149,25 @@ groups:
         description: "CPU steal is > 10%. A noisy neighbor is killing VM performances or a spot instance may be out of credit.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostCpuHighIowait
-      expr: '(avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) * 100 > 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+      expr: 'avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) > .10'
       for: 0m
       labels:
         severity: warning
       annotations:
         summary: Host CPU high iowait (instance {{ $labels.instance }})
-        description: "CPU iowait > 10%. A high iowait means that you are disk or network bound.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+        description: "CPU iowait > 10%. The CPU is idle while waiting for storage to respond.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
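+        # iowait counts cycles where the CPU was idle but had outstanding disk I/O;
+        # high iowait together with a busy disk points at storage, not CPU capacity.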

     - alert: HostUnusualDiskIo
-      expr: '(rate(node_disk_io_time_seconds_total[1m]) > 0.5) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+      expr: 'rate(node_disk_io_time_seconds_total[5m]) > 0.8'
       for: 5m
       labels:
         severity: warning
       annotations:
         summary: Host unusual disk IO (instance {{ $labels.instance }})
-        description: "Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+        description: "Disk I/O utilization > 80%. Check storage for issues or increase IOPS capabilities.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostContextSwitchingHigh
-      expr: '(rate(node_context_switches_total[15m])/count without(mode,cpu) (node_cpu_seconds_total{mode="idle"}))
-/
-(rate(node_context_switches_total[1d])/count without(mode,cpu) (node_cpu_seconds_total{mode="idle"})) > 2
-'
+      expr: '(rate(node_context_switches_total[15m])/count without(mode,cpu) (node_cpu_seconds_total{mode="idle"}))/(rate(node_context_switches_total[1d])/count without(mode,cpu) (node_cpu_seconds_total{mode="idle"})) > 2'
       for: 0m
       labels:
         severity: warning
@@ -188,7 +176,7 @@ groups:
         description: "Context switching is growing on the node (twice the daily average during the last 15m)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostSwapIsFillingUp
-      expr: '((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+      expr: '((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80)'
       for: 2m
       labels:
         severity: warning
@@ -197,7 +185,7 @@ groups:
         description: "Swap is filling up (>80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostSystemdServiceCrashed
-      expr: '(node_systemd_unit_state{state="failed"} == 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+      expr: '(node_systemd_unit_state{state="failed"} == 1)'
       for: 0m
       labels:
         severity: warning
@@ -206,7 +194,7 @@ groups:
         description: "systemd service crashed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostPhysicalComponentTooHot
-      expr: '((node_hwmon_temp_celsius * ignoring(label) group_left(instance, job, node, sensor) node_hwmon_sensor_label{label!="tctl"} > 75)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+      expr: 'node_hwmon_temp_celsius > node_hwmon_temp_max_celsius'
       for: 5m
       labels:
         severity: warning
@@ -223,44 +211,44 @@ groups:
         summary: Host node overtemperature alarm (instance {{ $labels.instance }})
         description: "Physical node temperature alarm triggered\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

-    - alert: HostRaidArrayGotInactive
-      expr: '(node_md_state{state="inactive"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    - alert: HostSoftwareRaidInsufficientDrives
+      expr: '((node_md_disks_required - on(device, instance) node_md_disks{state="active"}) > 0)'
       for: 0m
       labels:
         severity: critical
       annotations:
-        summary: Host RAID array got inactive (instance {{ $labels.instance }})
-        description: "RAID array {{ $labels.device }} is in a degraded state due to one or more disk failures. The number of spare drives is insufficient to fix the issue automatically.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+        summary: Host Software RAID insufficient drives (instance {{ $labels.instance }})
+        description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} has insufficient drives remaining.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
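+        # node_md_disks_required is the number of disks the array needs to be fully
+        # operational; once active disks drop below it the array runs degraded and
+        # one more failure can mean data loss.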

-    - alert: HostRaidDiskFailure
-      expr: '(node_md_disks{state="failed"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    - alert: HostSoftwareRaidDiskFailure
+      expr: '(node_md_disks{state="failed"} > 0)'
       for: 2m
       labels:
         severity: warning
       annotations:
-        summary: Host RAID disk failure (instance {{ $labels.instance }})
-        description: "At least one device in RAID array on {{ $labels.instance }} failed. Array {{ $labels.md_device }} needs attention and possibly a disk swap\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+        summary: Host Software RAID disk failure (instance {{ $labels.instance }})
+        description: "MD RAID array {{ $labels.device }} on {{ $labels.instance }} needs attention.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostKernelVersionDeviations
-      expr: '(count(sum(label_replace(node_uname_info, "kernel", "$1", "release", "([0-9]+.[0-9]+.[0-9]+).*")) by (kernel)) > 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
-      for: 6h
-      labels:
-        severity: warning
-      annotations:
-        summary: Host kernel version deviations (instance {{ $labels.instance }})
-        description: "Different kernel versions are running\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
-    - alert: HostOomKillDetected
-      expr: '(increase(node_vmstat_oom_kill[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+      expr: 'changes(node_uname_info[1h]) > 0'
       for: 0m
       labels:
-        severity: warning
+        severity: info
+      annotations:
+        summary: Host kernel version deviations (instance {{ $labels.instance }})
+        description: "Kernel version for {{ $labels.instance }} has changed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+    - alert: HostOomKillDetected
+      expr: '(increase(node_vmstat_oom_kill[1m]) > 0)'
+      for: 0m
+      labels:
+        severity: critical
       annotations:
         summary: Host OOM kill detected (instance {{ $labels.instance }})
         description: "OOM kill detected\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostEdacCorrectableErrorsDetected
-      expr: '(increase(node_edac_correctable_errors_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+      expr: '(increase(node_edac_correctable_errors_total[1m]) > 0)'
       for: 0m
       labels:
         severity: info
@@ -269,7 +257,7 @@ groups:
         description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} correctable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostEdacUncorrectableErrorsDetected
-      expr: '(node_edac_uncorrectable_errors_total > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+      expr: '(node_edac_uncorrectable_errors_total > 0)'
       for: 0m
       labels:
         severity: warning
@@ -278,7 +266,7 @@ groups:
         description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} uncorrectable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostNetworkReceiveErrors
-      expr: '(rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+      expr: '(rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01)'
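+      # Fires when more than 1% of received packets on an interface were errors
+      # over the last 2m; worth checking cabling, NIC firmware, or speed/duplex
+      # settings.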
       for: 2m
       labels:
         severity: warning
@@ -287,7 +275,7 @@ groups:
         description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} receive errors in the last two minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostNetworkTransmitErrors
-      expr: '(rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+      expr: '(rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01)'
       for: 2m
       labels:
         severity: warning
@@ -295,17 +283,8 @@ groups:
         summary: Host Network Transmit Errors (instance {{ $labels.instance }})
         description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} transmit errors in the last two minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

-    - alert: HostNetworkInterfaceSaturated
-      expr: '((rate(node_network_receive_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m]) + rate(node_network_transmit_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m])) / node_network_speed_bytes{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"} > 0.8 < 10000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
-      for: 1m
-      labels:
-        severity: warning
-      annotations:
-        summary: Host Network Interface Saturated (instance {{ $labels.instance }})
-        description: "The network interface \"{{ $labels.device }}\" on \"{{ $labels.instance }}\" is getting overloaded.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
     - alert: HostNetworkBondDegraded
-      expr: '((node_bonding_active - node_bonding_slaves) != 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+      expr: '((node_bonding_active - node_bonding_slaves) != 0)'
       for: 2m
       labels:
         severity: warning
@@ -314,7 +293,7 @@ groups:
         description: "Bond \"{{ $labels.device }}\" degraded on \"{{ $labels.instance }}\".\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostConntrackLimit
-      expr: '(node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+      expr: '(node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8)'
       for: 5m
       labels:
         severity: warning
@@ -323,7 +302,7 @@ groups:
         description: "The number of conntrack is approaching limit\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostClockSkew
-      expr: '((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+      expr: '((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0))'
       for: 10m
       labels:
         severity: warning
@@ -332,7 +311,7 @@ groups:
         description: "Clock skew detected. Clock is out of sync. Ensure NTP is configured correctly on this host.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
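+        # node_timex_offset_seconds is the clock offset the kernel reports via
+        # adjtimex(); a sustained offset above 50ms that is not converging usually
+        # points at a broken or unreachable NTP source.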

     - alert: HostClockNotSynchronising
-      expr: '(min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+      expr: '(min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16)'
       for: 2m
       labels:
         severity: warning
@@ -341,7 +320,7 @@ groups:
         description: "Clock not synchronising. Ensure NTP is configured on this host.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: HostRequiresReboot
-      expr: '(node_reboot_required > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+      expr: '(node_reboot_required > 0)'
       for: 4h
       labels:
         severity: info
diff --git a/conf/prometheus/alerts/postgres-exporter.yml b/conf/prometheus/alerts/postgres-exporter.yml
index 96ae5ea..692acef 100644
--- a/conf/prometheus/alerts/postgres-exporter.yml
+++ b/conf/prometheus/alerts/postgres-exporter.yml
@@ -32,7 +32,7 @@ groups:
         description: "Postgresql exporter is showing errors. A query may be buggy in query.yaml\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: PostgresqlTableNotAutoVacuumed
-      expr: '(pg_stat_user_tables_last_autovacuum > 0) and (time() - pg_stat_user_tables_last_autovacuum) > 60 * 60 * 24 * 10'
+      expr: '((pg_stat_user_tables_n_tup_del + pg_stat_user_tables_n_tup_upd + pg_stat_user_tables_n_tup_hot_upd) > pg_settings_autovacuum_vacuum_threshold) and (time() - pg_stat_user_tables_last_autovacuum) > 864000'
       for: 0m
       labels:
         severity: warning
@@ -41,7 +41,7 @@ groups:
         description: "Table {{ $labels.relname }} has not been auto vacuumed for 10 days\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: PostgresqlTableNotAutoAnalyzed
-      expr: '(pg_stat_user_tables_last_autoanalyze > 0) and (time() - pg_stat_user_tables_last_autoanalyze) > 24 * 60 * 60 * 10'
+      expr: '((pg_stat_user_tables_n_tup_del + pg_stat_user_tables_n_tup_upd + pg_stat_user_tables_n_tup_hot_upd) > pg_settings_autovacuum_analyze_threshold) and (time() - pg_stat_user_tables_last_autoanalyze) > 864000'
       for: 0m
       labels:
         severity: warning
@@ -53,7 +53,7 @@ groups:
       expr: 'sum by (instance, job, server) (pg_stat_activity_count) > min by (instance, job, server) (pg_settings_max_connections * 0.8)'
       for: 2m
       labels:
-        severity: warning
+        severity: critical
       annotations:
         summary: Postgresql too many connections (instance {{ $labels.instance }})
         description: "PostgreSQL instance has too many connections (> 80%).\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
@@ -62,7 +62,7 @@ groups:
       expr: 'sum by (datname) (pg_stat_activity_count{datname!~"template.*|postgres"}) < 5'
       for: 2m
       labels:
-        severity: warning
+        severity: critical
       annotations:
         summary: Postgresql not enough connections (instance {{ $labels.instance }})
         description: "PostgreSQL instance should have more connections (> 5)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
@@ -86,8 +86,8 @@ groups:
         description: "Ratio of transactions being aborted compared to committed is > 2 %\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

     - alert: PostgresqlCommitRateLow
-      expr: 'rate(pg_stat_database_xact_commit[1m]) < 10'
-      for: 2m
+      expr: 'increase(pg_stat_database_xact_commit{datname!~"template.*|postgres",datid!="0"}[5m]) < 5'
+      for: 5m
       labels:
         severity: critical
       annotations:
@@ -155,7 +155,7 @@ groups:
         severity: critical
       annotations:
         summary: Postgresql SSL compression active (instance {{ $labels.instance }})
-        description: "Database connections with SSL compression enabled. This may add significant jitter in replication delay. Replicas should turn off SSL compression via `sslcompression=0` in `recovery.conf`.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+        description: "Database allows connections with SSL compression enabled.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
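+        # SSL compression is disabled by default in modern PostgreSQL / OpenSSL
+        # builds, and PostgreSQL 14 removed support for it entirely, so this alert
+        # should only fire on legacy setups.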

     - alert: PostgresqlTooManyLocksAcquired
       expr: '((sum (pg_locks_count)) / (pg_settings_max_locks_per_transaction * pg_settings_max_connections)) > 0.20'
diff --git a/conf/prometheus/alerts/smartctl-exporter.yml b/conf/prometheus/alerts/smartctl-exporter.yml
new file mode 100644
index 0000000..866d715
--- /dev/null
+++ b/conf/prometheus/alerts/smartctl-exporter.yml
@@ -0,0 +1,77 @@
+groups:
+
+- name: SmartctlExporter
+
+  rules:
+
+    - alert: SmartDeviceTemperatureWarning
+      expr: '(avg_over_time(smartctl_device_temperature{temperature_type="current"} [5m]) unless on (instance, device) smartctl_device_temperature{temperature_type="drive_trip"}) > 60'
+      for: 0m
+      labels:
+        severity: warning
+      annotations:
+        summary: SMART device temperature warning (instance {{ $labels.instance }})
+        description: "Device temperature warning on {{ $labels.instance }} drive {{ $labels.device }} over 60°C\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+    - alert: SmartDeviceTemperatureCritical
+      expr: '(max_over_time(smartctl_device_temperature{temperature_type="current"} [5m]) unless on (instance, device) smartctl_device_temperature{temperature_type="drive_trip"}) > 70'
+      for: 0m
+      labels:
+        severity: critical
+      annotations:
+        summary: SMART device temperature critical (instance {{ $labels.instance }})
+        description: "Device temperature critical on {{ $labels.instance }} drive {{ $labels.device }} over 70°C\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+    - alert: SmartDeviceTemperatureOverTripValue
+      expr: 'max_over_time(smartctl_device_temperature{temperature_type="current"} [10m]) >= on(device, instance) smartctl_device_temperature{temperature_type="drive_trip"}'
+      for: 0m
+      labels:
+        severity: critical
+      annotations:
+        summary: SMART device temperature over trip value (instance {{ $labels.instance }})
+        description: "Device temperature over trip value on {{ $labels.instance }} drive {{ $labels.device }}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+    - alert: SmartDeviceTemperatureNearingTripValue
+      expr: 'max_over_time(smartctl_device_temperature{temperature_type="current"} [10m]) >= on(device, instance) (smartctl_device_temperature{temperature_type="drive_trip"} * .80)'
+      for: 0m
+      labels:
+        severity: warning
+      annotations:
+        summary: SMART device temperature nearing trip value (instance {{ $labels.instance }})
+        description: "Device temperature at 80% of trip value on {{ $labels.instance }} drive {{ $labels.device }}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+    - alert: SmartStatus
+      expr: 'smartctl_device_smart_status != 1'
+      for: 0m
+      labels:
+        severity: critical
+      annotations:
+        summary: SMART status failure (instance {{ $labels.instance }})
+        description: "Device has a SMART status failure on {{ $labels.instance }} drive {{ $labels.device }}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+    - alert: SmartCriticalWarning
+      expr: 'smartctl_device_critical_warning > 0'
+      for: 0m
+      labels:
+        severity: critical
+      annotations:
+        summary: SMART critical warning (instance {{ $labels.instance }})
+        description: "Disk controller has critical warning on {{ $labels.instance }} drive {{ $labels.device }}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+    - alert: SmartMediaErrors
+      expr: 'smartctl_device_media_errors > 0'
+      for: 0m
+      labels:
+        severity: critical
+      annotations:
+        summary: SMART media errors (instance {{ $labels.instance }})
+        description: "Disk controller detected media errors on {{ $labels.instance }} drive {{ $labels.device }}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
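+        # smartctl_device_media_errors surfaces the NVMe "Media and Data Integrity
+        # Errors" counter (or the closest ATA equivalent); any nonzero value deserves
+        # a closer look with `smartctl -a` on the device.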
+
+    - alert: SmartWearoutIndicator
+      expr: 'smartctl_device_available_spare < smartctl_device_available_spare_threshold'
+      for: 0m
+      labels:
+        severity: critical
+      annotations:
+        summary: SMART Wearout Indicator (instance {{ $labels.instance }})
+        description: "Device is wearing out on {{ $labels.instance }} drive {{ $labels.device }}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
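
---
A quick way to sanity-check the edited rule files before reloading Prometheus,
assuming promtool from the Prometheus release is available on the host:

    promtool check rules conf/prometheus/alerts/*.yml

A running server only picks the new rules up after a reload, for example with
`curl -X POST http://localhost:9090/-/reload` (requires --web.enable-lifecycle)
or by sending SIGHUP to the prometheus process.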