
Awesome Prometheus alert rules

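The rules below are ready-made Prometheus alerting rules covering Prometheus itself, Alertmanager, MySQL, Kafka, Nginx, Kubernetes, Ceph, and Thanos. Each rule fires once its expression has held true for the duration given in "for", carries a severity label (warning or critical), and templates the offending value and labels into its annotations. To use any of them, place the rules in a rule group file and reference that file from prometheus.yml. A minimal sketch, assuming a file called alert-rules.yml (the file and group names here are placeholders):

    # alert-rules.yml
    groups:
      - name: example-alerts
        rules:
          - alert: PrometheusTargetMissing
            expr: up == 0
            for: 5m
            labels:
              severity: critical
            annotations:
              summary: "Prometheus target missing (instance {{ $labels.instance }})"

    # prometheus.yml (excerpt)
    rule_files:
      - alert-rules.yml

Prometheus picks up rule file changes on a SIGHUP, or via an HTTP POST to /-/reload when started with --web.enable-lifecycle.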
  1. alert: PrometheusJobMissing
    expr: absent(up{job="my-job"})
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Prometheus job missing (instance {{ $labels.instance }})"
      description: "A Prometheus job has disappeared\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  2. alert: PrometheusTargetMissing
    expr: up == 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus target missing (instance {{ $labels.instance }})"
      description: "A Prometheus target has disappeared. An exporter might have crashed.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  3. alert: PrometheusAllTargetsMissing
    expr: count by (job) (up) == 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus all targets missing (instance {{ $labels.instance }})"
      description: "A Prometheus job no longer has any living targets.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  4. alert: PrometheusConfigurationReloadFailure
    expr: prometheus_config_last_reload_successful != 1
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Prometheus configuration reload failure (instance {{ $labels.instance }})"
      description: "Prometheus configuration reload error\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  5. alert: PrometheusTooManyRestarts
    expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[15m]) > 2
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Prometheus too many restarts (instance {{ $labels.instance }})"
      description: "Prometheus has restarted more than twice in the last 15 minutes. It might be crashlooping.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  6. alert: PrometheusAlertmanagerConfigurationReloadFailure
    expr: alertmanager_config_last_reload_successful != 1
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Prometheus AlertManager configuration reload failure (instance {{ $labels.instance }})"
      description: "AlertManager configuration reload error\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  7. alert: PrometheusAlertmanagerConfigNotSynced
    expr: count(count_values("config_hash", alertmanager_config_hash)) > 1
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Prometheus AlertManager config not synced (instance {{ $labels.instance }})"
      description: "Configurations of AlertManager cluster instances are out of sync\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  8. alert: PrometheusAlertmanagerE2eDeadManSwitch
    expr: vector(1)
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus AlertManager E2E dead man switch (instance {{ $labels.instance }})"
      description: "Prometheus DeadManSwitch is an always-firing alert. It's used as an end-to-end test of Prometheus through the Alertmanager.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  9. alert: PrometheusNotConnectedToAlertmanager
    expr: prometheus_notifications_alertmanagers_discovered < 1
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus not connected to alertmanager (instance {{ $labels.instance }})"
      description: "Prometheus cannot connect to the alertmanager\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  10. alert: PrometheusRuleEvaluationFailures
    expr: increase(prometheus_rule_evaluation_failures_total[3m]) > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus rule evaluation failures (instance {{ $labels.instance }})"
      description: "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  11. alert: PrometheusTemplateTextExpansionFailures
    expr: increase(prometheus_template_text_expansion_failures_total[3m]) > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus template text expansion failures (instance {{ $labels.instance }})"
      description: "Prometheus encountered {{ $value }} template text expansion failures\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  12. alert: PrometheusRuleEvaluationSlow
    expr: prometheus_rule_group_last_duration_seconds > prometheus_rule_group_interval_seconds
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Prometheus rule evaluation slow (instance {{ $labels.instance }})"
      description: "Prometheus rule evaluation took more time than the scheduled interval. This indicates a slower storage backend or queries that are too complex.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  13. alert: PrometheusNotificationsBacklog
    expr: min_over_time(prometheus_notifications_queue_length[10m]) > 0
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Prometheus notifications backlog (instance {{ $labels.instance }})"
      description: "The Prometheus notification queue has not been empty for 10 minutes\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  14. alert: PrometheusAlertmanagerNotificationFailing
    expr: rate(alertmanager_notifications_failed_total[1m]) > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus AlertManager notification failing (instance {{ $labels.instance }})"
      description: "Alertmanager is failing to send notifications\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  15. alert: PrometheusTargetEmpty
    expr: prometheus_sd_discovered_targets == 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus target empty (instance {{ $labels.instance }})"
      description: "Prometheus has no target in service discovery\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  16. alert: PrometheusTargetScrapingSlow
    expr: prometheus_target_interval_length_seconds{quantile="0.9"} > 60
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Prometheus target scraping slow (instance {{ $labels.instance }})"
      description: "Prometheus is scraping exporters slowly\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  17. alert: PrometheusLargeScrape
    expr: increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) > 10
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Prometheus large scrape (instance {{ $labels.instance }})"
      description: "Prometheus has many scrapes that exceed the sample limit\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  18. alert: PrometheusTargetScrapeDuplicate
    expr: increase(prometheus_target_scrapes_sample_duplicate_timestamp_total[5m]) > 0
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Prometheus target scrape duplicate (instance {{ $labels.instance }})"
      description: "Prometheus has many samples rejected due to duplicate timestamps but different values\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  19. alert: PrometheusTsdbCheckpointCreationFailures
    expr: increase(prometheus_tsdb_checkpoint_creations_failed_total[3m]) > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus TSDB checkpoint creation failures (instance {{ $labels.instance }})"
      description: "Prometheus encountered {{ $value }} checkpoint creation failures\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  20. alert: PrometheusTsdbCheckpointDeletionFailures
    expr: increase(prometheus_tsdb_checkpoint_deletions_failed_total[3m]) > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus TSDB checkpoint deletion failures (instance {{ $labels.instance }})"
      description: "Prometheus encountered {{ $value }} checkpoint deletion failures\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  21. alert: PrometheusTsdbCompactionsFailed
    expr: increase(prometheus_tsdb_compactions_failed_total[3m]) > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus TSDB compactions failed (instance {{ $labels.instance }})"
      description: "Prometheus encountered {{ $value }} TSDB compaction failures\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  22. alert: PrometheusTsdbHeadTruncationsFailed
    expr: increase(prometheus_tsdb_head_truncations_failed_total[3m]) > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus TSDB head truncations failed (instance {{ $labels.instance }})"
      description: "Prometheus encountered {{ $value }} TSDB head truncation failures\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  23. alert: PrometheusTsdbReloadFailures
    expr: increase(prometheus_tsdb_reloads_failures_total[3m]) > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus TSDB reload failures (instance {{ $labels.instance }})"
      description: "Prometheus encountered {{ $value }} TSDB reload failures\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  24. alert: PrometheusTsdbWalCorruptions
    expr: increase(prometheus_tsdb_wal_corruptions_total[3m]) > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus TSDB WAL corruptions (instance {{ $labels.instance }})"
      description: "Prometheus encountered {{ $value }} TSDB WAL corruptions\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  25. alert: PrometheusTsdbWalTruncationsFailed
    expr: increase(prometheus_tsdb_wal_truncations_failed_total[3m]) > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Prometheus TSDB WAL truncations failed (instance {{ $labels.instance }})"
      description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  26. alert: MysqlDown
    expr: mysql_up == 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "MySQL down (instance {{ $labels.instance }})"
      description: "MySQL instance is down on {{ $labels.instance }}\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  27. alert: MysqlTooManyConnections
    expr: avg by (instance) (max_over_time(mysql_global_status_threads_connected[5m])) / avg by (instance) (mysql_global_variables_max_connections) * 100 > 80
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "MySQL too many connections (instance {{ $labels.instance }})"
      description: "More than 80% of MySQL connections are in use on {{ $labels.instance }}\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  28. alert: MysqlHighThreadsRunning
    expr: avg by (instance) (max_over_time(mysql_global_status_threads_running[5m])) / avg by (instance) (mysql_global_variables_max_connections) * 100 > 60
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "MySQL high threads running (instance {{ $labels.instance }})"
      description: "More than 60% of MySQL connections are in running state on {{ $labels.instance }}\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  29. alert: MysqlSlaveIoThreadNotRunning
    expr: mysql_slave_status_master_server_id > 0 and ON (instance) mysql_slave_status_slave_io_running == 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "MySQL Slave IO thread not running (instance {{ $labels.instance }})"
      description: "MySQL Slave IO thread not running on {{ $labels.instance }}\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  30. alert: MysqlSlaveSqlThreadNotRunning
    expr: mysql_slave_status_master_server_id > 0 and ON (instance) mysql_slave_status_slave_sql_running == 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "MySQL Slave SQL thread not running (instance {{ $labels.instance }})"
      description: "MySQL Slave SQL thread not running on {{ $labels.instance }}\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  31. alert: MysqlSlaveReplicationLag
    expr: mysql_slave_status_master_server_id > 0 and ON (instance) (mysql_slave_status_seconds_behind_master - mysql_slave_status_sql_delay) > 300
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "MySQL Slave replication lag (instance {{ $labels.instance }})"
      description: "MySQL replication lag on {{ $labels.instance }}\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  32. alert: MysqlSlowQueries
    expr: mysql_global_status_slow_queries > 0
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "MySQL slow queries (instance {{ $labels.instance }})"
      description: "MySQL server is having some slow queries.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  33. alert: MysqlRestarted
    expr: mysql_global_status_uptime < 60
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "MySQL restarted (instance {{ $labels.instance }})"
      description: "MySQL was restarted less than one minute ago on {{ $labels.instance }}.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  34. alert: KafkaTopicsReplicas
    expr: sum(kafka_topic_partition_in_sync_replica) by (topic) < 3
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kafka topics replicas (instance {{ $labels.instance }})"
      description: "Kafka topic has fewer than 3 in-sync partition replicas\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  35. alert: KafkaConsumersGroup
    expr: sum(kafka_consumergroup_lag) by (consumergroup) > 50
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kafka consumers group (instance {{ $labels.instance }})"
      description: "Kafka consumer group lag is above 50 messages\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  36. alert: NginxHighHttp4xxErrorRate
    expr: sum(rate(nginx_http_requests_total{status=~"^4.."}[1m])) / sum(rate(nginx_http_requests_total[1m])) * 100 > 5
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Nginx high HTTP 4xx error rate (instance {{ $labels.instance }})"
      description: "Too many HTTP requests with status 4xx (> 5%)\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  37. alert: NginxHighHttp5xxErrorRate
    expr: sum(rate(nginx_http_requests_total{status=~"^5.."}[1m])) / sum(rate(nginx_http_requests_total[1m])) * 100 > 5
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Nginx high HTTP 5xx error rate (instance {{ $labels.instance }})"
      description: "Too many HTTP requests with status 5xx (> 5%)\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  38. alert: NginxLatencyHigh
    expr: histogram_quantile(0.99, sum(rate(nginx_http_request_duration_seconds_bucket[30m])) by (host, node)) > 10
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Nginx latency high (instance {{ $labels.instance }})"
      description: "Nginx p99 latency is higher than 10 seconds\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  39. alert: KubernetesNodeReady
    expr: kube_node_status_condition{condition="Ready",status="true"} == 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes Node ready (instance {{ $labels.instance }})"
      description: "Node {{ $labels.node }} has been unready for a long time\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  40. alert: KubernetesMemoryPressure
    expr: kube_node_status_condition{condition="MemoryPressure",status="true"} == 1
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes memory pressure (instance {{ $labels.instance }})"
      description: "{{ $labels.node }} has MemoryPressure condition\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  41. alert: KubernetesDiskPressure
    expr: kube_node_status_condition{condition="DiskPressure",status="true"} == 1
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes disk pressure (instance {{ $labels.instance }})"
      description: "{{ $labels.node }} has DiskPressure condition\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  42. alert: KubernetesOutOfDisk
    expr: kube_node_status_condition{condition="OutOfDisk",status="true"} == 1
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes out of disk (instance {{ $labels.instance }})"
      description: "{{ $labels.node }} has OutOfDisk condition\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  43. alert: KubernetesJobFailed
    expr: kube_job_status_failed > 0
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes Job failed (instance {{ $labels.instance }})"
      description: "Job {{ $labels.namespace }}/{{ $labels.exported_job }} failed to complete\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  44. alert: KubernetesCronjobSuspended
    expr: kube_cronjob_spec_suspend != 0
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes CronJob suspended (instance {{ $labels.instance }})"
      description: "CronJob {{ $labels.namespace }}/{{ $labels.cronjob }} is suspended\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  45. alert: KubernetesPersistentvolumeclaimPending
    expr: kube_persistentvolumeclaim_status_phase{phase="Pending"} == 1
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes PersistentVolumeClaim pending (instance {{ $labels.instance }})"
      description: "PersistentVolumeClaim {{ $labels.namespace }}/{{ $labels.persistentvolumeclaim }} is pending\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  46. alert: KubernetesVolumeOutOfDiskSpace
    expr: kubelet_volume_stats_available_bytes / kubelet_volume_stats_capacity_bytes * 100 < 10
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes Volume out of disk space (instance {{ $labels.instance }})"
      description: "Volume is almost full (< 10% left)\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  47. alert: KubernetesVolumeFullInFourDays
    expr: predict_linear(kubelet_volume_stats_available_bytes[6h], 4 * 24 * 3600) < 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes Volume full in four days (instance {{ $labels.instance }})"
      description: "{{ $labels.namespace }}/{{ $labels.persistentvolumeclaim }} is expected to fill up within four days. Currently {{ $value | humanize }}% is available.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  48. alert: KubernetesPersistentvolumeError
    expr: kube_persistentvolume_status_phase{phase=~"Failed|Pending",job="kube-state-metrics"} > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes PersistentVolume error (instance {{ $labels.instance }})"
      description: "Persistent volume is in bad state\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  49. alert: KubernetesStatefulsetDown
    expr: (kube_statefulset_status_replicas_ready / kube_statefulset_status_replicas_current) != 1
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes StatefulSet down (instance {{ $labels.instance }})"
      description: "A StatefulSet went down\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  50. alert: KubernetesHpaScalingAbility
    expr: kube_hpa_status_condition{condition="false", status="AbleToScale"} == 1
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes HPA scaling ability (instance {{ $labels.instance }})"
      description: "The HPA is unable to scale\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  51. alert: KubernetesHpaMetricAvailability
    expr: kube_hpa_status_condition{condition="false", status="ScalingActive"} == 1
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes HPA metric availability (instance {{ $labels.instance }})"
      description: "HPA is not able to collect metrics\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  52. alert: KubernetesHpaScaleCapability
    expr: kube_hpa_status_desired_replicas >= kube_hpa_spec_max_replicas
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes HPA scale capability (instance {{ $labels.instance }})"
      description: "The maximum number of desired Pods has been hit\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  53. alert: KubernetesPodNotHealthy
    expr: min_over_time(sum by (namespace, pod) (kube_pod_status_phase{phase=~"Pending|Unknown|Failed"})[1h:]) > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes Pod not healthy (instance {{ $labels.instance }})"
      description: "Pod has been in a non-ready state for longer than an hour.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  54. alert: KubernetesPodCrashLooping
    expr: rate(kube_pod_container_status_restarts_total[15m]) * 60 * 5 > 5
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes pod crash looping (instance {{ $labels.instance }})"
      description: "Pod {{ $labels.pod }} is crash looping\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  55. alert: KubernetesReplicassetMismatch
    expr: kube_replicaset_spec_replicas != kube_replicaset_status_ready_replicas
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes ReplicaSet mismatch (instance {{ $labels.instance }})"
      description: "ReplicaSet replicas mismatch\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  56. alert: KubernetesDeploymentReplicasMismatch
    expr: kube_deployment_spec_replicas != kube_deployment_status_replicas_available
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes Deployment replicas mismatch (instance {{ $labels.instance }})"
      description: "Deployment replicas mismatch\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  57. alert: KubernetesStatefulsetReplicasMismatch
    expr: kube_statefulset_status_replicas_ready != kube_statefulset_status_replicas
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes StatefulSet replicas mismatch (instance {{ $labels.instance }})"
      description: "A StatefulSet has not matched the expected number of replicas for longer than 15 minutes.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  58. alert: KubernetesDeploymentGenerationMismatch
    expr: kube_deployment_status_observed_generation != kube_deployment_metadata_generation
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes Deployment generation mismatch (instance {{ $labels.instance }})"
      description: "A Deployment has failed but has not been rolled back.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  59. alert: KubernetesStatefulsetGenerationMismatch
    expr: kube_statefulset_status_observed_generation != kube_statefulset_metadata_generation
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes StatefulSet generation mismatch (instance {{ $labels.instance }})"
      description: "A StatefulSet has failed but has not been rolled back.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  60. alert: KubernetesStatefulsetUpdateNotRolledOut
    expr: max without (revision) (kube_statefulset_status_current_revision unless kube_statefulset_status_update_revision) * (kube_statefulset_replicas != kube_statefulset_status_replicas_updated)
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes StatefulSet update not rolled out (instance {{ $labels.instance }})"
      description: "StatefulSet update has not been rolled out.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  61. alert: KubernetesDaemonsetRolloutStuck
    expr: kube_daemonset_status_number_ready / kube_daemonset_status_desired_number_scheduled * 100 < 100 or kube_daemonset_status_desired_number_scheduled - kube_daemonset_status_current_number_scheduled > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes DaemonSet rollout stuck (instance {{ $labels.instance }})"
      description: "Some Pods of the DaemonSet are not scheduled or not ready\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  62. alert: KubernetesDaemonsetMisscheduled
    expr: kube_daemonset_status_number_misscheduled > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes DaemonSet misscheduled (instance {{ $labels.instance }})"
      description: "Some DaemonSet Pods are running where they are not supposed to run\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  63. alert: KubernetesCronjobTooLong
    expr: time() - kube_cronjob_next_schedule_time > 3600
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes CronJob too long (instance {{ $labels.instance }})"
      description: "CronJob {{ $labels.namespace }}/{{ $labels.cronjob }} is taking more than 1h to complete.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  64. alert: KubernetesJobCompletion
    expr: kube_job_spec_completions - kube_job_status_succeeded > 0 or kube_job_status_failed > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes job completion (instance {{ $labels.instance }})"
      description: "Kubernetes Job failed to complete\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  65. alert: KubernetesApiServerErrors
    expr: sum(rate(apiserver_request_count{job="apiserver",code=~"^(?:5..)$"}[2m])) / sum(rate(apiserver_request_count{job="apiserver"}[2m])) * 100 > 3
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes API server errors (instance {{ $labels.instance }})"
      description: "Kubernetes API server is experiencing a high error rate\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  66. alert: KubernetesApiClientErrors
    expr: (sum(rate(rest_client_requests_total{code=~"(4|5).."}[2m])) by (instance, job) / sum(rate(rest_client_requests_total[2m])) by (instance, job)) * 100 > 1
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes API client errors (instance {{ $labels.instance }})"
      description: "Kubernetes API client is experiencing a high error rate\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  67. alert: KubernetesClientCertificateExpiresNextWeek
    expr: apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 and histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 7*24*60*60
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes client certificate expires next week (instance {{ $labels.instance }})"
      description: "A client certificate used to authenticate to the apiserver is expiring next week.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  68. alert: KubernetesClientCertificateExpiresSoon
    expr: apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 and histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 24*60*60
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Kubernetes client certificate expires soon (instance {{ $labels.instance }})"
      description: "A client certificate used to authenticate to the apiserver is expiring in less than 24 hours.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  69. alert: KubernetesApiServerLatency
    expr: histogram_quantile(0.99, sum(apiserver_request_latencies_bucket{verb!~"CONNECT|WATCHLIST|WATCH|PROXY"}) WITHOUT (instance, resource)) / 1e+06 > 1
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Kubernetes API server latency (instance {{ $labels.instance }})"
      description: "Kubernetes API server has a 99th percentile latency of {{ $value }} seconds for {{ $labels.verb }} {{ $labels.resource }}.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  70. alert: CephState
    expr: ceph_health_status != 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Ceph State (instance {{ $labels.instance }})"
      description: "Ceph instance unhealthy\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  71. alert: CephMonitorClockSkew
    expr: abs(ceph_monitor_clock_skew_seconds) > 0.2
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Ceph monitor clock skew (instance {{ $labels.instance }})"
      description: "Ceph monitor clock skew detected. Please check NTP and hardware clock settings\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  72. alert: CephMonitorLowSpace
    expr: ceph_monitor_avail_percent < 10
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Ceph monitor low space (instance {{ $labels.instance }})"
      description: "Ceph monitor storage is low.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  73. alert: CephOsdDown
    expr: ceph_osd_up == 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Ceph OSD Down (instance {{ $labels.instance }})"
      description: "Ceph Object Storage Daemon down\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  74. alert: CephHighOsdLatency
    expr: ceph_osd_perf_apply_latency_seconds > 10
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Ceph high OSD latency (instance {{ $labels.instance }})"
      description: "Ceph Object Storage Daemon latency is high. Please check whether it is stuck in a weird state.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  75. alert: CephOsdLowSpace
    expr: ceph_osd_utilization > 90
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Ceph OSD low space (instance {{ $labels.instance }})"
      description: "Ceph Object Storage Daemon is running out of space. Please add more disks.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  76. alert: CephOsdReweighted
    expr: ceph_osd_weight < 1
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Ceph OSD reweighted (instance {{ $labels.instance }})"
      description: "Ceph Object Storage Daemon is taking too much time to resize.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  77. alert: CephPgDown
    expr: ceph_pg_down > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Ceph PG down (instance {{ $labels.instance }})"
      description: "Some Ceph placement groups are down. Please ensure that all the data are available.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  78. alert: CephPgIncomplete
    expr: ceph_pg_incomplete > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Ceph PG incomplete (instance {{ $labels.instance }})"
      description: "Some Ceph placement groups are incomplete. Please ensure that all the data are available.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  79. alert: CephPgInconsistant
    expr: ceph_pg_inconsistent > 0
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Ceph PG inconsistent (instance {{ $labels.instance }})"
      description: "Some Ceph placement groups are inconsistent. Data is available but inconsistent across nodes.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  80. alert: CephPgActivationLong
    expr: ceph_pg_activating > 0
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Ceph PG activation long (instance {{ $labels.instance }})"
      description: "Some Ceph placement groups are taking too long to activate.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  81. alert: CephPgBackfillFull
    expr: ceph_pg_backfill_toofull > 0
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Ceph PG backfill full (instance {{ $labels.instance }})"
      description: "Some Ceph placement groups are located on full Object Storage Daemons. Those PGs can become unavailable shortly. Please check the OSDs, change their weight, or reconfigure CRUSH rules.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  82. alert: CephPgUnavailable
    expr: ceph_pg_total - ceph_pg_active > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Ceph PG unavailable (instance {{ $labels.instance }})"
      description: "Some Ceph placement groups are unavailable.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  83. alert: ThanosCompactionHalted
    expr: thanos_compactor_halted == 1
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Thanos compaction halted (instance {{ $labels.instance }})"
      description: "Thanos compaction has failed to run and is now halted.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  84. alert: ThanosCompactBucketOperationFailure
    expr: rate(thanos_objstore_bucket_operation_failures_total[1m]) > 0
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Thanos compact bucket operation failure (instance {{ $labels.instance }})"
      description: "Thanos compaction has failing storage operations\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
  85. alert: ThanosCompactNotRun
    expr: (time() - thanos_objstore_bucket_last_successful_upload_time) > 24*60*60
    for: 5m
    labels:
      severity: critical
    annotations:
      summary: "Thanos compact not run (instance {{ $labels.instance }})"
      description: "Thanos compaction has not run in 24 hours.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
