fix
meta/domain-monitor/apps.tf (new file, 119 lines)
@@ -0,0 +1,119 @@
locals {
  annotations = {
    "vynil.solidite.fr/meta"    = "domain-monitor"
    "vynil.solidite.fr/name"    = var.namespace
    "vynil.solidite.fr/domain"  = var.domain-name
    "vynil.solidite.fr/issuer"  = var.issuer
    "vynil.solidite.fr/ingress" = var.ingress-class
  }
  global = {
    "domain"        = var.namespace
    "domain-name"   = "admin.${var.domain-name}"
    "issuer"        = var.issuer
    "ingress-class" = var.ingress-class
    "backups"       = var.backups
    "app-group"     = var.app-group
  }
  prometheus         = { for k, v in var.prometheus : k => v if k != "enable" }
  alertmanager       = { for k, v in var.alertmanager : k => v if k != "enable" }
  nodeExporter       = { for k, v in var.node-exporter : k => v if k != "enable" }
  kubeStateMetrics   = merge({ "cluster-admin" = true }, { for k, v in var.kube-state-metrics : k => v if k != "enable" })
  monitorControlPlan = merge({ "cluster-admin" = true }, { for k, v in var.monitor-control-plan : k => v if k != "enable" })
}
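
The `if k != "enable"` comprehensions strip each component's gating flag before its remaining options are merged over the shared `global` map. A minimal sketch of that composition, using hypothetical values that are not from this module:

locals {
  example-global     = { "issuer" = "letsencrypt-prod" }
  example-prometheus = { "enable" = true, "retention" = "15d" }
  # merge() lets component options override the shared defaults; the
  # result here is { "issuer" = "letsencrypt-prod", "retention" = "15d" }
  example-options = merge(
    local.example-global,
    { for k, v in local.example-prometheus : k => v if k != "enable" }
  )
}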

resource "kubernetes_namespace_v1" "monitor-ns" {
  count = (var.prometheus.enable || var.alertmanager.enable || var.node-exporter.enable || var.kube-state-metrics.enable || var.monitor-control-plan.enable) ? 1 : 0
  metadata {
    annotations = local.annotations
    labels      = merge(local.common-labels, local.annotations)
    name        = "${var.namespace}-monitor"
  }
}
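
The namespace is created only when at least one monitoring component is enabled, so every later reference has to index the resource as a list. A minimal sketch of the same `count` gating pattern, with a hypothetical resource:

# Hypothetical illustration of count-gating; not part of this module.
resource "kubernetes_namespace_v1" "example" {
  count = var.prometheus.enable ? 1 : 0
  metadata {
    name = "example"
  }
}
# Downstream code must use the list form, e.g.:
#   kubernetes_namespace_v1.example[0].metadata[0].name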

resource "kubectl_manifest" "alertmanager" {
  count      = var.alertmanager.enable ? 1 : 0
  depends_on = [kubernetes_namespace_v1.monitor-ns]
  yaml_body  = <<-EOF
    apiVersion: "vynil.solidite.fr/v1"
    kind: "Install"
    metadata:
      name: "alertmanager"
      namespace: "${kubernetes_namespace_v1.monitor-ns[0].metadata[0].name}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      distrib: "${var.distributions.domain}"
      category: "monitor"
      component: "alertmanager"
      options: ${jsonencode(merge(local.global, local.alertmanager))}
  EOF
}
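
Each of the five Install resources in this file follows the same shape: a vynil `Install` custom resource rendered as a heredoc, with computed maps embedded through jsonencode() (valid because JSON is a subset of YAML). A stripped-down sketch of the pattern, with placeholder names and values:

# Sketch only; names and values are placeholders.
resource "kubectl_manifest" "example" {
  yaml_body = <<-EOF
    apiVersion: "vynil.solidite.fr/v1"
    kind: "Install"
    metadata:
      name: "example"
      labels: ${jsonencode({ "app.kubernetes.io/managed-by" = "vynil" })}
    spec:
      options: ${jsonencode({ "issuer" = "letsencrypt-prod" })}
  EOF
}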

resource "kubectl_manifest" "prometheus" {
  count     = var.prometheus.enable ? 1 : 0
  yaml_body = <<-EOF
    apiVersion: "vynil.solidite.fr/v1"
    kind: "Install"
    metadata:
      name: "prometheus"
      namespace: "${kubernetes_namespace_v1.monitor-ns[0].metadata[0].name}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      distrib: "${var.distributions.domain}"
      category: "monitor"
      component: "prometheus"
      options: ${jsonencode(merge(local.global, local.prometheus))}
  EOF
}

resource "kubectl_manifest" "nodeExporter" {
  count     = var.node-exporter.enable ? 1 : 0
  yaml_body = <<-EOF
    apiVersion: "vynil.solidite.fr/v1"
    kind: "Install"
    metadata:
      name: "node-exporter"
      namespace: "${kubernetes_namespace_v1.monitor-ns[0].metadata[0].name}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      distrib: "${var.distributions.domain}"
      category: "monitor"
      component: "node-exporter"
      options: ${jsonencode(merge(local.global, local.nodeExporter))}
  EOF
}

resource "kubectl_manifest" "kubeStateMetrics" {
  count      = var.kube-state-metrics.enable ? 1 : 0
  depends_on = [kubernetes_namespace_v1.monitor-ns]
  yaml_body  = <<-EOF
    apiVersion: "vynil.solidite.fr/v1"
    kind: "Install"
    metadata:
      name: "kube-state-metrics"
      namespace: "${kubernetes_namespace_v1.monitor-ns[0].metadata[0].name}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      distrib: "${var.distributions.domain}"
      category: "monitor"
      component: "kube-state-metrics"
      options: ${jsonencode(merge(local.global, local.kubeStateMetrics))}
  EOF
}

resource "kubectl_manifest" "monitorControlPlan" {
  count      = var.monitor-control-plan.enable ? 1 : 0
  depends_on = [kubernetes_namespace_v1.monitor-ns]
  yaml_body  = <<-EOF
    apiVersion: "vynil.solidite.fr/v1"
    kind: "Install"
    metadata:
      name: "monitor-control-plan"
      namespace: "${kubernetes_namespace_v1.monitor-ns[0].metadata[0].name}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      distrib: "${var.distributions.domain}"
      category: "monitor"
      component: "monitor-control-plan"
      options: ${jsonencode(merge(local.global, local.monitorControlPlan))}
  EOF
}

meta/domain-monitor/index.yaml (new file, 173 lines)
@@ -0,0 +1,173 @@
---
apiVersion: vinyl.solidite.fr/v1beta1
kind: Component
category: meta
metadata:
  name: domain-monitor
description: null
options:
  issuer:
    default: letsencrypt-prod
    examples:
    - letsencrypt-prod
    type: string
  backups:
    default:
      enable: false
      endpoint: ''
      key-id-key: s3-id
      secret-key: s3-secret
      secret-name: backup-settings
    examples:
    - enable: false
      endpoint: ''
      key-id-key: s3-id
      secret-key: s3-secret
      secret-name: backup-settings
    properties:
      enable:
        default: false
        type: boolean
      endpoint:
        default: ''
        type: string
      key-id-key:
        default: s3-id
        type: string
      secret-key:
        default: s3-secret
        type: string
      secret-name:
        default: backup-settings
        type: string
    type: object
  ingress-class:
    default: traefik
    examples:
    - traefik
    type: string
  distributions:
    default:
      core: core
      domain: domain
    examples:
    - core: core
      domain: domain
    properties:
      core:
        default: core
        type: string
      domain:
        default: domain
        type: string
    type: object
  domain-name:
    default: your_company.com
    examples:
    - your_company.com
    type: string
  node-exporter:
    default:
      enable: true
    examples:
    - enable: true
    properties:
      enable:
        default: true
        type: boolean
    type: object
    x-vynil-category: monitor
    x-vynil-package: node-exporter
  kube-state-metrics:
    default:
      enable: true
    examples:
    - enable: true
    properties:
      enable:
        default: true
        type: boolean
    type: object
    x-vynil-category: monitor
    x-vynil-package: kube-state-metrics
  storage-classes:
    default:
      BlockReadWriteMany: ''
      BlockReadWriteOnce: ''
      FilesystemReadWriteMany: ''
      FilesystemReadWriteOnce: ''
    examples:
    - BlockReadWriteMany: ''
      BlockReadWriteOnce: ''
      FilesystemReadWriteMany: ''
      FilesystemReadWriteOnce: ''
    properties:
      BlockReadWriteMany:
        default: ''
        type: string
      BlockReadWriteOnce:
        default: ''
        type: string
      FilesystemReadWriteMany:
        default: ''
        type: string
      FilesystemReadWriteOnce:
        default: ''
        type: string
    type: object
  app-group:
    default: infra
    examples:
    - infra
    type: string
  prometheus:
    default:
      enable: true
    examples:
    - enable: true
    properties:
      enable:
        default: true
        type: boolean
    type: object
    x-vynil-category: monitor
    x-vynil-package: prometheus
  alertmanager:
    default:
      enable: true
    examples:
    - enable: true
    properties:
      enable:
        default: true
        type: boolean
    type: object
    x-vynil-category: monitor
    x-vynil-package: alertmanager
  domain:
    default: your-company
    examples:
    - your-company
    type: string
  monitor-control-plan:
    default:
      enable: false
    examples:
    - enable: false
    properties:
      enable:
        default: false
        type: boolean
    type: object
    x-vynil-category: monitor
    x-vynil-package: monitor-control-plan
dependencies: []
providers:
  kubernetes: true
  authentik: null
  kubectl: true
  postgresql: null
  restapi: null
  http: null
  gitea: null
  tfaddtype: null
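
Each entry under `options:` is a JSON-Schema-style declaration; presumably vynil materializes every option as a Terraform variable carrying the declared default (an assumption — the generator itself is not part of this diff). For the `node-exporter` option above, that would look roughly like:

# Hypothetical generated variable; assumes vynil maps options 1:1 to variables.
variable "node-exporter" {
  type = map(any)
  default = {
    enable = true
  }
}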
@@ -6,95 +6,6 @@ metadata:
  name: domain
description: null
options:
  erp:
    default:
      dolibarr:
        enable: true
      enable: false
    examples:
    - dolibarr:
        enable: true
      enable: false
    properties:
      dolibarr:
        default:
          enable: true
        properties:
          enable:
            default: true
            type: boolean
        type: object
      enable:
        default: false
        type: boolean
    type: object
    x-vynil-category: meta
    x-vynil-package: domain-erp
  ingress-class:
    default: traefik
    examples:
    - traefik
    type: string
  devspaces:
    default:
      enable: false
    examples:
    - enable: false
    properties:
      enable:
        default: false
        type: boolean
    type: object
    x-vynil-category: meta
    x-vynil-package: domain-devspaces
  auth:
    default:
      authentik:
        enable: true
      enable: true
    examples:
    - authentik:
        enable: true
      enable: true
    properties:
      authentik:
        default:
          enable: true
        properties:
          enable:
            default: true
            type: boolean
        type: object
      enable:
        default: true
        type: boolean
    type: object
    x-vynil-category: meta
    x-vynil-package: domain-auth
  infra:
    default:
      enable: false
      traefik:
        enable: false
    examples:
    - enable: false
      traefik:
        enable: false
    properties:
      enable:
        default: false
        type: boolean
      traefik:
        default:
          enable: false
        properties:
          enable:
            default: false
            type: boolean
        type: object
    type: object
    x-vynil-category: meta
    x-vynil-package: domain-infra
  storage-classes:
    default:
      BlockReadWriteMany: ''
@@ -120,16 +31,115 @@ options:
        default: ''
        type: string
    type: object
  domain-name:
    default: your_company.com
  ci:
    default:
      enable: false
      gitea:
        enable: true
    examples:
    - your_company.com
    type: string
    - enable: false
      gitea:
        enable: true
    properties:
      enable:
        default: false
        type: boolean
      gitea:
        default:
          enable: true
        properties:
          enable:
            default: true
            type: boolean
        type: object
    type: object
    x-vynil-category: meta
    x-vynil-package: domain-ci
  monitor:
    default:
      enable: false
    examples:
    - enable: false
    properties:
      enable:
        default: false
        type: boolean
    type: object
    x-vynil-category: meta
    x-vynil-package: domain-monitor
  distributions:
    default:
      core: core
      domain: domain
    examples:
    - core: core
      domain: domain
    properties:
      core:
        default: core
        type: string
      domain:
        default: domain
        type: string
    type: object
  infra:
    default:
      enable: false
      traefik:
        enable: false
    examples:
    - enable: false
      traefik:
        enable: false
    properties:
      enable:
        default: false
        type: boolean
      traefik:
        default:
          enable: false
        properties:
          enable:
            default: false
            type: boolean
        type: object
    type: object
    x-vynil-category: meta
    x-vynil-package: domain-infra
  issuer:
    default: letsencrypt-prod
    examples:
    - letsencrypt-prod
    type: string
  ingress-class:
    default: traefik
    examples:
    - traefik
    type: string
  erp:
    default:
      dolibarr:
        enable: true
      enable: false
    examples:
    - dolibarr:
        enable: true
      enable: false
    properties:
      dolibarr:
        default:
          enable: true
        properties:
          enable:
            default: true
            type: boolean
        type: object
      enable:
        default: false
        type: boolean
    type: object
    x-vynil-category: meta
    x-vynil-package: domain-erp
  apps:
    default:
      enable: false
@@ -176,6 +186,18 @@ options:
            type: boolean
        type: object
    type: object
  devspaces:
    default:
      enable: false
    examples:
    - enable: false
    properties:
      enable:
        default: false
        type: boolean
    type: object
    x-vynil-category: meta
    x-vynil-package: domain-devspaces
  backups:
    default:
      enable: false
@@ -206,35 +228,22 @@ options:
        default: backup-settings
        type: string
    type: object
  distributions:
    default:
      core: core
      domain: domain
  domain-name:
    default: your_company.com
    examples:
    - core: core
      domain: domain
    properties:
      core:
        default: core
        type: string
      domain:
        default: domain
        type: string
    type: object
  ci:
    - your_company.com
    type: string
  auth:
    default:
      enable: false
      gitea:
      authentik:
        enable: true
      enable: true
    examples:
    - enable: false
      gitea:
    - authentik:
        enable: true
      enable: true
    properties:
      enable:
        default: false
        type: boolean
      gitea:
      authentik:
        default:
          enable: true
        properties:
@@ -242,9 +251,12 @@ options:
            default: true
            type: boolean
        type: object
      enable:
        default: true
        type: boolean
    type: object
    x-vynil-category: meta
    x-vynil-package: domain-ci
    x-vynil-package: domain-auth
dependencies: []
providers:
  kubernetes: null

@@ -21,6 +21,7 @@ locals {
  erp       = { for k, v in var.erp : k => v if k != "enable" }
  apps      = { for k, v in var.apps : k => v if k != "enable" }
  mail      = { for k, v in var.mail : k => v if k != "enable" }
  monitor   = { for k, v in var.monitor : k => v if k != "enable" }
  devspaces = { for k, v in var.devspaces : k => v if k != "enable" }

  # Force install authentik and its modules when any are needed
@@ -218,6 +219,22 @@ resource "kubectl_manifest" "mail" {
      options: ${jsonencode(merge(local.global, local.mail))}
  EOF
}
resource "kubectl_manifest" "monitor" {
  count     = var.monitor.enable ? 1 : 0
  yaml_body = <<-EOF
    apiVersion: "vynil.solidite.fr/v1"
    kind: "Install"
    metadata:
      name: "monitor"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      distrib: "${var.distributions.domain}"
      category: "meta"
      component: "domain-monitor"
      options: ${jsonencode(merge(local.global, local.monitor))}
  EOF
}
resource "kubectl_manifest" "devspaces" {
  count     = var.devspaces.enable ? 1 : 0
  yaml_body = <<-EOF

monitor/alertmanager/datas.tf (new file, 165 lines)
@@ -0,0 +1,165 @@
locals {
  common-labels = {
    "vynil.solidite.fr/owner-name"      = var.instance
    "vynil.solidite.fr/owner-namespace" = var.namespace
    "vynil.solidite.fr/owner-category"  = var.category
    "vynil.solidite.fr/owner-component" = var.component
    "app.kubernetes.io/managed-by"      = "vynil"
    "app.kubernetes.io/instance"        = var.instance
  }
  rb-patch = <<-EOF
    - op: replace
      path: /subjects/0/namespace
      value: "${var.namespace}"
  EOF
}

data "kubernetes_secret_v1" "authentik" {
  metadata {
    name      = "authentik"
    namespace = "${var.domain}-auth"
  }
}

data "kustomization_overlay" "data" {
  common_labels = local.common-labels
  namespace     = var.namespace
  resources     = [for file in fileset(path.module, "*.yaml") : file if file != "index.yaml" && length(regexall("ClusterRole", file)) < 1]
  patches {
    target {
      kind = "Alertmanager"
      name = "prometheus-community-kube-alertmanager"
    }
    patch = <<-EOF
      apiVersion: monitoring.coreos.com/v1
      kind: Alertmanager
      metadata:
        name: prometheus-community-kube-alertmanager
      spec:
        image: "${var.images.alertmanager.registry}/${var.images.alertmanager.repository}:${var.images.alertmanager.tag}"
        version: ${var.images.alertmanager.tag}
        externalUrl: http://prometheus-community-kube-alertmanager.${var.namespace}:9093
        replicas: ${var.replicas}
        listenLocal: ${var.listenLocal}
        logLevel: "${var.logLevel}"
        retention: "${var.retention}"
    EOF
  }
  patches {
    target {
      kind = "ConfigMap"
      name = "alertmanager-kube-grafana-datasource"
    }
    patch = <<-EOF
      apiVersion: v1
      kind: ConfigMap
      metadata:
        name: alertmanager-kube-grafana-datasource
      data:
        datasource.yaml: |-
          apiVersion: 1
          datasources:
          - name: Alertmanager
            type: alertmanager
            uid: alertmanager
            url: http://prometheus-community-kube-alertmanager.${var.namespace}:9093/
            access: proxy
            jsonData:
              handleGrafanaManagedAlerts: false
              implementation: prometheus
    EOF
  }
  patches {
    target {
      kind = "ServiceMonitor"
      name = "prometheus-community-kube-alertmanager"
    }
    patch = <<-EOF
      - op: replace
        path: /spec/namespaceSelector/matchNames/0
        value: "${var.namespace}"
    EOF
  }
  patches {
    target {
      kind = "PrometheusRule"
      name = "prometheus-community-kube-alertmanager.rules"
    }
    patch = <<-EOF
      apiVersion: monitoring.coreos.com/v1
      kind: PrometheusRule
      metadata:
        name: prometheus-community-kube-alertmanager.rules
      spec:
        groups:
        - name: alertmanager.rules
          rules:
          - alert: AlertmanagerFailedReload
            expr: |-
              # Without max_over_time, failed scrapes could create false negatives, see
              # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
              max_over_time(alertmanager_config_last_reload_successful{job="prometheus-community-kube-alertmanager",namespace="${var.namespace}"}[5m]) == 0
          - alert: AlertmanagerMembersInconsistent
            expr: |-
              # Without max_over_time, failed scrapes could create false negatives, see
              # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
              max_over_time(alertmanager_cluster_members{job="prometheus-community-kube-alertmanager",namespace="${var.namespace}"}[5m])
              < on (namespace,service,cluster) group_left
              count by (namespace,service,cluster) (max_over_time(alertmanager_cluster_members{job="prometheus-community-kube-alertmanager",namespace="${var.namespace}"}[5m]))
          - alert: AlertmanagerFailedToSendAlerts
            expr: |-
              (
                rate(alertmanager_notifications_failed_total{job="prometheus-community-kube-alertmanager",namespace="${var.namespace}"}[5m])
              /
                ignoring (reason) group_left rate(alertmanager_notifications_total{job="prometheus-community-kube-alertmanager",namespace="${var.namespace}"}[5m])
              )
              > 0.01
          - alert: AlertmanagerClusterFailedToSendAlerts
            expr: |-
              min by (namespace,service, integration) (
                rate(alertmanager_notifications_failed_total{job="prometheus-community-kube-alertmanager",namespace="${var.namespace}", integration=~`.*`}[5m])
              /
                ignoring (reason) group_left rate(alertmanager_notifications_total{job="prometheus-community-kube-alertmanager",namespace="${var.namespace}", integration=~`.*`}[5m])
              )
              > 0.01
          - alert: AlertmanagerClusterFailedToSendAlerts
            expr: |-
              min by (namespace,service, integration) (
                rate(alertmanager_notifications_failed_total{job="prometheus-community-kube-alertmanager",namespace="${var.namespace}", integration!~`.*`}[5m])
              /
                ignoring (reason) group_left rate(alertmanager_notifications_total{job="prometheus-community-kube-alertmanager",namespace="${var.namespace}", integration!~`.*`}[5m])
              )
              > 0.01
          - alert: AlertmanagerConfigInconsistent
            expr: |-
              count by (namespace,service,cluster) (
                count_values by (namespace,service,cluster) ("config_hash", alertmanager_config_hash{job="prometheus-community-kube-alertmanager",namespace="${var.namespace}"})
              )
              != 1
          - alert: AlertmanagerClusterDown
            expr: |-
              (
                count by (namespace,service,cluster) (
                  avg_over_time(up{job="prometheus-community-kube-alertmanager",namespace="${var.namespace}"}[5m]) < 0.5
                )
              /
                count by (namespace,service,cluster) (
                  up{job="prometheus-community-kube-alertmanager",namespace="${var.namespace}"}
                )
              )
              >= 0.5
          - alert: AlertmanagerClusterCrashlooping
            expr: |-
              (
                count by (namespace,service,cluster) (
                  changes(process_start_time_seconds{job="prometheus-community-kube-alertmanager",namespace="${var.namespace}"}[10m]) > 4
                )
              /
                count by (namespace,service,cluster) (
                  up{job="prometheus-community-kube-alertmanager",namespace="${var.namespace}"}
                )
              )
              >= 0.5
    EOF
  }
}
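
The `resources` expression keeps every YAML file in the module directory except `index.yaml` (the component descriptor) and anything with `ClusterRole` in its name, which is cluster-scoped and applied without a namespace in a separate overlay. A small sketch of the same filter over hypothetical file names:

locals {
  example-files = ["index.yaml", "ClusterRole.yaml", "service.yaml"]
  # Keeps only "service.yaml".
  example-namespaced = [
    for f in local.example-files : f
    if f != "index.yaml" && length(regexall("ClusterRole", f)) < 1
  ]
}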

monitor/alertmanager/index.yaml (new file, 102 lines)
@@ -0,0 +1,102 @@
---
apiVersion: vinyl.solidite.fr/v1beta1
kind: Component
category: monitor
metadata:
  name: alertmanager
description: null
options:
  retention:
    default: 120h
    examples:
    - 120h
    type: string
  images:
    default:
      alertmanager:
        pullPolicy: IfNotPresent
        registry: quay.io
        repository: prometheus/alertmanager
        tag: v0.26.0
    examples:
    - alertmanager:
        pullPolicy: IfNotPresent
        registry: quay.io
        repository: prometheus/alertmanager
        tag: v0.26.0
    properties:
      alertmanager:
        default:
          pullPolicy: IfNotPresent
          registry: quay.io
          repository: prometheus/alertmanager
          tag: v0.26.0
        properties:
          pullPolicy:
            default: IfNotPresent
            enum:
            - Always
            - Never
            - IfNotPresent
            type: string
          registry:
            default: quay.io
            type: string
          repository:
            default: prometheus/alertmanager
            type: string
          tag:
            default: v0.26.0
            type: string
        type: object
    type: object
  domain:
    default: your-company
    examples:
    - your-company
    type: string
  listenLocal:
    default: false
    examples:
    - false
    type: boolean
  logLevel:
    default: info
    examples:
    - info
    type: string
  sub-domain:
    default: to-be-set
    examples:
    - to-be-set
    type: string
  issuer:
    default: letsencrypt-prod
    examples:
    - letsencrypt-prod
    type: string
  ingress-class:
    default: traefik
    examples:
    - traefik
    type: string
  domain-name:
    default: your_company.com
    examples:
    - your_company.com
    type: string
  replicas:
    default: 1
    examples:
    - 1
    type: integer
dependencies: []
providers:
  kubernetes: true
  authentik: true
  kubectl: true
  postgresql: null
  restapi: null
  http: null
  gitea: null
  tfaddtype: null
@@ -0,0 +1,38 @@
# Source: kube-prometheus-stack/templates/alertmanager/alertmanager.yaml
apiVersion: monitoring.coreos.com/v1
kind: Alertmanager
metadata:
  name: prometheus-community-kube-alertmanager
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack-alertmanager
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  image: "quay.io/prometheus/alertmanager:v0.26.0"
  version: v0.26.0
  replicas: 1
  listenLocal: false
  serviceAccountName: prometheus-community-kube-alertmanager
  externalUrl: http://prometheus-community-kube-alertmanager.vynil-monitor:9093
  paused: false
  logFormat: "logfmt"
  logLevel: "info"
  retention: "120h"
  alertmanagerConfigSelector: {}
  alertmanagerConfigNamespaceSelector: {}
  routePrefix: "/"
  securityContext:
    fsGroup: 2000
    runAsGroup: 2000
    runAsNonRoot: true
    runAsUser: 1000
    seccompProfile:
      type: RuntimeDefault
  portName: http-web
@@ -0,0 +1,142 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/alertmanager.rules.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-alertmanager.rules
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
  - name: alertmanager.rules
    rules:
    - alert: AlertmanagerFailedReload
      annotations:
        description: Configuration has failed to load for {{ $labels.namespace }}/{{ $labels.pod}}.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerfailedreload
        summary: Reloading an Alertmanager configuration has failed.
      expr: |-
        # Without max_over_time, failed scrapes could create false negatives, see
        # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
        max_over_time(alertmanager_config_last_reload_successful{job="prometheus-community-kube-alertmanager",namespace="vynil-monitor"}[5m]) == 0
      for: 10m
      labels:
        severity: critical
    - alert: AlertmanagerMembersInconsistent
      annotations:
        description: Alertmanager {{ $labels.namespace }}/{{ $labels.pod}} has only found {{ $value }} members of the {{$labels.job}} cluster.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagermembersinconsistent
        summary: A member of an Alertmanager cluster has not found all other cluster members.
      expr: |-
        # Without max_over_time, failed scrapes could create false negatives, see
        # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
        max_over_time(alertmanager_cluster_members{job="prometheus-community-kube-alertmanager",namespace="vynil-monitor"}[5m])
        < on (namespace,service,cluster) group_left
        count by (namespace,service,cluster) (max_over_time(alertmanager_cluster_members{job="prometheus-community-kube-alertmanager",namespace="vynil-monitor"}[5m]))
      for: 15m
      labels:
        severity: critical
    - alert: AlertmanagerFailedToSendAlerts
      annotations:
        description: Alertmanager {{ $labels.namespace }}/{{ $labels.pod}} failed to send {{ $value | humanizePercentage }} of notifications to {{ $labels.integration }}.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerfailedtosendalerts
        summary: An Alertmanager instance failed to send notifications.
      expr: |-
        (
          rate(alertmanager_notifications_failed_total{job="prometheus-community-kube-alertmanager",namespace="vynil-monitor"}[5m])
        /
          ignoring (reason) group_left rate(alertmanager_notifications_total{job="prometheus-community-kube-alertmanager",namespace="vynil-monitor"}[5m])
        )
        > 0.01
      for: 5m
      labels:
        severity: warning
    - alert: AlertmanagerClusterFailedToSendAlerts
      annotations:
        description: The minimum notification failure rate to {{ $labels.integration }} sent from any instance in the {{$labels.job}} cluster is {{ $value | humanizePercentage }}.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerclusterfailedtosendalerts
        summary: All Alertmanager instances in a cluster failed to send notifications to a critical integration.
      expr: |-
        min by (namespace,service, integration) (
          rate(alertmanager_notifications_failed_total{job="prometheus-community-kube-alertmanager",namespace="vynil-monitor", integration=~`.*`}[5m])
        /
          ignoring (reason) group_left rate(alertmanager_notifications_total{job="prometheus-community-kube-alertmanager",namespace="vynil-monitor", integration=~`.*`}[5m])
        )
        > 0.01
      for: 5m
      labels:
        severity: critical
    - alert: AlertmanagerClusterFailedToSendAlerts
      annotations:
        description: The minimum notification failure rate to {{ $labels.integration }} sent from any instance in the {{$labels.job}} cluster is {{ $value | humanizePercentage }}.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerclusterfailedtosendalerts
        summary: All Alertmanager instances in a cluster failed to send notifications to a non-critical integration.
      expr: |-
        min by (namespace,service, integration) (
          rate(alertmanager_notifications_failed_total{job="prometheus-community-kube-alertmanager",namespace="vynil-monitor", integration!~`.*`}[5m])
        /
          ignoring (reason) group_left rate(alertmanager_notifications_total{job="prometheus-community-kube-alertmanager",namespace="vynil-monitor", integration!~`.*`}[5m])
        )
        > 0.01
      for: 5m
      labels:
        severity: warning
    - alert: AlertmanagerConfigInconsistent
      annotations:
        description: Alertmanager instances within the {{$labels.job}} cluster have different configurations.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerconfiginconsistent
        summary: Alertmanager instances within the same cluster have different configurations.
      expr: |-
        count by (namespace,service,cluster) (
          count_values by (namespace,service,cluster) ("config_hash", alertmanager_config_hash{job="prometheus-community-kube-alertmanager",namespace="vynil-monitor"})
        )
        != 1
      for: 20m
      labels:
        severity: critical
    - alert: AlertmanagerClusterDown
      annotations:
        description: '{{ $value | humanizePercentage }} of Alertmanager instances within the {{$labels.job}} cluster have been up for less than half of the last 5m.'
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerclusterdown
        summary: Half or more of the Alertmanager instances within the same cluster are down.
      expr: |-
        (
          count by (namespace,service,cluster) (
            avg_over_time(up{job="prometheus-community-kube-alertmanager",namespace="vynil-monitor"}[5m]) < 0.5
          )
        /
          count by (namespace,service,cluster) (
            up{job="prometheus-community-kube-alertmanager",namespace="vynil-monitor"}
          )
        )
        >= 0.5
      for: 5m
      labels:
        severity: critical
    - alert: AlertmanagerClusterCrashlooping
      annotations:
        description: '{{ $value | humanizePercentage }} of Alertmanager instances within the {{$labels.job}} cluster have restarted at least 5 times in the last 10m.'
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerclustercrashlooping
        summary: Half or more of the Alertmanager instances within the same cluster are crashlooping.
      expr: |-
        (
          count by (namespace,service,cluster) (
            changes(process_start_time_seconds{job="prometheus-community-kube-alertmanager",namespace="vynil-monitor"}[10m]) > 4
          )
        /
          count by (namespace,service,cluster) (
            up{job="prometheus-community-kube-alertmanager",namespace="vynil-monitor"}
          )
        )
        >= 0.5
      for: 5m
      labels:
        severity: critical
@@ -0,0 +1,33 @@
# Source: kube-prometheus-stack/templates/alertmanager/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: prometheus-community-kube-alertmanager
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack-alertmanager
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  selector:
    matchLabels:
      app: kube-prometheus-stack-alertmanager
      release: "prometheus-community"
      self-monitor: "true"
  namespaceSelector:
    matchNames:
    - "vynil-monitor"
  endpoints:
  - port: http-web
    enableHttp2: true
    path: "/metrics"
  - port: reloader-web
    scheme: http
    path: "/metrics"

monitor/alertmanager/presentation.tf (new file, 75 lines)
@@ -0,0 +1,75 @@
locals {
  dns-name  = "${var.sub-domain}.${var.domain-name}"
  dns-names = [local.dns-name]
  app-name  = var.component == var.instance ? var.instance : format("%s-%s", var.component, var.instance)
  icon      = "icon.svg"
  request_headers = {
    "Content-Type" = "application/json"
    Authorization  = "Bearer ${data.kubernetes_secret_v1.authentik.data["AUTHENTIK_BOOTSTRAP_TOKEN"]}"
  }
  service = {
    "name" = "prometheus-community-kube-alertmanager"
    "port" = {
      "number" = 9093
    }
  }
}

module "ingress" {
  source        = "git::https://git.solidite.fr/vynil/kydah-modules.git//ingress"
  component     = ""
  instance      = var.instance
  namespace     = var.namespace
  issuer        = var.issuer
  ingress_class = var.ingress-class
  labels        = local.common-labels
  dns_names     = local.dns-names
  middlewares   = ["forward-${local.app-name}"]
  services      = [local.service]
  providers = {
    kubectl = kubectl
  }
}

module "application" {
  source            = "git::https://git.solidite.fr/vynil/kydah-modules.git//application"
  component         = var.component
  instance          = var.instance
  app_group         = var.app-group
  dns_name          = local.dns-name
  icon              = local.icon
  protocol_provider = module.forward.provider-id
  providers = {
    authentik = authentik
  }
}

provider "restapi" {
  uri                  = "http://authentik.${var.domain}-auth.svc/api/v3/"
  headers              = local.request_headers
  create_method        = "PATCH"
  update_method        = "PATCH"
  destroy_method       = "PATCH"
  write_returns_object = true
  id_attribute         = "name"
}

module "forward" {
  source          = "git::https://git.solidite.fr/vynil/kydah-modules.git//forward"
  component       = var.component
  instance        = var.instance
  domain          = var.domain
  namespace       = var.namespace
  ingress_class   = var.ingress-class
  labels          = local.common-labels
  dns_names       = local.dns-names
  service         = local.service
  icon            = local.icon
  request_headers = local.request_headers
  providers = {
    restapi   = restapi
    http      = http
    kubectl   = kubectl
    authentik = authentik
  }
}
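
The restapi provider above is pointed at the in-cluster authentik API and uses PATCH for create, update, and destroy, so repeated applies behave as idempotent upserts keyed on `name`. A hypothetical consumer of that provider (the path and payload are illustrative, not from this diff):

# Hypothetical usage of the PATCH-configured restapi provider.
resource "restapi_object" "example" {
  path = "/core/brands/"
  data = jsonencode({
    name   = "example"
    domain = "admin.example.com"
  })
}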
@@ -0,0 +1,19 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: alertmanager-kube-grafana-datasource
  labels:
    grafana_datasource: "1"
    app: alertmanager
data:
  datasource.yaml: |-
    apiVersion: 1
    datasources:
    - name: Alertmanager
      type: alertmanager
      uid: alertmanager
      url: http://prometheus-community-kube-alertmanager.vynil-monitor:9093/
      access: proxy
      jsonData:
        handleGrafanaManagedAlerts: false
        implementation: prometheus
@@ -0,0 +1,18 @@
# Source: kube-prometheus-stack/templates/alertmanager/secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: alertmanager-prometheus-community-kube-alertmanager
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack-alertmanager
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
data:
  alertmanager.yaml: "Z2xvYmFsOgogIHJlc29sdmVfdGltZW91dDogNW0KaW5oaWJpdF9ydWxlczoKLSBlcXVhbDoKICAtIG5hbWVzcGFjZQogIC0gYWxlcnRuYW1lCiAgc291cmNlX21hdGNoZXJzOgogIC0gc2V2ZXJpdHkgPSBjcml0aWNhbAogIHRhcmdldF9tYXRjaGVyczoKICAtIHNldmVyaXR5ID1+IHdhcm5pbmd8aW5mbwotIGVxdWFsOgogIC0gbmFtZXNwYWNlCiAgLSBhbGVydG5hbWUKICBzb3VyY2VfbWF0Y2hlcnM6CiAgLSBzZXZlcml0eSA9IHdhcm5pbmcKICB0YXJnZXRfbWF0Y2hlcnM6CiAgLSBzZXZlcml0eSA9IGluZm8KLSBlcXVhbDoKICAtIG5hbWVzcGFjZQogIHNvdXJjZV9tYXRjaGVyczoKICAtIGFsZXJ0bmFtZSA9IEluZm9JbmhpYml0b3IKICB0YXJnZXRfbWF0Y2hlcnM6CiAgLSBzZXZlcml0eSA9IGluZm8KLSB0YXJnZXRfbWF0Y2hlcnM6CiAgLSBhbGVydG5hbWUgPSBJbmZvSW5oaWJpdG9yCnJlY2VpdmVyczoKLSBuYW1lOiAibnVsbCIKcm91dGU6CiAgZ3JvdXBfYnk6CiAgLSBuYW1lc3BhY2UKICBncm91cF9pbnRlcnZhbDogNW0KICBncm91cF93YWl0OiAzMHMKICByZWNlaXZlcjogIm51bGwiCiAgcmVwZWF0X2ludGVydmFsOiAxMmgKICByb3V0ZXM6CiAgLSBtYXRjaGVyczoKICAgIC0gYWxlcnRuYW1lID0gIldhdGNoZG9nIgogICAgcmVjZWl2ZXI6ICJudWxsIgp0ZW1wbGF0ZXM6Ci0gL2V0Yy9hbGVydG1hbmFnZXIvY29uZmlnLyoudG1wbA=="
@@ -0,0 +1,20 @@
---
# Source: kube-prometheus-stack/templates/alertmanager/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: prometheus-community-kube-alertmanager
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack-alertmanager
    app.kubernetes.io/name: kube-prometheus-stack-alertmanager
    app.kubernetes.io/component: alertmanager
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
automountServiceAccountToken: true
@@ -0,0 +1,32 @@
# Source: kube-prometheus-stack/templates/alertmanager/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: prometheus-community-kube-alertmanager
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack-alertmanager
    self-monitor: "true"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  ports:
  - name: http-web
    port: 9093
    targetPort: 9093
    protocol: TCP
  - name: reloader-web
    appProtocol: http
    port: 8080
    targetPort: reloader-web
  selector:
    app.kubernetes.io/name: alertmanager
    alertmanager: prometheus-community-kube-alertmanager
  sessionAffinity: None
  type: "ClusterIP"
@@ -0,0 +1,82 @@
# Source: kube-prometheus-stack/charts/kube-state-metrics/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: prometheus-community-kube-state-metrics
  namespace: vynil-monitor
  labels:
    helm.sh/chart: kube-state-metrics-5.16.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: metrics
    app.kubernetes.io/part-of: kube-state-metrics
    app.kubernetes.io/name: kube-state-metrics
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "2.10.1"
    release: prometheus-community
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: kube-state-metrics
      app.kubernetes.io/instance: prometheus-community
  replicas: 1
  strategy:
    type: RollingUpdate
  revisionHistoryLimit: 10
  template:
    metadata:
      labels:
        helm.sh/chart: kube-state-metrics-5.16.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: metrics
        app.kubernetes.io/part-of: kube-state-metrics
        app.kubernetes.io/name: kube-state-metrics
        app.kubernetes.io/instance: prometheus-community
        app.kubernetes.io/version: "2.10.1"
        release: prometheus-community
    spec:
      hostNetwork: false
      serviceAccountName: prometheus-community-kube-state-metrics
      securityContext:
        fsGroup: 65534
        runAsGroup: 65534
        runAsNonRoot: true
        runAsUser: 65534
        seccompProfile:
          type: RuntimeDefault
      containers:
      - name: kube-state-metrics
        args:
        - --port=8080
        - --resources=certificatesigningrequests,configmaps,cronjobs,daemonsets,deployments,endpoints,horizontalpodautoscalers,ingresses,jobs,leases,limitranges,mutatingwebhookconfigurations,namespaces,networkpolicies,nodes,persistentvolumeclaims,persistentvolumes,poddisruptionbudgets,pods,replicasets,replicationcontrollers,resourcequotas,secrets,services,statefulsets,storageclasses,validatingwebhookconfigurations,volumeattachments
        imagePullPolicy: IfNotPresent
        image: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.10.1
        ports:
        - containerPort: 8080
          name: "http"
        livenessProbe:
          failureThreshold: 3
          httpGet:
            httpHeaders:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 5
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 5
        readinessProbe:
          failureThreshold: 3
          httpGet:
            httpHeaders:
            path: /
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 5
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 5
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - ALL

monitor/kube-state-metrics/datas.tf (new file, 38 lines)
@@ -0,0 +1,38 @@
locals {
  common-labels = {
    "vynil.solidite.fr/owner-name"      = var.instance
    "vynil.solidite.fr/owner-namespace" = var.namespace
    "vynil.solidite.fr/owner-category"  = var.category
    "vynil.solidite.fr/owner-component" = var.component
    "app.kubernetes.io/managed-by"      = "vynil"
    "app.kubernetes.io/instance"        = var.instance
  }
  rb-patch = <<-EOF
    - op: replace
      path: /subjects/0/namespace
      value: "${var.namespace}"
  EOF
}

data "kustomization_overlay" "data" {
  common_labels = local.common-labels
  namespace     = var.namespace
  resources     = [for file in fileset(path.module, "*.yaml") : file if file != "index.yaml" && length(regexall("ClusterRole", file)) < 1]
  images {
    name     = "registry.k8s.io/kube-state-metrics/kube-state-metrics"
    new_name = "${var.images.kube-state-metrics.registry}/${var.images.kube-state-metrics.repository}"
    new_tag  = "${var.images.kube-state-metrics.tag}"
  }
}

data "kustomization_overlay" "data_no_ns" {
  common_labels = local.common-labels
  resources     = [for file in fileset(path.module, "*.yaml") : file if length(regexall("ClusterRole", file)) > 0]
  patches {
    target {
      kind = "ClusterRoleBinding"
      name = "prometheus-community-kube-state-metrics"
    }
    patch = local.rb-patch
  }
}
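
The `images` block rewrites the image reference hard-coded in the vendored deployment.yaml to whatever the component's `images` option resolves to. With the defaults from the index.yaml below this is a no-op; a sketch with a hypothetical mirror override:

data "kustomization_overlay" "example" {
  resources = ["deployment.yaml"]
  images {
    name     = "registry.k8s.io/kube-state-metrics/kube-state-metrics"
    new_name = "mirror.example.com/kube-state-metrics" # hypothetical mirror
    new_tag  = "v2.10.1"
  }
}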

monitor/kube-state-metrics/index.yaml (new file, 57 lines)
@@ -0,0 +1,57 @@
---
apiVersion: vinyl.solidite.fr/v1beta1
kind: Component
category: monitor
metadata:
  name: kube-state-metrics
description: null
options:
  images:
    default:
      kube-state-metrics:
        pullPolicy: IfNotPresent
        registry: registry.k8s.io
        repository: kube-state-metrics/kube-state-metrics
        tag: v2.10.1
    examples:
    - kube-state-metrics:
        pullPolicy: IfNotPresent
        registry: registry.k8s.io
        repository: kube-state-metrics/kube-state-metrics
        tag: v2.10.1
    properties:
      kube-state-metrics:
        default:
          pullPolicy: IfNotPresent
          registry: registry.k8s.io
          repository: kube-state-metrics/kube-state-metrics
          tag: v2.10.1
        properties:
          pullPolicy:
            default: IfNotPresent
            enum:
            - Always
            - Never
            - IfNotPresent
            type: string
          registry:
            default: registry.k8s.io
            type: string
          repository:
            default: kube-state-metrics/kube-state-metrics
            type: string
          tag:
            default: v2.10.1
            type: string
        type: object
    type: object
dependencies: []
providers:
  kubernetes: true
  authentik: null
  kubectl: true
  postgresql: null
  restapi: null
  http: null
  gitea: null
  tfaddtype: null
@@ -0,0 +1,68 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/kube-state-metrics.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-kube-state-metrics
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
  - name: kube-state-metrics
    rules:
    - alert: KubeStateMetricsListErrors
      annotations:
        description: kube-state-metrics is experiencing errors at an elevated rate in list operations. This is likely causing it to not be able to expose metrics about Kubernetes objects correctly or at all.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kube-state-metrics/kubestatemetricslisterrors
        summary: kube-state-metrics is experiencing errors in list operations.
      expr: |-
        (sum(rate(kube_state_metrics_list_total{job="kube-state-metrics",result="error"}[5m])) by (cluster)
        /
        sum(rate(kube_state_metrics_list_total{job="kube-state-metrics"}[5m])) by (cluster))
        > 0.01
      for: 15m
      labels:
        severity: critical
    - alert: KubeStateMetricsWatchErrors
      annotations:
        description: kube-state-metrics is experiencing errors at an elevated rate in watch operations. This is likely causing it to not be able to expose metrics about Kubernetes objects correctly or at all.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kube-state-metrics/kubestatemetricswatcherrors
        summary: kube-state-metrics is experiencing errors in watch operations.
      expr: |-
        (sum(rate(kube_state_metrics_watch_total{job="kube-state-metrics",result="error"}[5m])) by (cluster)
        /
        sum(rate(kube_state_metrics_watch_total{job="kube-state-metrics"}[5m])) by (cluster))
        > 0.01
      for: 15m
      labels:
        severity: critical
    - alert: KubeStateMetricsShardingMismatch
      annotations:
        description: kube-state-metrics pods are running with different --total-shards configuration, some Kubernetes objects may be exposed multiple times or not exposed at all.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kube-state-metrics/kubestatemetricsshardingmismatch
        summary: kube-state-metrics sharding is misconfigured.
      expr: stdvar (kube_state_metrics_total_shards{job="kube-state-metrics"}) by (cluster) != 0
      for: 15m
      labels:
        severity: critical
    - alert: KubeStateMetricsShardsMissing
      annotations:
        description: kube-state-metrics shards are missing, some Kubernetes objects are not being exposed.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kube-state-metrics/kubestatemetricsshardsmissing
        summary: kube-state-metrics shards are missing.
      expr: |-
        2^max(kube_state_metrics_total_shards{job="kube-state-metrics"}) by (cluster) - 1
        -
        sum( 2 ^ max by (cluster, shard_ordinal) (kube_state_metrics_shard_ordinal{job="kube-state-metrics"}) ) by (cluster)
        != 0
      for: 15m
      labels:
        severity: critical
@@ -0,0 +1,24 @@
# Source: kube-prometheus-stack/charts/kube-state-metrics/templates/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: prometheus-community-kube-state-metrics
  namespace: vynil-monitor
  labels:
    helm.sh/chart: kube-state-metrics-5.16.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: metrics
    app.kubernetes.io/part-of: kube-state-metrics
    app.kubernetes.io/name: kube-state-metrics
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "2.10.1"
    release: prometheus-community
spec:
  jobLabel: app.kubernetes.io/name
  selector:
    matchLabels:
      app.kubernetes.io/name: kube-state-metrics
      app.kubernetes.io/instance: prometheus-community
  endpoints:
  - port: http
    honorLabels: true
@@ -0,0 +1,22 @@
# Source: kube-prometheus-stack/charts/kube-state-metrics/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    helm.sh/chart: kube-state-metrics-5.16.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: metrics
    app.kubernetes.io/part-of: kube-state-metrics
    app.kubernetes.io/name: kube-state-metrics
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "2.10.1"
    release: prometheus-community
  name: prometheus-community-kube-state-metrics
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: prometheus-community-kube-state-metrics
subjects:
- kind: ServiceAccount
  name: prometheus-community-kube-state-metrics
  namespace: vynil-monitor
@@ -0,0 +1,155 @@
# Source: kube-prometheus-stack/charts/kube-state-metrics/templates/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    helm.sh/chart: kube-state-metrics-5.16.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: metrics
    app.kubernetes.io/part-of: kube-state-metrics
    app.kubernetes.io/name: kube-state-metrics
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "2.10.1"
    release: prometheus-community
  name: prometheus-community-kube-state-metrics
rules:

- apiGroups: ["certificates.k8s.io"]
  resources:
  - certificatesigningrequests
  verbs: ["list", "watch"]

- apiGroups: [""]
  resources:
  - configmaps
  verbs: ["list", "watch"]

- apiGroups: ["batch"]
  resources:
  - cronjobs
  verbs: ["list", "watch"]

- apiGroups: ["extensions", "apps"]
  resources:
  - daemonsets
  verbs: ["list", "watch"]

- apiGroups: ["extensions", "apps"]
  resources:
  - deployments
  verbs: ["list", "watch"]

- apiGroups: [""]
  resources:
  - endpoints
  verbs: ["list", "watch"]

- apiGroups: ["autoscaling"]
  resources:
  - horizontalpodautoscalers
  verbs: ["list", "watch"]

- apiGroups: ["extensions", "networking.k8s.io"]
  resources:
  - ingresses
  verbs: ["list", "watch"]

- apiGroups: ["batch"]
  resources:
  - jobs
  verbs: ["list", "watch"]

- apiGroups: ["coordination.k8s.io"]
  resources:
  - leases
  verbs: ["list", "watch"]

- apiGroups: [""]
  resources:
  - limitranges
  verbs: ["list", "watch"]

- apiGroups: ["admissionregistration.k8s.io"]
  resources:
  - mutatingwebhookconfigurations
  verbs: ["list", "watch"]

- apiGroups: [""]
  resources:
  - namespaces
  verbs: ["list", "watch"]

- apiGroups: ["networking.k8s.io"]
  resources:
  - networkpolicies
  verbs: ["list", "watch"]

- apiGroups: [""]
  resources:
  - nodes
  verbs: ["list", "watch"]

- apiGroups: [""]
  resources:
  - persistentvolumeclaims
  verbs: ["list", "watch"]

- apiGroups: [""]
  resources:
  - persistentvolumes
  verbs: ["list", "watch"]

- apiGroups: ["policy"]
  resources:
  - poddisruptionbudgets
  verbs: ["list", "watch"]

- apiGroups: [""]
  resources:
  - pods
  verbs: ["list", "watch"]

- apiGroups: ["extensions", "apps"]
  resources:
  - replicasets
  verbs: ["list", "watch"]

- apiGroups: [""]
  resources:
  - replicationcontrollers
  verbs: ["list", "watch"]

- apiGroups: [""]
  resources:
  - resourcequotas
  verbs: ["list", "watch"]

- apiGroups: [""]
  resources:
  - secrets
  verbs: ["list", "watch"]

- apiGroups: [""]
  resources:
  - services
  verbs: ["list", "watch"]

- apiGroups: ["apps"]
  resources:
  - statefulsets
  verbs: ["list", "watch"]

- apiGroups: ["storage.k8s.io"]
  resources:
  - storageclasses
  verbs: ["list", "watch"]

- apiGroups: ["admissionregistration.k8s.io"]
  resources:
  - validatingwebhookconfigurations
  verbs: ["list", "watch"]

- apiGroups: ["storage.k8s.io"]
  resources:
  - volumeattachments
  verbs: ["list", "watch"]

monitor/kube-state-metrics/ressources_no_ns.tf (new file, 45 lines)
@@ -0,0 +1,45 @@
# first loop through resources in ids_prio[0]
resource "kustomization_resource" "pre_no_ns" {
  for_each = data.kustomization_overlay.data_no_ns.ids_prio[0]

  manifest = (
    contains(["_/Secret"], regex("(?P<group_kind>.*/.*)/.*/.*", each.value)["group_kind"])
    ? sensitive(data.kustomization_overlay.data_no_ns.manifests[each.value])
    : data.kustomization_overlay.data_no_ns.manifests[each.value]
  )
}

# then loop through resources in ids_prio[1]
# and set an explicit depends_on on kustomization_resource.pre
# wait up to 5 minutes for any deployment or daemonset to become ready
resource "kustomization_resource" "main_no_ns" {
  for_each = data.kustomization_overlay.data_no_ns.ids_prio[1]

  manifest = (
    contains(["_/Secret"], regex("(?P<group_kind>.*/.*)/.*/.*", each.value)["group_kind"])
    ? sensitive(data.kustomization_overlay.data_no_ns.manifests[each.value])
    : data.kustomization_overlay.data_no_ns.manifests[each.value]
  )
  wait = true
  timeouts {
    create = "5m"
    update = "5m"
  }

  depends_on = [kustomization_resource.pre_no_ns]
}

# finally, loop through resources in ids_prio[2]
# and set an explicit depends_on on kustomization_resource.main
resource "kustomization_resource" "post_no_ns" {
  for_each = data.kustomization_overlay.data_no_ns.ids_prio[2]

  manifest = (
    contains(["_/Secret"], regex("(?P<group_kind>.*/.*)/.*/.*", each.value)["group_kind"])
    ? sensitive(data.kustomization_overlay.data_no_ns.manifests[each.value])
    : data.kustomization_overlay.data_no_ns.manifests[each.value]
  )

  depends_on = [kustomization_resource.main_no_ns]
}
|
||||
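The ternary above keys off the kustomization provider's resource IDs, which (as this code assumes) take the form group/Kind/namespace/name with `_` standing in for the core API group. A minimal sketch of the same detection in isolation, using purely illustrative IDs rather than anything from this module:

locals {
  # Hypothetical IDs in the shape the regex above expects:
  #   "_/Secret/vynil-monitor/my-secret"  -> group_kind "_/Secret"        (marked sensitive)
  #   "apps/Deployment/vynil-monitor/web" -> group_kind "apps/Deployment" (left as-is)
  example_id = "_/Secret/vynil-monitor/my-secret"
  group_kind = regex("(?P<group_kind>.*/.*)/.*/.*", local.example_id)["group_kind"]
  is_secret  = contains(["_/Secret"], local.group_kind)
}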
@@ -0,0 +1,16 @@
---
# Source: kube-prometheus-stack/charts/kube-state-metrics/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    helm.sh/chart: kube-state-metrics-5.16.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: metrics
    app.kubernetes.io/part-of: kube-state-metrics
    app.kubernetes.io/name: kube-state-metrics
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "2.10.1"
    release: prometheus-community
  name: prometheus-community-kube-state-metrics
  namespace: vynil-monitor
@@ -0,0 +1,28 @@
# Source: kube-prometheus-stack/charts/kube-state-metrics/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: prometheus-community-kube-state-metrics
  namespace: vynil-monitor
  labels:
    helm.sh/chart: kube-state-metrics-5.16.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: metrics
    app.kubernetes.io/part-of: kube-state-metrics
    app.kubernetes.io/name: kube-state-metrics
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "2.10.1"
    release: prometheus-community
  annotations:
    prometheus.io/scrape: 'true'
spec:
  type: "ClusterIP"
  ports:
    - name: "http"
      protocol: TCP
      port: 8080
      targetPort: 8080
  selector:
    app.kubernetes.io/name: kube-state-metrics
    app.kubernetes.io/instance: prometheus-community
21
monitor/monitor-control-plan/datas.tf
Normal file
@@ -0,0 +1,21 @@
locals {
  common-labels = {
    "vynil.solidite.fr/owner-name"      = var.instance
    "vynil.solidite.fr/owner-namespace" = var.namespace
    "vynil.solidite.fr/owner-category"  = var.category
    "vynil.solidite.fr/owner-component" = var.component
    "app.kubernetes.io/managed-by"      = "vynil"
    "app.kubernetes.io/instance"        = var.instance
  }
}

data "kustomization_overlay" "data" {
  common_labels = local.common-labels
  namespace     = var.namespace
  resources     = [for file in fileset(path.module, "*.yaml") : file if file != "index.yaml" && length(regexall("v1_Service_prometheus", file)) < 1]
}

data "kustomization_overlay" "data_no_ns" {
  common_labels = local.common-labels
  resources     = [for file in fileset(path.module, "*.yaml") : file if length(regexall("v1_Service_prometheus", file)) > 0]
}
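The two overlays split the rendered manifests by filename: anything matching v1_Service_prometheus is applied without a namespace override (those Services pin themselves to kube-system), while everything else is moved into var.namespace. Illustrative routing, assuming hypothetical file names in this module:

# "v1_Service_prometheus-community-kube-kube-etcd.yaml" -> data_no_ns (keeps its own namespace)
# "monitoring.coreos.com_v1_PrometheusRule_etcd.yaml"   -> data      (namespaced to var.namespace)
# "index.yaml"                                          -> excluded from both overlays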
82
monitor/monitor-control-plan/index.yaml
Normal file
@@ -0,0 +1,82 @@
---
apiVersion: vynil.solidite.fr/v1beta1
kind: Component
category: monitor
metadata:
  name: monitor-control-plan
  description: null
options:
  sub-domain:
    default: to-be-set
    examples:
      - to-be-set
    type: string
  issuer:
    default: letsencrypt-prod
    examples:
      - letsencrypt-prod
    type: string
  domain:
    default: your-company
    examples:
      - your-company
    type: string
  ingress-class:
    default: traefik
    examples:
      - traefik
    type: string
  images:
    default:
      operator:
        pullPolicy: IfNotPresent
        registry: docker.io
        repository: to-be/defined
        tag: v1.0.0
    examples:
      - operator:
          pullPolicy: IfNotPresent
          registry: docker.io
          repository: to-be/defined
          tag: v1.0.0
    properties:
      operator:
        default:
          pullPolicy: IfNotPresent
          registry: docker.io
          repository: to-be/defined
          tag: v1.0.0
        properties:
          pullPolicy:
            default: IfNotPresent
            enum:
              - Always
              - Never
              - IfNotPresent
            type: string
          registry:
            default: docker.io
            type: string
          repository:
            default: to-be/defined
            type: string
          tag:
            default: v1.0.0
            type: string
        type: object
    type: object
  domain-name:
    default: your_company.com
    examples:
      - your_company.com
    type: string
dependencies: []
providers:
  kubernetes: true
  authentik: true
  kubectl: true
  postgresql: null
  restapi: null
  http: null
  gitea: null
  tfaddtype: null
@@ -0,0 +1,167 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/etcd.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-etcd
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
    - name: etcd
      rules:
        - alert: etcdMembersDown
          annotations:
            description: 'etcd cluster "{{ $labels.job }}": members are down ({{ $value }}).'
            summary: etcd cluster members are down.
          expr: |-
            max without (endpoint) (
              sum without (instance) (up{job=~".*etcd.*"} == bool 0)
            or
              count without (To) (
                sum without (instance) (rate(etcd_network_peer_sent_failures_total{job=~".*etcd.*"}[120s])) > 0.01
              )
            )
            > 0
          for: 10m
          labels:
            severity: critical
        - alert: etcdInsufficientMembers
          annotations:
            description: 'etcd cluster "{{ $labels.job }}": insufficient members ({{ $value }}).'
            summary: etcd cluster has insufficient number of members.
          expr: sum(up{job=~".*etcd.*"} == bool 1) without (instance) < ((count(up{job=~".*etcd.*"}) without (instance) + 1) / 2)
          for: 3m
          labels:
            severity: critical
        - alert: etcdNoLeader
          annotations:
            description: 'etcd cluster "{{ $labels.job }}": member {{ $labels.instance }} has no leader.'
            summary: etcd cluster has no leader.
          expr: etcd_server_has_leader{job=~".*etcd.*"} == 0
          for: 1m
          labels:
            severity: critical
        - alert: etcdHighNumberOfLeaderChanges
          annotations:
            description: 'etcd cluster "{{ $labels.job }}": {{ $value }} leader changes within the last 15 minutes. Frequent elections may be a sign of insufficient resources, high network latency, or disruptions by other components and should be investigated.'
            summary: etcd cluster has high number of leader changes.
          expr: increase((max without (instance) (etcd_server_leader_changes_seen_total{job=~".*etcd.*"}) or 0*absent(etcd_server_leader_changes_seen_total{job=~".*etcd.*"}))[15m:1m]) >= 4
          for: 5m
          labels:
            severity: warning
        - alert: etcdHighNumberOfFailedGRPCRequests
          annotations:
            description: 'etcd cluster "{{ $labels.job }}": {{ $value }}% of requests for {{ $labels.grpc_method }} failed on etcd instance {{ $labels.instance }}.'
            summary: etcd cluster has high number of failed grpc requests.
          expr: |-
            100 * sum(rate(grpc_server_handled_total{job=~".*etcd.*", grpc_code=~"Unknown|FailedPrecondition|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded"}[5m])) without (grpc_type, grpc_code)
              /
            sum(rate(grpc_server_handled_total{job=~".*etcd.*"}[5m])) without (grpc_type, grpc_code)
            > 1
          for: 10m
          labels:
            severity: warning
        - alert: etcdHighNumberOfFailedGRPCRequests
          annotations:
            description: 'etcd cluster "{{ $labels.job }}": {{ $value }}% of requests for {{ $labels.grpc_method }} failed on etcd instance {{ $labels.instance }}.'
            summary: etcd cluster has high number of failed grpc requests.
          expr: |-
            100 * sum(rate(grpc_server_handled_total{job=~".*etcd.*", grpc_code=~"Unknown|FailedPrecondition|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded"}[5m])) without (grpc_type, grpc_code)
              /
            sum(rate(grpc_server_handled_total{job=~".*etcd.*"}[5m])) without (grpc_type, grpc_code)
            > 5
          for: 5m
          labels:
            severity: critical
        - alert: etcdGRPCRequestsSlow
          annotations:
            description: 'etcd cluster "{{ $labels.job }}": 99th percentile of gRPC requests is {{ $value }}s on etcd instance {{ $labels.instance }} for {{ $labels.grpc_method }} method.'
            summary: etcd grpc requests are slow
          expr: |-
            histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket{job=~".*etcd.*", grpc_method!="Defragment", grpc_type="unary"}[5m])) without(grpc_type))
            > 0.15
          for: 10m
          labels:
            severity: critical
        - alert: etcdMemberCommunicationSlow
          annotations:
            description: 'etcd cluster "{{ $labels.job }}": member communication with {{ $labels.To }} is taking {{ $value }}s on etcd instance {{ $labels.instance }}.'
            summary: etcd cluster member communication is slow.
          expr: |-
            histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket{job=~".*etcd.*"}[5m]))
            > 0.15
          for: 10m
          labels:
            severity: warning
        - alert: etcdHighNumberOfFailedProposals
          annotations:
            description: 'etcd cluster "{{ $labels.job }}": {{ $value }} proposal failures within the last 30 minutes on etcd instance {{ $labels.instance }}.'
            summary: etcd cluster has high number of proposal failures.
          expr: rate(etcd_server_proposals_failed_total{job=~".*etcd.*"}[15m]) > 5
          for: 15m
          labels:
            severity: warning
        - alert: etcdHighFsyncDurations
          annotations:
            description: 'etcd cluster "{{ $labels.job }}": 99th percentile fsync durations are {{ $value }}s on etcd instance {{ $labels.instance }}.'
            summary: etcd cluster 99th percentile fsync durations are too high.
          expr: |-
            histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket{job=~".*etcd.*"}[5m]))
            > 0.5
          for: 10m
          labels:
            severity: warning
        - alert: etcdHighFsyncDurations
          annotations:
            description: 'etcd cluster "{{ $labels.job }}": 99th percentile fsync durations are {{ $value }}s on etcd instance {{ $labels.instance }}.'
            summary: etcd cluster 99th percentile fsync durations are too high.
          expr: |-
            histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket{job=~".*etcd.*"}[5m]))
            > 1
          for: 10m
          labels:
            severity: critical
        - alert: etcdHighCommitDurations
          annotations:
            description: 'etcd cluster "{{ $labels.job }}": 99th percentile commit durations {{ $value }}s on etcd instance {{ $labels.instance }}.'
            summary: etcd cluster 99th percentile commit durations are too high.
          expr: |-
            histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket{job=~".*etcd.*"}[5m]))
            > 0.25
          for: 10m
          labels:
            severity: warning
        - alert: etcdDatabaseQuotaLowSpace
          annotations:
            description: 'etcd cluster "{{ $labels.job }}": database size exceeds the defined quota on etcd instance {{ $labels.instance }}, please defrag or increase the quota as the writes to etcd will be disabled when it is full.'
            summary: etcd cluster database is running full.
          expr: (last_over_time(etcd_mvcc_db_total_size_in_bytes{job=~".*etcd.*"}[5m]) / last_over_time(etcd_server_quota_backend_bytes{job=~".*etcd.*"}[5m]))*100 > 95
          for: 10m
          labels:
            severity: critical
        - alert: etcdExcessiveDatabaseGrowth
          annotations:
            description: 'etcd cluster "{{ $labels.job }}": Predicting running out of disk space in the next four hours, based on write observations within the past four hours on etcd instance {{ $labels.instance }}, please check as it might be disruptive.'
            summary: etcd cluster database growing very fast.
          expr: predict_linear(etcd_mvcc_db_total_size_in_bytes{job=~".*etcd.*"}[4h], 4*60*60) > etcd_server_quota_backend_bytes{job=~".*etcd.*"}
          for: 10m
          labels:
            severity: warning
        - alert: etcdDatabaseHighFragmentationRatio
          annotations:
            description: 'etcd cluster "{{ $labels.job }}": database size in use on instance {{ $labels.instance }} is {{ $value | humanizePercentage }} of the actual allocated disk space, please run defragmentation (e.g. etcdctl defrag) to retrieve the unused fragmented disk space.'
            runbook_url: https://etcd.io/docs/v3.5/op-guide/maintenance/#defragmentation
            summary: etcd database size in use is less than 50% of the actual allocated storage.
          expr: (last_over_time(etcd_mvcc_db_total_size_in_use_in_bytes{job=~".*etcd.*"}[5m]) / last_over_time(etcd_mvcc_db_total_size_in_bytes{job=~".*etcd.*"}[5m])) < 0.5 and etcd_mvcc_db_total_size_in_use_in_bytes{job=~".*etcd.*"} > 104857600
          for: 10m
          labels:
            severity: warning
@@ -0,0 +1,129 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/kube-apiserver-availability.rules.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-kube-apiserver-availability.rules
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
    - interval: 3m
      name: kube-apiserver-availability.rules
      rules:
        - expr: avg_over_time(code_verb:apiserver_request_total:increase1h[30d]) * 24 * 30
          record: code_verb:apiserver_request_total:increase30d
        - expr: sum by (cluster, code) (code_verb:apiserver_request_total:increase30d{verb=~"LIST|GET"})
          labels:
            verb: read
          record: code:apiserver_request_total:increase30d
        - expr: sum by (cluster, code) (code_verb:apiserver_request_total:increase30d{verb=~"POST|PUT|PATCH|DELETE"})
          labels:
            verb: write
          record: code:apiserver_request_total:increase30d
        - expr: sum by (cluster, verb, scope) (increase(apiserver_request_sli_duration_seconds_count{job="apiserver"}[1h]))
          record: cluster_verb_scope:apiserver_request_sli_duration_seconds_count:increase1h
        - expr: sum by (cluster, verb, scope) (avg_over_time(cluster_verb_scope:apiserver_request_sli_duration_seconds_count:increase1h[30d]) * 24 * 30)
          record: cluster_verb_scope:apiserver_request_sli_duration_seconds_count:increase30d
        - expr: sum by (cluster, verb, scope, le) (increase(apiserver_request_sli_duration_seconds_bucket[1h]))
          record: cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase1h
        - expr: sum by (cluster, verb, scope, le) (avg_over_time(cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase1h[30d]) * 24 * 30)
          record: cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d
        - expr: |-
            1 - (
              (
                # write too slow
                sum by (cluster) (cluster_verb_scope:apiserver_request_sli_duration_seconds_count:increase30d{verb=~"POST|PUT|PATCH|DELETE"})
                -
                sum by (cluster) (cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d{verb=~"POST|PUT|PATCH|DELETE",le="1"})
              ) +
              (
                # read too slow
                sum by (cluster) (cluster_verb_scope:apiserver_request_sli_duration_seconds_count:increase30d{verb=~"LIST|GET"})
                -
                (
                  (
                    sum by (cluster) (cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope=~"resource|",le="1"})
                    or
                    vector(0)
                  )
                  +
                  sum by (cluster) (cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope="namespace",le="5"})
                  +
                  sum by (cluster) (cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope="cluster",le="30"})
                )
              ) +
              # errors
              sum by (cluster) (code:apiserver_request_total:increase30d{code=~"5.."} or vector(0))
            )
            /
            sum by (cluster) (code:apiserver_request_total:increase30d)
          labels:
            verb: all
          record: apiserver_request:availability30d
        - expr: |-
            1 - (
              sum by (cluster) (cluster_verb_scope:apiserver_request_sli_duration_seconds_count:increase30d{verb=~"LIST|GET"})
              -
              (
                # too slow
                (
                  sum by (cluster) (cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope=~"resource|",le="1"})
                  or
                  vector(0)
                )
                +
                sum by (cluster) (cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope="namespace",le="5"})
                +
                sum by (cluster) (cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope="cluster",le="30"})
              )
              +
              # errors
              sum by (cluster) (code:apiserver_request_total:increase30d{verb="read",code=~"5.."} or vector(0))
            )
            /
            sum by (cluster) (code:apiserver_request_total:increase30d{verb="read"})
          labels:
            verb: read
          record: apiserver_request:availability30d
        - expr: |-
            1 - (
              (
                # too slow
                sum by (cluster) (cluster_verb_scope:apiserver_request_sli_duration_seconds_count:increase30d{verb=~"POST|PUT|PATCH|DELETE"})
                -
                sum by (cluster) (cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d{verb=~"POST|PUT|PATCH|DELETE",le="1"})
              )
              +
              # errors
              sum by (cluster) (code:apiserver_request_total:increase30d{verb="write",code=~"5.."} or vector(0))
            )
            /
            sum by (cluster) (code:apiserver_request_total:increase30d{verb="write"})
          labels:
            verb: write
          record: apiserver_request:availability30d
        - expr: sum by (cluster,code,resource) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[5m]))
          labels:
            verb: read
          record: code_resource:apiserver_request_total:rate5m
        - expr: sum by (cluster,code,resource) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m]))
          labels:
            verb: write
          record: code_resource:apiserver_request_total:rate5m
        - expr: sum by (cluster, code, verb) (increase(apiserver_request_total{job="apiserver",verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"2.."}[1h]))
          record: code_verb:apiserver_request_total:increase1h
        - expr: sum by (cluster, code, verb) (increase(apiserver_request_total{job="apiserver",verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"3.."}[1h]))
          record: code_verb:apiserver_request_total:increase1h
        - expr: sum by (cluster, code, verb) (increase(apiserver_request_total{job="apiserver",verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"4.."}[1h]))
          record: code_verb:apiserver_request_total:increase1h
        - expr: sum by (cluster, code, verb) (increase(apiserver_request_total{job="apiserver",verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"5.."}[1h]))
          record: code_verb:apiserver_request_total:increase1h
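Read together, each availability record reduces to one minus an error ratio over 30 days; as a formula (my paraphrase of the rules above, not chart text):

availability_30d = 1 - (too_slow_requests + 5xx_requests) / total_requests

where "too slow" means slower than the per-scope SLO buckets used in the expressions: 1s for resource-scoped reads, 5s for namespace-scoped reads, 30s for cluster-scoped reads, and 1s for writes.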
@@ -0,0 +1,321 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/kube-apiserver-burnrate.rules.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-kube-apiserver-burnrate.rules
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
    - name: kube-apiserver-burnrate.rules
      rules:
        - expr: |-
            (
              (
                # too slow
                sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[1d]))
                -
                (
                  (
                    sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[1d]))
                    or
                    vector(0)
                  )
                  +
                  sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[1d]))
                  +
                  sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[1d]))
                )
              )
              +
              # errors
              sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[1d]))
            )
            /
            sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[1d]))
          labels:
            verb: read
          record: apiserver_request:burnrate1d
        - expr: |-
            (
              (
                # too slow
                sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[1h]))
                -
                (
                  (
                    sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[1h]))
                    or
                    vector(0)
                  )
                  +
                  sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[1h]))
                  +
                  sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[1h]))
                )
              )
              +
              # errors
              sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[1h]))
            )
            /
            sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[1h]))
          labels:
            verb: read
          record: apiserver_request:burnrate1h
        - expr: |-
            (
              (
                # too slow
                sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[2h]))
                -
                (
                  (
                    sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[2h]))
                    or
                    vector(0)
                  )
                  +
                  sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[2h]))
                  +
                  sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[2h]))
                )
              )
              +
              # errors
              sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[2h]))
            )
            /
            sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[2h]))
          labels:
            verb: read
          record: apiserver_request:burnrate2h
        - expr: |-
            (
              (
                # too slow
                sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[30m]))
                -
                (
                  (
                    sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[30m]))
                    or
                    vector(0)
                  )
                  +
                  sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[30m]))
                  +
                  sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[30m]))
                )
              )
              +
              # errors
              sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[30m]))
            )
            /
            sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[30m]))
          labels:
            verb: read
          record: apiserver_request:burnrate30m
        - expr: |-
            (
              (
                # too slow
                sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[3d]))
                -
                (
                  (
                    sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[3d]))
                    or
                    vector(0)
                  )
                  +
                  sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[3d]))
                  +
                  sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[3d]))
                )
              )
              +
              # errors
              sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[3d]))
            )
            /
            sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[3d]))
          labels:
            verb: read
          record: apiserver_request:burnrate3d
        - expr: |-
            (
              (
                # too slow
                sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[5m]))
                -
                (
                  (
                    sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[5m]))
                    or
                    vector(0)
                  )
                  +
                  sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[5m]))
                  +
                  sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[5m]))
                )
              )
              +
              # errors
              sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[5m]))
            )
            /
            sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[5m]))
          labels:
            verb: read
          record: apiserver_request:burnrate5m
        - expr: |-
            (
              (
                # too slow
                sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[6h]))
                -
                (
                  (
                    sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[6h]))
                    or
                    vector(0)
                  )
                  +
                  sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[6h]))
                  +
                  sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[6h]))
                )
              )
              +
              # errors
              sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[6h]))
            )
            /
            sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[6h]))
          labels:
            verb: read
          record: apiserver_request:burnrate6h
        - expr: |-
            (
              (
                # too slow
                sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[1d]))
                -
                sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[1d]))
              )
              +
              sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[1d]))
            )
            /
            sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1d]))
          labels:
            verb: write
          record: apiserver_request:burnrate1d
        - expr: |-
            (
              (
                # too slow
                sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[1h]))
                -
                sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[1h]))
              )
              +
              sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[1h]))
            )
            /
            sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1h]))
          labels:
            verb: write
          record: apiserver_request:burnrate1h
        - expr: |-
            (
              (
                # too slow
                sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[2h]))
                -
                sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[2h]))
              )
              +
              sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[2h]))
            )
            /
            sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[2h]))
          labels:
            verb: write
          record: apiserver_request:burnrate2h
        - expr: |-
            (
              (
                # too slow
                sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[30m]))
                -
                sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[30m]))
              )
              +
              sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[30m]))
            )
            /
            sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[30m]))
          labels:
            verb: write
          record: apiserver_request:burnrate30m
        - expr: |-
            (
              (
                # too slow
                sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[3d]))
                -
                sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[3d]))
              )
              +
              sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[3d]))
            )
            /
            sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[3d]))
          labels:
            verb: write
          record: apiserver_request:burnrate3d
        - expr: |-
            (
              (
                # too slow
                sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[5m]))
                -
                sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[5m]))
              )
              +
              sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[5m]))
            )
            /
            sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m]))
          labels:
            verb: write
          record: apiserver_request:burnrate5m
        - expr: |-
            (
              (
                # too slow
                sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[6h]))
                -
                sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[6h]))
              )
              +
              sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[6h]))
            )
            /
            sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[6h]))
          labels:
            verb: write
          record: apiserver_request:burnrate6h
@@ -0,0 +1,30 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/kube-apiserver-histogram.rules.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-kube-apiserver-histogram.rules
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
    - name: kube-apiserver-histogram.rules
      rules:
        - expr: histogram_quantile(0.99, sum by (cluster, le, resource) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[5m]))) > 0
          labels:
            quantile: '0.99'
            verb: read
          record: cluster_quantile:apiserver_request_sli_duration_seconds:histogram_quantile
        - expr: histogram_quantile(0.99, sum by (cluster, le, resource) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[5m]))) > 0
          labels:
            quantile: '0.99'
            verb: write
          record: cluster_quantile:apiserver_request_sli_duration_seconds:histogram_quantile
@@ -0,0 +1,76 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/kube-apiserver-slos.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-kube-apiserver-slos
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
    - name: kube-apiserver-slos
      rules:
        - alert: KubeAPIErrorBudgetBurn
          annotations:
            description: The API server is burning too much error budget.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeapierrorbudgetburn
            summary: The API server is burning too much error budget.
          expr: |-
            sum(apiserver_request:burnrate1h) > (14.40 * 0.01000)
            and
            sum(apiserver_request:burnrate5m) > (14.40 * 0.01000)
          for: 2m
          labels:
            long: 1h
            severity: critical
            short: 5m
        - alert: KubeAPIErrorBudgetBurn
          annotations:
            description: The API server is burning too much error budget.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeapierrorbudgetburn
            summary: The API server is burning too much error budget.
          expr: |-
            sum(apiserver_request:burnrate6h) > (6.00 * 0.01000)
            and
            sum(apiserver_request:burnrate30m) > (6.00 * 0.01000)
          for: 15m
          labels:
            long: 6h
            severity: critical
            short: 30m
        - alert: KubeAPIErrorBudgetBurn
          annotations:
            description: The API server is burning too much error budget.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeapierrorbudgetburn
            summary: The API server is burning too much error budget.
          expr: |-
            sum(apiserver_request:burnrate1d) > (3.00 * 0.01000)
            and
            sum(apiserver_request:burnrate2h) > (3.00 * 0.01000)
          for: 1h
          labels:
            long: 1d
            severity: warning
            short: 2h
        - alert: KubeAPIErrorBudgetBurn
          annotations:
            description: The API server is burning too much error budget.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeapierrorbudgetburn
            summary: The API server is burning too much error budget.
          expr: |-
            sum(apiserver_request:burnrate3d) > (1.00 * 0.01000)
            and
            sum(apiserver_request:burnrate6h) > (1.00 * 0.01000)
          for: 3h
          labels:
            long: 3d
            severity: warning
            short: 6h
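These thresholds follow the multi-window, multi-burn-rate pattern from the SRE workbook, assuming a 1% monthly error budget (the 0.01000 factor). A burn rate of 14.4 sustained over 1h consumes 2% of a 30-day budget: 720 h / 1 h x 2% = 14.4; likewise 6 = 720/6 x 5%, 3 = 720/24 x 10%, and 1 = 720/72 x 10%. Each alert also requires the matching short window to still be burning, which stops alerts from firing long after a burst has ended. (This reading is a gloss on the upstream kubernetes-mixin convention, not text shipped with the chart.)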
@@ -0,0 +1,56 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/kube-scheduler.rules.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-kube-scheduler.rules
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
    - name: kube-scheduler.rules
      rules:
        - expr: histogram_quantile(0.99, sum(rate(scheduler_e2e_scheduling_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod))
          labels:
            quantile: '0.99'
          record: cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile
        - expr: histogram_quantile(0.99, sum(rate(scheduler_scheduling_algorithm_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod))
          labels:
            quantile: '0.99'
          record: cluster_quantile:scheduler_scheduling_algorithm_duration_seconds:histogram_quantile
        - expr: histogram_quantile(0.99, sum(rate(scheduler_binding_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod))
          labels:
            quantile: '0.99'
          record: cluster_quantile:scheduler_binding_duration_seconds:histogram_quantile
        - expr: histogram_quantile(0.9, sum(rate(scheduler_e2e_scheduling_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod))
          labels:
            quantile: '0.9'
          record: cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile
        - expr: histogram_quantile(0.9, sum(rate(scheduler_scheduling_algorithm_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod))
          labels:
            quantile: '0.9'
          record: cluster_quantile:scheduler_scheduling_algorithm_duration_seconds:histogram_quantile
        - expr: histogram_quantile(0.9, sum(rate(scheduler_binding_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod))
          labels:
            quantile: '0.9'
          record: cluster_quantile:scheduler_binding_duration_seconds:histogram_quantile
        - expr: histogram_quantile(0.5, sum(rate(scheduler_e2e_scheduling_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod))
          labels:
            quantile: '0.5'
          record: cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile
        - expr: histogram_quantile(0.5, sum(rate(scheduler_scheduling_algorithm_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod))
          labels:
            quantile: '0.5'
          record: cluster_quantile:scheduler_scheduling_algorithm_duration_seconds:histogram_quantile
        - expr: histogram_quantile(0.5, sum(rate(scheduler_binding_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod))
          labels:
            quantile: '0.5'
          record: cluster_quantile:scheduler_binding_duration_seconds:histogram_quantile
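These records precompute scheduler latency quantiles so dashboards can read a cheap, pre-aggregated series instead of re-computing over raw buckets at query time; a panel would query the recorded name directly, e.g. (illustrative PromQL):

cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile{quantile="0.99"}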
@@ -0,0 +1,29 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/kubernetes-system-controller-manager.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-kubernetes-system-controller-manager
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
    - name: kubernetes-system-controller-manager
      rules:
        - alert: KubeControllerManagerDown
          annotations:
            description: KubeControllerManager has disappeared from Prometheus target discovery.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubecontrollermanagerdown
            summary: Target disappeared from Prometheus target discovery.
          expr: absent(up{job="kube-controller-manager"} == 1)
          for: 15m
          labels:
            severity: critical
@@ -0,0 +1,29 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/kubernetes-system-scheduler.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-kubernetes-system-scheduler
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
    - name: kubernetes-system-scheduler
      rules:
        - alert: KubeSchedulerDown
          annotations:
            description: KubeScheduler has disappeared from Prometheus target discovery.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeschedulerdown
            summary: Target disappeared from Prometheus target discovery.
          expr: absent(up{job="kube-scheduler"} == 1)
          for: 15m
          labels:
            severity: critical
@@ -0,0 +1,40 @@
# Source: kube-prometheus-stack/templates/exporters/kube-api-server/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: prometheus-community-kube-apiserver
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack-apiserver
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  endpoints:
    - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
      port: https
      scheme: https
      metricRelabelings:
        - action: drop
          regex: apiserver_request_duration_seconds_bucket;(0.15|0.2|0.3|0.35|0.4|0.45|0.6|0.7|0.8|0.9|1.25|1.5|1.75|2|3|3.5|4|4.5|6|7|8|9|15|25|40|50)
          sourceLabels:
            - __name__
            - le
      tlsConfig:
        caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        serverName: kubernetes
        insecureSkipVerify: false
  jobLabel: component
  namespaceSelector:
    matchNames:
      - default
  selector:
    matchLabels:
      component: apiserver
      provider: kubernetes
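The drop rule discards apiserver_request_duration_seconds_bucket series whose le label matches one of the listed bucket bounds before ingestion, a common cardinality-reduction step given the apiserver's very fine default histogram. For instance, a scraped sample with le="0.2" would be dropped while one with le="0.1" (absent from the regex) would be kept; which buckets actually survive depends on what the apiserver exposes in a given cluster.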
@@ -0,0 +1,33 @@
# Source: kube-prometheus-stack/templates/exporters/kube-controller-manager/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: prometheus-community-kube-kube-controller-manager
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack-kube-controller-manager
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  jobLabel: jobLabel
  selector:
    matchLabels:
      app: kube-prometheus-stack-kube-controller-manager
      release: "prometheus-community"
  namespaceSelector:
    matchNames:
      - "kube-system"
  endpoints:
    - port: http-metrics
      bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
      scheme: https
      tlsConfig:
        caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        insecureSkipVerify: true
@@ -0,0 +1,29 @@
# Source: kube-prometheus-stack/templates/exporters/kube-etcd/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: prometheus-community-kube-kube-etcd
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack-kube-etcd
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  jobLabel: jobLabel
  selector:
    matchLabels:
      app: kube-prometheus-stack-kube-etcd
      release: "prometheus-community"
  namespaceSelector:
    matchNames:
      - "kube-system"
  endpoints:
    - port: http-metrics
      bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
@@ -0,0 +1,33 @@
# Source: kube-prometheus-stack/templates/exporters/kube-scheduler/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: prometheus-community-kube-kube-scheduler
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack-kube-scheduler
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  jobLabel: jobLabel
  selector:
    matchLabels:
      app: kube-prometheus-stack-kube-scheduler
      release: "prometheus-community"
  namespaceSelector:
    matchNames:
      - "kube-system"
  endpoints:
    - port: http-metrics
      bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
      scheme: https
      tlsConfig:
        caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        insecureSkipVerify: true
45
monitor/monitor-control-plan/ressources_no_ns.tf
Normal file
@@ -0,0 +1,45 @@
# first loop through resources in ids_prio[0]
resource "kustomization_resource" "pre_no_ns" {
  for_each = data.kustomization_overlay.data_no_ns.ids_prio[0]

  manifest = (
    contains(["_/Secret"], regex("(?P<group_kind>.*/.*)/.*/.*", each.value)["group_kind"])
    ? sensitive(data.kustomization_overlay.data_no_ns.manifests[each.value])
    : data.kustomization_overlay.data_no_ns.manifests[each.value]
  )
}

# then loop through resources in ids_prio[1]
# and set an explicit depends_on on kustomization_resource.pre_no_ns
# wait up to 5 minutes for any deployment or daemonset to become ready
resource "kustomization_resource" "main_no_ns" {
  for_each = data.kustomization_overlay.data_no_ns.ids_prio[1]

  manifest = (
    contains(["_/Secret"], regex("(?P<group_kind>.*/.*)/.*/.*", each.value)["group_kind"])
    ? sensitive(data.kustomization_overlay.data_no_ns.manifests[each.value])
    : data.kustomization_overlay.data_no_ns.manifests[each.value]
  )
  wait = true
  timeouts {
    create = "5m"
    update = "5m"
  }

  depends_on = [kustomization_resource.pre_no_ns]
}

# finally, loop through resources in ids_prio[2]
# and set an explicit depends_on on kustomization_resource.main_no_ns
resource "kustomization_resource" "post_no_ns" {
  for_each = data.kustomization_overlay.data_no_ns.ids_prio[2]

  manifest = (
    contains(["_/Secret"], regex("(?P<group_kind>.*/.*)/.*/.*", each.value)["group_kind"])
    ? sensitive(data.kustomization_overlay.data_no_ns.manifests[each.value])
    : data.kustomization_overlay.data_no_ns.manifests[each.value]
  )

  depends_on = [kustomization_resource.main_no_ns]
}
@@ -0,0 +1,28 @@
---
# Source: kube-prometheus-stack/templates/exporters/kube-controller-manager/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: prometheus-community-kube-kube-controller-manager
  labels:
    app: kube-prometheus-stack-kube-controller-manager
    jobLabel: kube-controller-manager
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
  namespace: kube-system
spec:
  clusterIP: None
  ports:
    - name: http-metrics
      port: 10257
      protocol: TCP
      targetPort: 10257
  selector:
    component: kube-controller-manager
  type: ClusterIP
@@ -0,0 +1,27 @@
# Source: kube-prometheus-stack/templates/exporters/kube-etcd/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: prometheus-community-kube-kube-etcd
  labels:
    app: kube-prometheus-stack-kube-etcd
    jobLabel: kube-etcd
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
  namespace: kube-system
spec:
  clusterIP: None
  ports:
    - name: http-metrics
      port: 2381
      protocol: TCP
      targetPort: 2381
  selector:
    component: etcd
  type: ClusterIP
@@ -0,0 +1,27 @@
# Source: kube-prometheus-stack/templates/exporters/kube-scheduler/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: prometheus-community-kube-kube-scheduler
  labels:
    app: kube-prometheus-stack-kube-scheduler
    jobLabel: kube-scheduler
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
  namespace: kube-system
spec:
  clusterIP: None
  ports:
    - name: http-metrics
      port: 10259
      protocol: TCP
      targetPort: 10259
  selector:
    component: kube-scheduler
  type: ClusterIP
@@ -0,0 +1,119 @@
|
||||
# Source: kube-prometheus-stack/charts/prometheus-node-exporter/templates/daemonset.yaml
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: prometheus-community-prometheus-node-exporter
|
||||
namespace: vynil-monitor
|
||||
labels:
|
||||
helm.sh/chart: prometheus-node-exporter-4.25.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: metrics
|
||||
app.kubernetes.io/part-of: prometheus-node-exporter
|
||||
app.kubernetes.io/name: prometheus-node-exporter
|
||||
app.kubernetes.io/instance: prometheus-community
|
||||
app.kubernetes.io/version: "1.7.0"
|
||||
jobLabel: node-exporter
|
||||
release: prometheus-community
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: prometheus-node-exporter
|
||||
app.kubernetes.io/instance: prometheus-community
|
||||
revisionHistoryLimit: 10
|
||||
updateStrategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 1
|
||||
type: RollingUpdate
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
cluster-autoscaler.kubernetes.io/safe-to-evict: "true"
|
||||
labels:
|
||||
helm.sh/chart: prometheus-node-exporter-4.25.0
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/component: metrics
|
||||
app.kubernetes.io/part-of: prometheus-node-exporter
|
||||
app.kubernetes.io/name: prometheus-node-exporter
|
||||
app.kubernetes.io/instance: prometheus-community
|
||||
app.kubernetes.io/version: "1.7.0"
|
||||
jobLabel: node-exporter
|
||||
release: prometheus-community
|
||||
spec:
|
||||
automountServiceAccountToken: false
|
||||
securityContext:
|
||||
fsGroup: 65534
|
||||
runAsGroup: 65534
|
||||
runAsNonRoot: true
|
||||
runAsUser: 65534
|
||||
serviceAccountName: prometheus-community-prometheus-node-exporter
|
||||
containers:
|
||||
- name: node-exporter
|
||||
image: quay.io/prometheus/node-exporter:v1.7.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- --path.procfs=/host/proc
|
||||
- --path.sysfs=/host/sys
|
||||
- --path.rootfs=/host/root
|
||||
- --path.udev.data=/host/root/run/udev/data
|
||||
- --web.listen-address=[$(HOST_IP)]:9100
|
||||
- --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/)
|
||||
- --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$
|
||||
securityContext:
|
||||
readOnlyRootFilesystem: true
|
||||
env:
|
||||
- name: HOST_IP
|
||||
value: 0.0.0.0
|
||||
ports:
|
||||
- name: http-metrics
|
||||
containerPort: 9100
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
httpGet:
|
||||
httpHeaders:
|
||||
path: /
|
||||
port: 9100
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 0
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 1
|
||||
readinessProbe:
|
||||
failureThreshold: 3
|
||||
httpGet:
|
||||
httpHeaders:
|
||||
path: /
|
||||
port: 9100
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 0
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 1
|
||||
volumeMounts:
|
||||
- name: proc
|
||||
mountPath: /host/proc
|
||||
readOnly: true
|
||||
- name: sys
|
||||
mountPath: /host/sys
|
||||
readOnly: true
|
||||
- name: root
|
||||
mountPath: /host/root
|
||||
mountPropagation: HostToContainer
|
||||
readOnly: true
|
||||
hostNetwork: true
|
||||
hostPID: true
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
volumes:
|
||||
- name: proc
|
||||
hostPath:
|
||||
path: /proc
|
||||
- name: sys
|
||||
hostPath:
|
||||
path: /sys
|
||||
- name: root
|
||||
hostPath:
|
||||
path: /
|
||||
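# Note on the flags above: with hostNetwork: true the exporter binds directly
# on the node, and --web.listen-address=[$(HOST_IP)]:9100 takes its bind
# address from the HOST_IP env var (0.0.0.0 here); the bracket syntax keeps
# the flag valid should HOST_IP ever be set to an IPv6 address.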
21
monitor/node-exporter/datas.tf
Normal file
@@ -0,0 +1,21 @@
locals {
  common-labels = {
    "vynil.solidite.fr/owner-name" = var.instance
    "vynil.solidite.fr/owner-namespace" = var.namespace
    "vynil.solidite.fr/owner-category" = var.category
    "vynil.solidite.fr/owner-component" = var.component
    "app.kubernetes.io/managed-by" = "vynil"
    "app.kubernetes.io/instance" = var.instance
  }
}

data "kustomization_overlay" "data" {
  common_labels = local.common-labels
  namespace = var.namespace
  resources = [for file in fileset(path.module, "*.yaml"): file if file != "index.yaml"]
  images {
    name = "quay.io/prometheus/node-exporter"
    new_name = "${var.images.node-exporter.registry}/${var.images.node-exporter.repository}"
    new_tag = "${var.images.node-exporter.tag}"
  }
}
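# A minimal sketch of how the images override above resolves, assuming the
# hypothetical option values below (illustrations, not defaults enforced by
# this module):
#
#   images = {
#     node-exporter = {
#       registry   = "registry.example.com"
#       repository = "mirror/node-exporter"
#       tag        = "v1.7.0"
#     }
#   }
#
# kustomize would then rewrite quay.io/prometheus/node-exporter:v1.7.0 in the
# DaemonSet to registry.example.com/mirror/node-exporter:v1.7.0.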
57
monitor/node-exporter/index.yaml
Normal file
@@ -0,0 +1,57 @@
---
apiVersion: vinyl.solidite.fr/v1beta1
kind: Component
category: monitor
metadata:
  name: node-exporter
description: null
options:
  images:
    default:
      node-exporter:
        pullPolicy: IfNotPresent
        registry: quay.io
        repository: prometheus/node-exporter
        tag: v1.7.0
    examples:
    - node-exporter:
        pullPolicy: IfNotPresent
        registry: quay.io
        repository: prometheus/node-exporter
        tag: v1.7.0
    properties:
      node-exporter:
        default:
          pullPolicy: IfNotPresent
          registry: quay.io
          repository: prometheus/node-exporter
          tag: v1.7.0
        properties:
          pullPolicy:
            default: IfNotPresent
            enum:
            - Always
            - Never
            - IfNotPresent
            type: string
          registry:
            default: quay.io
            type: string
          repository:
            default: prometheus/node-exporter
            type: string
          tag:
            default: v1.7.0
            type: string
        type: object
    type: object
dependencies: []
providers:
  kubernetes: true
  authentik: null
  kubectl: true
  postgresql: null
  restapi: null
  http: null
  gitea: null
  tfaddtype: null
@@ -0,0 +1,82 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/node-exporter.rules.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-node-exporter.rules
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack

    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
  - name: node-exporter.rules
    rules:
    - expr: |-
        count without (cpu, mode) (
          node_cpu_seconds_total{job="node-exporter",mode="idle"}
        )
      record: instance:node_num_cpu:sum
    - expr: |-
        1 - avg without (cpu) (
          sum without (mode) (rate(node_cpu_seconds_total{job="node-exporter", mode=~"idle|iowait|steal"}[5m]))
        )
      record: instance:node_cpu_utilisation:rate5m
    - expr: |-
        (
          node_load1{job="node-exporter"}
        /
          instance:node_num_cpu:sum{job="node-exporter"}
        )
      record: instance:node_load1_per_cpu:ratio
    - expr: |-
        1 - (
          (
            node_memory_MemAvailable_bytes{job="node-exporter"}
            or
            (
              node_memory_Buffers_bytes{job="node-exporter"}
              +
              node_memory_Cached_bytes{job="node-exporter"}
              +
              node_memory_MemFree_bytes{job="node-exporter"}
              +
              node_memory_Slab_bytes{job="node-exporter"}
            )
          )
        /
          node_memory_MemTotal_bytes{job="node-exporter"}
        )
      record: instance:node_memory_utilisation:ratio
    - expr: rate(node_vmstat_pgmajfault{job="node-exporter"}[5m])
      record: instance:node_vmstat_pgmajfault:rate5m
    - expr: rate(node_disk_io_time_seconds_total{job="node-exporter", device=~"(/dev/)?(mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|md.+|dasd.+)"}[5m])
      record: instance_device:node_disk_io_time_seconds:rate5m
    - expr: rate(node_disk_io_time_weighted_seconds_total{job="node-exporter", device=~"(/dev/)?(mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|md.+|dasd.+)"}[5m])
      record: instance_device:node_disk_io_time_weighted_seconds:rate5m
    - expr: |-
        sum without (device) (
          rate(node_network_receive_bytes_total{job="node-exporter", device!="lo"}[5m])
        )
      record: instance:node_network_receive_bytes_excluding_lo:rate5m
    - expr: |-
        sum without (device) (
          rate(node_network_transmit_bytes_total{job="node-exporter", device!="lo"}[5m])
        )
      record: instance:node_network_transmit_bytes_excluding_lo:rate5m
    - expr: |-
        sum without (device) (
          rate(node_network_receive_drop_total{job="node-exporter", device!="lo"}[5m])
        )
      record: instance:node_network_receive_drop_excluding_lo:rate5m
    - expr: |-
        sum without (device) (
          rate(node_network_transmit_drop_total{job="node-exporter", device!="lo"}[5m])
        )
      record: instance:node_network_transmit_drop_excluding_lo:rate5m
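# Reading instance:node_memory_utilisation:ratio above: the expression prefers
# node_memory_MemAvailable_bytes and, on kernels too old to expose it, falls
# back (via the PromQL `or`) to Buffers + Cached + MemFree + Slab as an
# approximation of available memory; utilisation is then 1 - available/total.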
@@ -0,0 +1,328 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/node-exporter.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-node-exporter
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack

    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
  - name: node-exporter
    rules:
    - alert: NodeFilesystemSpaceFillingUp
      annotations:
        description: Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left and is filling up.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemspacefillingup
        summary: Filesystem is predicted to run out of space within the next 24 hours.
      expr: |-
        (
          node_filesystem_avail_bytes{job="node-exporter",fstype!="",mountpoint!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!="",mountpoint!=""} * 100 < 15
        and
          predict_linear(node_filesystem_avail_bytes{job="node-exporter",fstype!="",mountpoint!=""}[6h], 24*60*60) < 0
        and
          node_filesystem_readonly{job="node-exporter",fstype!="",mountpoint!=""} == 0
        )
      for: 1h
      labels:
        severity: warning
    - alert: NodeFilesystemSpaceFillingUp
      annotations:
        description: Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left and is filling up fast.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemspacefillingup
        summary: Filesystem is predicted to run out of space within the next 4 hours.
      expr: |-
        (
          node_filesystem_avail_bytes{job="node-exporter",fstype!="",mountpoint!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!="",mountpoint!=""} * 100 < 10
        and
          predict_linear(node_filesystem_avail_bytes{job="node-exporter",fstype!="",mountpoint!=""}[6h], 4*60*60) < 0
        and
          node_filesystem_readonly{job="node-exporter",fstype!="",mountpoint!=""} == 0
        )
      for: 1h
      labels:
        severity: critical
    - alert: NodeFilesystemAlmostOutOfSpace
      annotations:
        description: Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemalmostoutofspace
        summary: Filesystem has less than 5% space left.
      expr: |-
        (
          node_filesystem_avail_bytes{job="node-exporter",fstype!="",mountpoint!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!="",mountpoint!=""} * 100 < 5
        and
          node_filesystem_readonly{job="node-exporter",fstype!="",mountpoint!=""} == 0
        )
      for: 30m
      labels:
        severity: warning
    - alert: NodeFilesystemAlmostOutOfSpace
      annotations:
        description: Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemalmostoutofspace
        summary: Filesystem has less than 3% space left.
      expr: |-
        (
          node_filesystem_avail_bytes{job="node-exporter",fstype!="",mountpoint!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!="",mountpoint!=""} * 100 < 3
        and
          node_filesystem_readonly{job="node-exporter",fstype!="",mountpoint!=""} == 0
        )
      for: 30m
      labels:
        severity: critical
    - alert: NodeFilesystemFilesFillingUp
      annotations:
        description: Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemfilesfillingup
        summary: Filesystem is predicted to run out of inodes within the next 24 hours.
      expr: |-
        (
          node_filesystem_files_free{job="node-exporter",fstype!="",mountpoint!=""} / node_filesystem_files{job="node-exporter",fstype!="",mountpoint!=""} * 100 < 40
        and
          predict_linear(node_filesystem_files_free{job="node-exporter",fstype!="",mountpoint!=""}[6h], 24*60*60) < 0
        and
          node_filesystem_readonly{job="node-exporter",fstype!="",mountpoint!=""} == 0
        )
      for: 1h
      labels:
        severity: warning
    - alert: NodeFilesystemFilesFillingUp
      annotations:
        description: Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up fast.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemfilesfillingup
        summary: Filesystem is predicted to run out of inodes within the next 4 hours.
      expr: |-
        (
          node_filesystem_files_free{job="node-exporter",fstype!="",mountpoint!=""} / node_filesystem_files{job="node-exporter",fstype!="",mountpoint!=""} * 100 < 20
        and
          predict_linear(node_filesystem_files_free{job="node-exporter",fstype!="",mountpoint!=""}[6h], 4*60*60) < 0
        and
          node_filesystem_readonly{job="node-exporter",fstype!="",mountpoint!=""} == 0
        )
      for: 1h
      labels:
        severity: critical
    - alert: NodeFilesystemAlmostOutOfFiles
      annotations:
        description: Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemalmostoutoffiles
        summary: Filesystem has less than 5% inodes left.
      expr: |-
        (
          node_filesystem_files_free{job="node-exporter",fstype!="",mountpoint!=""} / node_filesystem_files{job="node-exporter",fstype!="",mountpoint!=""} * 100 < 5
        and
          node_filesystem_readonly{job="node-exporter",fstype!="",mountpoint!=""} == 0
        )
      for: 1h
      labels:
        severity: warning
    - alert: NodeFilesystemAlmostOutOfFiles
      annotations:
        description: Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemalmostoutoffiles
        summary: Filesystem has less than 3% inodes left.
      expr: |-
        (
          node_filesystem_files_free{job="node-exporter",fstype!="",mountpoint!=""} / node_filesystem_files{job="node-exporter",fstype!="",mountpoint!=""} * 100 < 3
        and
          node_filesystem_readonly{job="node-exporter",fstype!="",mountpoint!=""} == 0
        )
      for: 1h
      labels:
        severity: critical
    - alert: NodeNetworkReceiveErrs
      annotations:
        description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} receive errors in the last two minutes.'
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodenetworkreceiveerrs
        summary: Network interface is reporting many receive errors.
      expr: rate(node_network_receive_errs_total{job="node-exporter"}[2m]) / rate(node_network_receive_packets_total{job="node-exporter"}[2m]) > 0.01
      for: 1h
      labels:
        severity: warning
    - alert: NodeNetworkTransmitErrs
      annotations:
        description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} transmit errors in the last two minutes.'
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodenetworktransmiterrs
        summary: Network interface is reporting many transmit errors.
      expr: rate(node_network_transmit_errs_total{job="node-exporter"}[2m]) / rate(node_network_transmit_packets_total{job="node-exporter"}[2m]) > 0.01
      for: 1h
      labels:
        severity: warning
    - alert: NodeHighNumberConntrackEntriesUsed
      annotations:
        description: '{{ $value | humanizePercentage }} of conntrack entries are used.'
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodehighnumberconntrackentriesused
        summary: Number of conntrack are getting close to the limit.
      expr: (node_nf_conntrack_entries{job="node-exporter"} / node_nf_conntrack_entries_limit) > 0.75
      labels:
        severity: warning
    - alert: NodeTextFileCollectorScrapeError
      annotations:
        description: Node Exporter text file collector on {{ $labels.instance }} failed to scrape.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodetextfilecollectorscrapeerror
        summary: Node Exporter text file collector failed to scrape.
      expr: node_textfile_scrape_error{job="node-exporter"} == 1
      labels:
        severity: warning
    - alert: NodeClockSkewDetected
      annotations:
        description: Clock at {{ $labels.instance }} is out of sync by more than 0.05s. Ensure NTP is configured correctly on this host.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodeclockskewdetected
        summary: Clock skew detected.
      expr: |-
        (
          node_timex_offset_seconds{job="node-exporter"} > 0.05
        and
          deriv(node_timex_offset_seconds{job="node-exporter"}[5m]) >= 0
        )
        or
        (
          node_timex_offset_seconds{job="node-exporter"} < -0.05
        and
          deriv(node_timex_offset_seconds{job="node-exporter"}[5m]) <= 0
        )
      for: 10m
      labels:
        severity: warning
    - alert: NodeClockNotSynchronising
      annotations:
        description: Clock at {{ $labels.instance }} is not synchronising. Ensure NTP is configured on this host.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodeclocknotsynchronising
        summary: Clock not synchronising.
      expr: |-
        min_over_time(node_timex_sync_status{job="node-exporter"}[5m]) == 0
        and
        node_timex_maxerror_seconds{job="node-exporter"} >= 16
      for: 10m
      labels:
        severity: warning
    - alert: NodeRAIDDegraded
      annotations:
        description: RAID array '{{ $labels.device }}' at {{ $labels.instance }} is in degraded state due to one or more disks failures. Number of spare drives is insufficient to fix issue automatically.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/noderaiddegraded
        summary: RAID Array is degraded.
      expr: node_md_disks_required{job="node-exporter",device=~"(/dev/)?(mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|md.+|dasd.+)"} - ignoring (state) (node_md_disks{state="active",job="node-exporter",device=~"(/dev/)?(mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|md.+|dasd.+)"}) > 0
      for: 15m
      labels:
        severity: critical
    - alert: NodeRAIDDiskFailure
      annotations:
        description: At least one device in RAID array at {{ $labels.instance }} failed. Array '{{ $labels.device }}' needs attention and possibly a disk swap.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/noderaiddiskfailure
        summary: Failed device in RAID array.
      expr: node_md_disks{state="failed",job="node-exporter",device=~"(/dev/)?(mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|md.+|dasd.+)"} > 0
      labels:
        severity: warning
    - alert: NodeFileDescriptorLimit
      annotations:
        description: File descriptors limit at {{ $labels.instance }} is currently at {{ printf "%.2f" $value }}%.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefiledescriptorlimit
        summary: Kernel is predicted to exhaust file descriptors limit soon.
      expr: |-
        (
          node_filefd_allocated{job="node-exporter"} * 100 / node_filefd_maximum{job="node-exporter"} > 70
        )
      for: 15m
      labels:
        severity: warning
    - alert: NodeFileDescriptorLimit
      annotations:
        description: File descriptors limit at {{ $labels.instance }} is currently at {{ printf "%.2f" $value }}%.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefiledescriptorlimit
        summary: Kernel is predicted to exhaust file descriptors limit soon.
      expr: |-
        (
          node_filefd_allocated{job="node-exporter"} * 100 / node_filefd_maximum{job="node-exporter"} > 90
        )
      for: 15m
      labels:
        severity: critical
    - alert: NodeCPUHighUsage
      annotations:
        description: 'CPU usage at {{ $labels.instance }} has been above 90% for the last 15 minutes, is currently at {{ printf "%.2f" $value }}%.

          '
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodecpuhighusage
        summary: High CPU usage.
      expr: sum without(mode) (avg without (cpu) (rate(node_cpu_seconds_total{job="node-exporter", mode!="idle"}[2m]))) * 100 > 90
      for: 15m
      labels:
        severity: info
    - alert: NodeSystemSaturation
      annotations:
        description: 'System load per core at {{ $labels.instance }} has been above 2 for the last 15 minutes, is currently at {{ printf "%.2f" $value }}.

          This might indicate this instance resources saturation and can cause it becoming unresponsive.

          '
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodesystemsaturation
        summary: System saturated, load per core is very high.
      expr: |-
        node_load1{job="node-exporter"}
        / count without (cpu, mode) (node_cpu_seconds_total{job="node-exporter", mode="idle"}) > 2
      for: 15m
      labels:
        severity: warning
    - alert: NodeMemoryMajorPagesFaults
      annotations:
        description: 'Memory major pages are occurring at very high rate at {{ $labels.instance }}, 500 major page faults per second for the last 15 minutes, is currently at {{ printf "%.2f" $value }}.

          Please check that there is enough memory available at this instance.

          '
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodememorymajorpagesfaults
        summary: Memory major page faults are occurring at very high rate.
      expr: rate(node_vmstat_pgmajfault{job="node-exporter"}[5m]) > 500
      for: 15m
      labels:
        severity: warning
    - alert: NodeMemoryHighUtilization
      annotations:
        description: 'Memory is filling up at {{ $labels.instance }}, has been above 90% for the last 15 minutes, is currently at {{ printf "%.2f" $value }}%.

          '
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodememoryhighutilization
        summary: Host is running out of memory.
      expr: 100 - (node_memory_MemAvailable_bytes{job="node-exporter"} / node_memory_MemTotal_bytes{job="node-exporter"} * 100) > 90
      for: 15m
      labels:
        severity: warning
    - alert: NodeDiskIOSaturation
      annotations:
        description: 'Disk IO queue (aqu-sq) is high on {{ $labels.device }} at {{ $labels.instance }}, has been above 10 for the last 15 minutes, is currently at {{ printf "%.2f" $value }}.

          This symptom might indicate disk saturation.

          '
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodediskiosaturation
        summary: Disk IO queue is high.
      expr: rate(node_disk_io_time_weighted_seconds_total{job="node-exporter", device=~"(/dev/)?(mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|md.+|dasd.+)"}[5m]) > 10
      for: 30m
      labels:
        severity: warning
    - alert: NodeSystemdServiceFailed
      annotations:
        description: Systemd service {{ $labels.name }} has entered failed state at {{ $labels.instance }}
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodesystemdservicefailed
        summary: Systemd service has entered failed state.
      expr: node_systemd_unit_state{job="node-exporter", state="failed"} == 1
      for: 5m
      labels:
        severity: warning
    - alert: NodeBondingDegraded
      annotations:
        description: Bonding interface {{ $labels.master }} on {{ $labels.instance }} is in degraded state due to one or more slave failures.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodebondingdegraded
        summary: Bonding interface is degraded
      expr: (node_bonding_slaves - node_bonding_active) != 0
      for: 5m
      labels:
        severity: warning
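# The NodeFilesystem*FillingUp alerts above pair a static free-space (or
# free-inode) threshold with predict_linear(...[6h], N*60*60) < 0: they only
# fire when the linear trend of the last 6 hours extrapolates to exhaustion
# within N hours, which keeps nearly-full but stable filesystems quiet.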
@@ -0,0 +1,29 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/node-network.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-node-network
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack

    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
  - name: node-network
    rules:
    - alert: NodeNetworkInterfaceFlapping
      annotations:
        description: Network interface "{{ $labels.device }}" changing its up status often on node-exporter {{ $labels.namespace }}/{{ $labels.pod }}
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/general/nodenetworkinterfaceflapping
        summary: Network interface is often changing its status
      expr: changes(node_network_up{job="node-exporter",device!~"veth.+"}[2m]) > 2
      for: 2m
      labels:
        severity: warning
@@ -0,0 +1,56 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/node.rules.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-node.rules
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack

    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
  - name: node.rules
    rules:
    - expr: |-
        topk by (cluster, namespace, pod) (1,
          max by (cluster, node, namespace, pod) (
            label_replace(kube_pod_info{job="kube-state-metrics",node!=""}, "pod", "$1", "pod", "(.*)")
        ))
      record: 'node_namespace_pod:kube_pod_info:'
    - expr: |-
        count by (cluster, node) (
          node_cpu_seconds_total{mode="idle",job="node-exporter"}
          * on (cluster, namespace, pod) group_left(node)
          topk by (cluster, namespace, pod) (1, node_namespace_pod:kube_pod_info:)
        )
      record: node:node_num_cpu:sum
    - expr: |-
        sum(
          node_memory_MemAvailable_bytes{job="node-exporter"} or
          (
            node_memory_Buffers_bytes{job="node-exporter"} +
            node_memory_Cached_bytes{job="node-exporter"} +
            node_memory_MemFree_bytes{job="node-exporter"} +
            node_memory_Slab_bytes{job="node-exporter"}
          )
        ) by (cluster)
      record: :node_memory_MemAvailable_bytes:sum
    - expr: |-
        avg by (cluster, node) (
          sum without (mode) (
            rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal",job="node-exporter"}[5m])
          )
        )
      record: node:node_cpu_utilization:ratio_rate5m
    - expr: |-
        avg by (cluster) (
          node:node_cpu_utilization:ratio_rate5m
        )
      record: cluster:node_cpu:ratio_rate5m
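# node_namespace_pod:kube_pod_info: above wraps kube_pod_info in
# topk by (cluster, namespace, pod) (1, ...) so exactly one info series
# survives per pod; the later group_left(node) joins therefore cannot fail
# with many-to-many matching errors if kube-state-metrics briefly exposes
# duplicate kube_pod_info series during a rollout.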
@@ -0,0 +1,28 @@
# Source: kube-prometheus-stack/charts/prometheus-node-exporter/templates/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: prometheus-community-prometheus-node-exporter
  namespace: vynil-monitor
  labels:
    helm.sh/chart: prometheus-node-exporter-4.25.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: metrics
    app.kubernetes.io/part-of: prometheus-node-exporter
    app.kubernetes.io/name: prometheus-node-exporter
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "1.7.0"
    jobLabel: node-exporter
    release: prometheus-community
spec:
  jobLabel: jobLabel

  selector:
    matchLabels:
      app.kubernetes.io/name: prometheus-node-exporter
      app.kubernetes.io/instance: prometheus-community
  attachMetadata:
    node: false
  endpoints:
  - port: http-metrics
    scheme: http
@@ -0,0 +1,17 @@
---
# Source: kube-prometheus-stack/charts/prometheus-node-exporter/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: prometheus-community-prometheus-node-exporter
  namespace: vynil-monitor
  labels:
    helm.sh/chart: prometheus-node-exporter-4.25.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: metrics
    app.kubernetes.io/part-of: prometheus-node-exporter
    app.kubernetes.io/name: prometheus-node-exporter
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "1.7.0"
    jobLabel: node-exporter
    release: prometheus-community
@@ -0,0 +1,28 @@
# Source: kube-prometheus-stack/charts/prometheus-node-exporter/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: prometheus-community-prometheus-node-exporter
  namespace: vynil-monitor
  labels:
    helm.sh/chart: prometheus-node-exporter-4.25.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: metrics
    app.kubernetes.io/part-of: prometheus-node-exporter
    app.kubernetes.io/name: prometheus-node-exporter
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "1.7.0"
    jobLabel: node-exporter
    release: prometheus-community
  annotations:
    prometheus.io/scrape: "true"
spec:
  type: ClusterIP
  ports:
  - port: 9100
    targetPort: 9100
    protocol: TCP
    name: http-metrics
  selector:
    app.kubernetes.io/name: prometheus-node-exporter
    app.kubernetes.io/instance: prometheus-community
222
monitor/prometheus/datas.tf
Normal file
@@ -0,0 +1,222 @@
locals {
  common-labels = {
    "vynil.solidite.fr/owner-name" = var.instance
    "vynil.solidite.fr/owner-namespace" = var.namespace
    "vynil.solidite.fr/owner-category" = var.category
    "vynil.solidite.fr/owner-component" = var.component
    "app.kubernetes.io/managed-by" = "vynil"
    "app.kubernetes.io/instance" = var.instance
  }
  rb-patch = <<-EOF
    - op: replace
      path: /subjects/0/namespace
      value: "${var.namespace}"
  EOF
}

data "kubernetes_secret_v1" "authentik" {
  metadata {
    name = "authentik"
    namespace = "${var.domain}-auth"
  }
}

data "kustomization_overlay" "data" {
  common_labels = local.common-labels
  namespace = var.namespace
  resources = [for file in fileset(path.module, "*.yaml"): file if file != "index.yaml" && length(regexall("ClusterRole",file))<1]
  patches {
    target {
      kind = "Prometheus"
      name = "prometheus-community-kube-prometheus"
    }
    patch = <<-EOF
      apiVersion: monitoring.coreos.com/v1
      kind: Prometheus
      metadata:
        name: prometheus-community-kube-prometheus
      spec:
        image: "${var.images.prometheus.registry}/${var.images.prometheus.repository}:${var.images.prometheus.tag}"
        version: ${var.images.prometheus.tag}
        externalUrl: http://prometheus-community-kube-prometheus.${var.namespace}:9090
        replicas: ${var.replicas}
        shards: ${var.shards}
        logLevel: ${var.logLevel}
        listenLocal: ${var.listenLocal}
        enableAdminAPI: ${var.enableAdminAPI}
        retention: "${var.retention}"
    EOF
  }
  patches {
    target {
      kind = "ConfigMap"
      name = "prometheus-community-kube-grafana-datasource"
    }
    patch = <<-EOF
      apiVersion: v1
      kind: ConfigMap
      metadata:
        name: prometheus-community-kube-grafana-datasource
      data:
        datasource.yaml: |-
          apiVersion: 1
          datasources:
          - name: Prometheus
            type: prometheus
            uid: prometheus
            url: http://prometheus-community-kube-prometheus.${var.namespace}:9090/
            access: proxy
            isDefault: false
            jsonData:
              httpMethod: POST
              timeInterval: 30s
    EOF
  }
  patches {
    target {
      kind = "ServiceMonitor"
      name = "prometheus-community-kube-prometheus"
    }
    patch = <<-EOF
      - op: replace
        path: /spec/namespaceSelector/matchNames/0
        value: "${var.namespace}"
    EOF
  }

  patches {
    target {
      kind = "PrometheusRule"
      name = "prometheus-community-kube-prometheus"
    }
    patch = <<-EOF
      apiVersion: monitoring.coreos.com/v1
      kind: PrometheusRule
      metadata:
        name: prometheus-community-kube-prometheus
      spec:
        groups:
        - name: prometheus
          rules:
          - alert: PrometheusBadConfig
            expr: |-
              # Without max_over_time, failed scrapes could create false negatives, see
              # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
              max_over_time(prometheus_config_last_reload_successful{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m]) == 0
          - alert: PrometheusSDRefreshFailure
            expr: increase(prometheus_sd_refresh_failures_total{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[10m]) > 0
          - alert: PrometheusNotificationQueueRunningFull
            expr: |-
              # Without min_over_time, failed scrapes could create false negatives, see
              # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
              (
                predict_linear(prometheus_notifications_queue_length{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m], 60 * 30)
              >
                min_over_time(prometheus_notifications_queue_capacity{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m])
              )
          - alert: PrometheusErrorSendingAlertsToSomeAlertmanagers
            expr: |-
              (
                rate(prometheus_notifications_errors_total{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m])
              /
                rate(prometheus_notifications_sent_total{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m])
              )
              * 100
              > 1
          - alert: PrometheusNotConnectedToAlertmanagers
            expr: |-
              # Without max_over_time, failed scrapes could create false negatives, see
              # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
              max_over_time(prometheus_notifications_alertmanagers_discovered{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m]) < 1
          - alert: PrometheusTSDBReloadsFailing
            expr: increase(prometheus_tsdb_reloads_failures_total{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[3h]) > 0
          - alert: PrometheusTSDBCompactionsFailing
            expr: increase(prometheus_tsdb_compactions_failed_total{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[3h]) > 0
          - alert: PrometheusNotIngestingSamples
            expr: |-
              (
                rate(prometheus_tsdb_head_samples_appended_total{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m]) <= 0
              and
                (
                  sum without(scrape_job) (prometheus_target_metadata_cache_entries{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}) > 0
                or
                  sum without(rule_group) (prometheus_rule_group_rules{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}) > 0
                )
              )
          - alert: PrometheusDuplicateTimestamps
            expr: rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m]) > 0
          - alert: PrometheusOutOfOrderTimestamps
            expr: rate(prometheus_target_scrapes_sample_out_of_order_total{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m]) > 0
          - alert: PrometheusRemoteStorageFailures
            expr: |-
              (
                (rate(prometheus_remote_storage_failed_samples_total{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m]))
              /
                (
                  (rate(prometheus_remote_storage_failed_samples_total{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m]))
                +
                  (rate(prometheus_remote_storage_succeeded_samples_total{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m]) or rate(prometheus_remote_storage_samples_total{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m]))
                )
              )
              * 100
              > 1
          - alert: PrometheusRemoteWriteBehind
            expr: |-
              # Without max_over_time, failed scrapes could create false negatives, see
              # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
              (
                max_over_time(prometheus_remote_storage_highest_timestamp_in_seconds{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m])
              - ignoring(remote_name, url) group_right
                max_over_time(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m])
              )
              > 120
          - alert: PrometheusRemoteWriteDesiredShards
            expr: |-
              # Without max_over_time, failed scrapes could create false negatives, see
              # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
              (
                max_over_time(prometheus_remote_storage_shards_desired{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m])
              >
                max_over_time(prometheus_remote_storage_shards_max{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m])
              )
          - alert: PrometheusRuleFailures
            expr: increase(prometheus_rule_evaluation_failures_total{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m]) > 0
          - alert: PrometheusMissingRuleEvaluations
            expr: increase(prometheus_rule_group_iterations_missed_total{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m]) > 0
          - alert: PrometheusTargetLimitHit
            expr: increase(prometheus_target_scrape_pool_exceeded_target_limit_total{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m]) > 0
          - alert: PrometheusLabelLimitHit
            expr: increase(prometheus_target_scrape_pool_exceeded_label_limits_total{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m]) > 0
          - alert: PrometheusScrapeBodySizeLimitHit
            expr: increase(prometheus_target_scrapes_exceeded_body_size_limit_total{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m]) > 0
          - alert: PrometheusScrapeSampleLimitHit
            expr: increase(prometheus_target_scrapes_exceeded_sample_limit_total{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m]) > 0
          - alert: PrometheusTargetSyncFailure
            expr: increase(prometheus_target_sync_failed_total{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[30m]) > 0
          - alert: PrometheusHighQueryLoad
            expr: avg_over_time(prometheus_engine_queries{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m]) / max_over_time(prometheus_engine_queries_concurrent_max{job="prometheus-community-kube-prometheus",namespace="${var.namespace}"}[5m]) > 0.8
          - alert: PrometheusErrorSendingAlertsToAnyAlertmanager
            expr: |-
              min without (alertmanager) (
                rate(prometheus_notifications_errors_total{job="prometheus-community-kube-prometheus",namespace="${var.namespace}",alertmanager!~``}[5m])
              /
                rate(prometheus_notifications_sent_total{job="prometheus-community-kube-prometheus",namespace="${var.namespace}",alertmanager!~``}[5m])
              )
              * 100
              > 3
    EOF
  }
}

data "kustomization_overlay" "data_no_ns" {
  common_labels = local.common-labels
  resources = [for file in fileset(path.module, "*.yaml"): file if length(regexall("ClusterRole",file))>0]

  patches {
    target {
      kind = "ClusterRoleBinding"
      name = "prometheus-community-kube-prometheus"
    }
    patch = local.rb-patch
  }
}
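# The two overlays split the rendered manifests on file name: "data" takes
# every YAML file whose name does not contain "ClusterRole" (the regexall
# filter) and forces it into var.namespace, while "data_no_ns" keeps the
# ClusterRole/ClusterRoleBinding files cluster-scoped and only rewrites the
# binding subject namespace through local.rb-patch. With a hypothetical file
# set (names for illustration only):
#   prometheus.yaml                    -> data       (namespaced)
#   prometheusClusterRole.yaml         -> data_no_ns (cluster-scoped)
#   prometheusClusterRoleBinding.yaml  -> data_no_ns (subject ns patched)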
112
monitor/prometheus/index.yaml
Normal file
@@ -0,0 +1,112 @@
---
apiVersion: vinyl.solidite.fr/v1beta1
kind: Component
category: monitor
metadata:
  name: prometheus
description: null
options:
  sub-domain:
    default: prometheus
    examples:
    - prometheus
    type: string
  domain:
    default: your-company
    examples:
    - your-company
    type: string
  issuer:
    default: letsencrypt-prod
    examples:
    - letsencrypt-prod
    type: string
  shards:
    default: 1
    examples:
    - 1
    type: integer
  retention:
    default: 10d
    examples:
    - 10d
    type: string
  ingress-class:
    default: traefik
    examples:
    - traefik
    type: string
  domain-name:
    default: your_company.com
    examples:
    - your_company.com
    type: string
  replicas:
    default: 1
    examples:
    - 1
    type: integer
  logLevel:
    default: info
    examples:
    - info
    type: string
  enableAdminAPI:
    default: false
    examples:
    - false
    type: boolean
  images:
    default:
      prometheus:
        pullPolicy: IfNotPresent
        registry: quay.io
        repository: prometheus/prometheus
        tag: v2.49.1
    examples:
    - prometheus:
        pullPolicy: IfNotPresent
        registry: quay.io
        repository: prometheus/prometheus
        tag: v2.49.1
    properties:
      prometheus:
        default:
          pullPolicy: IfNotPresent
          registry: quay.io
          repository: prometheus/prometheus
          tag: v2.49.1
        properties:
          pullPolicy:
            default: IfNotPresent
            enum:
            - Always
            - Never
            - IfNotPresent
            type: string
          registry:
            default: quay.io
            type: string
          repository:
            default: prometheus/prometheus
            type: string
          tag:
            default: v2.49.1
            type: string
        type: object
    type: object
  listenLocal:
    default: false
    examples:
    - false
    type: boolean
dependencies: []
providers:
  kubernetes: true
  authentik: true
  kubectl: true
  postgresql: null
  restapi: null
  http: null
  gitea: null
  tfaddtype: null
@@ -0,0 +1,31 @@
|
||||
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/config-reloaders.yaml
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: PrometheusRule
|
||||
metadata:
|
||||
name: prometheus-community-kube-config-reloaders
|
||||
namespace: vynil-monitor
|
||||
labels:
|
||||
app: kube-prometheus-stack
|
||||
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: prometheus-community
|
||||
app.kubernetes.io/version: "56.0.2"
|
||||
app.kubernetes.io/part-of: kube-prometheus-stack
|
||||
chart: kube-prometheus-stack-56.0.2
|
||||
release: "prometheus-community"
|
||||
heritage: "Helm"
|
||||
spec:
|
||||
groups:
|
||||
- name: config-reloaders
|
||||
rules:
|
||||
- alert: ConfigReloaderSidecarErrors
|
||||
annotations:
|
||||
description: 'Errors encountered while the {{$labels.pod}} config-reloader sidecar attempts to sync config in {{$labels.namespace}} namespace.
|
||||
|
||||
As a result, configuration for service running in {{$labels.pod}} may be stale and cannot be updated anymore.'
|
||||
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus-operator/configreloadersidecarerrors
|
||||
summary: config-reloader sidecar has not had a successful reload for 10m
|
||||
expr: max_over_time(reloader_last_reload_successful{namespace=~".+"}[5m]) == 0
|
||||
for: 10m
|
||||
labels:
|
||||
severity: warning
|
||||
@@ -0,0 +1,67 @@
|
||||
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/general.rules.yaml
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: PrometheusRule
|
||||
metadata:
|
||||
name: prometheus-community-kube-general.rules
|
||||
namespace: vynil-monitor
|
||||
labels:
|
||||
app: kube-prometheus-stack
|
||||
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: prometheus-community
|
||||
app.kubernetes.io/version: "56.0.2"
|
||||
app.kubernetes.io/part-of: kube-prometheus-stack
|
||||
chart: kube-prometheus-stack-56.0.2
|
||||
release: "prometheus-community"
|
||||
heritage: "Helm"
|
||||
spec:
|
||||
groups:
|
||||
- name: general.rules
|
||||
rules:
|
||||
- alert: TargetDown
|
||||
annotations:
|
||||
description: '{{ printf "%.4g" $value }}% of the {{ $labels.job }}/{{ $labels.service }} targets in {{ $labels.namespace }} namespace are down.'
|
||||
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/general/targetdown
|
||||
summary: One or more targets are unreachable.
|
||||
expr: 100 * (count(up == 0) BY (cluster, job, namespace, service) / count(up) BY (cluster, job, namespace, service)) > 10
|
||||
for: 10m
|
||||
labels:
|
||||
severity: warning
|
||||
- alert: Watchdog
|
||||
annotations:
|
||||
description: 'This is an alert meant to ensure that the entire alerting pipeline is functional.
|
||||
|
||||
This alert is always firing, therefore it should always be firing in Alertmanager
|
||||
|
||||
and always fire against a receiver. There are integrations with various notification
|
||||
|
||||
mechanisms that send a notification when this alert is not firing. For example the
|
||||
|
||||
"DeadMansSnitch" integration in PagerDuty.
|
||||
|
||||
'
|
||||
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/general/watchdog
|
||||
summary: An alert that should always be firing to certify that Alertmanager is working properly.
|
||||
expr: vector(1)
|
||||
labels:
|
||||
severity: none
|
||||
- alert: InfoInhibitor
|
||||
annotations:
|
||||
description: 'This is an alert that is used to inhibit info alerts.
|
||||
|
||||
By themselves, the info-level alerts are sometimes very noisy, but they are relevant when combined with
|
||||
|
||||
other alerts.
|
||||
|
||||
This alert fires whenever there''s a severity="info" alert, and stops firing when another alert with a
|
||||
|
||||
severity of ''warning'' or ''critical'' starts firing on the same namespace.
|
||||
|
||||
This alert should be routed to a null receiver and configured to inhibit alerts with severity="info".
|
||||
|
||||
'
|
||||
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/general/infoinhibitor
|
||||
summary: Info-level alert inhibition.
|
||||
expr: ALERTS{severity = "info"} == 1 unless on (namespace) ALERTS{alertname != "InfoInhibitor", severity =~ "warning|critical", alertstate="firing"} == 1
|
||||
labels:
|
||||
severity: none
|
||||
@@ -0,0 +1,27 @@
|
||||
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/k8s.rules.container_cpu_usage_seconds_total.yaml
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: PrometheusRule
|
||||
metadata:
|
||||
name: prometheus-community-kube-k8s.rules.container-cpu-usage-seconds
|
||||
namespace: vynil-monitor
|
||||
labels:
|
||||
app: kube-prometheus-stack
|
||||
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: prometheus-community
|
||||
app.kubernetes.io/version: "56.0.2"
|
||||
app.kubernetes.io/part-of: kube-prometheus-stack
|
||||
chart: kube-prometheus-stack-56.0.2
|
||||
release: "prometheus-community"
|
||||
heritage: "Helm"
|
||||
spec:
|
||||
groups:
|
||||
- name: k8s.rules.container_cpu_usage_seconds_total
|
||||
rules:
|
||||
- expr: |-
|
||||
sum by (cluster, namespace, pod, container) (
|
||||
irate(container_cpu_usage_seconds_total{job="kubelet", metrics_path="/metrics/cadvisor", image!=""}[5m])
|
||||
) * on (cluster, namespace, pod) group_left(node) topk by (cluster, namespace, pod) (
|
||||
1, max by (cluster, namespace, pod, node) (kube_pod_info{node!=""})
|
||||
)
|
||||
record: node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate
|
||||
@@ -0,0 +1,26 @@
|
||||
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/k8s.rules.container_memory_cache.yaml
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: PrometheusRule
|
||||
metadata:
|
||||
name: prometheus-community-kube-k8s.rules.container-memory-cache
|
||||
namespace: vynil-monitor
|
||||
labels:
|
||||
app: kube-prometheus-stack
|
||||
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: prometheus-community
|
||||
app.kubernetes.io/version: "56.0.2"
|
||||
app.kubernetes.io/part-of: kube-prometheus-stack
|
||||
chart: kube-prometheus-stack-56.0.2
|
||||
release: "prometheus-community"
|
||||
heritage: "Helm"
|
||||
spec:
|
||||
groups:
|
||||
- name: k8s.rules.container_memory_cache
|
||||
rules:
|
||||
- expr: |-
|
||||
container_memory_cache{job="kubelet", metrics_path="/metrics/cadvisor", image!=""}
|
||||
* on (cluster, namespace, pod) group_left(node) topk by (cluster, namespace, pod) (1,
|
||||
max by (cluster, namespace, pod, node) (kube_pod_info{node!=""})
|
||||
)
|
||||
record: node_namespace_pod_container:container_memory_cache
|
||||
@@ -0,0 +1,26 @@
|
||||
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/k8s.rules.container_memory_rss.yaml
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: PrometheusRule
|
||||
metadata:
|
||||
name: prometheus-community-kube-k8s.rules.container-memory-rss
|
||||
namespace: vynil-monitor
|
||||
labels:
|
||||
app: kube-prometheus-stack
|
||||
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: prometheus-community
|
||||
app.kubernetes.io/version: "56.0.2"
|
||||
app.kubernetes.io/part-of: kube-prometheus-stack
|
||||
chart: kube-prometheus-stack-56.0.2
|
||||
release: "prometheus-community"
|
||||
heritage: "Helm"
|
||||
spec:
|
||||
groups:
|
||||
- name: k8s.rules.container_memory_rss
|
||||
rules:
|
||||
- expr: |-
|
||||
container_memory_rss{job="kubelet", metrics_path="/metrics/cadvisor", image!=""}
|
||||
* on (cluster, namespace, pod) group_left(node) topk by (cluster, namespace, pod) (1,
|
||||
max by (cluster, namespace, pod, node) (kube_pod_info{node!=""})
|
||||
)
|
||||
record: node_namespace_pod_container:container_memory_rss
|
||||
@@ -0,0 +1,26 @@
|
||||
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/k8s.rules.container_memory_swap.yaml
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: PrometheusRule
|
||||
metadata:
|
||||
name: prometheus-community-kube-k8s.rules.container-memory-swap
|
||||
namespace: vynil-monitor
|
||||
labels:
|
||||
app: kube-prometheus-stack
|
||||
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: prometheus-community
|
||||
app.kubernetes.io/version: "56.0.2"
|
||||
app.kubernetes.io/part-of: kube-prometheus-stack
|
||||
chart: kube-prometheus-stack-56.0.2
|
||||
release: "prometheus-community"
|
||||
heritage: "Helm"
|
||||
spec:
|
||||
groups:
|
||||
- name: k8s.rules.container_memory_swap
|
||||
rules:
|
||||
- expr: |-
|
||||
container_memory_swap{job="kubelet", metrics_path="/metrics/cadvisor", image!=""}
|
||||
* on (cluster, namespace, pod) group_left(node) topk by (cluster, namespace, pod) (1,
|
||||
max by (cluster, namespace, pod, node) (kube_pod_info{node!=""})
|
||||
)
|
||||
record: node_namespace_pod_container:container_memory_swap
|
||||
@@ -0,0 +1,26 @@
|
||||
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/k8s.rules.container_memory_working_set_bytes.yaml
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: PrometheusRule
|
||||
metadata:
|
||||
name: prometheus-community-kube-k8s.rules.container-memory-working-se
|
||||
namespace: vynil-monitor
|
||||
labels:
|
||||
app: kube-prometheus-stack
|
||||
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: prometheus-community
|
||||
app.kubernetes.io/version: "56.0.2"
|
||||
app.kubernetes.io/part-of: kube-prometheus-stack
|
||||
chart: kube-prometheus-stack-56.0.2
|
||||
release: "prometheus-community"
|
||||
heritage: "Helm"
|
||||
spec:
|
||||
groups:
|
||||
- name: k8s.rules.container_memory_working_set_bytes
|
||||
rules:
|
||||
- expr: |-
|
||||
container_memory_working_set_bytes{job="kubelet", metrics_path="/metrics/cadvisor", image!=""}
|
||||
* on (cluster, namespace, pod) group_left(node) topk by (cluster, namespace, pod) (1,
|
||||
max by (cluster, namespace, pod, node) (kube_pod_info{node!=""})
|
||||
)
|
||||
record: node_namespace_pod_container:container_memory_working_set_bytes
|
||||
@@ -0,0 +1,88 @@
|
||||
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/k8s.rules.container_resource.yaml
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: PrometheusRule
|
||||
metadata:
|
||||
name: prometheus-community-kube-k8s.rules.container-resource
|
||||
namespace: vynil-monitor
|
||||
labels:
|
||||
app: kube-prometheus-stack
|
||||
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/instance: prometheus-community
|
||||
app.kubernetes.io/version: "56.0.2"
|
||||
app.kubernetes.io/part-of: kube-prometheus-stack
|
||||
chart: kube-prometheus-stack-56.0.2
|
||||
release: "prometheus-community"
|
||||
heritage: "Helm"
|
||||
spec:
|
||||
groups:
|
||||
- name: k8s.rules.container_resource
|
||||
rules:
|
||||
- expr: |-
|
||||
kube_pod_container_resource_requests{resource="memory",job="kube-state-metrics"} * on (namespace, pod, cluster)
|
||||
group_left() max by (namespace, pod, cluster) (
|
||||
(kube_pod_status_phase{phase=~"Pending|Running"} == 1)
|
||||
)
|
||||
record: cluster:namespace:pod_memory:active:kube_pod_container_resource_requests
|
||||
- expr: |-
|
||||
sum by (namespace, cluster) (
|
||||
sum by (namespace, pod, cluster) (
|
||||
max by (namespace, pod, container, cluster) (
|
||||
kube_pod_container_resource_requests{resource="memory",job="kube-state-metrics"}
|
||||
) * on (namespace, pod, cluster) group_left() max by (namespace, pod, cluster) (
|
||||
kube_pod_status_phase{phase=~"Pending|Running"} == 1
|
||||
)
|
||||
)
|
||||
)
|
||||
record: namespace_memory:kube_pod_container_resource_requests:sum
|
||||
- expr: |-
|
||||
kube_pod_container_resource_requests{resource="cpu",job="kube-state-metrics"} * on (namespace, pod, cluster)
|
||||
group_left() max by (namespace, pod, cluster) (
|
||||
(kube_pod_status_phase{phase=~"Pending|Running"} == 1)
|
||||
)
|
||||
record: cluster:namespace:pod_cpu:active:kube_pod_container_resource_requests
|
||||
- expr: |-
|
||||
sum by (namespace, cluster) (
|
||||
sum by (namespace, pod, cluster) (
|
||||
max by (namespace, pod, container, cluster) (
|
||||
kube_pod_container_resource_requests{resource="cpu",job="kube-state-metrics"}
|
||||
) * on (namespace, pod, cluster) group_left() max by (namespace, pod, cluster) (
|
||||
kube_pod_status_phase{phase=~"Pending|Running"} == 1
|
||||
)
|
||||
)
|
||||
)
|
||||
record: namespace_cpu:kube_pod_container_resource_requests:sum
|
||||
- expr: |-
|
||||
kube_pod_container_resource_limits{resource="memory",job="kube-state-metrics"} * on (namespace, pod, cluster)
|
||||
group_left() max by (namespace, pod, cluster) (
|
||||
(kube_pod_status_phase{phase=~"Pending|Running"} == 1)
|
||||
)
|
||||
record: cluster:namespace:pod_memory:active:kube_pod_container_resource_limits
|
||||
- expr: |-
|
||||
sum by (namespace, cluster) (
|
||||
sum by (namespace, pod, cluster) (
|
||||
max by (namespace, pod, container, cluster) (
|
||||
kube_pod_container_resource_limits{resource="memory",job="kube-state-metrics"}
|
||||
) * on (namespace, pod, cluster) group_left() max by (namespace, pod, cluster) (
|
||||
kube_pod_status_phase{phase=~"Pending|Running"} == 1
|
||||
)
|
||||
)
|
||||
)
|
||||
record: namespace_memory:kube_pod_container_resource_limits:sum
|
||||
- expr: |-
|
||||
kube_pod_container_resource_limits{resource="cpu",job="kube-state-metrics"} * on (namespace, pod, cluster)
|
||||
group_left() max by (namespace, pod, cluster) (
|
||||
(kube_pod_status_phase{phase=~"Pending|Running"} == 1)
|
||||
)
|
||||
record: cluster:namespace:pod_cpu:active:kube_pod_container_resource_limits
|
||||
- expr: |-
|
||||
sum by (namespace, cluster) (
|
||||
sum by (namespace, pod, cluster) (
|
||||
max by (namespace, pod, container, cluster) (
|
||||
kube_pod_container_resource_limits{resource="cpu",job="kube-state-metrics"}
|
||||
) * on (namespace, pod, cluster) group_left() max by (namespace, pod, cluster) (
|
||||
kube_pod_status_phase{phase=~"Pending|Running"} == 1
|
||||
)
|
||||
)
|
||||
)
|
||||
record: namespace_cpu:kube_pod_container_resource_limits:sum
|
||||
@@ -0,0 +1,67 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/k8s.rules.pod_owner.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-k8s.rules.pod-owner
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
  - name: k8s.rules.pod_owner
    rules:
    - expr: |-
        max by (cluster, namespace, workload, pod) (
          label_replace(
            label_replace(
              kube_pod_owner{job="kube-state-metrics", owner_kind="ReplicaSet"},
              "replicaset", "$1", "owner_name", "(.*)"
            ) * on (replicaset, namespace) group_left(owner_name) topk by (replicaset, namespace) (
              1, max by (replicaset, namespace, owner_name) (
                kube_replicaset_owner{job="kube-state-metrics"}
              )
            ),
            "workload", "$1", "owner_name", "(.*)"
          )
        )
      labels:
        workload_type: deployment
      record: namespace_workload_pod:kube_pod_owner:relabel
    - expr: |-
        max by (cluster, namespace, workload, pod) (
          label_replace(
            kube_pod_owner{job="kube-state-metrics", owner_kind="DaemonSet"},
            "workload", "$1", "owner_name", "(.*)"
          )
        )
      labels:
        workload_type: daemonset
      record: namespace_workload_pod:kube_pod_owner:relabel
    - expr: |-
        max by (cluster, namespace, workload, pod) (
          label_replace(
            kube_pod_owner{job="kube-state-metrics", owner_kind="StatefulSet"},
            "workload", "$1", "owner_name", "(.*)"
          )
        )
      labels:
        workload_type: statefulset
      record: namespace_workload_pod:kube_pod_owner:relabel
    - expr: |-
        max by (cluster, namespace, workload, pod) (
          label_replace(
            kube_pod_owner{job="kube-state-metrics", owner_kind="Job"},
            "workload", "$1", "owner_name", "(.*)"
          )
        )
      labels:
        workload_type: job
      record: namespace_workload_pod:kube_pod_owner:relabel
@@ -0,0 +1,24 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/kube-prometheus-general.rules.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-kube-prometheus-general.rules
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
  - name: kube-prometheus-general.rules
    rules:
    - expr: count without(instance, pod, node) (up == 1)
      record: count:up1
    - expr: count without(instance, pod, node) (up == 0)
      record: count:up0
@@ -0,0 +1,32 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/kube-prometheus-node-recording.rules.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-kube-prometheus-node-recording.rules
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
  - name: kube-prometheus-node-recording.rules
    rules:
    - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal"}[3m])) BY (instance)
      record: instance:node_cpu:rate:sum
    - expr: sum(rate(node_network_receive_bytes_total[3m])) BY (instance)
      record: instance:node_network_receive_bytes:rate:sum
    - expr: sum(rate(node_network_transmit_bytes_total[3m])) BY (instance)
      record: instance:node_network_transmit_bytes:rate:sum
    - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal"}[5m])) WITHOUT (cpu, mode) / ON(instance) GROUP_LEFT() count(sum(node_cpu_seconds_total) BY (instance, cpu)) BY (instance)
      record: instance:node_cpu:ratio
    - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal"}[5m]))
      record: cluster:node_cpu:sum_rate5m
    - expr: cluster:node_cpu:sum_rate5m / count(sum(node_cpu_seconds_total) BY (instance, cpu))
      record: cluster:node_cpu:ratio
@@ -0,0 +1,68 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/kube-state-metrics.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-kube-state-metrics
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
  - name: kube-state-metrics
    rules:
    - alert: KubeStateMetricsListErrors
      annotations:
        description: kube-state-metrics is experiencing errors at an elevated rate in list operations. This is likely causing it to not be able to expose metrics about Kubernetes objects correctly or at all.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kube-state-metrics/kubestatemetricslisterrors
        summary: kube-state-metrics is experiencing errors in list operations.
      expr: |-
        (sum(rate(kube_state_metrics_list_total{job="kube-state-metrics",result="error"}[5m])) by (cluster)
          /
        sum(rate(kube_state_metrics_list_total{job="kube-state-metrics"}[5m])) by (cluster))
        > 0.01
      for: 15m
      labels:
        severity: critical
    - alert: KubeStateMetricsWatchErrors
      annotations:
        description: kube-state-metrics is experiencing errors at an elevated rate in watch operations. This is likely causing it to not be able to expose metrics about Kubernetes objects correctly or at all.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kube-state-metrics/kubestatemetricswatcherrors
        summary: kube-state-metrics is experiencing errors in watch operations.
      expr: |-
        (sum(rate(kube_state_metrics_watch_total{job="kube-state-metrics",result="error"}[5m])) by (cluster)
          /
        sum(rate(kube_state_metrics_watch_total{job="kube-state-metrics"}[5m])) by (cluster))
        > 0.01
      for: 15m
      labels:
        severity: critical
    - alert: KubeStateMetricsShardingMismatch
      annotations:
        description: kube-state-metrics pods are running with different --total-shards configuration, some Kubernetes objects may be exposed multiple times or not exposed at all.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kube-state-metrics/kubestatemetricsshardingmismatch
        summary: kube-state-metrics sharding is misconfigured.
      expr: stdvar (kube_state_metrics_total_shards{job="kube-state-metrics"}) by (cluster) != 0
      for: 15m
      labels:
        severity: critical
    - alert: KubeStateMetricsShardsMissing
      annotations:
        description: kube-state-metrics shards are missing, some Kubernetes objects are not being exposed.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kube-state-metrics/kubestatemetricsshardsmissing
        summary: kube-state-metrics shards are missing.
      expr: |-
        2^max(kube_state_metrics_total_shards{job="kube-state-metrics"}) by (cluster) - 1
          -
        sum( 2 ^ max by (cluster, shard_ordinal) (kube_state_metrics_shard_ordinal{job="kube-state-metrics"}) ) by (cluster)
        != 0
      for: 15m
      labels:
        severity: critical
@@ -0,0 +1,32 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/kubelet.rules.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-kubelet.rules
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
  - name: kubelet.rules
    rules:
    - expr: histogram_quantile(0.99, sum(rate(kubelet_pleg_relist_duration_seconds_bucket{job="kubelet", metrics_path="/metrics"}[5m])) by (cluster, instance, le) * on (cluster, instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"})
      labels:
        quantile: '0.99'
      record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile
    - expr: histogram_quantile(0.9, sum(rate(kubelet_pleg_relist_duration_seconds_bucket{job="kubelet", metrics_path="/metrics"}[5m])) by (cluster, instance, le) * on (cluster, instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"})
      labels:
        quantile: '0.9'
      record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile
    - expr: histogram_quantile(0.5, sum(rate(kubelet_pleg_relist_duration_seconds_bucket{job="kubelet", metrics_path="/metrics"}[5m])) by (cluster, instance, le) * on (cluster, instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"})
      labels:
        quantile: '0.5'
      record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile
@@ -0,0 +1,258 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/kubernetes-apps.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-kubernetes-apps
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
  - name: kubernetes-apps
    rules:
    - alert: KubePodCrashLooping
      annotations:
        description: 'Pod {{ $labels.namespace }}/{{ $labels.pod }} ({{ $labels.container }}) is in waiting state (reason: "CrashLoopBackOff").'
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepodcrashlooping
        summary: Pod is crash looping.
      expr: max_over_time(kube_pod_container_status_waiting_reason{reason="CrashLoopBackOff", job="kube-state-metrics", namespace=~".*"}[5m]) >= 1
      for: 15m
      labels:
        severity: warning
    - alert: KubePodNotReady
      annotations:
        description: Pod {{ $labels.namespace }}/{{ $labels.pod }} has been in a non-ready state for longer than 15 minutes.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepodnotready
        summary: Pod has been in a non-ready state for more than 15 minutes.
      expr: |-
        sum by (namespace, pod, cluster) (
          max by (namespace, pod, cluster) (
            kube_pod_status_phase{job="kube-state-metrics", namespace=~".*", phase=~"Pending|Unknown|Failed"}
          ) * on (namespace, pod, cluster) group_left(owner_kind) topk by (namespace, pod, cluster) (
            1, max by (namespace, pod, owner_kind, cluster) (kube_pod_owner{owner_kind!="Job"})
          )
        ) > 0
      for: 15m
      labels:
        severity: warning
    - alert: KubeDeploymentGenerationMismatch
      annotations:
        description: Deployment generation for {{ $labels.namespace }}/{{ $labels.deployment }} does not match, this indicates that the Deployment has failed but has not been rolled back.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedeploymentgenerationmismatch
        summary: Deployment generation mismatch due to possible roll-back
      expr: |-
        kube_deployment_status_observed_generation{job="kube-state-metrics", namespace=~".*"}
          !=
        kube_deployment_metadata_generation{job="kube-state-metrics", namespace=~".*"}
      for: 15m
      labels:
        severity: warning
    - alert: KubeDeploymentReplicasMismatch
      annotations:
        description: Deployment {{ $labels.namespace }}/{{ $labels.deployment }} has not matched the expected number of replicas for longer than 15 minutes.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedeploymentreplicasmismatch
        summary: Deployment has not matched the expected number of replicas.
      expr: |-
        (
          kube_deployment_spec_replicas{job="kube-state-metrics", namespace=~".*"}
            >
          kube_deployment_status_replicas_available{job="kube-state-metrics", namespace=~".*"}
        ) and (
          changes(kube_deployment_status_replicas_updated{job="kube-state-metrics", namespace=~".*"}[10m])
            ==
          0
        )
      for: 15m
      labels:
        severity: warning
    - alert: KubeDeploymentRolloutStuck
      annotations:
        description: Rollout of deployment {{ $labels.namespace }}/{{ $labels.deployment }} is not progressing for longer than 15 minutes.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedeploymentrolloutstuck
        summary: Deployment rollout is not progressing.
      expr: |-
        kube_deployment_status_condition{condition="Progressing", status="false",job="kube-state-metrics", namespace=~".*"}
        != 0
      for: 15m
      labels:
        severity: warning
    - alert: KubeStatefulSetReplicasMismatch
      annotations:
        description: StatefulSet {{ $labels.namespace }}/{{ $labels.statefulset }} has not matched the expected number of replicas for longer than 15 minutes.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubestatefulsetreplicasmismatch
        summary: StatefulSet has not matched the expected number of replicas.
      expr: |-
        (
          kube_statefulset_status_replicas_ready{job="kube-state-metrics", namespace=~".*"}
            !=
          kube_statefulset_status_replicas{job="kube-state-metrics", namespace=~".*"}
        ) and (
          changes(kube_statefulset_status_replicas_updated{job="kube-state-metrics", namespace=~".*"}[10m])
            ==
          0
        )
      for: 15m
      labels:
        severity: warning
    - alert: KubeStatefulSetGenerationMismatch
      annotations:
        description: StatefulSet generation for {{ $labels.namespace }}/{{ $labels.statefulset }} does not match, this indicates that the StatefulSet has failed but has not been rolled back.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubestatefulsetgenerationmismatch
        summary: StatefulSet generation mismatch due to possible roll-back
      expr: |-
        kube_statefulset_status_observed_generation{job="kube-state-metrics", namespace=~".*"}
          !=
        kube_statefulset_metadata_generation{job="kube-state-metrics", namespace=~".*"}
      for: 15m
      labels:
        severity: warning
    - alert: KubeStatefulSetUpdateNotRolledOut
      annotations:
        description: StatefulSet {{ $labels.namespace }}/{{ $labels.statefulset }} update has not been rolled out.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubestatefulsetupdatenotrolledout
        summary: StatefulSet update has not been rolled out.
      expr: |-
        (
          max without (revision) (
            kube_statefulset_status_current_revision{job="kube-state-metrics", namespace=~".*"}
              unless
            kube_statefulset_status_update_revision{job="kube-state-metrics", namespace=~".*"}
          )
            *
          (
            kube_statefulset_replicas{job="kube-state-metrics", namespace=~".*"}
              !=
            kube_statefulset_status_replicas_updated{job="kube-state-metrics", namespace=~".*"}
          )
        ) and (
          changes(kube_statefulset_status_replicas_updated{job="kube-state-metrics", namespace=~".*"}[5m])
            ==
          0
        )
      for: 15m
      labels:
        severity: warning
    - alert: KubeDaemonSetRolloutStuck
      annotations:
        description: DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} has not finished or progressed for at least 15 minutes.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedaemonsetrolloutstuck
        summary: DaemonSet rollout is stuck.
      expr: |-
        (
          (
            kube_daemonset_status_current_number_scheduled{job="kube-state-metrics", namespace=~".*"}
              !=
            kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics", namespace=~".*"}
          ) or (
            kube_daemonset_status_number_misscheduled{job="kube-state-metrics", namespace=~".*"}
              !=
            0
          ) or (
            kube_daemonset_status_updated_number_scheduled{job="kube-state-metrics", namespace=~".*"}
              !=
            kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics", namespace=~".*"}
          ) or (
            kube_daemonset_status_number_available{job="kube-state-metrics", namespace=~".*"}
              !=
            kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics", namespace=~".*"}
          )
        ) and (
          changes(kube_daemonset_status_updated_number_scheduled{job="kube-state-metrics", namespace=~".*"}[5m])
            ==
          0
        )
      for: 15m
      labels:
        severity: warning
    - alert: KubeContainerWaiting
      annotations:
        description: pod/{{ $labels.pod }} in namespace {{ $labels.namespace }} on container {{ $labels.container}} has been in waiting state for longer than 1 hour.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubecontainerwaiting
        summary: Pod container waiting longer than 1 hour
      expr: sum by (namespace, pod, container, cluster) (kube_pod_container_status_waiting_reason{job="kube-state-metrics", namespace=~".*"}) > 0
      for: 1h
      labels:
        severity: warning
    - alert: KubeDaemonSetNotScheduled
      annotations:
        description: '{{ $value }} Pods of DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} are not scheduled.'
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedaemonsetnotscheduled
        summary: DaemonSet pods are not scheduled.
      expr: |-
        kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics", namespace=~".*"}
          -
        kube_daemonset_status_current_number_scheduled{job="kube-state-metrics", namespace=~".*"} > 0
      for: 10m
      labels:
        severity: warning
    - alert: KubeDaemonSetMisScheduled
      annotations:
        description: '{{ $value }} Pods of DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} are running where they are not supposed to run.'
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedaemonsetmisscheduled
        summary: DaemonSet pods are misscheduled.
      expr: kube_daemonset_status_number_misscheduled{job="kube-state-metrics", namespace=~".*"} > 0
      for: 15m
      labels:
        severity: warning
    - alert: KubeJobNotCompleted
      annotations:
        description: Job {{ $labels.namespace }}/{{ $labels.job_name }} is taking more than {{ "43200" | humanizeDuration }} to complete.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubejobnotcompleted
        summary: Job did not complete in time
      expr: |-
        time() - max by (namespace, job_name, cluster) (kube_job_status_start_time{job="kube-state-metrics", namespace=~".*"}
          and
        kube_job_status_active{job="kube-state-metrics", namespace=~".*"} > 0) > 43200
      labels:
        severity: warning
    - alert: KubeJobFailed
      annotations:
        description: Job {{ $labels.namespace }}/{{ $labels.job_name }} failed to complete. Removing failed job after investigation should clear this alert.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubejobfailed
        summary: Job failed to complete.
      expr: kube_job_failed{job="kube-state-metrics", namespace=~".*"} > 0
      for: 15m
      labels:
        severity: warning
    - alert: KubeHpaReplicasMismatch
      annotations:
        description: HPA {{ $labels.namespace }}/{{ $labels.horizontalpodautoscaler }} has not matched the desired number of replicas for longer than 15 minutes.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubehpareplicasmismatch
        summary: HPA has not matched desired number of replicas.
      expr: |-
        (kube_horizontalpodautoscaler_status_desired_replicas{job="kube-state-metrics", namespace=~".*"}
          !=
        kube_horizontalpodautoscaler_status_current_replicas{job="kube-state-metrics", namespace=~".*"})
          and
        (kube_horizontalpodautoscaler_status_current_replicas{job="kube-state-metrics", namespace=~".*"}
          >
        kube_horizontalpodautoscaler_spec_min_replicas{job="kube-state-metrics", namespace=~".*"})
          and
        (kube_horizontalpodautoscaler_status_current_replicas{job="kube-state-metrics", namespace=~".*"}
          <
        kube_horizontalpodautoscaler_spec_max_replicas{job="kube-state-metrics", namespace=~".*"})
          and
        changes(kube_horizontalpodautoscaler_status_current_replicas{job="kube-state-metrics", namespace=~".*"}[15m]) == 0
      for: 15m
      labels:
        severity: warning
    - alert: KubeHpaMaxedOut
      annotations:
        description: HPA {{ $labels.namespace }}/{{ $labels.horizontalpodautoscaler }} has been running at max replicas for longer than 15 minutes.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubehpamaxedout
        summary: HPA is running at max replicas
      expr: |-
        kube_horizontalpodautoscaler_status_current_replicas{job="kube-state-metrics", namespace=~".*"}
          ==
        kube_horizontalpodautoscaler_spec_max_replicas{job="kube-state-metrics", namespace=~".*"}
      for: 15m
      labels:
        severity: warning
@@ -0,0 +1,122 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/kubernetes-resources.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-kubernetes-resources
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
  - name: kubernetes-resources
    rules:
    - alert: KubeCPUOvercommit
      annotations:
        description: Cluster {{ $labels.cluster }} has overcommitted CPU resource requests for Pods by {{ $value }} CPU shares and cannot tolerate node failure.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubecpuovercommit
        summary: Cluster has overcommitted CPU resource requests.
      expr: |-
        sum(namespace_cpu:kube_pod_container_resource_requests:sum{job="kube-state-metrics",}) by (cluster) - (sum(kube_node_status_allocatable{job="kube-state-metrics",resource="cpu"}) by (cluster) - max(kube_node_status_allocatable{job="kube-state-metrics",resource="cpu"}) by (cluster)) > 0
        and
        (sum(kube_node_status_allocatable{job="kube-state-metrics",resource="cpu"}) by (cluster) - max(kube_node_status_allocatable{job="kube-state-metrics",resource="cpu"}) by (cluster)) > 0
      for: 10m
      labels:
        severity: warning
    - alert: KubeMemoryOvercommit
      annotations:
        description: Cluster {{ $labels.cluster }} has overcommitted memory resource requests for Pods by {{ $value | humanize }} bytes and cannot tolerate node failure.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubememoryovercommit
        summary: Cluster has overcommitted memory resource requests.
      expr: |-
        sum(namespace_memory:kube_pod_container_resource_requests:sum{}) by (cluster) - (sum(kube_node_status_allocatable{resource="memory", job="kube-state-metrics"}) by (cluster) - max(kube_node_status_allocatable{resource="memory", job="kube-state-metrics"}) by (cluster)) > 0
        and
        (sum(kube_node_status_allocatable{resource="memory", job="kube-state-metrics"}) by (cluster) - max(kube_node_status_allocatable{resource="memory", job="kube-state-metrics"}) by (cluster)) > 0
      for: 10m
      labels:
        severity: warning
    - alert: KubeCPUQuotaOvercommit
      annotations:
        description: Cluster {{ $labels.cluster }} has overcommitted CPU resource requests for Namespaces.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubecpuquotaovercommit
        summary: Cluster has overcommitted CPU resource requests.
      expr: |-
        sum(min without(resource) (kube_resourcequota{job="kube-state-metrics", type="hard", resource=~"(cpu|requests.cpu)"})) by (cluster)
          /
        sum(kube_node_status_allocatable{resource="cpu", job="kube-state-metrics"}) by (cluster)
        > 1.5
      for: 5m
      labels:
        severity: warning
    - alert: KubeMemoryQuotaOvercommit
      annotations:
        description: Cluster {{ $labels.cluster }} has overcommitted memory resource requests for Namespaces.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubememoryquotaovercommit
        summary: Cluster has overcommitted memory resource requests.
      expr: |-
        sum(min without(resource) (kube_resourcequota{job="kube-state-metrics", type="hard", resource=~"(memory|requests.memory)"})) by (cluster)
          /
        sum(kube_node_status_allocatable{resource="memory", job="kube-state-metrics"}) by (cluster)
        > 1.5
      for: 5m
      labels:
        severity: warning
    - alert: KubeQuotaAlmostFull
      annotations:
        description: Namespace {{ $labels.namespace }} is using {{ $value | humanizePercentage }} of its {{ $labels.resource }} quota.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubequotaalmostfull
        summary: Namespace quota is going to be full.
      expr: |-
        kube_resourcequota{job="kube-state-metrics", type="used"}
          / ignoring(instance, job, type)
        (kube_resourcequota{job="kube-state-metrics", type="hard"} > 0)
        > 0.9 < 1
      for: 15m
      labels:
        severity: info
    - alert: KubeQuotaFullyUsed
      annotations:
        description: Namespace {{ $labels.namespace }} is using {{ $value | humanizePercentage }} of its {{ $labels.resource }} quota.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubequotafullyused
        summary: Namespace quota is fully used.
      expr: |-
        kube_resourcequota{job="kube-state-metrics", type="used"}
          / ignoring(instance, job, type)
        (kube_resourcequota{job="kube-state-metrics", type="hard"} > 0)
        == 1
      for: 15m
      labels:
        severity: info
    - alert: KubeQuotaExceeded
      annotations:
        description: Namespace {{ $labels.namespace }} is using {{ $value | humanizePercentage }} of its {{ $labels.resource }} quota.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubequotaexceeded
        summary: Namespace quota has exceeded the limits.
      expr: |-
        kube_resourcequota{job="kube-state-metrics", type="used"}
          / ignoring(instance, job, type)
        (kube_resourcequota{job="kube-state-metrics", type="hard"} > 0)
        > 1
      for: 15m
      labels:
        severity: warning
    - alert: CPUThrottlingHigh
      annotations:
        description: '{{ $value | humanizePercentage }} throttling of CPU in namespace {{ $labels.namespace }} for container {{ $labels.container }} in pod {{ $labels.pod }}.'
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/cputhrottlinghigh
        summary: Processes experience elevated CPU throttling.
      expr: |-
        sum(increase(container_cpu_cfs_throttled_periods_total{container!="", }[5m])) by (cluster, container, pod, namespace)
          /
        sum(increase(container_cpu_cfs_periods_total{}[5m])) by (cluster, container, pod, namespace)
        > ( 25 / 100 )
      for: 15m
      labels:
        severity: info
@@ -0,0 +1,113 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/kubernetes-storage.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-kubernetes-storage
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
  - name: kubernetes-storage
    rules:
    - alert: KubePersistentVolumeFillingUp
      annotations:
        description: The PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} on Cluster {{ $labels.cluster }} is only {{ $value | humanizePercentage }} free.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepersistentvolumefillingup
        summary: PersistentVolume is filling up.
      expr: |-
        (
          kubelet_volume_stats_available_bytes{job="kubelet", namespace=~".*", metrics_path="/metrics"}
            /
          kubelet_volume_stats_capacity_bytes{job="kubelet", namespace=~".*", metrics_path="/metrics"}
        ) < 0.03
        and
        kubelet_volume_stats_used_bytes{job="kubelet", namespace=~".*", metrics_path="/metrics"} > 0
        unless on (cluster, namespace, persistentvolumeclaim)
        kube_persistentvolumeclaim_access_mode{ access_mode="ReadOnlyMany"} == 1
        unless on (cluster, namespace, persistentvolumeclaim)
        kube_persistentvolumeclaim_labels{label_excluded_from_alerts="true"} == 1
      for: 1m
      labels:
        severity: critical
    - alert: KubePersistentVolumeFillingUp
      annotations:
        description: Based on recent sampling, the PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} on Cluster {{ $labels.cluster }} is expected to fill up within four days. Currently {{ $value | humanizePercentage }} is available.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepersistentvolumefillingup
        summary: PersistentVolume is filling up.
      expr: |-
        (
          kubelet_volume_stats_available_bytes{job="kubelet", namespace=~".*", metrics_path="/metrics"}
            /
          kubelet_volume_stats_capacity_bytes{job="kubelet", namespace=~".*", metrics_path="/metrics"}
        ) < 0.15
        and
        kubelet_volume_stats_used_bytes{job="kubelet", namespace=~".*", metrics_path="/metrics"} > 0
        and
        predict_linear(kubelet_volume_stats_available_bytes{job="kubelet", namespace=~".*", metrics_path="/metrics"}[6h], 4 * 24 * 3600) < 0
        unless on (cluster, namespace, persistentvolumeclaim)
        kube_persistentvolumeclaim_access_mode{ access_mode="ReadOnlyMany"} == 1
        unless on (cluster, namespace, persistentvolumeclaim)
        kube_persistentvolumeclaim_labels{label_excluded_from_alerts="true"} == 1
      for: 1h
      labels:
        severity: warning
    - alert: KubePersistentVolumeInodesFillingUp
      annotations:
        description: The PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} on Cluster {{ $labels.cluster }} only has {{ $value | humanizePercentage }} free inodes.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepersistentvolumeinodesfillingup
        summary: PersistentVolumeInodes are filling up.
      expr: |-
        (
          kubelet_volume_stats_inodes_free{job="kubelet", namespace=~".*", metrics_path="/metrics"}
            /
          kubelet_volume_stats_inodes{job="kubelet", namespace=~".*", metrics_path="/metrics"}
        ) < 0.03
        and
        kubelet_volume_stats_inodes_used{job="kubelet", namespace=~".*", metrics_path="/metrics"} > 0
        unless on (cluster, namespace, persistentvolumeclaim)
        kube_persistentvolumeclaim_access_mode{ access_mode="ReadOnlyMany"} == 1
        unless on (cluster, namespace, persistentvolumeclaim)
        kube_persistentvolumeclaim_labels{label_excluded_from_alerts="true"} == 1
      for: 1m
      labels:
        severity: critical
    - alert: KubePersistentVolumeInodesFillingUp
      annotations:
        description: Based on recent sampling, the PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} on Cluster {{ $labels.cluster }} is expected to run out of inodes within four days. Currently {{ $value | humanizePercentage }} of its inodes are free.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepersistentvolumeinodesfillingup
        summary: PersistentVolumeInodes are filling up.
      expr: |-
        (
          kubelet_volume_stats_inodes_free{job="kubelet", namespace=~".*", metrics_path="/metrics"}
            /
          kubelet_volume_stats_inodes{job="kubelet", namespace=~".*", metrics_path="/metrics"}
        ) < 0.15
        and
        kubelet_volume_stats_inodes_used{job="kubelet", namespace=~".*", metrics_path="/metrics"} > 0
        and
        predict_linear(kubelet_volume_stats_inodes_free{job="kubelet", namespace=~".*", metrics_path="/metrics"}[6h], 4 * 24 * 3600) < 0
        unless on (cluster, namespace, persistentvolumeclaim)
        kube_persistentvolumeclaim_access_mode{ access_mode="ReadOnlyMany"} == 1
        unless on (cluster, namespace, persistentvolumeclaim)
        kube_persistentvolumeclaim_labels{label_excluded_from_alerts="true"} == 1
      for: 1h
      labels:
        severity: warning
    - alert: KubePersistentVolumeErrors
      annotations:
        description: The persistent volume {{ $labels.persistentvolume }} on Cluster {{ $labels.cluster }} has status {{ $labels.phase }}.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepersistentvolumeerrors
        summary: PersistentVolume is having issues with provisioning.
      expr: kube_persistentvolume_status_phase{phase=~"Failed|Pending",job="kube-state-metrics"} > 0
      for: 5m
      labels:
        severity: critical
@@ -0,0 +1,64 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/kubernetes-system-apiserver.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-kubernetes-system-apiserver
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
  - name: kubernetes-system-apiserver
    rules:
    - alert: KubeClientCertificateExpiration
      annotations:
        description: A client certificate used to authenticate to kubernetes apiserver is expiring in less than 7.0 days.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeclientcertificateexpiration
        summary: Client certificate is about to expire.
      expr: apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 and on (job) histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 604800
      for: 5m
      labels:
        severity: warning
    - alert: KubeClientCertificateExpiration
      annotations:
        description: A client certificate used to authenticate to kubernetes apiserver is expiring in less than 24.0 hours.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeclientcertificateexpiration
        summary: Client certificate is about to expire.
      expr: apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 and on (job) histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 86400
      for: 5m
      labels:
        severity: critical
    - alert: KubeAggregatedAPIErrors
      annotations:
        description: Kubernetes aggregated API {{ $labels.name }}/{{ $labels.namespace }} has reported errors. It has appeared unavailable {{ $value | humanize }} times averaged over the past 10m.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeaggregatedapierrors
        summary: Kubernetes aggregated API has reported errors.
      expr: sum by (name, namespace, cluster)(increase(aggregator_unavailable_apiservice_total{job="apiserver"}[10m])) > 4
      labels:
        severity: warning
    - alert: KubeAggregatedAPIDown
      annotations:
        description: Kubernetes aggregated API {{ $labels.name }}/{{ $labels.namespace }} has been only {{ $value | humanize }}% available over the last 10m.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeaggregatedapidown
        summary: Kubernetes aggregated API is down.
      expr: (1 - max by (name, namespace, cluster)(avg_over_time(aggregator_unavailable_apiservice{job="apiserver"}[10m]))) * 100 < 85
      for: 5m
      labels:
        severity: warning
    - alert: KubeAPITerminatedRequests
      annotations:
        description: The kubernetes apiserver has terminated {{ $value | humanizePercentage }} of its incoming requests.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeapiterminatedrequests
        summary: The kubernetes apiserver has terminated {{ $value | humanizePercentage }} of its incoming requests.
      expr: sum(rate(apiserver_request_terminations_total{job="apiserver"}[10m])) / ( sum(rate(apiserver_request_total{job="apiserver"}[10m])) + sum(rate(apiserver_request_terminations_total{job="apiserver"}[10m])) ) > 0.20
      for: 5m
      labels:
        severity: warning
@@ -0,0 +1,29 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/kubernetes-system-kube-proxy.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-kubernetes-system-kube-proxy
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
  - name: kubernetes-system-kube-proxy
    rules:
    - alert: KubeProxyDown
      annotations:
        description: KubeProxy has disappeared from Prometheus target discovery.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeproxydown
        summary: Target disappeared from Prometheus target discovery.
      expr: absent(up{job="kube-proxy"} == 1)
      for: 15m
      labels:
        severity: critical
@@ -0,0 +1,140 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/kubernetes-system-kubelet.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-kubernetes-system-kubelet
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
  - name: kubernetes-system-kubelet
    rules:
    - alert: KubeNodeNotReady
      annotations:
        description: '{{ $labels.node }} has been unready for more than 15 minutes.'
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubenodenotready
        summary: Node is not ready.
      expr: kube_node_status_condition{job="kube-state-metrics",condition="Ready",status="true"} == 0
      for: 15m
      labels:
        severity: warning
    - alert: KubeNodeUnreachable
      annotations:
        description: '{{ $labels.node }} is unreachable and some workloads may be rescheduled.'
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubenodeunreachable
        summary: Node is unreachable.
      expr: (kube_node_spec_taint{job="kube-state-metrics",key="node.kubernetes.io/unreachable",effect="NoSchedule"} unless ignoring(key,value) kube_node_spec_taint{job="kube-state-metrics",key=~"ToBeDeletedByClusterAutoscaler|cloud.google.com/impending-node-termination|aws-node-termination-handler/spot-itn"}) == 1
      for: 15m
      labels:
        severity: warning
    - alert: KubeletTooManyPods
      annotations:
        description: Kubelet '{{ $labels.node }}' is running at {{ $value | humanizePercentage }} of its Pod capacity.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubelettoomanypods
        summary: Kubelet is running at capacity.
      expr: |-
        count by (cluster, node) (
          (kube_pod_status_phase{job="kube-state-metrics",phase="Running"} == 1) * on (instance,pod,namespace,cluster) group_left(node) topk by (instance,pod,namespace,cluster) (1, kube_pod_info{job="kube-state-metrics"})
        )
        /
        max by (cluster, node) (
          kube_node_status_capacity{job="kube-state-metrics",resource="pods"} != 1
        ) > 0.95
      for: 15m
      labels:
        severity: info
    - alert: KubeNodeReadinessFlapping
      annotations:
        description: The readiness status of node {{ $labels.node }} has changed {{ $value }} times in the last 15 minutes.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubenodereadinessflapping
        summary: Node readiness status is flapping.
      expr: sum(changes(kube_node_status_condition{job="kube-state-metrics",status="true",condition="Ready"}[15m])) by (cluster, node) > 2
      for: 15m
      labels:
        severity: warning
    - alert: KubeletPlegDurationHigh
      annotations:
        description: The Kubelet Pod Lifecycle Event Generator has a 99th percentile duration of {{ $value }} seconds on node {{ $labels.node }}.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletplegdurationhigh
        summary: Kubelet Pod Lifecycle Event Generator is taking too long to relist.
      expr: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile{quantile="0.99"} >= 10
      for: 5m
      labels:
        severity: warning
    - alert: KubeletPodStartUpLatencyHigh
      annotations:
        description: Kubelet Pod startup 99th percentile latency is {{ $value }} seconds on node {{ $labels.node }}.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletpodstartuplatencyhigh
        summary: Kubelet Pod startup latency is too high.
      expr: histogram_quantile(0.99, sum(rate(kubelet_pod_worker_duration_seconds_bucket{job="kubelet", metrics_path="/metrics"}[5m])) by (cluster, instance, le)) * on (cluster, instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"} > 60
      for: 15m
      labels:
        severity: warning
    - alert: KubeletClientCertificateExpiration
      annotations:
        description: Client certificate for Kubelet on node {{ $labels.node }} expires in {{ $value | humanizeDuration }}.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletclientcertificateexpiration
        summary: Kubelet client certificate is about to expire.
      expr: kubelet_certificate_manager_client_ttl_seconds < 604800
      labels:
        severity: warning
    - alert: KubeletClientCertificateExpiration
      annotations:
        description: Client certificate for Kubelet on node {{ $labels.node }} expires in {{ $value | humanizeDuration }}.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletclientcertificateexpiration
        summary: Kubelet client certificate is about to expire.
      expr: kubelet_certificate_manager_client_ttl_seconds < 86400
      labels:
        severity: critical
    - alert: KubeletServerCertificateExpiration
      annotations:
        description: Server certificate for Kubelet on node {{ $labels.node }} expires in {{ $value | humanizeDuration }}.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletservercertificateexpiration
        summary: Kubelet server certificate is about to expire.
      expr: kubelet_certificate_manager_server_ttl_seconds < 604800
      labels:
        severity: warning
    - alert: KubeletServerCertificateExpiration
      annotations:
        description: Server certificate for Kubelet on node {{ $labels.node }} expires in {{ $value | humanizeDuration }}.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletservercertificateexpiration
        summary: Kubelet server certificate is about to expire.
      expr: kubelet_certificate_manager_server_ttl_seconds < 86400
      labels:
        severity: critical
    - alert: KubeletClientCertificateRenewalErrors
      annotations:
        description: Kubelet on node {{ $labels.node }} has failed to renew its client certificate ({{ $value | humanize }} errors in the last 5 minutes).
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletclientcertificaterenewalerrors
        summary: Kubelet has failed to renew its client certificate.
      expr: increase(kubelet_certificate_manager_client_expiration_renew_errors[5m]) > 0
      for: 15m
      labels:
        severity: warning
    - alert: KubeletServerCertificateRenewalErrors
      annotations:
        description: Kubelet on node {{ $labels.node }} has failed to renew its server certificate ({{ $value | humanize }} errors in the last 5 minutes).
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletservercertificaterenewalerrors
        summary: Kubelet has failed to renew its server certificate.
      expr: increase(kubelet_server_expiration_renew_errors[5m]) > 0
      for: 15m
      labels:
        severity: warning
    - alert: KubeletDown
      annotations:
        description: Kubelet has disappeared from Prometheus target discovery.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletdown
        summary: Target disappeared from Prometheus target discovery.
      expr: absent(up{job="kubelet", metrics_path="/metrics"} == 1)
      for: 15m
      labels:
        severity: critical
@@ -0,0 +1,42 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/kubernetes-system.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-kubernetes-system
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
  - name: kubernetes-system
    rules:
    - alert: KubeVersionMismatch
      annotations:
        description: There are {{ $value }} different semantic versions of Kubernetes components running.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeversionmismatch
        summary: Different semantic versions of Kubernetes components running.
      expr: count by (cluster) (count by (git_version, cluster) (label_replace(kubernetes_build_info{job!~"kube-dns|coredns"},"git_version","$1","git_version","(v[0-9]*.[0-9]*).*"))) > 1
      for: 15m
      labels:
        severity: warning
    - alert: KubeClientErrors
      annotations:
        description: Kubernetes API server client '{{ $labels.job }}/{{ $labels.instance }}' is experiencing {{ $value | humanizePercentage }} errors.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeclienterrors
        summary: Kubernetes API server client is experiencing errors.
      expr: |-
        (sum(rate(rest_client_requests_total{job="apiserver",code=~"5.."}[5m])) by (cluster, instance, job, namespace)
          /
        sum(rate(rest_client_requests_total{job="apiserver"}[5m])) by (cluster, instance, job, namespace))
        > 0.01
      for: 15m
      labels:
        severity: warning
@@ -0,0 +1,280 @@
# Source: kube-prometheus-stack/templates/prometheus/rules-1.14/prometheus.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: prometheus-community-kube-prometheus
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  groups:
  - name: prometheus
    rules:
    - alert: PrometheusBadConfig
      annotations:
        description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed to reload its configuration.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusbadconfig
        summary: Failed Prometheus configuration reload.
      expr: |-
        # Without max_over_time, failed scrapes could create false negatives, see
        # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
        max_over_time(prometheus_config_last_reload_successful{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m]) == 0
      for: 10m
      labels:
        severity: critical
    - alert: PrometheusSDRefreshFailure
      annotations:
        description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed to refresh SD with mechanism {{$labels.mechanism}}.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheussdrefreshfailure
        summary: Failed Prometheus SD refresh.
      expr: increase(prometheus_sd_refresh_failures_total{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[10m]) > 0
      for: 20m
      labels:
        severity: warning
    - alert: PrometheusNotificationQueueRunningFull
      annotations:
        description: Alert notification queue of Prometheus {{$labels.namespace}}/{{$labels.pod}} is running full.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusnotificationqueuerunningfull
        summary: Prometheus alert notification queue predicted to run full in less than 30m.
      expr: |-
        # Without min_over_time, failed scrapes could create false negatives, see
        # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
        (
          predict_linear(prometheus_notifications_queue_length{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m], 60 * 30)
        >
          min_over_time(prometheus_notifications_queue_capacity{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m])
        )
      for: 15m
      labels:
        severity: warning
    - alert: PrometheusErrorSendingAlertsToSomeAlertmanagers
      annotations:
        description: '{{ printf "%.1f" $value }}% errors while sending alerts from Prometheus {{$labels.namespace}}/{{$labels.pod}} to Alertmanager {{$labels.alertmanager}}.'
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheuserrorsendingalertstosomealertmanagers
        summary: Prometheus has encountered more than 1% errors sending alerts to a specific Alertmanager.
      expr: |-
        (
          rate(prometheus_notifications_errors_total{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m])
        /
          rate(prometheus_notifications_sent_total{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m])
        )
        * 100
        > 1
      for: 15m
      labels:
        severity: warning
    - alert: PrometheusNotConnectedToAlertmanagers
      annotations:
        description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is not connected to any Alertmanagers.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusnotconnectedtoalertmanagers
        summary: Prometheus is not connected to any Alertmanagers.
      expr: |-
        # Without max_over_time, failed scrapes could create false negatives, see
        # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
        max_over_time(prometheus_notifications_alertmanagers_discovered{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m]) < 1
      for: 10m
      labels:
        severity: warning
    - alert: PrometheusTSDBReloadsFailing
      annotations:
        description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has detected {{$value | humanize}} reload failures over the last 3h.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheustsdbreloadsfailing
        summary: Prometheus has issues reloading blocks from disk.
      expr: increase(prometheus_tsdb_reloads_failures_total{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[3h]) > 0
      for: 4h
      labels:
        severity: warning
    - alert: PrometheusTSDBCompactionsFailing
      annotations:
        description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has detected {{$value | humanize}} compaction failures over the last 3h.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheustsdbcompactionsfailing
        summary: Prometheus has issues compacting blocks.
      expr: increase(prometheus_tsdb_compactions_failed_total{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[3h]) > 0
      for: 4h
      labels:
        severity: warning
    - alert: PrometheusNotIngestingSamples
      annotations:
        description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is not ingesting samples.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusnotingestingsamples
        summary: Prometheus is not ingesting samples.
      expr: |-
        (
          rate(prometheus_tsdb_head_samples_appended_total{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m]) <= 0
        and
          (
            sum without(scrape_job) (prometheus_target_metadata_cache_entries{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}) > 0
          or
            sum without(rule_group) (prometheus_rule_group_rules{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}) > 0
          )
        )
      for: 10m
      labels:
        severity: warning
    - alert: PrometheusDuplicateTimestamps
      annotations:
        description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is dropping {{ printf "%.4g" $value }} samples/s with different values but duplicated timestamp.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusduplicatetimestamps
        summary: Prometheus is dropping samples with duplicate timestamps.
      expr: rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m]) > 0
      for: 10m
      labels:
        severity: warning
    - alert: PrometheusOutOfOrderTimestamps
annotations:
|
||||
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is dropping {{ printf "%.4g" $value }} samples/s with timestamps arriving out of order.
|
||||
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusoutofordertimestamps
|
||||
summary: Prometheus drops samples with out-of-order timestamps.
|
||||
expr: rate(prometheus_target_scrapes_sample_out_of_order_total{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m]) > 0
|
||||
for: 10m
|
||||
labels:
|
||||
severity: warning
|
||||
- alert: PrometheusRemoteStorageFailures
|
||||
annotations:
|
||||
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} failed to send {{ printf "%.1f" $value }}% of the samples to {{ $labels.remote_name}}:{{ $labels.url }}
|
||||
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusremotestoragefailures
|
||||
summary: Prometheus fails to send samples to remote storage.
|
||||
expr: |-
|
||||
(
|
||||
(rate(prometheus_remote_storage_failed_samples_total{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m]))
|
||||
/
|
||||
(
|
||||
(rate(prometheus_remote_storage_failed_samples_total{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m]))
|
||||
+
|
||||
(rate(prometheus_remote_storage_succeeded_samples_total{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m]) or rate(prometheus_remote_storage_samples_total{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m]))
|
||||
)
|
||||
)
|
||||
* 100
|
||||
> 1
|
||||
for: 15m
|
||||
labels:
|
||||
severity: critical
|
||||
- alert: PrometheusRemoteWriteBehind
|
||||
annotations:
|
||||
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} remote write is {{ printf "%.1f" $value }}s behind for {{ $labels.remote_name}}:{{ $labels.url }}.
|
||||
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusremotewritebehind
|
||||
summary: Prometheus remote write is behind.
|
||||
expr: |-
|
||||
# Without max_over_time, failed scrapes could create false negatives, see
|
||||
# https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
|
||||
(
|
||||
max_over_time(prometheus_remote_storage_highest_timestamp_in_seconds{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m])
|
||||
- ignoring(remote_name, url) group_right
|
||||
max_over_time(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m])
|
||||
)
|
||||
> 120
|
||||
for: 15m
|
||||
labels:
|
||||
severity: critical
|
||||
- alert: PrometheusRemoteWriteDesiredShards
|
||||
annotations:
|
||||
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} remote write desired shards calculation wants to run {{ $value }} shards for queue {{ $labels.remote_name}}:{{ $labels.url }}, which is more than the max of {{ printf `prometheus_remote_storage_shards_max{instance="%s",job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}` $labels.instance | query | first | value }}.
|
||||
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusremotewritedesiredshards
|
||||
summary: Prometheus remote write desired shards calculation wants to run more than configured max shards.
|
||||
expr: |-
|
||||
# Without max_over_time, failed scrapes could create false negatives, see
|
||||
# https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
|
||||
(
|
||||
max_over_time(prometheus_remote_storage_shards_desired{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m])
|
||||
>
|
||||
max_over_time(prometheus_remote_storage_shards_max{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m])
|
||||
)
|
||||
for: 15m
|
||||
labels:
|
||||
severity: warning
|
||||
- alert: PrometheusRuleFailures
|
||||
annotations:
|
||||
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed to evaluate {{ printf "%.0f" $value }} rules in the last 5m.
|
||||
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusrulefailures
|
||||
summary: Prometheus is failing rule evaluations.
|
||||
expr: increase(prometheus_rule_evaluation_failures_total{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m]) > 0
|
||||
for: 15m
|
||||
labels:
|
||||
severity: critical
|
||||
- alert: PrometheusMissingRuleEvaluations
|
||||
annotations:
|
||||
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has missed {{ printf "%.0f" $value }} rule group evaluations in the last 5m.
|
||||
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusmissingruleevaluations
|
||||
summary: Prometheus is missing rule evaluations due to slow rule group evaluation.
|
||||
expr: increase(prometheus_rule_group_iterations_missed_total{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m]) > 0
|
||||
for: 15m
|
||||
labels:
|
||||
severity: warning
|
||||
- alert: PrometheusTargetLimitHit
|
||||
annotations:
|
||||
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has dropped {{ printf "%.0f" $value }} targets because the number of targets exceeded the configured target_limit.
|
||||
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheustargetlimithit
|
||||
summary: Prometheus has dropped targets because some scrape configs have exceeded the targets limit.
|
||||
expr: increase(prometheus_target_scrape_pool_exceeded_target_limit_total{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m]) > 0
|
||||
for: 15m
|
||||
labels:
|
||||
severity: warning
|
||||
- alert: PrometheusLabelLimitHit
|
||||
annotations:
|
||||
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has dropped {{ printf "%.0f" $value }} targets because some samples exceeded the configured label_limit, label_name_length_limit or label_value_length_limit.
|
||||
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheuslabellimithit
|
||||
summary: Prometheus has dropped targets because some scrape configs have exceeded the labels limit.
|
||||
expr: increase(prometheus_target_scrape_pool_exceeded_label_limits_total{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m]) > 0
|
||||
for: 15m
|
||||
labels:
|
||||
severity: warning
|
||||
- alert: PrometheusScrapeBodySizeLimitHit
|
||||
annotations:
|
||||
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed {{ printf "%.0f" $value }} scrapes in the last 5m because some targets exceeded the configured body_size_limit.
|
||||
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusscrapebodysizelimithit
|
||||
summary: Prometheus has dropped some targets that exceeded body size limit.
|
||||
expr: increase(prometheus_target_scrapes_exceeded_body_size_limit_total{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m]) > 0
|
||||
for: 15m
|
||||
labels:
|
||||
severity: warning
|
||||
- alert: PrometheusScrapeSampleLimitHit
|
||||
annotations:
|
||||
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed {{ printf "%.0f" $value }} scrapes in the last 5m because some targets exceeded the configured sample_limit.
|
||||
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheusscrapesamplelimithit
|
||||
summary: Prometheus has failed scrapes that have exceeded the configured sample limit.
|
||||
expr: increase(prometheus_target_scrapes_exceeded_sample_limit_total{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m]) > 0
|
||||
for: 15m
|
||||
labels:
|
||||
severity: warning
|
||||
- alert: PrometheusTargetSyncFailure
|
||||
annotations:
|
||||
description: '{{ printf "%.0f" $value }} targets in Prometheus {{$labels.namespace}}/{{$labels.pod}} have failed to sync because invalid configuration was supplied.'
|
||||
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheustargetsyncfailure
|
||||
summary: Prometheus has failed to sync targets.
|
||||
expr: increase(prometheus_target_sync_failed_total{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[30m]) > 0
|
||||
for: 5m
|
||||
labels:
|
||||
severity: critical
|
||||
- alert: PrometheusHighQueryLoad
|
||||
annotations:
|
||||
description: Prometheus {{$labels.namespace}}/{{$labels.pod}} query API has less than 20% available capacity in its query engine for the last 15 minutes.
|
||||
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheushighqueryload
|
||||
summary: Prometheus is reaching its maximum capacity serving concurrent requests.
|
||||
expr: avg_over_time(prometheus_engine_queries{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m]) / max_over_time(prometheus_engine_queries_concurrent_max{job="prometheus-community-kube-prometheus",namespace="vynil-monitor"}[5m]) > 0.8
|
||||
for: 15m
|
||||
labels:
|
||||
severity: warning
|
||||
- alert: PrometheusErrorSendingAlertsToAnyAlertmanager
|
||||
annotations:
|
||||
description: '{{ printf "%.1f" $value }}% minimum errors while sending alerts from Prometheus {{$labels.namespace}}/{{$labels.pod}} to any Alertmanager.'
|
||||
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheuserrorsendingalertstoanyalertmanager
|
||||
summary: Prometheus encounters more than 3% errors sending alerts to any Alertmanager.
|
||||
expr: |-
|
||||
min without (alertmanager) (
|
||||
rate(prometheus_notifications_errors_total{job="prometheus-community-kube-prometheus",namespace="vynil-monitor",alertmanager!~``}[5m])
|
||||
/
|
||||
rate(prometheus_notifications_sent_total{job="prometheus-community-kube-prometheus",namespace="vynil-monitor",alertmanager!~``}[5m])
|
||||
)
|
||||
* 100
|
||||
> 3
|
||||
for: 15m
|
||||
labels:
|
||||
severity: critical
|
||||
@@ -0,0 +1,67 @@
# Source: kube-prometheus-stack/templates/prometheus/prometheus.yaml
apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
  name: prometheus-community-kube-prometheus
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack-prometheus
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  image: "quay.io/prometheus/prometheus:v2.49.1"
  version: v2.49.1
  externalUrl: http://prometheus-community-kube-prometheus.vynil-monitor:9090
  paused: false
  replicas: 1
  shards: 1
  logLevel: info
  logFormat: logfmt
  listenLocal: false
  enableAdminAPI: false
  retention: "10d"
  tsdb:
    outOfOrderTimeWindow: 0s
  walCompression: true
  routePrefix: "/"
  serviceAccountName: prometheus-community-kube-prometheus
  serviceMonitorSelector:
    matchLabels:
      release: "prometheus-community"
  serviceMonitorNamespaceSelector: {}
  podMonitorSelector:
    matchLabels:
      release: "prometheus-community"
  podMonitorNamespaceSelector: {}
  probeSelector:
    matchLabels:
      release: "prometheus-community"
  probeNamespaceSelector: {}
  securityContext:
    fsGroup: 2000
    runAsGroup: 2000
    runAsNonRoot: true
    runAsUser: 1000
    seccompProfile:
      type: RuntimeDefault
  ruleNamespaceSelector: {}
  ruleSelector:
    matchLabels:
      release: "prometheus-community"
  scrapeConfigSelector:
    matchLabels:
      release: "prometheus-community"
  scrapeConfigNamespaceSelector: {}
  portName: http-web
  hostNetwork: false
@@ -0,0 +1,29 @@
# Source: kube-prometheus-stack/templates/exporters/core-dns/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: prometheus-community-kube-coredns
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack-coredns
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  jobLabel: jobLabel
  selector:
    matchLabels:
      app: kube-prometheus-stack-coredns
      release: "prometheus-community"
  namespaceSelector:
    matchNames:
    - "kube-system"
  endpoints:
  - port: http-metrics
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
@@ -0,0 +1,29 @@
# Source: kube-prometheus-stack/templates/exporters/kube-proxy/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: prometheus-community-kube-kube-proxy
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack-kube-proxy
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  jobLabel: jobLabel
  selector:
    matchLabels:
      app: kube-prometheus-stack-kube-proxy
      release: "prometheus-community"
  namespaceSelector:
    matchNames:
    - "kube-system"
  endpoints:
  - port: http-metrics
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
@@ -0,0 +1,95 @@
# Source: kube-prometheus-stack/templates/exporters/kubelet/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: prometheus-community-kube-kubelet
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack-kubelet
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  attachMetadata:
    node: false
  endpoints:
  - port: https-metrics
    scheme: https
    tlsConfig:
      caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      insecureSkipVerify: true
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
    honorLabels: true
    honorTimestamps: true
    relabelings:
    - action: replace
      sourceLabels:
      - __metrics_path__
      targetLabel: metrics_path
  - port: https-metrics
    scheme: https
    path: /metrics/cadvisor
    honorLabels: true
    honorTimestamps: true
    tlsConfig:
      caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      insecureSkipVerify: true
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
    metricRelabelings:
    - action: drop
      regex: container_cpu_(cfs_throttled_seconds_total|load_average_10s|system_seconds_total|user_seconds_total)
      sourceLabels:
      - __name__
    - action: drop
      regex: container_fs_(io_current|io_time_seconds_total|io_time_weighted_seconds_total|reads_merged_total|sector_reads_total|sector_writes_total|writes_merged_total)
      sourceLabels:
      - __name__
    - action: drop
      regex: container_memory_(mapped_file|swap)
      sourceLabels:
      - __name__
    - action: drop
      regex: container_(file_descriptors|tasks_state|threads_max)
      sourceLabels:
      - __name__
    - action: drop
      regex: container_spec.*
      sourceLabels:
      - __name__
    - action: drop
      regex: .+;
      sourceLabels:
      - id
      - pod
    relabelings:
    - action: replace
      sourceLabels:
      - __metrics_path__
      targetLabel: metrics_path
  - port: https-metrics
    scheme: https
    path: /metrics/probes
    honorLabels: true
    honorTimestamps: true
    tlsConfig:
      caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      insecureSkipVerify: true
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
    relabelings:
    - action: replace
      sourceLabels:
      - __metrics_path__
      targetLabel: metrics_path
  jobLabel: k8s-app
  namespaceSelector:
    matchNames:
    - kube-system
  selector:
    matchLabels:
      app.kubernetes.io/name: kubelet
      k8s-app: kubelet
@@ -0,0 +1,32 @@
# Source: kube-prometheus-stack/templates/prometheus/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: prometheus-community-kube-prometheus
  namespace: vynil-monitor
  labels:
    app: kube-prometheus-stack-prometheus
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
spec:
  selector:
    matchLabels:
      app: kube-prometheus-stack-prometheus
      release: "prometheus-community"
      self-monitor: "true"
  namespaceSelector:
    matchNames:
    - "vynil-monitor"
  endpoints:
  - port: http-web
    path: "/metrics"
  - port: reloader-web
    scheme: http
    path: "/metrics"
75
monitor/prometheus/presentation.tf
Normal file
@@ -0,0 +1,75 @@
locals {
  dns-name  = "${var.sub-domain}.${var.domain-name}"
  dns-names = [local.dns-name]
  app-name  = var.component == var.instance ? var.instance : format("%s-%s", var.component, var.instance)
  icon      = "icon.svg"
  request_headers = {
    "Content-Type" = "application/json"
    Authorization  = "Bearer ${data.kubernetes_secret_v1.authentik.data["AUTHENTIK_BOOTSTRAP_TOKEN"]}"
  }
  service = {
    "name" = "prometheus-community-kube-prometheus"
    "port" = {
      "number" = 9090
    }
  }
}

module "ingress" {
  source        = "git::https://git.solidite.fr/vynil/kydah-modules.git//ingress"
  component     = ""
  instance      = var.instance
  namespace     = var.namespace
  issuer        = var.issuer
  ingress_class = var.ingress-class
  labels        = local.common-labels
  dns_names     = local.dns-names
  middlewares   = ["forward-${local.app-name}"]
  services      = [local.service]
  providers = {
    kubectl = kubectl
  }
}

module "application" {
  source            = "git::https://git.solidite.fr/vynil/kydah-modules.git//application"
  component         = var.component
  instance          = var.instance
  app_group         = var.app-group
  dns_name          = local.dns-name
  icon              = local.icon
  protocol_provider = module.forward.provider-id
  providers = {
    authentik = authentik
  }
}

provider "restapi" {
  uri                  = "http://authentik.${var.domain}-auth.svc/api/v3/"
  headers              = local.request_headers
  create_method        = "PATCH"
  update_method        = "PATCH"
  destroy_method       = "PATCH"
  write_returns_object = true
  id_attribute         = "name"
}

module "forward" {
  source          = "git::https://git.solidite.fr/vynil/kydah-modules.git//forward"
  component       = var.component
  instance        = var.instance
  domain          = var.domain
  namespace       = var.namespace
  ingress_class   = var.ingress-class
  labels          = local.common-labels
  dns_names       = local.dns-names
  service         = local.service
  icon            = local.icon
  request_headers = local.request_headers
  providers = {
    restapi   = restapi
    http      = http
    kubectl   = kubectl
    authentik = authentik
  }
}
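For a concrete sense of how the locals above compose, here is a minimal, self-contained sketch with hypothetical input values (none of these defaults come from this module): dns-name joins the sub-domain and domain, and because component equals instance, app-name collapses to the bare instance name, which in turn names the "forward-..." middleware wired into the ingress module.

# Minimal sketch with hypothetical inputs; only illustrates the expressions above.
variable "sub-domain" { default = "prometheus" }
variable "domain-name" { default = "example.com" }
variable "component" { default = "prometheus" }
variable "instance" { default = "prometheus" }

locals {
  dns-name = "${var.sub-domain}.${var.domain-name}" # => "prometheus.example.com"
  app-name = var.component == var.instance ? var.instance : format("%s-%s", var.component, var.instance) # => "prometheus"
}

output "middleware" {
  value = "forward-${local.app-name}" # => "forward-prometheus"
}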
@@ -0,0 +1,23 @@
# Source: kube-prometheus-stack/templates/prometheus/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: prometheus-community-kube-prometheus
  labels:
    app: kube-prometheus-stack-prometheus
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: prometheus-community-kube-prometheus
subjects:
- kind: ServiceAccount
  name: prometheus-community-kube-prometheus
  namespace: vynil-monitor
@@ -0,0 +1,33 @@
# Source: kube-prometheus-stack/templates/prometheus/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: prometheus-community-kube-prometheus
  labels:
    app: kube-prometheus-stack-prometheus
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
rules:
# These permissions are not in the kube-prometheus repo;
# they're grabbed from https://github.com/prometheus/prometheus/blob/master/documentation/examples/rbac-setup.yml
- apiGroups: [""]
  resources:
  - nodes
  - nodes/metrics
  - services
  - endpoints
  - pods
  verbs: ["get", "list", "watch"]
- apiGroups:
  - "networking.k8s.io"
  resources:
  - ingresses
  verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics", "/metrics/cadvisor"]
  verbs: ["get"]
45
monitor/prometheus/ressources_no_ns.tf
Normal file
@@ -0,0 +1,45 @@
# first loop through resources in ids_prio[0]
resource "kustomization_resource" "pre_no_ns" {
  for_each = data.kustomization_overlay.data_no_ns.ids_prio[0]

  # mark Secret manifests sensitive so their content stays out of plan output
  manifest = (
    contains(["_/Secret"], regex("(?P<group_kind>.*/.*)/.*/.*", each.value)["group_kind"])
    ? sensitive(data.kustomization_overlay.data_no_ns.manifests[each.value])
    : data.kustomization_overlay.data_no_ns.manifests[each.value]
  )
}

# then loop through resources in ids_prio[1]
# and set an explicit depends_on on kustomization_resource.pre_no_ns;
# wait up to 5 minutes for any deployment or daemonset to become ready
resource "kustomization_resource" "main_no_ns" {
  for_each = data.kustomization_overlay.data_no_ns.ids_prio[1]

  manifest = (
    contains(["_/Secret"], regex("(?P<group_kind>.*/.*)/.*/.*", each.value)["group_kind"])
    ? sensitive(data.kustomization_overlay.data_no_ns.manifests[each.value])
    : data.kustomization_overlay.data_no_ns.manifests[each.value]
  )
  wait = true
  timeouts {
    create = "5m"
    update = "5m"
  }

  depends_on = [kustomization_resource.pre_no_ns]
}

# finally, loop through resources in ids_prio[2]
# and set an explicit depends_on on kustomization_resource.main_no_ns
resource "kustomization_resource" "post_no_ns" {
  for_each = data.kustomization_overlay.data_no_ns.ids_prio[2]

  manifest = (
    contains(["_/Secret"], regex("(?P<group_kind>.*/.*)/.*/.*", each.value)["group_kind"])
    ? sensitive(data.kustomization_overlay.data_no_ns.manifests[each.value])
    : data.kustomization_overlay.data_no_ns.manifests[each.value]
  )

  depends_on = [kustomization_resource.main_no_ns]
}
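The contains/regex guard above keys off the kustomization provider's resource IDs, which take the form "group/Kind/namespace/name" (with "_" standing in for the core API group). A minimal sketch of how the group_kind capture resolves, using a hypothetical ID that is not emitted by this configuration:

# Minimal sketch; "example_id" is a hypothetical resource ID for illustration.
locals {
  example_id = "_/Secret/vynil-monitor/grafana-admin"

  # The named capture grabs everything before the last two "/"-separated
  # segments, i.e. "_/Secret", which contains(["_/Secret"], ...) then matches
  # to decide that the manifest should be wrapped in sensitive().
  example_group_kind = regex("(?P<group_kind>.*/.*)/.*/.*", local.example_id)["group_kind"]
}

output "example_group_kind" {
  value = local.example_group_kind # => "_/Secret"
}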
File diff suppressed because one or more lines are too long
@@ -0,0 +1,38 @@
# Source: kube-prometheus-stack/templates/grafana/configmaps-datasources.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-community-kube-grafana-datasource
  namespace: vynil-monitor
  labels:
    grafana_datasource: "1"
    app: kube-prometheus-stack-grafana
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/instance: prometheus-community
    app.kubernetes.io/version: "56.0.2"
    app.kubernetes.io/part-of: kube-prometheus-stack
    chart: kube-prometheus-stack-56.0.2
    release: "prometheus-community"
    heritage: "Helm"
data:
  datasource.yaml: |-
    apiVersion: 1
    datasources:
    - name: Prometheus
      type: prometheus
      uid: prometheus
      url: http://prometheus-community-kube-prometheus.vynil-monitor:9090/
      access: proxy
      isDefault: true
      jsonData:
        httpMethod: POST
        timeInterval: 30s
    - name: Alertmanager
      type: alertmanager
      uid: alertmanager
      url: http://prometheus-community-kube-alertmanager.vynil-monitor:9093/
      access: proxy
      jsonData:
        handleGrafanaManagedAlerts: false
        implementation: prometheus
File diff suppressed because one or more lines are too long
Some files were not shown because too many files have changed in this diff