Initial release

2024-03-19 13:13:53 +01:00
commit 451fdb09fc
391 changed files with 184309 additions and 0 deletions

@@ -0,0 +1,80 @@
# Source: node-problem-detector/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: npd-node-problem-detector
  labels:
    app.kubernetes.io/name: node-problem-detector
    helm.sh/chart: node-problem-detector-2.3.12
    app.kubernetes.io/instance: npd
    app.kubernetes.io/managed-by: Helm
  namespace: vynil-tools
spec:
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: node-problem-detector
      app.kubernetes.io/instance: npd
      app: node-problem-detector
  template:
    metadata:
      labels:
        app.kubernetes.io/name: node-problem-detector
        app.kubernetes.io/instance: npd
        app: node-problem-detector
      annotations:
        checksum/config: 871f3a539e0646ffe0c886c946c28fcd1ec9018ad31a53aafc732b71edee895b
    spec:
      serviceAccountName: npd-node-problem-detector
      hostNetwork: false
      hostPID: false
      terminationGracePeriodSeconds: 30
      priorityClassName: "system-node-critical"
      containers:
        - name: node-problem-detector
          image: "registry.k8s.io/node-problem-detector/node-problem-detector:v0.8.15"
          imagePullPolicy: "IfNotPresent"
          command:
            - "/bin/sh"
            - "-c"
            - "exec /node-problem-detector --logtostderr --config.system-log-monitor=/config/kernel-monitor.json,/config/docker-monitor.json --prometheus-address=0.0.0.0 --prometheus-port=20257 --k8s-exporter-heartbeat-period=5m0s "
          securityContext:
            privileged: true
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: log
              mountPath: /var/log/
              readOnly: true
            - name: localtime
              mountPath: /etc/localtime
              readOnly: true
            - name: custom-config
              mountPath: /custom-config
              readOnly: true
          ports:
            - containerPort: 20257
              name: exporter
          resources:
            {}
      tolerations:
        - effect: NoSchedule
          operator: Exists
      volumes:
        - name: log
          hostPath:
            path: /var/log/
        - name: localtime
          hostPath:
            path: /etc/localtime
            type: FileOrCreate
        - name: custom-config
          configMap:
            name: npd-node-problem-detector-custom-config
            defaultMode: 493
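
The container exposes Prometheus metrics on the named `exporter` port (20257), but no Service manifest appears among the files in this section. A minimal sketch of a Service that would let a scraper reach the exporter, assuming you want one; the Service name is illustrative and not rendered by the chart:

apiVersion: v1
kind: Service
metadata:
  # illustrative name; not part of this commit's output
  name: npd-node-problem-detector-metrics
  namespace: vynil-tools
  labels:
    app.kubernetes.io/name: node-problem-detector
    app.kubernetes.io/instance: npd
spec:
  # matches the pod labels set in the DaemonSet template above
  selector:
    app.kubernetes.io/name: node-problem-detector
    app.kubernetes.io/instance: npd
    app: node-problem-detector
  ports:
    - name: exporter
      port: 20257
      targetPort: exporter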

@@ -0,0 +1,57 @@
locals {
  common-labels = {
    "vynil.solidite.fr/owner-name"      = var.instance
    "vynil.solidite.fr/owner-namespace" = var.namespace
    "vynil.solidite.fr/owner-category"  = var.category
    "vynil.solidite.fr/owner-component" = var.component
    "app.kubernetes.io/managed-by"      = "vynil"
    "app.kubernetes.io/name"            = var.component
    "app.kubernetes.io/instance"        = var.instance
  }
  rb-patch = <<-EOF
  - op: replace
    path: /subjects/0/namespace
    value: "${var.namespace}"
  EOF
}

data "kustomization_overlay" "data" {
  namespace     = var.namespace
  common_labels = local.common-labels
  resources     = [for file in fileset(path.module, "*.yaml") : file if file != "index.yaml" && length(regexall("ClusterRole", file)) < 1]
  images {
    name     = "registry.k8s.io/node-problem-detector/node-problem-detector"
    new_name = "${var.images.node-problem-detector.registry}/${var.images.node-problem-detector.repository}"
    new_tag  = "${var.images.node-problem-detector.tag}"
  }
  patches {
    target {
      kind = "DaemonSet"
      name = "npd-node-problem-detector"
    }
    patch = <<-EOF
    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
      name: npd-node-problem-detector
    spec:
      template:
        spec:
          containers:
            - name: node-problem-detector
              imagePullPolicy: "${var.images.node-problem-detector.pull_policy}"
    EOF
  }
}

data "kustomization_overlay" "data_no_ns" {
  common_labels = local.common-labels
  resources     = [for file in fileset(path.module, "*.yaml") : file if file != "index.yaml" && length(regexall("ClusterRole", file)) > 0]
  patches {
    target {
      kind = "ClusterRoleBinding"
      name = "npd-node-problem-detector"
    }
    patch = local.rb-patch
  }
}
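
For reference, `rb-patch` is a JSON patch (RFC 6902): applied to the ClusterRoleBinding shipped further down (whose subject namespace is hard-coded to `vynil-tools` by the Helm render), it rewrites the ServiceAccount subject to whatever namespace the component is installed into. With an illustrative `var.namespace = "monitoring"`, the patched subject list would read:

subjects:
  - kind: ServiceAccount
    name: npd-node-problem-detector
    namespace: monitoring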

@@ -0,0 +1,50 @@
---
apiVersion: vinyl.solidite.fr/v1beta1
kind: Component
category: monitor
metadata:
  name: node-problem-detector
description: node-problem-detector aims to make various node problems visible to the upstream layers in the cluster management stack.
options:
  images:
    default:
      node-problem-detector:
        pull_policy: IfNotPresent
        registry: registry.k8s.io
        repository: node-problem-detector/node-problem-detector
        tag: v0.8.15
    examples:
      - node-problem-detector:
          pull_policy: IfNotPresent
          registry: registry.k8s.io
          repository: node-problem-detector/node-problem-detector
          tag: v0.8.15
    properties:
      node-problem-detector:
        default:
          pull_policy: IfNotPresent
          registry: registry.k8s.io
          repository: node-problem-detector/node-problem-detector
          tag: v0.8.15
        properties:
          pull_policy:
            default: IfNotPresent
            enum:
              - Always
              - Never
              - IfNotPresent
            type: string
          registry:
            default: registry.k8s.io
            type: string
          repository:
            default: node-problem-detector/node-problem-detector
            type: string
          tag:
            default: v0.8.15
            type: string
        type: object
    type: object
dependencies: []
providers: null
tfaddtype: null
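
Since every field in the schema above carries a default, an installer only needs to override what differs. A hedged example of an `images` option block that pulls the same image from a private mirror; the registry hostname is made up for illustration:

images:
  node-problem-detector:
    registry: mirror.example.com
    repository: node-problem-detector/node-problem-detector
    tag: v0.8.15
    pull_policy: IfNotPresent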

@@ -0,0 +1,18 @@
# Source: node-problem-detector/templates/clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: npd-node-problem-detector
  labels:
    app.kubernetes.io/name: node-problem-detector
    helm.sh/chart: node-problem-detector-2.3.12
    app.kubernetes.io/instance: npd
    app.kubernetes.io/managed-by: Helm
subjects:
  - kind: ServiceAccount
    name: npd-node-problem-detector
    namespace: vynil-tools
roleRef:
  kind: ClusterRole
  name: npd-node-problem-detector
  apiGroup: rbac.authorization.k8s.io

@@ -0,0 +1,31 @@
# Source: node-problem-detector/templates/clusterrole.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: npd-node-problem-detector
  labels:
    app.kubernetes.io/name: node-problem-detector
    helm.sh/chart: node-problem-detector-2.3.12
    app.kubernetes.io/instance: npd
    app.kubernetes.io/managed-by: Helm
rules:
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
      - update
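
These rules match what the detector does at runtime: read its own node, patch problem conditions into the node status, and emit events for temporary problems. For illustration, the stock kernel monitor (kernel-monitor.json, loaded by the DaemonSet command above) maintains a node condition of roughly this shape, per upstream node-problem-detector defaults:

status:
  conditions:
    - type: KernelDeadlock
      status: "False"
      reason: KernelHasNoDeadlock
      message: kernel has no deadlock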

@@ -0,0 +1,45 @@
# first loop through resources in ids_prio[0]
resource "kustomization_resource" "pre_no_ns" {
  for_each = data.kustomization_overlay.data_no_ns.ids_prio[0]

  manifest = (
    contains(["_/Secret"], regex("(?P<group_kind>.*/.*)/.*/.*", each.value)["group_kind"])
    ? sensitive(data.kustomization_overlay.data_no_ns.manifests[each.value])
    : data.kustomization_overlay.data_no_ns.manifests[each.value]
  )
}

# then loop through resources in ids_prio[1],
# set an explicit depends_on on kustomization_resource.pre_no_ns,
# and wait up to 5 minutes for any deployment or daemonset to become ready
resource "kustomization_resource" "main_no_ns" {
  for_each = data.kustomization_overlay.data_no_ns.ids_prio[1]

  manifest = (
    contains(["_/Secret"], regex("(?P<group_kind>.*/.*)/.*/.*", each.value)["group_kind"])
    ? sensitive(data.kustomization_overlay.data_no_ns.manifests[each.value])
    : data.kustomization_overlay.data_no_ns.manifests[each.value]
  )

  wait = true
  timeouts {
    create = "5m"
    update = "5m"
  }

  depends_on = [kustomization_resource.pre_no_ns]
}

# finally, loop through resources in ids_prio[2]
# and set an explicit depends_on on kustomization_resource.main_no_ns
resource "kustomization_resource" "post_no_ns" {
  for_each = data.kustomization_overlay.data_no_ns.ids_prio[2]

  manifest = (
    contains(["_/Secret"], regex("(?P<group_kind>.*/.*)/.*/.*", each.value)["group_kind"])
    ? sensitive(data.kustomization_overlay.data_no_ns.manifests[each.value])
    : data.kustomization_overlay.data_no_ns.manifests[each.value]
  )

  depends_on = [kustomization_resource.main_no_ns]
}
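
The `ids_prio` sets split resource IDs into three apply-priority groups (roughly: namespaces and CRDs first, regular resources second, webhook and API service registrations last). Each ID has the form group/Kind/namespace/name, with `_` standing in for the core API group or a cluster-wide scope, which is exactly what the `group_kind` regex extracts. Illustrative IDs from this component (the Secret name is hypothetical; this overlay ships no Secret):

# rbac.authorization.k8s.io/ClusterRole/_/npd-node-problem-detector  -> group_kind "rbac.authorization.k8s.io/ClusterRole"
# _/Secret/vynil-tools/example-secret                                -> group_kind "_/Secret" (would be marked sensitive)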

@@ -0,0 +1,13 @@
# Source: node-problem-detector/templates/custom-config-configmap.yaml
apiVersion: v1
data:
  {}
kind: ConfigMap
metadata:
  name: npd-node-problem-detector-custom-config
  labels:
    app.kubernetes.io/name: node-problem-detector
    helm.sh/chart: node-problem-detector-2.3.12
    app.kubernetes.io/instance: npd
    app.kubernetes.io/managed-by: Helm
  namespace: vynil-tools
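
The `data` map ships empty; it exists so the DaemonSet's `/custom-config` mount is valid. A sketch of how it could be populated with an extra monitor — the file name, reason, and script path are assumptions modeled on node-problem-detector's custom-plugin-monitor format, and loading it would also require appending --config.custom-plugin-monitor=/custom-config/custom-plugin-monitor.json to the container command:

data:
  custom-plugin-monitor.json: |
    {
      "plugin": "custom",
      "pluginConfig": { "invoke_interval": "30s", "timeout": "5s" },
      "source": "example-custom-plugin-monitor",
      "conditions": [],
      "rules": [
        {
          "type": "temporary",
          "reason": "ExampleProblem",
          "path": "/custom-config/check-example.sh"
        }
      ]
    }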

@@ -0,0 +1,12 @@
---
# Source: node-problem-detector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: npd-node-problem-detector
  labels:
    app.kubernetes.io/name: node-problem-detector
    helm.sh/chart: node-problem-detector-2.3.12
    app.kubernetes.io/instance: npd
    app.kubernetes.io/managed-by: Helm
  namespace: vynil-tools