This commit is contained in:
2024-05-15 10:16:29 +02:00
parent 61a06511a6
commit 39e32d3418
24 changed files with 70 additions and 702 deletions

View File

@@ -0,0 +1,17 @@
// Pre-installation checks for this app (Rhai script executed by the vynil agent).
// The configured base domain is stored as a script-level constant; Rhai functions
// cannot see script-scope constants directly, so they read it via `global::DOMAIN`.
const DOMAIN = config.domain;
// Assert that the namespace named after the base domain exists.
// NOTE(review): `have_namespace` is a host-provided helper — behavior assumed
// from its name; confirm against the script runner.
fn check_domain() {
assert(have_namespace(`${global::DOMAIN}`), `There is no ${global::DOMAIN} namespace`);
}
// Assert that the "<domain>-auth" namespace exists and contains both an
// "authentik" installation and its "authentik" secret.
// NOTE(review): `have_install`/`have_secret` are host-provided helpers — confirm semantics.
fn check_authentik() {
assert(have_namespace(`${global::DOMAIN}-auth`), `There is no ${global::DOMAIN}-auth namespace`);
assert(have_install(`${global::DOMAIN}-auth`, "authentik"), `No authentik installation in ${global::DOMAIN}-auth`);
assert(have_secret(`${global::DOMAIN}-auth`, "authentik"), `No authentik secret in ${global::DOMAIN}-auth`);
}
// Assert that the "authentik-forward" companion installation is present in "<domain>-auth".
fn check_authentik_forward() {
assert(have_install(`${global::DOMAIN}-auth`, "authentik-forward"), `No authentik-forward installation in ${global::DOMAIN}-auth`);
}
// Entry point invoked before installation: run every prerequisite check.
// Any failed assert aborts the install with the message above.
fn pre_check() {
check_domain();
check_authentik();
check_authentik_forward();
}

View File

@@ -95,6 +95,7 @@ options:
volume:
accessMode: ReadWriteOnce
class: ''
maxSize: 100Gi
size: 20Gi
type: Filesystem
description: Configure this app storage
@@ -102,6 +103,7 @@ options:
- volume:
accessMode: ReadWriteOnce
class: ''
maxSize: 100Gi
size: 20Gi
type: Filesystem
properties:
@@ -109,6 +111,7 @@ options:
default:
accessMode: ReadWriteOnce
class: ''
maxSize: 100Gi
size: 20Gi
type: Filesystem
properties:
@@ -122,6 +125,9 @@ options:
class:
default: ''
type: string
maxSize:
default: 100Gi
type: string
size:
default: 20Gi
type: string

View File

@@ -12,6 +12,7 @@ locals {
}:{})
}
resource "kubectl_manifest" "pvc" {
ignore_fields = ["spec.resources.requests.storage"]
yaml_body = <<-EOF
apiVersion: v1
kind: PersistentVolumeClaim
@@ -19,6 +20,8 @@ resource "kubectl_manifest" "pvc" {
name: "${var.component}-${var.instance}"
namespace: "${var.namespace}"
labels: ${jsonencode(local.common_labels)}
annotations:
resize.kubesphere.io/storage_limit: "${var.storage.volume.maxSize}"
spec: ${jsonencode(local.pvc_spec)}
EOF
}

13
apps/dbgate/check.rhai Normal file
View File

@@ -0,0 +1,13 @@
// Pre-installation checks for the dbgate app (Rhai script executed by the vynil agent).
// The configured base domain is stored as a script-level constant; Rhai functions
// read it through the `global` namespace as `global::DOMAIN`.
const DOMAIN = config.domain;
// Assert that the namespace named after the base domain exists.
// NOTE(review): `have_namespace` is a host-provided helper — behavior assumed
// from its name; confirm against the script runner.
fn check_domain() {
assert(have_namespace(`${global::DOMAIN}`), `There is no ${global::DOMAIN} namespace`);
}
// Assert that the "<domain>-auth" namespace exists and contains both an
// "authentik" installation and its "authentik" secret (dbgate delegates auth to it).
// NOTE(review): `have_install`/`have_secret` are host-provided helpers — confirm semantics.
fn check_authentik() {
assert(have_namespace(`${global::DOMAIN}-auth`), `There is no ${global::DOMAIN}-auth namespace`);
assert(have_install(`${global::DOMAIN}-auth`, "authentik"), `No authentik installation in ${global::DOMAIN}-auth`);
assert(have_secret(`${global::DOMAIN}-auth`, "authentik"), `No authentik secret in ${global::DOMAIN}-auth`);
}
// Entry point invoked before installation: run every prerequisite check.
// Any failed assert aborts the install with the message above.
fn pre_check() {
check_domain();
check_authentik();
}

View File

@@ -176,6 +176,7 @@ options:
volume:
accessMode: ReadWriteOnce
class: ''
maxSize: 10Gi
size: 1Gi
type: Filesystem
description: Configure this app storage
@@ -183,6 +184,7 @@ options:
- volume:
accessMode: ReadWriteOnce
class: ''
maxSize: 10Gi
size: 1Gi
type: Filesystem
properties:
@@ -190,6 +192,7 @@ options:
default:
accessMode: ReadWriteOnce
class: ''
maxSize: 10Gi
size: 1Gi
type: Filesystem
properties:
@@ -203,6 +206,9 @@ options:
class:
default: ''
type: string
maxSize:
default: 10Gi
type: string
size:
default: 1Gi
type: string

View File

@@ -12,6 +12,7 @@ locals {
}:{})
}
resource "kubectl_manifest" "pvc" {
ignore_fields = ["spec.resources.requests.storage"]
yaml_body = <<-EOF
apiVersion: v1
kind: PersistentVolumeClaim
@@ -19,6 +20,8 @@ resource "kubectl_manifest" "pvc" {
name: "${var.component}-${var.instance}"
namespace: "${var.namespace}"
labels: ${jsonencode(local.common_labels)}
annotations:
resize.kubesphere.io/storage_limit: "${var.storage.volume.maxSize}"
spec: ${jsonencode(local.pvc_spec)}
EOF
}

View File

@@ -406,6 +406,7 @@ options:
volume:
accessMode: ReadWriteOnce
class: ''
maxSize: 100Gi
size: 10Gi
type: Filesystem
description: Configure this app storage
@@ -417,6 +418,7 @@ options:
volume:
accessMode: ReadWriteOnce
class: ''
maxSize: 100Gi
size: 10Gi
type: Filesystem
properties:
@@ -440,6 +442,7 @@ options:
default:
accessMode: ReadWriteOnce
class: ''
maxSize: 100Gi
size: 10Gi
type: Filesystem
properties:
@@ -453,6 +456,9 @@ options:
class:
default: ''
type: string
maxSize:
default: 100Gi
type: string
size:
default: 10Gi
type: string

View File

@@ -12,6 +12,7 @@ locals {
}:{})
}
resource "kubectl_manifest" "pvc" {
ignore_fields = ["spec.resources.requests.storage"]
yaml_body = <<-EOF
apiVersion: v1
kind: PersistentVolumeClaim
@@ -20,6 +21,7 @@ resource "kubectl_manifest" "pvc" {
namespace: "${var.namespace}"
annotations:
k8up.io/backup: "true"
resize.kubesphere.io/storage_limit: "${var.storage.volume.maxSize}"
labels: ${jsonencode(local.common_labels)}
spec: ${jsonencode(local.pvc_spec)}
EOF

View File

@@ -164,6 +164,7 @@ data "kustomization_overlay" "data" {
name: gitea-shared-storage
annotations:
k8up.io/backup: "true"
resize.kubesphere.io/storage_limit: "${var.storage.volume.maxSize}"
spec: ${jsonencode(local.pvc_spec)}
EOF
}

View File

@@ -423,6 +423,7 @@ options:
volume:
accessMode: ReadWriteOnce
class: ''
maxSize: 100Gi
size: 20Gi
type: Filesystem
description: Configure this app storage
@@ -434,6 +435,7 @@ options:
volume:
accessMode: ReadWriteOnce
class: ''
maxSize: 100Gi
size: 20Gi
type: Filesystem
properties:
@@ -457,6 +459,7 @@ options:
default:
accessMode: ReadWriteOnce
class: ''
maxSize: 100Gi
size: 20Gi
type: Filesystem
properties:
@@ -470,6 +473,9 @@ options:
class:
default: ''
type: string
maxSize:
default: 100Gi
type: string
size:
default: 20Gi
type: string

View File

@@ -68,6 +68,7 @@ data "kustomization_overlay" "data" {
name: grafana
annotations:
k8up.io/backup: "true"
resize.kubesphere.io/storage_limit: "${var.storage.volume.maxSize}"
spec: ${jsonencode(local.pvc_spec)}
EOF
}

View File

@@ -138,6 +138,7 @@ options:
volume:
accessMode: ReadWriteOnce
class: ''
maxSize: 100Gi
size: 10Gi
type: Filesystem
description: Configure this app storage
@@ -145,6 +146,7 @@ options:
- volume:
accessMode: ReadWriteOnce
class: ''
maxSize: 100Gi
size: 10Gi
type: Filesystem
properties:
@@ -152,6 +154,7 @@ options:
default:
accessMode: ReadWriteOnce
class: ''
maxSize: 100Gi
size: 10Gi
type: Filesystem
properties:
@@ -165,6 +168,9 @@ options:
class:
default: ''
type: string
maxSize:
default: 100Gi
type: string
size:
default: 10Gi
type: string

View File

@@ -1,124 +0,0 @@
# Source: loki/templates/single-binary/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: loki
namespace: vynil-monitor
labels:
helm.sh/chart: loki-5.43.3
app.kubernetes.io/name: loki
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "2.9.4"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: single-binary
app.kubernetes.io/part-of: memberlist
spec:
replicas: 1
podManagementPolicy: Parallel
updateStrategy:
rollingUpdate:
partition: 0
serviceName: loki-headless
revisionHistoryLimit: 10
persistentVolumeClaimRetentionPolicy:
whenDeleted: Delete
whenScaled: Delete
selector:
matchLabels:
app.kubernetes.io/name: loki
app.kubernetes.io/instance: loki
app.kubernetes.io/component: single-binary
template:
metadata:
annotations:
checksum/config: d690504a8775204dfc634b78b0b50513488be16b49ce8e6973f3f267c3070cb0
labels:
app.kubernetes.io/name: loki
app.kubernetes.io/instance: loki
app.kubernetes.io/component: single-binary
app.kubernetes.io/part-of: memberlist
spec:
serviceAccountName: loki
automountServiceAccountToken: true
enableServiceLinks: true
securityContext:
fsGroup: 10001
runAsGroup: 10001
runAsNonRoot: true
runAsUser: 10001
terminationGracePeriodSeconds: 30
containers:
- name: loki
image: docker.io/grafana/loki:2.9.4
imagePullPolicy: IfNotPresent
args:
- -config.file=/etc/loki/config/config.yaml
- -target=all
ports:
- name: http-metrics
containerPort: 3100
protocol: TCP
- name: grpc
containerPort: 9095
protocol: TCP
- name: http-memberlist
containerPort: 7946
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
readinessProbe:
httpGet:
path: /ready
port: http-metrics
initialDelaySeconds: 30
timeoutSeconds: 1
volumeMounts:
- name: tmp
mountPath: /tmp
- name: config
mountPath: /etc/loki/config
- name: runtime-config
mountPath: /etc/loki/runtime-config
- name: storage
mountPath: /var/loki
resources:
{}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app.kubernetes.io/name: loki
app.kubernetes.io/instance: loki
app.kubernetes.io/component: single-binary
topologyKey: kubernetes.io/hostname
volumes:
- name: tmp
emptyDir: {}
- name: config
configMap:
name: loki
items:
- key: "config.yaml"
path: "config.yaml"
- name: runtime-config
configMap:
name: loki-runtime
volumeClaimTemplates:
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: storage
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "17Gi"

View File

@@ -1,91 +0,0 @@
# Publishes a Grafana datasource definition for Loki: a ConfigMap carrying the
# `grafana_datasource = "1"` label so the Grafana sidecar picks it up and
# registers the in-cluster Loki endpoint (http://loki.<namespace>.svc:3100).
# The manifest body is a YAML heredoc applied via the kubectl provider.
resource "kubectl_manifest" "datasource" {
yaml_body = <<-EOF
apiVersion: v1
kind: ConfigMap
metadata:
name: loki-datasource
namespace: "${var.namespace}"
labels: ${jsonencode(merge(local.common_labels, {"grafana_datasource" = "1"}))}
data:
loki-datasource.yaml: |-
apiVersion: 1
datasources:
- name: Loki
type: loki
access: proxy
url: "http://loki.${var.namespace}.svc:3100"
version: 1
isDefault: false
jsonData:
{}
EOF
}
# Renders Loki's main configuration file (config.yaml) into a ConfigMap mounted
# by the Loki StatefulSet. Single-binary/filesystem setup: replication_factor 1,
# local chunk/rule storage under /var/loki, boltdb-shipper index store, and the
# ruler wired to the Alertmanager service passed in via var.alertmanager.
resource "kubectl_manifest" "config" {
yaml_body = <<-EOF
apiVersion: v1
kind: ConfigMap
metadata:
name: loki
namespace: "${var.namespace}"
labels: ${jsonencode(local.common_labels)}
data:
config.yaml: |
auth_enabled: false
common:
compactor_address: 'loki'
path_prefix: /var/loki
replication_factor: 1
storage:
filesystem:
chunks_directory: /var/loki/chunks
rules_directory: /var/loki/rules
frontend:
scheduler_address: ""
frontend_worker:
scheduler_address: ""
index_gateway:
mode: ring
limits_config:
max_cache_freshness_per_query: 10m
reject_old_samples: true
reject_old_samples_max_age: 168h
split_queries_by_interval: 15m
memberlist:
join_members:
- loki-memberlist
query_range:
align_queries_with_step: true
ruler:
storage:
type: local
local:
directory: /tmp/rules
rule_path: /tmp/scratch
alertmanager_url: http://${var.alertmanager}:9093
ring:
kvstore:
store: inmemory
enable_api: true
runtime_config:
file: /etc/loki/runtime-config/runtime-config.yaml
schema_config:
configs:
- from: "2022-01-11"
index:
period: 24h
prefix: loki_index_
object_store: filesystem
schema: v12
store: boltdb-shipper
server:
grpc_listen_port: 9095
http_listen_port: 3100
storage_config:
hedging:
at: 250ms
max_per_second: 20
up_to: 3
tracing:
enabled: false
EOF
}

View File

@@ -1,77 +0,0 @@
# Shared locals for the loki component module.
locals {
# Standard vynil ownership labels stamped on every generated resource.
common_labels = {
"vynil.solidite.fr/owner-name" = var.instance
"vynil.solidite.fr/owner-namespace" = var.namespace
"vynil.solidite.fr/owner-category" = var.category
"vynil.solidite.fr/owner-component" = var.component
"app.kubernetes.io/managed-by" = "vynil"
"app.kubernetes.io/instance" = var.instance
}
# PVC spec derived from the storage option: access mode, volume mode and
# requested size; storageClassName is merged in only when a class is set
# (empty string means "use the cluster default class").
pvc_spec = merge({
"accessModes" = [var.storage.volume.accessMode]
"volumeMode" = var.storage.volume.type
"resources" = {
"requests" = {
"storage" = "${var.storage.volume.size}"
}
}
}, var.storage.volume.class != "" ?{
"storageClassName" = var.storage.volume.class
}:{})
# JSON-patch snippet retargeting a RoleBinding subject to this namespace.
# NOTE(review): hyphenated local name `rb-patch` — dashes make `local.rb-patch`
# references parse as subtraction in HCL expressions; it appears unreferenced
# in this file, confirm before renaming.
rb-patch = <<-EOF
- op: replace
path: /subjects/0/namespace
value: "${var.namespace}"
EOF
}
# Kustomize overlay over the module's bundled YAML manifests: applies the common
# labels, forces the target namespace, rewrites the Loki image reference, and
# patches the ServiceMonitor and StatefulSet.
data "kustomization_overlay" "data" {
common_labels = local.common_labels
namespace = var.namespace
# Include every *.yaml in the module except index.yaml (component metadata)
# and any manifest whose filename mentions ClusterRole (cluster-scoped RBAC
# is handled elsewhere).
resources = [for file in fileset(path.module, "*.yaml"): file if file != "index.yaml" && length(regexall("ClusterRole",file))<1]
# Redirect the upstream image to the registry/repository/tag from var.images.
images {
name = "docker.io/grafana/loki"
new_name = "${var.images.loki.registry}/${var.images.loki.repository}"
new_tag = "${var.images.loki.tag}"
}
# JSON-patch: prefix the ServiceMonitor job relabeling replacement with the
# actual namespace instead of the hard-coded one baked into the manifest.
patches {
target {
kind = "ServiceMonitor"
name = "loki"
}
patch = <<-EOF
- op: replace
path: /spec/endpoints/0/relabelings/0/replacement
value: "${var.namespace}/$1"
EOF
}
# Strategic-merge patch: pin replicas to 1, set the configured pull policy,
# and replace the volumeClaimTemplate with the module-computed pvc_spec,
# adding the k8up backup annotation.
patches {
target {
kind = "StatefulSet"
name = "loki"
}
patch = <<-EOF
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: loki
spec:
replicas: 1
template:
spec:
containers:
- name: loki
imagePullPolicy: ${var.images.loki.pull_policy}
volumeClaimTemplates:
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: storage
annotations:
k8up.io/backup: "true"
spec: ${jsonencode(local.pvc_spec)}
EOF
}
}

View File

@@ -1,106 +0,0 @@
---
apiVersion: vinyl.solidite.fr/v1beta1
kind: Component
category: monitor
metadata:
name: loki
description: Loki is a horizontally scalable, highly available, multi-tenant log aggregation system.
options:
alertmanager:
default: alertmanager-alertmanager
examples:
- alertmanager-alertmanager
type: string
storage:
default:
volume:
accessMode: ReadWriteOnce
class: ''
size: 10Gi
type: Filesystem
description: Configure this app storage
examples:
- volume:
accessMode: ReadWriteOnce
class: ''
size: 10Gi
type: Filesystem
properties:
volume:
default:
accessMode: ReadWriteOnce
class: ''
size: 10Gi
type: Filesystem
properties:
accessMode:
default: ReadWriteOnce
enum:
- ReadWriteOnce
- ReadOnlyMany
- ReadWriteMany
type: string
class:
default: ''
type: string
size:
default: 10Gi
type: string
type:
default: Filesystem
enum:
- Filesystem
- Block
type: string
type: object
type: object
images:
default:
loki:
pull_policy: IfNotPresent
registry: docker.io
repository: grafana/loki
tag: 2.9.3
examples:
- loki:
pull_policy: IfNotPresent
registry: docker.io
repository: grafana/loki
tag: 2.9.3
properties:
loki:
default:
pull_policy: IfNotPresent
registry: docker.io
repository: grafana/loki
tag: 2.9.3
properties:
pull_policy:
default: IfNotPresent
enum:
- Always
- Never
- IfNotPresent
type: string
registry:
default: docker.io
type: string
repository:
default: grafana/loki
type: string
tag:
default: 2.9.3
type: string
type: object
type: object
dependencies: []
providers:
kubernetes: true
authentik: null
kubectl: true
postgresql: null
mysql: null
restapi: null
http: null
gitea: null
tfaddtype: null

View File

@@ -1,65 +0,0 @@
# Source: loki/templates/monitoring/loki-alerts.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
labels:
helm.sh/chart: loki-5.43.3
app.kubernetes.io/name: loki
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "2.9.4"
app.kubernetes.io/managed-by: Helm
name: loki-loki-alerts
namespace: vynil-monitor
spec:
groups:
- name: loki_alerts
rules:
- alert: LokiRequestErrors
annotations:
message: |
{{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}% errors.
expr: |
100 * sum(rate(loki_request_duration_seconds_count{status_code=~"5.."}[2m])) by (namespace, job, route)
/
sum(rate(loki_request_duration_seconds_count[2m])) by (namespace, job, route)
> 10
for: 15m
labels:
severity: critical
- alert: LokiRequestPanics
annotations:
message: |
{{ $labels.job }} is experiencing {{ printf "%.2f" $value }}% increase of panics.
expr: |
sum(increase(loki_panic_total[10m])) by (namespace, job) > 0
labels:
severity: critical
- alert: LokiRequestLatency
annotations:
message: |
{{ $labels.job }} {{ $labels.route }} is experiencing {{ printf "%.2f" $value }}s 99th percentile latency.
expr: |
namespace_job_route:loki_request_duration_seconds:99quantile{route!~"(?i).*tail.*"} > 1
for: 15m
labels:
severity: critical
- alert: LokiTooManyCompactorsRunning
annotations:
message: |
{{ $labels.cluster }} {{ $labels.namespace }} has had {{ printf "%.0f" $value }} compactors running for more than 5m. Only one compactor should run at a time.
expr: |
sum(loki_boltdb_shipper_compactor_running) by (namespace, cluster) > 1
for: 5m
labels:
severity: warning
- name: loki_canaries_alerts
rules:
- alert: LokiCanaryLatency
annotations:
message: |
{{ $labels.job }} is experiencing {{ printf "%.2f" $value }}s 99th percentile latency.
expr: |
histogram_quantile(0.99, sum(rate(loki_canary_response_latency_seconds_bucket[5m])) by (le, namespace, job)) > 5
for: 15m
labels:
severity: warning

View File

@@ -1,98 +0,0 @@
# Source: loki/templates/monitoring/loki-rules.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
labels:
helm.sh/chart: loki-5.43.3
app.kubernetes.io/name: loki
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "2.9.4"
app.kubernetes.io/managed-by: Helm
name: loki-loki-rules
namespace: vynil-monitor
spec:
groups:
- name: loki_rules
rules:
- expr: histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket[1m]))
by (le, job))
labels:
cluster: loki
record: job:loki_request_duration_seconds:99quantile
- expr: histogram_quantile(0.50, sum(rate(loki_request_duration_seconds_bucket[1m]))
by (le, job))
labels:
cluster: loki
record: job:loki_request_duration_seconds:50quantile
- expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (job) / sum(rate(loki_request_duration_seconds_count[1m]))
by (job)
labels:
cluster: loki
record: job:loki_request_duration_seconds:avg
- expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, job)
labels:
cluster: loki
record: job:loki_request_duration_seconds_bucket:sum_rate
- expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (job)
labels:
cluster: loki
record: job:loki_request_duration_seconds_sum:sum_rate
- expr: sum(rate(loki_request_duration_seconds_count[1m])) by (job)
labels:
cluster: loki
record: job:loki_request_duration_seconds_count:sum_rate
- expr: histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket[1m]))
by (le, job, route))
labels:
cluster: loki
record: job_route:loki_request_duration_seconds:99quantile
- expr: histogram_quantile(0.50, sum(rate(loki_request_duration_seconds_bucket[1m]))
by (le, job, route))
labels:
cluster: loki
record: job_route:loki_request_duration_seconds:50quantile
- expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (job, route) / sum(rate(loki_request_duration_seconds_count[1m]))
by (job, route)
labels:
cluster: loki
record: job_route:loki_request_duration_seconds:avg
- expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, job, route)
labels:
cluster: loki
record: job_route:loki_request_duration_seconds_bucket:sum_rate
- expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (job, route)
labels:
cluster: loki
record: job_route:loki_request_duration_seconds_sum:sum_rate
- expr: sum(rate(loki_request_duration_seconds_count[1m])) by (job, route)
labels:
cluster: loki
record: job_route:loki_request_duration_seconds_count:sum_rate
- expr: histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket[1m]))
by (le, namespace, job, route))
labels:
cluster: loki
record: namespace_job_route:loki_request_duration_seconds:99quantile
- expr: histogram_quantile(0.50, sum(rate(loki_request_duration_seconds_bucket[1m]))
by (le, namespace, job, route))
labels:
cluster: loki
record: namespace_job_route:loki_request_duration_seconds:50quantile
- expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (namespace, job, route)
/ sum(rate(loki_request_duration_seconds_count[1m])) by (namespace, job, route)
labels:
cluster: loki
record: namespace_job_route:loki_request_duration_seconds:avg
- expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, namespace, job,
route)
labels:
cluster: loki
record: namespace_job_route:loki_request_duration_seconds_bucket:sum_rate
- expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (namespace, job, route)
labels:
cluster: loki
record: namespace_job_route:loki_request_duration_seconds_sum:sum_rate
- expr: sum(rate(loki_request_duration_seconds_count[1m])) by (namespace, job, route)
labels:
cluster: loki
record: namespace_job_route:loki_request_duration_seconds_count:sum_rate

View File

@@ -1,35 +0,0 @@
# Source: loki/templates/monitoring/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: loki
namespace: vynil-monitor
labels:
helm.sh/chart: loki-5.43.3
app.kubernetes.io/name: loki
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "2.9.4"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: loki
app.kubernetes.io/instance: loki
matchExpressions:
- key: prometheus.io/service-monitor
operator: NotIn
values:
- "false"
endpoints:
- port: http-metrics
path: /metrics
interval: 15s
relabelings:
- sourceLabels: [job]
action: replace
replacement: "vynil-monitor/$1"
targetLabel: job
- action: replace
replacement: "loki"
targetLabel: cluster
scheme: http

View File

@@ -1,15 +0,0 @@
# Source: loki/templates/runtime-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: loki-runtime
namespace: vynil-monitor
labels:
helm.sh/chart: loki-5.43.3
app.kubernetes.io/name: loki
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "2.9.4"
app.kubernetes.io/managed-by: Helm
data:
runtime-config.yaml: |
{}

View File

@@ -1,14 +0,0 @@
---
# Source: loki/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: loki
namespace: vynil-monitor
labels:
helm.sh/chart: loki-5.43.3
app.kubernetes.io/name: loki
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "2.9.4"
app.kubernetes.io/managed-by: Helm
automountServiceAccountToken: true

View File

@@ -1,25 +0,0 @@
# Source: loki/templates/single-binary/service-headless.yaml
apiVersion: v1
kind: Service
metadata:
name: loki-headless
namespace: vynil-monitor
labels:
helm.sh/chart: loki-5.43.3
app.kubernetes.io/name: loki
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "2.9.4"
app.kubernetes.io/managed-by: Helm
variant: headless
prometheus.io/service-monitor: "false"
annotations:
spec:
clusterIP: None
ports:
- name: http-metrics
port: 3100
targetPort: http-metrics
protocol: TCP
selector:
app.kubernetes.io/name: loki
app.kubernetes.io/instance: loki

View File

@@ -1,24 +0,0 @@
# Source: loki/templates/service-memberlist.yaml
apiVersion: v1
kind: Service
metadata:
name: loki-memberlist
namespace: vynil-monitor
labels:
helm.sh/chart: loki-5.43.3
app.kubernetes.io/name: loki
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "2.9.4"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp
port: 7946
targetPort: http-memberlist
protocol: TCP
selector:
app.kubernetes.io/name: loki
app.kubernetes.io/instance: loki
app.kubernetes.io/part-of: memberlist

View File

@@ -1,28 +0,0 @@
# Source: loki/templates/single-binary/service.yaml
apiVersion: v1
kind: Service
metadata:
name: loki
namespace: vynil-monitor
labels:
helm.sh/chart: loki-5.43.3
app.kubernetes.io/name: loki
app.kubernetes.io/instance: loki
app.kubernetes.io/version: "2.9.4"
app.kubernetes.io/managed-by: Helm
annotations:
spec:
type: ClusterIP
ports:
- name: http-metrics
port: 3100
targetPort: http-metrics
protocol: TCP
- name: grpc
port: 9095
targetPort: grpc
protocol: TCP
selector:
app.kubernetes.io/name: loki
app.kubernetes.io/instance: loki
app.kubernetes.io/component: single-binary