domain-incoming/share/authentik/postgresql.tf
locals {
  pg-labels = merge(local.common-labels, {
    "app.kubernetes.io/component" = "pg"
  })
  pool-labels = merge(local.common-labels, {
    "app.kubernetes.io/component" = "pg-pool"
  })
  postgres-labels = merge(local.common-labels, {
    "app.kubernetes.io/component" = "postgresql"
  })
}
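// Zalando postgres-operator cluster (acid.zalan.do/v1) that currently hosts the ${var.component} database.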
resource "kubectl_manifest" "authentik_postgresql" {
yaml_body = <<-EOF
apiVersion: "acid.zalan.do/v1"
kind: "postgresql"
metadata:
name: "${var.instance}-${var.component}"
namespace: "${var.namespace}"
labels: ${jsonencode(local.postgres-labels)}
spec:
databases:
${var.component}: "${var.component}"
numberOfInstances: ${var.postgres.replicas}
podAnnotations:
"k8up.io/backupcommand": "pg_dump -U postgres -d ${var.component} --clean"
"k8up.io/file-extension": ".sql"
postgresql:
version: "${var.postgres.version}"
teamId: "${var.instance}"
users:
${var.component}:
- "superuser"
- "createdb"
volume:
size: "${var.postgres.storage}"
EOF
}
// Each authentik worker opens a new connection to the DB, so the connection
// logs quickly grow to several GB of junk. A daily cleanup therefore makes sense.
resource "kubectl_manifest" "authentik_cleanup_logs_script" {
yaml_body = <<-EOF
apiVersion: v1
kind: ConfigMap
metadata:
name: "${var.instance}-${var.component}-cleanlogs"
namespace: "${var.namespace}"
labels: ${jsonencode(local.common-labels)}
data:
"clean.sh": |-
#!/bin/ash
grep log /pgdata/pgroot/data/postgresql.conf|grep conn
echo "$(date '+%T') - Enforcing configuration"
sed -i "s/^log_connections.*/log_connections = 'off'/;s/^log_disconnections.*/log_disconnections = 'off'/" /pgdata/pgroot/data/postgresql.conf
grep log /pgdata/pgroot/data/postgresql.conf|grep conn
for i in /pgdata/pgroot/pg_log/*csv;do echo "$(date '+%T') - Cleaning $i";sed -i '/connection/d' "$i";done
df -h /pgdata/pgroot
EOF
}
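// CronJob (schedule taken from var.postgres.cleanlogs.schedule) that runs clean.sh
// from the ConfigMap above against the mounted pgdata volume.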
resource "kubectl_manifest" "authentik_cleanup_logs_job" {
yaml_body = <<-EOF
apiVersion: batch/v1
kind: CronJob
metadata:
name: "${var.instance}-${var.component}-cleanlogs"
namespace: "${var.namespace}"
labels: ${jsonencode(local.common-labels)}
spec:
concurrencyPolicy: Forbid
failedJobsHistoryLimit: 1
jobTemplate:
spec:
template:
spec:
restartPolicy: OnFailure
containers:
- image: "${var.postgres.cleanlogs.image}"
imagePullPolicy: IfNotPresent
name: cleanlogs
command: ["/bin/ash"]
args: ["/script/clean.sh"]
volumeMounts:
- mountPath: /pgdata
name: pgdata
- mountPath: /script
name: script
securityContext:
fsGroup: 100
runAsGroup: 100
runAsUser: 101
volumes:
- name: script
configMap:
name: ${kubectl_manifest.authentik_cleanup_logs_script.name}
- name: pgdata
persistentVolumeClaim:
claimName: pgdata-${var.instance}-${var.component}-0
schedule: "${var.postgres.cleanlogs.schedule}"
successfulJobsHistoryLimit: 3
EOF
}
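// One-shot Job that drops the Zalando-operator-specific views and extensions
// (pg_stat_statements, pg_stat_kcache, set_user) so the database can be imported
// cleanly into the CloudNativePG cluster below.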
resource "kubectl_manifest" "prj_pre_migrate_pg" {
yaml_body = <<-EOF
apiVersion: batch/v1
kind: Job
metadata:
name: "${var.instance}-remove-zalando-extensions"
namespace: "${var.namespace}"
spec:
template:
spec:
restartPolicy: OnFailure
containers:
- name: clean
image: docker.io/postgres:15.3-bookworm
imagePullPolicy: IfNotPresent
env:
- name: USERNAME
valueFrom:
secretKeyRef:
key: username
name: postgres.${var.instance}-${var.component}.credentials.postgresql.acid.zalan.do
- name: PASSWORD
valueFrom:
secretKeyRef:
key: password
name: postgres.${var.instance}-${var.component}.credentials.postgresql.acid.zalan.do
- name: DBURL
value: "${var.instance}-${var.component}"
- name: DBNAME
value: "${var.component}"
command:
- /bin/bash
- "-c"
- "echo -ne 'drop view if exists metric_helpers.pg_stat_statements;\ndrop function if exists metric_helpers.pg_stat_statements;\nDROP EXTENSION IF EXISTS pg_stat_statements;\nDROP EXTENSION IF EXISTS pg_stat_kcache;\nDROP EXTENSION IF EXISTS set_user;\n'| PGPASSWORD=\"$PASSWORD\" psql -U $USERNAME -d $DBNAME -h $DBURL"
EOF
}
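// CloudNativePG cluster, bootstrapped by importing the ${var.component} database from
// the Zalando cluster above, which is declared as an external cluster.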
resource "kubectl_manifest" "prj_pg" {
depends_on = [kubectl_manifest.prj_pre_migrate_pg]
yaml_body = <<-EOF
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: "${var.instance}-${var.component}-pg"
namespace: "${var.namespace}"
labels: ${jsonencode(local.pg-labels)}
spec:
instances: ${var.postgres.replicas}
storage:
size: "${var.postgres.storage}"
bootstrap:
initdb:
database: ${var.component}
owner: ${var.component}
import:
type: microservice
databases:
- ${var.component}
source:
externalCluster: "${var.instance}-${var.component}"
externalClusters:
- name: "${var.instance}-${var.component}"
connectionParameters:
host: "${var.instance}-${var.component}"
user: postgres
dbname: postgres
sslmode: require
password:
name: "postgres.${var.instance}-${var.component}.credentials.postgresql.acid.zalan.do"
key: password
EOF
}
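// PgBouncer pooler (session mode, read-write) in front of the CloudNativePG cluster.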
resource "kubectl_manifest" "prj_pg_pool" {
depends_on = [kubectl_manifest.prj_pg]
yaml_body = <<-EOF
apiVersion: postgresql.cnpg.io/v1
kind: Pooler
metadata:
name: "${var.instance}-${var.component}-pool"
namespace: "${var.namespace}"
labels: ${jsonencode(local.pool-labels)}
spec:
cluster:
name: "${var.instance}-${var.component}-pg"
instances: 1
type: rw
pgbouncer:
poolMode: session
parameters:
max_client_conn: "1000"
default_pool_size: "10"
EOF
}