first commit

This commit is contained in:
2023-07-14 11:51:07 +02:00
commit 284dc650c4
101 changed files with 8629 additions and 0 deletions

View File

@@ -0,0 +1,20 @@
# ConfigMap for the code-server pod (mounted by deploy.tf):
# - config.yml: code-server configuration; auth disabled because access is
#   gated by the authentik forward-auth middleware (see forward.tf).
# - autostart.sh: builds an in-cluster kubeconfig from the mounted
#   ServiceAccount token and seeds ~/.bashrc on first start.
resource "kubectl_manifest" "code-server-config" {
  yaml_body = <<-EOF
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: "${var.component}-${var.instance}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    data:
      config.yml: |
        auth: none
      autostart.sh: |
        #!/bin/bash
        kubectl config set-cluster default --server=https://$${KUBERNETES_SERVICE_HOST}:$${KUBERNETES_SERVICE_PORT} --certificate-authority=/run/secrets/kubernetes.io/serviceaccount/ca.crt
        kubectl config set-credentials default --token=$(cat /run/secrets/kubernetes.io/serviceaccount/token)
        kubectl config set-context default --cluster=default --user=default
        kubectl config use-context default
        [ -e /home/coder/.bashrc ] || cp /etc/skel/.bashrc /home/coder/.bashrc
  EOF
}

22
apps/code-server/datas.tf Normal file
View File

@@ -0,0 +1,22 @@
locals {
  # Labels applied to every object created by this component:
  # vynil ownership labels plus the standard app.kubernetes.io set.
  common-labels = {
    "vynil.solidite.fr/owner-name" = var.instance
    "vynil.solidite.fr/owner-namespace" = var.namespace
    "vynil.solidite.fr/owner-category" = var.category
    "vynil.solidite.fr/owner-component" = var.component
    "app.kubernetes.io/managed-by" = "vynil"
    "app.kubernetes.io/name" = var.component
    "app.kubernetes.io/instance" = var.instance
  }
}
# Secret created by the authentik installation in the "<domain>-auth"
# namespace; forward.tf reads AUTHENTIK_BOOTSTRAP_TOKEN from it to drive
# the authentik API.
data "kubernetes_secret_v1" "authentik" {
  metadata {
    name = "authentik"
    namespace = "${var.domain}-auth"
  }
}
# Empty kustomize overlay — kept so the vynil tooling can query rendered
# resources; nothing is added here.
data "kustomization_overlay" "data" {
  resources = []
}

109
apps/code-server/deploy.tf Normal file
View File

@@ -0,0 +1,109 @@
# code-server Deployment (single replica: the home PVC defaults to
# ReadWriteOnce, so more replicas would not schedule).
#
# Fixes versus the original manifest:
# - "hostname"/"subdomain" were (invalidly) placed under DeploymentSpec;
#   they are pod-level fields and now live only in the template spec.
# - The pod spec declared "securityContext" twice (a duplicate YAML key);
#   the two maps are merged into one.
# - "capabilities" is a container-level securityContext field and was moved
#   into the code-server container; "fsGroup" is pod-level only and was
#   removed from the container securityContext.
# NOTE(review): subdomain is normally the name of a headless Service; confirm
# that var.domain-name is really what is wanted here.
resource "kubectl_manifest" "deploy" {
  yaml_body = <<-EOF
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: "${var.component}-${var.instance}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      replicas: 1
      selector:
        matchLabels: ${jsonencode(local.common-labels)}
      template:
        metadata:
          labels: ${jsonencode(local.common-labels)}
        spec:
          hostname: "${var.component}-${var.instance}"
          subdomain: "${var.domain-name}"
          securityContext:
            fsGroup: 1000
            runAsGroup: 1000
            runAsNonRoot: true
            runAsUser: 1000
          containers:
          - name: code-server
            securityContext:
              runAsGroup: 1000
              runAsNonRoot: true
              runAsUser: 1000
              privileged: true
              capabilities:
                add:
                - SETGID
                - SETUID
                - SYS_CHROOT
            env:
            - name: TZ
              value: "${var.timezone}"
            - name: ENTRYPOINTD
              value: /usr/local/startup
            - name: PORT
              value: "8080"
            - name: CODE_SERVER_CONFIG
              value: /etc/code-server/config.yml
            image: "${var.images.codeserver.registry}/${var.images.codeserver.repository}:${var.images.codeserver.tag}"
            imagePullPolicy: "${var.images.codeserver.pullPolicy}"
            ports:
            - containerPort: 8080
              name: http
              protocol: TCP
            livenessProbe:
              failureThreshold: 3
              httpGet:
                path: /
                port: http
                scheme: HTTP
              periodSeconds: 10
              successThreshold: 1
              timeoutSeconds: 1
            readinessProbe:
              failureThreshold: 3
              httpGet:
                path: /
                port: http
                scheme: HTTP
              periodSeconds: 10
              successThreshold: 1
              timeoutSeconds: 1
            volumeMounts:
            - name: config
              mountPath: /etc/code-server/config.yml
              subPath: config.yml
            - name: startup
              mountPath: /usr/local/startup/autostart.sh
              subPath: autostart.sh
            - name: home
              mountPath: /home/coder
            - name: run
              mountPath: /run
          restartPolicy: Always
          serviceAccount: "${var.component}-${var.instance}"
          serviceAccountName: "${var.component}-${var.instance}"
          volumes:
          - name: config
            configMap:
              defaultMode: 0420
              name: "${var.component}-${var.instance}"
              items:
              - key: config.yml
                path: config.yml
          - name: startup
            configMap:
              defaultMode: 0755
              name: "${var.component}-${var.instance}"
              items:
              - key: autostart.sh
                path: autostart.sh
          - name: home
            persistentVolumeClaim:
              claimName: "${var.component}-${var.instance}"
          - name: run
            emptyDir: {}
  EOF
}

122
apps/code-server/forward.tf Normal file
View File

@@ -0,0 +1,122 @@
locals {
  # Bootstrap API token read from the authentik install secret (datas.tf).
  authentik-token = data.kubernetes_secret_v1.authentik.data["AUTHENTIK_BOOTSTRAP_TOKEN"]
  # Shared headers for both the http data source and the restapi provider.
  request_headers = {
    "Content-Type" = "application/json"
    Authorization = "Bearer ${local.authentik-token}"
  }
  # Current provider list / primary key of the "forward" outpost, as
  # returned by the authentik API (data.http.get_forward_outpost below).
  forward-outpost-providers = jsondecode(data.http.get_forward_outpost.response_body).results[0].providers
  forward-outpost-pk = jsondecode(data.http.get_forward_outpost.response_body).results[0].pk
  # "<component>" when instance == component, otherwise "<component>-<instance>".
  app-name = var.component == var.instance ? var.instance : format("%s-%s", var.component, var.instance)
  app-icon = "dashboard/statics/icons/favicon-96x96.png"
  main-group = format("%s-users", local.app-name)
  sub-groups = []
  # dns-names is declared in this component's ingress file.
  external-url = format("https://%s", local.dns-names[0])
  access-token-validity = "hours=10" // ;minutes=10
}
data "authentik_flow" "default-authorization-flow" {
  # fake dependency so the lookup happens after apply has started
  depends_on = [authentik_group.prj_users]
  slug = "default-provider-authorization-implicit-consent"
}
# Proxy provider in forward_single mode: traefik delegates authentication
# to authentik for this application (see prj_middleware below).
resource "authentik_provider_proxy" "prj_forward" {
  name = local.app-name
  external_host = local.external-url
  authorization_flow = data.authentik_flow.default-authorization-flow.id
  mode = "forward_single"
  access_token_validity = local.access-token-validity
}
resource "authentik_application" "prj_application" {
  name = local.app-name
  slug = local.app-name
  protocol_provider = authentik_provider_proxy.prj_forward.id
  meta_launch_url = local.external-url
  meta_icon = format("%s/%s", local.external-url, local.app-icon)
}
# Main user group granted access to the application.
resource "authentik_group" "prj_users" {
  name = local.main-group
}
# Optional sub-groups under the main group (none by default; see
# local.sub-groups).
resource "authentik_group" "subgroup" {
  count = length(local.sub-groups)
  name = format("%s-%s", local.app-name, local.sub-groups[count.index])
  parent = authentik_group.prj_users.id
}
data "authentik_group" "vynil-admin" {
  depends_on = [authentik_group.prj_users] # fake dependency so it is not evaluated at plan stage
  name = "vynil-forward-admins"
}
# Access policy: members of the app group and of the vynil admin group may
# use the application (lower order = evaluated first).
resource "authentik_policy_binding" "prj_access_users" {
  target = authentik_application.prj_application.uuid
  group = authentik_group.prj_users.id
  order = 0
}
resource "authentik_policy_binding" "prj_access_vynil" {
  target = authentik_application.prj_application.uuid
  group = data.authentik_group.vynil-admin.id
  order = 1
}
# Look up the shared "forward" outpost; fails the run if authentik does not
# answer 200 (postcondition below).
data "http" "get_forward_outpost" {
  depends_on = [authentik_provider_proxy.prj_forward]
  url = "http://authentik.${var.domain}-auth.svc/api/v3/outposts/instances/?name__iexact=forward"
  method = "GET"
  request_headers = local.request_headers
  lifecycle {
    postcondition {
      condition = contains([200], self.status_code)
      error_message = "Status code invalid"
    }
  }
}
# PATCH-only restapi provider: used solely to append our provider to the
# shared outpost without owning/deleting it.
provider "restapi" {
  uri = "http://authentik.${var.domain}-auth.svc/api/v3/"
  headers = local.request_headers
  create_method = "PATCH"
  update_method = "PATCH"
  destroy_method = "PATCH"
  write_returns_object = true
  id_attribute = "name"
}
# Idempotently add this app's proxy provider to the forward outpost's
# provider list (no-op when already present).
resource "restapi_object" "forward_outpost_binding" {
  path = "/outposts/instances/${local.forward-outpost-pk}/"
  data = jsonencode({
    name = "forward"
    providers = contains(local.forward-outpost-providers, authentik_provider_proxy.prj_forward.id) ? local.forward-outpost-providers : concat(local.forward-outpost-providers, [authentik_provider_proxy.prj_forward.id])
  })
}
# Traefik middleware performing the forward-auth round-trip against the
# authentik outpost; referenced by the Ingress annotations.
resource "kubectl_manifest" "prj_middleware" {
  yaml_body = <<-EOF
    apiVersion: traefik.containo.us/v1alpha1
    kind: Middleware
    metadata:
      name: "forward-${local.app-name}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      forwardAuth:
        address: http://ak-outpost-forward.${var.domain}-auth.svc:9000/outpost.goauthentik.io/auth/traefik
        trustForwardHeader: true
        authResponseHeaders:
        - X-authentik-username
        # - X-authentik-groups
        # - X-authentik-email
        # - X-authentik-name
        # - X-authentik-uid
        # - X-authentik-jwt
        # - X-authentik-meta-jwks
        # - X-authentik-meta-outpost
        # - X-authentik-meta-provider
        # - X-authentik-meta-app
        # - X-authentik-meta-version
  EOF
}

130
apps/code-server/index.yaml Normal file
View File

@@ -0,0 +1,130 @@
---
# vynil component descriptor for the code-server app.
# NOTE(review): apiVersion says "vinyl.solidite.fr" while the terraform labels
# use "vynil.solidite.fr" — confirm which spelling the CRD declares.
apiVersion: vinyl.solidite.fr/v1beta1
kind: Component
category: apps
metadata:
  name: code-server
description: null
options:
  sub-domain:
    default: code
    examples:
    - code
    type: string
  issuer:
    default: letsencrypt-prod
    examples:
    - letsencrypt-prod
    type: string
  admin:
    default:
      cluster: false
      namespace: false
    examples:
    - cluster: false
      namespace: false
    properties:
      cluster:
        default: false
        type: boolean
      namespace:
        default: false
        type: boolean
    type: object
  ingress-class:
    default: traefik
    examples:
    - traefik
    type: string
  images:
    default:
      codeserver:
        pullPolicy: IfNotPresent
        registry: docker.io
        repository: sebt3/code-server
        tag: '4.13'
    examples:
    - codeserver:
        pullPolicy: IfNotPresent
        registry: docker.io
        repository: sebt3/code-server
        tag: '4.13'
    properties:
      codeserver:
        default:
          pullPolicy: IfNotPresent
          registry: docker.io
          repository: sebt3/code-server
          tag: '4.13'
        properties:
          pullPolicy:
            default: IfNotPresent
            enum:
            - Always
            - Never
            - IfNotPresent
            type: string
          registry:
            default: docker.io
            type: string
          repository:
            default: sebt3/code-server
            type: string
          tag:
            # image tags are strings (e.g. "4.13.1"); was typed "number",
            # which also disagreed with the dolibarr component schema
            default: '4.13'
            type: string
        type: object
    type: object
  domain:
    default: your-company
    examples:
    - your-company
    type: string
  timezone:
    default: Europe/Paris
    examples:
    - Europe/Paris
    type: string
  storage:
    default:
      accessMode: ReadWriteOnce
      size: 20Gi
      type: Filesystem
    examples:
    - accessMode: ReadWriteOnce
      size: 20Gi
      type: Filesystem
    properties:
      accessMode:
        default: ReadWriteOnce
        enum:
        - ReadWriteOnce
        - ReadOnlyMany
        - ReadWriteMany
        type: string
      size:
        default: 20Gi
        type: string
      type:
        default: Filesystem
        enum:
        - Filesystem
        # PVC volumeMode values are "Filesystem"/"Block"; this value is used
        # verbatim as volumeMode in pvc.tf, so lowercase "block" was invalid
        - Block
        type: string
    type: object
  domain-name:
    default: your_company.com
    examples:
    - your_company.com
    type: string
dependencies:
- dist: null
  category: share
  component: authentik-forward
providers:
  kubernetes: true
  authentik: true
  kubectl: true
  postgresql: null
  restapi: true
  http: true
View File

@@ -0,0 +1,76 @@
locals {
  # Single external name: <instance>.<sub-domain>.<domain-name>.
  dns-names = ["${var.instance}.${var.sub-domain}.${var.domain-name}"]
  # Traefik middlewares applied to the ingress: https redirect (below) and
  # the authentik forward-auth middleware from forward.tf.
  middlewares = ["${var.instance}-https", "forward-${local.app-name}"]
  # Backend service created in svc.tf (ClusterIP, port 80 -> container http).
  service = {
    "name" = "${var.component}-${var.instance}"
    "port" = {
      "number" = 80
    }
  }
  # One catch-all "/" Prefix rule per DNS name.
  rules = [ for v in local.dns-names : {
    "host" = "${v}"
    "http" = {
      "paths" = [{
        "backend" = {
          "service" = local.service
        }
        "path" = "/"
        "pathType" = "Prefix"
      }]
    }
  }]
}
# cert-manager certificate for the ingress TLS secret.
resource "kubectl_manifest" "prj_certificate" {
  yaml_body = <<-EOF
    apiVersion: "cert-manager.io/v1"
    kind: "Certificate"
    metadata:
      name: "${var.instance}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      secretName: "${var.instance}-cert"
      dnsNames: ${jsonencode(local.dns-names)}
      issuerRef:
        name: "${var.issuer}"
        kind: "ClusterIssuer"
        group: "cert-manager.io"
  EOF
}
# Permanent http -> https redirect middleware.
resource "kubectl_manifest" "prj_https_redirect" {
  yaml_body = <<-EOF
    apiVersion: "traefik.containo.us/v1alpha1"
    kind: "Middleware"
    metadata:
      name: "${var.instance}-https"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      redirectScheme:
        scheme: "https"
        permanent: true
  EOF
}
# Ingress wiring the middlewares via the traefik kubernetescrd annotation
# (format: <namespace>-<middleware>@kubernetescrd, comma separated).
resource "kubectl_manifest" "prj_ingress" {
  force_conflicts = true
  yaml_body = <<-EOF
    apiVersion: "networking.k8s.io/v1"
    kind: "Ingress"
    metadata:
      name: "${var.instance}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
      annotations:
        "traefik.ingress.kubernetes.io/router.middlewares": "${join(",", [for m in local.middlewares : format("%s-%s@kubernetescrd", var.namespace, m)])}"
    spec:
      ingressClassName: "${var.ingress-class}"
      rules: ${jsonencode(local.rules)}
      tls:
      - hosts: ${jsonencode(local.dns-names)}
        secretName: "${var.instance}-cert"
  EOF
}

17
apps/code-server/pvc.tf Normal file
View File

@@ -0,0 +1,17 @@
# PersistentVolumeClaim backing /home/coder (mounted in deploy.tf).
# Size, access mode and volumeMode come from the "storage" option declared
# in index.yaml.
resource "kubectl_manifest" "pvc" {
  yaml_body = <<-EOF
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: "${var.component}-${var.instance}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      accessModes:
      - "${var.storage.accessMode}"
      resources:
        requests:
          storage: "${var.storage.size}"
      volumeMode: "${var.storage.type}"
  EOF
}

78
apps/code-server/rbac.tf Normal file
View File

@@ -0,0 +1,78 @@
# ServiceAccount used by the code-server pod; autostart.sh builds its
# kubeconfig from this account's mounted token.
resource "kubectl_manifest" "sa" {
  yaml_body = <<-EOF
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: "${var.component}-${var.instance}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
  EOF
}
# Full-admin Role in this namespace, only when var.admin.namespace is true.
resource "kubectl_manifest" "role" {
  count = var.admin.namespace ? 1 : 0
  yaml_body = <<-EOF
    apiVersion: rbac.authorization.k8s.io/v1
    kind: Role
    metadata:
      name: "${var.component}-${var.instance}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    rules:
    - apiGroups: ['*']
      resources: ['*']
      verbs: ['*']
  EOF
}
# Bind the namespace Role to the ServiceAccount.
# Fix: the original roleRef carried a "namespace" field, which is not part
# of the RoleBinding roleRef schema (only apiGroup/kind/name are valid; the
# referenced Role is implicitly in the binding's own namespace).
resource "kubectl_manifest" "rb" {
  count = var.admin.namespace ? 1 : 0
  yaml_body = <<-EOF
    apiVersion: rbac.authorization.k8s.io/v1
    kind: RoleBinding
    metadata:
      name: "${var.component}-${var.instance}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: Role
      name: "${var.component}-${var.instance}"
    subjects:
    - kind: ServiceAccount
      name: "${var.component}-${var.instance}"
      namespace: "${var.namespace}"
  EOF
}
# Full-admin ClusterRole, only when var.admin.cluster is true. The name
# embeds the namespace to stay unique cluster-wide.
resource "kubectl_manifest" "clusterrole" {
  count = var.admin.cluster ? 1 : 0
  yaml_body = <<-EOF
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      name: "${var.component}-${var.namespace}-${var.instance}"
      labels: ${jsonencode(local.common-labels)}
    rules:
    - apiGroups: ['*']
      resources: ['*']
      verbs: ['*']
  EOF
}
# Bind the ClusterRole to the ServiceAccount.
resource "kubectl_manifest" "crb" {
  count = var.admin.cluster ? 1 : 0
  yaml_body = <<-EOF
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: "${var.component}-${var.namespace}-${var.instance}"
      labels: ${jsonencode(local.common-labels)}
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: "${var.component}-${var.namespace}-${var.instance}"
    subjects:
    - kind: ServiceAccount
      name: "${var.component}-${var.instance}"
      namespace: "${var.namespace}"
  EOF
}

18
apps/code-server/svc.tf Normal file
View File

@@ -0,0 +1,18 @@
# ClusterIP Service in front of the code-server pod: port 80 forwards to the
# container port named "http" (8080 in deploy.tf). Referenced by the ingress
# backend in local.service.
resource "kubectl_manifest" "service" {
  yaml_body = <<-EOF
    apiVersion: v1
    kind: Service
    metadata:
      name: "${var.component}-${var.instance}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      type: ClusterIP
      ports:
      - name: http
        port: 80
        protocol: TCP
        targetPort: http
      selector: ${jsonencode(local.common-labels)}
  EOF
}

View File

@@ -0,0 +1,69 @@
locals {
  # Distinct group names; NOTE(review): sort+reverse yields reverse
  # alphabetical order — confirm this ordering is intentional (it drives
  # policy-binding order below).
  sorted-group-names = reverse(distinct(sort([
    for grp in var.user-groups: grp.name
  ])))
  # var.user-groups re-ordered to follow sorted-group-names (duplicates by
  # name are kept).
  sorted-groups = flatten([
    for name in local.sorted-group-names: [
      for grp in var.user-groups:
        grp if grp.name == name
    ]
  ])
}
data "authentik_group" "vynil-admin" {
  name = "vynil-ldap-admins"
}
# One authentik group per configured user-group.
resource "authentik_group" "groups" {
  count = length(local.sorted-groups)
  name = local.sorted-groups[count.index].name
}
# Re-read the groups after creation: the data source exposes users_obj,
# consumed by configmap.tf to create dolibarr users.
data "authentik_group" "readed_groups" {
  depends_on = [ authentik_group.groups ]
  count = length(local.sorted-groups)
  name = local.sorted-groups[count.index].name
}
# LDAP application backed by the LDAP provider (declared in a sibling file).
resource "authentik_application" "dolibarr_application_ldap" {
  name = "${var.component}-${var.instance}-ldap"
  slug = "${var.component}-${var.instance}-ldap"
  protocol_provider = authentik_provider_ldap.dolibarr_provider_ldap.id
  meta_launch_url = "blank://blank"
}
# LDAP access: every user-group, then the ldapsearch service group, then the
# vynil admins (order controls policy evaluation).
resource "authentik_policy_binding" "dolibarr_ldap_access_users" {
  count = length(local.sorted-groups)
  target = authentik_application.dolibarr_application_ldap.uuid
  group = authentik_group.groups[count.index].id
  order = count.index
}
resource "authentik_policy_binding" "dolibarr_ldap_access_ldap" {
  target = authentik_application.dolibarr_application_ldap.uuid
  group = authentik_group.dolibarr_ldapsearch.id
  order = length(local.sorted-groups)+1
}
resource "authentik_policy_binding" "dolibarr_ldap_access_vynil" {
  target = authentik_application.dolibarr_application_ldap.uuid
  group = data.authentik_group.vynil-admin.id
  order = length(local.sorted-groups)+2
}
# SAML application used for the web SSO login (provider in a sibling file).
resource "authentik_application" "dolibarr_application_saml" {
  name = var.component==var.instance?var.component:"${var.component}-${var.instance}"
  slug = "${var.component}-${var.instance}"
  protocol_provider = authentik_provider_saml.dolibarr.id
  meta_launch_url = format("https://%s.%s", var.sub-domain, var.domain-name)
  meta_icon = format("https://%s.%s/%s", var.sub-domain, var.domain-name, "theme/dolibarr_256x256_color.png")
}
resource "authentik_policy_binding" "dolibarr_saml_access_users" {
  count = length(local.sorted-groups)
  target = authentik_application.dolibarr_application_saml.uuid
  group = authentik_group.groups[count.index].id
  order = count.index
}
resource "authentik_policy_binding" "dolibarr_saml_access_vynil" {
  target = authentik_application.dolibarr_application_saml.uuid
  group = data.authentik_group.vynil-admin.id
  order = length(local.sorted-groups)+1
}

180
apps/dolibarr/configmap.tf Normal file
View File

@@ -0,0 +1,180 @@
# authentik's own Ingress: its first rule host is used to build the SAML
# metadata URL in the locals below.
data "kubernetes_ingress_v1" "authentik" {
  metadata {
    name = "authentik"
    namespace = "${var.domain}-auth"
  }
}
locals {
  # SAML metadata endpoint of the dolibarr provider on the authentik host.
  # NOTE(review): no scheme ("https://") is prefixed here — confirm the
  # samlconnector module accepts a scheme-less URL.
  authentik-metadata-url="${data.kubernetes_ingress_v1.authentik.spec[0].rule[0].host}/api/v3/providers/saml/${authentik_provider_saml.dolibarr.id}/metadata/?download"
  # Modules always enabled in addition to var.modules.
  module-list = [
    "user",
    "ldap",
    "syslog"
  ]
  # Rendered into the "config.json" key of the config-json ConfigMap and
  # consumed by vynil-configurator.sh (groups/users to create, llx_const
  # parameters to set, modules to install).
  json-config = {
    groups = [ for index, g in local.sorted-groups: {
      name = g.name
      # "admin" is optional on a user-group entry; default to false.
      admin = contains([for k,v in g:k], "admin")?g.admin:false
      users = data.authentik_group.readed_groups[index].users_obj
    }]
    parameters = merge(var.parameters, {
      LDAP_FIELD_FULLNAME="sAMAccountName"
      LDAP_FIELD_LOGIN_SAMBA="sAMAccountName"
      LDAP_FIELD_MAIL="mail"
      LDAP_FIELD_NAME="sn"
      LDAP_GROUP_FIELD_DESCRIPTION="sAMAccountName"
      LDAP_GROUP_FIELD_FULLNAME="cn"
      LDAP_GROUP_FIELD_GROUPID="gidNumber"
      LDAP_GROUP_FIELD_GROUPMEMBERS="member"
      LDAP_GROUP_OBJECT_CLASS="group"
      LDAP_KEY_GROUPS="cn"
      LDAP_KEY_USERS="cn"
      LDAP_PASSWORD_HASH_TYPE="md5"
      # authentik LDAP outpost in the auth namespace.
      LDAP_SERVER_HOST="ak-outpost-ldap.${var.domain}-auth.svc"
      LDAP_SERVER_PORT="389"
      LDAP_SERVER_PROTOCOLVERSION="3"
      LDAP_SERVER_TYPE="openldap"
      LDAP_SERVER_DN="${local.base-dn}"
      LDAP_SERVER_USE_TLS="0"
      LDAP_SYNCHRO_ACTIVE="2"
      LDAP_USER_OBJECT_CLASS="person"
      LDAP_USER_DN=local.base-user-dn
      LDAP_GROUP_DN=local.base-group-dn
      # Restrict group sync to the configured user-groups.
      LDAP_GROUP_FILTER="&(&(objectClass=groupOfNames)(|${join("",[for g in local.sorted-groups: format("(cn=%s)",g.name)])}))"
      LDAP_ADMIN_DN="cn=${var.instance}-${var.component}-ldapsearch,${local.base-user-dn}"
      # Only members of the configured groups may bind/log in.
      LDAP_FILTER_CONNECTION="&(&(objectClass=inetOrgPerson)(|${join("",[for g in local.sorted-groups: format("(memberof=cn=%s,%s)",g.name,local.base-group-dn)])}))"
      SAMLCONNECTOR_CREATE_UNEXISTING_USER="1"
      SAMLCONNECTOR_MAPPING_USER_EMAIL="http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress"
      # NOTE(review): FIRSTNAME and LASTNAME are both mapped to the same
      # "claims/name" attribute — confirm this is intended.
      SAMLCONNECTOR_MAPPING_USER_FIRSTNAME="http://schemas.xmlsoap.org/ws/2005/05/identity/claims/name"
      SAMLCONNECTOR_MAPPING_USER_LASTNAME="http://schemas.xmlsoap.org/ws/2005/05/identity/claims/name"
      SAMLCONNECTOR_UPDATE_USER_EVERYTIME="1"
      SAMLCONNECTOR_DISABLE_IDP_DISCONNECTION="1"
      SAMLCONNECTOR_IDP_DISPLAY_BUTTON="1"
      SAMLCONNECTOR_IDP_METADATA_SOURCE="url"
      SAMLCONNECTOR_MANAGE_MULTIPLE_IDP="0"
      # Certificate mounted from the saml-cert volume (see deploy.tf).
      SAMLCONNECTOR_SP_CERT_PATH="/var/saml/tls.crt"
      SAMLCONNECTOR_SP_PRIV_KEY_PATH="/var/saml/tls.key"
      SAMLCONNECTOR_IDP_METADATA_URL=local.authentik-metadata-url
      # NOTE(review): the XML "path" is set to the same URL as above —
      # verify the module tolerates a URL here.
      SAMLCONNECTOR_IDP_METADATA_XML_PATH=local.authentik-metadata-url
      MAIN_MODULE_SAMLCONNECTOR="1"
      MAIN_MODULE_SAMLCONNECTOR_CSS="[\"\\/samlconnector\\/css\\/samlconnector.css.php\"]"
      MAIN_MODULE_SAMLCONNECTOR_HOOKS="[\"mainloginpage\",\"logout\",\"samlconnectorsetup\"]"
      MAIN_MODULE_SAMLCONNECTOR_JS="[\"\\/samlconnector\\/js\\/samlconnector.js.php\"]"
      MAIN_MODULE_SAMLCONNECTOR_LOGIN="1"
      MAIN_MODULE_SAMLCONNECTOR_MODULEFOREXTERNAL="1"
      MAIN_MODULE_SAMLCONNECTOR_SUBSTITUTIONS="1"
      MAIN_MODULE_SAMLCONNECTOR_TRIGGERS="1"
      SYSLOG_LEVEL="${var.log-level}"
      SYSLOG_FILE="/var/logs/dolibarr.log"
      SYSLOG_HANDLERS="[\"mod_syslog_file\"]"
    })
    # "MAIN_MODULE_<NAME>" list handed to upgrade2.php by the configurator.
    modules=join(",",[for i in concat(var.modules, local.module-list): format("MAIN_MODULE_%s",upper(i))])
  }
}
# ConfigMap consumed by the "configure" init container (deploy.tf):
# - docker-php-ext-redis.ini: store PHP sessions in the component's redis
# - vynil-configurator.sh: idempotent post-install script — sets llx_const
#   parameters, (re)installs modules via upgrade2.php, creates groups/users
#   from config.json (all statements use ON CONFLICT, so reruns are safe)
# - config.json: the rendered local.json-config
resource "kubectl_manifest" "config-json" {
  yaml_body = <<-EOF
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: "${var.instance}-json"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    data:
      "docker-php-ext-redis.ini": |-
        extension = redis.so
        session.save_handler = redis
        session.save_path = "tcp://${var.instance}-${var.component}-redis.${var.namespace}.svc:6379/?prefix=SESSION_${var.component}_${var.instance}:"
      "vynil-configurator.sh": |-
        #!/bin/ash
        pgsqlRun() { PGPASSWORD="$${DOLI_DB_PASSWORD:="dolibarr"}" psql -h "$${DOLI_DB_HOST:="postgres"}" -p "$${DOLI_DB_PORT}" -U "$${DOLI_DB_USER}" -w "$DOLI_DB_NAME" "$@"; }
        setDBconf() { pgsqlRun -c "insert into llx_const(entity, name, type, value, visible) VALUES (1, '$1', 'chaine', '$2', 0) ON CONFLICT(entity, name) DO UPDATE SET value='$2';"; }
        createUser() { pgsqlRun -c "INSERT INTO llx_user(entity, admin, employee, fk_establishment, datec, login, lastname, email, statut, fk_barcode_type, nb_holiday) VALUES (1, 0, 1, 0, NOW(), '$1', '$2', '$3', 1, 0, 0) ON CONFLICT(entity, login) DO UPDATE SET admin=0;"; }
        createAdmin() { pgsqlRun -c "INSERT INTO llx_user(entity, admin, employee, fk_establishment, datec, login, lastname, email, statut, fk_barcode_type, nb_holiday) VALUES (1, 1, 0, 0, NOW(), '$1', '$2', '$3', 1, 0, 0) ON CONFLICT(entity, login) DO UPDATE SET admin=1;"; }
        createGroup() { pgsqlRun -c "INSERT INTO llx_usergroup(entity, nom, datec) VALUES (1, '$1', NOW()) ON CONFLICT(entity, nom) DO NOTHING;"; }
        setGroupPerm() { pgsqlRun -c "insert into llx_usergroup_rights(fk_id,fk_usergroup,entity) select d.id as fk_id, g.rowid as fk_usergroup, 1 as entity from llx_rights_def d, llx_usergroup g where d.id is not null and d.module<>'user' and g.nom='$1' ON CONFLICT(fk_id,fk_usergroup,entity) DO NOTHING;"; }
        setGroupUser() { pgsqlRun -c "insert into llx_usergroup_user(entity, fk_user, fk_usergroup) select 1, u.rowid, g.rowid from llx_usergroup g, llx_user u where g.nom='$1' and u.login='$2' ON CONFLICT(entity, fk_user, fk_usergroup) DO NOTHING;"; }
        configquery() { jq -r "$1" </etc/config/config.json; }
        installMod() { cd /var/www/htdocs/install;php upgrade2.php 0.0.0 0.0.0 "$@"; }
        groupq() { configquery ".groups[$1].$2"; }
        userq() { configquery ".groups[$1].users[$2].$3"; }
        dolEncrypt() {
        { php <<ENDphp
        <?php
        require_once "/app/htdocs/core/lib/security.lib.php";
        print_r(dolEncrypt("$1"));
        ENDphp
        } |tail -1
        }
        # Set parameters
        pcnt=$(configquery ".parameters|keys|length")
        for i in $(seq 0 $(( $pcnt - 1)) );do
        k=$(configquery ".parameters|keys[$i]")
        v=$(configquery ".parameters.$k")
        setDBconf "$k" "$v"
        done
        setDBconf LDAP_ADMIN_PASS "$(dolEncrypt $${DOLI_LDAP_ADMIN_PASS})"
        setDBconf SAMLCONNECTOR_MAPPING_USER_SEARCH_KEY "$(dolEncrypt SAMLCONNECTOR_MAPPING_USER_LASTNAME)"
        rm -f /var/documents/install.lock
        installMod $(configquery ".modules")
        touch /var/documents/install.lock
        chmod 400 /var/documents/install.lock
        # Create groups and users
        gcnt=$(configquery ".groups | length")
        for i in $(seq 0 $(( $gcnt - 1)) );do
        gname=$(groupq $i name)
        echo ' *** '"Creating group: $${gname}"
        createGroup "$${gname}"
        admin=$(groupq $i admin)
        if [[ $${admin} != "true" ]];then
        setGroupPerm "$${gname}"
        fi
        ucnt=$(groupq $i "users | length")
        for j in $(seq 0 $(( $ucnt - 1)) );do
        email=$(userq $i $j email)
        name=$(userq $i $j name)
        username=$(userq $i $j username)
        if [[ $${admin} == "true" ]];then
        echo ' *** '"Creating admin: $${name}"
        createAdmin "$${username}" "$${name}" "$${email}"
        else
        echo ' *** '"Creating user: $${name}"
        createUser "$${username}" "$${name}" "$${email}"
        fi
        setGroupUser "$${gname}" "$${username}"
        done
        done
        >/var/logs/dolibarr.log
      "config.json": |-
        ${jsonencode(local.json-config)}
  EOF
}
# Environment ConfigMap for the dolibarr containers (envFrom in deploy.tf):
# database connection, admin login, URL root and LDAP client settings.
resource "kubectl_manifest" "config" {
  yaml_body = <<-EOF
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: "${var.instance}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    data:
      DOLI_DB_HOST: "${var.instance}-${var.component}.${var.namespace}.svc"
      DOLI_DB_USER: "${var.component}"
      DOLI_DB_NAME: "${var.component}"
      DOLI_DB_PORT: "5432"
      DOLI_DB_TYPE: "pgsql"
      DOLI_ADMIN_LOGIN: "admin_${var.instance}"
      DOLI_MODULES: "modSociete,modBlockedLog,modSamlConnector,modLdap"
      DOLI_AUTH: "dolibarr"
      DOLI_URL_ROOT: "https://${var.sub-domain}.${var.domain-name}"
      DOLI_LDAP_PORT: "389"
      DOLI_LDAP_VERSION: "3"
      DOLI_LDAP_SERVERTYPE: "openldap"
      DOLI_LDAP_LOGIN_ATTRIBUTE: "sAMAccountName"
      DOLI_LDAP_FILTER: "(&(|${join("",[for g in local.sorted-groups: format("(memberof=cn=%s,%s)",g.name,local.base-group-dn)])})(|(uid=%1%)(mail=%1%)))"
      DOLI_LDAP_ADMIN_LOGIN: "cn=${var.instance}-${var.component}-ldapsearch,${local.base-user-dn}"
      DOLI_LDAP_DN: "${local.base-dn}"
      DOLI_LDAP_HOST: "ak-outpost-ldap.${var.domain}-auth.svc"
  EOF
}

185
apps/dolibarr/deploy.tf Normal file
View File

@@ -0,0 +1,185 @@
locals {
  # common-labels plus a component marker; used as both selector and pod
  # labels by the Deployment/HPA below.
  deploy-labels = merge(local.common-labels, {
    "app.kubernetes.io/component" = "dolibarr"
  })
}
# CPU-based HorizontalPodAutoscaler for the dolibarr Deployment; bounds and
# target utilization come from the "hpa" option in index.yaml. The
# Deployment below intentionally omits "replicas" so the HPA owns scaling.
resource "kubectl_manifest" "hpa" {
  yaml_body = <<-EOF
    apiVersion: autoscaling/v2
    kind: HorizontalPodAutoscaler
    metadata:
      name: ${var.instance}
      namespace: ${var.namespace}
      labels: ${jsonencode(local.deploy-labels)}
    spec:
      minReplicas: ${var.hpa.min-replicas}
      maxReplicas: ${var.hpa.max-replicas}
      metrics:
      - resource:
          name: cpu
          target:
            averageUtilization: ${var.hpa.avg-cpu}
            type: Utilization
        type: Resource
      scaleTargetRef:
        apiVersion: apps/v1
        kind: Deployment
        name: ${var.instance}
  EOF
}
# dolibarr Deployment: an init container runs the vynil configurator
# (entrypoint sourced via /docker-entrypoint.d), then three containers run:
# - dolibarr: php-fpm serving the app over the shared-files emptyDir
# - dolibarr-logs: tails the app log so it shows in pod logs
# - nginx: front web server on port 3000 (the probes on the dolibarr
#   container target port 3000 on the pod IP, i.e. they go through nginx —
#   NOTE(review): confirm this indirection is intended)
# Volume names reference sibling resources (pvc, nginx-config,
# saml_certificate, dolibarr_ldap) declared in other files of this module.
resource "kubectl_manifest" "deploy" {
  yaml_body = <<-EOF
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: ${var.instance}
      namespace: ${var.namespace}
      labels: ${jsonencode(local.deploy-labels)}
    spec:
      selector:
        matchLabels: ${jsonencode(local.deploy-labels)}
      template:
        metadata:
          labels: ${jsonencode(local.deploy-labels)}
        spec:
          securityContext:
            runAsGroup: 82
            runAsUser: 82
            fsGroup: 82
          volumes:
          - name: config-json
            configMap:
              name: ${kubectl_manifest.config-json.name}
          - name: documents
            persistentVolumeClaim:
              claimName: ${kubectl_manifest.pvc.name}
          - name: nginx-run
            emptyDir: {}
          - name: nginx-cache
            emptyDir: {}
          - name: shared-files
            emptyDir: {}
          - name: shared-logs
            emptyDir: {}
          - name: nginx-config
            configMap:
              name: ${kubectl_manifest.nginx-config.name}
          - name: saml-cert
            secret:
              secretName: "${kubectl_manifest.saml_certificate.name}"
          initContainers:
          - name: configure
            args:
            - echo
            - SUCCESS
            image: "${var.images.dolibarr.registry}/${var.images.dolibarr.repository}:${var.images.dolibarr.tag}"
            imagePullPolicy: "${var.images.dolibarr.pullPolicy}"
            volumeMounts:
            - name: shared-files
              mountPath: /var/www/
            - name: shared-logs
              mountPath: /var/logs/
            - name: documents
              mountPath: /var/documents
            - name: config-json
              mountPath: /etc/config/config.json
              subPath: config.json
            - name: config-json
              mountPath: /docker-entrypoint.d/vynil-configurator.sh
              subPath: vynil-configurator.sh
            securityContext:
              runAsNonRoot: true
            env:
            - name: DOLI_DB_PASSWORD
              valueFrom:
                secretKeyRef:
                  key: password
                  name: "${var.component}.${var.instance}-${var.component}.credentials.postgresql.acid.zalan.do"
            envFrom:
            - configMapRef:
                name: "${kubectl_manifest.config.name}"
            - secretRef:
                name: "${kubectl_manifest.dolibarr_ldap.name}"
          containers:
          - name: dolibarr
            command:
            - "/usr/local/sbin/php-fpm"
            image: "${var.images.dolibarr.registry}/${var.images.dolibarr.repository}:${var.images.dolibarr.tag}"
            imagePullPolicy: "${var.images.dolibarr.pullPolicy}"
            resources: ${jsonencode(var.resources)}
            readinessProbe:
              httpGet:
                path: /index.php
                port: 3000
                scheme: HTTP
              periodSeconds: 10
              timeoutSeconds: 1
              successThreshold: 1
              failureThreshold: 3
            livenessProbe:
              httpGet:
                path: /index.php
                port: 3000
                scheme: HTTP
              periodSeconds: 10
              timeoutSeconds: 1
              failureThreshold: 3
              successThreshold: 1
            volumeMounts:
            - name: shared-logs
              mountPath: /var/logs/
            - name: shared-files
              mountPath: /var/www/
            - name: documents
              mountPath: /var/documents
            - name: saml-cert
              mountPath: /var/saml
            - name: config-json
              mountPath: /usr/local/etc/php/conf.d/docker-php-ext-redis.ini
              subPath: docker-php-ext-redis.ini
            securityContext:
              runAsNonRoot: true
            env:
            - name: DOLI_DB_PASSWORD
              valueFrom:
                secretKeyRef:
                  key: password
                  name: "${var.component}.${var.instance}-${var.component}.credentials.postgresql.acid.zalan.do"
            envFrom:
            - configMapRef:
                name: "${kubectl_manifest.config.name}"
            - secretRef:
                name: "${kubectl_manifest.dolibarr_ldap.name}"
          - name: dolibarr-logs
            command:
            - "tail"
            - "-f"
            - "/var/logs/dolibarr.log"
            image: "${var.images.dolibarr.registry}/${var.images.dolibarr.repository}:${var.images.dolibarr.tag}"
            imagePullPolicy: "${var.images.dolibarr.pullPolicy}"
            volumeMounts:
            - name: shared-logs
              mountPath: /var/logs/
          - name: nginx
            image: "${var.images.nginx.registry}/${var.images.nginx.repository}:${var.images.nginx.tag}"
            imagePullPolicy: "${var.images.nginx.pullPolicy}"
            securityContext:
              runAsNonRoot: true
              readOnlyRootFilesystem: true
            ports:
            - name: http
              containerPort: 3000
              protocol: TCP
            volumeMounts:
            - name: nginx-run
              mountPath: /var/run
            - name: nginx-cache
              mountPath: /var/cache/nginx
            - name: shared-files
              mountPath: /var/www/
            - name: nginx-config
              mountPath: /etc/nginx/nginx.conf
              subPath: nginx.conf
  EOF
}

300
apps/dolibarr/index.yaml Normal file
View File

@@ -0,0 +1,300 @@
---
# vynil component descriptor for the dolibarr app.
# NOTE(review): apiVersion says "vinyl.solidite.fr" while the terraform labels
# use "vynil.solidite.fr" — confirm which spelling the CRD declares.
apiVersion: vinyl.solidite.fr/v1beta1
kind: Component
category: apps
metadata:
  name: dolibarr
description: null
options:
  domain-name:
    default: your_company.com
    examples:
    - your_company.com
    type: string
  redis:
    default:
      exporter:
        enabled: true
        image: quay.io/opstree/redis-exporter:v1.44.0
      image: quay.io/opstree/redis:v7.0.5
      storage: 2Gi
    examples:
    - exporter:
        enabled: true
        image: quay.io/opstree/redis-exporter:v1.44.0
      image: quay.io/opstree/redis:v7.0.5
      storage: 2Gi
    properties:
      exporter:
        default:
          enabled: true
          image: quay.io/opstree/redis-exporter:v1.44.0
        properties:
          enabled:
            default: true
            type: boolean
          image:
            default: quay.io/opstree/redis-exporter:v1.44.0
            type: string
        type: object
      image:
        default: quay.io/opstree/redis:v7.0.5
        type: string
      storage:
        default: 2Gi
        type: string
    type: object
  domain:
    default: your-company
    examples:
    - your-company
    type: string
  ingress-class:
    default: traefik
    examples:
    - traefik
    type: string
  parameters:
    default:
      MAIN_LANG_DEFAULT: auto
    examples:
    - MAIN_LANG_DEFAULT: auto
    properties:
      MAIN_LANG_DEFAULT:
        default: auto
        type: string
    type: object
  storage:
    default:
      accessMode: ReadWriteOnce
      size: 10Gi
      type: Filesystem
    examples:
    - accessMode: ReadWriteOnce
      size: 10Gi
      type: Filesystem
    properties:
      accessMode:
        default: ReadWriteOnce
        enum:
        - ReadWriteOnce
        - ReadOnlyMany
        - ReadWriteMany
        type: string
      size:
        default: 10Gi
        type: string
      type:
        default: Filesystem
        enum:
        - Filesystem
        # PVC volumeMode values are "Filesystem"/"Block"; this value is used
        # verbatim as volumeMode by the PVC, so lowercase "block" was invalid
        - Block
        type: string
    type: object
  issuer:
    default: letsencrypt-prod
    examples:
    - letsencrypt-prod
    type: string
  modules:
    default:
    - societe
    examples:
    - - societe
    items:
      type: string
    type: array
  sub-domain:
    default: erp
    examples:
    - erp
    type: string
  user-groups:
    default:
    - admin: true
      name: dolibarr-admin
    examples:
    - - admin: true
        name: dolibarr-admin
    items:
      properties:
        admin:
          type: boolean
        name:
          type: string
      type: object
    type: array
  hpa:
    default:
      avg-cpu: 50
      max-replicas: 5
      min-replicas: 1
    examples:
    - avg-cpu: 50
      max-replicas: 5
      min-replicas: 1
    properties:
      avg-cpu:
        default: 50
        type: integer
      max-replicas:
        default: 5
        type: integer
      min-replicas:
        default: 1
        type: integer
    type: object
  images:
    default:
      dolibarr:
        pullPolicy: IfNotPresent
        registry: docker.io
        repository: sebt3/dolibarr
        tag: 17.0.1
      nginx:
        pullPolicy: IfNotPresent
        registry: docker.io
        repository: nginx
        tag: alpine
    examples:
    - dolibarr:
        pullPolicy: IfNotPresent
        registry: docker.io
        repository: sebt3/dolibarr
        tag: 17.0.1
      nginx:
        pullPolicy: IfNotPresent
        registry: docker.io
        repository: nginx
        tag: alpine
    properties:
      dolibarr:
        default:
          pullPolicy: IfNotPresent
          registry: docker.io
          repository: sebt3/dolibarr
          tag: 17.0.1
        properties:
          pullPolicy:
            default: IfNotPresent
            type: string
          registry:
            default: docker.io
            type: string
          repository:
            default: sebt3/dolibarr
            type: string
          tag:
            default: 17.0.1
            type: string
        type: object
      nginx:
        default:
          pullPolicy: IfNotPresent
          registry: docker.io
          repository: nginx
          tag: alpine
        properties:
          pullPolicy:
            default: IfNotPresent
            type: string
          registry:
            default: docker.io
            type: string
          repository:
            default: nginx
            type: string
          tag:
            default: alpine
            type: string
        type: object
    type: object
  log-level:
    default: 5
    examples:
    - 5
    type: integer
  postgres:
    default:
      replicas: 1
      storage: 5Gi
      version: '14'
    examples:
    - replicas: 1
      storage: 5Gi
      version: '14'
    properties:
      replicas:
        default: 1
        type: integer
      storage:
        default: 5Gi
        type: string
      version:
        default: '14'
        type: string
    type: object
  resources:
    default:
      limits:
        cpu: 200m
        memory: 256Mi
      requests:
        cpu: 50m
        memory: 100Mi
    examples:
    - limits:
        cpu: 200m
        memory: 256Mi
      requests:
        cpu: 50m
        memory: 100Mi
    properties:
      limits:
        default:
          cpu: 200m
          memory: 256Mi
        properties:
          cpu:
            default: 200m
            type: string
          memory:
            default: 256Mi
            type: string
        type: object
      requests:
        default:
          cpu: 50m
          memory: 100Mi
        properties:
          cpu:
            default: 50m
            type: string
          memory:
            default: 100Mi
            type: string
        type: object
    type: object
dependencies:
- dist: null
  category: share
  component: authentik-ldap
- dist: null
  category: core
  component: secret-generator
- dist: null
  category: core
  component: cert-manager-self-sign
- dist: null
  category: dbo
  component: postgresql
- dist: null
  category: dbo
  component: redis
providers:
  kubernetes: true
  authentik: true
  kubectl: true
  postgresql: null
  restapi: true
  http: true

75
apps/dolibarr/ingress.tf Normal file
View File

@@ -0,0 +1,75 @@
locals {
  # Hostnames served by this ingress and the traefik middleware chain name.
  dns-names   = [format("%s.%s", var.sub-domain, var.domain-name)]
  middlewares = ["${var.instance}-https"]

  # Backend service reference shared by every generated rule.
  service = {
    "name" = var.instance
    "port" = {
      "number" = 80
    }
  }

  # One ingress rule per served hostname, each routing "/" (Prefix) to the
  # backend service above.
  rules = [for host in local.dns-names : {
    "host" = host
    "http" = {
      "paths" = [{
        "backend" = {
          "service" = local.service
        }
        "path"     = "/"
        "pathType" = "Prefix"
      }]
    }
  }]
}
# TLS certificate for the ingress hostnames, issued by the cluster-wide
# ClusterIssuer named by var.issuer; the resulting secret is consumed by the
# Ingress tls section below.
resource "kubectl_manifest" "prj_certificate" {
  yaml_body = <<-EOF
    apiVersion: "cert-manager.io/v1"
    kind: "Certificate"
    metadata:
      name: "${var.instance}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      secretName: "${var.instance}-cert"
      dnsNames: ${jsonencode(local.dns-names)}
      issuerRef:
        name: "${var.issuer}"
        kind: "ClusterIssuer"
        group: "cert-manager.io"
    EOF
}

# Traefik middleware that permanently redirects plain HTTP to HTTPS; wired to
# the Ingress through the router.middlewares annotation below.
resource "kubectl_manifest" "prj_https_redirect" {
  yaml_body = <<-EOF
    apiVersion: "traefik.containo.us/v1alpha1"
    kind: "Middleware"
    metadata:
      name: "${var.instance}-https"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      redirectScheme:
        scheme: "https"
        permanent: true
    EOF
}

# Standard networking.k8s.io Ingress; middlewares are referenced with
# traefik's "<namespace>-<name>@kubernetescrd" syntax.
resource "kubectl_manifest" "prj_ingress" {
  # Take ownership of fields even if another manager already set them.
  force_conflicts = true
  yaml_body = <<-EOF
    apiVersion: "networking.k8s.io/v1"
    kind: "Ingress"
    metadata:
      name: "${var.instance}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
      annotations:
        "traefik.ingress.kubernetes.io/router.middlewares": "${join(",", [for m in local.middlewares : format("%s-%s@kubernetescrd", var.namespace, m)])}"
    spec:
      ingressClassName: "${var.ingress-class}"
      rules: ${jsonencode(local.rules)}
      tls:
      - hosts: ${jsonencode(local.dns-names)}
        secretName: "${var.instance}-cert"
    EOF
}

115
apps/dolibarr/ldap.tf Normal file
View File

@@ -0,0 +1,115 @@
# Authentik bootstrap secret created by the authentik component in the
# domain's auth namespace; supplies the admin API token used below.
data "kubernetes_secret_v1" "authentik" {
  metadata {
    name      = "authentik"
    namespace = "${var.domain}-auth"
  }
}

locals {
  # LDAP DIT layout derived from the instance FQDN, e.g.
  # "sub.example.com" -> "dc=sub,dc=example,dc=com".
  base-dn       = format("dc=%s", join(",dc=", split(".", format("%s.%s", var.sub-domain, var.domain-name))))
  base-group-dn = format("ou=groups,%s", local.base-dn)
  base-user-dn  = format("ou=users,%s", local.base-dn)
  # Admin token for direct calls to the authentik REST API.
  authentik-token = data.kubernetes_secret_v1.authentik.data["AUTHENTIK_BOOTSTRAP_TOKEN"]
  request_headers = {
    "Content-Type" = "application/json"
    Authorization  = "Bearer ${local.authentik-token}"
  }
  # Provider list and primary key of the shared "ldap" outpost, as returned by
  # data.http.get_ldap_outpost further down this file.
  ldap-outpost-providers = jsondecode(data.http.get_ldap_outpost.response_body).results[0].providers
  ldap-outpost-pk        = jsondecode(data.http.get_ldap_outpost.response_body).results[0].pk
}
# Generated passwords for the dolibarr instance (LDAP bind password, initial
# admin password, cookie encryption key). forceRegenerate=false keeps the
# values stable across terraform runs.
resource "kubectl_manifest" "dolibarr_ldap" {
  # The secret-generator operator rewrites its own annotations; ignore them to
  # avoid perpetual diffs.
  ignore_fields = ["metadata.annotations"]
  yaml_body = <<-EOF
    apiVersion: "secretgenerator.mittwald.de/v1alpha1"
    kind: "StringSecret"
    metadata:
      name: "${var.instance}-${var.component}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      forceRegenerate: false
      fields:
      - fieldName: "DOLI_LDAP_ADMIN_PASS"
        length: "32"
      - fieldName: "DOLI_ADMIN_PASSWORD"
        length: "32"
      - fieldName: "DOLI_COOKIE_CRYPTKEY"
        length: "32"
    EOF
}

# Read the generated secret back so the LDAP bind password can be pushed to
# authentik below.
data "kubernetes_secret_v1" "dolibarr_ldap_password" {
  depends_on = [kubectl_manifest.dolibarr_ldap]
  metadata {
    name      = kubectl_manifest.dolibarr_ldap.name
    namespace = var.namespace
  }
}

# Dedicated service account dolibarr uses to bind and search the LDAP outpost.
resource "authentik_user" "dolibarr_ldapsearch" {
  username = "${var.instance}-${var.component}-ldapsearch"
  name     = "${var.instance}-${var.component}-ldapsearch"
}

resource "authentik_group" "dolibarr_ldapsearch" {
  name  = "${var.instance}-${var.component}-ldapsearch"
  users = [authentik_user.dolibarr_ldapsearch.id]
  # NOTE(review): superuser rights for a search-only account is broad —
  # confirm a non-superuser search group is not sufficient.
  is_superuser = true
}

# Set the ldapsearch account's password through the authentik API (the
# provider has no resource for set_password); 201/204 are accepted as success.
data "http" "dolibarr_ldapsearch_password" {
  url             = "http://authentik.${var.domain}-auth.svc/api/v3/core/users/${authentik_user.dolibarr_ldapsearch.id}/set_password/"
  method          = "POST"
  request_headers = local.request_headers
  request_body    = jsonencode({password=data.kubernetes_secret_v1.dolibarr_ldap_password.data["DOLI_LDAP_ADMIN_PASS"]})
  lifecycle {
    postcondition {
      condition     = contains([201, 204], self.status_code)
      error_message = "Status code invalid"
    }
  }
}
# Flow the LDAP outpost uses to authenticate bind requests.
data "authentik_flow" "ldap-authentication-flow" {
  slug = "ldap-authentication-flow"
}

# LDAP provider exposing this instance's DIT through the shared outpost.
resource "authentik_provider_ldap" "dolibarr_provider_ldap" {
  name         = "dolibarr-${var.instance}-ldap"
  base_dn      = local.base-dn
  search_group = authentik_group.dolibarr_ldapsearch.id
  bind_flow    = data.authentik_flow.ldap-authentication-flow.id
}

# Look up the shared "ldap" outpost so the provider above can be appended to
# it. NOTE(review): the depends_on target is presumably declared later in this
# file (not visible here) — verify it exists.
data "http" "get_ldap_outpost" {
  depends_on = [authentik_policy_binding.dolibarr_ldap_access_users] # fake dependency so it is not evaluated at plan stage
  url             = "http://authentik.${var.domain}-auth.svc/api/v3/outposts/instances/?name__iexact=ldap"
  method          = "GET"
  request_headers = local.request_headers
  lifecycle {
    postcondition {
      condition     = contains([200], self.status_code)
      error_message = "Status code invalid"
    }
  }
}

# Raw REST access to authentik; PATCH for every verb because we only ever
# merge the provider list into the existing outpost object, never delete it.
provider "restapi" {
  uri                  = "http://authentik.${var.domain}-auth.svc/api/v3/"
  headers              = local.request_headers
  create_method        = "PATCH"
  update_method        = "PATCH"
  destroy_method       = "PATCH"
  write_returns_object = true
  id_attribute         = "name"
}

# Idempotently attach the dolibarr LDAP provider to the shared outpost: keep
# the list unchanged when the provider is already present.
resource "restapi_object" "ldap_outpost_binding" {
  path = "/outposts/instances/${local.ldap-outpost-pk}/"
  data = jsonencode({
    name      = "ldap"
    providers = contains(local.ldap-outpost-providers, authentik_provider_ldap.dolibarr_provider_ldap.id) ? local.ldap-outpost-providers : concat(local.ldap-outpost-providers, [authentik_provider_ldap.dolibarr_provider_ldap.id])
  })
}

View File

@@ -0,0 +1,44 @@
# nginx front-end configuration: serves static files on port 3000 and proxies
# PHP requests to the local PHP-FPM backend on 127.0.0.1:9000.
resource "kubectl_manifest" "nginx-config" {
  # $${NGINX_HOST} renders as a literal ${NGINX_HOST} in the ConfigMap;
  # presumably substituted by the nginx image's envsubst templating at
  # container start — TODO confirm against the deployment.
  yaml_body = <<-EOF
    kind: ConfigMap
    apiVersion: v1
    metadata:
      name: ${var.instance}-nginx
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    data:
      nginx.conf: |
        worker_processes 5;
        events {
        }
        http {
          include /etc/nginx/mime.types;
          server {
            listen 3000;
            server_name $${NGINX_HOST};
            root /var/www/htdocs;
            index index.php;
            access_log /var/log/nginx/access.log;
            error_log /var/log/nginx/error.log;
            location ~ [^/]\.php(/|$) {
              # try_files $uri =404;
              fastcgi_split_path_info ^(.+?\.php)(/.*)$;
              fastcgi_pass 127.0.0.1:9000;
              fastcgi_index index.php;
              include fastcgi_params;
              fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
              fastcgi_param PATH_INFO $fastcgi_path_info;
            }
            location / {
              # Fallback must be an absolute URI: a bare "index.php" is not a
              # valid internal-redirect target for try_files.
              try_files $uri $uri/ /index.php;
            }
            location /api {
              if ( !-e $request_filename) {
                rewrite ^.* /api/index.php last;
              }
            }
          }
        }
    EOF
}

View File

@@ -0,0 +1,31 @@
locals {
  # Labels for the database objects: common labels plus a component marker.
  pg-labels = merge(local.common-labels, {
    "app.kubernetes.io/component" = "postgresql"
  })
}

# PostgreSQL cluster (Zalando operator) for dolibarr; k8up backs it up with a
# logical pg_dump instead of a raw volume snapshot.
resource "kubectl_manifest" "dolibarr_postgresql" {
  yaml_body = <<-EOF
    apiVersion: "acid.zalan.do/v1"
    kind: "postgresql"
    metadata:
      name: "${var.instance}-${var.component}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.pg-labels)}
    spec:
      databases:
        ${var.component}: "${var.component}"
      numberOfInstances: ${var.postgres.replicas}
      podAnnotations:
        "k8up.io/backupcommand": "pg_dump -U postgres -d ${var.component} --clean"
        "k8up.io/file-extension": ".sql"
      postgresql:
        version: "${var.postgres.version}"
      teamId: "${var.instance}"
      users:
        ${var.component}:
        - "superuser"
        - "createdb"
      volume:
        size: "${var.postgres.storage}"
    EOF
}

19
apps/dolibarr/pvc.tf Normal file
View File

@@ -0,0 +1,19 @@
# Application data volume; the k8up.io/backup annotation opts it into backups.
resource "kubectl_manifest" "pvc" {
  # NOTE(review): volumeMode expects "Filesystem" or "Block"; the variable is
  # named storage.type — confirm its values match those constants.
  yaml_body = <<-EOF
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: ${var.instance}
      namespace: "${var.namespace}"
      annotations:
        k8up.io/backup: "true"
      labels: ${jsonencode(local.common-labels)}
    spec:
      accessModes:
      - "${var.storage.accessMode}"
      resources:
        requests:
          storage: "${var.storage.size}"
      volumeMode: "${var.storage.type}"
    EOF
}

32
apps/dolibarr/redis.tf Normal file
View File

@@ -0,0 +1,32 @@
locals {
  # Labels for the redis objects: common labels plus a component marker.
  redis-labels = merge(local.common-labels, {
    "app.kubernetes.io/component" = "redis"
  })
}

# Standalone Redis instance (opstree operator) with a persistent volume and an
# optional metrics exporter side-car.
resource "kubectl_manifest" "dolibarr_redis" {
  yaml_body = <<-EOF
    apiVersion: "redis.redis.opstreelabs.in/v1beta1"
    kind: "Redis"
    metadata:
      name: "${var.instance}-${var.component}-redis"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.redis-labels)}
    spec:
      kubernetesConfig:
        image: "${var.redis.image}"
        imagePullPolicy: "IfNotPresent"
      storage:
        volumeClaimTemplate:
          spec:
            accessModes: ["ReadWriteOnce"]
            resources:
              requests:
                storage: "${var.redis.storage}"
      redisExporter:
        enabled: ${var.redis.exporter.enabled}
        image: "${var.redis.exporter.image}"
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
    EOF
}

56
apps/dolibarr/saml.tf Normal file
View File

@@ -0,0 +1,56 @@
# Built-in authentik flows reused by the SAML provider.
data "authentik_flow" "default-authorization-flow" {
  slug = "default-provider-authorization-implicit-consent"
}

data "authentik_flow" "default-authentication-flow" {
  slug = "default-authentication-flow"
}

# Standard SAML attribute mappings shipped with authentik.
data "authentik_property_mapping_saml" "saml_maps" {
  managed_list = [
    "goauthentik.io/providers/saml/email",
    "goauthentik.io/providers/saml/groups",
    "goauthentik.io/providers/saml/name",
    "goauthentik.io/providers/saml/upn",
    "goauthentik.io/providers/saml/uid",
    "goauthentik.io/providers/saml/username",
    "goauthentik.io/providers/saml/ms-windowsaccountname",
  ]
}

# Mapping used as the SAML NameID (the username).
data "authentik_property_mapping_saml" "saml_name" {
  managed = "goauthentik.io/providers/saml/username"
}

# Authentik's own self-signed key pair, used to sign assertions.
data "authentik_certificate_key_pair" "generated" {
  name = "authentik Self-signed Certificate"
}

# Self-signed SP certificate for the SAML exchange. NOTE(review):
# local.dns-names is defined in ingress.tf of this module; the "self-sign"
# ClusterIssuer comes from the cert-manager-self-sign dependency.
resource "kubectl_manifest" "saml_certificate" {
  yaml_body = <<-EOF
    apiVersion: "cert-manager.io/v1"
    kind: "Certificate"
    metadata:
      name: "${var.instance}-${var.component}-saml"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      secretName: "${var.instance}-${var.component}-saml"
      dnsNames: ${jsonencode(local.dns-names)}
      issuerRef:
        name: "self-sign"
        kind: "ClusterIssuer"
        group: "cert-manager.io"
    EOF
}

# SAML provider targeting dolibarr's samlconnector ACS endpoint.
resource "authentik_provider_saml" "dolibarr" {
  name                = "dolibarr-${var.instance}-saml"
  authentication_flow = data.authentik_flow.default-authentication-flow.id
  authorization_flow  = data.authentik_flow.default-authorization-flow.id
  acs_url             = "https://${var.sub-domain}.${var.domain-name}/custom/samlconnector/acs.php?entity=1&fk_idp=0"
  property_mappings   = data.authentik_property_mapping_saml.saml_maps.ids
  name_id_mapping     = data.authentik_property_mapping_saml.saml_name.id
  signing_kp          = data.authentik_certificate_key_pair.generated.id
  sp_binding          = "post"
}

18
apps/dolibarr/svc.tf Normal file
View File

@@ -0,0 +1,18 @@
# ClusterIP service exposing the nginx front-end (container port 3000) on
# port 80. NOTE(review): local.deploy-labels is defined elsewhere in this
# module (not in this file) — verify it matches the pod template labels.
resource "kubectl_manifest" "svc" {
  yaml_body = <<-EOF
    apiVersion: v1
    kind: Service
    metadata:
      name: ${var.instance}
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.deploy-labels)}
    spec:
      ports:
      - name: http
        port: 80
        protocol: TCP
        targetPort: 3000
      selector: ${jsonencode(local.deploy-labels)}
      type: ClusterIP
    EOF
}

View File

@@ -0,0 +1,89 @@
# Source: gitea/charts/memcached/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: gitea-memcached
namespace: vynil-ci
labels:
app.kubernetes.io/name: memcached
helm.sh/chart: memcached-6.3.14
app.kubernetes.io/instance: gitea
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: memcached
app.kubernetes.io/instance: gitea
replicas: 1
strategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: memcached
helm.sh/chart: memcached-6.3.14
app.kubernetes.io/instance: gitea
app.kubernetes.io/managed-by: Helm
annotations:
spec:
affinity:
podAffinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/name: memcached
app.kubernetes.io/instance: gitea
topologyKey: kubernetes.io/hostname
weight: 1
nodeAffinity:
securityContext:
fsGroup: 1001
serviceAccountName: default
containers:
- name: memcached
image: docker.io/bitnami/memcached:1.6.19-debian-11-r7
imagePullPolicy: "IfNotPresent"
securityContext:
runAsNonRoot: true
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MEMCACHED_PORT_NUMBER
value: "11211"
ports:
- name: memcache
containerPort: 11211
livenessProbe:
failureThreshold: 6
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
tcpSocket:
port: memcache
readinessProbe:
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 3
tcpSocket:
port: memcache
resources:
limits: {}
requests:
cpu: 250m
memory: 256Mi
volumeMounts:
- name: tmp
mountPath: /tmp
volumes:
- name: tmp
emptyDir: {}

View File

@@ -0,0 +1,247 @@
# Source: gitea/templates/gitea/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: gitea
annotations:
labels:
helm.sh/chart: gitea-8.3.0
app: gitea
app.kubernetes.io/name: gitea
app.kubernetes.io/instance: gitea
app.kubernetes.io/version: "1.19.3"
version: "1.19.3"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: gitea
app.kubernetes.io/instance: gitea
serviceName: gitea
template:
metadata:
annotations:
checksum/config: 27af0e4460a4b6fa0279e60d04c3d82609060dda7af59dd2051139acc1cdb203
checksum/ldap_0: 9356e28431e375c7fc7d624460a9f41c243f14c3f9765c40aa2b13cf46203eaf
labels:
helm.sh/chart: gitea-8.3.0
app: gitea
app.kubernetes.io/name: gitea
app.kubernetes.io/instance: gitea
app.kubernetes.io/version: "1.19.3"
version: "1.19.3"
app.kubernetes.io/managed-by: Helm
spec:
securityContext:
fsGroup: 1000
initContainers:
- name: init-directories
image: "gitea/gitea:1.19.3"
imagePullPolicy: Always
command: ["/usr/sbin/init_directory_structure.sh"]
env:
- name: GITEA_APP_INI
value: /data/gitea/conf/app.ini
- name: GITEA_CUSTOM
value: /data/gitea
- name: GITEA_WORK_DIR
value: /data
- name: GITEA_TEMP
value: /tmp/gitea
- name: TZ
value: Europe/Paris
volumeMounts:
- name: init
mountPath: /usr/sbin
- name: temp
mountPath: /tmp
- name: data
mountPath: /data
securityContext:
capabilities:
add:
- SYS_CHROOT
resources:
limits: {}
requests:
cpu: 100m
memory: 128Mi
- name: init-app-ini
image: "gitea/gitea:1.19.3"
imagePullPolicy: Always
command: ["/usr/sbin/config_environment.sh"]
env:
- name: GITEA_APP_INI
value: /data/gitea/conf/app.ini
- name: GITEA_CUSTOM
value: /data/gitea
- name: GITEA_WORK_DIR
value: /data
- name: GITEA_TEMP
value: /tmp/gitea
- name: TZ
value: Europe/Paris
- name: ENV_TO_INI__DATABASE__LOG_SQL
value: "false"
- name: ENV_TO_INI__LOG__LEVEL
value: Debug
volumeMounts:
- name: config
mountPath: /usr/sbin
- name: temp
mountPath: /tmp
- name: data
mountPath: /data
- name: inline-config-sources
mountPath: /env-to-ini-mounts/inlines/
securityContext:
capabilities:
add:
- SYS_CHROOT
resources:
limits: {}
requests:
cpu: 100m
memory: 128Mi
- name: configure-gitea
image: "gitea/gitea:1.19.3"
command: ["/usr/sbin/configure_gitea.sh"]
imagePullPolicy: Always
securityContext:
capabilities:
add:
- SYS_CHROOT
runAsUser: 1000
env:
- name: GITEA_APP_INI
value: /data/gitea/conf/app.ini
- name: GITEA_CUSTOM
value: /data/gitea
- name: GITEA_WORK_DIR
value: /data
- name: GITEA_TEMP
value: /tmp/gitea
- name: GITEA_LDAP_BIND_DN_0
valueFrom:
secretKeyRef:
key: bindDn
name: gitea-ldap
- name: GITEA_LDAP_PASSWORD_0
valueFrom:
secretKeyRef:
key: bindPassword
name: gitea-ldap
- name: GITEA_ADMIN_USERNAME
valueFrom:
secretKeyRef:
key: username
name: gitea-admin-user
- name: GITEA_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: gitea-admin-user
- name: TZ
value: Europe/Paris
volumeMounts:
- name: init
mountPath: /usr/sbin
- name: temp
mountPath: /tmp
- name: data
mountPath: /data
resources:
limits: {}
requests:
cpu: 100m
memory: 128Mi
terminationGracePeriodSeconds: 60
containers:
- name: gitea
image: "gitea/gitea:1.19.3"
imagePullPolicy: Always
env:
# SSH Port values have to be set here as well for openssh configuration
- name: SSH_LISTEN_PORT
value: "2222"
- name: SSH_PORT
value: "2222"
- name: SSH_LOG_LEVEL
value: "INFO"
- name: GITEA_APP_INI
value: /data/gitea/conf/app.ini
- name: GITEA_CUSTOM
value: /data/gitea
- name: GITEA_WORK_DIR
value: /data
- name: GITEA_TEMP
value: /tmp/gitea
- name: TMPDIR
value: /tmp/gitea
- name: TZ
value: Europe/Paris
ports:
- name: ssh
containerPort: 2222
- name: http
containerPort: 3000
livenessProbe:
failureThreshold: 10
initialDelaySeconds: 200
periodSeconds: 10
successThreshold: 1
tcpSocket:
port: http
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
tcpSocket:
port: http
timeoutSeconds: 1
resources:
{}
securityContext:
capabilities:
add:
- SYS_CHROOT
volumeMounts:
- name: temp
mountPath: /tmp
- name: data
mountPath: /data
- mountPath: /data/gitea/public/css
name: gitea-themes
volumes:
- name: init
secret:
secretName: gitea-init
defaultMode: 110
- name: config
secret:
secretName: gitea
defaultMode: 110
- configMap:
name: gitea-themes
name: gitea-themes
- name: inline-config-sources
secret:
secretName: gitea-inline-config
- name: temp
emptyDir: {}
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"

136
apps/gitea/datas.tf Normal file
View File

@@ -0,0 +1,136 @@
locals {
  # Ownership / management labels applied to every object of this component.
  common-labels = {
    "vynil.solidite.fr/owner-name"      = var.instance
    "vynil.solidite.fr/owner-namespace" = var.namespace
    "vynil.solidite.fr/owner-category"  = var.category
    "vynil.solidite.fr/owner-component" = var.component
    "app.kubernetes.io/managed-by"      = "vynil"
    "app.kubernetes.io/instance"        = var.instance
  }
  # JSON6902 patch alternatives for the SSH service's loadBalancerIP; which
  # one is applied is decided in the kustomization_overlay below.
  removePatch = <<-EOF
    - op: remove
      path: /spec/loadBalancerIP
    EOF
  modifyPatch = <<-EOF
    - op: replace
      path: /spec/loadBalancerIP
      value: "${var.load-balancer.ip}"
    EOF
}

# Credentials of the gitea database role, created by the Zalando operator for
# the cluster declared in postgresql.tf.
data "kubernetes_secret_v1" "postgresql_password" {
  depends_on = [kubectl_manifest.gitea_postgresql]
  metadata {
    name      = "${var.component}.${var.instance}-${var.component}.credentials.postgresql.acid.zalan.do"
    namespace = var.namespace
  }
}

# Authentik bootstrap secret; supplies the admin API token used in ldap.tf.
data "kubernetes_secret_v1" "authentik" {
  metadata {
    name      = "authentik"
    namespace = "${var.domain}-auth"
  }
}
# Kustomize overlay over the helm-rendered chart manifests: image overrides,
# StatefulSet customisation, and SSH service patches.
data "kustomization_overlay" "data" {
  common_labels = local.common-labels
  namespace     = var.namespace
  # Every rendered manifest except the component index and the themes
  # ConfigMap (created out-of-band by index.rhai's pre_install hook).
  resources = [for file in fileset(path.module, "*.yaml"): file if ! contains(["index.yaml", "v1_ConfigMap_gitea-themes.yaml"], file)]
  images {
    name     = "docker.io/bitnami/memcached"
    new_name = "${var.images.memcached.registry}/${var.images.memcached.repository}"
    new_tag  = "${var.images.memcached.tag}"
  }
  # Strategic-merge patch applying the configured image, replica count, LDAP
  # environment and volume size to the chart's StatefulSet.
  patches {
    target {
      kind = "StatefulSet"
      name = "gitea"
    }
    patch = <<-EOF
      apiVersion: apps/v1
      kind: StatefulSet
      metadata:
        name: gitea
      spec:
        replicas: ${var.replicas}
        template:
          spec:
            initContainers:
            - name: init-directories
              image: "${var.images.gitea.registry}/${var.images.gitea.repository}:${var.images.gitea.tag}"
              imagePullPolicy: "${var.images.gitea.pullPolicy}"
            - name: init-app-ini
              image: "${var.images.gitea.registry}/${var.images.gitea.repository}:${var.images.gitea.tag}"
              # Use the configured pullPolicy consistently (was hard-coded
              # IfNotPresent while init-directories honoured the variable).
              imagePullPolicy: "${var.images.gitea.pullPolicy}"
            - name: configure-gitea
              image: "${var.images.gitea.registry}/${var.images.gitea.repository}:${var.images.gitea.tag}"
              imagePullPolicy: "${var.images.gitea.pullPolicy}"
              env:
              - name: LDAP_USER_SEARCH_BASE
                valueFrom:
                  secretKeyRef:
                    key: user-search-base
                    name: gitea-ldap
              - name: LDAP_USER_FILTER
                valueFrom:
                  secretKeyRef:
                    key: user-filter
                    name: gitea-ldap
              - name: LDAP_ADMIN_FILTER
                valueFrom:
                  secretKeyRef:
                    key: admin-filter
                    name: gitea-ldap
              - name: LDAP_HOST
                valueFrom:
                  secretKeyRef:
                    key: endpoint
                    name: gitea-ldap
              - name: TZ
                value: ${var.timezone}
            containers:
            - name: gitea
              image: "${var.images.gitea.registry}/${var.images.gitea.repository}:${var.images.gitea.tag}"
              imagePullPolicy: "${var.images.gitea.pullPolicy}"
              env:
              - name: SSH_LISTEN_PORT
                value: "2222"
              - name: SSH_PORT
                value: "${var.ssh-port}"
              - name: SSH_LOG_LEVEL
                value: "INFO"
              - name: TZ
                value: ${var.timezone}
        volumeClaimTemplates:
        - metadata:
            name: data
            annotations:
              k8up.io/backup: "true"
          spec:
            accessModes:
            - "ReadWriteOnce"
            resources:
              requests:
                storage: "${var.volume.size}"
      EOF
  }
  # Expose the configured SSH port on the SSH service.
  patches {
    target {
      kind = "Service"
      name = "gitea-ssh"
    }
    patch = <<-EOF
      - op: replace
        path: /spec/ports/0/port
        value: ${var.ssh-port}
      EOF
  }
  # Pin or drop the load-balancer IP depending on whether one is configured.
  patches {
    target {
      kind = "Service"
      name = "gitea-ssh"
    }
    patch = var.load-balancer.ip==""?local.removePatch:local.modifyPatch
  }
}

18
apps/gitea/index.rhai Normal file
View File

@@ -0,0 +1,18 @@
// Lifecycle hooks for the gitea component, executed by the vynil agent.
const NS=config.namespace;
const SRC=src;
const DEST=dest;

// Before packing: render the upstream helm chart into a single manifest file.
fn pre_pack() {
shell("helm repo add gitea-charts https://dl.gitea.io/charts/");
shell(`helm template gitea gitea-charts/gitea --namespace=vynil-ci --values values.yml >${global::SRC}/chart.yaml`);
}

// After packing: drop the chart's test pod and the inline-config secret
// (terraform recreates the latter), then rewrite '${LDAP_*}' placeholders in
// the init secret from single quotes to escaped double quotes so the chart's
// scripts expand them at runtime.
fn post_pack() {
shell(`rm -f ${global::DEST}/v1_Pod_gitea-test-connection.yaml`);
shell(`rm -f ${global::DEST}/v1_Secret_gitea-inline-config.yaml`);
let regex = "'\\\\\\${\\(LDAP_[A-Z_]*\\)}'";
let final = "\\\"\\${\\1}\\\"";
shell(`sed -i "s/${regex}/${final}/g" ${global::DEST}/v1_Secret_gitea-init.yaml`);
}

// Before install: create the themes ConfigMap out-of-band; "|| :" makes the
// create idempotent when it already exists.
fn pre_install() {
shell(`kubectl create -n ${global::NS} -f ${global::SRC}/v1_ConfigMap_gitea-themes.yaml || :`);
}

241
apps/gitea/index.yaml Normal file
View File

@@ -0,0 +1,241 @@
---
apiVersion: vinyl.solidite.fr/v1beta1 # NOTE(review): group "vinyl" differs from the "vynil.solidite.fr" label prefix used elsewhere — confirm the CRD group spelling before changing either
kind: Component
category: apps
metadata:
name: gitea
description: |-
Git with a cup of tea
A painless self-hosted Git service.
Gitea is a community managed lightweight code hosting solution written in Go. It is published under the MIT license.
options:
images:
default:
gitea:
pullPolicy: IfNotPresent
registry: docker.io
repository: gitea/gitea
tag: 1.19.3
memcached:
registry: docker.io
repository: bitnami/memcached
tag: 1.6.19-debian-11-r7
examples:
- gitea:
pullPolicy: IfNotPresent
registry: docker.io
repository: gitea/gitea
tag: 1.19.3
memcached:
registry: docker.io
repository: bitnami/memcached
tag: 1.6.19-debian-11-r7
properties:
gitea:
default:
pullPolicy: IfNotPresent
registry: docker.io
repository: gitea/gitea
tag: 1.19.3
properties:
pullPolicy:
default: IfNotPresent
enum:
- Always
- Never
- IfNotPresent
type: string
registry:
default: docker.io
type: string
repository:
default: gitea/gitea
type: string
tag:
default: 1.19.3
type: string
type: object
memcached:
default:
registry: docker.io
repository: bitnami/memcached
tag: 1.6.19-debian-11-r7
properties:
registry:
default: docker.io
type: string
repository:
default: bitnami/memcached
type: string
tag:
default: 1.6.19-debian-11-r7
type: string
type: object
type: object
load-balancer:
default:
ip: ''
examples:
- ip: ''
properties:
ip:
default: ''
type: string
type: object
webhook:
default:
allowed-hosts: private
skip-tls-verify: false
examples:
- allowed-hosts: private
skip-tls-verify: false
properties:
allowed-hosts:
default: private
type: string
skip-tls-verify:
default: false
type: boolean
type: object
release:
default: 8.3.0
examples:
- 8.3.0
type: string
push-create:
default:
org: 'true'
private: 'false'
user: 'true'
examples:
- org: 'true'
private: 'false'
user: 'true'
properties:
org:
default: 'true'
type: string
private:
default: 'false'
type: string
user:
default: 'true'
type: string
type: object
volume:
default:
size: 10Gi
examples:
- size: 10Gi
properties:
size:
default: 10Gi
type: string
type: object
default-branch:
default: main
examples:
- main
type: string
postgres:
default:
replicas: 1
storage: 10Gi
version: '14'
examples:
- replicas: 1
storage: 10Gi
version: '14'
properties:
replicas:
default: 1
type: integer
storage:
default: 10Gi
type: string
version:
default: '14'
type: string
type: object
ingress-class:
default: traefik
examples:
- traefik
type: string
ssh-port:
default: 2222
examples:
- 2222
type: integer
domain-name:
default: your_company.com
examples:
- your_company.com
type: string
domain:
default: your-company
examples:
- your-company
type: string
issuer:
default: letsencrypt-prod
examples:
- letsencrypt-prod
type: string
disable-registration:
default: true
examples:
- true
type: boolean
replicas:
default: 1
examples:
- 1
type: integer
sub-domain:
default: git
examples:
- git
type: string
admin:
default:
email: git-admin@git.your_company.com
name: gitea_admin
examples:
- email: git-admin@git.your_company.com
name: gitea_admin
properties:
email:
default: git-admin@git.your_company.com
type: string
name:
default: gitea_admin
type: string
type: object
timezone:
default: Europe/Paris
examples:
- Europe/Paris
type: string
theme:
default: gitea-modern
examples:
- gitea-modern
type: string
dependencies:
- dist: null
category: share
component: authentik-ldap
- dist: null
category: core
component: secret-generator
- dist: null
category: dbo
component: postgresql
providers:
kubernetes: true
authentik: true
kubectl: true
postgresql: null
restapi: true
http: true

69
apps/gitea/ingress.tf Normal file
View File

@@ -0,0 +1,69 @@
locals {
  # Hostnames served by the IngressRoute.
  dns-names   = ["${var.sub-domain}.${var.domain-name}"]
  # Middleware references in traefik IngressRoute object form.
  middlewares = [{"name" = "${var.instance}-https"}]
  # Backend: the chart's gitea-http service on container port 3000.
  services = [{
    "kind"      = "Service"
    "name"      = "gitea-http"
    "namespace" = var.namespace
    "port"      = 3000
  }]
  # One traefik route rule per served hostname.
  routes = [ for v in local.dns-names : {
    "kind"        = "Rule"
    "match"       = "Host(`${v}`)"
    "middlewares" = local.middlewares
    "services"    = local.services
  }]
}
# TLS certificate for the gitea hostnames, issued by the configured
# ClusterIssuer; consumed by the IngressRoute tls section below.
resource "kubectl_manifest" "gitea_certificate" {
  yaml_body = <<-EOF
    apiVersion: "cert-manager.io/v1"
    kind: "Certificate"
    metadata:
      name: "${var.instance}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      secretName: "${var.instance}-cert"
      dnsNames: ${jsonencode(local.dns-names)}
      issuerRef:
        name: "${var.issuer}"
        kind: "ClusterIssuer"
        group: "cert-manager.io"
    EOF
}

# Traefik middleware that permanently redirects HTTP to HTTPS.
resource "kubectl_manifest" "gitea_https_redirect" {
  yaml_body = <<-EOF
    apiVersion: "traefik.containo.us/v1alpha1"
    kind: "Middleware"
    metadata:
      name: "${var.instance}-https"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      redirectScheme:
        scheme: "https"
        permanent: true
    EOF
}

# Traefik IngressRoute (CRD-based, unlike dolibarr's standard Ingress) bound
# to both the web and websecure entry points.
resource "kubectl_manifest" "gitea_ingress" {
  # Take ownership of fields even if another manager already set them.
  force_conflicts = true
  yaml_body = <<-EOF
    apiVersion: "traefik.containo.us/v1alpha1"
    kind: "IngressRoute"
    metadata:
      name: "${var.instance}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    #  annotations:
    #    "kubernetes.io/ingress.class": "${var.ingress-class}"
    spec:
      entryPoints: ["web","websecure"]
      routes: ${jsonencode(local.routes)}
      tls:
        secretName: "${var.instance}-cert"
    EOF
}

View File

@@ -0,0 +1,54 @@
# app.ini fragments for gitea, one secret key per ini section; consumed by the
# chart's config_environment.sh through the inline-config-sources mount
# declared in the StatefulSet patch (datas.tf).
resource "kubernetes_secret_v1" "gitea_inline_config" {
  metadata {
    name      = "gitea-inline-config"
    namespace = var.namespace
    labels    = local.common-labels
  }
  data = {
    "_generals_" = ""
    metrics      = "ENABLED=true"
    # INSTALL_LOCK=true skips the interactive installer on first start.
    security = "INSTALL_LOCK=true"
    service  = "DISABLE_REGISTRATION=${var.disable-registration}"
    cache    = <<-EOF
      ADAPTER=memcache
      ENABLED=true
      HOST=gitea-memcached.${var.namespace}.svc:11211
    EOF
    # Credentials come from the operator-generated secret read in datas.tf.
    database = <<-EOF
      DB_TYPE=postgres
      HOST=${var.instance}-${var.component}.${var.namespace}.svc:5432
      NAME=${var.component}
      PASSWD=${data.kubernetes_secret_v1.postgresql_password.data["password"]}
      USER=${data.kubernetes_secret_v1.postgresql_password.data["username"]}
      SSL_MODE=require
    EOF
    repository = <<-EOF
      DEFAULT_BRANCH=${var.default-branch}
      DEFAULT_PUSH_CREATE_PRIVATE=${var.push-create.private}
      ENABLE_PUSH_CREATE_ORG=${var.push-create.org}
      ENABLE_PUSH_CREATE_USER=${var.push-create.user}
      ROOT=/data/git/gitea-repositories
    EOF
    server = <<-EOF
      APP_DATA_PATH=/data
      DOMAIN=${var.sub-domain}.${var.domain-name}
      ENABLE_PPROF=false
      HTTP_PORT=3000
      PROTOCOL=http
      ROOT_URL=https://${var.sub-domain}.${var.domain-name}
      SSH_DOMAIN=${var.sub-domain}.${var.domain-name}
      SSH_LISTEN_PORT=${var.ssh-port}
      SSH_PORT=${var.ssh-port}
    EOF
    ui = <<-EOF
      DEFAULT_THEME=${var.theme}
      SHOW_USER_EMAIL=false
      THEMES=auto,gitea,arc-green,edge-auto,edge-dark,edge-light,everforest-auto,everforest-dark,everforest-light,gitea-modern,gruvbox-auto,gruvbox-dark,gruvbox-light,gruvbox-material-auto,gruvbox-material-dark,gruvbox-material-light,palenight,soft-era,sonokai-andromeda,sonokai-atlantis,sonokai-espresso,sonokai-maia,sonokai-shusia,sonokai,theme-nord
    EOF
    webhook = <<-EOF
      ALLOWED_HOST_LIST=${var.webhook.allowed-hosts}
      SKIP_TLS_VERIFY=${var.webhook.skip-tls-verify}
    EOF
  }
}

149
apps/gitea/ldap.tf Normal file
View File

@@ -0,0 +1,149 @@
locals {
  # LDAP DIT layout derived from the instance FQDN, e.g.
  # "git.example.com" -> "dc=git,dc=example,dc=com".
  base-dn       = format("dc=%s", join(",dc=", split(".", format("%s.%s", var.sub-domain, var.domain-name))))
  base-group-dn = format("ou=groups,%s", local.base-dn)
  base-user-dn  = format("ou=users,%s", local.base-dn)
  # Admin token for direct calls to the authentik REST API.
  authentik-token = data.kubernetes_secret_v1.authentik.data["AUTHENTIK_BOOTSTRAP_TOKEN"]
  request_headers = {
    "Content-Type" = "application/json"
    Authorization  = "Bearer ${local.authentik-token}"
  }
  # NOTE(review): "prividers" is a misspelling of "providers"; kept as-is here
  # because restapi_object.ldap_outpost_binding references this exact name —
  # rename both together in a follow-up.
  ldap-outpost-prividers = jsondecode(data.http.get_ldap_outpost.response_body).results[0].providers
  ldap-outpost-pk        = jsondecode(data.http.get_ldap_outpost.response_body).results[0].pk
}
# LDAP connection settings for gitea: static values (bind DN, search base,
# filters, endpoint) in data, plus a generated bind password field.
resource "kubectl_manifest" "gitea_ldap" {
  # The secret-generator operator rewrites its own annotations; ignore them to
  # avoid perpetual diffs.
  ignore_fields = ["metadata.annotations"]
  yaml_body = <<-EOF
    apiVersion: "secretgenerator.mittwald.de/v1alpha1"
    kind: "StringSecret"
    metadata:
      name: "${var.component}-ldap"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      forceRegenerate: false
      data:
        bindDn: "cn=${var.component}-ldapsearch,${local.base-user-dn}"
        user-search-base: "${local.base-user-dn}"
        user-filter: "(&(|(memberof=cn=gitea_admin,${local.base-group-dn})(memberof=cn=gitea_users,${local.base-group-dn}))(|(cn=%[1]s)(mail=%[1]s)))"
        admin-filter: "(memberof=cn=gitea_admin,${local.base-group-dn})"
        endpoint: "ak-outpost-ldap.${var.domain}-auth.svc"
      fields:
      - fieldName: "bindPassword"
        length: "32"
    EOF
}

# Read the generated secret back so the bind password can be pushed to
# authentik below.
data "kubernetes_secret_v1" "gitea_ldap_password" {
  depends_on = [kubectl_manifest.gitea_ldap]
  metadata {
    name      = kubectl_manifest.gitea_ldap.name
    namespace = var.namespace
  }
}

# Dedicated service account gitea uses to bind and search the LDAP outpost.
resource "authentik_user" "gitea_ldapsearch" {
  username = "${var.component}-ldapsearch"
  name     = "${var.component}-ldapsearch"
}

resource "authentik_group" "gitea_ldapsearch" {
  name  = "${var.component}-ldapsearch"
  users = [authentik_user.gitea_ldapsearch.id]
  # NOTE(review): superuser rights for a search-only account is broad —
  # confirm a non-superuser search group is not sufficient.
  is_superuser = true
}

# Set the ldapsearch account's password through the authentik API (the
# provider has no resource for set_password); 201/204 are accepted as success.
data "http" "gitea_ldapsearch_password" {
  url             = "http://authentik.${var.domain}-auth.svc/api/v3/core/users/${authentik_user.gitea_ldapsearch.id}/set_password/"
  method          = "POST"
  request_headers = local.request_headers
  request_body    = jsonencode({password=data.kubernetes_secret_v1.gitea_ldap_password.data["bindPassword"]})
  lifecycle {
    postcondition {
      condition     = contains([201, 204], self.status_code)
      error_message = "Status code invalid"
    }
  }
}
# Flow the LDAP outpost uses to authenticate bind requests.
data "authentik_flow" "ldap-authentication-flow" {
  depends_on = [authentik_user.gitea_ldapsearch] # fake dependency so it is not evaluated at plan stage
  slug = "ldap-authentication-flow"
}

# LDAP provider exposing the gitea DIT through the shared outpost.
resource "authentik_provider_ldap" "gitea_provider_ldap" {
  name         = "gitea-ldap-provider"
  base_dn      = local.base-dn
  search_group = authentik_group.gitea_ldapsearch.id
  bind_flow    = data.authentik_flow.ldap-authentication-flow.id
}

# Authentik application tile pointing at the gitea instance.
resource "authentik_application" "gitea_application" {
  name              = "gitea"
  slug              = "gitea"
  protocol_provider = authentik_provider_ldap.gitea_provider_ldap.id
  meta_launch_url   = format("https://%s.%s", var.sub-domain, var.domain-name)
  meta_icon         = format("https://%s.%s/%s", var.sub-domain, var.domain-name, "assets/img/logo.svg")
}

# Regular users group; matched by the user-filter in the gitea-ldap secret.
resource "authentik_group" "gitea_users" {
  name = "gitea_users"
}

# Pre-existing admin group managed by the vynil platform.
data "authentik_group" "vynil-admin" {
  depends_on = [authentik_group.gitea_users] # fake dependency so it is not evaluated at plan stage
  name = "vynil-ldap-admins"
}

# Admin group; matched by the admin-filter in the gitea-ldap secret.
resource "authentik_group" "gitea_admin" {
  name   = "gitea_admin"
  parent = authentik_group.gitea_users.id
}

# Grant application access to users, platform admins, and the search account.
resource "authentik_policy_binding" "gitea_access_users" {
  target = authentik_application.gitea_application.uuid
  group  = authentik_group.gitea_users.id
  order  = 0
}

resource "authentik_policy_binding" "gitea_access_vynil" {
  target = authentik_application.gitea_application.uuid
  group  = data.authentik_group.vynil-admin.id
  order  = 1
}

resource "authentik_policy_binding" "gitea_access_ldap" {
  target = authentik_application.gitea_application.uuid
  group  = authentik_group.gitea_ldapsearch.id
  order  = 2
}
# Look up the shared "ldap" outpost so the gitea provider can be appended.
data "http" "get_ldap_outpost" {
  depends_on = [authentik_group.gitea_users] # fake dependency so it is not evaluated at plan stage
  url             = "http://authentik.${var.domain}-auth.svc/api/v3/outposts/instances/?name__iexact=ldap"
  method          = "GET"
  request_headers = local.request_headers
  lifecycle {
    postcondition {
      condition     = contains([200], self.status_code)
      error_message = "Status code invalid"
    }
  }
}

# Raw REST access to authentik; PATCH for every verb because we only ever
# merge the provider list into the existing outpost object, never delete it.
provider "restapi" {
  uri                  = "http://authentik.${var.domain}-auth.svc/api/v3/"
  headers              = local.request_headers
  create_method        = "PATCH"
  update_method        = "PATCH"
  destroy_method       = "PATCH"
  write_returns_object = true
  id_attribute         = "name"
}

# Idempotently attach the gitea LDAP provider to the shared outpost: keep the
# list unchanged when the provider is already present. (The "prividers" local
# name is misspelled in locals above; see the note there.)
resource "restapi_object" "ldap_outpost_binding" {
  path = "/outposts/instances/${local.ldap-outpost-pk}/"
  data = jsonencode({
    name      = "ldap"
    providers = contains(local.ldap-outpost-prividers, authentik_provider_ldap.gitea_provider_ldap.id) ? local.ldap-outpost-prividers : concat(local.ldap-outpost-prividers, [authentik_provider_ldap.gitea_provider_ldap.id])
  })
}

31
apps/gitea/postgresql.tf Normal file
View File

@@ -0,0 +1,31 @@
locals {
  # Labels for the PostgreSQL objects: common labels plus a component marker.
  pg-labels = merge(local.common-labels, {
    "app.kubernetes.io/component" = "postgresql"
  })
}
# Zalando postgres-operator cluster backing Gitea.
resource "kubectl_manifest" "gitea_postgresql" {
  yaml_body = <<-EOF
    apiVersion: "acid.zalan.do/v1"
    kind: "postgresql"
    metadata:
      # The operator requires the cluster name to be prefixed by teamId,
      # hence "<instance>-<component>" (reversed from the usual naming).
      name: "${var.instance}-${var.component}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.pg-labels)}
    spec:
      databases:
        # <database name>: <owner role>
        ${var.component}: "${var.component}"
      numberOfInstances: ${var.postgres.replicas}
      podAnnotations:
        # k8up backup integration: dump the database as plain SQL.
        "k8up.io/backupcommand": "pg_dump -U postgres -d ${var.component} --clean"
        "k8up.io/file-extension": ".sql"
      postgresql:
        version: "${var.postgres.version}"
      teamId: "${var.instance}"
      users:
        # Application role and the privileges granted to it.
        ${var.component}:
        - "superuser"
        - "createdb"
      volume:
        size: "${var.postgres.storage}"
  EOF
}

19
apps/gitea/secret.tf Normal file
View File

@@ -0,0 +1,19 @@
# Generated admin credentials for Gitea, created by mittwald secret-generator.
resource "kubectl_manifest" "gitea_secret" {
  # secret-generator annotates the Secret it manages; ignore those fields so
  # Terraform does not fight the controller on every plan.
  ignore_fields = ["metadata.annotations"]
  yaml_body = <<-EOF
    apiVersion: "secretgenerator.mittwald.de/v1alpha1"
    kind: "StringSecret"
    metadata:
      name: "gitea-admin-user"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      # Keep the same password across reconciliations.
      forceRegenerate: false
      data:
        username: "${var.admin.name}"
      fields:
        - fieldName: "password"
          length: "32"
  EOF
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,90 @@
# Source: gitea/templates/gitea/init.yaml
# Rendered Helm manifest (gitea chart 8.3.0): init/configuration scripts
# mounted into Gitea's init containers. Indentation restored for readability;
# script bodies are Secret data and are kept verbatim.
apiVersion: v1
kind: Secret
metadata:
  name: gitea-init
  labels:
    helm.sh/chart: gitea-8.3.0
    app: gitea
    app.kubernetes.io/name: gitea
    app.kubernetes.io/instance: gitea
    app.kubernetes.io/version: "1.19.3"
    version: "1.19.3"
    app.kubernetes.io/managed-by: Helm
type: Opaque
stringData:
  # Imports a GPG private key (used when commit signing is configured).
  configure_gpg_environment.sh: |-
    #!/usr/bin/env bash
    set -eu
    gpg --batch --import /raw/private.asc
  # Prepares /data ownership and the directory layout Gitea expects.
  init_directory_structure.sh: |-
    #!/usr/bin/env bash
    set -euo pipefail
    set -x
    chown 1000:1000 /data
    mkdir -p /data/git/.ssh
    chmod -R 700 /data/git/.ssh
    [ ! -d /data/gitea/conf ] && mkdir -p /data/gitea/conf
    # prepare temp directory structure
    mkdir -p "${GITEA_TEMP}"
    chown 1000:1000 "${GITEA_TEMP}"
    chmod ug+rwx "${GITEA_TEMP}"
  # Runs DB migrations, then (re)configures the admin user and the LDAP
  # auth source; exits non-zero so the init container retries while the
  # database is not yet reachable.
  configure_gitea.sh: |-
    #!/usr/bin/env bash
    set -euo pipefail
    echo '==== BEGIN GITEA CONFIGURATION ===='
    { # try
      gitea migrate
    } || { # catch
      echo "Gitea migrate might fail due to database connection...This init-container will try again in a few seconds"
      exit 1
    }
    function configure_admin_user() {
      local ACCOUNT_ID=$(gitea admin user list --admin | grep -e "\s\+${GITEA_ADMIN_USERNAME}\s\+" | awk -F " " "{printf \$1}")
      if [[ -z "${ACCOUNT_ID}" ]]; then
        echo "No admin user '${GITEA_ADMIN_USERNAME}' found. Creating now..."
        gitea admin user create --admin --username "${GITEA_ADMIN_USERNAME}" --password "${GITEA_ADMIN_PASSWORD}" --email "git-admin@local.com" --must-change-password=false
        echo '...created.'
      else
        echo "Admin account '${GITEA_ADMIN_USERNAME}' already exist. Running update to sync password..."
        gitea admin user change-password --username "${GITEA_ADMIN_USERNAME}" --password "${GITEA_ADMIN_PASSWORD}"
        echo '...password sync done.'
      fi
    }
    configure_admin_user
    function configure_ldap() {
      local LDAP_NAME='Authentik'
      local GITEA_AUTH_ID=$(gitea admin auth list --vertical-bars | grep -E "\|${LDAP_NAME}\s+\|" | grep -iE '\|LDAP \(via BindDN\)\s+\|' | awk -F " " "{print \$1}")
      if [[ -z "${GITEA_AUTH_ID}" ]]; then
        echo "No ldap configuration found with name "${LDAP_NAME}". Installing it now..."
        gitea admin auth add-ldap --admin-filter "${LDAP_ADMIN_FILTER}" --avatar-attribute 'jpegPhoto' --bind-dn "${GITEA_LDAP_BIND_DN_0}" --bind-password "${GITEA_LDAP_PASSWORD_0}" --email-attribute 'mail' --firstname-attribute 'givenname' --host "${LDAP_HOST}" --name 'Authentik' --port 389 --security-protocol 'unencrypted' --surname-attribute 'name' --user-filter "${LDAP_USER_FILTER}" --user-search-base "${LDAP_USER_SEARCH_BASE}" --username-attribute 'cn'
        echo '...installed.'
      else
        echo "Existing ldap configuration with name "${LDAP_NAME}": '${GITEA_AUTH_ID}'. Running update to sync settings..."
        gitea admin auth update-ldap --id "${GITEA_AUTH_ID}" --admin-filter "${LDAP_ADMIN_FILTER}" --avatar-attribute 'jpegPhoto' --bind-dn "${GITEA_LDAP_BIND_DN_0}" --bind-password "${GITEA_LDAP_PASSWORD_0}" --email-attribute 'mail' --firstname-attribute 'givenname' --host "${LDAP_HOST}" --name 'Authentik' --port 389 --security-protocol 'unencrypted' --surname-attribute 'name' --user-filter "${LDAP_USER_FILTER}" --user-search-base "${LDAP_USER_SEARCH_BASE}" --username-attribute 'cn'
        echo '...sync settings done.'
      fi
    }
    configure_ldap
    function configure_oauth() {
      echo 'no oauth configuration... skipping.'
    }
    configure_oauth
    echo '==== END GITEA CONFIGURATION ===='

View File

@@ -0,0 +1,169 @@
# Source: gitea/templates/gitea/config.yaml
# Rendered Helm manifest (gitea chart 8.3.0): the env2ini bootstrap script
# that turns mounted config fragments into ENV_TO_INI__* environment
# variables and finally writes app.ini via `environment-to-ini`.
# Indentation restored; script body is Secret data and is kept verbatim.
apiVersion: v1
kind: Secret
metadata:
  name: gitea
  labels:
    helm.sh/chart: gitea-8.3.0
    app: gitea
    app.kubernetes.io/name: gitea
    app.kubernetes.io/instance: gitea
    app.kubernetes.io/version: "1.19.3"
    version: "1.19.3"
    app.kubernetes.io/managed-by: Helm
type: Opaque
stringData:
  config_environment.sh: |-
    #!/usr/bin/env bash
    set -euo pipefail
    function env2ini::log() {
      printf "${1}\n"
    }
    function env2ini::read_config_to_env() {
      local section="${1}"
      local line="${2}"
      if [[ -z "${line}" ]]; then
        # skip empty line
        return
      fi
      # 'xargs echo -n' trims all leading/trailing whitespaces and a trailing new line
      local setting="$(awk -F '=' '{print $1}' <<< "${line}" | xargs echo -n)"
      if [[ -z "${setting}" ]]; then
        env2ini::log ' ! invalid setting'
        exit 1
      fi
      local value=''
      local regex="^${setting}(\s*)=(\s*)(.*)"
      if [[ $line =~ $regex ]]; then
        value="${BASH_REMATCH[3]}"
      else
        env2ini::log ' ! invalid setting'
        exit 1
      fi
      env2ini::log " + '${setting}'"
      if [[ -z "${section}" ]]; then
        export "ENV_TO_INI____${setting^^}=${value}" # '^^' makes the variable content uppercase
        return
      fi
      local masked_section="${section//./_0X2E_}" # '//' instructs to replace all matches
      masked_section="${masked_section//-/_0X2D_}"
      export "ENV_TO_INI__${masked_section^^}__${setting^^}=${value}" # '^^' makes the variable content uppercase
    }
    function env2ini::reload_preset_envs() {
      env2ini::log "Reloading preset envs..."
      while read -r line; do
        if [[ -z "${line}" ]]; then
          # skip empty line
          return
        fi
        # 'xargs echo -n' trims all leading/trailing whitespaces and a trailing new line
        local setting="$(awk -F '=' '{print $1}' <<< "${line}" | xargs echo -n)"
        if [[ -z "${setting}" ]]; then
          env2ini::log ' ! invalid setting'
          exit 1
        fi
        local value=''
        local regex="^${setting}(\s*)=(\s*)(.*)"
        if [[ $line =~ $regex ]]; then
          value="${BASH_REMATCH[3]}"
        else
          env2ini::log ' ! invalid setting'
          exit 1
        fi
        env2ini::log " + '${setting}'"
        export "${setting^^}=${value}" # '^^' makes the variable content uppercase
      done < "/tmp/existing-envs"
      rm /tmp/existing-envs
    }
    function env2ini::process_config_file() {
      local config_file="${1}"
      local section="$(basename "${config_file}")"
      if [[ $section == '_generals_' ]]; then
        env2ini::log " [ini root]"
        section=''
      else
        env2ini::log " ${section}"
      fi
      while read -r line; do
        env2ini::read_config_to_env "${section}" "${line}"
      done < <(awk 1 "${config_file}") # Helm .toYaml trims the trailing new line which breaks line processing; awk 1 ... adds it back while reading
    }
    function env2ini::load_config_sources() {
      local path="${1}"
      if [[ -d "${path}" ]]; then
        env2ini::log "Processing $(basename "${path}")..."
        while read -d '' configFile; do
          env2ini::process_config_file "${configFile}"
        done < <(find "${path}" -type l -not -name '..data' -print0)
        env2ini::log "\n"
      fi
    }
    function env2ini::generate_initial_secrets() {
      # These environment variables will either be
      # - overwritten with user defined values,
      # - initially used to set up Gitea
      # Anyway, they won't harm existing app.ini files
      export ENV_TO_INI__SECURITY__INTERNAL_TOKEN=$(gitea generate secret INTERNAL_TOKEN)
      export ENV_TO_INI__SECURITY__SECRET_KEY=$(gitea generate secret SECRET_KEY)
      export ENV_TO_INI__OAUTH2__JWT_SECRET=$(gitea generate secret JWT_SECRET)
      export ENV_TO_INI__SERVER__LFS_JWT_SECRET=$(gitea generate secret LFS_JWT_SECRET)
      env2ini::log "...Initial secrets generated\n"
    }
    env | (grep ENV_TO_INI || [[ $? == 1 ]]) > /tmp/existing-envs
    # MUST BE CALLED BEFORE OTHER CONFIGURATION
    env2ini::generate_initial_secrets
    env2ini::load_config_sources '/env-to-ini-mounts/inlines/'
    env2ini::load_config_sources '/env-to-ini-mounts/additionals/'
    # load existing envs to override auto generated envs
    env2ini::reload_preset_envs
    env2ini::log "=== All configuration sources loaded ===\n"
    # safety to prevent rewrite of secret keys if an app.ini already exists
    if [ -f ${GITEA_APP_INI} ]; then
      env2ini::log 'An app.ini file already exists. To prevent overwriting secret keys, these settings are dropped and remain unchanged:'
      env2ini::log ' - security.INTERNAL_TOKEN'
      env2ini::log ' - security.SECRET_KEY'
      env2ini::log ' - oauth2.JWT_SECRET'
      env2ini::log ' - server.LFS_JWT_SECRET'
      unset ENV_TO_INI__SECURITY__INTERNAL_TOKEN
      unset ENV_TO_INI__SECURITY__SECRET_KEY
      unset ENV_TO_INI__OAUTH2__JWT_SECRET
      unset ENV_TO_INI__SERVER__LFS_JWT_SECRET
    fi
    environment-to-ini -o $GITEA_APP_INI -p ENV_TO_INI

View File

@@ -0,0 +1,25 @@
# Source: gitea/templates/gitea/http-svc.yaml
# Headless ClusterIP service exposing Gitea's HTTP port (3000).
apiVersion: v1
kind: Service
metadata:
  name: gitea-http
  labels:
    helm.sh/chart: gitea-8.3.0
    app: gitea
    app.kubernetes.io/name: gitea
    app.kubernetes.io/instance: gitea
    app.kubernetes.io/version: "1.19.3"
    version: "1.19.3"
    app.kubernetes.io/managed-by: Helm
  annotations:
    {}
spec:
  type: ClusterIP
  # Headless: DNS resolves directly to the pod IPs.
  clusterIP: None
  ports:
  - name: http
    port: 3000
    targetPort: 3000
  selector:
    app.kubernetes.io/name: gitea
    app.kubernetes.io/instance: gitea

View File

@@ -0,0 +1,23 @@
# Source: gitea/charts/memcached/templates/service.yaml
# ClusterIP service for the memcached sub-chart used by Gitea for caching.
apiVersion: v1
kind: Service
metadata:
  name: gitea-memcached
  namespace: vynil-ci
  labels:
    app.kubernetes.io/name: memcached
    helm.sh/chart: memcached-6.3.14
    app.kubernetes.io/instance: gitea
    app.kubernetes.io/managed-by: Helm
  # Empty annotations map left by the Helm render.
  annotations:
spec:
  type: ClusterIP
  sessionAffinity: None
  ports:
  - name: memcache
    port: 11211
    targetPort: memcache
    # nodePort is meaningless for ClusterIP; null is a Helm render artifact.
    nodePort: null
  selector:
    app.kubernetes.io/name: memcached
    app.kubernetes.io/instance: gitea

View File

@@ -0,0 +1,28 @@
# Source: gitea/templates/gitea/ssh-svc.yaml
# LoadBalancer service exposing Gitea SSH (git over port 2222), sharing the
# public IP with Traefik via MetalLB's allow-shared-ip annotation.
apiVersion: v1
kind: Service
metadata:
  name: gitea-ssh
  labels:
    helm.sh/chart: gitea-8.3.0
    app: gitea
    app.kubernetes.io/name: gitea
    app.kubernetes.io/instance: gitea
    app.kubernetes.io/version: "1.19.3"
    version: "1.19.3"
    app.kubernetes.io/managed-by: Helm
  annotations:
    metallb.universe.tf/address-pool: mlb-pool-public
    metallb.universe.tf/allow-shared-ip: traefik-public-ip
spec:
  type: LoadBalancer
  # NOTE(review): 1.2.3.4 is a placeholder — set the real public address.
  loadBalancerIP: 1.2.3.4
  ipFamilyPolicy: PreferDualStack
  ports:
  - name: ssh
    port: 2222
    targetPort: 2222
    protocol: TCP
  selector:
    app.kubernetes.io/name: gitea
    app.kubernetes.io/instance: gitea

22
apps/k8s-ui/datas.tf Normal file
View File

@@ -0,0 +1,22 @@
locals {
  # Labels applied to every object this component creates; the vynil.* keys
  # record ownership for the vynil installer, the app.kubernetes.io keys
  # follow the standard Kubernetes conventions.
  common-labels = {
    "vynil.solidite.fr/owner-name"      = var.instance
    "vynil.solidite.fr/owner-namespace" = var.namespace
    "vynil.solidite.fr/owner-category"  = var.category
    "vynil.solidite.fr/owner-component" = var.component
    "app.kubernetes.io/managed-by"      = "vynil"
    "app.kubernetes.io/name"            = var.component
    "app.kubernetes.io/instance"        = var.instance
  }
}
# Bootstrap secret of the Authentik deployment; provides the API token.
data "kubernetes_secret_v1" "authentik" {
  metadata {
    name      = "authentik"
    namespace = "${var.domain}-auth"
  }
}
# Empty kustomization: this component ships no static manifests.
data "kustomization_overlay" "data" {
  resources = []
}

44
apps/k8s-ui/index.yaml Normal file
View File

@@ -0,0 +1,44 @@
---
# Vynil component descriptor for k8s-ui: exposes the Kubernetes API behind
# Authentik forward-auth.
# NOTE(review): apiVersion uses "vinyl.solidite.fr" while labels elsewhere
# use "vynil.solidite.fr" — confirm which spelling the CRD declares.
apiVersion: vinyl.solidite.fr/v1beta1
kind: Component
category: apps
metadata:
  name: k8s-ui
description: Access to the kubernetes api
# User-tunable options with their defaults and examples.
options:
  # Public DNS zone of the platform.
  domain-name:
    default: your_company.com
    examples:
    - your_company.com
    type: string
  # cert-manager ClusterIssuer used to issue the certificate.
  issuer:
    default: letsencrypt-prod
    examples:
    - letsencrypt-prod
    type: string
  sub-domain:
    default: api
    examples:
    - api
    type: string
  ingress-class:
    default: traefik
    examples:
    - traefik
    type: string
  # Short organisation name; used to locate the "<domain>-auth" namespace.
  domain:
    default: your-company
    examples:
    - your-company
    type: string
dependencies:
- dist: null
  category: share
  component: authentik-forward
# Terraform providers this component requires at install time.
providers:
  kubernetes: true
  authentik: true
  kubectl: true
  postgresql: null
  restapi: true
  http: true

51
apps/k8s-ui/ingress.tf Normal file
View File

@@ -0,0 +1,51 @@
locals {
  # Hostnames served: used as certificate SANs and IngressRoute matches.
  dns-names   = ["${var.sub-domain}.${var.domain-name}"]
  middlewares = []
  # Backend: the built-in kubernetes API service in the default namespace.
  services = [{
    "kind"      = "Service"
    "name"      = "kubernetes"
    "namespace" = "default"
    "port"      = 443
  }]
  # One Traefik route per hostname.
  routes = [for v in local.dns-names : {
    "kind"        = "Rule"
    "match"       = "Host(`${v}`)"
    "middlewares" = local.middlewares
    "services"    = local.services
  }]
}
# TLS certificate for the exposed hostnames.
resource "kubectl_manifest" "prj_certificate" {
  yaml_body = <<-EOF
    apiVersion: "cert-manager.io/v1"
    kind: "Certificate"
    metadata:
      name: "${var.instance}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      secretName: "${var.instance}-cert"
      dnsNames: ${jsonencode(local.dns-names)}
      issuerRef:
        name: "${var.issuer}"
        kind: "ClusterIssuer"
        group: "cert-manager.io"
  EOF
}
# Traefik IngressRoute terminating TLS with the certificate above.
resource "kubectl_manifest" "prj_ingress" {
  # Take ownership of fields even if another manager touched them.
  force_conflicts = true
  yaml_body = <<-EOF
    apiVersion: "traefik.containo.us/v1alpha1"
    kind: "IngressRoute"
    metadata:
      name: "${var.instance}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      entryPoints: ["websecure"]
      routes: ${jsonencode(local.routes)}
      tls:
        secretName: "${var.instance}-cert"
  EOF
}

View File

@@ -0,0 +1,54 @@
# Source: nextcloud/templates/metrics-deployment.yaml
# Prometheus exporter sidecar deployment: scrapes the Nextcloud serverinfo
# endpoint and exposes metrics on port 9205.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nextcloud-metrics
  labels:
    app.kubernetes.io/name: nextcloud
    helm.sh/chart: nextcloud-3.5.19
    app.kubernetes.io/instance: nextcloud
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: metrics
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: nextcloud
      app.kubernetes.io/instance: nextcloud
      app.kubernetes.io/component: metrics
  template:
    metadata:
      annotations:
        null
      labels:
        app.kubernetes.io/name: nextcloud
        app.kubernetes.io/instance: nextcloud
        app.kubernetes.io/component: metrics
    spec:
      containers:
        - name: metrics-exporter
          image: "xperimental/nextcloud-exporter:0.6.1"
          imagePullPolicy: IfNotPresent
          env:
            - name: NEXTCLOUD_USERNAME
              valueFrom:
                secretKeyRef:
                  name: nextcloud
                  key: nextcloud-username
            - name: NEXTCLOUD_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: nextcloud
                  key: nextcloud-password
            # NOTE(review): hard-coded internal URL — confirm it matches the
            # deployed hostname.
            - name: NEXTCLOUD_SERVER
              value: http://nextcloud.kube.home
            - name: NEXTCLOUD_TIMEOUT
              value: 5s
            - name: NEXTCLOUD_TLS_SKIP_VERIFY
              value: "false"
          ports:
            - name: metrics
              containerPort: 9205
      securityContext:
        runAsUser: 1000
        runAsNonRoot: true
View File

@@ -0,0 +1,229 @@
# Source: nextcloud/templates/deployment.yaml
# Rendered Helm manifest: Nextcloud pod with three containers — the app,
# a cron runner, and an nginx front.
# NOTE(review): the image tag is 27.0.0-apache while nginx proxies to a
# php-fpm handler on 127.0.0.1:9000 — an -fpm image is presumably intended
# (the vynil index.yaml defaults to 27.0.0-fpm); confirm.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nextcloud
  labels:
    app.kubernetes.io/name: nextcloud
    helm.sh/chart: nextcloud-3.5.19
    app.kubernetes.io/instance: nextcloud
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: app
spec:
  replicas: 1
  # Recreate: the data PVC is ReadWriteOnce, so no rolling update.
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app.kubernetes.io/name: nextcloud
      app.kubernetes.io/instance: nextcloud
      app.kubernetes.io/component: app
  template:
    metadata:
      labels:
        app.kubernetes.io/name: nextcloud
        app.kubernetes.io/instance: nextcloud
        app.kubernetes.io/component: app
        nextcloud-redis-client: "true"
      annotations:
        # Config hashes force a rollout when the rendered config changes.
        nextcloud-config-hash: a5aae02b1b8278a9c8a2dc143e82d3737fc295f62c34afd617207f37d1b2b438
        php-config-hash: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a
        nginx-config-hash: 18dd8f905a93ed27f032e9ae68084222ed7e5926f7144cda17b979780f4da54b
    spec:
      containers:
        - name: nextcloud
          image: nextcloud:27.0.0-apache
          imagePullPolicy: IfNotPresent
          env:
            # NOTE(review): empty value — the database host did not render;
            # confirm and set the PostgreSQL service name.
            - name: POSTGRES_HOST
              value:
            - name: POSTGRES_DB
              value: "nextcloud"
            - name: POSTGRES_USER
              valueFrom:
                secretKeyRef:
                  name: nextcloud-admin
                  key: username
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: nextcloud-admin
                  key: password
            - name: NEXTCLOUD_ADMIN_USER
              valueFrom:
                secretKeyRef:
                  name: nextcloud
                  key: nextcloud-username
            - name: NEXTCLOUD_ADMIN_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: nextcloud
                  key: nextcloud-password
            - name: NEXTCLOUD_TRUSTED_DOMAINS
              value: nextcloud.kube.home
            - name: NEXTCLOUD_DATA_DIR
              value: "/var/www/html/data"
            - name: REDIS_HOST
              value: nextcloud-redis-master
            - name: REDIS_HOST_PORT
              value: "6379"
            # NOTE(review): plaintext default password — move to a Secret.
            - name: REDIS_HOST_PASSWORD
              value: changeme
          resources:
            {}
          volumeMounts:
            - name: nextcloud-main
              mountPath: /var/www/
              subPath: root
            - name: nextcloud-main
              mountPath: /var/www/html
              subPath: html
            - name: nextcloud-main
              mountPath: /var/www/html/data
              subPath: data
            - name: nextcloud-main
              mountPath: /var/www/html/config
              subPath: config
            - name: nextcloud-main
              mountPath: /var/www/html/custom_apps
              subPath: custom_apps
            - name: nextcloud-main
              mountPath: /var/www/tmp
              subPath: tmp
            - name: nextcloud-main
              mountPath: /var/www/html/themes
              subPath: themes
        # Cron runner: same image/env as the app, running /cron.sh.
        - name: nextcloud-cron
          image: nextcloud:27.0.0-apache
          imagePullPolicy: IfNotPresent
          command:
            - /cron.sh
          env:
            # NOTE(review): empty value — same missing database host as above.
            - name: POSTGRES_HOST
              value:
            - name: POSTGRES_DB
              value: "nextcloud"
            - name: POSTGRES_USER
              valueFrom:
                secretKeyRef:
                  name: nextcloud-admin
                  key: username
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: nextcloud-admin
                  key: password
            - name: NEXTCLOUD_ADMIN_USER
              valueFrom:
                secretKeyRef:
                  name: nextcloud
                  key: nextcloud-username
            - name: NEXTCLOUD_ADMIN_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: nextcloud
                  key: nextcloud-password
            - name: NEXTCLOUD_TRUSTED_DOMAINS
              value: nextcloud.kube.home
            - name: NEXTCLOUD_DATA_DIR
              value: "/var/www/html/data"
            - name: REDIS_HOST
              value: nextcloud-redis-master
            - name: REDIS_HOST_PORT
              value: "6379"
            - name: REDIS_HOST_PASSWORD
              value: changeme
          resources:
            {}
          volumeMounts:
            - name: nextcloud-main
              mountPath: /var/www/
              subPath: root
            - name: nextcloud-main
              mountPath: /var/www/html
              subPath: html
            - name: nextcloud-main
              mountPath: /var/www/html/data
              subPath: data
            - name: nextcloud-main
              mountPath: /var/www/html/config
              subPath: config
            - name: nextcloud-main
              mountPath: /var/www/html/custom_apps
              subPath: custom_apps
            - name: nextcloud-main
              mountPath: /var/www/tmp
              subPath: tmp
            - name: nextcloud-main
              mountPath: /var/www/html/themes
              subPath: themes
        # nginx front-end serving HTTP on port 80 and probing status.php.
        - name: nextcloud-nginx
          image: "nginx:alpine"
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /status.php
              port: http
              httpHeaders:
                - name: Host
                  value: "nextcloud.kube.home"
            initialDelaySeconds: 10
            periodSeconds: 10
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /status.php
              port: 80
              httpHeaders:
                - name: Host
                  value: "nextcloud.kube.home"
            initialDelaySeconds: 10
            periodSeconds: 10
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 3
          resources:
            {}
          volumeMounts:
            - name: nextcloud-main
              mountPath: /var/www/
              subPath: root
            - name: nextcloud-main
              mountPath: /var/www/html
              subPath: html
            - name: nextcloud-main
              mountPath: /var/www/html/data
              subPath: data
            - name: nextcloud-main
              mountPath: /var/www/html/config
              subPath: config
            - name: nextcloud-main
              mountPath: /var/www/html/custom_apps
              subPath: custom_apps
            - name: nextcloud-main
              mountPath: /var/www/tmp
              subPath: tmp
            - name: nextcloud-main
              mountPath: /var/www/html/themes
              subPath: themes
            - name: nextcloud-nginx-config
              mountPath: /etc/nginx/nginx.conf
              subPath: nginx.conf
      volumes:
        - name: nextcloud-main
          persistentVolumeClaim:
            claimName: nextcloud-nextcloud
        - name: nextcloud-nginx-config
          configMap:
            name: nextcloud-nginxconfig
      securityContext:
        # Will mount configuration files as www-data (id: 82) for nextcloud
        fsGroup: 82
      serviceAccountName: nextcloud-serviceaccount
View File

@@ -0,0 +1,19 @@
# Source: nextcloud/templates/hpa.yaml
# CPU-based autoscaler for the Nextcloud deployment.
# NOTE(review): the deployment uses strategy Recreate and a single RWO PVC;
# confirm scaling above 1 replica actually works in this setup.
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: nextcloud
  labels:
    app.kubernetes.io/name: nextcloud
    helm.sh/chart: nextcloud-3.5.19
    app.kubernetes.io/instance: nextcloud
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: app
spec:
  scaleTargetRef:
    kind: Deployment
    apiVersion: apps/v1
    name: nextcloud
  minReplicas: 1
  maxReplicas: 10
  targetCPUUtilizationPercentage: 60

80
apps/nextcloud/index.yaml Normal file
View File

@@ -0,0 +1,80 @@
---
# Vynil component descriptor for the nextcloud app.
# NOTE(review): apiVersion uses "vinyl.solidite.fr" while labels elsewhere
# use "vynil.solidite.fr" — confirm which spelling the CRD declares.
apiVersion: vinyl.solidite.fr/v1beta1
kind: Component
category: apps
metadata:
  name: nextcloud
description: null
# User-tunable options with defaults, examples and JSON-schema typing.
options:
  ingress-class:
    default: traefik
    examples:
    - traefik
    type: string
  # Container image coordinates for the Nextcloud deployment.
  images:
    default:
      nextcloud:
        pullPolicy: IfNotPresent
        registry: docker.io
        repository: nextcloud
        tag: 27.0.0-fpm
    examples:
    - nextcloud:
        pullPolicy: IfNotPresent
        registry: docker.io
        repository: nextcloud
        tag: 27.0.0-fpm
    properties:
      nextcloud:
        default:
          pullPolicy: IfNotPresent
          registry: docker.io
          repository: nextcloud
          tag: 27.0.0-fpm
        properties:
          pullPolicy:
            default: IfNotPresent
            enum:
            - Always
            - Never
            - IfNotPresent
            type: string
          registry:
            default: docker.io
            type: string
          repository:
            default: nextcloud
            type: string
          tag:
            default: 27.0.0-fpm
            type: string
        type: object
    type: object
  # Public DNS zone of the platform.
  domain-name:
    default: your_company.com
    examples:
    - your_company.com
    type: string
  issuer:
    default: letsencrypt-prod
    examples:
    - letsencrypt-prod
    type: string
  # Short organisation name, used to locate shared namespaces.
  domain:
    default: your-company
    examples:
    - your-company
    type: string
  sub-domain:
    default: cloud
    examples:
    - cloud
    type: string
dependencies: []
# Terraform providers this component requires at install time.
providers:
  kubernetes: true
  authentik: true
  kubectl: null
  postgresql: null
  restapi: null
  http: null

View File

@@ -0,0 +1,26 @@
# Source: nextcloud/templates/metrics-servicemonitor.yaml
# Prometheus-operator ServiceMonitor scraping the nextcloud-metrics service.
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: nextcloud
  namespace: "vynil-cloud"
  labels:
    app.kubernetes.io/name: nextcloud
    helm.sh/chart: nextcloud-3.5.19
    app.kubernetes.io/instance: nextcloud
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: metrics
spec:
  jobLabel: ""
  selector:
    matchLabels:
      app.kubernetes.io/name: nextcloud
      app.kubernetes.io/instance: nextcloud
      app.kubernetes.io/component: metrics
  namespaceSelector:
    matchNames:
      - "vynil-cloud"
  endpoints:
    - port: metrics
      path: "/"
      interval: 30s

View File

@@ -0,0 +1,14 @@
# Source: nextcloud/templates/rbac.yaml
# Binds the nextcloud service account to the nextcloud-privileged Role below.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: nextcloud-privileged
  namespace: vynil-cloud
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: nextcloud-privileged
subjects:
  - kind: ServiceAccount
    name: nextcloud-serviceaccount
    namespace: vynil-cloud

View File

@@ -0,0 +1,15 @@
# Source: nextcloud/templates/rbac.yaml
# Allows use of the "privileged" PodSecurityPolicy.
# NOTE(review): PodSecurityPolicy was removed in Kubernetes 1.25 — this Role
# is inert on newer clusters; confirm the target cluster version.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: nextcloud-privileged
  namespace: vynil-cloud
rules:
  - apiGroups:
      - extensions
    resourceNames:
      - privileged
    resources:
      - podsecuritypolicies
    verbs:
      - use

View File

@@ -0,0 +1,173 @@
# Source: nextcloud/templates/nginx-config.yaml
# nginx configuration for the front container, based on the upstream
# Nextcloud nginx recipe (php-fpm handler on 127.0.0.1:9000).
# Indentation restored; the nginx.conf body is ConfigMap data, kept verbatim.
apiVersion: v1
kind: ConfigMap
metadata:
  name: nextcloud-nginxconfig
  labels:
    app.kubernetes.io/name: nextcloud
    helm.sh/chart: nextcloud-3.5.19
    app.kubernetes.io/instance: nextcloud
    app.kubernetes.io/managed-by: Helm
data:
  nginx.conf: |-
    worker_processes auto;
    error_log /var/log/nginx/error.log warn;
    pid /var/run/nginx.pid;
    events {
      worker_connections 1024;
    }
    http {
      include /etc/nginx/mime.types;
      default_type application/octet-stream;
      log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';
      access_log /var/log/nginx/access.log main;
      sendfile on;
      #tcp_nopush on;
      keepalive_timeout 65;
      #gzip on;
      upstream php-handler {
        server 127.0.0.1:9000;
      }
      server {
        listen 80;
        # HSTS settings
        # WARNING: Only add the preload option once you read about
        # the consequences in https://hstspreload.org/. This option
        # will add the domain to a hardcoded list that is shipped
        # in all major browsers and getting removed from this list
        # could take several months.
        #add_header Strict-Transport-Security "max-age=15768000; includeSubDomains; preload;" always;
        # set max upload size
        client_max_body_size 10G;
        fastcgi_buffers 64 4K;
        # Enable gzip but do not remove ETag headers
        gzip on;
        gzip_vary on;
        gzip_comp_level 4;
        gzip_min_length 256;
        gzip_proxied expired no-cache no-store private no_last_modified no_etag auth;
        gzip_types application/atom+xml application/javascript application/json application/ld+json application/manifest+json application/rss+xml application/vnd.geo+json application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/bmp image/svg+xml image/x-icon text/cache-manifest text/css text/plain text/vcard text/vnd.rim.location.xloc text/vtt text/x-component text/x-cross-domain-policy;
        # Pagespeed is not supported by Nextcloud, so if your server is built
        # with the `ngx_pagespeed` module, uncomment this line to disable it.
        #pagespeed off;
        # HTTP response headers borrowed from Nextcloud `.htaccess`
        add_header Referrer-Policy "no-referrer" always;
        add_header X-Content-Type-Options "nosniff" always;
        add_header X-Download-Options "noopen" always;
        add_header X-Frame-Options "SAMEORIGIN" always;
        add_header X-Permitted-Cross-Domain-Policies "none" always;
        add_header X-Robots-Tag "noindex, nofollow" always;
        add_header X-XSS-Protection "1; mode=block" always;
        # Remove X-Powered-By, which is an information leak
        fastcgi_hide_header X-Powered-By;
        # Path to the root of your installation
        root /var/www/html;
        # Specify how to handle directories -- specifying `/index.php$request_uri`
        # here as the fallback means that Nginx always exhibits the desired behaviour
        # when a client requests a path that corresponds to a directory that exists
        # on the server. In particular, if that directory contains an index.php file,
        # that file is correctly served; if it doesn't, then the request is passed to
        # the front-end controller. This consistent behaviour means that we don't need
        # to specify custom rules for certain paths (e.g. images and other assets,
        # `/updater`, `/ocm-provider`, `/ocs-provider`), and thus
        # `try_files $uri $uri/ /index.php$request_uri`
        # always provides the desired behaviour.
        index index.php index.html /index.php$request_uri;
        # Rule borrowed from `.htaccess` to handle Microsoft DAV clients
        location = / {
          if ( $http_user_agent ~ ^DavClnt ) {
            return 302 /remote.php/webdav/$is_args$args;
          }
        }
        location = /robots.txt {
          allow all;
          log_not_found off;
          access_log off;
        }
        # Make a regex exception for `/.well-known` so that clients can still
        # access it despite the existence of the regex rule
        # `location ~ /(\.|autotest|...)` which would otherwise handle requests
        # for `/.well-known`.
        location ^~ /.well-known {
          # The following 6 rules are borrowed from `.htaccess`
          location = /.well-known/carddav { return 301 /remote.php/dav/; }
          location = /.well-known/caldav { return 301 /remote.php/dav/; }
          # Anything else is dynamically handled by Nextcloud
          location ^~ /.well-known { return 301 /index.php$uri; }
          try_files $uri $uri/ =404;
        }
        # Rules borrowed from `.htaccess` to hide certain paths from clients
        location ~ ^/(?:build|tests|config|lib|3rdparty|templates|data)(?:$|/) { return 404; }
        location ~ ^/(?:\.|autotest|occ|issue|indie|db_|console) { return 404; }
        # Ensure this block, which passes PHP files to the PHP process, is above the blocks
        # which handle static assets (as seen below). If this block is not declared first,
        # then Nginx will encounter an infinite rewriting loop when it prepends `/index.php`
        # to the URI, resulting in a HTTP 500 error response.
        location ~ \.php(?:$|/) {
          # Required for legacy support
          rewrite ^/(?!index|remote|public|cron|core\/ajax\/update|status|ocs\/v[12]|updater\/.+|oc[ms]-provider\/.+|.+\/richdocumentscode\/proxy) /index.php$request_uri;
          fastcgi_split_path_info ^(.+?\.php)(/.*)$;
          set $path_info $fastcgi_path_info;
          try_files $fastcgi_script_name =404;
          include fastcgi_params;
          fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
          fastcgi_param PATH_INFO $path_info;
          #fastcgi_param HTTPS on;
          fastcgi_param modHeadersAvailable true; # Avoid sending the security headers twice
          fastcgi_param front_controller_active true; # Enable pretty urls
          fastcgi_pass php-handler;
          fastcgi_intercept_errors on;
          fastcgi_request_buffering off;
        }
        location ~ \.(?:css|js|svg|gif)$ {
          try_files $uri /index.php$request_uri;
          expires 6M; # Cache-Control policy borrowed from `.htaccess`
          access_log off; # Optional: Don't log access to assets
        }
        location ~ \.woff2?$ {
          try_files $uri /index.php$request_uri;
          expires 7d; # Cache-Control policy borrowed from `.htaccess`
          access_log off; # Optional: Don't log access to assets
        }
        location / {
          try_files $uri $uri/ /index.php$request_uri;
        }
      }
    }

View File

@@ -0,0 +1,17 @@
# Source: nextcloud/templates/nextcloud-pvc.yaml
# Shared PVC holding the Nextcloud installation, data and config
# (mounted with subPaths by all three containers of the deployment).
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nextcloud-nextcloud
  labels:
    app.kubernetes.io/name: nextcloud
    helm.sh/chart: nextcloud-3.5.19
    app.kubernetes.io/instance: nextcloud
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: app
spec:
  accessModes:
    - "ReadWriteOnce"
  resources:
    requests:
      storage: "8Gi"

View File

@@ -0,0 +1,15 @@
# Source: nextcloud/templates/secrets.yaml
# Nextcloud admin credentials consumed by the app and the metrics exporter.
# NOTE(review): these are the chart's defaults committed in clear
# (base64 of "admin"/"changeme") — rotate before any real deployment.
apiVersion: v1
kind: Secret
metadata:
  name: nextcloud
  labels:
    app.kubernetes.io/name: nextcloud
    helm.sh/chart: nextcloud-3.5.19
    app.kubernetes.io/instance: nextcloud
    app.kubernetes.io/managed-by: Helm
type: Opaque
data:
  nextcloud-username: "YWRtaW4="
  nextcloud-password: "Y2hhbmdlbWU="
  nextcloud-token: "WXVzZmluOWo4Ug=="

View File

@@ -0,0 +1,5 @@
# Source: nextcloud/templates/serviceaccount.yaml
# Service account used by the Nextcloud pods (bound to nextcloud-privileged).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nextcloud-serviceaccount

View File

@@ -0,0 +1,24 @@
# Source: nextcloud/templates/metrics-service.yaml
# ClusterIP service in front of the metrics exporter (port 9205), annotated
# for plain Prometheus scraping in addition to the ServiceMonitor.
apiVersion: v1
kind: Service
metadata:
  name: nextcloud-metrics
  labels:
    app.kubernetes.io/name: nextcloud
    helm.sh/chart: nextcloud-3.5.19
    app.kubernetes.io/instance: nextcloud
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: metrics
  annotations:
    prometheus.io/port: "9205"
    prometheus.io/scrape: "true"
spec:
  type: ClusterIP
  ports:
    - name: metrics
      port: 9205
      targetPort: metrics
  selector:
    app.kubernetes.io/name: nextcloud
    app.kubernetes.io/instance: nextcloud
    app.kubernetes.io/component: metrics

View File

@@ -0,0 +1,22 @@
# Source: nextcloud/templates/service.yaml
# Main ClusterIP service routing HTTP traffic to the app pod's nginx (80).
apiVersion: v1
kind: Service
metadata:
  name: nextcloud
  labels:
    app.kubernetes.io/name: nextcloud
    helm.sh/chart: nextcloud-3.5.19
    app.kubernetes.io/instance: nextcloud
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: app
spec:
  type: ClusterIP
  ports:
    - port: 80
      targetPort: 80
      protocol: TCP
      name: http
  selector:
    app.kubernetes.io/name: nextcloud
    app.kubernetes.io/instance: nextcloud
    app.kubernetes.io/component: app

22
apps/traefik-ui/datas.tf Normal file
View File

@@ -0,0 +1,22 @@
locals {
  # Labels applied to every object this component creates; the vynil.* keys
  # record ownership for the vynil installer, the app.kubernetes.io keys
  # follow the standard Kubernetes conventions.
  common-labels = {
    "vynil.solidite.fr/owner-name"      = var.instance
    "vynil.solidite.fr/owner-namespace" = var.namespace
    "vynil.solidite.fr/owner-category"  = var.category
    "vynil.solidite.fr/owner-component" = var.component
    "app.kubernetes.io/managed-by"      = "vynil"
    "app.kubernetes.io/name"            = var.component
    "app.kubernetes.io/instance"        = var.instance
  }
}
# Bootstrap secret of the Authentik deployment; provides the API token.
data "kubernetes_secret_v1" "authentik" {
  metadata {
    name      = "authentik"
    namespace = "${var.domain}-auth"
  }
}
# Empty kustomization: this component ships no static manifests.
data "kustomization_overlay" "data" {
  resources = []
}

121
apps/traefik-ui/forward.tf Normal file
View File

@@ -0,0 +1,121 @@
# Authentik "forward auth" wiring for this application: registers a proxy
# provider + application tile, creates an access group, attaches the
# provider to the shared "forward" outpost through the REST API, and
# publishes the matching Traefik forwardAuth middleware.
locals {
  # Bootstrap API token published by the share/authentik component.
  authentik-token = data.kubernetes_secret_v1.authentik.data["AUTHENTIK_BOOTSTRAP_TOKEN"]
  request_headers = {
    "Content-Type" = "application/json"
    Authorization = "Bearer ${local.authentik-token}"
  }
  # Providers / primary key of the existing outpost named "forward"
  # (fetched below via data.http.get_forward_outpost).
  forward-outpost-providers = jsondecode(data.http.get_forward_outpost.response_body).results[0].providers
  forward-outpost-pk = jsondecode(data.http.get_forward_outpost.response_body).results[0].pk
  # "component" when instance == component, otherwise "component-instance".
  app-name = var.component == var.instance ? var.instance : format("%s-%s", var.component, var.instance)
  app-icon = "dashboard/statics/icons/favicon-96x96.png"
  main-group = format("%s-users", local.app-name)
  sub-groups = []
  access-token-validity = "minutes=10"
}
data "authentik_flow" "default-authorization-flow" {
  depends_on = [authentik_group.prj_users]
  slug = "default-provider-authorization-implicit-consent"
}
# Proxy provider in forward_single mode: Traefik delegates auth decisions
# for one external host to the authentik outpost.
resource "authentik_provider_proxy" "prj_forward" {
  name = local.app-name
  external_host = format("https://%s.%s", var.sub-domain, var.domain-name)
  authorization_flow = data.authentik_flow.default-authorization-flow.id
  mode = "forward_single"
  access_token_validity = local.access-token-validity
}
# The authentik application (launch URL + icon) bound to the provider.
resource "authentik_application" "prj_application" {
  name = local.app-name
  slug = local.app-name
  protocol_provider = authentik_provider_proxy.prj_forward.id
  meta_launch_url = format("https://%s.%s", var.sub-domain, var.domain-name)
  meta_icon = format("https://%s.%s/%s", var.sub-domain, var.domain-name, local.app-icon)
}
# Group whose members may access the application.
resource "authentik_group" "prj_users" {
  name = local.main-group
}
# Optional child groups (none by default, see local.sub-groups).
resource "authentik_group" "subgroup" {
  count = length(local.sub-groups)
  name = format("%s-%s", local.app-name, local.sub-groups[count.index])
  parent = authentik_group.prj_users.id
}
data "authentik_group" "vynil-admin" {
  depends_on = [authentik_group.prj_users] # fake dependency so it is not evaluated at plan stage
  name = "vynil-forward-admins"
}
# Grant access to the app's own group and to the shared vynil admin group.
resource "authentik_policy_binding" "prj_access_users" {
  target = authentik_application.prj_application.uuid
  group = authentik_group.prj_users.id
  order = 0
}
resource "authentik_policy_binding" "prj_access_vynil" {
  target = authentik_application.prj_application.uuid
  group = data.authentik_group.vynil-admin.id
  order = 1
}
# Looks up the shared outpost named "forward" through the authentik REST API.
data "http" "get_forward_outpost" {
  depends_on = [authentik_provider_proxy.prj_forward]
  url = "http://authentik.${var.domain}-auth.svc/api/v3/outposts/instances/?name__iexact=forward"
  method = "GET"
  request_headers = local.request_headers
  lifecycle {
    postcondition {
      condition = contains([200], self.status_code)
      error_message = "Status code invalid"
    }
  }
}
# PATCH-only REST client used to update the outpost in place (id by name).
provider "restapi" {
  uri = "http://authentik.${var.domain}-auth.svc/api/v3/"
  headers = local.request_headers
  create_method = "PATCH"
  update_method = "PATCH"
  destroy_method = "PATCH"
  write_returns_object = true
  id_attribute = "name"
}
# Attaches this provider to the shared "forward" outpost; the contains()
# guard makes the provider list update idempotent.
resource "restapi_object" "forward_outpost_binding" {
  path = "/outposts/instances/${local.forward-outpost-pk}/"
  data = jsonencode({
    name = "forward"
    providers = contains(local.forward-outpost-providers, authentik_provider_proxy.prj_forward.id) ? local.forward-outpost-providers : concat(local.forward-outpost-providers, [authentik_provider_proxy.prj_forward.id])
  })
}
# Traefik middleware delegating request authentication to the forward outpost.
resource "kubectl_manifest" "prj_middleware" {
  yaml_body = <<-EOF
    apiVersion: traefik.containo.us/v1alpha1
    kind: Middleware
    metadata:
      name: "forward-${local.app-name}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      forwardAuth:
        address: http://ak-outpost-forward.${var.domain}-auth.svc:9000/outpost.goauthentik.io/auth/traefik
        trustForwardHeader: true
        authResponseHeaders:
        - X-authentik-username
        # - X-authentik-groups
        # - X-authentik-email
        # - X-authentik-name
        # - X-authentik-uid
        # - X-authentik-jwt
        # - X-authentik-meta-jwks
        # - X-authentik-meta-outpost
        # - X-authentik-meta-provider
        # - X-authentik-meta-app
        # - X-authentik-meta-version
  EOF
}

View File

@@ -0,0 +1,44 @@
---
apiVersion: vinyl.solidite.fr/v1beta1
kind: Component
category: apps
metadata:
name: traefik-ui
description: Access to the Traefik UI
options:
sub-domain:
default: traefik
examples:
- traefik
type: string
ingress-class:
default: traefik
examples:
- traefik
type: string
domain-name:
default: your_company.com
examples:
- your_company.com
type: string
domain:
default: your-company
examples:
- your-company
type: string
issuer:
default: letsencrypt-prod
examples:
- letsencrypt-prod
type: string
dependencies:
- dist: null
category: share
component: authentik-forward
providers:
kubernetes: true
authentik: true
kubectl: true
postgresql: null
restapi: true
http: true

View File

@@ -0,0 +1,75 @@
# TLS certificate, HTTPS-redirect middleware and Ingress for the published
# host. local.app-name and the "forward-<app>" middleware are declared in
# forward.tf of this component.
locals {
  dns-names = ["${var.sub-domain}.${var.domain-name}"]
  # Applied in order: HTTPS redirect first, then authentik forward auth.
  middlewares = ["${var.instance}-https", "forward-${local.app-name}"]
  service = {
    "name" = "${var.component}-${var.instance}"
    "port" = {
      "number" = 80
    }
  }
  # One catch-all "/" Prefix rule per published host name.
  rules = [ for v in local.dns-names : {
    "host" = "${v}"
    "http" = {
      "paths" = [{
        "backend" = {
          "service" = local.service
        }
        "path" = "/"
        "pathType" = "Prefix"
      }]
    }
  }]
}
# cert-manager certificate covering every published host name.
resource "kubectl_manifest" "prj_certificate" {
  yaml_body = <<-EOF
    apiVersion: "cert-manager.io/v1"
    kind: "Certificate"
    metadata:
      name: "${var.instance}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      secretName: "${var.instance}-cert"
      dnsNames: ${jsonencode(local.dns-names)}
      issuerRef:
        name: "${var.issuer}"
        kind: "ClusterIssuer"
        group: "cert-manager.io"
  EOF
}
# Traefik middleware issuing a permanent redirect from HTTP to HTTPS.
resource "kubectl_manifest" "prj_https_redirect" {
  yaml_body = <<-EOF
    apiVersion: "traefik.containo.us/v1alpha1"
    kind: "Middleware"
    metadata:
      name: "${var.instance}-https"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      redirectScheme:
        scheme: "https"
        permanent: true
  EOF
}
# Ingress wiring the host to the service; the middleware chain is attached
# through the traefik router annotation ("<namespace>-<name>@kubernetescrd").
resource "kubectl_manifest" "prj_ingress" {
  force_conflicts = true
  yaml_body = <<-EOF
    apiVersion: "networking.k8s.io/v1"
    kind: "Ingress"
    metadata:
      name: "${var.instance}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
      annotations:
        "traefik.ingress.kubernetes.io/router.middlewares": "${join(",", [for m in local.middlewares : format("%s-%s@kubernetescrd", var.namespace, m)])}"
    spec:
      ingressClassName: "${var.ingress-class}"
      rules: ${jsonencode(local.rules)}
      tls:
      - hosts: ${jsonencode(local.dns-names)}
        secretName: "${var.instance}-cert"
  EOF
}

21
apps/traefik-ui/svc.tf Normal file
View File

@@ -0,0 +1,21 @@
# Service fronting the traefik pods themselves: the selector matches the
# vynil owner labels of the share/traefik deployment. targetPort 9000 is
# presumably traefik's internal dashboard/API entrypoint — TODO confirm.
resource "kubectl_manifest" "service" {
  yaml_body = <<-EOF
    apiVersion: v1
    kind: Service
    metadata:
      name: "${var.component}-${var.instance}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      type: ClusterIP
      ports:
      - name: http
        port: 80
        protocol: TCP
        targetPort: 9000
      selector:
        vynil.solidite.fr/owner-namespace: ${var.namespace}
        vynil.solidite.fr/owner-component: traefik
        vynil.solidite.fr/owner-category: share
  EOF
}

81
meta/domain-auth/apps.tf Normal file
View File

@@ -0,0 +1,81 @@
# Meta component "domain-auth": creates the "<namespace>-auth" namespace and
# the vynil Installs for authentik plus its optional ldap / forward outposts.
locals {
  annotations = {
    "vynil.solidite.fr/meta" = "domain-auth"
    "vynil.solidite.fr/name" = "${var.namespace}-auth"
    "vynil.solidite.fr/domain" = var.domain-name
    "vynil.solidite.fr/issuer" = var.issuer
    "vynil.solidite.fr/ingress" = var.ingress-class
  }
  # Options forwarded to every Install created by this component.
  global = {
    "domain" = var.namespace
    "domain-name" = var.domain-name
    "issuer" = var.issuer
    "ingress-class" = var.ingress-class
  }
  # Per-component options minus the "enable" flag (consumed here, not forwarded).
  authentik = { for k, v in var.authentik : k => v if k!="enable" }
  authentik-ldap = { for k, v in var.authentik-ldap : k => v if k!="enable" }
  authentik-forward = { for k, v in var.authentik-forward : k => v if k!="enable" }
}
# The auth namespace (and the base authentik Install below) is needed as
# soon as any of the three authentik-related flags is set.
resource "kubernetes_namespace_v1" "auth-ns" {
  count = var.authentik.enable || var.authentik-ldap.enable || var.authentik-forward.enable ? 1 : 0
  metadata {
    annotations = local.annotations
    labels = merge(local.common-labels, local.annotations)
    name = "${var.namespace}-auth"
  }
}
# Base authentik Install — required by both outpost variants.
resource "kubectl_manifest" "authentik" {
  count = var.authentik.enable || var.authentik-ldap.enable || var.authentik-forward.enable ? 1 : 0
  depends_on = [kubernetes_namespace_v1.auth-ns]
  yaml_body = <<-EOF
    apiVersion: "vynil.solidite.fr/v1"
    kind: "Install"
    metadata:
      name: "authentik"
      namespace: "${var.namespace}-auth"
      labels: ${jsonencode(local.common-labels)}
    spec:
      distrib: "core"
      category: "share"
      component: "authentik"
      options: ${jsonencode(merge(local.global, local.authentik))}
  EOF
}
# Optional LDAP outpost Install.
resource "kubectl_manifest" "authentik-ldap" {
  count = var.authentik-ldap.enable ? 1 : 0
  depends_on = [kubernetes_namespace_v1.auth-ns]
  yaml_body = <<-EOF
    apiVersion: "vynil.solidite.fr/v1"
    kind: "Install"
    metadata:
      name: "authentik-ldap"
      namespace: "${var.namespace}-auth"
      labels: ${jsonencode(local.common-labels)}
    spec:
      distrib: "core"
      category: "share"
      component: "authentik-ldap"
      options: ${jsonencode(merge(local.global, local.authentik-ldap))}
  EOF
}
# Optional forward-auth outpost Install.
resource "kubectl_manifest" "authentik-forward" {
  count = var.authentik-forward.enable ? 1 : 0
  depends_on = [kubernetes_namespace_v1.auth-ns]
  yaml_body = <<-EOF
    apiVersion: "vynil.solidite.fr/v1"
    kind: "Install"
    metadata:
      name: "authentik-forward"
      namespace: "${var.namespace}-auth"
      labels: ${jsonencode(local.common-labels)}
    spec:
      distrib: "core"
      category: "share"
      component: "authentik-forward"
      options: ${jsonencode(merge(local.global, local.authentik-forward))}
  EOF
}

View File

@@ -0,0 +1,66 @@
---
apiVersion: vinyl.solidite.fr/v1beta1
kind: Component
category: meta
metadata:
name: domain-auth
description: null
options:
authentik-ldap:
default:
enable: false
examples:
- enable: false
properties:
enable:
default: false
type: boolean
type: object
issuer:
default: letsencrypt-prod
examples:
- letsencrypt-prod
type: string
domain-name:
default: your_company.com
examples:
- your_company.com
type: string
ingress-class:
default: traefik
examples:
- traefik
type: string
authentik:
default:
enable: true
examples:
- enable: true
properties:
enable:
default: true
type: boolean
type: object
authentik-forward:
default:
enable: false
examples:
- enable: false
properties:
enable:
default: false
type: boolean
type: object
domain:
default: your-company
examples:
- your-company
type: string
dependencies: []
providers:
kubernetes: true
authentik: null
kubectl: true
postgresql: null
restapi: null
http: null

43
meta/domain-ci/apps.tf Normal file
View File

@@ -0,0 +1,43 @@
# Meta component "domain-ci": creates the "<namespace>-ci" namespace and the
# gitea Install when enabled.
locals {
  annotations = {
    "vynil.solidite.fr/meta" = "domain-ci"
    "vynil.solidite.fr/name" = var.namespace
    "vynil.solidite.fr/domain" = var.domain-name
    "vynil.solidite.fr/issuer" = var.issuer
    "vynil.solidite.fr/ingress" = var.ingress-class
  }
  # Options forwarded to every Install created by this component.
  global = {
    "domain" = var.namespace
    "domain-name" = var.domain-name
    "issuer" = var.issuer
    "ingress-class" = var.ingress-class
  }
  # Gitea options minus the "enable" flag (consumed here, not forwarded).
  gitea = { for k, v in var.gitea : k => v if k!="enable" }
}
resource "kubernetes_namespace_v1" "ci-ns" {
  count = ( var.gitea.enable )? 1 : 0
  metadata {
    annotations = local.annotations
    labels = merge(local.common-labels, local.annotations)
    name = "${var.namespace}-ci"
  }
}
# vynil Install for gitea, in the namespace created above.
# NOTE(review): interpolates "${var.namespace}-ci" directly instead of
# referencing the namespace resource (compare domain-erp) — works, but
# confirm this divergence is intended.
resource "kubectl_manifest" "gitea" {
  count = var.gitea.enable ? 1 : 0
  depends_on = [kubernetes_namespace_v1.ci-ns]
  yaml_body = <<-EOF
    apiVersion: "vynil.solidite.fr/v1"
    kind: "Install"
    metadata:
      name: "gitea"
      namespace: "${var.namespace}-ci"
      labels: ${jsonencode(local.common-labels)}
    spec:
      distrib: "core"
      category: "apps"
      component: "gitea"
      options: ${jsonencode(merge(local.global, local.gitea))}
  EOF
}

46
meta/domain-ci/index.yaml Normal file
View File

@@ -0,0 +1,46 @@
---
apiVersion: vinyl.solidite.fr/v1beta1
kind: Component
category: meta
metadata:
name: domain-ci
description: null
options:
gitea:
default:
enable: true
examples:
- enable: true
properties:
enable:
default: true
type: boolean
type: object
domain-name:
default: your_company.com
examples:
- your_company.com
type: string
issuer:
default: letsencrypt-prod
examples:
- letsencrypt-prod
type: string
domain:
default: your-company
examples:
- your-company
type: string
ingress-class:
default: traefik
examples:
- traefik
type: string
dependencies: []
providers:
kubernetes: true
authentik: null
kubectl: true
postgresql: null
restapi: null
http: null

43
meta/domain-erp/apps.tf Normal file
View File

@@ -0,0 +1,43 @@
# Meta component "domain-erp": shared locals for the ERP namespace and its
# Installs.
locals {
  # Namespace annotations identifying the owning meta component.
  annotations = {
    # Fixed: was "domain-ci", a copy/paste left-over — this file belongs to
    # the domain-erp meta component (see meta/domain-erp/index.yaml).
    "vynil.solidite.fr/meta" = "domain-erp"
    "vynil.solidite.fr/name" = var.namespace
    "vynil.solidite.fr/domain" = var.domain-name
    "vynil.solidite.fr/issuer" = var.issuer
    "vynil.solidite.fr/ingress" = var.ingress-class
  }
  # Options forwarded to every Install created by this component.
  global = {
    "domain" = var.namespace
    "domain-name" = var.domain-name
    "issuer" = var.issuer
    "ingress-class" = var.ingress-class
  }
  # Dolibarr options minus the "enable" flag (consumed here, not forwarded).
  dolibarr = { for k, v in var.dolibarr : k => v if k!="enable" }
}
# Namespace hosting the ERP applications; created only when dolibarr (the
# only app this meta component manages) is enabled.
resource "kubernetes_namespace_v1" "erp-ns" {
  count = ( var.dolibarr.enable )? 1 : 0
  metadata {
    annotations = local.annotations
    labels = merge(local.common-labels, local.annotations)
    name = "${var.namespace}-erp"
  }
}
# vynil Install for dolibarr, placed in the namespace created above
# (indexed reference because erp-ns uses count).
resource "kubectl_manifest" "dolibarr" {
  count = var.dolibarr.enable ? 1 : 0
  depends_on = [kubernetes_namespace_v1.erp-ns]
  yaml_body = <<-EOF
    apiVersion: "vynil.solidite.fr/v1"
    kind: "Install"
    metadata:
      name: "dolibarr"
      namespace: "${kubernetes_namespace_v1.erp-ns[0].metadata[0].name}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      distrib: "core"
      category: "apps"
      component: "dolibarr"
      options: ${jsonencode(merge(local.global, local.dolibarr))}
  EOF
}

View File

@@ -0,0 +1,46 @@
---
apiVersion: vinyl.solidite.fr/v1beta1
kind: Component
category: meta
metadata:
name: domain-erp
description: null
options:
domain-name:
default: your_company.com
examples:
- your_company.com
type: string
dolibarr:
default:
enable: true
examples:
- enable: true
properties:
enable:
default: true
type: boolean
type: object
ingress-class:
default: traefik
examples:
- traefik
type: string
domain:
default: your-company
examples:
- your-company
type: string
issuer:
default: letsencrypt-prod
examples:
- letsencrypt-prod
type: string
dependencies: []
providers:
kubernetes: true
authentik: null
kubectl: true
postgresql: null
restapi: null
http: null

80
meta/domain-infra/apps.tf Normal file
View File

@@ -0,0 +1,80 @@
# Meta component "domain-infra": shared locals for the infra namespace and
# its Installs (dns, traefik-ui, k8s api).
locals {
  # Namespace annotations identifying the owning meta component.
  annotations = {
    # Fixed: was "domain-ci", a copy/paste left-over — this file belongs to
    # the domain-infra meta component (see meta/domain-infra/index.yaml).
    "vynil.solidite.fr/meta" = "domain-infra"
    "vynil.solidite.fr/name" = var.namespace
    "vynil.solidite.fr/domain" = var.domain-name
    "vynil.solidite.fr/issuer" = var.issuer
    "vynil.solidite.fr/ingress" = var.ingress-class
  }
  # Options forwarded to every Install created by this component.
  global = {
    "domain" = var.namespace
    "domain-name" = var.domain-name
    "issuer" = var.issuer
    "ingress-class" = var.ingress-class
  }
  # Per-component options minus the "enable" flag (consumed here, not forwarded).
  traefik = { for k, v in var.traefik : k => v if k!="enable" }
  dns = { for k, v in var.dns : k => v if k!="enable" }
  api = { for k, v in var.api : k => v if k!="enable" }
}
# Namespace hosting the infra components. Only gated on var.dns.enable:
# the traefik/api Installs below target other namespaces.
# NOTE(review): confirm no other install ever needs this namespace.
resource "kubernetes_namespace_v1" "infra-ns" {
  count = ( var.dns.enable )? 1 : 0
  metadata {
    annotations = local.annotations
    labels = merge(local.common-labels, local.annotations)
    name = "${var.namespace}-infra"
  }
}
# Installs the shared DNS component into the "<namespace>-infra" namespace.
# Fixed: the previous "${kubernetes_namespace_v1.infra-ns.name}" reference
# fails to evaluate — infra-ns is declared with count (so an instance index
# is required) and a namespace's name lives under metadata, not at the top
# level. The indexed form below matches the pattern used by domain-erp.
resource "kubectl_manifest" "dns" {
  count = var.dns.enable ? 1 : 0
  depends_on = [kubernetes_namespace_v1.infra-ns]
  yaml_body = <<-EOF
    apiVersion: "vynil.solidite.fr/v1"
    kind: "Install"
    metadata:
      name: "dns"
      namespace: "${kubernetes_namespace_v1.infra-ns[0].metadata[0].name}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      distrib: "core"
      category: "share"
      component: "dns"
      options: ${jsonencode(merge(local.global, local.dns))}
  EOF
}
# Installs the traefik-ui component into the namespace where traefik itself
# runs (var.traefik.namespace), not into the infra namespace.
resource "kubectl_manifest" "traefik" {
  count = var.traefik.enable ? 1 : 0
  depends_on = [kubernetes_namespace_v1.infra-ns]
  yaml_body = <<-EOF
    apiVersion: "vynil.solidite.fr/v1"
    kind: "Install"
    metadata:
      name: "traefik-ui-${var.namespace}"
      namespace: "${var.traefik.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      distrib: "core"
      category: "apps"
      component: "traefik-ui"
      options: ${jsonencode(merge(local.global, local.traefik))}
  EOF
}
# Installs the k8s-ui (API access) component into the "default" namespace.
# Fixed: this resource was also named "traefik", colliding with the
# traefik-ui resource above — duplicate resource addresses are a hard
# Terraform error — so it is renamed to "api". It is now gated on
# var.api.enable, matching the k8s-ui component and local.api options it
# installs (the previous var.traefik.enable gate looked like another
# copy/paste slip — confirm the intended flag).
resource "kubectl_manifest" "api" {
  count = var.api.enable ? 1 : 0
  depends_on = [kubernetes_namespace_v1.infra-ns]
  yaml_body = <<-EOF
    apiVersion: "vynil.solidite.fr/v1"
    kind: "Install"
    metadata:
      name: "k8s-api-${var.namespace}"
      namespace: "default"
      labels: ${jsonencode(local.common-labels)}
    spec:
      distrib: "core"
      category: "apps"
      component: "k8s-ui"
      options: ${jsonencode(merge(local.global, local.api))}
  EOF
}

View File

@@ -0,0 +1,71 @@
---
apiVersion: vinyl.solidite.fr/v1beta1
kind: Component
category: meta
metadata:
name: domain-infra
description: null
options:
ingress-class:
default: traefik
examples:
- traefik
type: string
domain-name:
default: your_company.com
examples:
- your_company.com
type: string
traefik:
default:
enable: false
namespace: traefik
examples:
- enable: false
namespace: traefik
properties:
enable:
default: false
type: boolean
namespace:
default: traefik
type: string
type: object
domain:
default: your-company
examples:
- your-company
type: string
api:
default:
enable: false
examples:
- enable: false
properties:
enable:
default: false
type: boolean
type: object
issuer:
default: letsencrypt-prod
examples:
- letsencrypt-prod
type: string
dns:
default:
enable: false
examples:
- enable: false
properties:
enable:
default: false
type: boolean
type: object
dependencies: []
providers:
kubernetes: true
authentik: null
kubectl: true
postgresql: null
restapi: null
http: null

107
meta/domain/index.yaml Normal file
View File

@@ -0,0 +1,107 @@
---
apiVersion: vinyl.solidite.fr/v1beta1
kind: Component
category: meta
metadata:
name: domain
description: null
options:
auth:
default:
enable: true
examples:
- enable: true
properties:
enable:
default: true
type: boolean
type: object
ci:
default:
enable: false
gitea:
enable: true
examples:
- enable: false
gitea:
enable: true
properties:
enable:
default: false
type: boolean
gitea:
default:
enable: true
properties:
enable:
default: true
type: boolean
type: object
type: object
erp:
default:
dolibarr:
enable: true
enable: false
examples:
- dolibarr:
enable: true
enable: false
properties:
dolibarr:
default:
enable: true
properties:
enable:
default: true
type: boolean
type: object
enable:
default: false
type: boolean
type: object
domain-name:
default: your_company.com
examples:
- your_company.com
type: string
infra:
default:
enable: false
traefik:
enable: false
examples:
- enable: false
traefik:
enable: false
properties:
enable:
default: false
type: boolean
traefik:
default:
enable: false
properties:
enable:
default: false
type: boolean
type: object
type: object
issuer:
default: letsencrypt-prod
examples:
- letsencrypt-prod
type: string
ingress-class:
default: traefik
examples:
- traefik
type: string
dependencies: []
providers:
kubernetes: null
authentik: null
kubectl: true
postgresql: null
restapi: null
http: null

98
meta/domain/installs.tf Normal file
View File

@@ -0,0 +1,98 @@
# Meta component "domain": fans out to the domain-auth / domain-infra /
# domain-ci / domain-erp sub-metas and force-enables the authentik stack
# when any enabled area depends on it.
locals {
  # Options forwarded to every sub-meta Install.
  global = {
    "domain" = var.namespace
    "domain-name" = var.domain-name
    "issuer" = var.issuer
    "ingress-class" = var.ingress-class
  }
  # NOTE(review): defined for parity with the other meta components but not
  # referenced in this file — confirm it is used elsewhere in the module.
  annotations = {
    "vynil.solidite.fr/meta" = "domain"
    "vynil.solidite.fr/name" = var.namespace
    "vynil.solidite.fr/domain" = var.domain-name
    "vynil.solidite.fr/issuer" = var.issuer
    "vynil.solidite.fr/ingress" = var.ingress-class
  }
  # Per-area options minus their "enable" flag (consumed here, not forwarded).
  auth = { for k, v in var.auth : k => v if k!="enable" }
  infra = { for k, v in var.infra : k => v if k!="enable" }
  ci = { for k, v in var.ci : k => v if k!="enable" }
  erp = { for k, v in var.erp : k => v if k!="enable" }
  # Force install authentik and its modules when any are needed
  use-ldap = (var.ci.enable && var.ci.gitea.enable) || (var.erp.enable && var.erp.dolibarr.enable)
  use-forward = var.infra.enable && var.infra.traefik.enable
  use-other-auth = false
  added-auth-ldap = local.use-ldap?{
    "authentik-ldap" = {"enable"= true}
  }:{}
  added-auth-forward = local.use-forward?{
    "authentik-forward" = {"enable"= true}
  }:{}
  # When any consumer needs auth, force-enable authentik plus the required
  # outpost modules on top of the user-supplied auth options.
  added-auth = local.use-ldap||local.use-forward||local.use-other-auth?merge({
    "authentik" = {"enable" = true}
  },local.added-auth-ldap,local.added-auth-forward):{}
}
# domain-auth meta Install (authentik stack).
# Fixed: the Install is also created when another area forces the authentik
# stack (local.added-auth) even if auth itself is disabled — previously the
# count only honoured var.auth.enable, so the "force install authentik"
# options computed in locals were silently never applied.
resource "kubectl_manifest" "auth" {
  count = var.auth.enable || local.use-ldap || local.use-forward || local.use-other-auth ? 1 : 0
  yaml_body = <<-EOF
    apiVersion: "vynil.solidite.fr/v1"
    kind: "Install"
    metadata:
      name: "auth"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      distrib: "core"
      category: "meta"
      component: "domain-auth"
      options: ${jsonencode(merge(merge(local.global, local.auth), local.added-auth))}
  EOF
}
# domain-infra meta Install (dns / traefik-ui / k8s api).
resource "kubectl_manifest" "infra" {
  count = var.infra.enable ? 1 : 0
  yaml_body = <<-EOF
    apiVersion: "vynil.solidite.fr/v1"
    kind: "Install"
    metadata:
      name: "infra"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      distrib: "core"
      category: "meta"
      component: "domain-infra"
      options: ${jsonencode(merge(local.global, local.infra))}
  EOF
}
# domain-ci meta Install (gitea).
resource "kubectl_manifest" "ci" {
  count = var.ci.enable ? 1 : 0
  yaml_body = <<-EOF
    apiVersion: "vynil.solidite.fr/v1"
    kind: "Install"
    metadata:
      name: "ci"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      distrib: "core"
      category: "meta"
      component: "domain-ci"
      options: ${jsonencode(merge(local.global, local.ci))}
  EOF
}
# domain-erp meta Install (dolibarr).
resource "kubectl_manifest" "erp" {
  count = var.erp.enable ? 1 : 0
  yaml_body = <<-EOF
    apiVersion: "vynil.solidite.fr/v1"
    kind: "Install"
    metadata:
      name: "erp"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      distrib: "core"
      category: "meta"
      component: "domain-erp"
      options: ${jsonencode(merge(local.global, local.erp))}
  EOF
}

View File

@@ -0,0 +1,22 @@
# Common labels stamped on every resource this component creates, so
# ownership can be traced back to the vynil Install that produced it.
locals {
  common-labels = {
    "vynil.solidite.fr/owner-name" = var.instance
    "vynil.solidite.fr/owner-namespace" = var.namespace
    "vynil.solidite.fr/owner-category" = var.category
    "vynil.solidite.fr/owner-component" = var.component
    "app.kubernetes.io/managed-by" = "vynil"
    "app.kubernetes.io/name" = var.component
    "app.kubernetes.io/instance" = var.instance
  }
}
# Authentik bootstrap secret in the component's own namespace; provides the
# API token used in forward.tf.
data "kubernetes_secret_v1" "authentik" {
  metadata {
    name = "authentik"
    namespace = var.namespace
  }
}
# No raw kustomize manifests for this component — presumably kept so the
# vynil pipeline always finds this data source; TODO confirm.
data "kustomization_overlay" "data" {
  resources = []
}

View File

@@ -0,0 +1,41 @@
---
apiVersion: vinyl.solidite.fr/v1beta1
kind: Component
category: share
metadata:
name: authentik-forward
description: null
options:
domain-name:
default: your_company.com
examples:
- your_company.com
type: string
sub-domain:
default: null
domain:
default: your-company
examples:
- your-company
type: string
issuer:
default: letsencrypt-prod
examples:
- letsencrypt-prod
type: string
ingress-class:
default: traefik
examples:
- traefik
type: string
dependencies:
- dist: null
category: share
component: authentik
providers:
kubernetes: true
authentik: true
kubectl: null
postgresql: null
restapi: null
http: true

View File

@@ -0,0 +1,80 @@
# Creates the shared "forward" outpost in authentik: a local kubernetes
# service connection, a placeholder proxy provider, the outpost itself and
# a superuser group for vynil administration.
locals {
  request_headers = {
    "Content-Type" = "application/json"
    Authorization = "Bearer ${local.authentik-token}"
  }
  # Bootstrap API token published by the share/authentik component.
  authentik-token = data.kubernetes_secret_v1.authentik.data["AUTHENTIK_BOOTSTRAP_TOKEN"]
  forward-outpost-json = jsondecode(data.http.get_forward_outpost.response_body).results
  # Existing providers of the "forward" outpost plus this provider's id
  # (idempotent union); just this provider when the outpost doesn't exist yet.
  forward-outpost-providers = length(local.forward-outpost-json)>0?(contains(local.forward-outpost-json[0].providers, authentik_provider_proxy.provider_forward.id)?local.forward-outpost-json[0].providers:concat(local.forward-outpost-json[0].providers, [authentik_provider_proxy.provider_forward.id])):[authentik_provider_proxy.provider_forward.id]
}
# Queries the authentik REST API for an existing outpost named "forward".
data "http" "get_forward_outpost" {
  depends_on = [authentik_provider_proxy.provider_forward]
  url = "http://authentik.${var.namespace}.svc/api/v3/outposts/instances/?name__iexact=forward"
  method = "GET"
  request_headers = local.request_headers
  lifecycle {
    postcondition {
      condition = contains([200], self.status_code)
      error_message = "Status code invalid"
    }
  }
}
# Lets authentik deploy the outpost into the cluster it runs in.
resource "authentik_service_connection_kubernetes" "local" {
  depends_on = [data.kubernetes_secret_v1.authentik]
  name = "local-forward"
  local = true
}
data "authentik_flow" "default-authorization-flow" {
  depends_on = [authentik_service_connection_kubernetes.local]
  slug = "default-provider-authorization-implicit-consent"
}
# NOTE(review): internal/external host are both "http://authentik" — this
# looks like a placeholder provider kept so the outpost always has at least
# one provider attached; confirm the intent.
resource "authentik_provider_proxy" "provider_forward" {
  name = "authentik-forward-provider"
  internal_host = "http://authentik"
  external_host = "http://authentik"
  authorization_flow = data.authentik_flow.default-authorization-flow.id
}
# Authentik's own ingress, used to derive the browser-facing host below.
data "kubernetes_ingress_v1" "authentik" {
  metadata {
    name = "authentik"
    namespace = var.namespace
  }
}
resource "authentik_outpost" "outpost-forward" {
  name = "forward"
  type = "proxy"
  service_connection = authentik_service_connection_kubernetes.local.id
  config = jsonencode({
    "log_level": "info",
    "authentik_host": "http://authentik",
    "docker_map_ports": true,
    "kubernetes_replicas": 1,
    "kubernetes_namespace": var.namespace,
    # Browser-facing URL: first rule host of the authentik ingress.
    "authentik_host_browser": "https://${data.kubernetes_ingress_v1.authentik.spec[0].rule[0].host}",
    "object_naming_template": "ak-outpost-%(name)s",
    "authentik_host_insecure": false,
    "kubernetes_service_type": "ClusterIP",
    "kubernetes_image_pull_secrets": [],
    "kubernetes_disabled_components": [],
    "kubernetes_ingress_annotations": {},
    "kubernetes_ingress_secret_name": "authentik-outpost-tls"
  })
  protocol_providers = local.forward-outpost-providers
}
data "authentik_user" "akadmin" {
  depends_on = [authentik_outpost.outpost-forward]
  username = "akadmin"
}
# Superuser group granted access to every forward-auth application (see the
# vynil-admin policy bindings created by the app components).
resource "authentik_group" "group" {
  name = "vynil-forward-admins"
  users = [data.authentik_user.akadmin.id]
  is_superuser = true
}

View File

@@ -0,0 +1,22 @@
# Common labels stamped on every resource this component creates, so
# ownership can be traced back to the vynil Install that produced it.
locals {
  common-labels = {
    "vynil.solidite.fr/owner-name" = var.instance
    "vynil.solidite.fr/owner-namespace" = var.namespace
    "vynil.solidite.fr/owner-category" = var.category
    "vynil.solidite.fr/owner-component" = var.component
    "app.kubernetes.io/managed-by" = "vynil"
    "app.kubernetes.io/name" = var.component
    "app.kubernetes.io/instance" = var.instance
  }
}
# Authentik bootstrap secret in the component's own namespace; provides the
# API token used in the ldap wiring.
data "kubernetes_secret_v1" "authentik" {
  metadata {
    name = "authentik"
    namespace = var.namespace
  }
}
# No raw kustomize manifests for this component — presumably kept so the
# vynil pipeline always finds this data source; TODO confirm.
data "kustomization_overlay" "data" {
  resources = []
}

View File

@@ -0,0 +1,24 @@
---
apiVersion: vinyl.solidite.fr/v1beta1
kind: Component
category: share
metadata:
name: authentik-ldap
description: null
options:
domain:
default: your-company
examples:
- your-company
type: string
dependencies:
- dist: null
category: share
component: authentik
providers:
kubernetes: true
authentik: true
kubectl: true
postgresql: null
restapi: null
http: true

View File

@@ -0,0 +1,108 @@
# Publishes an LDAP interface for authentik: a dedicated bind flow
# (identification + password + login stages), an LDAP provider, and an
# "ldap" outpost deployed through the local kubernetes service connection.
locals {
  request_headers = {
    "Content-Type" = "application/json"
    Authorization = "Bearer ${local.authentik-token}"
  }
  # Bootstrap API token published by the share/authentik component.
  authentik-token = data.kubernetes_secret_v1.authentik.data["AUTHENTIK_BOOTSTRAP_TOKEN"]
  ldap-outpost-json = jsondecode(data.http.get_ldap_outpost.response_body).results
  # Existing providers of the "ldap" outpost plus this provider's id
  # (idempotent union); just this provider when the outpost doesn't exist yet.
  # NOTE(review): "prividers" is a typo for "providers"; kept as-is because
  # the name is referenced by authentik_outpost.outpost-ldap below — rename
  # both ends together in a follow-up.
  ldap-outpost-prividers = length(local.ldap-outpost-json)>0?(contains(local.ldap-outpost-json[0].providers, authentik_provider_ldap.provider_ldap.id)?local.ldap-outpost-json[0].providers:concat(local.ldap-outpost-json[0].providers, [authentik_provider_ldap.provider_ldap.id])):[authentik_provider_ldap.provider_ldap.id]
}
//TODO: find a way to wait until the service is actually ready
# Queries the authentik REST API for an existing outpost named "ldap".
data "http" "get_ldap_outpost" {
  depends_on = [authentik_provider_ldap.provider_ldap]
  url = "http://authentik.${var.namespace}.svc/api/v3/outposts/instances/?name__iexact=ldap"
  method = "GET"
  request_headers = local.request_headers
  lifecycle {
    postcondition {
      condition = contains([200], self.status_code)
      error_message = "Status code invalid"
    }
  }
}
# Password stage able to authenticate against the built-in, token and LDAP
# backends.
resource "authentik_stage_password" "ldap-password-stage" {
  depends_on = [data.kubernetes_secret_v1.authentik]
  name = "ldap-authentication-password"
  backends = [
    "authentik.core.auth.InbuiltBackend",
    "authentik.core.auth.TokenBackend",
    "authentik.sources.ldap.auth.LDAPBackend"
  ]
}
resource "authentik_stage_identification" "ldap-identification-stage" {
  name = "ldap-identification-stage"
  user_fields = ["username","email"]
  password_stage = authentik_stage_password.ldap-password-stage.id
}
resource "authentik_stage_user_login" "ldap-authentication-login" {
  depends_on = [data.kubernetes_secret_v1.authentik]
  name = "ldap-authentication-login"
}
# Flow used by the LDAP provider to bind users.
resource "authentik_flow" "ldap-authentication-flow" {
  depends_on = [data.kubernetes_secret_v1.authentik]
  name = "ldap-authentication-flow"
  title = "ldap authentication flow"
  slug = "ldap-authentication-flow"
  designation = "authentication"
}
# Bind the stages into the flow: identification (order 10) then login (30).
resource "authentik_flow_stage_binding" "ldap-authentication-flow-10" {
  target = authentik_flow.ldap-authentication-flow.uuid
  stage = authentik_stage_identification.ldap-identification-stage.id
  order = 10
}
resource "authentik_flow_stage_binding" "ldap-authentication-flow-30" {
  target = authentik_flow.ldap-authentication-flow.uuid
  stage = authentik_stage_user_login.ldap-authentication-login.id
  order = 30
}
data "authentik_user" "akadmin" {
  depends_on = [kustomization_resource.post,authentik_flow_stage_binding.ldap-authentication-flow-30]
  username = "akadmin"
}
# Superuser group for vynil LDAP administration; seeded with akadmin.
resource "authentik_group" "group" {
  name = "vynil-ldap-admins"
  users = [data.authentik_user.akadmin.id]
  is_superuser = true
}
# Lets authentik deploy the outpost into the cluster it runs in.
resource "authentik_service_connection_kubernetes" "local" {
  depends_on = [data.kubernetes_secret_v1.authentik]
  name = "local-ldap"
  local = true
}
resource "authentik_provider_ldap" "provider_ldap" {
  name = "authentik-ldap-provider"
  # NOTE(review): the fixed "dc=namespace" suffix looks like a left-over
  # placeholder — confirm the intended base DN.
  base_dn = "dc=${var.namespace},dc=namespace"
  bind_flow = authentik_flow.ldap-authentication-flow.uuid
}
resource "authentik_outpost" "outpost-ldap" {
  name = "ldap"
  type = "ldap"
  service_connection = authentik_service_connection_kubernetes.local.id
  config = jsonencode({
    "log_level": "info",
    "authentik_host": "http://authentik",
    "docker_map_ports": true,
    "kubernetes_replicas": 1,
    "kubernetes_namespace": var.namespace,
    "authentik_host_browser": "",
    "object_naming_template": "ak-outpost-%(name)s",
    "authentik_host_insecure": false,
    "kubernetes_service_type": "ClusterIP",
    "kubernetes_image_pull_secrets": [],
    "kubernetes_disabled_components": [],
    "kubernetes_ingress_annotations": {},
    "kubernetes_ingress_secret_name": "authentik-outpost-tls"
  })
  protocol_providers = local.ldap-outpost-prividers
}

View File

@@ -0,0 +1,71 @@
# Source: authentik/templates/server-deployment.yaml
# Authentik API/UI server. No replica count is set here; a matching
# HorizontalPodAutoscaler named authentik-server targets this Deployment.
# All configuration comes from the "authentik" secret via envFrom.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: authentik-server
  labels:
    helm.sh/chart: authentik-2023.6.3
    app.kubernetes.io/name: authentik
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/version: "2023.6.1"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: "server"
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: authentik
      app.kubernetes.io/instance: authentik
      app.kubernetes.io/component: "server"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: authentik
        app.kubernetes.io/instance: authentik
        app.kubernetes.io/component: "server"
        app.kubernetes.io/version: "2023.6.1"
      annotations:
        # Forces a rollout whenever the rendered configuration changes.
        goauthentik.io/config-checksum: 39339b4fd4c8511ca989fe40932e07b38befc9e3642eab092900cdde5cdf8f37
    spec:
      enableServiceLinks: true
      securityContext:
        {}
      containers:
        - name: authentik
          image: "ghcr.io/goauthentik/server:2023.6.1"
          imagePullPolicy: "IfNotPresent"
          args: ["server"]
          # NOTE(review): the empty env/volumeMounts/volumes keys below are
          # left-overs of the Helm rendering — harmless nulls, but confirm
          # before relying on them.
          env:
          envFrom:
            - secretRef:
                name: authentik
          volumeMounts:
          ports:
            - name: http
              containerPort: 9000
              protocol: TCP
            - name: http-metrics
              containerPort: 9300
              protocol: TCP
            - name: https
              containerPort: 9443
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /-/health/live/
              port: http
            initialDelaySeconds: 5
            periodSeconds: 10
          startupProbe:
            failureThreshold: 60
            httpGet:
              path: /-/health/live/
              port: http
            periodSeconds: 5
          readinessProbe:
            httpGet:
              path: /-/health/ready/
              port: http
            periodSeconds: 10
          securityContext:
            {}
      volumes:

View File

@@ -0,0 +1,45 @@
# Source: authentik/templates/worker-deployment.yaml
# Authentik background worker: same image as the server, started with the
# "worker" argument and using the dedicated "authentik" service account.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: authentik-worker
  labels:
    helm.sh/chart: authentik-2023.6.3
    app.kubernetes.io/name: authentik
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/version: "2023.6.1"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: "worker"
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: authentik
      app.kubernetes.io/instance: authentik
      app.kubernetes.io/component: "worker"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: authentik
        app.kubernetes.io/instance: authentik
        app.kubernetes.io/component: "worker"
        app.kubernetes.io/version: "2023.6.1"
      annotations:
        # Forces a rollout whenever the rendered configuration changes.
        goauthentik.io/config-checksum: 39339b4fd4c8511ca989fe40932e07b38befc9e3642eab092900cdde5cdf8f37
    spec:
      serviceAccountName: authentik
      enableServiceLinks: true
      securityContext:
        {}
      containers:
        - name: authentik
          image: "ghcr.io/goauthentik/server:2023.6.1"
          imagePullPolicy: "IfNotPresent"
          args: ["worker"]
          # NOTE(review): the empty env/volumeMounts/volumes keys are
          # left-overs of the Helm rendering — harmless nulls, but confirm
          # before relying on them.
          env:
          envFrom:
            - secretRef:
                name: authentik
          volumeMounts:
          securityContext:
            {}
      volumes:

View File

@@ -0,0 +1,26 @@
# Source: authentik/templates/server-hpa.yaml
# Scales the authentik-server Deployment between 1 and 5 replicas,
# targeting 50% average CPU utilization.
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: authentik-server
  labels:
    helm.sh/chart: authentik-2023.6.3
    app.kubernetes.io/name: authentik
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/version: "2023.6.1"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: "server"
spec:
  minReplicas: 1
  maxReplicas: 5
  metrics:
    - resource:
        name: cpu
        target:
          averageUtilization: 50
          type: Utilization
      type: Resource
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: authentik-server

View File

@@ -0,0 +1,26 @@
# Source: authentik/templates/worker-hpa.yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: authentik-worker
labels:
helm.sh/chart: authentik-2023.6.3
app.kubernetes.io/name: authentik
app.kubernetes.io/instance: authentik
app.kubernetes.io/version: "2023.6.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "worker"
spec:
minReplicas: 1
maxReplicas: 5
metrics:
- resource:
name: cpu
target:
averageUtilization: 80
type: Utilization
type: Resource
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: authentik-worker

153
share/authentik/datas.tf Normal file
View File

@@ -0,0 +1,153 @@
# Ownership labels attached to every object this component creates; vynil uses
# the owner-* labels for tracking, and the kustomize overlay below injects the
# same set as common_labels on all rendered manifests.
locals {
  common-labels = {
    "vynil.solidite.fr/owner-name"      = var.instance
    "vynil.solidite.fr/owner-namespace" = var.namespace
    "vynil.solidite.fr/owner-category"  = var.category
    "vynil.solidite.fr/owner-component" = var.component
    "app.kubernetes.io/managed-by"      = "vynil"
    "app.kubernetes.io/name"            = var.component
    "app.kubernetes.io/instance"        = var.instance
  }
}

# Reads the "authentik" secret back from the cluster — presumably the one
# produced by the StringSecret in secret.tf; verify against that resource.
data "kubernetes_secret_v1" "authentik" {
  metadata {
    name      = "authentik"
    namespace = var.namespace
  }
}
# Kustomize overlay over the rendered helm manifests shipped in this module
# (every *.yaml except index.yaml): pins the container image, generates the
# non-secret configuration ConfigMap, and patches the Deployments and RBAC.
data "kustomization_overlay" "data" {
  namespace     = var.namespace
  common_labels = local.common-labels
  resources     = [for file in fileset(path.module, "*.yaml") : file if file != "index.yaml"]

  # Redirect the upstream image reference to the configured registry/repo/tag.
  images {
    name     = "ghcr.io/goauthentik/server"
    new_name = "${var.image.registry}/${var.image.repository}"
    new_tag  = "${var.image.tag}"
  }

  # Non-secret authentik settings; both Deployments load this via envFrom.
  config_map_generator {
    name     = var.component
    behavior = "create"
    literals = [
      "AUTHENTIK_EMAIL__PORT=${var.email.port}",
      "AUTHENTIK_EMAIL__TIMEOUT=${var.email.timeout}",
      "AUTHENTIK_EMAIL__USE_TLS=${var.email.use_tls}",
      "AUTHENTIK_EMAIL__USE_SSL=${var.email.use_ssl}",
      "AUTHENTIK_ERROR_REPORTING__ENABLED=${var.error_reporting.enabled}",
      "AUTHENTIK_ERROR_REPORTING__ENVIRONMENT=${var.error_reporting.environment}",
      "AUTHENTIK_ERROR_REPORTING__SEND_PII=${var.error_reporting.send_pii}",
      "AUTHENTIK_GEOIP=${var.geoip}",
      "AUTHENTIK_LOG_LEVEL=${var.loglevel}",
      "AUTHENTIK_OUTPOSTS__CONTAINER_IMAGE_BASE=${var.image.registry}/${var.image.project}/%(type)s:%(version)s",
      # Endpoints must match the resources created in postgres.tf / redis.tf.
      "AUTHENTIK_POSTGRESQL__HOST=${var.instance}-${var.component}.${var.namespace}.svc",
      "AUTHENTIK_POSTGRESQL__NAME=${var.component}",
      "AUTHENTIK_POSTGRESQL__PORT=5432",
      "AUTHENTIK_POSTGRESQL__USER=${var.component}",
      "AUTHENTIK_REDIS__HOST=${var.name}-${var.component}-redis",
      "AUTHENTIK_BOOTSTRAP_EMAIL=${var.admin.email}@${var.domain-name}",
    ]
  }

  # Server: set the image and inject the postgres password from the
  # credentials secret created by the zalando postgres-operator for the
  # "${var.instance}-${var.component}" cluster (see postgres.tf).
  patches {
    target {
      kind = "Deployment"
      name = "authentik-server"
    }
    patch = <<-EOF
      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: authentik-server
      spec:
        template:
          spec:
            containers:
              - name: authentik
                image: "${var.image.registry}/${var.image.repository}:${var.image.tag}"
                imagePullPolicy: "${var.image.pullPolicy}"
                env:
                  - name: AUTHENTIK_POSTGRESQL__PASSWORD
                    valueFrom:
                      secretKeyRef:
                        name: ${var.component}.${var.instance}-${var.component}.credentials.postgresql.acid.zalan.do
                        key: password
                envFrom:
                  - secretRef:
                      name: ${var.component}
                  - configMapRef:
                      name: ${var.component}
    EOF
  }

  # Worker: identical wiring. Fixed to use var.instance (was var.name) so the
  # secret name matches the postgresql cluster actually created in postgres.tf
  # ("${var.instance}-${var.component}"), consistent with the server patch.
  patches {
    target {
      kind = "Deployment"
      name = "authentik-worker"
    }
    patch = <<-EOF
      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: authentik-worker
      spec:
        template:
          spec:
            containers:
              - name: authentik
                image: "${var.image.registry}/${var.image.repository}:${var.image.tag}"
                imagePullPolicy: "${var.image.pullPolicy}"
                env:
                  - name: AUTHENTIK_POSTGRESQL__PASSWORD
                    valueFrom:
                      secretKeyRef:
                        name: ${var.component}.${var.instance}-${var.component}.credentials.postgresql.acid.zalan.do
                        key: password
                envFrom:
                  - secretRef:
                      name: ${var.component}
                  - configMapRef:
                      name: ${var.component}
    EOF
  }

  # Rename the cluster-scoped ClusterRole per install namespace so several
  # namespaces can each install authentik without colliding on the name.
  patches {
    target {
      kind = "ClusterRole"
      name = "authentik-vynil-auth"
    }
    patch = <<-EOF
      - op: replace
        path: /metadata/name
        value: authentik-${var.namespace}
    EOF
  }

  # Re-point the ClusterRoleBinding at the renamed ClusterRole and at the
  # ServiceAccount in the install namespace...
  patches {
    target {
      kind = "ClusterRoleBinding"
      name = "authentik-vynil-auth"
    }
    patch = <<-EOF
      apiVersion: rbac.authorization.k8s.io/v1
      kind: ClusterRoleBinding
      metadata:
        name: authentik-vynil-auth
      roleRef:
        apiGroup: rbac.authorization.k8s.io
        kind: ClusterRole
        name: authentik-${var.namespace}
      subjects:
        - kind: ServiceAccount
          name: authentik
          namespace: ${var.namespace}
    EOF
  }

  # ...then rename the binding itself (JSON patch against the same target).
  patches {
    target {
      kind = "ClusterRoleBinding"
      name = "authentik-vynil-auth"
    }
    patch = <<-EOF
      - op: replace
        path: /metadata/name
        value: authentik-${var.namespace}
    EOF
  }
}

207
share/authentik/index.yaml Normal file
View File

@@ -0,0 +1,207 @@
---
apiVersion: vinyl.solidite.fr/v1beta1
kind: Component
category: share
metadata:
name: authentik
description: authentik is an open-source Identity Provider focused on flexibility and versatility
options:
email:
default:
port: 587
timeout: 30
use_ssl: false
use_tls: false
examples:
- port: 587
timeout: 30
use_ssl: false
use_tls: false
properties:
port:
default: 587
type: integer
timeout:
default: 30
type: integer
use_ssl:
default: false
type: boolean
use_tls:
default: false
type: boolean
type: object
geoip:
default: /geoip/GeoLite2-City.mmdb
examples:
- /geoip/GeoLite2-City.mmdb
type: string
admin:
default:
email: auth-admin
examples:
- email: auth-admin
properties:
email:
default: auth-admin
type: string
type: object
image:
default:
project: goauthentik
pullPolicy: IfNotPresent
registry: ghcr.io
repository: goauthentik/server
tag: 2023.5.4
examples:
- project: goauthentik
pullPolicy: IfNotPresent
registry: ghcr.io
repository: goauthentik/server
tag: 2023.5.4
properties:
project:
default: goauthentik
type: string
pullPolicy:
default: IfNotPresent
type: string
registry:
default: ghcr.io
type: string
repository:
default: goauthentik/server
type: string
tag:
default: 2023.5.4
type: string
type: object
error_reporting:
default:
enabled: false
environment: k8s
send_pii: false
examples:
- enabled: false
environment: k8s
send_pii: false
properties:
enabled:
default: false
type: boolean
environment:
default: k8s
type: string
send_pii:
default: false
type: boolean
type: object
sub-domain:
default: auth
examples:
- auth
type: string
redis:
default:
exporter:
enabled: true
image: quay.io/opstree/redis-exporter:v1.44.0
image: quay.io/opstree/redis:v7.0.5
storage: 8Gi
examples:
- exporter:
enabled: true
image: quay.io/opstree/redis-exporter:v1.44.0
image: quay.io/opstree/redis:v7.0.5
storage: 8Gi
properties:
exporter:
default:
enabled: true
image: quay.io/opstree/redis-exporter:v1.44.0
properties:
enabled:
default: true
type: boolean
image:
default: quay.io/opstree/redis-exporter:v1.44.0
type: string
type: object
image:
default: quay.io/opstree/redis:v7.0.5
type: string
storage:
default: 8Gi
type: string
type: object
postgres:
default:
replicas: 1
storage: 8Gi
version: '14'
examples:
- replicas: 1
storage: 8Gi
version: '14'
properties:
replicas:
default: 1
type: integer
storage:
default: 8Gi
type: string
version:
default: '14'
type: string
type: object
domain-name:
default: your_company.com
examples:
- your_company.com
type: string
ingress-class:
default: traefik
examples:
- traefik
type: string
issuer:
default: letsencrypt-prod
examples:
- letsencrypt-prod
type: string
domain:
default: your-company
examples:
- your-company
type: string
loglevel:
default: info
examples:
- info
type: string
dependencies:
- dist: null
category: core
component: cert-manager
- dist: null
category: core
component: secret-generator
- dist: null
category: crd
component: prometheus
- dist: null
category: crd
component: traefik
- dist: null
category: dbo
component: postgresql
- dist: null
category: dbo
component: redis
providers:
kubernetes: null
authentik: true
kubectl: true
postgresql: null
restapi: null
http: null

View File

@@ -0,0 +1,75 @@
# Ingress wiring: one public host ("${var.sub-domain}.${var.domain-name}")
# routed to the backend service on port 80, behind an https-redirect
# middleware (created below in this file).
locals {
  dns-names   = ["${var.sub-domain}.${var.domain-name}"]
  middlewares = ["${var.instance}-https"]
  # NOTE(review): the rendered authentik Service is named "authentik", while
  # this backend targets "${var.instance}" — confirm the instance name matches
  # the service name (or that the overlay renames the Service).
  service = {
    "name" = "${var.instance}"
    "port" = {
      "number" = 80
    }
  }
  # One networking.k8s.io/v1 Ingress rule per DNS name, all paths to backend.
  rules = [for v in local.dns-names : {
    "host" = "${v}"
    "http" = {
      "paths" = [{
        "backend" = {
          "service" = local.service
        }
        "path"     = "/"
        "pathType" = "Prefix"
      }]
    }
  }]
}
# TLS certificate for the public hostnames, issued by the ClusterIssuer and
# stored in "${var.instance}-cert" (referenced by the Ingress tls block below).
resource "kubectl_manifest" "prj_certificate" {
  yaml_body = <<-EOF
    apiVersion: "cert-manager.io/v1"
    kind: "Certificate"
    metadata:
      name: "${var.instance}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      secretName: "${var.instance}-cert"
      dnsNames: ${jsonencode(local.dns-names)}
      issuerRef:
        name: "${var.issuer}"
        kind: "ClusterIssuer"
        group: "cert-manager.io"
  EOF
}

# Traefik middleware issuing a permanent redirect from http to https.
resource "kubectl_manifest" "prj_https_redirect" {
  yaml_body = <<-EOF
    apiVersion: "traefik.containo.us/v1alpha1"
    kind: "Middleware"
    metadata:
      name: "${var.instance}-https"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      redirectScheme:
        scheme: "https"
        permanent: true
  EOF
}

# Public ingress: applies local.rules, attaches the https-redirect middleware
# via the traefik annotation, and terminates TLS with the certificate above.
resource "kubectl_manifest" "prj_ingress" {
  force_conflicts = true
  yaml_body = <<-EOF
    apiVersion: "networking.k8s.io/v1"
    kind: "Ingress"
    metadata:
      name: "${var.instance}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
      annotations:
        "traefik.ingress.kubernetes.io/router.middlewares": "${join(",", [for m in local.middlewares : format("%s-%s@kubernetescrd", var.namespace, m)])}"
    spec:
      ingressClassName: "${var.ingress-class}"
      rules: ${jsonencode(local.rules)}
      tls:
        - hosts: ${jsonencode(local.dns-names)}
          secretName: "${var.instance}-cert"
  EOF
}

View File

@@ -0,0 +1,162 @@
# Source: authentik/templates/prom-rules.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: authentik
labels:
helm.sh/chart: authentik-2023.6.3
app.kubernetes.io/name: authentik
app.kubernetes.io/instance: authentik
app.kubernetes.io/version: "2023.6.1"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: authentik Aggregate request counters
rules:
- record: job:django_http_requests_before_middlewares_total:sum_rate30s
expr: sum(rate(django_http_requests_before_middlewares_total[30s])) by (job)
- record: job:django_http_requests_unknown_latency_total:sum_rate30s
expr: sum(rate(django_http_requests_unknown_latency_total[30s])) by (job)
- record: job:django_http_ajax_requests_total:sum_rate30s
expr: sum(rate(django_http_ajax_requests_total[30s])) by (job)
- record: job:django_http_responses_before_middlewares_total:sum_rate30s
expr: sum(rate(django_http_responses_before_middlewares_total[30s])) by (job)
- record: job:django_http_requests_unknown_latency_including_middlewares_total:sum_rate30s
expr: sum(rate(django_http_requests_unknown_latency_including_middlewares_total[30s])) by (job)
- record: job:django_http_requests_body_total_bytes:sum_rate30s
expr: sum(rate(django_http_requests_body_total_bytes[30s])) by (job)
- record: job:django_http_responses_streaming_total:sum_rate30s
expr: sum(rate(django_http_responses_streaming_total[30s])) by (job)
- record: job:django_http_responses_body_total_bytes:sum_rate30s
expr: sum(rate(django_http_responses_body_total_bytes[30s])) by (job)
- record: job:django_http_requests_total:sum_rate30s
expr: sum(rate(django_http_requests_total_by_method[30s])) by (job)
- record: job:django_http_requests_total_by_method:sum_rate30s
expr: sum(rate(django_http_requests_total_by_method[30s])) by (job,method)
- record: job:django_http_requests_total_by_transport:sum_rate30s
expr: sum(rate(django_http_requests_total_by_transport[30s])) by (job,transport)
- record: job:django_http_requests_total_by_view:sum_rate30s
expr: sum(rate(django_http_requests_total_by_view_transport_method[30s])) by (job,view)
- record: job:django_http_requests_total_by_view_transport_method:sum_rate30s
expr: sum(rate(django_http_requests_total_by_view_transport_method[30s])) by (job,view,transport,method)
- record: job:django_http_responses_total_by_templatename:sum_rate30s
expr: sum(rate(django_http_responses_total_by_templatename[30s])) by (job,templatename)
- record: job:django_http_responses_total_by_status:sum_rate30s
expr: sum(rate(django_http_responses_total_by_status[30s])) by (job,status)
- record: job:django_http_responses_total_by_status_name_method:sum_rate30s
expr: sum(rate(django_http_responses_total_by_status_name_method[30s])) by (job,status,name,method)
- record: job:django_http_responses_total_by_charset:sum_rate30s
expr: sum(rate(django_http_responses_total_by_charset[30s])) by (job,charset)
- record: job:django_http_exceptions_total_by_type:sum_rate30s
expr: sum(rate(django_http_exceptions_total_by_type[30s])) by (job,type)
- record: job:django_http_exceptions_total_by_view:sum_rate30s
expr: sum(rate(django_http_exceptions_total_by_view[30s])) by (job,view)
- name: authentik Aggregate latency histograms
rules:
- record: job:django_http_requests_latency_including_middlewares_seconds:quantile_rate30s
expr: histogram_quantile(0.50, sum(rate(django_http_requests_latency_including_middlewares_seconds_bucket[30s])) by (job, le))
labels:
quantile: "50"
- record: job:django_http_requests_latency_including_middlewares_seconds:quantile_rate30s
expr: histogram_quantile(0.95, sum(rate(django_http_requests_latency_including_middlewares_seconds_bucket[30s])) by (job, le))
labels:
quantile: "95"
- record: job:django_http_requests_latency_including_middlewares_seconds:quantile_rate30s
expr: histogram_quantile(0.99, sum(rate(django_http_requests_latency_including_middlewares_seconds_bucket[30s])) by (job, le))
labels:
quantile: "99"
- record: job:django_http_requests_latency_including_middlewares_seconds:quantile_rate30s
expr: histogram_quantile(0.999, sum(rate(django_http_requests_latency_including_middlewares_seconds_bucket[30s])) by (job, le))
labels:
quantile: "99.9"
- record: job:django_http_requests_latency_seconds:quantile_rate30s
expr: histogram_quantile(0.50, sum(rate(django_http_requests_latency_seconds_bucket[30s])) by (job, le))
labels:
quantile: "50"
- record: job:django_http_requests_latency_seconds:quantile_rate30s
expr: histogram_quantile(0.95, sum(rate(django_http_requests_latency_seconds_bucket[30s])) by (job, le))
labels:
quantile: "95"
- record: job:django_http_requests_latency_seconds:quantile_rate30s
expr: histogram_quantile(0.99, sum(rate(django_http_requests_latency_seconds_bucket[30s])) by (job, le))
labels:
quantile: "99"
- record: job:django_http_requests_latency_seconds:quantile_rate30s
expr: histogram_quantile(0.999, sum(rate(django_http_requests_latency_seconds_bucket[30s])) by (job, le))
labels:
quantile: "99.9"
- name: authentik Aggregate model operations
rules:
- record: job:django_model_inserts_total:sum_rate1m
expr: sum(rate(django_model_inserts_total[1m])) by (job, model)
- record: job:django_model_updates_total:sum_rate1m
expr: sum(rate(django_model_updates_total[1m])) by (job, model)
- record: job:django_model_deletes_total:sum_rate1m
expr: sum(rate(django_model_deletes_total[1m])) by (job, model)
- name: authentik Aggregate database operations
rules:
- record: job:django_db_new_connections_total:sum_rate30s
expr: sum(rate(django_db_new_connections_total[30s])) by (alias, vendor)
- record: job:django_db_new_connection_errors_total:sum_rate30s
expr: sum(rate(django_db_new_connection_errors_total[30s])) by (alias, vendor)
- record: job:django_db_execute_total:sum_rate30s
expr: sum(rate(django_db_execute_total[30s])) by (alias, vendor)
- record: job:django_db_execute_many_total:sum_rate30s
expr: sum(rate(django_db_execute_many_total[30s])) by (alias, vendor)
- record: job:django_db_errors_total:sum_rate30s
expr: sum(rate(django_db_errors_total[30s])) by (alias, vendor, type)
- name: authentik Aggregate migrations
rules:
- record: job:django_migrations_applied_total:max
expr: max(django_migrations_applied_total) by (job, connection)
- record: job:django_migrations_unapplied_total:max
expr: max(django_migrations_unapplied_total) by (job, connection)
- name: authentik Alerts
rules:
- alert: NoWorkersConnected
labels:
severity: critical
expr: max without (pid) (authentik_admin_workers) < 1
for: 10m
annotations:
summary: No workers connected
          message: authentik instance {{ $labels.instance }}'s workers are either not running or not connected.
- alert: PendingMigrations
labels:
severity: critical
expr: max without (pid) (django_migrations_unapplied_total) > 0
for: 10m
annotations:
summary: Pending database migrations
message: authentik instance {{ $labels.instance }} has pending database migrations
- alert: FailedSystemTasks
labels:
severity: critical
expr: sum(increase(authentik_system_tasks{status="error"}[2h])) > 0
for: 2h
annotations:
summary: Failed system tasks
message: System task {{ $labels.task_name }} has failed
- alert: DisconnectedOutposts
labels:
severity: critical
expr: sum by (outpost) (max without (pid) (authentik_outposts_connected{uid!~"specific.*"})) < 1
for: 30m
annotations:
summary: Disconnected outpost
message: Outpost {{ $labels.outpost }} has at least 1 disconnected instance

View File

@@ -0,0 +1,20 @@
# Source: authentik/templates/prom-service-monitor.yaml
# prometheus-operator scrape config: polls the "http-metrics" port (9300)
# of the authentik Service every 30s.
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: authentik
  labels:
    helm.sh/chart: authentik-2023.6.3
    app.kubernetes.io/name: authentik
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/version: "2023.6.1"
    app.kubernetes.io/managed-by: Helm
spec:
  endpoints:
    - port: http-metrics
      scrapeTimeout: 3s
      interval: 30s
  selector:
    matchLabels:
      app.kubernetes.io/name: authentik
      app.kubernetes.io/instance: authentik

View File

@@ -0,0 +1,26 @@
# PostgreSQL cluster (zalando postgres-operator) backing authentik. The
# operator derives a credentials secret named
# "${var.component}.${var.instance}-${var.component}.credentials.postgresql.acid.zalan.do",
# which the Deployment patches in datas.tf consume.
resource "kubectl_manifest" "authentik_postgresql" {
  yaml_body = <<-EOF
    apiVersion: "acid.zalan.do/v1"
    kind: "postgresql"
    metadata:
      name: "${var.instance}-${var.component}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      databases:
        ${var.component}: "${var.component}"
      numberOfInstances: ${var.postgres.replicas}
      # k8up backup integration: dump the database instead of copying files.
      podAnnotations:
        "k8up.io/backupcommand": "pg_dump -U postgres -d ${var.component} --clean"
        "k8up.io/file-extension": ".sql"
      postgresql:
        version: "${var.postgres.version}"
      teamId: "${var.instance}"
      users:
        ${var.component}:
          - "superuser"
          - "createdb"
      volume:
        size: "${var.postgres.storage}"
  EOF
}

View File

@@ -0,0 +1,19 @@
# Source: authentik/charts/serviceAccount/templates/cluster-role-binding.yaml
# Binds the CRD-listing ClusterRole to the authentik ServiceAccount. The
# hard-coded names/namespace here are rewritten by the patches in datas.tf
# (role name becomes authentik-<namespace>); the subject namespace is
# presumably adjusted by the overlay's namespace transform — verify.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: authentik-vynil-auth
  labels:
    helm.sh/chart: serviceAccount-1.2.2
    app.kubernetes.io/name: authentik
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/version: "2023.6.0"
    app.kubernetes.io/managed-by: Helm
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: authentik-vynil-auth
subjects:
  - kind: ServiceAccount
    name: authentik
    namespace: vynil-auth

View File

@@ -0,0 +1,18 @@
# Source: authentik/charts/serviceAccount/templates/cluster-role.yaml
# Cluster-wide permission to list CustomResourceDefinitions only. Renamed to
# authentik-<namespace> by a JSON patch in datas.tf to avoid collisions.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: authentik-vynil-auth
  labels:
    helm.sh/chart: serviceAccount-1.2.2
    app.kubernetes.io/name: authentik
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/version: "2023.6.0"
    app.kubernetes.io/managed-by: Helm
rules:
  - apiGroups:
      - apiextensions.k8s.io
    resources:
      - customresourcedefinitions
    verbs:
      - list

View File

@@ -0,0 +1,20 @@
# Source: authentik/charts/serviceAccount/templates/role-binding.yaml
# Binds the namespaced Role "authentik" to the authentik ServiceAccount.
# NOTE(review): namespace is hard-coded to vynil-auth by the chart; presumably
# rewritten by the kustomize overlay's namespace transform — verify.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: authentik
  namespace: vynil-auth
  labels:
    helm.sh/chart: serviceAccount-1.2.2
    app.kubernetes.io/name: authentik
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/version: "2023.6.0"
    app.kubernetes.io/managed-by: Helm
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: authentik
subjects:
  - kind: ServiceAccount
    name: authentik
    namespace: vynil-auth

View File

@@ -0,0 +1,74 @@
# Source: authentik/charts/serviceAccount/templates/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: authentik
namespace: vynil-auth
labels:
helm.sh/chart: serviceAccount-1.2.2
app.kubernetes.io/name: authentik
app.kubernetes.io/instance: authentik
app.kubernetes.io/version: "2023.6.0"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resources:
- secrets
- services
- configmaps
verbs:
- get
- create
- delete
- list
- patch
- apiGroups:
- extensions
- apps
resources:
- deployments
verbs:
- get
- create
- delete
- list
- patch
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- create
- delete
- list
- patch
- apiGroups:
- traefik.containo.us
- traefik.io
resources:
- middlewares
verbs:
- get
- create
- delete
- list
- patch
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- get
- create
- delete
- list
- patch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- list

30
share/authentik/redis.tf Normal file
View File

@@ -0,0 +1,30 @@
# Standalone Redis (opstree redis-operator) used by authentik. The name must
# match AUTHENTIK_REDIS__HOST in datas.tf (it does: both use var.name).
# NOTE(review): var.name is used here while sibling resources use
# var.instance — confirm var.name is the intended variable.
resource "kubectl_manifest" "authentik_redis" {
  yaml_body = <<-EOF
    apiVersion: "redis.redis.opstreelabs.in/v1beta1"
    kind: "Redis"
    metadata:
      name: "${var.name}-${var.component}-redis"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      kubernetesConfig:
        image: "${var.redis.image}"
        imagePullPolicy: "IfNotPresent"
        # Password taken from the generated StringSecret (see secret.tf).
        redisSecret:
          name: "${var.component}"
          key: "AUTHENTIK_REDIS__PASSWORD"
      storage:
        volumeClaimTemplate:
          spec:
            accessModes: ["ReadWriteOnce"]
            resources:
              requests:
                storage: "${var.redis.storage}"
      redisExporter:
        enabled: ${var.redis.exporter.enabled}
        image: "${var.redis.exporter.image}"
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
  EOF
}

23
share/authentik/secret.tf Normal file
View File

@@ -0,0 +1,23 @@
# Random secrets for authentik, generated in-cluster by mittwald's
# secret-generator. forceRegenerate=false keeps values stable across applies;
# operator-managed annotations are ignored to avoid perpetual diffs.
resource "kubectl_manifest" "authentik_secret" {
  ignore_fields = ["metadata.annotations"]
  yaml_body = <<-EOF
    apiVersion: "secretgenerator.mittwald.de/v1alpha1"
    kind: "StringSecret"
    metadata:
      name: "${var.component}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    spec:
      forceRegenerate: false
      fields:
        - fieldName: "AUTHENTIK_SECRET_KEY"
          length: "128"
        - fieldName: "AUTHENTIK_BOOTSTRAP_PASSWORD"
          length: "32"
        - fieldName: "AUTHENTIK_BOOTSTRAP_TOKEN"
          length: "64"
        - fieldName: "AUTHENTIK_REDIS__PASSWORD"
          length: "32"
  EOF
}

View File

@@ -0,0 +1,12 @@
# Source: authentik/charts/serviceAccount/templates/service-account.yaml
# ServiceAccount used by both authentik Deployments (the worker references it
# explicitly); bound to the Role/ClusterRole defined alongside.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: authentik
  namespace: vynil-auth
  labels:
    helm.sh/chart: serviceAccount-1.2.2
    app.kubernetes.io/name: authentik
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/version: "2023.6.0"
    app.kubernetes.io/managed-by: Helm

View File

@@ -0,0 +1,26 @@
# Source: authentik/templates/service.yaml
# Exposes only the server pods (component=server selector): port 80 -> the
# pods' "http" port, plus 9300 for Prometheus scraping via the ServiceMonitor.
apiVersion: v1
kind: Service
metadata:
  name: authentik
  labels:
    helm.sh/chart: authentik-2023.6.3
    app.kubernetes.io/name: authentik
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/version: "2023.6.1"
    app.kubernetes.io/managed-by: Helm
spec:
  type: ClusterIP
  ports:
    - port: 9300
      name: http-metrics
      protocol: TCP
      targetPort: http-metrics
    - port: 80
      targetPort: http
      protocol: TCP
      name: http
  selector:
    app.kubernetes.io/name: authentik
    app.kubernetes.io/instance: authentik
    app.kubernetes.io/component: "server"

View File

@@ -0,0 +1,89 @@
# Source: coredns/templates/deployment.yaml
# CoreDNS server Deployment. The datas.tf overlay patches the image and
# replaces the config-volume ConfigMap with the generated one (config.tf).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns-coredns
  labels:
    app.kubernetes.io/managed-by: "Helm"
    app.kubernetes.io/instance: "coredns"
    helm.sh/chart: "coredns-1.24.1"
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
    app.kubernetes.io/name: coredns
    app.kubernetes.io/version: "1.10.1"
spec:
  replicas: 1
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
      maxSurge: 25%
  selector:
    matchLabels:
      app.kubernetes.io/instance: "coredns"
      k8s-app: coredns
      app.kubernetes.io/name: coredns
  template:
    metadata:
      labels:
        k8s-app: coredns
        app.kubernetes.io/name: coredns
        app.kubernetes.io/instance: "coredns"
      annotations:
        # Forces a rollout when the chart's ConfigMap content changes.
        checksum/config: 2c80ea26dcf7cd4d57c4ccbe0561210d06f8e048704a7edb5c495e4e2d60999d
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
    spec:
      terminationGracePeriodSeconds: 30
      serviceAccountName: coredns-coredns
      dnsPolicy: Default
      containers:
        - name: "coredns"
          image: "coredns/coredns:1.10.1"
          imagePullPolicy: IfNotPresent
          args: [ "-conf", "/etc/coredns/Corefile" ]
          volumeMounts:
            - name: config-volume
              mountPath: /etc/coredns
          resources:
            limits:
              cpu: 100m
              memory: 128Mi
            requests:
              cpu: 100m
              memory: 128Mi
          ports:
            - {"containerPort":53,"name":"udp-53","protocol":"UDP"}
            - {"containerPort":53,"name":"tcp-53","protocol":"TCP"}
          # Health/readiness endpoints are served by the Corefile's
          # "health" (8080) and "ready" (8181) plugins.
          livenessProbe:
            httpGet:
              path: /health
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 60
            periodSeconds: 10
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 5
          readinessProbe:
            httpGet:
              path: /ready
              port: 8181
              scheme: HTTP
            initialDelaySeconds: 30
            periodSeconds: 10
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 5
          securityContext:
            capabilities:
              add:
                # Needed to bind port 53 as a non-root process.
                - NET_BIND_SERVICE
      volumes:
        - name: config-volume
          configMap:
            name: coredns-coredns
            items:
              - key: Corefile
                path: Corefile

52
share/dns/config.tf Normal file
View File

@@ -0,0 +1,52 @@
locals {
  # Corefile header: one server block on port 53 with error consolidation and
  # the health (8080) / ready (8181) endpoints probed by the Deployment.
  begin-core = <<-EOF
    .:53 {
      errors {
        consolidate 5m ".* i/o timeout$" warning
        consolidate 30s "^Failed to .+"
      }
      health {
        lameduck 5s
      }
      ready
  EOF
  end-core   = <<-EOF
    }
  EOF
  # SOA + NS boilerplate shared by every zone. The serial derives from
  # timestamp(), so it changes on every terraform apply.
  soa-ns = <<-EOF
    @ IN SOA ${var.sub-domain}.${var.domain-name}. ${var.domain-name}. (
      ${formatdate("YYYYMMDDhh", timestamp())} ; Serial
      4H ; Refresh
      1H ; Retry
      7D ; Expire
      4H ) ; Negative Cache TTL
    @ IN NS ${var.sub-domain}.${var.domain-name}.
  EOF
  # ConfigMap payload: the Corefile plus one RFC1035 zone file per zone.
  # Fixes over the original: join with "\n" so each "file" directive and the
  # closing brace land on their own line (join("") glued them together), and
  # reference /etc/coredns/<zone> without a ".db" suffix — datas.tf mounts the
  # ConfigMap key z.name at path z.name, so a ".db" path would not exist.
  # NOTE(review): one "file" directive per zone in a single server block —
  # verify the CoreDNS file plugin accepts repetition; otherwise list all
  # zones on a single directive.
  files = merge({
    "Corefile" = join("\n", concat(
      [local.begin-core],
      [for z in var.zones : format("  file /etc/coredns/%s %s", z.name, z.name)],
      [local.end-core],
    ))
  }, [for z in var.zones : {
    # Fixed typo: the AAAA wildcard used "z.namz", which would fail to
    # evaluate; it now uses z.name like the A wildcard.
    "${z.name}" = join("\n", concat(
      [
        "$TTL 60",
        "$ORIGIN ${z.name}.",
        local.soa-ns
      ],
      [for k, v in z.hosts : format("%s IN A %s", k, v)],
      [for k, v in z.hosts6 : format("%s IN AAAA %s", k, v)],
      [for k, v in z.alias : format("%s IN CNAME %s", k, v)],
      z.wildcard != "" ? [format("*.%s. IN A %s", z.name, z.wildcard)] : [],
      z.wildcard6 != "" ? [format("*.%s. IN AAAA %s", z.name, z.wildcard6)] : [],
    ))
  }]...)
}
# ConfigMap carrying the Corefile and the generated zone files; mounted into
# the coredns Deployment via the volumes patch in datas.tf.
resource "kubectl_manifest" "coredns-config" {
  yaml_body = <<-EOF
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: "${var.component}-${var.instance}"
      namespace: "${var.namespace}"
      labels: ${jsonencode(local.common-labels)}
    data: ${jsonencode(local.files)}
  EOF
}

53
share/dns/datas.tf Normal file
View File

@@ -0,0 +1,53 @@
# Ownership labels (same pattern as the other vynil components).
locals {
  common-labels = {
    "vynil.solidite.fr/owner-name"      = var.instance
    "vynil.solidite.fr/owner-namespace" = var.namespace
    "vynil.solidite.fr/owner-category"  = var.category
    "vynil.solidite.fr/owner-component" = var.component
    "app.kubernetes.io/managed-by"      = "vynil"
    "app.kubernetes.io/name"            = var.component
    "app.kubernetes.io/instance"        = var.instance
  }
  # ConfigMap key -> mount path mapping for the coredns config volume: the
  # Corefile plus one file per zone, each mounted under its zone name
  # (matching the paths referenced from the generated Corefile).
  items = concat([{
    "key"  = "Corefile"
    "path" = "Corefile"
  }], [for z in var.zones : {
    "key"  = z.name
    "path" = z.name
  }])
}
# Kustomize overlay over the rendered coredns manifests: pins the image and
# swaps the chart's ConfigMap volume for the generated one (config.tf).
data "kustomization_overlay" "data" {
  namespace     = var.namespace
  common_labels = local.common-labels
  resources     = [for file in fileset(path.module, "*.yaml") : file if file != "index.yaml"]
  images {
    name     = "coredns/coredns"
    new_name = "${var.image.registry}/${var.image.repository}"
    new_tag  = "${var.image.tag}"
  }
  patches {
    target {
      kind = "Deployment"
      name = "coredns-coredns"
    }
    patch = <<-EOF
      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: coredns-coredns
      spec:
        template:
          spec:
            containers:
              - name: coredns
                image: "${var.image.registry}/${var.image.repository}:${var.image.tag}"
                imagePullPolicy: "${var.image.pullPolicy}"
            # Replace the chart ConfigMap with the generated one, mounting the
            # Corefile and every zone file (see local.items in this file).
            volumes:
              - name: config-volume
                configMap:
                  name: "${var.component}-${var.instance}"
                  items: ${jsonencode(local.items)}
    EOF
  }
}

84
share/dns/index.yaml Normal file
View File

@@ -0,0 +1,84 @@
---
apiVersion: vinyl.solidite.fr/v1beta1
kind: Component
category: share
metadata:
name: dns
  description: CoreDNS based DNS server publishing the configured zones
options:
domain:
default: your-company
examples:
- your-company
type: string
sub-domain:
default: dns
examples:
- dns
type: string
zones:
default: []
items:
properties:
alias:
default: {}
type: object
hosts:
default: {}
type: object
hosts6:
default: {}
type: object
name:
default: local.domain
type: string
wildcard:
default: ''
type: string
wildcard6:
default: ''
type: string
type: object
type: array
image:
default:
pullPolicy: IfNotPresent
registry: docker.io
repository: coredns/coredns
tag: 1.10.1
examples:
- pullPolicy: IfNotPresent
registry: docker.io
repository: coredns/coredns
tag: 1.10.1
properties:
pullPolicy:
default: IfNotPresent
enum:
- Always
- Never
- IfNotPresent
type: string
registry:
default: docker.io
type: string
repository:
default: coredns/coredns
type: string
tag:
default: 1.10.1
type: string
type: object
domain-name:
default: your_company.com
examples:
- your_company.com
type: string
dependencies: []
providers:
kubernetes: true
authentik: null
kubectl: null
postgresql: null
restapi: null
http: null

View File

@@ -0,0 +1,21 @@
# Source: coredns/templates/clusterrolebinding.yaml
# Grants the coredns ServiceAccount the discovery permissions defined in the
# coredns-coredns ClusterRole. NOTE(review): subject namespace is hard-coded
# to vynil-infra by the chart; presumably rewritten by the overlay's
# namespace transform — verify.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: coredns-coredns
  labels:
    app.kubernetes.io/managed-by: "Helm"
    app.kubernetes.io/instance: "coredns"
    helm.sh/chart: "coredns-1.24.1"
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
    app.kubernetes.io/name: coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: coredns-coredns
subjects:
  - kind: ServiceAccount
    name: coredns-coredns
    namespace: vynil-infra

View File

@@ -0,0 +1,31 @@
# Source: coredns/templates/clusterrole.yaml
# Read-only access to endpoints/services/pods/namespaces and EndpointSlices.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: coredns-coredns
  labels:
    app.kubernetes.io/managed-by: "Helm"
    app.kubernetes.io/instance: "coredns"
    helm.sh/chart: "coredns-1.24.1"
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
    app.kubernetes.io/name: coredns
rules:
  - apiGroups:
      - ""
    resources:
      - endpoints
      - services
      - pods
      - namespaces
    verbs:
      - list
      - watch
  - apiGroups:
      - discovery.k8s.io
    resources:
      - endpointslices
    verbs:
      - list
      - watch

View File

@@ -0,0 +1,13 @@
# Source: coredns/templates/serviceaccount.yaml
# ServiceAccount used by the coredns Deployment; bound cluster-wide by the
# coredns-coredns ClusterRoleBinding.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns-coredns
  labels:
    app.kubernetes.io/managed-by: "Helm"
    app.kubernetes.io/instance: "coredns"
    helm.sh/chart: "coredns-1.24.1"
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
    app.kubernetes.io/name: coredns

View File

@@ -0,0 +1,26 @@
# Source: coredns/templates/service-metrics.yaml
# Metrics-only Service: exposes port 9153 and carries the prometheus.io
# annotations used by annotation-based scrape discovery.
apiVersion: v1
kind: Service
metadata:
  name: coredns-coredns-metrics
  labels:
    app.kubernetes.io/managed-by: "Helm"
    app.kubernetes.io/instance: "coredns"
    helm.sh/chart: "coredns-1.24.1"
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
    app.kubernetes.io/name: coredns
    app.kubernetes.io/component: metrics
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
spec:
  selector:
    app.kubernetes.io/instance: "coredns"
    k8s-app: coredns
    app.kubernetes.io/name: coredns
  ports:
    - name: metrics
      port: 9153
      targetPort: 9153

Some files were not shown because too many files have changed in this diff Show More