helm: don't automount service account token when integration is not enabled, improve k8s detection
Signed-off-by: Jens Langhammer <jens.langhammer@beryju.org>
This commit is contained in:
		| @ -1,7 +1,7 @@ | ||||
| """authentik core tasks""" | ||||
| from datetime import datetime | ||||
| from io import StringIO | ||||
| from pathlib import Path | ||||
| from os import environ | ||||
|  | ||||
| from boto3.exceptions import Boto3Error | ||||
| from botocore.exceptions import BotoCoreError, ClientError | ||||
| @ -9,6 +9,7 @@ from dbbackup.db.exceptions import CommandConnectorError | ||||
| from django.contrib.humanize.templatetags.humanize import naturaltime | ||||
| from django.core import management | ||||
| from django.utils.timezone import now | ||||
| from kubernetes.config.incluster_config import SERVICE_HOST_ENV_NAME | ||||
| from structlog.stdlib import get_logger | ||||
|  | ||||
| from authentik.core.models import ExpiringModel | ||||
| @ -40,9 +41,7 @@ def clean_expired_models(self: MonitoredTask): | ||||
| def backup_database(self: MonitoredTask):  # pragma: no cover | ||||
|     """Database backup""" | ||||
|     self.result_timeout_hours = 25 | ||||
|     if Path("/var/run/secrets/kubernetes.io").exists() and not CONFIG.y( | ||||
|         "postgresql.s3_backup" | ||||
|     ): | ||||
|     if SERVICE_HOST_ENV_NAME in environ and not CONFIG.y("postgresql.s3_backup"): | ||||
|         LOGGER.info("Running in k8s and s3 backups are not configured, skipping") | ||||
|         self.set_status( | ||||
|             TaskResult( | ||||
|  | ||||
| @ -39,6 +39,8 @@ class AuthentikOutpostConfig(AppConfig): | ||||
|             KubernetesServiceConnection, | ||||
|         ) | ||||
|  | ||||
|         # Explicitly check against token filename, as that's | ||||
|         # only present when the integration is enabled | ||||
|         if Path(SERVICE_TOKEN_FILENAME).exists(): | ||||
|             LOGGER.debug("Detected in-cluster Kubernetes Config") | ||||
|             if not KubernetesServiceConnection.objects.filter(local=True).exists(): | ||||
|  | ||||
| @ -22,6 +22,7 @@ spec: | ||||
|         app.kubernetes.io/instance: {{ .Release.Name }} | ||||
|         k8s.goauthentik.io/component: web | ||||
|     spec: | ||||
|       automountServiceAccountToken: false | ||||
|       affinity: | ||||
|         podAntiAffinity: | ||||
|           preferredDuringSchedulingIgnoredDuringExecution: | ||||
|  | ||||
| @ -24,6 +24,8 @@ spec: | ||||
|     spec: | ||||
|       {{- if .Values.kubernetesIntegration }} | ||||
|       serviceAccountName: {{ include "authentik.fullname" . }}-sa | ||||
|       {{- else }} | ||||
|       automountServiceAccountToken: false | ||||
|       {{- end }} | ||||
|       affinity: | ||||
|         podAntiAffinity: | ||||
|  | ||||
| @ -2,9 +2,9 @@ | ||||
| import os | ||||
| import warnings | ||||
| from multiprocessing import cpu_count | ||||
| from pathlib import Path | ||||
|  | ||||
| import structlog | ||||
| from kubernetes.config.incluster_config import SERVICE_HOST_ENV_NAME | ||||
|  | ||||
| bind = "0.0.0.0:8000" | ||||
|  | ||||
| @ -48,7 +48,7 @@ logconfig_dict = { | ||||
|  | ||||
| # if we're running in kubernetes, use fixed workers because we can scale with more pods | ||||
| # otherwise (assume docker-compose), use as much as we can | ||||
| if Path("/var/run/secrets/kubernetes.io").exists(): | ||||
| if SERVICE_HOST_ENV_NAME in os.environ: | ||||
|     workers = 2 | ||||
| else: | ||||
|     workers = cpu_count() * 2 + 1 | ||||
|  | ||||
		Reference in New Issue
	
	Block a user
	 Jens Langhammer
					Jens Langhammer