instance-template-v3/templates/backup-cronjob.yaml
{{- if .Values.backups.enabled -}}
# Daily dump → S3.
#
# Architecture: pg_dump runs in the backup container, pipes through gzip,
# and `aws s3 cp -` pushes the stream straight to MEGA S4. A single
# streaming shell pipeline (no init container, no temp file) means the
# dump never lands on the customer server's local disk; the instance
# data and the backup destination stay deliberately separated.
#
# AWS credentials come from a K8s Secret (default `s3-backup-creds`)
# provisioned out-of-band by Tower's bootstrap. Endpoint + bucket +
# prefix are committed in this file's values; only the access/secret
# pair lives in the Secret.
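#
# For orientation, a sketch of the values this template reads (key names
# are taken from the references below; the concrete values are illustrative
# only, real defaults live in the chart's values.yaml):
#
#   instance:
#     code: acme-prod              # becomes the S3 key prefix
#   backups:
#     enabled: true
#     schedule: "15 2 * * *"
#     retain: 7
#     credentialsSecret: s3-backup-creds
#     s3:
#       endpoint: https://s3.example.com
#       region: eu-central-1
#       bucket: odoosky-backups
#
# The credentials Secret only needs the two AWS_* keys referenced below.
# For reference, a Secret of that shape (illustrative; normally created by
# Tower's bootstrap, not by hand):
#
#   kubectl create secret generic s3-backup-creds \
#     --from-literal=AWS_ACCESS_KEY_ID=... \
#     --from-literal=AWS_SECRET_ACCESS_KEY=...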
apiVersion: batch/v1
kind: CronJob
metadata:
  name: {{ include "instance.fullname" . }}-backup
  labels:
    {{- include "instance.labels" . | nindent 4 }}
    odoosky.io/role: backup
spec:
  schedule: {{ .Values.backups.schedule | quote }}
  concurrencyPolicy: Forbid
  successfulJobsHistoryLimit: 5
  failedJobsHistoryLimit: 3
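  # An off-schedule backup can be run by creating a Job from this CronJob
  # (sketch; <name> stands for the rendered fullname):
  #   kubectl create job <name>-backup-manual --from=cronjob/<name>-backup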
  jobTemplate:
    metadata:
      labels:
        {{- include "instance.labels" . | nindent 8 }}
        odoosky.io/role: backup
    spec:
      backoffLimit: 1
      template:
        metadata:
          labels:
            {{- include "instance.labels" . | nindent 12 }}
            odoosky.io/role: backup
        spec:
          restartPolicy: Never
          # The runtime apt-get install below needs root; bitnami images
          # default to a non-root UID, so run this one-shot container as
          # root.
          securityContext:
            runAsUser: 0
          containers:
            - name: pgdump-s3
              # The job needs both pg_dump and the aws CLI. The long-term
              # plan is a purpose-built alpine image with both baked in;
              # for now bitnami's prebuilt image supplies pg_dump and the
              # aws CLI is installed at runtime (see the script below).
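              # A sketch of that purpose-built image (package names assume
              # current Alpine repos; untested):
              #   FROM alpine:3.20
              #   RUN apk add --no-cache bash postgresql16-client aws-cli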
              image: bitnami/postgresql:16
              imagePullPolicy: IfNotPresent
              env:
                - name: PGHOST
                  value: {{ include "instance.fullname" . }}-pg
                - name: PGUSER
                  valueFrom:
                    secretKeyRef:
                      name: {{ include "instance.fullname" . }}-pg
                      key: POSTGRES_USER
                - name: PGPASSWORD
                  valueFrom:
                    secretKeyRef:
                      name: {{ include "instance.fullname" . }}-pg
                      key: POSTGRES_PASSWORD
                - name: PGDATABASE
                  valueFrom:
                    secretKeyRef:
                      name: {{ include "instance.fullname" . }}-pg
                      key: POSTGRES_DB
                - name: AWS_ACCESS_KEY_ID
                  valueFrom:
                    secretKeyRef:
                      name: {{ .Values.backups.credentialsSecret }}
                      key: AWS_ACCESS_KEY_ID
                - name: AWS_SECRET_ACCESS_KEY
                  valueFrom:
                    secretKeyRef:
                      name: {{ .Values.backups.credentialsSecret }}
                      key: AWS_SECRET_ACCESS_KEY
                - name: S3_ENDPOINT
                  value: {{ .Values.backups.s3.endpoint | quote }}
                - name: AWS_DEFAULT_REGION
                  value: {{ .Values.backups.s3.region | quote }}
                - name: S3_BUCKET
                  value: {{ .Values.backups.s3.bucket | quote }}
                - name: S3_PREFIX
                  value: {{ .Values.instance.code | quote }}
                - name: RETAIN
                  value: {{ .Values.backups.retain | quote }}
              command:
                - /bin/bash
                - -c
                - |
                  set -euo pipefail
                  TS=$(date -u +%Y%m%dT%H%M%SZ)
                  KEY="${S3_PREFIX}/${TS}.sql.gz"
                  echo ">>> dumping to s3://${S3_BUCKET}/${KEY}"
                  # Install the aws CLI at runtime (each run starts from a
                  # fresh container, so this happens every time);
                  # bitnami/postgresql is Debian-based, so apt is available.
                  if ! command -v aws >/dev/null 2>&1; then
                    apt-get update -qq && apt-get install -y -qq awscli >/dev/null
                  fi
                  pg_dump --format=plain --clean --if-exists --no-owner --no-acl \
                    | gzip -9 \
                    | aws --endpoint-url "$S3_ENDPOINT" s3 cp - "s3://${S3_BUCKET}/${KEY}"
                  echo ">>> uploaded"
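                  # Restore sketch (not run by this job): the plain-format
                  # dump with --clean/--if-exists replays straight into psql,
                  # e.g.
                  #   aws --endpoint-url "$S3_ENDPOINT" s3 cp "s3://${S3_BUCKET}/${KEY}" - \
                  #     | gunzip | psql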
                  echo ">>> rotating: keep last $RETAIN under ${S3_PREFIX}/"
                  # List all keys under the prefix, sort newest-first, skip
                  # the newest $RETAIN, delete whatever remains.
                  aws --endpoint-url "$S3_ENDPOINT" s3api list-objects-v2 \
                      --bucket "$S3_BUCKET" --prefix "${S3_PREFIX}/" \
                      --query 'Contents[].Key' --output text 2>/dev/null \
                    | tr '\t' '\n' | sort -r | tail -n +$((RETAIN + 1)) \
                    | while read -r OLDKEY; do
                        [ -n "$OLDKEY" ] || continue
                        echo ">>> deleting old: $OLDKEY"
                        aws --endpoint-url "$S3_ENDPOINT" s3 rm "s3://${S3_BUCKET}/${OLDKEY}"
                      done
                  echo ">>> done"
              resources:
                requests:
                  cpu: 100m
                  memory: 256Mi
                limits:
                  cpu: "1"
                  memory: 1Gi
{{- end }}