Backup CronJob: also archive filestore (/var/lib/odoo) to S3 alongside the SQL dump

Pairs each <TS>.sql.gz with a <TS>.filestore.tar.gz under the same prefix; rotation prunes both together. Backup pod runs on the same node as Odoo (podAffinity) and mounts the filestore PVC read-only — RWO permits multiple pods on the same node, so this is safe.

Restore (Tower-side) reads the companion key from S3, scales Odoo to 0, restores DB + filestore, and scales Odoo back up.
This commit is contained in:
Tower Deploy
2026-04-26 23:33:30 +03:00
parent 94fe1bf18b
commit 9dace864d8

View File

@@ -37,6 +37,24 @@ spec:
odoosky.io/role: backup
spec:
restartPolicy: Never
# Land on the same node as the running Odoo pod so the backup
# container can mount the filestore PVC. ReadWriteOnce scopes
# mounting to a single NODE, not a single pod — any number of
# pods co-located on that node can mount the same volume
# simultaneously — so scheduling next to Odoo gives this job
# direct read access to /var/lib/odoo without disturbing Odoo.
# NOTE(review): requiredDuringScheduling means the Job pod stays
# Pending whenever no Odoo pod is running (e.g. Odoo scaled to
# 0 for maintenance) — confirm that is the intended behavior.
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app.kubernetes.io/instance: {{ .Values.instance.code | quote }}
odoosky.io/role: odoo
topologyKey: kubernetes.io/hostname
# Same PVC the Odoo deployment mounts; this pod only reads it
# (the container mounts it readOnly below).
volumes:
- name: filestore
persistentVolumeClaim:
claimName: {{ include "instance.fullname" . }}-odoo
containers:
- name: pgdump-s3
# postgres:16-alpine + `apk add aws-cli` — alpine's
@@ -49,6 +67,10 @@ spec:
# client/server protocol always lines up.
image: "{{ .Values.postgres.image }}:{{ .Values.postgres.tag }}"
imagePullPolicy: IfNotPresent
# Read-only view of the Odoo filestore; the backup script tars
# this path but never writes into it.
volumeMounts:
- name: filestore
mountPath: /var/lib/odoo
readOnly: true
# Standard libpq env vars so pg_dump needs no CLI flags.
env:
- name: PGHOST
value: {{ include "instance.fullname" . }}-pg
@@ -98,24 +120,44 @@ spec:
# element of the pipe failing fails the whole thing.
set -euo pipefail
TS=$(date -u +%Y%m%dT%H%M%SZ)
KEY="${S3_PREFIX}/${TS}.sql.gz"
echo ">>> dumping to s3://${S3_BUCKET}/${KEY}"
SQL_KEY="${S3_PREFIX}/${TS}.sql.gz"
FS_KEY="${S3_PREFIX}/${TS}.filestore.tar.gz"
if ! command -v aws >/dev/null 2>&1; then
apk add --no-cache aws-cli >/dev/null
apk add --no-cache aws-cli tar >/dev/null
fi
echo ">>> dumping DB to s3://${S3_BUCKET}/${SQL_KEY}"
pg_dump --format=plain --clean --if-exists --no-owner --no-acl \
| gzip -9 \
| aws --endpoint-url "$S3_ENDPOINT" s3 cp - "s3://${S3_BUCKET}/${KEY}"
echo ">>> uploaded"
echo ">>> rotating: keep last $RETAIN under ${S3_PREFIX}/"
| aws --endpoint-url "$S3_ENDPOINT" s3 cp - "s3://${S3_BUCKET}/${SQL_KEY}"
echo ">>> archiving filestore to s3://${S3_BUCKET}/${FS_KEY}"
# Archive the filestore tree from /var/lib/odoo. An empty dir
# (fresh instance) is SKIPPED rather than uploaded, so the
# companion object may legitimately be absent — restore code
# treats a missing filestore key as "no filestore captured for
# this snapshot" (same as for pre-filestore backups).
# NOTE(review): the tar runs while Odoo may still be writing,
# so the filestore archive is not atomic with the DB dump —
# confirm that crash-consistency is acceptable for restores.
if [ -d /var/lib/odoo ] && [ -n "$(ls -A /var/lib/odoo 2>/dev/null)" ]; then
tar -czf - -C /var/lib/odoo . \
| aws --endpoint-url "$S3_ENDPOINT" s3 cp - "s3://${S3_BUCKET}/${FS_KEY}"
else
echo "(filestore empty; skipping archive)"
fi
echo ">>> rotating: keep last $RETAIN snapshots under ${S3_PREFIX}/"
# Rotation counts snapshots, not objects: list only the
# .sql.gz keys, keep the newest $RETAIN (lexicographic sort -r
# is chronological because TS is fixed-width UTC), and for
# each pruned dump also delete its companion .filestore.tar.gz
# derived from the shared timestamp prefix.
aws --endpoint-url "$S3_ENDPOINT" s3api list-objects-v2 \
--bucket "$S3_BUCKET" --prefix "${S3_PREFIX}/" \
--query 'Contents[].Key' --output text 2>/dev/null \
| tr '\t' '\n' | sort -r | tail -n +$((RETAIN + 1)) \
| while read OLDKEY; do
[ -n "$OLDKEY" ] || continue
echo ">>> deleting old: $OLDKEY"
aws --endpoint-url "$S3_ENDPOINT" s3 rm "s3://${S3_BUCKET}/${OLDKEY}"
| tr '\t' '\n' \
| grep -E '\.sql\.gz$' \
| sort -r | tail -n +$((RETAIN + 1)) \
| while read OLDSQL; do
[ -n "$OLDSQL" ] || continue
OLDFS="${OLDSQL%.sql.gz}.filestore.tar.gz"
echo ">>> deleting: $OLDSQL + $OLDFS (if present)"
aws --endpoint-url "$S3_ENDPOINT" s3 rm "s3://${S3_BUCKET}/${OLDSQL}" || true
aws --endpoint-url "$S3_ENDPOINT" s3 rm "s3://${S3_BUCKET}/${OLDFS}" 2>/dev/null || true
done
echo ">>> done"
resources: