From 591f6478c8d75ed72e0ae0e3fd2f927b92648829 Mon Sep 17 00:00:00 2001 From: Reza Chalak Date: Sat, 21 Oct 2023 23:31:20 +0330 Subject: [PATCH] project structures updated --- .../backup-zen-docker-images-latest.yml | 2 + .github/workflows/release-helm.yml | 26 +++ {helm => charts/backup-zen}/.helmignore | 0 charts/backup-zen/Chart.yaml | 6 + .../backup-zen}/templates/NOTES.txt | 0 .../backup-zen}/templates/_helpers.tpl | 6 +- .../templates/backup-config-configmap.yml | 0 .../templates/backup-creds-secret.yml | 0 .../backup-zen}/templates/cronjob.yml | 0 .../backup-zen}/templates/namespace.yml | 0 .../templates/objectStorage-secret.yml | 0 {helm => charts/backup-zen}/templates/pvc.yml | 0 {helm => charts/backup-zen}/values.yaml | 2 +- helm/Chart.yaml | 4 - helm/k8s.yaml | 176 --------------- mongo_backup/README.md | 9 - mongo_backup/main.sh | 82 ------- mongo_backup/mongo_backup.config | 71 ------ mongo_backup/mongo_backup_rotated.sh | 100 --------- mysql_backup/main.sh | 83 ------- mysql_backup/mysql_backup.config | 69 ------ mysql_backup/mysql_backup_rotated.sh | 148 ------------- pg_backup/README.md | 2 - pg_backup/main.sh | 83 ------- pg_backup/pg_backup.config | 79 ------- pg_backup/pg_backup.sh | 166 -------------- pg_backup/pg_backup_rotated.sh | 206 ------------------ 27 files changed, 38 insertions(+), 1282 deletions(-) create mode 100644 .github/workflows/release-helm.yml rename {helm => charts/backup-zen}/.helmignore (100%) create mode 100644 charts/backup-zen/Chart.yaml rename {helm => charts/backup-zen}/templates/NOTES.txt (100%) rename {helm => charts/backup-zen}/templates/_helpers.tpl (97%) rename {helm => charts/backup-zen}/templates/backup-config-configmap.yml (100%) rename {helm => charts/backup-zen}/templates/backup-creds-secret.yml (100%) rename {helm => charts/backup-zen}/templates/cronjob.yml (100%) rename {helm => charts/backup-zen}/templates/namespace.yml (100%) rename {helm => charts/backup-zen}/templates/objectStorage-secret.yml (100%) rename {helm => charts/backup-zen}/templates/pvc.yml (100%) rename {helm => charts/backup-zen}/values.yaml (98%) delete mode 100644 helm/Chart.yaml delete mode 100644 helm/k8s.yaml delete mode 100644 mongo_backup/README.md delete mode 100755 mongo_backup/main.sh delete mode 100644 mongo_backup/mongo_backup.config delete mode 100755 mongo_backup/mongo_backup_rotated.sh delete mode 100755 mysql_backup/main.sh delete mode 100644 mysql_backup/mysql_backup.config delete mode 100755 mysql_backup/mysql_backup_rotated.sh delete mode 100644 pg_backup/README.md delete mode 100755 pg_backup/main.sh delete mode 100644 pg_backup/pg_backup.config delete mode 100755 pg_backup/pg_backup.sh delete mode 100755 pg_backup/pg_backup_rotated.sh diff --git a/.github/workflows/backup-zen-docker-images-latest.yml b/.github/workflows/backup-zen-docker-images-latest.yml index 9a16b7f..323d4d1 100644 --- a/.github/workflows/backup-zen-docker-images-latest.yml +++ b/.github/workflows/backup-zen-docker-images-latest.yml @@ -3,8 +3,10 @@ name: Backup-zen Docker Image CI latest on: push: branches: [ "main" ] + paths: [ "docker" ] pull_request: branches: [ "main" ] + paths: [ "docker" ] jobs: build: diff --git a/.github/workflows/release-helm.yml b/.github/workflows/release-helm.yml new file mode 100644 index 0000000..9967fb4 --- /dev/null +++ b/.github/workflows/release-helm.yml @@ -0,0 +1,26 @@ +name: Release Charts + +on: + push: + branches: + - main +permissions: + contents: write +jobs: + release: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: 
actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Configure Git + run: | + git config user.name "$GITHUB_ACTOR" + git config user.email "$GITHUB_ACTOR@users.noreply.github.com" + + - name: Run chart-releaser + uses: helm/chart-releaser-action@v1.5.0 + env: + CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" \ No newline at end of file diff --git a/helm/.helmignore b/charts/backup-zen/.helmignore similarity index 100% rename from helm/.helmignore rename to charts/backup-zen/.helmignore diff --git a/charts/backup-zen/Chart.yaml b/charts/backup-zen/Chart.yaml new file mode 100644 index 0000000..ff9417d --- /dev/null +++ b/charts/backup-zen/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +description: BackupZen is a solution for backing up and uploading various database types, leveraging the power of K8s cronjobs. +name: backup-zen +version: 0.1.0 +appVersion: 1.0.0 +kubeVersion: ">= 1.20.0" \ No newline at end of file diff --git a/helm/templates/NOTES.txt b/charts/backup-zen/templates/NOTES.txt similarity index 100% rename from helm/templates/NOTES.txt rename to charts/backup-zen/templates/NOTES.txt diff --git a/helm/templates/_helpers.tpl b/charts/backup-zen/templates/_helpers.tpl similarity index 97% rename from helm/templates/_helpers.tpl rename to charts/backup-zen/templates/_helpers.tpl index 109150d..9dce0fd 100644 --- a/helm/templates/_helpers.tpl +++ b/charts/backup-zen/templates/_helpers.tpl @@ -93,11 +93,11 @@ Create Dumper image address {{- if .Values.cronjob.image -}} {{- .Values.cronjob.image -}} {{- else if eq .Values.databaseType "PostgreSQL" -}} - backupzen/pg:{{ .Chart.Version }} + backupzen/pg:{{ .Chart.AppVersion }} {{- else if eq .Values.databaseType "MySQL" -}} - backupzen/mysql:{{ .Chart.Version }} + backupzen/mysql:{{ .Chart.AppVersion }} {{- else if eq .Values.databaseType "MongoDB" -}} - backupzen/mongo:{{ .Chart.Version }} + backupzen/mongo:{{ .Chart.AppVersion }} {{- end -}} {{- end -}} diff --git a/helm/templates/backup-config-configmap.yml b/charts/backup-zen/templates/backup-config-configmap.yml similarity index 100% rename from helm/templates/backup-config-configmap.yml rename to charts/backup-zen/templates/backup-config-configmap.yml diff --git a/helm/templates/backup-creds-secret.yml b/charts/backup-zen/templates/backup-creds-secret.yml similarity index 100% rename from helm/templates/backup-creds-secret.yml rename to charts/backup-zen/templates/backup-creds-secret.yml diff --git a/helm/templates/cronjob.yml b/charts/backup-zen/templates/cronjob.yml similarity index 100% rename from helm/templates/cronjob.yml rename to charts/backup-zen/templates/cronjob.yml diff --git a/helm/templates/namespace.yml b/charts/backup-zen/templates/namespace.yml similarity index 100% rename from helm/templates/namespace.yml rename to charts/backup-zen/templates/namespace.yml diff --git a/helm/templates/objectStorage-secret.yml b/charts/backup-zen/templates/objectStorage-secret.yml similarity index 100% rename from helm/templates/objectStorage-secret.yml rename to charts/backup-zen/templates/objectStorage-secret.yml diff --git a/helm/templates/pvc.yml b/charts/backup-zen/templates/pvc.yml similarity index 100% rename from helm/templates/pvc.yml rename to charts/backup-zen/templates/pvc.yml diff --git a/helm/values.yaml b/charts/backup-zen/values.yaml similarity index 98% rename from helm/values.yaml rename to charts/backup-zen/values.yaml index eee3047..7e25481 100644 --- a/helm/values.yaml +++ b/charts/backup-zen/values.yaml @@ -31,7 +31,7 @@ global: failedTeamsURL: 
https://myorg.webhook.office.com/webhookb2/blob-blob-blob cronjob: - image: mrezachalak/pg-backup-zen:12.1 + # image: mrezachalak/pg-backup-zen:12.1 pullPolicy: Always imagePullSecrets: [] restartPolicy: Never diff --git a/helm/Chart.yaml b/helm/Chart.yaml deleted file mode 100644 index cbb47ab..0000000 --- a/helm/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -description: A Helm chart for dumping databases with K8s cronjobs and uploading to object storages -name: backup-zen -version: 1.0.0 \ No newline at end of file diff --git a/helm/k8s.yaml b/helm/k8s.yaml deleted file mode 100644 index 487771e..0000000 --- a/helm/k8s.yaml +++ /dev/null @@ -1,176 +0,0 @@ ---- -# Source: backup-zen/templates/namespace.yml -apiVersion: v1 -kind: Namespace -metadata: - name: db-backups - labels: - app.kubernetes.io/name: backup-zen - app.kubernetes.io/instance: release-name - app.kubernetes.io/managed-by: Helm - helm.sh/chart: backup-zen-1.0.0 ---- -# Source: backup-zen/templates/backup-creds-secret.yml -apiVersion: v1 -kind: Secret -metadata: - name: backup-zen-release-name-backup-creds - labels: - app.kubernetes.io/name: backup-zen - app.kubernetes.io/instance: release-name - app.kubernetes.io/managed-by: Helm - helm.sh/chart: backup-zen-1.0.0 - namespace: db-backups -data: - creds.json: W3siZGF0YWJhc2VfbmFtZSI6ImRiMSIsInBhc3N3b3JkIjoicGFzc3dvcmQxIiwidXNlcm5hbWUiOiJ1c2VyMSJ9LHsiZGF0YWJhc2VfbmFtZSI6ImRiMiIsInBhc3N3b3JkIjoicGFzc3dvcmQyIiwidXNlcm5hbWUiOiJ1c2VyMiJ9XQ== -type: Opaque ---- -# Source: backup-zen/templates/objectStorage-secret.yml -apiVersion: v1 -kind: Secret -metadata: - name: backup-zen-release-name-object-storage-creds - labels: - app.kubernetes.io/name: backup-zen - app.kubernetes.io/instance: release-name - app.kubernetes.io/managed-by: Helm - helm.sh/chart: backup-zen-1.0.0 - namespace: db-backups -data: - MINIO_ACCESS_KEY_ID: YWNjZXNzLWtleQ== - MINIO_URL: aHR0cHM6Ly9sb2NhbGhvc3Q= - MINIO_SECRET_ACCESS_KEY: c2VjcmV0 - BUCKET_NAME: YmFja3VwemVuLW1pbmlv - OBJECT_NAME: YzM0MmpoYzM0Z2Y= -type: Opaque ---- -# Source: backup-zen/templates/backup-config-configmap.yml -apiVersion: v1 -kind: ConfigMap -metadata: - name: backup-zen-release-name-backup-config - labels: - app.kubernetes.io/name: backup-zen - app.kubernetes.io/instance: release-name - app.kubernetes.io/managed-by: Helm - helm.sh/chart: backup-zen-1.0.0 - namespace: db-backups -data: - DB_HOST: "mydb.rds.amazonaws.com" - DB_PORT: "54325" - TEAMS_NOTIFICATION: "true" - DAYS_TO_KEEP: "5" - DAY_OF_WEEK_TO_KEEP: "5" - WEEKS_TO_KEEP: "7" - FAILED_TEAMS_URL: "https://myorg.webhook.office.com/webhookb2/blob-blob-blob" - SUCCEEDED_TEAMS_URL: "https://myorg.webhook.office.com/webhookb2/blob-blob-blob" ---- -# Source: backup-zen/templates/pvc.yml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: mydb-backup - namespace: db-backups - labels: - app.kubernetes.io/name: backup-zen - app.kubernetes.io/instance: release-name - app.kubernetes.io/managed-by: Helm - helm.sh/chart: backup-zen-1.0.0 - -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi - storageClassName: standard - volumeMode: Filesystem ---- -# Source: backup-zen/templates/cronjob.yml -apiVersion: batch/v1 -kind: CronJob -metadata: - name: backup-zen-release-name - namespace: db-backups -spec: - jobTemplate: - spec: - template: - spec: - initContainers: - - args: - - /app/pg_backup_rotated.sh - envFrom: - - configMapRef: - name: backup-zen-release-name-backup-config - - secretRef: - name: backup-zen-release-name-backup-creds - env: - 
- name: BACKUP_DIR - value: /backups - - name: KUBERNETES_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: mrezachalak/pg-backup-zen:12.1 - imagePullPolicy: Always - name: dumper - resources: {} - volumeMounts: - - mountPath: /backups - name: backups - - mountPath: /app/creds.json - subPath: creds.json - name: credentials - readOnly: true - - containers: - - command: - - "bash" - - "-c" - args: - - > - mc alias set backupUploader $(MC_ENDPOINT) $(MC_ACCESS_KEY) $(MC_SECRET_KEY) && mc mirror --remove /backups backupUploader/$(OBJECT_NAME)/backup-zen-release-name/ && curl -H 'Content-Type: application/json' -d '{"title": "MongoDB Backups have been successfully synchronized with the Bucket.","text":"
Pod name: $(KUBERNETES_POD_NAME)\nHost Address: $(DB_HOST)
"}' $(SUCCEEDED_TEAMS_URL) || curl -H 'Content-Type: application/json' -d '{"title": "MongoDB Backups failed to synchronize with the bucket.","text":"
Pod name: $(KUBERNETES_POD_NAME)\nHost Address: $(DB_HOST)
"}' $(FAILED_TEAMS_URL) - envFrom: - - configMapRef: - name: backup-zen-release-name-backup-config - - secretRef: - name: backup-zen-release-name-object-storage-creds - env: - - name: BACKUP_DIR - value: /backups - - name: KUBERNETES_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: mrezachalak/pg-backup-zen:12.1 - imagePullPolicy: Always - name: uploader - resources: - requests: - cpu: "1" - memory: 1Gi - limits: - cpu: "2" - memory: 2Gi - volumeMounts: - - mountPath: /backups - name: backups - imagePullSecrets: [] - restartPolicy: Never - terminationGracePeriodSeconds: 30 - volumes: - - name: backups - persistentVolumeClaim: - claimName: mydb-backup - - name: credentials - secret: - defaultMode: 0400 - items: - - key: creds.json - path: creds.json - secretName: backup-zen-release-name-backup-creds - concurrencyPolicy: Forbid - failedJobsHistoryLimit: 3 - schedule: 0 0 * * * - successfulJobsHistoryLimit: 3 diff --git a/mongo_backup/README.md b/mongo_backup/README.md deleted file mode 100644 index 1c349b2..0000000 --- a/mongo_backup/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# MongoDB Full Backup -## What is going to when this backup has ran? -Simply, the whole collections available in your mongodb will be backed up. - -## How to use? -1. Fill out mongo_backup.config -2. Run main.sh on your backup server -3. Then if succeeded Run `crontab -e` then add '0 2 * * * /bin/bash /path/to/this/repo/mongo_backup/main.sh' to your crontabs -4. You can also use [healthcheck.io](https://healthchecks.io/) to get notification if anything went wrong, i.e. if it did not ran. diff --git a/mongo_backup/main.sh b/mongo_backup/main.sh deleted file mode 100755 index a38b499..0000000 --- a/mongo_backup/main.sh +++ /dev/null @@ -1,82 +0,0 @@ -#!/bin/bash -set -e -BACKUP_TIME=$(date +%Y%m%d%H%M) -AWS_COMMAND=$(which aws) - -########################### -####### LOAD CONFIG ####### -########################### - -while [ $# -gt 0 ]; do - case $1 in - -c) - CONFIG_FILE_PATH="$2" - shift 2 - ;; - *) - echo "Unknown Option \"$1\"" 1>&2 - exit 2 - ;; - esac -done - -if [ -z $CONFIG_FILE_PATH ] ; then - SCRIPTPATH=$(cd ${0%/*} && pwd -P) - CONFIG_FILE_PATH="${SCRIPTPATH}/mongo_backup.config" -fi - -if [ ! -r ${CONFIG_FILE_PATH} ] ; then - echo "Could not load config file from ${CONFIG_FILE_PATH}" 1>&2 - exit 1 -fi - -source "${CONFIG_FILE_PATH}" - - -########################### -####### INIT CONFIG ####### -########################### - -if $CREDENTIALS_IN_K8S; then - export KUBECONFIG=$CREDENTIALS_KUBECONFIG - export MONGODB_ROOT_PASSWORD=$(kubectl get secret $DB_CRED_SECRET --namespace $CREDENTIAL_NAMESPACE -o jsonpath="{.data.password}" | base64 --decode) -fi - -# Log files -mkdir -p $BACKUP_DIR/logs -LOG=$BACKUP_DIR/logs/log-$BACKUP_TIME.txt -echo \n\n\n >> $LOG -date | tee -a $LOG -echo \n >> $LOG -ps 2>&1 | tee -a $LOG - - -########################### -###### START PROCESS ###### -########################### - -# Port forward to mongodb statefulset on k8s cluster -if $K8S_MONGODB; then - export KUBECONFIG=$MONGODB_KUBECONFIG - kubectl port-forward statefulset/$MONGODB_STATEFULSET_NAME -n $MONGODB_NAMESPACE $PORT:$MONGODB_STATEFULSET_PORT & - pidofkubectl=$! 
-fi - -# run backup process -$SCRIPTPATH/mongo_backup_rotated.sh - -if $K8S_MONGODB; then - kill -9 $pidofkubectl -fi - -########################### -###### SYNC WITH S3 ###### -########################### - -##### Copy backups to AWS S3 bucket ##### -echo "aws sync starting" | tee -a $LOG -$AWS_COMMAND s3 sync $BACKUP_DIR s3://$S3_BACKUP_BUCKET/mongodb --delete --profile $AWS_PROFILE 2>&1 | tee -a $LOG - -echo "aws sync success" |tee -a $LOG -date | tee -a $LOG -echo "Mongodb backups has synced successfully" | mail -s "Mongodb sync success" $NOTIFICATION_EMAIL_ADDRESS diff --git a/mongo_backup/mongo_backup.config b/mongo_backup/mongo_backup.config deleted file mode 100644 index fc84748..0000000 --- a/mongo_backup/mongo_backup.config +++ /dev/null @@ -1,71 +0,0 @@ -############################## -## MONGODB BACKUP CONFIG ## -############################## - -# Optional system user to run backups as. If the user, the script is running as, doesn't match this -# the script terminates. Leave blank to skip check. -BACKUP_USER= - -# Optional if your mongodb is hosted on k8s set to true. Fill settings for -K8S_MONGODB=false - -# Optional hostname. Will default to "localhost" if none specified. -HOSTNAME=localhost - - -# Optional username to connect to database as. Will default to "root" if none specified. -USERNAME= - -# Port number to expose on your host in order to connect with -PORT=27017 - -# This dir will be created if it doesn't exist. This must be writable by the user, the script is -# running as. -BACKUP_DIR=~/db-backups/mongodb/ - -# Will produce gzipped dump containing the cluster globals, like users and passwords, if set to "yes" -ENABLE_GLOBALS_BACKUPS=yes - -##### SETTINGS FOR AWS S3 #### - -# Will put backups to this bucket -S3_BACKUP_BUCKET=bucket_name - -# The aws profile having access to the bucket -AWS_PROFILE=your-profile-with-s3-access - -#### SETTINGS FOR CREDENTIALS #### - -# Will use k8s secrets to read db password from credentials secret in $CREDENTIALS_NAMESPACE -# Will read password field in $DB_CREDENTIALS_SECRET -CREDENTIALS_IN_K8S=false -DB_CREDENTIALS_SECRET=mongodb-credentials -CREDENTIALS_KUBECONFIG=~/.kube/config.credentials -CREDENTIALS_NAMESPACE=credentials - -# Password explicitly -MONGODB_PASSWORD=put-your-password-here-or-put-it-in-your-k8s-secrets - -##### SETTINGS FOR KUBERNETES CLUSTER #### -# If K8S_MONGODB set to false this configs will not be used. - -MONGODB_KUBECONFIG=~/.kube/config.mongodb -MONGODB_NAMESPACE=mongodb-production -MONGODB_STATEFULSET_NAME=mongodb -MONGODB_STATEFULSET_PORT=27017 - -#### SETTINGS FOR NOTIFICATION #### -NOTIFICATION_EMAIL_ADDRESS=alerts@example.com - -#### SETTINGS FOR ROTATED BACKUPS #### - -# Which day to take the weekly backup from (1-7 = Monday-Sunday) -DAY_OF_WEEK_TO_KEEP=5 - -# Number of days to keep daily backups -DAYS_TO_KEEP=7 - -# How many weeks to keep weekly backups -WEEKS_TO_KEEP=5 - -###################################### diff --git a/mongo_backup/mongo_backup_rotated.sh b/mongo_backup/mongo_backup_rotated.sh deleted file mode 100755 index b1f37b9..0000000 --- a/mongo_backup/mongo_backup_rotated.sh +++ /dev/null @@ -1,100 +0,0 @@ -#!/bin/bash - - -########################### -#### PRE-BACKUP CHECKS #### -########################### - -# Make sure we're running as the required backup user -if [ "$BACKUP_USER" != "" -a "$(id -un)" != "$BACKUP_USER" ] ; then - echo "This script must be run as $BACKUP_USER. Exiting." 
1>&2 - exit 1 -fi - - -########################### -### INITIALISE DEFAULTS ### -########################### - -if [ ! $HOSTNAME ]; then - HOSTNAME="localhost" -fi; - -if [ ! $USERNAME ]; then - USERNAME="root" -fi; - - -########################### -#### START THE BACKUPS #### -########################### - -function perform_backups() -{ - SUFFIX=$1 - FINAL_BACKUP_DIR=$BACKUP_DIR"`date +\%Y-\%m-\%d`$SUFFIX/" - - echo "Making backup directory in $FINAL_BACKUP_DIR" - - if ! mkdir -p $FINAL_BACKUP_DIR; then - echo "Cannot create backup directory in $FINAL_BACKUP_DIR. Go and fix it!" 1>&2 - exit 1; - fi; - - ####################### - ### GLOBALS BACKUPS ### - ####################### - - echo -e "\n\nPerforming globals backup" - echo -e "--------------------------------------------\n" - - if [ $ENABLE_GLOBALS_BACKUPS = "yes" ] - then - echo "Globals backup" - - set -o pipefail - if ! mongodump --quiet --host localhost --port 27017 --authenticationDatabase admin --username root --password "$MONGODB_PASSWORD" --gzip --numParallelCollections=8 --out=$FINAL_BACKUP_DIR ; then - echo "[!!ERROR!!] Failed to produce backup" 1>&2 - fi - set +o pipefail - else - echo "None" - fi - echo -e "\nAll database backups complete!" -} - -# MONTHLY BACKUPS - -DAY_OF_MONTH=`date +%d` - -if [ $DAY_OF_MONTH -eq 1 ]; -then - # Delete all expired monthly directories - find $BACKUP_DIR -maxdepth 1 -name "*-monthly" -exec rm -rf '{}' ';' - - perform_backups "-monthly" - - exit 0; -fi - -# WEEKLY BACKUPS - -DAY_OF_WEEK=`date +%u` #1-7 (Monday-Sunday) -EXPIRED_DAYS=`expr $((($WEEKS_TO_KEEP * 7) + 1))` - -if [ $DAY_OF_WEEK = $DAY_OF_WEEK_TO_KEEP ]; -then - # Delete all expired weekly directories - find $BACKUP_DIR -maxdepth 1 -mtime +$EXPIRED_DAYS -name "*-weekly" -exec rm -rf '{}' ';' - - perform_backups "-weekly" - - exit 0; -fi - -# DAILY BACKUPS - -# Delete daily backups 7 days old or more -find $BACKUP_DIR -maxdepth 1 -mtime +$DAYS_TO_KEEP -name "*-daily" -exec rm -rf '{}' ';' - -perform_backups "-daily" diff --git a/mysql_backup/main.sh b/mysql_backup/main.sh deleted file mode 100755 index 35ac5bc..0000000 --- a/mysql_backup/main.sh +++ /dev/null @@ -1,83 +0,0 @@ -#!/bin/bash -set -e -BACKUP_TIME=$(date +%Y%m%d%H%M) -AWS_COMMAND=$(which aws) -########################### -####### LOAD CONFIG ####### -########################### - -while [ $# -gt 0 ]; do - case $1 in - -c) - CONFIG_FILE_PATH="$2" - shift 2 - ;; - *) - ${ECHO} "Unknown Option \"$1\"" 1>&2 - exit 2 - ;; - esac -done - -if [ -z $CONFIG_FILE_PATH ] ; then - SCRIPTPATH=$(cd ${0%/*} && pwd -P) - CONFIG_FILE_PATH="${SCRIPTPATH}/mongo_backup.config" -fi - -if [ ! 
-r ${CONFIG_FILE_PATH} ] ; then - echo "Could not load config file from ${CONFIG_FILE_PATH}" 1>&2 - exit 1 -fi - -source "${CONFIG_FILE_PATH}" - - -########################### -####### INIT CONFIG ####### -########################### - -if $CREDENTIALS_IN_K8S; then - export KUBECONFIG=$CREDENTIALS_KUBECONFIG - export MYSQL_ROOT_PASSWORD=$(kubectl get secret $DB_CRED_SECRET --namespace $CREDENTIAL_NAMESPACE -o jsonpath="{.data.password}" | base64 --decode) -fi - -# Log files -mkdir -p $BACKUP_DIR/logs -LOG=$BACKUP_DIR/logs/log-$BACKUP_TIME.txt -echo \n\n\n >> $LOG -date | tee -a $LOG -echo \n >> $LOG -ps 2>&1 | tee -a $LOG - - -########################### -###### START PROCESS ###### -########################### - -# Port forward to mysql statefulset on k8s cluster -if $K8S_MYSQL; then - export KUBECONFIG=$MYSQL_KUBECONFIG - kubectl port-forward statefulset/$MYSQL_STATEFULSET_NAME -n $MYSQL_NAMESPACE $PORT:$MYSQL_STATEFULSET_PORT & - pidofkubectl=$! -fi - - -# run backup process -SCRIPTPATH=$(cd ${0%/*} && pwd -P) -$SCRIPTPATH/mysql_backup_rotated.sh - -if $K8S_MYSQL; then - kill -9 $pidofkubectl -fi - -########################### -###### SYNC WITH S3 ###### -########################### - -##### Copy backups to AWS S3 bucket ##### -echo "aws sync starting" | tee -a $LOG -$AWS_COMMAND s3 sync $BACKUP_DIR s3://$S3_BACKUP_BUCKET/mysql --delete --profile $AWS_PROFILE 2>&1 | tee -a $LOG - -echo "aws sync success" |tee -a $LOG -date | tee -a $LOG -echo "MySQL backups has synced successfully" | mail -s "MySQL sync success" $NOTIFICATION_EMAIL_ADDRESS diff --git a/mysql_backup/mysql_backup.config b/mysql_backup/mysql_backup.config deleted file mode 100644 index d2fb181..0000000 --- a/mysql_backup/mysql_backup.config +++ /dev/null @@ -1,69 +0,0 @@ -############################## -## MONGODB BACKUP CONFIG ## -############################## - -# Optional system user to run backups as. If the user the script is running as doesn't match this -# the script terminates. Leave blank to skip check. -BACKUP_USER= - -# Optional hostname to adhere to pg_hba policies. Will default to "localhost" if none specified. -HOSTNAME=127.0.0.1 - -# Optional username to connect to database as. Will default to "postgres" if none specified. -USERNAME= - -# This dir will be created if it doesn't exist. This must be writable by the user the script is -# running as. -BACKUP_DIR=~/db-backups/mysql/ - -# Will produce gzipped sql file containing the cluster globals, like users and passwords, if set to "yes" -ENABLE_GLOBALS_BACKUPS=yes - -# Specify databases to skip -SKIPPED_DATABASES= - -# Optional if your mongodb is hosted on k8s set to true. Fill settings for -K8S_MYSQL=false - -##### SETTINGS FOR AWS S3 #### - -# Will put backups to this bucket -S3_BACKUP_BUCKET=bucket_name - -# The aws profile having access to the bucket -AWS_PROFILE=your-profile-with-s3-access - -#### SETTINGS FOR CREDENTIALS #### - -# Will use k8s secrets to read db password from credentials secret in $CREDENTIALS_NAMESPACE -# Will read password field in $DB_CREDENTIALS_SECRET -CREDENTIALS_IN_K8S=false -DB_CRED_SECRET=mysqlPassword -CREDENTIALS_KUBECONFIG=~/.kube/config.credentials -CREDENTIALS_NAMESPACE=credentials - -# Otherwise set password explicitly here -MYSQL_PASSWORD=mysql-password - -##### SETTINGS FOR KUBERNETES CLUSTER #### -# If K8S_MYSQL set to false this configs will not be used. 
-MYSQL_KUBECONFIG=~/.kube/config.mysql -MYSQL_NAMESPACE=production -MYSQL_STATEFULSET_NAME=mysql -MYSQL_STATEFULSET_PORT=3306 - -#### SETTINGS FOR NOTIFICATION #### -NOTIFICATION_EMAIL_ADDRESS=alerts@example.com - -#### SETTINGS FOR ROTATED BACKUPS #### - -# Which day to take the weekly backup from (1-7 = Monday-Sunday) -DAY_OF_WEEK_TO_KEEP=5 - -# Number of days to keep daily backups -DAYS_TO_KEEP=7 - -# How many weeks to keep weekly backups -WEEKS_TO_KEEP=5 - -###################################### diff --git a/mysql_backup/mysql_backup_rotated.sh b/mysql_backup/mysql_backup_rotated.sh deleted file mode 100755 index f005284..0000000 --- a/mysql_backup/mysql_backup_rotated.sh +++ /dev/null @@ -1,148 +0,0 @@ -#!/bin/bash - -########################### -####### LOAD CONFIG ####### -########################### - -while [ $# -gt 0 ]; do - case $1 in - -c) - CONFIG_FILE_PATH="$2" - shift 2 - ;; - *) - ${ECHO} "Unknown Option \"$1\"" 1>&2 - exit 2 - ;; - esac -done - -if [ -z $CONFIG_FILE_PATH ] ; then - SCRIPTPATH=$(cd ${0%/*} && pwd -P) - CONFIG_FILE_PATH="${SCRIPTPATH}/mysql_backup.config" -fi - -if [ ! -r ${CONFIG_FILE_PATH} ] ; then - echo "Could not load config file from ${CONFIG_FILE_PATH}" 1>&2 - exit 1 -fi - -source "${CONFIG_FILE_PATH}" - -########################### -#### PRE-BACKUP CHECKS #### -########################### - -# Make sure we're running as the required backup user -if [ "$BACKUP_USER" != "" -a "$(id -un)" != "$BACKUP_USER" ] ; then - echo "This script must be run as $BACKUP_USER. Exiting." 1>&2 - exit 1 -fi - - -########################### -### INITIALISE DEFAULTS ### -########################### - -if [ ! $HOSTNAME ]; then - HOSTNAME="localhost" -fi; - -if [ ! $USERNAME ]; then - USERNAME="root" -fi; - - -########################### -#### START THE BACKUPS #### -########################### - -function perform_backups() -{ - SUFFIX=$1 - FINAL_BACKUP_DIR=$BACKUP_DIR"`date +\%Y-\%m-\%d`$SUFFIX/" - - echo "Making backup directory in $FINAL_BACKUP_DIR" - - if ! mkdir -p $FINAL_BACKUP_DIR; then - echo "Cannot create backup directory in $FINAL_BACKUP_DIR. Go and fix it!" 1>&2 - exit 1; - fi; - - - ########################### - ###### FULL BACKUPS ####### - ########################### - - - echo -e "\n\nPerforming full backups" - echo -e "--------------------------------------------\n" - - # Get all database list first - DBS="$(mysql -u $USERNAME -h $HOSTNAME -p$MYSQL_ROOT_PASSWORD -Bse 'show databases')" - - for db in $DBS - do - skipdb=-1 - if [ "$SKIPPED_DATABASES" != "" ]; - then - for i in $SKIPPED_DATABASES - do - [ "$db" == "$i" ] && skipdb=1 || : - done - fi - - if [ "$skipdb" == "-1" ] ; then - FILE="$FINAL_BACKUP_DIR/$db.gz" - # do all inone job in pipe, - # connect to mysql using mysqldump for select mysql database - # and pipe it out to gz file in backup dir :) - mysqldump -u $USERNAME -h $HOSTNAME --port=3306 -p$MYSQL_ROOT_PASSWORD $db | gzip -9 > $FILE.in_progress - if [ $? != 0 ]; then - echo "[!!ERROR!!] Failed to produce backup database $db" - else - mv $FILE.in_progress $FILE - fi - - fi - done - - - echo -e "\nAll database backups complete!" 
-} - -# MONTHLY BACKUPS - -DAY_OF_MONTH=`date +%d` - -if [ $DAY_OF_MONTH -eq 1 ]; -then - # Delete all expired monthly directories - find $BACKUP_DIR -maxdepth 1 -name "*-monthly" -exec rm -rf '{}' ';' - - perform_backups "-monthly" - - exit 0; -fi - -# WEEKLY BACKUPS - -DAY_OF_WEEK=`date +%u` #1-7 (Monday-Sunday) -EXPIRED_DAYS=`expr $((($WEEKS_TO_KEEP * 7) + 1))` - -if [ $DAY_OF_WEEK = $DAY_OF_WEEK_TO_KEEP ]; -then - # Delete all expired weekly directories - find $BACKUP_DIR -maxdepth 1 -mtime +$EXPIRED_DAYS -name "*-weekly" -exec rm -rf '{}' ';' - - perform_backups "-weekly" - - exit 0; -fi - -# DAILY BACKUPS - -# Delete daily backups 7 days old or more -find $BACKUP_DIR -maxdepth 1 -mtime +$DAYS_TO_KEEP -name "*-daily" -exec rm -rf '{}' ';' - -perform_backups "-daily" diff --git a/pg_backup/README.md b/pg_backup/README.md deleted file mode 100644 index b2a43fb..0000000 --- a/pg_backup/README.md +++ /dev/null @@ -1,2 +0,0 @@ -# postgreSQL backup scripts has been adapted with PostgresWiki -https://wiki.postgresql.org/wiki/Automated_Backup_on_Linux \ No newline at end of file diff --git a/pg_backup/main.sh b/pg_backup/main.sh deleted file mode 100755 index 166f6ec..0000000 --- a/pg_backup/main.sh +++ /dev/null @@ -1,83 +0,0 @@ -#!/bin/bash -set -e -BACKUP_TIME=$(date +%Y%m%d%H%M) -AWS_COMMAND=$(which aws) -########################### -####### LOAD CONFIG ####### -########################### - -while [ $# -gt 0 ]; do - case $1 in - -c) - CONFIG_FILE_PATH="$2" - shift 2 - ;; - *) - ${ECHO} "Unknown Option \"$1\"" 1>&2 - exit 2 - ;; - esac -done - -if [ -z $CONFIG_FILE_PATH ] ; then - SCRIPTPATH=$(cd ${0%/*} && pwd -P) - CONFIG_FILE_PATH="${SCRIPTPATH}/mongo_backup.config" -fi - -if [ ! -r ${CONFIG_FILE_PATH} ] ; then - echo "Could not load config file from ${CONFIG_FILE_PATH}" 1>&2 - exit 1 -fi - -source "${CONFIG_FILE_PATH}" - - -########################### -####### INIT CONFIG ####### -########################### - -if $CREDENTIALS_IN_K8S; then - export KUBECONFIG=$CREDENTIALS_KUBECONFIG - export PG_ROOT_PASSWORD=$(kubectl get secret $DB_CRED_SECRET --namespace $CREDENTIAL_NAMESPACE -o jsonpath="{.data.password}" | base64 --decode) -fi - -# Log files -mkdir -p $BACKUP_DIR/logs -LOG=$BACKUP_DIR/logs/log-$BACKUP_TIME.txt -echo \n\n\n >> $LOG -date | tee -a $LOG -echo \n >> $LOG -ps 2>&1 | tee -a $LOG - - -########################### -###### START PROCESS ###### -########################### - -# Port forward to postgresql statefulset on k8s cluster -if $K8S_PG; then - export KUBECONFIG=$PG_KUBECONFIG - kubectl port-forward statefulset/$PG_STATEFULSET_NAME -n $PG_NAMESPACE $PORT:$PG_STATEFULSET_PORT & - pidofkubectl=$! 
-fi - - -# run backup process -SCRIPTPATH=$(cd ${0%/*} && pwd -P) -$SCRIPTPATH/pg_backup_rotated.sh - -if $K8S_PG; then - kill -9 $pidofkubectl -fi - -########################### -###### SYNC WITH S3 ###### -########################### - -##### Copy backups to AWS S3 bucket ##### -echo "aws sync starting" | tee -a $LOG -$AWS_COMMAND s3 sync $BACKUP_DIR s3://$S3_BACKUP_BUCKET/postgresql --delete --profile $AWS_PROFILE 2>&1 | tee -a $LOG - -echo "aws sync success" |tee -a $LOG -date | tee -a $LOG -echo "Postgresql backups has synced successfully" | mail -s "Postgresql sync success" $NOTIFICATION_EMAIL_ADDRESS \ No newline at end of file diff --git a/pg_backup/pg_backup.config b/pg_backup/pg_backup.config deleted file mode 100644 index c686d25..0000000 --- a/pg_backup/pg_backup.config +++ /dev/null @@ -1,79 +0,0 @@ -############################## -## POSTGRESQL BACKUP CONFIG ## -############################## - -# Optional system user to run backups as. If the user the script is running as doesn't match this -# the script terminates. Leave blank to skip check. -BACKUP_USER= - -# Optional hostname to adhere to pg_hba policies. Will default to "localhost" if none specified. -HOSTNAME= - -# Optional username to connect to database as. Will default to "postgres" if none specified. -USERNAME= - -# This dir will be created if it doesn't exist. This must be writable by the user the script is -# running as. -BACKUP_DIR=~/db-backups/postgres/ - -# List of strings to match against in database name, separated by space or comma, for which we only -# wish to keep a backup of the schema, not the data. Any database names which contain any of these -# values will be considered candidates. (e.g. "system_log" will match "dev_system_log_2010-01") -SCHEMA_ONLY_LIST="" - -# Will produce a custom-format backup if set to "yes" -ENABLE_CUSTOM_BACKUPS=yes - -# Will produce a gzipped plain-format backup if set to "yes" -ENABLE_PLAIN_BACKUPS=no - -# Will produce gzipped sql file containing the cluster globals, like users and passwords, if set to "yes" -ENABLE_GLOBALS_BACKUPS=yes - - -# Optional if your mongodb is hosted on k8s set to true. Fill settings for -K8S_PG=false - -##### SETTINGS FOR AWS S3 #### - -# Will put backups to this bucket -S3_BACKUP_BUCKET=bucket_name - -# The aws profile having access to the bucket -AWS_PROFILE=your-profile-with-s3-access - -#### SETTINGS FOR CREDENTIALS #### - -# Will use k8s secrets to read db password from credentials secret in $CREDENTIALS_NAMESPACE -# Will read password field in $DB_CREDENTIALS_SECRET -CREDENTIALS_IN_K8S=false -DB_CRED_SECRET=PostgresPassword -CREDENTIALS_KUBECONFIG=~/.kube/config.credentials -CREDENTIALS_NAMESPACE=credentials - -# Otherwise set password explicitly here -PG_PASSWORD=postgres-password - -##### SETTINGS FOR KUBERNETES CLUSTER #### -# If K8S_PG set to false this configs will not be used. 
-PG_KUBECONFIG=~/.kube/config.postgres -PG_NAMESPACE=production -PG_STATEFULSET_NAME=postgres -PG_STATEFULSET_PORT=3306 - -#### SETTINGS FOR NOTIFICATION #### -NOTIFICATION_EMAIL_ADDRESS=alerts@example.com - - -#### SETTINGS FOR ROTATED BACKUPS #### - -# Which day to take the weekly backup from (1-7 = Monday-Sunday) -DAY_OF_WEEK_TO_KEEP=5 - -# Number of days to keep daily backups -DAYS_TO_KEEP=7 - -# How many weeks to keep weekly backups -WEEKS_TO_KEEP=5 - -###################################### diff --git a/pg_backup/pg_backup.sh b/pg_backup/pg_backup.sh deleted file mode 100755 index f53e30c..0000000 --- a/pg_backup/pg_backup.sh +++ /dev/null @@ -1,166 +0,0 @@ -#!/bin/bash - -########################### -####### LOAD CONFIG ####### -########################### - -while [ $# -gt 0 ]; do - case $1 in - -c) - if [ -r "$2" ]; then - source "$2" - shift 2 - else - ${ECHO} "Unreadable config file \"$2\"" 1>&2 - exit 1 - fi - ;; - *) - ${ECHO} "Unknown Option \"$1\"" 1>&2 - exit 2 - ;; - esac -done - -if [ $# = 0 ]; then - SCRIPTPATH=$(cd ${0%/*} && pwd -P) - source $SCRIPTPATH/pg_backup.config -fi; - -########################### -#### PRE-BACKUP CHECKS #### -########################### - -# Make sure we're running as the required backup user -if [ "$BACKUP_USER" != "" -a "$(id -un)" != "$BACKUP_USER" ]; then - echo "This script must be run as $BACKUP_USER. Exiting." 1>&2 - exit 1; -fi; - - -########################### -### INITIALISE DEFAULTS ### -########################### - -if [ ! $HOSTNAME ]; then - HOSTNAME="localhost" -fi; - -if [ ! $USERNAME ]; then - USERNAME="postgres" -fi; - - -########################### -#### START THE BACKUPS #### -########################### - - -FINAL_BACKUP_DIR=$BACKUP_DIR"`date +\%Y-\%m-\%d`/" - -echo "Making backup directory in $FINAL_BACKUP_DIR" - -if ! mkdir -p $FINAL_BACKUP_DIR; then - echo "Cannot create backup directory in $FINAL_BACKUP_DIR. Go and fix it!" 1>&2 - exit 1; -fi; - - -####################### -### GLOBALS BACKUPS ### -####################### - -echo -e "\n\nPerforming globals backup" -echo -e "--------------------------------------------\n" - -if [ $ENABLE_GLOBALS_BACKUPS = "yes" ] -then - echo "Globals backup" - - set -o pipefail - if ! pg_dumpall -g -h "$HOSTNAME" -U "$USERNAME" | gzip > $FINAL_BACKUP_DIR"globals".sql.gz.in_progress; then - echo "[!!ERROR!!] Failed to produce globals backup" 1>&2 - else - mv $FINAL_BACKUP_DIR"globals".sql.gz.in_progress $FINAL_BACKUP_DIR"globals".sql.gz - fi - set +o pipefail -else - echo "None" -fi - - -########################### -### SCHEMA-ONLY BACKUPS ### -########################### - -for SCHEMA_ONLY_DB in ${SCHEMA_ONLY_LIST//,/ } -do - SCHEMA_ONLY_CLAUSE="$SCHEMA_ONLY_CLAUSE or datname ~ '$SCHEMA_ONLY_DB'" -done - -SCHEMA_ONLY_QUERY="select datname from pg_database where false $SCHEMA_ONLY_CLAUSE order by datname;" - -echo -e "\n\nPerforming schema-only backups" -echo -e "--------------------------------------------\n" - -SCHEMA_ONLY_DB_LIST=`psql -h "$HOSTNAME" -U "$USERNAME" -At -c "$SCHEMA_ONLY_QUERY" postgres` - -echo -e "The following databases were matched for schema-only backup:\n${SCHEMA_ONLY_DB_LIST}\n" - -for DATABASE in $SCHEMA_ONLY_DB_LIST -do - echo "Schema-only backup of $DATABASE" - - set -o pipefail - if ! pg_dump -Fp -s -h "$HOSTNAME" -U "$USERNAME" "$DATABASE" | gzip > $FINAL_BACKUP_DIR"$DATABASE"_SCHEMA.sql.gz.in_progress; then - echo "[!!ERROR!!] 
Failed to backup database schema of $DATABASE" 1>&2 - else - mv $FINAL_BACKUP_DIR"$DATABASE"_SCHEMA.sql.gz.in_progress $FINAL_BACKUP_DIR"$DATABASE"_SCHEMA.sql.gz - fi - set +o pipefail -done - - -########################### -###### FULL BACKUPS ####### -########################### - -for SCHEMA_ONLY_DB in ${SCHEMA_ONLY_LIST//,/ } -do - EXCLUDE_SCHEMA_ONLY_CLAUSE="$EXCLUDE_SCHEMA_ONLY_CLAUSE and datname !~ '$SCHEMA_ONLY_DB'" -done - -FULL_BACKUP_QUERY="select datname from pg_database where not datistemplate and datallowconn $EXCLUDE_SCHEMA_ONLY_CLAUSE order by datname;" - -echo -e "\n\nPerforming full backups" -echo -e "--------------------------------------------\n" - -for DATABASE in `psql -h "$HOSTNAME" -U "$USERNAME" -At -c "$FULL_BACKUP_QUERY" postgres` -do - if [ $ENABLE_PLAIN_BACKUPS = "yes" ] - then - echo "Plain backup of $DATABASE" - - set -o pipefail - if ! pg_dump -Fp -h "$HOSTNAME" -U "$USERNAME" "$DATABASE" | gzip > $FINAL_BACKUP_DIR"$DATABASE".sql.gz.in_progress; then - echo "[!!ERROR!!] Failed to produce plain backup database $DATABASE" 1>&2 - else - mv $FINAL_BACKUP_DIR"$DATABASE".sql.gz.in_progress $FINAL_BACKUP_DIR"$DATABASE".sql.gz - fi - set +o pipefail - fi - - if [ $ENABLE_CUSTOM_BACKUPS = "yes" ] - then - echo "Custom backup of $DATABASE" - - if ! pg_dump -Fc -h "$HOSTNAME" -U "$USERNAME" "$DATABASE" -f $FINAL_BACKUP_DIR"$DATABASE".custom.in_progress; then - echo "[!!ERROR!!] Failed to produce custom backup database $DATABASE" 1>&2 - else - mv $FINAL_BACKUP_DIR"$DATABASE".custom.in_progress $FINAL_BACKUP_DIR"$DATABASE".custom - fi - fi - -done - -echo -e "\nAll database backups complete!" diff --git a/pg_backup/pg_backup_rotated.sh b/pg_backup/pg_backup_rotated.sh deleted file mode 100755 index 861c5c4..0000000 --- a/pg_backup/pg_backup_rotated.sh +++ /dev/null @@ -1,206 +0,0 @@ -#!/bin/bash - -########################### -####### LOAD CONFIG ####### -########################### - -while [ $# -gt 0 ]; do - case $1 in - -c) - CONFIG_FILE_PATH="$2" - shift 2 - ;; - *) - ${ECHO} "Unknown Option \"$1\"" 1>&2 - exit 2 - ;; - esac -done - -if [ -z $CONFIG_FILE_PATH ] ; then - SCRIPTPATH=$(cd ${0%/*} && pwd -P) - CONFIG_FILE_PATH="${SCRIPTPATH}/pg_backup.config" -fi - -if [ ! -r ${CONFIG_FILE_PATH} ] ; then - echo "Could not load config file from ${CONFIG_FILE_PATH}" 1>&2 - exit 1 -fi - -source "${CONFIG_FILE_PATH}" - -########################### -#### PRE-BACKUP CHECKS #### -########################### - -# Make sure we're running as the required backup user -if [ "$BACKUP_USER" != "" -a "$(id -un)" != "$BACKUP_USER" ] ; then - echo "This script must be run as $BACKUP_USER. Exiting." 1>&2 - exit 1 -fi - - -########################### -### INITIALISE DEFAULTS ### -########################### - -if [ ! $HOSTNAME ]; then - HOSTNAME="localhost" -fi; - -if [ ! $USERNAME ]; then - USERNAME="postgres" -fi; - - -########################### -#### START THE BACKUPS #### -########################### - -function perform_backups() -{ - SUFFIX=$1 - FINAL_BACKUP_DIR=$BACKUP_DIR"`date +\%Y-\%m-\%d`$SUFFIX/" - - echo "Making backup directory in $FINAL_BACKUP_DIR" - - if ! mkdir -p $FINAL_BACKUP_DIR; then - echo "Cannot create backup directory in $FINAL_BACKUP_DIR. Go and fix it!" 1>&2 - exit 1; - fi; - - ####################### - ### GLOBALS BACKUPS ### - ####################### - - echo -e "\n\nPerforming globals backup" - echo -e "--------------------------------------------\n" - - if [ $ENABLE_GLOBALS_BACKUPS = "yes" ] - then - echo "Globals backup" - - set -o pipefail - if ! 
pg_dumpall -g -h "$HOSTNAME" -U "$USERNAME" | gzip > $FINAL_BACKUP_DIR"globals".sql.gz.in_progress; then - echo "[!!ERROR!!] Failed to produce globals backup" 1>&2 - else - mv $FINAL_BACKUP_DIR"globals".sql.gz.in_progress $FINAL_BACKUP_DIR"globals".sql.gz - fi - set +o pipefail - else - echo "None" - fi - - - ########################### - ### SCHEMA-ONLY BACKUPS ### - ########################### - - for SCHEMA_ONLY_DB in ${SCHEMA_ONLY_LIST//,/ } - do - SCHEMA_ONLY_CLAUSE="$SCHEMA_ONLY_CLAUSE or datname ~ '$SCHEMA_ONLY_DB'" - done - - SCHEMA_ONLY_QUERY="select datname from pg_database where false $SCHEMA_ONLY_CLAUSE order by datname;" - - echo -e "\n\nPerforming schema-only backups" - echo -e "--------------------------------------------\n" - - SCHEMA_ONLY_DB_LIST=`psql -h "$HOSTNAME" -U "$USERNAME" -At -c "$SCHEMA_ONLY_QUERY" postgres` - - echo -e "The following databases were matched for schema-only backup:\n${SCHEMA_ONLY_DB_LIST}\n" - - for DATABASE in $SCHEMA_ONLY_DB_LIST - do - echo "Schema-only backup of $DATABASE" - set -o pipefail - if ! pg_dump -Fp -s -h "$HOSTNAME" -U "$USERNAME" "$DATABASE" | gzip > $FINAL_BACKUP_DIR"$DATABASE"_SCHEMA.sql.gz.in_progress; then - echo "[!!ERROR!!] Failed to backup database schema of $DATABASE" 1>&2 - else - mv $FINAL_BACKUP_DIR"$DATABASE"_SCHEMA.sql.gz.in_progress $FINAL_BACKUP_DIR"$DATABASE"_SCHEMA.sql.gz - fi - set +o pipefail - done - - - ########################### - ###### FULL BACKUPS ####### - ########################### - - for SCHEMA_ONLY_DB in ${SCHEMA_ONLY_LIST//,/ } - do - EXCLUDE_SCHEMA_ONLY_CLAUSE="$EXCLUDE_SCHEMA_ONLY_CLAUSE and datname !~ '$SCHEMA_ONLY_DB'" - done - - FULL_BACKUP_QUERY="select datname from pg_database where not datistemplate and datallowconn $EXCLUDE_SCHEMA_ONLY_CLAUSE order by datname;" - - echo -e "\n\nPerforming full backups" - echo -e "--------------------------------------------\n" - - for DATABASE in `psql -h "$HOSTNAME" -U "$USERNAME" -At -c "$FULL_BACKUP_QUERY" postgres` - do - if [ $ENABLE_PLAIN_BACKUPS = "yes" ] - then - echo "Plain backup of $DATABASE" - - set -o pipefail - if ! pg_dump -Fp -h "$HOSTNAME" -U "$USERNAME" "$DATABASE" | gzip > $FINAL_BACKUP_DIR"$DATABASE".sql.gz.in_progress; then - echo "[!!ERROR!!] Failed to produce plain backup database $DATABASE" 1>&2 - else - mv $FINAL_BACKUP_DIR"$DATABASE".sql.gz.in_progress $FINAL_BACKUP_DIR"$DATABASE".sql.gz - fi - set +o pipefail - - fi - - if [ $ENABLE_CUSTOM_BACKUPS = "yes" ] - then - echo "Custom backup of $DATABASE" - - if ! pg_dump -Fc -h "$HOSTNAME" -U "$USERNAME" "$DATABASE" -f $FINAL_BACKUP_DIR"$DATABASE".custom.in_progress; then - echo "[!!ERROR!!] Failed to produce custom backup database $DATABASE" - else - mv $FINAL_BACKUP_DIR"$DATABASE".custom.in_progress $FINAL_BACKUP_DIR"$DATABASE".custom - fi - fi - - done - - echo -e "\nAll database backups complete!" 
-} - -# MONTHLY BACKUPS - -DAY_OF_MONTH=`date +%d` - -if [ $DAY_OF_MONTH -eq 1 ]; -then - # Delete all expired monthly directories - find $BACKUP_DIR -maxdepth 1 -name "*-monthly" -exec rm -rf '{}' ';' - - perform_backups "-monthly" - - exit 0; -fi - -# WEEKLY BACKUPS - -DAY_OF_WEEK=`date +%u` #1-7 (Monday-Sunday) -EXPIRED_DAYS=`expr $((($WEEKS_TO_KEEP * 7) + 1))` - -if [ $DAY_OF_WEEK = $DAY_OF_WEEK_TO_KEEP ]; -then - # Delete all expired weekly directories - find $BACKUP_DIR -maxdepth 1 -mtime +$EXPIRED_DAYS -name "*-weekly" -exec rm -rf '{}' ';' - - perform_backups "-weekly" - - exit 0; -fi - -# DAILY BACKUPS - -# Delete daily backups 7 days old or more -find $BACKUP_DIR -maxdepth 1 -mtime +$DAYS_TO_KEEP -name "*-daily" -exec rm -rf '{}' ';' - -perform_backups "-daily"
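
Usage sketch (illustrative, not part of the patch): with the chart now living under
charts/backup-zen and released by .github/workflows/release-helm.yml, the
helm/chart-releaser-action step publishes chart packages as GitHub releases and, by
default, maintains a Helm repository index on this repository's GitHub Pages branch.
Assuming that default setup and using <owner>/<repo> as placeholders for the real
GitHub owner and repository name, the relocated chart could be consumed roughly like
this (release name "my-backups" is just an example; databaseType is the value that
_helpers.tpl uses to pick the dumper image):

    helm repo add backup-zen https://<owner>.github.io/<repo>
    helm repo update
    helm install my-backups backup-zen/backup-zen \
      --version 0.1.0 \
      --set databaseType=PostgreSQL

    # or install straight from a local checkout of this branch:
    helm install my-backups ./charts/backup-zen --set databaseType=PostgreSQL

Since charts/ is the directory chart-releaser-action scans by default, no extra
charts_dir configuration should be needed for the workflow added above.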