#!/bin/bash
# Script to backup Adapt Authoring tool
# TODO the capacity check below is percentage-based; an absolute free-space threshold may also be useful (see the sketch further down)
# Stop script on error
set -o errexit
# Stop script using undeclared variables
set -o nounset
# enter configuration values
# mongo database to be backed up
MONGO_DATABASE="adapt-tenant-db"
# app name, used in the backup file names, so choose something that identifies this install
APP_NAME="backupTest"
# mongo server and port
MONGO_HOST="127.0.0.1"
MONGO_PORT="27017"
MONGO_USERNAME=""
MONGO_PASSWORD=""
MONGO_AUTHDB="admin"
# specify where you want the mongodump and backup files to be saved until upload to S3
BACKUPS_DIR="/Users/danielgray/backups/$APP_NAME"
# the full path to the app you are backing up
APP_DIR="/Users/danielgray/websiteTest/adapt_authoring"
# Amazon S3 bucket name
S3_BUCKET="mybucket"
# Abort the backup if disk usage at the destination exceeds this percentage
MINCAPACITY="75"
TIMESTAMP=$(date "+%Y-%m-%d")
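# A sketch if more than one backup per day is needed: include the time as well,
# e.g. TIMESTAMP=$(date "+%Y-%m-%d_%H%M%S"), so same-day runs do not overwrite each other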
DB_BACKUP_NAME="$APP_NAME-$MONGO_DATABASE-$TIMESTAMP"
APP_BACKUP_NAME="$APP_NAME-$TIMESTAMP"
function print_task
{
printf "\n==> ${1}\n"
}
function print_header
{
echo "-------------------------------------------------------------------------------"
printf "\n ${1}\n\n"
echo "-------------------------------------------------------------------------------"
}
print_header "Backing up database ($MONGO_DATABASE)"
print_task "Backup destination ($BACKUPS_DIR)"
# Abort if the destination filesystem is already more than MINCAPACITY percent full (awk pulls the use-percentage column from df -P)
if [ $(df -P "$BACKUPS_DIR" | awk '{ gsub("%",""); capacity = $5 }; END { print capacity }') -gt "$MINCAPACITY" ]; then
print_task "Backup failed. Not enough disk space. ($TIMESTAMP)"
else
# mongo admin --eval "printjson(db.fsyncLock())"
# $MONGODUMP_PATH -h $MONGO_HOST:$MONGO_PORT -d $MONGO_DATABASE
# consider using --oplog for a point-in-time dump (it requires a replica set and a full, all-database dump rather than -d)
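# If no credentials are configured (empty MONGO_USERNAME/MONGO_PASSWORD), the auth flags
# on the mongodump call below may need to be dropped; a sketch of handling both cases:
#   if [ -n "$MONGO_USERNAME" ]; then
#     mongodump -d "$MONGO_DATABASE" -o "$BACKUPS_DIR" --host="$MONGO_HOST" --port="$MONGO_PORT" \
#       --authenticationDatabase="$MONGO_AUTHDB" --username="$MONGO_USERNAME" --password="$MONGO_PASSWORD"
#   else
#     mongodump -d "$MONGO_DATABASE" -o "$BACKUPS_DIR" --host="$MONGO_HOST" --port="$MONGO_PORT"
#   fi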
mongodump -d "$MONGO_DATABASE" -o "$BACKUPS_DIR" --host="$MONGO_HOST" --port="$MONGO_PORT" --authenticationDatabase="$MONGO_AUTHDB" --username="$MONGO_USERNAME" --password="$MONGO_PASSWORD"
# mongo admin --eval "printjson(db.fsyncUnlock())"
# might want to suppress tar read errors with --ignore-failed-read (GNU tar)
# compress database files
tar -zcf "$BACKUPS_DIR/$DB_BACKUP_NAME.tgz" "$BACKUPS_DIR/$MONGO_DATABASE"
# compress the application files
# excluded paths use shell globbing patterns, not regular expressions
# TODO test restore without all node_modules removed
# TODO add flag to ignore changed files, see http://stackoverflow.com/questions/20318852/tar-file-changed-as-we-read-it
# Also need to add an if statement around the aws cp commands (see the existence-check sketch below)
tar -zcf "$BACKUPS_DIR/$APP_BACKUP_NAME.tgz" --exclude='node_modules' --exclude='*.zip' --exclude='exports' "$APP_DIR"
# Copy application and database to Amazon S3
# needs a check that the files exist before uploading
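# e.g. a sketch that only uploads archives that were actually created:
#   if [ -f "$BACKUPS_DIR/$DB_BACKUP_NAME.tgz" ]; then
#     aws s3 cp --quiet "$BACKUPS_DIR/$DB_BACKUP_NAME.tgz" "s3://$S3_BUCKET/"
#   fi
#   if [ -f "$BACKUPS_DIR/$APP_BACKUP_NAME.tgz" ]; then
#     aws s3 cp --quiet "$BACKUPS_DIR/$APP_BACKUP_NAME.tgz" "s3://$S3_BUCKET/"
#   fi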
aws s3 cp --quiet "$BACKUPS_DIR/$DB_BACKUP_NAME.tgz" "s3://$S3_BUCKET/"
aws s3 cp --quiet "$BACKUPS_DIR/$APP_BACKUP_NAME.tgz" "s3://$S3_BUCKET/"
# remove local files
if [ -f "$BACKUPS_DIR/$APP_BACKUP_NAME.tgz" ]; then
rm "$BACKUPS_DIR/$APP_BACKUP_NAME.tgz"
print_task "App backup removed"
else
print_task "App backup file not found and not removed"
fi
if [ -f "$BACKUPS_DIR/$DB_BACKUP_NAME.tgz" ]; then
rm "$BACKUPS_DIR/$DB_BACKUP_NAME.tgz"
print_task "Database backup removed"
else
print_task "Database backup file not found and not removed"
fi
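# The uncompressed mongodump output is still left under $BACKUPS_DIR; a sketch of
# removing it as well (the :? guard avoids rm -rf on an empty variable):
#   rm -rf "${BACKUPS_DIR:?}/$MONGO_DATABASE"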
print_task "Backup finished ($TIMESTAMP)"
fi