#!/usr/bin/env ruby
# rds-s3-backup.rb (forked from thecity/rds-s3-backup)
#
# Runs a mysqldump against a throwaway RDS instance restored from a fresh
# snapshot, then uploads the gzipped dump to S3.
require 'rubygems'
require 'thor'
require 'cocaine'
require 'fog'
require 'logger'
require 'yaml' # build_configuration parses the optional config file

# Fog defaults to a 600-second timeout, but that isn't long enough for
# snapshot and restore operations on large databases. See: #68268570.
Fog.timeout = 7200
class RdsS3Backup < Thor

  desc "s3_dump", "Runs a mysqldump from a restored snapshot of the specified RDS instance, and uploads the dump to S3"
  method_option :rds_instance_id
  method_option :s3_bucket
  method_option :s3_prefix, :default => 'db_dumps'
  method_option :aws_access_key_id
  method_option :aws_secret_access_key
  method_option :mysql_database
  method_option :mysql_username
  method_option :mysql_password
  method_option :dump_ttl, :default => 0, :desc => "Number of dumps to retain in S3; 0 keeps everything.", :type => :numeric
  method_option :dump_directory, :default => '/mnt/', :desc => "Where to store the temporary sql dump file."
  method_option :config_file, :desc => "YAML file of defaults for any option. Options given during execution override these."
  method_option :aws_region, :default => "us-east-1", :desc => "Region of your RDS server (and of S3 storage, unless --aws-s3-region is specified)."
  method_option :aws_s3_region, :desc => "Region to store your S3 dumpfiles, if different from the RDS region."
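
  # Example invocation (a sketch; the identifiers and credentials below are
  # placeholders, and any of these options can come from --config-file instead):
  #
  #   ./rds-s3-backup.rb s3_dump \
  #     --rds-instance-id mydb \
  #     --s3-bucket my-backup-bucket \
  #     --aws-access-key-id AKIA... \
  #     --aws-secret-access-key ... \
  #     --mysql-database myapp_production \
  #     --mysql-username backup_user \
  #     --mysql-password secret \
  #     --dump-ttl 7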
  def s3_dump
    my_options = build_configuration(options)

    rds = Fog::AWS::RDS.new(:aws_access_key_id => my_options[:aws_access_key_id],
                            :aws_secret_access_key => my_options[:aws_secret_access_key],
                            :region => my_options[:aws_region])
    rds_server = rds.servers.get(my_options[:rds_instance_id])

    # The S3 bucket may live in a different region from the RDS instance.
    s3 = Fog::Storage.new(:provider => 'AWS',
                          :aws_access_key_id => my_options[:aws_access_key_id],
                          :aws_secret_access_key => my_options[:aws_secret_access_key],
                          :region => my_options[:aws_s3_region] || my_options[:aws_region],
                          :scheme => 'https')
    s3_bucket = s3.directories.get(my_options[:s3_bucket])

    snap_timestamp = Time.now.strftime('%Y-%m-%d-%H-%M-%S-%Z')
    snap_name = "s3-dump-snap-#{snap_timestamp}"
    backup_server_id = "#{rds_server.id}-s3-dump-server"
    backup_file_name = "#{rds_server.id}-mysqldump-#{snap_timestamp}.sql.gz"
    backup_file_filepath = File.join(my_options[:dump_directory], backup_file_name)
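
    # With the default s3_prefix, the uploaded key ends up looking like
    # (hypothetical instance id and timestamp):
    #   db_dumps/mydb-mysqldump-2013-05-07-04-00-00-UTC.sql.gz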
    rds_server.snapshots.new(:id => snap_name).save
    new_snap = rds_server.snapshots.get(snap_name)
    # These double wait_fors look weird, but I've had ready? lie to me.
    new_snap.wait_for { ready? }
    new_snap.wait_for { ready? }

    # Restore the snapshot into a throwaway instance so the dump never
    # touches the production server.
    rds.restore_db_instance_from_db_snapshot(new_snap.id, backup_server_id)
    backup_server = rds.servers.get(backup_server_id)
    backup_server.wait_for { ready? }
    backup_server.wait_for { ready? }

    # Disable minor version upgrades and automated backups on the throwaway
    # instance, applying the change immediately; give the modify a moment to
    # register before polling ready? again.
    backup_server.modify(true, 'AutoMinorVersionUpgrade' => false, 'BackupRetentionPeriod' => 0)
    sleep(10)
    backup_server.wait_for { ready? }
    backup_server.wait_for { ready? }
    # --quick streams rows instead of buffering whole tables in memory, and
    # the dump is gzipped on the fly. Cocaine interpolates and escapes the
    # :placeholder values.
    mysqldump_command = Cocaine::CommandLine.new('mysqldump',
        '--opt --quick -h :host_address -u :mysql_username --password=:mysql_password :mysql_database | gzip -9 > :backup_filepath',
        :host_address => backup_server.endpoint['Address'],
        :mysql_username => my_options[:mysql_username],
        :mysql_password => my_options[:mysql_password],
        :mysql_database => my_options[:mysql_database],
        :backup_filepath => backup_file_filepath,
        :logger => Logger.new(STDOUT))
    begin
      mysqldump_command.run
    rescue Cocaine::ExitStatusError, Cocaine::CommandNotFoundError => e
      puts "Dump failed with error #{e.message}"
      cleanup(new_snap, backup_server, backup_file_filepath)
      exit(1)
    end
    tries = 1
    saved_dump = begin
      s3_bucket.files.new(:key => File.join(my_options[:s3_prefix], backup_file_name),
                          :body => File.open(backup_file_filepath),
                          :acl => 'private',
                          :content_type => 'application/x-gzip'
                          ).save
    rescue StandardError => e
      puts "An error of type #{e.class} happened."
      puts "Error during processing: #{e.message}"
      puts "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
      if tries < 3
        tries += 1
        puts "Retrying S3 upload (attempt #{tries} of 3)"
        retry
      else
        puts "Trapped exception #{e} on try #{tries}"
        false
      end
    end
    if saved_dump
      exit_code = 0
      if my_options[:dump_ttl] > 0
        prune_dumpfiles(s3_bucket, File.join(my_options[:s3_prefix], "#{rds_server.id}-mysqldump-"), my_options[:dump_ttl])
      end
    else
      exit_code = 1
      # Exit non-zero, and write to stderr so that (e.g. under cron) an email
      # is sent notifying that the upload failed.
      $stderr.puts "S3 upload failed! Tried #{tries} times."
    end

    cleanup(new_snap, backup_server, backup_file_filepath)
    exit(exit_code)
  end
  no_tasks do
    # Merge the command-line options with the optional YAML config file
    # (command-line values win), then verify that every required option is set.
    def build_configuration(thor_options)
      merged_options = {}
      begin
        merged_options =
          if thor_options[:config_file]
            thor_options.merge(YAML.load(File.read(thor_options[:config_file]))) { |key, cmdopt, cfgopt| cmdopt }
          else
            thor_options
          end
      rescue StandardError => e
        puts "Unable to read specified configuration file #{thor_options[:config_file]}. Reason given: #{e}"
        exit(1)
      end

      reqd_options = %w(rds_instance_id s3_bucket aws_access_key_id aws_secret_access_key mysql_database mysql_username mysql_password)
      nil_options = reqd_options.find_all { |opt| merged_options[opt].nil? }
      if nil_options.count > 0
        puts "No value provided for required option(s) #{nil_options.join(' ')} in either config file or options."
        exit(1)
      end

      merged_options
    end
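
    # A file passed via --config-file might look like this (a minimal sketch;
    # keys mirror the option names above, and all values are placeholders):
    #
    #   rds_instance_id: mydb
    #   s3_bucket: my-backup-bucket
    #   aws_access_key_id: AKIA...
    #   aws_secret_access_key: ...
    #   mysql_database: myapp_production
    #   mysql_username: backup_user
    #   mysql_password: secret
    #   dump_ttl: 7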
    # Tear down everything the dump created: the temporary snapshot, the
    # restored instance (destroy(nil) skips the final snapshot), and the
    # local dump file.
    def cleanup(new_snap, backup_server, backup_file_filepath)
      new_snap.wait_for { ready? }
      new_snap.destroy
      backup_server.wait_for { ready? }
      backup_server.destroy(nil)
      File.unlink(backup_file_filepath) if File.exist?(backup_file_filepath)
    end
    # Delete the oldest dumps under backup_file_prefix until only dump_ttl
    # of them remain.
    def prune_dumpfiles(s3_bucket, backup_file_prefix, dump_ttl)
      my_files = s3_bucket.files.all('prefix' => backup_file_prefix)
      if my_files.count > dump_ttl
        files_by_date = my_files.sort_by(&:last_modified)
        (files_by_date.count - dump_ttl).times do |i|
          files_by_date[i].destroy
        end
      end
    end
  end
end

RdsS3Backup.start