backup_db_to_s3.erb
#!/bin/bash
# Generated by Chef.
#
# Perform mysqldump on databases and upload the
# resulting backup files into an S3 bucket.
set -e
<% bak_dir = "#{Chef::Config[:file_cache_path]}/backup_db_to_s3" -%>
bak_dir=<%= bak_dir %>
db_host=<%= @db_ip %>
db_port=<%= @db_port %>
bucket=<%= @s3_bucket %>
region=<%= @s3_region %>
aws_bin=<%= @aws_bin %>
mysqldump_bin=<%= @mysqldump_bin %>
log_dir=/var/log/backup_db_to_s3
if [[ ! -d "$log_dir" ]] ; then
mkdir -p "$log_dir"
fi
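# Keep a copy of the original stdout and stderr on file descriptors 3 and 4,
# restore them on exit or on HUP/INT/QUIT, and append everything the script
# prints from here on to the log file.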
exec 3>&1 4>&2
trap 'exec 2>&4 1>&3' 0 1 2 3
exec 1>>"${log_dir}/backup_db_to_s3.log" 2>&1
if [[ ! -d "$bak_dir" ]] ; then
echo "$(date) : Create backup directory."
mkdir -p "$bak_dir"
fi
# Perform mysqldump on a database.
# Args:
# $1 = db name
# $2 = db user
# $3 = db password
# $4 = dump file filename, e.g. 'mydb.sql'
export_db() {
  echo "$(date) : Export database ${1}."
  "$mysqldump_bin" -h "$db_host" -P "$db_port" -C --opt \
    --no-create-db --single-transaction --lock-tables \
    -u "$2" -p"$3" "$1" > "${bak_dir}/${4}"
}
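# Illustrative call (hypothetical database name and credentials):
#   export_db mydb backup_user 's3cr3t' mydb.sql
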
# Compress the backup file with gzip.
# Args:
# $1 = dump file filename, e.g. 'mydb.sql'
compress_backup_file() {
  echo "$(date) : Gzip file ${1}."
  gzip "${bak_dir}/${1}"
}

# Rotate the current backups in S3.
# Args:
# $1 = dump file filename, e.g. 'mydb.sql'
# $2 = max number of backup files to store at a time
increment_backup_names() {
  bak_keyname="${1}.gz"
  max_backups=$2
  baks=$( "$aws_bin" --output text --region "$region" \
    s3api list-objects --bucket "$bucket" \
    | grep '^CONTENTS' | cut -f3 | grep "^${bak_keyname}" || echo "" )
  echo "$(date) : Backup rotation for ${bak_keyname}."
  start=$((max_backups - 1))
  for (( x=start ; x > 0 ; x-- )) ; do
    if echo "$baks" | grep "^${bak_keyname}\\.${x}\$" ; then
      newx=$((x + 1))
      if [[ $newx -lt $max_backups ]] ; then
        "$aws_bin" --region "$region" \
          s3 cp "s3://${bucket}/${bak_keyname}.${x}" \
          "s3://${bucket}/${bak_keyname}.${newx}"
      fi
    fi
  done
  if echo "$baks" | grep "^${bak_keyname}\$" ; then
    "$aws_bin" --region "$region" \
      s3 cp "s3://${bucket}/${bak_keyname}" \
      "s3://${bucket}/${bak_keyname}.1"
  fi
}
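# Worked example of the rotation above, assuming a hypothetical key
# 'mydb.sql.gz' and max_backups=3: 'mydb.sql.gz.1' is copied over
# 'mydb.sql.gz.2' (dropping the oldest generation), then 'mydb.sql.gz' is
# copied to 'mydb.sql.gz.1', leaving the base key free for the fresh upload,
# so the bucket never holds more than three generations.
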
# Upload the compressed db backup file.
# Args:
# $1 = dump file filename, e.g. 'mydb.sql'
upload_to_s3() {
  echo "$(date) : Upload ${1}.gz to S3 bucket ${bucket}."
  "$aws_bin" --region "$region" \
    s3 mv "${bak_dir}/${1}.gz" "s3://${bucket}/${1}.gz"
}
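
# The @db_map template variable is expected to be either a Hash keyed by
# database name, whose values hold :db_user, :db_pass, :bak_filename and
# :bak_maxcopies, or an Array of Hashes that also carry a :db_name key; the
# loops below handle both shapes. (Expected shape inferred from this
# template, not from a documented cookbook attribute.)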
# First, perform mysqldump on each database.
<% @db_map.each do |db| -%>
<% if db.is_a?(Array) -%>
<% db_name = db[0] -%>
<% db = db[1] -%>
<% else -%>
<% db_name = db[:db_name] -%>
<% end -%>
export_db <%= db_name %> <%= db[:db_user] %> '<%= db[:db_pass] %>' <%= db[:bak_filename] %>
<% end -%>
# Then compress and upload the backup files one by one.
<% @db_map.each do |db| -%>
<% if db.is_a?(Array) then db = db[1] end -%>
compress_backup_file <%= db[:bak_filename] %>
increment_backup_names <%= db[:bak_filename] %> <%= db[:bak_maxcopies] %>
upload_to_s3 <%= db[:bak_filename] %>
<% end -%>
echo "$(date) : Done."