mongodb_backup2s3.erb
#!/bin/bash
#
# This file was generated by CHEF. Changes will be overwritten!
# Backup script for MongoDB in AWS EC2 instances.
#
# If this mongod instance is the primary, back up the
# designated database(s) and upload them to S3, encrypting first if needed.
#
# Depends on:
# MongoDB
# AWSCLI
# OpenSSL
set -e
suffix=.mongodb_backup2s3
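# The temp file created below also acts as a lock: it is removed only at
# the very end of a successful run, so any leftover file with this suffix
# in /tmp means another backup is still running (or a previous one failed).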
if compgen -G "/tmp/*${suffix}" > /dev/null ; then
  ( >&2 echo "[ERROR] Another operation might still be in progress" )
  exit 200
fi
tmp_file=$( mktemp --suffix "$suffix" )
bin_aws=<%= @bin_aws %>
bin_mongo=<%= @bin_mongo %>
bin_mongodump=<%= @bin_mongodump %>
bin_openssl=<%= @bin_openssl %>
db_host=<%= @db_host %>
db_port=<%= @db_port %>
db_user='<%= @backup_user %>'
db_pass='<%= @backup_pass %>'
db_auth=<%= @backup_auth %>
s3_bucket=<%= @s3_bucket %>
s3_region=<%= @s3_region %>
pub_key=<%= @pub_key %>
<% bak_dir = "#{Chef::Config[:file_cache_path]}/mongodb_backup2s3" -%>
bak_dir=<%= bak_dir %>
# Perform the actual mongodump
# Args:
# $1 = db name
# $2 = backup dump filename, e.g. 'mydb'
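# Example: export_db mydb mydb_dump  -> dumps database 'mydb' to ${bak_dir}/mydb_dump.gz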
export_db() {
  echo "$(date) : Export database ${1} to ${bak_dir}."
  rm -f "${bak_dir}/${2}.gz"
  "$bin_mongodump" --host="${db_host}:${db_port}" \
    -u "$db_user" -p "$db_pass" --authenticationDatabase="${db_auth}" \
    --dumpDbUsersAndRoles --gzip -d "${1}" --archive="${bak_dir}/${2}.gz"
}
# Encrypt the backup file with OpenSSL
# Args:
# $1 = compressed dump filename, e.g. 'mydb.gz'
encrypt_backup() {
  echo "$(date) : Encrypt file ${1}."
  rm -f "${bak_dir}/${1}.enc"
  "$bin_openssl" smime -encrypt -binary -text -aes256 \
    -in "${bak_dir}/${1}" -out "${bak_dir}/${1}.enc" \
    -outform DER "$pub_key"
  rm "${bak_dir}/${1}"
}
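# Note: restoring is not handled by this script. As a rough sketch (assuming
# the private key matching $pub_key is available as 'backup.key'), a backup
# produced above could be decrypted and restored with something like:
#   openssl smime -decrypt -binary -inform DER -inkey backup.key \
#     -in mydb_dump.gz.enc -out mydb_dump.gz
#   mongorestore --gzip --archive=mydb_dump.gz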
# Rotate the current backups in S3
# Args:
# $1 = backup dump filename, e.g. 'mydb'
# $2 = max number of backup files to store at a time
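# For example, with $2=3 the bucket holds at most 'mydb' (newest),
# 'mydb.1' and 'mydb.2' (oldest); on each run 'mydb.2' is deleted and the
# remaining prefixes shift down by one before the new dump is uploaded.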
rotate_backup() {
  folder=$1
  max=$2
  # Backups will be stored inside subfolders (prefixes)
  # in S3, so only look at 'PRE' entries.
  baks=$( "$bin_aws" --output text --region "$s3_region" \
    s3 ls "s3://${s3_bucket}/" | grep '^\s*PRE' | \
    sed -e 's/^ *PRE //' -e 's/\/$//' | \
    grep "^${folder}" || echo "" )
  echo "$(date) : Backup rotation for ${folder}."
  start=$((max - 1))
  for (( x=start ; x > 0 ; x-- )) ; do
    if echo "$baks" | grep "^${folder}\\.${x}\$" ; then
      newx=$((x + 1))
      if [[ $newx -lt $max ]] ; then
        "$bin_aws" --region "$s3_region" \
          s3 mv --recursive "s3://${s3_bucket}/${folder}.${x}" \
          "s3://${s3_bucket}/${folder}.${newx}"
      else
        "$bin_aws" --region "$s3_region" \
          s3 rm --recursive "s3://${s3_bucket}/${folder}.${x}"
      fi
    fi
  done
  if echo "$baks" | grep "^${folder}\$" ; then
    if [[ $max -gt 1 ]] ; then
      "$bin_aws" --region "$s3_region" \
        s3 mv --recursive "s3://${s3_bucket}/${folder}" \
        "s3://${s3_bucket}/${folder}.1"
    else
      "$bin_aws" --region "$s3_region" \
        s3 rm --recursive "s3://${s3_bucket}/${folder}"
    fi
  fi
}
# Upload the compressed db backup file. It will be uploaded
# to this example location: ${s3_bucket}/dump_filename/dump_filename.gz.enc
#
# A timestamp file will also be uploaded to this location:
# ${s3_bucket}/dump_filename/YYYY-MM-DDThh:mm:ss.txt
# Args:
# $1 = backup dump filename, e.g. 'mydb'
# $2 = if file is encrypted or not (boolean)
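# Example: upload_to_s3 mydb_dump true  -> moves ${bak_dir}/mydb_dump.gz.enc
# to s3://${s3_bucket}/mydb_dump/mydb_dump.gz.enc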
upload_to_s3() {
  keyname=$1
  if [ "$2" = true ] ; then
    fname="${keyname}.gz.enc"
  else
    fname="${keyname}.gz"
  fi
  echo "$(date) : Upload ${fname} to S3 bucket ${s3_bucket}."
  stamp=$( date +"%FT%T" )
  echo "Uploaded: ${stamp}" > "${bak_dir}/${stamp}.txt"
  "$bin_aws" --region "$s3_region" \
    s3 mv "${bak_dir}/${fname}" \
    "s3://${s3_bucket}/${keyname}/${fname}"
  "$bin_aws" --region "$s3_region" \
    s3 mv "${bak_dir}/${stamp}.txt" \
    "s3://${s3_bucket}/${keyname}/${stamp}.txt"
}
## Do the backup only if this node is the primary:
if "$bin_mongo" --host="${db_host}:${db_port}" -u "$db_user" \
-p "$db_pass" --authenticationDatabase="${db_auth}" \
--eval 'db.isMaster()["ismaster"]' | grep -q true ; then
if [[ ! -d "$bak_dir" ]] ; then
mkdir -p "$bak_dir"
fi
<% @db_map.each do |x| -%>
<%
  if x.is_a?(Array)
    db_name = x[0]
    x = x[1]
  else
    db_name = x[:db_name]
  end
  do_backup = x.has_key?(:backup) ? x[:backup] : true
  is_enc = x.has_key?(:bak_encrypted) ? x[:bak_encrypted] : false
  bak_filename = x[:bak_filename] || db_name
  bak_maxcopies = x[:bak_maxcopies] || 30
-%>
<% if do_backup -%>
  # Database: <%= db_name %>
  export_db <%= db_name %> <%= bak_filename %>
<% if is_enc -%>
  encrypt_backup <%= bak_filename %>.gz
<% end -%>
  rotate_backup <%= bak_filename %> <%= bak_maxcopies %>
  upload_to_s3 <%= bak_filename %> <%= is_enc %>
<% end -%>
<% end -%>
  echo "$(date) : Done."
fi
rm "$tmp_file"