Commit f42750b61ad4e22d787a19592892dd81b6dac49c
Committed by Earth Ugat
1 parent e835e351
Bump to v0.4.0.
- Install mariadb::client before mariadb::default.
- Backup script now compresses first before encrypting.
- Modify the way the backup script is called.
- Optimize the cookbook a bit.
- Modify the reload_from_s3 recipe to account for the new compression-then-encryption order.
Showing 9 changed files with 66 additions and 60 deletions
@@ -8,16 +8,16 @@ driver:
   subnet_id: subnet-d530d8b1
   instance_type: t2.micro
   associate_public_ip: true
-  require_chef_omnibus: true
+  require_chef_omnibus: 12.12.15
   shared_credentials_profile: earth
 
 provisioner:
-  name: chef_solo
+  name: chef_zero
 
 platforms:
   - name: ubuntu-14.04
     driver:
-      image_id: ami-50946030
+      image_id: ami-d732f0b7
     transport:
       username: ubuntu
       ssh_key: ~/.ssh/cfe_stg_20160222.pem
@@ -1,3 +1,12 @@
+## 0.4.0
+### Fixed
+- Install `mariadb::client` first, before `mariadb::default`. Otherwise, it might error out.
+- Fixed recipes to account for the above change, including `reload_from_s3`.
+- Backup script now properly compresses the file before encrypting it.
+
+### Changed
+- Optimized certain variables and recipes for readability. Some variables are now incompatible with previous versions.
+
 # 0.3.1
 
 Use encoding utf8 and collation utf8_general_ci as defaults when creating databases
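
A note for readers tracking backups across the upgrade: swapping the order also swaps the S3 key suffixes. A minimal sketch of the naming change (the filename is illustrative; both expressions appear in the reload_from_s3 diff below):

```ruby
bak_filename  = 'mydb.sql'   # illustrative name, not from the commit
bak_encrypted = true

# 0.3.x: encrypted first, then gzipped  -> 'mydb.sql.enc.gz'
old_keyname = "#{bak_filename}#{bak_encrypted ? '.enc.gz' : '.gz'}"

# 0.4.0: gzipped first, then encrypted  -> 'mydb.sql.gz.enc'
new_keyname = "#{bak_filename}.gz#{bak_encrypted ? '.enc' : ''}"
```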
@@ -8,6 +8,9 @@ The server is assumed to be using an IAM role with S3 bucket read/write access,
 
 When encryption is enabled for DB backups, the private and public keys are shared across all databases in the `db_map` attribute. Encryption is enabled separately for each individual database (see Usage below).
 
+
+Note that enabling encryption can cause the backup process to take a very long time, depending on how large the database is. That said, it is still very much recommended.
+
 ## Supported Platforms
 
 Ubuntu 14.04
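
For context, a `db_map` entry with encryption turned on might look like the following; the key names come from the recipes in this commit, while the values are placeholders:

```ruby
# Hypothetical node attributes; only this database's dump gets encrypted.
default['cfe-mariadb']['db_map'] = [
  {
    db_name:       'appdb',      # placeholder values throughout
    db_user:       'app',
    db_pass:       'secret',
    bak_filename:  'appdb.sql',
    bak_encrypted: true,         # gzip first, then encrypt this DB's dump
    bak_maxcopies: 7
  }
]
```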
@@ -47,17 +47,15 @@ else
 default['cfe-mariadb']['backup']['aws_bin'] = '/usr/local/bin/aws'
 default['cfe-mariadb']['backup']['mysqldump_bin'] = '/usr/bin/mysqldump'
 end
+
 # Path to directory where the backup script should be placed.
 # Uncomment to set custom locations.
 #default['cfe-mariadb']['backup']['script_dir'] = ::File.join(
 #  node['mariadb']['configuration']['path'], 'scripts'
 #)
+default['cfe-mariadb']['backup']['log_dir'] = '/var/log/backup_db_to_s3'
 
-default['cfe-mariadb']['backup']['cron']['min'] = '0'
-default['cfe-mariadb']['backup']['cron']['hour'] = '0'
-default['cfe-mariadb']['backup']['cron']['day'] = '*'
-default['cfe-mariadb']['backup']['cron']['mon'] = '*'
-default['cfe-mariadb']['backup']['cron']['wday'] = '*'
+default['cfe-mariadb']['backup']['cron']['sched'] = '0 0 * * *'
 default['cfe-mariadb']['backup']['cron']['mailto'] = "''" # Empty single quotes
 
 # Basic options for logrotate
@@ -91,11 +89,6 @@ default['cfe-mariadb']['backup']['logrotate']['options'] = %w{
 #
 # The key file will be stored in the same directory
 # as the script as 'pub.key'.
-# NOTE:
-# Enabling encryption will result in HUGE file sizes and,
-# depending on how large a database is, can take a LOT of time
-# during the backup process. That said, it is still recommended to
-# enforce encryption on DB backups.
 default['cfe-mariadb']['encrypt']['priv_key'] = nil
 default['cfe-mariadb']['encrypt']['pub_key'] = nil
 
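
Wrapper cookbooks that overrode the old `min`/`hour`/`day`/`mon`/`wday` attributes now set one five-field string instead, e.g.:

```ruby
# Run the backup daily at 03:30 instead of the default midnight.
default['cfe-mariadb']['backup']['cron']['sched'] = '30 3 * * *'
```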
@@ -4,7 +4,7 @@ maintainer_email 'sysadmin @ chromedia.com'
 license 'Apache License'
 description 'Simplifies setup of MariaDB in Chromedia.'
 long_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))
-version '0.3.1'
+version '0.4.0'
 
 {
   'mariadb' => '0.3.1',
@@ -50,6 +50,10 @@ file pub_key_file do
   only_if { md['encrypt']['pub_key'] }
 end
 
+directory mdb['log_dir'] do
+  recursive true
+end
+
 template "#{mdb['script_dir']}/backup_db_to_s3" do
   variables(
     :db_map => md['db_map'],
@@ -66,13 +70,15 @@ end
 # Chef built-in resource 'cron' seems to have a problem with
 # the syntax here. But they suggest using 'cron_d', instead, from
 # the cron cookbook (https://docs.chef.io/resource_cron.html).
+sched = mdbc['sched'].split(' ')
 cron_d 'backup_db_to_s3' do
-  command "bash #{mdb['script_dir']}/backup_db_to_s3"
-  minute mdbc['min']
-  hour mdbc['hour']
-  day mdbc['day']
-  month mdbc['mon']
-  weekday mdbc['wday']
+  command "bash #{mdb['script_dir']}/backup_db_to_s3 "\
+          ">> #{mdb['log_dir']}/backup_db_to_s3.log 2>&1"
+  minute sched[0]
+  hour sched[1]
+  day sched[2]
+  month sched[3]
+  weekday sched[4]
   mailto mdbc['mailto']
   path '/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin'
 end
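
The recipe above assumes `sched` is a standard five-field cron expression and maps the fields positionally after `split(' ')`. A quick sanity sketch in plain Ruby (not part of the cookbook):

```ruby
sched = '0 0 * * *'.split(' ')
# => ["0", "0", "*", "*", "*"]
#     min  hour  day  mon  wday
raise 'expected 5 cron fields' unless sched.length == 5
```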
@@ -30,6 +30,8 @@ chef_gem 'chef-rewind' do
 end
 require 'chef/rewind'
 
+# mysql2_chef_gem seems to want the client to be installed first.
+include_recipe 'mariadb::client'
 include_recipe 'mariadb'
 
 # It looks like the service is sometimes not being restarted
@@ -49,11 +49,10 @@ node['cfe-mariadb']['db_map'].each do |dbx|
     dbx_name = dbx[:db_name]
   end
 
-  keyname = "#{dbx[:bak_filename]}#{dbx[:bak_encrypted] ? '.enc.gz' : '.gz'}"
-  filegz = "#{tmp_dir}/#{keyname}"
-  filesql = "#{tmp_dir}/#{dbx[:bak_filename]}"
+  keyname = "#{dbx[:bak_filename]}.gz#{dbx[:bak_encrypted] ? '.enc' : ''}"
+  filepath = "#{tmp_dir}/#{dbx[:bak_filename]}"
 
-  awscli_s3_file filegz do
+  awscli_s3_file "#{tmp_dir}/#{keyname}" do
     region node['cfe-mariadb']['s3_region']
     bucket node['cfe-mariadb']['s3_bucket']
     key keyname
@@ -64,30 +63,28 @@ node['cfe-mariadb']['db_map'].each do |dbx|
     end
   end
 
-  execute "unpack_#{filegz}" do
-    command "gzip -d #{filegz}"
-  end
-
-  execute "decrypt_#{filesql}.enc" do
+  execute "decrypt_#{filepath}.gz.enc" do
     command "openssl smime -decrypt -binary -inkey #{priv_key_file} "\
-            "-in #{filesql}.enc -out #{filesql} -inform DEM"
-    only_if { dbx[:bak_encrypted] }
-    notifies :delete, "file[#{filesql}.enc]"
+            "-in #{filepath}.gz.enc -out #{filepath}.gz -inform DEM"
+    only_if { ::File.exist?("#{filepath}.gz.enc") }
+    notifies :delete, "file[#{filepath}.gz.enc]"
   end
 
-  execute "reload_#{filesql}" do
+  execute "gzip -d #{filepath}.gz"
+
+  execute "reload_#{filepath}" do
     command "mysql -h #{node['mariadb']['mysqld']['bind_address']} "\
             "-P #{node['mariadb']['mysqld']['port']} -u #{dbx[:db_user]} "\
-            "-p'#{dbx[:db_pass]}' -D #{dbx_name} < #{filesql}"
-    notifies :delete, "file[#{filesql}]"
+            "-p'#{dbx[:db_pass]}' -D #{dbx_name} < #{filepath}"
+    notifies :delete, "file[#{filepath}]"
     sensitive true
   end
 
-  file "#{filesql}.enc" do
+  file "#{filepath}.gz.enc" do
     action :nothing
   end
 
-  file filesql do
+  file filepath do
     action :nothing
   end
 end
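
The restore now mirrors the new backup order: decrypt `.gz.enc` down to `.gz`, then gunzip, then feed the `.sql` file to mysql. A rough shell equivalent of the three resources above (paths and names are placeholders; the `-inform DEM` flag is spelled as in the recipe):

```ruby
# Sketch only: the equivalent shell steps for an encrypted dump 'mydb.sql'.
bash 'restore_order_example' do
  code <<-EOS
    openssl smime -decrypt -binary -inkey /path/to/priv.key \\
      -in /tmp/mydb.sql.gz.enc -out /tmp/mydb.sql.gz -inform DEM
    gzip -d /tmp/mydb.sql.gz
    # then: mysql ... -D mydb < /tmp/mydb.sql (the reload_ resource above)
  EOS
end
```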
@@ -10,7 +10,10 @@
 set -e
 
 suffix=.backup_db_to_s3
-[ -f /tmp/*"$suffix" ] && exit 200
+if [ -f /tmp/*"$suffix" ] ; then
+  ( >&2 echo "[ERROR] Another operation might still be in progress" )
+  exit 200
+fi
 tmp_file=$( mktemp --suffix "$suffix" )
 
 <% bak_dir = "#{Chef::Config[:file_cache_path]}/backup_db_to_s3" -%>
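
The lock check relies on the `mktemp --suffix` marker file staying in `/tmp` for the duration of a run. Roughly the same test in plain Ruby (a sketch, not part of the template):

```ruby
# Any leftover /tmp/*.backup_db_to_s3 file means a run is still in progress.
locks = Dir.glob('/tmp/*.backup_db_to_s3')
abort '[ERROR] Another operation might still be in progress' unless locks.empty?
```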
@@ -24,15 +27,6 @@ aws_bin=<%= @aws_bin %>
 mysqldump_bin=<%= @mysqldump_bin %>
 pub_key_file=<%= @pub_key_file %>
 
-log_dir=/var/log/backup_db_to_s3
-if [[ ! -d "$log_dir" ]] ; then
-  mkdir -p "$log_dir"
-fi
-
-exec 3>&1 4>&2
-trap 'exec 2>&4 1>&3' 0 1 2 3
-exec 1>>"${log_dir}/backup_db_to_s3.log" 2>&1
-
 if [[ ! -d "$bak_dir" ]] ; then
   echo "$(date) : Create backup directory."
   mkdir -p "$bak_dir"
@@ -55,7 +49,7 @@ export_db() {
 # Encrypt a file using OpenSSL and a given public key.
 # The original file will be replaced by a new file, suffixed with '.enc'.
 # Args:
-#   $1 = dump file filename, e.g. 'mydb.sql'
+#   $1 = compressed dump file filename, e.g. 'mydb.sql.gz'
 encrypt_file() {
   echo "$(date) : Encrypt file ${1}."
   openssl smime -encrypt -binary -text -aes256 -in "${bak_dir}/${1}" \
@@ -65,7 +59,7 @@ encrypt_file() {
 
 # Compress the backup file with gzip.
 # Args:
-#   $1 = dump file filename, e.g. 'mydb.sql', 'mydb.sql.enc'
+#   $1 = dump file filename, e.g. 'mydb.sql'
 compress_backup_file() {
   echo "$(date) : Gzip file ${1}."
   gzip "${bak_dir}/${1}"
@@ -73,15 +67,14 @@ compress_backup_file() {
 
 # Rotate the current backups in S3.
 # Args:
-#   $1 = dump file filename, e.g. 'mydb.sql', 'mydb.sql.enc'
+#   $1 = resulting dump filename, e.g. 'mydb.sql.gz', 'mydb.sql.gz.enc'
 #   $2 = max number of backup files to store at a time
 increment_backup_names() {
-  bak_keyname="${1}.gz"
+  bak_keyname=$1
   max_backups=$2
 
   baks=$( "$aws_bin" --output text --region "$region" \
-    s3api list-objects --bucket "$bucket" \
-    | grep '^CONTENTS' | cut -f3 | grep "^${bak_keyname}" || echo "" )
+    s3 ls "s3://${bucket}/" | awk '{ printf("%s\n", $4); }' || echo "" )
 
   echo "$(date) : Backup rotation for ${bak_keyname}."
   start=$((max_backups - 1))
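
`aws s3 ls` prints one `date time size key` row per object, so awk's `$4` is the key name; note that, unlike the old `grep "^${bak_keyname}"` pipeline, the new listing is not filtered to the current key. The same column pick in Ruby, with made-up listing data:

```ruby
listing = "2016-08-01 10:00:05    1048576 mydb.sql.gz\n" \
          "2016-08-01 10:00:09     524288 mydb.sql.gz.1\n"
keys = listing.lines.map { |line| line.split[3] }
# => ["mydb.sql.gz", "mydb.sql.gz.1"]
```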
@@ -106,15 +99,16 @@ increment_backup_names() {
 
 # Upload the compressed db backup file.
 # Args:
-#   $1 = dump file filename, e.g. 'mydb.sql', 'mydb.sql.enc'
+#   $1 = resulting dump filename, e.g. 'mydb.sql.gz', 'mydb.sql.gz.enc'
 upload_to_s3() {
-  echo "$(date) : Upload ${1}.gz to S3 bucket ${bucket}."
+  echo "$(date) : Upload ${1} to S3 bucket ${bucket}."
   "$aws_bin" --region "$region" \
-    s3 mv "${bak_dir}/${1}.gz" "s3://${bucket}/${1}.gz"
+    s3 mv "${bak_dir}/${1}" "s3://${bucket}/${1}"
 }
 
 # First, perform mysqldump on each database (and encrypt if desired):
 
+<% bfname = '' -%>
 <% @db_map.each do |db| -%>
 <% if db.is_a?(Array) -%>
 <% db_name = db[0] -%>
@@ -123,17 +117,19 @@ upload_to_s3() {
 <% db_name = db[:db_name] -%>
 <% end -%>
 export_db <%= db_name %> <%= db[:db_user] %> '<%= db[:db_pass] %>' <%= db[:bak_filename] %>
+compress_backup_file <%= db[:bak_filename] %>
 <% if db[:bak_encrypted] -%>
-encrypt_file <%= db[:bak_filename] %>
+<% bfname = "#{db[:bak_filename]}.gz.enc" -%>
+encrypt_file <%= db[:bak_filename] %>.gz
+<% else -%>
+<% bfname = "#{db[:bak_filename]}.gz" -%>
 <% end -%>
 <% end -%>
 
-# Then compress and upload the backup files one by one:
+# Then upload the backup files one by one:
 
 <% @db_map.each do |db| -%>
 <% if db.is_a?(Array) then db = db[1] end -%>
 increment_backup_names <%= bfname %> <%= db[:bak_maxcopies] %>
 upload_to_s3 <%= bfname %>
 
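
Tracing the new ERB logic: `bfname` is set in the first loop to the final artifact name, which the second loop then rotates and uploads. A plain-Ruby sketch of the name each entry is meant to end up with (the entry values are illustrative, not from the commit):

```ruby
db = { bak_filename: 'appdb.sql', bak_encrypted: true }  # illustrative entry

bfname =
  if db[:bak_encrypted]
    "#{db[:bak_filename]}.gz.enc"   # gzip first, then encrypt
  else
    "#{db[:bak_filename]}.gz"
  end
# => "appdb.sql.gz.enc"; this is what increment_backup_names and
#    upload_to_s3 receive in the second loop.
```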