Commit 470e2e7f7f988bd1d7e94883ec7651d58f113352
Committed by Earth Ugat · 1 parent 8a37bc3e

Bump to v0.3.0

Showing 8 changed files with 157 additions and 32 deletions
| ... | ... | @@ -5,6 +5,9 @@ This installs MariaDB 10.0 by default and initiates databases and users. It can |
| 5 | 5 | |
| 6 | 6 | The server is assumed to be using an IAM role with S3 bucket read/write access, instead of a physical credentials file. |
| 7 | 7 | |
| 8 | + | |
| 9 | +When encryption is enabled for DB backups, a single private/public key pair is shared across all databases in the `db_map` attribute; encryption itself is enabled per database (see Usage below). | |
| 10 | + | |
| 8 | 11 | ## Supported Platforms |
| 9 | 12 | |
| 10 | 13 | Ubuntu 14.04 |
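Since the key contents above are plain node attributes shared by every database, a wrapper cookbook would typically supply them from an encrypted data bag (or a similar secret store) rather than hard-coding them. A minimal sketch of such a wrapper recipe — the `secrets` data bag and `cfe_mariadb` item names are placeholders, not part of this cookbook:

```ruby
# Hypothetical wrapper recipe; bag and item names are illustrative only.
keys = Chef::EncryptedDataBagItem.load('secrets', 'cfe_mariadb')

# One key pair is shared by every database listed in db_map.
node.default['cfe-mariadb']['encrypt']['pub_key']  = keys['pub_key']   # used by the backup script
node.default['cfe-mariadb']['encrypt']['priv_key'] = keys['priv_key']  # used by reload_from_s3

include_recipe 'cfe-mariadb'
```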
| ... | ... | @@ -84,10 +87,42 @@ Ubuntu 14.04 |
| 84 | 87 | <td>If not using EC2 roles, enter AWS creds here</td> |
| 85 | 88 | <td><tt>nil</tt></td> |
| 86 | 89 | </tr> |
| 90 | + <tr> | |
| 91 | + <td><tt>['cfe-mariadb']['encrypt']['priv_key']</tt></td> | |
| 92 | + <td>String</td> | |
| 93 | +    <td>Contents of the private key file, used by the `reload_from_s3` recipe when reloading encrypted backups.</td> | |
| 94 | + <td><tt>nil</tt></td> | |
| 95 | + </tr> | |
| 96 | + <tr> | |
| 97 | + <td><tt>['cfe-mariadb']['encrypt']['pub_key']</tt></td> | |
| 98 | + <td>String</td> | |
| 99 | + <td>Contents of the public key file used by the backup script to encrypt files before uploading to the S3 bucket.</td> | |
| 100 | + <td><tt>nil</tt></td> | |
| 101 | + </tr> | |
| 87 | 102 | </table> |
| 88 | 103 | |
| 89 | 104 | ## Usage |
| 90 | 105 | |
| 106 | +### `node['cfe-mariadb']['db_map']` | |
| 107 | + | |
| 108 | +Example config of a single database: | |
| 109 | + | |
| 110 | +```ruby | |
| 111 | +{ | |
| 112 | + 'example_db_name' => { | |
| 113 | + :db_user => 'example_db_username', | |
| 114 | + :db_pass => 'supersecret_pass', | |
| 115 | + :bak_filename => 'example_db_name.sql', | |
| 116 | +    :bak_maxcopies => 30, | |
| 117 | + :bak_encrypted => false, | |
| 118 | + :char_set => 'latin1', | |
| 119 | + :collate => 'latin1_swedish_ci' | |
| 120 | + } | |
| 121 | +} | |
| 122 | +``` | |
| 123 | + | |
| 124 | +The properties `:bak_encrypted`, `:char_set`, and `:collate` are all optional and their default values are as shown above. | |
| 125 | + | |
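As a concrete illustration of the per-database toggle, a wrapper cookbook's attributes file might enable encryption for one database and leave another unencrypted, while both share the key pair configured above (all names and passwords below are placeholders):

```ruby
# Hypothetical two-database map; only billing_db opts into encryption.
default['cfe-mariadb']['db_map'] = {
  'billing_db' => {
    :db_user       => 'billing_user',
    :db_pass       => 'supersecret_pass',
    :bak_filename  => 'billing_db.sql',
    :bak_maxcopies => 30,
    :bak_encrypted => true      # uploaded to S3 as billing_db.sql.enc.gz
  },
  'blog_db' => {
    :db_user       => 'blog_user',
    :db_pass       => 'another_secret',
    :bak_filename  => 'blog_db.sql',
    :bak_maxcopies => 7         # :bak_encrypted defaults to false -> blog_db.sql.gz
  }
}
```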
| 91 | 126 | ### cfe-mariadb::default |
| 92 | 127 | |
| 93 | 128 | Include `cfe-mariadb` in your node's `run_list`: | ... | ... |
| ... | ... | @@ -26,8 +26,9 @@ |
| 26 | 26 | # :bak_filename => 'example_db_name.sql', |
| 27 | 27 | # :bak_maxcopies => 30 |
| 28 | 28 | ## Optional: |
| 29 | -# :char_set => 'latin1', | |
| 30 | -# :collate => 'latin1_swedish_ci' | |
| 29 | +# :char_set => 'latin1', | |
| 30 | +# :collate => 'latin1_swedish_ci', | |
| 31 | +# :bak_encrypted => false | |
| 31 | 32 | # } |
| 32 | 33 | # } |
| 33 | 34 | default['cfe-mariadb']['db_map'] = {} |
| ... | ... | @@ -76,6 +77,28 @@ default['cfe-mariadb']['backup']['logrotate']['options'] = %w{ |
| 76 | 77 | #default['cfe-mariadb']['reload']['aws_access_key_id'] = 'MYKEYID' |
| 77 | 78 | #default['cfe-mariadb']['reload']['aws_secret_access_key'] = 'MYSECRETKEY' |
| 78 | 79 | |
| 80 | +# Keys used to encrypt DB backup dumps stored in S3 (and to decrypt them on reload). | |
| 81 | +# 'priv_key': String. Contents of the private key file. | |
| 82 | +# | |
| 83 | +# Used only in recipe 'reload_from_s3' if some/all DB dumps | |
| 84 | +# to be reloaded are encrypted. | |
| 85 | +# | |
| 86 | +# File is automatically deleted after the recipe is run. | |
| 87 | +# 'pub_key': String. Contents of the public key file. | |
| 88 | +# | |
| 89 | +# Used by the backup script to encrypt the DB dump | |
| 90 | +# if ':bak_encrypted' is set to true for that DB. | |
| 91 | +# | |
| 92 | +# The key file will be stored in the same directory | |
| 93 | +# as the script as 'pub.key'. | |
| 94 | +# NOTE: | |
| 95 | +#   Encrypted dumps barely compress, so enabling encryption produces | |
| 96 | +#   much larger backup files and, for large databases, a noticeably | |
| 97 | +#   longer backup run. Encrypting DB backups is nevertheless still | |
| 98 | +#   recommended. | |
| 99 | +default['cfe-mariadb']['encrypt']['priv_key'] = nil | |
| 100 | +default['cfe-mariadb']['encrypt']['pub_key'] = nil | |
| 101 | + | |
| 79 | 102 | default['mariadb']['server_root_password'] = 'secretpassword' |
| 80 | 103 | default['mariadb']['mysqld']['bind_address'] = '127.0.0.1' |
| 81 | 104 | default['mariadb']['mysqld']['port'] = '3306' | ... | ... |
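Since the backup script passes `pub.key` to `openssl smime -encrypt` as the recipient, `pub_key` in practice holds a (typically self-signed) X.509 certificate in PEM form rather than a bare public key, and `priv_key` holds the matching RSA private key. A sketch of generating a compatible pair with Ruby's standard OpenSSL library (subject and output paths are arbitrary); the rough CLI equivalent is `openssl req -x509 -nodes -newkey rsa:4096 -keyout priv.key -out pub.key -days 3650 -subj '/CN=db-backup'`:

```ruby
require 'openssl'

# Generate an RSA key plus a self-signed certificate; the PEM strings go into
# ['cfe-mariadb']['encrypt']['priv_key'] and ['pub_key'] respectively.
key  = OpenSSL::PKey::RSA.new(4096)

cert = OpenSSL::X509::Certificate.new
cert.version    = 2                                 # X.509 v3
cert.serial     = 1
cert.subject    = OpenSSL::X509::Name.parse('/CN=db-backup')
cert.issuer     = cert.subject                      # self-signed
cert.public_key = key.public_key
cert.not_before = Time.now
cert.not_after  = Time.now + 10 * 365 * 24 * 3600   # ~10 years
cert.sign(key, OpenSSL::Digest::SHA256.new)

File.write('priv.key', key.to_pem)                  # keep this one secret
File.write('pub.key',  cert.to_pem)
```

Only the certificate (`pub.key`) stays on the backup host; the private key is written temporarily by `reload_from_s3` and deleted at the end of the run.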
| ... | ... | @@ -4,12 +4,13 @@ maintainer_email 'sysadmin @ chromedia.com' |
| 4 | 4 | license 'Apache License' |
| 5 | 5 | description 'Simplifies setup of MariaDB in Chromedia.' |
| 6 | 6 | long_description IO.read(File.join(File.dirname(__FILE__), 'README.md')) |
| 7 | -version '0.2.2' | |
| 7 | +version '0.3.0' | |
| 8 | 8 | |
| 9 | 9 | { |
| 10 | 10 | 'mariadb' => '0.3.1', |
| 11 | 11 | 'mysql2_chef_gem' => '1.1.0', |
| 12 | 12 | 'database' => '5.1.2', |
| 13 | + 'openssl' => '4.4.0', | |
| 13 | 14 | 'awscli' => '1.0.1', |
| 14 | 15 | 'cron' => '1.7.4' |
| 15 | 16 | }.each { |cb, ver| depends cb, '~> ' + ver } | ... | ... |
| ... | ... | @@ -27,6 +27,8 @@ node.default['cfe-mariadb']['backup']['script_dir'] = |
| 27 | 27 | ::File.join(node['mariadb']['configuration']['path'], 'scripts') unless |
| 28 | 28 | node['cfe-mariadb']['backup']['script_dir'] |
| 29 | 29 | |
| 30 | +package 'gzip' | |
| 31 | +include_recipe 'openssl::upgrade' | |
| 30 | 32 | include_recipe 'awscli' |
| 31 | 33 | |
| 32 | 34 | md = node['cfe-mariadb'] |
| ... | ... | @@ -34,8 +36,21 @@ mdb = md['backup'] |
| 34 | 36 | mdbc = mdb['cron'] |
| 35 | 37 | mdbl = mdb['logrotate'] |
| 36 | 38 | |
| 39 | +directory mdb['script_dir'] do | |
| 40 | + recursive true | |
| 41 | +end | |
| 42 | + | |
| 43 | +pub_key_file = "#{mdb['script_dir']}/pub.key" | |
| 44 | + | |
| 45 | +file pub_key_file do | |
| 46 | + content md['encrypt']['pub_key'] | |
| 47 | + mode 0600 | |
| 48 | + owner 'root' | |
| 49 | + group 'root' | |
| 50 | + only_if { md['encrypt']['pub_key'] } | |
| 51 | +end | |
| 52 | + | |
| 37 | 53 | template "#{mdb['script_dir']}/backup_db_to_s3" do |
| 38 | - only_if "test -d #{mdb['script_dir']} || mkdir -p #{mdb['script_dir']}" | |
| 39 | 54 | variables( |
| 40 | 55 | :db_map => md['db_map'], |
| 41 | 56 | :db_ip => node['mariadb']['mysqld']['bind_address'], |
| ... | ... | @@ -43,7 +58,8 @@ template "#{mdb['script_dir']}/backup_db_to_s3" do |
| 43 | 58 | :s3_region => md['s3_region'], |
| 44 | 59 | :s3_bucket => md['s3_bucket'], |
| 45 | 60 | :aws_bin => mdb['aws_bin'], |
| 46 | - :mysqldump_bin => mdb['mysqldump_bin'] | |
| 61 | + :mysqldump_bin => mdb['mysqldump_bin'], | |
| 62 | + :pub_key_file => pub_key_file | |
| 47 | 63 | ) |
| 48 | 64 | end |
| 49 | 65 | ... | ... |
| ... | ... | @@ -21,12 +21,25 @@ |
| 21 | 21 | # Download the gzip of a MySQL dump from an S3 bucket, |
| 22 | 22 | # then load it up into a (preferably empty) database. |
| 23 | 23 | |
| 24 | +package 'gzip' | |
| 25 | +include_recipe 'openssl::upgrade' | |
| 24 | 26 | include_recipe 'awscli' |
| 25 | 27 | |
| 26 | 28 | tmp_dir = ::File.join(Chef::Config[:file_cache_path], 'db_dumps') |
| 27 | 29 | manual_creds = node['cfe-mariadb'].has_key?('reload') && |
| 28 | 30 | node['cfe-mariadb']['reload'].has_key?('aws_access_key_id') |
| 29 | 31 | |
| 32 | +priv_key_file = "#{tmp_dir}/priv.key" | |
| 33 | + | |
| 34 | +file priv_key_file do | |
| 35 | + content node['cfe-mariadb']['encrypt']['priv_key'] || '' | |
| 36 | + mode 0600 | |
| 37 | + owner 'root' | |
| 38 | + group 'root' | |
| 39 | + sensitive true | |
| 40 | + only_if "test -d #{tmp_dir} || mkdir -p #{tmp_dir}" | |
| 41 | +end | |
| 42 | + | |
| 30 | 43 | node['cfe-mariadb']['db_map'].each do |dbx| |
| 31 | 44 | |
| 32 | 45 | if dbx.is_a?(Array) |
| ... | ... | @@ -36,25 +49,30 @@ node['cfe-mariadb']['db_map'].each do |dbx| |
| 36 | 49 | dbx_name = dbx[:db_name] |
| 37 | 50 | end |
| 38 | 51 | |
| 52 | + keyname = "#{dbx[:bak_filename]}#{dbx[:bak_encrypted] ? '.enc.gz' : '.gz'}" | |
| 53 | + filegz = "#{tmp_dir}/#{keyname}" | |
| 39 | 54 | filesql = "#{tmp_dir}/#{dbx[:bak_filename]}" |
| 40 | - filegz = "#{filesql}.gz" | |
| 41 | 55 | |
| 42 | 56 | awscli_s3_file filegz do |
| 43 | 57 | region node['cfe-mariadb']['s3_region'] |
| 44 | 58 | bucket node['cfe-mariadb']['s3_bucket'] |
| 45 | - key "#{dbx[:bak_filename]}.gz" | |
| 59 | + key keyname | |
| 60 | + only_if "test -d #{tmp_dir} || mkdir -p #{tmp_dir}" | |
| 46 | 61 | if manual_creds |
| 47 | 62 | aws_access_key_id node['cfe-mariadb']['reload']['aws_access_key_id'] |
| 48 | 63 | aws_secret_access_key node['cfe-mariadb']['reload']['aws_secret_access_key'] |
| 49 | 64 | end |
| 50 | - only_if "test -d #{tmp_dir} || mkdir -p #{tmp_dir}" | |
| 51 | - notifies :run, "execute[unpack_#{filegz}]", :immediately | |
| 52 | 65 | end |
| 53 | 66 | |
| 54 | 67 | execute "unpack_#{filegz}" do |
| 55 | 68 | command "gzip -d #{filegz}" |
| 56 | - notifies :run, "execute[reload_#{filesql}]", :immediately | |
| 57 | - action :nothing | |
| 69 | + end | |
| 70 | + | |
| 71 | + execute "decrypt_#{filesql}.enc" do | |
| 72 | + command "openssl smime -decrypt -binary -inkey #{priv_key_file} "\ | |
| 73 | +    "-in #{filesql}.enc -out #{filesql} -inform DER" | |
| 74 | + only_if { dbx[:bak_encrypted] } | |
| 75 | + notifies :delete, "file[#{filesql}.enc]" | |
| 58 | 76 | end |
| 59 | 77 | |
| 60 | 78 | execute "reload_#{filesql}" do |
| ... | ... | @@ -63,10 +81,17 @@ node['cfe-mariadb']['db_map'].each do |dbx| |
| 63 | 81 | "-p'#{dbx[:db_pass]}' -D #{dbx_name} < #{filesql}" |
| 64 | 82 | notifies :delete, "file[#{filesql}]" |
| 65 | 83 | sensitive true |
| 66 | - action :nothing | |
| 84 | + end | |
| 85 | + | |
| 86 | + file "#{filesql}.enc" do | |
| 87 | + action :nothing | |
| 67 | 88 | end |
| 68 | 89 | |
| 69 | 90 | file filesql do |
| 70 | 91 | action :nothing |
| 71 | 92 | end |
| 72 | 93 | end |
| 94 | + | |
| 95 | +file priv_key_file do | |
| 96 | + action :delete | |
| 97 | +end | ... | ... |
| ... | ... | @@ -19,9 +19,9 @@ |
| 19 | 19 | # |
| 20 | 20 | |
| 21 | 21 | mysql2_chef_gem 'default' do |
| 22 | + provider Chef::Provider::Mysql2ChefGem::Mariadb | |
| 22 | 23 | gem_version '0.4.4' |
| 23 | - provider Chef::Provider::Mysql2ChefGem::Mariadb | |
| 24 | - action :install | |
| 24 | + action :install | |
| 25 | 25 | end |
| 26 | 26 | |
| 27 | 27 | # Prepare the needed databases and users. | ... | ... |
| 1 | 1 | #!/bin/bash |
| 2 | 2 | # Generated by Chef. |
| 3 | 3 | # |
| 4 | -# Perform mysqldump on databases and upload the | |
| 5 | -# resulting backup files into an S3 bucket. | |
| 4 | +# Perform mysqldump on databases, optionally encrypt them, | |
| 5 | +# and upload the resulting backup files into an S3 bucket. | |
| 6 | +# | |
| 7 | +# This script is not meant to be run manually, | |
| 8 | +# but instead through a regular cron job. | |
| 6 | 9 | |
| 7 | 10 | set -e |
| 8 | 11 | |
| ... | ... | @@ -19,6 +22,7 @@ region=<%= @s3_region %> |
| 19 | 22 | |
| 20 | 23 | aws_bin=<%= @aws_bin %> |
| 21 | 24 | mysqldump_bin=<%= @mysqldump_bin %> |
| 25 | +pub_key_file=<%= @pub_key_file %> | |
| 22 | 26 | |
| 23 | 27 | log_dir=/var/log/backup_db_to_s3 |
| 24 | 28 | if [[ ! -d "$log_dir" ]] ; then |
| ... | ... | @@ -48,9 +52,20 @@ export_db() { |
| 48 | 52 | -u "$2" -p"$3" "$1" > "${bak_dir}/${4}" |
| 49 | 53 | } |
| 50 | 54 | |
| 51 | -# Compress the backup file with gzip. | |
| 55 | +# Encrypt a file using OpenSSL and a given public key. | |
| 56 | +# The original file will be replaced by a new file, suffixed with '.enc'. | |
| 52 | 57 | # Args: |
| 53 | 58 | # $1 = dump file filename, e.g. 'mydb.sql' |
| 59 | +encrypt_file() { | |
| 60 | + echo "$(date) : Encrypt file ${1}." | |
| 61 | +  openssl smime -encrypt -binary -aes256 -in "${bak_dir}/${1}" \ | |
| 62 | + -out "${bak_dir}/${1}.enc" -outform DER "${pub_key_file}" | |
| 63 | + rm "${bak_dir}/${1}" | |
| 64 | +} | |
| 65 | + | |
| 66 | +# Compress the backup file with gzip. | |
| 67 | +# Args: | |
| 68 | +# $1 = dump file filename, e.g. 'mydb.sql', 'mydb.sql.enc' | |
| 54 | 69 | compress_backup_file() { |
| 55 | 70 | echo "$(date) : Gzip file ${1}." |
| 56 | 71 | gzip "${bak_dir}/${1}" |
| ... | ... | @@ -58,7 +73,7 @@ compress_backup_file() { |
| 58 | 73 | |
| 59 | 74 | # Rotate the current backups in S3. |
| 60 | 75 | # Args: |
| 61 | -# $1 = dump file filename, e.g. 'mydb.sql' | |
| 76 | +# $1 = dump file filename, e.g. 'mydb.sql', 'mydb.sql.enc' | |
| 62 | 77 | # $2 = max number of backup files to store at a time |
| 63 | 78 | increment_backup_names() { |
| 64 | 79 | bak_keyname="${1}.gz" |
| ... | ... | @@ -91,31 +106,37 @@ increment_backup_names() { |
| 91 | 106 | |
| 92 | 107 | # Upload the compressed db backup file. |
| 93 | 108 | # Args: |
| 94 | -# $1 = dump file filename, e.g. 'mydb.sql' | |
| 109 | +# $1 = dump file filename, e.g. 'mydb.sql', 'mydb.sql.enc' | |
| 95 | 110 | upload_to_s3() { |
| 96 | 111 | echo "$(date) : Upload ${1}.gz to S3 bucket ${bucket}." |
| 97 | 112 | "$aws_bin" --region "$region" \ |
| 98 | 113 | s3 mv "${bak_dir}/${1}.gz" "s3://${bucket}/${1}.gz" |
| 99 | 114 | } |
| 100 | 115 | |
| 101 | -# First, perform mysqldump on each database. | |
| 116 | +# First, perform mysqldump on each database (and encrypt if desired): | |
| 117 | + | |
| 102 | 118 | <% @db_map.each do |db| -%> |
| 103 | -<% if db.is_a?(Array) -%> | |
| 104 | -<% db_name = db[0] -%> | |
| 105 | -<% db = db[1] -%> | |
| 106 | -<% else -%> | |
| 107 | -<% db_name = db[:db_name] -%> | |
| 108 | -<% end -%> | |
| 119 | +<% if db.is_a?(Array) -%> | |
| 120 | +<% db_name = db[0] -%> | |
| 121 | +<% db = db[1] -%> | |
| 122 | +<% else -%> | |
| 123 | +<% db_name = db[:db_name] -%> | |
| 124 | +<% end -%> | |
| 109 | 125 | export_db <%= db_name %> <%= db[:db_user] %> '<%= db[:db_pass] %>' <%= db[:bak_filename] %> |
| 126 | +<% if db[:bak_encrypted] -%> | |
| 127 | +encrypt_file <%= db[:bak_filename] %> | |
| 128 | +<% end -%> | |
| 110 | 129 | <% end -%> |
| 111 | 130 | |
| 112 | -# Then compress and upload the backup files one by one. | |
| 131 | +# Then compress and upload the backup files one by one: | |
| 132 | + | |
| 113 | 133 | <% @db_map.each do |db| -%> |
| 114 | -<% if db.is_a?(Array) then db = db[1] end -%> | |
| 115 | -compress_backup_file <%= db[:bak_filename] %> | |
| 116 | -increment_backup_names <%= db[:bak_filename] %> <%= db[:bak_maxcopies] %> | |
| 117 | -upload_to_s3 <%= db[:bak_filename] %> | |
| 134 | +<% if db.is_a?(Array) then db = db[1] end -%> | |
| 135 | +<% bfname = db[:bak_encrypted] ? "#{db[:bak_filename]}.enc" : db[:bak_filename] -%> | |
| 136 | +compress_backup_file <%= bfname %> | |
| 137 | +increment_backup_names <%= bfname %> <%= db[:bak_maxcopies] %> | |
| 138 | +upload_to_s3 <%= bfname %> | |
| 118 | 139 | |
| 119 | -<% end -%> | |
| 140 | +<% end -%> | |
| 120 | 141 | rm "$tmp_file" |
| 121 | 142 | echo "$(date) : Done." | ... | ... |