Commit c197ede279dbf1b804929f6090b282420070a3d9
Committed by Earth Ugat
1 parent 66c86a25

Version v0.5.0. Make recipe reload_from_s3 more idempotent.

Showing 7 changed files with 75 additions and 56 deletions.
Changelog:

+## 0.5.0
+### Added
+- Add boolean attributes ':backup' and ':reload' to db_map. They give the option of whether or not to include that database when using the 'backup2s3' and 'reload_from_s3' recipes, respectively.
+- Use a timestamp file for idempotency checks for recipe 'reload_from_s3'.
+
 ## 0.4.1
 ### Fixed
 - Logic bug in backup script template where only the last database gets backed up.
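The two new flags ride along inside each `db_map` entry. Below is a minimal sketch of what an entry might look like in a wrapper cookbook's attributes, reusing the placeholder names from the cookbook's own examples and assuming the hash-of-hashes form of `db_map`:

```ruby
# Sketch only: the 'example_db_name' values are the cookbook's placeholder names.
default['cfe-mariadb']['db_map'] = {
  'example_db_name' => {
    :db_user       => 'example_db_user',
    :db_pass       => 'example_db_password',
    :bak_filename  => 'example_db_name.sql',
    :bak_maxcopies => 30,
    :backup        => true,   # keep this database in the backup2s3 script
    :reload        => false   # skip it when reload_from_s3 runs
  }
}
```

Both flags are optional; as the recipe and template changes below show, a database is only skipped when the flag is present and explicitly `false`.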
README.md:

@@ -94,8 +94,8 @@ Example config of a single database:
 :bak_filename => 'example_db_name.sql',
 :bak_maxcopies => 30
 :bak_encrypted => false,
-:char_set => 'latin1',
-:collate => 'latin1_swedish_ci'
+:char_set => 'utf8',
+:collate => 'utf8_general_ci'
 }
 }
 ```
@@ -132,6 +132,7 @@ This will install the backup script and also enable a cronjob to regularly run t
 Kind of the reverse of `backup2s3`. Download a gzip of a MySQL dump file from an S3 bucket, then load it up into a database. Do this for every database given in `node['cfe-mariadb']['db_map']`.

 This recipe assumes the node is using an EC2 role that can access the given S3 bucket. Otherwise, enter the AWS credentials in `node.default['cfe-mariadb']['reload']['aws_access_key_id']` and `node.default['cfe-mariadb']['reload']['aws_secret_access_key']`.
+
 ## License and Authors

 Author:: Earth U. (<sysadmin @ chromedia.com>)
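For nodes that do not run under an EC2 role with access to the bucket, the two credential attributes named in that README paragraph can be set from a wrapper cookbook. A minimal sketch, with obviously fake key material:

```ruby
# Only needed when no suitable instance role is available; both values are placeholders.
node.default['cfe-mariadb']['reload']['aws_access_key_id']     = 'AKIAEXAMPLEKEYID'
node.default['cfe-mariadb']['reload']['aws_secret_access_key'] = 'exampleSecretAccessKeyValue'
```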
Cookbook attributes:

@@ -25,10 +25,15 @@
 # :db_pass => 'example_db_password',
 # :bak_filename => 'example_db_name.sql',
 # :bak_maxcopies => 30
+
 ## Optional:
 # :char_set => 'utf8',
 # :collate => 'utf8_general_ci',
-# :bak_encrypted => false
+# :bak_encrypted => false,
+
+## Whether to include in backup script, and reload DB data during Chef run:
+# :backup => true,
+# :reload => true
 # }
 # }
 default['cfe-mariadb']['db_map'] = {}
@@ -68,6 +73,9 @@ default['cfe-mariadb']['backup']['logrotate']['options'] = %w{
 notifempty
 }

+default['cfe-mariadb']['reload']['file_stamp'] =
+  "#{node['mariadb']['configuration']['path']}/reloaded.stamp"
+
 # Optional attributes for recipe 'reload_from_s3' only
 # (Usually used during testing)
 # Recommend: use EC2 instances with proper S3 access roles and
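The new `file_stamp` attribute is the file that `reload_from_s3` now checks before doing any work; it defaults to `reloaded.stamp` under the MariaDB configuration path. Overriding it is an ordinary attribute assignment. A sketch, with an illustrative (not default) path:

```ruby
# Illustrative override; any writable path on the node works.
node.default['cfe-mariadb']['reload']['file_stamp'] = '/var/lib/cfe-mariadb/reloaded.stamp'
```

Deleting the stamp file before a Chef run is enough to make the recipe fetch and reload the dumps again.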
Cookbook metadata:

@@ -4,7 +4,7 @@ maintainer_email 'sysadmin @ chromedia.com'
 license 'Apache License'
 description 'Simplifies setup of MariaDB in Chromedia.'
 long_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))
-version '0.4.1'
+version '0.5.0'

 {
 'mariadb' => '0.3.1',
Recipe reload_from_s3:

@@ -25,70 +25,70 @@ package 'gzip'
 include_recipe 'openssl::upgrade'
 include_recipe 'awscli'

-tmp_dir = ::File.join(Chef::Config[:file_cache_path], 'db_dumps')
 manual_creds = node['cfe-mariadb'].has_key?('reload') &&
                node['cfe-mariadb']['reload'].has_key?('aws_access_key_id')

+tmp_dir = ::File.join(Chef::Config[:file_cache_path], 'db_dumps')
 priv_key_file = "#{tmp_dir}/priv.key"

-file priv_key_file do
-  content node['cfe-mariadb']['encrypt']['priv_key'] || ''
-  mode 0600
-  owner 'root'
-  group 'root'
-  sensitive true
-  only_if "test -d #{tmp_dir} || mkdir -p #{tmp_dir}"
-end
-
-node['cfe-mariadb']['db_map'].each do |dbx|
+unless ::File.exist?(node['cfe-mariadb']['reload']['file_stamp'])
+  directory(tmp_dir) { recursive true }

-  if dbx.is_a?(Array)
-    dbx_name = dbx[0]
-    dbx = dbx[1]
-  else
-    dbx_name = dbx[:db_name]
+  file priv_key_file do
+    mode 0600
+    content node['cfe-mariadb']['encrypt']['priv_key'] || ''
+    sensitive true
   end

-  keyname = "#{dbx[:bak_filename]}.gz#{dbx[:bak_encrypted] ? '.enc' : ''}"
-  filepath = "#{tmp_dir}/#{dbx[:bak_filename]}"
-
-  awscli_s3_file "#{tmp_dir}/#{keyname}" do
-    region node['cfe-mariadb']['s3_region']
-    bucket node['cfe-mariadb']['s3_bucket']
-    key keyname
-    only_if "test -d #{tmp_dir} || mkdir -p #{tmp_dir}"
-    if manual_creds
-      aws_access_key_id node['cfe-mariadb']['reload']['aws_access_key_id']
-      aws_secret_access_key node['cfe-mariadb']['reload']['aws_secret_access_key']
+  node['cfe-mariadb']['db_map'].each do |dbx|
+
+    if dbx.is_a?(Array)
+      dbx_name = dbx[0]
+      dbx = dbx[1]
+    else
+      dbx_name = dbx[:db_name]
     end
-  end

-  execute "decrypt_#{filepath}.gz.enc" do
-    command "openssl smime -decrypt -binary -inkey #{priv_key_file} "\
-            "-in #{filepath}.gz.enc -out #{filepath}.gz -inform DEM"
-    only_if { ::File.exist?("#{filepath}.gz.enc") }
-    notifies :delete, "file[#{filepath}.gz.enc]"
-  end
+    unless dbx.has_key?(:reload) && dbx[:reload] == false
+      keyname = "#{dbx[:bak_filename]}.gz#{dbx[:bak_encrypted] ? '.enc' : ''}"
+      filepath = "#{tmp_dir}/#{dbx[:bak_filename]}"

-  execute "gzip -d #{filepath}.gz"
+      awscli_s3_file "#{tmp_dir}/#{keyname}" do
+        region node['cfe-mariadb']['s3_region']
+        bucket node['cfe-mariadb']['s3_bucket']
+        key keyname
+        if manual_creds
+          aws_access_key_id node['cfe-mariadb']['reload']['aws_access_key_id']
+          aws_secret_access_key node['cfe-mariadb']['reload']['aws_secret_access_key']
+        end
+      end

-  execute "reload_#{filepath}" do
-    command "mysql -h #{node['mariadb']['mysqld']['bind_address']} "\
-            "-P #{node['mariadb']['mysqld']['port']} -u #{dbx[:db_user]} "\
-            "-p'#{dbx[:db_pass]}' -D #{dbx_name} < #{filepath}"
-    notifies :delete, "file[#{filepath}]"
-    sensitive true
-  end
+      execute "decrypt_#{filepath}.gz.enc" do
+        command "openssl smime -decrypt -binary -inkey #{priv_key_file} "\
+                "-in #{filepath}.gz.enc -out #{filepath}.gz -inform DEM"
+        only_if { ::File.exist?("#{filepath}.gz.enc") }
+        notifies :delete, "file[#{filepath}.gz.enc]"
+      end

-  file "#{filepath}.gz.enc" do
-    action :nothing
-  end
+      execute "gzip -d #{filepath}.gz"
+
+      execute "reload_#{filepath}" do
+        command "mysql -h #{node['mariadb']['mysqld']['bind_address']} "\
+                "-P #{node['mariadb']['mysqld']['port']} "\
+                "-u #{dbx[:db_user]} -p'#{dbx[:db_pass]}' "\
+                "-D #{dbx_name} < #{filepath}"
+        notifies :delete, "file[#{filepath}]"
+        sensitive true
+      end

-  file filepath do
-    action :nothing
+      file("#{filepath}.gz.enc") { action :nothing }
+      file(filepath) { action :nothing }
+
+      file node['cfe-mariadb']['reload']['file_stamp'] do
+        content %x{ date +"%FT%T" }
+      end
+    end
   end
-end

-file priv_key_file do
-  action :delete
+  file(priv_key_file) { action :delete }
 end
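Stripped of the per-database work, the idempotency scheme the rewritten recipe follows is roughly the following; this is a condensed restatement of the diff above, not additional code from the commit:

```ruby
stamp = node['cfe-mariadb']['reload']['file_stamp']

unless ::File.exist?(stamp)
  # ... download, decrypt, gunzip and load each dump ...

  # Written once a database has actually been reloaded, so the next
  # converge skips the whole block.
  file stamp do
    content Time.now.strftime('%FT%T')  # the recipe itself shells out: %x{ date +"%FT%T" }
  end
end
```

Note that in the actual recipe the stamp resource sits inside the per-database loop, so it is only declared when at least one database is not excluded via `:reload => false`.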
Backward-compatibility recipe:

@@ -18,6 +18,7 @@
 # limitations under the License.
 #

+# [DEPRECATED]
 # This recipe is just here for backward-compatibility reasons.
 # The previous backups scripts create .tar.gz files, instead of .gz, so
 # this recipe is here to get .tar.gz backups.
Backup script template:

@@ -115,10 +115,12 @@ upload_to_s3() {
 <% else -%>
 <% db_name = db[:db_name] -%>
 <% end -%>
+<% unless db.has_key?(:backup) && db[:backup] == false -%>
 export_db <%= db_name %> <%= db[:db_user] %> '<%= db[:db_pass] %>' <%= db[:bak_filename] %>
 compress_backup_file <%= db[:bak_filename] %>
-<% if db[:bak_encrypted] -%>
+<% if db[:bak_encrypted] -%>
 encrypt_file <%= db[:bak_filename] %>.gz
+<% end -%>
 <% end -%>
 <% end -%>

@@ -131,9 +133,11 @@ encrypt_file <%= db[:bak_filename] %>.gz
 <% else -%>
 <% bfname = "#{db[:bak_filename]}.gz" -%>
 <% end -%>
+<% unless db.has_key?(:backup) && db[:backup] == false -%>
 increment_backup_names <%= bfname %> <%= db[:bak_maxcopies] %>
 upload_to_s3 <%= bfname %>

-<% end -%>
+<% end -%>
+<% end -%>
 rm "$tmp_file"
 echo "$(date) : Done."
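A quick way to see the new `:backup` guard behave is to render a stripped-down version of the template by hand. The snippet below is a standalone sketch: the database names are made up, and the `export_db`/`compress_backup_file` lines are just text being emitted, not the cookbook's real helper functions.

```ruby
require 'erb'

# Hypothetical db_map: one database kept, one opted out of backups.
db_map = {
  'kept_db'    => { :db_user => 'u1', :db_pass => 'p1', :bak_filename => 'kept_db.sql' },
  'skipped_db' => { :db_user => 'u2', :db_pass => 'p2', :bak_filename => 'skipped_db.sql',
                    :backup  => false }
}

template = <<~'ERB'
  <% db_map.each do |db_name, db| -%>
  <% unless db.has_key?(:backup) && db[:backup] == false -%>
  export_db <%= db_name %> <%= db[:db_user] %> '<%= db[:db_pass] %>' <%= db[:bak_filename] %>
  compress_backup_file <%= db[:bak_filename] %>
  <% end -%>
  <% end -%>
ERB

puts ERB.new(template, trim_mode: '-').result(binding)
# => export_db/compress_backup_file lines for kept_db only;
#    skipped_db is dropped because :backup => false.
```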