Showing 7 changed files with 302 additions and 0 deletions
Berksfile
0 → 100644
attributes/default.rb
0 → 100644
#
# Cookbook Name:: cfe-mariadb
# Attributes:: default
#
# Copyright 2016, Chromedia
#
# All rights reserved - Do Not Redistribute
#

## The 'db_map' attribute should follow this format:
# default['cfe-mariadb']['db_map'] = {
#   'example_db_name' => {
#     :db_user => 'example_db_username',
#     :db_pass => 'example_db_password',
#     :bak_filename => 'example_db_name.sql',
#     :bak_maxcopies => 30,
## Optional:
#     :char_set => 'latin1',
#     :collate => 'latin1_swedish_ci'
#   }
# }
default['cfe-mariadb']['db_map'] = {}
# Make sure to turn replication on only when necessary.
default['cfe-mariadb']['replication'] = false

default['cfe-mariadb']['backup']['s3_region'] = 'us-east-1'
default['cfe-mariadb']['backup']['s3_bucket'] = 'example-bucket'
default['cfe-mariadb']['backup']['aws_bin'] = '/usr/local/bin/aws'
default['cfe-mariadb']['backup']['mysqldump_bin'] = '/usr/bin/mysqldump'
# Path to the directory where the backup script should be placed.
default['cfe-mariadb']['backup']['script_dir'] = ::File.join(
  node['mariadb']['configuration']['path'], 'scripts'
)
default['cfe-mariadb']['backup']['cron']['min'] = '0'
default['cfe-mariadb']['backup']['cron']['hour'] = '0'
default['cfe-mariadb']['backup']['cron']['day'] = '*'
default['cfe-mariadb']['backup']['cron']['mon'] = '*'
default['cfe-mariadb']['backup']['cron']['wday'] = '*'
default['cfe-mariadb']['backup']['cron']['mailto'] = ''

default['mariadb']['server_root_password'] = 'password'
default['mariadb']['mysqld']['bind_address'] = '127.0.0.1'
default['mariadb']['mysqld']['port'] = '3306'
default['mariadb']['install']['type'] = 'package'
default['mariadb']['install']['version'] = '5.5'
default['mariadb']['forbid_remote_root'] = true
# io_capacity should roughly match the IO capacity of the EC2 instance.
# buffer_pool_size can be raised to 75% (0.75) of RAM on a dedicated server.
default['mariadb']['innodb']['io_capacity'] = '600'
# Token 7 of `free -m` output is the total RAM in MB; default the
# buffer pool to half of it.
default['mariadb']['innodb']['buffer_pool_size'] =
  ( %x(free -m).split(' ')[7].to_i * 0.5 ).round.to_s

## Tip:
## For encrypted attributes like passwords,
## something like the following is possible in the attributes file:
##
# secret = Chef::EncryptedDataBagItem.load(
#   node['cfe-mariadb']['data_bag'],
#   node['cfe-mariadb']['data_bag_item_secret']
# )
# default['cfe-mariadb']['some_prop'] = secret['db_pass']
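As a usage sketch (the database name, credentials, and bucket below are illustrative placeholders, not part of the cookbook), a wrapper cookbook or role could populate these attributes like so:

# Hypothetical wrapper attributes -- all values are placeholders.
default['cfe-mariadb']['db_map'] = {
  'blog' => {
    :db_user => 'blog_user',
    :db_pass => 'example-password',  # better loaded from an encrypted data bag
    :bak_filename => 'blog.sql',
    :bak_maxcopies => 14
  }
}
default['cfe-mariadb']['backup']['s3_bucket'] = 'my-db-backups'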
metadata.rb
0 → 100644
name 'cfe-mariadb'
maintainer 'Chromedia'
maintainer_email 'sysadmin@chromedia.com'
license 'All rights reserved'
description 'Simplifies setup of MariaDB in Chromedia.'
long_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))
version '0.1.0'

{
  'mariadb' => '0.2.12',
  'mysql2_chef_gem' => '1.0.2',
  'database' => '4.0.9',
  'awscli' => '1.0.1'
}.each { |cb, ver| depends cb, '~> ' + ver }

supports 'ubuntu', '>= 14.04'
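For clarity, the hash-and-each idiom above is just a compact way to declare the pinned dependencies; expanded, it is equivalent to:

depends 'mariadb',         '~> 0.2.12'
depends 'mysql2_chef_gem', '~> 1.0.2'
depends 'database',        '~> 4.0.9'
depends 'awscli',          '~> 1.0.1'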
recipes/backup2s3.rb
0 → 100644
#
# Cookbook Name:: cfe-mariadb
# Recipe:: backup2s3
#
# Copyright 2016, Chromedia
#
# All rights reserved - Do Not Redistribute
#

# Sets up the shell script that backs up the databases
# and uploads them to an S3 bucket.
# Also sets up the cron job that runs this script regularly.

# TODO: set up logrotate
include_recipe 'awscli'

md = node['cfe-mariadb']
mdb = md['backup']
mdbc = mdb['cron']
scr_dir = mdb['script_dir']

# Make sure the scripts directory exists.
directory scr_dir do
  recursive true
end

template "#{scr_dir}/backup_db_to_s3" do
  # The rendered script embeds DB passwords, so keep it root-only.
  owner 'root'
  group 'root'
  mode '0700'
  variables(
    :db_map => md['db_map'],
    :db_ip => node['mariadb']['mysqld']['bind_address'],
    :db_port => node['mariadb']['mysqld']['port'],
    :s3_region => mdb['s3_region'],
    :s3_bucket => mdb['s3_bucket'],
    :aws_bin => mdb['aws_bin'],
    :mysqldump_bin => mdb['mysqldump_bin']
  )
end

cron 'backup_db_to_s3' do
  command "bash #{scr_dir}/backup_db_to_s3"
  minute mdbc['min']
  hour mdbc['hour']
  day mdbc['day']
  month mdbc['mon']
  weekday mdbc['wday']
  mailto mdbc['mailto']
  path '/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin'
end
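With the default attributes (minute and hour '0', the rest '*'), the managed cron entry runs the backup script daily at midnight. A wrapper could shift the schedule by overriding the cron attributes, for example (times illustrative):

# Hypothetical override: run the backup at 02:30 every day instead.
default['cfe-mariadb']['backup']['cron']['min'] = '30'
default['cfe-mariadb']['backup']['cron']['hour'] = '2'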
recipes/default.rb
0 → 100644
#
# Cookbook Name:: cfe-mariadb
# Recipe:: default
#
# Copyright 2016, Chromedia
#
# All rights reserved - Do Not Redistribute
#

# Some notes:
# If the DB server is dedicated, it might be a good idea to
# add 'noatime' to the / disk's mount options.

chef_gem 'chef-rewind' do
  compile_time true if respond_to?(:compile_time)
end
require 'chef/rewind'

mysql2_chef_gem 'default' do
  action :install
end

include_recipe 'mariadb'

# The service is sometimes not restarted correctly;
# this rewind makes sure the right provider is used.
rewind 'service[mysql]' do
  supports :status => true, :restart => true
end

# If we are not replicating, drop the replication config.
# Binary logging actually causes an issue with some CiviCRM installations
# when using remote database servers.
unless node['cfe-mariadb']['replication']
  rewind 'mariadb_configuration[replication]' do
    action :remove
  end
end

# Prepare the needed databases and users.
con = {
  :port => node['mariadb']['mysqld']['port'],
  :username => 'root',
  :password => node['mariadb']['server_root_password']
}
# Read the merged attribute value, not just the default level.
node['cfe-mariadb']['db_map'].each do |dbx_name, dbx|

  mysql_database dbx_name do
    connection con
    action :create
    encoding dbx[:char_set] if dbx.has_key?(:char_set)
    collation dbx[:collate] if dbx.has_key?(:collate)
  end

  mysql_database_user dbx[:db_user] do
    connection con
    password dbx[:db_pass]
    database_name dbx_name
    host '%'
    privileges [:all]
    action :grant
  end
end
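For a single db_map entry like the hypothetical 'blog' example earlier, the loop above unrolls to roughly these two resources:

mysql_database 'blog' do
  connection con
  action :create
end

mysql_database_user 'blog_user' do
  connection con
  password 'example-password'
  database_name 'blog'
  host '%'
  privileges [:all]
  action :grant
end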
templates/default/backup_db_to_s3.erb
0 → 100644
#!/bin/bash
# Generated by Chef.
#
# Perform mysqldump on databases and upload the
# resulting backup files into an S3 bucket.

set -e

<% bak_dir = "#{Chef::Config[:file_cache_path]}/backup_db_to_s3" -%>
bak_dir=<%= bak_dir %>
db_host=<%= @db_ip %>
db_port=<%= @db_port %>
bucket=<%= @s3_bucket %>
region=<%= @s3_region %>

aws_bin=<%= @aws_bin %>
mysqldump_bin=<%= @mysqldump_bin %>

log_dir=/var/log/backup_db_to_s3
if [[ ! -d "$log_dir" ]] ; then
  mkdir -p "$log_dir"
fi
# Save stdout/stderr, restore them on exit or interrupt, and send all
# script output to the log file in the meantime.
exec 3>&1 4>&2
trap 'exec 2>&4 1>&3' 0 1 2 3
exec 1>>"${log_dir}/backup_db_to_s3.log" 2>&1

if [[ ! -d "$bak_dir" ]] ; then
  echo "$(date) : Create backup directory."
  mkdir -p "$bak_dir"
fi

# Perform mysqldump on a database.
# Args:
#   $1 = db name
#   $2 = db user
#   $3 = db password
#   $4 = dump file filename, e.g. 'mydb.sql'
export_db() {
  echo "$(date) : Export database ${1}."
  # --single-transaction gives a consistent InnoDB snapshot; it is
  # mutually exclusive with --lock-tables, so the latter is not used.
  "$mysqldump_bin" -h "$db_host" -P "$db_port" -C --opt \
    --no-create-db --single-transaction \
    -u "$2" -p"$3" "$1" > "${bak_dir}/${4}"
}

# Compress the backup file with gzip.
# Args:
#   $1 = dump file filename, e.g. 'mydb.sql'
compress_backup_file() {
  echo "$(date) : Gzip file ${1}."
  gzip "${bak_dir}/${1}"
}

# Rotate the current backups in S3: shift each existing copy to the
# next suffix (.1 -> .2, ...), dropping the oldest once the limit is hit.
# Args:
#   $1 = dump file filename, e.g. 'mydb.sql'
#   $2 = max number of backup files to store at a time
increment_backup_names() {
  bak_keyname="${1}.gz"
  max_backups=$2

  baks=$( "$aws_bin" --output text --region "$region" \
    s3api list-objects --bucket "$bucket" \
    | grep '^CONTENTS' | cut -f3 | grep "^${bak_keyname}" || echo "" )

  echo "$(date) : Backup rotation for ${bak_keyname}."
  start=$((max_backups - 1))

  for (( x=start ; x > 0 ; x-- )) ; do
    if echo "$baks" | grep -q "^${bak_keyname}\\.${x}\$" ; then
      newx=$((x + 1))
      if [[ $newx -lt $max_backups ]] ; then
        "$aws_bin" --region "$region" \
          s3 cp "s3://${bucket}/${bak_keyname}.${x}" \
          "s3://${bucket}/${bak_keyname}.${newx}"
      fi
    fi
  done

  if echo "$baks" | grep -q "^${bak_keyname}\$" ; then
    "$aws_bin" --region "$region" \
      s3 cp "s3://${bucket}/${bak_keyname}" \
      "s3://${bucket}/${bak_keyname}.1"
  fi
}

# Upload the compressed db backup file.
# Args:
#   $1 = dump file filename, e.g. 'mydb.sql'
upload_to_s3() {
  echo "$(date) : Upload ${1}.gz to S3 bucket ${bucket}."
  "$aws_bin" --region "$region" \
    s3 mv "${bak_dir}/${1}.gz" "s3://${bucket}/${1}.gz"
}

# First, perform mysqldump on each database.
<% @db_map.each do |db_name, db| -%>
export_db <%= db_name %> <%= db[:db_user] %> '<%= db[:db_pass] %>' <%= db[:bak_filename] %>
<% end -%>

# Then compress and upload the backup files one by one.
<% @db_map.each do |db_name, db| -%>
compress_backup_file <%= db[:bak_filename] %>
increment_backup_names <%= db[:bak_filename] %> <%= db[:bak_maxcopies] %>
upload_to_s3 <%= db[:bak_filename] %>

<% end -%>
echo "$(date) : Done."
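To make the rotation concrete: with :bak_filename 'mydb.sql' and :bak_maxcopies 3, the bucket holds at most mydb.sql.gz (newest), mydb.sql.gz.1, and mydb.sql.gz.2. On each run, .1 is copied over .2 (copying .2 to .3 is skipped because 3 is not below the limit), the current mydb.sql.gz is copied to .1, and the fresh dump is then moved up as the new mydb.sql.gz, so the oldest copy drops off.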