Commit 6c9f605142aeea3581e3743bc3a423de24f3b33a (1 parent: 14c62b36)
Bump to v0.3.0. Enable multiple instances of script. Enable encryption. Add timestamp to stored backups.
Showing 8 changed files with 272 additions and 148 deletions
... | ... | @@ -8,16 +8,16 @@ driver: |
8 | 8 | subnet_id: subnet-d530d8b1 |
9 | 9 | instance_type: t2.micro |
10 | 10 | associate_public_ip: true |
11 | - require_chef_omnibus: true | |
11 | + require_chef_omnibus: 12.12.15 | |
12 | 12 | shared_credentials_profile: earth |
13 | 13 | |
14 | 14 | provisioner: |
15 | - name: chef_solo | |
15 | + name: chef_zero | |
16 | 16 | |
17 | 17 | platforms: |
18 | 18 | - name: ubuntu-14.04 |
19 | 19 | driver: |
20 | - image_id: ami-50946030 | |
20 | + image_id: ami-548e2d34 | |
21 | 21 | transport: |
22 | 22 | username: ubuntu |
23 | 23 | ssh_key: ~/.ssh/cfe_stg_20160222.pem | ... | ... |
1 | -# 0.2.1 | |
1 | +## 0.3.0 - 2016-11-25 | |
2 | +### Changed | |
3 | +- 'dirs' attribute has been replaced with 'backups', which allows creating multiple instances of the script, each with a different filename. | |
4 | +- Maximum number of backup copies can now be specified for each file/directory to be backed up. | |
5 | +- Attributes for cron entries have been simplified. | |
6 | +- Definition for 'aws_tar_extract' now includes options for decrypting encrypted tarballs. The contents of the private key file must be provided in this case. | |
2 | 7 | |
3 | -Include tar cookbook in default recipe. | |
8 | +### Added | |
9 | +- An option to enable encryption is provided for each file/directory backup. If encryption is used, the proper encryption keys must be provided beforehand as node attributes. | |
10 | +- Backup copies in S3 are now kept in their own subfolders (prefixes), along with a text file recording the timestamp when that backup was originally uploaded. | |
4 | 11 | |
5 | -# 0.2.0 | |
12 | +## 0.2.1 | |
13 | +- Include tar cookbook in default recipe. | |
6 | 14 | |
7 | -Add definition aws_tar_extract to abstract reloading of backups. | |
15 | +## 0.2.0 | |
16 | +- Add definition aws_tar_extract to abstract reloading of backups. | |
8 | 17 | |
9 | -# 0.1.0 | |
10 | - | |
11 | -Initial release of backup-file2s3 | |
18 | +## 0.1.0 | |
19 | +- Initial release of backup-file2s3 | ... | ... |
1 | 1 | # backup-file2s3-cookbook |
2 | 2 | |
3 | -Installs a script that backs up one or more directories into an S3 bucket. Also sets up the cronjob to regularly run said script. | |
3 | +Installs one or more scripts that back up files/directories into an S3 bucket. Also sets up cron jobs to regularly run those scripts. | |
4 | 4 | |
5 | -There is also a definition `aws_tar_extract` to recover those backup files and unpack them. | |
5 | +There is an option to encrypt the backups with OpenSSL. If encryption is used, the contents of the public PEM key must be provided as a node attribute. | |
6 | + | |
7 | +There is also a definition `aws_tar_extract` to recover those backup files and unpack them. This definition can also decrypt the tarballs using the private key contents from a node attribute (the default) or passed explicitly to the definition, as sketched below. | |
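A rough sketch of how the definition might be invoked from a recipe (the bucket name, target directory, and marker file below are hypothetical, not values shipped with this cookbook):

```ruby
# Fetches mydir/mydir.tar.gz.enc from the bucket, decrypts it with the
# private key in node['backup-file2s3']['encrypt']['priv_key'], and
# unpacks it into /var/restore.
aws_tar_extract 'mydir' do
  region 'us-east-1'
  bucket 'my-backup-bucket'                  # hypothetical bucket
  target_dir '/var/restore'
  creates '/var/restore/mydir/settings.json' # a file expected inside the tarball (hypothetical)
  encrypted true
end
```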
6 | 8 | |
7 | 9 | ## Supported Platforms |
8 | 10 | |
... | ... | @@ -30,21 +32,27 @@ Ubuntu 14.04 |
30 | 32 | <td><tt>'us-east-1'</tt></td> |
31 | 33 | </tr> |
32 | 34 | <tr> |
33 | - <td><tt>['backup-file2s3']['max_backups']</tt></td> | |
34 | - <td>Integer</td> | |
35 | - <td>Number of old backup tarballs to retain in S3.</td> | |
36 | - <td><tt>30</tt></td> | |
37 | - </tr> | |
38 | - <tr> | |
39 | - <td><tt>['backup-file2s3']['dirs']</tt></td> | |
35 | + <td><tt>['backup-file2s3']['backups']</tt></td> | |
40 | 36 | <td>Array</td> |
41 | - <td>An array of directories to be backed up.</td> | |
37 | + <td>An array of hashes detailing the script names and the directories (or files) to be backed up. This is also where you specify whether to encrypt each backup. See the default attributes file for more details.</td> | |
42 | 38 | <td><tt>[]</tt></td> |
43 | 39 | </tr> |
44 | 40 | <tr> |
45 | - <td><tt>['backup-file2s3']['cron']['min']</tt></td> | |
41 | + <td><tt>['backup-file2s3']['cron']['sched']</tt></td> | |
42 | + <td>String</td> | |
43 | + <td>Crontab syntax for scheduling how often the backup script will run.</td> | |
44 | + <td><tt>'0 0 * * *'</tt></td> | |
45 | + </tr> | |
46 | + <tr> | |
47 | + <td><tt>['backup-file2s3']['encrypt']['pub_key']</tt></td> | |
48 | + <td>String</td> | |
49 | + <td>If encryption is used by any of the paths under 'backups', then this attribute should contain the public key used to encrypt.</td> | |
50 | + <td><tt>nil</tt></td> | |
51 | + </tr> | |
52 | + <tr> | |
53 | + <td><tt>['backup-file2s3']['encrypt']['priv_key']</tt></td> | |
46 | 54 | <td>String</td> |
47 | - <td>Related cron attributes are: `hour`, `day`, `mon`, `wday`, each specifying a corresponding crontab value. This cron job will determine how often the backup script is run.</td> | |
55 | + <td>If an encrypted tarball is fetched using 'aws_tar_extract', this attribute should contain the default private key used to decrypt it. The private key can also be passed explicitly to the 'aws_tar_extract' definition.</td> | |
48 | 56 | <td><tt>nil</tt></td> |
49 | 57 | </tr> |
50 | 58 | </table> | ... | ... |
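As a rough illustration of how these attributes fit together, a wrapper cookbook's attributes file might set something like the following (the bucket, key path, and backup paths are hypothetical; the `backups` hash layout follows the commented example in the default attributes file):

```ruby
# Hypothetical bucket and schedule; adjust to taste.
default['backup-file2s3']['bucket'] = 'my-backup-bucket'
default['backup-file2s3']['region'] = 'us-east-1'
default['backup-file2s3']['cron']['sched'] = '30 2 * * *' # daily at 02:30

# Key contents as a string; read from a local file here purely for illustration.
default['backup-file2s3']['encrypt']['pub_key'] = IO.read('/etc/keys/backup_pub.pem')

default['backup-file2s3']['backups'] = [
  {
    :script_name => 'www',
    :paths => [
      { :path => '/var/www/site', :bak_encrypted => true, :bak_maxcopies => 14 }
    ]
  }
]
```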
... | ... | @@ -18,28 +18,36 @@ |
18 | 18 | # limitations under the License. |
19 | 19 | # |
20 | 20 | |
21 | -default['backup-file2s3']['bucket'] = 'bucketname' | |
22 | -default['backup-file2s3']['region'] = 'us-east-1' | |
23 | -default['backup-file2s3']['max_backups'] = 30 | |
21 | +default['backup-file2s3']['bucket'] = 'bucketname' | |
22 | +default['backup-file2s3']['region'] = 'us-east-1' | |
24 | 23 | |
25 | -# The array of file directories to be backed up to S3 | |
26 | -default['backup-file2s3']['dirs'] = [] | |
24 | +# Contains hashes with details of file/directories to be backed up to S3. | |
25 | +default['backup-file2s3']['backups'] = [ | |
26 | +# { | |
27 | +# :script_name => 'default', # default: 'default', but only | |
28 | +# # one default can exist at a time | |
29 | +# :enable => true, # default: true | |
30 | +# :paths => [ | |
31 | +# { | |
32 | +# :path => '/my/dir', | |
33 | +# :bak_filename => 'dir', # default: basename of path | |
34 | +# :bak_encrypted => false, # default: false | |
35 | +# :bak_maxcopies => 30 # default: 30 | |
36 | +# } | |
37 | +# ] | |
38 | +# } | |
39 | +] | |
27 | 40 | |
28 | -default['backup-file2s3']['script_dir'] = '/etc/backup_file_to_s3' | |
29 | -default['backup-file2s3']['log_dir'] = '/var/log/backup_file_to_s3' | |
30 | -default['backup-file2s3']['tmp_dir'] = '/tmp/backup_files' | |
41 | +# Encryption keys (string contents of the actual keys, if used) | |
42 | +default['backup-file2s3']['encrypt']['pub_key'] = nil | |
43 | +default['backup-file2s3']['encrypt']['priv_key'] = nil | |
31 | 44 | |
32 | -default['backup-file2s3']['aws_bin'] = value_for_platform( | |
33 | - 'ubuntu' => { 'default' => '/usr/local/bin/aws' }, | |
34 | - 'default' => '/usr/local/bin/aws' # haven't tested on other platforms yet | |
35 | -) | |
45 | +default['backup-file2s3']['script_dir'] = '/etc/backup_file_to_s3' | |
46 | +default['backup-file2s3']['log_dir'] = '/var/log/backup_file_to_s3' | |
47 | +default['backup-file2s3']['tmp_dir'] = '/tmp/backup_files' | |
36 | 48 | |
37 | 49 | # Basic options for cron |
38 | -default['backup-file2s3']['cron']['min'] = '0' | |
39 | -default['backup-file2s3']['cron']['hour'] = '0' | |
40 | -default['backup-file2s3']['cron']['day'] = '*' | |
41 | -default['backup-file2s3']['cron']['mon'] = '*' | |
42 | -default['backup-file2s3']['cron']['wday'] = '*' | |
50 | +default['backup-file2s3']['cron']['sched'] = '0 0 * * *' | |
43 | 51 | default['backup-file2s3']['cron']['mailto'] = "''" |
44 | 52 | |
45 | 53 | # Basic options for logrotate |
... | ... | @@ -51,3 +59,10 @@ default['backup-file2s3']['logrotate']['options'] = %w{ |
51 | 59 | compress |
52 | 60 | notifempty |
53 | 61 | } |
62 | + | |
63 | +# Constants | |
64 | +default['backup-file2s3']['tar_bin'] = '/bin/tar' | |
65 | +default['backup-file2s3']['aws_bin'] = value_for_platform( | |
66 | + 'ubuntu' => { 'default' => '/usr/local/bin/aws' }, | |
67 | + 'default' => '/usr/local/bin/aws' # haven't tested on other platforms yet | |
68 | +) | ... | ... |
... | ... | @@ -19,45 +19,79 @@ |
19 | 19 | # |
20 | 20 | |
21 | 21 | # Gets a tarball from AWS S3, then unpack it into a directory. |
22 | -# Parameters (all mandatory): | |
22 | +# Parameters: | |
23 | 23 | # :name | :file => The name of the backup tarball, without the extension |
24 | 24 | # :region => AWS region |
25 | 25 | # :bucket => AWS bucket |
26 | 26 | # :target_dir => Where the tarball is to be unpacked. If not |
27 | 27 | # exists, it will be created |
28 | 28 | # :creates => A file path used for idempotency |
29 | +# :encrypted => Boolean. Whether these backup files are encrypted. | |
30 | +# :priv_key => String. Contents of private key, if used. | |
29 | 31 | |
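For reference, a hedged example of a plain (unencrypted) invocation that relies on the node-attribute defaults for region and bucket (the resource name and paths are illustrative):

```ruby
aws_tar_extract 'appdata' do
  target_dir '/opt/app/data'               # created recursively if absent
  creates '/opt/app/data/appdata/config.yml' # a file known to exist in the tarball (hypothetical)
end
```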
30 | 32 | define :aws_tar_extract, |
31 | 33 | :file => nil, # default is params[:name] |
32 | - :region => 'us-east-1', | |
34 | + :region => nil, | |
33 | 35 | :bucket => nil, |
34 | 36 | :target_dir => nil, |
35 | - :creates => nil do | |
36 | - file = params[:file] || params[:name] | |
37 | + :creates => nil, | |
38 | + :encrypted => false, | |
39 | + :priv_key => nil do | |
37 | 40 | |
38 | - tmp_dir = ::File.join(Chef::Config[:file_cache_path], 'backups') | |
39 | - filetgz = "#{tmp_dir}/#{file}.tar.gz" | |
41 | + fname = params[:file] || params[:name] | |
42 | + region = params[:region] || node['backup-file2s3']['region'] | |
43 | + bucket = params[:bucket] || node['backup-file2s3']['bucket'] | |
44 | + priv_key = params[:priv_key] || node['backup-file2s3']['encrypt']['priv_key'] | |
45 | + tmp_dir = ::File.join(Chef::Config[:file_cache_path], 'f2s3_backups') | |
40 | 46 | |
41 | 47 | include_recipe 'awscli' |
42 | 48 | include_recipe 'tar' |
43 | 49 | |
44 | - [ tmp_dir, params[:target_dir] ].each do |ndir| | |
45 | - directory ndir do | |
46 | - recursive true | |
50 | + unless ::File.exist?(params[:creates]) | |
51 | + | |
52 | + directory(tmp_dir) { recursive true } | |
53 | + directory(params[:target_dir]) { recursive true } | |
54 | + | |
55 | + file_priv_key = "#{tmp_dir}/priv.key" | |
56 | + fname_tgz = "#{tmp_dir}/#{fname}.tar.gz" | |
57 | + fname_path = "#{tmp_dir}/#{fname}.tar.gz" # '.enc' is appended below when encrypted | |
58 | + | |
59 | + if params[:encrypted] | |
60 | + fname_path << '.enc' | |
61 | + | |
62 | + file file_priv_key do | |
63 | + content priv_key | |
64 | + mode 0600 | |
65 | + sensitive true | |
66 | + end | |
67 | + | |
68 | + execute "decrypt_#{fname}" do | |
69 | + command "openssl smime -decrypt -binary -inkey #{file_priv_key} "\ | |
70 | + "-in #{fname_path} -out #{fname_tgz} -inform DEM" | |
71 | + notifies :delete, "file[#{fname_path}]" | |
72 | + notifies :delete, "file[#{file_priv_key}]" | |
73 | + action :nothing | |
74 | + end | |
75 | + | |
76 | + file(fname_path) { action :nothing } | |
47 | 77 | end |
48 | - end | |
49 | 78 | |
50 | - unless ::File.exist?(params[:creates]) | |
51 | - awscli_s3_file filetgz do | |
52 | - region params[:region] | |
53 | - bucket params[:bucket] | |
54 | - key "#{file}.tar.gz" | |
79 | + awscli_s3_file fname_path do | |
80 | + region region | |
81 | + bucket bucket | |
82 | + key "#{fname}/#{::File.basename(fname_path)}" | |
83 | + if params[:encrypted] | |
84 | + notifies :run, "execute[decrypt_#{fname}]", :immediately | |
85 | + end | |
55 | 86 | end |
56 | 87 | |
57 | - tar_extract filetgz do | |
88 | + tar_extract fname_tgz do | |
58 | 89 | action :extract_local |
59 | 90 | target_dir params[:target_dir] |
60 | 91 | creates params[:creates] |
92 | + notifies :delete, "file[#{fname_tgz}]" | |
61 | 93 | end |
94 | + | |
95 | + file(fname_tgz) { action :nothing } | |
62 | 96 | end |
63 | 97 | end | ... | ... |
... | ... | @@ -4,7 +4,7 @@ maintainer_email 'sysadmin@chromedia.com' |
4 | 4 | license 'Apache License' |
5 | 5 | description 'Creates a script to backup directories into an S3 bucket.' |
6 | 6 | long_description IO.read(File.join(File.dirname(__FILE__), 'README.md')) |
7 | -version '0.2.1' | |
7 | +version '0.3.0' | |
8 | 8 | |
9 | 9 | depends 'awscli', '~> 1.0.1' |
10 | 10 | depends 'cron', '~> 1.7.4' | ... | ... |
... | ... | @@ -22,40 +22,70 @@ include_recipe 'awscli' |
22 | 22 | include_recipe 'tar' |
23 | 23 | |
24 | 24 | attribs = node['backup-file2s3'] |
25 | -scr_dir = attribs['script_dir'] | |
26 | -sname = 'backup_file_to_s3' | |
25 | +pub_key_file = "#{attribs['script_dir']}/pub.key" | |
27 | 26 | |
28 | -template "#{scr_dir}/#{sname}" do | |
29 | - mode 0644 | |
30 | - only_if "test -d #{scr_dir} || mkdir -p #{scr_dir}" | |
31 | - variables( | |
32 | - :aws_bin => attribs['aws_bin'], | |
33 | - :log_dir => attribs['log_dir'], | |
34 | - :tmp_dir => attribs['tmp_dir'], | |
35 | - :bucket => attribs['bucket'], | |
36 | - :region => attribs['region'] || 'us-east-1', | |
37 | - :max_backups => attribs['max_backups'] || 30, | |
38 | - :dirs => attribs['dirs'] | |
39 | - ) | |
27 | +directory(attribs['script_dir']) { recursive true } | |
28 | +directory(attribs['log_dir']) { recursive true } | |
29 | + | |
30 | +is_any_enc = attribs['backups'].any? do |back| | |
31 | + back[:paths].any? do |path| | |
32 | + path[:bak_encrypted] | |
33 | + end | |
40 | 34 | end |
35 | +if !attribs['encrypt']['pub_key'] && is_any_enc | |
36 | + Chef::Application.fatal!('No encryption public key contents supplied') | |
37 | +end | |
38 | + | |
39 | +file pub_key_file do | |
40 | + content attribs['encrypt']['pub_key'] | |
41 | + mode 0600 | |
42 | + owner 'root' | |
43 | + group 'root' | |
44 | + sensitive true | |
45 | + only_if { attribs['encrypt']['pub_key'] } | |
46 | +end | |
47 | + | |
48 | +attribs['backups'].each do |back| | |
49 | + snam = back[:script_name] || 'default' | |
50 | + sname = "#{snam.gsub(' ', '-')}_backup2s3" | |
51 | + enable = back.has_key?(:enable) ? back[:enable] : true | |
52 | + | |
53 | + template "#{attribs['script_dir']}/#{sname}" do | |
54 | + mode 0644 | |
55 | + source 'backup_file_to_s3.erb' | |
56 | + variables( | |
57 | + :aws_bin => attribs['aws_bin'], | |
58 | + :tar_bin => attribs['tar_bin'], | |
59 | + :tmp_dir => attribs['tmp_dir'], | |
60 | + :bucket => attribs['bucket'], | |
61 | + :region => attribs['region'], | |
62 | + :pub_key_file => pub_key_file, | |
63 | + :paths => back[:paths] | |
64 | + ) | |
65 | + action( enable ? :create : :delete ) | |
66 | + end | |
67 | + | |
68 | + sched = attribs['cron']['sched'].split(' ') | |
69 | + cron_d sname do | |
70 | + command "bash #{attribs['script_dir']}/#{sname} >> "\ | |
71 | + "#{attribs['log_dir']}/#{sname}.log 2>&1" | |
72 | + minute sched[0] | |
73 | + hour sched[1] | |
74 | + day sched[2] | |
75 | + month sched[3] | |
76 | + weekday sched[4] | |
77 | + mailto attribs['cron']['mailto'] | |
78 | + path '/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin' | |
41 | 79 | |
42 | -cra = attribs['cron'] | |
43 | -cron_d sname do | |
44 | - command "bash #{scr_dir}/#{sname}" | |
45 | - minute cra['min'] | |
46 | - hour cra['hour'] | |
47 | - day cra['day'] | |
48 | - month cra['mon'] | |
49 | - weekday cra['wday'] | |
50 | - mailto cra['mailto'] | |
51 | - path '/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin' | |
80 | + action( enable ? :create : :delete ) | |
81 | + end | |
52 | 82 | end |
53 | 83 | |
54 | 84 | package 'logrotate' |
55 | 85 | |
56 | 86 | loa = attribs['logrotate'] |
57 | -template "#{loa['conf_dir']}/#{sname}" do | |
58 | - source "#{sname}_logrotate.erb" | |
87 | +template "#{loa['conf_dir']}/backup_file_to_s3" do | |
88 | + source 'backup_file_to_s3_logrotate.erb' | |
59 | 89 | only_if "test -d #{loa['conf_dir']} || mkdir -p #{loa['conf_dir']}" |
60 | 90 | variables( |
61 | 91 | :log_dir => attribs['log_dir'], | ... | ... |
... | ... | @@ -2,59 +2,66 @@ |
2 | 2 | # |
3 | 3 | # Generated by Chef |
4 | 4 | # |
5 | -# Back up directories into an S3 bucket | |
5 | +# Back up directories/files into an S3 bucket | |
6 | 6 | |
7 | 7 | set -e |
8 | 8 | |
9 | 9 | suffix=.backup_file_to_s3 |
10 | -[ -f /tmp/*"$suffix" ] && exit 200 | |
11 | -tmp_file=$( mktemp --suffix "$suffix" ) | |
12 | - | |
13 | -log_dir=<%= @log_dir %> | |
14 | -if [[ ! -d "$log_dir" ]] ; then | |
15 | - mkdir -p "$log_dir" | |
10 | +if [ -f /tmp/*"$suffix" ] ; then | |
11 | + ( >&2 echo "[ERROR] Another operation might still be in progress" ) | |
12 | + exit 200 | |
16 | 13 | fi |
17 | - | |
18 | -aws_cmd=<%= @aws_bin %> | |
19 | - | |
20 | -exec 3>&1 4>&2 | |
21 | -trap 'exec 2>&4 1>&3' 0 1 2 3 | |
22 | -exec 1>>"${log_dir}/backup_file_to_s3.log" 2>&1 | |
14 | +tmp_file=$( mktemp --suffix "$suffix" ) | |
23 | 15 | |
24 | 16 | bucket=<%= @bucket %> |
25 | 17 | region=<%= @region %> |
26 | -max_backups=<%= @max_backups %> | |
27 | 18 | bak_dir=<%= @tmp_dir %> |
28 | 19 | |
29 | -# Array of directories to be backed up. | |
30 | -# | |
31 | -# Example: | |
32 | -# declare -a tar_dirs=( | |
33 | -# "/path/to/dira" | |
34 | -# "/another/path/to/dirb" | |
35 | -# ) | |
36 | -# | |
37 | -# Tarball names will be the basename of each path given. | |
38 | -declare -a tar_dirs=( | |
39 | -<% @dirs.each do |dirx| -%> | |
40 | - "<%= dirx %>" | |
41 | -<% end -%> | |
42 | -) | |
20 | +aws_bin=<%= @aws_bin %> | |
21 | +tar_bin=<%= @tar_bin || '/bin/tar' %> | |
43 | 22 | |
44 | -if [[ ! -d "$bak_dir" ]] ; then | |
45 | - echo "$(date) : Missing backup directory. Creating." | |
46 | - mkdir -p "$bak_dir" | |
47 | -fi | |
23 | +pub_key_file=<%= @pub_key_file %> | |
24 | + | |
25 | +# Create the tarball. | |
26 | +# Args: | |
27 | +# $1 = full path to be backed up, e.g. '/var/src/mydir' | |
28 | +# $2 = desired filename, sans '.tar.gz' extension, e.g. 'myfilename' | |
29 | +tar_dir() { | |
30 | + bname=$( basename "$1" ) | |
31 | + dname=$( dirname "$1" ) | |
32 | + | |
33 | + echo "$(date) : Creating tarball from ${1}" | |
34 | + "$tar_bin" -C "${dname}" -czf "${bak_dir}/${2}.tar.gz" "$bname" | |
35 | +} | |
36 | + | |
37 | +# Encrypt a tarball using OpenSSL and a given public key. | |
38 | +# The original file will be replaced by a new one with the same filename, | |
39 | +# suffixed with '.enc'. | |
40 | +# | |
41 | +# Given file should be located in $bak_dir. | |
42 | +# Args: | |
43 | +# $1 = filename, sans '.tar.gz' extension, e.g. 'myfilename' | |
44 | +encrypt_tarball() { | |
45 | + echo "$(date) : Encrypt file ${1}.tar.gz" | |
46 | + openssl smime -encrypt -binary -text -aes256 -in "${bak_dir}/${1}.tar.gz" \ | |
47 | + -out "${bak_dir}/${1}.tar.gz.enc" -outform DER "${pub_key_file}" | |
48 | + rm "${bak_dir}/${1}.tar.gz" | |
49 | +} | |
48 | 50 | |
49 | -# Rotate the current backups in S3 | |
50 | -# $1 = directory to be tarred | |
51 | +# Rotate the number suffixes of the current backups in S3 | |
52 | +# Args: | |
53 | +# $1 = filename, sans '.tar.gz' extension, e.g. 'myfilename' | |
54 | +# $2 = max number of backup copies to store at a time | |
51 | 55 | increment_backup_names() { |
52 | - fname=$( basename "$1" ) | |
53 | - bak_keyname="${fname}.tar.gz" | |
56 | + bak_keyname=$1 | |
57 | + max_backups=$2 | |
54 | 58 | |
55 | - baks=$( "$aws_cmd" --output text --region "$region" \ | |
56 | - s3api list-objects --bucket "$bucket" \ | |
57 | - | grep '^CONTENTS' | cut -f3 | grep "^${bak_keyname}" || echo "" ) | |
59 | + # Backups will be stored inside subdirectories (prefixes) | |
60 | + # in S3, so only look at 'PRE' objects. | |
61 | + baks=$( "$aws_bin" --output text --region "$region" \ | |
62 | + s3 ls "s3://${bucket}/" | grep '^\s*PRE' | \ | |
63 | + sed -e 's/^ *PRE //' -e 's/\/$//' | \ | |
64 | + grep "^${bak_keyname}" || echo "" ) | |
58 | 65 | |
59 | 66 | echo "$(date) : Backup rotation for ${bak_keyname}" |
60 | 67 | start=$((max_backups - 1)) |
... | ... | @@ -63,48 +70,70 @@ increment_backup_names() { |
63 | 70 | if echo "$baks" | grep "^${bak_keyname}\\.${x}\$" ; then |
64 | 71 | newx=$((x + 1)) |
65 | 72 | if [[ $newx -lt $max_backups ]] ; then |
66 | - "$aws_cmd" --region "$region" \ | |
67 | - s3 cp "s3://${bucket}/${bak_keyname}.${x}" \ | |
73 | + "$aws_bin" --region "$region" \ | |
74 | + s3 mv --recursive "s3://${bucket}/${bak_keyname}.${x}" \ | |
68 | 75 | "s3://${bucket}/${bak_keyname}.${newx}" |
76 | + else | |
77 | + "$aws_bin" --region "$region" \ | |
78 | + s3 rm --recursive "s3://${bucket}/${bak_keyname}.${x}" | |
69 | 79 | fi |
70 | 80 | fi |
71 | 81 | done |
72 | 82 | |
73 | 83 | if echo "$baks" | grep "^${bak_keyname}\$" ; then |
74 | - "$aws_cmd" --region "$region" \ | |
75 | - s3 cp "s3://${bucket}/${bak_keyname}" \ | |
84 | + "$aws_bin" --region "$region" \ | |
85 | + s3 mv --recursive "s3://${bucket}/${bak_keyname}" \ | |
76 | 86 | "s3://${bucket}/${bak_keyname}.1" |
77 | 87 | fi |
78 | 88 | } |
79 | 89 | |
80 | -# Tar up the directory | |
81 | -# $1 = directory to be tarred | |
82 | -tar_dir() { | |
83 | - fname=$( basename "$1" ) | |
84 | - parent=$( dirname "$1" ) | |
85 | - echo "$(date) : Tar up ${1}" | |
90 | +# Upload the tarball to the S3 bucket. It will be uploaded | |
91 | +# to this location: ${bucket}/basename/myfile.tar.gz.enc | |
92 | +# | |
93 | +# A timestamp file will also be uploaded to this location: | |
94 | +# ${bucket}/basename/YYYY-MM-DDThh:mm:ss.txt | |
95 | +# Args: | |
96 | +# $1 = filename, sans '.tar.gz' extension, e.g. 'myfilename' | |
97 | +# $2 = if file is encrypted or not (boolean) | |
98 | +upload_to_s3() { | |
99 | + bak_keyname=$1 | |
100 | + if [ "$2" = true ] ; then | |
101 | + fname="${1}.tar.gz.enc" | |
102 | + else | |
103 | + fname="${1}.tar.gz" | |
104 | + fi | |
86 | 105 | |
87 | - tar -C "$parent" -czf "${bak_dir}/${fname}.tar.gz" "$fname" | |
88 | -} | |
106 | + echo "$(date) : Upload ${fname} to S3 bucket ${bucket}" | |
89 | 107 | |
90 | -# $1 = directory to be tarred | |
91 | -upload_to_s3() { | |
92 | - fname=$( basename "$1" ) | |
93 | - echo "$(date) : Upload ${fname}.tar.gz to S3 bucket ${bucket}" | |
108 | + stamp=$( date +"%FT%T" ) | |
109 | + echo "Uploaded: ${stamp}" > "${bak_dir}/${stamp}.txt" | |
94 | 110 | |
95 | - "$aws_cmd" --region "$region" \ | |
96 | - s3 mv "${bak_dir}/${fname}.tar.gz" "s3://${bucket}/${fname}.tar.gz" | |
111 | + "$aws_bin" --region "$region" \ | |
112 | + s3 mv "${bak_dir}/${fname}" \ | |
113 | + "s3://${bucket}/${bak_keyname}/${fname}" | |
114 | + "$aws_bin" --region "$region" \ | |
115 | + s3 mv "${bak_dir}/${stamp}.txt" \ | |
116 | + "s3://${bucket}/${bak_keyname}/${stamp}.txt" | |
97 | 117 | } |
98 | 118 | |
99 | -for dirx in "${tar_dirs[@]}" ; do | |
100 | - if [[ -d "$dirx" ]] ; then | |
101 | - increment_backup_names "$dirx" | |
102 | - tar_dir "$dirx" | |
103 | - upload_to_s3 "$dirx" | |
104 | - else | |
105 | - echo "$(date) : WARNING : Directory ${dirx} does not exist. Skipping." | |
106 | - fi | |
107 | -done | |
119 | +if [[ ! -d "$bak_dir" ]] ; then | |
120 | + mkdir -p "$bak_dir" | |
121 | +fi | |
122 | + | |
123 | +<% @paths.each do |path| -%> | |
124 | +<% bname = path[:bak_filename] || ::File.basename(path[:path]) -%> | |
125 | +<% is_enc = path.has_key?(:bak_encrypted) ? path[:bak_encrypted] : false -%> | |
126 | +if [[ -d "<%= path[:path] %>" || -f "<%= path[:path] %>" ]] ; then | |
127 | + increment_backup_names "<%= bname %>" <%= path[:bak_maxcopies] || 30 %> | |
128 | + tar_dir "<%= path[:path] %>" "<%= bname %>" | |
129 | +<% if is_enc -%> | |
130 | + encrypt_tarball "<%= bname %>" | |
131 | +<% end -%> | |
132 | + upload_to_s3 "<%= bname %>" <%= is_enc %> | |
133 | +else | |
134 | + >&2 echo "$(date) [WARNING] Path <%= path[:path] %> does not exist" | |
135 | +fi | |
108 | 136 | |
137 | +<% end -%> | |
109 | 138 | rm "$tmp_file" |
110 | 139 | echo "$(date) : Done" | ... | ... |