├── .gitattributes ├── .gitignore ├── JNX ├── Configuration.pm ├── System.pm └── ZFS.pm ├── LICENSE ├── README.md ├── autoscrub.perl ├── checkbackup.perl └── zfstimemachinebackup.perl /.gitattributes: -------------------------------------------------------------------------------- 1 | #### `.gitattributes` 2 | *.pbxproj -crlf -diff -merge 3 | *.nib -crlf -diff -merge 4 | *.xib -crlf -diff -merge 5 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # author: Jolly aka Patrick Stein 2 | # 3 | # generic 4 | # 5 | *~ 6 | *.[oa] 7 | *.bak 8 | # 9 | # old svn /cvs 10 | # 11 | .cvs/ 12 | .svn/ 13 | # 14 | # apple specific 15 | # 16 | Desktop DF 17 | Desktop DB 18 | .Spotlight-V100 19 | .DS_Store 20 | .Trashes 21 | .com.apple.timemachine.supported 22 | .fseventsd 23 | .syncinfo 24 | .TemporaryItems 25 | # 26 | # xcode files 27 | # 28 | build/ 29 | *~.nib 30 | *.mode1 31 | *.mode1v3 32 | *.mode2v3 33 | *.pbxuser 34 | xcuserdata/ 35 | 36 | -------------------------------------------------------------------------------- /JNX/Configuration.pm: -------------------------------------------------------------------------------- 1 | # 2 | # name: Configuration.pm 3 | # purpose: one modul for all config tasks 4 | # 5 | # + newFromDefaults(hashreference,__PACKAGE__) 6 | # 7 | # 8 | 9 | # uses Config ini files if available 10 | 11 | package JNX::Configuration; 12 | 13 | use Getopt::Long; 14 | use strict; 15 | 16 | sub newFromDefaults # (\%,$) 17 | { 18 | my($default,$currentpackagename)=@_; 19 | 20 | my %default = %{$default} ; 21 | my %commandlineoption; 22 | my %returningoptions; 23 | 24 | 25 | if( ! defined($default{'configurationfilename'}) ) 26 | { 27 | $default{'configurationfilename'} = ['config.ini','string']; 28 | } 29 | if( defined($default{'debug'}) ) 30 | { 31 | $default{'debug'} = [int($default{'debug'}[0]),'number']; 32 | } 33 | else 34 | { 35 | $default{'debug'} =[0,'number']; 36 | } 37 | $default{'help'} =['','option']; 38 | 39 | my %optionconverter = ('string' => '=s', 'number' =>,'=i', 'flag'=>'!','option'=>'', ); 40 | my @ARGVCOPY = @ARGV; 41 | GetOptions( \%commandlineoption, map($_.$optionconverter{${$default{$_}}[-1]},keys %default) ); 42 | @ARGV = @ARGVCOPY; 43 | my $configfilename = (defined($commandlineoption{'configurationfilename'})?$commandlineoption{'configurationfilename'}:${$default{'configurationfilename'}}[0]); 44 | 45 | 46 | my $configurationObject = undef; 47 | 48 | if( eval "require Config::IniFiles" ) 49 | { 50 | $configurationObject = new Config::IniFiles( -file => $configfilename ) if -e $configfilename; 51 | } 52 | else 53 | { 54 | $commandlineoption{'configurationfilename'} = 'not used as Config:IniFiles module not present'; 55 | } 56 | 57 | 58 | my $programname = $0; 59 | $programname =~ s/^.*\///; 60 | my $packagename = ($currentpackagename eq 'main'?$programname:$currentpackagename); 61 | 62 | 63 | FILLUPHASH: while( my($key,$value) = each %default ) 64 | { 65 | if( defined($commandlineoption{$key}) ) 66 | { 67 | $returningoptions{$key} = $commandlineoption{$key}; 68 | next FILLUPHASH; 69 | } 70 | if( $configurationObject ) 71 | { 72 | if( defined($configurationObject->val($programname,$key)) ) 73 | { 74 | $returningoptions{$key} = $configurationObject->val($programname,$key); 75 | next FILLUPHASH; 76 | } 77 | if( defined($configurationObject->val($packagename,$key)) ) 78 | { 79 | $returningoptions{$key} = $configurationObject->val($packagename,$key); 80 | next FILLUPHASH; 81 | } 82 | if( defined($configurationObject->val('GLOBAL',$key)) ) 83 | { 84 | $returningoptions{$key} = $configurationObject->val('GLOBAL',$key); 85 | next FILLUPHASH; 86 | } 87 | } 88 | $returningoptions{$key} = ${$default{$key}}[0]; 89 | } 90 | 91 | if( $returningoptions{help} ) 92 | { 93 | delete $default{help} if $currentpackagename ne 'main' ; 94 | warn "[$packagename] module options are :\n",join("\t\n",map(sprintf("--%-30s default: %s%s",$_.' ('.${$default{$_}}[-1].')',${$default{$_}}[0],($returningoptions{$_} ne ${$default{$_}}[0]?sprintf("\n%-32s current: %s",'',$returningoptions{$_}):'')),sort keys %default))."\n"; 95 | exit if $currentpackagename eq 'main' ; 96 | } 97 | return %returningoptions; 98 | } 99 | 100 | 1; 101 | -------------------------------------------------------------------------------- /JNX/System.pm: -------------------------------------------------------------------------------- 1 | 2 | package JNX::System; 3 | 4 | use strict; 5 | use English; 6 | use Date::Parse qw(str2time); 7 | use Digest::MD5 qw(md5_hex); 8 | 9 | 10 | sub boottime 11 | { 12 | open(FILE,"sysctl kern.boottime|") || return 0; 13 | 14 | my $lastboottime = 0; 15 | 16 | while( my $line = ) 17 | { 18 | $lastboottime = $1 if $line =~ /^kern.boottime:\s*\{\ssec\s*=\s(\d+),/; 19 | } 20 | close(FILE); 21 | 22 | return $lastboottime; 23 | 24 | } 25 | 26 | sub lastwaketime 27 | { 28 | open(FILE,"pmset -g log|") || return 0; 29 | 30 | my $lastwaketime = 0; 31 | my $message = undef; 32 | 33 | my $line; 34 | do 35 | { 36 | $line = ; 37 | $message .= $line; 38 | 39 | if( $line =~ /^\s*$/ || !$line ) 40 | { 41 | 42 | if( ( $message =~ m/^\s+\-\s+Message:\s+Wake:/m 43 | && $message =~ m/^\s+\-\s+Time:\s+(\S.+)$/m 44 | ) 45 | || ( $message =~ m/^(.+?)( GMT)?\s+wake\s*\t/im ) 46 | ) 47 | { 48 | my $waketime = str2time($1); 49 | $lastwaketime = $waketime if $waketime > $lastwaketime; 50 | } 51 | 52 | $message = undef; 53 | } 54 | } 55 | while( $line ); 56 | close(FILE); 57 | 58 | my $lastboottime = boottime(); 59 | 60 | return $lastwaketime>$lastboottime?$lastwaketime:$lastboottime; 61 | } 62 | 63 | sub temporaryfilename 64 | { 65 | my($prefix,$tohash) = @_; 66 | 67 | my $hashname = undef; 68 | 69 | if( length $tohash ) 70 | { 71 | $hashname = md5_hex($tohash); 72 | } 73 | 74 | my $prgname = $PROGRAM_NAME; 75 | 76 | $prgname =~ s/^(.*\/)//; 77 | $prgname =~ s/\s+//g; 78 | $prgname .= '.' if length($prgname); 79 | 80 | $prefix .= '.' if length $prefix; 81 | 82 | return '/tmp/.'.$prgname.$prefix.$hashname; 83 | } 84 | 85 | sub pidfilename 86 | { 87 | my ($runcheckname) = @_; 88 | 89 | return temporaryfilename(undef,$runcheckname).'.PID'; 90 | } 91 | 92 | 93 | sub checkforrunningmyself 94 | { 95 | my ($runcheckname) = @_; 96 | 97 | my $filename = pidfilename($runcheckname); 98 | 99 | if( open(FILE,$filename) ) 100 | { 101 | my $otherpid = ; 102 | close(FILE); 103 | 104 | if( kill(0,int($otherpid)) ) 105 | { 106 | return 0; 107 | } 108 | } 109 | open(FILE,'>'.$filename) || die "Can't open pid file"; 110 | print FILE $$; 111 | close(FILE); 112 | 113 | return 1; 114 | } 115 | 116 | 117 | =head2 System::executecommand() 118 | 119 | Executes a command either locally or remotely 120 | 121 | Arguments: a hash the following keys are used: command, host, hostoptions, debug, verbose 122 | 123 | Returns: 124 | 125 | in scalar context returns the output of the command as string 126 | 127 | in array context returns the output in lines 128 | 129 | in $? returns exit value 130 | =cut 131 | 132 | sub executecommand 133 | { 134 | my %arguments = @_; 135 | 136 | my $command = $arguments{command}; 137 | 138 | return undef if !length($command); 139 | 140 | if( $arguments{host} && ( lc($arguments{host}) ne 'localhost') ) 141 | { 142 | $command = 'ssh '.$arguments{hostoptions}.' '.$arguments{host}." ".$arguments{remoteenvironment}." '".$arguments{command}."'"; 143 | } 144 | if( $arguments{inputfile} || $arguments{outputfile} ) 145 | { 146 | $command = '('.$command.')'; 147 | 148 | $command.= ' <"'.$arguments{inputfile}.'"' if $arguments{inputfile}; 149 | $command.= ' >"'.$arguments{outputfile}.'"' if $arguments{outputfile}; 150 | } 151 | 152 | if( $arguments{verbose} ) 153 | { 154 | print STDERR "Executing command: $command\n"; 155 | } 156 | if( $arguments{debug} ) 157 | { 158 | print STDERR "DEBUG: Would have executed command:$command\n"; 159 | return 1; 160 | } 161 | return `$command`; 162 | } 163 | 164 | 165 | 1; 166 | -------------------------------------------------------------------------------- /JNX/ZFS.pm: -------------------------------------------------------------------------------- 1 | package JNX::ZFS; 2 | use strict; 3 | use Time::Local qw(timelocal); 4 | use Date::Parse qw(str2time); 5 | use POSIX qw(strftime); 6 | 7 | 8 | $ENV{PATH}=$ENV{PATH}.':/usr/sbin/:/usr/local/sbin:/usr/local/bin'; 9 | 10 | 11 | 12 | =head1 ZFS::pools 13 | 14 | return hash of %{poolname}{scanerrors => \d ,lastscrub => time, status => string } 15 | =cut 16 | 17 | 18 | sub pools 19 | { 20 | my %pools; 21 | my($poolname,$status,$lastscrub); 22 | 23 | foreach (JNX::System::executecommand( @_, command=>'zpool status')) 24 | { 25 | $poolname = $1 if /^\s*pool:\s*(\S+)/i; 26 | $status = $1 if /^\s*state:\s*(\S+)/i; 27 | $lastscrub = $1 if /^\s*scan:\s*(.*)/i; 28 | 29 | if( /^\s*errors:/i ) 30 | { 31 | $pools{$poolname}{status} = $status ; 32 | 33 | #scan: scrub repaired 0 in 43h24m with 0 errors on Thu Mar 8 09:38:35 2012 34 | if( $lastscrub =~ m/with\s+(\d+)\s+errors\s+on\s+(.*?)$/ ) 35 | { 36 | $pools{$poolname}{scanerrors} = $1; 37 | $pools{$poolname}{lastscrub} = str2time($2); 38 | } 39 | elsif( $lastscrub =~ m/scrub\s+canceled\s+on\s+(.*?)$/ ) 40 | { 41 | $pools{$poolname}{lastscrub} = str2time($1); 42 | } 43 | elsif( $lastscrub =~ m/^scrub in progress/i ) 44 | { 45 | $pools{$poolname}{lastscrub} = time(); 46 | } 47 | $poolname = undef; 48 | $status = undef; 49 | $lastscrub = undef; 50 | } 51 | } 52 | return \%pools; 53 | } 54 | 55 | 56 | 57 | =head1 ZFS::createsnapshot 58 | 59 | Creates a snapshot with the current date on the given host and dataset 60 | 61 | Arguments: {dataset,recursive} 62 | 63 | Arguments are also given to System::executecommand() 64 | 65 | Returns: undef or snapshotname in 'YYYY-mm-dd-HHMMSS' format 66 | =cut 67 | 68 | 69 | sub createsnapshot 70 | { 71 | my %arguments = @_; 72 | 73 | return undef if !length( $arguments{dataset} ); 74 | 75 | my $snapshotdate = strftime "%Y-%m-%d-%H%M%S", localtime; 76 | my $snapshotname = $arguments{dataset}.'@'.$snapshotdate; 77 | 78 | return undef if !defined(JNX::System::executecommand( %arguments, command => 'zfs snapshot '.($arguments{recursive}?'-r ':'').'"'.$arguments{dataset}.'@'.$snapshotdate.'"')); 79 | 80 | print STDERR "Created Snapshot: $snapshotname\n" if $arguments{verbose}; 81 | 82 | my @snapshots = getsnapshotsfordataset( %arguments ); 83 | 84 | for my $name (reverse @snapshots) 85 | { 86 | print STDERR "Testing Snapshot: $name\n" if $arguments{verbose}; 87 | return $snapshotname if $name eq $snapshotdate; 88 | } 89 | print STDERR 'Could not create snapshot:'.$snapshotname."\n"; 90 | return undef; 91 | } 92 | 93 | 94 | =head1 ZFS::getsnapshotsfordataset 95 | 96 | Gets a list of snaphots for the host and datset 97 | 98 | Arguments: {dataset} 99 | 100 | Arguments are also given to System::executecommand() 101 | 102 | Returns: a list of snapshots for the given dataset 103 | =cut 104 | my %snapshotcache; 105 | my %datasetcache; 106 | 107 | 108 | sub getsnapshotsfordataset 109 | { 110 | my %arguments = @_; 111 | 112 | return undef if !length( $arguments{dataset} ); 113 | $arguments{host} = 'localhost' if !length( $arguments{host} ); 114 | 115 | 116 | if( time()-$snapshotcache{$arguments{host}}{lasttime}{$arguments{dataset}} > 500 ) 117 | { 118 | delete $snapshotcache{$arguments{host}}; 119 | $snapshotcache{$arguments{host}}{lasttime}{$arguments{dataset}}=time(); 120 | 121 | for (JNX::System::executecommand( %arguments, command => 'zfs list -H -t snapshot -o name -s name -d 1 -r "'.$arguments{dataset}.'"')) 122 | { 123 | if( /^([A-Za-z0-9\_\-\s\/\.]+)\@(\S+)\s/ ) 124 | { 125 | print STDERR "Got Snapshot: $arguments{host}: $1\@$2 \n"; 126 | push(@{$snapshotcache{$arguments{host}}{datasets}{$1}},$2) if length $2>0; 127 | } 128 | else 129 | { 130 | # print STDERR "Did not match: $_\n"; 131 | } 132 | } 133 | } 134 | else 135 | { 136 | print STDERR "Serving from cache\n"; 137 | } 138 | my $snapshotsref = $snapshotcache{$arguments{host}}{datasets}{$arguments{dataset}}; 139 | 140 | return $snapshotsref?@{$snapshotsref}:(); 141 | } 142 | 143 | 144 | 145 | 146 | 147 | =head1 ZFS::getsubdatasets 148 | 149 | Returns a list of datasets that are equal and below a given one 150 | 151 | Arguments: {dataset} 152 | 153 | Arguments are also given to System::executecommand() 154 | =cut 155 | 156 | 157 | sub getsubdatasets 158 | { 159 | my %arguments = @_; 160 | 161 | return undef if !length( $arguments{dataset} ); 162 | $arguments{host} = 'localhost' if !length( $arguments{host} ); 163 | 164 | if( time()-$datasetcache{$arguments{host}}{cachetime} > 500 ) 165 | { 166 | $datasetcache{$arguments{host}}{cachetime}=time(); 167 | 168 | my @datasets; 169 | 170 | for (JNX::System::executecommand( %arguments, command => 'zfs list -H -r -o name') ) 171 | { 172 | chomp; 173 | if( /^([A-Za-z0-9\_\-\s\/\.]+)$/ ) 174 | { 175 | push(@datasets,$1); 176 | } 177 | else 178 | { 179 | print STDERR "Did not match: $_\n"; 180 | } 181 | } 182 | 183 | $datasetcache{$arguments{host}}{datasets}=\@datasets; 184 | } 185 | return grep(/^\Q$arguments{dataset}\E/, @{$datasetcache{$arguments{host}}{datasets}}); 186 | } 187 | 188 | 189 | 190 | 191 | 192 | sub timeofsnapshot 193 | { 194 | my ($snapshotname) = @_; 195 | 196 | if( $snapshotname =~ /(?:^|@)(2\d{3})\-(\d{2})\-(\d{2})\-(\d{2})(\d{2})(\d{2})$/ ) 197 | { 198 | my($year,$month,$day,$hour,$minute,$second) = ($1,$2,$3,$4,$5,$6); 199 | my $snapshottime = timelocal($second,$minute,$hour,$day,$month-1,$year); 200 | 201 | return $snapshottime; 202 | } 203 | return 0; 204 | } 205 | 206 | 207 | =head1 ZFS::destroysnapshots 208 | 209 | Destroys a snapshots or list of snapshots ( dataset@snapshotname ) 210 | 211 | Arguments: { snapshot } 212 | 213 | Arguments are also given to System::executecommand() 214 | =cut 215 | 216 | sub destroysnapshots 217 | { 218 | my %arguments = @_; 219 | 220 | return undef if !$arguments{dataset}; 221 | return undef if !$arguments{snapshots}; 222 | 223 | $arguments{host} = 'localhost' if !length( $arguments{host} ); 224 | delete $snapshotcache{$arguments{host}}; 225 | 226 | my @snapshotstodelete; 227 | 228 | if( ref($arguments{snapshots}) eq "ARRAY" ) 229 | { 230 | @snapshotstodelete = @{$arguments{snapshots}}; 231 | } 232 | else 233 | { 234 | @snapshotstodelete = ($arguments{snapshots}); 235 | } 236 | 237 | 238 | 239 | foreach my $snapshot (@snapshotstodelete) 240 | { 241 | JNX::System::executecommand( %arguments, command => 'zfs destroy "'.$arguments{dataset}.'@'.$snapshot.'"' ); 242 | } 243 | } 244 | 245 | 246 | 1; 247 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2010-2021 Jolly aka Patrick Stein 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ZFS TimeMachine 2 | =============== 3 | 4 | TimeMachine style backups for ZFS users. ZFS-Timemachine creates incremental backups of zfs datasets on one host to datasets on another disk or host. This is done via sending snapshots, deleting old ones in time machine style. It works with FreeBSD and Macs (with TensCompliments ZFS implementation), but should work with other ZFS implementations as well. 5 | 6 | 7 | How it works 8 | ------------ 9 | 10 | - the script creates a snapshot on the source dataset every time it is called. 11 | - then it figures out the last snapshot on the destination dataset that matches to one on the source dataset. 12 | - it sends the snapshot from the source to the destination. 13 | - removes old snapshots on the source - it keeps just n-snapshots. 14 | - removes old snapshots on the destination - time machine fashion : 5min/last day, 1 hour last week, 1 day last 3 months, 1 week thereafter 15 | 16 | 17 | Requirements 18 | ------------ 19 | It requires perl and the Time::Local and Date::Parse libraries. If you are on a Mac you can install them by using the command line: 20 | 21 | $export PERL_MM_USE_DEFAULT=1 ; perl -MCPAN -e 'install Date::Parse' 'install Time::Local' 22 | 23 | If you are on a different OS (like linux or bsd) everything should work. 24 | 25 | 26 | How to use 27 | -------------- 28 | 29 | The simplest use of the script requires just two options, --sourcedataset and --destinationdataset options. Like this: 30 | 31 | $ sudo zfstimemachinebackup.perl --sourcedataset=tank --destinationdataset=root/backup 32 | 33 | Usually you want the script to create a snapshot on the sourcedataset when it is called, so add the --createsnapshotonsource option. To see all options use the --help commandline option. 34 | 35 | As there are quite a few options, let's go through them in detail: 36 | 37 | Source host: 38 | 39 | - --sourcehost (string): hostname where the source dataset is on. 40 | - --sourcehostoptions (string): options given to ssh (default: -c blowfish -C -l root). 41 | - --sourcedataset (string): the source dataset that is to be backed up. 42 | 43 | Destination host: 44 | 45 | - --destinationhost (string): hostname where the destination dataset is on. 46 | - --destinationhostoptions (string): options given to ssh (default: -c blowfish -C -l root) 47 | - --destinationdataset (string): the destination dataset where backups should be stored. 48 | 49 | Source options: 50 | 51 | - --createsnapshotonsource (flag): When set the script will create a new snapshot on the source dataset everytime it is called. 52 | - --snapshotstokeeponsource (number): How many snapshots we should keep on the source dataset. More datasets on source will be deleted (oldest beeing deleted first). If set to 0 no snapshots will be removed on the source. See also --minimumtimetokeepsnapshotsonsource option. 53 | - --minimumtimetokeepsnapshotsonsource (string): Minimum time how long snapshots should exist on the source. With this set snapshots on the source will be kept at least that long even if there are more than the number of snapshots given in the --snapshotstokeeponsource option. (Eg: *1week*, *1month* or something like that). 54 | - --replicate (flag): Only needed for the very first backup. It will replicate all snapshots from the source to the destination. 55 | - --recursive (flag): Should we backup all decendent datasets on the source to the destination. 56 | - --raw (flag): Backup the data in raw mode, this sends the encrypted version of a dataset, if using ZFS encryption 57 | - --datasetstoignoreonsource (string): If you are recursivly backing up, you can disable backing up datasets that match this comma seperated list of datasets. 58 | 59 | Destination snapshots: 60 | 61 | - --deletesnapshotsondestination (flag): Should old snapshots on the destination be deleted. 62 | - --keepbackupshash (string): A comma seperated list of value pairs that define the granularity of how many snapshots are kept on the destination when they are getting older. The default is *24h=>5min,7d=>1h,90d=>1d,1y=>1w,10y=>1month* which means: 63 | 64 | 24h=>5mi for snapshots younger than 24hours: keep not more than one per 5 minutes 65 | 7d=>1h for snapshots younger than 7 days: keep not more than one snapshot per 1 hour 66 | . 67 | . 68 | . 69 | - --maximumtimeperfilesystemhash (string) default: A comma seperated list of value pairs that define the granularity of how old snapshots can get on the destination. Special datasets might not be as important as others. Default of *.\*=>3months,.+/(Dropbox|Downloads|Caches|Mail Downloads|Saved Application State|Logs)$=>1month* means: 70 | 71 | .*=>10yrs keep everything 10 years by default - after that snapshots are removed 72 | .+/(Dropbox|Downloads|Caches|Mail Downloads|Saved Application State|Logs)$=>1month 73 | remove snapshots older than one month for datasets ending with the regex. 74 | 75 | 76 | Configuration: 77 | 78 | - --configurationfilename (string): config.ini filename the defaults are read from. Only works if you Config::Inifiles installed. 79 | - --debug (number): debugging level of the script itself. When set will also enable verbose. 80 | - --verbose (flag): showing more verbosely what is going on. 81 | - --help (flag): Shows all options of the script and all values without starting the script. 82 | 83 | 84 | Examples 85 | -------- 86 | 87 | My current setup looks like this: 88 | 89 | $ zfs list 90 | puddle 207Gi 214Gi 864Ki /Volumes/puddle 91 | puddle/Local 207Gi 214Gi 2.50Gi /Local 92 | puddle/Local/Users 204Gi 214Gi 891Mi /Local/Users 93 | puddle/Local/Users/jolly 204Gi 214Gi 50.4Gi /Local/Users/jolly 94 | puddle/Local/Users/jolly/Disks 22.4Gi 214Gi 22.3Gi /Local/Users/jolly/Disks 95 | puddle/Local/Users/jolly/Downloads 1.62Gi 214Gi 1.62Gi /Local/Users/jolly/Downloads 96 | puddle/Local/Users/jolly/Dropbox 3.53Gi 214Gi 3.53Gi /Local/Users/jolly/Dropbox 97 | puddle/Local/Users/jolly/Library 44.6Gi 214Gi 28.3Gi /Local/Users/jolly/Library 98 | puddle/Local/Users/jolly/Library/Caches 2.05Gi 214Gi 2.04Gi /Local/Users/jolly/Library/Caches 99 | puddle/Local/Users/jolly/Library/Logs 72.2Mi 214Gi 70.4Mi /Local/Users/jolly/Library/Logs 100 | puddle/Local/Users/jolly/Library/Mail 13.8Gi 214Gi 13.7Gi /Local/Users/jolly/Library/Mail 101 | puddle/Local/Users/jolly/Library/Mail Downloads 868Ki 214Gi 868Ki /Local/Users/jolly/Library/Mail Downloads 102 | puddle/Local/Users/jolly/Library/Saved Application State 50.8Mi 214Gi 9.38Mi /Local/Users/jolly/Library/Saved Application State 103 | puddle/Local/Users/jolly/Pictures 80.4Gi 214Gi 80.4Gi /Local/Users/jolly/Pictures 104 | ocean 1.24Ti 567Gi 266Ki /Volumes/ocean 105 | ocean/puddle 635Gi 567Gi 187Ki /Volumes/ocean/puddle 106 | ocean/puddle/Local 635Gi 567Gi 1.76Gi /Volumes/ocean/puddle/Local 107 | ocean/puddle/Local/Users 632Gi 567Gi 539Mi /Volumes/ocean/puddle/Local/Users 108 | ocean/puddle/Local/Users/jolly 631Gi 567Gi 49.8Gi /Volumes/ocean/puddle/Local/Users/jolly 109 | ocean/puddle/Local/Users/jolly/Disks 48.0Gi 567Gi 22.1Gi /Volumes/ocean/puddle/Local/Users/jolly/Disks 110 | ocean/puddle/Local/Users/jolly/Downloads 1.62Gi 567Gi 1.62Gi /Volumes/ocean/puddle/Local/Users/jolly/Downloads 111 | ocean/puddle/Local/Users/jolly/Dropbox 4.42Gi 567Gi 3.47Gi /Volumes/ocean/puddle/Local/Users/jolly/Dropbox 112 | ocean/puddle/Local/Users/jolly/Library 93.7Gi 567Gi 22.6Gi /Volumes/ocean/puddle/Local/Users/jolly/Library 113 | ocean/puddle/Local/Users/jolly/Library/Caches 1.90Gi 567Gi 1.90Gi /Volumes/ocean/puddle/Local/Users/jolly/Library/Caches 114 | ocean/puddle/Local/Users/jolly/Library/Logs 65.2Mi 567Gi 65.1Mi /Volumes/ocean/puddle/Local/Users/jolly/Library/Logs 115 | ocean/puddle/Local/Users/jolly/Library/Mail 18.6Gi 567Gi 11.2Gi /Volumes/ocean/puddle/Local/Users/jolly/Library/Mail 116 | ocean/puddle/Local/Users/jolly/Library/Mail Downloads 210Ki 567Gi 208Ki /Volumes/ocean/puddle/Local/Users/jolly/Library/Mail Downloads 117 | ocean/puddle/Local/Users/jolly/Library/Saved Application State 12.4Mi 567Gi 5.83Mi /Volumes/ocean/puddle/Local/Users/jolly/Library/Saved Application State 118 | ocean/puddle/Local/Users/jolly/Pictures 85.7Gi 567Gi 73.8Gi /Volumes/ocean/puddle/Local/Users/jolly/Pictures 119 | 120 | /Local is where my home directory lives. The script is called as follows 121 | 122 | 123 | $ sudo ./zfstimemachinebackup.perl --sourcedataset=puddle --destinationdataset=ocean/puddle --snapshotstokeeponsource=100 --createsnapshotonsource --recursive 124 | 125 | So puddle is set as source, ocean/puddle will receive the snapshots from puddle and 100 snapshots are kept on puddle itself. 126 | 127 | I'm also sending backups from the backupdisk to a remote machine with less space, so I keep backups only for 3 months: 128 | 129 | $ sudo ./zfstimemachinebackup.perl --sourcedataset=ocean/puddle --destinationdataset=backups/puddle --destinationhost=server.example.com --recursive --maximumtimeperfilesystemhash='.*=>3months,.+/(Dropbox|Downloads|Caches|Mail Downloads|Saved Application State|Logs)$=>1month' 130 | 131 | 132 | 133 | Autoscrub script 134 | ---------------- 135 | My backupserver usually sleeps and so it might take a day or two before finishing a download. The autoscrub script will scrub the given pool after the time given. 136 | 137 | usage: sudo ./autoscrub.perl --scrubinterval=14 138 | 139 | This will scrub your pools every 14 days. If you cancel a scrub that will be recognized but also it will be scrubed after the scrubinterval passed, in case you forgot that you canceled it. 140 | 141 | You can start it for different pools as well. 142 | 143 | I'm using it in a crontab entry: 144 | 145 | 1 * * * * cd ~jolly/Binaries/ZFSTimeMachine;./autoscrub.perl >/dev/null 2>&1 146 | 147 | 148 | 149 | Mac OS X only stuff 150 | =================== 151 | 152 | The following is only relevant to those who use Macs. 153 | 154 | 155 | CheckBackup Script 156 | ------------------- 157 | 158 | The checkbackup.perl script checks if your backupscript is working correctly. As I do sleep my machine it will check if the snapshots are beeing done within the last 2*snapshotinterval+snapshottime seconds since the last wake or reboot. Exit code is correct depending if the snapshot is there or not. 159 | If the checkbackupscript can't find out the last sleep and boot time it will bug you about backups beeing too old when the machine has beeing powerd off for some time. 160 | 161 | It has three options : 162 | 163 | --datasets which dataset(s) to use comma separated list 164 | --snaphotinterval how often do you create snapshots 165 | --snapshotime how long it usually take for a snapshot to complete 166 | 167 | 168 | $[checkbackup.perl] module options are : 169 | --configurationfilename (string) default: config.ini 170 | current: not used as Config:IniFiles module not present 171 | --debug (number) default: 0 172 | --help (option) default: 173 | current: 1 174 | --datasets (string) default: puddle 175 | --snapshotinterval (number) default: 300 176 | --snapshottime (number) default: 10 177 | 178 | 179 | I'm currently using a script at crontab (executing as root) to tell me when things go wrong: 180 | 181 | #!/bin/zsh 182 | 183 | ./checkbackup.perl --datasets="puddle/Local,puddle/Local/Users,puddle/Local/Users/jolly,puddle/Local/Users/jolly/Library,puddle/Local/Users/jolly/Disks,puddle/Local/Users/jolly/Pictures" --snapshotinterval=7200 || say -v alex "dataset snapshot on local host is too old" 184 | ./checkbackup.perl --datasets="example.com:pond/puddle/Local,example.com:pond/puddle/Local/Users,example.com:pond/puddle/Local/Users/jolly,example.com:pond/puddle/Local/Users/jolly/Library,example.com:pond/puddle/Local/Users/jolly/Disks,example.com:pond/puddle/Local/Users/jolly/Pictures" --snapshotinterval=7200 || say -v alex "dataset pond snapshots on example.com are too old" 185 | 186 | 187 | 188 | TimeMachine backups to ZFS Volumes 189 | ---------------------------------- 190 | 191 | For those of you that want to use MacOS X's TimeMachine to backup to a ZFS volume you can create the needed sparsebundle with the following commands. 192 | I now have moved everything except for my boot partitions to ZFS. To have some backup of the root drive I'm backing that of with Apples provided TimeMachine and here is how I do it: 193 | 194 | Create a zfs filesystem for the TimeMachine backups for several machines: 195 | 196 | sudo zfs create ocean/TimeMachine 197 | 198 | 199 | Create a 100Gb sparsebundle for TimeMachine (my root is rather small, your mileage may vary): 200 | 201 | hdiutil create -size 100g -library SPUD -fs JHFSX -type SPARSEBUNDLE -volname "tmmachinename" /Volumes/ocean/TimeMachine/tmmachinename.sparsebundle 202 | 203 | 204 | Set up crontab to mount the sparsebundle every 20 minutes if it's not mounted yet. This is needed as TimeMachine will unmount the backup disk if it's a sparsebundle after backing up. 205 | 206 | */20 * * * * if [ ! -d /Volumes/tmtinkerbell ] ;then hdiutil attach /Volumes/ocean/TimeMachine/tmmachinename.sparsebundle; fi /dev/null 2>&1 207 | 208 | 209 | Set up TimeMachine to use the sparsebundle: 210 | 211 | tmutil setdestination -p /Volumes/tmmachinename 212 | 213 | 214 | -------------------------------------------------------------------------------- /autoscrub.perl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl 2 | # 3 | # purpose: simple script to automatically scub zpools when needed 4 | # 5 | # example usage: perl autoscrub.perl --scrubinterval=7 6 | # 7 | # this will start a scrub on all pools if the last scrub was 7 days or longer ago 8 | # 9 | 10 | 11 | 12 | use JNX::Configuration; 13 | 14 | my %commandlineoption = JNX::Configuration::newFromDefaults( { 15 | 16 | 'host' => ['','string'], 17 | 'hostoptions' => ['-c blowfish -C -l root','string'], 18 | 19 | 'pools' => ['allavailablepools','string'], 20 | 21 | 'scrubinterval' => [7,'number'], 22 | 'verbose' => [0,'flag'], 23 | 'debug' => [0,'flag'], 24 | }, __PACKAGE__ ); 25 | 26 | $commandlineoption{verbose}=1 if $commandlineoption{debug}; 27 | 28 | my %host = ( host => $commandlineoption{host}, hostoptions => $commandlineoption{hostoptions} , debug=>$commandlineoption{debug},verbose=>$commandlineoption{verbose}); 29 | 30 | 31 | use strict; 32 | 33 | use JNX::ZFS; 34 | use JNX::System; 35 | 36 | my %pools = %{ JNX::ZFS::pools( %host ) }; 37 | my @scrubpools; 38 | 39 | if( $commandlineoption{pools} eq 'allavailablepools' ) 40 | { 41 | @scrubpools = (keys %pools); 42 | } 43 | else 44 | { 45 | @scrubpools = split( /[,\s]/,$commandlineoption{pools} ); 46 | } 47 | 48 | 49 | 50 | for my $pool (@scrubpools ) 51 | { 52 | if( defined $pools{$pool} ) 53 | { 54 | if( $pools{$pool}{lastscrub} < ( time() - (86400*$commandlineoption{scrubinterval})) ) 55 | { 56 | die "could not start scrub: $!" if !defined(JNX::System::executecommand(%host, command=> 'zpool scrub '.$pool)); 57 | print "$pool: starting scrub \n"; 58 | } 59 | else 60 | { 61 | print "$pool: no scrub needed\n"; 62 | } 63 | } 64 | } 65 | 66 | 67 | -------------------------------------------------------------------------------- /checkbackup.perl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl 2 | # 3 | # purpose: simple script to see if the backup script runs smooethly. 4 | # 5 | # example usage: perl checkbackup.perl --datasets=puddle --snapshotinterval=300 6 | # 7 | # this will check if the dataset puddle has a snapshot made within the last 10 minutes since wake/boot 8 | # 9 | 10 | 11 | 12 | use JNX::Configuration; 13 | 14 | my %commandlineoption = JNX::Configuration::newFromDefaults( { 15 | 'host' => ['','string'], 16 | 'hostoptions' => ['-c blowfish -C -l root','string'], 17 | 18 | 'datasets' => ['puddle','string'], 19 | 'snapshotinterval' => [300,'number'], 20 | 'snapshottime' => [10,'number'], 21 | 22 | 'verbose' => [0,'flag'], 23 | 'debug' => [0,'flag'], 24 | }, __PACKAGE__ ); 25 | 26 | $commandlineoption{verbose}=1 if $commandlineoption{debug}; 27 | 28 | use strict; 29 | use JNX::ZFS; 30 | use JNX::System; 31 | 32 | 33 | my %host = ( host => $commandlineoption{host}, hostoptions => $commandlineoption{hostoptions}, debug=>$commandlineoption{debug},verbose=>$commandlineoption{verbose}); 34 | 35 | JNX::System::checkforrunningmyself($commandlineoption{'datasets'}) || die "Already running which means lookup for snapshots is too slow"; 36 | 37 | my $lastwaketime = JNX::System::lastwaketime(); 38 | my @datasetstotest = split(/,/,$commandlineoption{'datasets'}); 39 | 40 | for my $datasettotest (@datasetstotest) 41 | { 42 | print STDERR "Testing dataset: $datasettotest\n"; 43 | 44 | my @snapshots = JNX::ZFS::getsnapshotsfordataset(%host,dataset=>$datasettotest); 45 | # print STDERR "Snapshots: @snapshots\n"; 46 | 47 | my $snapshottime = JNX::ZFS::timeofsnapshot( pop @snapshots ); 48 | 49 | my $snapshotoffset = (2 * $commandlineoption{'snapshotinterval'}) + $commandlineoption{'snapshottime'}; 50 | 51 | if( $snapshottime + $snapshotoffset < time() ) 52 | { 53 | if( $lastwaketime + $snapshotoffset < time() ) 54 | { 55 | print STDERR "Last snapshot for dataset (".$datasettotest."):".localtime($snapshottime)." - too old\n"; 56 | exit 1; 57 | } 58 | else 59 | { 60 | print STDERR "Not long enough after reboot\n"; 61 | exit 0; 62 | } 63 | } 64 | print STDERR "Last snapshot for dataset (".$datasettotest."):".localtime($snapshottime)." - ok\n"; 65 | } 66 | exit 0; 67 | 68 | -------------------------------------------------------------------------------- /zfstimemachinebackup.perl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl 2 | # author: patrick stein aka jolly 3 | # purpose: simple zfs backup from one dataset to another via sending snapshots, deleting old ones in time machine style. 4 | # 5 | # the script creates a snapshot on the source dataset every time it is called 6 | # then it figures out the last snapshot on the destination dataset that matches to one on the source dataset 7 | # it sends the snapshot from the source to the destination 8 | # removes old snapshots on the source - it keeps just n-snapshots 9 | # removes old snapshots on the destination - time machine fashion ( 5min/last day, 1 hour last week, 1 day last 3 months, 1 week thereafter ) 10 | # 11 | # 12 | # 13 | # example usage: perl zfstimemachinebackup.perl --sourcedataset=puddle --destinationdataset=tank/puddle --snapshotstokeeponsource=100 --createsnapshotonsource 14 | # 15 | ###################################### 16 | use strict; 17 | use feature "state"; 18 | use POSIX qw(strftime EXIT_FAILURE EXIT_SUCCESS); 19 | use Data::Dumper; 20 | 21 | use JNX::ZFS; 22 | use JNX::System; 23 | 24 | $ENV{PATH}=$ENV{PATH}.':/usr/sbin/'; 25 | 26 | 27 | 28 | use JNX::Configuration; 29 | 30 | my %commandlineoption = JNX::Configuration::newFromDefaults( { 31 | 'sourcehost' => ['','string'], 32 | 'sourcehostoptions' => ['-C -l root','string'], 33 | 'sourcedataset' => ['','string'], 34 | 'sourceenvironment' => ['','string'], 35 | 36 | 'destinationhost' => ['','string'], 37 | 'destinationhostoptions' => ['-C -l root','string'], 38 | 'destinationdataset' => ['','string'], 39 | 'destinationenvironment' => ['"PATH=\$PATH:/usr/local/bin"','string'], 40 | 41 | 'createsnapshotonsource' => [0,'flag'], 42 | 'snapshotstokeeponsource' => [0,'number'], 43 | 'minimumtimetokeepsnapshotsonsource' => ['','string'], 44 | 'raw' => [0,'flag'], 45 | 'replicate' => [0,'flag'], 46 | 'deduplicate' => [0,'flag'], 47 | 'deletesnapshotsondestination' => [1,'flag'], 48 | 'datasetstoignoreonsource' => ['','string'], 49 | 50 | 'recursive' => [0,'flag'], 51 | 'keepbackupshash' => ['24h=>5min,7d=>1h,90d=>1d,1y=>1w,10y=>1month','string'], 52 | 'maximumtimeperfilesystemhash' => ['.*=>10yrs,.+/(Dropbox|Downloads|Caches|Mail Downloads|Saved Application State|Logs)$=>1month','string'], 53 | 54 | 'verbose' => [0,'flag'], 55 | 'debug' => [0,'flag'], 56 | }, __PACKAGE__ ); 57 | 58 | $commandlineoption{verbose}=1 if $commandlineoption{debug}; 59 | 60 | my $scriptstarttime = time(); 61 | my $timebuckets = jnxparsetimeperbuckethash( $commandlineoption{keepbackupshash} ); 62 | my @maximumtimebuckets = jnxparsetimeperfilesystemhash( $commandlineoption{maximumtimeperfilesystemhash} ); 63 | my $snapshotstokeeponsource = $commandlineoption{snapshotstokeeponsource}; 64 | my $minimumtimetokeepsnapshotsonsource = jnxparsesimpletime( $commandlineoption{minimumtimetokeepsnapshotsonsource} ); 65 | my @datasetstoignoreonsource = split(',',$commandlineoption{datasetstoignoreonsource}); 66 | 67 | my %source = ( host => $commandlineoption{sourcehost} , hostoptions => $commandlineoption{sourcehostoptions} , remoteenvironment => $commandlineoption{sourceenvironment} ,dataset => $commandlineoption{sourcedataset} , debug=>$commandlineoption{debug},verbose=>$commandlineoption{verbose}); 68 | my %destination = ( host => $commandlineoption{destinationhost} , hostoptions => $commandlineoption{destinationhostoptions} , remoteenvironment => $commandlineoption{destinationenvironment} ,dataset => $commandlineoption{destinationdataset} , debug=>$commandlineoption{debug},verbose=>$commandlineoption{verbose}); 69 | 70 | 71 | #### 72 | # create a new snapshot 73 | #### 74 | 75 | my $newsnapshotname = undef; 76 | 77 | if( $commandlineoption{createsnapshotonsource} ) 78 | { 79 | $newsnapshotname = JNX::ZFS::createsnapshot( %source, recursive => $commandlineoption{recursive} ) || die "Could not create snapshot on $source{host}:$source{dataset}"; 80 | 81 | print 'Created '.($commandlineoption{recursive}?'recursive ':undef).'snapshot '.$newsnapshotname."\n"; 82 | } 83 | 84 | #### 85 | # prevent us from running twice 86 | #### 87 | JNX::System::checkforrunningmyself($commandlineoption{sourcedataset}.$commandlineoption{destinationdataset}) || die "Already running"; 88 | 89 | if( my $childpid = fork() ) 90 | { 91 | print "Waiting for working child to exit\n" if $commandlineoption{debug}; 92 | wait; 93 | 94 | print "Child work done, deleting pid file\n" if $commandlineoption{debug}; 95 | my $pidfile = JNX::System::pidfilename($commandlineoption{sourcedataset}.$commandlineoption{destinationdataset}); 96 | unlink($pidfile); 97 | exit; 98 | } 99 | 100 | 101 | { 102 | my @sourcedatasets = ( $commandlineoption{sourcedataset} ); 103 | 104 | if( $commandlineoption{recursive} ) 105 | { 106 | my @recursivedatasets = JNX::ZFS::getsubdatasets( %source ); 107 | 108 | print 'Got sourcefilesystems (before deleting unwanted ones):'.join("\n\t",@recursivedatasets)."\n" if $commandlineoption{verbose}; 109 | 110 | @sourcedatasets = (); 111 | 112 | WEEDOUTUNWANTEDONES: for my $sourcedataset (@recursivedatasets) 113 | { 114 | for my $datasettoignore (@datasetstoignoreonsource) 115 | { 116 | if( $sourcedataset =~ /^\Q$datasettoignore\E/ ) 117 | { 118 | print 'Ignoring dataset:'.$sourcedataset."\n"; 119 | next WEEDOUTUNWANTEDONES; 120 | } 121 | } 122 | print "Keeping dataset:$sourcedataset\n" if $commandlineoption{verbose}; 123 | push(@sourcedatasets,$sourcedataset); 124 | } 125 | print "Got sourcefilesystems:".join("\n\t",@sourcedatasets)."\n" if $commandlineoption{verbose}; 126 | } 127 | 128 | DATASET:for my $sourcedataset (@sourcedatasets) 129 | { 130 | my $destinationdataset = $sourcedataset; 131 | 132 | $destinationdataset =~ s/^\Q$source{dataset}\E/$destination{dataset}/; 133 | 134 | my $maximumtimeforfilesystem = 0; 135 | 136 | REGEXTEST: for my $regexandvaluearray (reverse @maximumtimebuckets) 137 | { 138 | my($regex,$value) = (@{$regexandvaluearray}); 139 | 140 | if( $sourcedataset =~ m/$regex/ ) 141 | { 142 | $maximumtimeforfilesystem = $value; 143 | print "Matched source: $regex $sourcedataset\n" if $commandlineoption{debug}; 144 | last REGEXTEST; 145 | } 146 | } 147 | 148 | print STDERR "Working on sourcedataset: $sourcedataset destinationdataset:$destinationdataset Maximumtime:$maximumtimeforfilesystem\n"; 149 | 150 | #### 151 | # figure out existing snapshots on both datasets 152 | #### 153 | my @sourcesnapshots = JNX::ZFS::getsnapshotsfordataset(%source ,dataset => $sourcedataset); 154 | my @destinationsnapshots = JNX::ZFS::getsnapshotsfordataset(%destination ,dataset => $destinationdataset); 155 | 156 | if( ! @sourcesnapshots ) 157 | { 158 | die "Did not find snapshot on source dataset"; 159 | } 160 | 161 | my $lastsourcesnapshot = @sourcesnapshots[$#sourcesnapshots]; 162 | my $snapshotdate = strftime "%Y-%m-%d-%H%M%S", localtime(JNX::ZFS::timeofsnapshot($lastsourcesnapshot)); 163 | 164 | 165 | my $lastcommonsnapshot = undef; 166 | 167 | { 168 | my %knownindestination; 169 | @knownindestination{@destinationsnapshots} = @destinationsnapshots; 170 | 171 | 172 | for my $snapshotname (@sourcesnapshots) 173 | { 174 | if( $knownindestination{$snapshotname} ) 175 | { 176 | $lastcommonsnapshot = $snapshotname; 177 | } 178 | } 179 | 180 | if( !$lastcommonsnapshot ) 181 | { 182 | print "Could not find common snapshot between source ($sourcedataset) and destination ($destinationdataset)\n"; 183 | print "Destination snapshots:\n\t".join("\n\t",@destinationsnapshots)."\n"; 184 | print "Source snapshots:\n\t".join("\n\t",@sourcesnapshots)."\n"; 185 | } 186 | else 187 | { 188 | print 'Last common snapshot: '.$lastcommonsnapshot."\n"; 189 | 190 | if( $commandlineoption{deletesnapshotsondestination} ) 191 | { 192 | my @snapshotsnewerondestination = (); 193 | my $foundlastcommon = 0; 194 | 195 | for my $snapshotname (@destinationsnapshots) 196 | { 197 | if( $snapshotname eq $lastcommonsnapshot ) 198 | { 199 | $foundlastcommon = 1; 200 | } 201 | elsif( $foundlastcommon ) 202 | { 203 | push( @snapshotsnewerondestination, $snapshotname ); 204 | } 205 | } 206 | 207 | if( @snapshotsnewerondestination ) 208 | { 209 | print 'Snapshots newer on destination dataset('.$destinationdataset.'):'.$snapshotsnewerondestination[0].(@snapshotsnewerondestination>1?' - '.$snapshotsnewerondestination[-1]:undef)."\n"; 210 | 211 | for my $snapshotname (@snapshotsnewerondestination) 212 | { 213 | JNX::ZFS::destroysnapshots( %destination, dataset => $destinationdataset,snapshots => $snapshotname ); 214 | 215 | @destinationsnapshots = grep(!/^\Q$snapshotname\E$/,@destinationsnapshots); # grep as delete @destinationsnapshots[$snapshotname] works only on hashes. 216 | } 217 | } 218 | } 219 | } 220 | } 221 | 222 | 223 | #### 224 | # send new snapshot diff to destination 225 | #### 226 | if( $lastcommonsnapshot eq $snapshotdate ) 227 | { 228 | print "Did not find newer snapshot on source $sourcedataset\n" if $commandlineoption{verbose}; 229 | } 230 | else 231 | { 232 | my $zfssendcommand = undef; 233 | my $zfsreceivecommand = 'zfs receive '.($commandlineoption{verbose}?'-v ':undef).'-F "'.$destinationdataset.'"'; 234 | 235 | if( $lastcommonsnapshot ) 236 | { 237 | $zfssendcommand = 'zfs send '.($commandlineoption{verbose}?'-v ':undef).($commandlineoption{raw}?'-w ':undef).($commandlineoption{deduplicate}?'-D ':undef).'-I "'.$sourcedataset.'@'.$lastcommonsnapshot.'" "'.$sourcedataset.'@'.$snapshotdate.'"'; 238 | } 239 | else 240 | { 241 | $zfssendcommand = 'zfs send '.($commandlineoption{verbose}?'-v ':undef).($commandlineoption{raw}?'-w ':undef).($commandlineoption{replicate}?'-R ':undef).($commandlineoption{deduplicate}?'-D ':undef).'"'.$sourcedataset.'@'.$snapshotdate.'"'; 242 | } 243 | 244 | 245 | { 246 | # workaround is needed as the 2012-01-13 panics the machine if zfs send pipes to zfs receive 247 | 248 | my $zfsbugworkaroundintermediatefifo = JNX::System::temporaryfilename($snapshotdate,$sourcedataset.$destinationdataset); 249 | 250 | unlink($zfsbugworkaroundintermediatefifo); 251 | system('mkfifo '."$zfsbugworkaroundintermediatefifo") && die "Could not create fifo: $zfsbugworkaroundintermediatefifo"; 252 | 253 | if( 0 == ( my $pid = fork() ) ) 254 | { 255 | die "Can't execute $zfssendcommand" if !defined(JNX::System::executecommand(%source, command=> $zfssendcommand, outputfile=>$zfsbugworkaroundintermediatefifo)); 256 | exit; 257 | } 258 | else 259 | { 260 | die "Could not fork zfs send" if $pid<0 261 | } 262 | 263 | die "Can't execute $zfsreceivecommand" if !defined(JNX::System::executecommand(%destination, command=>$zfsreceivecommand, inputfile=>$zfsbugworkaroundintermediatefifo)); 264 | 265 | unlink($zfsbugworkaroundintermediatefifo); 266 | } 267 | } 268 | 269 | #### 270 | # delete unneeded snapshots in source 271 | #### 272 | { 273 | my @snapshotstodelete = undef; 274 | 275 | for my $snapshotname (@sourcesnapshots) 276 | { 277 | if( $lastcommonsnapshot eq $snapshotname ) 278 | { 279 | $lastcommonsnapshot = undef; 280 | last; 281 | } 282 | push(@snapshotstodelete,$snapshotname); 283 | } 284 | 285 | if( $snapshotstokeeponsource>1 && !$lastcommonsnapshot && ( @snapshotstodelete > $snapshotstokeeponsource ) ) 286 | { 287 | splice(@snapshotstodelete,-1* $snapshotstokeeponsource); 288 | 289 | print 'Snapshots to delete on source ('.$sourcedataset.'): '.$snapshotstodelete[0].(@snapshotstodelete>1?' - '.$snapshotstodelete[-1]:undef)."\n"; 290 | 291 | for my $snapshotname (@snapshotstodelete) 292 | { 293 | if( length($snapshotname) ) 294 | { 295 | if( $minimumtimetokeepsnapshotsonsource > 0 ) 296 | { 297 | my $snapshottime = JNX::ZFS::timeofsnapshot($snapshotname); 298 | if( $snapshottime < $scriptstarttime-$minimumtimetokeepsnapshotsonsource ) 299 | { 300 | JNX::ZFS::destroysnapshots( %source, dataset=>$sourcedataset, snapshots => $snapshotname ); 301 | } 302 | } 303 | else 304 | { 305 | JNX::ZFS::destroysnapshots( %source, dataset=>$sourcedataset, snapshots => $snapshotname ); 306 | } 307 | } 308 | } 309 | } 310 | } 311 | 312 | 313 | #### 314 | # remove old snapshots in time machine fashion from destination 315 | #### 316 | if( $commandlineoption{deletesnapshotsondestination} ) 317 | { 318 | my %backupbuckets; 319 | 320 | for my $snapshotname (reverse @destinationsnapshots ) 321 | { 322 | if( my $snapshottime = JNX::ZFS::timeofsnapshot($snapshotname) ) 323 | { 324 | my $bucket = bucketfortime($snapshottime); 325 | my $keepsnapshot = 1; 326 | 327 | 328 | if( $backupbuckets{$bucket} ) 329 | { 330 | $keepsnapshot = 0; 331 | } 332 | elsif( ($maximumtimeforfilesystem > 0) && (($scriptstarttime-$snapshottime) > $maximumtimeforfilesystem) ) 333 | { 334 | $keepsnapshot = 0; 335 | } 336 | 337 | 338 | if( $keepsnapshot ) 339 | { 340 | $backupbuckets{$bucket}=$snapshotname; 341 | print 'Will keep snapshot: '.$snapshotname.'='.$snapshottime.' Backup in bucket: $backupbucket{'.$bucket.'}='.$backupbuckets{$bucket}."\n" if $commandlineoption{verbose}; 342 | } 343 | else 344 | { 345 | print 'Will remove snapshot:'.$snapshotname.'='.$snapshottime.' Backup in bucket: $backupbucket{'.$bucket.'}='.$backupbuckets{$bucket}."\n"; 346 | 347 | JNX::ZFS::destroysnapshots( %destination, dataset => $destinationdataset, snapshots => $snapshotname ); 348 | } 349 | } 350 | else 351 | { 352 | print STDERR "snapshot not in YYYY-MM-DD-HHMMSS format: $snapshotname - ignoring\n"; 353 | } 354 | } 355 | } 356 | } 357 | } 358 | 359 | 360 | exit; 361 | 362 | 363 | sub jnxparsetimeperbuckethash 364 | { 365 | my($timestring) = @_; 366 | my %timehash; 367 | 368 | my @keysandvalues = split(/,/,$timestring); 369 | 370 | foreach my $keyandvalue (@keysandvalues) 371 | { 372 | my($key,$value) = split(/=>/,$keyandvalue); 373 | 374 | #print STDERR "Key: $key Value: $value \n"; 375 | if( $key && $value ) 376 | { 377 | my $keytime = jnxparsesimpletime($key); 378 | my $valuetime = jnxparsesimpletime($value); 379 | 380 | print "Found Keytime: $keytime Valuetime: $valuetime \n" if $commandlineoption{verbose}; 381 | 382 | if( ($keytime>=0) && ($valuetime>=0) ) 383 | { 384 | $timehash{$keytime}=$valuetime; 385 | } 386 | } 387 | } 388 | print STDERR __PACKAGE__.'['.__LINE__.']:'."Created buckethash:".Data::Dumper->Dumper(\%timehash) if $commandlineoption{debug}; 389 | 390 | return \%timehash; 391 | } 392 | 393 | 394 | sub jnxparsetimeperfilesystemhash 395 | { 396 | my($timestring) = @_; 397 | my @filesystemarray; 398 | 399 | my @keysandvalues = split(/[^\\],/,$timestring); # escaping , inside a reges works 400 | 401 | foreach my $keyandvalue (@keysandvalues) 402 | { 403 | if( $keyandvalue =~ m/^(.+)=>(.+?)$/ ) # the right side can't contain a => as it's only a time 404 | { 405 | my($key,$value) = ($1,$2); 406 | 407 | $key =~ s/\\,/,/g; # replace \, in case someone has a escaped , inside a left regex 408 | 409 | if( $key && $value ) 410 | { 411 | my $valuetime = jnxparsesimpletime($value); 412 | 413 | 414 | if( length($key) && ($valuetime>=0) ) 415 | { 416 | printf __PACKAGE__.'['.__LINE__.']:'."Will use Maximumtime: %8d for filesystem matching:%s\n",$valuetime,$key if $commandlineoption{debug}; 417 | push(@filesystemarray, [$key,$valuetime] ); 418 | } 419 | } 420 | } 421 | } 422 | print STDERR __PACKAGE__.'['.__LINE__.']:'."Created timehash:".Data::Dumper->Dumper(\@filesystemarray) if $commandlineoption{debug}; 423 | 424 | return @filesystemarray; 425 | } 426 | 427 | 428 | sub jnxparsesimpletime 429 | { 430 | my($timestring) = @_; 431 | 432 | $timestring = lc $timestring; 433 | 434 | if( $timestring =~ m/(\d+)\s*(s(?:ec|econds?)?|h(?:ours?)?|d(?:ays?)?|w(:?eeks?)?|m(?:on|onths?)|m(?:ins?|inutes?)?|y(?:rs?|ears?)?)/ ) 435 | { 436 | my($count,$time) = ($1,$2); 437 | 438 | return $count*3600*24*364.25 if $time =~ /^y/; 439 | return $count*3600*24*30.5 if $time =~ /^mon/; 440 | return $count*3600*24*7 if $time =~ /^w/; 441 | return $count*3600*24 if $time =~ /^d/; 442 | return $count*3600 if $time =~ /^h/; 443 | return $count*60 if $time =~ /^m/; 444 | return $count; #defaults to seconds 445 | } 446 | return -1; 447 | } 448 | 449 | sub bucketfortime 450 | { 451 | my($timetotest) = @_; 452 | 453 | if( $timetotest > $scriptstarttime ) 454 | { 455 | print __PACKAGE__.'['.__LINE__.']:'."Time found in snapshot:".localtime($timetotest)." is in the future - exiting\n"; 456 | exit EXIT_FAILURE; 457 | } 458 | 459 | state $sortedbucketsvalues = [ sort{ $a<=>$b }( values %{$timebuckets}) ]; 460 | state $sortedbucketskeys = [ sort{ $a<=>$b }( keys %{$timebuckets} ) ]; 461 | 462 | my $timedistance = $scriptstarttime - $timetotest; 463 | my $buckettime = $$sortedbucketsvalues[-1]; # default is to put it in the last bucket and see if there are earlier buckets 464 | 465 | for my $bucketage (@{$sortedbucketskeys}) 466 | { 467 | if( $timedistance < $bucketage ) 468 | { 469 | $buckettime = $$timebuckets{$bucketage}; 470 | last; 471 | } 472 | } 473 | my $buckettimetouse = $scriptstarttime - ($scriptstarttime % $buckettime) + $buckettime; # align 474 | my $bucket = $timetotest - ($timetotest%$buckettime); 475 | 476 | print __PACKAGE__.'['.__LINE__.']:'."Timedistance: $timedistance , $timetotest, ".localtime($timetotest)." buckettime:$buckettime bucket:$bucket\n" if $commandlineoption{debug}; 477 | 478 | return $bucket; 479 | } 480 | 481 | 482 | 483 | --------------------------------------------------------------------------------