Attachment 'db-export-mysqlstandby.pl'

Download

#!/usr/bin/perl
####
#
# db-export-mysqlstandby - do a MySQL backup on the standby master
# Copyright (C) 2008 Anchor Systems - http://www.anchor.com.au/
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
###
#
# There are two modes of operation:
# 1. Run a standard daily mysqldump of all data to a gzip
# 2. Run an uncompressed mysqldump which is turned into an xdelta.
#    This is described below
#
# * Ensures we are running on the standby rather than the master
# * Checks a lockfile
# * locks all tables in the database
# * Records the master logfile & position as metadata
# * Dumps all databases uncompressed
# * Uses a copy of the most recent daily dump from the master and the
#   uncompressed to generate an xdelta between them
# * Deletes the uncompressed backup
# * Removes the lockfile
# * Cleans up old files (>48 hours)
#
# The local database is connected to as the "backup" user with options from
# /home/backup/.my.cnf

use strict;
use warnings;

use DBI;
use Getopt::Long;
use Date::Format;       # better date/time formatting than Time::Local
use Switch '__';        # Allow case statements. Bonus points for anime smiley.

# Print a brief usage message for this script on STDOUT.
sub usage {
        print <<"USAGE";
Usage:
$0 [--daily]

--daily             Signals a standard gzipped mysqldump of all databases to
                    take place. Without this flag an uncompressed backup is
                    taken and an xdelta generated between it and the latest
                    gzipped backup.

USAGE
}

# Emit progress messages, but only when STDOUT is a terminal -- i.e.
# when run interactively rather than from cron.
sub debug (@) {
  my @messages = @_;
  return unless -t STDOUT;
  print @messages;
}

# Establish the local database handle in $main::localdbh using the DSN
# prepared in $main::localdsn.  On failure, report the error and unwind
# via cleanup(1) (removes the lockfile and exits non-zero).
sub connect_db {
        eval {
                $main::localdbh = DBI->connect($main::localdsn, "", "", { 'RaiseError' => 1 });
        };
        return unless $@;
        warn $@;
        cleanup(1);
}

sub setup_local {
        # Prepare the standby for a consistent dump: take a global read
        # lock, then capture the master binlog coordinates that this
        # slave has executed up to, into $::binlogname / $::binlogpos
        # (written out later as metadata by dump_data()).  Failures
        # cascade into cleanup(2), which unlocks, disconnects, removes
        # the lockfile and exits non-zero.

        # Quiesce all writes so the dump agrees with the recorded
        # binlog coordinates.
        debug "FLUSH TABLES WITH READ LOCK\n";
        eval {
                $::localdbh->do("FLUSH TABLES WITH READ LOCK");
        };
        if ($@) {
                warn $@;
                cleanup(2);
        };

        # Grab the master binlog name and position from slave status
        my $sth;
        eval {
                debug "SHOW SLAVE STATUS\n";
                $sth = $::localdbh->prepare("SHOW SLAVE STATUS");
                $sth->execute;
        };
        if ($@) {
                warn $@;
                cleanup(2);
        };

        # There should be one row of show slave status data, no more, no less.
        # NOTE(review): rows() is only dependable here because DBD::mysql
        # buffers the full result set on execute; it is not portable to
        # other DBI drivers -- confirm if this script is ever reused.
        my $rows = $sth->rows;
        if ($rows != 1) {
                warn "Error: there appears to be no slave configuration. Exiting.\n";
                $sth->finish;
                cleanup(2);
        }
        else {
                # Grab the data row as a hash keyed by column name
                if (!(defined(my $hash_ref = $sth->fetchrow_hashref))) {
                        warn "Error retrieving slave status data: ", $DBI::errstr;
                        $sth->finish;
                        cleanup(2);
                }
                else {
                        # Relay_Master_Log_File / Exec_Master_Log_Pos name the
                        # point in the *master's* binlog that this slave has
                        # actually executed -- the correct restore point.
                        $::binlogname = $$hash_ref{'Relay_Master_Log_File'};
                        $::binlogpos = $$hash_ref{'Exec_Master_Log_Pos'};
                        debug("binlogname: $::binlogname\n");
                        debug("binlogpos: $::binlogpos\n");
                }
        }
        $sth->finish;
}

sub dump_data {
        # Take an uncompressed mysqldump of all local databases, diff it
        # against the most recent gzipped full dump from the master using
        # xdelta3, record replication metadata alongside it, then remove
        # the raw dump.  Failures cascade through cleanup(2) (unlock,
        # disconnect, remove lockfile, exit non-zero).
        #
        # NOTE: system() never raises, so the old eval/$@ wrappers could
        # never catch a failed command; exit statuses are now checked
        # explicitly.

        # Dump all databases uncompressed.  --skip-lock-tables because we
        # already hold the global read lock from setup_local().
        debug "/usr/bin/mysqldump --opt --skip-lock-tables -q -c -A -Q > $::backupdir/current.sql\n";
        if (system("/usr/bin/mysqldump --opt --skip-lock-tables -q -c -A -Q > $::backupdir/current.sql") != 0) {
                warn "mysqldump failed: exit status $?\n";
                cleanup(2);
        }

        # Release the global read lock as soon as the dump is on disk;
        # a failure here is worth knowing about but not fatal.
        eval {
                debug "UNLOCK TABLES\n";
                $::localdbh->do("UNLOCK TABLES");
        };
        if ($@) {
                warn "Error unlocking tables: $@";
        };

        # Expire backup files older than 48 hours (best-effort)
        system("/usr/sbin/tmpwatch --mtime 48 /var/lib/mysqlbackup");

        # Determine the latest full dump copied over from the master.
        # Daily dumps are named YYYY-MM-DD.gz; anchor the pattern so
        # metadata/xdelta files can never be picked up by accident.
        opendir my $dh, $::backupdir or do {
                warn "Can't open directory $::backupdir: $!\n";
                cleanup(2);
        };
        my @files = sort grep { /^\d{4}-\d{2}-\d{2}\.gz$/ && -f "$::backupdir/$_" } readdir $dh;
        closedir $dh;

        if (!@files) {
                warn "No dumps from the master to process xdeltas against. Will keep the current full backup.\n";

                # Promote the raw dump to a dated, gzipped full backup so
                # future runs have something to delta against.
                my $newname = time2str("%Y-%m-%d", time);
                debug "mv $::backupdir/current.sql $::backupdir/$newname\n";
                rename "$::backupdir/current.sql", "$::backupdir/$newname"
                        or warn "Can't rename current.sql to $newname: $!\n";
                debug "/bin/gzip -9 $::backupdir/$newname\n";
                if (system("/bin/gzip -9 $::backupdir/$newname") != 0) {
                        warn "gzip failed: exit status $?\n";
                }

                # Nothing to delta against; unwind and exit.
                cleanup(2);
        }

        # list of gzipped files is sorted in ascending order, so the last
        # one is the most recent
        my $backupfile = $files[-1];

        # save the timestamp so we have consistency between the xdelta
        # and metadata file names
        my $timestamp = time2str("%Y%m%d%H%M", time);

        # xdelta against it.
        # need to change the environment TMPDIR as xdelta uses /tmp by
        # default which is nowhere near big enough. use the mysqlbackup
        # dir as it should be on /data with heaps of space.
        my $xdelta_cmd = "/usr/bin/env TMPDIR=$::backupdir /usr/bin/xdelta3 -q -e -s $::backupdir/$backupfile $::backupdir/current.sql $::backupdir/$timestamp.xdelta";
        debug "$xdelta_cmd\n";
        if (system($xdelta_cmd) != 0) {
                warn "xdelta3 failed: exit status $?\n";
                cleanup(2);
        }

        # The raw dump is no longer needed once the delta exists
        debug "unlinking $::backupdir/current.sql\n";
        unlink "$::backupdir/current.sql"
                or warn "Can't unlink $::backupdir/current.sql: $!\n";

        # Write out the metadata needed to replay this xdelta later:
        # the binlog coordinates recorded under the read lock, and the
        # base file the delta was generated against.
        debug "writing out file /var/lib/mysqlbackup/$timestamp.metadata\n";
        debug "xdelta against: $backupfile\n";
        open my $meta, '>', "/var/lib/mysqlbackup/$timestamp.metadata" or do {
                warn "Can't write metadata file: $!\n";
                cleanup(2);
        };
        print {$meta} "binlogname: $::binlogname\n";
        print {$meta} "binlogpos: $::binlogpos\n";
        print {$meta} "xdelta against: $backupfile\n";
        close $meta or warn "Error closing metadata file: $!\n";
}

# Release the global read lock taken in setup_local().
sub conclude_local {
        my $sql = "UNLOCK TABLES";
        debug "$sql\n";
        $::localdbh->do($sql);
}

# Tear down the local database handle, if one was ever established.
sub disconnect_db {
        return unless defined $::localdbh;
        $::localdbh->disconnect()
                or warn "Error disconnecting local database: ", $DBI::errstr;
}

sub cleanup {
        # Unwind on failure.  The stage argument records how far setup
        # got, and the actions cascade:
        #   > 1 : tables are locked      -> unlock, then fall through
        #   > 0 : database is connected  -> disconnect
        # Afterwards always remove the lockfile and exit non-zero.
        #
        # Replaces the deprecated, source-filter-based Switch.pm
        # switch/case construct with plain conditionals (same fall-through
        # semantics as the original "next"), and the forked "rm -f" with
        # a direct unlink.
        my $stage = shift;
        conclude_local() if $stage > 1;
        disconnect_db()  if $stage > 0;
        unlink $::lockfile;
        exit 1;
}

sub run_daily {
        # Standard daily backup run: expire old files, then write a
        # gzipped mysqldump of all databases named after today's date.
        #
        # NOTE: system() never raises, so the old eval/$@ wrapper could
        # never detect a failed dump; the exit status is now checked
        # explicitly.  The date is also computed once, so the dump and
        # any failure cleanup refer to the same file even if the run
        # crosses midnight.

        # Expire backup files older than 48 hours (best-effort)
        system("/usr/sbin/tmpwatch --mtime 48 /var/lib/mysqlbackup");

        my $today = time2str("%Y-%m-%d", time);

        # NOTE(review): the shell reports the pipeline's status from its
        # last command (gzip), so a mysqldump failure with a healthy gzip
        # can still return 0 -- consider "set -o pipefail" via sh -c if
        # that matters here.
        debug "/usr/bin/mysqldump --opt -q -c -A -Q | /bin/gzip -9 > $::backupdir/$today.gz\n";
        if (system("/usr/bin/mysqldump --opt -q -c -A -Q | /bin/gzip -9 > $::backupdir/$today.gz") != 0) {
                warn "Daily mysqldump failed: exit status $?\n";
                # Don't leave a truncated/partial dump lying around.
                unlink "$::backupdir/$today.gz";
                unlink "$::lockfile";
                exit(1);
        }
}

# Get params.  --daily selects the simple gzipped full-dump mode;
# without it the xdelta-against-latest-master-dump mode runs.
my $daily = '';
if (!GetOptions('daily' => \$daily)) {
        usage();
        exit(1);
}

# Check that we are running on the standby.
# NOTE(review): system() returns the command's exit status, so it is 0
# (false) when grep *matches*; "|| exit(0)" therefore exits quietly when
# the datadir DOES contain 'hamysql', and the script proceeds only when
# it does not.  Confirm this matches the cluster layout (i.e. the
# 'hamysql' datadir path is only seen on the active master).
debug("/usr/bin/mysqladmin variables | grep -q 'datadir.*hamysql'\n");
system("/usr/bin/mysqladmin variables | grep -q 'datadir.*hamysql'") || exit(0);

our $backupdir = "/var/lib/mysqlbackup/";
our $lockfile = "/tmp/db-export-mysqlstandby.lock";
# Ensure we don't run concurrently with ourselves: lockfile(1) exits
# non-zero if the lock already exists (-r 0 means no retries).
debug("/usr/bin/lockfile -r 0 $lockfile\n");
system("/usr/bin/lockfile -r 0 $lockfile") && die "Can't lock $lockfile. Another instance must be running or a stale lockfile was left around.\n";

# Daily mode: dump, drop the lock and stop here.
if ($daily) {
        run_daily();
        unlink "$lockfile";
        exit(0);
}

# Set up the DSN to the local database. Connects as "backup" using
# default options from /home/backup/.my.cnf.
our $localdsn = "DBI:mysql:mysql;mysql_read_default_file=/home/backup/.my.cnf;mysql_connect_timeout=5";
our $localdbh;

# Variables for storing the master binlog name and position
# (filled in by setup_local(), written out by dump_data())
our $binlogname;
our $binlogpos;

# Make the connection
connect_db();

# Setup the local server: lock tables, record binlog coordinates
setup_local();

# Backup the database, perform xdeltas etc
dump_data();

# Disconnect from the local database
disconnect_db();

# Remove the lockfile and exit
unlink "$lockfile";
exit(0);

Attached Files

To refer to attachments on a page, use attachment:filename, as shown below in the list of files. Do NOT use the URL of the [get] link, since this is subject to change and can break easily.
  • [get | view] (2013-05-31 11:14:57, 8.0 KB) [[attachment:IPaddr2_vlan.sh]]
  • [get | view] (2013-05-31 11:14:58, 361.3 KB) [[attachment:LCA2008-talk.odp]]
  • [get | view] (2013-05-31 11:14:58, 832.2 KB) [[attachment:LCA2008-talk.pdf]]
  • [get | view] (2013-05-31 11:14:57, 3.7 KB) [[attachment:alter_mysql_slave]]
  • [get | view] (2013-05-31 11:14:57, 3.7 KB) [[attachment:alter_mysql_slave.sh]]
  • [get | view] (2013-05-31 11:14:57, 1.5 KB) [[attachment:authkeys]]
  • [get | view] (2013-05-31 11:14:57, 1.5 KB) [[attachment:authkeys.txt]]
  • [get | view] (2013-05-31 11:14:58, 1.1 KB) [[attachment:check-drbd.sh]]
  • [get | view] (2013-05-31 11:14:57, 11.4 KB) [[attachment:check_replication.pl]]
  • [get | view] (2013-05-31 11:14:57, 6.1 KB) [[attachment:cib.xml.template]]
  • [get | view] (2013-05-31 11:14:58, 8.0 KB) [[attachment:db-export-mysqlstandby.pl]]
  • [get | view] (2013-05-31 11:14:57, 3.8 KB) [[attachment:drbd.conf.txt]]
  • [get | view] (2013-05-31 11:14:58, 1.3 KB) [[attachment:drbddisk.sh]]
  • [get | view] (2013-05-31 11:14:57, 1.5 KB) [[attachment:ha.cf.txt]]
  • [get | view] (2013-05-31 11:14:58, 9.0 KB) [[attachment:init_mysql_slave.pl]]
  • [get | view] (2013-05-31 11:14:58, 1.2 KB) [[attachment:logd.cf.txt]]
  • [get | view] (2013-05-31 11:14:58, 2.2 KB) [[attachment:lvs-helper.sh]]
  • [get | view] (2013-05-31 11:14:57, 1.8 KB) [[attachment:my.cnf.txt]]
  • [get | view] (2013-05-31 11:14:57, 2.2 KB) [[attachment:myslave.cnf.txt]]
  • [get | view] (2013-05-31 11:14:57, 5.5 KB) [[attachment:mysqld.sh]]
  • [get | view] (2013-05-31 11:14:58, 2.0 KB) [[attachment:mysqlslave.cnf.txt]]
  • [get | view] (2013-05-31 11:14:57, 5.7 KB) [[attachment:mysqlslaved.sh]]
  • [get | view] (2013-05-31 11:14:57, 3.5 KB) [[attachment:write_heartbeat.pl]]

You are not allowed to attach a file to this page.