#!/usr/bin/perl -w
#
# $Id: //websites/unixwiz/unixwiz.net/webroot/evo/evo-get-updates.txt#1 $
#
# written by : Stephen J. Friedl
#              Software Consultant
#              Tustin, California USA
#              steve@unixwiz.net -or- www.unixwiz.net/evo/
#
# This program connects to the iSystems web site and looks for an
# update file showing the software available. We compare this with
# our own local hierarchy to see if we require an update, and we
# fetch the files as required.
#
# This depends on the existence of an "updates.txt" file on the web
# site: it contains the name, timestamp, and size of each of the
# files found there, and we use this to do all our comparisons.
# We presume this runs from a cron job periodically.
#
# COMMAND LINE
# ------------
#
# --help      show a brief help listing on the standard error and
#             exit. This also reports the version information.
#
# --dir=D     The top of the local directory tree is D. Below this
#             tree are directories in YYYY-MM-DD/ format, and the
#             Evo files themselves are stored there.
#
# --noexec    Show what we would update, but don't actually do it.
#             Useful for seeing how much work there is to do.
#
# --nounzip   Don't unzip any SYSTEM.db files.
#
use strict;
use Net::HTTP;

# ------------------------------------------------------------------------
# We provide a custom user agent (browser) string when fetching the
# update that serves to identify the version of software in use. This
# should be updated every time the program is updated.
#
my $UserAgent = "unixwiz.net evo-get-updates-0.9";  # also a version number

# ------------------------------------------------------------------------
# CONFIGURATION PARAMETERS
#
# $TOPURL - the full URL of the iSystems SB update site. This is only
#           changed for developer testing.
#
# $TOPDIR - the location on the local system where these updates are
#           stored.
#
my $TOPURL = "http://www.isystemsllc.com/sbupdate";
my $TOPDIR = "C:/Evolution Updates";

my $noexec  = 0;
my $nounzip = 0;

$0 =~ s|.*[\\/]||;      # dump leading part of program name

foreach ( @ARGV )
{
    if ( m/^--help/ )
    {
        print STDERR <<"EOHELP";
usage: $0 [options]

    --help      show this brief help listing, plus version info
    --dir=D     store the updates under local directory D [$TOPDIR]
    --noexec    show what we would update, but don't actually do it
    --nounzip   don't unzip any SYSTEM.db files

version: $UserAgent
EOHELP
        exit 0;
    }
    elsif ( m/^--dir=(.+)$/ )
    {
        $TOPDIR = $1;
    }
    elsif ( m/^--noexec$/ )
    {
        $noexec = 1;
    }
    elsif ( m/^--nounzip$/ )
    {
        $nounzip = 1;
    }
    else
    {
        die "ERROR: \"$_\" is not a valid command-line option (try --help)\n";
    }
}

chdir $TOPDIR or die "ERROR: cannot chdir to $TOPDIR ($!)\n";

# ------------------------------------------------------------------------
# Split the one configured URL into the hostname and path parts that
# Net::HTTP requires, then fetch the "updates.txt" file list from the
# update site.
#
my $host = get_host_from_url($TOPURL);
my $path = get_path_from_url($TOPURL) . "/updates.txt";

my $s = Net::HTTP->new(Host => $host) or die "$@";

$s->write_request( 'GET' => $path, 'User-Agent' => $UserAgent );

my($code, $mess, %h) = $s->read_response_headers;

die "ERROR: got error $code fetching $path from $host\n" if $code != 200;

my $result = "";

while (1)
{
    my $buf;

    last unless $s->read_entity_body($buf, 1024);

    $result .= $buf;
}

die "ERROR: got no data fetching $path from $host\n" if not $result;

my @LINES = split( m/\r?\n/, $result );

die "ERROR: missing 'END' in file list\n" unless grep( m/^END$/, @LINES );

my %DIRS     = ();
my @COMMANDS = ();
my @SUMMARY  = ();

foreach ( @LINES )
{
    next if m/^#/ or length == 0 or m/^END$/;

    my($filename, $mtime, $filesize) = split( m/\t/, $_ );

    # ----------------------------------------------------------------
    # SECURITY CHECK ON FILENAME
    #
    # We are relying on iSystems to provide us with the filename that
    # guides where we park the file locally, so technically it's
    # possible that they could pull some shenanigans by sending us
    # some bogus filenames:
    #
    #     Current_Release/../../../../WINNT/SYSTEM32/EXPLORER.EXE
    #
    # They won't ever do this on purpose, but to be really safe we
    # take some steps to be sure it doesn't get done by accident.
    #
    # We exclude anything with two dots in a row (even within a name),
    # anything with a leading slash, and anything with a colon,
    # backslash, or whitespace. Only two directory parts are allowed.
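    #
    # For illustration only (this is a made-up entry, not a real
    # release file): an acceptable filename looks like
    #
    #     Current_Release/SYSTEM.db.zip
    #
    # which splits on "/" into exactly two parts, a source directory
    # plus a base filename.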
    #
    my @PARTS = split( m|/|, $filename );

    die "ERROR: ${filename} is a bogus pathname\n"
        if $filename =~ m|\.\.|
        or $filename =~ m|^/|
        or $filename =~ m|[\\:\s]|
        or @PARTS != 2;

    my($sourcedir, $basename) = @PARTS;

    # ----------------------------------------------------------------
    # CREATE TARGET DIRECTORY
    #
    # The directory where we store the files is always determined by
    # the mod date on the files, so we formulate the dirname from it.
    # Then we see if the directory exists or not and add the "mkdir"
    # command if necessary.
    #
    # NOTE: Here we are only *noticing* that a directory is needed,
    # but we're not actually creating it here. So to prevent the
    # same "mkdir" command from being executed on each loop, we
    # note all directories that we've already queued and do them
    # only once.
    #
    my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime($mtime);

    my $filedir = sprintf( "%04d-%02d-%02d",    # YYYY-MM-DD format
            $year + 1900,
            $mon  + 1,
            $mday );

    if ( ! -d $filedir  and  not $DIRS{$filedir} )
    {
        $DIRS{$filedir} = 1;

        push @COMMANDS, "mkdir $filedir";
    }

    my $dstfile = "$filedir/$basename";

    if ( -f $dstfile  and  -s $dstfile == $filesize )
    {
        # already there with the right size
        next;
    }

    # remove the file if it's there now so that wget doesn't
    # append ".1" to it
    if ( -f $dstfile )
    {
        # ugh: we need backslashes here
        ( my $cmd = "del $dstfile" ) =~ s|/|\\|g;

        push @COMMANDS, $cmd;
    }

    push @COMMANDS, "wget -P $filedir $TOPURL/$filename";

    if ( $filename =~ m/\.zip$/i  and  not $nounzip )
    {
        push @COMMANDS, "unzip -d $filedir $filedir/$basename";
    }

    push @SUMMARY, "$filedir/$basename";
}

# ------------------------------------------------------------------------
# If we have nothing to do, then we exit *with error* so the calling batch
# file knows not to bother offering to extract anything.
#
if ( @COMMANDS == 0 )
{
    print "All Evolution updates are current\n";

    exit 1;
}

# we're already here, but fake it for reporting
print STDERR "--> chdir $TOPDIR\n";

foreach my $cmd ( @COMMANDS )
{
    print STDERR "--> $cmd\n";

    my $rc = 0;
       $rc = system($cmd) unless $noexec;

    die "ERROR: command failed with exit code $rc\n" unless $rc == 0;
}

# ------------------------------------------------------------------------
# The downloading operation eats up a lot of screen space, making it hard
# to tell exactly what was downloaded as it scrolls by. After the update,
# this reminds the user just what was provided.
#
print "\n";
print "\n";
print "Summary of files downloaded:\n";
print map { "\t$_\n" } @SUMMARY;

exit 0;

# ------------------------------------------------------------------------
# get_host_from_url
# get_path_from_url
#
#   Given the "base" URL for the updates area, extract just the
#   hostname or just the path required for making an HTTP connection.
#   This allows us to configure the program with just a single URL
#   at the top of the file and pick out the parts needed for the
#   Net::HTTP connection.
#
sub get_host_from_url {
    my $url = shift;

    $url =~ s|^http:/+||i;  # dump leading http part
    $url =~ s|/.*$||;       # dump trailing path part

    return $url;
}

sub get_path_from_url {
    my $url = shift;

    $url =~ s|^http:/+||i;  # dump leading http part
    $url =~ s|^[^/]*||;     # dump the hostname

    return $url;
}
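
# ------------------------------------------------------------------------
# For reference, a minimal sketch of the updates.txt layout this program
# expects, inferred from the parsing code above: one file per line, with
# filename, Unix mtime, and size-in-bytes fields separated by single TAB
# characters, and a final END marker. The entries shown are made-up
# examples, not real releases.
#
#   # comments and blank lines are ignored
#   Current_Release/SYSTEM.db.zip   1046482800  1234567
#   Current_Release/ReadMe.txt      1046482800  4321
#   END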