<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<HTML>
<HEAD>
 <META http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
 <META NAME="GENERATOR" CONTENT="lfparser_2.45">
 <META NAME="LFCATEGORY" CONTENT="SystemAdministration">
 <link rel="icon" href="../../common/images/lf-16.png" type="image/png">
 <TITLE>lf321, SystemAdministration: storeBackup, the unconventional backup tool</TITLE>
<style type="text/css">
<!--
 td.top {font-family: Arial,Geneva,Verdana,Helvetica,sans-serif; font-size:12 }
 pre { font-family:monospace,Courier }
 pre.code { font-family:monospace,Courier;background-color:#aedbe8; }
 p.cl { color:#EE9500 }
 a.nodec { text-decoration:none }
 p.trans { font-size:8pt; text-align:right }
 p.clbox { width:50%; alignment:center; background-color:#FFD700; 
           border-style:none; border-width:medium; border-color:#FFD700; 
           padding:0.5cm;  text-align:center }
 p.code { width:80%; alignment:center; background-color:#aedbe8; 
          border-style:none; border-width:medium; border-color:#aedbe8; 
          padding:0.1cm;  text-align:left }
 p.foot { background-color:#AAAAAA; color:#FFFFFF; border-style:none; 
          border-width:medium; border-color:#AAAAAA; padding:0.5cm ; 
          margin-top:0.1cm; margin-right:1cm; margin-left:1cm; 
          text-align:center }
 .mark  { background-color:#e6e6ff }
-->
</style>
 
</HEAD>
<BODY bgcolor="#ffffff" text="#000000">
 <!-- this is generated html code. NEVER use this file for your
 translation work. Instead get the file with the same article number
 and .meta.shtml in its name. Translate this meta file and then
 use lfparser program to generate the final article -->
 <!-- lfparser can be obtained from http://main.linuxfocus.org/~guido/dev/lfparser.html -->

<!-- this is used by a number of tools:
 =LF=AUTHOR: Heinz-Josef Claes
 =LF=CAT___: SystemAdministration
 =LF=TITLE_: storeBackup, the unconventional backup tool
 =LF=NUMBER: 321
 =LF=ANAME_: article321.shtml
 -->

<!-- 2pdaIgnoreStart -->

<!-- start navegation bar, style=2 -->
 <!-- top navegation bar -->
 <TABLE summary="topbar_1" cellspacing="0" cellpadding="0" border="0" align="center" width="90%">
   <TR bgcolor="#2e2292">
     <TD class="top"><TABLE summary="topbar_1_logo" cellspacing="0" cellpadding="0" border="0" width=
       "100%">
         <TR><TD width="319"><IMG src="../../common/images/logolftop_319x45.gif"
           alt="[LinuxFocus-icon]" width="319" height="45" align="left" 
           border="0"></TD>

           <TD class="top">
             <TABLE summary="topbar_1_links" width="100%">
               <TR align="right">
                 <TD class="top">
                 <A class="nodec" href="index.shtml"><FONT color=
                 "#DDDDDD" size="2">&lt;--</FONT></A> &nbsp;| 
                 <A class="nodec" href="../index.shtml"><FONT color=
                 "#DDDDDD" size="2">Home</FONT></A> &nbsp;| 
                 <A class="nodec" href="../map.html"><FONT color=
                 "#DDDDDD" size="2">Map</FONT></A> &nbsp;| 
                 <A class="nodec" href="../indice.html"><FONT color=
                 "#DDDDDD" size="2">Index</FONT></A> &nbsp;| 
                 <A class="nodec" href="../Search/index.html"><FONT color=
                 "#DDDDDD" size="2">Search</FONT></A> </TD>
               </TR>

               <TR align="right">
                 <TD class="top">
                   <HR width="100%" noshade size="1">
                 </TD>
               </TR>
             </TABLE>
           </TD>
         </TR>
       </TABLE>
     </TD>
   </TR>
 </TABLE>
 <!-- end top navegation bar -->
 <!-- blue bar -->
 <TABLE summary="topbar_2" cellspacing="0" cellpadding="0" border="0" align="center"
 width="90%">
   <TR bgcolor="#00ffff">
     <TD><IMG src="../../common/images/transpix.gif" width="1" height=
     "2" alt=""></TD>
   </TR>
 </TABLE>
 <!-- end blue bar -->
 <!-- bottom navegation bar -->
 <TABLE summary="topbar_3" cellspacing="0" cellpadding="0" border="0" align="center"
 width="94%">
   <TR bgcolor="#000000">
     <TD>
       <TABLE summary="topbar_3_links" cellspacing="0" cellpadding="1" border="0" width=
       "100%">
         <TR align="center">
           <TD WIDTH="20%"><A class="nodec" href="../News/index.html"><FONT color=
           "#FFFFFF">News</FONT></A> </TD>
           <TD WIDTH="5%"><FONT color="#FFFFFF">|</FONT> </TD>
           <TD WIDTH="20%"><A class="nodec" href="../Archives/index.html"><FONT color=
           "#FFFFFF">Archives</FONT></A> </TD>
           <TD WIDTH="5%"><FONT color="#FFFFFF">|</FONT> </TD>
           <TD WIDTH="20%"><A class="nodec" href="../Links/index.html"><FONT color=
           "#FFFFFF">Links</FONT></A> </TD>
           <TD WIDTH="5%"><FONT color="#FFFFFF">|</FONT> </TD>
           <TD WIDTH="20%"><A class="nodec" href="../aboutus.html"><FONT color=
           "#FFFFFF">About LF</FONT></A> </TD>
         </TR>
       </TABLE>
     </TD>
   </TR>
 </TABLE>
 <!-- end bottom navegation bar -->
<!-- stop navegation bar -->

<!-- SSI_INFO -->

<!-- tr_staticssi include virtual -->
<!-- tr_staticssi exec cmd -->
<!-- addedByLfdynahead ver 1.5 --><TABLE ALIGN="right" border=0><TR><TD ALIGN="right"><FONT SIZE="-1" FACE="Arial,Helvetica">This document is available in: <A href="../../English/January2004/article321.shtml">English</a> &nbsp;<A href="../../ChineseGB/January2004/article321.shtml">ChineseGB</a> &nbsp;<A href="../../Deutsch/January2004/article321.shtml">Deutsch</a> &nbsp;<A href="../../Francais/January2004/article321.shtml">Francais</a> &nbsp;<A href="../../Italiano/January2004/article321.shtml">Italiano</a> &nbsp;<A href="../../Turkce/January2004/article321.shtml">Turkce</a> &nbsp;</FONT></TD></TR></TABLE><br>
 


<!-- SSI_INFO STOP -->
<!-- 2pdaIgnoreStop -->

<!-- SHORT BIO ABOUT THE AUTHOR -->
<TABLE ALIGN=LEFT BORDER=0  WIDTH="190" summary="about the author">
<TR>
<TD>

<img src="../../common/images/HeinzJosefClaes.jpg" alt=
    "[Photo of the Author]">
<BR>by  Heinz-Josef Claes <br> <small>&lt;hjclaes(at)web.de&gt;</small>
<BR><BR>
<I>About the author:</I><BR>
<!-- aboutauthor_start -->
<p>The author prefers not to publish a picture of himself
    online.</p>
<!-- aboutauthor_stop -->
<!-- TRANSLATED TO en -->
<BR><BR><I>Translated to English by:</I><BR>
J&uuml;rgen Pohl <small>&lt;sept.sapins(at)verizon.net&gt;</small>
<br>
<!--
 =LF=TRANSTO=en: J&uuml;rgen Pohl
-->
<!-- TRANSLATED TO STOP -->
<BR><i>Content</i>:
<UL>
  <LI><A HREF="#321lfindex0">Why a new backup tool ?</A></LI>
  <LI><A HREF="#321lfindex1">What would be an ideal Backup Tool?</A></LI>
  <LI><A HREF="#321lfindex2">How does storeBackup work?</A></LI>
  <LI><A HREF="#321lfindex3">Features</A></LI>
  <LI><A HREF="#321lfindex4">Performance</A></LI>
  <LI><A HREF="#321lfindex5">Implementations</A></LI>
  <LI><A HREF="#321lfindex6">Installation</A></LI>
  <LI><A HREF="#321lfindex7">Operation</A></LI>
  <LI><A HREF="#321lfindex8">Future Plans</A></LI>
  <LI><A HREF="#321lfindex9">Version and License</A></LI>
  <LI><A HREF="http://cgi.linuxfocus.org/cgi-bin/lftalkback?anum=321">Talkback form for this article</A></LI>
</UL>

</TD></TR></TABLE>
<!-- HEAD OF THE ARTICLE -->
<br>&nbsp;
<table border="0"><tr><td>
<H2>storeBackup, the unconventional backup tool</H2>
 <img src="../../common/images/illustration321.jpg" alt=
    "[Illustration]" hspace="10">
<!-- ABSTRACT OF THE ARTICLE -->
<P><i>Abstract</i>:
<P>
<!-- articleabstract_start -->

    StoreBackup is aimed at the general user who does not
    necessarily own a tape drive but has a second hard drive or
    another computer, as well as at users in professional
    environments who want extremely fast and convenient access to
    their backups while saving on the cost of tapes and on
    administrative effort. <br><br>
    Storage on hard drives or similar devices is an attractive
    alternative or addition to backups on tape. The program
    introduced here performs well and saves storage
    space: <br><ul><li>Directories, including their tree structure, are
      copied to another location (e.g. /home =&gt;
      /var/bkup/2003.12.13_02.04.26). File permissions are
      preserved, so users can access the backup directly.</li><li>The content of each file is compared with
      the existing backup to make sure there is only <span style=
      "FONT-STYLE: italic">one copy</span> of each file, i.e. files
      with the same content exist physically only once
      in the backup.</li><li>Identical files are hard linked and appear in the backup
      in the same locations as in the original.</li><li>Backup files are compressed unless they are explicitly
      excluded. Compression may also be switched off entirely.</li><li>Backup series generated independently (e.g. from
      different machines) may refer to shared files through hard
      links. Full or partial backups may be performed with this
      method, always under the condition that files with the same
      content exist only once in the backup.</li></ul>
<!-- articleabstract_stop -->

<br><!-- HR divider --><center><font color="#8282e0"><b>_________________ _________________ _________________</b></font></center><br>
</td></tr></table>
<!-- BODY OF THE ARTICLE -->


    <A NAME="321lfindex0">&nbsp;</A>
<H2>Why a new backup tool?</H2>

    There are possibly thousands of backup programs. So, why
    another one? The reason arose from my work as a consultant. I
    was on the road the entire week and had no way to back up my
    data at home during the week. All I had was a 250 MB ZIP drive
    on the parallel port. Backing up to the ZIP drive did not give
    me much storage space, and I had to live with low performance
    (about 200 KB/s). In addition, I needed fast, simple access to
    my data - I did not like the usual options of full,
    differential and incremental backups (e.g. with tar or dump):
    on the one hand it is usually too cumbersome to retrieve a
    particular version, on the other hand an old backup cannot be
    deleted at will; this has to be planned carefully when the
    backups are created. <br>
    <br>
    My goal was to be able to back up quickly while working and to
    find my files again quickly and without hassle. <br>
    <br>
    So, at the end of 1999, the first version of storeBackup was
    created. It was, however, not suitable for large environments:
    it did not perform well enough, did not scale sufficiently and
    could not deal with nasty file names (e.g. '\n' in a name). <br>
    <br>
    Based on the experience with that first version I wrote a new
    one, which was published a little less than a year later under
    the GPL. In the meantime the number of users has grown - from
    home use to the securing of (mail) directories at ISPs,
    hospitals and universities, and to general archiving.

    <A NAME="321lfindex1">&nbsp;</A>
<H2>What would be an ideal Backup Tool?</H2>

    The ideal backup tool would create, every day, a complete copy
    of the entire file system (including the applicable access
    rights) on another file system, with minimal effort for the
    administrator and maximum comfort for the user. The computer
    and hard disk systems making this possible should of course be
    located in a distant, secure building. With the help of a file
    system browser, users could access the backed-up data
    themselves, search it and copy data directly back. The backup
    would be directly usable without any complications. Dealing
    with backups would become something 'normal', since going
    through the administrator would generally be unnecessary. <br>
    <br>
     The approach described here has one small disadvantage: it
    needs a lot of hard drive space, and it is quite slow because
    the total amount of data has to be copied every time.
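    To make the idea concrete, such an "ideal" full copy could be
    approximated naively with a plain recursive copy into a
    time-stamped directory (the paths are purely illustrative):
<pre class="code">
# naive daily full copy, preserving owners, permissions and timestamps (-a)
# /home is the data to secure, /var/bkup the backup disk (example paths)
cp -a /home /var/bkup/$(date +%Y.%m.%d_%H.%M.%S)
</pre>
    Every run duplicates the complete data set - exactly the
    storage and speed problem that storeBackup sets out to solve.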

    <A NAME="321lfindex2">&nbsp;</A>
<H2>How does storeBackup work?</H2>

    StoreBackup tries to come as close as possible to this "ideal
    backup" while solving its two problems: storage space and
    performance.

    <A NAME="321lfindex3">&nbsp;</A>
<H3>Features</H3>

    The first measure to reduce the required hard drive space is
    the compression of the data - where that makes sense.
    storeBackup allows any compression algorithm to be used as an
    external program. The default is bzip2. <br>
    <br>
    Looking at the stored data more closely, it becomes apparent
    that relatively few files change from backup to backup - which
    is the rationale behind incremental backups. We also find many
    files with the same content in a backup, because users copy
    files or because a version control program (like cvs) is
    active. In addition, users rename files or directory
    structures; incremental backups save these again
    (unnecessarily). The solution is to check the backup for files
    with the same content (possibly compressed) and to reference
    those. The reference used is a hard link. (Explanation: data
    blocks in Unix file systems are managed through inodes. Many
    different file names in arbitrary directories may refer to the
    same inode; the actual file is only deleted when its last hard
    link, i.e. its last directory entry, is removed. Hard links can
    only point to a file within the same file system.) <br>
    By hard linking to files that already exist in previous
    backups, each file is present in each backup although it exists
    physically on the hard drive only once. Copied or renamed files
    and directories therefore take up only the storage space of the
    hard links - nearly nothing. <br>
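    The behaviour of hard links is easy to observe with standard
    tools; the file names below are, of course, only an example:
<pre class="code">
echo "some data" &gt; original.txt
ln original.txt copy.txt       # second hard link to the same inode
ls -li original.txt copy.txt   # same inode number, link count 2, no extra data blocks
rm original.txt                # the content survives as long as one link remains
cat copy.txt
</pre>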
    <br>
    Usually not just one computer needs to be backed up but a
    number of them. They often contain a high proportion of
    identical files, especially in directories like /etc or /usr.
    Obviously, only one copy of identical files should be stored on
    the backup drive. The simplest solution would be to mount all
    directories on the backup server and back up all computers in
    one sweep; duplicate files would then be detected and hard
    linked. However, this procedure has the disadvantage that all
    machines to be backed up have to be available at backup time.
    In many cases that is not feasible, for example if notebooks
    are to be backed up with storeBackup.
    <br>
    Notebooks in particular show a high overlap of files, since
    users create local copies. For such cases, or when servers are
    backed up independently of one another and the available hard
    drive space is to be used optimally via hard links, storeBackup
    is able to hard link files across independent backup series
    (independent of each other, possibly from different machines). <br>
    <br>
    For deleting old backups storeBackup offers a set of options.
    It is a great advantage here that each backup is a full backup:
    any of them may be deleted at will. Unlike with traditional
    backups, there is no need to consider whether an incremental
    backup depends on previous backups. <br>
    The options allow backups to be deleted or kept for specific
    weekdays, or for the first or last day of the week, month or
    year. It can also be ensured that a minimum number of backups
    is always retained. This is especially useful if backups are
    not generated on a regular basis: the last backups of a laptop
    can be kept until the end of a four-week vacation even though
    the retention period is set to three weeks. Furthermore it is
    possible to define a maximum number of backups. There are
    additional options to resolve conflicts between contradictory
    rules (using common sense).
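    Because every backup is a full backup, removing one dated
    backup directory never breaks another: shared data blocks stay
    alive as long as at least one hard link to them remains. A
    quick way to convince yourself (directory and file names are
    purely illustrative):
<pre class="code">
# two backup generations sharing an unchanged file via a hard link
ls -li /var/bkup/2003.12.12_02.01.10/user/report.txt.bz2 \
       /var/bkup/2003.12.13_02.04.26/user/report.txt.bz2   # same inode, link count &gt;= 2
rm -rf /var/bkup/2003.12.12_02.01.10       # delete the older backup at will
bzip2 -t /var/bkup/2003.12.13_02.04.26/user/report.txt.bz2 # newer copy is still intact
</pre>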

    <A NAME="321lfindex4">&nbsp;</A>
<H3>Performance</H3>

    The procedure described above requires checking, before a file
    is written to the backup, whether an identical file already
    exists - either in the previous backup or among the files
    already written to the new one. Of course it makes little sense
    to compare every file to be backed up directly against every
    file of the previous backup. Instead, the md5 sums of the
    previous backup are compared with the md5 sum of the file to be
    backed up by means of a hash table; the program uses dbm files
    for this. <br>
    Computing an md5 sum is fast, but with large amounts of data it
    is still not fast enough. For this reason storeBackup first
    checks whether the file is unchanged since the last backup
    (path + file name, ctime, mtime and size are the same). If so,
    the md5 sum from the last backup is adopted and the hard link
    is set. If this initial check shows a difference, the md5 sum
    is computed and storeBackup checks whether another file with
    the same md5 sum exists. (The comparison across several backup
    series uses an extended but similarly efficient process.) With
    this approach only a few md5 sums have to be calculated per
    backup. <br>
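    The underlying idea can be reproduced with md5sum on the
    command line: identical content yields identical sums,
    regardless of file name or location (the file names are just an
    example):
<pre class="code">
echo "identical content" &gt; a.txt
cp a.txt b.txt                 # same content under a different name
md5sum a.txt b.txt             # both files produce the same md5 sum
</pre>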
    <br>
     My server (200 MHz, IDE) processes about 20 to 35
    files/second, my desktop machine (800 MHz, IDE) about 150 to
    200 files/second. On fast computers with fast hard drives (2.4
    GHz, 1.4 TB software RAID) I have measured 800 files/second.
    These results are for writing to local drives; writing over NFS
    is a lot slower. The speed of the hard drive is the decisive
    factor. (All tests were done under Linux.)

    <A NAME="321lfindex5">&nbsp;</A>
<H3>Implementations</H3>

    The storeBackup tools have been tested on Linux, FreeBSD,
    Solaris and AIX. They should run on all Unix platforms. They
    are written in Perl.

    <A NAME="321lfindex6">&nbsp;</A>
<H3>Installation</H3>

    The installation is simple. StoreBackup can be downloaded from
    <a href=
    "http://www.sf.net/projects/storebackup">http://www.sf.net/projects/storebackup</a>
    as storeBackup-version.tar.bz2 and unpacked at the desired
    location.

    <p class="code">tar jxf storeBackup-version.tar.bz2</p>
    This creates the directory storeBackup, with the documentation
    and with the executables in the subdirectory <i>bin</i>. They
    can be called with their complete path; alternatively, the
    $PATH environment variable can be extended accordingly.
    Operating systems which do not include the program md5sum
    (e.g. FreeBSD) need to compile it; instructions for this can be
    found in the included README file.
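    For example, assuming the archive was unpacked in /usr/local
    (adjust path and version number to your setup):
<pre class="code">
# call the scripts with their full path ...
/usr/local/storeBackup/bin/storeBackup.pl -h
# ... or add them to the PATH of the current shell
export PATH=$PATH:/usr/local/storeBackup/bin
</pre>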

    <A NAME="321lfindex7">&nbsp;</A>
<H3>Operation</H3>

    Not all options will be described here in detail; a complete
    description can be found in the software package. <br>
    <br>
    The simplest method to generate a backup is:

    <p class="code">storeBackup.pl -s sourceDir -t targetDir</p>
    sourceDir and targetDir must already exist. StoreBackup will
    copy the files from sourceDir to targetDir/date_time,
    compressing them with bzip2 in the process (skipping already
    compressed formats such as .gz, .bz2, .png etc.) and hard
    linking duplicate files. <br>
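    A concrete call, matching the example paths used in the
    abstract (both directories have to exist beforehand):
<pre class="code">
mkdir -p /var/bkup
storeBackup.pl -s /home -t /var/bkup
# creates e.g. /var/bkup/2003.12.13_02.04.26 with the compressed, hard-linked backup
</pre>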
    <br>
    In its current version (1.14.1) storeBackup.pl offers 45
    parameters; describing them all here would go beyond the scope
    of this article. They can be listed with

    <p class="code">storeBackup.pl -h</p>
    The files README and EXAMPLES contain exhaustive explanations
    of the different use cases. It should be pointed out that, as
    an alternative to putting the parameters on the command line -
    which can quickly become complex - a configuration file may be
    used. It can be generated with

    <p class="code">storeBackup.pl --generate --file ConfigFile</p>
    or shorter with

    <p class="code">storeBackup.pl -g -f ConfigFile</p>
    After editing the configuration file, it can be read back, its
    syntax checked and partially applied with

    <p class="code">storeBackup.pl -f ConfigFile --print</p>
    Subsequently, storeBackup can be started with

    <p class="code">storeBackup.pl -f ConfigFile</p>
    The complete description of all storeBackup options can be
    found in the files README and EXAMPLES, which are part of the
    tar file. <br>
    <br>
    To find out which versions of a file exist in which backups,
    storeBackupVersion.pl can be used:

    <p class="code">storeBackupVersion.pl -f Filename</p>
    <i>Filename</i> is the name of the file in question; it has to
    be given exactly as it appears in the backup, i.e. including
    its compression suffix. The easiest way is to change to the
    corresponding place in the backup directory and execute the
    command there. The option "-h" displays explanations of all 11
    parameters. <br>
    <br>
    Single files may be recovered with cp, ftp, a file browser or
    similar means. For recovering partial directory trees or
    complete backups it makes sense to use the dedicated tool
    storeBackupRecover.pl, which extracts the wanted files or
    directories from the backup. This restores the original state,
    i.e. owner, group and permissions are re-established, files are
    decompressed if they were uncompressed in the original, and the
    original hard links are restored as well.<br>
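    For a quick manual recovery of a single compressed file, plain
    shell tools are sufficient (the paths are purely illustrative
    and assume the backup layout mirrors the source directory):
<pre class="code">
# copy one file out of a backup generation and decompress it by hand
cp /var/bkup/2003.12.13_02.04.26/user/notes.txt.bz2 /tmp/
bunzip2 /tmp/notes.txt.bz2     # /tmp/notes.txt now holds the restored content
</pre>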
    Additional options of storeBackupRecover.pl permit statistical
    output, the tuning of performance parameters, control of the
    overwrite behaviour and more. All 10 parameters can be
    displayed with the option "-h". <br>
    <br>
    With storeBackupDel.pl, backups may be deleted independently of
    the storeBackup.pl program itself. This can be useful when
    backing up over NFS: deleting directory trees over NFS is much
    slower than deleting them locally. storeBackup can therefore be
    run over NFS with its delete function disabled, which gives
    better control over the backup duration. The deletion of
    previously generated backups can then be done locally on the
    server with storeBackupDel - which, by the way, offers the same
    deletion options as storeBackup - decoupled from the actual
    backup process. <br>
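    A possible division of labour over NFS might look as follows.
    This is only a sketch: it assumes the configuration file
    disables deletion on the client, and that storeBackupDel.pl
    accepts the same configuration file since it shares the
    deletion options:
<pre class="code">
# on the client: back up to the NFS-mounted target, deletion disabled in ConfigFile
storeBackup.pl -f ConfigFile
# later, locally on the backup server (assumed invocation):
storeBackupDel.pl -f ConfigFile
</pre>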
    <br>
    Existing backups are organized in directories. They can be
    displayed with storeBackupls.pl (more clearly than with 'ls'),
    either simply as a list
<pre class="code">
hjc@schlappix:~/backup ) storeBackupls.pl /media/zip/stbu/
  1  Fri May 23 2003   2003.05.23_12.37.53   -156
  2  Fri Jun 06 2003   2003.06.06_14.31.47   -142
  3  Fri Jun 13 2003   2003.06.13_14.17.18   -135
  4  Fri Jun 20 2003   2003.06.20_14.02.35   -128
  5  Fri Jun 27 2003   2003.06.27_14.23.55   -121
  6  Mon Jun 30 2003   2003.06.30_17.34.37   -118
  7  Fri Jul 04 2003   2003.07.04_13.10.06   -114
  8  Fri Jul 11 2003   2003.07.11_13.13.14   -107
  9  Fri Jul 18 2003   2003.07.18_14.03.49   -100
 10  Fri Jul 25 2003   2003.07.25_14.19.19   -93
 11  Thu Jul 31 2003   2003.07.31_17.07.55   -87
 12  Fri Aug 01 2003   2003.08.01_12.16.58   -86
 13  Fri Aug 15 2003   2003.08.15_15.10.19   -72
 14  Sat Aug 23 2003   2003.08.23_06.25.35   -64
 15  Wed Aug 27 2003   2003.08.27_18.21.09   -60
 16  Thu Aug 28 2003   2003.08.28_14.16.39   -59
 17  Fri Aug 29 2003   2003.08.29_14.35.10   -58
 18  Mon Sep 01 2003   2003.09.01_17.19.56   -55
 19  Tue Sep 02 2003   2003.09.02_18.18.46   -54
 20  Wed Sep 03 2003   2003.09.03_16.22.41   -53
 21  Thu Sep 04 2003   2003.09.04_16.59.19   -52
 22  Fri Sep 05 2003   2003.09.05_14.35.20   -51
 23  Mon Sep 08 2003   2003.09.08_20.08.52   -48
 24  Tue Sep 09 2003   2003.09.09_18.45.48   -47
 25  Wed Sep 10 2003   2003.09.10_18.30.48   -46
 26  Thu Sep 11 2003   2003.09.11_17.26.46   -45
 27  Fri Sep 12 2003   2003.09.12_15.23.03   -44
 28  Mon Sep 15 2003   2003.09.15_18.05.19   -41
 29  Tue Sep 16 2003   2003.09.16_18.04.16   -40
 30  Wed Sep 17 2003   2003.09.17_19.03.02   -39
 31  Thu Sep 18 2003   2003.09.18_18.21.09   -38
 32  Fri Sep 19 2003   2003.09.19_14.48.05   -37  not finished
 33  Mon Sep 22 2003   2003.09.22_18.58.55   -34
 34  Tue Sep 23 2003   2003.09.23_18.48.40   -33
 35  Wed Sep 24 2003   2003.09.24_19.32.24   -32
 36  Thu Sep 25 2003   2003.09.25_18.05.38   -31
 37  Fri Sep 26 2003   2003.09.26_14.59.59   -30
 38  Mon Sep 29 2003   2003.09.29_18.42.59   -27
 39  Tue Sep 30 2003   2003.09.30_18.02.03   -26
 40  Wed Oct 01 2003   2003.10.01_17.09.43   -25
 41  Thu Oct 02 2003   2003.10.02_15.26.33   -24
 42  Mon Oct 06 2003   2003.10.06_20.08.45   -20
 43  Tue Oct 07 2003   2003.10.07_19.46.54   -19
 44  Wed Oct 08 2003   2003.10.08_16.03.23   -18
 45  Thu Oct 09 2003   2003.10.09_16.58.28   -17
 46  Fri Oct 10 2003   2003.10.10_14.21.06   -16
 47  Mon Oct 13 2003   2003.10.13_18.58.24   -13
 48  Tue Oct 14 2003   2003.10.14_16.02.44   -12
 49  Wed Oct 15 2003   2003.10.15_19.04.12   -11
 50  Thu Oct 16 2003   2003.10.16_15.47.51   -10
 51  Mon Oct 20 2003   2003.10.20_09.34.52   -6
 52  Mon Oct 20 2003   2003.10.20_12.16.40   -6
 53  Tue Oct 21 2003   2003.10.21_09.43.40   -5
 54  Tue Oct 21 2003   2003.10.21_11.22.36   -5
 55  Tue Oct 21 2003   2003.10.21_16.01.15   -5
 56  Tue Oct 21 2003   2003.10.21_18.08.07   -5
 57  Wed Oct 22 2003   2003.10.22_10.02.51   -4
 58  Wed Oct 22 2003   2003.10.22_16.09.42   -4
 59  Wed Oct 22 2003   2003.10.22_18.03.05   -4
 60  Thu Oct 23 2003   2003.10.23_08.18.15   -3
 61  Thu Oct 23 2003   2003.10.23_14.16.24   -3
 62  Thu Oct 23 2003   2003.10.23_17.00.36   -3
 63  Fri Oct 24 2003   2003.10.24_13.29.30   -2
 64  Sun Oct 26 2003   2003.10.26_09.08.55   0
</pre>
    ('not finished' means the backup was aborted) <br>
    or with information on the deletion conditions from the
    configuration file:
<pre class="code">
hjc@schlappix:~/backup ) storeBackupls.pl -f stbu.conf /media/zip/stbu/
analyse of old Backups in &lt;/media/zip/stbu/&gt;:
 Fri 2003.05.23_12.37.53 (156): keepLastOfMonth(400d)
 Fri 2003.06.06_14.31.47 (142): keepLastOfWeek(150d)
 Fri 2003.06.13_14.17.18 (135): keepLastOfWeek(150d)
 Fri 2003.06.20_14.02.35 (128): keepLastOfWeek(150d)
 Fri 2003.06.27_14.23.55 (121): keepLastOfWeek(150d)
 Mon 2003.06.30_17.34.37 (118): keepLastOfMonth(400d)
 Fri 2003.07.04_13.10.06 (114): keepLastOfWeek(150d), keepMinNumber50
 Fri 2003.07.11_13.13.14 (107): keepLastOfWeek(150d), keepMinNumber49
 Fri 2003.07.18_14.03.49 (100): keepLastOfWeek(150d), keepMinNumber48
 Fri 2003.07.25_14.19.19 (93): keepLastOfWeek(150d), keepMinNumber47
 Thu 2003.07.31_17.07.55 (87): keepLastOfMonth(400d), keepMinNumber46
 Fri 2003.08.01_12.16.58 (86): keepLastOfWeek(150d), keepMinNumber45
 Fri 2003.08.15_15.10.19 (72): keepLastOfWeek(150d), keepMinNumber44
 Sat 2003.08.23_06.25.35 (64): keepLastOfWeek(150d), keepMinNumber43
 Wed 2003.08.27_18.21.09 (60): keepMinNumber42, keepWeekDays(60d)
 Thu 2003.08.28_14.16.39 (59): keepMinNumber41, keepWeekDays(60d)
 Fri 2003.08.29_14.35.10 (58): keepLastOfMonth(400d), keepLastOfWeek(150d),
                               keepMinNumber40, keepWeekDays(60d)
 Mon 2003.09.01_17.19.56 (55): keepMinNumber39, keepWeekDays(60d)
 Tue 2003.09.02_18.18.46 (54): keepMinNumber38, keepWeekDays(60d)
 Wed 2003.09.03_16.22.41 (53): keepMinNumber37, keepWeekDays(60d)
 Thu 2003.09.04_16.59.19 (52): keepMinNumber36, keepWeekDays(60d)
 Fri 2003.09.05_14.35.20 (51): keepLastOfWeek(150d), keepMinNumber35, keepWeekDays(60d)
 Mon 2003.09.08_20.08.52 (48): keepMinNumber34, keepWeekDays(60d)
 Tue 2003.09.09_18.45.48 (47): keepMinNumber33, keepWeekDays(60d)
 Wed 2003.09.10_18.30.48 (46): keepMinNumber32, keepWeekDays(60d)
 Thu 2003.09.11_17.26.46 (45): keepMinNumber31, keepWeekDays(60d)
 Fri 2003.09.12_15.23.03 (44): keepLastOfWeek(150d), keepMinNumber30, keepWeekDays(60d)
 Mon 2003.09.15_18.05.19 (41): keepMinNumber29, keepWeekDays(60d)
 Tue 2003.09.16_18.04.16 (40): keepMinNumber28, keepWeekDays(60d)
 Wed 2003.09.17_19.03.02 (39): keepMinNumber27, keepWeekDays(60d)
 Thu 2003.09.18_18.21.09 (38): keepMinNumber26, keepWeekDays(60d)
 Fri 2003.09.19_14.48.05 (37): keepLastOfWeek(150d), keepMinNumber25, keepWeekDays(60d)
 Mon 2003.09.22_18.58.55 (34): keepMinNumber24, keepWeekDays(60d)
 Tue 2003.09.23_18.48.40 (33): keepMinNumber23, keepWeekDays(60d)
 Wed 2003.09.24_19.32.24 (32): keepMinNumber22, keepWeekDays(60d)
 Thu 2003.09.25_18.05.38 (31): keepMinNumber21, keepWeekDays(60d)
 Fri 2003.09.26_14.59.59 (30): keepLastOfWeek(150d), keepMinNumber20, keepWeekDays(60d)
 Mon 2003.09.29_18.42.59 (27): keepMinNumber19, keepWeekDays(60d)
 Tue 2003.09.30_18.02.03 (26): keepLastOfMonth(400d), keepMinNumber18, keepWeekDays(60d)
 Wed 2003.10.01_17.09.43 (25): keepMinNumber17, keepWeekDays(60d)
 Thu 2003.10.02_15.26.33 (24): keepLastOfWeek(150d), keepMinNumber16, keepWeekDays(60d)
 Mon 2003.10.06_20.08.45 (20): keepMinNumber15, keepWeekDays(60d)
 Tue 2003.10.07_19.46.54 (19): keepMinNumber14, keepWeekDays(60d)
 Wed 2003.10.08_16.03.23 (18): keepMinNumber13, keepWeekDays(60d)
 Thu 2003.10.09_16.58.28 (17): keepMinNumber12, keepWeekDays(60d)
 Fri 2003.10.10_14.21.06 (16): keepLastOfWeek(150d), keepMinNumber11, keepWeekDays(60d)
 Mon 2003.10.13_18.58.24 (13): keepMinNumber10, keepWeekDays(60d)
 Tue 2003.10.14_16.02.44 (12): keepMinNumber9, keepWeekDays(60d)
 Wed 2003.10.15_19.04.12 (11): keepMinNumber8, keepWeekDays(60d)
 Thu 2003.10.16_15.47.51 (10): keepLastOfWeek(150d), keepMinNumber7, keepWeekDays(60d)
 Mon 2003.10.20_09.34.52 (6): keepDuplicate(7d)
 Mon 2003.10.20_12.16.40 (6): keepMinNumber6, keepWeekDays(60d)
 Tue 2003.10.21_09.43.40 (5): keepDuplicate(7d)
 Tue 2003.10.21_11.22.36 (5): keepDuplicate(7d)
 Tue 2003.10.21_16.01.15 (5): keepDuplicate(7d)
 Tue 2003.10.21_18.08.07 (5): keepMinNumber5, keepWeekDays(60d)
 Wed 2003.10.22_10.02.51 (4): keepDuplicate(7d)
 Wed 2003.10.22_16.09.42 (4): keepDuplicate(7d)
 Wed 2003.10.22_18.03.05 (4): keepMinNumber4, keepWeekDays(60d)
 Thu 2003.10.23_08.18.15 (3): keepDuplicate(7d)
 Thu 2003.10.23_14.16.24 (3): keepDuplicate(7d)
 Thu 2003.10.23_17.00.36 (3): keepMinNumber3, keepWeekDays(60d)
 Fri 2003.10.24_13.29.30 (2): keepLastOfWeek(150d), keepMinNumber2, keepWeekDays(60d)
 Sun 2003.10.26_09.08.55 (0): keepLastOfMonth(400d), keepLastOfWeek(150d),
                              keepMinNumber1, keepWeekDays(60d)
</pre>
    <br>
    <br>
    In addition to the backup programs described above, the tools
    llt and multitail are included. llt displays the creation,
    modification and access times of files. multitail allows a
    number of files to be followed in the manner of 'tail -f', but
    it offers more options than 'tail -f' and is more robust.

    <A NAME="321lfindex8">&nbsp;</A>
<H2>Future Plans</H2>

    For the next versions of storeBackup the following features are
    planned: <br>

    <ul>
      <li>The biggest time consumer of a backup (except for the
      first backup, during which everything is compressed or
      copied) is the hard linking. Creating a single hard link is
      fast, but because of their large number - compared to the
      other operations, and to the parallelised compression in
      particular - this is where most of the time is spent.<br>
       The next version of storeBackup will offer the option of
      backing up the directory structure and the modified files in
      a first step; from the point of view of the data to be
      secured, the backup is then complete. In a second step the
      missing hard links are created. These two steps will be
      completely decoupled from each other - meaning they can run
      on different machines, and several backups may be made before
      the new hard links are generated.<br>
      Initial measurements indicate that this option will yield a
      performance gain over the "normal" full backup by a factor of
      5-10 (i.e. 1/5 to 1/10 of the usual time) when writing
      locally. Backing up over NFS will also become much faster if
      the hard linking step is started locally on the remote
      machine.
      </li>

      <li>Another plan for the next versions is to expand the
      search capabilities (with a subsequent restore of the files
      found). It shall be possible to search the backups with a
      user-defined rule built from the file name (pattern), file
      size, time of creation or modification, user id, group id,
      access permissions and a (simple) grep. The rules will
      support 'and', 'or', 'not' and optional parentheses.</li>

      <li>Further plans envision an extension of the options (in a
      tar-like fashion) and support for additional file types,
      e.g. devices.</li>
    </ul>

    <A NAME="321lfindex9">&nbsp;</A>
<H2>Version and License</H2>

    At the time of writing, the current version of storeBackup is
    1.14.1; it can be downloaded from <a href=
    "http://www.sf.net/projects/storebackup">http://www.sf.net/projects/storebackup</a>.<br>
    StoreBackup is covered by the GPL.
    <!-- vim: set sw=2 ts=2 et tw=74: -->
  



<!-- 2pdaIgnoreStart -->
<A NAME="talkback">&nbsp;</a>
<h2>Talkback form for this article</h2>
Every article has its own talkback page. On this page you can submit a comment or look at comments from other readers:
<center>
<table border="0"  CELLSPACING="2" CELLPADDING="1" summary="tb-button-outerpart">
 <tr BGCOLOR="#C2C2C2"><td align=center>
  <table border="3"  CELLSPACING="2" CELLPADDING="1" summary="tb-button">
   <tr BGCOLOR="#C2C2C2"><td align=center>
    <A href="http://cgi.linuxfocus.org/cgi-bin/lftalkback?anum=321"><b>&nbsp;talkback page&nbsp;</b></a>
   </td></tr></table>
</td></tr></table>
</center>

<HR size="2" noshade>
<a style="background-color:#bdc6d5" href="index.shtml">&lt;--, back to the index of this issue </a><br><HR size="2" noshade>
<!-- ARTICLE FOOT -->
<CENTER><TABLE WIDTH="98%" summary="footer">
<TR><TD ALIGN=CENTER BGCOLOR="#bdc6d5" WIDTH="50%">
<A HREF="../../common/lfteam.html">Webpages maintained by the LinuxFocus Editor team</A>
<BR><FONT COLOR="#FFFFFF">&copy; Heinz-Josef Claes, <a href="../../common/copy.html">FDL</a> <BR><a href="http://www.linuxfocus.org">LinuxFocus.org</a></FONT>
</TD>
<TD BGCOLOR="#bdc6d5">
<!-- TRANSLATION INFO -->
<font size=2>Translation information:</font>
<TABLE summary="translators">
  <tr><td><font size="2">de --&gt; -- : Heinz-Josef Claes <small>&lt;hjclaes(at)web.de&gt;</small></font></td></tr>
  <tr><td><font size="2">de --&gt; en: J&uuml;rgen Pohl &lt;sept.sapins(at)verizon.net&gt;</font></td></tr>
</TABLE>
</TD>
</TR></TABLE></CENTER>
<p><font size=1>2003-12-26, generated by lfparser version 2.45</font></p>
<!-- 2pdaIgnoreStop -->
</BODY>
</HTML>