additional.bundles = slf4j.api,\
slf4j.org.apache.commons.logging,\
slf4j.log4j,\
- org.apache.log4j
+ org.apache.log4j,\
+ com.jcraft.jsch
\ No newline at end of file
<artifactId>org.apache.commons.exec</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.argeo.tp</groupId>
+ <artifactId>org.joda.time</artifactId>
+ </dependency>
+
<!-- Spring -->
<dependency>
<groupId>org.argeo.tp</groupId>
--- /dev/null
+package org.argeo.server.backup;
+
+import org.apache.commons.vfs.FileObject;
+import org.apache.commons.vfs.FileSystemManager;
+import org.apache.commons.vfs.FileSystemOptions;
+import org.apache.commons.vfs.provider.sftp.SftpFileSystemConfigBuilder;
+import org.argeo.ArgeoException;
+
+/**
+ * Simplify atomic backups implementation, especially by managing VFS.
+ */
+public abstract class AbstractAtomicBackup implements AtomicBackup {
+    /** Name of this atomic backup, also used as the target file name. */
+    private String name;
+    /** Compression of the target file: "bz2" (default), "gz", or "none"/null. */
+    private String compression = "bz2";
+
+    /** Writes the actual backup data to the already resolved target file. */
+    protected abstract void writeBackup(FileObject targetFo);
+
+    /** Bean-style constructor; {@link #setName(String)} must then be called. */
+    public AbstractAtomicBackup() {
+    }
+
+    public AbstractAtomicBackup(String name) {
+        this.name = name;
+    }
+
+    @Override
+    public String backup(FileSystemManager fileSystemManager,
+            String backupsBase, BackupContext backupContext,
+            FileSystemOptions opts) {
+        if (name == null)
+            throw new ArgeoException("Atomic backup name must be set");
+
+        FileObject targetFo = null;
+        try {
+            // NOTE(review): strict host key checking is disabled for sftp
+            // targets, which exposes transfers to MITM attacks — confirm this
+            // is acceptable for the deployment environment.
+            if (backupsBase.startsWith("sftp:"))
+                SftpFileSystemConfigBuilder.getInstance()
+                        .setStrictHostKeyChecking(opts, "no");
+            // resolve the target, wrapping it in a compression layer if needed
+            if (compression == null || compression.equals("none"))
+                targetFo = fileSystemManager.resolveFile(backupsBase + '/'
+                        + backupContext.getRelativeFolder() + '/' + name, opts);
+            else if (compression.equals("bz2"))
+                targetFo = fileSystemManager.resolveFile("bz2:" + backupsBase
+                        + '/' + backupContext.getRelativeFolder() + '/' + name
+                        + ".bz2" + "!" + name, opts);
+            else if (compression.equals("gz"))
+                // NOTE(review): requires a provider registered under the "gz"
+                // scheme — verify the file system manager configuration.
+                targetFo = fileSystemManager.resolveFile("gz:" + backupsBase
+                        + '/' + backupContext.getRelativeFolder() + '/' + name
+                        + ".gz" + "!" + name, opts);
+            else
+                throw new ArgeoException("Unsupported compression "
+                        + compression);
+
+            writeBackup(targetFo);
+
+            // the VFS URI of the backup that was just written
+            return targetFo.toString();
+        } catch (Exception e) {
+            throw new ArgeoException("Cannot backup " + name + " to "
+                    + targetFo, e);
+        } finally {
+            // always release the VFS file object, even on failure
+            BackupUtils.closeFOQuietly(targetFo);
+        }
+    }
+
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    public void setCompression(String compression) {
+        this.compression = compression;
+    }
+}
--- /dev/null
+package org.argeo.server.backup;
+
+import org.apache.commons.vfs.FileSystemManager;
+import org.apache.commons.vfs.FileSystemOptions;
+
+/** Performs the backup of a single component, typically a database dump */
+public interface AtomicBackup {
+    /**
+     * Retrieves the data of the component in a format that allows to restore
+     * the component
+     *
+     * @param fileSystemManager
+     *            the VFS file system manager used to resolve the target
+     * @param backupsBase
+     *            the VFS base URI under which backups are written
+     * @param backupContext
+     *            the context of this backup
+     * @param opts
+     *            VFS options (e.g. authentication) used when resolving the
+     *            target
+     * @return the VFS URI of the generated file or directory
+     */
+    public String backup(FileSystemManager fileSystemManager,
+            String backupsBase, BackupContext backupContext, FileSystemOptions opts);
+}
--- /dev/null
+package org.argeo.server.backup;
+
+import java.text.DateFormat;
+import java.util.Date;
+
+/**
+ * Transient information of a given backup, centralizing common information such
+ * as timestamp and location.
+ */
+public interface BackupContext {
+    /** Backup date */
+    public Date getTimestamp();
+
+    /** Formatted backup date, using {@link #getDateFormat()} */
+    public String getTimestampAsString();
+
+    /** System name */
+    public String getSystemName();
+
+    /** Local base, relative folder under the backups base where this backup is written */
+    public String getRelativeFolder();
+
+    /** Date format, used to format (and later parse back) backup timestamps */
+    public DateFormat getDateFormat();
+}
--- /dev/null
+package org.argeo.server.backup;
+
+import org.apache.commons.vfs.FileSystemException;
+import org.apache.commons.vfs.impl.DefaultFileSystemManager;
+import org.apache.commons.vfs.provider.bzip2.Bzip2FileProvider;
+import org.apache.commons.vfs.provider.ftp.FtpFileProvider;
+import org.apache.commons.vfs.provider.gzip.GzipFileProvider;
+import org.apache.commons.vfs.provider.local.DefaultLocalFileProvider;
+import org.apache.commons.vfs.provider.ram.RamFileProvider;
+import org.apache.commons.vfs.provider.sftp.SftpFileProvider;
+import org.apache.commons.vfs.provider.url.UrlFileProvider;
+import org.argeo.ArgeoException;
+
+/**
+ * Programmatically configured VFS file system manager which can be declared as
+ * a bean and associated with a life cycle (methods
+ * {@link DefaultFileSystemManager#init()} and
+ * {@link DefaultFileSystemManager#close()}). Supports bz2, file, ram, gzip,
+ * ftp, sftp
+ */
+public class BackupFileSystemManager extends DefaultFileSystemManager {
+
+    public BackupFileSystemManager() {
+        super();
+        try {
+            addProvider("file", new DefaultLocalFileProvider());
+            addProvider("bz2", new Bzip2FileProvider());
+            addProvider("ftp", new FtpFileProvider());
+            addProvider("sftp", new SftpFileProvider());
+            // register under both schemes: AbstractAtomicBackup resolves
+            // "gz:" URIs, which were not matched by "gzip" alone
+            addProvider("gz", new GzipFileProvider());
+            addProvider("gzip", new GzipFileProvider());
+            addProvider("ram", new RamFileProvider());
+            // fall back to plain URL resolution for unknown schemes
+            setDefaultProvider(new UrlFileProvider());
+        } catch (FileSystemException e) {
+            throw new ArgeoException("Cannot configure backup file provider", e);
+        }
+    }
+}
--- /dev/null
+package org.argeo.server.backup;
+
+import java.text.DateFormat;
+
+import org.apache.commons.vfs.FileSystemManager;
+import org.apache.commons.vfs.FileSystemOptions;
+
+/** Purges previous backups */
+public interface BackupPurge {
+    /**
+     * Purge the backups identified by these arguments. Although these are the
+     * same fields as a {@link BackupContext} we don't pass it as argument since
+     * we want to use this interface to purge remote backups as well (that is,
+     * with a different base), or outside the scope of a running backup.
+     *
+     * @param fileSystemManager
+     *            the VFS file system manager
+     * @param base
+     *            the VFS base URI under which the backups lie
+     * @param name
+     *            the system name (sub-folder of the base)
+     * @param dateFormat
+     *            the format used to parse backup folder names back into dates
+     * @param opts
+     *            VFS options (e.g. authentication)
+     */
+    public void purge(FileSystemManager fileSystemManager, String base,
+            String name, DateFormat dateFormat, FileSystemOptions opts);
+}
--- /dev/null
+package org.argeo.server.backup;
+
+import org.apache.commons.vfs.FileObject;
+import org.apache.commons.vfs.FileSystemException;
+
+/** Backup utilities */
+public class BackupUtils {
+    /** Close a file object quietly even if it is null or throws an exception. */
+    public static void closeFOQuietly(FileObject fo) {
+        if (fo == null)
+            return;
+        try {
+            fo.close();
+        } catch (FileSystemException e) {
+            // silent
+        }
+    }
+
+    /** Prevents instantiation */
+    private BackupUtils() {
+    }
+}
--- /dev/null
+package org.argeo.server.backup;
+
+import org.apache.commons.vfs.FileObject;
+
+/** Backups a MySQL database using mysqldump */
+public class MySqlBackup extends OsCallBackup {
+    /** Path to the mysqldump executable. */
+    private String mysqldumpLocation = "/usr/bin/mysqldump";
+
+    private String dbUser;
+    private String dbPassword;
+    private String dbName;
+
+    /** Bean-style constructor; the db* setters must then be called. */
+    public MySqlBackup() {
+        super();
+    }
+
+    public MySqlBackup(String dbUser, String dbPassword, String dbName) {
+        // the database name is also used as the backup name
+        super(dbName);
+        this.dbUser = dbUser;
+        this.dbPassword = dbPassword;
+        this.dbName = dbName;
+    }
+
+    @Override
+    public void writeBackup(FileObject targetFo) {
+        // NOTE(review): the password is interpolated into the command line
+        // and is thus visible in process listings (ps) — consider an option
+        // file (--defaults-extra-file) if this runs on a shared host.
+        if (getCommand() == null)
+            setCommand(mysqldumpLocation
+                    + " --lock-tables --add-locks --add-drop-table"
+                    + " -u ${dbUser} --password=${dbPassword} --databases ${dbName}");
+        getVariables().put("dbUser", dbUser);
+        getVariables().put("dbPassword", dbPassword);
+        getVariables().put("dbName", dbName);
+
+        super.writeBackup(targetFo);
+    }
+
+    public void setDbUser(String dbUser) {
+        this.dbUser = dbUser;
+    }
+
+    public void setDbPassword(String dbPassword) {
+        this.dbPassword = dbPassword;
+    }
+
+    public void setDbName(String dbName) {
+        this.dbName = dbName;
+    }
+
+    public void setMysqldumpLocation(String mysqldumpLocation) {
+        this.mysqldumpLocation = mysqldumpLocation;
+    }
+
+}
--- /dev/null
+package org.argeo.server.backup;
+
+import org.apache.commons.vfs.FileObject;
+import org.argeo.ArgeoException;
+
+/** Backups an OpenLDAP server using slapcat */
+public class OpenLdapBackup extends OsCallBackup {
+    /** Path to the slapcat executable. */
+    private String slapcatLocation = "/usr/sbin/slapcat";
+    /** Path to the slapd configuration file passed via -f. */
+    private String slapdConfLocation = "/etc/openldap/slapd.conf";
+    /** Base DN to dump; mandatory. */
+    private String baseDn;
+
+    /** Bean-style constructor; {@link #setBaseDn(String)} must then be called. */
+    public OpenLdapBackup() {
+        super();
+    }
+
+    public OpenLdapBackup(String baseDn) {
+        super();
+        this.baseDn = baseDn;
+    }
+
+    @Override
+    public void writeBackup(FileObject targetFo) {
+        if (baseDn == null)
+            throw new ArgeoException("Base DN must be set");
+
+        // NOTE(review): the quotes around '${baseDn}' rely on commons-exec
+        // CommandLine.parse quote handling — verify the DN is passed as a
+        // single argument when it contains spaces.
+        if (getCommand() == null)
+            setCommand(slapcatLocation
+                    + " -f ${slapdConfLocation} -b '${baseDn}'");
+        getVariables().put("slapdConfLocation", slapdConfLocation);
+        getVariables().put("baseDn", baseDn);
+
+        super.writeBackup(targetFo);
+    }
+
+    // bean setters, for consistency with MySqlBackup configuration
+    public void setSlapcatLocation(String slapcatLocation) {
+        this.slapcatLocation = slapcatLocation;
+    }
+
+    public void setSlapdConfLocation(String slapdConfLocation) {
+        this.slapdConfLocation = slapdConfLocation;
+    }
+
+    public void setBaseDn(String baseDn) {
+        this.baseDn = baseDn;
+    }
+}
import org.apache.commons.exec.ExecuteStreamHandler;
import org.apache.commons.exec.Executor;
import org.apache.commons.exec.PumpStreamHandler;
+import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.vfs.FileContent;
import org.apache.commons.vfs.FileObject;
-import org.apache.commons.vfs.FileSystemManager;
-import org.apache.commons.vfs.VFS;
import org.argeo.ArgeoException;
/**
- * Runs a an OS command and save the output as a file. Typically used for MySQL
- * dumps
+ * Runs an OS command and save its standard output as a file. Typically used for
+ * MySQL or OpenLDAP dumps.
*/
-public class OsCallBackup implements Runnable {
-
+public class OsCallBackup extends AbstractAtomicBackup {
private final static Log log = LogFactory.getLog(OsCallBackup.class);
private String command;
private Map<String, String> variables = new HashMap<String, String>();
+ private Executor executor = new DefaultExecutor();
- private String target;
+ public OsCallBackup() {
+ }
- @Override
- public void run() {
- try {
- Executor executor = new DefaultExecutor();
+ public OsCallBackup(String name) {
+ super(name);
+ }
- CommandLine commandLine = CommandLine.parse(command, variables);
+ public OsCallBackup(String name, String command) {
+ super(name);
+ this.command = command;
+ }
+
+ @Override
+ public void writeBackup(FileObject targetFo) {
+ CommandLine commandLine = CommandLine.parse(command, variables);
+ ByteArrayOutputStream errBos = new ByteArrayOutputStream();
+ if (log.isTraceEnabled())
+ log.trace(commandLine.toString());
+ try {
// stdout
- FileSystemManager fsm = VFS.getManager();
- FileObject targetFo = fsm.resolveFile(target);
FileContent targetContent = targetFo.getContent();
-
// stderr
- ByteArrayOutputStream errBos = new ByteArrayOutputStream();
ExecuteStreamHandler streamHandler = new PumpStreamHandler(
targetContent.getOutputStream(), errBos);
-
executor.setStreamHandler(streamHandler);
-
- try {
- if (log.isDebugEnabled())
- log.debug(commandLine.toString());
-
- executor.execute(commandLine);
- } catch (ExecuteException e) {
- byte[] err = errBos.toByteArray();
- String errStr = new String(err);
- throw new ArgeoException("Process failed with exit value "
- + e.getExitValue() + ": " + errStr);
- }
+ executor.execute(commandLine);
+ } catch (ExecuteException e) {
+ byte[] err = errBos.toByteArray();
+ String errStr = new String(err);
+ throw new ArgeoException("Process " + commandLine
+ + " failed with exit value " + e.getExitValue() + ": "
+ + errStr, e);
} catch (Exception e) {
- throw new ArgeoException("Cannot backup to " + target
- + " with command " + command + " " + variables, e);
+ byte[] err = errBos.toByteArray();
+ String errStr = new String(err);
+ throw new ArgeoException("Process " + commandLine + " failed: "
+ + errStr, e);
+ } finally {
+ IOUtils.closeQuietly(errBos);
}
}
this.command = command;
}
- public void setVariables(Map<String, String> variables) {
- this.variables = variables;
+ protected String getCommand() {
+ return command;
}
- public void setTarget(String target) {
- this.target = target;
+ protected Map<String, String> getVariables() {
+ return variables;
}
- public static void main(String args[]) {
- OsCallBackup osCallBackup = new OsCallBackup();
- osCallBackup.setCommand("/usr/bin/mysqldump"
- + " --lock-tables --add-locks --add-drop-table"
- + " -u ${dbUser} --password=${dbPassword} --databases ${dbName}");
- Map<String, String> variables = new HashMap<String, String>();
- variables.put("dbUser", "root");
- variables.put("dbPassword", "");
- variables.put("dbName", "test");
- osCallBackup.setVariables(variables);
-
- osCallBackup
- .setTarget("/home/mbaudier/dev/src/commons/server/runtime/org.argeo.server.core/target/dump.sql");
+ public void setVariables(Map<String, String> variables) {
+ this.variables = variables;
+ }
- osCallBackup.run();
+ public void setExecutor(Executor executor) {
+ this.executor = executor;
}
+
}
--- /dev/null
+package org.argeo.server.backup;
+
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+
+import org.apache.commons.vfs.FileSystemManager;
+
+/** Simple implementation of a backup context */
+public class SimpleBackupContext implements BackupContext {
+    // NOTE(review): SimpleDateFormat is not thread-safe; acceptable as long
+    // as a context is used from a single backup thread — confirm.
+    private DateFormat dateFormat = new SimpleDateFormat("yyyyMMdd_HHmm");
+    /** Backup timestamp, fixed at construction time. */
+    private final Date timestamp;
+    private final String name;
+
+    private final FileSystemManager fileSystemManager;
+
+    /**
+     * Fixes the backup timestamp to the creation time of this context.
+     *
+     * @param fileSystemManager
+     *            held only for retrieval via {@link #getFileSystemManager()}
+     * @param backupsBase
+     *            currently unused — TODO confirm whether it should be stored
+     *            or removed from the signature
+     * @param name
+     *            the system name
+     */
+    public SimpleBackupContext(FileSystemManager fileSystemManager,
+            String backupsBase, String name) {
+        this.name = name;
+        this.timestamp = new Date();
+        this.fileSystemManager = fileSystemManager;
+    }
+
+    public Date getTimestamp() {
+        return timestamp;
+    }
+
+    public String getTimestampAsString() {
+        return dateFormat.format(timestamp);
+    }
+
+    public String getSystemName() {
+        return name;
+    }
+
+    /** Relative folder: {@code <name>/<formatted timestamp>}. */
+    public String getRelativeFolder() {
+        return name + '/' + getTimestampAsString();
+    }
+
+    public DateFormat getDateFormat() {
+        return dateFormat;
+    }
+
+    public FileSystemManager getFileSystemManager() {
+        return fileSystemManager;
+    }
+
+}
--- /dev/null
+package org.argeo.server.backup;
+
+import java.text.DateFormat;
+import java.util.Date;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.vfs.FileObject;
+import org.apache.commons.vfs.FileSystemManager;
+import org.apache.commons.vfs.FileSystemOptions;
+import org.apache.commons.vfs.Selectors;
+import org.argeo.ArgeoException;
+import org.joda.time.DateTime;
+import org.joda.time.Period;
+
+/** Simple backup purge which keeps backups only for a given number of days */
+public class SimpleBackupPurge implements BackupPurge {
+    private final static Log log = LogFactory.getLog(SimpleBackupPurge.class);
+
+    /** Number of days during which backups are kept. */
+    private Integer daysKept = 5;
+
+    @Override
+    public void purge(FileSystemManager fileSystemManager, String base,
+            String name, DateFormat dateFormat, FileSystemOptions opts) {
+        try {
+            DateTime nowDt = new DateTime();
+            FileObject baseFo = fileSystemManager.resolveFile(base + '/'
+                    + name, opts);
+
+            // ordered by backup date, oldest first
+            SortedMap<DateTime, FileObject> toDelete = new TreeMap<DateTime, FileObject>();
+            int backupCount = 0;
+
+            // scan backups and list those which should be deleted
+            for (FileObject backupFo : baseFo.getChildren()) {
+                // the child folder name is the formatted backup timestamp
+                String backupName = backupFo.getName().getBaseName();
+                Date backupDate = dateFormat.parse(backupName);
+                backupCount++;
+
+                DateTime backupDt = new DateTime(backupDate.getTime());
+                // age in whole days; the committed code compared minutes
+                // (debug leftover, see commented-out getDays() line) and
+                // Period#getDays() would anyway only return the days *field*
+                // of a standard period (capped by the weeks field), not the
+                // total number of days
+                long ageMillis = nowDt.getMillis() - backupDt.getMillis();
+                long ageDays = ageMillis / (24L * 60 * 60 * 1000);
+                if (ageDays > daysKept) {
+                    toDelete.put(backupDt, backupFo);
+                }
+            }
+
+            if (toDelete.size() != 0 && toDelete.size() == backupCount) {
+                // all backups would be deleted, but we want to keep at least
+                // one: spare the most recent of them (lastKey), not the
+                // oldest as the committed code did
+                DateTime lastBackupDt = toDelete.lastKey();
+                FileObject keptFo = toDelete.remove(lastBackupDt);
+                log.warn("Backup " + keptFo
+                        + " kept although it is older than " + daysKept
+                        + " days.");
+            }
+
+            // delete old backups
+            for (FileObject backupFo : toDelete.values()) {
+                backupFo.delete(Selectors.SELECT_ALL);
+                if (log.isDebugEnabled())
+                    log.debug("Deleted backup " + backupFo);
+            }
+        } catch (Exception e) {
+            throw new ArgeoException("Could not purge previous backups", e);
+        }
+    }
+
+    /** Number of days during which backups are kept. Default is 5. */
+    public void setDaysKept(Integer daysKept) {
+        this.daysKept = daysKept;
+    }
+}
--- /dev/null
+package org.argeo.server.backup;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.vfs.FileObject;
+import org.apache.commons.vfs.FileSystemException;
+import org.apache.commons.vfs.FileSystemManager;
+import org.apache.commons.vfs.FileSystemOptions;
+import org.apache.commons.vfs.Selectors;
+import org.apache.commons.vfs.UserAuthenticator;
+import org.apache.commons.vfs.auth.StaticUserAuthenticator;
+import org.apache.commons.vfs.impl.DefaultFileSystemConfigBuilder;
+import org.apache.commons.vfs.impl.StandardFileSystemManager;
+import org.argeo.ArgeoException;
+
+/**
+ * Combines multiple backups and transfer them to a remote location. Purges
+ * remote and local data based on certain criteria.
+ */
+public class SystemBackup implements Runnable {
+ private final static Log log = LogFactory.getLog(SystemBackup.class);
+
+ private FileSystemManager fileSystemManager;
+ private UserAuthenticator userAuthenticator = null;
+
+ private String backupsBase;
+ private String name;
+
+ private List<AtomicBackup> atomicBackups = new ArrayList<AtomicBackup>();
+ private BackupPurge backupPurge = new SimpleBackupPurge();
+
+ private Map<String, UserAuthenticator> remoteBases = new HashMap<String, UserAuthenticator>();
+
+ @Override
+ public void run() {
+ if (atomicBackups.size() == 0)
+ throw new ArgeoException("No atomic backup listed");
+ List<String> failures = new ArrayList<String>();
+
+ SimpleBackupContext backupContext = new SimpleBackupContext(
+ fileSystemManager, backupsBase, name);
+
+ // purge older backups
+ FileSystemOptions opts = new FileSystemOptions();
+ try {
+ DefaultFileSystemConfigBuilder.getInstance().setUserAuthenticator(
+ opts, userAuthenticator);
+ } catch (FileSystemException e) {
+ throw new ArgeoException("Cannot create authentication", e);
+ }
+
+ try {
+
+ backupPurge.purge(fileSystemManager, backupsBase, name,
+ backupContext.getDateFormat(), opts);
+ } catch (Exception e) {
+ failures.add(e.getMessage());
+ log.error("Purge of " + backupsBase + " failed", e);
+ }
+
+ // perform backup
+ for (AtomicBackup atomickBackup : atomicBackups) {
+ try {
+ String target = atomickBackup.backup(fileSystemManager,
+ backupsBase, backupContext, opts);
+ if (log.isDebugEnabled())
+ log.debug("Performed backup " + target);
+ } catch (Exception e) {
+ failures.add(e.getMessage());
+ log.error("Atomic backup failed", e);
+ }
+ }
+
+ // dispatch to remote
+ for (String remoteBase : remoteBases.keySet()) {
+ FileObject localBaseFo = null;
+ FileObject remoteBaseFo = null;
+ UserAuthenticator auth = remoteBases.get(remoteBase);
+
+ try {
+ // authentication
+ FileSystemOptions remoteOpts = new FileSystemOptions();
+ DefaultFileSystemConfigBuilder.getInstance()
+ .setUserAuthenticator(remoteOpts, auth);
+ backupPurge.purge(fileSystemManager, remoteBase, name,
+ backupContext.getDateFormat(), remoteOpts);
+
+ localBaseFo = fileSystemManager.resolveFile(backupsBase + '/'
+ + backupContext.getRelativeFolder(), opts);
+ remoteBaseFo = fileSystemManager.resolveFile(remoteBase + '/'
+ + backupContext.getRelativeFolder(), remoteOpts);
+ remoteBaseFo.copyFrom(localBaseFo, Selectors.SELECT_ALL);
+ if (log.isDebugEnabled())
+ log.debug("Copied backup to " + remoteBaseFo + " from "
+ + localBaseFo);
+ // }
+ } catch (FileSystemException e) {
+ log.error(
+ "Cannot dispatch backups from "
+ + backupContext.getRelativeFolder() + " to "
+ + remoteBase, e);
+ }
+ BackupUtils.closeFOQuietly(localBaseFo);
+ BackupUtils.closeFOQuietly(remoteBaseFo);
+ }
+
+ if (failures.size() > 0) {
+ StringBuffer buf = new StringBuffer();
+ for (String failure : failures)
+ buf.append('\n').append(failure);
+ throw new ArgeoException("Errors when running the backup,"
+ + " check the logs and the backups as soon as possible."
+ + buf);
+ }
+ }
+
+ public void setFileSystemManager(FileSystemManager fileSystemManager) {
+ this.fileSystemManager = fileSystemManager;
+ }
+
+ public void setBackupsBase(String backupsBase) {
+ this.backupsBase = backupsBase;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public void setAtomicBackups(List<AtomicBackup> atomicBackups) {
+ this.atomicBackups = atomicBackups;
+ }
+
+ public void setBackupPurge(BackupPurge backupPurge) {
+ this.backupPurge = backupPurge;
+ }
+
+ public void setUserAuthenticator(UserAuthenticator userAuthenticator) {
+ this.userAuthenticator = userAuthenticator;
+ }
+
+ public void setRemoteBases(Map<String, UserAuthenticator> remoteBases) {
+ this.remoteBases = remoteBases;
+ }
+
+ public static void main(String args[]) {
+ while (true) {
+ try {
+ StandardFileSystemManager fsm = new StandardFileSystemManager();
+ fsm.init();
+
+ SystemBackup systemBackup = new SystemBackup();
+ systemBackup.setName("mySystem");
+ systemBackup
+ .setBackupsBase("/home/mbaudier/dev/src/commons/server/runtime/org.argeo.server.core/target");
+ systemBackup.setFileSystemManager(fsm);
+
+ List<AtomicBackup> atomicBackups = new ArrayList<AtomicBackup>();
+
+ MySqlBackup mySqlBackup = new MySqlBackup("root", "", "test");
+ atomicBackups.add(mySqlBackup);
+
+ systemBackup.setAtomicBackups(atomicBackups);
+
+ Map<String, UserAuthenticator> remoteBases = new HashMap<String, UserAuthenticator>();
+ StaticUserAuthenticator userAuthenticator = new StaticUserAuthenticator(
+ null, "demo", "demo");
+ remoteBases.put("sftp://localhost/home/mbaudier/test",
+ userAuthenticator);
+ systemBackup.setRemoteBases(remoteBases);
+
+ systemBackup.run();
+
+ fsm.close();
+ } catch (FileSystemException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ System.exit(1);
+ }
+
+ // wait
+ try {
+ Thread.sleep(120 * 1000);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+}