import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
+import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
+import java.util.Dictionary;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Iterator;
import org.argeo.cms.internal.http.CmsRemotingServlet;
import org.argeo.cms.internal.http.CmsWebDavServlet;
import org.argeo.cms.internal.http.HttpUtils;
+import org.argeo.cms.internal.jcr.JcrInitUtils;
import org.argeo.jcr.Jcr;
import org.argeo.jcr.JcrException;
import org.argeo.jcr.JcrUtils;
+import org.argeo.maintenance.backup.LogicalRestore;
import org.argeo.naming.LdapAttrs;
import org.argeo.osgi.useradmin.UserAdminConf;
import org.argeo.util.LangUtils;
throw new IllegalStateException("Cannot analyse clean state", e);
}
deployConfig = new DeployConfig(configurationAdmin, dataModels, isClean);
+ JcrInitUtils.addToDeployment(CmsDeployment.this);
httpExpected = deployConfig.getProps(KernelConstants.JETTY_FACTORY_PID, "default") != null;
try {
Configuration[] configs = configurationAdmin
KernelUtils.asyncOpen(confAdminSt);
}
+ /**
+ * Registers a factory configuration under the given factory PID in the deploy
+ * configuration, persists it, then reloads the configs so the new settings are
+ * applied to the running deployment.
+ *
+ * @param factoryPid the OSGi factory PID to register the configuration under
+ * @param props the configuration properties to store
+ * @throws IllegalStateException if reloading the persisted configs fails (cause preserved)
+ */
+ public void addFactoryDeployConfig(String factoryPid, Dictionary<String, Object> props) {
+ deployConfig.putFactoryDeployConfig(factoryPid, props);
+ deployConfig.save();
+ try {
+ // reload so the freshly saved configuration takes effect immediately
+ deployConfig.loadConfigs();
+ } catch (IOException e) {
+ throw new IllegalStateException(e);
+ }
+ }
+
+ /**
+ * Returns the stored deploy properties for the given factory PID and cn,
+ * delegating to the deploy config. May return null when no such configuration
+ * exists (callers elsewhere use the result as an existence check).
+ *
+ * @param factoryPid the OSGi factory PID the configuration was registered under
+ * @param cn the common name identifying the particular configuration instance
+ * @return the properties, or null if none are stored for this PID/cn pair
+ */
+ public Dictionary<String, Object> getProps(String factoryPid, String cn) {
+ return deployConfig.getProps(factoryPid, cn);
+ }
+
/**
 * Builds a short human-readable summary of the configured HTTP/HTTPS ports,
 * e.g. "HTTP 8080 HTTPS 8443". A null port is replaced by whitespace / omitted,
 * matching the original formatting exactly (including the leading space when
 * no HTTP port is set).
 */
private String httpPortsMsg(Object httpPort, Object httpsPort) {
	StringBuilder msg = new StringBuilder();
	if (httpPort != null)
		msg.append("HTTP ").append(httpPort).append(' ');
	else
		msg.append(' ');
	if (httpsPort != null)
		msg.append("HTTPS ").append(httpsPort);
	return msg.toString();
}
// home
prepareDataModel(NodeConstants.NODE_REPOSITORY, deployedNodeRepository, publishAsLocalRepo);
+ // init from backup
+ if (deployConfig.isFirstInit()) {
+ Path restorePath = Paths.get(System.getProperty("user.dir"), "restore");
+ if (Files.exists(restorePath)) {
+ if (log.isDebugEnabled())
+ log.debug("Found backup " + restorePath + ", restoring it...");
+ LogicalRestore logicalRestore = new LogicalRestore(bc, deployedNodeRepository, restorePath);
+ KernelUtils.doAsDataAdmin(logicalRestore);
+ log.info("Restored backup from " + restorePath);
+ }
+ }
+
// init from repository
Collection<ServiceReference<Repository>> initRepositorySr;
try {
workspaces: for (String workspaceName : initSession.getWorkspace().getAccessibleWorkspaceNames()) {
if ("security".equals(workspaceName))
continue workspaces;
+ if (log.isDebugEnabled())
+ log.debug("Copying workspace " + workspaceName + " from init repository...");
+ long begin = System.currentTimeMillis();
Session targetSession = null;
Session sourceSession = null;
try {
targetSession = NodeUtils.openDataAdminSession(deployedNodeRepository, workspaceName);
}
sourceSession = initRepository.login(workspaceName);
+// JcrUtils.copyWorkspaceXml(sourceSession, targetSession);
+ // TODO deal with referenceable nodes
JcrUtils.copy(sourceSession.getRootNode(), targetSession.getRootNode());
targetSession.save();
+ long duration = System.currentTimeMillis() - begin;
+ if (log.isDebugEnabled())
+ log.debug("Copied workspace " + workspaceName + " from init repository in " + (duration / 1000)
+ + " s");
+ } catch (Exception e) {
+ log.error("Cannot copy workspace " + workspaceName + " from init repository.", e);
} finally {
Jcr.logout(sourceSession);
Jcr.logout(targetSession);
if (cn != null) {
List<String> publishAsLocalRepo = new ArrayList<>();
if (cn.equals(NodeConstants.NODE_REPOSITORY)) {
+// JackrabbitDataModelMigration.clearRepositoryCaches(repoContext.getRepositoryConfig());
prepareNodeRepository(repoContext.getRepository(), publishAsLocalRepo);
// TODO separate home repository
prepareHomeRepository(repoContext.getRepository());