schemaReferenceUrls)
- {
- this.schemaReferenceUrls = schemaReferenceUrls;
- }
-
- /**
- * Set the number times that the DB must be checked for the presence of the table
- * indicating that a schema change is in progress.
- *
- * @param schemaUpdateLockRetryCount the number of times to retry (default 24)
- */
- public void setSchemaUpdateLockRetryCount(int schemaUpdateLockRetryCount)
- {
- this.schemaUpdateLockRetryCount = schemaUpdateLockRetryCount;
- }
-
- /**
- * Set the wait time (seconds) between checks for the schema update lock.
- *
- * @param schemaUpdateLockRetryWaitSeconds the number of seconds between checks (default 5 seconds)
- */
- public void setSchemaUpdateLockRetryWaitSeconds(int schemaUpdateLockRetryWaitSeconds)
- {
- this.schemaUpdateLockRetryWaitSeconds = schemaUpdateLockRetryWaitSeconds;
- }
-
- /**
- * Optionally override the system's default maximum string length. Some databases have
- * limitations on how long the string_value columns can be while other do not.
- * Some parts of the persistence have alternatives when the string values exceed this
- * length while others do not. Either way, it is possible to adjust the text column sizes
- * and adjust this value manually to override the default associated with the database
- * being used.
- *
- * The system - as of V2.1.2 - will attempt to adjust the maximum string length size
- * automatically and therefore this method is not normally required. But it is possible
- * to manually override the value if, for example, the system doesn't guess the correct
- * maximum length or if the dialect is not explicitly catered for.
- *
- * All negative or zero values are ignored and the system defaults to its best guess based
- * on the dialect being used.
- *
- * @param maximumStringLength the maximum length of the string_value columns
- */
- public void setMaximumStringLength(int maximumStringLength)
- {
- if (maximumStringLength > 0)
- {
- this.maximumStringLength = maximumStringLength;
- }
- }
-
- /**
- * Set db.schema.name to be used
- */
- public void setDbSchemaName(String dbSchemaName)
- {
- if (PropertyCheck.isValidPropertyString(dbSchemaName))
- {
- this.dbSchemaName = dbSchemaName;
- }
- }
-
- /**
- * Sets the properties map from which we look up some configuration settings.
- *
- * @param globalProperties
- * the global properties
- */
- public void setGlobalProperties(Properties globalProperties)
- {
- this.globalProperties = globalProperties;
- }
-
- /**
- * Register a new script for execution when creating a clean schema. The order of registration
- * determines the order of execution.
- *
- * @param preCreateScriptUrl the script URL, possibly containing the ${db.script.dialect} placeholder
- */
- public void addPreCreateScriptUrl(String preCreateScriptUrl)
- {
- if (logger.isDebugEnabled())
- {
- logger.debug("Registered create script URL (pre-Hibernate): " + preCreateScriptUrl);
- }
- this.preCreateScriptUrls.add(preCreateScriptUrl);
- }
-
- /**
- * Register a new script for execution after the Hibernate schema creation phase. The order of registration
- * determines the order of execution.
- *
- * @param postUpdateScriptUrl the script URL, possibly containing the ${db.script.dialect} placeholder
- */
- public void addPostCreateScriptUrl(String postUpdateScriptUrl)
- {
- if (logger.isDebugEnabled())
- {
- logger.debug("Registered create script URL (post-Hibernate): " + postUpdateScriptUrl);
- }
- this.postCreateScriptUrls.add(postUpdateScriptUrl);
- }
-
- /**
- * Register a new SQL-based patch for consideration against the instance (before Hibernate execution)
- *
- * @param scriptPatch the patch that will be examined for execution
- */
- public void addPreUpdateScriptPatch(SchemaUpgradeScriptPatch scriptPatch)
- {
- if(false == scriptPatch.isIgnored())
- {
- if (logger.isDebugEnabled())
- {
- logger.debug("Registered script patch (pre-Hibernate): " + scriptPatch.getId());
- }
- this.preUpdateScriptPatches.add(scriptPatch);
- }
- else
- {
- logger.info("Ignoring script patch (pre-Hibernate): " + scriptPatch.getId());
- }
-
- }
-
- /**
- * Register a new SQL-based patch for consideration against the instance (after Hibernate execution)
- *
- * @param scriptPatch the patch that will be examined for execution
- */
- public void addPostUpdateScriptPatch(SchemaUpgradeScriptPatch scriptPatch)
- {
- if(false == scriptPatch.isIgnored())
- {
- if (logger.isDebugEnabled())
- {
- logger.debug("Registered script patch (post-Hibernate): " + scriptPatch.getId());
- }
- this.postUpdateScriptPatches.add(scriptPatch);
- }
- else
- {
- logger.info("Ignoring script patch (post-Hibernate): " + scriptPatch.getId());
- }
- }
-
- /**
- * Register a new SQL-based patch for consideration against the Activiti instance
- *
- * @param scriptPatch the patch that will be examined for execution
- */
- public void addUpdateActivitiScriptPatch(SchemaUpgradeScriptPatch scriptPatch)
- {
- if(false == scriptPatch.isIgnored())
- {
- if (logger.isDebugEnabled())
- {
- logger.debug("Registered Activiti script patch: " + scriptPatch.getId());
- }
- this.updateActivitiScriptPatches.add(scriptPatch);
- }
- else
- {
- logger.info("Ignoring Activiti script patch: " + scriptPatch.getId());
- }
-
- }
-
    /**
     * Marker exception thrown when no applied-patch table can be found in the
     * database, i.e. when the Alfresco schema does not exist yet.
     */
    private static class NoSchemaException extends Exception
    {
        private static final long serialVersionUID = 5574280159910824660L;
    }
-
    /**
     * Used to indicate a forced stop of the bootstrap.
     *
     * @see SchemaBootstrap#setStopAfterSchemaBootstrap(boolean)
     *
     * @author Derek Hulley
     * @since 3.1.1
     */
    private static class BootstrapStopException extends RuntimeException
    {
        private static final long serialVersionUID = 4250016675538442181L;
        private BootstrapStopException()
        {
            // The exception message is the localized "forced stop" text
            super(I18NUtil.getMessage(ERR_FORCED_STOP));
        }
    }
-
- /**
- * Count applied patches. This fails if multiple applied patch tables are found,
- * which normally indicates that the schema view needs to be limited.
- *
- * @param connection a valid database connection
- * @return Returns the number of applied patches
- * @throws NoSchemaException if the table of applied patches can't be found
- */
- private int countAppliedPatches(Connection connection) throws Exception
- {
- String defaultSchema = dbSchemaName != null ? dbSchemaName : databaseMetaDataHelper.getSchema(connection);
-
- if (defaultSchema != null && defaultSchema.length() == 0)
- {
- defaultSchema = null;
- }
- String defaultCatalog = connection.getCatalog();
- if (defaultCatalog != null && defaultCatalog.length() == 0)
- {
- defaultCatalog = null;
- }
- DatabaseMetaData dbMetadata = connection.getMetaData();
-
- ResultSet tableRs = dbMetadata.getTables(defaultCatalog, defaultSchema, "%", null);
- boolean newPatchTable = false;
- boolean oldPatchTable = false;
- try
- {
- boolean multipleSchemas = false;
- while (tableRs.next())
- {
- String tableName = tableRs.getString("TABLE_NAME");
- if (tableName.equalsIgnoreCase("applied_patch"))
- {
- if (oldPatchTable || newPatchTable)
- {
- // Found earlier
- multipleSchemas = true;
- }
- oldPatchTable = true;
- }
- else if (tableName.equalsIgnoreCase("alf_applied_patch"))
- {
- if (oldPatchTable || newPatchTable)
- {
- // Found earlier
- multipleSchemas = true;
- }
- newPatchTable = true;
- }
- }
- // We go through all the tables so that multiple visible schemas are detected
- if (multipleSchemas)
- {
- throw new AlfrescoRuntimeException(ERR_MULTIPLE_SCHEMAS);
- }
- }
- finally
- {
- try { tableRs.close(); } catch (Throwable e) {e.printStackTrace(); }
- }
-
- if (newPatchTable)
- {
- Statement stmt = connection.createStatement();
- try
- {
- ResultSet rs = stmt.executeQuery("select count(id) from alf_applied_patch");
- rs.next();
- int count = rs.getInt(1);
- return count;
- }
- catch (SQLException e)
- {
- // This should work at least and is probably an indication of the user viewing multiple schemas
- throw new AlfrescoRuntimeException(ERR_MULTIPLE_SCHEMAS);
- }
- finally
- {
- try { stmt.close(); } catch (Throwable e) {}
- }
- }
- else if (oldPatchTable)
- {
- // found the old style table name
- Statement stmt = connection.createStatement();
- try
- {
- ResultSet rs = stmt.executeQuery("select count(id) from applied_patch");
- rs.next();
- int count = rs.getInt(1);
- return count;
- }
- finally
- {
- try { stmt.close(); } catch (Throwable e) {}
- }
- }
- else
- {
- // The applied patches table is not present
- throw new NoSchemaException();
- }
- }
-
-
- /**
- * Check whether Activiti tables already created in db.
- *
- * @param connection a valid database connection
- * @return true
if Activiti tables already created in schema, otherwise false
- */
- private boolean checkActivitiTablesExist(Connection connection)
- {
- Statement stmt = null;
- try
- {
- stmt = connection.createStatement();
- stmt.executeQuery("select min(id_) from ACT_RU_TASK");
- return true;
- }
- catch (SQLException e)
- {
- logger.debug("Did not find ACT_RU_TASK table.");
- return false;
- }
- finally
- {
- try
- {
- if (stmt != null)
- {
- stmt.close();
- }
- }
- catch (Throwable e) {}
- }
- }
-
- /**
- * @return Returns the name of the applied patch table, or null if the table doesn't exist
- */
- private String getAppliedPatchTableName(Connection connection) throws Exception
- {
- Statement stmt = connection.createStatement();
- try
- {
- stmt.executeQuery("select * from alf_applied_patch");
- return "alf_applied_patch";
- }
- catch (Throwable e)
- {
- // we'll try another table name
- }
- finally
- {
- try { stmt.close(); } catch (Throwable e) {}
- }
- // for pre-1.4 databases, the table was named differently
- stmt = connection.createStatement();
- try
- {
- stmt.executeQuery("select * from applied_patch");
- return "applied_patch";
- }
- catch (Throwable e)
- {
- // It is not there
- return null;
- }
- finally
- {
- try { stmt.close(); } catch (Throwable e) {}
- }
- }
-
- /**
- * @return Returns the number of applied patches
- */
- private boolean didPatchSucceed(Connection connection, String patchId, boolean alternative) throws Exception
- {
- String patchTableName = getAppliedPatchTableName(connection);
- if (patchTableName == null)
- {
- // Table doesn't exist, yet
- return false;
- }
- Statement stmt = connection.createStatement();
- try
- {
- ResultSet rs = stmt.executeQuery("select succeeded, was_executed from " + patchTableName + " where id = '" + patchId + "'");
- if (!rs.next())
- {
- return false;
- }
- boolean succeeded = rs.getBoolean(1);
- boolean wasExecuted = rs.getBoolean(2);
-
- if (alternative)
- {
- return succeeded && wasExecuted;
- }
- else
- {
- return succeeded;
- }
- }
- finally
- {
- try { stmt.close(); } catch (Throwable e) {}
- }
- }
-
- /**
- * Finds the version.properties file and determines the installed version.schema.
- * The only way to determine the original installed schema number is by quering the for the minimum value in
- * alf_applied_patch.applied_to_schema. This might not work if an upgrade is attempted straight from
- * Alfresco v1.0!
- *
- * @return the installed schema number or -1 if the installation is new.
- */
- private int getInstalledSchemaNumber(Connection connection) throws Exception
- {
- Statement stmt = connection.createStatement();
- try
- {
- ResultSet rs = stmt.executeQuery(
- "select min(applied_to_schema) from alf_applied_patch where applied_to_schema > -1");
- if (!rs.next())
- {
- // Nothing in the table
- return -1;
- }
- if (rs.getObject(1) == null)
- {
- // Nothing in the table
- return -1;
- }
- int installedSchema = rs.getInt(1);
- return installedSchema;
- }
- finally
- {
- try { stmt.close(); } catch (Throwable e) {}
- }
- }
-
    /**
     * Marker exception thrown when the bootstrap lock (the alf_bootstrap_lock marker
     * table) could not be acquired, signalling callers to wait and retry.
     */
    private static class LockFailedException extends Exception
    {
        private static final long serialVersionUID = -6676398230191205456L;
    }
-
-
- /**
- * Records that the bootstrap process has started
- */
- private synchronized void setBootstrapStarted(Connection connection) throws Exception
- {
- // Create the marker table
- Statement stmt = connection.createStatement();
- try
- {
- stmt.executeUpdate("create table alf_bootstrap_lock (charval CHAR(1) NOT NULL)");
- // Success
- if (logger.isInfoEnabled())
- {
- logger.info("Bootstrap started.");
- }
- return;
- }
- catch (Throwable e)
- {
- // We throw a well-known exception to be handled by retrying code if required
- throw new LockFailedException();
- }
- finally
- {
- try { stmt.close(); } catch (Throwable e) {}
- }
- }
-
- /**
- * Used to identify the case where one of the nodes (!maybe even this one!)
- * has created the alf_bootstrap_lock table - This table indicates that a schema initialization
- * or schema DB update is in progress.
- *
- * @return true if the alf_bootstrap_lock marker table exists in the DB; false otherwise
- *
- */
- private synchronized boolean isBootstrapInProgress(Connection connection) throws Exception
- {
- Statement stmt = connection.createStatement();
- try
- {
- stmt.executeQuery("select * from alf_bootstrap_lock");
- // Success
- if (logger.isInfoEnabled())
- {
- logger.info("Bootstrap marker still present in the DB.");
- }
- return true;
- }
- catch (Throwable e)
- {
- // The exception will be thrown if the table does not exist.
- return false;
- }
- finally
- {
- try { stmt.close(); } catch (Throwable e) {}
- }
- }
-
- /**
- * Records that the bootstrap process has finished
- */
- private void setBootstrapCompleted(Connection connection) throws Exception
- {
- // Create the marker table
- Statement stmt = connection.createStatement();
- try
- {
- stmt.executeUpdate("drop table alf_bootstrap_lock");
-
- // from Thor
- executedStatementsThreadLocal.set(null);
- if (logger.isInfoEnabled())
- {
- logger.info("Bootstrap completed.");
- }
- }
- catch (Throwable e)
- {
- if (logger.isDebugEnabled())
- {
- logger.debug("Exception in deleting the alf_bootstrap_lock table: " + e.getMessage(), e);
- }
- throw AlfrescoRuntimeException.create(ERR_PREVIOUS_FAILED_BOOTSTRAP);
- }
- finally
- {
- try { stmt.close(); } catch (Throwable e) {}
- }
- }
-
- /**
- * Builds the schema from scratch or applies the necessary patches to the schema.
- */
- private boolean updateSchema(Connection connection) throws Exception
- {
- boolean create = false;
- try
- {
- final int numberOfPatchesApplied = countAppliedPatches(connection);
- if (logger.isInfoEnabled())
- {
- logger.info("Applied patches detected: " + numberOfPatchesApplied);
- }
- }
- catch (NoSchemaException e)
- {
- create = true;
- }
-
- // Get the dialect
- final Dialect dialect = this.dialect;
- String dialectStr = dialect.getClass().getSimpleName();
-
- if (create)
- {
- long start = System.currentTimeMillis();
-
- // execute pre-create scripts (not patches)
- for (String scriptUrl : this.preCreateScriptUrls)
- {
- executeScriptUrl(connection, scriptUrl);
- }
- // execute post-create scripts (not patches)
- for (String scriptUrl : this.postCreateScriptUrls)
- {
- executeScriptUrl(connection, scriptUrl);
- }
-
- if (logger.isInfoEnabled())
- {
- logger.info("Creating Alfresco tables took " + (System.currentTimeMillis() - start) + " ms");
- }
- }
- else
- {
- long start = System.currentTimeMillis();
-
- // Execute any pre-auto-update scripts
- checkSchemaPatchScripts(connection, preUpdateScriptPatches, true);
-
- // Execute any post-auto-update scripts
- checkSchemaPatchScripts(connection, postUpdateScriptPatches, true);
-
- if (logger.isInfoEnabled())
- {
- logger.info(
- "Checking and patching Alfresco tables took " + (System.currentTimeMillis() - start) + " ms");
- }
- }
-
- ensureCurrentClusterMemberIsBootstrapping(connection);
-
- // Initialise Activiti DB, using an unclosable connection
- boolean activitiTablesExist = checkActivitiTablesExist(connection);
- if (logger.isInfoEnabled())
- {
- logger.info("Activiti tables need to be " + (activitiTablesExist ? "checked for patches" : " created"));
- }
-
- if(!activitiTablesExist)
- {
- long start = System.currentTimeMillis();
-
- ensureCurrentClusterMemberIsBootstrapping(connection);
-
- // Activiti DB updates are performed as patches in alfresco, only give
- // control to activiti when creating new one.
- initialiseActivitiDBSchema(new UnclosableConnection(connection));
-
- // ALF-18996: Upgrade from 3.4.12 to 4.2.0 fails: Activiti tables have not been bootstrapped
- // The Activiti bootstrap is effectively doing the work of all the other patches,
- // which should be considered complete.
- int installedSchemaNumber = getInstalledSchemaNumber(connection);
- for (Patch activitiScriptPatch : updateActivitiScriptPatches)
- {
- AppliedPatch appliedPatch = new AppliedPatch();
- appliedPatch.setId(activitiScriptPatch.getId());
- appliedPatch.setDescription(activitiScriptPatch.getDescription());
- appliedPatch.setFixesFromSchema(activitiScriptPatch.getFixesFromSchema());
- appliedPatch.setFixesToSchema(activitiScriptPatch.getFixesToSchema());
- appliedPatch.setTargetSchema(activitiScriptPatch.getTargetSchema());
- appliedPatch.setAppliedToSchema(installedSchemaNumber);
- appliedPatch.setAppliedToServer("UNKNOWN");
- appliedPatch.setAppliedOnDate(new Date()); // the date applied
- appliedPatch.setSucceeded(true);
- appliedPatch.setWasExecuted(false);
- appliedPatch.setReport("Placeholder for Activiti bootstrap at schema " + installedSchemaNumber);
- appliedPatchDAO.createAppliedPatch(appliedPatch);
- }
- if (logger.isInfoEnabled())
- {
- logger.info("Creating Activiti tables took " + (System.currentTimeMillis() - start) + " ms");
- }
- }
- else
- {
- long start = System.currentTimeMillis();
-
- // Execute any auto-update scripts for Activiti tables
- checkSchemaPatchScripts(connection, updateActivitiScriptPatches, true);
-
- // verify that all Activiti patches have been applied correctly
- checkSchemaPatchScripts(connection, updateActivitiScriptPatches, false);
-
- if (logger.isInfoEnabled())
- {
- logger.info("Checking and patching Activiti tables took " + (System.currentTimeMillis() - start) + " ms");
- }
- }
-
- if (!create)
- {
- long start = System.currentTimeMillis();
-
- // verify that all patches have been applied correctly
- checkSchemaPatchScripts(connection, preUpdateScriptPatches, false); // check scripts
- checkSchemaPatchScripts(connection, postUpdateScriptPatches, false); // check scripts
-
- if (logger.isInfoEnabled())
- {
- logger.info("Checking that all patches have been applied took " + (System.currentTimeMillis() - start) + " ms");
- }
- }
-
- return create;
- }
-
- /**
- * Initialises the Activiti DB schema, if not present it's created.
- *
- * @param connection Connection to use the initialise DB schema
- */
- private void initialiseActivitiDBSchema(Connection connection)
- {
- // create instance of activiti engine to initialise schema
- ProcessEngine engine = null;
- ProcessEngineConfiguration engineConfig = ProcessEngineConfiguration.createStandaloneProcessEngineConfiguration();
- try
- {
- // build the engine
- engine = engineConfig.setDataSource(dataSource).
- setDatabaseSchemaUpdate("none").
- setProcessEngineName("activitiBootstrapEngine").
- setHistory("full").
- setJobExecutorActivate(false).
- buildProcessEngine();
-
- String schemaName = dbSchemaName != null ? dbSchemaName : databaseMetaDataHelper.getSchema(connection);
- if (logger.isInfoEnabled())
- {
- logger.info("Creating Activiti DB schema tables");
- }
- engine.getManagementService().databaseSchemaUpgrade(connection, null, schemaName);
- }
- finally
- {
- if (engine != null)
- {
- // close the process engine
- engine.close();
- }
- }
- }
-
    /**
     * Check that the necessary upgrade scripts have been executed against the database,
     * optionally executing any outstanding ones.
     *
     * @param connection    a valid database connection
     * @param scriptPatches the patches to check (elements are SchemaUpgradeScriptPatch)
     * @param apply         if true, outstanding scripts are executed; if false, an
     *                      outstanding script causes an ERR_SCRIPT_NOT_RUN failure
     * @throws LockFailedException (via ensureCurrentClusterMemberIsBootstrapping) if
     *                      another cluster node holds the bootstrap lock
     */
    private void checkSchemaPatchScripts(
        Connection connection,
        List scriptPatches,
        boolean apply) throws Exception
    {
        // first check if there have been any applied patches
        int appliedPatchCount = countAppliedPatches(connection);
        if (appliedPatchCount == 0)
        {
            // This is a new schema, so upgrade scripts are irrelevant
            // and patches will not have been applied yet
            return;
        }

        ensureCurrentClusterMemberIsBootstrapping(connection);

        // Retrieve the first installed schema number
        int installedSchema = getInstalledSchemaNumber(connection);

        nextPatch:
        for (SchemaUpgradeScriptPatch patch : scriptPatches)
        {
            final String patchId = patch.getId();
            final String scriptUrl = patch.getScriptUrl();

            // Check if any of the alternative patches were executed
            List alternatives = patch.getAlternatives();
            for (Patch alternativePatch : alternatives)
            {
                String alternativePatchId = alternativePatch.getId();
                // An alternative counts only if it was actually executed (hence 'true')
                boolean alternativeSucceeded = didPatchSucceed(connection, alternativePatchId, true);
                if (alternativeSucceeded)
                {
                    // This patch is covered by a successful alternative: move on
                    continue nextPatch;
                }
            }

            // check if the script was successfully executed
            boolean wasSuccessfullyApplied = didPatchSucceed(connection, patchId, false);
            if (wasSuccessfullyApplied)
            {
                // Either the patch was executed before or the system was bootstrapped
                // with the patch bean present.
                continue;
            }
            else if (!patch.applies(installedSchema))
            {
                // Patch does not apply to the installed schema number
                continue;
            }
            else if (!apply)
            {
                // the script was not run and may not be run automatically
                throw AlfrescoRuntimeException.create(ERR_SCRIPT_NOT_RUN, scriptUrl);
            }
            // it wasn't run and it can be run now
            executeScriptUrl(connection, scriptUrl);
        }
    }
-
- /**
- * This method throws a LockFailedException in case the current Alfresco cluster node
- * is NOT the node that is doing the schema bootstrap;
- * This is the exception that signals that this node should wait for a while and retry
- *
- *
- * Only one node from the cluster should do the initialization of the DB, so all other
- * nodes should just retry to execute the schema checks after the initialization of
- * the DB has ended;
- *
- */
- private void ensureCurrentClusterMemberIsBootstrapping(Connection connection) throws Exception
- {
- if (isAnotherClusterMemberBootstrapping(connection))
- {
- logger.info("Another Alfresco cluster node is updating the DB");
-
- // We throw a well-known exception to be handled by retrying code if required
- throw new LockFailedException();
- }
- }
-
- /**
- *
- * @return true if there is a bootstrap in progress and this node has NOT executed any queries
- * therefore this node is NOT the one doing the schema initialization/upgrade
- *
- * @throws Exception if the DB connections has problems
- */
- private boolean isAnotherClusterMemberBootstrapping(Connection connection) throws Exception
- {
- return executedStatementsThreadLocal.get()== null && isBootstrapInProgress(connection);
- }
-
- private void executeScriptUrl(Connection connection, String scriptUrl) throws Exception
- {
- Dialect dialect = this.dialect;
- String dialectStr = dialect.getClass().getSimpleName();
- InputStream scriptInputStream = getScriptInputStream(dialect.getClass(), scriptUrl);
- // check that it exists
- if (scriptInputStream == null)
- {
- throw AlfrescoRuntimeException.create(ERR_SCRIPT_NOT_FOUND, scriptUrl);
- }
- // write the script to a temp location for future and failure reference
- File tempFile = null;
- try
- {
- tempFile = TempFileProvider.createTempFile("AlfrescoSchema-" + dialectStr + "-Update-", ".sql");
- ContentWriter writer = new FileContentWriter(tempFile);
- writer.putContent(scriptInputStream);
- }
- finally
- {
- try { scriptInputStream.close(); } catch (Throwable e) {} // usually a duplicate close
- }
- // now execute it
- String dialectScriptUrl = scriptUrl.replaceAll(PLACEHOLDER_DIALECT, dialect.getClass().getName());
- // Replace the script placeholders
- executeScriptFile(connection, tempFile, dialectScriptUrl);
- }
-
- /**
- * Replaces the dialect placeholder in the resource URL and attempts to find a file for
- * it. If not found, the dialect hierarchy will be walked until a compatible resource is
- * found. This makes it possible to have resources that are generic to all dialects.
- *
- * @return The Resource, otherwise null
- */
- private Resource getDialectResource(Class> dialectClass, String resourceUrl)
- {
- // replace the dialect placeholder
- String dialectResourceUrl = resolveDialectUrl(dialectClass, resourceUrl);
- // get a handle on the resource
- Resource resource = rpr.getResource(dialectResourceUrl);
- if (!resource.exists())
- {
- // it wasn't found. Get the superclass of the dialect and try again
- Class> superClass = dialectClass.getSuperclass();
- if (Dialect.class.isAssignableFrom(superClass))
- {
- // we still have a Dialect - try again
- return getDialectResource(superClass, resourceUrl);
- }
- else
- {
- // we have exhausted all options
- return null;
- }
- }
- else
- {
- // we have a handle to it
- return resource;
- }
- }
-
- /**
- * Takes resource URL containing the {@link SchemaBootstrap#PLACEHOLDER_DIALECT dialect placeholder text}
- * and substitutes the placeholder with the name of the given dialect's class.
- *
- * For example:
- *
- * resolveDialectUrl(MySQLInnoDBDialect.class, "classpath:alfresco/db/${db.script.dialect}/myfile.xml")
- *
- * would give the following String:
- *
- * classpath:alfresco/db/org.hibernate.dialect.MySQLInnoDBDialect/myfile.xml
- *
- */
- private String resolveDialectUrl(Class> dialectClass, String resourceUrl)
- {
- return resourceUrl.replaceAll(PLACEHOLDER_DIALECT, dialectClass.getName());
- }
-
- /**
- * Replaces the dialect placeholder in the script URL and attempts to find a file for
- * it. If not found, the dialect hierarchy will be walked until a compatible script is
- * found. This makes it possible to have scripts that are generic to all dialects.
- *
- * @return Returns an input stream onto the script, otherwise null
- */
- private InputStream getScriptInputStream(Class> dialectClazz, String scriptUrl) throws Exception
- {
- Resource resource = getDialectResource(dialectClazz, scriptUrl);
- if (resource == null)
- {
- throw new AlfrescoRuntimeException("Script [ " + scriptUrl + " ] can't be found for " + dialectClazz);
- }
- return resource.getInputStream();
- }
-
- /**
- * @param connection the DB connection to use
- * @param scriptFile the file containing the statements
- * @param scriptUrl the URL of the script to report. If this is null, the script
- * is assumed to have been auto-generated.
- */
- private void executeScriptFile(
- Connection connection,
- File scriptFile,
- String scriptUrl) throws Exception
- {
- final Dialect dialect = this.dialect;
-
- if (executedStatementsThreadLocal.get() == null)
- {
- // Validate the schema, pre-upgrade
- validateSchema("Alfresco-{0}-Validation-Pre-Upgrade-{1}-", null);
-
- dumpSchema("pre-upgrade");
-
- // There is no lock at this stage. This process can fall out if the lock can't be applied.
- setBootstrapStarted(connection);
- executedStatementsThreadLocal.set(new StringBuilder(8094));
- }
-
- if (scriptUrl == null)
- {
- LogUtil.info(logger, MSG_EXECUTING_GENERATED_SCRIPT, scriptFile);
- }
- else
- {
- LogUtil.info(logger, MSG_EXECUTING_COPIED_SCRIPT, scriptFile, scriptUrl);
- }
-
- InputStream scriptInputStream = new FileInputStream(scriptFile);
- BufferedReader reader = new BufferedReader(new InputStreamReader(scriptInputStream, "UTF-8"));
- try
- {
- int line = 0;
- // loop through all statements
- StringBuilder sb = new StringBuilder(1024);
- String fetchVarName = null;
- String fetchColumnName = null;
- Object defaultFetchValue = null;
- boolean doBatch = false;
- int batchUpperLimit = 0;
- int batchSize = 1;
- Map varAssignments = new HashMap(13);
- String delimiter = ";";
- // Special variable assignments:
- if (dialect instanceof PostgreSQLDialect)
- {
- // Needs 1/0 for true/false
- varAssignments.put("true", "true");
- varAssignments.put("false", "false");
- varAssignments.put("TRUE", "TRUE");
- varAssignments.put("FALSE", "FALSE");
- }
- else
- {
- // Needs true/false as strings
- varAssignments.put("true", "1");
- varAssignments.put("false", "0");
- varAssignments.put("TRUE", "1");
- varAssignments.put("FALSE", "0");
- }
- long now = System.currentTimeMillis();
- varAssignments.put("now", new Long(now).toString());
- varAssignments.put("NOW", new Long(now).toString());
-
- while(true)
- {
- String sqlOriginal = reader.readLine();
- line++;
-
- if (sqlOriginal == null)
- {
- // nothing left in the file
- break;
- }
-
- // trim it
- String sql = sqlOriginal.trim();
- // Check of includes
- if (sql.startsWith("--INCLUDE:"))
- {
- if (sb.length() > 0)
- {
- // This can only be set before a new SQL statement
- throw AlfrescoRuntimeException.create(ERR_STATEMENT_INCLUDE_BEFORE_SQL, (line - 1), scriptUrl);
- }
- String includedScriptUrl = sql.substring(10, sql.length());
- // Execute the script in line
- executeScriptUrl(connection, includedScriptUrl);
- }
- // Check for variable assignment
- else if (sql.startsWith("--ASSIGN:"))
- {
- if (sb.length() > 0)
- {
- // This can only be set before a new SQL statement
- throw AlfrescoRuntimeException.create(ERR_STATEMENT_VAR_ASSIGNMENT_BEFORE_SQL, (line - 1), scriptUrl);
- }
- String assignStr = sql.substring(9, sql.length());
- String[] fetchMapping = assignStr.split("!");
- String[] assigns = fetchMapping[0].split("=");
- if (assigns.length != 2 || assigns[0].length() == 0 || assigns[1].length() == 0)
- {
- throw AlfrescoRuntimeException.create(ERR_STATEMENT_VAR_ASSIGNMENT_FORMAT, (line - 1), scriptUrl);
- }
- fetchVarName = assigns[0];
- fetchColumnName = assigns[1];
- if (fetchMapping.length > 1 && fetchMapping[1].length() > 0)
- {
- defaultFetchValue = fetchMapping[1];
- }
- continue;
- }
- // Handle looping control
- else if (sql.startsWith("--FOREACH"))
- {
- // --FOREACH table.column batch.size.property
- String[] args = sql.split("[ \\t]+");
- int sepIndex;
- if (args.length == 3 && (sepIndex = args[1].indexOf('.')) != -1)
- {
- doBatch = true;
- // Select the upper bound of the table column
- String stmt = "SELECT MAX(" + args[1].substring(sepIndex+1) + ") AS upper_limit FROM " + args[1].substring(0, sepIndex);
- Object fetchedVal = executeStatement(connection, stmt, "upper_limit", false, line, scriptFile);
- if (fetchedVal instanceof Number)
- {
- batchUpperLimit = ((Number)fetchedVal).intValue();
- // Read the batch size from the named property
- String batchSizeString = globalProperties.getProperty(args[2]);
- // Fall back to the default property
- if (batchSizeString == null)
- {
- batchSizeString = globalProperties.getProperty(PROPERTY_DEFAULT_BATCH_SIZE);
- }
- batchSize = batchSizeString == null ? 10000 : Integer.parseInt(batchSizeString);
- }
- }
- continue;
- }
- // Allow transaction delineation
- else if (sql.startsWith("--BEGIN TXN"))
- {
- connection.setAutoCommit(false);
- continue;
- }
- else if (sql.startsWith("--END TXN"))
- {
- connection.commit();
- connection.setAutoCommit(true);
- continue;
- }
- else if (sql.startsWith("--SET-DELIMITER:"))
- {
- if (sb.length() > 0)
- {
- // This can only be set before a new SQL statement
- throw AlfrescoRuntimeException.create(ERR_DELIMITER_SET_BEFORE_SQL, (line - 1), scriptUrl);
- }
-
- // We're good...so set the new delimiter
- String newDelim = sql.substring(16).trim();
- if (newDelim.length() == 0)
- {
- throw AlfrescoRuntimeException.create(ERR_DELIMITER_INVALID, (line - 1), scriptUrl);
- }
- delimiter = newDelim;
- }
-
- // Check for comments
- if (sql.length() == 0 ||
- sql.startsWith( "--" ) ||
- sql.startsWith( "//" ) ||
- sql.startsWith( "/*" ) )
- {
- if (sb.length() > 0)
- {
- // we have an unterminated statement
- throw AlfrescoRuntimeException.create(ERR_STATEMENT_TERMINATOR, delimiter, (line - 1), scriptUrl);
- }
- // there has not been anything to execute - it's just a comment line
- continue;
- }
- // have we reached the end of a statement?
- boolean execute = false;
- boolean optional = false;
- if (sql.endsWith(delimiter))
- {
- sql = sql.substring(0, sql.length() - 1);
- execute = true;
- optional = false;
- }
- else if (sql.endsWith("(optional)") || sql.endsWith("(OPTIONAL)"))
- {
- // Get the end of statement
- int endIndex = sql.lastIndexOf(delimiter);
- if (endIndex > -1)
- {
- sql = sql.substring(0, endIndex);
- execute = true;
- optional = true;
- }
- else
- {
- // Ends with "(optional)" but there is no semi-colon.
- // Just take it at face value and probably fail.
- }
- }
- // Add newline
- if (sb.length() > 0)
- {
- sb.append("\n");
- }
- // Add leading whitespace for formatting
- int whitespaceCount = sqlOriginal.indexOf(sql);
- for (int i = 0; i < whitespaceCount; i++)
- {
- sb.append(" ");
- }
- // append to the statement being built up
- sb.append(sql);
- // execute, if required
- if (execute)
- {
- // Now substitute and execute the statement the appropriate number of times
- String unsubstituted = sb.toString();
- for(int lowerBound = 0; lowerBound <= batchUpperLimit; lowerBound += batchSize)
- {
- sql = unsubstituted;
-
- // Substitute in the next pair of range parameters
- if (doBatch)
- {
- varAssignments.put("LOWERBOUND", String.valueOf(lowerBound));
- varAssignments.put("UPPERBOUND", String.valueOf(lowerBound + batchSize - 1));
- }
-
- // Perform variable replacement using the ${var} format
-                for (Map.Entry<String, Object> entry : varAssignments.entrySet())
- {
- String var = entry.getKey();
- Object val = entry.getValue();
- sql = sql.replaceAll("\\$\\{" + var + "\\}", val.toString());
- }
-
- // Handle the 0/1 values that PostgreSQL doesn't translate to TRUE
- if (this.dialect != null && this.dialect instanceof PostgreSQLDialect)
- {
- sql = sql.replaceAll("\\$\\{TRUE\\}", "TRUE");
- }
- else
- {
- sql = sql.replaceAll("\\$\\{TRUE\\}", "1");
- }
-
- if (this.dialect != null && this.dialect instanceof MySQLInnoDBDialect)
- {
- // note: enable bootstrap on MySQL 5.5 (eg. for auto-generated SQL)
- sql = sql.replaceAll("(?i)TYPE=InnoDB", "ENGINE=InnoDB");
- }
-
- if (this.dialect != null && this.dialect instanceof MySQLClusterNDBDialect)
- {
- // note: enable bootstrap on MySQL Cluster NDB
- /*
- * WARNING: Experimental/unsupported - see MySQLClusterNDBDialect !
- */
- sql = sql.replaceAll("(?i)TYPE=InnoDB", "ENGINE=NDB"); // belts-and-braces
- sql = sql.replaceAll("(?i)ENGINE=InnoDB", "ENGINE=NDB");
-
- sql = sql.replaceAll("(?i) BIT ", " BOOLEAN ");
- sql = sql.replaceAll("(?i) BIT,", " BOOLEAN,");
-
- sql = sql.replaceAll("(?i) string_value text", " string_value VARCHAR("+DEFAULT_MAX_STRING_LENGTH_NDB+")");
-
- sql = sql.replaceAll("(?i) VARCHAR(4000)", "TEXT(4000)");
- }
-
- Object fetchedVal = executeStatement(connection, sql, fetchColumnName, optional, line, scriptFile);
- if (fetchVarName != null && fetchColumnName != null)
- {
- if (fetchedVal != null)
- {
- varAssignments.put(fetchVarName, fetchedVal);
- }
- else
- {
- varAssignments.put(fetchVarName, defaultFetchValue);
- }
- }
- }
- sb.setLength(0);
- fetchVarName = null;
- fetchColumnName = null;
- defaultFetchValue = null;
- doBatch = false;
- batchUpperLimit = 0;
- batchSize = 1;
- }
- }
- }
- finally
- {
- try { reader.close(); } catch (Throwable e) {}
- try { scriptInputStream.close(); } catch (Throwable e) {}
- }
- }
-
- /**
- * Execute the given SQL statement, absorbing exceptions that we expect during
- * schema creation or upgrade.
- *
- * @param fetchColumnName the name of the column value to return
- */
- private Object executeStatement(
- Connection connection,
- String sql,
- String fetchColumnName,
- boolean optional,
- int line,
- File file) throws Exception
- {
- StringBuilder executedStatements = executedStatementsThreadLocal.get();
- if (executedStatements == null)
- {
- throw new IllegalArgumentException("The executedStatementsThreadLocal must be populated");
- }
-
- Statement stmt = connection.createStatement();
- Object ret = null;
- try
- {
- if (logger.isDebugEnabled())
- {
- LogUtil.debug(logger, MSG_EXECUTING_STATEMENT, sql);
- }
- boolean haveResults = stmt.execute(sql);
- // Record the statement
- executedStatements.append(sql).append(";\n\n");
- if (haveResults && fetchColumnName != null)
- {
- ResultSet rs = stmt.getResultSet();
- if (rs.next())
- {
- // Get the result value
- ret = rs.getObject(fetchColumnName);
- }
- }
- }
- catch (SQLException e)
- {
- if (optional)
- {
- // it was marked as optional, so we just ignore it
- LogUtil.debug(logger, MSG_OPTIONAL_STATEMENT_FAILED, sql, e.getMessage(), file.getAbsolutePath(), line);
- }
- else
- {
- LogUtil.error(logger, ERR_STATEMENT_FAILED, sql, e.getMessage(), file.getAbsolutePath(), line);
- throw e;
- }
- }
- finally
- {
- try { stmt.close(); } catch (Throwable e) {}
- }
- return ret;
- }
-
- /**
- * Performs dialect-specific checking. This includes checking for InnoDB, dumping the dialect being used
- * as well as setting any runtime, dialect-specific properties.
- */
- private void checkDialect(Dialect dialect)
- {
-        Class<?> dialectClazz = dialect.getClass();
- LogUtil.info(logger, MSG_DIALECT_USED, dialectClazz.getName());
-// if (dialectClazz.equals(MySQLDialect.class) || dialectClazz.equals(MySQL5Dialect.class))
-// {
-// LogUtil.error(logger, ERR_DIALECT_SHOULD_USE, dialectClazz.getName(), MySQLInnoDBDialect.class.getName());
-// throw AlfrescoRuntimeException.create(WARN_DIALECT_UNSUPPORTED, dialectClazz.getName());
-// }
-// else if (dialectClazz.equals(HSQLDialect.class))
-// {
-// LogUtil.info(logger, WARN_DIALECT_HSQL);
-// }
-// else if (dialectClazz.equals(DerbyDialect.class))
-// {
-// LogUtil.info(logger, WARN_DIALECT_DERBY);
-// }
-// else if (dialectClazz.equals(Oracle9iDialect.class) || dialectClazz.equals(Oracle10gDialect.class))
-// {
-// LogUtil.error(logger, ERR_DIALECT_SHOULD_USE, dialectClazz.getName(), Oracle9Dialect.class.getName());
-// throw AlfrescoRuntimeException.create(WARN_DIALECT_UNSUPPORTED, dialectClazz.getName());
-// }
-// else if (dialectClazz.equals(OracleDialect.class) || dialectClazz.equals(Oracle9Dialect.class))
-// {
-// LogUtil.error(logger, ERR_DIALECT_SHOULD_USE, dialectClazz.getName(), Oracle9Dialect.class.getName());
-// throw AlfrescoRuntimeException.create(WARN_DIALECT_UNSUPPORTED, dialectClazz.getName());
-// }
-
- int maxStringLength = SchemaBootstrap.DEFAULT_MAX_STRING_LENGTH;
- int serializableType = SerializableTypeHandler.getSerializableType();
- // Adjust the maximum allowable String length according to the dialect
- if (dialect instanceof SQLServerDialect)
- {
- // string_value nvarchar(1024) null,
- // serializable_value image null,
- maxStringLength = SchemaBootstrap.DEFAULT_MAX_STRING_LENGTH;
- }
- else if (dialect instanceof MySQLClusterNDBDialect)
- {
- // string_value varchar(400),
- // serializable_value blob,
- maxStringLength = SchemaBootstrap.DEFAULT_MAX_STRING_LENGTH_NDB;
- }
- else if (dialect instanceof MySQLInnoDBDialect)
- {
- // string_value text,
- // serializable_value blob,
- maxStringLength = Integer.MAX_VALUE;
- }
- else if (dialect instanceof Oracle9Dialect)
- {
- // string_value varchar2(1024 char),
- // serializable_value blob,
- maxStringLength = SchemaBootstrap.DEFAULT_MAX_STRING_LENGTH;
- }
- else if (dialect instanceof PostgreSQLDialect)
- {
- // string_value varchar(1024),
- // serializable_value bytea,
- maxStringLength = SchemaBootstrap.DEFAULT_MAX_STRING_LENGTH;
- }
-
- SchemaBootstrap.setMaxStringLength(maxStringLength, dialect);
- SerializableTypeHandler.setSerializableType(serializableType);
-
- // Now override the maximum string length if it was set directly
- if (maximumStringLength > 0)
- {
- SchemaBootstrap.setMaxStringLength(maximumStringLength, dialect);
- }
- }
-
- @Override
- public synchronized void onBootstrap(ApplicationEvent event)
- {
- if (event != null)
- {
- // Use the application context to load resources
- rpr = (ApplicationContext)event.getSource();
- }
-
- // do everything in a transaction
- Connection connection = null;
- try
- {
- // make sure that we AUTO-COMMIT
- connection = dataSource.getConnection();
- connection.setAutoCommit(true);
- LogUtil.info(logger, MSG_DATABASE_USED, connection);
-
- // Check and dump the dialect being used
- checkDialect(this.dialect);
-
- // Update the schema, if required.
- if (updateSchema)
- {
- // Retries are required here as the DB lock will be applied lazily upon first statement execution.
- // So if the schema is up to date (no statements executed) then the LockFailException cannot be
- // thrown. If it is thrown, the the update needs to be rerun as it will probably generate no SQL
- // statements the second time around.
- boolean updatedSchema = false;
- boolean createdSchema = false;
-
- for (int i = 0; i < schemaUpdateLockRetryCount; i++)
- {
- try
- {
- long start = System.currentTimeMillis();
-
- createdSchema = updateSchema(connection);
- updatedSchema = true;
-
- if (logger.isInfoEnabled())
- {
- logger.info(
- (createdSchema ? "Creating" : "Updating") +
- " the DB schema took " + (System.currentTimeMillis() - start) + " ms");
- }
- break;
- }
- catch (LockFailedException e)
- {
- logger.info(
- "The current Alfresco cluster node is waiting for another chance to bootstrap the DB schema. "
- + "Attempt: " + (i + 1) + " of " + schemaUpdateLockRetryCount);
- try { this.wait(schemaUpdateLockRetryWaitSeconds * 1000L); } catch (InterruptedException ee) {}
- }
- }
-
- if (!updatedSchema)
- {
- // The retries were exceeded
- throw new AlfrescoRuntimeException(ERR_UPDATE_IN_PROGRESS_ON_ANOTHER_NODE);
- }
-
- writeLogsWithDBStatementExecuted();
-
- if (executedStatementsThreadLocal.get() != null)
- {
- // Remove the flag indicating a running bootstrap
- setBootstrapCompleted(connection);
- }
- reportNormalizedDumps();
- }
- else
- {
- LogUtil.info(logger, MSG_BYPASSING_SCHEMA_UPDATE);
- }
-
- if (stopAfterSchemaBootstrap)
- {
- // 4.0+ schema dump
- dumpSchema("forced-exit");
- LogUtil.error(logger, ERR_FORCED_STOP);
- throw new BootstrapStopException();
- }
-
- if (event != null)
- {
- // all done successfully
- ((ApplicationContext) event.getSource()).publishEvent(new SchemaAvailableEvent(this));
- }
- }
- catch (BootstrapStopException e)
- {
- // We just let this out
- throw e;
- }
- catch (Throwable e)
- {
- LogUtil.error(logger, e, ERR_UPDATE_FAILED);
- if (updateSchema)
- {
- throw new AlfrescoRuntimeException(ERR_UPDATE_FAILED, e);
- }
- else
- {
- throw new AlfrescoRuntimeException(ERR_VALIDATION_FAILED, e);
- }
- }
- finally
- {
- try
- {
- if (connection != null)
- {
- connection.close();
- }
- }
- catch (Throwable e)
- {
- logger.warn("Error closing DB connection: " + e.getMessage());
- }
- }
- }
-
- private void reportNormalizedDumps()
- {
- if (executedStatementsThreadLocal.get() != null)
- {
- // Validate the schema, post-upgrade
- validateSchema("Alfresco-{0}-Validation-Post-Upgrade-{1}-", null);
- // 4.0+ schema dump
- dumpSchema("post-upgrade");
- }
- }
-
- private void writeLogsWithDBStatementExecuted()
- {
- // Copy the executed statements to the output file
- File schemaOutputFile = null;
- if (schemaOuputFilename != null)
- {
- schemaOutputFile = new File(schemaOuputFilename);
- }
- else
- {
- schemaOutputFile = TempFileProvider.createTempFile(
- "AlfrescoSchema-" + this.dialect.getClass().getSimpleName() + "-All_Statements-",
- ".sql");
- }
-
- StringBuilder executedStatements = executedStatementsThreadLocal.get();
- if (executedStatements == null)
- {
- LogUtil.info(logger, MSG_NO_CHANGES);
- }
- else
- {
- FileContentWriter writer = new FileContentWriter(schemaOutputFile);
- writer.setEncoding("UTF-8");
- String executedStatementsStr = executedStatements.toString();
- writer.putContent(executedStatementsStr);
- LogUtil.info(logger, MSG_ALL_STATEMENTS, schemaOutputFile.getPath());
- }
- }
-
- /**
- * Collate differences and validation problems with the schema with respect to an appropriate
- * reference schema.
- *
- * @param outputFileNameTemplate String
- * @param out PrintWriter
- * @return the number of potential problems found.
- */
- public synchronized int validateSchema(String outputFileNameTemplate, PrintWriter out)
- {
- int totalProblems = 0;
-
- // Discover available reference files (e.g. for prefixes alf_, etc.)
- // and process each in turn.
- for (String schemaReferenceUrl : schemaReferenceUrls)
- {
- Resource referenceResource = getDialectResource(dialect.getClass(), schemaReferenceUrl);
-
- if (referenceResource == null || !referenceResource.exists())
- {
- String resourceUrl = resolveDialectUrl(dialect.getClass(), schemaReferenceUrl);
- LogUtil.debug(logger, DEBUG_SCHEMA_COMP_NO_REF_FILE, resourceUrl);
- }
- else
- {
- // Validate schema against each reference file
- int problems = validateSchema(referenceResource, outputFileNameTemplate, out);
- totalProblems += problems;
- }
- }
-
- // Return number of problems found across all reference files.
- return totalProblems;
- }
-
- private int validateSchema(Resource referenceResource, String outputFileNameTemplate, PrintWriter out)
- {
- try
- {
- return attemptValidateSchema(referenceResource, outputFileNameTemplate, out);
- }
- catch (Throwable e)
- {
- if (logger.isErrorEnabled())
- {
- logger.error("Unable to validate database schema.", e);
- }
- return 0;
- }
- }
-
-    /**
-     * Validates and compares the current DB schema with the schema reference definition specified in the <code>referenceResource</code> parameter.
-     * <p>
-     * The method supports two mechanisms to report validation results:
-     * <ul>
-     * <li>using an external output stream, specified as <code>out</code>;</li>
-     * <li>using a specially created {@link FileOutputStream}, which represents a temporary file with a name formatted in accordance with the <code>outputFileNameTemplate</code> template.</li>
-     * </ul>
-     * It is necessary to take care about freeing resources of the output stream in case of the 1st approach.
-     * <b>N.B.</b>: The method only writes the messages of the report. It doesn't flush and doesn't close the specified output stream!
-     *
-     * @param referenceResource - {@link Resource} instance, which determines the file of the reference schema
-     * @param outputFileNameTemplate - {@link String} value, which determines the template of the temporary filename for the validation report. It can't be <code>null</code>
-     *        if <code>out</code> is <code>null</code>!
-     * @param out - {@link PrintWriter} instance, which represents an external output stream for writing a validation report. This stream is never closed or flushed. It can't be
-     *        <code>null</code> if <code>outputFileNameTemplate</code> is <code>null</code>!
-     * @return {@link Integer} value, which determines the number of errors or warnings that were detected during validation
-     */
- private int attemptValidateSchema(Resource referenceResource, String outputFileNameTemplate, PrintWriter out)
- {
- Date startTime = new Date();
-
- InputStream is = null;
- try
- {
- is = new BufferedInputStream(referenceResource.getInputStream());
- }
- catch (IOException e)
- {
- throw new RuntimeException("Unable to open schema reference file: " + referenceResource);
- }
-
- XMLToSchema xmlToSchema = new XMLToSchema(is);
- xmlToSchema.parse();
- Schema reference = xmlToSchema.getSchema();
- ExportDb exporter = new ExportDb(dataSource, dialect, descriptorService, databaseMetaDataHelper);
- exporter.setDbSchemaName(dbSchemaName);
- // Ensure that the database objects we're validating are filtered
- // by the same prefix as the reference file.
- exporter.setNamePrefix(reference.getDbPrefix());
- exporter.execute();
- Schema target = exporter.getSchema();
-
- SchemaComparator schemaComparator = new SchemaComparator(reference, target, dialect);
-
- schemaComparator.validateAndCompare();
-
- Results results = schemaComparator.getComparisonResults();
-
- Object[] outputFileNameParams = new Object[]
- {
- dialect.getClass().getSimpleName(),
- reference.getDbPrefix()
- };
- PrintWriter pw = null;
- File outputFile = null;
-
- try
- {
- if (out == null)
- {
- String outputFileName = MessageFormat.format(outputFileNameTemplate, outputFileNameParams);
-
- outputFile = TempFileProvider.createTempFile(outputFileName, ".txt");
-
- try
- {
- pw = new PrintWriter(outputFile, SchemaComparator.CHAR_SET);
- }
- catch (FileNotFoundException error)
- {
- throw new RuntimeException("Unable to open file for writing: " + outputFile);
- }
- catch (UnsupportedEncodingException error)
- {
- throw new RuntimeException("Unsupported char set: " + SchemaComparator.CHAR_SET, error);
- }
- }
- else
- {
- pw = out;
- }
-
- // Populate the file with details of the comparison's results.
- for (Result result : results)
- {
- pw.print(result.describe());
- pw.print(SchemaComparator.LINE_SEPARATOR);
- }
- }
- finally
- {
- // We care only about output streams for reporting, which are created specially for current reference resource...
- if (null == out)
- {
- pw.close();
- }
- }
-
-
- if (results.size() == 0)
- {
- LogUtil.info(logger, INFO_SCHEMA_COMP_ALL_OK, referenceResource);
- }
- else
- {
- int numProblems = results.size();
- if (outputFile == null)
- {
- LogUtil.warn(logger, WARN_SCHEMA_COMP_PROBLEMS_FOUND_NO_FILE, numProblems);
- }
- else
- {
- LogUtil.warn(logger, WARN_SCHEMA_COMP_PROBLEMS_FOUND, numProblems, outputFile);
- }
- }
- Date endTime = new Date();
- long durationMillis = endTime.getTime() - startTime.getTime();
- LogUtil.debug(logger, DEBUG_SCHEMA_COMP_TIME_TAKEN, durationMillis);
-
- return results.size();
- }
-
- /**
- * Produces schema dump in XML format: this is performed pre- and post-upgrade (i.e. if
- * changes are made to the schema) and can made upon demand via JMX.
- *
- * @return List of output files.
- */
-    public List<File> dumpSchema()
- {
- return dumpSchema("", null);
- }
-
- /**
- * Produces schema dump in XML format: this is performed pre- and post-upgrade (i.e. if
- * changes are made to the schema) and can made upon demand via JMX.
- *
- * @param dbPrefixes Array of database object prefixes to produce the dump for, e.g. "alf_".
- * @return List of output files.
- */
-    public List<File> dumpSchema(String[] dbPrefixes)
- {
- return dumpSchema("", dbPrefixes);
- }
-
- /**
- * Dumps the DB schema to temporary file(s), named similarly to:
- *
- * Alfresco-schema-DialectName-whenDumped-dbPrefix-23498732.xml
- *
- * Where the digits serve to create a unique temp file name. If whenDumped is empty or null,
- * then the output is similar to:
- *
- * Alfresco-schema-DialectName-dbPrefix-23498732.xml
- *
- * If dbPrefixes is null, then the default list is used (see {@link MultiFileDumper#DEFAULT_PREFIXES})
- * The dump files' paths are logged at info level.
- *
- * @param whenDumped String
- * @param dbPrefixes Array of database object prefixes to filter by, e.g. "alf_"
- * @return List of output files.
- */
-    private List<File> dumpSchema(String whenDumped, String[] dbPrefixes)
- {
- // Build a string to use as the file name template,
- // e.g. "Alfresco-schema-MySQLDialect-pre-upgrade-{0}-"
- StringBuilder sb = new StringBuilder(64);
- sb.append("Alfresco-schema-").
- append(dialect.getClass().getSimpleName());
- if (whenDumped != null && whenDumped.length() > 0)
- {
- sb.append("-");
- sb.append(whenDumped);
- }
- sb.append("-{0}-");
-
- File outputDir = TempFileProvider.getTempDir();
- String fileNameTemplate = sb.toString();
- return dumpSchema(outputDir, fileNameTemplate, dbPrefixes);
- }
-
- /**
- * Same as for {@link #dumpSchema(String, String[])} - except that the default list
- * of database object prefixes is used for filtering.
- *
- * @see #dumpSchema(String, String[])
- * @param whenDumped String
-     * @return List of output files.
-     */
-    private List<File> dumpSchema(String whenDumped)
- {
- return dumpSchema(whenDumped, null);
- }
-
-    private List<File> dumpSchema(File outputDir, String fileNameTemplate, String[] dbPrefixes)
- {
- try
- {
- return attemptDumpSchema(outputDir, fileNameTemplate, dbPrefixes);
- }
- catch (Throwable e)
- {
- if (logger.isErrorEnabled())
- {
- logger.error("Unable to dump schema to directory " + outputDir, e);
- }
- return null;
- }
- }
-
-    private List<File> attemptDumpSchema(File outputDir, String fileNameTemplate, String[] dbPrefixes)
- {
- DbToXMLFactory dbToXMLFactory = new MultiFileDumper.DbToXMLFactoryImpl(getApplicationContext());
-
- MultiFileDumper dumper;
-
- if (dbPrefixes == null)
- {
- dumper = new MultiFileDumper(outputDir, fileNameTemplate, dbToXMLFactory, dbSchemaName);
- }
- else
- {
- dumper = new MultiFileDumper(dbPrefixes, outputDir, fileNameTemplate, dbToXMLFactory, dbSchemaName);
- }
-        List<File> files = dumper.dumpFiles();
-
- for (File file : files)
- {
- if (logger.isInfoEnabled())
- {
- LogUtil.info(logger, MSG_NORMALIZED_SCHEMA, file.getAbsolutePath());
- }
- }
-
- return files;
- }
-
- @Override
- protected void onShutdown(ApplicationEvent event)
- {
- }
-
- /**
- * A {@link Connection} wrapper that delegates all calls to the wrapped class
- * except for the close methods, which are ignored.
- *
- * @author Frederik Heremans
- */
- public class UnclosableConnection implements Connection
- {
- private Connection wrapped;
-
- public UnclosableConnection(Connection wrappedConnection)
- {
- this.wrapped = wrappedConnection;
- }
-
- @Override
-        public boolean isWrapperFor(Class<?> iface) throws SQLException
- {
- return wrapped.isWrapperFor(iface);
- }
-
- @Override
-        public <T> T unwrap(Class<T> iface) throws SQLException
- {
- return wrapped.unwrap(iface);
- }
-
- @Override
- public void clearWarnings() throws SQLException
- {
- wrapped.clearWarnings();
- }
-
- @Override
- public void close() throws SQLException
- {
- // Ignore this call
- }
-
- @Override
- public void commit() throws SQLException
- {
- wrapped.commit();
- }
-
- @Override
- public Array createArrayOf(String typeName, Object[] elements) throws SQLException
- {
- return wrapped.createArrayOf(typeName, elements);
- }
-
- @Override
- public Blob createBlob() throws SQLException
- {
- return wrapped.createBlob();
- }
-
- @Override
- public Clob createClob() throws SQLException
- {
- return wrapped.createClob();
- }
-
- @Override
- public NClob createNClob() throws SQLException
- {
- return wrapped.createNClob();
- }
-
- @Override
- public SQLXML createSQLXML() throws SQLException
- {
- return wrapped.createSQLXML();
- }
-
- @Override
- public Statement createStatement() throws SQLException
- {
- return wrapped.createStatement();
- }
-
- @Override
- public Statement createStatement(int resultSetType, int resultSetConcurrency)
- throws SQLException
- {
- return wrapped.createStatement(resultSetType, resultSetConcurrency);
- }
-
- @Override
- public Statement createStatement(int resultSetType, int resultSetConcurrency,
- int resultSetHoldability) throws SQLException
- {
- return wrapped.createStatement(resultSetType, resultSetConcurrency, resultSetHoldability);
- }
-
- @Override
- public Struct createStruct(String typeName, Object[] attributes) throws SQLException
- {
- return wrapped.createStruct(typeName, attributes);
- }
-
- @Override
- public boolean getAutoCommit() throws SQLException
- {
- return wrapped.getAutoCommit();
- }
-
- @Override
- public String getCatalog() throws SQLException
- {
- return wrapped.getCatalog();
- }
-
- @Override
- public Properties getClientInfo() throws SQLException
- {
- return wrapped.getClientInfo();
- }
-
- @Override
- public String getClientInfo(String name) throws SQLException
- {
- return wrapped.getClientInfo(name);
- }
-
- @Override
- public int getHoldability() throws SQLException
- {
- return wrapped.getHoldability();
- }
-
- @Override
- public DatabaseMetaData getMetaData() throws SQLException
- {
- return wrapped.getMetaData();
- }
-
- @Override
- public int getTransactionIsolation() throws SQLException
- {
- return wrapped.getTransactionIsolation();
- }
-
- @Override
-        public Map<String, Class<?>> getTypeMap() throws SQLException
- {
- return wrapped.getTypeMap();
- }
-
- @Override
- public SQLWarning getWarnings() throws SQLException
- {
- return wrapped.getWarnings();
- }
-
- @Override
- public boolean isClosed() throws SQLException
- {
- return wrapped.isClosed();
- }
-
- @Override
- public boolean isReadOnly() throws SQLException
- {
- return wrapped.isReadOnly();
- }
-
- @Override
- public boolean isValid(int timeout) throws SQLException
- {
- return wrapped.isValid(timeout);
- }
-
- @Override
- public String nativeSQL(String sql) throws SQLException
- {
- return wrapped.nativeSQL(sql);
- }
-
- @Override
- public CallableStatement prepareCall(String sql) throws SQLException
- {
- return wrapped.prepareCall(sql);
- }
-
- @Override
- public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency)
- throws SQLException
- {
- return wrapped.prepareCall(sql, resultSetType, resultSetConcurrency);
- }
-
- @Override
- public CallableStatement prepareCall(String sql, int resultSetType,
- int resultSetConcurrency, int resultSetHoldability) throws SQLException
- {
- return wrapped.prepareCall(sql, resultSetType, resultSetConcurrency, resultSetHoldability);
- }
-
- @Override
- public PreparedStatement prepareStatement(String sql) throws SQLException
- {
- return wrapped.prepareStatement(sql);
- }
-
- @Override
- public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys)
- throws SQLException
- {
- return wrapped.prepareStatement(sql, autoGeneratedKeys);
- }
-
- @Override
- public PreparedStatement prepareStatement(String sql, int[] columnIndexes)
- throws SQLException
- {
- return wrapped.prepareStatement(sql, columnIndexes);
- }
-
- @Override
- public PreparedStatement prepareStatement(String sql, String[] columnNames)
- throws SQLException
- {
- return wrapped.prepareStatement(sql, columnNames);
- }
-
- @Override
- public PreparedStatement prepareStatement(String sql, int resultSetType,
- int resultSetConcurrency) throws SQLException
- {
- return wrapped.prepareStatement(sql, resultSetType, resultSetConcurrency);
- }
-
- @Override
- public PreparedStatement prepareStatement(String sql, int resultSetType,
- int resultSetConcurrency, int resultSetHoldability) throws SQLException
- {
- return wrapped.prepareStatement(sql, resultSetType, resultSetConcurrency, resultSetHoldability);
- }
-
- @Override
- public void releaseSavepoint(Savepoint savepoint) throws SQLException
- {
- wrapped.releaseSavepoint(savepoint);
- }
-
- @Override
- public void rollback() throws SQLException
- {
- wrapped.rollback();
- }
-
- @Override
- public void rollback(Savepoint savepoint) throws SQLException
- {
- wrapped.rollback(savepoint);
- }
-
- @Override
- public void setAutoCommit(boolean autoCommit) throws SQLException
- {
- wrapped.setAutoCommit(autoCommit);
- }
-
- @Override
- public void setCatalog(String catalog) throws SQLException
- {
- wrapped.setCatalog(catalog);
- }
-
- @Override
- public void setClientInfo(Properties properties) throws SQLClientInfoException
- {
- wrapped.setClientInfo(properties);
- }
-
- @Override
- public void setClientInfo(String name, String value) throws SQLClientInfoException
- {
- wrapped.setClientInfo(name, value);
- }
-
- @Override
- public void setHoldability(int holdability) throws SQLException
- {
- wrapped.setHoldability(holdability);
- }
-
- @Override
- public void setReadOnly(boolean readOnly) throws SQLException
- {
- wrapped.setReadOnly(readOnly);
- }
-
- @Override
- public Savepoint setSavepoint() throws SQLException
- {
- return wrapped.setSavepoint();
- }
-
- @Override
- public Savepoint setSavepoint(String name) throws SQLException
- {
- return wrapped.setSavepoint(name);
- }
-
- @Override
- public void setTransactionIsolation(int level) throws SQLException
- {
- wrapped.setTransactionIsolation(level);
- }
-
- @Override
-        public void setTypeMap(Map<String, Class<?>> map) throws SQLException
- {
- wrapped.setTypeMap(map);
- }
-
- @Override
- public void setSchema(String schema) throws SQLException
- {
- wrapped.setSchema(schema);
- }
-
- @Override
- public String getSchema() throws SQLException
- {
- return wrapped.getSchema();
- }
-
- @Override
- public void abort(Executor executor) throws SQLException
- {
- wrapped.abort(executor);
- }
-
- @Override
- public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException
- {
- wrapped.setNetworkTimeout(executor, milliseconds);
- }
-
- @Override
- public int getNetworkTimeout() throws SQLException
- {
- return wrapped.getNetworkTimeout();
- }
- }
-}
+/*
+ * #%L
+ * Alfresco Repository
+ * %%
+ * Copyright (C) 2005 - 2016 Alfresco Software Limited
+ * %%
+ * This file is part of the Alfresco software.
+ * If the software was purchased under a paid Alfresco license, the terms of
+ * the paid license agreement will prevail. Otherwise, the software is
+ * provided under the following open source license terms:
+ *
+ * Alfresco is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Alfresco is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
+ * #L%
+ */
+package org.alfresco.repo.domain.schema;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.PrintWriter;
+import java.io.UnsupportedEncodingException;
+import java.sql.Array;
+import java.sql.Blob;
+import java.sql.CallableStatement;
+import java.sql.Clob;
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.NClob;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLClientInfoException;
+import java.sql.SQLException;
+import java.sql.SQLWarning;
+import java.sql.SQLXML;
+import java.sql.Savepoint;
+import java.sql.Statement;
+import java.sql.Struct;
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.Executor;
+
+import javax.sql.DataSource;
+
+import org.activiti.engine.ProcessEngine;
+import org.activiti.engine.ProcessEngineConfiguration;
+import org.alfresco.error.AlfrescoRuntimeException;
+import org.alfresco.ibatis.SerializableTypeHandler;
+import org.alfresco.repo.admin.patch.AppliedPatch;
+import org.alfresco.repo.admin.patch.Patch;
+import org.alfresco.repo.admin.patch.impl.SchemaUpgradeScriptPatch;
+import org.alfresco.repo.content.filestore.FileContentWriter;
+import org.alfresco.repo.domain.dialect.MySQLClusterNDBDialect;
+import org.alfresco.repo.domain.dialect.Oracle9Dialect;
+import org.alfresco.repo.domain.dialect.SQLServerDialect;
+import org.alfresco.repo.domain.dialect.Dialect;
+import org.alfresco.repo.domain.dialect.MySQLInnoDBDialect;
+import org.alfresco.repo.domain.dialect.PostgreSQLDialect;
+import org.alfresco.repo.domain.patch.AppliedPatchDAO;
+import org.alfresco.service.cmr.repository.ContentWriter;
+import org.alfresco.service.descriptor.DescriptorService;
+import org.alfresco.util.DatabaseMetaDataHelper;
+import org.alfresco.util.DialectUtil;
+import org.alfresco.util.LogUtil;
+import org.alfresco.util.PropertyCheck;
+import org.alfresco.util.TempFileProvider;
+import org.alfresco.util.schemacomp.ExportDb;
+import org.alfresco.util.schemacomp.MultiFileDumper;
+import org.alfresco.util.schemacomp.MultiFileDumper.DbToXMLFactory;
+import org.alfresco.util.schemacomp.Result;
+import org.alfresco.util.schemacomp.Results;
+import org.alfresco.util.schemacomp.SchemaComparator;
+import org.alfresco.util.schemacomp.XMLToSchema;
+import org.alfresco.util.schemacomp.model.Schema;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.springframework.context.ApplicationContext;
+import org.springframework.context.ApplicationEvent;
+import org.springframework.core.io.Resource;
+import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
+import org.springframework.core.io.support.ResourcePatternResolver;
+import org.springframework.extensions.surf.util.AbstractLifecycleBean;
+import org.springframework.extensions.surf.util.I18NUtil;
+
+/**
+ * Bootstraps the schema and schema update. The schema is considered missing if the applied patch table
+ * is not present, and the schema is considered empty if the applied patch table is empty.
+ *
+ * @author Derek Hulley
+ */
+public class SchemaBootstrap extends AbstractLifecycleBean
+{
+    /** The global property containing the default batch size used by --FOREACH */
+    private static final String PROPERTY_DEFAULT_BATCH_SIZE = "system.upgrade.default.batchsize";
+
+    // I18N keys for informational progress messages emitted during bootstrap.
+    private static final String MSG_DIALECT_USED = "schema.update.msg.dialect_used";
+    private static final String MSG_DATABASE_USED = "schema.update.msg.database_used";
+    private static final String MSG_BYPASSING_SCHEMA_UPDATE = "schema.update.msg.bypassing";
+    private static final String MSG_NORMALIZED_SCHEMA = "schema.update.msg.normalized_schema";
+    private static final String MSG_NO_CHANGES = "schema.update.msg.no_changes";
+    private static final String MSG_ALL_STATEMENTS = "schema.update.msg.all_statements";
+    private static final String MSG_EXECUTING_GENERATED_SCRIPT = "schema.update.msg.executing_generated_script";
+    private static final String MSG_EXECUTING_COPIED_SCRIPT = "schema.update.msg.executing_copied_script";
+    private static final String MSG_EXECUTING_STATEMENT = "schema.update.msg.executing_statement";
+    private static final String MSG_OPTIONAL_STATEMENT_FAILED = "schema.update.msg.optional_statement_failed";
+    // I18N keys for errors raised while updating or validating the schema.
+    private static final String ERR_FORCED_STOP = "schema.update.err.forced_stop";
+    private static final String ERR_MULTIPLE_SCHEMAS = "schema.update.err.found_multiple";
+    private static final String ERR_PREVIOUS_FAILED_BOOTSTRAP = "schema.update.err.previous_failed";
+    private static final String ERR_UPDATE_IN_PROGRESS_ON_ANOTHER_NODE = "schema.update.err.upgrade_in_progress_on_another_node";
+    private static final String ERR_STATEMENT_FAILED = "schema.update.err.statement_failed";
+    private static final String ERR_UPDATE_FAILED = "schema.update.err.update_failed";
+    private static final String ERR_VALIDATION_FAILED = "schema.update.err.validation_failed";
+    private static final String ERR_SCRIPT_NOT_RUN = "schema.update.err.update_script_not_run";
+    private static final String ERR_SCRIPT_NOT_FOUND = "schema.update.err.script_not_found";
+    // I18N keys for errors in SQL script parsing (statement terminators, variables, delimiters).
+    private static final String ERR_STATEMENT_INCLUDE_BEFORE_SQL = "schema.update.err.statement_include_before_sql";
+    private static final String ERR_STATEMENT_VAR_ASSIGNMENT_BEFORE_SQL = "schema.update.err.statement_var_assignment_before_sql";
+    private static final String ERR_STATEMENT_VAR_ASSIGNMENT_FORMAT = "schema.update.err.statement_var_assignment_format";
+    private static final String ERR_STATEMENT_TERMINATOR = "schema.update.err.statement_terminator";
+    private static final String ERR_DELIMITER_SET_BEFORE_SQL = "schema.update.err.delimiter_set_before_sql";
+    private static final String ERR_DELIMITER_INVALID = "schema.update.err.delimiter_invalid";
+    // I18N keys used when comparing the live schema against the reference files.
+    private static final String DEBUG_SCHEMA_COMP_NO_REF_FILE = "system.schema_comp.debug.no_ref_file";
+    private static final String INFO_SCHEMA_COMP_ALL_OK = "system.schema_comp.info.all_ok";
+    private static final String WARN_SCHEMA_COMP_PROBLEMS_FOUND = "system.schema_comp.warn.problems_found";
+    private static final String WARN_SCHEMA_COMP_PROBLEMS_FOUND_NO_FILE = "system.schema_comp.warn.problems_found_no_file";
+    private static final String DEBUG_SCHEMA_COMP_TIME_TAKEN = "system.schema_comp.debug.time_taken";
+
+    // Default retry behaviour while waiting for the bootstrap lock table to clear.
+    public static final int DEFAULT_LOCK_RETRY_COUNT = 24;
+    public static final int DEFAULT_LOCK_RETRY_WAIT_SECONDS = 5;
+
+    // Default maximum length of string_value columns; MySQL Cluster NDB has a smaller limit.
+    public static final int DEFAULT_MAX_STRING_LENGTH = 1024;
+    public static final int DEFAULT_MAX_STRING_LENGTH_NDB = 400;
+
+    // Effective process-wide maximum string length; mutated via setMaxStringLength.
+    private static volatile int maxStringLength = DEFAULT_MAX_STRING_LENGTH;
+    // Auto-detected database dialect, injected via setDialect.
+    private Dialect dialect;
+
+    // Resolver used to locate dialect-specific script resources on the classpath.
+    private ResourcePatternResolver rpr = new PathMatchingResourcePatternResolver(this.getClass().getClassLoader());
+
+    /**
+     * Sets the process-wide maximum string length for string_value columns.
+     * Values below the dialect's floor are rejected.
+     *
+     * @param length  the new maximum; must be at least the dialect's default
+     * @param dialect the active database dialect (MySQL Cluster NDB has a lower floor)
+     * @throws AlfrescoRuntimeException if the requested length is below the dialect minimum
+     * @see #DEFAULT_MAX_STRING_LENGTH
+     */
+    public static final void setMaxStringLength(int length, Dialect dialect)
+    {
+        // NDB rows are more constrained, so its floor is lower.
+        int max = (dialect instanceof MySQLClusterNDBDialect ? DEFAULT_MAX_STRING_LENGTH_NDB : DEFAULT_MAX_STRING_LENGTH);
+
+        if (length < max)
+        {
+            // Message grammar fixed ("must >=" -> "must be >=")
+            throw new AlfrescoRuntimeException("The maximum string length must be >= " + max + " characters.");
+        }
+        SchemaBootstrap.maxStringLength = length;
+    }
+
+    /**
+     * @return the process-wide maximum number of characters a string field may hold
+     */
+    public static final int getMaxStringLength()
+    {
+        return maxStringLength;
+    }
+
+ /**
+ * Truncates or returns a string that will fit into the string columns in the schema. Text fields can
+ * either cope with arbitrarily long text fields or have the default limit, {@link #DEFAULT_MAX_STRING_LENGTH}.
+ *
+ * @param value the string to check
+ * @return Returns a string that is short enough for {@link SchemaBootstrap#getMaxStringLength()}
+ *
+ * @since 3.2
+ */
+ public static final String trimStringForTextFields(String value)
+ {
+ if (value != null && value.length() > maxStringLength)
+ {
+ return value.substring(0, maxStringLength);
+ }
+ else
+ {
+ return value;
+ }
+ }
+
+
+    /**
+     * Provide a reference to the DescriptorService, used to provide information
+     * about the repository such as the database schema version number.
+     *
+     * @param descriptorService the descriptorService to set; injected by Spring
+     */
+    public void setDescriptorService(DescriptorService descriptorService)
+    {
+        this.descriptorService = descriptorService;
+    }
+
+
+    /**
+     * Defines the DatabaseMetaDataHelper to be used when resolving the current
+     * schema name from a live connection.
+     *
+     * @param databaseMetaDataHelper the helper to use; injected by Spring
+     */
+    public void setDatabaseMetaDataHelper(DatabaseMetaDataHelper databaseMetaDataHelper)
+    {
+        this.databaseMetaDataHelper = databaseMetaDataHelper;
+    }
+
+    /**
+     * Sets the previously auto-detected database dialect. All script resolution
+     * and string-length decisions key off this value.
+     *
+     * @param dialect the dialect; injected by Spring
+     */
+    public void setDialect(Dialect dialect)
+    {
+        this.dialect = dialect;
+    }
+
+    private static Log logger = LogFactory.getLog(SchemaBootstrap.class);
+
+    private DescriptorService descriptorService;
+    private DataSource dataSource;
+    private AppliedPatchDAO appliedPatchDAO;
+    // NOTE: retains the original "Ouput" misspelling because the public setter
+    // (a Spring-configured property) depends on the name.
+    private String schemaOuputFilename;
+    private boolean updateSchema;
+    private boolean stopAfterSchemaBootstrap;
+    // Generic types restored (stripped in transit): the enhanced-for loops over
+    // these collections elsewhere in this class require typed elements to compile.
+    private List<String> preCreateScriptUrls;
+    private List<String> postCreateScriptUrls;
+    private List<String> schemaReferenceUrls;
+    private List<SchemaUpgradeScriptPatch> preUpdateScriptPatches;
+    private List<SchemaUpgradeScriptPatch> postUpdateScriptPatches;
+    private List<SchemaUpgradeScriptPatch> updateActivitiScriptPatches;
+    private int schemaUpdateLockRetryCount = DEFAULT_LOCK_RETRY_COUNT;
+    private int schemaUpdateLockRetryWaitSeconds = DEFAULT_LOCK_RETRY_WAIT_SECONDS;
+    private int maximumStringLength;
+    private Properties globalProperties;
+    private String dbSchemaName;
+    private DatabaseMetaDataHelper databaseMetaDataHelper;
+
+    // Non-null only on the thread that is performing the bootstrap; used as a
+    // "this node owns the upgrade" marker. Element type is not visible in this
+    // chunk, so the declaration is deliberately left raw — TODO confirm.
+    private ThreadLocal executedStatementsThreadLocal = new ThreadLocal();
+
+    /**
+     * Default constructor. Initialises empty script/patch registries and sets the
+     * maximum string length to the sentinel -1, meaning "use the dialect default".
+     */
+    public SchemaBootstrap()
+    {
+        preCreateScriptUrls = new ArrayList<>(1);
+        postCreateScriptUrls = new ArrayList<>(1);
+        preUpdateScriptPatches = new ArrayList<>(4);
+        postUpdateScriptPatches = new ArrayList<>(4);
+        updateActivitiScriptPatches = new ArrayList<>(4);
+        maximumStringLength = -1;
+        globalProperties = new Properties();
+    }
+
+    /**
+     * Set the data source against which all schema checks and updates run.
+     *
+     * @param dataSource the repository data source; injected by Spring
+     */
+    public void setDataSource(DataSource dataSource)
+    {
+        this.dataSource = dataSource;
+    }
+
+    /**
+     * Set the DAO used to record applied patches (e.g. Activiti bootstrap placeholders).
+     *
+     * @param appliedPatchDAO the DAO; injected by Spring
+     */
+    public void setAppliedPatchDAO(AppliedPatchDAO appliedPatchDAO)
+    {
+        this.appliedPatchDAO = appliedPatchDAO;
+    }
+
+    /**
+     * Set this to output the full database creation script.
+     *
+     * NOTE(review): the method name misspells "Output" but cannot be corrected —
+     * it is a Spring bean property referenced by name in configuration.
+     *
+     * @param schemaOuputFilename the name of a file to dump the schema to, or null to ignore
+     */
+    public void setSchemaOuputFilename(String schemaOuputFilename)
+    {
+        this.schemaOuputFilename = schemaOuputFilename;
+    }
+
+    /**
+     * Set whether to modify the schema or not. Either way, the schema will be validated.
+     *
+     * @param updateSchema true to update and validate the schema, otherwise false to just
+     *      validate the schema. Default is true.
+     */
+    public void setUpdateSchema(boolean updateSchema)
+    {
+        this.updateSchema = updateSchema;
+    }
+
+    /**
+     * Set whether this component should terminate the bootstrap process after running all the
+     * usual checks and scripts. This has the additional effect of dumping a final schema
+     * structure file just before exiting.
+     *
+     * WARNING: USE FOR DEBUG AND UPGRADE TESTING ONLY — termination is by exception.
+     *
+     * @param stopAfterSchemaBootstrap true to terminate (with exception) after
+     *      running all the usual schema updates and checks
+     */
+    public void setStopAfterSchemaBootstrap(boolean stopAfterSchemaBootstrap)
+    {
+        this.stopAfterSchemaBootstrap = stopAfterSchemaBootstrap;
+    }
+
+    /**
+     * Specifies the schema reference files that will be used to validate the repository
+     * schema whenever changes have been made. The database dialect placeholder will be
+     * resolved so that the correct reference files are loaded for the current database
+     * type (e.g. PostgreSQL).
+     *
+     * @param schemaReferenceUrls the reference resource URLs to set
+     */
+    public void setSchemaReferenceUrls(List<String> schemaReferenceUrls)
+    {
+        this.schemaReferenceUrls = schemaReferenceUrls;
+    }
+
+    /**
+     * Set the number of times that the DB must be checked for the presence of the table
+     * indicating that a schema change is in progress.
+     *
+     * @param schemaUpdateLockRetryCount the number of times to retry (default 24)
+     */
+    public void setSchemaUpdateLockRetryCount(int schemaUpdateLockRetryCount)
+    {
+        this.schemaUpdateLockRetryCount = schemaUpdateLockRetryCount;
+    }
+
+    /**
+     * Set the wait time (seconds) between checks for the schema update lock.
+     *
+     * @param schemaUpdateLockRetryWaitSeconds the number of seconds between checks (default 5 seconds)
+     */
+    public void setSchemaUpdateLockRetryWaitSeconds(int schemaUpdateLockRetryWaitSeconds)
+    {
+        this.schemaUpdateLockRetryWaitSeconds = schemaUpdateLockRetryWaitSeconds;
+    }
+
+    /**
+     * Optionally override the system's default maximum string length. Some databases
+     * limit how long the string_value columns can be while others do not. It is
+     * possible to adjust the text column sizes and set this value manually to
+     * override the default associated with the database being used.
+     *
+     * The system - as of V2.1.2 - will attempt to adjust the maximum string length size
+     * automatically, so this method is not normally required; it exists for cases where
+     * the system guesses the wrong maximum or the dialect is not explicitly catered for.
+     *
+     * All negative or zero values are ignored and the system defaults to its best guess
+     * based on the dialect being used.
+     *
+     * @param maximumStringLength the maximum length of the string_value columns
+     */
+    public void setMaximumStringLength(int maximumStringLength)
+    {
+        // Guard clause: non-positive values are silently ignored (sentinel stays -1)
+        if (maximumStringLength <= 0)
+        {
+            return;
+        }
+        this.maximumStringLength = maximumStringLength;
+    }
+
+    /**
+     * Set db.schema.name to be used. Invalid/placeholder property strings are
+     * ignored, leaving the schema name to be resolved from the connection.
+     *
+     * @param dbSchemaName the explicit schema name, or an unset property placeholder
+     */
+    public void setDbSchemaName(String dbSchemaName)
+    {
+        if (PropertyCheck.isValidPropertyString(dbSchemaName))
+        {
+            this.dbSchemaName = dbSchemaName;
+        }
+    }
+
+    /**
+     * Sets the properties map from which we look up some configuration settings
+     * (e.g. the default --FOREACH batch size).
+     *
+     * @param globalProperties the global properties
+     */
+    public void setGlobalProperties(Properties globalProperties)
+    {
+        this.globalProperties = globalProperties;
+    }
+
+    /**
+     * Register a new script for execution when creating a clean schema. Scripts run
+     * in registration order.
+     *
+     * @param preCreateScriptUrl the script URL, possibly containing the ${db.script.dialect} placeholder
+     */
+    public void addPreCreateScriptUrl(String preCreateScriptUrl)
+    {
+        if (logger.isDebugEnabled())
+        {
+            logger.debug("Registered create script URL (pre-Hibernate): " + preCreateScriptUrl);
+        }
+        preCreateScriptUrls.add(preCreateScriptUrl);
+    }
+
+    /**
+     * Register a new script for execution after the Hibernate schema creation phase.
+     * Scripts run in registration order.
+     *
+     * @param postCreateScriptUrl the script URL, possibly containing the ${db.script.dialect} placeholder
+     */
+    public void addPostCreateScriptUrl(String postCreateScriptUrl)
+    {
+        // Parameter renamed from "postUpdateScriptUrl": this method registers
+        // post-CREATE scripts, not post-update patches.
+        if (logger.isDebugEnabled())
+        {
+            logger.debug("Registered create script URL (post-Hibernate): " + postCreateScriptUrl);
+        }
+        this.postCreateScriptUrls.add(postCreateScriptUrl);
+    }
+
+ /**
+ * Register a new SQL-based patch for consideration against the instance (before Hibernate execution)
+ *
+ * @param scriptPatch the patch that will be examined for execution
+ */
+ public void addPreUpdateScriptPatch(SchemaUpgradeScriptPatch scriptPatch)
+ {
+ if(false == scriptPatch.isIgnored())
+ {
+ if (logger.isDebugEnabled())
+ {
+ logger.debug("Registered script patch (pre-Hibernate): " + scriptPatch.getId());
+ }
+ this.preUpdateScriptPatches.add(scriptPatch);
+ }
+ else
+ {
+ logger.info("Ignoring script patch (pre-Hibernate): " + scriptPatch.getId());
+ }
+
+ }
+
+ /**
+ * Register a new SQL-based patch for consideration against the instance (after Hibernate execution)
+ *
+ * @param scriptPatch the patch that will be examined for execution
+ */
+ public void addPostUpdateScriptPatch(SchemaUpgradeScriptPatch scriptPatch)
+ {
+ if(false == scriptPatch.isIgnored())
+ {
+ if (logger.isDebugEnabled())
+ {
+ logger.debug("Registered script patch (post-Hibernate): " + scriptPatch.getId());
+ }
+ this.postUpdateScriptPatches.add(scriptPatch);
+ }
+ else
+ {
+ logger.info("Ignoring script patch (post-Hibernate): " + scriptPatch.getId());
+ }
+ }
+
+ /**
+ * Register a new SQL-based patch for consideration against the Activiti instance
+ *
+ * @param scriptPatch the patch that will be examined for execution
+ */
+ public void addUpdateActivitiScriptPatch(SchemaUpgradeScriptPatch scriptPatch)
+ {
+ if(false == scriptPatch.isIgnored())
+ {
+ if (logger.isDebugEnabled())
+ {
+ logger.debug("Registered Activiti script patch: " + scriptPatch.getId());
+ }
+ this.updateActivitiScriptPatches.add(scriptPatch);
+ }
+ else
+ {
+ logger.info("Ignoring Activiti script patch: " + scriptPatch.getId());
+ }
+
+ }
+
+    /**
+     * Thrown by {@link #countAppliedPatches(Connection)} when no applied-patch table
+     * exists, i.e. the schema must be created from scratch.
+     */
+    private static class NoSchemaException extends Exception
+    {
+        private static final long serialVersionUID = 5574280159910824660L;
+    }
+
+    /**
+     * Used to indicate a forced stop of the bootstrap. Unchecked so that it can
+     * propagate out of the lifecycle callbacks.
+     *
+     * @see SchemaBootstrap#setStopAfterSchemaBootstrap(boolean)
+     *
+     * @author Derek Hulley
+     * @since 3.1.1
+     */
+    private static class BootstrapStopException extends RuntimeException
+    {
+        private static final long serialVersionUID = 4250016675538442181L;
+        private BootstrapStopException()
+        {
+            super(I18NUtil.getMessage(ERR_FORCED_STOP));
+        }
+    }
+
+    /**
+     * Count applied patches. This fails if multiple applied patch tables are found,
+     * which normally indicates that the schema view needs to be limited.
+     *
+     * @param connection a valid database connection
+     * @return Returns the number of applied patches
+     * @throws NoSchemaException if the table of applied patches can't be found
+     * @throws AlfrescoRuntimeException if more than one applied-patch table is visible
+     */
+    private int countAppliedPatches(Connection connection) throws Exception
+    {
+        // Prefer the explicitly configured schema name; fall back to metadata discovery
+        String defaultSchema = dbSchemaName != null ? dbSchemaName : databaseMetaDataHelper.getSchema(connection);
+
+        // Normalise empty strings to null so the metadata lookup is unfiltered
+        if (defaultSchema != null && defaultSchema.length() == 0)
+        {
+            defaultSchema = null;
+        }
+        String defaultCatalog = connection.getCatalog();
+        if (defaultCatalog != null && defaultCatalog.length() == 0)
+        {
+            defaultCatalog = null;
+        }
+        DatabaseMetaData dbMetadata = connection.getMetaData();
+
+        ResultSet tableRs = dbMetadata.getTables(defaultCatalog, defaultSchema, "%", null);
+        boolean newPatchTable = false;
+        boolean oldPatchTable = false;
+        try
+        {
+            boolean multipleSchemas = false;
+            while (tableRs.next())
+            {
+                String tableName = tableRs.getString("TABLE_NAME");
+                if (tableName.equalsIgnoreCase("applied_patch"))
+                {
+                    if (oldPatchTable || newPatchTable)
+                    {
+                        // Found earlier
+                        multipleSchemas = true;
+                    }
+                    oldPatchTable = true;
+                }
+                else if (tableName.equalsIgnoreCase("alf_applied_patch"))
+                {
+                    if (oldPatchTable || newPatchTable)
+                    {
+                        // Found earlier
+                        multipleSchemas = true;
+                    }
+                    newPatchTable = true;
+                }
+            }
+            // We go through all the tables so that multiple visible schemas are detected
+            if (multipleSchemas)
+            {
+                throw new AlfrescoRuntimeException(ERR_MULTIPLE_SCHEMAS);
+            }
+        }
+        finally
+        {
+            // Log instead of printStackTrace: close failures are non-fatal here
+            try { tableRs.close(); } catch (Throwable e) { logger.debug("Failed to close table result set", e); }
+        }
+
+        if (newPatchTable)
+        {
+            Statement stmt = connection.createStatement();
+            try
+            {
+                ResultSet rs = stmt.executeQuery("select count(id) from alf_applied_patch");
+                rs.next();
+                int count = rs.getInt(1);
+                return count;
+            }
+            catch (SQLException e)
+            {
+                // This should work at least and is probably an indication of the user viewing multiple schemas
+                throw new AlfrescoRuntimeException(ERR_MULTIPLE_SCHEMAS);
+            }
+            finally
+            {
+                try { stmt.close(); } catch (Throwable e) {}
+            }
+        }
+        else if (oldPatchTable)
+        {
+            // found the old style (pre-1.4) table name
+            Statement stmt = connection.createStatement();
+            try
+            {
+                ResultSet rs = stmt.executeQuery("select count(id) from applied_patch");
+                rs.next();
+                int count = rs.getInt(1);
+                return count;
+            }
+            finally
+            {
+                try { stmt.close(); } catch (Throwable e) {}
+            }
+        }
+        else
+        {
+            // The applied patches table is not present
+            throw new NoSchemaException();
+        }
+    }
+
+
+ /**
+ * Check whether Activiti tables already created in db.
+ *
+ * @param connection a valid database connection
+ * @return true
if Activiti tables already created in schema, otherwise false
+ */
+ private boolean checkActivitiTablesExist(Connection connection)
+ {
+ Statement stmt = null;
+ try
+ {
+ stmt = connection.createStatement();
+ stmt.executeQuery("select min(id_) from ACT_RU_TASK");
+ return true;
+ }
+ catch (SQLException e)
+ {
+ logger.debug("Did not find ACT_RU_TASK table.");
+ return false;
+ }
+ finally
+ {
+ try
+ {
+ if (stmt != null)
+ {
+ stmt.close();
+ }
+ }
+ catch (Throwable e) {}
+ }
+ }
+
+    /**
+     * Determines which applied-patch table is present by probing each candidate
+     * with a SELECT. The broad Throwable catches are deliberate: a failed probe
+     * just means "try the next name".
+     *
+     * @param connection a valid database connection
+     * @return Returns the name of the applied patch table, or null if neither table exists
+     */
+    private String getAppliedPatchTableName(Connection connection) throws Exception
+    {
+        Statement stmt = connection.createStatement();
+        try
+        {
+            stmt.executeQuery("select * from alf_applied_patch");
+            return "alf_applied_patch";
+        }
+        catch (Throwable e)
+        {
+            // we'll try another table name
+        }
+        finally
+        {
+            try { stmt.close(); } catch (Throwable e) {}
+        }
+        // for pre-1.4 databases, the table was named differently
+        stmt = connection.createStatement();
+        try
+        {
+            stmt.executeQuery("select * from applied_patch");
+            return "applied_patch";
+        }
+        catch (Throwable e)
+        {
+            // It is not there
+            return null;
+        }
+        finally
+        {
+            try { stmt.close(); } catch (Throwable e) {}
+        }
+    }
+
+    /**
+     * Check whether a given patch has been successfully applied.
+     *
+     * @param connection  a valid database connection
+     * @param patchId     the patch identifier to look up
+     * @param alternative when true, the patch must also have actually been executed
+     * @return true if the patch succeeded (and, when alternative, was executed);
+     *         false if there is no patch table yet or no row for the id
+     */
+    private boolean didPatchSucceed(Connection connection, String patchId, boolean alternative) throws Exception
+    {
+        String patchTableName = getAppliedPatchTableName(connection);
+        if (patchTableName == null)
+        {
+            // Table doesn't exist, yet
+            return false;
+        }
+        // Bind patchId as a parameter rather than concatenating it into the SQL:
+        // avoids quoting/injection issues. The table name is one of two known
+        // constants, so interpolating it is safe.
+        java.sql.PreparedStatement stmt = connection.prepareStatement(
+                "select succeeded, was_executed from " + patchTableName + " where id = ?");
+        try
+        {
+            stmt.setString(1, patchId);
+            ResultSet rs = stmt.executeQuery();
+            if (!rs.next())
+            {
+                return false;
+            }
+            boolean succeeded = rs.getBoolean(1);
+            boolean wasExecuted = rs.getBoolean(2);
+
+            if (alternative)
+            {
+                return succeeded && wasExecuted;
+            }
+            else
+            {
+                return succeeded;
+            }
+        }
+        finally
+        {
+            try { stmt.close(); } catch (Throwable e) {}
+        }
+    }
+
+    /**
+     * Determines the originally installed schema number. The only way to determine it
+     * is by querying for the minimum value in alf_applied_patch.applied_to_schema.
+     * This might not work if an upgrade is attempted straight from Alfresco v1.0!
+     *
+     * @param connection a valid database connection
+     * @return the installed schema number or -1 if the installation is new
+     */
+    private int getInstalledSchemaNumber(Connection connection) throws Exception
+    {
+        Statement stmt = connection.createStatement();
+        try
+        {
+            ResultSet rs = stmt.executeQuery(
+                    "select min(applied_to_schema) from alf_applied_patch where applied_to_schema > -1");
+            // Merged the two "nothing in the table" checks: min() over an empty set
+            // yields either no row or a NULL aggregate, both meaning "new install".
+            if (!rs.next() || rs.getObject(1) == null)
+            {
+                return -1;
+            }
+            return rs.getInt(1);
+        }
+        finally
+        {
+            try { stmt.close(); } catch (Throwable e) {}
+        }
+    }
+
+    /**
+     * Thrown when the bootstrap lock (the alf_bootstrap_lock marker table) could not
+     * be acquired or another node holds it; callers catch this and retry.
+     */
+    private static class LockFailedException extends Exception
+    {
+        private static final long serialVersionUID = -6676398230191205456L;
+    }
+
+
+    /**
+     * Records that the bootstrap process has started by creating the marker table.
+     * Table creation doubles as a cluster-wide lock: if the table already exists
+     * (or creation fails for any other reason) the lock is considered taken.
+     *
+     * @param connection a valid database connection
+     * @throws LockFailedException if the marker table could not be created,
+     *         signalling the caller to retry later
+     */
+    private synchronized void setBootstrapStarted(Connection connection) throws Exception
+    {
+        // Create the marker table
+        Statement stmt = connection.createStatement();
+        try
+        {
+            stmt.executeUpdate("create table alf_bootstrap_lock (charval CHAR(1) NOT NULL)");
+            // Success
+            if (logger.isInfoEnabled())
+            {
+                logger.info("Bootstrap started.");
+            }
+            return;
+        }
+        catch (Throwable e)
+        {
+            // We throw a well-known exception to be handled by retrying code if required
+            throw new LockFailedException();
+        }
+        finally
+        {
+            try { stmt.close(); } catch (Throwable e) {}
+        }
+    }
+
+    /**
+     * Used to identify the case where one of the nodes (maybe even this one!)
+     * has created the alf_bootstrap_lock table - this table indicates that a schema
+     * initialization or schema DB update is in progress.
+     *
+     * @param connection a valid database connection
+     * @return true if the alf_bootstrap_lock marker table exists in the DB; false otherwise
+     */
+    private synchronized boolean isBootstrapInProgress(Connection connection) throws Exception
+    {
+        Statement stmt = connection.createStatement();
+        try
+        {
+            // Probe for the marker table; an exception means it is absent
+            stmt.executeQuery("select * from alf_bootstrap_lock");
+            // Success
+            if (logger.isInfoEnabled())
+            {
+                logger.info("Bootstrap marker still present in the DB.");
+            }
+            return true;
+        }
+        catch (Throwable e)
+        {
+            // The exception will be thrown if the table does not exist.
+            return false;
+        }
+        finally
+        {
+            try { stmt.close(); } catch (Throwable e) {}
+        }
+    }
+
+    /**
+     * Records that the bootstrap process has finished by dropping the marker table.
+     * A failure to drop it is treated as evidence of a previously failed bootstrap.
+     *
+     * @param connection a valid database connection
+     * @throws AlfrescoRuntimeException if the marker table could not be dropped
+     */
+    private void setBootstrapCompleted(Connection connection) throws Exception
+    {
+        // Drop the marker table created in setBootstrapStarted
+        Statement stmt = connection.createStatement();
+        try
+        {
+            stmt.executeUpdate("drop table alf_bootstrap_lock");
+
+            // from Thor: clear this thread's "bootstrap owner" marker
+            executedStatementsThreadLocal.set(null);
+            if (logger.isInfoEnabled())
+            {
+                logger.info("Bootstrap completed.");
+            }
+        }
+        catch (Throwable e)
+        {
+            if (logger.isDebugEnabled())
+            {
+                logger.debug("Exception in deleting the alf_bootstrap_lock table: " + e.getMessage(), e);
+            }
+            throw AlfrescoRuntimeException.create(ERR_PREVIOUS_FAILED_BOOTSTRAP);
+        }
+        finally
+        {
+            try { stmt.close(); } catch (Throwable e) {}
+        }
+    }
+
+    /**
+     * Builds the schema from scratch or applies the necessary patches to the schema,
+     * then creates or patches the Activiti tables.
+     *
+     * @param connection a valid database connection
+     * @return true if the schema was created from scratch, false if it was updated
+     */
+    private boolean updateSchema(Connection connection) throws Exception
+    {
+        boolean create = false;
+        try
+        {
+            final int numberOfPatchesApplied = countAppliedPatches(connection);
+            if (logger.isInfoEnabled())
+            {
+                logger.info("Applied patches detected: " + numberOfPatchesApplied);
+            }
+        }
+        catch (NoSchemaException e)
+        {
+            // No applied-patch table at all: brand new schema
+            create = true;
+        }
+
+        // NOTE: removed two unused locals here (a copy of this.dialect and its
+        // simple class name) — neither was referenced in this method.
+
+        if (create)
+        {
+            long start = System.currentTimeMillis();
+
+            // execute pre-create scripts (not patches)
+            for (String scriptUrl : this.preCreateScriptUrls)
+            {
+                executeScriptUrl(connection, scriptUrl);
+            }
+            // execute post-create scripts (not patches)
+            for (String scriptUrl : this.postCreateScriptUrls)
+            {
+                executeScriptUrl(connection, scriptUrl);
+            }
+
+            if (logger.isInfoEnabled())
+            {
+                logger.info("Creating Alfresco tables took " + (System.currentTimeMillis() - start) + " ms");
+            }
+        }
+        else
+        {
+            long start = System.currentTimeMillis();
+
+            // Execute any pre-auto-update scripts
+            checkSchemaPatchScripts(connection, preUpdateScriptPatches, true);
+
+            // Execute any post-auto-update scripts
+            checkSchemaPatchScripts(connection, postUpdateScriptPatches, true);
+
+            if (logger.isInfoEnabled())
+            {
+                logger.info(
+                        "Checking and patching Alfresco tables took " + (System.currentTimeMillis() - start) + " ms");
+            }
+        }
+
+        // Bail out (to retry later) if another cluster node owns the bootstrap
+        ensureCurrentClusterMemberIsBootstrapping(connection);
+
+        // Initialise Activiti DB, using an unclosable connection
+        boolean activitiTablesExist = checkActivitiTablesExist(connection);
+        if (logger.isInfoEnabled())
+        {
+            logger.info("Activiti tables need to be " + (activitiTablesExist ? "checked for patches" : " created"));
+        }
+
+        if (!activitiTablesExist)
+        {
+            long start = System.currentTimeMillis();
+
+            ensureCurrentClusterMemberIsBootstrapping(connection);
+
+            // Activiti DB updates are performed as patches in alfresco, only give
+            // control to activiti when creating new one.
+            initialiseActivitiDBSchema(new UnclosableConnection(connection));
+
+            // ALF-18996: Upgrade from 3.4.12 to 4.2.0 fails: Activiti tables have not been bootstrapped
+            // The Activiti bootstrap is effectively doing the work of all the other patches,
+            // which should be considered complete.
+            int installedSchemaNumber = getInstalledSchemaNumber(connection);
+            for (Patch activitiScriptPatch : updateActivitiScriptPatches)
+            {
+                AppliedPatch appliedPatch = new AppliedPatch();
+                appliedPatch.setId(activitiScriptPatch.getId());
+                appliedPatch.setDescription(activitiScriptPatch.getDescription());
+                appliedPatch.setFixesFromSchema(activitiScriptPatch.getFixesFromSchema());
+                appliedPatch.setFixesToSchema(activitiScriptPatch.getFixesToSchema());
+                appliedPatch.setTargetSchema(activitiScriptPatch.getTargetSchema());
+                appliedPatch.setAppliedToSchema(installedSchemaNumber);
+                appliedPatch.setAppliedToServer("UNKNOWN");
+                appliedPatch.setAppliedOnDate(new Date()); // the date applied
+                appliedPatch.setSucceeded(true);
+                appliedPatch.setWasExecuted(false);
+                appliedPatch.setReport("Placeholder for Activiti bootstrap at schema " + installedSchemaNumber);
+                appliedPatchDAO.createAppliedPatch(appliedPatch);
+            }
+            if (logger.isInfoEnabled())
+            {
+                logger.info("Creating Activiti tables took " + (System.currentTimeMillis() - start) + " ms");
+            }
+        }
+        else
+        {
+            long start = System.currentTimeMillis();
+
+            // Execute any auto-update scripts for Activiti tables
+            checkSchemaPatchScripts(connection, updateActivitiScriptPatches, true);
+
+            // verify that all Activiti patches have been applied correctly
+            checkSchemaPatchScripts(connection, updateActivitiScriptPatches, false);
+
+            if (logger.isInfoEnabled())
+            {
+                logger.info("Checking and patching Activiti tables took " + (System.currentTimeMillis() - start) + " ms");
+            }
+        }
+
+        if (!create)
+        {
+            long start = System.currentTimeMillis();
+
+            // verify that all patches have been applied correctly
+            checkSchemaPatchScripts(connection, preUpdateScriptPatches, false); // check scripts
+            checkSchemaPatchScripts(connection, postUpdateScriptPatches, false); // check scripts
+
+            if (logger.isInfoEnabled())
+            {
+                logger.info("Checking that all patches have been applied took " + (System.currentTimeMillis() - start) + " ms");
+            }
+        }
+
+        return create;
+    }
+
+    /**
+     * Initialises the Activiti DB schema; if not present it's created by handing
+     * control to the Activiti engine's own schema upgrade.
+     *
+     * @param connection Connection to use to initialise the DB schema; callers pass an
+     *        UnclosableConnection so the engine cannot close the shared connection
+     */
+    private void initialiseActivitiDBSchema(Connection connection)
+    {
+        // create instance of activiti engine to initialise schema
+        ProcessEngine engine = null;
+        ProcessEngineConfiguration engineConfig = ProcessEngineConfiguration.createStandaloneProcessEngineConfiguration();
+        try
+        {
+            // build the engine
+            // "none" prevents Activiti from updating the schema on engine start;
+            // the explicit databaseSchemaUpgrade call below does the work instead
+            engine = engineConfig.setDataSource(dataSource).
+                setDatabaseSchemaUpdate("none").
+                setProcessEngineName("activitiBootstrapEngine").
+                setHistory("full").
+                setJobExecutorActivate(false).
+                buildProcessEngine();
+
+            String schemaName = dbSchemaName != null ? dbSchemaName : databaseMetaDataHelper.getSchema(connection);
+            if (logger.isInfoEnabled())
+            {
+                logger.info("Creating Activiti DB schema tables");
+            }
+            engine.getManagementService().databaseSchemaUpgrade(connection, null, schemaName);
+        }
+        finally
+        {
+            if (engine != null)
+            {
+                // close the process engine
+                engine.close();
+            }
+        }
+    }
+
+    /**
+     * Check that the necessary patch scripts have been executed against the database,
+     * optionally executing any that are outstanding.
+     *
+     * @param connection    a valid database connection
+     * @param scriptPatches the patches to check, in order
+     * @param apply         true to execute outstanding scripts; false to only verify,
+     *                      failing if an applicable script has not been run
+     */
+    private void checkSchemaPatchScripts(
+            Connection connection,
+            List<SchemaUpgradeScriptPatch> scriptPatches,
+            boolean apply) throws Exception
+    {
+        // first check if there have been any applied patches
+        int appliedPatchCount = countAppliedPatches(connection);
+        if (appliedPatchCount == 0)
+        {
+            // This is a new schema, so upgrade scripts are irrelevant
+            // and patches will not have been applied yet
+            return;
+        }
+
+        ensureCurrentClusterMemberIsBootstrapping(connection);
+
+        // Retrieve the first installed schema number
+        int installedSchema = getInstalledSchemaNumber(connection);
+
+        nextPatch:
+        for (SchemaUpgradeScriptPatch patch : scriptPatches)
+        {
+            final String patchId = patch.getId();
+            final String scriptUrl = patch.getScriptUrl();
+
+            // Check if any of the alternative patches were executed
+            List<Patch> alternatives = patch.getAlternatives();
+            for (Patch alternativePatch : alternatives)
+            {
+                String alternativePatchId = alternativePatch.getId();
+                boolean alternativeSucceeded = didPatchSucceed(connection, alternativePatchId, true);
+                if (alternativeSucceeded)
+                {
+                    continue nextPatch;
+                }
+            }
+
+            // check if the script was successfully executed
+            boolean wasSuccessfullyApplied = didPatchSucceed(connection, patchId, false);
+            if (wasSuccessfullyApplied)
+            {
+                // Either the patch was executed before or the system was bootstrapped
+                // with the patch bean present.
+                continue;
+            }
+            else if (!patch.applies(installedSchema))
+            {
+                // Patch does not apply to the installed schema number
+                continue;
+            }
+            else if (!apply)
+            {
+                // the script was not run and may not be run automatically
+                throw AlfrescoRuntimeException.create(ERR_SCRIPT_NOT_RUN, scriptUrl);
+            }
+            // it wasn't run and it can be run now
+            executeScriptUrl(connection, scriptUrl);
+        }
+    }
+
+    /**
+     * Throws a LockFailedException when the current Alfresco cluster node is NOT the
+     * node doing the schema bootstrap — the signal for this node to wait and retry.
+     *
+     * Only one node in the cluster should initialise the DB; all others simply retry
+     * their schema checks after the initialisation has ended.
+     *
+     * @param connection a valid database connection
+     * @throws LockFailedException if another node owns the bootstrap
+     */
+    private void ensureCurrentClusterMemberIsBootstrapping(Connection connection) throws Exception
+    {
+        if (!isAnotherClusterMemberBootstrapping(connection))
+        {
+            return;
+        }
+        logger.info("Another Alfresco cluster node is updating the DB");
+        // We throw a well-known exception to be handled by retrying code if required
+        throw new LockFailedException();
+    }
+
+    /**
+     * @return true if a bootstrap is in progress and this node has NOT executed any
+     *         statements itself — i.e. some other node owns the schema
+     *         initialization/upgrade
+     *
+     * @throws Exception if the DB connection has problems
+     */
+    private boolean isAnotherClusterMemberBootstrapping(Connection connection) throws Exception
+    {
+        boolean thisNodeIdle = (executedStatementsThreadLocal.get() == null);
+        return thisNodeIdle && isBootstrapInProgress(connection);
+    }
+
+    /**
+     * Resolves the given script URL against the current dialect, copies the script to a
+     * temp file (kept for future and failure reference), and executes it.
+     *
+     * @param connection a valid database connection
+     * @param scriptUrl  the script URL, possibly containing the dialect placeholder
+     * @throws AlfrescoRuntimeException if the script cannot be found
+     */
+    private void executeScriptUrl(Connection connection, String scriptUrl) throws Exception
+    {
+        Dialect dialect = this.dialect;
+        String dialectStr = dialect.getClass().getSimpleName();
+        InputStream scriptInputStream = getScriptInputStream(dialect.getClass(), scriptUrl);
+        // check that it exists
+        if (scriptInputStream == null)
+        {
+            throw AlfrescoRuntimeException.create(ERR_SCRIPT_NOT_FOUND, scriptUrl);
+        }
+        // write the script to a temp location for future and failure reference
+        File tempFile = null;
+        try
+        {
+            tempFile = TempFileProvider.createTempFile("AlfrescoSchema-" + dialectStr + "-Update-", ".sql");
+            ContentWriter writer = new FileContentWriter(tempFile);
+            writer.putContent(scriptInputStream);
+        }
+        finally
+        {
+            try { scriptInputStream.close(); } catch (Throwable e) {} // usually a duplicate close
+        }
+        // now execute it
+        String dialectScriptUrl = scriptUrl.replaceAll(DialectUtil.PLACEHOLDER_DIALECT, dialect.getClass().getName());
+        // Replace the script placeholders
+        executeScriptFile(connection, tempFile, dialectScriptUrl);
+    }
+
+ /**
+ * Replaces the dialect placeholder in the script URL and attempts to find a file for
+ * it. If not found, the dialect hierarchy will be walked until a compatible script is
+ * found. This makes it possible to have scripts that are generic to all dialects.
+ *
+ * @return the input stream onto the script; never null - an AlfrescoRuntimeException is thrown if the script cannot be found
+ */
+ private InputStream getScriptInputStream(Class> dialectClazz, String scriptUrl) throws Exception
+ {
+ Resource resource = DialectUtil.getDialectResource(rpr, dialectClazz, scriptUrl);
+ if (resource == null)
+ {
+ throw new AlfrescoRuntimeException("Script [ " + scriptUrl + " ] can't be found for " + dialectClazz);
+ }
+ return resource.getInputStream();
+ }
+
+ /**
+ * @param connection the DB connection to use
+ * @param scriptFile the file containing the statements
+ * @param scriptUrl the URL of the script to report. If this is null, the script
+ * is assumed to have been auto-generated.
+ */
+ private void executeScriptFile(
+ Connection connection,
+ File scriptFile,
+ String scriptUrl) throws Exception
+ {
+ final Dialect dialect = this.dialect;
+
+ if (executedStatementsThreadLocal.get() == null)
+ {
+ // Validate the schema, pre-upgrade
+ validateSchema("Alfresco-{0}-Validation-Pre-Upgrade-{1}-", null);
+
+ dumpSchema("pre-upgrade");
+
+ // There is no lock at this stage. This process can fall out if the lock can't be applied.
+ setBootstrapStarted(connection);
+ executedStatementsThreadLocal.set(new StringBuilder(8094));
+ }
+
+ if (scriptUrl == null)
+ {
+ LogUtil.info(logger, MSG_EXECUTING_GENERATED_SCRIPT, scriptFile);
+ }
+ else
+ {
+ LogUtil.info(logger, MSG_EXECUTING_COPIED_SCRIPT, scriptFile, scriptUrl);
+ }
+
+ InputStream scriptInputStream = new FileInputStream(scriptFile);
+ BufferedReader reader = new BufferedReader(new InputStreamReader(scriptInputStream, "UTF-8"));
+ try
+ {
+ int line = 0;
+ // loop through all statements
+ StringBuilder sb = new StringBuilder(1024);
+ String fetchVarName = null;
+ String fetchColumnName = null;
+ Object defaultFetchValue = null;
+ boolean doBatch = false;
+ int batchUpperLimit = 0;
+ int batchSize = 1;
+ Map varAssignments = new HashMap(13);
+ String delimiter = ";";
+ // Special variable assignments:
+ if (dialect instanceof PostgreSQLDialect)
+ {
+ // Needs true/false as words
+ varAssignments.put("true", "true");
+ varAssignments.put("false", "false");
+ varAssignments.put("TRUE", "TRUE");
+ varAssignments.put("FALSE", "FALSE");
+ }
+ else
+ {
+ // Needs 1/0 for true/false
+ varAssignments.put("true", "1");
+ varAssignments.put("false", "0");
+ varAssignments.put("TRUE", "1");
+ varAssignments.put("FALSE", "0");
+ }
+ long now = System.currentTimeMillis();
+ varAssignments.put("now", Long.toString(now));
+ varAssignments.put("NOW", Long.toString(now));
+
+ while(true)
+ {
+ String sqlOriginal = reader.readLine();
+ line++;
+
+ if (sqlOriginal == null)
+ {
+ // nothing left in the file
+ break;
+ }
+
+ // trim it
+ String sql = sqlOriginal.trim();
+ // Check of includes
+ if (sql.startsWith("--INCLUDE:"))
+ {
+ if (sb.length() > 0)
+ {
+ // This can only be set before a new SQL statement
+ throw AlfrescoRuntimeException.create(ERR_STATEMENT_INCLUDE_BEFORE_SQL, (line - 1), scriptUrl);
+ }
+ String includedScriptUrl = sql.substring(10, sql.length());
+ // Execute the script in line
+ executeScriptUrl(connection, includedScriptUrl);
+ }
+ // Check for variable assignment
+ else if (sql.startsWith("--ASSIGN:"))
+ {
+ if (sb.length() > 0)
+ {
+ // This can only be set before a new SQL statement
+ throw AlfrescoRuntimeException.create(ERR_STATEMENT_VAR_ASSIGNMENT_BEFORE_SQL, (line - 1), scriptUrl);
+ }
+ String assignStr = sql.substring(9, sql.length());
+ String[] fetchMapping = assignStr.split("!");
+ String[] assigns = fetchMapping[0].split("=");
+ if (assigns.length != 2 || assigns[0].length() == 0 || assigns[1].length() == 0)
+ {
+ throw AlfrescoRuntimeException.create(ERR_STATEMENT_VAR_ASSIGNMENT_FORMAT, (line - 1), scriptUrl);
+ }
+ fetchVarName = assigns[0];
+ fetchColumnName = assigns[1];
+ if (fetchMapping.length > 1 && fetchMapping[1].length() > 0)
+ {
+ defaultFetchValue = fetchMapping[1];
+ }
+ continue;
+ }
+ // Handle looping control
+ else if (sql.startsWith("--FOREACH"))
+ {
+ // --FOREACH table.column batch.size.property
+ String[] args = sql.split("[ \\t]+");
+ int sepIndex;
+ if (args.length == 3 && (sepIndex = args[1].indexOf('.')) != -1)
+ {
+ doBatch = true;
+ // Select the upper bound of the table column
+ String stmt = "SELECT MAX(" + args[1].substring(sepIndex+1) + ") AS upper_limit FROM " + args[1].substring(0, sepIndex);
+ Object fetchedVal = executeStatement(connection, stmt, "upper_limit", false, line, scriptFile);
+ if (fetchedVal instanceof Number)
+ {
+ batchUpperLimit = ((Number)fetchedVal).intValue();
+ // Read the batch size from the named property
+ String batchSizeString = globalProperties.getProperty(args[2]);
+ // Fall back to the default property
+ if (batchSizeString == null)
+ {
+ batchSizeString = globalProperties.getProperty(PROPERTY_DEFAULT_BATCH_SIZE);
+ }
+ batchSize = batchSizeString == null ? 10000 : Integer.parseInt(batchSizeString);
+ }
+ }
+ continue;
+ }
+ // Allow transaction delineation
+ else if (sql.startsWith("--BEGIN TXN"))
+ {
+ connection.setAutoCommit(false);
+ continue;
+ }
+ else if (sql.startsWith("--END TXN"))
+ {
+ connection.commit();
+ connection.setAutoCommit(true);
+ continue;
+ }
+ else if (sql.startsWith("--SET-DELIMITER:"))
+ {
+ if (sb.length() > 0)
+ {
+ // This can only be set before a new SQL statement
+ throw AlfrescoRuntimeException.create(ERR_DELIMITER_SET_BEFORE_SQL, (line - 1), scriptUrl);
+ }
+
+ // We're good...so set the new delimiter
+ String newDelim = sql.substring(16).trim();
+ if (newDelim.length() == 0)
+ {
+ throw AlfrescoRuntimeException.create(ERR_DELIMITER_INVALID, (line - 1), scriptUrl);
+ }
+ delimiter = newDelim;
+ }
+
+ // Check for comments
+ if (sql.length() == 0 ||
+ sql.startsWith( "--" ) ||
+ sql.startsWith( "//" ) ||
+ sql.startsWith( "/*" ) )
+ {
+ if (sb.length() > 0)
+ {
+ // we have an unterminated statement
+ throw AlfrescoRuntimeException.create(ERR_STATEMENT_TERMINATOR, delimiter, (line - 1), scriptUrl);
+ }
+ // there has not been anything to execute - it's just a comment line
+ continue;
+ }
+ // have we reached the end of a statement?
+ boolean execute = false;
+ boolean optional = false;
+ if (sql.endsWith(delimiter))
+ {
+ sql = sql.substring(0, sql.length() - delimiter.length());
+ execute = true;
+ optional = false;
+ }
+ else if (sql.endsWith("(optional)") || sql.endsWith("(OPTIONAL)"))
+ {
+ // Get the end of statement
+ int endIndex = sql.lastIndexOf(delimiter);
+ if (endIndex > -1)
+ {
+ sql = sql.substring(0, endIndex);
+ execute = true;
+ optional = true;
+ }
+ else
+ {
+ // Ends with "(optional)" but there is no semi-colon.
+ // Just take it at face value and probably fail.
+ }
+ }
+ // Add newline
+ if (sb.length() > 0)
+ {
+ sb.append("\n");
+ }
+ // Add leading whitespace for formatting
+ int whitespaceCount = sqlOriginal.indexOf(sql);
+ for (int i = 0; i < whitespaceCount; i++)
+ {
+ sb.append(" ");
+ }
+ // append to the statement being built up
+ sb.append(sql);
+ // execute, if required
+ if (execute)
+ {
+ // Now substitute and execute the statement the appropriate number of times
+ String unsubstituted = sb.toString();
+ for(int lowerBound = 0; lowerBound <= batchUpperLimit; lowerBound += batchSize)
+ {
+ sql = unsubstituted;
+
+ // Substitute in the next pair of range parameters
+ if (doBatch)
+ {
+ varAssignments.put("LOWERBOUND", String.valueOf(lowerBound));
+ varAssignments.put("UPPERBOUND", String.valueOf(lowerBound + batchSize - 1));
+ }
+
+ // Perform variable replacement using the ${var} format
+ for (Map.Entry entry : varAssignments.entrySet())
+ {
+ String var = entry.getKey();
+ Object val = entry.getValue();
+ sql = sql.replaceAll("\\$\\{" + var + "\\}", val.toString());
+ }
+
+ // Handle the 0/1 values that PostgreSQL doesn't translate to TRUE
+ if (this.dialect != null && this.dialect instanceof PostgreSQLDialect)
+ {
+ sql = sql.replaceAll("\\$\\{TRUE\\}", "TRUE");
+ }
+ else
+ {
+ sql = sql.replaceAll("\\$\\{TRUE\\}", "1");
+ }
+
+ if (this.dialect != null && this.dialect instanceof MySQLInnoDBDialect)
+ {
+ // note: enable bootstrap on MySQL 5.5 (eg. for auto-generated SQL)
+ sql = sql.replaceAll("(?i)TYPE=InnoDB", "ENGINE=InnoDB");
+ }
+
+ if (this.dialect != null && this.dialect instanceof MySQLClusterNDBDialect)
+ {
+ // note: enable bootstrap on MySQL Cluster NDB
+ /*
+ * WARNING: Experimental/unsupported - see MySQLClusterNDBDialect !
+ */
+ sql = sql.replaceAll("(?i)TYPE=InnoDB", "ENGINE=NDB"); // belts-and-braces
+ sql = sql.replaceAll("(?i)ENGINE=InnoDB", "ENGINE=NDB");
+
+ sql = sql.replaceAll("(?i) BIT ", " BOOLEAN ");
+ sql = sql.replaceAll("(?i) BIT,", " BOOLEAN,");
+
+ sql = sql.replaceAll("(?i) string_value text", " string_value VARCHAR("+DEFAULT_MAX_STRING_LENGTH_NDB+")");
+
+ sql = sql.replaceAll("(?i) VARCHAR\\(4000\\)", " TEXT(4000)");
+ }
+
+ Object fetchedVal = executeStatement(connection, sql, fetchColumnName, optional, line, scriptFile);
+ if (fetchVarName != null && fetchColumnName != null)
+ {
+ if (fetchedVal != null)
+ {
+ varAssignments.put(fetchVarName, fetchedVal);
+ }
+ else
+ {
+ varAssignments.put(fetchVarName, defaultFetchValue);
+ }
+ }
+ }
+ sb.setLength(0);
+ fetchVarName = null;
+ fetchColumnName = null;
+ defaultFetchValue = null;
+ doBatch = false;
+ batchUpperLimit = 0;
+ batchSize = 1;
+ }
+ }
+ }
+ finally
+ {
+ try { reader.close(); } catch (Throwable e) {}
+ try { scriptInputStream.close(); } catch (Throwable e) {}
+ }
+ }
+
+ /**
+ * Execute the given SQL statement, absorbing exceptions that we expect during
+ * schema creation or upgrade.
+ *
+ * @param fetchColumnName the name of the column value to return
+ */
+ private Object executeStatement(
+ Connection connection,
+ String sql,
+ String fetchColumnName,
+ boolean optional,
+ int line,
+ File file) throws Exception
+ {
+ StringBuilder executedStatements = executedStatementsThreadLocal.get();
+ if (executedStatements == null)
+ {
+ throw new IllegalArgumentException("The executedStatementsThreadLocal must be populated");
+ }
+
+ Statement stmt = connection.createStatement();
+ Object ret = null;
+ try
+ {
+ if (logger.isDebugEnabled())
+ {
+ LogUtil.debug(logger, MSG_EXECUTING_STATEMENT, sql);
+ }
+ boolean haveResults = stmt.execute(sql);
+ // Record the statement
+ executedStatements.append(sql).append(";\n\n");
+ if (haveResults && fetchColumnName != null)
+ {
+ ResultSet rs = stmt.getResultSet();
+ if (rs.next())
+ {
+ // Get the result value
+ ret = rs.getObject(fetchColumnName);
+ }
+ }
+ }
+ catch (SQLException e)
+ {
+ if (optional)
+ {
+ // it was marked as optional, so we just ignore it
+ LogUtil.debug(logger, MSG_OPTIONAL_STATEMENT_FAILED, sql, e.getMessage(), file.getAbsolutePath(), line);
+ }
+ else
+ {
+ LogUtil.error(logger, ERR_STATEMENT_FAILED, sql, e.getMessage(), file.getAbsolutePath(), line);
+ throw e;
+ }
+ }
+ finally
+ {
+ try { stmt.close(); } catch (Throwable e) {}
+ }
+ return ret;
+ }
+
+ /**
+ * Performs dialect-specific checking. This includes checking for InnoDB, dumping the dialect being used
+ * as well as setting any runtime, dialect-specific properties.
+ */
+ private void checkDialect(Dialect dialect)
+ {
+ Class> dialectClazz = dialect.getClass();
+ LogUtil.info(logger, MSG_DIALECT_USED, dialectClazz.getName());
+// if (dialectClazz.equals(MySQLDialect.class) || dialectClazz.equals(MySQL5Dialect.class))
+// {
+// LogUtil.error(logger, ERR_DIALECT_SHOULD_USE, dialectClazz.getName(), MySQLInnoDBDialect.class.getName());
+// throw AlfrescoRuntimeException.create(WARN_DIALECT_UNSUPPORTED, dialectClazz.getName());
+// }
+// else if (dialectClazz.equals(HSQLDialect.class))
+// {
+// LogUtil.info(logger, WARN_DIALECT_HSQL);
+// }
+// else if (dialectClazz.equals(DerbyDialect.class))
+// {
+// LogUtil.info(logger, WARN_DIALECT_DERBY);
+// }
+// else if (dialectClazz.equals(Oracle9iDialect.class) || dialectClazz.equals(Oracle10gDialect.class))
+// {
+// LogUtil.error(logger, ERR_DIALECT_SHOULD_USE, dialectClazz.getName(), Oracle9Dialect.class.getName());
+// throw AlfrescoRuntimeException.create(WARN_DIALECT_UNSUPPORTED, dialectClazz.getName());
+// }
+// else if (dialectClazz.equals(OracleDialect.class) || dialectClazz.equals(Oracle9Dialect.class))
+// {
+// LogUtil.error(logger, ERR_DIALECT_SHOULD_USE, dialectClazz.getName(), Oracle9Dialect.class.getName());
+// throw AlfrescoRuntimeException.create(WARN_DIALECT_UNSUPPORTED, dialectClazz.getName());
+// }
+
+ int maxStringLength = SchemaBootstrap.DEFAULT_MAX_STRING_LENGTH;
+ int serializableType = SerializableTypeHandler.getSerializableType();
+ // Adjust the maximum allowable String length according to the dialect
+ if (dialect instanceof SQLServerDialect)
+ {
+ // string_value nvarchar(1024) null,
+ // serializable_value image null,
+ maxStringLength = SchemaBootstrap.DEFAULT_MAX_STRING_LENGTH;
+ }
+ else if (dialect instanceof MySQLClusterNDBDialect)
+ {
+ // string_value varchar(400),
+ // serializable_value blob,
+ maxStringLength = SchemaBootstrap.DEFAULT_MAX_STRING_LENGTH_NDB;
+ }
+ else if (dialect instanceof MySQLInnoDBDialect)
+ {
+ // string_value text,
+ // serializable_value blob,
+ maxStringLength = Integer.MAX_VALUE;
+ }
+ else if (dialect instanceof Oracle9Dialect)
+ {
+ // string_value varchar2(1024 char),
+ // serializable_value blob,
+ maxStringLength = SchemaBootstrap.DEFAULT_MAX_STRING_LENGTH;
+ }
+ else if (dialect instanceof PostgreSQLDialect)
+ {
+ // string_value varchar(1024),
+ // serializable_value bytea,
+ maxStringLength = SchemaBootstrap.DEFAULT_MAX_STRING_LENGTH;
+ }
+
+ SchemaBootstrap.setMaxStringLength(maxStringLength, dialect);
+ SerializableTypeHandler.setSerializableType(serializableType);
+
+ // Now override the maximum string length if it was set directly
+ if (maximumStringLength > 0)
+ {
+ SchemaBootstrap.setMaxStringLength(maximumStringLength, dialect);
+ }
+ }
+
+ @Override
+ public synchronized void onBootstrap(ApplicationEvent event)
+ {
+ if (event != null)
+ {
+ // Use the application context to load resources
+ rpr = (ApplicationContext)event.getSource();
+ }
+
+ // do everything in a transaction
+ Connection connection = null;
+ try
+ {
+ // make sure that we AUTO-COMMIT
+ connection = dataSource.getConnection();
+ connection.setAutoCommit(true);
+ LogUtil.info(logger, MSG_DATABASE_USED, connection);
+
+ // Check and dump the dialect being used
+ checkDialect(this.dialect);
+
+ // Update the schema, if required.
+ if (updateSchema)
+ {
+ // Retries are required here as the DB lock will be applied lazily upon first statement execution.
+ // So if the schema is up to date (no statements executed) then the LockFailException cannot be
+ // thrown. If it is thrown, then the update needs to be rerun as it will probably generate no SQL
+ // statements the second time around.
+ boolean updatedSchema = false;
+ boolean createdSchema = false;
+
+ for (int i = 0; i < schemaUpdateLockRetryCount; i++)
+ {
+ try
+ {
+ long start = System.currentTimeMillis();
+
+ createdSchema = updateSchema(connection);
+ updatedSchema = true;
+
+ if (logger.isInfoEnabled())
+ {
+ logger.info(
+ (createdSchema ? "Creating" : "Updating") +
+ " the DB schema took " + (System.currentTimeMillis() - start) + " ms");
+ }
+ break;
+ }
+ catch (LockFailedException e)
+ {
+ logger.info(
+ "The current Alfresco cluster node is waiting for another chance to bootstrap the DB schema. "
+ + "Attempt: " + (i + 1) + " of " + schemaUpdateLockRetryCount);
+ try { this.wait(schemaUpdateLockRetryWaitSeconds * 1000L); } catch (InterruptedException ee) { Thread.currentThread().interrupt(); }
+ }
+ }
+
+ if (!updatedSchema)
+ {
+ // The retries were exceeded
+ throw new AlfrescoRuntimeException(ERR_UPDATE_IN_PROGRESS_ON_ANOTHER_NODE);
+ }
+
+ writeLogsWithDBStatementExecuted();
+
+ if (executedStatementsThreadLocal.get() != null)
+ {
+ // Remove the flag indicating a running bootstrap
+ setBootstrapCompleted(connection);
+
+ // Validate the schema, post-upgrade
+ validateSchema("Alfresco-{0}-Validation-Post-Upgrade-{1}-", null);
+ // 4.0+ schema dump
+ dumpSchema("post-upgrade");
+ }
+ }
+ else
+ {
+ LogUtil.info(logger, MSG_BYPASSING_SCHEMA_UPDATE);
+ }
+
+ if (stopAfterSchemaBootstrap)
+ {
+ // 4.0+ schema dump
+ dumpSchema("forced-exit");
+ LogUtil.error(logger, ERR_FORCED_STOP);
+ throw new BootstrapStopException();
+ }
+
+ if (event != null)
+ {
+ // all done successfully
+ ((ApplicationContext) event.getSource()).publishEvent(new SchemaAvailableEvent(this));
+ }
+ }
+ catch (BootstrapStopException e)
+ {
+ // We just let this out
+ throw e;
+ }
+ catch (Throwable e)
+ {
+ LogUtil.error(logger, e, ERR_UPDATE_FAILED);
+ if (updateSchema)
+ {
+ throw new AlfrescoRuntimeException(ERR_UPDATE_FAILED, e);
+ }
+ else
+ {
+ throw new AlfrescoRuntimeException(ERR_VALIDATION_FAILED, e);
+ }
+ }
+ finally
+ {
+ try
+ {
+ if (connection != null)
+ {
+ connection.close();
+ }
+ }
+ catch (Throwable e)
+ {
+ logger.warn("Error closing DB connection: " + e.getMessage());
+ }
+ }
+ }
+
+ private void writeLogsWithDBStatementExecuted()
+ {
+ // Copy the executed statements to the output file
+ File schemaOutputFile = null;
+ if (schemaOuputFilename != null)
+ {
+ schemaOutputFile = new File(schemaOuputFilename);
+ }
+ else
+ {
+ schemaOutputFile = TempFileProvider.createTempFile(
+ "AlfrescoSchema-" + this.dialect.getClass().getSimpleName() + "-All_Statements-",
+ ".sql");
+ }
+
+ StringBuilder executedStatements = executedStatementsThreadLocal.get();
+ if (executedStatements == null)
+ {
+ LogUtil.info(logger, MSG_NO_CHANGES);
+ }
+ else
+ {
+ FileContentWriter writer = new FileContentWriter(schemaOutputFile);
+ writer.setEncoding("UTF-8");
+ String executedStatementsStr = executedStatements.toString();
+ writer.putContent(executedStatementsStr);
+ LogUtil.info(logger, MSG_ALL_STATEMENTS, schemaOutputFile.getPath());
+ }
+ }
+
+ /**
+ * Collate differences and validation problems with the schema with respect to an appropriate
+ * reference schema.
+ *
+ * @param outputFileNameTemplate String
+ * @param out PrintWriter
+ * @return the number of potential problems found.
+ */
+ public synchronized int validateSchema(String outputFileNameTemplate, PrintWriter out)
+ {
+ int totalProblems = 0;
+
+ // Discover available reference files (e.g. for prefixes alf_, etc.)
+ // and process each in turn.
+ for (String schemaReferenceUrl : schemaReferenceUrls)
+ {
+ Resource referenceResource = DialectUtil.getDialectResource(rpr, dialect.getClass(), schemaReferenceUrl);
+
+ if (referenceResource == null || !referenceResource.exists())
+ {
+ String resourceUrl = DialectUtil.resolveDialectUrl(dialect.getClass(), schemaReferenceUrl);
+ LogUtil.debug(logger, DEBUG_SCHEMA_COMP_NO_REF_FILE, resourceUrl);
+ }
+ else
+ {
+ // Validate schema against each reference file
+ int problems = validateSchema(referenceResource, outputFileNameTemplate, out);
+ totalProblems += problems;
+ }
+ }
+
+ // Return number of problems found across all reference files.
+ return totalProblems;
+ }
+
+ private int validateSchema(Resource referenceResource, String outputFileNameTemplate, PrintWriter out)
+ {
+ try
+ {
+ return attemptValidateSchema(referenceResource, outputFileNameTemplate, out);
+ }
+ catch (Throwable e)
+ {
+ if (logger.isErrorEnabled())
+ {
+ logger.error("Unable to validate database schema.", e);
+ }
+ return 0;
+ }
+ }
+
+ /**
+ * Validates and compares the current DB schema with the schema reference definition, specified by the <code>referenceResource</code> parameter.
+ * <p>
+ * The method supports two mechanisms to report validation results:
+ * <ul>
+ * <li>using an external output stream, specified as <code>out</code>;</li>
+ * <li>using a specially created {@link FileOutputStream}, which represents a temporary file with a name formatted in accordance with
+ * the <code>outputFileNameTemplate</code> template.</li>
+ * </ul>
+ * It is necessary to take care of freeing the resources of the output stream in the case of the 1st approach.
+ * N.B.: The method only writes the messages of the report. It doesn't flush and doesn't close the specified output stream!
+ *
+ * @param referenceResource - {@link Resource} instance, which determines the file of the reference schema
+ * @param outputFileNameTemplate - {@link String} value, which determines the template of the temporary filename for the validation report.
+ * It can't be <code>null</code> if <code>out</code> is <code>null</code>!
+ * @param out - {@link PrintWriter} instance, which represents an external output stream for writing the validation report.
+ * This stream is never closed or flushed. It can't be <code>null</code> if <code>outputFileNameTemplate</code> is <code>null</code>!
+ * @return the number of errors or warnings that were detected during validation
+ */
+ private int attemptValidateSchema(Resource referenceResource, String outputFileNameTemplate, PrintWriter out)
+ {
+ Date startTime = new Date();
+
+ InputStream is = null;
+ try
+ {
+ is = new BufferedInputStream(referenceResource.getInputStream());
+ }
+ catch (IOException e)
+ {
+ throw new RuntimeException("Unable to open schema reference file: " + referenceResource, e);
+ }
+
+ XMLToSchema xmlToSchema = new XMLToSchema(is);
+ xmlToSchema.parse();
+ Schema reference = xmlToSchema.getSchema();
+ ExportDb exporter = new ExportDb(dataSource, dialect, descriptorService, databaseMetaDataHelper);
+ exporter.setDbSchemaName(dbSchemaName);
+ // Ensure that the database objects we're validating are filtered
+ // by the same prefix as the reference file.
+ exporter.setNamePrefix(reference.getDbPrefix());
+ exporter.execute();
+ Schema target = exporter.getSchema();
+
+ SchemaComparator schemaComparator = new SchemaComparator(reference, target, dialect);
+
+ schemaComparator.validateAndCompare();
+
+ Results results = schemaComparator.getComparisonResults();
+
+ Object[] outputFileNameParams = new Object[]
+ {
+ dialect.getClass().getSimpleName(),
+ reference.getDbPrefix()
+ };
+ PrintWriter pw = null;
+ File outputFile = null;
+
+ try
+ {
+ if (out == null)
+ {
+ String outputFileName = MessageFormat.format(outputFileNameTemplate, outputFileNameParams);
+
+ outputFile = TempFileProvider.createTempFile(outputFileName, ".txt");
+
+ try
+ {
+ pw = new PrintWriter(outputFile, SchemaComparator.CHAR_SET);
+ }
+ catch (FileNotFoundException error)
+ {
+ throw new RuntimeException("Unable to open file for writing: " + outputFile, error);
+ }
+ catch (UnsupportedEncodingException error)
+ {
+ throw new RuntimeException("Unsupported char set: " + SchemaComparator.CHAR_SET, error);
+ }
+ }
+ else
+ {
+ pw = out;
+ }
+
+ // Populate the file with details of the comparison's results.
+ for (Result result : results)
+ {
+ pw.print(result.describe());
+ pw.print(SchemaComparator.LINE_SEPARATOR);
+ }
+ }
+ finally
+ {
+ // We care only about output streams for reporting, which are created specially for current reference resource...
+ if (null == out)
+ {
+ pw.close();
+ }
+ }
+
+
+ if (results.size() == 0)
+ {
+ LogUtil.info(logger, INFO_SCHEMA_COMP_ALL_OK, referenceResource);
+ }
+ else
+ {
+ int numProblems = results.size();
+ if (outputFile == null)
+ {
+ LogUtil.warn(logger, WARN_SCHEMA_COMP_PROBLEMS_FOUND_NO_FILE, numProblems);
+ }
+ else
+ {
+ LogUtil.warn(logger, WARN_SCHEMA_COMP_PROBLEMS_FOUND, numProblems, outputFile);
+ }
+ }
+ Date endTime = new Date();
+ long durationMillis = endTime.getTime() - startTime.getTime();
+ LogUtil.debug(logger, DEBUG_SCHEMA_COMP_TIME_TAKEN, durationMillis);
+
+ return results.size();
+ }
+
+ /**
+ * Produces schema dump in XML format: this is performed pre- and post-upgrade (i.e. if
+ * changes are made to the schema) and can be made upon demand via JMX.
+ *
+ * @return List of output files.
+ */
+ public List dumpSchema()
+ {
+ return dumpSchema("", null);
+ }
+
+ /**
+ * Produces schema dump in XML format: this is performed pre- and post-upgrade (i.e. if
+ * changes are made to the schema) and can be made upon demand via JMX.
+ *
+ * @param dbPrefixes Array of database object prefixes to produce the dump for, e.g. "alf_".
+ * @return List of output files.
+ */
+ public List dumpSchema(String[] dbPrefixes)
+ {
+ return dumpSchema("", dbPrefixes);
+ }
+
+ /**
+ * Dumps the DB schema to temporary file(s), named similarly to:
+ *
+ * Alfresco-schema-DialectName-whenDumped-dbPrefix-23498732.xml
+ *
+ * Where the digits serve to create a unique temp file name. If whenDumped is empty or null,
+ * then the output is similar to:
+ *
+ * Alfresco-schema-DialectName-dbPrefix-23498732.xml
+ *
+ * If dbPrefixes is null, then the default list is used (see {@link MultiFileDumper#DEFAULT_PREFIXES})
+ * The dump files' paths are logged at info level.
+ *
+ * @param whenDumped String
+ * @param dbPrefixes Array of database object prefixes to filter by, e.g. "alf_"
+ * @return List of output files.
+ */
+ private List dumpSchema(String whenDumped, String[] dbPrefixes)
+ {
+ // Build a string to use as the file name template,
+ // e.g. "Alfresco-schema-MySQLDialect-pre-upgrade-{0}-"
+ StringBuilder sb = new StringBuilder(64);
+ sb.append("Alfresco-schema-").
+ append(dialect.getClass().getSimpleName());
+ if (whenDumped != null && whenDumped.length() > 0)
+ {
+ sb.append("-");
+ sb.append(whenDumped);
+ }
+ sb.append("-{0}-");
+
+ File outputDir = TempFileProvider.getTempDir();
+ String fileNameTemplate = sb.toString();
+ return dumpSchema(outputDir, fileNameTemplate, dbPrefixes);
+ }
+
+ /**
+ * Same as for {@link #dumpSchema(String, String[])} - except that the default list
+ * of database object prefixes is used for filtering.
+ *
+ * @see #dumpSchema(String, String[])
+ * @param whenDumped String
+ * @return List
+ */
+ private List dumpSchema(String whenDumped)
+ {
+ return dumpSchema(whenDumped, null);
+ }
+
+ private List dumpSchema(File outputDir, String fileNameTemplate, String[] dbPrefixes)
+ {
+ try
+ {
+ return attemptDumpSchema(outputDir, fileNameTemplate, dbPrefixes);
+ }
+ catch (Throwable e)
+ {
+ if (logger.isErrorEnabled())
+ {
+ logger.error("Unable to dump schema to directory " + outputDir, e);
+ }
+ return null;
+ }
+ }
+
+ private List attemptDumpSchema(File outputDir, String fileNameTemplate, String[] dbPrefixes)
+ {
+ DbToXMLFactory dbToXMLFactory = new MultiFileDumper.DbToXMLFactoryImpl(getApplicationContext());
+
+ MultiFileDumper dumper;
+
+ if (dbPrefixes == null)
+ {
+ dumper = new MultiFileDumper(outputDir, fileNameTemplate, dbToXMLFactory, dbSchemaName);
+ }
+ else
+ {
+ dumper = new MultiFileDumper(dbPrefixes, outputDir, fileNameTemplate, dbToXMLFactory, dbSchemaName);
+ }
+ List files = dumper.dumpFiles();
+
+ for (File file : files)
+ {
+ if (logger.isInfoEnabled())
+ {
+ LogUtil.info(logger, MSG_NORMALIZED_SCHEMA, file.getAbsolutePath());
+ }
+ }
+
+ return files;
+ }
+
+ @Override
+ protected void onShutdown(ApplicationEvent event)
+ {
+ }
+
+ /**
+ * A {@link Connection} wrapper that delegates all calls to the wrapped class
+ * except for the close methods, which are ignored.
+ *
+ * @author Frederik Heremans
+ */
+ public class UnclosableConnection implements Connection
+ {
+ private Connection wrapped;
+
+ public UnclosableConnection(Connection wrappedConnection)
+ {
+ this.wrapped = wrappedConnection;
+ }
+
+ @Override
+ public boolean isWrapperFor(Class> iface) throws SQLException
+ {
+ return wrapped.isWrapperFor(iface);
+ }
+
+ @Override
+ public T unwrap(Class iface) throws SQLException
+ {
+ return wrapped.unwrap(iface);
+ }
+
+ @Override
+ public void clearWarnings() throws SQLException
+ {
+ wrapped.clearWarnings();
+ }
+
+ @Override
+ public void close() throws SQLException
+ {
+ // Ignore this call
+ }
+
+ @Override
+ public void commit() throws SQLException
+ {
+ wrapped.commit();
+ }
+
+ @Override
+ public Array createArrayOf(String typeName, Object[] elements) throws SQLException
+ {
+ return wrapped.createArrayOf(typeName, elements);
+ }
+
+ @Override
+ public Blob createBlob() throws SQLException
+ {
+ return wrapped.createBlob();
+ }
+
+ @Override
+ public Clob createClob() throws SQLException
+ {
+ return wrapped.createClob();
+ }
+
+ @Override
+ public NClob createNClob() throws SQLException
+ {
+ return wrapped.createNClob();
+ }
+
+ @Override
+ public SQLXML createSQLXML() throws SQLException
+ {
+ return wrapped.createSQLXML();
+ }
+
+ @Override
+ public Statement createStatement() throws SQLException
+ {
+ return wrapped.createStatement();
+ }
+
+ @Override
+ public Statement createStatement(int resultSetType, int resultSetConcurrency)
+ throws SQLException
+ {
+ return wrapped.createStatement(resultSetType, resultSetConcurrency);
+ }
+
+ @Override
+ public Statement createStatement(int resultSetType, int resultSetConcurrency,
+ int resultSetHoldability) throws SQLException
+ {
+ return wrapped.createStatement(resultSetType, resultSetConcurrency, resultSetHoldability);
+ }
+
+ @Override
+ public Struct createStruct(String typeName, Object[] attributes) throws SQLException
+ {
+ return wrapped.createStruct(typeName, attributes);
+ }
+
+ @Override
+ public boolean getAutoCommit() throws SQLException
+ {
+ return wrapped.getAutoCommit();
+ }
+
+ @Override
+ public String getCatalog() throws SQLException
+ {
+ return wrapped.getCatalog();
+ }
+
+ @Override
+ public Properties getClientInfo() throws SQLException
+ {
+ return wrapped.getClientInfo();
+ }
+
+ @Override
+ public String getClientInfo(String name) throws SQLException
+ {
+ return wrapped.getClientInfo(name);
+ }
+
+ @Override
+ public int getHoldability() throws SQLException
+ {
+ return wrapped.getHoldability();
+ }
+
+ @Override
+ public DatabaseMetaData getMetaData() throws SQLException
+ {
+ return wrapped.getMetaData();
+ }
+
+ @Override
+ public int getTransactionIsolation() throws SQLException
+ {
+ return wrapped.getTransactionIsolation();
+ }
+
+ @Override
+ public Map> getTypeMap() throws SQLException
+ {
+ return wrapped.getTypeMap();
+ }
+
+ @Override
+ public SQLWarning getWarnings() throws SQLException
+ {
+ return wrapped.getWarnings();
+ }
+
+ @Override
+ public boolean isClosed() throws SQLException
+ {
+ return wrapped.isClosed();
+ }
+
+ @Override
+ public boolean isReadOnly() throws SQLException
+ {
+ return wrapped.isReadOnly();
+ }
+
+ @Override
+ public boolean isValid(int timeout) throws SQLException
+ {
+ return wrapped.isValid(timeout);
+ }
+
+ @Override
+ public String nativeSQL(String sql) throws SQLException
+ {
+ return wrapped.nativeSQL(sql);
+ }
+
+ @Override
+ public CallableStatement prepareCall(String sql) throws SQLException
+ {
+ return wrapped.prepareCall(sql);
+ }
+
+ @Override
+ public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency)
+ throws SQLException
+ {
+ return wrapped.prepareCall(sql, resultSetType, resultSetConcurrency);
+ }
+
+ @Override
+ public CallableStatement prepareCall(String sql, int resultSetType,
+ int resultSetConcurrency, int resultSetHoldability) throws SQLException
+ {
+ return wrapped.prepareCall(sql, resultSetType, resultSetConcurrency, resultSetHoldability);
+ }
+
+ @Override
+ public PreparedStatement prepareStatement(String sql) throws SQLException
+ {
+ return wrapped.prepareStatement(sql);
+ }
+
+ @Override
+ public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys)
+ throws SQLException
+ {
+ return wrapped.prepareStatement(sql, autoGeneratedKeys);
+ }
+
+ @Override
+ public PreparedStatement prepareStatement(String sql, int[] columnIndexes)
+ throws SQLException
+ {
+ return wrapped.prepareStatement(sql, columnIndexes);
+ }
+
+ @Override
+ public PreparedStatement prepareStatement(String sql, String[] columnNames)
+ throws SQLException
+ {
+ return wrapped.prepareStatement(sql, columnNames);
+ }
+
+ @Override
+ public PreparedStatement prepareStatement(String sql, int resultSetType,
+ int resultSetConcurrency) throws SQLException
+ {
+ return wrapped.prepareStatement(sql, resultSetType, resultSetConcurrency);
+ }
+
+ @Override
+ public PreparedStatement prepareStatement(String sql, int resultSetType,
+ int resultSetConcurrency, int resultSetHoldability) throws SQLException
+ {
+ return wrapped.prepareStatement(sql, resultSetType, resultSetConcurrency, resultSetHoldability);
+ }
+
+ @Override
+ public void releaseSavepoint(Savepoint savepoint) throws SQLException
+ {
+ wrapped.releaseSavepoint(savepoint);
+ }
+
+ @Override
+ public void rollback() throws SQLException
+ {
+ wrapped.rollback();
+ }
+
+ @Override
+ public void rollback(Savepoint savepoint) throws SQLException
+ {
+ wrapped.rollback(savepoint);
+ }
+
+ @Override
+ public void setAutoCommit(boolean autoCommit) throws SQLException
+ {
+ wrapped.setAutoCommit(autoCommit);
+ }
+
+ @Override
+ public void setCatalog(String catalog) throws SQLException
+ {
+ wrapped.setCatalog(catalog);
+ }
+
+ @Override
+ public void setClientInfo(Properties properties) throws SQLClientInfoException
+ {
+ wrapped.setClientInfo(properties);
+ }
+
+ @Override
+ public void setClientInfo(String name, String value) throws SQLClientInfoException
+ {
+ wrapped.setClientInfo(name, value);
+ }
+
+ @Override
+ public void setHoldability(int holdability) throws SQLException
+ {
+ wrapped.setHoldability(holdability);
+ }
+
+ @Override
+ public void setReadOnly(boolean readOnly) throws SQLException
+ {
+ wrapped.setReadOnly(readOnly);
+ }
+
+ @Override
+ public Savepoint setSavepoint() throws SQLException
+ {
+ return wrapped.setSavepoint();
+ }
+
+ @Override
+ public Savepoint setSavepoint(String name) throws SQLException
+ {
+ return wrapped.setSavepoint(name);
+ }
+
+ @Override
+ public void setTransactionIsolation(int level) throws SQLException
+ {
+ wrapped.setTransactionIsolation(level);
+ }
+
+ @Override
+ public void setTypeMap(Map> map) throws SQLException
+ {
+ wrapped.setTypeMap(map);
+ }
+
+ @Override
+ public void setSchema(String schema) throws SQLException
+ {
+ wrapped.setSchema(schema);
+ }
+
+ @Override
+ public String getSchema() throws SQLException
+ {
+ return wrapped.getSchema();
+ }
+
+ @Override
+ public void abort(Executor executor) throws SQLException
+ {
+ wrapped.abort(executor);
+ }
+
+ @Override
+ public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException
+ {
+ wrapped.setNetworkTimeout(executor, milliseconds);
+ }
+
+ @Override
+ public int getNetworkTimeout() throws SQLException
+ {
+ return wrapped.getNetworkTimeout();
+ }
+ }
+}
diff --git a/src/main/java/org/alfresco/repo/domain/schema/script/ScriptExecutorImpl.java b/src/main/java/org/alfresco/repo/domain/schema/script/ScriptExecutorImpl.java
index 3c9af0f0da..310c6800a5 100644
--- a/src/main/java/org/alfresco/repo/domain/schema/script/ScriptExecutorImpl.java
+++ b/src/main/java/org/alfresco/repo/domain/schema/script/ScriptExecutorImpl.java
@@ -1,643 +1,585 @@
-/*
- * #%L
- * Alfresco Repository
- * %%
- * Copyright (C) 2005 - 2016 Alfresco Software Limited
- * %%
- * This file is part of the Alfresco software.
- * If the software was purchased under a paid Alfresco license, the terms of
- * the paid license agreement will prevail. Otherwise, the software is
- * provided under the following open source license terms:
- *
- * Alfresco is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Alfresco is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with Alfresco. If not, see .
- * #L%
- */
-package org.alfresco.repo.domain.schema.script;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Properties;
-
-import javax.sql.DataSource;
-
-import org.alfresco.error.AlfrescoRuntimeException;
-import org.alfresco.repo.content.filestore.FileContentWriter;
-import org.alfresco.repo.domain.dialect.MySQLClusterNDBDialect;
-import org.alfresco.repo.domain.dialect.Dialect;
-import org.alfresco.repo.domain.dialect.MySQLInnoDBDialect;
-import org.alfresco.repo.domain.dialect.PostgreSQLDialect;
-import org.alfresco.service.cmr.repository.ContentWriter;
-import org.alfresco.util.LogUtil;
-import org.alfresco.util.TempFileProvider;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.springframework.core.io.Resource;
-import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
-import org.springframework.core.io.support.ResourcePatternResolver;
-
-
-public class ScriptExecutorImpl implements ScriptExecutor
-{
- /** The placeholder for the configured Dialect
class name: ${db.script.dialect} */
- private static final String PLACEHOLDER_DIALECT = "\\$\\{db\\.script\\.dialect\\}";
- /** The global property containing the default batch size used by --FOREACH */
- private static final String PROPERTY_DEFAULT_BATCH_SIZE = "system.upgrade.default.batchsize";
- private static final String MSG_EXECUTING_GENERATED_SCRIPT = "schema.update.msg.executing_generated_script";
- private static final String MSG_EXECUTING_COPIED_SCRIPT = "schema.update.msg.executing_copied_script";
- private static final String MSG_EXECUTING_STATEMENT = "schema.update.msg.executing_statement";
- private static final String MSG_OPTIONAL_STATEMENT_FAILED = "schema.update.msg.optional_statement_failed";
- private static final String ERR_STATEMENT_FAILED = "schema.update.err.statement_failed";
- private static final String ERR_SCRIPT_NOT_FOUND = "schema.update.err.script_not_found";
- private static final String ERR_STATEMENT_INCLUDE_BEFORE_SQL = "schema.update.err.statement_include_before_sql";
- private static final String ERR_STATEMENT_VAR_ASSIGNMENT_BEFORE_SQL = "schema.update.err.statement_var_assignment_before_sql";
- private static final String ERR_STATEMENT_VAR_ASSIGNMENT_FORMAT = "schema.update.err.statement_var_assignment_format";
- private static final String ERR_STATEMENT_VAR_ASSIGNMENT_NULL = "schema.update.err.statement_var_assignment_null";
- private static final String ERR_STATEMENT_TERMINATOR = "schema.update.err.statement_terminator";
- private static final String ERR_DELIMITER_SET_BEFORE_SQL = "schema.update.err.delimiter_set_before_sql";
- private static final String ERR_DELIMITER_INVALID = "schema.update.err.delimiter_invalid";
- private static final int DEFAULT_MAX_STRING_LENGTH = 1024;
- private static final int DEFAULT_MAX_STRING_LENGTH_NDB = 400;
- private static volatile int maxStringLength = DEFAULT_MAX_STRING_LENGTH;
- private Dialect dialect;
- private ResourcePatternResolver rpr = new PathMatchingResourcePatternResolver(this.getClass().getClassLoader());
- private static Log logger = LogFactory.getLog(ScriptExecutorImpl.class);
- private Properties globalProperties;
- private ThreadLocal executedStatementsThreadLocal = new ThreadLocal();
- private DataSource dataSource;
-
-
- /**
- * @return Returns the maximum number of characters that a string field can be
- */
- public static final int getMaxStringLength()
- {
- return ScriptExecutorImpl.maxStringLength;
- }
-
- /**
- * Truncates or returns a string that will fit into the string columns in the schema. Text fields can
- * either cope with arbitrarily long text fields or have the default limit, {@link #DEFAULT_MAX_STRING_LENGTH}.
- *
- * @param value the string to check
- * @return Returns a string that is short enough for {@link ScriptExecutorImpl#getMaxStringLength()}
- *
- * @since 3.2
- */
- public static final String trimStringForTextFields(String value)
- {
- if (value != null && value.length() > maxStringLength)
- {
- return value.substring(0, maxStringLength);
- }
- else
- {
- return value;
- }
- }
-
- /**
- * Sets the previously auto-detected Hibernate dialect.
- *
- * @param dialect
- * the dialect
- */
- public void setDialect(Dialect dialect)
- {
- this.dialect = dialect;
- }
-
- public ScriptExecutorImpl()
- {
- globalProperties = new Properties();
- }
-
- public void setDataSource(DataSource dataSource)
- {
- this.dataSource = dataSource;
- }
-
- /**
- * Sets the properties map from which we look up some configuration settings.
- *
- * @param globalProperties
- * the global properties
- */
- public void setGlobalProperties(Properties globalProperties)
- {
- this.globalProperties = globalProperties;
- }
-
-
- @Override
- public void executeScriptUrl(String scriptUrl) throws Exception
- {
- Connection connection = dataSource.getConnection();
- connection.setAutoCommit(true);
- try
- {
- executeScriptUrl(connection, scriptUrl);
- }
- finally
- {
- connection.close();
- }
- }
-
- private void executeScriptUrl(Connection connection, String scriptUrl) throws Exception
- {
- Dialect dialect = this.dialect;
- String dialectStr = dialect.getClass().getSimpleName();
- InputStream scriptInputStream = getScriptInputStream(dialect.getClass(), scriptUrl);
- // check that it exists
- if (scriptInputStream == null)
- {
- throw AlfrescoRuntimeException.create(ERR_SCRIPT_NOT_FOUND, scriptUrl);
- }
- // write the script to a temp location for future and failure reference
- File tempFile = null;
- try
- {
- tempFile = TempFileProvider.createTempFile("AlfrescoSchema-" + dialectStr + "-Update-", ".sql");
- ContentWriter writer = new FileContentWriter(tempFile);
- writer.putContent(scriptInputStream);
- }
- finally
- {
- try { scriptInputStream.close(); } catch (Throwable e) {} // usually a duplicate close
- }
- // now execute it
- String dialectScriptUrl = scriptUrl.replaceAll(PLACEHOLDER_DIALECT, dialect.getClass().getName());
- // Replace the script placeholders
- executeScriptFile(connection, tempFile, dialectScriptUrl);
- }
-
- /**
- * Replaces the dialect placeholder in the resource URL and attempts to find a file for
- * it. If not found, the dialect hierarchy will be walked until a compatible resource is
- * found. This makes it possible to have resources that are generic to all dialects.
- *
- * @return The Resource, otherwise null
- */
- private Resource getDialectResource(Class dialectClass, String resourceUrl)
- {
- // replace the dialect placeholder
- String dialectResourceUrl = resolveDialectUrl(dialectClass, resourceUrl);
- // get a handle on the resource
- Resource resource = rpr.getResource(dialectResourceUrl);
- if (!resource.exists())
- {
- // it wasn't found. Get the superclass of the dialect and try again
- Class superClass = dialectClass.getSuperclass();
- if (Dialect.class.isAssignableFrom(superClass))
- {
- // we still have a Dialect - try again
- return getDialectResource(superClass, resourceUrl);
- }
- else
- {
- // we have exhausted all options
- return null;
- }
- }
- else
- {
- // we have a handle to it
- return resource;
- }
- }
-
- /**
- * Takes resource URL containing the {@link ScriptExecutorImpl#PLACEHOLDER_DIALECT dialect placeholder text}
- * and substitutes the placeholder with the name of the given dialect's class.
- *
- * For example:
- *
- * resolveDialectUrl(MySQLInnoDBDialect.class, "classpath:alfresco/db/${db.script.dialect}/myfile.xml")
- *
- * would give the following String:
- *
- * classpath:alfresco/db/org.hibernate.dialect.MySQLInnoDBDialect/myfile.xml
- *
- *
- * @param dialectClass Class
- * @param resourceUrl String
- * @return String
- */
- private String resolveDialectUrl(Class dialectClass, String resourceUrl)
- {
- return resourceUrl.replaceAll(PLACEHOLDER_DIALECT, dialectClass.getName());
- }
-
- /**
- * Replaces the dialect placeholder in the script URL and attempts to find a file for
- * it. If not found, the dialect hierarchy will be walked until a compatible script is
- * found. This makes it possible to have scripts that are generic to all dialects.
- *
- * @return Returns an input stream onto the script, otherwise null
- */
- private InputStream getScriptInputStream(Class dialectClazz, String scriptUrl) throws Exception
- {
- Resource resource = getDialectResource(dialectClazz, scriptUrl);
- if (resource == null)
- {
- return null;
- }
- return resource.getInputStream();
- }
-
- /**
- * @param connection the DB connection to use
- * @param scriptFile the file containing the statements
- * @param scriptUrl the URL of the script to report. If this is null, the script
- * is assumed to have been auto-generated.
- */
- private void executeScriptFile(
- Connection connection,
- File scriptFile,
- String scriptUrl) throws Exception
- {
- final Dialect dialect = this.dialect;
-
- StringBuilder executedStatements = executedStatementsThreadLocal.get();
- if (executedStatements == null)
- {
- executedStatements = new StringBuilder(8094);
- executedStatementsThreadLocal.set(executedStatements);
- }
-
- if (scriptUrl == null)
- {
- LogUtil.info(logger, MSG_EXECUTING_GENERATED_SCRIPT, scriptFile);
- }
- else
- {
- LogUtil.info(logger, MSG_EXECUTING_COPIED_SCRIPT, scriptFile, scriptUrl);
- }
-
- InputStream scriptInputStream = new FileInputStream(scriptFile);
- BufferedReader reader = new BufferedReader(new InputStreamReader(scriptInputStream, "UTF-8"));
- try
- {
- int line = 0;
- // loop through all statements
- StringBuilder sb = new StringBuilder(1024);
- String fetchVarName = null;
- String fetchColumnName = null;
- Object defaultFetchValue = null;
- String batchTableName = null;
- boolean doBatch = false;
- int batchUpperLimit = 0;
- int batchSize = 1;
- Map varAssignments = new HashMap(13);
- String delimiter = ";";
- // Special variable assignments:
- if (dialect instanceof PostgreSQLDialect)
- {
- // Needs 1/0 for true/false
- varAssignments.put("true", "true");
- varAssignments.put("false", "false");
- varAssignments.put("TRUE", "TRUE");
- varAssignments.put("FALSE", "FALSE");
- }
- else
- {
- // Needs true/false as strings
- varAssignments.put("true", "1");
- varAssignments.put("false", "0");
- varAssignments.put("TRUE", "1");
- varAssignments.put("FALSE", "0");
- }
- long now = System.currentTimeMillis();
- varAssignments.put("now", new Long(now).toString());
- varAssignments.put("NOW", new Long(now).toString());
-
- while(true)
- {
- String sqlOriginal = reader.readLine();
- line++;
-
- if (sqlOriginal == null)
- {
- // nothing left in the file
- break;
- }
-
- // trim it
- String sql = sqlOriginal.trim();
- // Check of includes
- if (sql.startsWith("--INCLUDE:"))
- {
- if (sb.length() > 0)
- {
- // This can only be set before a new SQL statement
- throw AlfrescoRuntimeException.create(ERR_STATEMENT_INCLUDE_BEFORE_SQL, (line - 1), scriptUrl);
- }
- String includedScriptUrl = sql.substring(10, sql.length());
- // Execute the script in line
- executeScriptUrl(connection, includedScriptUrl);
- }
- // Check for variable assignment
- else if (sql.startsWith("--ASSIGN:"))
- {
- if (sb.length() > 0)
- {
- // This can only be set before a new SQL statement
- throw AlfrescoRuntimeException.create(ERR_STATEMENT_VAR_ASSIGNMENT_BEFORE_SQL, (line - 1), scriptUrl);
- }
- String assignStr = sql.substring(9, sql.length());
- String[] fetchMapping = assignStr.split("!");
- String[] assigns = fetchMapping[0].split("=");
- if (assigns.length != 2 || assigns[0].length() == 0 || assigns[1].length() == 0)
- {
- throw AlfrescoRuntimeException.create(ERR_STATEMENT_VAR_ASSIGNMENT_FORMAT, (line - 1), scriptUrl);
- }
- fetchVarName = assigns[0];
- fetchColumnName = assigns[1];
- if (fetchMapping.length > 1 && fetchMapping[1].length() > 0)
- {
- defaultFetchValue = fetchMapping[1];
- }
- continue;
- }
- // Handle looping control
- else if (sql.startsWith("--FOREACH"))
- {
- // --FOREACH table.column batch.size.property
- String[] args = sql.split("[ \\t]+");
- int sepIndex;
- if (args.length == 3 && (sepIndex = args[1].indexOf('.')) != -1)
- {
- doBatch = true;
- // Select the upper bound of the table column
- batchTableName = args[1].substring(0, sepIndex);
- String stmt = "SELECT MAX(" + args[1].substring(sepIndex+1) + ") AS upper_limit FROM " + batchTableName;
- Object fetchedVal = executeStatement(connection, stmt, "upper_limit", false, line, scriptFile);
- if (fetchedVal instanceof Number)
- {
- batchUpperLimit = ((Number)fetchedVal).intValue();
- // Read the batch size from the named property
- String batchSizeString = globalProperties.getProperty(args[2]);
- // Fall back to the default property
- if (batchSizeString == null)
- {
- batchSizeString = globalProperties.getProperty(PROPERTY_DEFAULT_BATCH_SIZE);
- }
- batchSize = batchSizeString == null ? 10000 : Integer.parseInt(batchSizeString);
- }
- }
- continue;
- }
- // Allow transaction delineation
- else if (sql.startsWith("--BEGIN TXN"))
- {
- connection.setAutoCommit(false);
- continue;
- }
- else if (sql.startsWith("--END TXN"))
- {
- connection.commit();
- connection.setAutoCommit(true);
- continue;
- }
- else if (sql.startsWith("--SET-DELIMITER:"))
- {
- if (sb.length() > 0)
- {
- // This can only be set before a new SQL statement
- throw AlfrescoRuntimeException.create(ERR_DELIMITER_SET_BEFORE_SQL, (line - 1), scriptUrl);
- }
-
- // We're good...so set the new delimiter
- String newDelim = sql.substring(16).trim();
- if (newDelim.length() == 0)
- {
- throw AlfrescoRuntimeException.create(ERR_DELIMITER_INVALID, (line - 1), scriptUrl);
- }
- delimiter = newDelim;
- }
-
- // Check for comments
- if (sql.length() == 0 ||
- sql.startsWith( "--" ) ||
- sql.startsWith( "//" ) ||
- sql.startsWith( "/*" ) )
- {
- if (sb.length() > 0)
- {
- // we have an unterminated statement
- throw AlfrescoRuntimeException.create(ERR_STATEMENT_TERMINATOR, delimiter, (line - 1), scriptUrl);
- }
- // there has not been anything to execute - it's just a comment line
- continue;
- }
- // have we reached the end of a statement?
- boolean execute = false;
- boolean optional = false;
- if (sql.endsWith(delimiter))
- {
- sql = sql.substring(0, sql.length() - 1);
- execute = true;
- optional = false;
- }
- else if (sql.endsWith("(optional)") || sql.endsWith("(OPTIONAL)"))
- {
- // Get the end of statement
- int endIndex = sql.lastIndexOf(delimiter);
- if (endIndex > -1)
- {
- sql = sql.substring(0, endIndex);
- execute = true;
- optional = true;
- }
- else
- {
- // Ends with "(optional)" but there is no semi-colon.
- // Just take it at face value and probably fail.
- }
- }
- // Add newline
- if (sb.length() > 0)
- {
- sb.append("\n");
- }
- // Add leading whitespace for formatting
- int whitespaceCount = sqlOriginal.indexOf(sql);
- for (int i = 0; i < whitespaceCount; i++)
- {
- sb.append(" ");
- }
- // append to the statement being built up
- sb.append(sql);
- // execute, if required
- if (execute)
- {
- // Now substitute and execute the statement the appropriate number of times
- String unsubstituted = sb.toString();
- for(int lowerBound = 0; lowerBound <= batchUpperLimit; lowerBound += batchSize)
- {
- sql = unsubstituted;
-
- // Substitute in the next pair of range parameters
- if (doBatch)
- {
- logger.info("Processing from " + lowerBound + " to " + (lowerBound + batchSize) + " rows of " + batchUpperLimit + " rows from table " + batchTableName + ".");
- varAssignments.put("LOWERBOUND", String.valueOf(lowerBound));
- varAssignments.put("UPPERBOUND", String.valueOf(lowerBound + batchSize - 1));
- }
-
- // Perform variable replacement using the ${var} format
- for (Map.Entry entry : varAssignments.entrySet())
- {
- String var = entry.getKey();
- Object val = entry.getValue();
- sql = sql.replaceAll("\\$\\{" + var + "\\}", val.toString());
- }
-
- // Handle the 0/1 values that PostgreSQL doesn't translate to TRUE
- if (this.dialect != null && this.dialect instanceof PostgreSQLDialect)
- {
- sql = sql.replaceAll("\\$\\{TRUE\\}", "TRUE");
- }
- else
- {
- sql = sql.replaceAll("\\$\\{TRUE\\}", "1");
- }
-
- if (this.dialect != null && this.dialect instanceof MySQLInnoDBDialect)
- {
- // note: enable bootstrap on MySQL 5.5 (eg. for auto-generated SQL)
- sql = sql.replaceAll("(?i)TYPE=InnoDB", "ENGINE=InnoDB");
- }
-
- if (this.dialect != null && this.dialect instanceof MySQLClusterNDBDialect)
- {
- // note: enable bootstrap on MySQL Cluster NDB
- /*
- * WARNING: Experimental/unsupported - see MySQLClusterNDBDialect !
- */
- sql = sql.replaceAll("(?i)TYPE=InnoDB", "ENGINE=NDB"); // belts-and-braces
- sql = sql.replaceAll("(?i)ENGINE=InnoDB", "ENGINE=NDB");
-
- sql = sql.replaceAll("(?i) BIT ", " BOOLEAN ");
- sql = sql.replaceAll("(?i) BIT,", " BOOLEAN,");
-
- sql = sql.replaceAll("(?i) string_value text", " string_value VARCHAR("+DEFAULT_MAX_STRING_LENGTH_NDB+")");
-
- sql = sql.replaceAll("(?i) VARCHAR(4000)", "TEXT(4000)");
- }
-
- Object fetchedVal = executeStatement(connection, sql, fetchColumnName, optional, line, scriptFile);
- if (fetchVarName != null && fetchColumnName != null)
- {
- if (fetchedVal == null)
- {
- fetchedVal = defaultFetchValue;
- }
- // We must have some value
- if (fetchedVal == null)
- {
- // The variable is null (not even empty)
- throw AlfrescoRuntimeException.create(ERR_STATEMENT_VAR_ASSIGNMENT_NULL, fetchVarName, fetchVarName, (line - 1), scriptUrl);
- }
- varAssignments.put(fetchVarName, fetchedVal);
- }
- }
- sb.setLength(0);
- fetchVarName = null;
- fetchColumnName = null;
- defaultFetchValue = null;
- batchTableName = null;
- doBatch = false;
- batchUpperLimit = 0;
- batchSize = 1;
- }
- }
- }
- finally
- {
- try { reader.close(); } catch (Throwable e) {}
- try { scriptInputStream.close(); } catch (Throwable e) {}
- }
- }
-
- /**
- * Execute the given SQL statement, absorbing exceptions that we expect during
- * schema creation or upgrade.
- *
- * @param fetchColumnName the name of the column value to return
- */
- private Object executeStatement(
- Connection connection,
- String sql,
- String fetchColumnName,
- boolean optional,
- int line,
- File file) throws Exception
- {
- StringBuilder executedStatements = executedStatementsThreadLocal.get();
- if (executedStatements == null)
- {
- throw new IllegalArgumentException("The executedStatementsThreadLocal must be populated");
- }
-
- Statement stmt = connection.createStatement();
- Object ret = null;
- try
- {
- if (logger.isDebugEnabled())
- {
- LogUtil.debug(logger, MSG_EXECUTING_STATEMENT, sql);
- }
- boolean haveResults = stmt.execute(sql);
- // Record the statement
- executedStatements.append(sql).append(";\n\n");
- if (haveResults && fetchColumnName != null)
- {
- ResultSet rs = stmt.getResultSet();
- if (rs.next())
- {
- // Get the result value
- ret = rs.getObject(fetchColumnName);
- }
- }
- }
- catch (SQLException e)
- {
- if (optional)
- {
- // it was marked as optional, so we just ignore it
- LogUtil.debug(logger, MSG_OPTIONAL_STATEMENT_FAILED, sql, e.getMessage(), file.getAbsolutePath(), line);
- }
- else
- {
- LogUtil.error(logger, ERR_STATEMENT_FAILED, sql, e.getMessage(), file.getAbsolutePath(), line);
- throw e;
- }
- }
- finally
- {
- try { stmt.close(); } catch (Throwable e) {}
- }
- return ret;
- }
-}
+/*
+ * #%L
+ * Alfresco Repository
+ * %%
+ * Copyright (C) 2005 - 2016 Alfresco Software Limited
+ * %%
+ * This file is part of the Alfresco software.
+ * If the software was purchased under a paid Alfresco license, the terms of
+ * the paid license agreement will prevail. Otherwise, the software is
+ * provided under the following open source license terms:
+ *
+ * Alfresco is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Alfresco is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with Alfresco. If not, see .
+ * #L%
+ */
+package org.alfresco.repo.domain.schema.script;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+import javax.sql.DataSource;
+
+import org.alfresco.error.AlfrescoRuntimeException;
+import org.alfresco.repo.content.filestore.FileContentWriter;
+import org.alfresco.repo.domain.dialect.MySQLClusterNDBDialect;
+import org.alfresco.repo.domain.dialect.Dialect;
+import org.alfresco.repo.domain.dialect.MySQLInnoDBDialect;
+import org.alfresco.repo.domain.dialect.PostgreSQLDialect;
+import org.alfresco.service.cmr.repository.ContentWriter;
+import org.alfresco.util.DialectUtil;
+import org.alfresco.util.LogUtil;
+import org.alfresco.util.TempFileProvider;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.springframework.core.io.Resource;
+import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
+import org.springframework.core.io.support.ResourcePatternResolver;
+
+
+public class ScriptExecutorImpl implements ScriptExecutor
+{
+ /** The global property containing the default batch size used by --FOREACH */
+ private static final String PROPERTY_DEFAULT_BATCH_SIZE = "system.upgrade.default.batchsize";
+ private static final String MSG_EXECUTING_GENERATED_SCRIPT = "schema.update.msg.executing_generated_script";
+ private static final String MSG_EXECUTING_COPIED_SCRIPT = "schema.update.msg.executing_copied_script";
+ private static final String MSG_EXECUTING_STATEMENT = "schema.update.msg.executing_statement";
+ private static final String MSG_OPTIONAL_STATEMENT_FAILED = "schema.update.msg.optional_statement_failed";
+ private static final String ERR_STATEMENT_FAILED = "schema.update.err.statement_failed";
+ private static final String ERR_SCRIPT_NOT_FOUND = "schema.update.err.script_not_found";
+ private static final String ERR_STATEMENT_INCLUDE_BEFORE_SQL = "schema.update.err.statement_include_before_sql";
+ private static final String ERR_STATEMENT_VAR_ASSIGNMENT_BEFORE_SQL = "schema.update.err.statement_var_assignment_before_sql";
+ private static final String ERR_STATEMENT_VAR_ASSIGNMENT_FORMAT = "schema.update.err.statement_var_assignment_format";
+ private static final String ERR_STATEMENT_VAR_ASSIGNMENT_NULL = "schema.update.err.statement_var_assignment_null";
+ private static final String ERR_STATEMENT_TERMINATOR = "schema.update.err.statement_terminator";
+ private static final String ERR_DELIMITER_SET_BEFORE_SQL = "schema.update.err.delimiter_set_before_sql";
+ private static final String ERR_DELIMITER_INVALID = "schema.update.err.delimiter_invalid";
+ private static final int DEFAULT_MAX_STRING_LENGTH = 1024;
+ private static final int DEFAULT_MAX_STRING_LENGTH_NDB = 400;
+ private static volatile int maxStringLength = DEFAULT_MAX_STRING_LENGTH;
+ private Dialect dialect;
+ private ResourcePatternResolver rpr = new PathMatchingResourcePatternResolver(this.getClass().getClassLoader());
+ private static Log logger = LogFactory.getLog(ScriptExecutorImpl.class);
+ private Properties globalProperties;
+ private ThreadLocal executedStatementsThreadLocal = new ThreadLocal();
+ private DataSource dataSource;
+
+
+ /**
+ * @return Returns the maximum number of characters that a string field can be
+ */
+ public static final int getMaxStringLength()
+ {
+ return ScriptExecutorImpl.maxStringLength;
+ }
+
+ /**
+ * Truncates or returns a string that will fit into the string columns in the schema. Text fields can
+ * either cope with arbitrarily long text fields or have the default limit, {@link #DEFAULT_MAX_STRING_LENGTH}.
+ *
+ * @param value the string to check
+ * @return Returns a string that is short enough for {@link ScriptExecutorImpl#getMaxStringLength()}
+ *
+ * @since 3.2
+ */
+ public static final String trimStringForTextFields(String value)
+ {
+ if (value != null && value.length() > maxStringLength)
+ {
+ return value.substring(0, maxStringLength);
+ }
+ else
+ {
+ return value;
+ }
+ }
+
+ /**
+ * Sets the previously auto-detected Hibernate dialect.
+ *
+ * @param dialect
+ * the dialect
+ */
+ public void setDialect(Dialect dialect)
+ {
+ this.dialect = dialect;
+ }
+
+ public ScriptExecutorImpl()
+ {
+ globalProperties = new Properties();
+ }
+
+ public void setDataSource(DataSource dataSource)
+ {
+ this.dataSource = dataSource;
+ }
+
+ /**
+ * Sets the properties map from which we look up some configuration settings.
+ *
+ * @param globalProperties
+ * the global properties
+ */
+ public void setGlobalProperties(Properties globalProperties)
+ {
+ this.globalProperties = globalProperties;
+ }
+
+
+ @Override
+ public void executeScriptUrl(String scriptUrl) throws Exception
+ {
+ Connection connection = dataSource.getConnection();
+ connection.setAutoCommit(true);
+ try
+ {
+ executeScriptUrl(connection, scriptUrl);
+ }
+ finally
+ {
+ connection.close();
+ }
+ }
+
+ private void executeScriptUrl(Connection connection, String scriptUrl) throws Exception
+ {
+ Dialect dialect = this.dialect;
+ String dialectStr = dialect.getClass().getSimpleName();
+ InputStream scriptInputStream = getScriptInputStream(dialect.getClass(), scriptUrl);
+ // check that it exists
+ if (scriptInputStream == null)
+ {
+ throw AlfrescoRuntimeException.create(ERR_SCRIPT_NOT_FOUND, scriptUrl);
+ }
+ // write the script to a temp location for future and failure reference
+ File tempFile = null;
+ try
+ {
+ tempFile = TempFileProvider.createTempFile("AlfrescoSchema-" + dialectStr + "-Update-", ".sql");
+ ContentWriter writer = new FileContentWriter(tempFile);
+ writer.putContent(scriptInputStream);
+ }
+ finally
+ {
+ try { scriptInputStream.close(); } catch (Throwable e) {} // usually a duplicate close
+ }
+ // now execute it
+ String dialectScriptUrl = scriptUrl.replaceAll(DialectUtil.PLACEHOLDER_DIALECT, dialect.getClass().getName());
+ // Replace the script placeholders
+ executeScriptFile(connection, tempFile, dialectScriptUrl);
+ }
+
+ /**
+ * Replaces the dialect placeholder in the script URL and attempts to find a file for
+ * it. If not found, the dialect hierarchy will be walked until a compatible script is
+ * found. This makes it possible to have scripts that are generic to all dialects.
+ *
+ * @return Returns an input stream onto the script, otherwise null
+ */
+ private InputStream getScriptInputStream(Class dialectClazz, String scriptUrl) throws Exception
+ {
+ Resource resource = DialectUtil.getDialectResource(rpr, dialectClazz, scriptUrl);
+ if (resource == null)
+ {
+ return null;
+ }
+ return resource.getInputStream();
+ }
+
+ /**
+ * @param connection the DB connection to use
+ * @param scriptFile the file containing the statements
+ * @param scriptUrl the URL of the script to report. If this is null, the script
+ * is assumed to have been auto-generated.
+ */
+ private void executeScriptFile(
+ Connection connection,
+ File scriptFile,
+ String scriptUrl) throws Exception
+ {
+ final Dialect dialect = this.dialect;
+
+ StringBuilder executedStatements = executedStatementsThreadLocal.get();
+ if (executedStatements == null)
+ {
+ executedStatements = new StringBuilder(8094);
+ executedStatementsThreadLocal.set(executedStatements);
+ }
+
+ if (scriptUrl == null)
+ {
+ LogUtil.info(logger, MSG_EXECUTING_GENERATED_SCRIPT, scriptFile);
+ }
+ else
+ {
+ LogUtil.info(logger, MSG_EXECUTING_COPIED_SCRIPT, scriptFile, scriptUrl);
+ }
+
+ InputStream scriptInputStream = new FileInputStream(scriptFile);
+ BufferedReader reader = new BufferedReader(new InputStreamReader(scriptInputStream, "UTF-8"));
+ try
+ {
+ int line = 0;
+ // loop through all statements
+ StringBuilder sb = new StringBuilder(1024);
+ String fetchVarName = null;
+ String fetchColumnName = null;
+ Object defaultFetchValue = null;
+ String batchTableName = null;
+ boolean doBatch = false;
+ int batchUpperLimit = 0;
+ int batchSize = 1;
+ Map varAssignments = new HashMap(13);
+ String delimiter = ";";
+ // Special variable assignments:
+ if (dialect instanceof PostgreSQLDialect)
+ {
+ // Needs 1/0 for true/false
+ varAssignments.put("true", "true");
+ varAssignments.put("false", "false");
+ varAssignments.put("TRUE", "TRUE");
+ varAssignments.put("FALSE", "FALSE");
+ }
+ else
+ {
+ // Needs true/false as strings
+ varAssignments.put("true", "1");
+ varAssignments.put("false", "0");
+ varAssignments.put("TRUE", "1");
+ varAssignments.put("FALSE", "0");
+ }
+ long now = System.currentTimeMillis();
+ varAssignments.put("now", new Long(now).toString());
+ varAssignments.put("NOW", new Long(now).toString());
+
+ while(true)
+ {
+ String sqlOriginal = reader.readLine();
+ line++;
+
+ if (sqlOriginal == null)
+ {
+ // nothing left in the file
+ break;
+ }
+
+ // trim it
+ String sql = sqlOriginal.trim();
+ // Check of includes
+ if (sql.startsWith("--INCLUDE:"))
+ {
+ if (sb.length() > 0)
+ {
+ // This can only be set before a new SQL statement
+ throw AlfrescoRuntimeException.create(ERR_STATEMENT_INCLUDE_BEFORE_SQL, (line - 1), scriptUrl);
+ }
+ String includedScriptUrl = sql.substring(10, sql.length());
+ // Execute the script in line
+ executeScriptUrl(connection, includedScriptUrl);
+ }
+ // Check for variable assignment
+ else if (sql.startsWith("--ASSIGN:"))
+ {
+ if (sb.length() > 0)
+ {
+ // This can only be set before a new SQL statement
+ throw AlfrescoRuntimeException.create(ERR_STATEMENT_VAR_ASSIGNMENT_BEFORE_SQL, (line - 1), scriptUrl);
+ }
+ String assignStr = sql.substring(9, sql.length());
+ String[] fetchMapping = assignStr.split("!");
+ String[] assigns = fetchMapping[0].split("=");
+ if (assigns.length != 2 || assigns[0].length() == 0 || assigns[1].length() == 0)
+ {
+ throw AlfrescoRuntimeException.create(ERR_STATEMENT_VAR_ASSIGNMENT_FORMAT, (line - 1), scriptUrl);
+ }
+ fetchVarName = assigns[0];
+ fetchColumnName = assigns[1];
+ if (fetchMapping.length > 1 && fetchMapping[1].length() > 0)
+ {
+ defaultFetchValue = fetchMapping[1];
+ }
+ continue;
+ }
+ // Handle looping control
+ else if (sql.startsWith("--FOREACH"))
+ {
+ // --FOREACH table.column batch.size.property
+ String[] args = sql.split("[ \\t]+");
+ int sepIndex;
+ if (args.length == 3 && (sepIndex = args[1].indexOf('.')) != -1)
+ {
+ doBatch = true;
+ // Select the upper bound of the table column
+ batchTableName = args[1].substring(0, sepIndex);
+ String stmt = "SELECT MAX(" + args[1].substring(sepIndex+1) + ") AS upper_limit FROM " + batchTableName;
+ Object fetchedVal = executeStatement(connection, stmt, "upper_limit", false, line, scriptFile);
+ if (fetchedVal instanceof Number)
+ {
+ batchUpperLimit = ((Number)fetchedVal).intValue();
+ // Read the batch size from the named property
+ String batchSizeString = globalProperties.getProperty(args[2]);
+ // Fall back to the default property
+ if (batchSizeString == null)
+ {
+ batchSizeString = globalProperties.getProperty(PROPERTY_DEFAULT_BATCH_SIZE);
+ }
+ batchSize = batchSizeString == null ? 10000 : Integer.parseInt(batchSizeString);
+ }
+ }
+ continue;
+ }
+ // Allow transaction delineation
+ else if (sql.startsWith("--BEGIN TXN"))
+ {
+ connection.setAutoCommit(false);
+ continue;
+ }
+ else if (sql.startsWith("--END TXN"))
+ {
+ connection.commit();
+ connection.setAutoCommit(true);
+ continue;
+ }
+ else if (sql.startsWith("--SET-DELIMITER:"))
+ {
+ if (sb.length() > 0)
+ {
+ // This can only be set before a new SQL statement
+ throw AlfrescoRuntimeException.create(ERR_DELIMITER_SET_BEFORE_SQL, (line - 1), scriptUrl);
+ }
+
+ // We're good...so set the new delimiter
+ String newDelim = sql.substring(16).trim();
+ if (newDelim.length() == 0)
+ {
+ throw AlfrescoRuntimeException.create(ERR_DELIMITER_INVALID, (line - 1), scriptUrl);
+ }
+ delimiter = newDelim;
+ }
+
+ // Check for comments
+ if (sql.length() == 0 ||
+ sql.startsWith( "--" ) ||
+ sql.startsWith( "//" ) ||
+ sql.startsWith( "/*" ) )
+ {
+ if (sb.length() > 0)
+ {
+ // we have an unterminated statement
+ throw AlfrescoRuntimeException.create(ERR_STATEMENT_TERMINATOR, delimiter, (line - 1), scriptUrl);
+ }
+ // there has not been anything to execute - it's just a comment line
+ continue;
+ }
+ // have we reached the end of a statement?
+ boolean execute = false;
+ boolean optional = false;
+ if (sql.endsWith(delimiter))
+ {
+ sql = sql.substring(0, sql.length() - delimiter.length()); // strip the (possibly multi-char) delimiter
+ execute = true;
+ optional = false;
+ }
+ else if (sql.endsWith("(optional)") || sql.endsWith("(OPTIONAL)"))
+ {
+ // Get the end of statement
+ int endIndex = sql.lastIndexOf(delimiter);
+ if (endIndex > -1)
+ {
+ sql = sql.substring(0, endIndex);
+ execute = true;
+ optional = true;
+ }
+ else
+ {
+ // Ends with "(optional)" but there is no semi-colon.
+ // Just take it at face value and probably fail.
+ }
+ }
+ // Add newline
+ if (sb.length() > 0)
+ {
+ sb.append("\n");
+ }
+ // Add leading whitespace for formatting
+ int whitespaceCount = sqlOriginal.indexOf(sql);
+ for (int i = 0; i < whitespaceCount; i++)
+ {
+ sb.append(" ");
+ }
+ // append to the statement being built up
+ sb.append(sql);
+ // execute, if required
+ if (execute)
+ {
+ // Now substitute and execute the statement the appropriate number of times
+ String unsubstituted = sb.toString();
+ for(int lowerBound = 0; lowerBound <= batchUpperLimit; lowerBound += batchSize)
+ {
+ sql = unsubstituted;
+
+ // Substitute in the next pair of range parameters
+ if (doBatch)
+ {
+ logger.info("Processing from " + lowerBound + " to " + (lowerBound + batchSize) + " rows of " + batchUpperLimit + " rows from table " + batchTableName + ".");
+ varAssignments.put("LOWERBOUND", String.valueOf(lowerBound));
+ varAssignments.put("UPPERBOUND", String.valueOf(lowerBound + batchSize - 1));
+ }
+
+ // Perform variable replacement using the ${var} format
+ for (Map.Entry<String, Object> entry : varAssignments.entrySet())
+ {
+ String var = entry.getKey();
+ Object val = entry.getValue();
+ sql = sql.replaceAll("\\$\\{" + var + "\\}", val.toString());
+ }
+
+ // Handle the 0/1 values that PostgreSQL doesn't translate to TRUE
+ if (this.dialect != null && this.dialect instanceof PostgreSQLDialect)
+ {
+ sql = sql.replaceAll("\\$\\{TRUE\\}", "TRUE");
+ }
+ else
+ {
+ sql = sql.replaceAll("\\$\\{TRUE\\}", "1");
+ }
+
+ if (this.dialect != null && this.dialect instanceof MySQLInnoDBDialect)
+ {
+ // note: enable bootstrap on MySQL 5.5 (eg. for auto-generated SQL)
+ sql = sql.replaceAll("(?i)TYPE=InnoDB", "ENGINE=InnoDB");
+ }
+
+ if (this.dialect != null && this.dialect instanceof MySQLClusterNDBDialect)
+ {
+ // note: enable bootstrap on MySQL Cluster NDB
+ /*
+ * WARNING: Experimental/unsupported - see MySQLClusterNDBDialect !
+ */
+ sql = sql.replaceAll("(?i)TYPE=InnoDB", "ENGINE=NDB"); // belts-and-braces
+ sql = sql.replaceAll("(?i)ENGINE=InnoDB", "ENGINE=NDB");
+
+ sql = sql.replaceAll("(?i) BIT ", " BOOLEAN ");
+ sql = sql.replaceAll("(?i) BIT,", " BOOLEAN,");
+
+ sql = sql.replaceAll("(?i) string_value text", " string_value VARCHAR("+DEFAULT_MAX_STRING_LENGTH_NDB+")");
+
+ sql = sql.replaceAll("(?i) VARCHAR\\(4000\\)", " TEXT(4000)");
+ }
+
+ Object fetchedVal = executeStatement(connection, sql, fetchColumnName, optional, line, scriptFile);
+ if (fetchVarName != null && fetchColumnName != null)
+ {
+ if (fetchedVal == null)
+ {
+ fetchedVal = defaultFetchValue;
+ }
+ // We must have some value
+ if (fetchedVal == null)
+ {
+ // The variable is null (not even empty)
+ throw AlfrescoRuntimeException.create(ERR_STATEMENT_VAR_ASSIGNMENT_NULL, fetchVarName, fetchVarName, (line - 1), scriptUrl);
+ }
+ varAssignments.put(fetchVarName, fetchedVal);
+ }
+ }
+ sb.setLength(0);
+ fetchVarName = null;
+ fetchColumnName = null;
+ defaultFetchValue = null;
+ batchTableName = null;
+ doBatch = false;
+ batchUpperLimit = 0;
+ batchSize = 1;
+ }
+ }
+ }
+ finally
+ {
+ try { reader.close(); } catch (Throwable e) {}
+ try { scriptInputStream.close(); } catch (Throwable e) {}
+ }
+ }
+
+ /**
+ * Execute the given SQL statement, absorbing exceptions that we expect during
+ * schema creation or upgrade.
+ *
+ * @param fetchColumnName the name of the column value to return
+ */
+ private Object executeStatement(
+ Connection connection,
+ String sql,
+ String fetchColumnName,
+ boolean optional,
+ int line,
+ File file) throws Exception
+ {
+ StringBuilder executedStatements = executedStatementsThreadLocal.get();
+ if (executedStatements == null)
+ {
+ throw new IllegalArgumentException("The executedStatementsThreadLocal must be populated");
+ }
+
+ Statement stmt = connection.createStatement();
+ Object ret = null;
+ try
+ {
+ if (logger.isDebugEnabled())
+ {
+ LogUtil.debug(logger, MSG_EXECUTING_STATEMENT, sql);
+ }
+ boolean haveResults = stmt.execute(sql);
+ // Record the statement
+ executedStatements.append(sql).append(";\n\n");
+ if (haveResults && fetchColumnName != null)
+ {
+ ResultSet rs = stmt.getResultSet();
+ if (rs.next())
+ {
+ // Get the result value
+ ret = rs.getObject(fetchColumnName);
+ }
+ }
+ }
+ catch (SQLException e)
+ {
+ if (optional)
+ {
+ // it was marked as optional, so we just ignore it
+ LogUtil.debug(logger, MSG_OPTIONAL_STATEMENT_FAILED, sql, e.getMessage(), file.getAbsolutePath(), line);
+ }
+ else
+ {
+ LogUtil.error(logger, ERR_STATEMENT_FAILED, sql, e.getMessage(), file.getAbsolutePath(), line);
+ throw e;
+ }
+ }
+ finally
+ {
+ try { stmt.close(); } catch (Throwable e) {}
+ }
+ return ret;
+ }
+}
diff --git a/src/main/java/org/alfresco/util/DBScriptUtil.java b/src/main/java/org/alfresco/util/DBScriptUtil.java
new file mode 100644
index 0000000000..e74533e1aa
--- /dev/null
+++ b/src/main/java/org/alfresco/util/DBScriptUtil.java
@@ -0,0 +1,107 @@
+/*
+ * #%L
+ * Alfresco Repository
+ * %%
+ * Copyright (C) 2005 - 2019 Alfresco Software Limited
+ * %%
+ * This file is part of the Alfresco software.
+ * If the software was purchased under a paid Alfresco license, the terms of
+ * the paid license agreement will prevail. Otherwise, the software is
+ * provided under the following open source license terms:
+ *
+ * Alfresco is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Alfresco is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
+ * #L%
+ */
+package org.alfresco.util;
+
+import java.io.IOException;
+import java.io.LineNumberReader;
+
+import org.springframework.core.io.support.EncodedResource;
+
+public abstract class DBScriptUtil
+{
+ private static final String DEFAULT_SCRIPT_COMMENT_PREFIX = "--";
+
+ /**
+ * Read a script from the provided EncodedResource and build a String containing
+ * the lines.
+ *
+ * @param resource
+ * the resource (potentially associated with a specific encoding) to
+ * load the SQL script from
+ * @return a String containing the script lines
+ */
+ public static String readScript(EncodedResource resource) throws IOException
+ {
+ return readScript(resource, DEFAULT_SCRIPT_COMMENT_PREFIX);
+ }
+
+ /**
+ * Read a script from the provided EncodedResource, using the supplied line
+ * comment prefix, and build a String containing the lines.
+ *
+ * @param resource
+ * the resource (potentially associated with a specific encoding) to
+ * load the SQL script from
+ * @param lineCommentPrefix
+ * the prefix that identifies comments in the SQL script (typically
+ * "--")
+ * @return a String containing the script lines
+ */
+ private static String readScript(EncodedResource resource, String lineCommentPrefix) throws IOException
+ {
+ LineNumberReader lineNumberReader = new LineNumberReader(resource.getReader());
+ try
+ {
+ return readScript(lineNumberReader, lineCommentPrefix);
+ }
+ finally
+ {
+ lineNumberReader.close();
+ }
+ }
+
+ /**
+ * Read a script from the provided LineNumberReader, using the supplied line
+ * comment prefix, and build a String containing the lines.
+ *
+ * @param lineNumberReader
+ * the LineNumberReader containing the script to be processed
+ * @param lineCommentPrefix
+ * the prefix that identifies comments in the SQL script (typically
+ * "--")
+ * @return a String containing the script lines
+ */
+ private static String readScript(LineNumberReader lineNumberReader, String lineCommentPrefix) throws IOException
+ {
+ String statement = lineNumberReader.readLine();
+ StringBuilder scriptBuilder = new StringBuilder();
+ while (statement != null)
+ {
+ if (lineCommentPrefix != null && !statement.startsWith(lineCommentPrefix))
+ {
+ if (scriptBuilder.length() > 0)
+ {
+ scriptBuilder.append('\n');
+ }
+ scriptBuilder.append(statement);
+ }
+ statement = lineNumberReader.readLine();
+ }
+
+ return scriptBuilder.toString();
+ }
+
+}
diff --git a/src/main/java/org/alfresco/util/DialectUtil.java b/src/main/java/org/alfresco/util/DialectUtil.java
new file mode 100644
index 0000000000..22c4428e8b
--- /dev/null
+++ b/src/main/java/org/alfresco/util/DialectUtil.java
@@ -0,0 +1,94 @@
+/*
+ * #%L
+ * Alfresco Repository
+ * %%
+ * Copyright (C) 2005 - 2019 Alfresco Software Limited
+ * %%
+ * This file is part of the Alfresco software.
+ * If the software was purchased under a paid Alfresco license, the terms of
+ * the paid license agreement will prevail. Otherwise, the software is
+ * provided under the following open source license terms:
+ *
+ * Alfresco is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Alfresco is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
+ * #L%
+ */
+package org.alfresco.util;
+
+import org.alfresco.repo.domain.dialect.Dialect;
+import org.springframework.core.io.Resource;
+import org.springframework.core.io.support.ResourcePatternResolver;
+
+public abstract class DialectUtil
+{
+ /** The placeholder for the configured Dialect class name: ${db.script.dialect} */
+ public static final String PLACEHOLDER_DIALECT = "\\$\\{db\\.script\\.dialect\\}";
+
+ /**
+ * Replaces the dialect placeholder in the resource URL and attempts to find a
+ * file for it. If not found, the dialect hierarchy will be walked until a
+ * compatible resource is found. This makes it possible to have resources that
+ * are generic to all dialects.
+ *
+ * @return The Resource, otherwise null
+ */
+ public static Resource getDialectResource(ResourcePatternResolver resourcePatternResolver, Class<?> dialectClass, String resourceUrl)
+ {
+ // replace the dialect placeholder
+ String dialectResourceUrl = resolveDialectUrl(dialectClass, resourceUrl);
+ // get a handle on the resource
+ Resource resource = resourcePatternResolver.getResource(dialectResourceUrl);
+ if (!resource.exists())
+ {
+ // it wasn't found. Get the superclass of the dialect and try again
+ Class<?> superClass = dialectClass.getSuperclass();
+ if (Dialect.class.isAssignableFrom(superClass))
+ {
+ // we still have a Dialect - try again
+ return getDialectResource(resourcePatternResolver, superClass, resourceUrl);
+ }
+ else
+ {
+ // we have exhausted all options
+ return null;
+ }
+ }
+ else
+ {
+ // we have a handle to it
+ return resource;
+ }
+ }
+
+ /**
+ * Takes resource URL containing the {@link DialectUtil#PLACEHOLDER_DIALECT
+ * dialect placeholder text} and substitutes the placeholder with the name of
+ * the given dialect's class.
+ *
+ * For example:
+ *
+ *
+ * resolveDialectUrl(MySQLInnoDBDialect.class, "classpath:alfresco/db/${db.script.dialect}/myfile.xml")
+ *
+ *
+ * would give the following String:
+ *
+ *
+ * classpath:alfresco/db/org.hibernate.dialect.MySQLInnoDBDialect/myfile.xml
+ *
+ */
+ public static String resolveDialectUrl(Class<?> dialectClass, String resourceUrl)
+ {
+ return resourceUrl.replaceAll(PLACEHOLDER_DIALECT, dialectClass.getName());
+ }
+}
diff --git a/src/main/java/org/alfresco/util/schemacomp/ExportDb.java b/src/main/java/org/alfresco/util/schemacomp/ExportDb.java
index 2768af31cc..54ee08076f 100644
--- a/src/main/java/org/alfresco/util/schemacomp/ExportDb.java
+++ b/src/main/java/org/alfresco/util/schemacomp/ExportDb.java
@@ -1,515 +1,641 @@
-/*
- * #%L
- * Alfresco Repository
- * %%
- * Copyright (C) 2005 - 2016 Alfresco Software Limited
- * %%
- * This file is part of the Alfresco software.
- * If the software was purchased under a paid Alfresco license, the terms of
- * the paid license agreement will prevail. Otherwise, the software is
- * provided under the following open source license terms:
- *
- * Alfresco is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Alfresco is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
- * #L%
- */
-package org.alfresco.util.schemacomp;
-
-import java.lang.reflect.Field;
-import java.sql.Connection;
-import java.sql.DatabaseMetaData;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.Map;
-import java.util.TreeMap;
-
-import javax.sql.DataSource;
-
-import org.alfresco.repo.domain.dialect.Dialect;
-import org.alfresco.repo.domain.dialect.TypeNames;
-import org.alfresco.service.descriptor.Descriptor;
-import org.alfresco.service.descriptor.DescriptorService;
-import org.alfresco.util.DatabaseMetaDataHelper;
-import org.alfresco.util.PropertyCheck;
-import org.alfresco.util.schemacomp.model.Column;
-import org.alfresco.util.schemacomp.model.ForeignKey;
-import org.alfresco.util.schemacomp.model.Index;
-import org.alfresco.util.schemacomp.model.PrimaryKey;
-import org.alfresco.util.schemacomp.model.Schema;
-import org.alfresco.util.schemacomp.model.Sequence;
-import org.alfresco.util.schemacomp.model.Table;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.springframework.context.ApplicationContext;
-
-
-/**
- * Exports a database schema to an in-memory {@link Schema} object.
- *
- * @author Matt Ward
- */
-public class ExportDb
-{
- /** Reverse map from database types to JDBC types (loaded from a Hibernate dialect). */
- private final Map<String, Integer> reverseTypeMap = new TreeMap<String, Integer>();
-
- /** The database metadata helper */
- private DatabaseMetaDataHelper databaseMetaDataHelper;
-
- /** The JDBC DataSource. */
- private DataSource dataSource;
-
- /** The object graph we're building */
- private Schema schema;
-
- /** What type of DBMS are we running? */
- private Dialect dialect;
-
- /** Used to gain the repository's schema version */
- private DescriptorService descriptorService;
-
- /** Only top-level tables starting with namePrefix will be exported, set to empty string for all objects */
- private String namePrefix = "alf_";
-
- /** Default schema name to use */
- private String dbSchemaName;
-
- private final static Log log = LogFactory.getLog(ExportDb.class);
-
-
- public ExportDb(ApplicationContext context)
- {
- this((DataSource) context.getBean("dataSource"),
- (Dialect) context.getBean("dialect"),
- (DescriptorService) context.getBean("descriptorComponent"),
- (DatabaseMetaDataHelper) context.getBean("databaseMetaDataHelper"));
- }
-
-
- /**
- * Create a new instance of the tool within the context of an existing database connection
- *
- * @param dataSource the database connection to use for metadata queries
- * @param dialect the Hibernate dialect
- * @param descriptorService descriptorService
- * @param databaseMetaDataHelper databaseMetaDataHelper
- */
- public ExportDb(final DataSource dataSource, final Dialect dialect, DescriptorService descriptorService, DatabaseMetaDataHelper databaseMetaDataHelper)
- {
- this.dataSource = dataSource;
- this.dialect = dialect;
- this.descriptorService = descriptorService;
- this.databaseMetaDataHelper = databaseMetaDataHelper;
- init();
- }
-
-
- private void init()
- {
- try
- {
- attemptInit();
- }
- catch (SecurityException error)
- {
- throw new RuntimeException("Unable to generate type map using hibernate.", error);
- }
- catch (IllegalArgumentException error)
- {
- throw new RuntimeException("Unable to generate type map using hibernate.", error);
- }
- catch (NoSuchFieldException error)
- {
- throw new RuntimeException("Unable to generate type map using hibernate.", error);
- }
- catch (IllegalAccessException error)
- {
- throw new RuntimeException("Unable to generate type map using hibernate.", error);
- }
- }
-
-
- /**
- * Initializes the fields ready to perform the database metadata reading
- * @throws NoSuchFieldException
- * @throws SecurityException
- * @throws IllegalAccessException
- * @throws IllegalArgumentException
- */
- @SuppressWarnings("unchecked")
- private void attemptInit() throws SecurityException, NoSuchFieldException, IllegalArgumentException, IllegalAccessException
- {
- final Field typeNamesField = Dialect.class.getDeclaredField("typeNames");
- typeNamesField.setAccessible(true);
- final TypeNames typeNames = (TypeNames) typeNamesField.get(dialect);
- final Field defaultsField = TypeNames.class.getDeclaredField("defaults");
- defaultsField.setAccessible(true);
- final Map<Integer, String> forwardMap2 = (Map<Integer, String>) defaultsField.get(typeNames);
- for (final Map.Entry<Integer, String> e : forwardMap2.entrySet())
- {
- this.reverseTypeMap.put(e.getValue().toLowerCase(), e.getKey());
- }
-
- final Field weightedField = TypeNames.class.getDeclaredField("weighted");
- weightedField.setAccessible(true);
- final Map<Integer, Map<Long, String>> forwardMap1 = (Map<Integer, Map<Long, String>>) weightedField
- .get(typeNames);
- for (final Map.Entry<Integer, Map<Long, String>> e : forwardMap1.entrySet())
- {
- for (final String type : e.getValue().values())
- {
- this.reverseTypeMap.put(type.toLowerCase(), e.getKey());
- }
- }
- }
-
- public void execute()
- {
- PropertyCheck.mandatory(this, "dataSource", dataSource);
- PropertyCheck.mandatory(this, "dialect", dialect);
- PropertyCheck.mandatory(this, "descriptorService", descriptorService);
-
- Connection connection = null;
- try
- {
- connection = dataSource.getConnection();
- connection.setAutoCommit(false);
- Descriptor descriptor = descriptorService.getServerDescriptor();
- int schemaVersion = descriptor.getSchema();
- execute(connection, schemaVersion);
- }
- catch (Exception e)
- {
- throw new RuntimeException("Unable to execute export.", e);
- }
- finally
- {
- try
- {
- if (connection != null)
- {
- connection.close();
- }
- }
- catch (Throwable e)
- {
- // Little can be done at this stage.
- }
- }
- }
-
-
-
- private void execute(Connection con, int schemaVersion) throws Exception
- {
- final DatabaseMetaData dbmd = con.getMetaData();
-
- String schemaName = databaseMetaDataHelper.getSchema(con);
-
- schema = new Schema(schemaName, namePrefix, schemaVersion, true);
- String[] prefixFilters = namePrefixFilters(dbmd);
-
- for (String filter : prefixFilters)
- {
- extractSchema(dbmd, schemaName, filter);
- }
- }
-
-
- private void extractSchema(DatabaseMetaData dbmd, String schemaName, String prefixFilter)
- throws SQLException, IllegalArgumentException, IllegalAccessException
- {
- if (log.isDebugEnabled())
- {
- log.debug("Retrieving tables: schemaName=[" + schemaName + "], prefixFilter=[" + prefixFilter + "]");
- }
-
-
- final ResultSet tables = dbmd.getTables(null, schemaName, prefixFilter, new String[]
- {
- "TABLE", "VIEW", "SEQUENCE"
- });
-
-
- while (tables.next())
- {
- final String tableName = tables.getString("TABLE_NAME");
-
- if (log.isDebugEnabled())
- {
- log.debug("Examining table tableName=[" + tableName + "]");
- }
-
- // Oracle hack: ignore tables in the recycle bin
- // ALF-14129 fix, check whether schema already contains object with provided name
- if (tableName.startsWith("BIN$") || schema.containsByName(tableName))
- {
- continue;
- }
-
- if (tables.getString("TABLE_TYPE").equals("SEQUENCE"))
- {
- Sequence sequence = new Sequence(tableName);
- schema.add(sequence);
- continue;
- }
-
- Table table = new Table(tableName);
- schema.add(table);
-
- // Table columns
- final ResultSet columns = dbmd.getColumns(null, tables.getString("TABLE_SCHEM"), tableName, "%");
- while (columns.next())
- {
- String columnName = columns.getString("COLUMN_NAME");
- Column column = new Column(columnName);
-
- String dbType = columns.getString("TYPE_NAME");
- int colSize = columns.getInt("COLUMN_SIZE");
- int scale = columns.getInt("DECIMAL_DIGITS");
- int jdbcType = columns.getInt("DATA_TYPE");
- String type = generateType(dbType, colSize, scale, jdbcType);
- column.setType(type);
-
- String nullableString = columns.getString("IS_NULLABLE");
- column.setNullable(parseBoolean(nullableString));
-
- column.setOrder(columns.getInt("ORDINAL_POSITION"));
-
- try
- {
- String autoIncString = columns.getString("IS_AUTOINCREMENT");
- column.setAutoIncrement(parseBoolean(autoIncString));
- }
- catch(SQLException jtdsDoesNOtHAveIsUatoincrement)
- {
- column.setAutoIncrement((dbType.endsWith("identity")));
- }
-
- column.setParent(table);
- table.getColumns().add(column);
- }
- columns.close();
-
-
- // Primary key
- final ResultSet primarykeycols = dbmd.getPrimaryKeys(null, tables.getString("TABLE_SCHEM"), tableName);
-
- PrimaryKey pk = null;
-
- while (primarykeycols.next())
- {
- if (pk == null)
- {
- String pkName = primarykeycols.getString("PK_NAME");
- pk = new PrimaryKey(pkName);
- }
- String columnName = primarykeycols.getString("COLUMN_NAME");
- pk.getColumnNames().add(columnName);
-
- int columnOrder = primarykeycols.getInt("KEY_SEQ");
- pk.getColumnOrders().add(columnOrder);
- }
- primarykeycols.close();
-
- // If this table has a primary key, add it.
- if (pk != null)
- {
- pk.setParent(table);
- table.setPrimaryKey(pk);
- }
-
-
- // Indexes
- final ResultSet indexes = dbmd.getIndexInfo(null, tables.getString("TABLE_SCHEM"), tableName, false, true);
- String lastIndexName = "";
-
- Index index = null;
-
- while (indexes.next())
- {
- final String indexName = indexes.getString("INDEX_NAME");
- if (indexName == null)
- {
- // Oracle seems to have some dummy index entries
- continue;
- }
- // Skip the index corresponding to the PK if it is mentioned
- else if (indexName.equals(table.getPrimaryKey().getName()))
- {
- continue;
- }
-
- if (!indexName.equals(lastIndexName))
- {
- index = new Index(indexName);
- index.setUnique(!indexes.getBoolean("NON_UNIQUE"));
- index.setParent(table);
- table.getIndexes().add(index);
- lastIndexName = indexName;
- }
- if (index != null)
- {
- String columnName = indexes.getString("COLUMN_NAME");
- index.getColumnNames().add(columnName);
- }
- }
- indexes.close();
-
-
-
- final ResultSet foreignkeys = dbmd.getImportedKeys(null, tables.getString("TABLE_SCHEM"), tableName);
- String lastKeyName = "";
-
- ForeignKey fk = null;
-
- while (foreignkeys.next())
- {
- final String keyName = foreignkeys.getString("FK_NAME");
- if (!keyName.equals(lastKeyName))
- {
- fk = new ForeignKey(keyName);
- fk.setParent(table);
- table.getForeignKeys().add(fk);
- lastKeyName = keyName;
- }
- if (fk != null)
- {
- fk.setLocalColumn(foreignkeys.getString("FKCOLUMN_NAME"));
- fk.setTargetTable(foreignkeys.getString("PKTABLE_NAME"));
- fk.setTargetColumn(foreignkeys.getString("PKCOLUMN_NAME"));
- }
- }
- foreignkeys.close();
- }
- tables.close();
- }
-
-
- /**
- * Convert a boolean string as used in the database, to a boolean value.
- *
- * @param nullableString String
- * @return true if "YES", false if "NO"
- */
- private boolean parseBoolean(String nullableString)
- {
- // TODO: what about (from the javadoc):
- // empty string --- if the nullability for the parameter is unknown
- if (nullableString.equals("NO"))
- {
- return false;
- }
- if (nullableString.equals("YES"))
- {
- return true;
- }
-
- throw new IllegalArgumentException("Unsupported term \"" + nullableString +
- "\", perhaps this database doesn't use YES/NO for booleans?");
-
- }
-
- protected String generateType(final String dbTypeRaw, int size, final int digits, int sqlType)
- throws IllegalArgumentException, IllegalAccessException
- {
- final String dbType = dbTypeRaw.toLowerCase();
-
- final String sizeType = dbType + "(" + size + ")";
- if (this.reverseTypeMap.containsKey(sizeType))
- {
- // the map may contain an exact match, e.g. "char(1)"
- return sizeType;
- }
-
- final String precisionScaleType = dbType + "(" + size + ", " + digits + ")";
- if (this.reverseTypeMap.containsKey(precisionScaleType))
- {
- // the map may contain an exact match, e.g. "numeric(3, 1)"
- return precisionScaleType;
- }
- else
- {
- for (String key : reverseTypeMap.keySet())
- {
- // Populate the placeholders, examples:
- // varchar($l) => varchar(20)
- // numeric($p, $s) => numeric(5, 2)
- String popKey = key.replaceAll("\\$p", String.valueOf(size));
- popKey = popKey.replaceAll("\\$s", String.valueOf(digits));
- popKey = popKey.replaceAll("\\$l", String.valueOf(size));
-
- // Variation of the size type with size unit of char, e.g. varchar2(255 char)
- final String charSizeType = dbType + "(" + size + " char)";
-
- // If the populated key matches a precision/scale type or a size type
- // then the populated key gives us the string we're after.
- if (popKey.equals(precisionScaleType) || popKey.equals(sizeType) || popKey.equals(charSizeType))
- {
- return popKey;
- }
- }
- }
-
- return dbType;
- }
-
-
- /**
- * @return the schema
- */
- public Schema getSchema()
- {
- return this.schema;
- }
-
-
- /**
- * @return the namePrefix
- */
- public String getNamePrefix()
- {
- return this.namePrefix;
- }
-
- private String[] namePrefixFilters(DatabaseMetaData dbmd) throws SQLException
- {
- String filter = namePrefix + "%";
- // We're assuming the prefixes are either PREFIX_ or prefix_
- // but not mixed-case.
- return new String[]
- {
- filter.toLowerCase(),
- filter.toUpperCase()
- };
- }
-
-
- /**
- * @param namePrefix the namePrefix to set
- */
- public void setNamePrefix(String namePrefix)
- {
- this.namePrefix = namePrefix;
- }
-
- /**
- * Set the default schema name
- *
- * @param dbSchemaName the default schema name
- */
- public void setDbSchemaName(String dbSchemaName)
- {
- this.dbSchemaName = dbSchemaName;
- }
-}
+/*
+ * #%L
+ * Alfresco Repository
+ * %%
+ * Copyright (C) 2005 - 2016 Alfresco Software Limited
+ * %%
+ * This file is part of the Alfresco software.
+ * If the software was purchased under a paid Alfresco license, the terms of
+ * the paid license agreement will prevail. Otherwise, the software is
+ * provided under the following open source license terms:
+ *
+ * Alfresco is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Alfresco is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
+ * #L%
+ */
+package org.alfresco.util.schemacomp;
+
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Map;
+import java.util.TreeMap;
+
+import javax.sql.DataSource;
+
+import org.alfresco.repo.domain.dialect.Dialect;
+import org.alfresco.repo.domain.dialect.TypeNames;
+import org.alfresco.service.descriptor.Descriptor;
+import org.alfresco.service.descriptor.DescriptorService;
+import org.alfresco.util.DatabaseMetaDataHelper;
+import org.alfresco.util.DialectUtil;
+import org.alfresco.util.PropertyCheck;
+import org.alfresco.util.DBScriptUtil;
+import org.alfresco.util.schemacomp.model.Column;
+import org.alfresco.util.schemacomp.model.ForeignKey;
+import org.alfresco.util.schemacomp.model.Index;
+import org.alfresco.util.schemacomp.model.PrimaryKey;
+import org.alfresco.util.schemacomp.model.Schema;
+import org.alfresco.util.schemacomp.model.Sequence;
+import org.alfresco.util.schemacomp.model.Table;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.springframework.context.ApplicationContext;
+import org.springframework.core.io.Resource;
+import org.springframework.core.io.support.EncodedResource;
+import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
+import org.springframework.core.io.support.ResourcePatternResolver;
+
+/**
+ * Exports a database schema to an in-memory {@link Schema} object.
+ *
+ * @author Matt Ward
+ */
+public class ExportDb
+{
+ private static final String SCHEMA_REFERENCE_SEQUENCES_SQL_FILE = "classpath:alfresco/dbscripts/create/${db.script.dialect}/Schema-Reference-Sequences.sql";
+
+ /** Reverse map from database types to JDBC types (loaded from a Hibernate dialect). */
+ private final Map<String, Integer> reverseTypeMap = new TreeMap<String, Integer>();
+
+ /** The database metadata helper */
+ private DatabaseMetaDataHelper databaseMetaDataHelper;
+
+ /** The JDBC DataSource. */
+ private DataSource dataSource;
+
+ /** The object graph we're building */
+ private Schema schema;
+
+ /** What type of DBMS are we running? */
+ private Dialect dialect;
+
+ /** Used to gain the repository's schema version */
+ private DescriptorService descriptorService;
+
+ /** Only top-level tables starting with namePrefix will be exported, set to empty string for all objects */
+ private String namePrefix = "alf_";
+
+ /** Default schema name to use */
+ private String dbSchemaName;
+
+ private final static Log log = LogFactory.getLog(ExportDb.class);
+
+ private ResourcePatternResolver rpr = new PathMatchingResourcePatternResolver(this.getClass().getClassLoader());
+
+ public ExportDb(ApplicationContext context)
+ {
+ this((DataSource) context.getBean("dataSource"),
+ (Dialect) context.getBean("dialect"),
+ (DescriptorService) context.getBean("descriptorComponent"),
+ (DatabaseMetaDataHelper) context.getBean("databaseMetaDataHelper"));
+ }
+
+
+ /**
+ * Create a new instance of the tool within the context of an existing database connection
+ *
+ * @param dataSource the database connection to use for metadata queries
+ * @param dialect the Hibernate dialect
+ * @param descriptorService descriptorService
+ * @param databaseMetaDataHelper databaseMetaDataHelper
+ */
+ public ExportDb(final DataSource dataSource, final Dialect dialect, DescriptorService descriptorService, DatabaseMetaDataHelper databaseMetaDataHelper)
+ {
+ this.dataSource = dataSource;
+ this.dialect = dialect;
+ this.descriptorService = descriptorService;
+ this.databaseMetaDataHelper = databaseMetaDataHelper;
+ init();
+ }
+
+
+ private void init()
+ {
+ try
+ {
+ attemptInit();
+ }
+ catch (SecurityException error)
+ {
+ throw new RuntimeException("Unable to generate type map using hibernate.", error);
+ }
+ catch (IllegalArgumentException error)
+ {
+ throw new RuntimeException("Unable to generate type map using hibernate.", error);
+ }
+ catch (NoSuchFieldException error)
+ {
+ throw new RuntimeException("Unable to generate type map using hibernate.", error);
+ }
+ catch (IllegalAccessException error)
+ {
+ throw new RuntimeException("Unable to generate type map using hibernate.", error);
+ }
+ }
+
+
+ /**
+ * Initializes the fields ready to perform the database metadata reading
+ * @throws NoSuchFieldException
+ * @throws SecurityException
+ * @throws IllegalAccessException
+ * @throws IllegalArgumentException
+ */
+ @SuppressWarnings("unchecked")
+ private void attemptInit() throws SecurityException, NoSuchFieldException, IllegalArgumentException, IllegalAccessException
+ {
+ final Field typeNamesField = Dialect.class.getDeclaredField("typeNames");
+ typeNamesField.setAccessible(true);
+ final TypeNames typeNames = (TypeNames) typeNamesField.get(dialect);
+ final Field defaultsField = TypeNames.class.getDeclaredField("defaults");
+ defaultsField.setAccessible(true);
+ final Map forwardMap2 = (Map) defaultsField.get(typeNames);
+ for (final Map.Entry e : forwardMap2.entrySet())
+ {
+ this.reverseTypeMap.put(e.getValue().toLowerCase(), e.getKey());
+ }
+
+ final Field weightedField = TypeNames.class.getDeclaredField("weighted");
+ weightedField.setAccessible(true);
+ final Map> forwardMap1 = (Map>) weightedField
+ .get(typeNames);
+ for (final Map.Entry> e : forwardMap1.entrySet())
+ {
+ for (final String type : e.getValue().values())
+ {
+ this.reverseTypeMap.put(type.toLowerCase(), e.getKey());
+ }
+ }
+ }
+
+ public void execute()
+ {
+ PropertyCheck.mandatory(this, "dataSource", dataSource);
+ PropertyCheck.mandatory(this, "dialect", dialect);
+ PropertyCheck.mandatory(this, "descriptorService", descriptorService);
+
+ Connection connection = null;
+ try
+ {
+ connection = dataSource.getConnection();
+ connection.setAutoCommit(false);
+ Descriptor descriptor = descriptorService.getServerDescriptor();
+ int schemaVersion = descriptor.getSchema();
+ execute(connection, schemaVersion);
+ }
+ catch (Exception e)
+ {
+ throw new RuntimeException("Unable to execute export.", e);
+ }
+ finally
+ {
+ try
+ {
+ if (connection != null)
+ {
+ connection.close();
+ }
+ }
+ catch (Throwable e)
+ {
+ // Little can be done at this stage.
+ }
+ }
+ }
+
+
+
+ private void execute(Connection con, int schemaVersion) throws Exception
+ {
+ final DatabaseMetaData dbmd = con.getMetaData();
+
+ String schemaName = databaseMetaDataHelper.getSchema(con);
+
+ schema = new Schema(schemaName, namePrefix, schemaVersion, true);
+ String[] prefixFilters = namePrefixFilters(dbmd);
+
+ for (String filter : prefixFilters)
+ {
+ extractSchema(dbmd, schemaName, filter);
+ }
+ }
+
+
+ private void extractSchema(DatabaseMetaData dbmd, String schemaName, String prefixFilter)
+ throws SQLException, IllegalArgumentException, IllegalAccessException, IOException
+ {
+ if (log.isDebugEnabled())
+ {
+ log.debug("Retrieving tables: schemaName=[" + schemaName + "], prefixFilter=[" + prefixFilter + "]");
+ }
+
+ final String[] tableTypes;
+ Resource sequencesRefResource = getSequencesReferenceResource();
+ if (sequencesRefResource != null && sequencesRefResource.exists())
+ {
+ tableTypes = new String[] {"TABLE", "VIEW"};
+
+ retrieveAndProcessSequences(dbmd, sequencesRefResource, schemaName, prefixFilter);
+ }
+ else
+ {
+ tableTypes = new String[] { "TABLE", "VIEW", "SEQUENCE" };
+ }
+
+ final ResultSet tables = dbmd.getTables(null, schemaName, prefixFilter, tableTypes);
+
+ processTables(dbmd, tables);
+ }
+
+ private void processTables(DatabaseMetaData dbmd, ResultSet tables)
+ throws SQLException, IllegalArgumentException, IllegalAccessException
+ {
+ if (tables == null)
+ {
+ return;
+ }
+
+ while (tables.next())
+ {
+ final String tableName = tables.getString("TABLE_NAME");
+
+ if (log.isDebugEnabled())
+ {
+ log.debug("Examining table tableName=[" + tableName + "]");
+ }
+
+ // Oracle hack: ignore tables in the recycle bin
+ // ALF-14129 fix, check whether schema already contains object with provided name
+ if (tableName.startsWith("BIN$") || schema.containsByName(tableName))
+ {
+ continue;
+ }
+
+ if (tables.getString("TABLE_TYPE").equals("SEQUENCE"))
+ {
+ Sequence sequence = new Sequence(tableName);
+ schema.add(sequence);
+ continue;
+ }
+
+ Table table = new Table(tableName);
+ schema.add(table);
+
+ // Table columns
+ final ResultSet columns = dbmd.getColumns(null, tables.getString("TABLE_SCHEM"), tableName, "%");
+ while (columns.next())
+ {
+ String columnName = columns.getString("COLUMN_NAME");
+ Column column = new Column(columnName);
+
+ String dbType = columns.getString("TYPE_NAME");
+ int colSize = columns.getInt("COLUMN_SIZE");
+ int scale = columns.getInt("DECIMAL_DIGITS");
+ int jdbcType = columns.getInt("DATA_TYPE");
+ String type = generateType(dbType, colSize, scale, jdbcType);
+ column.setType(type);
+
+ String nullableString = columns.getString("IS_NULLABLE");
+ column.setNullable(parseBoolean(nullableString));
+
+ column.setOrder(columns.getInt("ORDINAL_POSITION"));
+
+ try
+ {
+ String autoIncString = columns.getString("IS_AUTOINCREMENT");
+ column.setAutoIncrement(parseBoolean(autoIncString));
+ }
+ catch(SQLException jtdsDoesNOtHAveIsUatoincrement)
+ {
+ column.setAutoIncrement((dbType.endsWith("identity")));
+ }
+
+ column.setParent(table);
+ table.getColumns().add(column);
+ }
+ columns.close();
+
+
+ // Primary key
+ final ResultSet primarykeycols = dbmd.getPrimaryKeys(null, tables.getString("TABLE_SCHEM"), tableName);
+
+ PrimaryKey pk = null;
+
+ while (primarykeycols.next())
+ {
+ if (pk == null)
+ {
+ String pkName = primarykeycols.getString("PK_NAME");
+ pk = new PrimaryKey(pkName);
+ }
+ String columnName = primarykeycols.getString("COLUMN_NAME");
+ pk.getColumnNames().add(columnName);
+
+ int columnOrder = primarykeycols.getInt("KEY_SEQ");
+ pk.getColumnOrders().add(columnOrder);
+ }
+ primarykeycols.close();
+
+ // If this table has a primary key, add it.
+ if (pk != null)
+ {
+ pk.setParent(table);
+ table.setPrimaryKey(pk);
+ }
+
+
+ // Indexes
+ final ResultSet indexes = dbmd.getIndexInfo(null, tables.getString("TABLE_SCHEM"), tableName, false, true);
+ String lastIndexName = "";
+
+ Index index = null;
+
+ while (indexes.next())
+ {
+ final String indexName = indexes.getString("INDEX_NAME");
+ if (indexName == null)
+ {
+ // Oracle seems to have some dummy index entries
+ continue;
+ }
+ // Skip the index corresponding to the PK if it is mentioned
+ else if (indexName.equals(table.getPrimaryKey().getName()))
+ {
+ continue;
+ }
+
+ if (!indexName.equals(lastIndexName))
+ {
+ index = new Index(indexName);
+ index.setUnique(!indexes.getBoolean("NON_UNIQUE"));
+ index.setParent(table);
+ table.getIndexes().add(index);
+ lastIndexName = indexName;
+ }
+ if (index != null)
+ {
+ String columnName = indexes.getString("COLUMN_NAME");
+ index.getColumnNames().add(columnName);
+ }
+ }
+ indexes.close();
+
+
+
+ final ResultSet foreignkeys = dbmd.getImportedKeys(null, tables.getString("TABLE_SCHEM"), tableName);
+ String lastKeyName = "";
+
+ ForeignKey fk = null;
+
+ while (foreignkeys.next())
+ {
+ final String keyName = foreignkeys.getString("FK_NAME");
+ if (!keyName.equals(lastKeyName))
+ {
+ fk = new ForeignKey(keyName);
+ fk.setParent(table);
+ table.getForeignKeys().add(fk);
+ lastKeyName = keyName;
+ }
+ if (fk != null)
+ {
+ fk.setLocalColumn(foreignkeys.getString("FKCOLUMN_NAME"));
+ fk.setTargetTable(foreignkeys.getString("PKTABLE_NAME"));
+ fk.setTargetColumn(foreignkeys.getString("PKCOLUMN_NAME"));
+ }
+ }
+ foreignkeys.close();
+ }
+ tables.close();
+ }
+
+
+ /**
+ * Convert a boolean string as used in the database, to a boolean value.
+ *
+ * @param nullableString String
+ * @return true if "YES", false if "NO"
+ */
+ private boolean parseBoolean(String nullableString)
+ {
+ // TODO: what about (from the javadoc):
+ // empty string --- if the nullability for the parameter is unknown
+ if (nullableString.equals("NO"))
+ {
+ return false;
+ }
+ if (nullableString.equals("YES"))
+ {
+ return true;
+ }
+
+ throw new IllegalArgumentException("Unsupported term \"" + nullableString +
+ "\", perhaps this database doesn't use YES/NO for booleans?");
+
+ }
+
+ protected String generateType(final String dbTypeRaw, int size, final int digits, int sqlType)
+ throws IllegalArgumentException, IllegalAccessException
+ {
+ final String dbType = dbTypeRaw.toLowerCase();
+
+ final String sizeType = dbType + "(" + size + ")";
+ if (this.reverseTypeMap.containsKey(sizeType))
+ {
+ // the map may contain an exact match, e.g. "char(1)"
+ return sizeType;
+ }
+
+ final String precisionScaleType = dbType + "(" + size + ", " + digits + ")";
+ if (this.reverseTypeMap.containsKey(precisionScaleType))
+ {
+ // the map may contain an exact match, e.g. "numeric(3, 1)"
+ return precisionScaleType;
+ }
+ else
+ {
+ for (String key : reverseTypeMap.keySet())
+ {
+ // Populate the placeholders, examples:
+ // varchar($l) => varchar(20)
+ // numeric($p, $s) => numeric(5, 2)
+ String popKey = key.replaceAll("\\$p", String.valueOf(size));
+ popKey = popKey.replaceAll("\\$s", String.valueOf(digits));
+ popKey = popKey.replaceAll("\\$l", String.valueOf(size));
+
+ // Variation of the size type with size unit of char, e.g. varchar2(255 char)
+ final String charSizeType = dbType + "(" + size + " char)";
+
+ // If the populated key matches a precision/scale type or a size type
+ // then the populated key gives us the string we're after.
+ if (popKey.equals(precisionScaleType) || popKey.equals(sizeType) || popKey.equals(charSizeType))
+ {
+ return popKey;
+ }
+ }
+ }
+
+ return dbType;
+ }
+
+
+ /**
+ * @return the schema
+ */
+ public Schema getSchema()
+ {
+ return this.schema;
+ }
+
+
+ /**
+ * @return the namePrefix
+ */
+ public String getNamePrefix()
+ {
+ return this.namePrefix;
+ }
+
+ private String[] namePrefixFilters(DatabaseMetaData dbmd) throws SQLException
+ {
+ String filter = namePrefix + "%";
+ // We're assuming the prefixes are either PREFIX_ or prefix_
+ // but not mixed-case.
+ return new String[]
+ {
+ filter.toLowerCase(),
+ filter.toUpperCase()
+ };
+ }
+
+
+ /**
+ * @param namePrefix the namePrefix to set
+ */
+ public void setNamePrefix(String namePrefix)
+ {
+ this.namePrefix = namePrefix;
+ }
+
+ /**
+ * Set the default schema name
+ *
+ * @param dbSchemaName the default schema name
+ */
+ public void setDbSchemaName(String dbSchemaName)
+ {
+ this.dbSchemaName = dbSchemaName;
+ }
+
+ /**
+ * Attempts to find the schema reference sequences sql file. If not found, the
+ * dialect hierarchy will be walked until a compatible resource is found. This
+ * makes it possible to have resources that are generic to all dialects.
+ *
+ * @return The Resource, otherwise null
+ */
+ private Resource getSequencesReferenceResource()
+ {
+ return DialectUtil.getDialectResource(rpr, dialect.getClass(), SCHEMA_REFERENCE_SEQUENCES_SQL_FILE);
+ }
+
+ /**
+ * Retrieves and process DB sequences for the schema validation check.
+ *
+ * @param dbmd
+ * the database meta data
+ * @param resource
+ * the resource to load the SQL script from
+ * @param schemaName
+ * the DB schema name
+ * @param prefixFilter
+ * the DB tables prefix filter
+ */
+ private void retrieveAndProcessSequences(DatabaseMetaData dbmd, Resource resource, String schemaName, String prefixFilter)
+ throws SQLException, IllegalArgumentException, IllegalAccessException, IOException
+ {
+ final String script = DBScriptUtil.readScript(new EncodedResource(resource, "UTF-8"));
+
+ if (!script.isEmpty())
+ {
+ retrieveAndProcessSequences(dbmd, script, schemaName, prefixFilter);
+ }
+ }
+
+ /**
+ * Retrieves and process DB sequences for the schema validation check.
+ *
+ * @param dbmd
+ * the database meta data
+ * @param script
+ * the SQL script
+ * @param schemaName
+ * the DB schema name
+ * @param prefixFilter
+ * the DB tables prefix filter
+ */
+ private void retrieveAndProcessSequences(DatabaseMetaData dbmd, String script, String schemaName, String prefixFilter)
+ throws SQLException, IllegalArgumentException, IllegalAccessException
+ {
+ if (log.isDebugEnabled())
+ {
+ log.debug("Retrieving DB sequences...");
+ }
+
+ PreparedStatement stmt = null;
+ try
+ {
+ stmt = dbmd.getConnection().prepareStatement(script);
+ stmt.setString(1, schemaName);
+ stmt.setString(2, prefixFilter);
+
+ boolean haveResults = stmt.execute();
+
+ if (haveResults)
+ {
+ ResultSet sequences = stmt.getResultSet();
+
+ if (log.isDebugEnabled())
+ {
+ log.debug("Sequences processing started...");
+ }
+
+ processTables(dbmd, sequences);
+
+ if (log.isDebugEnabled())
+ {
+ log.debug("Sequences processing completed.");
+ }
+ }
+ }
+ finally
+ {
+ if (stmt != null)
+ {
+ try
+ {
+ stmt.close();
+ }
+ catch (Throwable e)
+ {
+ // Little can be done at this stage.
+ }
+ }
+ }
+ }
+}