Merged BRANCHES/V3.2 to HEAD:

18363: WCM clustering - ETHREEOH-3962 (duplicate root node entry)
   19091: Fix Part 1 ALF-726: v3.1.x Content Cleaner Job needs to be ported to v3.2
   19159: Fixed ALF-726: Migrate pre-3.2 content URLs to new format and pick up and tag existing orphaned content
   19169: Fix fallout from 19159 for ALF-726: Migrate pre-3.2 content URLs to new format and pick up and tag existing orphaned content
   19262: ALF-726 Multithreading for content URL conversion



git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/alfresco/HEAD/root@19267 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
This commit is contained in:
Derek Hulley
2010-03-12 19:11:12 +00:00
parent a2c2e215a8
commit fdc8f6f331
33 changed files with 2589 additions and 1175 deletions

View File

@@ -49,8 +49,6 @@ import org.alfresco.service.namespace.QName;
import org.alfresco.service.namespace.RegexQNamePattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.context.ApplicationEventPublisherAware;
/**
* Migrates authority information previously stored in the user store to the spaces store, using the new structure used
@@ -58,7 +56,7 @@ import org.springframework.context.ApplicationEventPublisherAware;
*
* @author dward
*/
public class AuthorityMigrationPatch extends AbstractPatch implements ApplicationEventPublisherAware
public class AuthorityMigrationPatch extends AbstractPatch
{
/** The title we give to the batch process in progress messages / JMX. */
private static final String MSG_PROCESS_NAME = "patch.authorityMigration.process.name";
@@ -91,9 +89,6 @@ public class AuthorityMigrationPatch extends AbstractPatch implements Applicatio
/** The user bootstrap. */
private ImporterBootstrap userBootstrap;
/** The application event publisher. */
private ApplicationEventPublisher applicationEventPublisher;
/**
* Sets the authority service.
*
@@ -127,17 +122,6 @@ public class AuthorityMigrationPatch extends AbstractPatch implements Applicatio
this.userBootstrap = userBootstrap;
}
/**
* Sets the application event publisher.
*
* @param applicationEventPublisher
* the application event publisher
*/
public void setApplicationEventPublisher(ApplicationEventPublisher applicationEventPublisher)
{
this.applicationEventPublisher = applicationEventPublisher;
}
/**
* Recursively retrieves the authorities under the given node and their associations.
*
@@ -238,14 +222,33 @@ public class AuthorityMigrationPatch extends AbstractPatch implements Applicatio
*/
private void migrateAuthorities(final Map<String, String> authoritiesToCreate, Map<String, Set<String>> parentAssocs)
{
BatchProcessor.Worker<Map.Entry<String, Set<String>>> worker = new BatchProcessor.Worker<Map.Entry<String, Set<String>>>()
{
final String tenantDomain = tenantAdminService.getCurrentUserDomain();
BatchProcessor.BatchProcessWorker<Map.Entry<String, Set<String>>> worker = new BatchProcessor.BatchProcessWorker<Map.Entry<String, Set<String>>>()
{
public String getIdentifier(Entry<String, Set<String>> entry)
{
return entry.getKey();
}
public void beforeProcess() throws Throwable
{
// Disable rules
ruleService.disableRules();
// Authentication
String systemUser = AuthenticationUtil.getSystemUserName();
systemUser = tenantAdminService.getDomainUser(systemUser, tenantDomain);
AuthenticationUtil.setRunAsUser(systemUser);
}
public void afterProcess() throws Throwable
{
// Enable rules
ruleService.enableRules();
// Clear authentication
AuthenticationUtil.clearCurrentSecurityContext();
}
public void process(Entry<String, Set<String>> authority) throws Throwable
{
String authorityName = authority.getKey();
@@ -290,10 +293,13 @@ public class AuthorityMigrationPatch extends AbstractPatch implements Applicatio
}
};
// Migrate using 2 threads, 20 authorities per transaction. Log every 100 entries.
new BatchProcessor<Map.Entry<String, Set<String>>>(AuthorityMigrationPatch.progress_logger,
this.transactionService.getRetryingTransactionHelper(), this.ruleService, this.tenantAdminService,
this.applicationEventPublisher, parentAssocs.entrySet(), I18NUtil
.getMessage(AuthorityMigrationPatch.MSG_PROCESS_NAME), 100, 2, 20).process(worker, true);
new BatchProcessor<Map.Entry<String, Set<String>>>(
I18NUtil.getMessage(AuthorityMigrationPatch.MSG_PROCESS_NAME),
this.transactionService.getRetryingTransactionHelper(),
parentAssocs.entrySet(),
2, 20,
AuthorityMigrationPatch.this.applicationEventPublisher,
AuthorityMigrationPatch.progress_logger, 100).process(worker, true);
}
/**

View File

@@ -0,0 +1,687 @@
/*
* Copyright (C) 2005-2010 Alfresco Software Limited.
*
* This file is part of Alfresco
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
*/
package org.alfresco.repo.admin.patch.impl;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.alfresco.error.AlfrescoRuntimeException;
import org.alfresco.repo.admin.patch.AbstractPatch;
import org.alfresco.repo.admin.patch.PatchExecuter;
import org.alfresco.repo.admin.registry.RegistryKey;
import org.alfresco.repo.admin.registry.RegistryService;
import org.alfresco.repo.avm.AVMDAOs;
import org.alfresco.repo.avm.PlainFileNode;
import org.alfresco.repo.batch.BatchProcessor;
import org.alfresco.repo.batch.BatchProcessor.BatchProcessWorkerAdaptor;
import org.alfresco.repo.content.ContentStore;
import org.alfresco.repo.content.ContentStore.ContentUrlHandler;
import org.alfresco.repo.domain.contentdata.ContentDataDAO;
import org.alfresco.repo.domain.patch.PatchDAO;
import org.alfresco.repo.lock.JobLockService;
import org.alfresco.repo.lock.LockAcquisitionException;
import org.alfresco.repo.node.db.NodeDaoService;
import org.alfresco.repo.security.authentication.AuthenticationUtil;
import org.alfresco.repo.transaction.AlfrescoTransactionSupport;
import org.alfresco.repo.transaction.AlfrescoTransactionSupport.TxnReadState;
import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
import org.alfresco.service.ServiceRegistry;
import org.alfresco.service.cmr.repository.ContentData;
import org.alfresco.service.cmr.repository.ContentReader;
import org.alfresco.service.namespace.NamespaceService;
import org.alfresco.service.namespace.QName;
import org.alfresco.util.VmShutdownListener;
import org.alfresco.util.VmShutdownListener.VmShutdownException;
import org.apache.commons.lang.mutable.MutableInt;
import org.apache.commons.lang.mutable.MutableLong;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.quartz.Job;
import org.quartz.JobDataMap;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;
import org.springframework.dao.DataIntegrityViolationException;
import org.springframework.extensions.surf.util.I18NUtil;
import org.springframework.extensions.surf.util.Pair;
import org.springframework.extensions.surf.util.PropertyCheck;
/**
 * Component to migrate old-style content URL storage (<tt>contentUrl=store://...|mimetype=...</tt>)
 * to the newer <b>alf_content_url</b> storage.
 * <p/>
 * The {@link ServiceRegistry} is used to record progress. The component picks up ranges of node IDs
 * (DM and AVM) and records the progress. Since new nodes will not need converting, the converter
 * will stop once it hits the largest node ID that it found upon first initiation. Once completed,
 * the content store reader will start to pick up orphaned content and schedule it for deletion.
 * <p/>
 * A cluster-wide lock is set so that a single instance of this job will be running per Alfresco
 * installation.
 *
 * @author Derek Hulley
 * @since 3.2.1
 */
public class ContentUrlConverterPatch extends AbstractPatch
{
    // Registry keys recording incremental progress so that work resumes where it left off
    private static final RegistryKey KEY_ADM_MAX_ID = new RegistryKey(
            NamespaceService.SYSTEM_MODEL_1_0_URI, "ContentUrlConverter", "adm", "max-id");
    private static final RegistryKey KEY_ADM_RANGE_START_ID = new RegistryKey(
            NamespaceService.SYSTEM_MODEL_1_0_URI, "ContentUrlConverter", "adm", "range-start-id");
    private static final RegistryKey KEY_ADM_DONE = new RegistryKey(
            NamespaceService.SYSTEM_MODEL_1_0_URI, "ContentUrlConverter", "adm", "done");
    private static final RegistryKey KEY_AVM_MAX_ID = new RegistryKey(
            NamespaceService.SYSTEM_MODEL_1_0_URI, "ContentUrlConverter", "avm", "max-id");
    private static final RegistryKey KEY_AVM_RANGE_START_ID = new RegistryKey(
            NamespaceService.SYSTEM_MODEL_1_0_URI, "ContentUrlConverter", "avm", "range-start-id");
    private static final RegistryKey KEY_AVM_DONE = new RegistryKey(
            NamespaceService.SYSTEM_MODEL_1_0_URI, "ContentUrlConverter", "avm", "done");
    private static final RegistryKey KEY_STORE_DONE = new RegistryKey(
            NamespaceService.SYSTEM_MODEL_1_0_URI, "ContentUrlConverter", "store", "done");

    // Cluster-wide lock key: ensures a single instance of this job runs per installation
    private static final QName LOCK = QName.createQName(NamespaceService.SYSTEM_MODEL_1_0_URI, "ContentUrlConverter");

    // Log against the patch executer, as per patching convention
    private static Log logger = LogFactory.getLog(PatchExecuter.class);
    private static VmShutdownListener shutdownListener = new VmShutdownListener("ContentUrlConverterPatch");

    private RegistryService registryService;
    private JobLockService jobLockService;
    private NodeDaoService nodeDaoService;
    private PatchDAO patchDAO;
    private ContentStore contentStore;
    private ContentDataDAO contentDataDAO;
    private int threadCount;
    private int batchSize;
    private boolean runAsScheduledJob;

    /**
     * Flags whether the current thread entered via the scheduled job.
     * The overridden {@link ThreadLocal#initialValue()} guarantees a non-null value on
     * every thread.  Previously the default was installed via <tt>set(Boolean.FALSE)</tt>
     * in the constructor, which only initialized the constructing thread: a call to
     * {@link #applyInternal()} from any other thread (e.g. a patch-bootstrap thread)
     * would throw a NullPointerException when unboxing the value.
     */
    private ThreadLocal<Boolean> runningAsJob = new ThreadLocal<Boolean>()
    {
        @Override
        protected Boolean initialValue()
        {
            return Boolean.FALSE;
        }
    };

    /**
     * Default constructor: 2 worker threads, 500 entries per batch.
     */
    public ContentUrlConverterPatch()
    {
        threadCount = 2;
        batchSize = 500;
    }

    /**
     * Service to record progress for later pick-up
     */
    public void setRegistryService(RegistryService registryService)
    {
        this.registryService = registryService;
    }

    /**
     * Service to prevent concurrent execution across the cluster
     */
    public void setJobLockService(JobLockService jobLockService)
    {
        this.jobLockService = jobLockService;
    }

    /**
     * Provides low-level access to do the property transformation
     */
    public void setNodeDaoService(NodeDaoService nodeDaoService)
    {
        this.nodeDaoService = nodeDaoService;
    }

    /**
     * Component that provides low-level queries and updates to support this patch
     */
    public void setPatchDAO(PatchDAO patchDAO)
    {
        this.patchDAO = patchDAO;
    }

    /**
     * Set the store containing the content URLs to lift for potential cleaning.
     *
     * @param contentStore the store containing the system's content URLs
     */
    public void setContentStore(ContentStore contentStore)
    {
        this.contentStore = contentStore;
    }

    /**
     * Set the component that will write URLs coming from the
     * {@link ContentStore#getUrls(ContentUrlHandler) content store}.
     *
     * @param contentDataDAO the DAO to write the URLs
     */
    public void setContentDataDAO(ContentDataDAO contentDataDAO)
    {
        this.contentDataDAO = contentDataDAO;
    }

    /**
     * Set the number of threads that will be used to process the required work.
     *
     * @param threadCount the number of threads
     */
    public void setThreadCount(int threadCount)
    {
        this.threadCount = threadCount;
    }

    /**
     * Set the number of URLs that are processed per job pass; this property is ignored
     * when this component is run as a patch. Keep the number low (500) when running
     * at short intervals on a live machine.
     *
     * @param batchSize the number of nodes to process per batch when running on a schedule
     */
    public void setBatchSize(int batchSize)
    {
        this.batchSize = batchSize;
    }

    /**
     * Set whether the patch execution should just bypass any actual work i.e. the admin has
     * chosen to manually trigger the work.
     *
     * @param runAsScheduledJob <tt>true</tt> to leave all work up to the scheduled job
     */
    public void setRunAsScheduledJob(boolean runAsScheduledJob)
    {
        this.runAsScheduledJob = runAsScheduledJob;
    }

    @Override
    protected void checkProperties()
    {
        PropertyCheck.mandatory(this, "registryService", registryService);
        PropertyCheck.mandatory(this, "jobLockService", jobLockService);
        PropertyCheck.mandatory(this, "nodeDaoService", nodeDaoService);
        PropertyCheck.mandatory(this, "patchDAO", patchDAO);
        super.checkProperties();
    }

    /**
     * Method called when executed as a scheduled job: runs {@link #applyInternal()} as the
     * system user inside a retrying transaction, with the {@link #runningAsJob} flag raised
     * so the patch-bypass check is skipped.
     */
    private void executeViaJob()
    {
        AuthenticationUtil.RunAsWork<String> patchRunAs = new AuthenticationUtil.RunAsWork<String>()
        {
            public String doWork() throws Exception
            {
                RetryingTransactionCallback<String> patchTxn = new RetryingTransactionCallback<String>()
                {
                    public String execute() throws Exception
                    {
                        try
                        {
                            runningAsJob.set(Boolean.TRUE);
                            String report = applyInternal();
                            // done
                            return report;
                        }
                        finally
                        {
                            runningAsJob.set(Boolean.FALSE);        // Back to default
                        }
                    }
                };
                return transactionService.getRetryingTransactionHelper().doInTransaction(patchTxn);
            }
        };
        String report = AuthenticationUtil.runAs(patchRunAs, AuthenticationUtil.getSystemUserName());
        if (report != null)
        {
            logger.info(report);
        }
    }

    /**
     * Gets a set of work to do and executes it within this transaction. If kicked off via a job,
     * the task will exit before completion, on the assumption that it will be kicked off at regular
     * intervals. When called as a patch, it will run to completion with full progress logging.
     */
    @Override
    protected String applyInternal() throws Exception
    {
        if (AlfrescoTransactionSupport.getTransactionReadState() != TxnReadState.TXN_READ_WRITE)
        {
            // Nothing to do
            return null;
        }

        boolean isRunningAsJob = runningAsJob.get().booleanValue();

        // Do we bug out of patch execution
        if (runAsScheduledJob && !isRunningAsJob)
        {
            return I18NUtil.getMessage("patch.convertContentUrls.bypassingPatch");
        }

        boolean completed = false;

        // Lock in proportion to the batch size (0.1s per node or 0.8 min per 500)
        String lockToken = getLock(batchSize*100L);
        if (lockToken == null)
        {
            // Some other process is busy
            if (isRunningAsJob)
            {
                // Fine, we're doing batches
                return null;
            }
            else
            {
                throw new RuntimeException("Unable to get job lock during patch execution.  Only one server should perform the upgrade.");
            }
        }
        try
        {
            logger.info(I18NUtil.getMessage("patch.convertContentUrls.start"));
            logger.info(I18NUtil.getMessage("patch.convertContentUrls.adm.start"));
            boolean admCompleted = applyADM(lockToken);
            logger.info(I18NUtil.getMessage("patch.convertContentUrls.avm.start"));
            boolean avmCompleted = applyAVM(lockToken);
            logger.info(I18NUtil.getMessage("patch.convertContentUrls.store.start", contentStore));
            boolean urlLiftingCompleted = applyUrlLifting(lockToken);

            completed = admCompleted && avmCompleted && urlLiftingCompleted;
        }
        finally
        {
            jobLockService.releaseLock(lockToken, LOCK);
        }

        if (completed)
        {
            return I18NUtil.getMessage("patch.convertContentUrls.done");
        }
        else
        {
            return I18NUtil.getMessage("patch.convertContentUrls.inProgress");
        }
    }

    /**
     * Attempts to get the lock.  If the lock couldn't be taken, then <tt>null</tt> is returned.
     *
     * @param time the lock duration in milliseconds
     * @return Returns the lock token or <tt>null</tt>
     */
    private String getLock(long time)
    {
        try
        {
            return jobLockService.getLock(LOCK, time);
        }
        catch (LockAcquisitionException e)
        {
            return null;
        }
    }

    /**
     * Refreshes the existing lock for a further period.
     *
     * @param lockToken the token of the lock currently held (must not be <tt>null</tt>)
     * @param time the additional lock duration in milliseconds
     * @throws IllegalArgumentException if no lock token is supplied
     */
    private void refreshLock(String lockToken, long time)
    {
        if (lockToken == null)
        {
            throw new IllegalArgumentException("Must provide existing lockToken");
        }
        jobLockService.refreshLock(lockToken, LOCK, time);
    }

    /**
     * Performs the DM conversion in successive transactional passes until all ID ranges have
     * been processed or the VM starts shutting down, refreshing the cluster lock before each pass.
     *
     * @param lockToken the cluster lock token to keep refreshed
     * @return Returns <tt>true</tt> if all the DM work is done
     */
    private boolean applyADM(final String lockToken)
    {
        RetryingTransactionCallback<Boolean> callback = new RetryingTransactionCallback<Boolean>()
        {
            public Boolean execute() throws Throwable
            {
                return applyADM();
            }
        };
        boolean done = false;
        while (!shutdownListener.isVmShuttingDown())
        {
            refreshLock(lockToken, batchSize*100L);
            done = transactionService.getRetryingTransactionHelper().doInTransaction(callback, false, true);
            if (done)
            {
                break;
            }
        }
        return done;
    }

    /**
     * Do the DM conversion work: bulk-update one chunk of old content properties and advance
     * the recorded range-start ID.
     *
     * @return Returns <tt>true</tt> if the work is done
     */
    private boolean applyADM() throws Exception
    {
        Long maxId = (Long) registryService.getProperty(KEY_ADM_MAX_ID);
        // Must we run at all?
        Boolean done = (Boolean) registryService.getProperty(KEY_ADM_DONE);
        if (done != null && done.booleanValue())
        {
            logger.info(I18NUtil.getMessage("patch.convertContentUrls.adm.done", maxId));
            return true;
        }
        if (maxId == null)
        {
            maxId = patchDAO.getMaxAdmNodeID();
            registryService.addProperty(KEY_ADM_MAX_ID, maxId);
        }
        Long startId = (Long) registryService.getProperty(KEY_ADM_RANGE_START_ID);
        if (startId == null)
        {
            startId = 1L;
            registryService.addProperty(KEY_ADM_RANGE_START_ID, startId);
        }

        // Each thread gets 10 executions i.e. we get ranges for threadCount*10 lots of work
        Long endId = startId;
        Collection<Pair<Long, Long>> batchProcessorWork = new ArrayList<Pair<Long,Long>>(threadCount * 10);
        for (long i = 0; i < threadCount*10; i++)
        {
            endId = startId + (i+1L) * batchSize;
            Pair<Long, Long> batchEntry = new Pair<Long, Long>(
                    startId + i * batchSize,
                    endId);
            batchProcessorWork.add(batchEntry);
        }
        BatchProcessWorkerAdaptor<Pair<Long, Long>> batchProcessorWorker = new BatchProcessWorkerAdaptor<Pair<Long, Long>>()
        {
            public void process(Pair<Long, Long> range) throws Throwable
            {
                Long startId = range.getFirst();
                Long endId = range.getSecond();
                // Bulk-update the old content properties
                patchDAO.updateAdmV31ContentProperties(startId, endId);
            }
        };
        BatchProcessor<Pair<Long, Long>> batchProcessor = new BatchProcessor<Pair<Long, Long>>(
                "ContentUrlConverter.ADM (" + maxId + ")",
                transactionService.getRetryingTransactionHelper(),
                batchProcessorWork, threadCount, 1,
                applicationEventPublisher, null, 1);
        batchProcessor.process(batchProcessorWorker, true);

        // Advance
        startId = endId;
        // Have we passed the largest node ID of interest?
        if (startId > maxId)
        {
            startId = maxId + 1;
            // We're past the max ID that we're interested in
            done = Boolean.TRUE;
            registryService.addProperty(KEY_ADM_DONE, done);
            logger.info(I18NUtil.getMessage("patch.convertContentUrls.adm.done", maxId));
            return true;
        }
        // Progress
        super.reportProgress(maxId, startId);

        // Move the start ID on
        registryService.addProperty(KEY_ADM_RANGE_START_ID, startId);

        // More to do
        return false;
    }

    /**
     * Performs the AVM conversion in successive transactional passes until all ID ranges have
     * been processed or the VM starts shutting down, refreshing the cluster lock before each pass.
     *
     * @param lockToken the cluster lock token to keep refreshed
     * @return Returns <tt>true</tt> if all the AVM work is done
     */
    private boolean applyAVM(final String lockToken)
    {
        RetryingTransactionCallback<Boolean> callback = new RetryingTransactionCallback<Boolean>()
        {
            public Boolean execute() throws Throwable
            {
                return applyAVM();
            }
        };
        boolean done = false;
        while (!shutdownListener.isVmShuttingDown())
        {
            refreshLock(lockToken, batchSize*100L);
            done = transactionService.getRetryingTransactionHelper().doInTransaction(callback, false, true);
            if (done)
            {
                break;
            }
        }
        return done;
    }

    /**
     * Do the AVM conversion work: rewrite the content data of one chunk of AVM nodes and
     * advance the recorded range-start ID.
     *
     * @return Returns <tt>true</tt> if the work is done
     */
    private boolean applyAVM() throws Exception
    {
        Long maxId = (Long) registryService.getProperty(KEY_AVM_MAX_ID);
        // Must we run at all?
        Boolean done = (Boolean) registryService.getProperty(KEY_AVM_DONE);
        if (done != null && done.booleanValue())
        {
            logger.info(I18NUtil.getMessage("patch.convertContentUrls.avm.done", maxId));
            return true;
        }
        if (maxId == null)
        {
            maxId = patchDAO.getMaxAvmNodeID();
            registryService.addProperty(KEY_AVM_MAX_ID, maxId);
        }
        Long startId = (Long) registryService.getProperty(KEY_AVM_RANGE_START_ID);
        if (startId == null)
        {
            startId = 1L;
            registryService.addProperty(KEY_AVM_RANGE_START_ID, startId);
        }
        Long endId = startId + (batchSize * (long) threadCount * 10L);

        final List<Long> nodeIds = patchDAO.getAvmNodesWithOldContentProperties(startId, endId);
        BatchProcessWorkerAdaptor<Long> batchProcessorWorker = new BatchProcessWorkerAdaptor<Long>()
        {
            public void process(Long nodeId) throws Throwable
            {
                // Convert it: reading and re-setting the ContentData triggers the new-format storage
                PlainFileNode node = (PlainFileNode) AVMDAOs.Instance().fAVMNodeDAO.getByID(nodeId);
                ContentData contentData = node.getContentData();
                node.setContentData(contentData);
                AVMDAOs.Instance().fAVMNodeDAO.update(node);
            }
        };
        BatchProcessor<Long> batchProcessor = new BatchProcessor<Long>(
                "ContentUrlConverter.AVM (" + maxId + ")",
                transactionService.getRetryingTransactionHelper(),
                nodeIds, threadCount, batchSize,
                applicationEventPublisher, null, 1);
        batchProcessor.process(batchProcessorWorker, true);

        // Advance
        startId = endId;
        // Have we passed the largest node ID of interest?
        if (startId > maxId)
        {
            startId = maxId + 1;
            // We're past the max ID that we're interested in
            done = Boolean.TRUE;
            registryService.addProperty(KEY_AVM_DONE, done);
            logger.info(I18NUtil.getMessage("patch.convertContentUrls.avm.done", maxId));
            return true;
        }
        // Progress
        super.reportProgress(maxId, startId);

        // Move the start ID on
        registryService.addProperty(KEY_AVM_RANGE_START_ID, startId);

        // More to do
        return false;
    }

    /**
     * Runs the URL-lifting phase inside a retrying transaction.
     *
     * @param lockToken the cluster lock token to keep refreshed
     * @return Returns <tt>true</tt> if the URL lifting is done
     */
    private boolean applyUrlLifting(final String lockToken) throws Exception
    {
        RetryingTransactionCallback<Boolean> callback = new RetryingTransactionCallback<Boolean>()
        {
            public Boolean execute() throws Throwable
            {
                return applyUrlLiftingInTxn(lockToken);
            }
        };
        return transactionService.getRetryingTransactionHelper().doInTransaction(callback, false, true);
    }

    /**
     * Walks every URL in the content store and registers it as orphaned content, so that
     * content no longer referenced by the database can be scheduled for deletion.  Only runs
     * once both the DM and AVM conversion phases have completed.
     *
     * @param lockToken the cluster lock token to keep refreshed
     * @return Returns <tt>true</tt> if the store walk completed
     */
    private boolean applyUrlLiftingInTxn(final String lockToken) throws Exception
    {
        // Check the store
        if (!contentStore.isWriteSupported())
        {
            logger.info(I18NUtil.getMessage("patch.convertContentUrls.store.readOnly"));
            return true;
        }

        Boolean admDone = (Boolean) registryService.getProperty(KEY_ADM_DONE);
        Boolean avmDone = (Boolean) registryService.getProperty(KEY_AVM_DONE);

        if ((admDone == null || !admDone.booleanValue()) || (avmDone == null || !avmDone.booleanValue()))
        {
            logger.info(I18NUtil.getMessage("patch.convertContentUrls.store.pending"));
            return false;
        }

        // Must we run at all?
        Boolean done = (Boolean) registryService.getProperty(KEY_STORE_DONE);
        if (done != null && done.booleanValue())
        {
            logger.info(I18NUtil.getMessage("patch.convertContentUrls.store.done"));
            return true;
        }

        final long totalSize = contentStore.getTotalSize();
        final MutableLong currentSize = new MutableLong(0L);

        final MutableInt count = new MutableInt();
        count.setValue(0);
        ContentUrlHandler handler = new ContentUrlHandler()
        {
            private int allCount = 0;
            public void handle(String contentUrl)
            {
                if (shutdownListener.isVmShuttingDown())
                {
                    throw new VmShutdownListener.VmShutdownException();
                }

                ContentReader reader = contentStore.getReader(contentUrl);
                if (!reader.exists())
                {
                    // Not there any more
                    return;
                }
                currentSize.setValue(currentSize.longValue() + reader.getSize());
                try
                {
                    contentDataDAO.createContentUrlOrphaned(contentUrl);
                    count.setValue(count.intValue()+1);
                }
                catch (DataIntegrityViolationException e)
                {
                    // That's OK, the URL was already managed
                }
                allCount++;
                if (allCount % batchSize == 0)
                {
                    // Update our lock
                    refreshLock(lockToken, batchSize*100L);
                    if (totalSize < 0)
                    {
                        // The store can't report a total size, so just log the running count
                        logger.info(I18NUtil.getMessage("patch.convertContentUrls.store.progress", allCount));
                    }
                    else
                    {
                        ContentUrlConverterPatch.super.reportProgress(totalSize, currentSize.longValue());
                    }
                }
            }
        };
        try
        {
            contentStore.getUrls(handler);
        }
        catch (UnsupportedOperationException e)
        {
            logger.info(I18NUtil.getMessage("patch.convertContentUrls.store.noSupport"));
        }
        catch (VmShutdownException e)
        {
            // We didn't manage to complete
            return false;
        }
        // Record the completion
        done = Boolean.TRUE;
        registryService.addProperty(KEY_STORE_DONE, done);
        // Done
        logger.info(I18NUtil.getMessage("patch.convertContentUrls.store.scheduled", count.intValue(), contentStore));
        return true;
    }

    /**
     * Job to initiate the {@link ContentUrlConverterPatch}
     *
     * @author Derek Hulley
     * @since 3.2.1
     */
    public static class ContentUrlConverterJob implements Job
    {
        public ContentUrlConverterJob()
        {
        }

        /**
         * Calls the converter to do its work
         */
        public void execute(JobExecutionContext context) throws JobExecutionException
        {
            JobDataMap jobData = context.getJobDetail().getJobDataMap();
            // extract the content cleaner to use
            Object contentUrlConverterObj = jobData.get("contentUrlConverter");
            if (contentUrlConverterObj == null || !(contentUrlConverterObj instanceof ContentUrlConverterPatch))
            {
                throw new AlfrescoRuntimeException(
                        "'contentUrlConverter' data must contain valid 'ContentUrlConverter' reference");
            }
            ContentUrlConverterPatch contentUrlConverter = (ContentUrlConverterPatch) contentUrlConverterObj;
            contentUrlConverter.executeViaJob();
        }
    }
}

View File

@@ -32,12 +32,13 @@ import java.util.zip.CRC32;
import org.alfresco.model.ContentModel;
import org.alfresco.repo.admin.patch.AbstractPatch;
import org.alfresco.repo.batch.BatchProcessor;
import org.alfresco.repo.batch.BatchProcessor.Worker;
import org.alfresco.repo.batch.BatchProcessor.BatchProcessWorker;
import org.alfresco.repo.domain.ChildAssoc;
import org.alfresco.repo.domain.Node;
import org.alfresco.repo.domain.hibernate.ChildAssocImpl;
import org.alfresco.repo.domain.qname.QNameDAO;
import org.alfresco.repo.node.db.NodeDaoService;
import org.alfresco.repo.security.authentication.AuthenticationUtil;
import org.alfresco.service.cmr.admin.PatchException;
import org.alfresco.service.cmr.rule.RuleService;
import org.alfresco.service.namespace.QName;
@@ -48,8 +49,6 @@ import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.hibernate.type.LongType;
import org.hibernate.type.StringType;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.context.ApplicationEventPublisherAware;
import org.springframework.extensions.surf.util.I18NUtil;
import org.springframework.orm.hibernate3.HibernateCallback;
import org.springframework.orm.hibernate3.support.HibernateDaoSupport;
@@ -61,7 +60,7 @@ import org.springframework.orm.hibernate3.support.HibernateDaoSupport;
* @author Derek Hulley
* @since V2.2SP4
*/
public class FixNameCrcValuesPatch extends AbstractPatch implements ApplicationEventPublisherAware
public class FixNameCrcValuesPatch extends AbstractPatch
{
private static final String MSG_SUCCESS = "patch.fixNameCrcValues.result";
private static final String MSG_REWRITTEN = "patch.fixNameCrcValues.fixed";
@@ -71,7 +70,6 @@ public class FixNameCrcValuesPatch extends AbstractPatch implements ApplicationE
private NodeDaoService nodeDaoService;
private QNameDAO qnameDAO;
private RuleService ruleService;
private ApplicationEventPublisher applicationEventPublisher;
public FixNameCrcValuesPatch()
{
@@ -106,14 +104,6 @@ public class FixNameCrcValuesPatch extends AbstractPatch implements ApplicationE
this.ruleService = ruleService;
}
/* (non-Javadoc)
* @see org.springframework.context.ApplicationEventPublisherAware#setApplicationEventPublisher(org.springframework.context.ApplicationEventPublisher)
*/
public void setApplicationEventPublisher(ApplicationEventPublisher applicationEventPublisher)
{
this.applicationEventPublisher = applicationEventPublisher;
}
@Override
protected void checkProperties()
{
@@ -180,20 +170,33 @@ public class FixNameCrcValuesPatch extends AbstractPatch implements ApplicationE
public String fixCrcValues() throws Exception
{
// get the association types to check
BatchProcessor<Long> batchProcessor = new BatchProcessor<Long>(logger, transactionService
.getRetryingTransactionHelper(), ruleService, tenantAdminService, applicationEventPublisher, findMismatchedCrcs(),
"FixNameCrcValuesPatch", 1000, 2, 20);
BatchProcessor<Long> batchProcessor = new BatchProcessor<Long>(
"FixNameCrcValuesPatch",
transactionService.getRetryingTransactionHelper(),
findMismatchedCrcs(),
2, 20,
applicationEventPublisher,
logger, 1000);
// Precautionary flush and clear so that we have an empty session
getSession().flush();
getSession().clear();
int updated = batchProcessor.process(new Worker<Long>(){
int updated = batchProcessor.process(new BatchProcessWorker<Long>()
{
public String getIdentifier(Long entry)
{
return entry.toString();
}
public void beforeProcess() throws Throwable
{
// Switch rules off
ruleService.disableRules();
// Authenticate as system
String systemUsername = AuthenticationUtil.getSystemUserName();
AuthenticationUtil.setFullyAuthenticatedUser(systemUsername);
}
public void process(Long childAssocId) throws Throwable
{
@@ -247,7 +250,13 @@ public class FixNameCrcValuesPatch extends AbstractPatch implements ApplicationE
// Record
writeLine(I18NUtil.getMessage(MSG_REWRITTEN, childNode.getId(), childName, oldChildCrc, childCrc,
qname, oldQNameCrc, qnameCrc));
}}, true);
}
public void afterProcess() throws Throwable
{
ruleService.enableRules();
}
}, true);
String msg = I18NUtil.getMessage(MSG_SUCCESS, updated, logFile);