Merged BRANCHES/V3.2 to HEAD:

18363: WCM clustering - ETHREEOH-3962 (duplicate root node entry)
19091: Fix Part 1 ALF-726: v3.1.x Content Cleaner Job needs to be ported to v3.2
19159: Fixed ALF-726: Migrate pre-3.2 content URLs to new format and pick up and tag existing orphaned content
19169: Fix fallout from 19159 for ALF-726: Migrate pre-3.2 content URLs to new format and pick up and tag existing orphaned content
19262: ALF-726 Multithreading for content URL conversion



git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/alfresco/HEAD/root@19267 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
Derek Hulley
2010-03-12 19:11:12 +00:00
parent a2c2e215a8
commit fdc8f6f331
33 changed files with 2589 additions and 1175 deletions

View File

@@ -37,7 +37,6 @@
</constructor-arg>
</bean>
<!-- Abstract bean definition defining base definition for content store cleaner -->
<!-- Performs the content cleanup -->
<bean id="contentStoreCleaner" class="org.alfresco.repo.content.cleanup.ContentStoreCleaner" init-method="init">
<property name="protectDays" >

View File

@@ -17,6 +17,7 @@
<bean id="patchDAO" class="org.alfresco.repo.domain.patch.ibatis.PatchDAOImpl">
<property name="sqlMapClientTemplate" ref="repoSqlMapClientTemplate"/>
<property name="contentDataDAO" ref="contentDataDAO"/>
</bean>
<bean id="appliedPatchDAO" class="org.alfresco.repo.domain.patch.ibatis.AppliedPatchDAOImpl">

View File

@@ -11,6 +11,7 @@
<typeHandler javaType="java.io.Serializable" jdbcType="BLOB" callback="org.alfresco.ibatis.SerializableTypeHandlerCallback"/>
<sqlMap resource="alfresco/ibatis/#resource.dialect#/appliedpatch-common-SqlMap.xml"/>
<sqlMap resource="alfresco/ibatis/#resource.dialect#/patch-common-SqlMap.xml"/>
<sqlMap resource="alfresco/ibatis/#resource.dialect#/qname-common-SqlMap.xml"/>
<sqlMap resource="alfresco/ibatis/#resource.dialect#/qname-insert-SqlMap.xml"/>
<sqlMap resource="alfresco/ibatis/#resource.dialect#/locks-common-SqlMap.xml"/>

View File

@@ -15,6 +15,7 @@
<typeAlias alias="ContentUrl" type="org.alfresco.repo.domain.contentdata.ContentUrlEntity"/>
<typeAlias alias="ContentData" type="org.alfresco.repo.domain.contentdata.ContentDataEntity"/>
<typeAlias alias="ContentClean" type="org.alfresco.repo.domain.contentclean.ContentCleanEntity"/>
<typeAlias alias="Ids" type="org.alfresco.ibatis.IdsEntity"/>
<!-- -->
<!-- Result Maps -->
@@ -191,7 +192,7 @@
from
alf_content_url cu
where
cu.orphan_time <= #orphanTime#
cu.orphan_time <= #orphanTime# and cu.orphan_time is not null
]]>
</select>
@@ -224,7 +225,7 @@
from
alf_content_url
where
orphan_time <= #orphanTime#
orphan_time <= #orphanTime# and orphan_time is not null
]]>
</delete>
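The #orphanTime# value bound into the two statements above is supplied by the content store cleaner, which protects recently orphaned content for system.content.orphanProtectDays days (default 14; see the properties file further down). A minimal sketch, assuming the cutoff is simply now minus the protect window; the added 'is not null' predicate presumably also keeps the orphan_time index usable:

long protectDays = 14L; // assumed: system.content.orphanProtectDays
long orphanTime = System.currentTimeMillis()
        - protectDays * 24L * 60L * 60L * 1000L;
// rows with orphan_time <= orphanTime (and orphan_time not null) are eligible for cleanup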
@@ -247,16 +248,15 @@
</select>
<!-- Get the ContentData entity by Node and property QName -->
<select id="select_ContentDataByNodeAndQName" parameterMap="parameter_NodeAndQNameMap" resultClass="long">
<select id="select_ContentDataByNodeAndQName" parameterClass="Ids" resultClass="long">
select
np.long_value as id
from
alf_node_properties np
where
np.node_id = ? and
np.qname_id = ? and
np.actual_type_n = 3 and
np.persisted_type_n = 3
np.node_id = #idOne# and
np.qname_id in <iterate property="ids" open="(" close=")" conjunction=",">#ids[]#</iterate> and
(np.actual_type_n = 3 or np.actual_type_n = 21)
</select>
<update id="update_ContentData" parameterClass="ContentData">

View File

@@ -0,0 +1,106 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE sqlMap
PUBLIC "-//ibatis.apache.org//DTD SQL Map 2.0//EN"
"http://ibatis.apache.org/dtd/sql-map-2.dtd">
<sqlMap namespace="alfresco.patch">
<!-- -->
<!-- Type Defs -->
<!-- -->
<typeAlias alias="Ids" type="org.alfresco.ibatis.IdsEntity"/>
<!-- -->
<!-- Result Maps -->
<!-- -->
<resultMap class="java.util.HashMap" id="result_admOldContentProp">
<result property="nodeId" column="node_id" jdbcType="BIGINT" javaType="java.lang.Long"/>
<result property="qnameId" column="qname_id" jdbcType="BIGINT" javaType="java.lang.Long"/>
<result property="listIndex" column="list_index" jdbcType="INTEGER" javaType="java.lang.Integer"/>
<result property="localeId" column="locale_id" jdbcType="BIGINT" javaType="java.lang.Long"/>
<result property="stringValue" column="string_value" jdbcType="VARCHAR" javaType="java.lang.String"/>
</resultMap>
<!-- -->
<!-- Parameter Maps -->
<!-- -->
<parameterMap class="map" id="parameter_admNewContentProp">
<parameter property="longValue" jdbcType="BIGINT" javaType="java.lang.Long"/>
<parameter property="nodeId" jdbcType="BIGINT" javaType="java.lang.Long"/>
<parameter property="qnameId" jdbcType="BIGINT" javaType="java.lang.Long"/>
<parameter property="listIndex" jdbcType="INTEGER" javaType="java.lang.Integer"/>
<parameter property="localeId" jdbcType="BIGINT" javaType="java.lang.Long"/>
</parameterMap>
<!-- -->
<!-- Selects -->
<!-- -->
<select id="select_avmMaxNodeId" resultClass="java.lang.Long">
select max(id) from avm_nodes
</select>
<select id="select_admMaxNodeId" resultClass="java.lang.Long">
select max(id) from alf_node
</select>
<select id="select_avmNodesWithOldContentProperties" parameterClass="Ids" resultClass="java.lang.Long">
<![CDATA[
select
id
from
avm_nodes
where
id >= #idOne#
and id < #idTwo#
and class_type = 'plainfile'
and content_url is not null
and content_url not like 'id:%'
order by
id ASC
]]>
</select>
<select id="select_admOldContentProperties" parameterClass="Ids" resultMap="result_admOldContentProp">
<![CDATA[
select
node_id,
qname_id,
list_index,
locale_id,
string_value
from
alf_node_properties
where
node_id >= #idOne#
and node_id < #idTwo#
and actual_type_n = 11
order by
node_id ASC
]]>
</select>
<!-- -->
<!-- Updates -->
<!-- -->
<update id="update_admOldContentProperty" parameterMap="parameter_admNewContentProp">
update
alf_node_properties
set
actual_type_n = 21,
persisted_type_n = 3,
long_value = ?,
string_value = null
where
node_id = ?
and qname_id = ?
and list_index = ?
and locale_id = ?
</update>
</sqlMap>
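A hedged usage sketch (not part of this commit) of the update statement above through the iBATIS SqlMapClient, assuming statement namespaces are enabled and a configured sqlMapClient is in scope; all IDs are hypothetical and java.util.Map/HashMap imports are assumed. The positional '?' parameters bind in the declared parameterMap order: longValue, nodeId, qnameId, listIndex, localeId.

Map<String, Object> params = new HashMap<String, Object>();
params.put("longValue", 12345L); // hypothetical ID of the new content data row
params.put("nodeId", 100L);      // hypothetical node ID
params.put("qnameId", 25L);      // hypothetical QName ID
params.put("listIndex", 0);
params.put("localeId", 1L);
sqlMapClient.update("alfresco.patch.update_admOldContentProperty", params);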

View File

@@ -3,7 +3,7 @@ patch.service.preceeded_by_alternative=Preceeded by alternative patch ''{0}''.
patch.service.not_relevant=Not relevant to schema {0}
patch.executer.checking=Checking for patches to apply ...
patch.service.applying_patch=\tApplying patch ''{0}'' ({1}).
patch.progress=\t\tPatch {0}% complete, estimated complete at {1}.
patch.progress=\t\tPatch {0} {1}% complete, estimated complete at {2}.
patch.validation.failed=Validation of patch ''{0}'' failed. Patch is applicable to a newer schema than the schema of this build ({1}).\nfixesToSchema: {2}\ntargetSchema: {3}.\nIf this patch should always be run once on every installation, please ensure that the ''fixesToSchema'' value is set to '''${version.schema}'''.
patch.executer.no_patches_required=No patches were required.
patch.executer.system_readonly=Patches cannot be applied to a read-only system. Possible incompatibilities may exist between the application code and the existing data.
@@ -11,6 +11,7 @@ patch.executer.not_executed =\n=== Recorded patch (not executed) === \nID: {0}\n
patch.executer.executed =\n=== Applied patch === \nID: {0}\nRESULT: \n{1}\n=====================================
patch.executer.failed =\n=== Failed to apply patch === \nID: {0}\nRESULT: \n{1}\n=====================================
patch.noLongerSupportedPatch.err.use_incremental_upgrade = \nPatch ''{0}'' was last supported on version {1}.\n Please follow an incremental upgrade using version {2}.
# General patch messages
patch.genericBootstrap.result.exists=Bootstrap location already exists: {0}
@@ -19,6 +20,8 @@ patch.genericBootstrap.err.multiple_found=Multiple nodes found: {0}
patch.general.property_not_set=Patch property ''{0}'' has not been set on this patch: {1}
patch.QNamePatch.result=Successfully updated the ''{0}'' QName to ''{1}''.
# Individual patch messages
patch.noOpPatch.description=A placeholder patch; usually marks a superceded patch.
@@ -301,4 +304,20 @@ patch.redeployNominatedInvitationProcessWithPropsForShare.description=Redeploy n
patch.redeployNominatedInvitationProcessWithPropsForShare.result=Nominated invitation workflow redeployed
patch.thumbnailsAssocQName.description=Update the 'cm:thumbnails' association QName to 'rn:rendition'.
patch.QNamePatch.result=Successfully updated the ''{0}'' QName to ''{1}''.
patch.convertContentUrls.description=Converts pre-3.2 content URLs to use the alf_content_data table. The conversion work can also be done on a schedule; please contact Alfresco Support for further details.
patch.convertContentUrls.bypassingPatch=Content URL conversion was NOT performed by this patch. Activate the scheduled job 'contentUrlConverterTrigger'.
patch.convertContentUrls.start=Content URL conversion progress:
patch.convertContentUrls.inProgress=Content URL conversion increment completed. Awaiting next scheduled call...
patch.convertContentUrls.done=Content URL conversion completed.
patch.convertContentUrls.adm.start=\tProcessing ADM Content URLs.
patch.convertContentUrls.adm.done=\tFinished processing ADM nodes up to ID {0}.
patch.convertContentUrls.avm.start=\tProcessing AVM Content URLs.
patch.convertContentUrls.avm.done=\tFinished processing AVM nodes up to ID {0}.
patch.convertContentUrls.store.start=\tReading content URLs from store {0}.
patch.convertContentUrls.store.readOnly=\tNo content URLs will be marked for deletion. The content store is read-only.
patch.convertContentUrls.store.pending=\tContent URLs will be marked for deletion once the URL conversion process is complete.
patch.convertContentUrls.store.noSupport=\tNo content URLs will be marked for deletion. The store does not support URL enumeration.
patch.convertContentUrls.store.progress=\t\tProcessed {0} content URLs from store.
patch.convertContentUrls.store.scheduled=\tScheduled {0} content URLs for deletion from store: {1}
patch.convertContentUrls.store.done=This job is complete. Deactivate the scheduled job 'contentUrlConverterTrigger'.
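For illustration, the reworked patch.progress pattern above now receives the patch ID as argument {0}; a sketch of how it renders, with hypothetical values (leading tabs omitted from the comment):

String pattern = "\t\tPatch {0} {1}% complete, estimated complete at {2}.";
String line = java.text.MessageFormat.format(
        pattern, "patch.convertContentUrls", 45, new java.util.Date());
// e.g. "Patch patch.convertContentUrls 45% complete, estimated complete at 12-Mar-2010 19:11"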

View File

@@ -56,8 +56,8 @@
<default>0</default>
</property>
<property name="sys:versionEdition">
<type>d:any</type>
<multiple>true</multiple>
<type>d:content</type>
<multiple>false</multiple>
</property>
<property name="sys:versionProperties">
<type>d:content</type>

View File

@@ -1957,6 +1957,7 @@
<property name="fixesFromSchema"><value>0</value></property>
<property name="fixesToSchema"><value>3006</value></property>
<property name="targetSchema"><value>3007</value></property>
<property name="applyToTenants"><value>false</value></property>
<property name="dependsOn" >
<list>
<ref bean="patch.uniqueChildName" />
@@ -1970,6 +1971,9 @@
<property name="nodeDaoService">
<ref bean="nodeDaoService" />
</property>
<property name="qnameDAO">
<ref bean="qnameDAO" />
</property>
<property name="ruleService">
<ref bean="ruleService" />
</property>
@@ -2101,4 +2105,40 @@
</property>
</bean>
<bean id="patch.convertContentUrls" class="org.alfresco.repo.admin.patch.impl.ContentUrlConverterPatch" parent="basePatch">
<property name="id"><value>patch.convertContentUrls</value></property>
<property name="description"><value>patch.convertContentUrls.description</value></property>
<property name="fixesFromSchema"><value>0</value></property>
<property name="fixesToSchema"><value>3499</value></property>
<property name="targetSchema"><value>3500</value></property>
<property name="applyToTenants"><value>false</value></property>
<property name="registryService">
<ref bean="registryService"/>
</property>
<property name="jobLockService">
<ref bean="jobLockService"/>
</property>
<property name="nodeDaoService">
<ref bean="nodeDaoService"/>
</property>
<property name="patchDAO">
<ref bean="patchDAO"/>
</property>
<property name="contentDataDAO">
<ref bean="contentDataDAO"/>
</property>
<property name="contentStore">
<ref bean="fileContentStore"/>
</property>
<property name="threadCount">
<value>${system.content.contentUrlConverter.threadCount}</value>
</property>
<property name="batchSize">
<value>${system.content.contentUrlConverter.batchSize}</value>
</property>
<property name="runAsScheduledJob">
<value>${system.content.contentUrlConverter.runAsScheduledJob}</value>
</property>
</bean>
</beans>

View File

@@ -142,6 +142,12 @@ system.content.eagerOrphanCleanup=false
system.content.orphanProtectDays=14
# The CRON expression to trigger the deletion of resources associated with orphaned content.
system.content.orphanCleanup.cronExpression=0 0 4 * * ?
# The CRON expression to trigger content URL conversion. This process is not intensive and can
# be triggered on a live system. Similarly, it can be triggered using JMX on a dedicated machine.
# The default expression below only fires in the year 2099, i.e. effectively never.
system.content.contentUrlConverter.cronExpression=* * * * * ? 2099
system.content.contentUrlConverter.threadCount=2
system.content.contentUrlConverter.batchSize=500
system.content.contentUrlConverter.runAsScheduledJob=false
# #################### #
# Lucene configuration #

View File

@@ -109,6 +109,30 @@
</property>
</bean>
<bean id="contentUrlConverterJobDetail" class="org.springframework.scheduling.quartz.JobDetailBean">
<property name="jobClass">
<value>org.alfresco.repo.admin.patch.impl.ContentUrlConverterPatch$ContentUrlConverterJob</value>
</property>
<property name="jobDataAsMap">
<map>
<entry key="contentUrlConverter">
<ref bean="patch.convertContentUrls" />
</entry>
</map>
</property>
</bean>
<bean id="contentUrlConverterTrigger" class="org.alfresco.util.CronTriggerBean">
<property name="jobDetail">
<ref bean="contentUrlConverterJobDetail" />
</property>
<property name="scheduler">
<ref bean="schedulerFactory" />
</property>
<property name="cronExpression">
<value>${system.content.contentUrlConverter.cronExpression}</value>
</property>
</bean>
<bean id="nodeServiceCleanupJobDetail" class="org.springframework.scheduling.quartz.JobDetailBean">
<property name="jobClass">
<value>org.alfresco.repo.node.cleanup.NodeCleanupJob</value>

View File

@@ -19,4 +19,4 @@ version.build=@build-number@
# Schema number
version.schema=4005
version.schema=4006

View File

@@ -40,13 +40,15 @@ import org.alfresco.service.namespace.NamespaceService;
import org.alfresco.service.transaction.TransactionService;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.context.ApplicationEventPublisherAware;
/**
* Base implementation of the patch. This class ensures that the patch is thread- and transaction-safe.
*
* @author Derek Hulley
*/
public abstract class AbstractPatch implements Patch
public abstract class AbstractPatch implements Patch, ApplicationEventPublisherAware
{
/**
* I18N message when properties not set.
@@ -92,7 +94,8 @@ public abstract class AbstractPatch implements Patch
protected SearchService searchService;
protected AuthenticationContext authenticationContext;
protected TenantAdminService tenantAdminService;
/** Publishes batch event notifications for JMX viewing */
protected ApplicationEventPublisher applicationEventPublisher;
public AbstractPatch()
{
@@ -161,6 +164,14 @@ public abstract class AbstractPatch implements Patch
this.tenantAdminService = tenantAdminService;
}
/**
* Set automatically by the Spring container
*/
public void setApplicationEventPublisher(ApplicationEventPublisher applicationEventPublisher)
{
this.applicationEventPublisher = applicationEventPublisher;
}
/**
* This ensures that this bean gets registered with the appropriate {@link PatchService service}.
*/
@@ -358,6 +369,8 @@ public abstract class AbstractPatch implements Patch
checkPropertyNotNull(nodeService, "nodeService");
checkPropertyNotNull(searchService, "searchService");
checkPropertyNotNull(authenticationContext, "authenticationContext");
checkPropertyNotNull(tenantAdminService, "tenantAdminService");
checkPropertyNotNull(applicationEventPublisher, "applicationEventPublisher");
if (fixesFromSchema == -1 || fixesToSchema == -1 || targetSchema == -1)
{
throw new AlfrescoRuntimeException(
@@ -541,7 +554,7 @@ public abstract class AbstractPatch implements Patch
{
Date end = new Date(currentTime + timeRemaining);
String msg = I18NUtil.getMessage(MSG_PROGRESS, report, end);
String msg = I18NUtil.getMessage(MSG_PROGRESS, getId(), report, end);
progress_logger.info(msg);
}
}
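Since AbstractPatch now implements ApplicationEventPublisherAware, Spring injects the publisher into every patch bean automatically, which is why the subclasses below can drop their own fields and setters. A minimal hypothetical subclass (not in this commit):

public class ExamplePatch extends AbstractPatch // hypothetical, for illustration
{
    @Override
    protected String applyInternal() throws Exception
    {
        // the inherited applicationEventPublisher is available here, e.g. to
        // hand to a BatchProcessor; checkProperties() guarantees it is non-null
        return "Example patch applied";
    }
}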

View File

@@ -49,8 +49,6 @@ import org.alfresco.service.namespace.QName;
import org.alfresco.service.namespace.RegexQNamePattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.context.ApplicationEventPublisherAware;
/**
* Migrates authority information previously stored in the user store to the spaces store, using the new structure used
@@ -58,7 +56,7 @@ import org.springframework.context.ApplicationEventPublisherAware;
*
* @author dward
*/
public class AuthorityMigrationPatch extends AbstractPatch implements ApplicationEventPublisherAware
public class AuthorityMigrationPatch extends AbstractPatch
{
/** The title we give to the batch process in progress messages / JMX. */
private static final String MSG_PROCESS_NAME = "patch.authorityMigration.process.name";
@@ -91,9 +89,6 @@ public class AuthorityMigrationPatch extends AbstractPatch implements Applicatio
/** The user bootstrap. */
private ImporterBootstrap userBootstrap;
/** The application event publisher. */
private ApplicationEventPublisher applicationEventPublisher;
/**
* Sets the authority service.
*
@@ -127,17 +122,6 @@ public class AuthorityMigrationPatch extends AbstractPatch implements Applicatio
this.userBootstrap = userBootstrap;
}
/**
* Sets the application event publisher.
*
* @param applicationEventPublisher
* the application event publisher
*/
public void setApplicationEventPublisher(ApplicationEventPublisher applicationEventPublisher)
{
this.applicationEventPublisher = applicationEventPublisher;
}
/**
* Recursively retrieves the authorities under the given node and their associations.
*
@@ -238,14 +222,33 @@ public class AuthorityMigrationPatch extends AbstractPatch implements Applicatio
*/
private void migrateAuthorities(final Map<String, String> authoritiesToCreate, Map<String, Set<String>> parentAssocs)
{
BatchProcessor.Worker<Map.Entry<String, Set<String>>> worker = new BatchProcessor.Worker<Map.Entry<String, Set<String>>>()
{
final String tenantDomain = tenantAdminService.getCurrentUserDomain();
BatchProcessor.BatchProcessWorker<Map.Entry<String, Set<String>>> worker = new BatchProcessor.BatchProcessWorker<Map.Entry<String, Set<String>>>()
{
public String getIdentifier(Entry<String, Set<String>> entry)
{
return entry.getKey();
}
public void beforeProcess() throws Throwable
{
// Disable rules
ruleService.disableRules();
// Authentication
String systemUser = AuthenticationUtil.getSystemUserName();
systemUser = tenantAdminService.getDomainUser(systemUser, tenantDomain);
AuthenticationUtil.setRunAsUser(systemUser);
}
public void afterProcess() throws Throwable
{
// Enable rules
ruleService.enableRules();
// Clear authentication
AuthenticationUtil.clearCurrentSecurityContext();
}
public void process(Entry<String, Set<String>> authority) throws Throwable
{
String authorityName = authority.getKey();
@@ -290,10 +293,13 @@ public class AuthorityMigrationPatch extends AbstractPatch implements Applicatio
}
};
// Migrate using 2 threads, 20 authorities per transaction. Log every 100 entries.
new BatchProcessor<Map.Entry<String, Set<String>>>(AuthorityMigrationPatch.progress_logger,
this.transactionService.getRetryingTransactionHelper(), this.ruleService, this.tenantAdminService,
this.applicationEventPublisher, parentAssocs.entrySet(), I18NUtil
.getMessage(AuthorityMigrationPatch.MSG_PROCESS_NAME), 100, 2, 20).process(worker, true);
new BatchProcessor<Map.Entry<String, Set<String>>>(
I18NUtil.getMessage(AuthorityMigrationPatch.MSG_PROCESS_NAME),
this.transactionService.getRetryingTransactionHelper(),
parentAssocs.entrySet(),
2, 20,
AuthorityMigrationPatch.this.applicationEventPublisher,
AuthorityMigrationPatch.progress_logger, 100).process(worker, true);
}
/**

View File

@@ -0,0 +1,687 @@
/*
* Copyright (C) 2005-2010 Alfresco Software Limited.
*
* This file is part of Alfresco
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
*/
package org.alfresco.repo.admin.patch.impl;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.alfresco.error.AlfrescoRuntimeException;
import org.alfresco.repo.admin.patch.AbstractPatch;
import org.alfresco.repo.admin.patch.PatchExecuter;
import org.alfresco.repo.admin.registry.RegistryKey;
import org.alfresco.repo.admin.registry.RegistryService;
import org.alfresco.repo.avm.AVMDAOs;
import org.alfresco.repo.avm.PlainFileNode;
import org.alfresco.repo.batch.BatchProcessor;
import org.alfresco.repo.batch.BatchProcessor.BatchProcessWorkerAdaptor;
import org.alfresco.repo.content.ContentStore;
import org.alfresco.repo.content.ContentStore.ContentUrlHandler;
import org.alfresco.repo.domain.contentdata.ContentDataDAO;
import org.alfresco.repo.domain.patch.PatchDAO;
import org.alfresco.repo.lock.JobLockService;
import org.alfresco.repo.lock.LockAcquisitionException;
import org.alfresco.repo.node.db.NodeDaoService;
import org.alfresco.repo.security.authentication.AuthenticationUtil;
import org.alfresco.repo.transaction.AlfrescoTransactionSupport;
import org.alfresco.repo.transaction.AlfrescoTransactionSupport.TxnReadState;
import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
import org.alfresco.service.ServiceRegistry;
import org.alfresco.service.cmr.repository.ContentData;
import org.alfresco.service.cmr.repository.ContentReader;
import org.alfresco.service.namespace.NamespaceService;
import org.alfresco.service.namespace.QName;
import org.alfresco.util.VmShutdownListener;
import org.alfresco.util.VmShutdownListener.VmShutdownException;
import org.apache.commons.lang.mutable.MutableInt;
import org.apache.commons.lang.mutable.MutableLong;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.quartz.Job;
import org.quartz.JobDataMap;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;
import org.springframework.dao.DataIntegrityViolationException;
import org.springframework.extensions.surf.util.I18NUtil;
import org.springframework.extensions.surf.util.Pair;
import org.springframework.extensions.surf.util.PropertyCheck;
/**
* Component to migrate old-style content URL storage (<tt>contentUrl=store://...|mimetype=...</tt>)
* to the newer <b>alf_content_url</b> storage.
* <p/>
* The {@link RegistryService} is used to record progress. The component picks up ranges of node IDs
* (DM and AVM) and records the progress. Since new nodes will not need converting, the converter
* will stop once it hits the largest node ID that it found upon first initiation. Once completed,
* the content store reader will start to pick up orphaned content and schedule it for deletion.
* <p/>
* A cluster-wide lock is set so that a single instance of this job will be running per Alfresco
* installation.
*
* @author Derek Hulley
* @since 3.2.1
*/
public class ContentUrlConverterPatch extends AbstractPatch
{
// Registry keys
private static final RegistryKey KEY_ADM_MAX_ID = new RegistryKey(
NamespaceService.SYSTEM_MODEL_1_0_URI, "ContentUrlConverter", "adm", "max-id");
private static final RegistryKey KEY_ADM_RANGE_START_ID = new RegistryKey(
NamespaceService.SYSTEM_MODEL_1_0_URI, "ContentUrlConverter", "adm", "range-start-id");
private static final RegistryKey KEY_ADM_DONE = new RegistryKey(
NamespaceService.SYSTEM_MODEL_1_0_URI, "ContentUrlConverter", "adm", "done");
private static final RegistryKey KEY_AVM_MAX_ID = new RegistryKey(
NamespaceService.SYSTEM_MODEL_1_0_URI, "ContentUrlConverter", "avm", "max-id");
private static final RegistryKey KEY_AVM_RANGE_START_ID = new RegistryKey(
NamespaceService.SYSTEM_MODEL_1_0_URI, "ContentUrlConverter", "avm", "range-start-id");
private static final RegistryKey KEY_AVM_DONE = new RegistryKey(
NamespaceService.SYSTEM_MODEL_1_0_URI, "ContentUrlConverter", "avm", "done");
private static final RegistryKey KEY_STORE_DONE = new RegistryKey(
NamespaceService.SYSTEM_MODEL_1_0_URI, "ContentUrlConverter", "store", "done");
// Lock key
private static final QName LOCK = QName.createQName(NamespaceService.SYSTEM_MODEL_1_0_URI, "ContentUrlConverter");
// Log as per patching
private static Log logger = LogFactory.getLog(PatchExecuter.class);
private static VmShutdownListener shutdownListener = new VmShutdownListener("ContentUrlConverterPatch");
private RegistryService registryService;
private JobLockService jobLockService;
private NodeDaoService nodeDaoService;
private PatchDAO patchDAO;
private ContentStore contentStore;
private ContentDataDAO contentDataDAO;
private int threadCount;
private int batchSize;
private boolean runAsScheduledJob;
private ThreadLocal<Boolean> runningAsJob = new ThreadLocal<Boolean>();
/**
* Default constructor
*/
public ContentUrlConverterPatch()
{
runningAsJob.set(Boolean.FALSE);
threadCount = 2;
batchSize = 500;
}
/**
* Service to record progress for later pick-up
*/
public void setRegistryService(RegistryService registryService)
{
this.registryService = registryService;
}
/**
* Service to prevent concurrent execution
*/
public void setJobLockService(JobLockService jobLockService)
{
this.jobLockService = jobLockService;
}
/**
* Provides low-level access to do the property transformation
*/
public void setNodeDaoService(NodeDaoService nodeDaoService)
{
this.nodeDaoService = nodeDaoService;
}
/**
* Component that provides low-level queries and updates to support this patch
*/
public void setPatchDAO(PatchDAO patchDAO)
{
this.patchDAO = patchDAO;
}
/**
* Set the store containing the content URLs to lift for potential cleaning.
*
* @param contentStore the store containing the system's content URLs
*/
public void setContentStore(ContentStore contentStore)
{
this.contentStore = contentStore;
}
/**
* Set the component that will write URLs coming from the
* {@link ContentStore#getUrls(ContentUrlHandler) content store}.
*
* @param contentDataDAO the DAO to write the URLs
*/
public void setContentDataDAO(ContentDataDAO contentDataDAO)
{
this.contentDataDAO = contentDataDAO;
}
/**
* Set the number of threads that will be used to process the required work.
*
* @param threadCount the number of threads
*/
public void setThreadCount(int threadCount)
{
this.threadCount = threadCount;
}
/**
* Set the number of URLs that are processed per job pass; this property is ignored
* when this component is run as a patch. Keep the number low (500) when running
* at short intervals on a live machine.
*
* @param batchSize the number of nodes to process per batch when running on a schedule
*/
public void setBatchSize(int batchSize)
{
this.batchSize = batchSize;
}
/**
* Set whether the patch execution should bypass any actual work, i.e. whether the admin has
* chosen to leave all of the work to the scheduled job.
*
* @param runAsScheduledJob <tt>true</tt> to leave all work up to the scheduled job
*/
public void setRunAsScheduledJob(boolean runAsScheduledJob)
{
this.runAsScheduledJob = runAsScheduledJob;
}
@Override
protected void checkProperties()
{
PropertyCheck.mandatory(this, "registryService", registryService);
PropertyCheck.mandatory(this, "jobLockService", jobLockService);
PropertyCheck.mandatory(this, "nodeDaoService", nodeDaoService);
PropertyCheck.mandatory(this, "patchDAO", patchDAO);
super.checkProperties();
}
/**
* Method called when executed as a scheduled job.
*/
private void executeViaJob()
{
AuthenticationUtil.RunAsWork<String> patchRunAs = new AuthenticationUtil.RunAsWork<String>()
{
public String doWork() throws Exception
{
RetryingTransactionCallback<String> patchTxn = new RetryingTransactionCallback<String>()
{
public String execute() throws Exception
{
try
{
runningAsJob.set(Boolean.TRUE);
String report = applyInternal();
// done
return report;
}
finally
{
runningAsJob.set(Boolean.FALSE); // Back to default
}
}
};
return transactionService.getRetryingTransactionHelper().doInTransaction(patchTxn);
}
};
String report = AuthenticationUtil.runAs(patchRunAs, AuthenticationUtil.getSystemUserName());
if (report != null)
{
logger.info(report);
}
}
/**
* Gets a set of work to do and executes it within this transaction. If kicked off via a job,
* the task will exit before completion, on the assumption that it will be kicked off at regular
* intervals. When called as a patch, it will run to completion with full progress logging.
*/
@Override
protected String applyInternal() throws Exception
{
if (AlfrescoTransactionSupport.getTransactionReadState() != TxnReadState.TXN_READ_WRITE)
{
// Nothing to do
return null;
}
boolean isRunningAsJob = runningAsJob.get().booleanValue();
// Do we bug out of patch execution
if (runAsScheduledJob && !isRunningAsJob)
{
return I18NUtil.getMessage("patch.convertContentUrls.bypassingPatch");
}
boolean completed = false;
// Lock in proportion to the batch size (0.1s per node or 0.8 min per 500)
String lockToken = getLock(batchSize*100L);
if (lockToken == null)
{
// Some other process is busy
if (isRunningAsJob)
{
// Fine, we're doing batches
return null;
}
else
{
throw new RuntimeException("Unable to get job lock during patch execution. Only one server should perform the upgrade.");
}
}
try
{
logger.info(I18NUtil.getMessage("patch.convertContentUrls.start"));
logger.info(I18NUtil.getMessage("patch.convertContentUrls.adm.start"));
boolean admCompleted = applyADM(lockToken);
logger.info(I18NUtil.getMessage("patch.convertContentUrls.avm.start"));
boolean avmCompleted = applyAVM(lockToken);
logger.info(I18NUtil.getMessage("patch.convertContentUrls.store.start", contentStore));
boolean urlLiftingCompleted = applyUrlLifting(lockToken);
completed = admCompleted && avmCompleted && urlLiftingCompleted;
}
finally
{
jobLockService.releaseLock(lockToken, LOCK);
}
if (completed)
{
return I18NUtil.getMessage("patch.convertContentUrls.done");
}
else
{
return I18NUtil.getMessage("patch.convertContentUrls.inProgress");
}
}
/**
* Attempts to get the lock. If the lock couldn't be taken, then <tt>null</tt> is returned.
*
* @return Returns the lock token or <tt>null</tt>
*/
private String getLock(long time)
{
try
{
return jobLockService.getLock(LOCK, time);
}
catch (LockAcquisitionException e)
{
return null;
}
}
/**
* Refreshes the held lock. If the refresh fails, the current transaction is marked for rollback.
*
* @throws IllegalArgumentException if no lock token is provided
*/
private void refreshLock(String lockToken, long time)
{
if (lockToken == null)
{
throw new IllegalArgumentException("Must provide existing lockToken");
}
jobLockService.refreshLock(lockToken, LOCK, time);
}
private boolean applyADM(final String lockToken)
{
RetryingTransactionCallback<Boolean> callback = new RetryingTransactionCallback<Boolean>()
{
public Boolean execute() throws Throwable
{
return applyADM();
}
};
boolean done = false;
while (!shutdownListener.isVmShuttingDown())
{
refreshLock(lockToken, batchSize*100L);
done = transactionService.getRetryingTransactionHelper().doInTransaction(callback, false, true);
if (done)
{
break;
}
}
return done;
}
/**
* Do the DM conversion work
* @return Returns <tt>true</tt> if the work is done
*/
private boolean applyADM() throws Exception
{
Long maxId = (Long) registryService.getProperty(KEY_ADM_MAX_ID);
// Must we run at all?
Boolean done = (Boolean) registryService.getProperty(KEY_ADM_DONE);
if (done != null && done.booleanValue())
{
logger.info(I18NUtil.getMessage("patch.convertContentUrls.adm.done", maxId));
return true;
}
if (maxId == null)
{
maxId = patchDAO.getMaxAdmNodeID();
registryService.addProperty(KEY_ADM_MAX_ID, maxId);
}
Long startId = (Long) registryService.getProperty(KEY_ADM_RANGE_START_ID);
if (startId == null)
{
startId = 1L;
registryService.addProperty(KEY_ADM_RANGE_START_ID, startId);
}
// Each thread gets 10 executions, i.e. we build ranges for threadCount*10 batches of work
Long endId = startId;
Collection<Pair<Long, Long>> batchProcessorWork = new ArrayList<Pair<Long,Long>>(2);
for (long i = 0; i < threadCount*10; i++)
{
endId = startId + (i+1L) * batchSize;
Pair<Long, Long> batchEntry = new Pair<Long, Long>(
startId + i * batchSize,
endId);
batchProcessorWork.add(batchEntry);
}
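// Worked example (hypothetical values): threadCount=2, batchSize=500, startId=1
// yields 20 ranges [1,501), [501,1001), ..., [9501,10001), leaving endId=10001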
BatchProcessWorkerAdaptor<Pair<Long, Long>> batchProcessorWorker = new BatchProcessWorkerAdaptor<Pair<Long, Long>>()
{
public void process(Pair<Long, Long> range) throws Throwable
{
Long startId = range.getFirst();
Long endId = range.getSecond();
// Bulk-update the old content properties
patchDAO.updateAdmV31ContentProperties(startId, endId);
}
};
BatchProcessor<Pair<Long, Long>> batchProcessor = new BatchProcessor<Pair<Long, Long>>(
"ContentUrlConverter.ADM (" + maxId + ")",
transactionService.getRetryingTransactionHelper(),
batchProcessorWork, threadCount, 1,
applicationEventPublisher, null, 1);
batchProcessor.process(batchProcessorWorker, true);
// Advance
startId = endId;
// Have we passed the maximum ID that needs conversion?
if (startId > maxId)
{
startId = maxId + 1;
// We're past the max ID that we're interested in
done = Boolean.TRUE;
registryService.addProperty(KEY_ADM_DONE, done);
logger.info(I18NUtil.getMessage("patch.convertContentUrls.adm.done", maxId));
return true;
}
// Progress
super.reportProgress(maxId, startId);
// Move the start ID on
registryService.addProperty(KEY_ADM_RANGE_START_ID, startId);
// More to do
return false;
}
private boolean applyAVM(final String lockToken)
{
RetryingTransactionCallback<Boolean> callback = new RetryingTransactionCallback<Boolean>()
{
public Boolean execute() throws Throwable
{
return applyAVM();
}
};
boolean done = false;
while (!shutdownListener.isVmShuttingDown())
{
refreshLock(lockToken, batchSize*100L);
done = transactionService.getRetryingTransactionHelper().doInTransaction(callback, false, true);
if (done)
{
break;
}
}
return done;
}
/**
* Do the AVM conversion work
*/
private boolean applyAVM() throws Exception
{
Long maxId = (Long) registryService.getProperty(KEY_AVM_MAX_ID);
// Must we run at all?
Boolean done = (Boolean) registryService.getProperty(KEY_AVM_DONE);
if (done != null && done.booleanValue())
{
logger.info(I18NUtil.getMessage("patch.convertContentUrls.avm.done", maxId));
return true;
}
if (maxId == null)
{
maxId = patchDAO.getMaxAvmNodeID();
registryService.addProperty(KEY_AVM_MAX_ID, maxId);
}
Long startId = (Long) registryService.getProperty(KEY_AVM_RANGE_START_ID);
if (startId == null)
{
startId = 1L;
registryService.addProperty(KEY_AVM_RANGE_START_ID, startId);
}
Long endId = startId + (batchSize * (long) threadCount * 10L);
final List<Long> nodeIds = patchDAO.getAvmNodesWithOldContentProperties(startId, endId);
BatchProcessWorkerAdaptor<Long> batchProcessorWorker = new BatchProcessWorkerAdaptor<Long>()
{
public void process(Long nodeId) throws Throwable
{
// Convert it
PlainFileNode node = (PlainFileNode) AVMDAOs.Instance().fAVMNodeDAO.getByID(nodeId);
ContentData contentData = node.getContentData();
node.setContentData(contentData);
AVMDAOs.Instance().fAVMNodeDAO.update(node);
}
};
BatchProcessor<Long> batchProcessor = new BatchProcessor<Long>(
"ContentUrlConverter.AVM (" + maxId + ")",
transactionService.getRetryingTransactionHelper(),
nodeIds, threadCount, batchSize,
applicationEventPublisher, null, 1);
batchProcessor.process(batchProcessorWorker, true);
// Advance
startId = endId;
// Have we passed the maximum ID that needs conversion?
if (startId > maxId)
{
startId = maxId + 1;
// We're past the max ID that we're interested in
done = Boolean.TRUE;
registryService.addProperty(KEY_AVM_DONE, done);
logger.info(I18NUtil.getMessage("patch.convertContentUrls.avm.done", maxId));
return true;
}
// Progress
super.reportProgress(maxId, startId);
// Move the start ID on
registryService.addProperty(KEY_AVM_RANGE_START_ID, startId);
// More to do
return false;
}
private boolean applyUrlLifting(final String lockToken) throws Exception
{
RetryingTransactionCallback<Boolean> callback = new RetryingTransactionCallback<Boolean>()
{
public Boolean execute() throws Throwable
{
return applyUrlLiftingInTxn(lockToken);
}
};
return transactionService.getRetryingTransactionHelper().doInTransaction(callback, false, true);
}
private boolean applyUrlLiftingInTxn(final String lockToken) throws Exception
{
// Check the store
if (!contentStore.isWriteSupported())
{
logger.info(I18NUtil.getMessage("patch.convertContentUrls.store.readOnly"));
return true;
}
Boolean admDone = (Boolean) registryService.getProperty(KEY_ADM_DONE);
Boolean avmDone = (Boolean) registryService.getProperty(KEY_AVM_DONE);
if ((admDone == null || !admDone.booleanValue()) || (avmDone == null || !avmDone.booleanValue()))
{
logger.info(I18NUtil.getMessage("patch.convertContentUrls.store.pending"));
return false;
}
// Must we run at all?
Boolean done = (Boolean) registryService.getProperty(KEY_STORE_DONE);
if (done != null && done.booleanValue())
{
logger.info(I18NUtil.getMessage("patch.convertContentUrls.store.done"));
return true;
}
final long totalSize = contentStore.getTotalSize();
final MutableLong currentSize = new MutableLong(0L);
final MutableInt count = new MutableInt();
count.setValue(0);
ContentUrlHandler handler = new ContentUrlHandler()
{
private int allCount = 0;
public void handle(String contentUrl)
{
if (shutdownListener.isVmShuttingDown())
{
throw new VmShutdownListener.VmShutdownException();
}
ContentReader reader = contentStore.getReader(contentUrl);
if (!reader.exists())
{
// Not there any more
return;
}
currentSize.setValue(currentSize.longValue() + reader.getSize());
try
{
contentDataDAO.createContentUrlOrphaned(contentUrl);
count.setValue(count.intValue()+1);
}
catch (DataIntegrityViolationException e)
{
// That's OK, the URL was already managed
}
allCount++;
if (allCount % batchSize == 0)
{
// Update our lock
refreshLock(lockToken, batchSize*100L);
if (totalSize < 0)
{
// Report
logger.info(I18NUtil.getMessage("patch.convertContentUrls.store.progress", allCount));
}
else
{
ContentUrlConverterPatch.super.reportProgress(totalSize, currentSize.longValue());
}
}
}
};
try
{
contentStore.getUrls(handler);
}
catch (UnsupportedOperationException e)
{
logger.info(I18NUtil.getMessage("patch.convertContentUrls.store.noSupport"));
}
catch (VmShutdownException e)
{
// We didn't manage to complete
return false;
}
// Record the completion
done = Boolean.TRUE;
registryService.addProperty(KEY_STORE_DONE, done);
// Done
logger.info(I18NUtil.getMessage("patch.convertContentUrls.store.scheduled", count.intValue(), contentStore));
return true;
}
/**
* Job to initiate the {@link ContentUrlConverterPatch}
*
* @author Derek Hulley
* @since 3.2.1
*/
public static class ContentUrlConverterJob implements Job
{
public ContentUrlConverterJob()
{
}
/**
* Calls the converter to do its work
*/
public void execute(JobExecutionContext context) throws JobExecutionException
{
JobDataMap jobData = context.getJobDetail().getJobDataMap();
// extract the content URL converter to use
Object contentUrlConverterObj = jobData.get("contentUrlConverter");
if (contentUrlConverterObj == null || !(contentUrlConverterObj instanceof ContentUrlConverterPatch))
{
throw new AlfrescoRuntimeException(
"'contentUrlConverter' data must contain valid 'ContentUrlConverter' reference");
}
ContentUrlConverterPatch contentUrlConverter = (ContentUrlConverterPatch) contentUrlConverterObj;
contentUrlConverter.executeViaJob();
}
}
}
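To make the class Javadoc concrete, a hedged sketch of the pre-3.2 single-string property form that this patch lifts into alf_content_url rows; the URL is hypothetical, and real values may carry further fields (size, encoding, locale). Fragment only; java.util.Map/HashMap imports are assumed.

String oldValue = "contentUrl=store://2010/3/12/1/0/abc.bin|mimetype=text/plain";
Map<String, String> parts = new HashMap<String, String>();
for (String token : oldValue.split("\\|"))
{
    int eq = token.indexOf('=');
    parts.put(token.substring(0, eq), token.substring(eq + 1));
}
String contentUrl = parts.get("contentUrl"); // lifted into its own alf_content_url row
String mimetype = parts.get("mimetype");     // retained on the new content data record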

View File

@@ -32,12 +32,13 @@ import java.util.zip.CRC32;
import org.alfresco.model.ContentModel;
import org.alfresco.repo.admin.patch.AbstractPatch;
import org.alfresco.repo.batch.BatchProcessor;
import org.alfresco.repo.batch.BatchProcessor.Worker;
import org.alfresco.repo.batch.BatchProcessor.BatchProcessWorker;
import org.alfresco.repo.domain.ChildAssoc;
import org.alfresco.repo.domain.Node;
import org.alfresco.repo.domain.hibernate.ChildAssocImpl;
import org.alfresco.repo.domain.qname.QNameDAO;
import org.alfresco.repo.node.db.NodeDaoService;
import org.alfresco.repo.security.authentication.AuthenticationUtil;
import org.alfresco.service.cmr.admin.PatchException;
import org.alfresco.service.cmr.rule.RuleService;
import org.alfresco.service.namespace.QName;
@@ -48,8 +49,6 @@ import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.hibernate.type.LongType;
import org.hibernate.type.StringType;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.context.ApplicationEventPublisherAware;
import org.springframework.extensions.surf.util.I18NUtil;
import org.springframework.orm.hibernate3.HibernateCallback;
import org.springframework.orm.hibernate3.support.HibernateDaoSupport;
@@ -61,7 +60,7 @@ import org.springframework.orm.hibernate3.support.HibernateDaoSupport;
* @author Derek Hulley
* @since V2.2SP4
*/
public class FixNameCrcValuesPatch extends AbstractPatch implements ApplicationEventPublisherAware
public class FixNameCrcValuesPatch extends AbstractPatch
{
private static final String MSG_SUCCESS = "patch.fixNameCrcValues.result";
private static final String MSG_REWRITTEN = "patch.fixNameCrcValues.fixed";
@@ -71,7 +70,6 @@ public class FixNameCrcValuesPatch extends AbstractPatch implements ApplicationE
private NodeDaoService nodeDaoService;
private QNameDAO qnameDAO;
private RuleService ruleService;
private ApplicationEventPublisher applicationEventPublisher;
public FixNameCrcValuesPatch()
{
@@ -106,14 +104,6 @@ public class FixNameCrcValuesPatch extends AbstractPatch implements ApplicationE
this.ruleService = ruleService;
}
/* (non-Javadoc)
* @see org.springframework.context.ApplicationEventPublisherAware#setApplicationEventPublisher(org.springframework.context.ApplicationEventPublisher)
*/
public void setApplicationEventPublisher(ApplicationEventPublisher applicationEventPublisher)
{
this.applicationEventPublisher = applicationEventPublisher;
}
@Override
protected void checkProperties()
{
@@ -180,21 +170,34 @@ public class FixNameCrcValuesPatch extends AbstractPatch implements ApplicationE
public String fixCrcValues() throws Exception
{
// get the association types to check
BatchProcessor<Long> batchProcessor = new BatchProcessor<Long>(logger, transactionService
.getRetryingTransactionHelper(), ruleService, tenantAdminService, applicationEventPublisher, findMismatchedCrcs(),
"FixNameCrcValuesPatch", 1000, 2, 20);
BatchProcessor<Long> batchProcessor = new BatchProcessor<Long>(
"FixNameCrcValuesPatch",
transactionService.getRetryingTransactionHelper(),
findMismatchedCrcs(),
2, 20,
applicationEventPublisher,
logger, 1000);
// Precautionary flush and clear so that we have an empty session
getSession().flush();
getSession().clear();
int updated = batchProcessor.process(new Worker<Long>(){
int updated = batchProcessor.process(new BatchProcessWorker<Long>()
{
public String getIdentifier(Long entry)
{
return entry.toString();
}
public void beforeProcess() throws Throwable
{
// Switch rules off
ruleService.disableRules();
// Authenticate as system
String systemUsername = AuthenticationUtil.getSystemUserName();
AuthenticationUtil.setFullyAuthenticatedUser(systemUsername);
}
public void process(Long childAssocId) throws Throwable
{
ChildAssoc assoc = (ChildAssoc) getHibernateTemplate().get(ChildAssocImpl.class, childAssocId);
@@ -247,7 +250,13 @@ public class FixNameCrcValuesPatch extends AbstractPatch implements ApplicationE
// Record
writeLine(I18NUtil.getMessage(MSG_REWRITTEN, childNode.getId(), childName, oldChildCrc, childCrc,
qname, oldQNameCrc, qnameCrc));
}}, true);
}
public void afterProcess() throws Throwable
{
ruleService.enableRules();
}
}, true);
String msg = I18NUtil.getMessage(MSG_SUCCESS, updated, logFile);

View File

@@ -19,6 +19,7 @@
package org.alfresco.repo.admin.registry;
import java.io.Serializable;
import java.util.Arrays;
/**
* Key for looking up registry metadata.
@@ -115,6 +116,46 @@ public class RegistryKey implements Serializable
return sb.toString();
}
@Override
public boolean equals(Object obj)
{
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
RegistryKey other = (RegistryKey) obj;
if (namespaceUri == null)
{
if (other.namespaceUri != null)
return false;
}
else if (!namespaceUri.equals(other.namespaceUri))
return false;
if (!Arrays.equals(path, other.path))
return false;
if (property == null)
{
if (other.property != null)
return false;
}
else if (!property.equals(other.property))
return false;
return true;
}
@Override
public int hashCode()
{
final int prime = 31;
int result = 1;
result = prime * result + ((namespaceUri == null) ? 0 : namespaceUri.hashCode());
result = prime * result + Arrays.hashCode(path);
result = prime * result + ((property == null) ? 0 : property.hashCode());
return result;
}
public String getNamespaceUri()
{
return namespaceUri;

View File

@@ -23,6 +23,7 @@ import java.util.List;
import org.alfresco.repo.domain.DbAccessControlList;
import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
import org.alfresco.service.cmr.repository.ContentData;
import org.alfresco.service.transaction.TransactionService;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -333,8 +334,12 @@ public class OrphanReaper
else if (node.getType() == AVMNodeType.PLAIN_FILE)
{
PlainFileNode file = (PlainFileNode)node;
if (!file.isLegacyContentData())
if (file.isLegacyContentData())
{
// We quickly convert the old ContentData to the new storage
ContentData contentData = file.getContentData();
file.setContentData(contentData);
}
Long contentDataId = file.getContentDataId();
if (contentDataId != null)
{
@@ -342,7 +347,6 @@ public class OrphanReaper
AVMDAOs.Instance().contentDataDAO.deleteContentData(contentDataId);
}
}
}
// Finally, delete it
AVMDAOs.Instance().fAVMNodeDAO.delete(node);
}

View File

@@ -413,9 +413,16 @@ public class PlainFileNodeImpl extends FileNodeImpl implements PlainFileNode
{
Long contentDataId = getContentDataId();
try
{
if (contentDataId == null)
{
return new ContentData(null, null, 0L, null);
}
else
{
return AVMDAOs.Instance().contentDataDAO.getContentData(contentDataId).getSecond();
}
}
catch (Throwable e)
{
throw new AlfrescoRuntimeException(

View File

@@ -34,15 +34,12 @@ import java.util.concurrent.TimeUnit;
import org.alfresco.error.AlfrescoRuntimeException;
import org.alfresco.repo.transaction.AlfrescoTransactionSupport;
import org.alfresco.repo.security.authentication.AuthenticationUtil;
import org.alfresco.repo.security.authentication.AuthenticationUtil.RunAsWork;
import org.alfresco.repo.tenant.TenantService;
import org.alfresco.repo.tenant.TenantUserService;
import org.alfresco.repo.transaction.RetryingTransactionHelper;
import org.alfresco.repo.transaction.TransactionListenerAdapter;
import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
import org.alfresco.service.cmr.rule.RuleService;
import org.alfresco.util.TraceableThreadFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.springframework.context.ApplicationEventPublisher;
/**
@@ -59,20 +56,15 @@ import org.springframework.context.ApplicationEventPublisher;
*/
public class BatchProcessor<T> implements BatchMonitor
{
/** The factory for all new threads */
private TraceableThreadFactory threadFactory;
/** The logger to use. */
private final Log logger;
/** The retrying transaction helper. */
private final RetryingTransactionHelper retryingTransactionHelper;
/** The rule service. */
private final RuleService ruleService;
/** The tenant user service. */
private final TenantUserService tenantUserService;
private final String tenantDomain;
/** The collection. */
private final Collection<T> collection;
@@ -113,85 +105,79 @@ public class BatchProcessor<T> implements BatchMonitor
private Date endTime;
/**
* Instantiates a new batch processor.
* Instantiates a new batch processor using the default logger, which references
* this class as the log category.
*
* @param logger
* the logger to use
* @param retryingTransactionHelper
* the retrying transaction helper
* @param ruleService
* the rule service
* @param collection
* the collection
* @param processName
* the process name
* @param loggingInterval
* the number of entries to process before reporting progress
* @param applicationEventPublisher
* the application event publisher
* @param workerThreads
* the number of worker threads
* @param batchSize
* the number of entries we process at a time in a transaction
* @see #BatchProcessor(String, RetryingTransactionHelper, Collection, int, int, ApplicationEventPublisher, Log, int)
*/
public BatchProcessor(Log logger, RetryingTransactionHelper retryingTransactionHelper, RuleService ruleService,
ApplicationEventPublisher applicationEventPublisher, Collection<T> collection, String processName,
int loggingInterval, int workerThreads, int batchSize)
public BatchProcessor(
String processName,
RetryingTransactionHelper retryingTransactionHelper,
Collection<T> collection,
int workerThreads, int batchSize)
{
this(logger, retryingTransactionHelper, ruleService, null, applicationEventPublisher, collection, processName,
loggingInterval, workerThreads, batchSize);
this(
processName,
retryingTransactionHelper,
collection,
workerThreads,
batchSize, null, null, 1);
}
/**
* Instantiates a new batch processor.
*
* @param logger
* the logger to use
* @param retryingTransactionHelper
* the retrying transaction helper
* @param ruleService
* the rule service
* @param tenantUserService
* the tenant user service
* @param collection
* the collection
* @param processName
* the process name
* @param loggingInterval
* the number of entries to process before reporting progress
* @param applicationEventPublisher
* the application event publisher
* @param retryingTransactionHelper
* the retrying transaction helper
* @param collection
* the collection
* @param workerThreads
* the number of worker threads
* @param batchSize
* the number of entries we process at a time in a transaction
* @param applicationEventPublisher
* the application event publisher (may be <tt>null</tt>)
* @param logger
* the logger to use (may be <tt>null</tt>)
* @param loggingInterval
* the number of entries to process before reporting progress
*/
public BatchProcessor(Log logger, RetryingTransactionHelper retryingTransactionHelper, RuleService ruleService,
TenantUserService tenantUserService, ApplicationEventPublisher applicationEventPublisher, Collection<T> collection, String processName,
int loggingInterval, int workerThreads, int batchSize)
public BatchProcessor(
String processName,
RetryingTransactionHelper retryingTransactionHelper,
Collection<T> collection,
int workerThreads, int batchSize,
ApplicationEventPublisher applicationEventPublisher,
Log logger,
int loggingInterval)
{
this.logger = logger;
this.retryingTransactionHelper = retryingTransactionHelper;
this.ruleService = ruleService;
this.tenantUserService = tenantUserService;
this.collection = collection;
this.threadFactory = new TraceableThreadFactory();
this.threadFactory.setNamePrefix(processName);
this.threadFactory.setThreadDaemon(true);
this.processName = processName;
this.loggingInterval = loggingInterval;
this.retryingTransactionHelper = retryingTransactionHelper;
this.collection = collection;
this.workerThreads = workerThreads;
this.batchSize = batchSize;
if (tenantUserService != null)
if (logger == null)
{
this.tenantDomain = tenantUserService.getUserDomain(AuthenticationUtil.getRunAsUser());
this.logger = LogFactory.getLog(this.getClass());
}
else
{
this.tenantDomain = TenantService.DEFAULT_DOMAIN;
this.logger = logger;
}
this.loggingInterval = loggingInterval;
// Let the (enterprise) monitoring side know of our presence
if (applicationEventPublisher != null)
{
applicationEventPublisher.publishEvent(new BatchMonitorEvent(this));
}
}
/*
* (non-Javadoc)
@@ -302,14 +288,14 @@ public class BatchProcessor<T> implements BatchMonitor
* the worker
* @param splitTxns
* Can the modifications to Alfresco be split across multiple transactions for maximum performance? If
* <code>true</code>, worker invocations are isolated in separate transactions in batches of 10 for
* <code>true</code>, worker invocations are isolated in separate transactions in batches for
* increased performance. If <code>false</code>, all invocations are performed in the current
* transaction. This is required if calling synchronously (e.g. in response to an authentication event in
* the same transaction).
* @return the number of invocations
*/
@SuppressWarnings("serial")
public int process(final Worker<T> worker, final boolean splitTxns)
public int process(final BatchProcessWorker<T> worker, final boolean splitTxns)
{
int count = this.collection.size();
synchronized (this)
@@ -330,9 +316,10 @@ public class BatchProcessor<T> implements BatchMonitor
}
// Create a thread pool executor with the specified number of threads and a finite blocking queue of jobs
ExecutorService executorService = splitTxns && this.workerThreads > 1 ? new ThreadPoolExecutor(
this.workerThreads, this.workerThreads, 0L, TimeUnit.MILLISECONDS, new ArrayBlockingQueue<Runnable>(
this.workerThreads * this.batchSize * 10)
ExecutorService executorService = splitTxns && this.workerThreads > 1 ?
new ThreadPoolExecutor(
this.workerThreads, this.workerThreads, 0L, TimeUnit.MILLISECONDS,
new ArrayBlockingQueue<Runnable>(this.workerThreads * this.batchSize * 10)
{
// Add blocking behaviour to work queue
@Override
@@ -349,7 +336,8 @@ public class BatchProcessor<T> implements BatchMonitor
return true;
}
}) : null;
},
threadFactory) : null;
try
{
Iterator<T> iterator = this.collection.iterator();
@@ -452,9 +440,8 @@ public class BatchProcessor<T> implements BatchMonitor
/**
* An interface for workers to be invoked by the {@link BatchProcessor}.
*/
public interface Worker<T>
public interface BatchProcessWorker<T>
{
/**
* Gets an identifier for the given entry (for monitoring / logging purposes).
*
@@ -464,6 +451,14 @@ public class BatchProcessor<T> implements BatchMonitor
*/
public String getIdentifier(T entry);
/**
* Callback to allow thread initialization before the work entries are
* {@link #process(Object) processed}. Typically, this will include authenticating
* as a valid user and disabling or enabling any system flags that might affect the
* entry processing.
*/
public void beforeProcess() throws Throwable;
/**
* Processes the given entry.
*
@@ -473,6 +468,38 @@ public class BatchProcessor<T> implements BatchMonitor
* on any error
*/
public void process(T entry) throws Throwable;
/**
* Callback to allow thread cleanup after the work entries have been
* {@link #process(Object) processed}.
* Typically, this will involve cleanup of authentication and resetting any
* system flags previously set.
* <p/>
* This call is made regardless of the outcome of the entry processing.
*/
public void afterProcess() throws Throwable;
}
/**
* Adaptor that allows implementations to only implement {@link #process(Object)}
*/
public static abstract class BatchProcessWorkerAdaptor<TT> implements BatchProcessWorker<TT>
{
/**
* @return Returns the <code>toString()</code> of the entry
*/
public String getIdentifier(TT entry)
{
return entry.toString();
}
/** No-op */
public void beforeProcess() throws Throwable
{
}
/** No-op */
public void afterProcess() throws Throwable
{
}
}
/**
@@ -491,7 +518,7 @@ public class BatchProcessor<T> implements BatchMonitor
* @param splitTxns
* If <code>true</code>, the worker invocation is made in a new transaction.
*/
public TxnCallback(Worker<T> worker, List<T> batch, boolean splitTxns)
public TxnCallback(BatchProcessWorker<T> worker, List<T> batch, boolean splitTxns)
{
this.worker = worker;
this.batch = batch;
@@ -499,7 +526,7 @@ public class BatchProcessor<T> implements BatchMonitor
}
/** The worker. */
private final Worker<T> worker;
private final BatchProcessWorker<T> worker;
/** The batch. */
private final List<T> batch;
@@ -602,26 +629,21 @@ public class BatchProcessor<T> implements BatchMonitor
*/
public void run()
{
// Disable rules for this thread
BatchProcessor.this.ruleService.disableRules();
try
{
}
catch (Throwable e)
{
BatchProcessor.this.logger.error("Failed to cleanup Worker after processing.", e);
}
final BatchProcessor<T>.TxnCallback callback = this;
try
{
String systemUser = AuthenticationUtil.getSystemUserName();
if (tenantUserService != null)
{
systemUser = tenantUserService.getDomainUser(AuthenticationUtil.getSystemUserName(), tenantDomain);
}
AuthenticationUtil.runAs(new RunAsWork<Void>()
{
public Void doWork() throws Exception
{
worker.beforeProcess();
BatchProcessor.this.retryingTransactionHelper.doInTransaction(callback, false, splitTxns);
return null;
}
}, systemUser);
worker.afterProcess();
}
catch (Throwable t)
{
@@ -651,11 +673,6 @@ public class BatchProcessor<T> implements BatchMonitor
throw new AlfrescoRuntimeException("Transactional error during " + getProcessName(), t);
}
}
finally
{
// Re-enable rules
BatchProcessor.this.ruleService.enableRules();
}
commitProgress();
}


@@ -34,6 +34,7 @@ import org.alfresco.service.namespace.NamespaceService;
import org.alfresco.service.namespace.QName;
import org.alfresco.service.transaction.TransactionService;
import org.alfresco.util.VmShutdownListener;
import org.alfresco.util.VmShutdownListener.VmShutdownException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.springframework.extensions.surf.util.Pair;
@@ -41,8 +42,6 @@ import org.springframework.extensions.surf.util.PropertyCheck;
/**
* This component is responsible for cleaning up orphaned content.
* <p/>
* <b>TODO: Fix up new comments</b>
*
* Clean-up happens at two levels.<p/>
* <u><b>Eager cleanup:</b></u> (since 3.2)<p/>
@@ -53,10 +52,9 @@ import org.springframework.extensions.surf.util.PropertyCheck;
* procedures should be plugged in as listeners if this is required.
* <p/>
* <u><b>Lazy cleanup:</b></u><p/>
* This is triggered by means of a {@link ContentStoreCleanupJob Quartz job}. This is
* a heavy-weight process that effectively compares the database metadata with the
* content URLs controlled by the various stores. Once again, the listeners are called
* appropriately.
* This is triggered by means of a {@link ContentStoreCleanupJob Quartz job}. This process
* gets content URLs that have been marked as orphaned and cleans up the various stores.
* Once again, the listeners are called appropriately.
* <p/>
* <u><b>How backup policies are affected:</b></u><p/>
* When restoring the system from a backup, the type of restore required is dictated by
@@ -352,15 +350,4 @@ public class ContentStoreCleaner
// Done
return size;
}
/**
* Message carrier to break out of loops using the callback.
*
* @author Derek Hulley
* @since 2.1.3
*/
private class VmShutdownException extends RuntimeException
{
private static final long serialVersionUID = -5876107469054587072L;
}
}
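For orientation, a minimal sketch of the lazy clean-up eligibility rule described above: a URL qualifies only once its orphan time has been set (a null orphan time means the URL is still referenced) and has aged past the protection window. The class, method, and constant names are illustrative, not the cleaner's actual internals.

public class OrphanEligibilityExample
{
    // Assumed protection window; the real cleaner reads this from configuration.
    private static final long PROTECT_MILLIS = 7L * 24L * 60L * 60L * 1000L;

    /**
     * @param orphanTime ms since epoch when the URL was orphaned, or null if still in use
     */
    public static boolean isEligibleForCleanup(Long orphanTime)
    {
        long threshold = System.currentTimeMillis() - PROTECT_MILLIS;
        // Null orphan times are never eligible: the content is still referenced
        return orphanTime != null && orphanTime.longValue() <= threshold;
    }
}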


@@ -22,7 +22,6 @@ import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
@@ -33,6 +32,7 @@ import org.alfresco.model.ContentModel;
import org.alfresco.repo.content.MimetypeMap;
import org.alfresco.repo.descriptor.DescriptorServiceImpl.BaseDescriptor;
import org.alfresco.repo.importer.ImporterBootstrap;
import org.alfresco.service.cmr.repository.ContentData;
import org.alfresco.service.cmr.repository.ContentReader;
import org.alfresco.service.cmr.repository.ContentService;
import org.alfresco.service.cmr.repository.ContentWriter;
@@ -201,17 +201,18 @@ public class RepositoryDescriptorDAOImpl implements DescriptorDAO
props.put(ContentModel.PROP_SYS_VERSION_SCHEMA, serverDescriptor.getSchema());
this.nodeService.addProperties(currentDescriptorNodeRef, props);
// The version edition property may already have been overwritten with a license, so only set the property
// if it doesn't already contain ContentData
final Serializable value = this.nodeService.getProperty(currentDescriptorNodeRef,
// ALF-726: v3.1.x Content Cleaner Job needs to be ported to v3.2
// In order to migrate properly, this property needs to be d:content. We will rewrite the property with the
// license update code. There is no point attempting to rewrite the property here.
final Serializable value = this.nodeService.getProperty(
currentDescriptorNodeRef,
ContentModel.PROP_SYS_VERSION_EDITION);
if (!(value instanceof Collection) || ((Collection<?>) value).isEmpty()
|| ((Collection<?>) value).iterator().next() instanceof String)
if (value == null)
{
final Collection<String> editions = new ArrayList<String>();
editions.add(serverDescriptor.getEdition());
this.nodeService.setProperty(currentDescriptorNodeRef, ContentModel.PROP_SYS_VERSION_EDITION,
(Serializable) editions);
this.nodeService.setProperty(
currentDescriptorNodeRef,
ContentModel.PROP_SYS_VERSION_EDITION,
new ContentData(null, null, 0L, null));
}
// done


@@ -0,0 +1,57 @@
/*
* Copyright (C) 2005-2008 Alfresco Software Limited.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
* As a special exception to the terms and conditions of version 2.0 of
* the GPL, you may redistribute this Program in connection with Free/Libre
* and Open Source Software ("FLOSS") applications as described in Alfresco's
* FLOSS exception. You should have received a copy of the text describing
* the FLOSS exception, and it is also available here:
* http://www.alfresco.com/legal/licensing"
*/
package org.alfresco.repo.domain;
import java.io.Serializable;
/**
* Data type carrying the ID of a <code>ContentData</code> reference.
*
* @author Derek Hulley
* @since 3.2.1
*/
public class ContentDataId implements Serializable
{
private static final long serialVersionUID = -4980820192507809266L;
private final Long id;
public ContentDataId(Long id)
{
super();
this.id = id;
}
@Override
public String toString()
{
return "ContentDataId [id=" + id + "]";
}
public Long getId()
{
return id;
}
}


@@ -126,9 +126,20 @@ public class NodePropertyValue implements Cloneable, Serializable
@Override
Serializable convert(Serializable value)
{
if (value == null)
{
return null;
}
else if (value instanceof ContentDataId)
{
return ((ContentDataId)value).getId();
}
else
{
return DefaultTypeConverter.INSTANCE.convert(Long.class, value);
}
}
},
FLOAT
{
@@ -467,6 +478,41 @@ public class NodePropertyValue implements Cloneable, Serializable
{
return DefaultTypeConverter.INSTANCE.convert(Period.class, value);
}
},
CONTENT_DATA_ID
{
@Override
public Integer getOrdinalNumber()
{
return Integer.valueOf(21);
}
@Override
protected ValueType getPersistedType(Serializable value)
{
return ValueType.LONG;
}
@Override
Serializable convert(Serializable value)
{
if (value == null)
{
return null;
}
else if (value instanceof Long)
{
return value;
}
else if (value instanceof ContentDataId)
{
return ((ContentDataId)value).getId();
}
else
{
return DefaultTypeConverter.INSTANCE.convert(ContentData.class, value);
}
}
}
;
@@ -566,6 +612,10 @@ public class NodePropertyValue implements Cloneable, Serializable
{
return ValueType.PERIOD;
}
else if (value instanceof ContentDataId)
{
return ValueType.CONTENT_DATA_ID;
}
else
{
// type is not recognised as belonging to any particular slot
@@ -592,7 +642,7 @@ public class NodePropertyValue implements Cloneable, Serializable
valueTypesByPropertyType.put(DataTypeDefinition.DATE, ValueType.DATE);
valueTypesByPropertyType.put(DataTypeDefinition.DATETIME, ValueType.DATE);
valueTypesByPropertyType.put(DataTypeDefinition.CATEGORY, ValueType.NODEREF);
valueTypesByPropertyType.put(DataTypeDefinition.CONTENT, ValueType.CONTENT);
valueTypesByPropertyType.put(DataTypeDefinition.CONTENT, ValueType.CONTENT_DATA_ID);
valueTypesByPropertyType.put(DataTypeDefinition.TEXT, ValueType.STRING);
valueTypesByPropertyType.put(DataTypeDefinition.MLTEXT, ValueType.MLTEXT);
valueTypesByPropertyType.put(DataTypeDefinition.NODE_REF, ValueType.NODEREF);
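To make the new mapping concrete: with DataTypeDefinition.CONTENT now bound to ValueType.CONTENT_DATA_ID, a content property is persisted as the Long ID behind its ContentDataId wrapper. A standalone sketch of that unwrap, mirroring the convert(...) cases above but with an exception in place of the converter fallback:

import java.io.Serializable;
import org.alfresco.repo.domain.ContentDataId;

public class ContentDataIdPersistenceExample
{
    /** Reduce a content property value to the Long persisted in alf_node_properties. */
    public static Long toPersistedLong(Serializable value)
    {
        if (value == null)
        {
            return null;
        }
        else if (value instanceof Long)
        {
            return (Long) value;                      // already the raw ID
        }
        else if (value instanceof ContentDataId)
        {
            return ((ContentDataId) value).getId();   // unwrap the reference
        }
        // The real code falls back to the type converter; simplified here
        throw new IllegalArgumentException("Not a ContentData reference: " + value);
    }
}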


@@ -387,7 +387,8 @@ public abstract class AbstractContentDataDAOImpl implements ContentDataDAO
}
/**
* Caching method that creates an entity for <b>content_url_entity</b>.
* Method to create (or retrieve an existing) content URL entity. The URL is
* un-orphaned in either case, whether it was just created or is being re-used.
*/
private ContentUrlEntity getOrCreateContentUrlEntity(String contentUrl, long size)
{
@@ -405,6 +406,12 @@ public abstract class AbstractContentDataDAOImpl implements ContentDataDAO
" Inbound: " + contentUrl + "\n" +
" Existing: " + contentUrlEntity);
}
// Check orphan state
if (contentUrlEntity.getOrphanTime() != null)
{
Long id = contentUrlEntity.getId();
updateContentUrlOrphanTime(id, null);
}
}
else
{
@@ -446,7 +453,7 @@ public abstract class AbstractContentDataDAOImpl implements ContentDataDAO
* @param orphanTime the time (ms since epoch) that the entity was orphaned,
* or <tt>null</tt> to clear the orphan state
* @return Returns the number of rows updated
*/
protected abstract int updateContentUrlOrphanTime(Long id, long orphanTime);
protected abstract int updateContentUrlOrphanTime(Long id, Long orphanTime);
/**
* Create the row for the <b>alf_content_data</b>


@@ -25,6 +25,7 @@ import org.alfresco.error.AlfrescoRuntimeException;
import org.alfresco.service.cmr.repository.ContentData;
import org.springframework.extensions.surf.util.Pair;
import org.springframework.dao.ConcurrencyFailureException;
import org.springframework.dao.DataIntegrityViolationException;
/**
* DAO services for <b>alf_content_data</b> table
@@ -50,6 +51,15 @@ public interface ContentDataDAO
*/
void updateContentData(Long id, ContentData contentData);
/**
* Creates an immediately-orphaned content URL, failing if the URL already exists
*
* @param contentUrl the URL to create if it doesn't exist
* @return Returns the ID-URL pair
* @throws DataIntegrityViolationException if the URL already exists
*/
Pair<Long, String> createContentUrlOrphaned(String contentUrl);
/**
* @param id the unique ID of the entity
* @return the ContentData pair (id, ContentData) or <tt>null</tt> if it doesn't exist
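A hedged usage sketch of createContentUrlOrphaned: per the javadoc, a pre-existing URL surfaces as a DataIntegrityViolationException, which a caller that only needs the URL recorded can safely swallow. The wrapper class and wiring are assumptions.

import org.alfresco.repo.domain.contentdata.ContentDataDAO;
import org.springframework.dao.DataIntegrityViolationException;

public class OrphanUrlRecorder
{
    private final ContentDataDAO contentDataDAO;   // assumed to be injected

    public OrphanUrlRecorder(ContentDataDAO contentDataDAO)
    {
        this.contentDataDAO = contentDataDAO;
    }

    /** Record a URL as immediately orphaned, tolerating rows that already exist. */
    public void recordOrphan(String contentUrl)
    {
        try
        {
            contentDataDAO.createContentUrlOrphaned(contentUrl);
        }
        catch (DataIntegrityViolationException e)
        {
            // URL already known; the existing row's orphan state is left untouched
        }
    }
}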


@@ -18,16 +18,21 @@
*/
package org.alfresco.repo.domain.contentdata.ibatis;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.alfresco.error.AlfrescoRuntimeException;
import org.alfresco.ibatis.IdsEntity;
import org.alfresco.repo.domain.contentdata.AbstractContentDataDAOImpl;
import org.alfresco.repo.domain.contentdata.ContentDataEntity;
import org.alfresco.repo.domain.contentdata.ContentUrlEntity;
import org.alfresco.service.cmr.repository.ContentData;
import org.springframework.dao.ConcurrencyFailureException;
import org.springframework.dao.DataIntegrityViolationException;
import org.springframework.extensions.surf.util.Pair;
import org.springframework.orm.ibatis.SqlMapClientTemplate;
import com.ibatis.sqlmap.client.event.RowHandler;
@@ -60,6 +65,17 @@ public class ContentDataDAOImpl extends AbstractContentDataDAOImpl
this.template = sqlMapClientTemplate;
}
public Pair<Long, String> createContentUrlOrphaned(String contentUrl)
{
ContentUrlEntity contentUrlEntity = new ContentUrlEntity();
contentUrlEntity.setContentUrl(contentUrl);
contentUrlEntity.setSize(0L);
contentUrlEntity.setOrphanTime(System.currentTimeMillis());
Long id = (Long) template.insert(INSERT_CONTENT_URL, contentUrlEntity);
// Done
return new Pair<Long, String>(id, contentUrl);
}
@Override
protected ContentUrlEntity createContentUrlEntity(String contentUrl, long size)
{
@@ -135,7 +151,7 @@ public class ContentDataDAOImpl extends AbstractContentDataDAOImpl
}
}
public int updateContentUrlOrphanTime(Long id, long orphanTime)
public int updateContentUrlOrphanTime(Long id, Long orphanTime)
{
ContentUrlEntity contentUrlEntity = new ContentUrlEntity();
contentUrlEntity.setId(id);
@@ -178,7 +194,14 @@ public class ContentDataDAOImpl extends AbstractContentDataDAOImpl
contentDataEntity.setMimetypeId(mimetypeId);
contentDataEntity.setEncodingId(encodingId);
contentDataEntity.setLocaleId(localeId);
try
{
template.insert(INSERT_CONTENT_DATA, contentDataEntity);
}
catch (Throwable e)
{
throw new AlfrescoRuntimeException("Failed to insert ContentData: " + contentDataEntity, e);
}
// Done
return contentDataEntity;
}
@@ -226,23 +249,30 @@ public class ContentDataDAOImpl extends AbstractContentDataDAOImpl
public void deleteContentDataForNode(Long nodeId, Set<Long> qnameIds)
{
/*
* TODO: use IN clause in parameters
*/
for (Long qnameId : qnameIds)
if (qnameIds.size() == 0)
{
// Get the ContentData that matches (may be multiple due to collection properties)
Map<String, Object> params = new HashMap<String, Object>(11);
params.put("nodeId", nodeId);
params.put("qnameId", qnameId);
// There will be no results
return;
}
IdsEntity idsEntity = new IdsEntity();
idsEntity.setIdOne(nodeId);
idsEntity.setIds(new ArrayList<Long>(qnameIds));
@SuppressWarnings("unchecked")
List<Long> ids = (List<Long>) template.queryForList(SELECT_CONTENT_DATA_BY_NODE_AND_QNAME, params);
List<Long> ids = (List<Long>) template.queryForList(SELECT_CONTENT_DATA_BY_NODE_AND_QNAME, idsEntity);
// Delete each one
for (Long id : ids)
{
try
{
// Delete the ContentData entity
deleteContentData(id);
}
catch (ConcurrencyFailureException e)
{
// The query may return IDs for rows that were deleted concurrently.
// Since we are deleting the row anyway, it does not matter whether
// this thread or another one removed it first.
}
}
}
}
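The IdsEntity used above is a plain parameter carrier: idOne holds the node ID and ids holds the QName IDs that the SQL map expands into an IN (...) clause. A small illustrative builder (the class name is hypothetical):

import java.util.ArrayList;
import java.util.Set;
import org.alfresco.ibatis.IdsEntity;

public class IdsEntityExample
{
    /** Build the parameter object for the node-and-qnames content lookup. */
    public static IdsEntity forNodeAndQNames(Long nodeId, Set<Long> qnameIds)
    {
        IdsEntity ids = new IdsEntity();
        ids.setIdOne(nodeId);                        // bound to #idOne#
        ids.setIds(new ArrayList<Long>(qnameIds));   // expanded into IN (...)
        return ids;
    }
}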


@@ -19,8 +19,12 @@
package org.alfresco.repo.domain.patch;
import java.util.List;
import java.util.Map;
import org.alfresco.ibatis.BatchingDAO;
import org.alfresco.repo.domain.avm.AVMNodeEntity;
import org.alfresco.repo.domain.contentdata.ContentDataDAO;
import org.alfresco.service.cmr.repository.ContentData;
/**
@@ -31,8 +35,25 @@ import org.alfresco.repo.domain.avm.AVMNodeEntity;
* @author janv
* @since 3.2
*/
public abstract class AbstractPatchDAOImpl implements PatchDAO
public abstract class AbstractPatchDAOImpl implements PatchDAO, BatchingDAO
{
private ContentDataDAO contentDataDAO;
protected AbstractPatchDAOImpl()
{
}
/**
* Set the DAO that supplies {@link ContentData} IDs
*/
public void setContentDataDAO(ContentDataDAO contentDataDAO)
{
this.contentDataDAO = contentDataDAO;
}
/**
* {@inheritDoc}
*/
public Long getAVMNodesCountWhereNewInStore()
{
return getAVMNodeEntitiesCountWhereNewInStore();
@@ -63,4 +84,91 @@ public abstract class AbstractPatchDAOImpl implements PatchDAO
protected abstract List<AVMNodeEntity> getNullVersionLayeredDirectoryNodeEntities();
protected abstract List<AVMNodeEntity> getNullVersionLayeredFileNodeEntities();
/**
* {@inheritDoc}
* <p>
* @see #getAdmOldContentProperties(Long, Long)
*/
public void updateAdmV31ContentProperties(Long minNodeId, Long maxNodeId)
{
List<Map<String, Object>> props = getAdmOldContentProperties(minNodeId, maxNodeId);
// Do a first pass to create the ContentData IDs
for (Map<String, Object> prop : props)
{
String stringValue = (String) prop.get("stringValue");
try
{
ContentData contentData = ContentData.createContentProperty(stringValue);
Long contentDataId = contentDataDAO.createContentData(contentData).getFirst();
prop.put("contentDataId", contentDataId);
}
catch (Throwable e)
{
// We don't care about this too much as it'll just leak a binary
}
}
// Now do the updates in the context of a batch
try
{
// Run using a batch
startBatch();
for (Map<String, Object> prop : props)
{
Long nodeId = (Long) prop.get("nodeId");
Long qnameId = (Long) prop.get("qnameId");
Integer listIndex = (Integer) prop.get("listIndex");
Long localeId = (Long) prop.get("localeId");
Long contentDataId = (Long) prop.get("contentDataId");
if (contentDataId == null)
{
// The ContentData conversion failed earlier; skip this property
continue;
}
// Update
updateAdmOldContentProperty(nodeId, qnameId, listIndex, localeId, contentDataId);
}
}
finally
{
executeBatch();
}
}
/**
* Results are of the form:
* <pre>
* nodeId: java.lang.Long
* qnameId: java.lang.Long
* listIndex: java.lang.Integer
* localeId: java.lang.Long
* stringValue: java.lang.String
* </pre>
*
* @param minNodeId inclusive lower bound for Node ID
* @param maxNodeId exclusive upper bound for Node ID
* @return Returns a list of query result maps
*/
protected abstract List<Map<String, Object>> getAdmOldContentProperties(Long minNodeId, Long maxNodeId);
/**
* Update a single old-format content property to reference the new ContentData ID.
*
* @param nodeId part of the unique key
* @param qnameId part of the unique key
* @param listIndex part of the unique key
* @param localeId part of the unique key
* @param longValue the new ContentData ID
*/
protected abstract void updateAdmOldContentProperty(
Long nodeId,
Long qnameId,
Integer listIndex,
Long localeId,
Long longValue);
}


@@ -21,12 +21,14 @@ package org.alfresco.repo.domain.patch;
import java.util.List;
import org.alfresco.repo.domain.avm.AVMNodeEntity;
import org.alfresco.repo.domain.contentdata.ContentDataDAO;
import org.alfresco.service.cmr.repository.ContentData;
/**
* Additional DAO services for patches
*
* @author janv
* @author Derek Hulley
* @since 3.2
*/
public interface PatchDAO
@@ -40,4 +42,21 @@ public interface PatchDAO
public List<AVMNodeEntity> getNullVersionLayeredDirectories(int count);
public List<AVMNodeEntity> getNullVersionLayeredFiles(int count);
public Long getMaxAvmNodeID();
public List<Long> getAvmNodesWithOldContentProperties(Long minNodeId, Long maxNodeId);
// DM-related
public Long getMaxAdmNodeID();
/**
* Migrates DM content properties from the old V3.1 format (String-based {@link ContentData#toString()})
* to the new V3.2 format (ID-based storage using {@link ContentDataDAO}).
*
* @param minNodeId the inclusive node ID to limit the updates to
* @param maxNodeId the exclusive node ID to limit the updates to
*/
public void updateAdmV31ContentProperties(Long minNodeId, Long maxNodeId);
}
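A sketch of how a patch might drive this migration in bounded ranges; the range size, wiring, and class name are assumptions, since the calling patch is not part of these hunks. Note the inclusive/exclusive bounds from the javadoc above.

import org.alfresco.repo.domain.patch.PatchDAO;

public class AdmContentMigrationDriver
{
    private static final long RANGE = 1000L;   // assumed range size

    private final PatchDAO patchDAO;           // assumed to be injected

    public AdmContentMigrationDriver(PatchDAO patchDAO)
    {
        this.patchDAO = patchDAO;
    }

    public void migrateAll()
    {
        Long maxNodeId = patchDAO.getMaxAdmNodeID();
        if (maxNodeId == null)
        {
            return;                            // no nodes to migrate
        }
        // minNodeId is inclusive, maxNodeId is exclusive
        for (long min = 0L; min <= maxNodeId.longValue(); min += RANGE)
        {
            patchDAO.updateAdmV31ContentProperties(min, min + RANGE);
        }
    }
}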


@@ -18,8 +18,12 @@
*/
package org.alfresco.repo.domain.patch.ibatis;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.alfresco.ibatis.IdsEntity;
import org.alfresco.repo.domain.avm.AVMNodeEntity;
import org.alfresco.repo.domain.patch.AbstractPatchDAOImpl;
import org.springframework.orm.ibatis.SqlMapClientTemplate;
@@ -36,6 +40,11 @@ public class PatchDAOImpl extends AbstractPatchDAOImpl
private static final String SELECT_AVM_NODE_ENTITIES_WITH_EMPTY_GUID = "alfresco.avm.select_AVMNodesWithEmptyGUID";
private static final String SELECT_AVM_LD_NODE_ENTITIES_NULL_VERSION = "alfresco.avm.select_AVMNodes_nullVersionLayeredDirectories";
private static final String SELECT_AVM_LF_NODE_ENTITIES_NULL_VERSION = "alfresco.avm.select_AVMNodes_nullVersionLayeredFiles";
private static final String SELECT_AVM_MAX_NODE_ID = "alfresco.patch.select_avmMaxNodeId";
private static final String SELECT_ADM_MAX_NODE_ID = "alfresco.patch.select_admMaxNodeId";
private static final String SELECT_AVM_NODES_WITH_OLD_CONTENT_PROPERTIES = "alfresco.patch.select_avmNodesWithOldContentProperties";
private static final String SELECT_ADM_OLD_CONTENT_PROPERTIES = "alfresco.patch.select_admOldContentProperties";
private static final String UPDATE_ADM_OLD_CONTENT_PROPERTY = "alfresco.patch.update_admOldContentProperty";
private SqlMapClientTemplate template;
@@ -44,6 +53,30 @@ public class PatchDAOImpl extends AbstractPatchDAOImpl
this.template = sqlMapClientTemplate;
}
public void startBatch()
{
try
{
template.getSqlMapClient().startBatch();
}
catch (SQLException e)
{
throw new RuntimeException("Failed to start batch", e);
}
}
public void executeBatch()
{
try
{
template.getSqlMapClient().executeBatch();
}
catch (SQLException e)
{
throw new RuntimeException("Failed to start batch", e);
}
}
@Override
protected Long getAVMNodeEntitiesCountWhereNewInStore()
{
@@ -70,4 +103,45 @@ public class PatchDAOImpl extends AbstractPatchDAOImpl
{
return (List<AVMNodeEntity>) template.queryForList(SELECT_AVM_LF_NODE_ENTITIES_NULL_VERSION);
}
public Long getMaxAvmNodeID()
{
return (Long) template.queryForObject(SELECT_AVM_MAX_NODE_ID);
}
@SuppressWarnings("unchecked")
public List<Long> getAvmNodesWithOldContentProperties(Long minNodeId, Long maxNodeId)
{
IdsEntity ids = new IdsEntity();
ids.setIdOne(minNodeId);
ids.setIdTwo(maxNodeId);
return (List<Long>) template.queryForList(SELECT_AVM_NODES_WITH_OLD_CONTENT_PROPERTIES, ids);
}
public Long getMaxAdmNodeID()
{
return (Long) template.queryForObject(SELECT_ADM_MAX_NODE_ID);
}
@SuppressWarnings("unchecked")
@Override
protected List<Map<String, Object>> getAdmOldContentProperties(Long minNodeId, Long maxNodeId)
{
IdsEntity ids = new IdsEntity();
ids.setIdOne(minNodeId);
ids.setIdTwo(maxNodeId);
return (List<Map<String, Object>>) template.queryForList(SELECT_ADM_OLD_CONTENT_PROPERTIES, ids);
}
@Override
protected void updateAdmOldContentProperty(Long nodeId, Long qnameId, Integer listIndex, Long localeId, Long longValue)
{
Map<String, Object> params = new HashMap<String, Object>(11);
params.put("nodeId", nodeId);
params.put("qnameId", qnameId);
params.put("listIndex", listIndex);
params.put("localeId", localeId);
params.put("longValue", longValue);
template.update(UPDATE_ADM_OLD_CONTENT_PROPERTY, params);
}
}


@@ -46,6 +46,7 @@ import org.alfresco.model.ContentModel;
import org.alfresco.repo.cache.SimpleCache;
import org.alfresco.repo.domain.AuditableProperties;
import org.alfresco.repo.domain.ChildAssoc;
import org.alfresco.repo.domain.ContentDataId;
import org.alfresco.repo.domain.DbAccessControlList;
import org.alfresco.repo.domain.LocaleDAO;
import org.alfresco.repo.domain.Node;
@@ -4986,11 +4987,13 @@ public class HibernateNodeDaoServiceImpl
" Value: " + value);
}
// Handle ContentData
if (value instanceof ContentData && propertyTypeQName.equals(DataTypeDefinition.CONTENT))
// We used to check the property type, but we now handle d:any ContentData as well
if (value instanceof ContentData)
{
// Needs converting to an ID
ContentData contentData = (ContentData) value;
value = contentDataDAO.createContentData(contentData).getFirst();
Long contentDataId = contentDataDAO.createContentData(contentData).getFirst();
value = new ContentDataId(contentDataId);
}
// Handle MLText
if (value instanceof MLText)
@@ -5374,8 +5377,24 @@ public class HibernateNodeDaoServiceImpl
{
Serializable value = propertyValue.getValue(propertyTypeQName);
// Handle conversions to and from ContentData
if (propertyTypeQName.equals(DataTypeDefinition.CONTENT) && (value instanceof Long))
if (value instanceof ContentDataId)
{
// ContentData used to be persisted
Long contentDataId = ((ContentDataId) value).getId();
Pair<Long, ContentData> contentDataPair = contentDataDAO.getContentData(contentDataId);
if (contentDataPair == null)
{
// It is invalid
value = null;
}
else
{
value = contentDataPair.getSecond();
}
}
else if (propertyTypeQName.equals(DataTypeDefinition.CONTENT) && (value instanceof Long))
{
// ContentData used to be persisted
Pair<Long, ContentData> contentDataPair = contentDataDAO.getContentData((Long)value);
if (contentDataPair == null)
{

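The two hunks above are the write and read halves of the same change: content properties are stored as ContentDataId references and resolved back through the DAO on read, with a missing row treated as an invalid value. A condensed round-trip sketch (DAO wiring assumed, class name hypothetical):

import org.alfresco.repo.domain.ContentDataId;
import org.alfresco.repo.domain.contentdata.ContentDataDAO;
import org.alfresco.service.cmr.repository.ContentData;
import org.springframework.extensions.surf.util.Pair;

public class ContentDataRoundTripExample
{
    private final ContentDataDAO contentDataDAO;   // assumed to be injected

    public ContentDataRoundTripExample(ContentDataDAO contentDataDAO)
    {
        this.contentDataDAO = contentDataDAO;
    }

    /** Write path: persist the ContentData and keep only its ID. */
    public ContentDataId write(ContentData contentData)
    {
        Long id = contentDataDAO.createContentData(contentData).getFirst();
        return new ContentDataId(id);
    }

    /** Read path: resolve the ID, treating a missing row as an invalid value. */
    public ContentData read(ContentDataId ref)
    {
        Pair<Long, ContentData> pair = contentDataDAO.getContentData(ref.getId());
        return pair == null ? null : pair.getSecond();
    }
}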

@@ -289,14 +289,13 @@ public class AVMFullIndexRecoveryComponent extends AbstractReindexComponent
logger.info(" Rebuilding index for " + store);
}
final int latest = avmService.getLatestSnapshotID(store);
if (latest <= 0)
{
if (!avmSnapShotTriggeredIndexingMethodInterceptor.hasIndexBeenCreated(store))
{
avmSnapShotTriggeredIndexingMethodInterceptor.createIndex(store);
}
final int latest = avmService.getLatestSnapshotID(store);
if (latest <= 0)
{
return;
}
@@ -306,7 +305,6 @@ public class AVMFullIndexRecoveryComponent extends AbstractReindexComponent
{
public Object execute() throws Exception
{
if (mode == RecoveryMode.AUTO)
{
logger.info(" Rebuilding index for snapshots " + latestIndexed + " to " + latest);


@@ -38,7 +38,7 @@ import org.alfresco.repo.attributes.Attribute;
import org.alfresco.repo.attributes.LongAttributeValue;
import org.alfresco.repo.attributes.MapAttributeValue;
import org.alfresco.repo.batch.BatchProcessor;
import org.alfresco.repo.batch.BatchProcessor.Worker;
import org.alfresco.repo.batch.BatchProcessor.BatchProcessWorker;
import org.alfresco.repo.lock.JobLockService;
import org.alfresco.repo.lock.LockAcquisitionException;
import org.alfresco.repo.management.subsystems.ActivateableBean;
@@ -538,10 +538,14 @@ public class ChainingUserRegistrySynchronizer extends AbstractLifecycleBean impl
// First, analyze the group structure. Create maps of authorities to their parents for associations to create
// and delete. Also deal with 'overlaps' with other zones in the authentication chain.
final BatchProcessor<NodeDescription> groupProcessor = new BatchProcessor<NodeDescription>(
ChainingUserRegistrySynchronizer.logger, this.retryingTransactionHelper, this.ruleService,
this.applicationEventPublisher, userRegistry.getGroups(lastModified), zone + " Group Analysis",
this.loggingInterval, this.workerThreads, 20);
class Analyzer implements Worker<NodeDescription>
zone + " Group Analysis",
this.retryingTransactionHelper,
userRegistry.getGroups(lastModified),
this.workerThreads, 20,
this.applicationEventPublisher,
ChainingUserRegistrySynchronizer.logger,
this.loggingInterval);
class Analyzer implements BatchProcessWorker<NodeDescription>
{
private final Set<String> allZoneAuthorities = new TreeSet<String>();
private final Map<String, String> groupsToCreate = new TreeMap<String, String>();
@@ -579,19 +583,27 @@ public class ChainingUserRegistrySynchronizer extends AbstractLifecycleBean impl
return this.groupAssocsToDelete;
}
/*
* (non-Javadoc)
* @see org.alfresco.repo.security.sync.BatchProcessor.Worker#getIdentifier(java.lang.Object)
*/
public String getIdentifier(NodeDescription entry)
{
return entry.getSourceId();
}
/*
* (non-Javadoc)
* @see org.alfresco.repo.security.sync.BatchProcessor.Worker#process(java.lang.Object)
*/
public void beforeProcess() throws Throwable
{
// Disable rules
ruleService.disableRules();
// Authentication
AuthenticationUtil.setRunAsUser(AuthenticationUtil.getSystemUserName());
}
public void afterProcess() throws Throwable
{
// Enable rules
ruleService.enableRules();
// Clear authentication
AuthenticationUtil.clearCurrentSecurityContext();
}
public void process(NodeDescription group) throws Throwable
{
PropertyMap groupProperties = group.getProperties();
@@ -801,17 +813,36 @@ public class ChainingUserRegistrySynchronizer extends AbstractLifecycleBean impl
// Add the groups and their parent associations in depth-first order
final Map<String, String> groupsToCreate = groupAnalyzer.getGroupsToCreate();
BatchProcessor<Map.Entry<String, Set<String>>> groupCreator = new BatchProcessor<Map.Entry<String, Set<String>>>(
ChainingUserRegistrySynchronizer.logger, this.retryingTransactionHelper, this.ruleService,
this.applicationEventPublisher, sortedGroupAssociations.entrySet(), zone
+ " Group Creation and Association", this.loggingInterval, this.workerThreads, 20);
groupCreator.process(new Worker<Map.Entry<String, Set<String>>>()
zone + " Group Creation and Association",
this.retryingTransactionHelper,
sortedGroupAssociations.entrySet(),
this.workerThreads, 20,
this.applicationEventPublisher,
ChainingUserRegistrySynchronizer.logger,
this.loggingInterval);
groupCreator.process(new BatchProcessWorker<Map.Entry<String, Set<String>>>()
{
public String getIdentifier(Map.Entry<String, Set<String>> entry)
{
return entry.getKey() + " " + entry.getValue();
}
public void beforeProcess() throws Throwable
{
// Disable rules
ruleService.disableRules();
// Authentication
AuthenticationUtil.setRunAsUser(AuthenticationUtil.getSystemUserName());
}
public void afterProcess() throws Throwable
{
// Enable rules
ruleService.enableRules();
// Clear authentication
AuthenticationUtil.clearCurrentSecurityContext();
}
public void process(Map.Entry<String, Set<String>> entry) throws Throwable
{
Set<String> parents = entry.getValue();
@@ -896,10 +927,14 @@ public class ChainingUserRegistrySynchronizer extends AbstractLifecycleBean impl
}
}
final BatchProcessor<NodeDescription> personProcessor = new BatchProcessor<NodeDescription>(
ChainingUserRegistrySynchronizer.logger, this.retryingTransactionHelper, this.ruleService,
this.applicationEventPublisher, userRegistry.getPersons(lastModified), zone
+ " User Creation and Association", this.loggingInterval, this.workerThreads, 10);
class PersonWorker implements Worker<NodeDescription>
zone + " User Creation and Association",
this.retryingTransactionHelper,
userRegistry.getPersons(lastModified),
this.workerThreads, 10,
this.applicationEventPublisher,
ChainingUserRegistrySynchronizer.logger,
this.loggingInterval);
class PersonWorker implements BatchProcessWorker<NodeDescription>
{
private long latestTime;
@@ -918,6 +953,22 @@ public class ChainingUserRegistrySynchronizer extends AbstractLifecycleBean impl
return entry.getSourceId();
}
public void beforeProcess() throws Throwable
{
// Disable rules
ruleService.disableRules();
// Authentication
AuthenticationUtil.setRunAsUser(AuthenticationUtil.getSystemUserName());
}
public void afterProcess() throws Throwable
{
// Enable rules
ruleService.enableRules();
// Clear authentication
AuthenticationUtil.clearCurrentSecurityContext();
}
public void process(NodeDescription person) throws Throwable
{
PropertyMap personProperties = person.getProperties();
@@ -1055,10 +1106,14 @@ public class ChainingUserRegistrySynchronizer extends AbstractLifecycleBean impl
if (allowDeletions)
{
BatchProcessor<String> authorityDeletionProcessor = new BatchProcessor<String>(
ChainingUserRegistrySynchronizer.logger, this.retryingTransactionHelper, this.ruleService,
this.applicationEventPublisher, deletionCandidates, zone + " Authority Deletion",
this.loggingInterval, this.workerThreads, 10);
class AuthorityDeleter implements Worker<String>
zone + " Authority Deletion",
this.retryingTransactionHelper,
deletionCandidates,
this.workerThreads, 10,
this.applicationEventPublisher,
ChainingUserRegistrySynchronizer.logger,
this.loggingInterval);
class AuthorityDeleter implements BatchProcessWorker<String>
{
private int personProcessedCount;
private int groupProcessedCount;
@@ -1078,6 +1133,22 @@ public class ChainingUserRegistrySynchronizer extends AbstractLifecycleBean impl
return entry;
}
public void beforeProcess() throws Throwable
{
// Disable rules
ruleService.disableRules();
// Authentication
AuthenticationUtil.setRunAsUser(AuthenticationUtil.getSystemUserName());
}
public void afterProcess() throws Throwable
{
// Enable rules
ruleService.enableRules();
// Clear authentication
AuthenticationUtil.clearCurrentSecurityContext();
}
public void process(String authority) throws Throwable
{
if (AuthorityType.getAuthorityType(authority) == AuthorityType.USER)
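The beforeProcess()/afterProcess() bodies are identical across the four workers in this file; a hypothetical base class (not part of this commit) could capture the pattern once:

import org.alfresco.repo.batch.BatchProcessor.BatchProcessWorker;
import org.alfresco.repo.security.authentication.AuthenticationUtil;
import org.alfresco.service.cmr.rule.RuleService;

// Hypothetical base worker capturing the recurring setup/teardown above.
public abstract class RunAsSystemWorker<T> implements BatchProcessWorker<T>
{
    private final RuleService ruleService;

    protected RunAsSystemWorker(RuleService ruleService)
    {
        this.ruleService = ruleService;
    }

    public void beforeProcess() throws Throwable
    {
        // Disable rules and run as the system user for the batch
        ruleService.disableRules();
        AuthenticationUtil.setRunAsUser(AuthenticationUtil.getSystemUserName());
    }

    public void afterProcess() throws Throwable
    {
        // Re-enable rules and clear authentication
        ruleService.enableRules();
        AuthenticationUtil.clearCurrentSecurityContext();
    }
}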

View File

@@ -61,6 +61,9 @@ public class TraceableThreadFactory implements ThreadFactory
this.namePrefix = "TraceableThread-" + factoryNumber.getAndIncrement() + "-thread-";
this.threadNumber = new AtomicInteger(1);
this.threadDaemon = true;
this.threadPriority = Thread.NORM_PRIORITY;
}
/**