Merged 5.0.N (5.0.3) to 5.1.N (5.1.1) (PARTIAL MERGE)

114790 amorarasu: MNT-15007: CLONE - String values when migrating from MySQL to other DBs
      Merged V4.2-BUG-FIX (4.2.6) to 5.0.N (5.0.3)
         114311 amorarasu: Merged V4.1-BUG-FIX (4.1.11) to V4.2-BUG-FIX (4.2.6)
            114245 tvalkevych: Merged V4.1.9 (4.1.9.13) to V4.1-BUG-FIX (4.1.11)
               113717 dhulley: MNT-14911: String values when migrating from MySQL to other DBs
                - Add a new job that allows node string values to be re-persisted according to the current 'system.maximumStringLength' value
                - Job is unscheduled by default
                - Set the 'system.maximumStringLength' and the 'system.maximumStringLength.jobCronExpression'
                - Various touched code format fixes, method naming fixes, etc


git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/alfresco/BRANCHES/DEV/5.1.N/root@114988 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
This commit is contained in:
Raluca Munteanu
2015-10-23 07:46:06 +00:00
parent fb20d2a4e2
commit d0f097601a
15 changed files with 1185 additions and 444 deletions

View File

@@ -750,6 +750,17 @@
</where>
</select>
<!-- Fetch all node properties whose *actual* (original) data type matches the given
     type ordinal (idOne), limited to the node id window [idTwo inclusive, idThree exclusive).
     Used by NodeStringLengthWorker via NodeDAO.selectNodePropertiesByDataType to find
     TEXT properties that may need re-persisting. -->
<select id="select_PropertiesByActualType" parameterType="Ids" resultMap="result_NodeProperty">
    <include refid="alfresco.node.select_NodeProperty_Results"/>
    from
        alf_node node
        join alf_node_properties prop on (prop.node_id = node.id)
    where
        actual_type_n = #{idOne} and
        <![CDATA[node.id >= #{idTwo}]]> and
        <![CDATA[node.id < #{idThree}]]>
</select>
<sql id="select_NodeAspects_Results">
select
node.id as node_id,
@@ -757,7 +768,7 @@
aspects.qname_id as qname_id
</sql>
<select id="select_NodeAspects" parameterType="Ids" resultMap="result_NodeAspects">
<select id="select_NodeAspects" parameterType="NodeAspects" resultMap="result_NodeAspects">
<include refid="alfresco.node.select_NodeAspects_Results"/>
from
alf_node node
@@ -1195,6 +1206,20 @@
<if test="isPrimary != null">and assoc.is_primary = #{isPrimary}</if>
</select>
<!-- Lowest node id in alf_node. SQL MIN over an empty table yields NULL,
     which maps to a null Long for the caller. -->
<select id="select_NodeMinId" resultType="java.lang.Long">
    select
        min(id)
    from
        alf_node
</select>
<!-- Highest node id in alf_node; NULL (null Long) when the table is empty. -->
<select id="select_NodeMaxId" resultType="java.lang.Long">
    select
        max(id)
    from
        alf_node
</select>
<sql id="select_Transaction_Results">
select
txn.id as id,

View File

@@ -236,6 +236,17 @@
</property>
</bean>
<!-- String length adjustment -->
<bean id="nodeStringLengthWorker" class="org.alfresco.repo.node.db.NodeStringLengthWorker">
<constructor-arg index="0" ref="nodeDAO" />
<constructor-arg index="1" ref="jobLockService" />
<constructor-arg index="2" ref="transactionService" />
<constructor-arg index="3" ref="qnameDAO" />
<constructor-arg index="4" ref="policyBehaviourFilter" />
<constructor-arg index="5" value="${system.maximumStringLength.jobQueryRange}" />
<constructor-arg index="6" value="${system.maximumStringLength.jobThreadCount}" />
</bean>
<bean id="storesToIgnorePolicies" class="org.springframework.beans.factory.config.SetFactoryBean">
<property name="sourceSet">
<set>

View File

@@ -215,8 +215,12 @@ system.readpermissions.bulkfetchsize=1000
# Manually control how the system handles maximum string lengths.
# Any zero or negative value is ignored.
# Only change this after consulting support or reading the appropriate Javadocs for
# org.alfresco.repo.domain.schema.SchemaBootstrap for V2.1.2
# org.alfresco.repo.domain.schema.SchemaBootstrap for V2.1.2.
# Before a database migration, string value storage may need to be adjusted by running the
# scheduled job (see system.maximumStringLength.jobCronExpression below).
system.maximumStringLength=-1
system.maximumStringLength.jobCronExpression=* * * * * ? 2099
system.maximumStringLength.jobQueryRange=10000
system.maximumStringLength.jobThreadCount=4
#
# Limit hibernate session size by trying to amalgamate events for the L2 session invalidation

View File

@@ -120,6 +120,20 @@
</property>
</bean>
<!-- Quartz job wrapping NodeStringLengthWorker: re-persists node string property
     values according to the current system.maximumStringLength setting. -->
<bean id="maxStringLengthJobDetail" class="org.springframework.scheduling.quartz.JobDetailBean">
    <property name="jobClass" value="org.alfresco.repo.node.db.NodeStringLengthWorker$NodeStringLengthJob" />
    <property name="jobDataAsMap">
        <map>
            <!-- Key must match NodeStringLengthJob.JOB_DATA_NODE_WORKER -->
            <entry key="nodeStringLengthWorker" value-ref="nodeStringLengthWorker" />
        </map>
    </property>
</bean>
<!-- Trigger for the job above. The default cron expression
     (system.maximumStringLength.jobCronExpression=* * * * * ? 2099) only fires in the
     year 2099, i.e. the job is effectively unscheduled until the property is overridden. -->
<bean id="maxStringLengthJobTrigger" class="org.alfresco.util.CronTriggerBean">
    <property name="jobDetail" ref="maxStringLengthJobDetail" />
    <property name="scheduler" ref="schedulerFactory" />
    <property name="cronExpression" value="${system.maximumStringLength.jobCronExpression}" />
</bean>
<bean id="nodeServiceCleanupJobDetail" class="org.springframework.scheduling.quartz.JobDetailBean">
<property name="jobClass">
<value>org.alfresco.repo.node.cleanup.NodeCleanupJob</value>

View File

@@ -18,9 +18,6 @@
*/
package org.alfresco.encryption;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* Checks the repository key stores.
*
@@ -29,8 +26,6 @@ import org.apache.commons.logging.LogFactory;
*/
public class KeyStoreChecker
{
private static final Log logger = LogFactory.getLog(KeyStoreChecker.class);
private AlfrescoKeyStore mainKeyStore;
public KeyStoreChecker()

View File

@@ -22,8 +22,10 @@ import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import javax.crypto.SealedObject;
@@ -318,8 +320,14 @@ public class ReEncryptor implements ApplicationContextAware
// get encrypted properties
Collection<PropertyDefinition> propertyDefs = dictionaryDAO.getPropertiesOfDataType(DataTypeDefinition.ENCRYPTED);
Set<QName> qnames = new HashSet<QName>();
for(PropertyDefinition propDef : propertyDefs)
{
qnames.add(propDef.getName());
}
// TODO use callback mechanism, or select based on set of nodes?
List<NodePropertyEntity> properties = nodeDAO.selectProperties(propertyDefs);
List<NodePropertyEntity> properties = nodeDAO.selectNodePropertiesByTypes(qnames);
if(logger.isDebugEnabled())
{

View File

@@ -29,8 +29,8 @@ import java.util.Set;
import org.alfresco.repo.node.NodeBulkLoader;
import org.alfresco.repo.transaction.TransactionalResourceHelper;
import org.alfresco.service.cmr.dictionary.DataTypeDefinition;
import org.alfresco.service.cmr.dictionary.InvalidTypeException;
import org.alfresco.service.cmr.dictionary.PropertyDefinition;
import org.alfresco.service.cmr.repository.AssociationRef;
import org.alfresco.service.cmr.repository.ChildAssociationRef;
import org.alfresco.service.cmr.repository.InvalidNodeRefException;
@@ -856,6 +856,16 @@ public interface NodeDAO extends NodeBulkLoader
*/
public Long getMaxTxnId();
/**
* @return Returns the minimum node id or <tt>0</tt> if there are no nodes
*/
public Long getMinNodeId();
/**
* @return Returns the maximum node id or <tt>0</tt> if there are no nodes
*/
public Long getMaxNodeId();
/**
* Select children by property values
*/
@@ -868,7 +878,17 @@ public interface NodeDAO extends NodeBulkLoader
/**
* Used by the re-encryptor to re-encrypt encryptable properties with a new encryption key.
*/
public List<NodePropertyEntity> selectProperties(Collection<PropertyDefinition> propertyDefs);
public List<NodePropertyEntity> selectNodePropertiesByTypes(Set<QName> qnames);
/**
* Select all node properties that are between two node IDs and of the given <b>actual</b> type
*
* @param dataType the actual, original type of the property, as given by one of the constants
* on {@link DataTypeDefinition#TEXT DataTypeDefinition}
* @param minNodeId the minimum node ID (inclusive)
* @param maxNodeId the maximum node ID (exclusive)
*/
public List<NodePropertyEntity> selectNodePropertiesByDataType(QName dataType, long minNodeId, long maxNodeId);
/**
* Counts the number of child associations directly under parentNodeId.

View File

@@ -51,7 +51,6 @@ import org.alfresco.repo.domain.node.TransactionEntity;
import org.alfresco.repo.domain.node.TransactionQueryEntity;
import org.alfresco.repo.domain.qname.QNameDAO;
import org.alfresco.service.cmr.dictionary.DictionaryService;
import org.alfresco.service.cmr.dictionary.PropertyDefinition;
import org.alfresco.service.cmr.repository.ChildAssociationRef;
import org.alfresco.service.cmr.repository.NodeRef;
import org.alfresco.service.cmr.repository.StoreRef;
@@ -97,12 +96,15 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
private static final String SELECT_NODES_BY_IDS = "alfresco.node.select_NodesByIds";
private static final String SELECT_NODE_PROPERTIES = "alfresco.node.select_NodeProperties";
private static final String SELECT_PROPERTIES_BY_TYPES = "alfresco.node.select_PropertiesByTypes";
private static final String SELECT_PROPERTIES_BY_ACTUAL_TYPE = "alfresco.node.select_PropertiesByActualType";
private static final String SELECT_NODE_ASPECTS = "alfresco.node.select_NodeAspects";
private static final String INSERT_NODE_PROPERTY = "alfresco.node.insert_NodeProperty";
private static final String UPDATE_PRIMARY_CHILDREN_SHARED_ACL = "alfresco.node.update.update_PrimaryChildrenSharedAcl";
private static final String INSERT_NODE_ASPECT = "alfresco.node.insert_NodeAspect";
private static final String DELETE_NODE_ASPECTS = "alfresco.node.delete_NodeAspects";
private static final String DELETE_NODE_PROPERTIES = "alfresco.node.delete_NodeProperties";
private static final String SELECT_NODE_MIN_ID = "alfresco.node.select_NodeMinId";
private static final String SELECT_NODE_MAX_ID = "alfresco.node.select_NodeMaxId";
private static final String SELECT_NODES_WITH_ASPECT_IDS = "alfresco.node.select_NodesWithAspectIds";
private static final String INSERT_NODE_ASSOC = "alfresco.node.insert.insert_NodeAssoc";
private static final String UPDATE_NODE_ASSOC = "alfresco.node.update_NodeAssoc";
@@ -350,6 +352,18 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
return template.update(UPDATE_NODE_BULK_TOUCH, ids);
}
@Override
public Long getMinNodeId()
{
    // Runs "select min(id) from alf_node" (select_NodeMinId).
    // NOTE(review): the NodeDAO javadoc promises 0 when there are no nodes, but SQL MIN
    // over an empty table is NULL, so this returns null in that case; callers such as
    // NodeDAOTest null-check the result - confirm which contract is intended.
    return (Long) template.selectOne(SELECT_NODE_MIN_ID);
}
@Override
public Long getMaxNodeId()
{
    // Runs "select max(id) from alf_node" (select_NodeMaxId).
    // NOTE(review): as with getMinNodeId, an empty table yields null rather than the
    // documented 0 - callers should null-check or the contract should be fixed.
    return (Long) template.selectOne(SELECT_NODE_MAX_ID);
}
@Override
protected void updatePrimaryChildrenSharedAclId(
Long txnId,
@@ -540,6 +554,54 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
return makePersistentPropertiesMap(rows);
}
@Override
public List<NodePropertyEntity> selectNodePropertiesByTypes(Set<QName> qnames)
{
    // Generalized replacement for the old selectProperties(Collection<PropertyDefinition>):
    // fetches all property rows whose property QName is one of the given qnames.
    final List<NodePropertyEntity> properties = new ArrayList<NodePropertyEntity>();

    // Resolve QNames to their database ids. QNames with no id are simply absent from
    // the result (false: presumably "do not create missing QName ids" - confirm).
    Set<Long> qnameIds = qnameDAO.convertQNamesToIds(qnames, false);
    if(qnameIds.size() > 0)
    {
        IdsEntity param = new IdsEntity();
        param.setIds(new ArrayList<Long>(qnameIds));
        // Accumulate rows via a ResultHandler to avoid materializing an intermediate list
        // TODO - use a callback approach
        template.select(SELECT_PROPERTIES_BY_TYPES, param, new ResultHandler()
        {
            @Override
            public void handleResult(ResultContext context)
            {
                properties.add((NodePropertyEntity)context.getResultObject());
            }
        });
    }
    return properties;
}
@Override
public List<NodePropertyEntity> selectNodePropertiesByDataType(QName dataType, long minNodeId, long maxNodeId)
{
    // Select all properties whose *actual* (original) data type matches dataType for
    // nodes in the id window [minNodeId inclusive, maxNodeId exclusive) - bounds are
    // enforced by the select_PropertiesByActualType mapped statement.
    int typeOrdinal = NodePropertyValue.convertToTypeOrdinal(dataType);
    IdsEntity ids = new IdsEntity();
    ids.setIdOne((long)typeOrdinal);
    ids.setIdTwo(minNodeId);
    ids.setIdThree(maxNodeId);

    final List<NodePropertyEntity> properties = new ArrayList<NodePropertyEntity>();
    template.select(SELECT_PROPERTIES_BY_ACTUAL_TYPE, ids, new ResultHandler()
    {
        @Override
        public void handleResult(ResultContext context)
        {
            properties.add((NodePropertyEntity)context.getResultObject());
        }
    });
    return properties;
}
@Override
protected int deleteNodeProperties(Long nodeId, Set<Long> qnameIds)
{
@@ -1619,37 +1681,6 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
return template.selectOne(SELECT_TXN_MAX_ID);
}
@Override
public List<NodePropertyEntity> selectProperties(Collection<PropertyDefinition> propertyDefs)
{
    // Superseded by selectNodePropertiesByTypes(Set<QName>), which takes the QNames
    // directly instead of extracting them from PropertyDefinitions here.
    final List<NodePropertyEntity> properties = new ArrayList<NodePropertyEntity>();

    Set<QName> qnames = new HashSet<QName>();
    for(PropertyDefinition propDef : propertyDefs)
    {
        qnames.add(propDef.getName());
    }

    // qnames of properties that are encrypted
    Set<Long> qnameIds = qnameDAO.convertQNamesToIds(qnames, false);
    if(qnameIds.size() > 0)
    {
        IdsEntity param = new IdsEntity();
        param.setIds(new ArrayList<Long>(qnameIds));
        // TODO - use a callback approach
        template.select(SELECT_PROPERTIES_BY_TYPES, param, new ResultHandler()
        {
            @Override
            public void handleResult(ResultContext context)
            {
                properties.add((NodePropertyEntity)context.getResultObject());
            }
        });
    }

    return properties;
}
public int countChildAssocsByParent(Long parentNodeId, boolean isPrimary)
{
NodeEntity parentNode = new NodeEntity();

View File

@@ -21,6 +21,8 @@ package org.alfresco.repo.domain.patch;
import java.util.List;
import java.util.Set;
import org.alfresco.repo.domain.node.NodeDAO;
import org.alfresco.service.cmr.repository.NodeRef;
import org.alfresco.service.namespace.QName;
import org.alfresco.util.Pair;
@@ -36,6 +38,10 @@ public interface PatchDAO
{
// DM-related
/**
* @deprecated in 4.1: use {@link NodeDAO#getMaxNodeId()}
*/
@Deprecated
public long getMaxAdmNodeID();
/**

View File

@@ -0,0 +1,430 @@
/*
* Copyright (C) 2005-2010 Alfresco Software Limited.
*
* This file is part of Alfresco
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
*/
package org.alfresco.repo.node.db;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.alfresco.error.AlfrescoRuntimeException;
import org.alfresco.repo.batch.BatchProcessWorkProvider;
import org.alfresco.repo.batch.BatchProcessor;
import org.alfresco.repo.batch.BatchProcessor.BatchProcessWorker;
import org.alfresco.repo.batch.BatchProcessor.BatchProcessWorkerAdaptor;
import org.alfresco.repo.domain.node.NodeDAO;
import org.alfresco.repo.domain.node.NodePropertyEntity;
import org.alfresco.repo.domain.node.NodePropertyValue;
import org.alfresco.repo.domain.qname.QNameDAO;
import org.alfresco.repo.domain.schema.SchemaBootstrap;
import org.alfresco.repo.lock.JobLockService;
import org.alfresco.repo.lock.JobLockService.JobLockRefreshCallback;
import org.alfresco.repo.lock.LockAcquisitionException;
import org.alfresco.repo.policy.BehaviourFilter;
import org.alfresco.repo.transaction.RetryingTransactionHelper;
import org.alfresco.service.cmr.dictionary.DataTypeDefinition;
import org.alfresco.service.namespace.NamespaceService;
import org.alfresco.service.namespace.QName;
import org.alfresco.service.transaction.TransactionService;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.quartz.Job;
import org.quartz.JobDataMap;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;
import org.springframework.beans.BeansException;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
/**
* <h1>Max String Length Worker</h1>
*
* <h2>What it is</h2>
* A worker for a scheduled job that checks and adjusts string storage for persisted strings in the system.
* <p>
* <h2>Settings that control the behaviour</h2>
* <ul>
* <li><b>${system.maximumStringLength}</b> - the maximum length of a string that can be persisted in the *alf_node_properties.string_value* column.</li>
* <li><b>${system.maximumStringLength.jobQueryRange}</b> - the node ID range to query for.
* The process will repeat from the first to the last node, querying for up to this many nodes.
* Only reduce the value if the NodeDAO query takes a long time.</li>
* <li><b>${system.maximumStringLength.jobThreadCount}</b> - the number of threads that will handle persistence checks and changes.
* Increase or decrease this to allow for free CPU capacity on the machine executing the job.</li>
* </ul>
* <h2>How to use it</h2>
* Set {@code system.maximumStringLength} to the desired maximum and schedule the job by
* overriding {@code system.maximumStringLength.jobCronExpression} (the default expression only
* fires in 2099, i.e. the job is unscheduled). When run, the worker re-persists any string
* property whose current storage does not match the configured maximum.
*
* @author Derek Hulley
* @since 4.1.9.2
*/
public class NodeStringLengthWorker implements ApplicationContextAware
{
    /** Cluster-wide job lock name: prevents concurrent runs of this worker. */
    private static final QName LOCK = QName.createQName(NamespaceService.SYSTEM_MODEL_1_0_URI, "NodeStringLengthWorker");
    /** Job lock time-to-live (ms); refreshed via a callback while the job is active. */
    private static final long LOCK_TTL = 60000L;

    private static Log logger = LogFactory.getLog(NodeStringLengthWorker.class);

    private final NodeDAO nodeDAO;
    private final JobLockService jobLockService;
    private final TransactionService transactionService;
    private final QNameDAO qnameDAO;
    private final BehaviourFilter behaviourFilter;

    // Supplied by Spring; handed to the BatchProcessor for event publishing
    private ApplicationContext ctx;

    // Node-id window size for each DAO query (${system.maximumStringLength.jobQueryRange})
    private final int queryRange;
    // Batch processor worker thread count (${system.maximumStringLength.jobThreadCount})
    private final int threadCount;
    // Entries handed to each worker per transaction; hard-coded to 100 in the constructor
    private final int batchSize;

    /**
     * @param nodeDAO            DAO used to find and re-persist node string properties
     * @param jobLockService     used to take and refresh the cluster-wide {@link #LOCK}
     * @param transactionService supplies the retrying transaction helper for batch work
     * @param qnameDAO           resolves property qname ids back to {@link QName}s
     * @param behaviourFilter    used to disable policy behaviours while re-persisting
     * @param queryRange         node-id range queried per work-provider call
     * @param threadCount        number of batch processor threads
     */
    public NodeStringLengthWorker(
            NodeDAO nodeDAO, JobLockService jobLockService, TransactionService transactionService, QNameDAO qnameDAO,
            BehaviourFilter behaviourFilter,
            int queryRange, int threadCount)
    {
        this.nodeDAO = nodeDAO;
        this.jobLockService = jobLockService;
        this.transactionService = transactionService;
        this.qnameDAO = qnameDAO;
        this.behaviourFilter = behaviourFilter;

        this.queryRange = queryRange;
        this.threadCount = threadCount;
        this.batchSize = 100;
    }

    /**
     * Set the application context for event publishing during batch processing
     */
    @Override
    public void setApplicationContext(ApplicationContext applicationContext) throws BeansException
    {
        this.ctx = applicationContext;
    }

    /**
     * Performs the work, including logging details of progress.
     * Takes the cluster job lock, keeps it refreshed for the duration of the run,
     * and releases it in all cases. If the lock cannot be acquired the run is
     * silently skipped (another node is assumed to be doing the work).
     *
     * @return the (thread-safe) progress/result of this run
     */
    public NodeStringLengthWorkResult execute()
    {
        // Build refresh callback
        final NodeStringLengthWorkResult progress = new NodeStringLengthWorkResult();
        JobLockRefreshCallback lockCallback = new JobLockRefreshCallback()
        {
            @Override
            public void lockReleased()
            {
                // Lock was lost externally: flag the work to wind down
                progress.inProgress.set(false);
            }

            @Override
            public boolean isActive()
            {
                return progress.inProgress.get();
            }
        };

        String lockToken = null;
        try
        {
            progress.inProgress.set(true);
            // Get the lock
            lockToken = jobLockService.getLock(LOCK, LOCK_TTL);
            // Start the refresh timer
            jobLockService.refreshLock(lockToken, LOCK, LOCK_TTL, lockCallback);

            // Now we know that we'll do something
            if (logger.isInfoEnabled())
            {
                logger.info("NodeStringLengthWorker: Starting");
            }

            // Do the work
            doWork(progress);
            // Done
            if (logger.isInfoEnabled())
            {
                logger.info("NodeStringLengthWorker: " + progress);
            }
        }
        catch (LockAcquisitionException e)
        {
            // Some other node/run holds the lock: not an error
            if (logger.isDebugEnabled())
            {
                logger.debug("Skipping node string length job: " + e.getMessage());
            }
        }
        catch (Exception e)
        {
            progress.inProgress.set(false);
            logger.error("Node string length job " + progress);
            logger.error("Stopping node string length job with exception.", e);
        }
        finally
        {
            if (lockToken != null)
            {
                jobLockService.releaseLock(lockToken, LOCK);
            }
            progress.inProgress.set(false); // So the refresh callback stops reporting active
        }
        // Done
        return progress;
    }

    /**
     * Drives the {@link BatchProcessor} over all TEXT-typed node properties.
     * <p>
     * Synchronized as an in-VM guard; the job lock taken in {@link #execute()}
     * already prevents concurrent execution across the cluster.
     *
     * @param progress the thread-safe progress
     */
    private synchronized void doWork(NodeStringLengthWorkResult progress) throws Exception
    {
        // Build batch processor
        BatchProcessWorkProvider<NodePropertyEntity> workProvider = new NodeStringLengthWorkProvider(progress);
        BatchProcessWorker<NodePropertyEntity> worker = new NodeStringLengthBatch(progress);
        RetryingTransactionHelper retryingTransactionHelper = transactionService.getRetryingTransactionHelper();
        // The job must be able to write even when the system would otherwise be read-only
        retryingTransactionHelper.setForceWritable(true);

        BatchProcessor<NodePropertyEntity> batchProcessor = new BatchProcessor<NodePropertyEntity>(
                "NodeStringLengthWorker",
                retryingTransactionHelper,
                workProvider,
                threadCount,
                batchSize,
                ctx,
                logger,
                1000);
        batchProcessor.process(worker, true);
    }

    /**
     * Work provider for batch job providing string properties to process.
     * Walks the node-id space from the lowest id present up to the maximum id that
     * existed when the provider was created, in windows of {@code queryRange} ids;
     * nodes created after construction are not visited.
     *
     * @author Derek Hulley
     * @since 4.1.9.2
     */
    private class NodeStringLengthWorkProvider implements BatchProcessWorkProvider<NodePropertyEntity>
    {
        // Upper bound captured once at construction time
        private final long maxNodeId;
        private final NodeStringLengthWorkResult progress;
        private NodeStringLengthWorkProvider(NodeStringLengthWorkResult progress)
        {
            this.progress = progress;
            this.maxNodeId = nodeDAO.getMaxNodeId();
        }

        @Override
        public int getTotalEstimatedWorkSize()
        {
            // Unknown up front
            return -1;
        }

        @Override
        public Collection<NodePropertyEntity> getNextWork()
        {
            // Check that there are not too many errors
            if (progress.errors.get() > 1000)
            {
                logger.warn("Node string length work terminating; too many errors.");
                return Collections.emptyList();
            }

            // Keep shifting the query window up until we get results or we hit the original max node ID
            List<NodePropertyEntity> ret = Collections.emptyList();
            while (ret.isEmpty() && progress.currentMinNodeId.get() < maxNodeId)
            {
                // Calculate the node ID range
                Long minNodeId = null;
                if (progress.currentMinNodeId.get() == 0L)
                {
                    // First call: start at the lowest node id present
                    minNodeId = nodeDAO.getMinNodeId();
                    progress.currentMinNodeId.set(minNodeId);
                }
                else
                {
                    minNodeId = progress.currentMinNodeId.addAndGet(queryRange);
                }
                // NOTE(review): this local shadows the field holding the overall maximum
                long maxNodeId = minNodeId + queryRange;
                // Query for TEXT properties in [minNodeId, maxNodeId)
                ret = nodeDAO.selectNodePropertiesByDataType(DataTypeDefinition.TEXT, minNodeId, maxNodeId);
            }
            // Done
            if (logger.isDebugEnabled())
            {
                logger.debug("Node string length work provider found " + ret.size() + " new property entities.");
            }
            return ret;
        }
    }

    /**
     * Class that does the actual node manipulation to change the string storage.
     * A string longer than the configured maximum must be persisted as a
     * serializable (ANY); a shorter one as a plain string (TEXT). Any property whose
     * current persisted type disagrees is removed and re-added so that it is stored
     * according to the current {@link SchemaBootstrap#getMaxStringLength()} setting.
     *
     * @author Derek Hulley
     * @since 4.1.9.2
     */
    private class NodeStringLengthBatch extends BatchProcessWorkerAdaptor<NodePropertyEntity>
    {
        private final int typeOrdinalText = NodePropertyValue.convertToTypeOrdinal(DataTypeDefinition.TEXT);
        private final int typeOrdinalAny = NodePropertyValue.convertToTypeOrdinal(DataTypeDefinition.ANY);
        private final NodeStringLengthWorkResult progress;
        private NodeStringLengthBatch(NodeStringLengthWorkResult progress)
        {
            this.progress = progress;
        }

        @Override
        public void process(NodePropertyEntity entry) throws Throwable
        {
            progress.propertiesProcessed.incrementAndGet();
            try
            {
                Long nodeId = entry.getNodeId();
                NodePropertyValue prop = entry.getValue();
                // Get the current string value
                String text = (String) prop.getValue(DataTypeDefinition.TEXT);
                // Decide if the string needs changing or not
                boolean repersist = false;
                int persistedTypeOrdinal = prop.getPersistedType().intValue();
                if (text.length() > SchemaBootstrap.getMaxStringLength())
                {
                    // The text needs to be stored as a serializable_value (ANY)
                    if (typeOrdinalAny != persistedTypeOrdinal)
                    {
                        repersist = true;
                    }
                }
                else
                {
                    // The text is shorter than the current max, so it should be stored as a string_value (TEXT)
                    if (typeOrdinalText != persistedTypeOrdinal)
                    {
                        repersist = true;
                    }
                }

                // Only do any work if we need to
                if (repersist)
                {
                    // We do not want any behaviours associated with our transactions.
                    // NOTE(review): disableBehaviour() is never explicitly re-enabled here;
                    // assumed to be transaction-scoped - confirm.
                    behaviourFilter.disableBehaviour();

                    progress.propertiesChanged.incrementAndGet();
                    if (logger.isTraceEnabled())
                    {
                        logger.trace("Fixing property " + getIdentifier(entry) + ". Value: " + text);
                    }
                    else if (logger.isDebugEnabled())
                    {
                        logger.debug("Fixing property " + getIdentifier(entry));
                    }
                    // Remove and re-add so the value is persisted under the current maximum
                    Long propQNameId = entry.getKey().getQnameId();
                    QName propQName = qnameDAO.getQName(propQNameId).getSecond();
                    nodeDAO.removeNodeProperties(nodeId, Collections.singleton(propQName));
                    nodeDAO.addNodeProperty(nodeId, propQName, text);
                }
            }
            catch (Exception e)
            {
                // Record the failure
                progress.errors.incrementAndGet();
                // Rethrow so that the processing framework can handle things
                throw e;
            }
        }

        @Override
        public String getIdentifier(NodePropertyEntity entry)
        {
            Long nodeId = entry.getNodeId();
            NodePropertyValue prop = entry.getValue();
            return ("Property with persisted type " + prop.getPersistedType() + " on node " + nodeDAO.getNodePair(nodeId));
        }
    }

    /**
     * Thread-safe helper class to carry the job progress information
     * @author Derek Hulley
     * @since 4.1.9.2
     */
    public static class NodeStringLengthWorkResult
    {
        // Shared with the lock refresh callback; false once the run ends or the lock is lost
        private final AtomicBoolean inProgress = new AtomicBoolean(false);
        private final AtomicInteger propertiesProcessed = new AtomicInteger(0);
        private final AtomicInteger propertiesChanged = new AtomicInteger(0);
        private final AtomicInteger errors = new AtomicInteger(0);
        // Low edge of the current query window; 0 means "not started yet"
        private final AtomicLong currentMinNodeId = new AtomicLong(0L);

        @Override
        public String toString()
        {
            String part1 = "Changed";
            String part2 = String.format(" %4d out of a potential %4d properties. ", propertiesChanged.get(), propertiesProcessed.get());
            String part3 = String.format("[%2d Errors]", errors.get());
            return part1 + part2 + part3;
        }

        public int getPropertiesProcessed()
        {
            return propertiesProcessed.get();
        }

        public int getPropertiesChanged()
        {
            return propertiesChanged.get();
        }

        public int getErrors()
        {
            return errors.get();
        }
    }

    /**
     * A scheduled job that checks and adjusts string storage for persisted strings in the system.
     * <p>
     * Job data:
     * <ul>
     *    <li><b>nodeStringLengthWorker</b> - The worker that performs the actual processing.</li>
     * </ul>
     *
     * @author Derek Hulley
     * @since 4.1.9.2
     * @see NodeStringLengthWorker
     */
    public static class NodeStringLengthJob implements Job
    {
        /** Job-data key under which the {@link NodeStringLengthWorker} bean must be registered. */
        public static final String JOB_DATA_NODE_WORKER = "nodeStringLengthWorker";

        public void execute(JobExecutionContext context) throws JobExecutionException
        {
            JobDataMap jobData = context.getJobDetail().getJobDataMap();
            // Extract the worker to use
            Object nodeStringLengthWorkerObj = jobData.get(JOB_DATA_NODE_WORKER);
            if (nodeStringLengthWorkerObj == null || !(nodeStringLengthWorkerObj instanceof NodeStringLengthWorker))
            {
                throw new AlfrescoRuntimeException(
                        "MaxStringLengthJob data '" + JOB_DATA_NODE_WORKER + "' must reference a " + NodeStringLengthWorker.class.getSimpleName());
            }
            NodeStringLengthWorker worker = (NodeStringLengthWorker) nodeStringLengthWorkerObj;
            worker.execute();
        }
    }
}

View File

@@ -19,9 +19,12 @@
package org.alfresco.repo.domain.node;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;
import junit.framework.TestCase;
@@ -33,8 +36,10 @@ import org.alfresco.repo.domain.node.NodeDAO.NodeRefQueryCallback;
import org.alfresco.repo.transaction.RetryingTransactionHelper;
import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
import org.alfresco.service.ServiceRegistry;
import org.alfresco.service.cmr.dictionary.DataTypeDefinition;
import org.alfresco.service.cmr.repository.NodeRef;
import org.alfresco.service.cmr.repository.StoreRef;
import org.alfresco.service.namespace.QName;
import org.alfresco.service.transaction.TransactionService;
import org.alfresco.test_category.OwnJVMTestsCategory;
import org.alfresco.util.ApplicationContextHelper;
@@ -106,6 +111,80 @@ public class NodeDAOTest extends TestCase
assertNotNull("Txn ID should be present by forcing it", txnId2);
}
public void testSelectNodePropertiesByTypes() throws Exception
{
    // Query for all cm:name property instances in the repository
    final Set<QName> qnames = Collections.singleton(ContentModel.PROP_NAME);
    RetryingTransactionCallback<List<NodePropertyEntity>> callback = new RetryingTransactionCallback<List<NodePropertyEntity>>()
    {
        public List<NodePropertyEntity> execute() throws Throwable
        {
            return nodeDAO.selectNodePropertiesByTypes(qnames);
        }
    };
    List<NodePropertyEntity> props = txnHelper.doInTransaction(callback, true);
    // Nothing to verify if the repository holds no cm:name properties yet
    if (props.size() == 0)
    {
        return;
    }
    // Spot-check: a cm:name property must carry a non-null string value
    NodePropertyEntity prop = props.get(0);
    String value = prop.getValue().getStringValue();
    assertNotNull(value);
}
public void testSelectNodePropertiesByDataType() throws Exception
{
    // Prepare the bits that repeat the actual query
    final AtomicLong min = new AtomicLong(0L);
    final AtomicLong max = new AtomicLong(0L);
    RetryingTransactionCallback<List<NodePropertyEntity>> callback = new RetryingTransactionCallback<List<NodePropertyEntity>>()
    {
        public List<NodePropertyEntity> execute() throws Throwable
        {
            long minNodeId = min.get();
            long maxNodeId = max.get();
            return nodeDAO.selectNodePropertiesByDataType(DataTypeDefinition.TEXT, minNodeId, maxNodeId);
        }
    };
    // Get the current max node id
    Long minNodeId = nodeDAO.getMinNodeId();
    if (minNodeId == null)
    {
        return;                 // there are no nodes!
    }
    Long maxNodeId = nodeDAO.getMaxNodeId();    // won't be null at this point as we have a min
    min.set(minNodeId.longValue());
    // Iterate across all nodes in the system
    while (min.longValue() <= maxNodeId.longValue())
    {
        max.set(min.get() + 1000L);             // 1K increments
        // Get the properties
        List<NodePropertyEntity> props = txnHelper.doInTransaction(callback, true);
        for (NodePropertyEntity prop : props)
        {
            // Check the property
            Long nodeId = prop.getNodeId();
            assertNotNull(nodeId);
            // DAO contract: min inclusive, max exclusive
            assertTrue("the min should be inclusive.", min.longValue() <= nodeId.longValue());
            assertTrue("the max should be exclusive.", max.longValue() > nodeId.longValue());
            NodePropertyValue propVal = prop.getValue();
            assertNotNull(propVal);
            assertEquals("STRING", propVal.getActualTypeString());
            // The value must be available as either a plain string or a serializable
            String valueStr = propVal.getStringValue();
            Serializable valueSer = propVal.getSerializableValue();
            assertTrue("Test is either TEXT or SERIALIZABLE", valueStr != null || valueSer != null);
            String value = (String) propVal.getValue(DataTypeDefinition.TEXT);
            assertNotNull(value);
            // This all checks out
        }
        // Shift the window up
        min.set(max.get());
    }
}
public void testGetNodesWithAspects() throws Throwable
{
final NodeRefQueryCallback callback = new NodeRefQueryCallback()
@@ -130,6 +209,16 @@ public class NodeDAOTest extends TestCase
}, true);
}
public void testGetMinMaxNodeId() throws Exception
{
    Long minNodeId = nodeDAO.getMinNodeId();
    assertNotNull(minNodeId);
    assertTrue(minNodeId.longValue() > 0L);
    Long maxNodeId = nodeDAO.getMaxNodeId();
    assertNotNull(maxNodeId);
    // NOTE(review): the strict '>' assumes at least two nodes exist; with exactly one
    // node min == max and this assertion would fail - confirm bootstrap guarantees more.
    assertTrue(maxNodeId.longValue() > minNodeId.longValue());
}
public void testGetPrimaryChildAcls() throws Throwable
{
List<NodeIdAndAclId> acls = nodeDAO.getPrimaryChildrenAcls(1L);
@@ -150,6 +239,25 @@ public class NodeDAOTest extends TestCase
}
}
public void testCacheNodes() throws Throwable
{
    Long minNodeId = nodeDAO.getMinNodeId();
    // NOTE(review): capacity hint of 10000 but only 1000 ids are added below
    final List<Long> nodeIds = new ArrayList<Long>(10000);
    for (long i = 0; i < 1000; i++)
    {
        nodeIds.add(Long.valueOf(minNodeId.longValue() + i));
    }
    // Warm the node caches in a single read-only transaction; presumably
    // cacheNodesById tolerates ids with no corresponding node - confirm.
    RetryingTransactionCallback<Void> callback = new RetryingTransactionCallback<Void>()
    {
        public Void execute() throws Throwable
        {
            nodeDAO.cacheNodesById(nodeIds);
            return null;
        }
    };
    txnHelper.doInTransaction(callback, true);
}
/**
* Ensure that the {@link NodeEntity} values cached as root nodes are valid instances.
* <p/>

View File

@@ -32,8 +32,10 @@ import org.alfresco.model.ContentModel;
import org.alfresco.repo.domain.node.NodeDAO;
import org.alfresco.repo.domain.node.NodeDAO.ChildAssocRefQueryCallback;
import org.alfresco.repo.domain.node.Transaction;
import org.alfresco.repo.domain.schema.SchemaBootstrap;
import org.alfresco.repo.node.BaseNodeServiceTest;
import org.alfresco.repo.node.cleanup.NodeCleanupRegistry;
import org.alfresco.repo.node.db.NodeStringLengthWorker.NodeStringLengthWorkResult;
import org.alfresco.repo.transaction.AlfrescoTransactionSupport;
import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
import org.alfresco.repo.transaction.TransactionListenerAdapter;
@@ -48,6 +50,9 @@ import org.alfresco.service.transaction.TransactionService;
import org.alfresco.test_category.OwnJVMTestsCategory;
import org.alfresco.util.Pair;
import org.junit.experimental.categories.Category;
import org.hibernate.dialect.Dialect;
import org.hibernate.dialect.MySQLInnoDBDialect;
import org.springframework.context.event.ContextRefreshedEvent;
import org.springframework.extensions.surf.util.I18NUtil;
/**
@@ -696,4 +701,88 @@ public class DbNodeServiceImplTest extends BaseNodeServiceTest
// expect to go here
}
}
/**
 * Check that the maximum string lengths can be adjusted up and down.
 * Note that this test ONLY works for MySQL because the other databases cannot support more than 1024 characters
 * in the string_value column and the value may not be set to less than 1024.
 *
 * @see SchemaBootstrap#DEFAULT_MAX_STRING_LENGTH
 */
@SuppressWarnings("deprecation")
public void testNodeStringLengthWorker() throws Exception
{
    setComplete();
    endTransaction();

    // Skip if the dialect is not MySQL
    Dialect dialect = (Dialect) applicationContext.getBean("dialect");
    if (!(dialect instanceof MySQLInnoDBDialect))
    {
        return;
    }

    SchemaBootstrap schemaBootstrap = (SchemaBootstrap) applicationContext.getBean("schemaBootstrap");
    assertEquals("Expected max string length to be MAX", Integer.MAX_VALUE, SchemaBootstrap.getMaxStringLength());

    NodeStringLengthWorker worker = (NodeStringLengthWorker) applicationContext.getBean("nodeStringLengthWorker");
    // Run the worker once just to get everything into the correct starting state.
    // If it does not work, then that will be detected later anyway
    NodeStringLengthWorkResult result = worker.execute();
    assertTrue(result.getPropertiesProcessed() > 0);
    assertEquals(0, result.getErrors());

    // Now set the max string length to DEFAULT_MAX_STRING_LENGTH characters
    schemaBootstrap.setMaximumStringLength(SchemaBootstrap.DEFAULT_MAX_STRING_LENGTH);
    schemaBootstrap.onApplicationEvent(new ContextRefreshedEvent(applicationContext));
    // Move any values persisted before the test
    result = worker.execute();
    int firstPassChanged = result.getPropertiesChanged();

    // Build a string one character longer than the reduced maximum
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < SchemaBootstrap.DEFAULT_MAX_STRING_LENGTH + 1; i++)
    {
        sb.append("A");
    }
    final String longString = sb.toString();

    // Persist the property using the default MAX_VALUE so that it goes into the string_value column
    schemaBootstrap.setMaximumStringLength(Integer.MAX_VALUE);
    schemaBootstrap.onApplicationEvent(new ContextRefreshedEvent(applicationContext));
    txnService.getRetryingTransactionHelper().doInTransaction(new RetryingTransactionCallback<Void>()
    {
        @Override
        public Void execute() throws Throwable
        {
            nodeService.setProperty(rootNodeRef, PROP_QNAME_STRING_VALUE, longString);
            return null;
        }
    });
    // The worker should do nothing
    result = worker.execute();
    assertEquals(firstPassChanged, result.getPropertiesChanged());

    // Now bring the limit down to the match for other DBs
    schemaBootstrap.setMaximumStringLength(SchemaBootstrap.DEFAULT_MAX_STRING_LENGTH);
    schemaBootstrap.onApplicationEvent(new ContextRefreshedEvent(applicationContext));
    result = worker.execute();
    assertEquals(firstPassChanged + 1, result.getPropertiesChanged());

    // Put the limit back to the MySQL default and all the large values should go back into MySQL's TEXT field
    schemaBootstrap.setMaximumStringLength(Integer.MAX_VALUE);
    schemaBootstrap.onApplicationEvent(new ContextRefreshedEvent(applicationContext));
    result = worker.execute();
    assertEquals(firstPassChanged + 1, result.getPropertiesChanged());

    // Check that our string is still OK
    String checkLongString = txnService.getRetryingTransactionHelper().doInTransaction(new RetryingTransactionCallback<String>()
    {
        @Override
        public String execute() throws Throwable
        {
            return (String) nodeService.getProperty(rootNodeRef, PROP_QNAME_STRING_VALUE);
        }
    });
    assertEquals("String manipulation corrupted the long string value. ", longString, checkLongString);
}
}