Merged 5.0.N (5.0.3) to 5.1.N (5.1.1) (PARTIAL MERGE)

114790 amorarasu: MNT-15007: CLONE - String values when migrating from MySQL to other DBs
      Merged V4.2-BUG-FIX (4.2.6) to 5.0.N (5.0.3)
         114311 amorarasu: Merged V4.1-BUG-FIX (4.1.11) to V4.2-BUG-FIX (4.2.6)
            114245 tvalkevych: Merged V4.1.9 (4.1.9.13) to V4.1-BUG-FIX (4.1.11)
               113717 dhulley: MNT-14911: String values when migrating from MySQL to other DBs
                - Add a new job that allows node string values to be re-persisted according to the current 'system.maximumStringLength' value
                - Job is unscheduled by default
                - Set the 'system.maximumStringLength' and the 'system.maximumStringLength.jobCronExpression'
                - Various touched code format fixes, method naming fixes, etc


git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/alfresco/BRANCHES/DEV/5.1.N/root@114988 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
This commit is contained in:
Raluca Munteanu
2015-10-23 07:46:06 +00:00
parent fb20d2a4e2
commit d0f097601a
15 changed files with 1185 additions and 444 deletions

View File

@@ -750,6 +750,17 @@
</where> </where>
</select> </select>
<select id="select_PropertiesByActualType" parameterType="Ids" resultMap="result_NodeProperty">
<include refid="alfresco.node.select_NodeProperty_Results"/>
from
alf_node node
join alf_node_properties prop on (prop.node_id = node.id)
where
actual_type_n = #{idOne} and
<![CDATA[node.id >= #{idTwo}]]> and
<![CDATA[node.id < #{idThree}]]>
</select>
<sql id="select_NodeAspects_Results"> <sql id="select_NodeAspects_Results">
select select
node.id as node_id, node.id as node_id,
@@ -757,7 +768,7 @@
aspects.qname_id as qname_id aspects.qname_id as qname_id
</sql> </sql>
<select id="select_NodeAspects" parameterType="Ids" resultMap="result_NodeAspects"> <select id="select_NodeAspects" parameterType="NodeAspects" resultMap="result_NodeAspects">
<include refid="alfresco.node.select_NodeAspects_Results"/> <include refid="alfresco.node.select_NodeAspects_Results"/>
from from
alf_node node alf_node node
@@ -1194,6 +1205,20 @@
<if test="qnameLocalName != null">and assoc.qname_localname = #{qnameLocalName}</if> <if test="qnameLocalName != null">and assoc.qname_localname = #{qnameLocalName}</if>
<if test="isPrimary != null">and assoc.is_primary = #{isPrimary}</if> <if test="isPrimary != null">and assoc.is_primary = #{isPrimary}</if>
</select> </select>
<select id="select_NodeMinId" resultType="java.lang.Long">
select
min(id)
from
alf_node
</select>
<select id="select_NodeMaxId" resultType="java.lang.Long">
select
max(id)
from
alf_node
</select>
<sql id="select_Transaction_Results"> <sql id="select_Transaction_Results">
select select

View File

@@ -235,6 +235,17 @@
<value>${index.tracking.purgeSize}</value> <value>${index.tracking.purgeSize}</value>
</property> </property>
</bean> </bean>
<!-- String length adjustment -->
<bean id="nodeStringLengthWorker" class="org.alfresco.repo.node.db.NodeStringLengthWorker">
<constructor-arg index="0" ref="nodeDAO" />
<constructor-arg index="1" ref="jobLockService" />
<constructor-arg index="2" ref="transactionService" />
<constructor-arg index="3" ref="qnameDAO" />
<constructor-arg index="4" ref="policyBehaviourFilter" />
<constructor-arg index="5" value="${system.maximumStringLength.jobQueryRange}" />
<constructor-arg index="6" value="${system.maximumStringLength.jobThreadCount}" />
</bean>
<bean id="storesToIgnorePolicies" class="org.springframework.beans.factory.config.SetFactoryBean"> <bean id="storesToIgnorePolicies" class="org.springframework.beans.factory.config.SetFactoryBean">
<property name="sourceSet"> <property name="sourceSet">

View File

@@ -213,10 +213,14 @@ system.readpermissions.bulkfetchsize=1000
# #
# Manually control how the system handles maximum string lengths. # Manually control how the system handles maximum string lengths.
# Any zero or negative value is ignored. # Any zero or negative value is ignored.
# Only change this after consulting support or reading the appropriate Javadocs for # Only change this after consulting support or reading the appropriate Javadocs for
# org.alfresco.repo.domain.schema.SchemaBootstrap for V2.1.2 # org.alfresco.repo.domain.schema.SchemaBootstrap for V2.1.2.
# Before database migration, the string value storage may need to be adjusted using the scheduled job
system.maximumStringLength=-1 system.maximumStringLength=-1
system.maximumStringLength.jobCronExpression=* * * * * ? 2099
system.maximumStringLength.jobQueryRange=10000
system.maximumStringLength.jobThreadCount=4
# #
# Limit hibernate session size by trying to amalgamate events for the L2 session invalidation # Limit hibernate session size by trying to amalgamate events for the L2 session invalidation

View File

@@ -120,6 +120,20 @@
</property> </property>
</bean> </bean>
<bean id="maxStringLengthJobDetail" class="org.springframework.scheduling.quartz.JobDetailBean">
<property name="jobClass" value="org.alfresco.repo.node.db.NodeStringLengthWorker$NodeStringLengthJob" />
<property name="jobDataAsMap">
<map>
<entry key="nodeStringLengthWorker" value-ref="nodeStringLengthWorker" />
</map>
</property>
</bean>
<bean id="maxStringLengthJobTrigger" class="org.alfresco.util.CronTriggerBean">
<property name="jobDetail" ref="maxStringLengthJobDetail" />
<property name="scheduler" ref="schedulerFactory" />
<property name="cronExpression" value="${system.maximumStringLength.jobCronExpression}" />
</bean>
<bean id="nodeServiceCleanupJobDetail" class="org.springframework.scheduling.quartz.JobDetailBean"> <bean id="nodeServiceCleanupJobDetail" class="org.springframework.scheduling.quartz.JobDetailBean">
<property name="jobClass"> <property name="jobClass">
<value>org.alfresco.repo.node.cleanup.NodeCleanupJob</value> <value>org.alfresco.repo.node.cleanup.NodeCleanupJob</value>

View File

@@ -33,47 +33,47 @@ public class BootstrapReEncryptor extends AbstractLifecycleBean
{ {
private static Log logger = LogFactory.getLog(BootstrapReEncryptor.class); private static Log logger = LogFactory.getLog(BootstrapReEncryptor.class);
private boolean enabled; private boolean enabled;
private ReEncryptor reEncryptor; private ReEncryptor reEncryptor;
public void setEnabled(boolean enabled) public void setEnabled(boolean enabled)
{ {
this.enabled = enabled; this.enabled = enabled;
} }
public void setReEncryptor(ReEncryptor reEncryptor) public void setReEncryptor(ReEncryptor reEncryptor)
{ {
this.reEncryptor = reEncryptor; this.reEncryptor = reEncryptor;
} }
public int reEncrypt() public int reEncrypt()
{ {
try try
{ {
return reEncryptor.bootstrapReEncrypt(); return reEncryptor.bootstrapReEncrypt();
} }
catch(MissingKeyException e) catch(MissingKeyException e)
{ {
throw new AlfrescoRuntimeException("Bootstrap re-encryption failed", e); throw new AlfrescoRuntimeException("Bootstrap re-encryption failed", e);
} }
} }
@Override @Override
protected void onBootstrap(ApplicationEvent event) protected void onBootstrap(ApplicationEvent event)
{ {
if(enabled) if(enabled)
{ {
if(logger.isDebugEnabled()) if(logger.isDebugEnabled())
{ {
logger.debug("Re-encrypting encryptable properties..."); logger.debug("Re-encrypting encryptable properties...");
} }
int propertiesReEncrypted = reEncrypt(); int propertiesReEncrypted = reEncrypt();
if(logger.isDebugEnabled()) if(logger.isDebugEnabled())
{ {
logger.debug("...done, re-encrypted " + propertiesReEncrypted + " properties."); logger.debug("...done, re-encrypted " + propertiesReEncrypted + " properties.");
} }
} }
} }
@Override @Override
protected void onShutdown(ApplicationEvent event) protected void onShutdown(ApplicationEvent event)

View File

@@ -41,44 +41,44 @@ public class EncryptionChecker extends AbstractLifecycleBean
private TransactionService transactionService; private TransactionService transactionService;
private KeyStoreChecker keyStoreChecker; private KeyStoreChecker keyStoreChecker;
public void setKeyStoreChecker(KeyStoreChecker keyStoreChecker) public void setKeyStoreChecker(KeyStoreChecker keyStoreChecker)
{ {
this.keyStoreChecker = keyStoreChecker; this.keyStoreChecker = keyStoreChecker;
} }
public void setTransactionService(TransactionService transactionService) public void setTransactionService(TransactionService transactionService)
{ {
this.transactionService = transactionService; this.transactionService = transactionService;
} }
@Override @Override
protected void onBootstrap(ApplicationEvent event) protected void onBootstrap(ApplicationEvent event)
{ {
RetryingTransactionHelper txnHelper = transactionService.getRetryingTransactionHelper(); RetryingTransactionHelper txnHelper = transactionService.getRetryingTransactionHelper();
txnHelper.setForceWritable(true); // Force write in case server is read-only txnHelper.setForceWritable(true); // Force write in case server is read-only
txnHelper.doInTransaction(new RetryingTransactionCallback<Void>() txnHelper.doInTransaction(new RetryingTransactionCallback<Void>()
{ {
public Void execute() throws Throwable public Void execute() throws Throwable
{ {
try try
{ {
keyStoreChecker.validateKeyStores(); keyStoreChecker.validateKeyStores();
} }
catch(Throwable e) catch(Throwable e)
{ {
// Just throw as a runtime exception // Just throw as a runtime exception
throw new AlfrescoRuntimeException("Keystores are invalid", e); throw new AlfrescoRuntimeException("Keystores are invalid", e);
} }
return null; return null;
} }
}); });
} }
@Override @Override
protected void onShutdown(ApplicationEvent event) protected void onShutdown(ApplicationEvent event)
{ {
} }
} }

View File

@@ -57,170 +57,170 @@ public class EncryptionKeysRegistryImpl implements EncryptionKeysRegistry
private String cipherAlgorithm; private String cipherAlgorithm;
private String cipherProvider; private String cipherProvider;
public void setAttributeService(AttributeService attributeService) public void setAttributeService(AttributeService attributeService)
{ {
this.attributeService = attributeService; this.attributeService = attributeService;
} }
public void setCipherAlgorithm(String cipherAlgorithm) public void setCipherAlgorithm(String cipherAlgorithm)
{ {
this.cipherAlgorithm = cipherAlgorithm; this.cipherAlgorithm = cipherAlgorithm;
} }
public void setCipherProvider(String cipherProvider) public void setCipherProvider(String cipherProvider)
{ {
this.cipherProvider = cipherProvider; this.cipherProvider = cipherProvider;
} }
public void setTransactionService(TransactionService transactionService) public void setTransactionService(TransactionService transactionService)
{ {
this.transactionService = transactionService; this.transactionService = transactionService;
} }
protected Encryptor getEncryptor(final KeyMap keys) protected Encryptor getEncryptor(final KeyMap keys)
{ {
DefaultEncryptor encryptor = new DefaultEncryptor(); DefaultEncryptor encryptor = new DefaultEncryptor();
encryptor.setCipherAlgorithm(cipherAlgorithm); encryptor.setCipherAlgorithm(cipherAlgorithm);
encryptor.setCipherProvider(cipherProvider); encryptor.setCipherProvider(cipherProvider);
encryptor.setKeyProvider(new KeyProvider() encryptor.setKeyProvider(new KeyProvider()
{ {
@Override @Override
public Key getKey(String keyAlias) public Key getKey(String keyAlias)
{ {
return keys.getCachedKey(keyAlias).getKey(); return keys.getCachedKey(keyAlias).getKey();
} }
}); });
return encryptor; return encryptor;
} }
public void init() public void init()
{ {
} }
public void registerKey(String keyAlias, Key key) public void registerKey(String keyAlias, Key key)
{ {
if(isKeyRegistered(keyAlias)) if(isKeyRegistered(keyAlias))
{ {
throw new IllegalArgumentException("Key " + keyAlias + " is already registered"); throw new IllegalArgumentException("Key " + keyAlias + " is already registered");
} }
// register the key by creating an attribute that stores a guid and its encrypted value // register the key by creating an attribute that stores a guid and its encrypted value
String guid = GUID.generate(); String guid = GUID.generate();
KeyMap keys = new KeyMap(); KeyMap keys = new KeyMap();
keys.setKey(keyAlias, key); keys.setKey(keyAlias, key);
Encryptor encryptor = getEncryptor(keys); Encryptor encryptor = getEncryptor(keys);
Serializable encrypted = encryptor.sealObject(keyAlias, null, guid); Serializable encrypted = encryptor.sealObject(keyAlias, null, guid);
Pair<String, Serializable> keyCheck = new Pair<String, Serializable>(guid, encrypted); Pair<String, Serializable> keyCheck = new Pair<String, Serializable>(guid, encrypted);
attributeService.createAttribute(keyCheck, TOP_LEVEL_KEY, keyAlias); attributeService.createAttribute(keyCheck, TOP_LEVEL_KEY, keyAlias);
logger.info("Registered key " + keyAlias); logger.info("Registered key " + keyAlias);
} }
public void unregisterKey(String keyAlias) public void unregisterKey(String keyAlias)
{ {
attributeService.removeAttribute(TOP_LEVEL_KEY, keyAlias); attributeService.removeAttribute(TOP_LEVEL_KEY, keyAlias);
} }
public boolean isKeyRegistered(String keyAlias) public boolean isKeyRegistered(String keyAlias)
{ {
try try
{ {
return (attributeService.getAttribute(TOP_LEVEL_KEY, keyAlias) != null); return (attributeService.getAttribute(TOP_LEVEL_KEY, keyAlias) != null);
} }
catch(Throwable e) catch(Throwable e)
{ {
// there is an issue getting the attribute. Remove it. // there is an issue getting the attribute. Remove it.
attributeService.removeAttribute(TOP_LEVEL_KEY, keyAlias); attributeService.removeAttribute(TOP_LEVEL_KEY, keyAlias);
return (attributeService.getAttribute(TOP_LEVEL_KEY, keyAlias) != null); return (attributeService.getAttribute(TOP_LEVEL_KEY, keyAlias) != null);
} }
} }
public List<String> getRegisteredKeys(final Set<String> keyStoreKeys) public List<String> getRegisteredKeys(final Set<String> keyStoreKeys)
{ {
final List<String> registeredKeys = new ArrayList<String>(); final List<String> registeredKeys = new ArrayList<String>();
attributeService.getAttributes(new AttributeQueryCallback() attributeService.getAttributes(new AttributeQueryCallback()
{ {
public boolean handleAttribute(Long id, Serializable value, public boolean handleAttribute(Long id, Serializable value,
Serializable[] keys) Serializable[] keys)
{ {
// Add as a registered key if the keystore contains the key // Add as a registered key if the keystore contains the key
String keyAlias = (String)keys[1]; String keyAlias = (String)keys[1];
if(keyStoreKeys.contains(keyAlias)) if(keyStoreKeys.contains(keyAlias))
{ {
registeredKeys.add(keyAlias); registeredKeys.add(keyAlias);
} }
return true; return true;
} }
}, },
TOP_LEVEL_KEY); TOP_LEVEL_KEY);
return registeredKeys; return registeredKeys;
} }
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
public KEY_STATUS checkKey(String keyAlias, Key key) public KEY_STATUS checkKey(String keyAlias, Key key)
{ {
Pair<String, Serializable> keyCheck = null; Pair<String, Serializable> keyCheck = null;
if(attributeService.exists(TOP_LEVEL_KEY, keyAlias)) if(attributeService.exists(TOP_LEVEL_KEY, keyAlias))
{ {
try try
{ {
// check that the key has not changed by decrypting the encrypted guid attribute // check that the key has not changed by decrypting the encrypted guid attribute
// comparing against the guid // comparing against the guid
try try
{ {
keyCheck = (Pair<String, Serializable>)attributeService.getAttribute(TOP_LEVEL_KEY, keyAlias); keyCheck = (Pair<String, Serializable>)attributeService.getAttribute(TOP_LEVEL_KEY, keyAlias);
} }
catch(Throwable e) catch(Throwable e)
{ {
// there is an issue getting the attribute. Remove it. // there is an issue getting the attribute. Remove it.
attributeService.removeAttribute(TOP_LEVEL_KEY, keyAlias); attributeService.removeAttribute(TOP_LEVEL_KEY, keyAlias);
return KEY_STATUS.MISSING; return KEY_STATUS.MISSING;
} }
if(keyCheck == null) if(keyCheck == null)
{ {
return KEY_STATUS.MISSING; return KEY_STATUS.MISSING;
} }
KeyMap keys = new KeyMap(); KeyMap keys = new KeyMap();
keys.setKey(keyAlias, key); keys.setKey(keyAlias, key);
Encryptor encryptor = getEncryptor(keys); Encryptor encryptor = getEncryptor(keys);
Serializable storedGUID = encryptor.unsealObject(keyAlias, keyCheck.getSecond()); Serializable storedGUID = encryptor.unsealObject(keyAlias, keyCheck.getSecond());
return EqualsHelper.nullSafeEquals(storedGUID, keyCheck.getFirst()) ? KEY_STATUS.OK : KEY_STATUS.CHANGED; return EqualsHelper.nullSafeEquals(storedGUID, keyCheck.getFirst()) ? KEY_STATUS.OK : KEY_STATUS.CHANGED;
} }
catch(InvalidKeyException e) catch(InvalidKeyException e)
{ {
// key exception indicates that the key has changed - it can't decrypt the // key exception indicates that the key has changed - it can't decrypt the
// previously-encrypted data // previously-encrypted data
return KEY_STATUS.CHANGED; return KEY_STATUS.CHANGED;
} }
} }
else else
{ {
return KEY_STATUS.MISSING; return KEY_STATUS.MISSING;
} }
} }
// note that this removes _all_ keys in the keystore. Use with care. // note that this removes _all_ keys in the keystore. Use with care.
public void removeRegisteredKeys(final Set<String> keys) public void removeRegisteredKeys(final Set<String> keys)
{ {
RetryingTransactionHelper retryingTransactionHelper = transactionService.getRetryingTransactionHelper(); RetryingTransactionHelper retryingTransactionHelper = transactionService.getRetryingTransactionHelper();
final RetryingTransactionCallback<Void> removeKeysCallback = new RetryingTransactionCallback<Void>() final RetryingTransactionCallback<Void> removeKeysCallback = new RetryingTransactionCallback<Void>()
{ {
public Void execute() throws Throwable public Void execute() throws Throwable
{ {
for(String keyAlias : keys) for(String keyAlias : keys)
{ {
attributeService.removeAttribute(TOP_LEVEL_KEY, keyAlias); attributeService.removeAttribute(TOP_LEVEL_KEY, keyAlias);
} }
return null; return null;
} }
}; };
retryingTransactionHelper.doInTransaction(removeKeysCallback, false); retryingTransactionHelper.doInTransaction(removeKeysCallback, false);
} }
} }

View File

@@ -18,9 +18,6 @@
*/ */
package org.alfresco.encryption; package org.alfresco.encryption;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/** /**
* Checks the repository key stores. * Checks the repository key stores.
* *
@@ -29,8 +26,6 @@ import org.apache.commons.logging.LogFactory;
*/ */
public class KeyStoreChecker public class KeyStoreChecker
{ {
private static final Log logger = LogFactory.getLog(KeyStoreChecker.class);
private AlfrescoKeyStore mainKeyStore; private AlfrescoKeyStore mainKeyStore;
public KeyStoreChecker() public KeyStoreChecker()
@@ -38,16 +33,16 @@ public class KeyStoreChecker
} }
public void setMainKeyStore(AlfrescoKeyStore mainKeyStore) public void setMainKeyStore(AlfrescoKeyStore mainKeyStore)
{ {
this.mainKeyStore = mainKeyStore; this.mainKeyStore = mainKeyStore;
} }
public void validateKeyStores() throws InvalidKeystoreException, MissingKeyException public void validateKeyStores() throws InvalidKeystoreException, MissingKeyException
{ {
mainKeyStore.validateKeys(); mainKeyStore.validateKeys();
if(!mainKeyStore.exists()) if(!mainKeyStore.exists())
{ {
mainKeyStore.create(); mainKeyStore.create();
} }
} }
} }

View File

@@ -22,8 +22,10 @@ import java.io.Serializable;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collection; import java.util.Collection;
import java.util.Collections; import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator; import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.Set;
import javax.crypto.SealedObject; import javax.crypto.SealedObject;
@@ -70,20 +72,20 @@ public class ReEncryptor implements ApplicationContextAware
{ {
private static Log logger = LogFactory.getLog(ReEncryptor.class); private static Log logger = LogFactory.getLog(ReEncryptor.class);
private NodeDAO nodeDAO; private NodeDAO nodeDAO;
private DictionaryDAO dictionaryDAO; private DictionaryDAO dictionaryDAO;
private QNameDAO qnameDAO; private QNameDAO qnameDAO;
private MetadataEncryptor metadataEncryptor; private MetadataEncryptor metadataEncryptor;
private ApplicationContext applicationContext; private ApplicationContext applicationContext;
private TransactionService transactionService; private TransactionService transactionService;
private RetryingTransactionHelper transactionHelper; private RetryingTransactionHelper transactionHelper;
private int numThreads; private int numThreads;
private int chunkSize; private int chunkSize;
private boolean splitTxns = true; private boolean splitTxns = true;
private static final QName LOCK = QName.createQName(NamespaceService.SYSTEM_MODEL_1_0_URI, "OrphanReaper"); private static final QName LOCK = QName.createQName(NamespaceService.SYSTEM_MODEL_1_0_URI, "OrphanReaper");
private JobLockService jobLockService; private JobLockService jobLockService;
@@ -92,54 +94,54 @@ public class ReEncryptor implements ApplicationContextAware
*/ */
public void setTransactionService(TransactionService transactionService) public void setTransactionService(TransactionService transactionService)
{ {
this.transactionService = transactionService; this.transactionService = transactionService;
this.transactionHelper = transactionService.getRetryingTransactionHelper(); this.transactionHelper = transactionService.getRetryingTransactionHelper();
} }
public void setMetadataEncryptor(MetadataEncryptor metadataEncryptor) public void setMetadataEncryptor(MetadataEncryptor metadataEncryptor)
{ {
this.metadataEncryptor = metadataEncryptor; this.metadataEncryptor = metadataEncryptor;
} }
public MetadataEncryptor getMetadataEncryptor() public MetadataEncryptor getMetadataEncryptor()
{ {
return metadataEncryptor; return metadataEncryptor;
} }
public void setJobLockService(JobLockService jobLockService) public void setJobLockService(JobLockService jobLockService)
{ {
this.jobLockService = jobLockService; this.jobLockService = jobLockService;
} }
public void setNumThreads(int numThreads) public void setNumThreads(int numThreads)
{ {
this.numThreads = numThreads; this.numThreads = numThreads;
} }
public void setChunkSize(int chunkSize) public void setChunkSize(int chunkSize)
{ {
this.chunkSize = chunkSize; this.chunkSize = chunkSize;
} }
public void setSplitTxns(boolean splitTxns) public void setSplitTxns(boolean splitTxns)
{ {
this.splitTxns = splitTxns; this.splitTxns = splitTxns;
} }
public void setNodeDAO(NodeDAO nodeDAO) public void setNodeDAO(NodeDAO nodeDAO)
{ {
this.nodeDAO = nodeDAO; this.nodeDAO = nodeDAO;
} }
public void setDictionaryDAO(DictionaryDAO dictionaryDAO) public void setDictionaryDAO(DictionaryDAO dictionaryDAO)
{ {
this.dictionaryDAO = dictionaryDAO; this.dictionaryDAO = dictionaryDAO;
} }
public void setQnameDAO(QNameDAO qnameDAO) public void setQnameDAO(QNameDAO qnameDAO)
{ {
this.qnameDAO = qnameDAO; this.qnameDAO = qnameDAO;
} }
/** /**
* Attempts to get the lock. If the lock couldn't be taken, then <tt>null</tt> is returned. * Attempts to get the lock. If the lock couldn't be taken, then <tt>null</tt> is returned.
@@ -170,12 +172,12 @@ public class ReEncryptor implements ApplicationContextAware
jobLockService.refreshLock(lockToken, LOCK, time); jobLockService.refreshLock(lockToken, LOCK, time);
} }
protected void reEncryptProperties(final List<NodePropertyEntity> properties, final String lockToken) protected void reEncryptProperties(final List<NodePropertyEntity> properties, final String lockToken)
{ {
final Iterator<NodePropertyEntity> it = properties.iterator(); final Iterator<NodePropertyEntity> it = properties.iterator();
// TODO use BatchProcessWorkerAdaptor? // TODO use BatchProcessWorkerAdaptor?
BatchProcessor.BatchProcessWorker<NodePropertyEntity> worker = new BatchProcessor.BatchProcessWorker<NodePropertyEntity>() BatchProcessor.BatchProcessWorker<NodePropertyEntity> worker = new BatchProcessor.BatchProcessWorker<NodePropertyEntity>()
{ {
public String getIdentifier(NodePropertyEntity entity) public String getIdentifier(NodePropertyEntity entity)
@@ -194,62 +196,62 @@ public class ReEncryptor implements ApplicationContextAware
public void process(final NodePropertyEntity entity) throws Throwable public void process(final NodePropertyEntity entity) throws Throwable
{ {
NodePropertyValue nodePropValue = entity.getValue(); NodePropertyValue nodePropValue = entity.getValue();
// TODO check that we have the correct type i.e. can be cast to Serializable // TODO check that we have the correct type i.e. can be cast to Serializable
Serializable value = nodePropValue.getSerializableValue(); Serializable value = nodePropValue.getSerializableValue();
if(value instanceof SealedObject) if(value instanceof SealedObject)
{ {
SealedObject sealed = (SealedObject)value; SealedObject sealed = (SealedObject)value;
NodePropertyKey propertyKey = entity.getKey(); NodePropertyKey propertyKey = entity.getKey();
QName propertyQName = qnameDAO.getQName(propertyKey.getQnameId()).getSecond(); QName propertyQName = qnameDAO.getQName(propertyKey.getQnameId()).getSecond();
// decrypt... // decrypt...
Serializable decrypted = metadataEncryptor.decrypt(propertyQName, sealed); Serializable decrypted = metadataEncryptor.decrypt(propertyQName, sealed);
// ...and then re-encrypt. The new key will be used. // ...and then re-encrypt. The new key will be used.
Serializable resealed = metadataEncryptor.encrypt(propertyQName, decrypted); Serializable resealed = metadataEncryptor.encrypt(propertyQName, decrypted);
// TODO update resealed using batch update? // TODO update resealed using batch update?
// does the node DAO do batch updating? // does the node DAO do batch updating?
nodeDAO.setNodeProperties(entity.getNodeId(), Collections.singletonMap(propertyQName, resealed)); nodeDAO.setNodeProperties(entity.getNodeId(), Collections.singletonMap(propertyQName, resealed));
} }
else else
{ {
NodePropertyKey nodeKey = entity.getKey(); NodePropertyKey nodeKey = entity.getKey();
QName propertyQName = qnameDAO.getQName(nodeKey.getQnameId()).getSecond(); QName propertyQName = qnameDAO.getQName(nodeKey.getQnameId()).getSecond();
logger.warn("Encountered an encrypted property that is not a SealedObject, for node id " + logger.warn("Encountered an encrypted property that is not a SealedObject, for node id " +
entity.getNodeId() + ", property " + propertyQName); entity.getNodeId() + ", property " + propertyQName);
} }
} }
}; };
BatchProcessWorkProvider<NodePropertyEntity> provider = new BatchProcessWorkProvider<NodePropertyEntity>() BatchProcessWorkProvider<NodePropertyEntity> provider = new BatchProcessWorkProvider<NodePropertyEntity>()
{ {
@Override @Override
public int getTotalEstimatedWorkSize() public int getTotalEstimatedWorkSize()
{ {
return properties.size(); return properties.size();
} }
@Override @Override
public Collection<NodePropertyEntity> getNextWork() public Collection<NodePropertyEntity> getNextWork()
{ {
List<NodePropertyEntity> sublist = new ArrayList<NodePropertyEntity>(chunkSize); List<NodePropertyEntity> sublist = new ArrayList<NodePropertyEntity>(chunkSize);
synchronized(it) synchronized(it)
{ {
int count = 0; int count = 0;
while(it.hasNext() && count < chunkSize) while(it.hasNext() && count < chunkSize)
{ {
sublist.add(it.next()); sublist.add(it.next());
count++; count++;
} }
} }
return sublist; return sublist;
} }
}; };
new BatchProcessor<NodePropertyEntity>( new BatchProcessor<NodePropertyEntity>(
"Reencryptor", "Reencryptor",
@@ -258,54 +260,54 @@ public class ReEncryptor implements ApplicationContextAware
numThreads, chunkSize, numThreads, chunkSize,
applicationContext, applicationContext,
logger, 100).process(worker, splitTxns); logger, 100).process(worker, splitTxns);
} }
/** /**
* Re-encrypt using the configured backup keystore to decrypt and the main keystore to encrypt * Re-encrypt using the configured backup keystore to decrypt and the main keystore to encrypt
*/ */
public int bootstrapReEncrypt() throws MissingKeyException public int bootstrapReEncrypt() throws MissingKeyException
{ {
if(!metadataEncryptor.backupKeyAvailable(KeyProvider.ALIAS_METADATA)) if(!metadataEncryptor.backupKeyAvailable(KeyProvider.ALIAS_METADATA))
{ {
throw new MissingKeyException("Backup key store is either not present or does not contain a metadata encryption key"); throw new MissingKeyException("Backup key store is either not present or does not contain a metadata encryption key");
} }
return reEncrypt(); return reEncrypt();
} }
/** /**
* Re-encrypt by decrypting using the configured keystore and encrypting using a keystore configured using the provided new key store parameters. * Re-encrypt by decrypting using the configured keystore and encrypting using a keystore configured using the provided new key store parameters.
* Called from e.g. JMX. * Called from e.g. JMX.
* *
* Assumes that the main key store has been already been reloaded. * Assumes that the main key store has been already been reloaded.
* *
* Note: it is the responsibility of the end user to ensure that the underlying keystores have been set up appropriately * Note: it is the responsibility of the end user to ensure that the underlying keystores have been set up appropriately
* i.e. the old key store is backed up to the location defined by the property '${dir.keystore}/backup-keystore' and the new * i.e. the old key store is backed up to the location defined by the property '${dir.keystore}/backup-keystore' and the new
* key store replaces it. This can be done while the repository is running. * key store replaces it. This can be done while the repository is running.
*/ */
public int reEncrypt() throws MissingKeyException public int reEncrypt() throws MissingKeyException
{ {
if(!metadataEncryptor.keyAvailable(KeyProvider.ALIAS_METADATA)) if(!metadataEncryptor.keyAvailable(KeyProvider.ALIAS_METADATA))
{ {
throw new MissingKeyException("Main key store is either not present or does not contain a metadata encryption key"); throw new MissingKeyException("Main key store is either not present or does not contain a metadata encryption key");
} }
if(!metadataEncryptor.backupKeyAvailable(KeyProvider.ALIAS_METADATA)) if(!metadataEncryptor.backupKeyAvailable(KeyProvider.ALIAS_METADATA))
{ {
throw new MissingKeyException("Backup key store is either not present or does not contain a metadata encryption key"); throw new MissingKeyException("Backup key store is either not present or does not contain a metadata encryption key");
} }
int numProps = reEncryptImpl(); int numProps = reEncryptImpl();
return numProps; return numProps;
} }
protected int reEncryptImpl() protected int reEncryptImpl()
{ {
// Take out a re-encryptor lock // Take out a re-encryptor lock
RetryingTransactionCallback<String> txnWork = new RetryingTransactionCallback<String>() RetryingTransactionCallback<String> txnWork = new RetryingTransactionCallback<String>()
{ {
public String execute() throws Exception public String execute() throws Exception
{ {
String lockToken = getLock(20000L); String lockToken = getLock(20000L);
return lockToken; return lockToken;
} }
}; };
@@ -316,30 +318,36 @@ public class ReEncryptor implements ApplicationContextAware
return 0; return 0;
} }
// get encrypted properties // get encrypted properties
Collection<PropertyDefinition> propertyDefs = dictionaryDAO.getPropertiesOfDataType(DataTypeDefinition.ENCRYPTED); Collection<PropertyDefinition> propertyDefs = dictionaryDAO.getPropertiesOfDataType(DataTypeDefinition.ENCRYPTED);
// TODO use callback mechanism, or select based on set of nodes? Set<QName> qnames = new HashSet<QName>();
List<NodePropertyEntity> properties = nodeDAO.selectProperties(propertyDefs); for(PropertyDefinition propDef : propertyDefs)
{
qnames.add(propDef.getName());
}
if(logger.isDebugEnabled()) // TODO use callback mechanism, or select based on set of nodes?
{ List<NodePropertyEntity> properties = nodeDAO.selectNodePropertiesByTypes(qnames);
logger.debug("Found " + properties.size() + " properties to re-encrypt...");
}
// reencrypt these properties TODO don't call if num props == 0 if(logger.isDebugEnabled())
reEncryptProperties(properties, lockToken); {
logger.debug("Found " + properties.size() + " properties to re-encrypt...");
}
if(logger.isDebugEnabled()) // reencrypt these properties TODO don't call if num props == 0
{ reEncryptProperties(properties, lockToken);
logger.debug("...done re-encrypting.");
}
return properties.size(); if(logger.isDebugEnabled())
} {
logger.debug("...done re-encrypting.");
}
@Override return properties.size();
public void setApplicationContext(ApplicationContext applicationContext) throws BeansException }
{
this.applicationContext = applicationContext; @Override
} public void setApplicationContext(ApplicationContext applicationContext) throws BeansException
{
this.applicationContext = applicationContext;
}
} }

View File

@@ -29,8 +29,8 @@ import java.util.Set;
import org.alfresco.repo.node.NodeBulkLoader; import org.alfresco.repo.node.NodeBulkLoader;
import org.alfresco.repo.transaction.TransactionalResourceHelper; import org.alfresco.repo.transaction.TransactionalResourceHelper;
import org.alfresco.service.cmr.dictionary.DataTypeDefinition;
import org.alfresco.service.cmr.dictionary.InvalidTypeException; import org.alfresco.service.cmr.dictionary.InvalidTypeException;
import org.alfresco.service.cmr.dictionary.PropertyDefinition;
import org.alfresco.service.cmr.repository.AssociationRef; import org.alfresco.service.cmr.repository.AssociationRef;
import org.alfresco.service.cmr.repository.ChildAssociationRef; import org.alfresco.service.cmr.repository.ChildAssociationRef;
import org.alfresco.service.cmr.repository.InvalidNodeRefException; import org.alfresco.service.cmr.repository.InvalidNodeRefException;
@@ -816,8 +816,8 @@ public interface NodeDAO extends NodeBulkLoader
/** /**
* Remove unused transactions from commit time 'fromCommitTime' to commit time 'toCommitTime' * Remove unused transactions from commit time 'fromCommitTime' to commit time 'toCommitTime'
* *
* @param fromCommitTime delete unused transactions from commit time * @param fromCommitTime delete unused transactions from commit time
* @param toCommitTime delete unused transactions to commit time * @param toCommitTime delete unused transactions to commit time
* *
* @return int * @return int
*/ */
@@ -856,6 +856,16 @@ public interface NodeDAO extends NodeBulkLoader
*/ */
public Long getMaxTxnId(); public Long getMaxTxnId();
/**
* @return Returns the minimum node id or <tt>0</tt> if there are no nodes
*/
public Long getMinNodeId();
/**
* @return Returns the maximum node id or <tt>0</tt> if there are no nodes
*/
public Long getMaxNodeId();
/** /**
* Select children by property values * Select children by property values
*/ */
@@ -868,7 +878,17 @@ public interface NodeDAO extends NodeBulkLoader
/** /**
* Used by the re-encryptor to re-encrypt encryptable properties with a new encryption key. * Used by the re-encryptor to re-encrypt encryptable properties with a new encryption key.
*/ */
public List<NodePropertyEntity> selectProperties(Collection<PropertyDefinition> propertyDefs); public List<NodePropertyEntity> selectNodePropertiesByTypes(Set<QName> qnames);
/**
* Select all node properties that are between two node IDs and of the given <b>actual</b> type
*
* @param dataType the actual, original type of the property, as given by one of the constants
* on {@link DataTypeDefinition#TEXT DataTypeDefinition}
* @param minNodeId the minimum node ID (inclusive)
* @param maxNodeId the maximum node ID (exclusive)
*/
public List<NodePropertyEntity> selectNodePropertiesByDataType(QName dataType, long minNodeId, long maxNodeId);
/** /**
* Counts the number of child associations directly under parentNodeId. * Counts the number of child associations directly under parentNodeId.

View File

@@ -51,7 +51,6 @@ import org.alfresco.repo.domain.node.TransactionEntity;
import org.alfresco.repo.domain.node.TransactionQueryEntity; import org.alfresco.repo.domain.node.TransactionQueryEntity;
import org.alfresco.repo.domain.qname.QNameDAO; import org.alfresco.repo.domain.qname.QNameDAO;
import org.alfresco.service.cmr.dictionary.DictionaryService; import org.alfresco.service.cmr.dictionary.DictionaryService;
import org.alfresco.service.cmr.dictionary.PropertyDefinition;
import org.alfresco.service.cmr.repository.ChildAssociationRef; import org.alfresco.service.cmr.repository.ChildAssociationRef;
import org.alfresco.service.cmr.repository.NodeRef; import org.alfresco.service.cmr.repository.NodeRef;
import org.alfresco.service.cmr.repository.StoreRef; import org.alfresco.service.cmr.repository.StoreRef;
@@ -97,12 +96,15 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
private static final String SELECT_NODES_BY_IDS = "alfresco.node.select_NodesByIds"; private static final String SELECT_NODES_BY_IDS = "alfresco.node.select_NodesByIds";
private static final String SELECT_NODE_PROPERTIES = "alfresco.node.select_NodeProperties"; private static final String SELECT_NODE_PROPERTIES = "alfresco.node.select_NodeProperties";
private static final String SELECT_PROPERTIES_BY_TYPES = "alfresco.node.select_PropertiesByTypes"; private static final String SELECT_PROPERTIES_BY_TYPES = "alfresco.node.select_PropertiesByTypes";
private static final String SELECT_PROPERTIES_BY_ACTUAL_TYPE = "alfresco.node.select_PropertiesByActualType";
private static final String SELECT_NODE_ASPECTS = "alfresco.node.select_NodeAspects"; private static final String SELECT_NODE_ASPECTS = "alfresco.node.select_NodeAspects";
private static final String INSERT_NODE_PROPERTY = "alfresco.node.insert_NodeProperty"; private static final String INSERT_NODE_PROPERTY = "alfresco.node.insert_NodeProperty";
private static final String UPDATE_PRIMARY_CHILDREN_SHARED_ACL = "alfresco.node.update.update_PrimaryChildrenSharedAcl"; private static final String UPDATE_PRIMARY_CHILDREN_SHARED_ACL = "alfresco.node.update.update_PrimaryChildrenSharedAcl";
private static final String INSERT_NODE_ASPECT = "alfresco.node.insert_NodeAspect"; private static final String INSERT_NODE_ASPECT = "alfresco.node.insert_NodeAspect";
private static final String DELETE_NODE_ASPECTS = "alfresco.node.delete_NodeAspects"; private static final String DELETE_NODE_ASPECTS = "alfresco.node.delete_NodeAspects";
private static final String DELETE_NODE_PROPERTIES = "alfresco.node.delete_NodeProperties"; private static final String DELETE_NODE_PROPERTIES = "alfresco.node.delete_NodeProperties";
private static final String SELECT_NODE_MIN_ID = "alfresco.node.select_NodeMinId";
private static final String SELECT_NODE_MAX_ID = "alfresco.node.select_NodeMaxId";
private static final String SELECT_NODES_WITH_ASPECT_IDS = "alfresco.node.select_NodesWithAspectIds"; private static final String SELECT_NODES_WITH_ASPECT_IDS = "alfresco.node.select_NodesWithAspectIds";
private static final String INSERT_NODE_ASSOC = "alfresco.node.insert.insert_NodeAssoc"; private static final String INSERT_NODE_ASSOC = "alfresco.node.insert.insert_NodeAssoc";
private static final String UPDATE_NODE_ASSOC = "alfresco.node.update_NodeAssoc"; private static final String UPDATE_NODE_ASSOC = "alfresco.node.update_NodeAssoc";
@@ -350,6 +352,18 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
return template.update(UPDATE_NODE_BULK_TOUCH, ids); return template.update(UPDATE_NODE_BULK_TOUCH, ids);
} }
@Override
public Long getMinNodeId()
{
    // Delegate to the mapped statement that selects the minimum alf_node ID
    final Long minId = (Long) template.selectOne(SELECT_NODE_MIN_ID);
    return minId;
}
@Override
public Long getMaxNodeId()
{
    // Delegate to the mapped statement that selects the maximum alf_node ID
    final Long maxId = (Long) template.selectOne(SELECT_NODE_MAX_ID);
    return maxId;
}
@Override @Override
protected void updatePrimaryChildrenSharedAclId( protected void updatePrimaryChildrenSharedAclId(
Long txnId, Long txnId,
@@ -540,6 +554,54 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
return makePersistentPropertiesMap(rows); return makePersistentPropertiesMap(rows);
} }
@Override
public List<NodePropertyEntity> selectNodePropertiesByTypes(Set<QName> qnames)
{
    // Resolve the property QNames to their database IDs ('false' => do not create missing entries)
    Set<Long> qnameIds = qnameDAO.convertQNamesToIds(qnames, false);
    if (qnameIds.isEmpty())
    {
        // None of the requested QNames are known to the database, so no properties can match
        return new ArrayList<NodePropertyEntity>();
    }
    IdsEntity queryParams = new IdsEntity();
    queryParams.setIds(new ArrayList<Long>(qnameIds));
    // Accumulate each row as it streams back from the query
    // TODO - use a callback approach
    final List<NodePropertyEntity> results = new ArrayList<NodePropertyEntity>();
    template.select(SELECT_PROPERTIES_BY_TYPES, queryParams, new ResultHandler()
    {
        @Override
        public void handleResult(ResultContext context)
        {
            results.add((NodePropertyEntity) context.getResultObject());
        }
    });
    return results;
}
@Override
public List<NodePropertyEntity> selectNodePropertiesByDataType(QName dataType, long minNodeId, long maxNodeId)
{
    // Map the data type QName onto the ordinal persisted in alf_node_properties.actual_type_n
    int actualTypeOrdinal = NodePropertyValue.convertToTypeOrdinal(dataType);
    // idOne = actual type ordinal; idTwo = min node ID (inclusive); idThree = max node ID (exclusive)
    IdsEntity queryParams = new IdsEntity();
    queryParams.setIdOne((long) actualTypeOrdinal);
    queryParams.setIdTwo(minNodeId);
    queryParams.setIdThree(maxNodeId);
    final List<NodePropertyEntity> results = new ArrayList<NodePropertyEntity>();
    template.select(SELECT_PROPERTIES_BY_ACTUAL_TYPE, queryParams, new ResultHandler()
    {
        @Override
        public void handleResult(ResultContext context)
        {
            results.add((NodePropertyEntity) context.getResultObject());
        }
    });
    return results;
}
@Override @Override
protected int deleteNodeProperties(Long nodeId, Set<Long> qnameIds) protected int deleteNodeProperties(Long nodeId, Set<Long> qnameIds)
{ {
@@ -1567,9 +1629,9 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
@Override @Override
public int deleteTxnsUnused(long fromCommitTime, long toCommitTime) public int deleteTxnsUnused(long fromCommitTime, long toCommitTime)
{ {
TransactionQueryEntity txnQuery = new TransactionQueryEntity(); TransactionQueryEntity txnQuery = new TransactionQueryEntity();
txnQuery.setMinCommitTime(fromCommitTime); txnQuery.setMinCommitTime(fromCommitTime);
txnQuery.setMaxCommitTime(toCommitTime); txnQuery.setMaxCommitTime(toCommitTime);
int numDeleted = template.delete(DELETE_TXNS_UNUSED, txnQuery); int numDeleted = template.delete(DELETE_TXNS_UNUSED, txnQuery);
return numDeleted; return numDeleted;
} }
@@ -1619,37 +1681,6 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
return template.selectOne(SELECT_TXN_MAX_ID); return template.selectOne(SELECT_TXN_MAX_ID);
} }
@Override
public List<NodePropertyEntity> selectProperties(Collection<PropertyDefinition> propertyDefs)
{
final List<NodePropertyEntity> properties = new ArrayList<NodePropertyEntity>();
Set<QName> qnames = new HashSet<QName>();
for(PropertyDefinition propDef : propertyDefs)
{
qnames.add(propDef.getName());
}
// qnames of properties that are encrypted
Set<Long> qnameIds = qnameDAO.convertQNamesToIds(qnames, false);
if(qnameIds.size() > 0)
{
IdsEntity param = new IdsEntity();
param.setIds(new ArrayList<Long>(qnameIds));
// TODO - use a callback approach
template.select(SELECT_PROPERTIES_BY_TYPES, param, new ResultHandler()
{
@Override
public void handleResult(ResultContext context)
{
properties.add((NodePropertyEntity)context.getResultObject());
}
});
}
return properties;
}
public int countChildAssocsByParent(Long parentNodeId, boolean isPrimary) public int countChildAssocsByParent(Long parentNodeId, boolean isPrimary)
{ {
NodeEntity parentNode = new NodeEntity(); NodeEntity parentNode = new NodeEntity();

View File

@@ -21,6 +21,8 @@ package org.alfresco.repo.domain.patch;
import java.util.List; import java.util.List;
import java.util.Set; import java.util.Set;
import org.alfresco.repo.domain.node.NodeDAO;
import org.alfresco.service.cmr.repository.NodeRef; import org.alfresco.service.cmr.repository.NodeRef;
import org.alfresco.service.namespace.QName; import org.alfresco.service.namespace.QName;
import org.alfresco.util.Pair; import org.alfresco.util.Pair;
@@ -36,6 +38,10 @@ public interface PatchDAO
{ {
// DM-related // DM-related
/**
* @deprecated in 4.1: use {@link NodeDAO#getMaxNodeId()}
*/
@Deprecated
public long getMaxAdmNodeID(); public long getMaxAdmNodeID();
/** /**

View File

@@ -0,0 +1,430 @@
/*
* Copyright (C) 2005-2010 Alfresco Software Limited.
*
* This file is part of Alfresco
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
*/
package org.alfresco.repo.node.db;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.alfresco.error.AlfrescoRuntimeException;
import org.alfresco.repo.batch.BatchProcessWorkProvider;
import org.alfresco.repo.batch.BatchProcessor;
import org.alfresco.repo.batch.BatchProcessor.BatchProcessWorker;
import org.alfresco.repo.batch.BatchProcessor.BatchProcessWorkerAdaptor;
import org.alfresco.repo.domain.node.NodeDAO;
import org.alfresco.repo.domain.node.NodePropertyEntity;
import org.alfresco.repo.domain.node.NodePropertyValue;
import org.alfresco.repo.domain.qname.QNameDAO;
import org.alfresco.repo.domain.schema.SchemaBootstrap;
import org.alfresco.repo.lock.JobLockService;
import org.alfresco.repo.lock.JobLockService.JobLockRefreshCallback;
import org.alfresco.repo.lock.LockAcquisitionException;
import org.alfresco.repo.policy.BehaviourFilter;
import org.alfresco.repo.transaction.RetryingTransactionHelper;
import org.alfresco.service.cmr.dictionary.DataTypeDefinition;
import org.alfresco.service.namespace.NamespaceService;
import org.alfresco.service.namespace.QName;
import org.alfresco.service.transaction.TransactionService;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.quartz.Job;
import org.quartz.JobDataMap;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;
import org.springframework.beans.BeansException;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
/**
* <h1>Max String Length Worker</h1>
*
* <h2>What it is</h2>
* A worker for a scheduled job that checks and adjusts string storage for persisted strings in the system.
* <p>
* <h2>Settings that control the behaviour</h2>
* <ul>
* <li><b>${system.maximumStringLength}</b> - the maximum length of a string that can be persisted in the *alf_node_properties.string_value* column.</li>
* <li><b>${system.maximumStringLength.jobQueryRange}</b> - the node ID range to query for.
* The process will repeat from the first to the last node, querying for up to this many nodes.
* Only reduce the value if the NodeDAO query takes a long time.</li>
* <li><b>${system.maximumStringLength.jobThreadCount}</b> - the number of threads that will handle persistence checks and changes.
* Increase or decrease this to allow for free CPU capacity on the machine executing the job.</li>
* </ul>
 * <h2>How to use it</h2>
 * Wire the worker up as the <b>nodeStringLengthWorker</b> bean and trigger it via the scheduled
 * {@link NodeStringLengthWorker.NodeStringLengthJob}, which is unscheduled by default; set
 * <b>${system.maximumStringLength.jobCronExpression}</b> to a valid cron expression to enable it.
*
* @author Derek Hulley
* @since 4.1.9.2
*/
public class NodeStringLengthWorker implements ApplicationContextAware
{
    /** Cluster-wide lock name: ensures only one instance of this worker runs at any time. */
    private static final QName LOCK = QName.createQName(NamespaceService.SYSTEM_MODEL_1_0_URI, "NodeStringLengthWorker");
    /** Lock time-to-live in milliseconds; the lock is kept alive via a refresh callback while work is in progress. */
    private static final long LOCK_TTL = 60000L;

    private static Log logger = LogFactory.getLog(NodeStringLengthWorker.class);

    // Collaborators, all injected via the constructor
    private final NodeDAO nodeDAO;
    private final JobLockService jobLockService;
    private final TransactionService transactionService;
    private final QNameDAO qnameDAO;
    private final BehaviourFilter behaviourFilter;
    // Spring context, handed to the batch processor (set via setApplicationContext)
    private ApplicationContext ctx;
    // Size of the node ID window used for each property query
    private final int queryRange;
    // Number of threads the batch processor runs with
    private final int threadCount;
    // Number of entries handed to each worker invocation; fixed at 100 (not externally configurable)
    private final int batchSize;

    /**
     * @param nodeDAO               DAO used for node ID bounds, property queries and property re-persistence
     * @param jobLockService        service providing the cluster-wide job lock
     * @param transactionService    source of the retrying transaction helper used for batch work
     * @param qnameDAO              DAO used to resolve persisted QName IDs back to QNames
     * @param behaviourFilter       filter used to disable behaviours while properties are rewritten
     * @param queryRange            the node ID range queried for in a single DAO call
     * @param threadCount           the number of threads that will perform the persistence checks
     */
    public NodeStringLengthWorker(
            NodeDAO nodeDAO, JobLockService jobLockService, TransactionService transactionService, QNameDAO qnameDAO,
            BehaviourFilter behaviourFilter,
            int queryRange, int threadCount)
    {
        this.nodeDAO = nodeDAO;
        this.jobLockService = jobLockService;
        this.transactionService = transactionService;
        this.qnameDAO = qnameDAO;
        this.behaviourFilter = behaviourFilter;
        this.queryRange = queryRange;
        this.threadCount = threadCount;
        this.batchSize = 100;
    }

    /**
     * Set the application context for event publishing during batch processing
     */
    @Override
    public void setApplicationContext(ApplicationContext applicationContext) throws BeansException
    {
        this.ctx = applicationContext;
    }

    /**
     * Performs the work, including logging details of progress.
     * <p>
     * Takes the cluster-wide lock (skipping silently if another instance already holds it),
     * keeps the lock refreshed for the duration of the run, and releases it when done.
     *
     * @return the thread-safe result object carrying counts of processed/changed properties and errors
     */
    public NodeStringLengthWorkResult execute()
    {
        // Build refresh callback: the lock service polls isActive() and tells us if the lock is lost
        final NodeStringLengthWorkResult progress = new NodeStringLengthWorkResult();
        JobLockRefreshCallback lockCallback = new JobLockRefreshCallback()
        {
            @Override
            public void lockReleased()
            {
                // Lock lost: signal the work loop to stop
                progress.inProgress.set(false);
            }
            @Override
            public boolean isActive()
            {
                return progress.inProgress.get();
            }
        };
        String lockToken = null;
        try
        {
            progress.inProgress.set(true);
            // Get the lock
            lockToken = jobLockService.getLock(LOCK, LOCK_TTL);
            // Start the refresh timer
            jobLockService.refreshLock(lockToken, LOCK, LOCK_TTL, lockCallback);
            // Now we know that we'll do something
            if (logger.isInfoEnabled())
            {
                logger.info("NodeStringLengthWorker: Starting");
            }
            // Do the work
            doWork(progress);
            // Done
            if (logger.isInfoEnabled())
            {
                logger.info("NodeStringLengthWorker: " + progress);
            }
        }
        catch (LockAcquisitionException e)
        {
            // Another instance holds the lock: not an error, just skip this run
            if (logger.isDebugEnabled())
            {
                logger.debug("Skipping node string length job: " + e.getMessage());
            }
        }
        catch (Exception e)
        {
            progress.inProgress.set(false);
            logger.error("Node string length job " + progress);
            logger.error("Stopping node string length job with exception.", e);
        }
        finally
        {
            if (lockToken != null)
            {
                jobLockService.releaseLock(lockToken, LOCK);
            }
            progress.inProgress.set(false); // ensure the background lock-refresh callback reports inactive
        }
        // Done
        return progress;
    }

    /**
     * Builds and runs the batch processor over all candidate string properties.
     * Synchronized so that overlapping invocations on the same instance cannot interleave.
     *
     * @param progress the thread-safe progress
     */
    private synchronized void doWork(NodeStringLengthWorkResult progress) throws Exception
    {
        // Build batch processor
        BatchProcessWorkProvider<NodePropertyEntity> workProvider = new NodeStringLengthWorkProvider(progress);
        BatchProcessWorker<NodePropertyEntity> worker = new NodeStringLengthBatch(progress);
        RetryingTransactionHelper retryingTransactionHelper = transactionService.getRetryingTransactionHelper();
        // NOTE(review): presumably allows the rewrite even when the server is read-only — confirm
        retryingTransactionHelper.setForceWritable(true);
        BatchProcessor<NodePropertyEntity> batchProcessor = new BatchProcessor<NodePropertyEntity>(
                "NodeStringLengthWorker",
                retryingTransactionHelper,
                workProvider,
                threadCount,
                batchSize,
                ctx,
                logger,
                1000);      // NOTE(review): assumed to be the progress-logging interval — confirm against BatchProcessor
        batchProcessor.process(worker, true);
    }

    /**
     * Work provider for batch job providing string properties to process.
     * Walks the node ID space from the lowest node ID upward in {@code queryRange}-sized windows,
     * querying for properties whose <b>actual</b> type is TEXT.
     *
     * @author Derek Hulley
     * @since 4.1.9.2
     */
    private class NodeStringLengthWorkProvider implements BatchProcessWorkProvider<NodePropertyEntity>
    {
        // Highest node ID at construction time; nodes created after the job starts are not visited
        private final long maxNodeId;
        private final NodeStringLengthWorkResult progress;
        private NodeStringLengthWorkProvider(NodeStringLengthWorkResult progress)
        {
            this.progress = progress;
            this.maxNodeId = nodeDAO.getMaxNodeId();
        }
        @Override
        public int getTotalEstimatedWorkSize()
        {
            // No estimate is available up front
            return -1;
        }
        @Override
        public Collection<NodePropertyEntity> getNextWork()
        {
            // Check that there are not too many errors
            if (progress.errors.get() > 1000)
            {
                logger.warn("Node string length work terminating; too many errors.");
                return Collections.emptyList();
            }
            // Keep shifting the query window up until we get results or we hit the original max node ID
            List<NodePropertyEntity> ret = Collections.emptyList();
            while (ret.isEmpty() && progress.currentMinNodeId.get() < maxNodeId)
            {
                // Calculate the node ID range
                Long minNodeId = null;
                if (progress.currentMinNodeId.get() == 0L)
                {
                    // First window: start from the lowest node ID in the system
                    minNodeId = nodeDAO.getMinNodeId();
                    progress.currentMinNodeId.set(minNodeId);
                }
                else
                {
                    // Subsequent windows: advance the shared cursor by one window
                    minNodeId = progress.currentMinNodeId.addAndGet(queryRange);
                }
                long maxNodeId = minNodeId + queryRange;    // exclusive upper bound; deliberately shadows the field
                // Query for the properties
                ret = nodeDAO.selectNodePropertiesByDataType(DataTypeDefinition.TEXT, minNodeId, maxNodeId);
            }
            // Done
            if (logger.isDebugEnabled())
            {
                logger.debug("Node string length work provider found " + ret.size() + " new property entities.");
            }
            return ret;
        }
    }

    /**
     * Class that does the actual node manipulation to change the string storage.
     * A property is re-persisted when its persisted storage form (string_value vs. serializable_value)
     * no longer matches what the current maximum string length dictates.
     *
     * @author Derek Hulley
     * @since 4.1.9.2
     */
    private class NodeStringLengthBatch extends BatchProcessWorkerAdaptor<NodePropertyEntity>
    {
        // Persisted-type ordinals compared against each property's current persisted type
        private final int typeOrdinalText = NodePropertyValue.convertToTypeOrdinal(DataTypeDefinition.TEXT);
        private final int typeOrdinalAny = NodePropertyValue.convertToTypeOrdinal(DataTypeDefinition.ANY);
        private final NodeStringLengthWorkResult progress;
        private NodeStringLengthBatch(NodeStringLengthWorkResult progress)
        {
            this.progress = progress;
        }
        @Override
        public void process(NodePropertyEntity entry) throws Throwable
        {
            progress.propertiesProcessed.incrementAndGet();
            try
            {
                Long nodeId = entry.getNodeId();
                NodePropertyValue prop = entry.getValue();
                // Get the current string value
                // NOTE(review): assumed non-null for TEXT-typed properties; a null would NPE here and
                //               be counted as an error by the catch block — confirm nulls cannot occur
                String text = (String) prop.getValue(DataTypeDefinition.TEXT);
                // Decide if the string needs changing or not
                boolean repersist = false;
                int persistedTypeOrdinal = prop.getPersistedType().intValue();
                if (text.length() > SchemaBootstrap.getMaxStringLength())
                {
                    // The text needs to be stored as a serializable_value (ANY)
                    if (typeOrdinalAny != persistedTypeOrdinal)
                    {
                        repersist = true;
                    }
                }
                else
                {
                    // The text is shorter than the current max, so it should be stored as a string_value (TEXT)
                    if (typeOrdinalText != persistedTypeOrdinal)
                    {
                        repersist = true;
                    }
                }
                // Only do any work if we need to
                if (repersist)
                {
                    // We do not want any behaviours associated with our transactions
                    // NOTE(review): behaviours are disabled but never explicitly re-enabled here; presumably
                    //               the disablement is scoped to the batch transaction — confirm
                    behaviourFilter.disableBehaviour();
                    progress.propertiesChanged.incrementAndGet();
                    if (logger.isTraceEnabled())
                    {
                        logger.trace("Fixing property " + getIdentifier(entry) + ". Value: " + text);
                    }
                    else if (logger.isDebugEnabled())
                    {
                        logger.debug("Fixing property " + getIdentifier(entry));
                    }
                    // Remove and re-add so the value is re-persisted under the current maximum string length
                    Long propQNameId = entry.getKey().getQnameId();
                    QName propQName = qnameDAO.getQName(propQNameId).getSecond();
                    nodeDAO.removeNodeProperties(nodeId, Collections.singleton(propQName));
                    nodeDAO.addNodeProperty(nodeId, propQName, text);
                }
            }
            catch (Exception e)
            {
                // Record the failure
                progress.errors.incrementAndGet();
                // Rethrow so that the processing framework can handle things
                throw e;
            }
        }
        @Override
        public String getIdentifier(NodePropertyEntity entry)
        {
            // Human-readable identifier used in batch logging
            Long nodeId = entry.getNodeId();
            NodePropertyValue prop = entry.getValue();
            return ("Property with persisted type " + prop.getPersistedType() + " on node " + nodeDAO.getNodePair(nodeId));
        }
    }

    /**
     * Thread-safe helper class to carry the job progress information.
     *
     * @author Derek Hulley
     * @since 4.1.9.2
     */
    public static class NodeStringLengthWorkResult
    {
        // True while the job runs; doubles as the lock-refresh "isActive" flag
        private final AtomicBoolean inProgress = new AtomicBoolean(false);
        // Total number of properties examined
        private final AtomicInteger propertiesProcessed = new AtomicInteger(0);
        // Number of properties actually re-persisted
        private final AtomicInteger propertiesChanged = new AtomicInteger(0);
        // Number of failures recorded by the batch worker
        private final AtomicInteger errors = new AtomicInteger(0);
        // Shared node ID cursor for the work provider; 0 means "not started yet"
        private final AtomicLong currentMinNodeId = new AtomicLong(0L);
        @Override
        public String toString()
        {
            String part1 = "Changed";
            String part2 = String.format(" %4d out of a potential %4d properties. ", propertiesChanged.get(), propertiesProcessed.get());
            String part3 = String.format("[%2d Errors]", errors.get());
            return part1 + part2 + part3;
        }
        public int getPropertiesProcessed()
        {
            return propertiesProcessed.get();
        }
        public int getPropertiesChanged()
        {
            return propertiesChanged.get();
        }
        public int getErrors()
        {
            return errors.get();
        }
    }

    /**
     * A scheduled job that checks and adjusts string storage for persisted strings in the system.
     * <p>
     * Job data:
     * <ul>
     *    <li><b>nodeStringLengthWorker</b> - The worker that performs the actual processing.</li>
     * </ul>
     *
     * @author Derek Hulley
     * @since 4.1.9.2
     * @see NodeStringLengthWorker
     */
    public static class NodeStringLengthJob implements Job
    {
        /** Key under which the worker must be present in the Quartz job data map. */
        public static final String JOB_DATA_NODE_WORKER = "nodeStringLengthWorker";
        public void execute(JobExecutionContext context) throws JobExecutionException
        {
            JobDataMap jobData = context.getJobDetail().getJobDataMap();
            // extract the worker to use
            Object nodeStringLengthWorkerObj = jobData.get(JOB_DATA_NODE_WORKER);
            if (nodeStringLengthWorkerObj == null || !(nodeStringLengthWorkerObj instanceof NodeStringLengthWorker))
            {
                throw new AlfrescoRuntimeException(
                        "MaxStringLengthJob data '" + JOB_DATA_NODE_WORKER + "' must reference a " + NodeStringLengthWorker.class.getSimpleName());
            }
            NodeStringLengthWorker worker = (NodeStringLengthWorker) nodeStringLengthWorkerObj;
            worker.execute();
        }
    }
}

View File

@@ -19,9 +19,12 @@
package org.alfresco.repo.domain.node; package org.alfresco.repo.domain.node;
import java.io.Serializable; import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collection; import java.util.Collection;
import java.util.Collections; import java.util.Collections;
import java.util.List; import java.util.List;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;
import junit.framework.TestCase; import junit.framework.TestCase;
@@ -33,8 +36,10 @@ import org.alfresco.repo.domain.node.NodeDAO.NodeRefQueryCallback;
import org.alfresco.repo.transaction.RetryingTransactionHelper; import org.alfresco.repo.transaction.RetryingTransactionHelper;
import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback; import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
import org.alfresco.service.ServiceRegistry; import org.alfresco.service.ServiceRegistry;
import org.alfresco.service.cmr.dictionary.DataTypeDefinition;
import org.alfresco.service.cmr.repository.NodeRef; import org.alfresco.service.cmr.repository.NodeRef;
import org.alfresco.service.cmr.repository.StoreRef; import org.alfresco.service.cmr.repository.StoreRef;
import org.alfresco.service.namespace.QName;
import org.alfresco.service.transaction.TransactionService; import org.alfresco.service.transaction.TransactionService;
import org.alfresco.test_category.OwnJVMTestsCategory; import org.alfresco.test_category.OwnJVMTestsCategory;
import org.alfresco.util.ApplicationContextHelper; import org.alfresco.util.ApplicationContextHelper;
@@ -106,6 +111,80 @@ public class NodeDAOTest extends TestCase
assertNotNull("Txn ID should be present by forcing it", txnId2); assertNotNull("Txn ID should be present by forcing it", txnId2);
} }
public void testSelectNodePropertiesByTypes() throws Exception
{
    // Query for every instance of the cm:name property in a read-only transaction
    final Set<QName> qnames = Collections.singleton(ContentModel.PROP_NAME);
    RetryingTransactionCallback<List<NodePropertyEntity>> selectWork = new RetryingTransactionCallback<List<NodePropertyEntity>>()
    {
        public List<NodePropertyEntity> execute() throws Throwable
        {
            return nodeDAO.selectNodePropertiesByTypes(qnames);
        }
    };
    List<NodePropertyEntity> props = txnHelper.doInTransaction(selectWork, true);
    if (props.isEmpty())
    {
        // No nodes with cm:name in this database: nothing to verify
        return;
    }
    // Any cm:name value must be retrievable as a non-null string
    String firstValue = props.get(0).getValue().getStringValue();
    assertNotNull(firstValue);
}
/**
 * Walks the whole node ID space in 1K windows and verifies that
 * {@code selectNodePropertiesByDataType(TEXT, min, max)} honours its inclusive-min/exclusive-max
 * contract and that every returned property carries a retrievable string value.
 */
public void testSelectNodePropertiesByDataType() throws Exception
{
    // Prepare the bits that repeat the actual query.
    // AtomicLongs are used so the anonymous callback below can see the moving window bounds.
    final AtomicLong min = new AtomicLong(0L);
    final AtomicLong max = new AtomicLong(0L);
    RetryingTransactionCallback<List<NodePropertyEntity>> callback = new RetryingTransactionCallback<List<NodePropertyEntity>>()
    {
        public List<NodePropertyEntity> execute() throws Throwable
        {
            long minNodeId = min.get();
            long maxNodeId = max.get();
            return nodeDAO.selectNodePropertiesByDataType(DataTypeDefinition.TEXT, minNodeId, maxNodeId);
        }
    };
    // Get the current max node id
    Long minNodeId = nodeDAO.getMinNodeId();
    if (minNodeId == null)
    {
        return; // there are no nodes!
    }
    Long maxNodeId = nodeDAO.getMaxNodeId(); // won't be null at this point as we have a min
    min.set(minNodeId.longValue());
    // Iterate across all nodes in the system
    while (min.longValue() <= maxNodeId.longValue())
    {
        max.set(min.get() + 1000L); // 1K increments
        // Get the properties for the current [min, max) window
        List<NodePropertyEntity> props = txnHelper.doInTransaction(callback, true);
        for (NodePropertyEntity prop : props)
        {
            // Check the property falls inside the requested window
            Long nodeId = prop.getNodeId();
            assertNotNull(nodeId);
            assertTrue("the min should be inclusive.", min.longValue() <= nodeId.longValue());
            assertTrue("the max should be exclusive.", max.longValue() > nodeId.longValue());
            // The actual type must be STRING; storage may be either string or serializable
            NodePropertyValue propVal = prop.getValue();
            assertNotNull(propVal);
            assertEquals("STRING", propVal.getActualTypeString());
            String valueStr = propVal.getStringValue();
            Serializable valueSer = propVal.getSerializableValue();
            assertTrue("Test is either TEXT or SERIALIZABLE", valueStr != null || valueSer != null);
            // Regardless of storage form, the value must convert back to a string
            String value = (String) propVal.getValue(DataTypeDefinition.TEXT);
            assertNotNull(value);
            // This all checks out
        }
        // Shift the window up
        min.set(max.get());
    }
}
public void testGetNodesWithAspects() throws Throwable public void testGetNodesWithAspects() throws Throwable
{ {
final NodeRefQueryCallback callback = new NodeRefQueryCallback() final NodeRefQueryCallback callback = new NodeRefQueryCallback()
@@ -130,6 +209,16 @@ public class NodeDAOTest extends TestCase
}, true); }, true);
} }
/**
 * Verifies that the node ID bounds are available and consistent: the minimum is positive
 * and the maximum is not below it.
 */
public void testGetMinMaxNodeId() throws Exception
{
    Long minNodeId = nodeDAO.getMinNodeId();
    assertNotNull(minNodeId);
    assertTrue(minNodeId.longValue() > 0L);
    Long maxNodeId = nodeDAO.getMaxNodeId();
    assertNotNull(maxNodeId);
    // >= rather than >: a repository with exactly one node has max == min,
    // which the original strict comparison would incorrectly fail on
    assertTrue(maxNodeId.longValue() >= minNodeId.longValue());
}
public void testGetPrimaryChildAcls() throws Throwable public void testGetPrimaryChildAcls() throws Throwable
{ {
List<NodeIdAndAclId> acls = nodeDAO.getPrimaryChildrenAcls(1L); List<NodeIdAndAclId> acls = nodeDAO.getPrimaryChildrenAcls(1L);
@@ -150,6 +239,25 @@ public class NodeDAOTest extends TestCase
} }
} }
/**
 * Smoke test for {@code cacheNodesById}: bulk-cache a contiguous window of node IDs
 * starting at the lowest node ID. IDs in the range that do not correspond to live nodes
 * are presumably tolerated by the DAO — this test only checks that the call completes.
 */
public void testCacheNodes() throws Throwable
{
    Long minNodeId = nodeDAO.getMinNodeId();
    // Fix: original allocated capacity 10000 but only ever filled 1000 entries;
    // a single named constant keeps the capacity and the loop bound consistent
    final int count = 1000;
    final List<Long> nodeIds = new ArrayList<Long>(count);
    for (long i = 0; i < count; i++)
    {
        nodeIds.add(Long.valueOf(minNodeId.longValue() + i));
    }
    RetryingTransactionCallback<Void> callback = new RetryingTransactionCallback<Void>()
    {
        public Void execute() throws Throwable
        {
            nodeDAO.cacheNodesById(nodeIds);
            return null;
        }
    };
    txnHelper.doInTransaction(callback, true);
}
/** /**
* Ensure that the {@link NodeEntity} values cached as root nodes are valid instances. * Ensure that the {@link NodeEntity} values cached as root nodes are valid instances.
* <p/> * <p/>

View File

@@ -32,8 +32,10 @@ import org.alfresco.model.ContentModel;
import org.alfresco.repo.domain.node.NodeDAO;
import org.alfresco.repo.domain.node.NodeDAO.ChildAssocRefQueryCallback;
import org.alfresco.repo.domain.node.Transaction;
import org.alfresco.repo.domain.schema.SchemaBootstrap;
import org.alfresco.repo.node.BaseNodeServiceTest;
import org.alfresco.repo.node.cleanup.NodeCleanupRegistry;
import org.alfresco.repo.node.db.NodeStringLengthWorker.NodeStringLengthWorkResult;
import org.alfresco.repo.transaction.AlfrescoTransactionSupport;
import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
import org.alfresco.repo.transaction.TransactionListenerAdapter;
@@ -48,6 +50,9 @@ import org.alfresco.service.transaction.TransactionService;
import org.alfresco.test_category.OwnJVMTestsCategory;
import org.alfresco.util.Pair;
import org.junit.experimental.categories.Category;
import org.hibernate.dialect.Dialect;
import org.hibernate.dialect.MySQLInnoDBDialect;
import org.springframework.context.event.ContextRefreshedEvent;
import org.springframework.extensions.surf.util.I18NUtil;
/**
@@ -696,4 +701,88 @@ public class DbNodeServiceImplTest extends BaseNodeServiceTest
        // expect to go here
    }
}
/**
 * Check that the maximum string lengths can be adjusted up and down.
 * Note that this test ONLY works for MySQL because the other databases cannot support more than 1024 characters
 * in the string_value column and the value may not be set to less than 1024.
 *
 * @see SchemaBootstrap#DEFAULT_MAX_STRING_LENGTH
 */
@SuppressWarnings("deprecation")
public void testNodeStringLengthWorker() throws Exception
{
    setComplete();
    endTransaction();

    // Only the MySQL (InnoDB) dialect can hold oversized strings in string_value; skip on everything else
    Dialect dialect = (Dialect) applicationContext.getBean("dialect");
    if (!(dialect instanceof MySQLInnoDBDialect))
    {
        return;
    }

    SchemaBootstrap schemaBootstrap = (SchemaBootstrap) applicationContext.getBean("schemaBootstrap");
    assertEquals("Expected max string length to be MAX", Integer.MAX_VALUE, SchemaBootstrap.getMaxStringLength());

    NodeStringLengthWorker worker = (NodeStringLengthWorker) applicationContext.getBean("nodeStringLengthWorker");

    // Run the worker once so that pre-existing data is in a known starting state.
    // If this run misbehaves, the later assertions will catch it anyway.
    NodeStringLengthWorkResult result = worker.execute();
    assertTrue(result.getPropertiesProcessed() > 0);
    assertEquals(0, result.getErrors());

    // Drop the limit to DEFAULT_MAX_STRING_LENGTH and re-persist anything created before this test
    schemaBootstrap.setMaximumStringLength(SchemaBootstrap.DEFAULT_MAX_STRING_LENGTH);
    schemaBootstrap.onApplicationEvent(new ContextRefreshedEvent(applicationContext));
    result = worker.execute();
    int baselineChanged = result.getPropertiesChanged();

    // Build a value exactly one character longer than the default limit
    int oversizedLength = SchemaBootstrap.DEFAULT_MAX_STRING_LENGTH + 1;
    StringBuilder valueBuilder = new StringBuilder(oversizedLength);
    while (valueBuilder.length() < oversizedLength)
    {
        valueBuilder.append("A");
    }
    final String oversizedValue = valueBuilder.toString();

    // Persist with the limit at MAX_VALUE so the value lands in the string_value column
    schemaBootstrap.setMaximumStringLength(Integer.MAX_VALUE);
    schemaBootstrap.onApplicationEvent(new ContextRefreshedEvent(applicationContext));
    txnService.getRetryingTransactionHelper().doInTransaction(new RetryingTransactionCallback<Void>()
    {
        @Override
        public Void execute() throws Throwable
        {
            nodeService.setProperty(rootNodeRef, PROP_QNAME_STRING_VALUE, oversizedValue);
            return null;
        }
    });

    // At the current limit the worker should find nothing new to re-persist
    result = worker.execute();
    assertEquals(baselineChanged, result.getPropertiesChanged());

    // Bring the limit down to the value the other databases use; the oversized value must be moved
    schemaBootstrap.setMaximumStringLength(SchemaBootstrap.DEFAULT_MAX_STRING_LENGTH);
    schemaBootstrap.onApplicationEvent(new ContextRefreshedEvent(applicationContext));
    result = worker.execute();
    assertEquals(baselineChanged + 1, result.getPropertiesChanged());

    // Restore the MySQL default; the large value should migrate back into MySQL's TEXT field
    schemaBootstrap.setMaximumStringLength(Integer.MAX_VALUE);
    schemaBootstrap.onApplicationEvent(new ContextRefreshedEvent(applicationContext));
    result = worker.execute();
    assertEquals(baselineChanged + 1, result.getPropertiesChanged());

    // The round trips must not have corrupted the stored value
    String reloadedValue = txnService.getRetryingTransactionHelper().doInTransaction(new RetryingTransactionCallback<String>()
    {
        @Override
        public String execute() throws Throwable
        {
            return (String) nodeService.getProperty(rootNodeRef, PROP_QNAME_STRING_VALUE);
        }
    });
    assertEquals("String manipulation corrupted the long string value. ", oversizedValue, reloadedValue);
}
}