diff --git a/config/alfresco/ibatis/org.hibernate.dialect.Dialect/node-common-SqlMap.xml b/config/alfresco/ibatis/org.hibernate.dialect.Dialect/node-common-SqlMap.xml index 2688c25f8a..aec197ee1a 100644 --- a/config/alfresco/ibatis/org.hibernate.dialect.Dialect/node-common-SqlMap.xml +++ b/config/alfresco/ibatis/org.hibernate.dialect.Dialect/node-common-SqlMap.xml @@ -750,6 +750,17 @@ + + select node.id as node_id, @@ -757,7 +768,7 @@ aspects.qname_id as qname_id - from alf_node node @@ -1194,6 +1205,20 @@ and assoc.qname_localname = #{qnameLocalName} and assoc.is_primary = #{isPrimary} + + + + select diff --git a/config/alfresco/node-services-context.xml b/config/alfresco/node-services-context.xml index d8b0e28407..eb447dd042 100644 --- a/config/alfresco/node-services-context.xml +++ b/config/alfresco/node-services-context.xml @@ -235,6 +235,17 @@ ${index.tracking.purgeSize} + + + + + + + + + + + diff --git a/config/alfresco/repository.properties b/config/alfresco/repository.properties index 68297e7b69..380a79b040 100644 --- a/config/alfresco/repository.properties +++ b/config/alfresco/repository.properties @@ -213,10 +213,14 @@ system.readpermissions.bulkfetchsize=1000 # # Manually control how the system handles maximum string lengths. -# Any zero or negative value is ignored. -# Only change this after consulting support or reading the appropriate Javadocs for -# org.alfresco.repo.domain.schema.SchemaBootstrap for V2.1.2 +# Any zero or negative value is ignored. +# Only change this after consulting support or reading the appropriate Javadocs for +# org.alfresco.repo.domain.schema.SchemaBootstrap for V2.1.2. +# Before database migration, the string value storage may need to be adjusted using the scheduled job system.maximumStringLength=-1 +system.maximumStringLength.jobCronExpression=* * * * * ? 
2099 +system.maximumStringLength.jobQueryRange=10000 +system.maximumStringLength.jobThreadCount=4 # # Limit hibernate session size by trying to amalgamate events for the L2 session invalidation diff --git a/config/alfresco/scheduled-jobs-context.xml b/config/alfresco/scheduled-jobs-context.xml index 1e59fcfa95..82b597b48d 100644 --- a/config/alfresco/scheduled-jobs-context.xml +++ b/config/alfresco/scheduled-jobs-context.xml @@ -120,6 +120,20 @@ + + + + + + + + + + + + + + org.alfresco.repo.node.cleanup.NodeCleanupJob diff --git a/source/java/org/alfresco/encryption/BootstrapReEncryptor.java b/source/java/org/alfresco/encryption/BootstrapReEncryptor.java index c5fa98e691..33b3df7c04 100644 --- a/source/java/org/alfresco/encryption/BootstrapReEncryptor.java +++ b/source/java/org/alfresco/encryption/BootstrapReEncryptor.java @@ -33,47 +33,47 @@ public class BootstrapReEncryptor extends AbstractLifecycleBean { private static Log logger = LogFactory.getLog(BootstrapReEncryptor.class); - private boolean enabled; - private ReEncryptor reEncryptor; - - public void setEnabled(boolean enabled) - { - this.enabled = enabled; - } + private boolean enabled; + private ReEncryptor reEncryptor; + + public void setEnabled(boolean enabled) + { + this.enabled = enabled; + } - public void setReEncryptor(ReEncryptor reEncryptor) - { - this.reEncryptor = reEncryptor; - } + public void setReEncryptor(ReEncryptor reEncryptor) + { + this.reEncryptor = reEncryptor; + } - public int reEncrypt() - { - try - { - return reEncryptor.bootstrapReEncrypt(); - } - catch(MissingKeyException e) - { - throw new AlfrescoRuntimeException("Bootstrap re-encryption failed", e); - } - } + public int reEncrypt() + { + try + { + return reEncryptor.bootstrapReEncrypt(); + } + catch(MissingKeyException e) + { + throw new AlfrescoRuntimeException("Bootstrap re-encryption failed", e); + } + } @Override protected void onBootstrap(ApplicationEvent event) - { - if(enabled) - { - if(logger.isDebugEnabled()) - { - logger.debug("Re-encrypting encryptable properties..."); - } - int propertiesReEncrypted = reEncrypt(); - if(logger.isDebugEnabled()) - { - logger.debug("...done, re-encrypted " + propertiesReEncrypted + " properties."); - } - } - } + { + if(enabled) + { + if(logger.isDebugEnabled()) + { + logger.debug("Re-encrypting encryptable properties..."); + } + int propertiesReEncrypted = reEncrypt(); + if(logger.isDebugEnabled()) + { + logger.debug("...done, re-encrypted " + propertiesReEncrypted + " properties."); + } + } + } @Override protected void onShutdown(ApplicationEvent event) diff --git a/source/java/org/alfresco/encryption/EncryptionChecker.java b/source/java/org/alfresco/encryption/EncryptionChecker.java index e2b0c7cac7..985265276e 100644 --- a/source/java/org/alfresco/encryption/EncryptionChecker.java +++ b/source/java/org/alfresco/encryption/EncryptionChecker.java @@ -41,44 +41,44 @@ public class EncryptionChecker extends AbstractLifecycleBean private TransactionService transactionService; private KeyStoreChecker keyStoreChecker; - public void setKeyStoreChecker(KeyStoreChecker keyStoreChecker) - { - this.keyStoreChecker = keyStoreChecker; - } - - public void setTransactionService(TransactionService transactionService) - { - this.transactionService = transactionService; - } + public void setKeyStoreChecker(KeyStoreChecker keyStoreChecker) + { + this.keyStoreChecker = keyStoreChecker; + } + + public void setTransactionService(TransactionService transactionService) + { + this.transactionService = transactionService; + } - 
@Override - protected void onBootstrap(ApplicationEvent event) - { - RetryingTransactionHelper txnHelper = transactionService.getRetryingTransactionHelper(); - txnHelper.setForceWritable(true); // Force write in case server is read-only - - txnHelper.doInTransaction(new RetryingTransactionCallback() - { - public Void execute() throws Throwable - { - try - { - keyStoreChecker.validateKeyStores(); - } - catch(Throwable e) - { - // Just throw as a runtime exception - throw new AlfrescoRuntimeException("Keystores are invalid", e); - } + @Override + protected void onBootstrap(ApplicationEvent event) + { + RetryingTransactionHelper txnHelper = transactionService.getRetryingTransactionHelper(); + txnHelper.setForceWritable(true); // Force write in case server is read-only + + txnHelper.doInTransaction(new RetryingTransactionCallback() + { + public Void execute() throws Throwable + { + try + { + keyStoreChecker.validateKeyStores(); + } + catch(Throwable e) + { + // Just throw as a runtime exception + throw new AlfrescoRuntimeException("Keystores are invalid", e); + } - return null; - } - }); - } + return null; + } + }); + } - @Override - protected void onShutdown(ApplicationEvent event) - { - - } + @Override + protected void onShutdown(ApplicationEvent event) + { + + } } diff --git a/source/java/org/alfresco/encryption/EncryptionKeysRegistryImpl.java b/source/java/org/alfresco/encryption/EncryptionKeysRegistryImpl.java index d7e574f479..f5fc9ecd4b 100644 --- a/source/java/org/alfresco/encryption/EncryptionKeysRegistryImpl.java +++ b/source/java/org/alfresco/encryption/EncryptionKeysRegistryImpl.java @@ -57,170 +57,170 @@ public class EncryptionKeysRegistryImpl implements EncryptionKeysRegistry private String cipherAlgorithm; private String cipherProvider; - public void setAttributeService(AttributeService attributeService) - { - this.attributeService = attributeService; - } + public void setAttributeService(AttributeService attributeService) + { + this.attributeService = attributeService; + } - public void setCipherAlgorithm(String cipherAlgorithm) - { - this.cipherAlgorithm = cipherAlgorithm; - } + public void setCipherAlgorithm(String cipherAlgorithm) + { + this.cipherAlgorithm = cipherAlgorithm; + } - public void setCipherProvider(String cipherProvider) - { - this.cipherProvider = cipherProvider; - } + public void setCipherProvider(String cipherProvider) + { + this.cipherProvider = cipherProvider; + } - public void setTransactionService(TransactionService transactionService) - { - this.transactionService = transactionService; - } + public void setTransactionService(TransactionService transactionService) + { + this.transactionService = transactionService; + } - protected Encryptor getEncryptor(final KeyMap keys) - { - DefaultEncryptor encryptor = new DefaultEncryptor(); - encryptor.setCipherAlgorithm(cipherAlgorithm); - encryptor.setCipherProvider(cipherProvider); - encryptor.setKeyProvider(new KeyProvider() - { - @Override - public Key getKey(String keyAlias) - { - return keys.getCachedKey(keyAlias).getKey(); - } - }); - return encryptor; - } + protected Encryptor getEncryptor(final KeyMap keys) + { + DefaultEncryptor encryptor = new DefaultEncryptor(); + encryptor.setCipherAlgorithm(cipherAlgorithm); + encryptor.setCipherProvider(cipherProvider); + encryptor.setKeyProvider(new KeyProvider() + { + @Override + public Key getKey(String keyAlias) + { + return keys.getCachedKey(keyAlias).getKey(); + } + }); + return encryptor; + } - public void init() - { - } + public void init() + { + } - public 
void registerKey(String keyAlias, Key key) - { - if(isKeyRegistered(keyAlias)) - { - throw new IllegalArgumentException("Key " + keyAlias + " is already registered"); - } + public void registerKey(String keyAlias, Key key) + { + if(isKeyRegistered(keyAlias)) + { + throw new IllegalArgumentException("Key " + keyAlias + " is already registered"); + } - // register the key by creating an attribute that stores a guid and its encrypted value - String guid = GUID.generate(); + // register the key by creating an attribute that stores a guid and its encrypted value + String guid = GUID.generate(); - KeyMap keys = new KeyMap(); - keys.setKey(keyAlias, key); - Encryptor encryptor = getEncryptor(keys); - Serializable encrypted = encryptor.sealObject(keyAlias, null, guid); - Pair keyCheck = new Pair(guid, encrypted); - attributeService.createAttribute(keyCheck, TOP_LEVEL_KEY, keyAlias); - logger.info("Registered key " + keyAlias); - } - - public void unregisterKey(String keyAlias) - { - attributeService.removeAttribute(TOP_LEVEL_KEY, keyAlias); - } - - public boolean isKeyRegistered(String keyAlias) - { - try - { - return (attributeService.getAttribute(TOP_LEVEL_KEY, keyAlias) != null); - } - catch(Throwable e) - { - // there is an issue getting the attribute. Remove it. - attributeService.removeAttribute(TOP_LEVEL_KEY, keyAlias); - return (attributeService.getAttribute(TOP_LEVEL_KEY, keyAlias) != null); - } - } - - public List getRegisteredKeys(final Set keyStoreKeys) - { - final List registeredKeys = new ArrayList(); + KeyMap keys = new KeyMap(); + keys.setKey(keyAlias, key); + Encryptor encryptor = getEncryptor(keys); + Serializable encrypted = encryptor.sealObject(keyAlias, null, guid); + Pair keyCheck = new Pair(guid, encrypted); + attributeService.createAttribute(keyCheck, TOP_LEVEL_KEY, keyAlias); + logger.info("Registered key " + keyAlias); + } + + public void unregisterKey(String keyAlias) + { + attributeService.removeAttribute(TOP_LEVEL_KEY, keyAlias); + } + + public boolean isKeyRegistered(String keyAlias) + { + try + { + return (attributeService.getAttribute(TOP_LEVEL_KEY, keyAlias) != null); + } + catch(Throwable e) + { + // there is an issue getting the attribute. Remove it. 
+ attributeService.removeAttribute(TOP_LEVEL_KEY, keyAlias); + return (attributeService.getAttribute(TOP_LEVEL_KEY, keyAlias) != null); + } + } + + public List getRegisteredKeys(final Set keyStoreKeys) + { + final List registeredKeys = new ArrayList(); - attributeService.getAttributes(new AttributeQueryCallback() - { - public boolean handleAttribute(Long id, Serializable value, - Serializable[] keys) - { - // Add as a registered key if the keystore contains the key - String keyAlias = (String)keys[1]; - if(keyStoreKeys.contains(keyAlias)) - { - registeredKeys.add(keyAlias); - } - return true; - } + attributeService.getAttributes(new AttributeQueryCallback() + { + public boolean handleAttribute(Long id, Serializable value, + Serializable[] keys) + { + // Add as a registered key if the keystore contains the key + String keyAlias = (String)keys[1]; + if(keyStoreKeys.contains(keyAlias)) + { + registeredKeys.add(keyAlias); + } + return true; + } - }, - TOP_LEVEL_KEY); + }, + TOP_LEVEL_KEY); - return registeredKeys; - } + return registeredKeys; + } - @SuppressWarnings("unchecked") - public KEY_STATUS checkKey(String keyAlias, Key key) - { - Pair keyCheck = null; + @SuppressWarnings("unchecked") + public KEY_STATUS checkKey(String keyAlias, Key key) + { + Pair keyCheck = null; - if(attributeService.exists(TOP_LEVEL_KEY, keyAlias)) - { - try - { - // check that the key has not changed by decrypting the encrypted guid attribute - // comparing against the guid - try - { - keyCheck = (Pair)attributeService.getAttribute(TOP_LEVEL_KEY, keyAlias); - } - catch(Throwable e) - { - // there is an issue getting the attribute. Remove it. - attributeService.removeAttribute(TOP_LEVEL_KEY, keyAlias); - return KEY_STATUS.MISSING; - } - - if(keyCheck == null) - { - return KEY_STATUS.MISSING; - } + if(attributeService.exists(TOP_LEVEL_KEY, keyAlias)) + { + try + { + // check that the key has not changed by decrypting the encrypted guid attribute + // comparing against the guid + try + { + keyCheck = (Pair)attributeService.getAttribute(TOP_LEVEL_KEY, keyAlias); + } + catch(Throwable e) + { + // there is an issue getting the attribute. Remove it. + attributeService.removeAttribute(TOP_LEVEL_KEY, keyAlias); + return KEY_STATUS.MISSING; + } + + if(keyCheck == null) + { + return KEY_STATUS.MISSING; + } - KeyMap keys = new KeyMap(); - keys.setKey(keyAlias, key); - Encryptor encryptor = getEncryptor(keys); - Serializable storedGUID = encryptor.unsealObject(keyAlias, keyCheck.getSecond()); - return EqualsHelper.nullSafeEquals(storedGUID, keyCheck.getFirst()) ? KEY_STATUS.OK : KEY_STATUS.CHANGED; - } - catch(InvalidKeyException e) - { - // key exception indicates that the key has changed - it can't decrypt the - // previously-encrypted data - return KEY_STATUS.CHANGED; - } - } - else - { - return KEY_STATUS.MISSING; - } - } - - // note that this removes _all_ keys in the keystore. Use with care. - public void removeRegisteredKeys(final Set keys) - { - RetryingTransactionHelper retryingTransactionHelper = transactionService.getRetryingTransactionHelper(); - final RetryingTransactionCallback removeKeysCallback = new RetryingTransactionCallback() + KeyMap keys = new KeyMap(); + keys.setKey(keyAlias, key); + Encryptor encryptor = getEncryptor(keys); + Serializable storedGUID = encryptor.unsealObject(keyAlias, keyCheck.getSecond()); + return EqualsHelper.nullSafeEquals(storedGUID, keyCheck.getFirst()) ? 
KEY_STATUS.OK : KEY_STATUS.CHANGED; + } + catch(InvalidKeyException e) + { + // key exception indicates that the key has changed - it can't decrypt the + // previously-encrypted data + return KEY_STATUS.CHANGED; + } + } + else + { + return KEY_STATUS.MISSING; + } + } + + // note that this removes _all_ keys in the keystore. Use with care. + public void removeRegisteredKeys(final Set keys) + { + RetryingTransactionHelper retryingTransactionHelper = transactionService.getRetryingTransactionHelper(); + final RetryingTransactionCallback removeKeysCallback = new RetryingTransactionCallback() { public Void execute() throws Throwable { - for(String keyAlias : keys) - { - attributeService.removeAttribute(TOP_LEVEL_KEY, keyAlias); - } + for(String keyAlias : keys) + { + attributeService.removeAttribute(TOP_LEVEL_KEY, keyAlias); + } - return null; + return null; } }; retryingTransactionHelper.doInTransaction(removeKeysCallback, false); - } + } } diff --git a/source/java/org/alfresco/encryption/KeyStoreChecker.java b/source/java/org/alfresco/encryption/KeyStoreChecker.java index 873634a6a2..0d1eceb54f 100644 --- a/source/java/org/alfresco/encryption/KeyStoreChecker.java +++ b/source/java/org/alfresco/encryption/KeyStoreChecker.java @@ -18,9 +18,6 @@ */ package org.alfresco.encryption; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - /** * Checks the repository key stores. * @@ -29,8 +26,6 @@ import org.apache.commons.logging.LogFactory; */ public class KeyStoreChecker { - private static final Log logger = LogFactory.getLog(KeyStoreChecker.class); - private AlfrescoKeyStore mainKeyStore; public KeyStoreChecker() @@ -38,16 +33,16 @@ public class KeyStoreChecker } public void setMainKeyStore(AlfrescoKeyStore mainKeyStore) - { - this.mainKeyStore = mainKeyStore; - } + { + this.mainKeyStore = mainKeyStore; + } - public void validateKeyStores() throws InvalidKeystoreException, MissingKeyException - { - mainKeyStore.validateKeys(); - if(!mainKeyStore.exists()) - { - mainKeyStore.create(); - } - } + public void validateKeyStores() throws InvalidKeystoreException, MissingKeyException + { + mainKeyStore.validateKeys(); + if(!mainKeyStore.exists()) + { + mainKeyStore.create(); + } + } } diff --git a/source/java/org/alfresco/encryption/ReEncryptor.java b/source/java/org/alfresco/encryption/ReEncryptor.java index feb39c99c0..6815bd52b5 100644 --- a/source/java/org/alfresco/encryption/ReEncryptor.java +++ b/source/java/org/alfresco/encryption/ReEncryptor.java @@ -22,8 +22,10 @@ import java.io.Serializable; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Set; import javax.crypto.SealedObject; @@ -70,20 +72,20 @@ public class ReEncryptor implements ApplicationContextAware { private static Log logger = LogFactory.getLog(ReEncryptor.class); - private NodeDAO nodeDAO; - private DictionaryDAO dictionaryDAO; - private QNameDAO qnameDAO; - - private MetadataEncryptor metadataEncryptor; - - private ApplicationContext applicationContext; - private TransactionService transactionService; - private RetryingTransactionHelper transactionHelper; + private NodeDAO nodeDAO; + private DictionaryDAO dictionaryDAO; + private QNameDAO qnameDAO; + + private MetadataEncryptor metadataEncryptor; + + private ApplicationContext applicationContext; + private TransactionService transactionService; + private RetryingTransactionHelper transactionHelper; - private int 
numThreads; - private int chunkSize; - private boolean splitTxns = true; - + private int numThreads; + private int chunkSize; + private boolean splitTxns = true; + private static final QName LOCK = QName.createQName(NamespaceService.SYSTEM_MODEL_1_0_URI, "OrphanReaper"); private JobLockService jobLockService; @@ -92,54 +94,54 @@ public class ReEncryptor implements ApplicationContextAware */ public void setTransactionService(TransactionService transactionService) { - this.transactionService = transactionService; + this.transactionService = transactionService; this.transactionHelper = transactionService.getRetryingTransactionHelper(); } - public void setMetadataEncryptor(MetadataEncryptor metadataEncryptor) - { - this.metadataEncryptor = metadataEncryptor; - } + public void setMetadataEncryptor(MetadataEncryptor metadataEncryptor) + { + this.metadataEncryptor = metadataEncryptor; + } - public MetadataEncryptor getMetadataEncryptor() - { - return metadataEncryptor; - } - - public void setJobLockService(JobLockService jobLockService) - { - this.jobLockService = jobLockService; - } - - public void setNumThreads(int numThreads) - { - this.numThreads = numThreads; - } + public MetadataEncryptor getMetadataEncryptor() + { + return metadataEncryptor; + } + + public void setJobLockService(JobLockService jobLockService) + { + this.jobLockService = jobLockService; + } + + public void setNumThreads(int numThreads) + { + this.numThreads = numThreads; + } - public void setChunkSize(int chunkSize) - { - this.chunkSize = chunkSize; - } + public void setChunkSize(int chunkSize) + { + this.chunkSize = chunkSize; + } - public void setSplitTxns(boolean splitTxns) - { - this.splitTxns = splitTxns; - } + public void setSplitTxns(boolean splitTxns) + { + this.splitTxns = splitTxns; + } - public void setNodeDAO(NodeDAO nodeDAO) - { - this.nodeDAO = nodeDAO; - } + public void setNodeDAO(NodeDAO nodeDAO) + { + this.nodeDAO = nodeDAO; + } - public void setDictionaryDAO(DictionaryDAO dictionaryDAO) - { - this.dictionaryDAO = dictionaryDAO; - } + public void setDictionaryDAO(DictionaryDAO dictionaryDAO) + { + this.dictionaryDAO = dictionaryDAO; + } - public void setQnameDAO(QNameDAO qnameDAO) - { - this.qnameDAO = qnameDAO; - } + public void setQnameDAO(QNameDAO qnameDAO) + { + this.qnameDAO = qnameDAO; + } /** * Attempts to get the lock. If the lock couldn't be taken, then null is returned. @@ -170,12 +172,12 @@ public class ReEncryptor implements ApplicationContextAware jobLockService.refreshLock(lockToken, LOCK, time); } - protected void reEncryptProperties(final List properties, final String lockToken) - { - final Iterator it = properties.iterator(); - - // TODO use BatchProcessWorkerAdaptor? - + protected void reEncryptProperties(final List properties, final String lockToken) + { + final Iterator it = properties.iterator(); + + // TODO use BatchProcessWorkerAdaptor? + BatchProcessor.BatchProcessWorker worker = new BatchProcessor.BatchProcessWorker() { public String getIdentifier(NodePropertyEntity entity) @@ -194,62 +196,62 @@ public class ReEncryptor implements ApplicationContextAware public void process(final NodePropertyEntity entity) throws Throwable { - NodePropertyValue nodePropValue = entity.getValue(); - // TODO check that we have the correct type i.e. 
can be cast to Serializable - Serializable value = nodePropValue.getSerializableValue(); - if(value instanceof SealedObject) - { - SealedObject sealed = (SealedObject)value; + NodePropertyValue nodePropValue = entity.getValue(); + // TODO check that we have the correct type i.e. can be cast to Serializable + Serializable value = nodePropValue.getSerializableValue(); + if(value instanceof SealedObject) + { + SealedObject sealed = (SealedObject)value; - NodePropertyKey propertyKey = entity.getKey(); - QName propertyQName = qnameDAO.getQName(propertyKey.getQnameId()).getSecond(); + NodePropertyKey propertyKey = entity.getKey(); + QName propertyQName = qnameDAO.getQName(propertyKey.getQnameId()).getSecond(); - // decrypt... - Serializable decrypted = metadataEncryptor.decrypt(propertyQName, sealed); + // decrypt... + Serializable decrypted = metadataEncryptor.decrypt(propertyQName, sealed); - // ...and then re-encrypt. The new key will be used. - Serializable resealed = metadataEncryptor.encrypt(propertyQName, decrypted); - - // TODO update resealed using batch update? - // does the node DAO do batch updating? - nodeDAO.setNodeProperties(entity.getNodeId(), Collections.singletonMap(propertyQName, resealed)); - } - else - { - NodePropertyKey nodeKey = entity.getKey(); - QName propertyQName = qnameDAO.getQName(nodeKey.getQnameId()).getSecond(); - logger.warn("Encountered an encrypted property that is not a SealedObject, for node id " + - entity.getNodeId() + ", property " + propertyQName); - } + // ...and then re-encrypt. The new key will be used. + Serializable resealed = metadataEncryptor.encrypt(propertyQName, decrypted); + + // TODO update resealed using batch update? + // does the node DAO do batch updating? + nodeDAO.setNodeProperties(entity.getNodeId(), Collections.singletonMap(propertyQName, resealed)); + } + else + { + NodePropertyKey nodeKey = entity.getKey(); + QName propertyQName = qnameDAO.getQName(nodeKey.getQnameId()).getSecond(); + logger.warn("Encountered an encrypted property that is not a SealedObject, for node id " + + entity.getNodeId() + ", property " + propertyQName); + } } }; BatchProcessWorkProvider provider = new BatchProcessWorkProvider() - { - @Override - public int getTotalEstimatedWorkSize() - { - return properties.size(); - } + { + @Override + public int getTotalEstimatedWorkSize() + { + return properties.size(); + } - @Override - public Collection getNextWork() - { - List sublist = new ArrayList(chunkSize); + @Override + public Collection getNextWork() + { + List sublist = new ArrayList(chunkSize); - synchronized(it) - { - int count = 0; - while(it.hasNext() && count < chunkSize) - { - sublist.add(it.next()); - count++; - } - } + synchronized(it) + { + int count = 0; + while(it.hasNext() && count < chunkSize) + { + sublist.add(it.next()); + count++; + } + } - return sublist; - } - }; + return sublist; + } + }; new BatchProcessor( "Reencryptor", @@ -258,54 +260,54 @@ public class ReEncryptor implements ApplicationContextAware numThreads, chunkSize, applicationContext, logger, 100).process(worker, splitTxns); - } + } - /** - * Re-encrypt using the configured backup keystore to decrypt and the main keystore to encrypt - */ - public int bootstrapReEncrypt() throws MissingKeyException - { - if(!metadataEncryptor.backupKeyAvailable(KeyProvider.ALIAS_METADATA)) - { - throw new MissingKeyException("Backup key store is either not present or does not contain a metadata encryption key"); - } - return reEncrypt(); - } + /** + * Re-encrypt using the configured backup keystore 
to decrypt and the main keystore to encrypt + */ + public int bootstrapReEncrypt() throws MissingKeyException + { + if(!metadataEncryptor.backupKeyAvailable(KeyProvider.ALIAS_METADATA)) + { + throw new MissingKeyException("Backup key store is either not present or does not contain a metadata encryption key"); + } + return reEncrypt(); + } - /** - * Re-encrypt by decrypting using the configured keystore and encrypting using a keystore configured using the provided new key store parameters. - * Called from e.g. JMX. - * - * Assumes that the main key store has been already been reloaded. - * - * Note: it is the responsibility of the end user to ensure that the underlying keystores have been set up appropriately - * i.e. the old key store is backed up to the location defined by the property '${dir.keystore}/backup-keystore' and the new - * key store replaces it. This can be done while the repository is running. - */ - public int reEncrypt() throws MissingKeyException - { - if(!metadataEncryptor.keyAvailable(KeyProvider.ALIAS_METADATA)) - { - throw new MissingKeyException("Main key store is either not present or does not contain a metadata encryption key"); - } - if(!metadataEncryptor.backupKeyAvailable(KeyProvider.ALIAS_METADATA)) - { - throw new MissingKeyException("Backup key store is either not present or does not contain a metadata encryption key"); - } - - int numProps = reEncryptImpl(); - return numProps; - } + /** + * Re-encrypt by decrypting using the configured keystore and encrypting using a keystore configured using the provided new key store parameters. + * Called from e.g. JMX. + * + * Assumes that the main key store has been already been reloaded. + * + * Note: it is the responsibility of the end user to ensure that the underlying keystores have been set up appropriately + * i.e. the old key store is backed up to the location defined by the property '${dir.keystore}/backup-keystore' and the new + * key store replaces it. This can be done while the repository is running. + */ + public int reEncrypt() throws MissingKeyException + { + if(!metadataEncryptor.keyAvailable(KeyProvider.ALIAS_METADATA)) + { + throw new MissingKeyException("Main key store is either not present or does not contain a metadata encryption key"); + } + if(!metadataEncryptor.backupKeyAvailable(KeyProvider.ALIAS_METADATA)) + { + throw new MissingKeyException("Backup key store is either not present or does not contain a metadata encryption key"); + } + + int numProps = reEncryptImpl(); + return numProps; + } - protected int reEncryptImpl() - { - // Take out a re-encryptor lock - RetryingTransactionCallback txnWork = new RetryingTransactionCallback() + protected int reEncryptImpl() + { + // Take out a re-encryptor lock + RetryingTransactionCallback txnWork = new RetryingTransactionCallback() { public String execute() throws Exception { - String lockToken = getLock(20000L); - return lockToken; + String lockToken = getLock(20000L); + return lockToken; } }; @@ -316,30 +318,36 @@ public class ReEncryptor implements ApplicationContextAware return 0; } - // get encrypted properties - Collection propertyDefs = dictionaryDAO.getPropertiesOfDataType(DataTypeDefinition.ENCRYPTED); - // TODO use callback mechanism, or select based on set of nodes? 
- List properties = nodeDAO.selectProperties(propertyDefs); + // get encrypted properties + Collection propertyDefs = dictionaryDAO.getPropertiesOfDataType(DataTypeDefinition.ENCRYPTED); + Set qnames = new HashSet(); + for(PropertyDefinition propDef : propertyDefs) + { + qnames.add(propDef.getName()); + } - if(logger.isDebugEnabled()) - { - logger.debug("Found " + properties.size() + " properties to re-encrypt..."); - } + // TODO use callback mechanism, or select based on set of nodes? + List properties = nodeDAO.selectNodePropertiesByTypes(qnames); - // reencrypt these properties TODO don't call if num props == 0 - reEncryptProperties(properties, lockToken); + if(logger.isDebugEnabled()) + { + logger.debug("Found " + properties.size() + " properties to re-encrypt..."); + } - if(logger.isDebugEnabled()) - { - logger.debug("...done re-encrypting."); - } + // reencrypt these properties TODO don't call if num props == 0 + reEncryptProperties(properties, lockToken); - return properties.size(); - } + if(logger.isDebugEnabled()) + { + logger.debug("...done re-encrypting."); + } - @Override - public void setApplicationContext(ApplicationContext applicationContext) throws BeansException - { - this.applicationContext = applicationContext; - } + return properties.size(); + } + + @Override + public void setApplicationContext(ApplicationContext applicationContext) throws BeansException + { + this.applicationContext = applicationContext; + } } diff --git a/source/java/org/alfresco/repo/domain/node/NodeDAO.java b/source/java/org/alfresco/repo/domain/node/NodeDAO.java index 4ea513d328..bd6ff8cc42 100644 --- a/source/java/org/alfresco/repo/domain/node/NodeDAO.java +++ b/source/java/org/alfresco/repo/domain/node/NodeDAO.java @@ -29,8 +29,8 @@ import java.util.Set; import org.alfresco.repo.node.NodeBulkLoader; import org.alfresco.repo.transaction.TransactionalResourceHelper; +import org.alfresco.service.cmr.dictionary.DataTypeDefinition; import org.alfresco.service.cmr.dictionary.InvalidTypeException; -import org.alfresco.service.cmr.dictionary.PropertyDefinition; import org.alfresco.service.cmr.repository.AssociationRef; import org.alfresco.service.cmr.repository.ChildAssociationRef; import org.alfresco.service.cmr.repository.InvalidNodeRefException; @@ -815,8 +815,8 @@ public interface NodeDAO extends NodeBulkLoader /** * Remove unused transactions from commit time 'fromCommitTime' to commit time 'toCommitTime' * - * @param fromCommitTime delete unused transactions from commit time - * @param toCommitTime delete unused transactions to commit time + * @param fromCommitTime delete unused transactions from commit time + * @param toCommitTime delete unused transactions to commit time * * @return int */ @@ -850,6 +850,16 @@ public interface NodeDAO extends NodeBulkLoader */ public Long getMaxTxnId(); + /** + * @return Returns the minimum node id or 0 if there are no nodes + */ + public Long getMinNodeId(); + + /** + * @return Returns the maximum node id or 0 if there are no nodes + */ + public Long getMaxNodeId(); + /** * Select children by property values */ @@ -862,7 +872,17 @@ public interface NodeDAO extends NodeBulkLoader /** * Used by the re-encryptor to re-encrypt encryptable properties with a new encryption key. 
*/ - public List selectProperties(Collection propertyDefs); + public List selectNodePropertiesByTypes(Set qnames); + + /** + * Select all node properties that are between two node IDs and of the given actual type + * + * @param dataType the actual, original type of the property, as given by one of the constants + * on {@link DataTypeDefinition#TEXT DataTypeDefinition} + * @param minNodeId the minimum node ID (inclusive) + * @param maxNodeId the maximum node ID (exclusive) + */ + public List selectNodePropertiesByDataType(QName dataType, long minNodeId, long maxNodeId); /** * Counts the number of child associations directly under parentNodeId. diff --git a/source/java/org/alfresco/repo/domain/node/ibatis/NodeDAOImpl.java b/source/java/org/alfresco/repo/domain/node/ibatis/NodeDAOImpl.java index 5d88726133..ef184249a3 100644 --- a/source/java/org/alfresco/repo/domain/node/ibatis/NodeDAOImpl.java +++ b/source/java/org/alfresco/repo/domain/node/ibatis/NodeDAOImpl.java @@ -51,7 +51,6 @@ import org.alfresco.repo.domain.node.TransactionEntity; import org.alfresco.repo.domain.node.TransactionQueryEntity; import org.alfresco.repo.domain.qname.QNameDAO; import org.alfresco.service.cmr.dictionary.DictionaryService; -import org.alfresco.service.cmr.dictionary.PropertyDefinition; import org.alfresco.service.cmr.repository.ChildAssociationRef; import org.alfresco.service.cmr.repository.NodeRef; import org.alfresco.service.cmr.repository.StoreRef; @@ -97,12 +96,15 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl private static final String SELECT_NODES_BY_IDS = "alfresco.node.select_NodesByIds"; private static final String SELECT_NODE_PROPERTIES = "alfresco.node.select_NodeProperties"; private static final String SELECT_PROPERTIES_BY_TYPES = "alfresco.node.select_PropertiesByTypes"; + private static final String SELECT_PROPERTIES_BY_ACTUAL_TYPE = "alfresco.node.select_PropertiesByActualType"; private static final String SELECT_NODE_ASPECTS = "alfresco.node.select_NodeAspects"; private static final String INSERT_NODE_PROPERTY = "alfresco.node.insert_NodeProperty"; private static final String UPDATE_PRIMARY_CHILDREN_SHARED_ACL = "alfresco.node.update.update_PrimaryChildrenSharedAcl"; private static final String INSERT_NODE_ASPECT = "alfresco.node.insert_NodeAspect"; private static final String DELETE_NODE_ASPECTS = "alfresco.node.delete_NodeAspects"; private static final String DELETE_NODE_PROPERTIES = "alfresco.node.delete_NodeProperties"; + private static final String SELECT_NODE_MIN_ID = "alfresco.node.select_NodeMinId"; + private static final String SELECT_NODE_MAX_ID = "alfresco.node.select_NodeMaxId"; private static final String SELECT_NODES_WITH_ASPECT_IDS = "alfresco.node.select_NodesWithAspectIds"; private static final String INSERT_NODE_ASSOC = "alfresco.node.insert.insert_NodeAssoc"; private static final String UPDATE_NODE_ASSOC = "alfresco.node.update_NodeAssoc"; @@ -349,6 +351,18 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl return template.update(UPDATE_NODE_BULK_TOUCH, ids); } + @Override + public Long getMinNodeId() + { + return (Long) template.selectOne(SELECT_NODE_MIN_ID); + } + + @Override + public Long getMaxNodeId() + { + return (Long) template.selectOne(SELECT_NODE_MAX_ID); + } + @Override protected void updatePrimaryChildrenSharedAclId( Long txnId, @@ -538,6 +552,54 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl return makePersistentPropertiesMap(rows); } + @Override + public List selectNodePropertiesByTypes(Set qnames) + { + final List properties = new 
ArrayList(); + + // qnames of properties that are encrypted + Set qnameIds = qnameDAO.convertQNamesToIds(qnames, false); + if(qnameIds.size() > 0) + { + IdsEntity param = new IdsEntity(); + param.setIds(new ArrayList(qnameIds)); + // TODO - use a callback approach + template.select(SELECT_PROPERTIES_BY_TYPES, param, new ResultHandler() + { + @Override + public void handleResult(ResultContext context) + { + properties.add((NodePropertyEntity)context.getResultObject()); + } + }); + } + + return properties; + } + + @Override + public List selectNodePropertiesByDataType(QName dataType, long minNodeId, long maxNodeId) + { + int typeOrdinal = NodePropertyValue.convertToTypeOrdinal(dataType); + + IdsEntity ids = new IdsEntity(); + ids.setIdOne((long)typeOrdinal); + ids.setIdTwo(minNodeId); + ids.setIdThree(maxNodeId); + final List properties = new ArrayList(); + + template.select(SELECT_PROPERTIES_BY_ACTUAL_TYPE, ids, new ResultHandler() + { + @Override + public void handleResult(ResultContext context) + { + properties.add((NodePropertyEntity)context.getResultObject()); + } + }); + + return properties; + } + @Override protected int deleteNodeProperties(Long nodeId, Set qnameIds) { @@ -1565,9 +1627,9 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl @Override public int deleteTxnsUnused(long fromCommitTime, long toCommitTime) { - TransactionQueryEntity txnQuery = new TransactionQueryEntity(); - txnQuery.setMinCommitTime(fromCommitTime); - txnQuery.setMaxCommitTime(toCommitTime); + TransactionQueryEntity txnQuery = new TransactionQueryEntity(); + txnQuery.setMinCommitTime(fromCommitTime); + txnQuery.setMaxCommitTime(toCommitTime); int numDeleted = template.delete(DELETE_TXNS_UNUSED, txnQuery); return numDeleted; } @@ -1602,37 +1664,6 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl return template.selectOne(SELECT_TXN_MAX_ID); } - @Override - public List selectProperties(Collection propertyDefs) - { - final List properties = new ArrayList(); - - Set qnames = new HashSet(); - for(PropertyDefinition propDef : propertyDefs) - { - qnames.add(propDef.getName()); - } - - // qnames of properties that are encrypted - Set qnameIds = qnameDAO.convertQNamesToIds(qnames, false); - if(qnameIds.size() > 0) - { - IdsEntity param = new IdsEntity(); - param.setIds(new ArrayList(qnameIds)); - // TODO - use a callback approach - template.select(SELECT_PROPERTIES_BY_TYPES, param, new ResultHandler() - { - @Override - public void handleResult(ResultContext context) - { - properties.add((NodePropertyEntity)context.getResultObject()); - } - }); - } - - return properties; - } - public int countChildAssocsByParent(Long parentNodeId, boolean isPrimary) { NodeEntity parentNode = new NodeEntity(); diff --git a/source/java/org/alfresco/repo/domain/patch/PatchDAO.java b/source/java/org/alfresco/repo/domain/patch/PatchDAO.java index 235ede71d8..66dfc8763c 100644 --- a/source/java/org/alfresco/repo/domain/patch/PatchDAO.java +++ b/source/java/org/alfresco/repo/domain/patch/PatchDAO.java @@ -21,6 +21,8 @@ package org.alfresco.repo.domain.patch; import java.util.List; import java.util.Set; + +import org.alfresco.repo.domain.node.NodeDAO; import org.alfresco.service.cmr.repository.NodeRef; import org.alfresco.service.namespace.QName; import org.alfresco.util.Pair; @@ -36,6 +38,10 @@ public interface PatchDAO { // DM-related + /** + * @deprecated in 4.1: use {@link NodeDAO#getMaxNodeId()} + */ + @Deprecated public long getMaxAdmNodeID(); /** diff --git a/source/java/org/alfresco/repo/node/db/NodeStringLengthWorker.java 
b/source/java/org/alfresco/repo/node/db/NodeStringLengthWorker.java new file mode 100644 index 0000000000..ee53220476 --- /dev/null +++ b/source/java/org/alfresco/repo/node/db/NodeStringLengthWorker.java @@ -0,0 +1,430 @@ +/* + * Copyright (C) 2005-2010 Alfresco Software Limited. + * + * This file is part of Alfresco + * + * Alfresco is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * Alfresco is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with Alfresco. If not, see . + */ +package org.alfresco.repo.node.db; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; + +import org.alfresco.error.AlfrescoRuntimeException; +import org.alfresco.repo.batch.BatchProcessWorkProvider; +import org.alfresco.repo.batch.BatchProcessor; +import org.alfresco.repo.batch.BatchProcessor.BatchProcessWorker; +import org.alfresco.repo.batch.BatchProcessor.BatchProcessWorkerAdaptor; +import org.alfresco.repo.domain.node.NodeDAO; +import org.alfresco.repo.domain.node.NodePropertyEntity; +import org.alfresco.repo.domain.node.NodePropertyValue; +import org.alfresco.repo.domain.qname.QNameDAO; +import org.alfresco.repo.domain.schema.SchemaBootstrap; +import org.alfresco.repo.lock.JobLockService; +import org.alfresco.repo.lock.JobLockService.JobLockRefreshCallback; +import org.alfresco.repo.lock.LockAcquisitionException; +import org.alfresco.repo.policy.BehaviourFilter; +import org.alfresco.repo.transaction.RetryingTransactionHelper; +import org.alfresco.service.cmr.dictionary.DataTypeDefinition; +import org.alfresco.service.namespace.NamespaceService; +import org.alfresco.service.namespace.QName; +import org.alfresco.service.transaction.TransactionService; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.quartz.Job; +import org.quartz.JobDataMap; +import org.quartz.JobExecutionContext; +import org.quartz.JobExecutionException; +import org.springframework.beans.BeansException; +import org.springframework.context.ApplicationContext; +import org.springframework.context.ApplicationContextAware; + +/** + *

+ * <b>Max String Length Worker</b>
+ *
+ * <b>What it is</b>
+ * A worker for a scheduled job that checks and adjusts string storage for persisted strings in the system.
+ *
+ * <b>Settings that control the behaviour</b>
+ * <ul>
+ *    <li>${system.maximumStringLength} - the maximum length of a string that can be persisted in the
+ *        <b>alf_node_properties.string_value</b> column.</li>
+ *    <li>${system.maximumStringLength.jobQueryRange} - the node ID range to query for.
+ *        The process will repeat from the first to the last node, querying for up to this many nodes.
+ *        Only reduce the value if the NodeDAO query takes a long time.</li>
+ *    <li>${system.maximumStringLength.jobThreadCount} - the number of threads that will handle persistence checks and changes.
+ *        Increase or decrease this to allow for free CPU capacity on the machine executing the job.</li>
+ * </ul>
+ *
+ * <b>How to use it</b>
+ * sdfsf + * + * @author Derek Hulley + * @since 4.1.9.2 + */ +public class NodeStringLengthWorker implements ApplicationContextAware +{ + private static final QName LOCK = QName.createQName(NamespaceService.SYSTEM_MODEL_1_0_URI, "NodeStringLengthWorker"); + private static final long LOCK_TTL = 60000L; + + private static Log logger = LogFactory.getLog(NodeStringLengthWorker.class); + + private final NodeDAO nodeDAO; + private final JobLockService jobLockService; + private final TransactionService transactionService; + private final QNameDAO qnameDAO; + private final BehaviourFilter behaviourFilter; + private ApplicationContext ctx; + + private final int queryRange; + private final int threadCount; + private final int batchSize; + + public NodeStringLengthWorker( + NodeDAO nodeDAO, JobLockService jobLockService, TransactionService transactionService, QNameDAO qnameDAO, + BehaviourFilter behaviourFilter, + int queryRange, int threadCount) + { + this.nodeDAO = nodeDAO; + this.jobLockService = jobLockService; + this.transactionService = transactionService; + this.qnameDAO = qnameDAO; + this.behaviourFilter = behaviourFilter; + + this.queryRange = queryRange; + this.threadCount = threadCount; + this.batchSize = 100; + } + + /** + * Set the application context for event publishing during batch processing + */ + @Override + public void setApplicationContext(ApplicationContext applicationContext) throws BeansException + { + this.ctx = applicationContext; + } + + /** + * Performs the work, including logging details of progress. + */ + public NodeStringLengthWorkResult execute() + { + // Build refresh callback + final NodeStringLengthWorkResult progress = new NodeStringLengthWorkResult(); + JobLockRefreshCallback lockCallback = new JobLockRefreshCallback() + { + @Override + public void lockReleased() + { + progress.inProgress.set(false); + } + + @Override + public boolean isActive() + { + return progress.inProgress.get(); + } + }; + + String lockToken = null; + try + { + progress.inProgress.set(true); + // Get the lock + lockToken = jobLockService.getLock(LOCK, LOCK_TTL); + // Start the refresh timer + jobLockService.refreshLock(lockToken, LOCK, LOCK_TTL, lockCallback); + + // Now we know that we'll do something + if (logger.isInfoEnabled()) + { + logger.info("NodeStringLengthWorker: Starting"); + } + + // Do the work + doWork(progress); + // Done + if (logger.isInfoEnabled()) + { + logger.info("NodeStringLengthWorker: " + progress); + } + } + catch (LockAcquisitionException e) + { + if (logger.isDebugEnabled()) + { + logger.debug("Skipping node string length job: " + e.getMessage()); + } + } + catch (Exception e) + { + progress.inProgress.set(false); + logger.error("Node string length job " + progress); + logger.error("Stopping node string length job with exception.", e); + } + finally + { + if (lockToken != null) + { + jobLockService.releaseLock(lockToken, LOCK); + } + progress.inProgress.set(false); // The background + } + // Done + return progress; + } + + /** + * @param progress the thread-safe progress + */ + private synchronized void doWork(NodeStringLengthWorkResult progress) throws Exception + { + // Build batch processor + BatchProcessWorkProvider workProvider = new NodeStringLengthWorkProvider(progress); + BatchProcessWorker worker = new NodeStringLengthBatch(progress); + RetryingTransactionHelper retryingTransactionHelper = transactionService.getRetryingTransactionHelper(); + retryingTransactionHelper.setForceWritable(true); + + BatchProcessor batchProcessor = new BatchProcessor( + 
"NodeStringLengthWorker", + retryingTransactionHelper, + workProvider, + threadCount, + batchSize, + ctx, + logger, + 1000); + batchProcessor.process(worker, true); + } + + /** + * Work provider for batch job providing string properties to process + * @author Derek Hulley + * @since 4.1.9.2 + */ + private class NodeStringLengthWorkProvider implements BatchProcessWorkProvider + { + private final long maxNodeId; + private final NodeStringLengthWorkResult progress; + + private NodeStringLengthWorkProvider(NodeStringLengthWorkResult progress) + { + this.progress = progress; + this.maxNodeId = nodeDAO.getMaxNodeId(); + } + + @Override + public int getTotalEstimatedWorkSize() + { + return -1; + } + + @Override + public Collection getNextWork() + { + // Check that there are not too many errors + if (progress.errors.get() > 1000) + { + logger.warn("Node string length work terminating; too many errors."); + return Collections.emptyList(); + } + + // Keep shifting the query window up until we get results or we hit the original max node ID + List ret = Collections.emptyList(); + while (ret.isEmpty() && progress.currentMinNodeId.get() < maxNodeId) + { + // Calculate the node ID range + Long minNodeId = null; + if (progress.currentMinNodeId.get() == 0L) + { + minNodeId = nodeDAO.getMinNodeId(); + progress.currentMinNodeId.set(minNodeId); + } + else + { + minNodeId = progress.currentMinNodeId.addAndGet(queryRange); + } + long maxNodeId = minNodeId + queryRange; + + // Query for the properties + ret = nodeDAO.selectNodePropertiesByDataType(DataTypeDefinition.TEXT, minNodeId, maxNodeId); + } + + // Done + if (logger.isDebugEnabled()) + { + logger.debug("Node string length work provider found " + ret.size() + " new property entities."); + } + return ret; + } + } + + /** + * Class that does the actual node manipulation to change the string storage + * @author Derek Hulley + * @since 4.1.9.2 + */ + private class NodeStringLengthBatch extends BatchProcessWorkerAdaptor + { + private final int typeOrdinalText = NodePropertyValue.convertToTypeOrdinal(DataTypeDefinition.TEXT); + private final int typeOrdinalAny = NodePropertyValue.convertToTypeOrdinal(DataTypeDefinition.ANY); + private final NodeStringLengthWorkResult progress; + + private NodeStringLengthBatch(NodeStringLengthWorkResult progress) + { + this.progress = progress; + } + + @Override + public void process(NodePropertyEntity entry) throws Throwable + { + progress.propertiesProcessed.incrementAndGet(); + + try + { + Long nodeId = entry.getNodeId(); + NodePropertyValue prop = entry.getValue(); + // Get the current string value + String text = (String) prop.getValue(DataTypeDefinition.TEXT); + + // Decide if the string needs changing or not + boolean repersist = false; + int persistedTypeOrdinal = prop.getPersistedType().intValue(); + if (text.length() > SchemaBootstrap.getMaxStringLength()) + { + // The text needs to be stored as a serializable_value (ANY) + if (typeOrdinalAny != persistedTypeOrdinal) + { + repersist = true; + } + } + else + { + // The text is shorter than the current max, so it should be stored as a string_value (TEXT) + if (typeOrdinalText != persistedTypeOrdinal) + { + repersist = true; + } + } + + // Only do any work if we need to + if (repersist) + { + // We do not want any behaviours associated with our transactions + behaviourFilter.disableBehaviour(); + + progress.propertiesChanged.incrementAndGet(); + if (logger.isTraceEnabled()) + { + logger.trace("Fixing property " + getIdentifier(entry) + ". 
Value: " + text); + } + else if (logger.isDebugEnabled()) + { + logger.debug("Fixing property " + getIdentifier(entry)); + } + Long propQNameId = entry.getKey().getQnameId(); + QName propQName = qnameDAO.getQName(propQNameId).getSecond(); + nodeDAO.removeNodeProperties(nodeId, Collections.singleton(propQName)); + nodeDAO.addNodeProperty(nodeId, propQName, text); + } + } + catch (Exception e) + { + // Record the failure + progress.errors.incrementAndGet(); + // Rethrow so that the processing framework can handle things + throw e; + } + } + + @Override + public String getIdentifier(NodePropertyEntity entry) + { + Long nodeId = entry.getNodeId(); + NodePropertyValue prop = entry.getValue(); + return ("Property with persisted type " + prop.getPersistedType() + " on node " + nodeDAO.getNodePair(nodeId)); + } + } + + /** + * Thread-safe helper class to carry the job progress information + * @author Derek Hulley + * @since 4.1.9.2 + */ + public static class NodeStringLengthWorkResult + { + private final AtomicBoolean inProgress = new AtomicBoolean(false); + private final AtomicInteger propertiesProcessed = new AtomicInteger(0); + private final AtomicInteger propertiesChanged = new AtomicInteger(0); + private final AtomicInteger errors = new AtomicInteger(0); + private final AtomicLong currentMinNodeId = new AtomicLong(0L); + @Override + public String toString() + { + String part1 = "Changed"; + String part2 = String.format(" %4d out of a potential %4d properties. ", propertiesChanged.get(), propertiesProcessed.get()); + String part3 = String.format("[%2d Errors]", errors.get()); + return part1 + part2 + part3; + } + + public int getPropertiesProcessed() + { + return propertiesProcessed.get(); + } + + public int getPropertiesChanged() + { + return propertiesChanged.get(); + } + + public int getErrors() + { + return errors.get(); + } + } + + /** + * A scheduled job that checks and adjusts string storage for persisted strings in the system. + *

+ * Job data:
+ * <ul>
+ *    <li>nodeStringLengthWorker - The worker that performs the actual processing.</li>
+ * </ul>
+ * + * @author Derek Hulley + * @since 4.1.9.2 + * @see NodeStringLengthWorker + */ + public static class NodeStringLengthJob implements Job + { + public static final String JOB_DATA_NODE_WORKER = "nodeStringLengthWorker"; + + public void execute(JobExecutionContext context) throws JobExecutionException + { + JobDataMap jobData = context.getJobDetail().getJobDataMap(); + // extract the content Cleanup to use + Object nodeStringLengthWorkerObj = jobData.get(JOB_DATA_NODE_WORKER); + if (nodeStringLengthWorkerObj == null || !(nodeStringLengthWorkerObj instanceof NodeStringLengthWorker)) + { + throw new AlfrescoRuntimeException( + "MaxStringLengthJob data '" + JOB_DATA_NODE_WORKER + "' must reference a " + NodeStringLengthWorker.class.getSimpleName()); + } + NodeStringLengthWorker worker = (NodeStringLengthWorker) nodeStringLengthWorkerObj; + worker.execute(); + } + } +} diff --git a/source/test-java/org/alfresco/repo/domain/node/NodeDAOTest.java b/source/test-java/org/alfresco/repo/domain/node/NodeDAOTest.java index 8112de88d8..41e4d8de96 100644 --- a/source/test-java/org/alfresco/repo/domain/node/NodeDAOTest.java +++ b/source/test-java/org/alfresco/repo/domain/node/NodeDAOTest.java @@ -19,9 +19,12 @@ package org.alfresco.repo.domain.node; import java.io.Serializable; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicLong; import junit.framework.TestCase; @@ -33,8 +36,10 @@ import org.alfresco.repo.domain.node.NodeDAO.NodeRefQueryCallback; import org.alfresco.repo.transaction.RetryingTransactionHelper; import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback; import org.alfresco.service.ServiceRegistry; +import org.alfresco.service.cmr.dictionary.DataTypeDefinition; import org.alfresco.service.cmr.repository.NodeRef; import org.alfresco.service.cmr.repository.StoreRef; +import org.alfresco.service.namespace.QName; import org.alfresco.service.transaction.TransactionService; import org.alfresco.test_category.OwnJVMTestsCategory; import org.alfresco.util.ApplicationContextHelper; @@ -106,6 +111,80 @@ public class NodeDAOTest extends TestCase assertNotNull("Txn ID should be present by forcing it", txnId2); } + public void testSelectNodePropertiesByTypes() throws Exception + { + final Set qnames = Collections.singleton(ContentModel.PROP_NAME); + RetryingTransactionCallback> callback = new RetryingTransactionCallback>() + { + public List execute() throws Throwable + { + return nodeDAO.selectNodePropertiesByTypes(qnames); + } + }; + List props = txnHelper.doInTransaction(callback, true); + if (props.size() == 0) + { + return; + } + NodePropertyEntity prop = props.get(0); + String value = prop.getValue().getStringValue(); + assertNotNull(value); + } + + public void testSelectNodePropertiesByDataType() throws Exception + { + // Prepare the bits that repeat the actual query + final AtomicLong min = new AtomicLong(0L); + final AtomicLong max = new AtomicLong(0L); + RetryingTransactionCallback> callback = new RetryingTransactionCallback>() + { + public List execute() throws Throwable + { + long minNodeId = min.get(); + long maxNodeId = max.get(); + return nodeDAO.selectNodePropertiesByDataType(DataTypeDefinition.TEXT, minNodeId, maxNodeId); + } + }; + + // Get the current max node id + Long minNodeId = nodeDAO.getMinNodeId(); + if (minNodeId == null) + { + return; // there are no nodes! 
+ } + Long maxNodeId = nodeDAO.getMaxNodeId(); // won't be null at this point as we have a min + min.set(minNodeId.longValue()); + + // Iterate across all nodes in the system + while (min.longValue() <= maxNodeId.longValue()) + { + max.set(min.get() + 1000L); // 1K increments + + // Get the properties + List props = txnHelper.doInTransaction(callback, true); + for (NodePropertyEntity prop : props) + { + // Check the property + Long nodeId = prop.getNodeId(); + assertNotNull(nodeId); + assertTrue("the min should be inclusive.", min.longValue() <= nodeId.longValue()); + assertTrue("the max should be exclusive.", max.longValue() > nodeId.longValue()); + NodePropertyValue propVal = prop.getValue(); + assertNotNull(propVal); + assertEquals("STRING", propVal.getActualTypeString()); + String valueStr = propVal.getStringValue(); + Serializable valueSer = propVal.getSerializableValue(); + assertTrue("Test is either TEXT or SERIALIZABLE", valueStr != null || valueSer != null); + String value = (String) propVal.getValue(DataTypeDefinition.TEXT); + assertNotNull(value); + // This all checks out + } + + // Shift the window up + min.set(max.get()); + } + } + public void testGetNodesWithAspects() throws Throwable { final NodeRefQueryCallback callback = new NodeRefQueryCallback() @@ -130,6 +209,16 @@ public class NodeDAOTest extends TestCase }, true); } + public void testGetMinMaxNodeId() throws Exception + { + Long minNodeId = nodeDAO.getMinNodeId(); + assertNotNull(minNodeId); + assertTrue(minNodeId.longValue() > 0L); + Long maxNodeId = nodeDAO.getMaxNodeId(); + assertNotNull(maxNodeId); + assertTrue(maxNodeId.longValue() > minNodeId.longValue()); + } + public void testGetPrimaryChildAcls() throws Throwable { List acls = nodeDAO.getPrimaryChildrenAcls(1L); @@ -150,6 +239,25 @@ public class NodeDAOTest extends TestCase } } + public void testCacheNodes() throws Throwable + { + Long minNodeId = nodeDAO.getMinNodeId(); + final List nodeIds = new ArrayList(10000); + for (long i = 0; i < 1000; i++) + { + nodeIds.add(Long.valueOf(minNodeId.longValue() + i)); + } + RetryingTransactionCallback callback = new RetryingTransactionCallback() + { + public Void execute() throws Throwable + { + nodeDAO.cacheNodesById(nodeIds); + return null; + } + }; + txnHelper.doInTransaction(callback, true); + } + /** * Ensure that the {@link NodeEntity} values cached as root nodes are valid instances. *

diff --git a/source/test-java/org/alfresco/repo/node/db/DbNodeServiceImplTest.java b/source/test-java/org/alfresco/repo/node/db/DbNodeServiceImplTest.java index 7bd9c2325b..75ca185e5c 100644 --- a/source/test-java/org/alfresco/repo/node/db/DbNodeServiceImplTest.java +++ b/source/test-java/org/alfresco/repo/node/db/DbNodeServiceImplTest.java @@ -32,8 +32,10 @@ import org.alfresco.model.ContentModel; import org.alfresco.repo.domain.node.NodeDAO; import org.alfresco.repo.domain.node.NodeDAO.ChildAssocRefQueryCallback; import org.alfresco.repo.domain.node.Transaction; +import org.alfresco.repo.domain.schema.SchemaBootstrap; import org.alfresco.repo.node.BaseNodeServiceTest; import org.alfresco.repo.node.cleanup.NodeCleanupRegistry; +import org.alfresco.repo.node.db.NodeStringLengthWorker.NodeStringLengthWorkResult; import org.alfresco.repo.transaction.AlfrescoTransactionSupport; import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback; import org.alfresco.repo.transaction.TransactionListenerAdapter; @@ -48,6 +50,9 @@ import org.alfresco.service.transaction.TransactionService; import org.alfresco.test_category.OwnJVMTestsCategory; import org.alfresco.util.Pair; import org.junit.experimental.categories.Category; +import org.hibernate.dialect.Dialect; +import org.hibernate.dialect.MySQLInnoDBDialect; +import org.springframework.context.event.ContextRefreshedEvent; import org.springframework.extensions.surf.util.I18NUtil; /** @@ -696,4 +701,88 @@ public class DbNodeServiceImplTest extends BaseNodeServiceTest // expect to go here } } + + /** + * Check that the maximum string lengths can be adjusted up and down. + * Note that this test ONLY works for MySQL because the other databases cannot support more than 1024 characters + * in the string_value column and the value may not be set to less than 1024. + * + * @see SchemaBootstrap#DEFAULT_MAX_STRING_LENGTH + */ + @SuppressWarnings("deprecation") + public void testNodeStringLengthWorker() throws Exception + { + setComplete(); + endTransaction(); + + // Skip of the dialect is not MySQL + Dialect dialect = (Dialect) applicationContext.getBean("dialect"); + if (!(dialect instanceof MySQLInnoDBDialect)) + { + return; +} + SchemaBootstrap schemaBootstrap = (SchemaBootstrap) applicationContext.getBean("schemaBootstrap"); + assertEquals("Expected max string length to be MAX", Integer.MAX_VALUE, SchemaBootstrap.getMaxStringLength()); + + NodeStringLengthWorker worker = (NodeStringLengthWorker) applicationContext.getBean("nodeStringLengthWorker"); + + // If we run this worker just to get everything into the correct starting state. 
+ // If it does not work, then that will be detected later anyway + NodeStringLengthWorkResult result = worker.execute(); + assertTrue(result.getPropertiesProcessed() > 0); + assertEquals(0, result.getErrors()); + + // Now set the max string length to DEFAULT_MAX_STRING_LENGTH characters + schemaBootstrap.setMaximumStringLength(SchemaBootstrap.DEFAULT_MAX_STRING_LENGTH); + schemaBootstrap.onApplicationEvent(new ContextRefreshedEvent(applicationContext)); + // Move any values persisted before the test + result = worker.execute(); + int firstPassChanged = result.getPropertiesChanged(); + + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < SchemaBootstrap.DEFAULT_MAX_STRING_LENGTH + 1; i++) + { + sb.append("A"); + } + final String longString = sb.toString(); + // Persist the property using the default MAX_VALUE so that it does into the string_value + schemaBootstrap.setMaximumStringLength(Integer.MAX_VALUE); + schemaBootstrap.onApplicationEvent(new ContextRefreshedEvent(applicationContext)); + txnService.getRetryingTransactionHelper().doInTransaction(new RetryingTransactionCallback() + { + @Override + public Void execute() throws Throwable + { + nodeService.setProperty(rootNodeRef, PROP_QNAME_STRING_VALUE, longString); + return null; + } + }); + + // The worker should do nothing + result = worker.execute(); + assertEquals(firstPassChanged, result.getPropertiesChanged()); + + // Now bring the limit down to the match for other DBs + schemaBootstrap.setMaximumStringLength(SchemaBootstrap.DEFAULT_MAX_STRING_LENGTH); + schemaBootstrap.onApplicationEvent(new ContextRefreshedEvent(applicationContext)); + result = worker.execute(); + assertEquals(firstPassChanged + 1, result.getPropertiesChanged()); + + // Put the limit back to the MySQL default and all the large values should go back into MySQL's TEXT field + schemaBootstrap.setMaximumStringLength(Integer.MAX_VALUE); + schemaBootstrap.onApplicationEvent(new ContextRefreshedEvent(applicationContext)); + result = worker.execute(); + assertEquals(firstPassChanged + 1, result.getPropertiesChanged()); + + // Check that our string is still OK + String checkLongString = txnService.getRetryingTransactionHelper().doInTransaction(new RetryingTransactionCallback() + { + @Override + public String execute() throws Throwable + { + return (String) nodeService.getProperty(rootNodeRef, PROP_QNAME_STRING_VALUE); + } + }); + assertEquals("String manipulation corrupted the long string value. ", longString, checkLongString); + } }
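Usage reference for the worker introduced by this patch: the sketch below is a minimal, illustrative example of driving NodeStringLengthWorker programmatically, mirroring what DbNodeServiceImplTest does. It assumes a running Alfresco application context; the bean name "nodeStringLengthWorker" and the NodeStringLengthWorkResult accessors come from this patch, while the wrapper class and method are hypothetical and only for illustration.

// Minimal sketch (assumption: an Alfresco ApplicationContext is available and the
// "nodeStringLengthWorker" bean is defined as in node-services-context.xml in this patch).
import org.alfresco.repo.node.db.NodeStringLengthWorker;
import org.alfresco.repo.node.db.NodeStringLengthWorker.NodeStringLengthWorkResult;
import org.springframework.context.ApplicationContext;

public class NodeStringLengthWorkerExample
{
    public static void runOnce(ApplicationContext ctx)
    {
        // Normally the NodeStringLengthJob scheduled job calls execute() via the cron
        // expression in system.maximumStringLength.jobCronExpression; calling it directly
        // performs one pass over the node ID range, re-persisting oversized string values.
        NodeStringLengthWorker worker = (NodeStringLengthWorker) ctx.getBean("nodeStringLengthWorker");
        NodeStringLengthWorkResult result = worker.execute();

        // The result object exposes simple counters for monitoring.
        System.out.println("Properties processed: " + result.getPropertiesProcessed());
        System.out.println("Properties changed:   " + result.getPropertiesChanged());
        System.out.println("Errors:               " + result.getErrors());
    }
}

In normal operation the job is controlled by the new repository.properties settings in this patch: the default cron expression ("* * * * * ? 2099") appears to defer execution until explicitly rescheduled, while system.maximumStringLength.jobQueryRange=10000 and system.maximumStringLength.jobThreadCount=4 control the node ID query window and the batch concurrency.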