diff --git a/config/alfresco/authority-services-context.xml b/config/alfresco/authority-services-context.xml index 999a8e9e0d..f36a751aeb 100644 --- a/config/alfresco/authority-services-context.xml +++ b/config/alfresco/authority-services-context.xml @@ -82,6 +82,12 @@ + + + + + + diff --git a/config/alfresco/bootstrap-context.xml b/config/alfresco/bootstrap-context.xml index 0553bef9dc..ddaee27dc7 100644 --- a/config/alfresco/bootstrap-context.xml +++ b/config/alfresco/bootstrap-context.xml @@ -784,6 +784,12 @@ /${spaces.company_home.childname} + + + + + + diff --git a/config/alfresco/cache-context.xml b/config/alfresco/cache-context.xml index ab9612da21..6f914a2fbc 100644 --- a/config/alfresco/cache-context.xml +++ b/config/alfresco/cache-context.xml @@ -148,6 +148,35 @@ + + + + + + + + + + org.alfresco.cache.node.allRootNodesCache + + + + + + + + + + + + + org.alfresco.cache.node.allRootNodesTransactionalCache + + + + + + @@ -176,7 +205,7 @@ org.alfresco.cache.node.nodesTransactionalCache - + @@ -209,7 +238,7 @@ org.alfresco.cache.node.aspectsTransactionalCache - + @@ -392,7 +421,73 @@ org.alfresco.authorityTransactionalCache - + + + + + + + + + + + + + + + + + + + org.alfresco.cache.authorityToChildAuthorityCache + + + + + + + + + + + + + org.alfresco.authorityToChildAuthorityTransactionalCache + + + + + + + + + + + + + + + + + + + + org.alfresco.cache.zoneToAuthorityCache + + + + + + + + + + + + + org.alfresco.zoneToAuthorityTransactionalCache + + diff --git a/config/alfresco/dao/dao-context.xml b/config/alfresco/dao/dao-context.xml index fd587769be..1c0edc83f9 100644 --- a/config/alfresco/dao/dao-context.xml +++ b/config/alfresco/dao/dao-context.xml @@ -115,6 +115,7 @@ + diff --git a/config/alfresco/ehcache-default.xml b/config/alfresco/ehcache-default.xml index a9e9b761e9..429564cbf4 100644 --- a/config/alfresco/ehcache-default.xml +++ b/config/alfresco/ehcache-default.xml @@ -42,6 +42,13 @@ overflowToDisk="false" statistics="false" /> + + + + + + + + + + + + + + + + + + + diff --git a/config/alfresco/ibatis/org.hibernate.dialect.Dialect/node-common-SqlMap.xml b/config/alfresco/ibatis/org.hibernate.dialect.Dialect/node-common-SqlMap.xml index 206e2ebe66..389a164a3f 100644 --- a/config/alfresco/ibatis/org.hibernate.dialect.Dialect/node-common-SqlMap.xml +++ b/config/alfresco/ibatis/org.hibernate.dialect.Dialect/node-common-SqlMap.xml @@ -921,6 +921,42 @@ assoc.assoc_index ASC, assoc.id ASC + + + + where + parentNode.id = #{parentNode.id} + and assoc.child_node_id = #{childNode.id} + + and assoc.type_qname_id in + + #{item} + + + and assoc.type_qname_id = #{typeQNameId} + and assoc.qname_crc = #{qnameCrc} + and assoc.qname_ns_id = #{qnameNamespaceId} + and assoc.qname_localname = #{qnameLocalName} + and assoc.is_primary = #{isPrimary} + and assoc.child_node_name = #{childNodeName} + and assoc.child_node_name_crc = #{childNodeNameCrc} + + and child_node_name_crc in + + #{item} + + + + and childNode.type_qname_id in + + #{item} + + + + and parentStore.id = childStore.id + childStore.id]]> + + + + + + + + + \ No newline at end of file diff --git a/config/alfresco/ibatis/org.hibernate.dialect.MySQLInnoDBDialect/node-select-children-SqlMap.xml b/config/alfresco/ibatis/org.hibernate.dialect.MySQLInnoDBDialect/node-select-children-SqlMap.xml new file mode 100644 index 0000000000..ee4d233790 --- /dev/null +++ b/config/alfresco/ibatis/org.hibernate.dialect.MySQLInnoDBDialect/node-select-children-SqlMap.xml @@ -0,0 +1,16 @@ + + + + + + + + + + diff --git 
a/config/alfresco/public-services-security-context.xml b/config/alfresco/public-services-security-context.xml index 3822fef80e..624fdb9476 100644 --- a/config/alfresco/public-services-security-context.xml +++ b/config/alfresco/public-services-security-context.xml @@ -359,6 +359,7 @@ org.alfresco.service.cmr.repository.NodeService.exists=ACL_ALLOW org.alfresco.service.cmr.repository.NodeService.getNodeStatus=ACL_NODE.0.sys:base.ReadProperties org.alfresco.service.cmr.repository.NodeService.getNodeRef=AFTER_ACL_NODE.sys:base.ReadProperties + org.alfresco.service.cmr.repository.NodeService.getAllRootNodes=ACL_NODE.0.sys:base.ReadProperties,AFTER_ACL_NODE.sys:base.ReadProperties org.alfresco.service.cmr.repository.NodeService.getRootNode=ACL_NODE.0.sys:base.ReadProperties org.alfresco.service.cmr.repository.NodeService.createNode=ACL_NODE.0.sys:base.CreateChildren org.alfresco.service.cmr.repository.NodeService.moveNode=ACL_NODE.0.sys:base.DeleteNode,ACL_NODE.1.sys:base.CreateChildren @@ -758,6 +759,7 @@ org.alfresco.service.cmr.security.AuthorityService.deleteAuthority=ACL_METHOD.ROLE_ADMINISTRATOR org.alfresco.service.cmr.security.AuthorityService.getContainedAuthorities=ACL_ALLOW org.alfresco.service.cmr.security.AuthorityService.getContainingAuthorities=ACL_ALLOW + org.alfresco.service.cmr.security.AuthorityService.getContainingAuthoritiesInZone=ACL_ALLOW org.alfresco.service.cmr.security.AuthorityService.getShortName=ACL_ALLOW org.alfresco.service.cmr.security.AuthorityService.getName=ACL_ALLOW org.alfresco.service.cmr.security.AuthorityService.authorityExists=ACL_ALLOW diff --git a/config/alfresco/repository.properties b/config/alfresco/repository.properties index 9757a72da1..178ac98842 100644 --- a/config/alfresco/repository.properties +++ b/config/alfresco/repository.properties @@ -269,12 +269,12 @@ lucene.indexer.writerRamBufferSizeMb=16 # # Target number of indexes and deltas in the overall index and what index size to merge in memory # -lucene.indexer.mergerTargetIndexCount=5 +lucene.indexer.mergerTargetIndexCount=8 lucene.indexer.mergerTargetOverlayCount=5 lucene.indexer.mergerTargetOverlaysBlockingFactor=2 -lucene.indexer.maxDocsForInMemoryMerge=10000 +lucene.indexer.maxDocsForInMemoryMerge=60000 lucene.indexer.maxRamInMbForInMemoryMerge=16 -lucene.indexer.maxDocsForInMemoryIndex=10000 +lucene.indexer.maxDocsForInMemoryIndex=60000 lucene.indexer.maxRamInMbForInMemoryIndex=16 # # Other lucene properties diff --git a/source/java/org/alfresco/email/server/EmailServiceImpl.java b/source/java/org/alfresco/email/server/EmailServiceImpl.java index 31dcc49f80..9b177f537b 100644 --- a/source/java/org/alfresco/email/server/EmailServiceImpl.java +++ b/source/java/org/alfresco/email/server/EmailServiceImpl.java @@ -381,7 +381,7 @@ public class EmailServiceImpl implements EmailService */ private boolean isEmailContributeUser(String userName) { - return this.authorityService.getContainingAuthorities(AuthorityType.GROUP, userName, false).contains( + return this.authorityService.getAuthoritiesForUser(userName).contains( authorityService.getName(AuthorityType.GROUP, "EMAIL_CONTRIBUTORS")); } } diff --git a/source/java/org/alfresco/repo/avm/AVMNodeService.java b/source/java/org/alfresco/repo/avm/AVMNodeService.java index b898b7648f..c31c3f85be 100644 --- a/source/java/org/alfresco/repo/avm/AVMNodeService.java +++ b/source/java/org/alfresco/repo/avm/AVMNodeService.java @@ -335,6 +335,11 @@ public class AVMNodeService extends AbstractNodeServiceImpl implements NodeServi throw new 
InvalidStoreRefException(storeName +":/" + " not found.", storeRef); } } + + public Set getAllRootNodes(StoreRef storeRef) + { + return Collections.singleton(getRootNode(storeRef)); + } /** * @see #createNode(NodeRef, QName, QName, QName, Map) @@ -1662,8 +1667,18 @@ public class AVMNodeService extends AbstractNodeServiceImpl implements NodeServi return result; } - - + @Override + public List getChildAssocs(NodeRef nodeRef, QName typeQName, QName qname, int maxResults, + boolean preload) throws InvalidNodeRefException + { + List result = getChildAssocs(nodeRef, typeQName, qname); + if (result.size() > maxResults) + { + return result.subList(0, maxResults); + } + return result; + } + public List getChildAssocs(NodeRef nodeRef, QNamePattern typeQNamePattern, QNamePattern qnamePattern, boolean preload) throws InvalidNodeRefException { diff --git a/source/java/org/alfresco/repo/avm/AVMServiceConcurrentTest.java b/source/java/org/alfresco/repo/avm/AVMServiceConcurrentTest.java index 03aa2cf86e..b891bde770 100644 --- a/source/java/org/alfresco/repo/avm/AVMServiceConcurrentTest.java +++ b/source/java/org/alfresco/repo/avm/AVMServiceConcurrentTest.java @@ -111,6 +111,8 @@ public class AVMServiceConcurrentTest extends AVMServiceTestBase testTX = fTransactionService.getUserTransaction(); testTX.begin(); + try + { searchService = fIndexerAndSearcher.getSearcher(AVMNodeConverter.ToStoreRef("main"), true); results = searchService.query(storeRef, "lucene", "PATH:\"/test/*\""); @@ -121,7 +123,11 @@ public class AVMServiceConcurrentTest extends AVMServiceTestBase assertEquals(loops, results.length()); results.close(); - testTX.commit(); + } + finally + { + try { testTX.commit(); } catch (Exception e) {} + } // delete @@ -233,7 +239,8 @@ public class AVMServiceConcurrentTest extends AVMServiceTestBase testTX = fTransactionService.getUserTransaction(); testTX.begin(); - + try + { searchService = fIndexerAndSearcher.getSearcher(AVMNodeConverter.ToStoreRef("main"), true); results = searchService.query(storeRef, "lucene", "PATH:\"/test/*\""); for(ResultSetRow row : results) @@ -242,8 +249,11 @@ public class AVMServiceConcurrentTest extends AVMServiceTestBase } assertEquals(loops, results.length()); results.close(); - - testTX.commit(); + } + finally + { + try { testTX.commit(); } catch (Exception e) {} + } // update diff --git a/source/java/org/alfresco/repo/avm/locking/AVMLockingServiceImpl.java b/source/java/org/alfresco/repo/avm/locking/AVMLockingServiceImpl.java index 9f34cdf48b..bb25f83aec 100644 --- a/source/java/org/alfresco/repo/avm/locking/AVMLockingServiceImpl.java +++ b/source/java/org/alfresco/repo/avm/locking/AVMLockingServiceImpl.java @@ -483,15 +483,7 @@ public class AVMLockingServiceImpl implements AVMLockingService { return true; } - Set containing = authorityService.getContainingAuthorities(null, user, false); - for (String parent : containing) - { - if (parent.equalsIgnoreCase(authority)) - { - return true; - } - } - return false; + return authorityService.getAuthoritiesForUser(user).contains(authority); } /** diff --git a/source/java/org/alfresco/repo/domain/node/AbstractNodeDAOImpl.java b/source/java/org/alfresco/repo/domain/node/AbstractNodeDAOImpl.java index 98a1466e68..44ac30809d 100644 --- a/source/java/org/alfresco/repo/domain/node/AbstractNodeDAOImpl.java +++ b/source/java/org/alfresco/repo/domain/node/AbstractNodeDAOImpl.java @@ -55,9 +55,9 @@ import org.alfresco.repo.domain.usage.UsageDAO; import org.alfresco.repo.policy.BehaviourFilter; import 
org.alfresco.repo.security.permissions.AccessControlListProperties; import org.alfresco.repo.transaction.AlfrescoTransactionSupport; -import org.alfresco.repo.transaction.AlfrescoTransactionSupport.TxnReadState; import org.alfresco.repo.transaction.TransactionAwareSingleton; import org.alfresco.repo.transaction.TransactionListenerAdapter; +import org.alfresco.repo.transaction.AlfrescoTransactionSupport.TxnReadState; import org.alfresco.service.cmr.dictionary.DataTypeDefinition; import org.alfresco.service.cmr.dictionary.DictionaryService; import org.alfresco.service.cmr.dictionary.InvalidTypeException; @@ -71,20 +71,20 @@ import org.alfresco.service.cmr.repository.DuplicateChildNodeNameException; import org.alfresco.service.cmr.repository.InvalidNodeRefException; import org.alfresco.service.cmr.repository.InvalidStoreRefException; import org.alfresco.service.cmr.repository.NodeRef; -import org.alfresco.service.cmr.repository.NodeRef.Status; import org.alfresco.service.cmr.repository.Path; import org.alfresco.service.cmr.repository.StoreRef; +import org.alfresco.service.cmr.repository.NodeRef.Status; import org.alfresco.service.cmr.repository.datatype.DefaultTypeConverter; import org.alfresco.service.namespace.QName; import org.alfresco.service.transaction.ReadOnlyServerException; import org.alfresco.service.transaction.TransactionService; import org.alfresco.util.EqualsHelper; -import org.alfresco.util.EqualsHelper.MapValueComparison; import org.alfresco.util.GUID; import org.alfresco.util.Pair; import org.alfresco.util.PropertyCheck; import org.alfresco.util.ReadWriteLockExecuter; import org.alfresco.util.SerializationUtils; +import org.alfresco.util.EqualsHelper.MapValueComparison; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.springframework.dao.ConcurrencyFailureException; @@ -135,6 +135,15 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO * VALUE KEY: IGNORED
*/ private EntityLookupCache<StoreRef, Node, Serializable> rootNodesCache; + + + /** + * Cache for nodes with the root aspect by StoreRef:<br/>
+ * KEY: StoreRef<br/>
+ * VALUE: A set of nodes with the root aspect<br/>
+ */ + private SimpleCache<StoreRef, Set<NodeRef>> allRootNodesCache; + /** * Bidirectional cache for the Node ID to Node lookups:<br/> * KEY: Node ID<br/>
* KEY: Node ID<br/>
@@ -163,7 +172,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO * VALUE KEY: ChildByNameKey<br/>
*/ private EntityLookupCache parentAssocsCache; - + /** * Constructor. Set up various instance-specific members such as caches and locks. */ @@ -272,8 +281,18 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO cache, CACHE_REGION_ROOT_NODES, new RootNodesCacheCallbackDAO()); - } + } + /** + * Set the cache that maintains the extended Store root node data + * + * @param cache the cache + */ + public void setAllRootNodesCache(SimpleCache> allRootNodesCache) + { + this.allRootNodesCache = allRootNodesCache; + } + /** * Set the cache that maintains node ID-NodeRef cross referencing data * @@ -636,6 +655,48 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO return rootNodePair.getSecond().getNodePair(); } } + + public Set getAllRootNodes(StoreRef storeRef) + { + Set rootNodes = allRootNodesCache.get(storeRef); + if (rootNodes == null) + { + final Map> allRootNodes = new HashMap>(97); + getNodesWithAspects(Collections.singleton(ContentModel.ASPECT_ROOT), 0L, Long.MAX_VALUE, new NodeRefQueryCallback() + { + @Override + public boolean handle(Pair nodePair) + { + NodeRef nodeRef = nodePair.getSecond(); + StoreRef storeRef = nodeRef.getStoreRef(); + Set rootNodes = allRootNodes.get(storeRef); + if (rootNodes == null) + { + rootNodes = new HashSet(97); + allRootNodes.put(storeRef, rootNodes); + } + rootNodes.add(nodeRef); + return true; + } + }); + rootNodes = allRootNodes.get(storeRef); + if (rootNodes == null) + { + rootNodes = Collections.emptySet(); + allRootNodes.put(storeRef, rootNodes); + } + for (Map.Entry> entry : allRootNodes.entrySet()) + { + StoreRef entryStoreRef = entry.getKey(); + // Prevent unnecessary cross-invalidation + if (!allRootNodesCache.contains(entryStoreRef)) + { + allRootNodesCache.put(entryStoreRef, entry.getValue()); + } + } + } + return rootNodes; + } public Pair newStore(StoreRef storeRef) { @@ -684,6 +745,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO } // All the NodeRef-based caches are invalid. ID-based caches are fine. 
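+ // Both root-node caches are keyed by StoreRef, so a store rename must evict the old key from each; allRootNodesCache is simply dropped here and rebuilt lazily by the next getAllRootNodes call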
rootNodesCache.removeByKey(oldStoreRef); + allRootNodesCache.remove(oldStoreRef); nodesCache.clear(); if (isDebugEnabled) @@ -1251,7 +1313,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO childAssocRetryingHelper.doWithRetry(callback); // Check for cyclic relationships - getPaths(newChildNode.getNodePair(), false); + cycleCheck(newChildNode.getNodePair()); // Update ACLs for moved tree Long newParentAclId = newParentNode.getAclId(); @@ -1568,6 +1630,10 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO nodeUpdate.setAuditableProperties(auditableProps); nodeUpdate.setUpdateAuditableProperties(true); } + if (nodeAspects.contains(ContentModel.ASPECT_ROOT)) + { + allRootNodesCache.remove(node.getNodePair().getSecond().getStoreRef()); + } // Remove value from the cache nodesCache.removeByKey(nodeId); @@ -2178,7 +2244,9 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO // If we are adding the sys:aspect_root, then the parent assocs cache is unreliable if (newAspectQNames.contains(ContentModel.ASPECT_ROOT)) { + Pair nodePair = getNodePair(nodeId); invalidateCachesByNodeId(null, nodeId, parentAssocsCache); + allRootNodesCache.remove(nodePair.getSecond().getStoreRef()); } // Touch to bring into current txn @@ -2226,7 +2294,9 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO // If we are removing the sys:aspect_root, then the parent assocs cache is unreliable if (aspectQNames.contains(ContentModel.ASPECT_ROOT)) { + Pair nodePair = getNodePair(nodeId); invalidateCachesByNodeId(null, nodeId, parentAssocsCache); + allRootNodesCache.remove(nodePair.getSecond().getStoreRef()); } // Touch to bring into current txn @@ -2563,12 +2633,12 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO QName assocQName, String childNodeName) { + ParentAssocsInfo parentAssocInfo = getParentAssocsCached(childNodeId); ChildAssocEntity assoc = newChildAssocImpl( parentNodeId, childNodeId, false, assocTypeQName, assocQName, childNodeName); Long assocId = assoc.getId(); // update cache - ParentAssocsInfo parentAssocInfo = getParentAssocsCached(childNodeId); - parentAssocInfo = parentAssocInfo.addAssoc(assocId, assoc); + parentAssocInfo = parentAssocInfo.addAssoc(assocId, assoc, getCurrentTransactionId()); setParentAssocsCached(childNodeId, parentAssocInfo); // Done return assoc.getPair(qnameDAO); @@ -2584,7 +2654,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO // Update cache Long childNodeId = assoc.getChildNode().getId(); ParentAssocsInfo parentAssocInfo = getParentAssocsCached(childNodeId); - parentAssocInfo = parentAssocInfo.removeAssoc(assocId); + parentAssocInfo = parentAssocInfo.removeAssoc(assocId, getCurrentTransactionId()); setParentAssocsCached(childNodeId, parentAssocInfo); // Delete it int count = deleteChildAssocById(assocId); @@ -2948,12 +3018,13 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO assoc.getParentNode().getNodePair(), assoc.getChildNode().getNodePair()); } + resultsCallback.done(); } else { // Decide whether we query or filter - ParentAssocsInfo parentAssocs = getParentAssocsCacheOnly(childNodeId); - if ((parentAssocs == null) || (parentAssocs.getParentAssocs().size() > PARENT_ASSOCS_CACHE_FILTER_THRESHOLD)) + ParentAssocsInfo parentAssocs = getParentAssocsCached(childNodeId); + if (parentAssocs.getParentAssocs().size() > PARENT_ASSOCS_CACHE_FILTER_THRESHOLD) { // Query selectParentAssocs(childNodeId, 
assocTypeQName, assocQName, isPrimary, resultsCallback); @@ -2973,11 +3044,70 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO assoc.getChildNode().getNodePair()); } } + resultsCallback.done(); } } } + + /** + * Potentially cheaper than evaluating all of a node's paths to check for child association cycles + * + * @param nodePair + * the node to check + * @param path + * a set containing the nodes in the path to the node + */ + public void cycleCheck(Pair nodePair) + { + CycleCallBack callback = new CycleCallBack(); + callback.cycleCheck(nodePair); + if (callback.toThrow != null) + { + throw callback.toThrow; + } + } + + class CycleCallBack implements ChildAssocRefQueryCallback + { + final Set path = new HashSet(97); + CyclicChildRelationshipException toThrow; + + @Override + public void done() + { + } + + @Override + public boolean handle(Pair childAssocPair, Pair parentNodePair, + Pair childNodePair) + { + ChildAssociationRef childAssociationRef = childAssocPair.getSecond(); + if (!path.add(childAssociationRef)) + { + // Remember exception we want to throw and exit. If we throw within here, it will be wrapped by IBatis + toThrow = new CyclicChildRelationshipException("Child Association Cycle Detected " + path, childAssociationRef); + return false; + } + cycleCheck(childNodePair); + path.remove(childAssociationRef); + return toThrow == null; + } + + @Override + public boolean preLoadNodes() + { + return false; + } + + public void cycleCheck(Pair nodePair) + { + getChildAssocs(nodePair.getFirst(), null, null, null, null, null, this); + } + }; + + public List getPaths(Pair nodePair, boolean primaryOnly) throws InvalidNodeRefException { // create storage for the paths - only need 1 bucket if we are looking for the primary path @@ -3203,7 +3333,8 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO // Validate that we aren't pairing up a cached node with historic parent associations from an old // transaction (or the other way around) Long txnId = parentAssocsInfo.getTxnId(); - if (txnId != null && !txnId.equals(child.getTransaction().getId())) + Long childTxnId = child.getTransaction().getId(); + if (txnId != null && !txnId.equals(childTxnId)) { if (logger.isDebugEnabled()) { @@ -3211,7 +3342,17 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO + " detected loading parent associations. Cached transaction ID: " + child.getTransaction().getId() + ", actual transaction ID: " + txnId); } - invalidateNodeCaches(nodeId); + if (AlfrescoTransactionSupport.getTransactionReadState() != TxnReadState.TXN_READ_WRITE + || !getCurrentTransaction().getId().equals(childTxnId)) + { + // Force a reload of the node and its parent assocs + invalidateNodeCaches(nodeId); + } + else + { + // The node is for the current transaction, so only invalidate the parent assocs + invalidateCachesByNodeId(null, nodeId, parentAssocsCache); + } } else { @@ -3256,7 +3397,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO // Select all the parent associations List assocs = selectParentAssocs(nodeId); - + // Retrieve the transaction ID from the DB for validation purposes - prevents skew between a cached node and // its parent assocs Long txnId = assocs.isEmpty() ? 
null : assocs.get(0).getChildNode().getTransaction().getId(); @@ -3516,6 +3657,12 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO HashSet qnameIdsSet = new HashSet(qnameIds); Set qnames = qnameDAO.convertIdsToQNames(qnameIdsSet); aspectsCache.setValue(nodeId, qnames); + aspectNodeIds.remove(nodeId); + } + // Cache the absence of aspects too! + for (Long nodeId: aspectNodeIds) + { + aspectsCache.setValue(nodeId, Collections.emptySet()); } Map> propsByNodeId = selectNodeProperties(propertiesNodeIds); diff --git a/source/java/org/alfresco/repo/domain/node/NodeDAO.java b/source/java/org/alfresco/repo/domain/node/NodeDAO.java index 05e39403e1..03c2c07026 100644 --- a/source/java/org/alfresco/repo/domain/node/NodeDAO.java +++ b/source/java/org/alfresco/repo/domain/node/NodeDAO.java @@ -113,6 +113,8 @@ public interface NodeDAO extends NodeBulkLoader public Pair getRootNode(StoreRef storeRef); + public Set getAllRootNodes(StoreRef storeRef); + /* * Node */ @@ -491,6 +493,27 @@ public interface NodeDAO extends NodeBulkLoader ChildAssocRefQueryCallback resultsCallback); /** + * Gets the first n child associations of a given parent node, optionally filtering on association QName + * and association type QName. + *
+ * This is an efficient query for node paths. + * + * @param parentNodeId the parent node ID + * @param assocTypeQName the association type qname to filter on; null for no filtering + * @param assocQName the association qname to filter on; null for no filtering + * @param maxResults the maximum number of results to return. The query will be terminated efficiently + * after that number of results + * @param preload should the child nodes be batch loaded? + * @return a list of child associations + */ + public List getChildAssocs( + Long parentNodeId, + QName assocTypeQName, + QName assocQName, + final int maxResults, + boolean preload); + + /** * Get the child associations of a given parent node, optionally filtering on type QName. * * @param parentNodeId the parent node ID @@ -597,6 +620,14 @@ public interface NodeDAO extends NodeBulkLoader */ public List getPaths(Pair nodePair, boolean primaryOnly) throws InvalidNodeRefException; + /** + * Potentially cheaper than evaluating all of a node's paths to check for child association cycles. + * + * @param nodePair + * the node to check + */ + public void cycleCheck(Pair nodePair); + /* * Transactions */ diff --git a/source/java/org/alfresco/repo/domain/node/ParentAssocsInfo.java b/source/java/org/alfresco/repo/domain/node/ParentAssocsInfo.java index 466b8665a2..842c011b91 100644 --- a/source/java/org/alfresco/repo/domain/node/ParentAssocsInfo.java +++ b/source/java/org/alfresco/repo/domain/node/ParentAssocsInfo.java @@ -159,27 +159,27 @@ import org.apache.commons.logging.LogFactory; return (primaryAssocId != null) ? parentAssocsById.get(primaryAssocId) : null; } - public ParentAssocsInfo changeIsRoot(boolean isRoot) + public ParentAssocsInfo changeIsRoot(boolean isRoot, Long txnId) { - return new ParentAssocsInfo(this.txnId, isRoot, this.isRoot, parentAssocsById, primaryAssocId); + return new ParentAssocsInfo(txnId, isRoot, this.isRoot, parentAssocsById, primaryAssocId); } - public ParentAssocsInfo changeIsStoreRoot(boolean isStoreRoot) + public ParentAssocsInfo changeIsStoreRoot(boolean isStoreRoot, Long txnId) { - return new ParentAssocsInfo(this.txnId, this.isRoot, isStoreRoot, parentAssocsById, primaryAssocId); + return new ParentAssocsInfo(txnId, this.isRoot, isStoreRoot, parentAssocsById, primaryAssocId); } - public ParentAssocsInfo addAssoc(Long assocId, ChildAssocEntity parentAssoc) + public ParentAssocsInfo addAssoc(Long assocId, ChildAssocEntity parentAssoc, Long txnId) { Map parentAssocs = new HashMap(parentAssocsById); parentAssocs.put(parentAssoc.getId(), parentAssoc); - return new ParentAssocsInfo(this.txnId, isRoot, isStoreRoot, parentAssocs, primaryAssocId); + return new ParentAssocsInfo(txnId, isRoot, isStoreRoot, parentAssocs, primaryAssocId); } - public ParentAssocsInfo removeAssoc(Long assocId) + public ParentAssocsInfo removeAssoc(Long assocId, Long txnId) { Map parentAssocs = new HashMap(parentAssocsById); parentAssocs.remove(assocId); - return new ParentAssocsInfo(this.txnId, isRoot, isStoreRoot, parentAssocs, primaryAssocId); + return new ParentAssocsInfo(txnId, isRoot, isStoreRoot, parentAssocs, primaryAssocId); } } diff --git a/source/java/org/alfresco/repo/domain/node/ibatis/NodeDAOImpl.java b/source/java/org/alfresco/repo/domain/node/ibatis/NodeDAOImpl.java index f409bc410c..ababc63a71 100644 --- a/source/java/org/alfresco/repo/domain/node/ibatis/NodeDAOImpl.java +++ b/source/java/org/alfresco/repo/domain/node/ibatis/NodeDAOImpl.java @@ -23,6 +23,7 @@ import java.util.Collection; import java.util.Collections; import 
java.util.HashMap; import java.util.HashSet; +import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; @@ -55,6 +56,7 @@ import org.alfresco.service.cmr.repository.NodeRef; import org.alfresco.service.cmr.repository.StoreRef; import org.alfresco.service.namespace.QName; import org.alfresco.util.Pair; +import org.apache.ibatis.executor.result.DefaultResultContext; import org.apache.ibatis.session.ResultContext; import org.apache.ibatis.session.ResultHandler; import org.apache.ibatis.session.RowBounds; @@ -117,6 +119,7 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl private static final String SELECT_CHILD_ASSOC_BY_ID = "alfresco.node.select_ChildAssocById"; private static final String SELECT_CHILD_ASSOCS_BY_PROPERTY_VALUE = "alfresco.node.select_ChildAssocsByPropertyValue"; private static final String SELECT_CHILD_ASSOCS_OF_PARENT = "alfresco.node.select_ChildAssocsOfParent"; + private static final String SELECT_CHILD_ASSOCS_OF_PARENT_LIMITED = "alfresco.node.select_ChildAssocsOfParent_Limited"; private static final String SELECT_CHILD_ASSOC_OF_PARENT_BY_NAME = "alfresco.node.select_ChildAssocOfParentByName"; private static final String SELECT_CHILD_ASSOCS_OF_PARENT_WITHOUT_PARENT_ASSOCS_OF_TYPE = "alfresco.node.select_ChildAssocsOfParentWithoutParentAssocsOfType"; @@ -1053,6 +1056,77 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl resultsCallback.done(); } + public List getChildAssocs( + Long parentNodeId, + QName assocTypeQName, + QName assocQName, + final int maxResults, + boolean preload) + { + ChildAssocEntity assoc = new ChildAssocEntity(); + // Parent + NodeEntity parentNode = new NodeEntity(); + parentNode.setId(parentNodeId); + assoc.setParentNode(parentNode); + + // Type QName + if (assocTypeQName != null) + { + if (!assoc.setTypeQNameAll(qnameDAO, assocTypeQName, false)) + { + return Collections.emptyList(); // Shortcut + } + } + // QName + if (assocQName != null) + { + if (!assoc.setQNameAll(qnameDAO, assocQName, false)) + { + return Collections.emptyList(); // Shortcut + } + } + + final List result = new LinkedList(); + final List toLoad = new LinkedList(); + + // We can't invoke the row handler whilst the limited query is running as it's illegal on some databases (MySQL) + List entities = template.selectList(SELECT_CHILD_ASSOCS_OF_PARENT_LIMITED, assoc, new RowBounds(0, + maxResults)); + ChildAssocResultHandler rowHandler = new ChildAssocResultHandler(new ChildAssocRefQueryCallback(){ + + @Override + public boolean handle(Pair childAssocPair, Pair parentNodePair, + Pair childNodePair) + { + result.add(childAssocPair.getSecond()); + toLoad.add(childNodePair.getSecond()); + return true; + } + + @Override + public void done() + { + } + + @Override + public boolean preLoadNodes() + { + return false; + }}); + final DefaultResultContext resultContext = new DefaultResultContext(); + for (Object entity : entities) + { + resultContext.nextResultObject(entity); + rowHandler.handleResult(resultContext); + } + if (preload && !toLoad.isEmpty()) + { + cacheNodes(toLoad); + } + + return result; + } + @Override protected void selectChildAssocs( Long parentNodeId, diff --git a/source/java/org/alfresco/repo/jscript/People.java b/source/java/org/alfresco/repo/jscript/People.java index f71919acec..c630b33c3d 100644 --- a/source/java/org/alfresco/repo/jscript/People.java +++ b/source/java/org/alfresco/repo/jscript/People.java @@ -871,10 +871,10 @@ public final class People extends BaseScopableProcessorExtension implements Init { 
ParameterCheck.mandatory("Person", person); Object[] parents = null; - Set authorities = this.authorityService.getContainingAuthorities( + Set authorities = this.authorityService.getContainingAuthoritiesInZone( AuthorityType.GROUP, (String)person.getProperties().get(ContentModel.PROP_USERNAME), - false); + AuthorityService.ZONE_APP_DEFAULT, null, 1000); parents = new Object[authorities.size()]; int i = 0; for (String authority : authorities) diff --git a/source/java/org/alfresco/repo/node/db/DbNodeServiceImpl.java b/source/java/org/alfresco/repo/node/db/DbNodeServiceImpl.java index 27f130a545..dc0b62330e 100644 --- a/source/java/org/alfresco/repo/node/db/DbNodeServiceImpl.java +++ b/source/java/org/alfresco/repo/node/db/DbNodeServiceImpl.java @@ -66,10 +66,10 @@ import org.alfresco.service.cmr.repository.InvalidChildAssociationRefException; import org.alfresco.service.cmr.repository.InvalidNodeRefException; import org.alfresco.service.cmr.repository.InvalidStoreRefException; import org.alfresco.service.cmr.repository.NodeRef; -import org.alfresco.service.cmr.repository.NodeRef.Status; import org.alfresco.service.cmr.repository.NodeService; import org.alfresco.service.cmr.repository.Path; import org.alfresco.service.cmr.repository.StoreRef; +import org.alfresco.service.cmr.repository.NodeRef.Status; import org.alfresco.service.cmr.repository.datatype.DefaultTypeConverter; import org.alfresco.service.namespace.QName; import org.alfresco.service.namespace.QNamePattern; @@ -281,6 +281,12 @@ public class DbNodeServiceImpl extends AbstractNodeServiceImpl // done return rootNodePair.getSecond(); } + + @Override + public Set getAllRootNodes(StoreRef storeRef) + { + return nodeDAO.getAllRootNodes(storeRef); + } /** * @see #createNode(NodeRef, QName, QName, QName, Map) @@ -1124,6 +1130,9 @@ public class DbNodeServiceImpl extends AbstractNodeServiceImpl propagateTimeStamps(childParentAssocRef); invokeOnDeleteNode(childParentAssocRef, childNodeType, childNodeQNames, false); + // Index + nodeIndexer.indexDeleteNode(childParentAssocRef); + // lose interest in tracking this node ref untrackNewNodeRef(childNodeRef); } @@ -1168,8 +1177,7 @@ public class DbNodeServiceImpl extends AbstractNodeServiceImpl } // check that the child addition of the child has not created a cyclic relationship - // this functionality is provided for free in getPath - getPaths(childRef, false); + nodeDAO.cycleCheck(childNodePair); // Invoke policy behaviours for (ChildAssociationRef childAssocRef : childAssociationRefs) @@ -1686,6 +1694,22 @@ public class DbNodeServiceImpl extends AbstractNodeServiceImpl return orderedList; } + /** + * Fetches the first n child associations in an efficient manner + */ + public List getChildAssocs( + NodeRef nodeRef, + final QName typeQName, + final QName qname, + final int maxResults, + final boolean preload) + { + // Get the node + Pair nodePair = getNodePairNotNull(nodeRef); + // Get the assocs pointing to it + return nodeDAO.getChildAssocs(nodePair.getFirst(), typeQName, qname, maxResults, preload); + } + public List getChildAssocs(NodeRef nodeRef, Set childNodeTypeQNames) { // Get the node diff --git a/source/java/org/alfresco/repo/preference/PreferenceServiceImpl.java b/source/java/org/alfresco/repo/preference/PreferenceServiceImpl.java index 12d194f6eb..640396505d 100644 --- a/source/java/org/alfresco/repo/preference/PreferenceServiceImpl.java +++ b/source/java/org/alfresco/repo/preference/PreferenceServiceImpl.java @@ -28,6 +28,7 @@ import java.util.Map; import 
org.alfresco.error.AlfrescoRuntimeException; import org.alfresco.model.ContentModel; import org.alfresco.repo.content.MimetypeMap; +import org.alfresco.repo.rule.RuleModel; import org.alfresco.repo.security.authentication.AuthenticationContext; import org.alfresco.repo.security.authentication.AuthenticationUtil; import org.alfresco.repo.security.authentication.AuthenticationUtil.RunAsWork; @@ -255,6 +256,12 @@ public class PreferenceServiceImpl implements PreferenceService contentWriter.setEncoding("UTF-8"); contentWriter.setMimetype(MimetypeMap.MIMETYPE_TEXT_PLAIN); contentWriter.putContent(jsonPrefs.toString()); + + // Lets stop rule inheritance from trying to kick in - we may be in many groups + if (!PreferenceServiceImpl.this.nodeService.hasAspect(personNodeRef, RuleModel.ASPECT_IGNORE_INHERITED_RULES)) + { + PreferenceServiceImpl.this.nodeService.addAspect(personNodeRef, RuleModel.ASPECT_IGNORE_INHERITED_RULES, null); + } } catch (JSONException exception) { diff --git a/source/java/org/alfresco/repo/replication/ReplicationServiceIntegrationTest.java b/source/java/org/alfresco/repo/replication/ReplicationServiceIntegrationTest.java index 58b0cda889..cbf91d9ce4 100644 --- a/source/java/org/alfresco/repo/replication/ReplicationServiceIntegrationTest.java +++ b/source/java/org/alfresco/repo/replication/ReplicationServiceIntegrationTest.java @@ -76,6 +76,7 @@ import org.alfresco.service.transaction.TransactionService; import org.alfresco.util.ApplicationContextHelper; import org.alfresco.util.GUID; import org.alfresco.util.Pair; +import org.apache.tools.ant.taskdefs.Retry; import org.springframework.context.ConfigurableApplicationContext; /** diff --git a/source/java/org/alfresco/repo/search/impl/lucene/ADMLuceneIndexerImpl.java b/source/java/org/alfresco/repo/search/impl/lucene/ADMLuceneIndexerImpl.java index 05a7526a56..87f7547127 100644 --- a/source/java/org/alfresco/repo/search/impl/lucene/ADMLuceneIndexerImpl.java +++ b/source/java/org/alfresco/repo/search/impl/lucene/ADMLuceneIndexerImpl.java @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005-2010 Alfresco Software Limited. + * Copyright (C) 2005-2011 Alfresco Software Limited. 
* * This file is part of Alfresco * @@ -33,6 +33,7 @@ import java.util.Collections; import java.util.Date; import java.util.Deque; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedHashSet; @@ -41,6 +42,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Set; +import java.util.TreeSet; import org.alfresco.model.ContentModel; import org.alfresco.repo.content.MimetypeMap; @@ -56,6 +58,7 @@ import org.alfresco.repo.search.impl.lucene.fts.FullTextSearchIndexer; import org.alfresco.repo.security.authentication.AuthenticationUtil; import org.alfresco.repo.security.authentication.AuthenticationUtil.RunAsWork; import org.alfresco.repo.tenant.TenantService; +import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback; import org.alfresco.service.cmr.dictionary.AspectDefinition; import org.alfresco.service.cmr.dictionary.DataTypeDefinition; import org.alfresco.service.cmr.dictionary.PropertyDefinition; @@ -72,6 +75,7 @@ import org.alfresco.service.cmr.repository.NodeRef; import org.alfresco.service.cmr.repository.NodeService; import org.alfresco.service.cmr.repository.Path; import org.alfresco.service.cmr.repository.StoreRef; +import org.alfresco.service.cmr.repository.Path.ChildAssocElement; import org.alfresco.service.cmr.repository.datatype.DefaultTypeConverter; import org.alfresco.service.cmr.repository.datatype.TypeConversionException; import org.alfresco.service.namespace.QName; @@ -100,9 +104,15 @@ import org.springframework.extensions.surf.util.I18NUtil; * The implementation of the lucene based indexer. Supports basic transactional behaviour if used on its own. * * @author andyh + * @author dward */ public class ADMLuceneIndexerImpl extends AbstractLuceneIndexerImpl implements ADMLuceneIndexer { + /** + * The maximum number of parent associations a node can have before we choose to cascade reindex its parent rather than itself. + */ + private static final int PATH_GENERATION_FACTOR = 5; + static Log s_logger = LogFactory.getLog(ADMLuceneIndexerImpl.class); /** @@ -181,7 +191,10 @@ public class ADMLuceneIndexerImpl extends AbstractLuceneIndexerImpl imp { if (s_logger.isDebugEnabled()) { - s_logger.debug("Create node " + relationshipRef.getChildRef()); + NodeRef parentRef = relationshipRef.getParentRef(); + Path path = parentRef == null ? 
new Path() : nodeService.getPath(parentRef); + path.append(new ChildAssocElement(relationshipRef)); + s_logger.debug("Create node " + path + " " + relationshipRef.getChildRef()); } checkAbleToDoWork(IndexUpdateStatus.SYNCRONOUS); try @@ -221,7 +234,7 @@ public class ADMLuceneIndexerImpl extends AbstractLuceneIndexerImpl imp Document document = mainReader.document(doc); String id = document.get("ID"); NodeRef ref = new NodeRef(id); - deleteImpl(ref.toString(), IndexDeleteMode.DELETE, true, mainReader); + deleteImpl(ref.toString(), getDeltaReader(), mainReader); } td.close(); } @@ -243,6 +256,14 @@ public class ADMLuceneIndexerImpl extends AbstractLuceneIndexerImpl imp throw new LuceneIndexException("Filed to close main reader", e); } } + try + { + closeDeltaReader(); + } + catch (Exception e) + { + s_logger.warn("Failed to close delta reader", e); + } } } @@ -252,7 +273,7 @@ public class ADMLuceneIndexerImpl extends AbstractLuceneIndexerImpl imp if (s_logger.isDebugEnabled()) { - s_logger.debug("Update node " + nodeRef); + s_logger.debug("Update node " + nodeService.getPath(nodeRef) + " " + nodeRef); } checkAbleToDoWork(IndexUpdateStatus.SYNCRONOUS); try @@ -274,7 +295,10 @@ public class ADMLuceneIndexerImpl extends AbstractLuceneIndexerImpl imp { if (s_logger.isDebugEnabled()) { - s_logger.debug("Delete node " + relationshipRef.getChildRef()); + NodeRef parentRef = relationshipRef.getParentRef(); + Path path = parentRef == null ? new Path() : nodeService.getPath(parentRef); + path.append(new ChildAssocElement(relationshipRef)); + s_logger.debug("Delete node " + path + " " + relationshipRef.getChildRef()); } checkAbleToDoWork(IndexUpdateStatus.SYNCRONOUS); try @@ -283,9 +307,6 @@ public class ADMLuceneIndexerImpl extends AbstractLuceneIndexerImpl imp { throw new LuceneIndexException("Delete node failed - node is not in the required store"); } - // The requires a reindex - a delete may remove too much from under this node - that also lives under - // other nodes via secondary associations. All the nodes below require reindex. - // This is true if the deleted node is via secondary or primary assoc. delete(relationshipRef.getChildRef()); } catch (LuceneIndexException e) @@ -295,86 +316,74 @@ public class ADMLuceneIndexerImpl extends AbstractLuceneIndexerImpl imp } } - public void createChildRelationship(ChildAssociationRef relationshipRef) throws LuceneIndexException + private void childRelationshipEvent(ChildAssociationRef relationshipRef, String event) throws LuceneIndexException { if (s_logger.isDebugEnabled()) { - s_logger.debug("Create child " + relationshipRef); + NodeRef parentRef = relationshipRef.getParentRef(); + Path path = parentRef == null ? 
new Path() : nodeService.getPath(parentRef); + path.append(new ChildAssocElement(relationshipRef)); + s_logger.debug(event + " " + path + " " + relationshipRef.getChildRef()); } checkAbleToDoWork(IndexUpdateStatus.SYNCRONOUS); try { - // TODO: Optimise - // reindex(relationshipRef.getParentRef()); if (!relationshipRef.getChildRef().getStoreRef().equals(store)) { - throw new LuceneIndexException("Create child relationship failed - node is not in the required store"); + throw new LuceneIndexException(event + " failed - node is not in the required store"); + } + NodeRef parentRef = relationshipRef.getParentRef(); + NodeRef childRef = relationshipRef.getChildRef(); + if (parentRef != null) + { + // If the child has a lot of secondary parents, its cheaper to cascade reindex its parent rather than itself + if (nodeService.getParentAssocs(childRef).size() > PATH_GENERATION_FACTOR) + { + reindex(parentRef, true); + reindex(childRef, false); + } + // Otherwise, it's cheaper to re-evaluate all the paths to this node + else + { + reindex(childRef, true); + } + } + else + { + reindex(childRef, true); } - reindex(relationshipRef.getChildRef(), true); } catch (LuceneIndexException e) { setRollbackOnly(); - throw new LuceneIndexException("Failed to create child relationship", e); + throw new LuceneIndexException(event + " failed", e); } } + public void createChildRelationship(ChildAssociationRef relationshipRef) throws LuceneIndexException + { + childRelationshipEvent(relationshipRef, "Create child relationship"); + } + public void updateChildRelationship(ChildAssociationRef relationshipBeforeRef, ChildAssociationRef relationshipAfterRef) throws LuceneIndexException { - if (s_logger.isDebugEnabled()) - { - s_logger.debug("Update child " + relationshipBeforeRef + " to " + relationshipAfterRef); - } - checkAbleToDoWork(IndexUpdateStatus.SYNCRONOUS); - try - { - // TODO: Optimise - if (!relationshipBeforeRef.getChildRef().getStoreRef().equals(store)) - { - throw new LuceneIndexException("Update child relationship failed - node is not in the required store"); - } - if (!relationshipAfterRef.getChildRef().getStoreRef().equals(store)) - { - throw new LuceneIndexException("Update child relationship failed - node is not in the required store"); - } - if (relationshipBeforeRef.getParentRef() != null) - { - // reindex(relationshipBeforeRef.getParentRef()); - } - move(relationshipBeforeRef.getChildRef()); - } - catch (LuceneIndexException e) - { - setRollbackOnly(); - throw new LuceneIndexException("Failed to update child relationship", e); - } + childRelationshipEvent(relationshipBeforeRef, "Update child relationship"); + childRelationshipEvent(relationshipAfterRef, "Update child relationship"); } public void deleteChildRelationship(ChildAssociationRef relationshipRef) throws LuceneIndexException { - if (s_logger.isDebugEnabled()) - { - s_logger.debug("Delete child " + relationshipRef); - } - checkAbleToDoWork(IndexUpdateStatus.SYNCRONOUS); - try - { - if (!relationshipRef.getChildRef().getStoreRef().equals(store)) - { - throw new LuceneIndexException("Delete child relationship failed - node is not in the required store"); - } - // TODO: Optimise - if (relationshipRef.getParentRef() != null) - { - // reindex(relationshipRef.getParentRef()); - } - reindex(relationshipRef.getChildRef(), true); - } - catch (LuceneIndexException e) - { - setRollbackOnly(); - throw new LuceneIndexException("Failed to delete child relationship", e); - } + childRelationshipEvent(relationshipRef, "Delete child relationship"); + } + + /** 
+ * Are we deleting leaves only (not meta data) + * + * @return - deleting only nodes. + */ + public boolean getDeleteOnlyNodes() + { + return true; } /** @@ -486,7 +495,7 @@ public class ADMLuceneIndexerImpl extends AbstractLuceneIndexerImpl imp } } - setInfo(docs, getDeletions(), true); + setInfo(docs, getDeletions(), getContainerDeletions(), getDeleteOnlyNodes()); // mergeDeltaIntoMain(new LinkedHashSet()); } catch (IOException e) @@ -509,34 +518,6 @@ public class ADMLuceneIndexerImpl extends AbstractLuceneIndexerImpl imp } - static class Counter - { - int countInParent = 0; - - int count = -1; - - int getCountInParent() - { - return countInParent; - } - - int getRepeat() - { - return (count / countInParent) + 1; - } - - void incrementParentCount() - { - countInParent++; - } - - void increment() - { - count++; - } - - } - private static class Pair { private F first; @@ -576,7 +557,48 @@ public class ADMLuceneIndexerImpl extends AbstractLuceneIndexerImpl imp } } - public List createDocuments(final String stringNodeRef, final FTSStatus ftsStatus, final boolean indexAllProperties, final boolean includeDirectoryDocuments) + protected Set deleteImpl(String nodeRef, IndexReader deltaReader, IndexReader mainReader) + throws LuceneIndexException, IOException + { + Set containerRefs = new LinkedHashSet(); + // Delete all and reindex as they could be secondary links we have deleted and they need to be updated. + // Most will skip any indexing as they will really have gone. + Set temp = deleteContainerAndBelow(nodeRef, deltaReader, true, true); + containerRefs.addAll(temp); + temp = deleteContainerAndBelow(nodeRef, mainReader, false, true); + containerRefs.addAll(temp); + // Only mask out the container if it is present in the main index + if (!temp.isEmpty()) + { + containerDeletions.add(nodeRef); + } + + Set leafrefs = new LinkedHashSet(); + temp = deletePrimary(containerRefs, deltaReader, true); + leafrefs.addAll(temp); + temp = deletePrimary(containerRefs, mainReader, false); + leafrefs.addAll(temp); + + Set refs = new LinkedHashSet(); + refs.addAll(containerRefs); + refs.addAll(leafrefs); + deletions.addAll(refs); + + // make sure leaves are also removed from the delta before reindexing + + for(String id : refs) + { + // Only delete the leaves, as we may have hit secondary associations + deleteLeafOnly(id, deltaReader, true); + } + return refs; + } + + public List createDocuments(final String stringNodeRef, final FTSStatus ftsStatus, + final boolean indexAllProperties, final boolean includeDirectoryDocuments, final boolean cascade, + final Set pathsProcessedSinceFlush, + final Map> childAssociationsSinceFlush, final IndexReader deltaReader, + final IndexReader mainReader) { if (tenantService.isEnabled() && ((AuthenticationUtil.getRunAsUser() == null) || (AuthenticationUtil.isRunAsUserTheSystemUser()))) { @@ -586,21 +608,27 @@ public class ADMLuceneIndexerImpl extends AbstractLuceneIndexerImpl imp { public List doWork() { - return createDocumentsImpl(stringNodeRef, ftsStatus, indexAllProperties, includeDirectoryDocuments); + return createDocumentsImpl(stringNodeRef, ftsStatus, indexAllProperties, includeDirectoryDocuments, + cascade, pathsProcessedSinceFlush, childAssociationsSinceFlush, deltaReader, mainReader); } }, tenantService.getDomainUser(AuthenticationUtil.getSystemUserName(), tenantService.getDomain(new NodeRef(stringNodeRef).getStoreRef().getIdentifier()))); } else { - return createDocumentsImpl(stringNodeRef, ftsStatus, indexAllProperties, includeDirectoryDocuments); + return 
createDocumentsImpl(stringNodeRef, ftsStatus, indexAllProperties, includeDirectoryDocuments, + cascade, pathsProcessedSinceFlush, childAssociationsSinceFlush, deltaReader, mainReader); } } - private List createDocumentsImpl(String stringNodeRef, FTSStatus ftsStatus, boolean indexAllProperties, boolean includeDirectoryDocuments) + private List createDocumentsImpl(final String stringNodeRef, FTSStatus ftsStatus, + boolean indexAllProperties, boolean includeDirectoryDocuments, final boolean cascade, + final Set pathsProcessedSinceFlush, + final Map> childAssociationsSinceFlush, final IndexReader deltaReader, + final IndexReader mainReader) { - NodeRef nodeRef = new NodeRef(stringNodeRef); - NodeRef.Status nodeStatus = nodeService.getNodeStatus(nodeRef); // DH: Let me know if this field gets dropped (performance) - List docs = new LinkedList(); + final NodeRef nodeRef = new NodeRef(stringNodeRef); + final NodeRef.Status nodeStatus = nodeService.getNodeStatus(nodeRef); // DH: Let me know if this field gets dropped (performance) + final List docs = new LinkedList(); if (nodeStatus == null) { throw new InvalidNodeRefException("Node does not exist: " + nodeRef, nodeRef); @@ -613,18 +641,50 @@ public class ADMLuceneIndexerImpl extends AbstractLuceneIndexerImpl imp return docs; } - Map nodeCounts = getNodeCounts(nodeRef); - ChildAssociationRef qNameRef = null; - Map properties = nodeService.getProperties(nodeRef); + final Map properties = nodeService.getProperties(nodeRef); - Collection directPaths = new LinkedHashSet(nodeService.getPaths(nodeRef, false)); - Collection> categoryPaths = getCategoryPaths(nodeRef, properties); - Collection> paths = new ArrayList>(directPaths.size() + categoryPaths.size()); - for (Path path : directPaths) + boolean isRoot = nodeRef.equals(tenantService.getName(nodeService.getRootNode(nodeRef.getStoreRef()))); + + // Generate / regenerate all applicable parent paths as the system user (the current user doesn't necessarily have access + // to all of these) + if (includeDirectoryDocuments) { - paths.add(new Pair(path, null)); + AuthenticationUtil.runAs(new RunAsWork() + { + @Override + public Void doWork() throws Exception + { + // We we must cope with the possibility of the container not existing for some of this node's parents + for (ChildAssociationRef assocRef: nodeService.getParentAssocs(nodeRef)) + { + NodeRef parentRef = tenantService.getName(assocRef.getParentRef()); + if (!childAssociationsSinceFlush.containsKey(parentRef)) + { + String parentRefSString = parentRef.toString(); + if (!locateContainer(parentRefSString, deltaReader) + && !locateContainer(parentRefSString, mainReader)) + { + generateContainersAndBelow(nodeService.getPaths(parentRef, false), docs, false, + pathsProcessedSinceFlush, childAssociationsSinceFlush); + } + } + } + + // Now regenerate the containers for this node, cascading if necessary + // Only process 'containers' - not leaves + if (isCategory(getDictionaryService().getType(nodeService.getType(nodeRef))) + || mayHaveChildren(nodeRef) + && !getCachedChildren(childAssociationsSinceFlush, nodeRef).isEmpty()) + { + generateContainersAndBelow(nodeService.getPaths(nodeRef, false), docs, cascade, + pathsProcessedSinceFlush, childAssociationsSinceFlush); + } + + return null; + } + }, tenantService.getDomainUser(AuthenticationUtil.getSystemUserName(), tenantService.getDomain(nodeRef + .getStoreRef().getIdentifier()))); } - paths.addAll(categoryPaths); // check index control @@ -654,9 +714,9 @@ public class ADMLuceneIndexerImpl extends 
AbstractLuceneIndexerImpl imp } } } - + Document xdoc = new Document(); - xdoc.add(new Field("ID", nodeRef.toString(), Field.Store.YES, Field.Index.NO_NORMS, Field.TermVector.NO)); + xdoc.add(new Field("ID", stringNodeRef, Field.Store.YES, Field.Index.NO_NORMS, Field.TermVector.NO)); xdoc.add(new Field("TX", nodeStatus.getChangeTxnId(), Field.Store.YES, Field.Index.NO_NORMS, Field.TermVector.NO)); boolean isAtomic = true; for (QName propertyName : properties.keySet()) @@ -675,89 +735,32 @@ public class ADMLuceneIndexerImpl extends AbstractLuceneIndexerImpl imp } } - boolean isRoot = nodeRef.equals(tenantService.getName(nodeService.getRootNode(nodeRef.getStoreRef()))); - boolean mayHaveChildren = includeDirectoryDocuments && mayHaveChildren(nodeRef); - boolean isCategory = isCategory(getDictionaryService().getType(nodeService.getType(nodeRef))); - StringBuilder qNameBuffer = new StringBuilder(64); StringBuilder assocTypeQNameBuffer = new StringBuilder(64); - for (Iterator> it = paths.iterator(); it.hasNext(); /**/) + if (!isRoot) { - Pair pair = it.next(); - // Lucene flags in order are: Stored, indexed, tokenised - - qNameRef = tenantService.getName(getLastRefOrNull(pair.getFirst())); - - String pathString = pair.getFirst().toString(); - if ((pathString.length() > 0) && (pathString.charAt(0) == '/')) + for (Pair pair : getAllParents(nodeRef, properties)) { - pathString = pathString.substring(1); - } - - if (isRoot) - { - // Root node - } - else if (pair.getFirst().size() == 1) - { - // Pseudo root node ignore - } - else - // not a root node - { - Counter counter = nodeCounts.get(tenantService.getBaseName(qNameRef)); - // If we have something in a container with root aspect we will - // not find it - - if ((counter == null) || (counter.getRepeat() < counter.getCountInParent())) + ChildAssociationRef qNameRef = tenantService.getName(pair.getFirst()); + if ((qNameRef != null) && (qNameRef.getParentRef() != null) && (qNameRef.getQName() != null)) { - if ((qNameRef != null) && (qNameRef.getParentRef() != null) && (qNameRef.getQName() != null)) + if (qNameBuffer.length() > 0) { - if (qNameBuffer.length() > 0) - { - qNameBuffer.append(";/"); - assocTypeQNameBuffer.append(";/"); - } - qNameBuffer.append(ISO9075.getXPathName(qNameRef.getQName())); - assocTypeQNameBuffer.append(ISO9075.getXPathName(qNameRef.getTypeQName())); - xdoc.add(new Field("PARENT", qNameRef.getParentRef().toString(), Field.Store.YES, Field.Index.NO_NORMS, Field.TermVector.NO)); - // xdoc.add(new Field("ASSOCTYPEQNAME", ISO9075.getXPathName(qNameRef.getTypeQName()), - // Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.NO)); - xdoc.add(new Field("LINKASPECT", (pair.getSecond() == null) ? 
"" : ISO9075.getXPathName(pair.getSecond()), Field.Store.YES, Field.Index.NO_NORMS, - Field.TermVector.NO)); - } - } - - if (counter != null) - { - counter.increment(); - } - - // check for child associations - - if (mayHaveChildren) - { - if (directPaths.contains(pair.getFirst())) - { - Document directoryEntry = new Document(); - directoryEntry.add(new Field("ID", nodeRef.toString(), Field.Store.YES, Field.Index.NO_NORMS, Field.TermVector.NO)); - directoryEntry.add(new Field("PATH", pathString, Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.NO)); - for (NodeRef parent : getParents(pair.getFirst())) - { - directoryEntry.add(new Field("ANCESTOR", tenantService.getName(parent).toString(), Field.Store.NO, Field.Index.NO_NORMS, Field.TermVector.NO)); - } - directoryEntry.add(new Field("ISCONTAINER", "T", Field.Store.YES, Field.Index.NO_NORMS, Field.TermVector.NO)); - - if (isCategory) - { - directoryEntry.add(new Field("ISCATEGORY", "T", Field.Store.YES, Field.Index.NO_NORMS, Field.TermVector.NO)); - } - - docs.add(directoryEntry); + qNameBuffer.append(";/"); + assocTypeQNameBuffer.append(";/"); } + qNameBuffer.append(ISO9075.getXPathName(qNameRef.getQName())); + assocTypeQNameBuffer.append(ISO9075.getXPathName(qNameRef.getTypeQName())); + xdoc.add(new Field("PARENT", qNameRef.getParentRef().toString(), Field.Store.YES, + Field.Index.NO_NORMS, Field.TermVector.NO)); + // xdoc.add(new Field("ASSOCTYPEQNAME", ISO9075.getXPathName(qNameRef.getTypeQName()), + // Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.NO)); + xdoc.add(new Field("LINKASPECT", (pair.getSecond() == null) ? "" : ISO9075.getXPathName(pair + .getSecond()), Field.Store.YES, Field.Index.NO_NORMS, Field.TermVector.NO)); } } + } // Root Node @@ -806,9 +809,105 @@ public class ADMLuceneIndexerImpl extends AbstractLuceneIndexerImpl imp docs.add(xdoc); // } } - return docs; } + + private void generateContainersAndBelow(List paths, List docs, boolean cascade, + Set pathsProcessedSinceFlush, Map> childAssociationsSinceFlush) + { + if (paths.isEmpty()) + { + return; + } + + for (Path path: paths) + { + NodeRef nodeRef = tenantService.getName(((ChildAssocElement) path.last()).getRef().getChildRef()); + + // Prevent duplication of path cascading + if (pathsProcessedSinceFlush.add(path)) + { + // Categories have special powers - generate their container regardless of their actual children + boolean isCategory = isCategory(getDictionaryService().getType(nodeService.getType(nodeRef))); + + // For other containers, we only add a doc if they actually have children + if (!isCategory) + { + // Only process 'containers' - not leaves + if (!mayHaveChildren(nodeRef)) + { + continue; + } + + // Only process 'containers' - not leaves + if (getCachedChildren(childAssociationsSinceFlush, nodeRef).isEmpty()) + { + continue; + } + } + + // Skip the root, which is a single document + if (path.size() > 1) + { + String pathString = path.toString(); + if ((pathString.length() > 0) && (pathString.charAt(0) == '/')) + { + pathString = pathString.substring(1); + } + Document directoryEntry = new Document(); + directoryEntry.add(new Field("ID", nodeRef.toString(), Field.Store.YES, + Field.Index.NO_NORMS, Field.TermVector.NO)); + directoryEntry.add(new Field("PATH", pathString, Field.Store.YES, Field.Index.TOKENIZED, + Field.TermVector.NO)); + for (NodeRef parent : getParents(path)) + { + directoryEntry.add(new Field("ANCESTOR", tenantService.getName(parent).toString(), + Field.Store.NO, Field.Index.NO_NORMS, Field.TermVector.NO)); + } + 
directoryEntry.add(new Field("ISCONTAINER", "T", Field.Store.YES, Field.Index.NO_NORMS, + Field.TermVector.NO)); + + if (isCategory) + { + directoryEntry.add(new Field("ISCATEGORY", "T", Field.Store.YES, Field.Index.NO_NORMS, + Field.TermVector.NO)); + } + + docs.add(directoryEntry); + } + } + + if (cascade) + { + List childPaths = new LinkedList(); + for (ChildAssociationRef childRef : getCachedChildren(childAssociationsSinceFlush, nodeRef)) + { + childPaths.add(new Path().append(path).append(new Path.ChildAssocElement(childRef))); + } + generateContainersAndBelow(childPaths, docs, true, pathsProcessedSinceFlush, + childAssociationsSinceFlush); + } + } + } + + private List getCachedChildren( + Map> childAssociationsSinceFlush, NodeRef nodeRef) + { + List children = childAssociationsSinceFlush.get(nodeRef); + + // Cache the children in case there are many paths to the same node + if (children == null) + { + children = nodeService.getChildAssocs(nodeRef); + for (ChildAssociationRef childRef : children) + { + // We don't want index numbers in generated paths + childRef.setNthSibling(-1); + } + childAssociationsSinceFlush.put(nodeRef, children); + } + return children; + } private void addFtsStatusDoc(List docs, FTSStatus ftsStatus, NodeRef nodeRef, NodeRef.Status nodeStatus) @@ -827,6 +926,185 @@ public class ADMLuceneIndexerImpl extends AbstractLuceneIndexerImpl imp doc.add(new Field("FTSSTATUS", ftsStatus.name(), Field.Store.NO, Field.Index.NO_NORMS, Field.TermVector.NO)); docs.add(doc); } + + /** + * @throws LuceneIndexException + */ + public void flushPending() throws LuceneIndexException + { + IndexReader mainReader = null; + try + { + saveDelta(); + + if (commandList.isEmpty()) + { + return; + } + + Map nodeActionMap = new LinkedHashMap(commandList.size() * 2); + + // First, apply deletions and work out a 'flattened' list of reindex actions + mainReader = getReader(); + IndexReader deltaReader = getDeltaReader(); + Set deletionsSinceFlush = new TreeSet(); + for (Command command : commandList) + { + if (s_logger.isDebugEnabled()) + { + s_logger.debug(command.action + ": " + command.ref); + } + String nodeRef = command.ref.toString(); + switch(command.action) + { + case INDEX: + // No deletions + if (nodeActionMap.get(nodeRef) != Action.CASCADEREINDEX) + { + nodeActionMap.put(nodeRef, Action.INDEX); + } + break; + case REINDEX: + // Remove from delta if present + deleteLeafOnly(nodeRef, deltaReader, true); + + // Only mask out the node if it is present in the main index + if (deleteLeafOnly(nodeRef, mainReader, false)); + { + deletions.add(nodeRef); + } + if (!nodeActionMap.containsKey(nodeRef)) + { + nodeActionMap.put(nodeRef, Action.REINDEX); + } + break; + case CASCADEREINDEX: + deleteContainerAndBelow(nodeRef, deltaReader, true, true); + // Only mask out the container if it is present in the main index + Set temp = deleteContainerAndBelow(nodeRef, mainReader, false, true); + if (!temp.isEmpty()) + { + containerDeletions.add(nodeRef); + } + // Only mask out the node if it is present in the main index + if (temp.contains(nodeRef)) + { + deletions.add(nodeRef); + } + nodeActionMap.put(nodeRef, Action.CASCADEREINDEX); + break; + case DELETE: + // if already deleted don't do it again ... 
+ if(!deletionsSinceFlush.contains(nodeRef)) + { + Set refs = deleteImpl(nodeRef, deltaReader, mainReader); + + // do not delete anything we have deleted before in this flush + // probably OK to cache for the TX as a whole but done per flush => See ALF-8007 + deletionsSinceFlush.addAll(refs); + for (String ref : refs) + { + if (!nodeActionMap.containsKey(ref)) + { + nodeActionMap.put(ref, Action.REINDEX); + } + } + } + break; + } + } + + // Now reindex what needs indexing! + Set pathsProcessedSinceFlush = new HashSet(97); + Map> childAssociationsSinceFlush = new HashMap>(97); + + // First do the reading + List docs = new LinkedList(); + for (Map.Entry entry : nodeActionMap.entrySet()) + { + String nodeRef = entry.getKey(); + try + { + switch (entry.getValue()) + { + case INDEX: + docs.addAll(readDocuments(nodeRef, FTSStatus.New, false, true, false, pathsProcessedSinceFlush, + childAssociationsSinceFlush, deltaReader, mainReader)); + break; + case REINDEX: + docs.addAll(readDocuments(nodeRef, FTSStatus.Dirty, false, false, false, + pathsProcessedSinceFlush, childAssociationsSinceFlush, deltaReader, mainReader)); + break; + case CASCADEREINDEX: + // Add the nodes for index + docs.addAll(readDocuments(nodeRef, FTSStatus.Dirty, false, true, true, + pathsProcessedSinceFlush, childAssociationsSinceFlush, deltaReader, mainReader)); + break; + } + } + catch (InvalidNodeRefException e) + { + // The node does not exist + } + } + closeDeltaReader(); + + // Now the writing + IndexWriter writer = getDeltaWriter(); + for (Document doc : docs) + { + try + { + writer.addDocument(doc); + } + catch (IOException e) + { + throw new LuceneIndexException("Failed to add document to index", e); + } + } + + commandList.clear(); + this.docs = writer.docCount(); + deletionsSinceFlush.clear(); + } + catch (IOException e) + { + // If anything goes wrong we try and do a roll back + throw new LuceneIndexException("Failed to flush index", e); + } + finally + { + if (mainReader != null) + { + try + { + mainReader.close(); + } + catch (IOException e) + { + throw new LuceneIndexException("Failed to close main reader", e); + } + } + // Make sure deletes are sent + try + { + closeDeltaReader(); + } + catch (IOException e) + { + + } + // Make sure writes and updates are sent. 
+ try + { + closeDeltaWriter(); + } + catch (IOException e) + { + + } + } + } private Serializable convertForMT(QName propertyName, Serializable inboundValue) { @@ -1405,55 +1683,41 @@ public class ADMLuceneIndexerImpl extends AbstractLuceneIndexerImpl imp throw new IndexerException("Confused path: " + path); } Path.ChildAssocElement cae = (Path.ChildAssocElement) element; - parentsInDepthOrderStartingWithSelf.add(0, cae.getRef().getChildRef()); + parentsInDepthOrderStartingWithSelf.add(0, tenantService.getName(cae.getRef().getChildRef())); } return parentsInDepthOrderStartingWithSelf; } - private ChildAssociationRef getLastRefOrNull(Path path) + private Collection> getAllParents(NodeRef nodeRef, Map properties) { - if (path.last() instanceof Path.ChildAssocElement) + List> allParents = new LinkedList>(); + // First get the real parents + StoreRef storeRef = nodeRef.getStoreRef(); + Set allRootNodes = nodeService.getAllRootNodes(storeRef); + for (ChildAssociationRef assocRef : nodeService.getParentAssocs(nodeRef)) { - Path.ChildAssocElement cae = (Path.ChildAssocElement) path.last(); - return cae.getRef(); - } - else - { - return null; - } - } + allParents.add(new Pair(assocRef, null)); - private Map getNodeCounts(NodeRef nodeRef) - { - Map nodeCounts = new HashMap(5); - List parentAssocs = nodeService.getParentAssocs(nodeRef); - // count the number of times the association is duplicated - for (ChildAssociationRef assoc : parentAssocs) - { - Counter counter = nodeCounts.get(assoc); - if (counter == null) + // Add a fake association to the store root if a real parent is a 'fake' root + NodeRef parentRef = tenantService.getBaseName(assocRef.getParentRef()); + if (allRootNodes.contains(parentRef)) { - counter = new Counter(); - nodeCounts.put(assoc, counter); + NodeRef rootNodeRef = nodeService.getRootNode(parentRef.getStoreRef()); + if (!parentRef.equals(rootNodeRef)) + { + allParents.add(new Pair(new ChildAssociationRef( + assocRef.getTypeQName(), rootNodeRef, assocRef.getQName(), nodeRef), null)); + } } - counter.incrementParentCount(); - } - return nodeCounts; - } - private Collection> getCategoryPaths(NodeRef nodeRef, Map properties) - { - ArrayList> categoryPaths = new ArrayList>(); - Set aspects = nodeService.getAspects(nodeRef); - - for (QName classRef : aspects) + // Now add the 'fake' parents, including their aspect QName + for (QName classRef : nodeService.getAspects(nodeRef)) { AspectDefinition aspDef = getDictionaryService().getAspect(classRef); if (isCategorised(aspDef)) { - LinkedList> aspectPaths = new LinkedList>(); for (PropertyDefinition propDef : aspDef.getProperties().values()) { if (propDef.getDataType().getName().equals(DataTypeDefinition.CATEGORY)) @@ -1467,28 +1731,10 @@ public class ADMLuceneIndexerImpl extends AbstractLuceneIndexerImpl imp try { - for (Path path : nodeService.getPaths(catRef, false)) + for (ChildAssociationRef assocRef : nodeService.getParentAssocs(catRef)) { - if ((path.size() > 1) && (path.get(1) instanceof Path.ChildAssocElement)) - { - Path.ChildAssocElement cae = (Path.ChildAssocElement) path.get(1); - boolean isFakeRoot = true; - for (ChildAssociationRef car : nodeService.getParentAssocs(cae.getRef().getChildRef())) - { - if (cae.getRef().equals(car)) - { - isFakeRoot = false; - break; - } - } - if (isFakeRoot) - { - if (path.toString().indexOf(aspDef.getName().toString()) != -1) - { - aspectPaths.add(new Pair(path, aspDef.getName())); - } - } - } + allParents + .add(new Pair(new ChildAssociationRef(assocRef.getTypeQName(), 
assocRef.getChildRef(), QName.createQName("member"), nodeRef), aspDef.getName())); } } catch (InvalidNodeRefException e) @@ -1500,21 +1746,9 @@ public class ADMLuceneIndexerImpl extends AbstractLuceneIndexerImpl imp } } } - categoryPaths.addAll(aspectPaths); } } - // Add member final element - for (Pair pair : categoryPaths) - { - if (pair.getFirst().last() instanceof Path.ChildAssocElement) - { - Path.ChildAssocElement cae = (Path.ChildAssocElement) pair.getFirst().last(); - ChildAssociationRef assocRef = cae.getRef(); - pair.getFirst().append(new Path.ChildAssocElement(new ChildAssociationRef(assocRef.getTypeQName(), assocRef.getChildRef(), QName.createQName("member"), nodeRef))); - } - } - - return categoryPaths; + return allParents; } private boolean isCategorised(AspectDefinition aspDef) @@ -1669,13 +1903,13 @@ public class ADMLuceneIndexerImpl extends AbstractLuceneIndexerImpl imp List docs; try { - docs = readDocuments(ref.toString(), FTSStatus.Clean, true, false); + docs = readDocuments(ref.toString(), FTSStatus.Clean, true, false, false, null, null, null, null); } catch (Throwable t) { // Try to recover from failure s_logger.error("FTS index of " + ref + " failed. Reindexing without FTS", t); - docs = readDocuments(ref.toString(), FTSStatus.Clean, false, false); + docs = readDocuments(ref.toString(), FTSStatus.Clean, false, false, false, null, null, null, null); } for (Document doc : docs) { @@ -1725,6 +1959,23 @@ public class ADMLuceneIndexerImpl extends AbstractLuceneIndexerImpl imp } } + protected List readDocuments(final String stringNodeRef, final FTSStatus ftsStatus, + final boolean indexAllProperties, final boolean includeDirectoryDocuments, final boolean cascade, + final Set pathsProcessedSinceFlush, + final Map> childAssociationsSinceFlush, final IndexReader deltaReader, + final IndexReader mainReader) + { + return doInReadthroughTransaction(new RetryingTransactionCallback>() + { + @Override + public List execute() throws Throwable + { + return createDocuments(stringNodeRef, ftsStatus, indexAllProperties, includeDirectoryDocuments, + cascade, pathsProcessedSinceFlush, childAssociationsSinceFlush, deltaReader, mainReader); + } + }); + } + public void registerCallBack(FTSIndexerAware callBack) { this.callBack = callBack; @@ -1765,7 +2016,8 @@ public class ADMLuceneIndexerImpl extends AbstractLuceneIndexerImpl imp } else { - setInfo(docs, getDeletions(), false); + // Deletions delete nodes only. 
Containers handled separately + setInfo(docs, getDeletions(), getContainerDeletions(), getDeleteOnlyNodes()); fullTextSearchIndexer.requiresIndex(store); } if (callBack != null) diff --git a/source/java/org/alfresco/repo/search/impl/lucene/ADMLuceneTest.java b/source/java/org/alfresco/repo/search/impl/lucene/ADMLuceneTest.java index 3e9946d284..c940b59ea8 100644 --- a/source/java/org/alfresco/repo/search/impl/lucene/ADMLuceneTest.java +++ b/source/java/org/alfresco/repo/search/impl/lucene/ADMLuceneTest.java @@ -2270,6 +2270,8 @@ public class ADMLuceneTest extends TestCase implements DictionaryListener results.close(); nodeService.addAspect(n14, aspectWithChildren, null); + nodeService.createNode(n14, QName.createQName(TEST_NAMESPACE, "unused"), QName.createQName(TEST_NAMESPACE, + "unused"), testSuperType, getOrderProperties()); testTX.commit(); testTX = transactionService.getUserTransaction(); diff --git a/source/java/org/alfresco/repo/search/impl/lucene/AVMLuceneIndexerImpl.java b/source/java/org/alfresco/repo/search/impl/lucene/AVMLuceneIndexerImpl.java index 231be2f170..c243801518 100644 --- a/source/java/org/alfresco/repo/search/impl/lucene/AVMLuceneIndexerImpl.java +++ b/source/java/org/alfresco/repo/search/impl/lucene/AVMLuceneIndexerImpl.java @@ -30,9 +30,12 @@ import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Date; import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashSet; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Set; import org.alfresco.error.AlfrescoRuntimeException; import org.alfresco.model.ContentModel; @@ -56,6 +59,7 @@ import org.alfresco.repo.search.impl.lucene.fts.FullTextSearchIndexer; import org.alfresco.repo.security.authentication.AuthenticationUtil; import org.alfresco.repo.security.authentication.AuthenticationUtil.RunAsWork; import org.alfresco.repo.transaction.AlfrescoTransactionSupport; +import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback; import org.alfresco.service.cmr.avm.AVMException; import org.alfresco.service.cmr.avm.AVMNodeDescriptor; import org.alfresco.service.cmr.avm.AVMService; @@ -70,6 +74,7 @@ import org.alfresco.service.cmr.repository.ContentIOException; import org.alfresco.service.cmr.repository.ContentReader; import org.alfresco.service.cmr.repository.ContentService; import org.alfresco.service.cmr.repository.ContentWriter; +import org.alfresco.service.cmr.repository.InvalidNodeRefException; import org.alfresco.service.cmr.repository.MLText; import org.alfresco.service.cmr.repository.NodeRef; import org.alfresco.service.cmr.repository.StoreRef; @@ -87,6 +92,7 @@ import org.apache.lucene.analysis.Token; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermDocs; import org.apache.lucene.index.TermEnum; @@ -107,6 +113,8 @@ public class AVMLuceneIndexerImpl extends AbstractLuceneIndexerImpl impl MAIN, DELTA; } + protected enum IndexDeleteMode {REINDEX, DELETE}; + private static String SNAP_SHOT_ID = "SnapShot"; static Log s_logger = LogFactory.getLog(AVMLuceneIndexerImpl.class); @@ -129,6 +137,11 @@ public class AVMLuceneIndexerImpl extends AbstractLuceneIndexerImpl impl private int startVersion = -1; private int endVersion = -1; + + /** + * A list of deletions associated with the changes to nodes in the current flush + 
*/ + protected Set deletionsSinceFlush = new HashSet(); private long indexedDocCount = 0; @@ -170,6 +183,16 @@ public class AVMLuceneIndexerImpl extends AbstractLuceneIndexerImpl impl this.contentService = contentService; } + /** + * Are we deleting leaves only (not meta data) + * + * @return - deleting only nodes. + */ + public boolean getDeleteOnlyNodes() + { + return indexUpdateStatus == IndexUpdateStatus.ASYNCHRONOUS; + } + /** * Generate an indexer * @@ -430,7 +453,92 @@ public class AVMLuceneIndexerImpl extends AbstractLuceneIndexerImpl impl } - @Override + protected Set deleteImpl(String nodeRef, IndexDeleteMode mode, boolean cascade, IndexReader mainReader) + throws LuceneIndexException, IOException + { + Set leafrefs = new LinkedHashSet(); + IndexReader deltaReader = null; + + // startTimer(); + getDeltaReader(); + // outputTime("Delete "+nodeRef+" size = "+getDeltaWriter().docCount()); + Set refs = new LinkedHashSet(); + Set containerRefs = new LinkedHashSet(); + Set temp = null; + + switch(mode) + { + case REINDEX: + temp = deleteContainerAndBelow(nodeRef, getDeltaReader(), true, cascade); + closeDeltaReader(); + refs.addAll(temp); + deletions.addAll(temp); + // should not be included as a delete for optimisation in deletionsSinceFlush + // should be optimised out + // defensive against any issue with optimisation of events + // the nodes have not been deleted and would require a real delete + temp = deleteContainerAndBelow(nodeRef, mainReader, false, cascade); + refs.addAll(temp); + deletions.addAll(temp); + // should not be included as a delete for optimisation + // should be optimised out + // defensive against any issue with optimisation of events + // the nodes have not been deleted and would require a real delete + break; + case DELETE: + // if already deleted don't do it again ... + if(deletionsSinceFlush.contains(nodeRef)) + { + // nothing to do + break; + } + else + { + // Delete all and reindex as they could be secondary links we have deleted and they need to be updated. + // Most will skip any indexing as they will really have gone. 
+ temp = deleteContainerAndBelow(nodeRef, getDeltaReader(), true, cascade); + closeDeltaReader(); + containerRefs.addAll(temp); + refs.addAll(temp); + temp = deleteContainerAndBelow(nodeRef, mainReader, false, cascade); + containerRefs.addAll(temp); + + temp = deletePrimary(containerRefs, getDeltaReader(), true); + leafrefs.addAll(temp); + closeDeltaReader(); + temp = deletePrimary(containerRefs, mainReader, false); + leafrefs.addAll(temp); + + // May not have to delete references + temp = deleteReference(containerRefs, getDeltaReader(), true); + leafrefs.addAll(temp); + closeDeltaReader(); + temp = deleteReference(containerRefs, mainReader, false); + leafrefs.addAll(temp); + + refs.addAll(containerRefs); + refs.addAll(leafrefs); + deletions.addAll(refs); + // do not delete anything we have deleted before in this flush + // probably OK to cache for the TX as a whole but done per flush => See ALF-8007 + deletionsSinceFlush.addAll(refs); + + // make sure leaves are also removed from the delta before reindexing + + deltaReader = getDeltaReader(); + for(String id : leafrefs) + { + deltaReader.deleteDocuments(new Term("ID", id)); + } + closeDeltaReader(); + break; + } + } + + return refs; + + } + protected List createDocuments(String stringNodeRef, FTSStatus ftsStatus, boolean indexAllProperties, boolean includeDirectoryDocuments) { List docs = new ArrayList(); @@ -636,6 +744,161 @@ public class AVMLuceneIndexerImpl extends AbstractLuceneIndexerImpl impl return docs; } + protected List readDocuments(final String stringNodeRef, final FTSStatus ftsStatus, + final boolean indexAllProperties, final boolean includeDirectoryDocuments) + { + return doInReadthroughTransaction(new RetryingTransactionCallback>() + { + @Override + public List execute() throws Throwable + { + return createDocuments(stringNodeRef, ftsStatus, indexAllProperties, + includeDirectoryDocuments); + } + }); + } + + + protected void indexImpl(String nodeRef, boolean isNew) throws LuceneIndexException, IOException + { + IndexWriter writer = getDeltaWriter(); + + // avoid attempting to index nodes that don't exist + + try + { + List docs = readDocuments(nodeRef, isNew ? 
FTSStatus.New : FTSStatus.Dirty, false, true); + for (Document doc : docs) + { + try + { + writer.addDocument(doc); + } + catch (IOException e) + { + throw new LuceneIndexException("Failed to add document to index", e); + } + } + } + catch (InvalidNodeRefException e) + { + // The node does not exist + return; + } + + } + + void indexImpl(Set refs, boolean isNew) throws LuceneIndexException, IOException + { + for (String ref : refs) + { + indexImpl(ref, isNew); + } + } + + /** + * @throws LuceneIndexException + */ + public void flushPending() throws LuceneIndexException + { + IndexReader mainReader = null; + try + { + saveDelta(); + + // Make sure the in flush deletion list is clear at the start + deletionsSinceFlush.clear(); + if (commandList.isEmpty()) + { + return; + } + + mainReader = getReader(); + Set forIndex = new LinkedHashSet(); + + for (Command command : commandList) + { + if (command.action == Action.INDEX) + { + // Indexing just requires the node to be added to the list + forIndex.add(command.ref.toString()); + } + else if (command.action == Action.REINDEX) + { + // Reindex is a delete and then an index + Set set = deleteImpl(command.ref.toString(), IndexDeleteMode.REINDEX, false, mainReader); + + // Deleting any pending index actions + // - make sure we only do at most one index + forIndex.removeAll(set); + // Add the nodes for index + forIndex.addAll(set); + } + else if (command.action == Action.CASCADEREINDEX) + { + // Reindex is a delete and then an index + Set set = deleteImpl(command.ref.toString(), IndexDeleteMode.REINDEX, true, mainReader); + + // Deleting any pending index actions + // - make sure we only do at most one index + forIndex.removeAll(set); + // Add the nodes for index + forIndex.addAll(set); + } + else if (command.action == Action.DELETE) + { + // Delete the nodes + Set set = deleteImpl(command.ref.toString(), IndexDeleteMode.DELETE, true, mainReader); + // Remove any pending indexes + forIndex.removeAll(set); + // Add the leaf nodes for reindex + forIndex.addAll(set); + } + } + commandList.clear(); + indexImpl(forIndex, false); + docs = getDeltaWriter().docCount(); + deletionsSinceFlush.clear(); + } + catch (IOException e) + { + // If anything goes wrong we try and do a roll back + throw new LuceneIndexException("Failed to flush index", e); + } + finally + { + if (mainReader != null) + { + try + { + mainReader.close(); + } + catch (IOException e) + { + throw new LuceneIndexException("Failed to close main reader", e); + } + } + // Make sure deletes are sent + try + { + closeDeltaReader(); + } + catch (IOException e) + { + + } + // Make sure writes and updates are sent. 
+ try + { + closeDeltaWriter(); + } + catch (IOException e) + { + + } + } + } + private String[] splitPath(String path) { String[] pathParts = path.split(":"); @@ -1247,12 +1510,12 @@ public class AVMLuceneIndexerImpl extends AbstractLuceneIndexerImpl impl { if (indexUpdateStatus == IndexUpdateStatus.ASYNCHRONOUS) { - setInfo(docs, getDeletions(), false); + setInfo(docs, getDeletions(), getContainerDeletions(), false); // FTS does not trigger indexing request } else { - setInfo(docs, getDeletions(), false); + setInfo(docs, getDeletions(), getContainerDeletions(), false); // TODO: only register if required fullTextSearchIndexer.requiresIndex(store); } @@ -1261,7 +1524,7 @@ public class AVMLuceneIndexerImpl extends AbstractLuceneIndexerImpl impl callBack.indexCompleted(store, remainingCount, null); } - setInfo(docs, deletions, false); + setInfo(docs, deletions, containerDeletions, false); } @Override @@ -2148,4 +2411,74 @@ public class AVMLuceneIndexerImpl extends AbstractLuceneIndexerImpl impl deleteIndex(); } + + /** + * Delete all entries from the index. + */ + public void deleteAll() + { + deleteAll(null); + } + + /** + * Delete all index entries which do not start with the given prefix + * + * @param prefix + */ + public void deleteAll(String prefix) + { + IndexReader mainReader = null; + try + { + mainReader = getReader(); + for (int doc = 0; doc < mainReader.maxDoc(); doc++) + { + if (!mainReader.isDeleted(doc)) + { + Document document = mainReader.document(doc); + String[] ids = document.getValues("ID"); + if ((prefix == null) || nonStartwWith(ids, prefix)) + { + deletions.add(ids[ids.length - 1]); + // should be included in the deletion cache if we move back to caching at the TX level and not the flush level + // Entries here will currently be ignored as the list is cleared at the start and end of a flush. 
+ deletionsSinceFlush.add(ids[ids.length - 1]); + } + } + } + + } + catch (IOException e) + { + // If anything goes wrong we try and do a roll back + throw new LuceneIndexException("Failed to delete all entries from the index", e); + } + finally + { + if (mainReader != null) + { + try + { + mainReader.close(); + } + catch (IOException e) + { + throw new LuceneIndexException("Failed to close main reader", e); + } + } + } + } + + private boolean nonStartwWith(String[] values, String prefix) + { + for (String value : values) + { + if (value.startsWith(prefix)) + { + return false; + } + } + return true; + } + } diff --git a/source/java/org/alfresco/repo/search/impl/lucene/AbstractLuceneBase.java b/source/java/org/alfresco/repo/search/impl/lucene/AbstractLuceneBase.java index 8125c02add..c6e61eade7 100644 --- a/source/java/org/alfresco/repo/search/impl/lucene/AbstractLuceneBase.java +++ b/source/java/org/alfresco/repo/search/impl/lucene/AbstractLuceneBase.java @@ -175,7 +175,8 @@ public abstract class AbstractLuceneBase // luceneIndexer.flushPending(); return new ClosingIndexSearcher(indexInfo.getMainIndexReferenceCountingReadOnlyIndexReader(deltaId, - luceneIndexer.getDeletions(), luceneIndexer.getDeleteOnlyNodes())); + luceneIndexer.getDeletions(), luceneIndexer.getContainerDeletions(), luceneIndexer + .getDeleteOnlyNodes())); } } @@ -252,9 +253,9 @@ public abstract class AbstractLuceneBase closeDeltaWriter(); } - protected void setInfo(long docs, Set deletions, boolean deleteNodesOnly) throws IOException + protected void setInfo(long docs, Set deletions, Set containerDeletions, boolean deleteNodesOnly) throws IOException { - indexInfo.setPreparedState(deltaId, deletions, docs, deleteNodesOnly); + indexInfo.setPreparedState(deltaId, deletions, containerDeletions, docs, deleteNodesOnly); } protected void setStatus(TransactionStatus status) throws IOException diff --git a/source/java/org/alfresco/repo/search/impl/lucene/AbstractLuceneIndexerImpl.java b/source/java/org/alfresco/repo/search/impl/lucene/AbstractLuceneIndexerImpl.java index 2af659a046..4ca27b46ba 100644 --- a/source/java/org/alfresco/repo/search/impl/lucene/AbstractLuceneIndexerImpl.java +++ b/source/java/org/alfresco/repo/search/impl/lucene/AbstractLuceneIndexerImpl.java @@ -22,7 +22,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; import java.util.LinkedHashSet; import java.util.List; import java.util.ListIterator; @@ -31,6 +30,7 @@ import java.util.Set; import javax.transaction.Status; import javax.transaction.xa.XAResource; +import org.alfresco.repo.search.Indexer; import org.alfresco.repo.search.IndexerException; import org.alfresco.repo.search.impl.lucene.index.TransactionStatus; import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback; @@ -39,8 +39,8 @@ import org.alfresco.service.transaction.TransactionService; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermDocs; import org.springframework.dao.ConcurrencyFailureException; @@ -52,7 +52,7 @@ import org.springframework.dao.ConcurrencyFailureException; * @param - * the type used to generate the key in the index file */ -public abstract class AbstractLuceneIndexerImpl 
extends AbstractLuceneBase +public abstract class AbstractLuceneIndexerImpl extends AbstractLuceneBase implements Indexer { /** * Enum for indexing actions against a node */ @@ -71,7 +71,6 @@ public abstract class AbstractLuceneIndexerImpl extends AbstractLuceneBase * A delete */ DELETE, - MOVE, /** * A cascaded reindex (ensures directory structure is ok) */ @@ -94,8 +93,6 @@ public abstract class AbstractLuceneIndexerImpl extends AbstractLuceneBase ASYNCHRONOUS; } - protected enum IndexDeleteMode {REINDEX, DELETE, MOVE}; - protected enum FTSStatus {New, Dirty, Clean}; protected long docs; @@ -284,6 +281,65 @@ public abstract class AbstractLuceneIndexerImpl extends AbstractLuceneBase return refs; } + protected boolean locateContainer(String nodeRef, IndexReader reader) + { + boolean found = false; + try + { + TermDocs td = reader.termDocs(new Term("ID", nodeRef)); + while (td.next()) + { + int doc = td.doc(); + Document document = reader.document(doc); + if (document.getField("ISCONTAINER") != null) + { + found = true; + break; + } + } + td.close(); + } + catch (IOException e) + { + throw new LuceneIndexException("Failed to locate container for " + nodeRef, e); + } + return found; + } + + protected boolean deleteLeafOnly(String nodeRef, IndexReader reader, boolean delete) throws LuceneIndexException + { + boolean found = false; + try + { + TermDocs td = reader.termDocs(new Term("ID", nodeRef)); + while (td.next()) + { + int doc = td.doc(); + Document document = reader.document(doc); + // Exclude all containers except the root (which is also a node!) + Field path = document.getField("PATH"); + if (path == null || path.stringValue().length() == 0) + { + found = true; + if (delete) + { + reader.deleteDocument(doc); + } + else + { + break; + } + } + } + td.close(); + } + catch (IOException e) + { + throw new LuceneIndexException("Failed to delete leaf for " + nodeRef, e); + } + return found; + } + /** the maximum transformation time to allow atomically, defaulting to 20ms */ protected long maxAtomicTransformationTime = 20; @@ -294,9 +350,9 @@ public abstract class AbstractLuceneIndexerImpl extends AbstractLuceneBase protected Set deletions = new LinkedHashSet(); /** - * A list of deletions associated with the changes to nodes in the current flush + * A list of cascading container deletions we have made - at merge these deletions need to be made against the main index. */ - protected Set deletionsSinceFlush = new HashSet(); + protected Set containerDeletions = new LinkedHashSet(); /** * List of pending indexing commands. 
@@ -629,24 +685,19 @@ public abstract class AbstractLuceneIndexerImpl extends AbstractLuceneBase protected abstract void doSetRollbackOnly() throws IOException; - protected abstract List createDocuments(String stringNodeRef, FTSStatus ftsStatus, boolean indexAllProperties, - boolean includeDirectoryDocuments); - - protected List readDocuments(final String stringNodeRef, final FTSStatus ftsStatus, - final boolean indexAllProperties, final boolean includeDirectoryDocuments) + protected T2 doInReadthroughTransaction(final RetryingTransactionCallback callback) { if (isReadThrough) { return transactionService.getRetryingTransactionHelper().doInTransaction( - new RetryingTransactionCallback>() + new RetryingTransactionCallback() { @Override - public List execute() throws Throwable + public T2 execute() throws Throwable { try { - return createDocuments(stringNodeRef, ftsStatus, indexAllProperties, - includeDirectoryDocuments); + return callback.execute(); } catch (InvalidNodeRefException e) { @@ -660,166 +711,25 @@ public abstract class AbstractLuceneIndexerImpl extends AbstractLuceneBase } else { - return createDocuments(stringNodeRef, ftsStatus, indexAllProperties, includeDirectoryDocuments); - } - } - - protected Set deleteImpl(String nodeRef, IndexDeleteMode mode, boolean cascade, IndexReader mainReader) - throws LuceneIndexException, IOException - - { - Set leafrefs = new LinkedHashSet(); - IndexReader deltaReader = null; - - // startTimer(); - getDeltaReader(); - // outputTime("Delete "+nodeRef+" size = "+getDeltaWriter().docCount()); - Set refs = new LinkedHashSet(); - Set containerRefs = new LinkedHashSet(); - Set temp = null; - - switch(mode) - { - case MOVE: - temp = deleteContainerAndBelow(nodeRef, getDeltaReader(), true, cascade); - closeDeltaReader(); - containerRefs.addAll(temp); - temp = deleteContainerAndBelow(nodeRef, mainReader, false, cascade); - containerRefs.addAll(temp); - - temp = deletePrimary(containerRefs, getDeltaReader(), true); - leafrefs.addAll(temp); - closeDeltaReader(); - // May not have to delete references - temp = deleteReference(containerRefs, getDeltaReader(), true); - leafrefs.addAll(temp); - closeDeltaReader(); - - refs.addAll(containerRefs); - refs.addAll(leafrefs); - deletions.addAll(refs); - // should not be included as a delete for optimisation in deletionsSinceFlush - // should be optimised out - // defensive against any issue with optimisation of events - // the node has only moved - it still requires a real delete - - // make sure leaves are also removed from the delta before reindexing - - deltaReader = getDeltaReader(); - for(String id : leafrefs) + try { - deltaReader.deleteDocuments(new Term("ID", id)); + return callback.execute(); } - closeDeltaReader(); - break; - case REINDEX: - temp = deleteContainerAndBelow(nodeRef, getDeltaReader(), true, cascade); - closeDeltaReader(); - refs.addAll(temp); - deletions.addAll(temp); - // should not be included as a delete for optimisation in deletionsSinceFlush - // should be optimised out - // defensive against any issue with optimisation of events - // the nodes have not been deleted and would require a real delete - temp = deleteContainerAndBelow(nodeRef, mainReader, false, cascade); - refs.addAll(temp); - deletions.addAll(temp); - // should not be included as a delete for optimisation - // should be optimised out - // defensive agaainst any issue with optimisation of events - // the nodes have not been deleted and would require a real delete - break; - case DELETE: - // if already deleted don't do it 
again ... - if(deletionsSinceFlush.contains(nodeRef)) + catch (RuntimeException e) { - // nothing to do - break; + throw e; } - else + catch (Error e) { - // Delete all and reindex as they could be secondary links we have deleted and they need to be updated. - // Most will skip any indexing as they will really have gone. - temp = deleteContainerAndBelow(nodeRef, getDeltaReader(), true, cascade); - closeDeltaReader(); - containerRefs.addAll(temp); - refs.addAll(temp); - temp = deleteContainerAndBelow(nodeRef, mainReader, false, cascade); - containerRefs.addAll(temp); - - temp = deletePrimary(containerRefs, getDeltaReader(), true); - leafrefs.addAll(temp); - closeDeltaReader(); - temp = deletePrimary(containerRefs, mainReader, false); - leafrefs.addAll(temp); - - // May not have to delete references - temp = deleteReference(containerRefs, getDeltaReader(), true); - leafrefs.addAll(temp); - closeDeltaReader(); - temp = deleteReference(containerRefs, mainReader, false); - leafrefs.addAll(temp); - - refs.addAll(containerRefs); - refs.addAll(leafrefs); - deletions.addAll(refs); - // do not delete anything we have deleted before in this flush - // probably OK to cache for the TX as a whole but done per flush => See ALF-8007 - deletionsSinceFlush.addAll(refs); - - // make sure leaves are also removed from the delta before reindexing - - deltaReader = getDeltaReader(); - for(String id : leafrefs) - { - deltaReader.deleteDocuments(new Term("ID", id)); - } - closeDeltaReader(); - break; + throw e; + } + catch (Throwable e) + { + throw new RuntimeException(e); } } - - return refs; - } - - protected void indexImpl(String nodeRef, boolean isNew) throws LuceneIndexException, IOException - { - IndexWriter writer = getDeltaWriter(); - - // avoid attempting to index nodes that don't exist - - try - { - List docs = readDocuments(nodeRef, isNew ? 
FTSStatus.New : FTSStatus.Dirty, false, true); - for (Document doc : docs) - { - try - { - writer.addDocument(doc); - } - catch (IOException e) - { - throw new LuceneIndexException("Failed to add document to index", e); - } - } - } - catch (InvalidNodeRefException e) - { - // The node does not exist - return; - } - - } - - void indexImpl(Set refs, boolean isNew) throws LuceneIndexException, IOException - { - for (String ref : refs) - { - indexImpl(ref, isNew); - } - } - + protected void index(T ref) throws LuceneIndexException { addCommand(new Command(ref, Action.INDEX)); @@ -834,11 +744,6 @@ public abstract class AbstractLuceneIndexerImpl extends AbstractLuceneBase { addCommand(new Command(ref, Action.DELETE)); } - - protected void move(T ref) throws LuceneIndexException - { - addCommand(new Command(ref, Action.MOVE)); - } private void addCommand(Command command) { @@ -861,22 +766,7 @@ public abstract class AbstractLuceneIndexerImpl extends AbstractLuceneBase private void purgeCommandList(Command command) { - if (command.action == Action.DELETE) - { - removeFromCommandList(command, false); - } - else if (command.action == Action.REINDEX) - { - removeFromCommandList(command, true); - } - else if (command.action == Action.INDEX) - { - removeFromCommandList(command, true); - } - else if (command.action == Action.CASCADEREINDEX) - { - removeFromCommandList(command, true); - } + removeFromCommandList(command, command.action != Action.DELETE); } private void removeFromCommandList(Command command, boolean matchExact) @@ -912,128 +802,6 @@ public abstract class AbstractLuceneIndexerImpl extends AbstractLuceneBase } } - /** - * @throws LuceneIndexException - */ - public void flushPending() throws LuceneIndexException - { - IndexReader mainReader = null; - try - { - saveDelta(); - - // Make sure the in flush deletion list is clear at the start - deletionsSinceFlush.clear(); - if (commandList.isEmpty()) - { - return; - } - - mainReader = getReader(); - Set forIndex = new LinkedHashSet(); - - for (Command command : commandList) - { - if (command.action == Action.INDEX) - { - // Indexing just requires the node to be added to the list - forIndex.add(command.ref.toString()); - } - else if (command.action == Action.REINDEX) - { - // Reindex is a delete and then and index - Set set = deleteImpl(command.ref.toString(), IndexDeleteMode.REINDEX, false, mainReader); - - // Deleting any pending index actions - // - make sure we only do at most one index - forIndex.removeAll(set); - // Add the nodes for index - forIndex.addAll(set); - } - else if (command.action == Action.CASCADEREINDEX) - { - // Reindex is a delete and then and index - Set set = deleteImpl(command.ref.toString(), IndexDeleteMode.REINDEX, true, mainReader); - - // Deleting any pending index actions - // - make sure we only do at most one index - forIndex.removeAll(set); - // Add the nodes for index - forIndex.addAll(set); - } - else if (command.action == Action.DELETE) - { - // Delete the nodes - Set set = deleteImpl(command.ref.toString(), IndexDeleteMode.DELETE, true, mainReader); - // Remove any pending indexes - forIndex.removeAll(set); - // Add the leaf nodes for reindex - forIndex.addAll(set); - } - else if (command.action == Action.MOVE) - { - // Delete the nodes - Set set = deleteImpl(command.ref.toString(), IndexDeleteMode.MOVE, true, mainReader); - // Remove any pending indexes - forIndex.removeAll(set); - // Add the leaf nodes for reindex - forIndex.addAll(set); - } - } - commandList.clear(); - indexImpl(forIndex, false); - docs = 
getDeltaWriter().docCount(); - deletionsSinceFlush.clear(); - } - catch (IOException e) - { - // If anything goes wrong we try and do a roll back - throw new LuceneIndexException("Failed to flush index", e); - } - finally - { - if (mainReader != null) - { - try - { - mainReader.close(); - } - catch (IOException e) - { - throw new LuceneIndexException("Filed to close main reader", e); - } - } - // Make sure deletes are sent - try - { - closeDeltaReader(); - } - catch (IOException e) - { - - } - // Make sure writes and updates are sent. - try - { - closeDeltaWriter(); - } - catch (IOException e) - { - - } - } - } - - /** - * Are we deleting leaves only (not meta data) - * - * @return - deleting only nodes. - */ - public boolean getDeleteOnlyNodes() - { - return indexUpdateStatus == IndexUpdateStatus.ASYNCHRONOUS; - } - /** * Get the deletions * @@ -1045,72 +813,12 @@ public abstract class AbstractLuceneIndexerImpl extends AbstractLuceneBase } /** - * Delete all entries from the index. - */ - public void deleteAll() - { - deleteAll(null); - } - - /** - * Delete all index entries which do not start with the given prefix + * Get the container deletions * - * @param prefix + * @return - the ids to delete */ - public void deleteAll(String prefix) + public Set getContainerDeletions() { - IndexReader mainReader = null; - try - { - mainReader = getReader(); - for (int doc = 0; doc < mainReader.maxDoc(); doc++) - { - if (!mainReader.isDeleted(doc)) - { - Document document = mainReader.document(doc); - String[] ids = document.getValues("ID"); - if ((prefix == null) || nonStartwWith(ids, prefix)) - { - deletions.add(ids[ids.length - 1]); - // should be included in the deletion cache if we move back to caching at the TX level and not the flush level - // Entries here will currently be ignored as the list is cleared at the start and end of a flush. 
- deletionsSinceFlush.add(ids[ids.length - 1]); - } - } - } - - } - catch (IOException e) - { - // If anything goes wrong we try and do a roll back - throw new LuceneIndexException("Failed to delete all entries from the index", e); - } - finally - { - if (mainReader != null) - { - try - { - mainReader.close(); - } - catch (IOException e) - { - throw new LuceneIndexException("Filed to close main reader", e); - } - } - } + return Collections.unmodifiableSet(containerDeletions); } - - private boolean nonStartwWith(String[] values, String prefix) - { - for (String value : values) - { - if (value.startsWith(prefix)) - { - return false; - } - } - return true; - } - } diff --git a/source/java/org/alfresco/repo/search/impl/lucene/FilterIndexReaderByStringId.java b/source/java/org/alfresco/repo/search/impl/lucene/FilterIndexReaderByStringId.java index d92a5d93b6..2eb791e2bc 100644 --- a/source/java/org/alfresco/repo/search/impl/lucene/FilterIndexReaderByStringId.java +++ b/source/java/org/alfresco/repo/search/impl/lucene/FilterIndexReaderByStringId.java @@ -27,6 +27,7 @@ import org.alfresco.error.AlfrescoRuntimeException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; import org.apache.lucene.index.FilterIndexReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; @@ -52,6 +53,7 @@ public class FilterIndexReaderByStringId extends FilterIndexReader private OpenBitSet deletedDocuments; private final Set deletions; + private final Set containerDeletions; private final boolean deleteNodesOnly; private final ReadWriteLock lock = new ReentrantReadWriteLock(); @@ -65,12 +67,13 @@ public class FilterIndexReaderByStringId extends FilterIndexReader * @param deletions * @param deleteNodesOnly */ - public FilterIndexReaderByStringId(String id, IndexReader reader, Set deletions, boolean deleteNodesOnly) + public FilterIndexReaderByStringId(String id, IndexReader reader, Set deletions, Set containerDeletions, boolean deleteNodesOnly) { super(reader); reader.incRef(); this.id = id; this.deletions = deletions; + this.containerDeletions = containerDeletions; this.deleteNodesOnly = deleteNodesOnly; if (s_logger.isDebugEnabled()) @@ -103,23 +106,19 @@ public class FilterIndexReaderByStringId extends FilterIndexReader } deletedDocuments = new OpenBitSet(in.maxDoc()); - if (!deleteNodesOnly) + Searcher searcher = new IndexSearcher(in); + for (String stringRef : deletions) { - for (String stringRef : deletions) + if (!deleteNodesOnly || containerDeletions.contains(stringRef)) { TermDocs td = in.termDocs(new Term("ID", stringRef)); while (td.next()) { deletedDocuments.set(td.doc()); } - td.close(); + td.close(); } - } - else - { - - Searcher searcher = new IndexSearcher(in); - for (String stringRef : deletions) + else { TermQuery query = new TermQuery(new Term("ID", stringRef)); Hits hits = searcher.search(query); @@ -128,7 +127,9 @@ public class FilterIndexReaderByStringId extends FilterIndexReader for (int i = 0; i < hits.length(); i++) { Document doc = hits.doc(i); - if (doc.getField("ISCONTAINER") == null) + // Exclude all containers except the root (which is also a node!) 
+ Field path = doc.getField("PATH"); + if (path == null || path.stringValue().length() == 0) { deletedDocuments.set(hits.id(i)); // There should only be one thing to delete @@ -137,7 +138,17 @@ public class FilterIndexReaderByStringId extends FilterIndexReader } } } - // searcher does not need to be closed, the reader is live + } + // searcher does not need to be closed, the reader is live + + for (String stringRef : containerDeletions) + { + TermDocs td = in.termDocs(new Term("ANCESTOR", stringRef)); + while (td.next()) + { + deletedDocuments.set(td.doc()); + } + td.close(); } return deletedDocuments; } diff --git a/source/java/org/alfresco/repo/search/impl/lucene/LuceneIndexer.java b/source/java/org/alfresco/repo/search/impl/lucene/LuceneIndexer.java index d6e54bc641..70b4d19bba 100644 --- a/source/java/org/alfresco/repo/search/impl/lucene/LuceneIndexer.java +++ b/source/java/org/alfresco/repo/search/impl/lucene/LuceneIndexer.java @@ -31,6 +31,7 @@ public interface LuceneIndexer extends Indexer, TransactionSynchronisationAwareI { public String getDeltaId(); public Set getDeletions(); + public Set getContainerDeletions(); public boolean getDeleteOnlyNodes(); public R doReadOnly(IndexInfo.LockWork lockWork); } diff --git a/source/java/org/alfresco/repo/search/impl/lucene/index/IndexInfo.java b/source/java/org/alfresco/repo/search/impl/lucene/index/IndexInfo.java index 62ab001d79..f0b46e18e1 100644 --- a/source/java/org/alfresco/repo/search/impl/lucene/index/IndexInfo.java +++ b/source/java/org/alfresco/repo/search/impl/lucene/index/IndexInfo.java @@ -72,6 +72,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.FilterIndexReader; import org.apache.lucene.index.IndexReader; @@ -280,6 +281,11 @@ public class IndexInfo implements IndexMonitor */ private static String INDEX_INFO_DELETIONS = "IndexInfoDeletions"; + /** + * The default name for the index container deletions file + */ + private static String INDEX_INFO_CONTAINER_DELETIONS = "IndexInfoContainerDeletions"; + /** * What to look for to detect the previous index implementation. 
*/ @@ -1188,6 +1194,18 @@ * @throws IOException */ public Set getDeletions(String id) throws IOException + { + return getDeletions(id, INDEX_INFO_DELETIONS); + } + + /** + * Get the deletions for a given index (there is no check if they should be applied; that is up to the calling layer) + * + * @param id + * @return + * @throws IOException + */ + private Set getDeletions(String id, String fileName) throws IOException { if (id == null) { @@ -1196,7 +1214,7 @@ public class IndexInfo implements IndexMonitor // Check state Set deletions = new HashSet(); File location = new File(indexDirectory, id).getCanonicalFile(); - File file = new File(location, INDEX_INFO_DELETIONS).getCanonicalFile(); + File file = new File(location, fileName).getCanonicalFile(); if (!file.exists()) { if (s_logger.isDebugEnabled()) @@ -1234,32 +1252,22 @@ public class IndexInfo implements IndexMonitor * should deletions only apply to nodes (ie not to containers) * @throws IOException */ - public void setPreparedState(String id, Set toDelete, long documents, boolean deleteNodesOnly) throws IOException + public void setPreparedState(String id, Set toDelete, Set containersToDelete, long documents, boolean deleteNodesOnly) throws IOException { if (id == null) { throw new IndexerException("\"null\" is not a valid identifier for a transaction"); } // Check state - if (toDelete.size() > 0) + int toDeleteSize = toDelete.size(); + int containersToDeleteSize = containersToDelete.size(); + if (toDeleteSize > 0) { - File location = new File(indexDirectory, id).getCanonicalFile(); - if (!location.exists()) - { - if (!location.mkdirs()) - { - throw new IndexerException("Failed to make index directory " + location); - } - } - // Write deletions - DataOutputStream os = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(new File(location, INDEX_INFO_DELETIONS).getCanonicalFile()))); - os.writeInt(toDelete.size()); - for (String ref : toDelete) - { - os.writeUTF(ref); - } - os.flush(); - os.close(); + persistDeletions(id, toDelete, INDEX_INFO_DELETIONS); + } + if (containersToDeleteSize > 0) + { + persistDeletions(id, containersToDelete, INDEX_INFO_CONTAINER_DELETIONS); } getWriteLock(); try @@ -1274,7 +1282,7 @@ public class IndexInfo implements IndexMonitor throw new IndexerException("Deletes and doc count can only be set on a preparing index"); } entry.setDocumentCount(documents); - entry.setDeletions(toDelete.size()); + entry.setDeletions(toDeleteSize + containersToDeleteSize); entry.setDeletOnlyNodes(deleteNodesOnly); } finally @@ -1283,6 +1291,33 @@ public class IndexInfo implements IndexMonitor } } + /** + * @param id + * @param toDelete + * @throws IOException + * @throws FileNotFoundException + */ + private void persistDeletions(String id, Set toDelete, String fileName) throws IOException, FileNotFoundException + { + File location = new File(indexDirectory, id).getCanonicalFile(); + if (!location.exists()) + { + if (!location.mkdirs()) + { + throw new IndexerException("Failed to make index directory " + location); + } + } + // Write deletions + DataOutputStream os = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(new File(location, fileName).getCanonicalFile()))); + os.writeInt(toDelete.size()); + for (String ref : toDelete) + { + os.writeUTF(ref); + } + os.flush(); + os.close(); + } + private void invalidateMainReadersFromFirst(Set ids) throws IOException { boolean found = false; @@ -1413,7 +1448,7 @@ public class IndexInfo implements IndexMonitor * 
@return * @throws IOException */ - public IndexReader getMainIndexReferenceCountingReadOnlyIndexReader(String id, Set deletions, boolean deleteOnlyNodes) throws IOException + public IndexReader getMainIndexReferenceCountingReadOnlyIndexReader(String id, Set deletions, Set containerDeletions, boolean deleteOnlyNodes) throws IOException { if (id == null) { @@ -1482,13 +1517,13 @@ public class IndexInfo implements IndexMonitor IndexReader deltaReader = buildAndRegisterDeltaReader(id); IndexReader reader = null; - if (deletions == null || deletions.size() == 0) + if ((deletions == null || deletions.size() == 0) && (containerDeletions == null || containerDeletions.size() == 0)) { reader = new MultiReader(new IndexReader[] { mainIndexReader, deltaReader }, false); } else { - IndexReader filterReader = new FilterIndexReaderByStringId("main+id", mainIndexReader, deletions, deleteOnlyNodes); + IndexReader filterReader = new FilterIndexReaderByStringId("main+id", mainIndexReader, deletions, containerDeletions, deleteOnlyNodes); reader = new MultiReader(new IndexReader[] { filterReader, deltaReader }, false); // Cancel out extra incRef made by MultiReader filterReader.decRef(); @@ -2254,7 +2289,7 @@ public class IndexInfo implements IndexMonitor { try { - IndexReader filterReader = new FilterIndexReaderByStringId(id, oldReader, getDeletions(entry.getName()), entry.isDeletOnlyNodes()); + IndexReader filterReader = new FilterIndexReaderByStringId(id, oldReader, getDeletions(entry.getName(), INDEX_INFO_DELETIONS), getDeletions(entry.getName(), INDEX_INFO_CONTAINER_DELETIONS), entry.isDeletOnlyNodes()); reader = new MultiReader(new IndexReader[] { filterReader, subReader }, false); // Cancel out the incRef on the filter reader filterReader.decRef(); @@ -3843,7 +3878,8 @@ public class IndexInfo implements IndexMonitor LinkedHashMap readers = new LinkedHashMap(size); for (IndexEntry currentDelete : toDelete.values()) { - Set deletions = getDeletions(currentDelete.getName()); + Set deletions = getDeletions(currentDelete.getName(), INDEX_INFO_DELETIONS); + Set containerDeletions = getDeletions(currentDelete.getName(), INDEX_INFO_CONTAINER_DELETIONS); if (!deletions.isEmpty()) { for (String key : indexes.keySet()) @@ -3873,7 +3909,7 @@ public class IndexInfo implements IndexMonitor readers.put(key, writeableReader); } - if (currentDelete.isDeletOnlyNodes()) + if (currentDelete.isDeletOnlyNodes() && !containerDeletions.contains(stringRef)) { Searcher writeableSearcher = new IndexSearcher(writeableReader); hits = writeableSearcher.search(query); @@ -3882,7 +3918,9 @@ public class IndexInfo implements IndexMonitor for (int i = 0; i < hits.length(); i++) { Document doc = hits.doc(i); - if (doc.getField("ISCONTAINER") == null) + // Exclude all containers except the root (which is also a node!) 
+ Field path = doc.getField("PATH"); + if (path == null || path.stringValue().length() == 0) { writeableReader.deleteDocument(hits.id(i)); invalidIndexes.add(key); @@ -3927,6 +3965,65 @@ public class IndexInfo implements IndexMonitor } } } + if (!containerDeletions.isEmpty()) + { + for (String key : indexes.keySet()) + { + IndexReader reader = getReferenceCountingIndexReader(key); + Searcher searcher = new IndexSearcher(reader); + try + { + for (String stringRef : deletions) + { + TermQuery query = new TermQuery(new Term("ANCESTOR", stringRef)); + Hits hits = searcher.search(query); + if (hits.length() > 0) + { + IndexReader writeableReader = readers.get(key); + if (writeableReader == null) + { + File location = new File(indexDirectory, key).getCanonicalFile(); + if (IndexReader.indexExists(location)) + { + writeableReader = IndexReader.open(location); + } + else + { + continue; + } + readers.put(key, writeableReader); + } + + int deletedCount = 0; + try + { + deletedCount = writeableReader.deleteDocuments(new Term("ANCESTOR", stringRef)); + } + catch (IOException ioe) + { + if (s_logger.isDebugEnabled()) + { + s_logger.debug("IO Error for " + key); + throw ioe; + } + } + if (deletedCount > 0) + { + if (s_logger.isDebugEnabled()) + { + s_logger.debug("Deleted " + deletedCount + " from " + key + " for id " + stringRef + " remaining docs " + writeableReader.numDocs()); + } + invalidIndexes.add(key); + } + } + } + } + finally + { + searcher.close(); + } + } + } // The delta we have just processed now must be included when we process the deletions of its successor indexes.put(currentDelete.getName(), currentDelete); } diff --git a/source/java/org/alfresco/repo/search/impl/lucene/index/IndexInfoTest.java b/source/java/org/alfresco/repo/search/impl/lucene/index/IndexInfoTest.java index 1869724540..27cb212f74 100644 --- a/source/java/org/alfresco/repo/search/impl/lucene/index/IndexInfoTest.java +++ b/source/java/org/alfresco/repo/search/impl/lucene/index/IndexInfoTest.java @@ -21,6 +21,7 @@ package org.alfresco.repo.search.impl.lucene.index; import java.io.File; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashSet; import junit.framework.TestCase; @@ -109,7 +110,7 @@ public static final String[] UPDATE_LIST_2 = { "alpha2", "bravo2", "charlie2", " ii.closeDeltaIndexWriter(guid); ii.setStatus(guid, TransactionStatus.PREPARING, null, null); - ii.setPreparedState(guid, deletions, 1, false); + ii.setPreparedState(guid, deletions, Collections.emptySet(), 1, false); ii.getDeletions(guid); ii.setStatus(guid, TransactionStatus.PREPARED, null, null); @@ -131,7 +132,7 @@ public static final String[] UPDATE_LIST_2 = { "alpha2", "bravo2", "charlie2", " } reader.close(); - reader = ii.getMainIndexReferenceCountingReadOnlyIndexReader(guid, deletions, false); + reader = ii.getMainIndexReferenceCountingReadOnlyIndexReader(guid, deletions, Collections.emptySet(), false); assertEquals(reader.numDocs(), i + 1); for (int j = 0; j < WORD_LIST.length; j++) { @@ -214,7 +215,7 @@ public static final String[] UPDATE_LIST_2 = { "alpha2", "bravo2", "charlie2", " ii.closeDeltaIndexWriter(guid); ii.setStatus(guid, TransactionStatus.PREPARING, null, null); - ii.setPreparedState(guid, new HashSet(), 1, false); + ii.setPreparedState(guid, Collections.emptySet(), Collections.emptySet(), 1, false); ii.getDeletions(guid); ii.setStatus(guid, TransactionStatus.PREPARED, null, null); @@ -236,7 +237,7 @@ public static final String[] UPDATE_LIST_2 = { "alpha2", "bravo2", 
"charlie2", " } reader.close(); - reader = ii.getMainIndexReferenceCountingReadOnlyIndexReader(guid, new HashSet(), false); + reader = ii.getMainIndexReferenceCountingReadOnlyIndexReader(guid, Collections.emptySet(), Collections.emptySet(), false); assertEquals(reader.numDocs(), i + 1); for (int j = 0; j < CREATE_LIST.length; j++) { @@ -290,7 +291,7 @@ public static final String[] UPDATE_LIST_2 = { "alpha2", "bravo2", "charlie2", " ii.setStatus(guid, TransactionStatus.ACTIVE, null, null); ii.closeDeltaIndexWriter(guid); ii.setStatus(guid, TransactionStatus.PREPARING, null, null); - ii.setPreparedState(guid, deletions, 1, false); + ii.setPreparedState(guid, deletions, Collections.emptySet(), 1, false); ii.getDeletions(guid); ii.setStatus(guid, TransactionStatus.PREPARED, null, null); @@ -314,7 +315,7 @@ public static final String[] UPDATE_LIST_2 = { "alpha2", "bravo2", "charlie2", " } reader.close(); - reader = ii.getMainIndexReferenceCountingReadOnlyIndexReader(guid, deletions, false); + reader = ii.getMainIndexReferenceCountingReadOnlyIndexReader(guid, deletions, Collections.emptySet(), false); assertEquals(reader.numDocs(), UPDATE_LIST.length - i - 1); lastDoc = -1; for (int j = 0; j < CREATE_LIST.length; j++) @@ -409,7 +410,7 @@ public static final String[] UPDATE_LIST_2 = { "alpha2", "bravo2", "charlie2", " ii.closeDeltaIndexWriter(guid); ii.setStatus(guid, TransactionStatus.PREPARING, null, null); - ii.setPreparedState(guid, new HashSet(), 1, false); + ii.setPreparedState(guid, Collections.emptySet(), Collections.emptySet(), 1, false); ii.getDeletions(guid); ii.setStatus(guid, TransactionStatus.PREPARED, null, null); @@ -431,7 +432,7 @@ public static final String[] UPDATE_LIST_2 = { "alpha2", "bravo2", "charlie2", " } reader.close(); - reader = ii.getMainIndexReferenceCountingReadOnlyIndexReader(guid, new HashSet(), false); + reader = ii.getMainIndexReferenceCountingReadOnlyIndexReader(guid, Collections.emptySet(), Collections.emptySet(), false); assertEquals(reader.numDocs(), i + 1); for (int j = 0; j < CREATE_LIST.length; j++) { @@ -495,7 +496,7 @@ public static final String[] UPDATE_LIST_2 = { "alpha2", "bravo2", "charlie2", " ii.closeDeltaIndexWriter(guid); ii.setStatus(guid, TransactionStatus.PREPARING, null, null); - ii.setPreparedState(guid, deletions, 1, false); + ii.setPreparedState(guid, deletions, Collections.emptySet(), 1, false); ii.getDeletions(guid); ii.setStatus(guid, TransactionStatus.PREPARED, null, null); @@ -534,7 +535,7 @@ public static final String[] UPDATE_LIST_2 = { "alpha2", "bravo2", "charlie2", " } reader.close(); - reader = ii.getMainIndexReferenceCountingReadOnlyIndexReader(guid, deletions, false); + reader = ii.getMainIndexReferenceCountingReadOnlyIndexReader(guid, deletions, Collections.emptySet(), false); assertEquals(reader.numDocs(), UPDATE_LIST.length); lastDoc = -1; for (int j = 0; j < CREATE_LIST.length; j++) @@ -684,7 +685,7 @@ public static final String[] UPDATE_LIST_2 = { "alpha2", "bravo2", "charlie2", " ii.closeDeltaIndexWriter(guid); ii.setStatus(guid, TransactionStatus.PREPARING, null, null); - ii.setPreparedState(guid, new HashSet(), 1, false); + ii.setPreparedState(guid, Collections.emptySet(), Collections.emptySet(), 1, false); ii.getDeletions(guid); ii.setStatus(guid, TransactionStatus.PREPARED, null, null); @@ -709,7 +710,7 @@ public static final String[] UPDATE_LIST_2 = { "alpha2", "bravo2", "charlie2", " } reader.close(); - reader = ii.getMainIndexReferenceCountingReadOnlyIndexReader(guid, new HashSet(), false); + reader = 
ii.getMainIndexReferenceCountingReadOnlyIndexReader(guid, Collections.<String>emptySet(), Collections.<String>emptySet(), false); lastDoc = -1; for (int j = 0; j < create.length; j++) { @@ -775,7 +776,7 @@ public static final String[] UPDATE_LIST_2 = { "alpha2", "bravo2", "charlie2", " ii.closeDeltaIndexWriter(guid); ii.setStatus(guid, TransactionStatus.PREPARING, null, null); - ii.setPreparedState(guid, deletions, 1, false); + ii.setPreparedState(guid, deletions, Collections.<String>emptySet(), 1, false); ii.getDeletions(guid); ii.setStatus(guid, TransactionStatus.PREPARED, null, null); @@ -814,7 +815,7 @@ public static final String[] UPDATE_LIST_2 = { "alpha2", "bravo2", "charlie2", " } reader.close(); - reader = ii.getMainIndexReferenceCountingReadOnlyIndexReader(guid, deletions, false); + reader = ii.getMainIndexReferenceCountingReadOnlyIndexReader(guid, deletions, Collections.<String>emptySet(), false); lastDoc = -1; for (int j = 0; j < create.length; j++) diff --git a/source/java/org/alfresco/repo/security/authentication/AbstractChainingAuthenticationComponent.java b/source/java/org/alfresco/repo/security/authentication/AbstractChainingAuthenticationComponent.java index 488628f670..fd285145d5 100644 --- a/source/java/org/alfresco/repo/security/authentication/AbstractChainingAuthenticationComponent.java +++ b/source/java/org/alfresco/repo/security/authentication/AbstractChainingAuthenticationComponent.java @@ -124,6 +124,7 @@ public abstract class AbstractChainingAuthenticationComponent extends AbstractAu @Override public Authentication setCurrentUser(String userName) { + Exception last = null; for (AuthenticationComponent authComponent : getUsableAuthenticationComponents()) { try @@ -132,10 +133,10 @@ public abstract class AbstractChainingAuthenticationComponent extends AbstractAu } catch (AuthenticationException e) { - // Ignore and chain + last = e; } } - throw new AuthenticationException("Failed to set current user " + userName); + throw new AuthenticationException("Failed to set current user " + userName, last); } /** diff --git a/source/java/org/alfresco/repo/security/authority/AuthorityDAO.java b/source/java/org/alfresco/repo/security/authority/AuthorityDAO.java index 81fb78df50..d54893b5dc 100644 --- a/source/java/org/alfresco/repo/security/authority/AuthorityDAO.java +++ b/source/java/org/alfresco/repo/security/authority/AuthorityDAO.java @@ -25,6 +25,7 @@ import org.alfresco.query.PagingRequest; import org.alfresco.query.PagingResults; import org.alfresco.service.cmr.repository.NodeRef; import org.alfresco.service.cmr.security.AuthorityType; +import org.alfresco.service.cmr.security.AuthorityService.AuthorityFilter; public interface AuthorityDAO { @@ -62,6 +63,8 @@ */ Set<String> getContainedAuthorities(AuthorityType type, String parentName, boolean immediate); + public boolean isAuthorityContained(NodeRef authorityNodeRef, String authorityToFind); + /** * Remove an authority. * @@ -80,6 +83,20 @@ */ Set<String> getContainingAuthorities(AuthorityType type, String name, boolean immediate); + + /** + * Get a set of authorities with varying filter criteria + * + * @param type authority type or null for all types + * @param authority if non-null, only return those authorities that contain this authority + * @param zoneName if non-null, only include authorities in the named zone + * @param filter optional callback to apply further filter criteria or null + * @param size if greater than zero, the maximum results to return. The search strategy used is varied depending on this number. + * @return a set of authorities + */ + public Set<String> getContainingAuthoritiesInZone(AuthorityType type, String authority, final String zoneName, AuthorityFilter filter, int size); + + 
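// Sketch of the AuthorityFilter contract declared above: the callback accepts or
// vetoes each candidate authority as it is discovered, so results are trimmed while
// streaming rather than post-filtered. The criterion shown is illustrative only.
AuthorityFilter siteGroupsOnly = new AuthorityFilter()
{
    public boolean includeAuthority(String authority)
    {
        return authority.startsWith("GROUP_site_");
    }
};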
/** * Get authorities by type and/or zone * diff --git a/source/java/org/alfresco/repo/security/authority/AuthorityDAOImpl.java b/source/java/org/alfresco/repo/security/authority/AuthorityDAOImpl.java index c22ea0b0f9..dc3c53cbcf 100644 --- a/source/java/org/alfresco/repo/security/authority/AuthorityDAOImpl.java +++ b/source/java/org/alfresco/repo/security/authority/AuthorityDAOImpl.java @@ -44,6 +44,8 @@ import org.alfresco.repo.node.NodeServicePolicies; import org.alfresco.repo.policy.JavaBehaviour; import org.alfresco.repo.policy.PolicyComponent; import org.alfresco.repo.search.impl.lucene.AbstractLuceneQueryParser; +import org.alfresco.repo.security.authentication.AuthenticationUtil; +import org.alfresco.repo.security.authentication.AuthenticationUtil.RunAsWork; import org.alfresco.repo.security.person.PersonServiceImpl; import org.alfresco.repo.tenant.TenantService; import org.alfresco.repo.transaction.AlfrescoTransactionSupport; @@ -61,6 +63,7 @@ import org.alfresco.service.cmr.security.AuthorityType; import org.alfresco.service.cmr.security.NoSuchPersonException; import org.alfresco.service.cmr.security.PersonService; import org.alfresco.service.cmr.security.PersonService.PersonInfo; +import org.alfresco.service.cmr.security.AuthorityService.AuthorityFilter; import org.alfresco.service.namespace.NamespacePrefixResolver; import org.alfresco.service.namespace.NamespaceService; import org.alfresco.service.namespace.QName; @@ -102,7 +105,13 @@ public class AuthorityDAOImpl implements AuthorityDAO, NodeServicePolicies.Befor private SimpleCache<Pair<String, String>, NodeRef> authorityLookupCache; + private static final NodeRef NULL_NODEREF = new NodeRef("null", "null", "null"); + private SimpleCache<String, Set<String>> userAuthorityCache; + + private SimpleCache<Pair<String, String>, List<ChildAssociationRef>> zoneAuthorityCache; + + private SimpleCache<NodeRef, List<ChildAssociationRef>> childAuthorityCache; /** System Container ref cache (Tennant aware) */ private Map<String, NodeRef> systemContainerRefs = new ConcurrentHashMap<String, NodeRef>(4); @@ -111,6 +120,9 @@ private PolicyComponent policyComponent; + /** The number of authorities in a zone to pre-cache, allowing quick generation of 'first n' results. */ + private int zoneAuthoritySampleSize = 10000; + private NamedObjectRegistry<CannedQueryFactory<AuthorityInfo>> cannedQueryRegistry; public AuthorityDAOImpl() @@ -118,6 +130,19 @@ { super(); } + + /** + * Sets the number of authorities in a zone to pre-cache, allowing quick generation of 'first n' results and adaptation of + * the search technique based on hit rate. + * + * @param zoneAuthoritySampleSize + * the zoneAuthoritySampleSize to set + */ + public void setZoneAuthoritySampleSize(int zoneAuthoritySampleSize) + { + this.zoneAuthoritySampleSize = zoneAuthoritySampleSize; + }
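// Illustrative tuning sketch: the setter above feeds the sample-size heuristic used by
// getContainingAuthoritiesInZone(). Raising it trades cache memory for a better chance
// that the cached zone sample alone satisfies 'first n' queries without the fallback
// search. The direct wiring here is hypothetical; the shipped default is 10000.
AuthorityDAOImpl authorityDAO = new AuthorityDAOImpl();
authorityDAO.setZoneAuthoritySampleSize(20000); // sample twice as many authorities per zone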
+ public void setStoreUrl(String storeUrl) { this.storeRef = new StoreRef(storeUrl); @@ -155,6 +180,16 @@ { this.userAuthorityCache = userAuthorityCache; } + + public void setZoneAuthorityCache(SimpleCache<Pair<String, String>, List<ChildAssociationRef>> zoneAuthorityCache) + { + this.zoneAuthorityCache = zoneAuthorityCache; + } + + public void setChildAuthorityCache(SimpleCache<NodeRef, List<ChildAssociationRef>> childAuthorityCache) + { + this.childAuthorityCache = childAuthorityCache; + } public void setPersonService(PersonService personService) { @@ -208,6 +243,7 @@ throw new AlfrescoRuntimeException("Authorities of the type " + authorityType + " may not be added to other authorities"); } + childAuthorityCache.remove(parentRef); parentRefs.add(parentRef); } NodeRef childRef = getAuthorityOrNull(childName); @@ -247,10 +283,13 @@ if (authorityZones != null) { Set<NodeRef> zoneRefs = new HashSet<NodeRef>(authorityZones.size() * 2); + String currentUserDomain = tenantService.getCurrentUserDomain(); for (String authorityZone : authorityZones) { zoneRefs.add(getOrCreateZone(authorityZone)); + zoneAuthorityCache.remove(new Pair<String, String>(currentUserDomain, authorityZone)); } + zoneAuthorityCache.remove(new Pair<String, String>(currentUserDomain, null)); nodeService.addChild(zoneRefs, childRef, ContentModel.ASSOC_IN_ZONE, QName.createQName("cm", name, namespacePrefixResolver)); } authorityLookupCache.put(cacheKey(name), childRef); @@ -269,9 +308,17 @@ { throw new UnknownAuthorityException("An authority was not found for " + name); } - nodeService.deleteNode(nodeRef); + String currentUserDomain = tenantService.getCurrentUserDomain(); + for (String authorityZone : getAuthorityZones(name)) + { + zoneAuthorityCache.remove(new Pair<String, String>(currentUserDomain, authorityZone)); + } + zoneAuthorityCache.remove(new Pair<String, String>(currentUserDomain, null)); + removeParentsFromChildAuthorityCache(nodeRef); authorityLookupCache.remove(cacheKey(name)); userAuthorityCache.clear(); + + nodeService.deleteNode(nodeRef); } // Get authorities by type and/or zone (both cannot be null) @@ -626,6 +673,7 @@ throw new UnknownAuthorityException("An authority was not found for " + childName); } nodeService.removeChild(parentRef, childRef); + childAuthorityCache.remove(parentRef); if (AuthorityType.getAuthorityType(childName) == AuthorityType.USER) { userAuthorityCache.remove(childName); @@ -671,6 +719,94 @@ } }
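// The zone sample cache invalidated above is keyed by (tenant domain, zone name)
// pairs, with a null zone keying the cross-zone sample. A sketch of the lookup the
// method below performs against the DAO's own fields:
Pair<String, String> sampleKey = new Pair<String, String>(tenantService.getCurrentUserDomain(), AuthorityService.ZONE_APP_SHARE);
List<ChildAssociationRef> sample = zoneAuthorityCache.get(sampleKey); // null means the sample is not yet warmed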
+ public Set<String> getContainingAuthoritiesInZone(AuthorityType type, String authority, final String zoneName, AuthorityFilter filter, int size) + { + // Retrieve the cached 'sample' of authorities in the zone + String currentUserDomain = tenantService.getCurrentUserDomain(); + Pair<String, String> cacheKey = new Pair<String, String>(currentUserDomain, zoneName); + List<ChildAssociationRef> zoneAuthorities = zoneAuthorityCache.get(cacheKey); + final int maxToProcess = Math.max(size, zoneAuthoritySampleSize); + if (zoneAuthorities == null) + { + zoneAuthorities = AuthenticationUtil.runAs(new RunAsWork<List<ChildAssociationRef>>() + { + @Override + public List<ChildAssociationRef> doWork() throws Exception + { + NodeRef root = zoneName == null ? getAuthorityContainer() : getZone(zoneName); + if (root == null) + { + return Collections.emptyList(); + } + return nodeService.getChildAssocs(root, null, null, maxToProcess, false); + } + }, tenantService.getDomainUser(AuthenticationUtil.getSystemUserName(), currentUserDomain)); + zoneAuthorityCache.put(cacheKey, zoneAuthorities); + } + + // Now search each for the required authority. If the number of results is greater than or close to the size + // limit, then this will be the most efficient route + Set<String> result = new TreeSet<String>(); + final int maxResults = size > 0 ? size : Integer.MAX_VALUE; + int hits = 0, processed = 0; + for (ChildAssociationRef groupAssoc : zoneAuthorities) + { + String containing = groupAssoc.getQName().getLocalName(); + AuthorityType containingType = AuthorityType.getAuthorityType(containing); + processed++; + // Cache the authority by key, if appropriate + switch (containingType) + { + case USER: + case ADMIN: + case GUEST: + break; + default: + Pair<String, String> containingKey = cacheKey(containing); + if (!authorityLookupCache.contains(containingKey)) + { + authorityLookupCache.put(containingKey, groupAssoc.getChildRef()); + } + } + if ((type == null || containingType == type) + && (authority == null || isAuthorityContained(groupAssoc.getChildRef(), authority)) + && (filter == null || filter.includeAuthority(containing))) + { + result.add(containing); + if (++hits == maxResults) + { + break; + } + } + + // If this top-down search is not providing an adequate hit count then resort to a naive unlimited search + if (processed >= maxToProcess) + { + if (authority == null) + { + return new HashSet<String>(getAuthorities(type, zoneName, null, false, true, new PagingRequest(0, maxResults, null)).getPage()); + } + Set<String> newResult = getContainingAuthorities(type, authority, false); + result.clear(); + int i = 0; + for (String container : newResult) + { + if ((filter == null || filter.includeAuthority(container)) + && (zoneName == null || getAuthorityZones(container).contains(zoneName))) + { + result.add(container); + if (++i >= maxResults) + { + break; + } + } + } + break; + } + } + return result; + } + public String getShortName(String name) { AuthorityType type = AuthorityType.getAuthorityType(name); @@ -804,6 +940,44 @@ } } } + + + // Take advantage of the fact that the authority name is on the child association + public boolean isAuthorityContained(NodeRef authorityNodeRef, String authorityToFind) + { + List<ChildAssociationRef> cars = childAuthorityCache.get(authorityNodeRef); + if (cars == null) + { + cars = nodeService.getChildAssocs(authorityNodeRef, RegexQNamePattern.MATCH_ALL, + RegexQNamePattern.MATCH_ALL, false); + childAuthorityCache.put(authorityNodeRef, cars); + } + + // Loop over children recursively to find authorityToFind + for (ChildAssociationRef car : cars) + { + String authorityName = car.getQName().getLocalName(); + if (authorityToFind.equals(authorityName) + || AuthorityType.getAuthorityType(authorityName) != AuthorityType.USER + && isAuthorityContained(car.getChildRef(), authorityToFind)) + { + return true; + } + } + return false; + }
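// Worked trace of isAuthorityContained() above, assuming illustrative authorities
// where GROUP_a contains GROUP_b and GROUP_b contains the user jbloggs:
//   isAuthorityContained(nodeRef(GROUP_a), "jbloggs")
//   -> child assocs of GROUP_a yield ["GROUP_b"] (fetched once, then served from childAuthorityCache)
//   -> "GROUP_b" != "jbloggs" and GROUP_b is not of type USER, so recurse into GROUP_b
//   -> child assocs of GROUP_b yield ["jbloggs"] -> equals the target, so the walk returns true
// USER children are never recursed into, which bounds the depth of the walk.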
+ + private void removeParentsFromChildAuthorityCache(NodeRef nodeRef) + { + for (ChildAssociationRef car : nodeService.getParentAssocs(nodeRef)) + { + NodeRef parentRef = car.getParentRef(); + if (dictionaryService.isSubClass(nodeService.getType(parentRef), ContentModel.TYPE_AUTHORITY_CONTAINER)) + { + childAuthorityCache.remove(parentRef); + } + } + } private NodeRef getAuthorityOrNull(String name) { @@ -829,13 +1003,10 @@ { List<ChildAssociationRef> results = nodeService.getChildAssocs(getAuthorityContainer(), ContentModel.ASSOC_CHILDREN, QName.createQName("cm", name, namespacePrefixResolver), false); - if (!results.isEmpty()) - { - result = results.get(0).getChildRef(); - authorityLookupCache.put(cacheKey, result); - } + result = results.isEmpty() ? NULL_NODEREF : results.get(0).getChildRef(); + authorityLookupCache.put(cacheKey, result); } - return result; + return result == NULL_NODEREF ? null : result; } } catch (NoSuchPersonException e) @@ -1084,6 +1255,7 @@ public void beforeDeleteNode(NodeRef nodeRef) { userAuthorityCache.remove(getAuthorityName(nodeRef)); + removeParentsFromChildAuthorityCache(nodeRef); } public void onUpdateProperties(NodeRef nodeRef, Map<QName, Serializable> before, Map<QName, Serializable> after) @@ -1109,7 +1281,6 @@ // Fix any ACLs aclDao.renameAuthority(authBefore, authAfter); } - // Fix primary association local name QName newAssocQName = QName.createQName("cm", authAfter, namespacePrefixResolver); @@ -1137,7 +1308,7 @@ { userAuthorityCache.remove(authBefore); } - + removeParentsFromChildAuthorityCache(nodeRef); } else { diff --git a/source/java/org/alfresco/repo/security/authority/AuthorityServiceImpl.java b/source/java/org/alfresco/repo/security/authority/AuthorityServiceImpl.java index 5bce944791..ace49df128 100644 --- a/source/java/org/alfresco/repo/security/authority/AuthorityServiceImpl.java +++ b/source/java/org/alfresco/repo/security/authority/AuthorityServiceImpl.java @@ -18,12 +18,15 @@ */ package org.alfresco.repo.security.authority; +import java.util.AbstractSet; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Set; +import java.util.TreeSet; import org.alfresco.query.PagingRequest; import org.alfresco.query.PagingResults; @@ -123,6 +126,7 @@ public class AuthorityServiceImpl implements AuthorityService, InitializingBean this.guestGroups = guestGroups; } + @Override public void afterPropertiesSet() throws Exception { // Fully qualify the admin group names @@ -199,6 +203,32 @@ return getAuthoritiesForUser(canonicalName).contains(PermissionService.GUEST_AUTHORITY); } + /** + * Checks if the {@code authority} (normally a username) is the same as or is contained + within the {@code parentAuthority}. + * @param authority + * @param parentAuthority a normalized, case-sensitive authority name + * @return {@code true} if it does, {@code false} otherwise. + */ + private boolean hasAuthority(String authority, String parentAuthority) + { + if (parentAuthority.equals(authority)) + { + return true; + } + // Even users are matched case-sensitively in ACLs + if (AuthorityType.getAuthorityType(parentAuthority) == AuthorityType.USER) + { + return false; + } + NodeRef nodeRef = authorityDAO.getAuthorityNodeRefOrNull(parentAuthority); + if (nodeRef == null) + { + return false; + } + return authorityDAO.isAuthorityContained(nodeRef, authority); + }
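// Sketch of what hasAuthority() buys callers further down: membership probes become
// cheap containment walks instead of full closure expansions. Names are illustrative.
Set<String> auths = authorityService.getAuthoritiesForUser("jbloggs");
// contains() consults the role set and memoised hits first, then probes containment;
// the user's complete authority closure is only materialised on iterate/size.
boolean isManager = auths.contains("GROUP_site_example_SiteManager");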
+ + /** * {@inheritDoc} */ @@ -214,16 +244,17 @@ */ public Set<String> getAuthoritiesForUser(String currentUserName) { - Set<String> authorities = new HashSet<String>(64); - - authorities.addAll(getContainingAuthorities(null, currentUserName, false)); - - // Work out mapped roles + return new UserAuthoritySet(currentUserName); + } + + // Return mapped roles + private Set<String> getRoleAuthorities(String currentUserName) + { + Set<String> authorities = new TreeSet<String>(); // Check named guest and admin users - Set<String> adminUsers = this.authenticationService.getDefaultAdministratorUserNames(); - - Set<String> guestUsers = this.authenticationService.getDefaultGuestUserNames(); + Set<String> adminUsers = authenticationService.getDefaultAdministratorUserNames(); + Set<String> guestUsers = authenticationService.getDefaultGuestUserNames(); String defaultGuestName = AuthenticationUtil.getGuestUserName(); if (defaultGuestName != null && defaultGuestName.length() > 0) @@ -236,23 +267,32 @@ boolean isGuestUser = containsMatch(guestUsers, currentUserName); // Check if any of the user's groups are listed as admin groups - if (!isAdminUser && !adminGroups.isEmpty()) + if (!isAdminUser) { - for (String authority : authorities) + for (String authority : adminGroups) { - if (adminGroups.contains(authority) || adminGroups.contains(tenantService.getBaseNameUser(authority))) + if (hasAuthority(currentUserName, authority) || hasAuthority(currentUserName, tenantService.getBaseNameUser(authority))) { isAdminUser = true; break; } } } - // Check if any of the user's groups are listed as guest groups - if (!isAdminUser && !isGuestUser && !guestGroups.isEmpty()) + + // Check if the user name matches the guest user name (ignoring case); if so, it is a guest. Code originally in PermissionService. 
+ if (!isAdminUser && !isGuestUser && + tenantService.getBaseNameUser(currentUserName).equalsIgnoreCase(AuthenticationUtil.getGuestUserName())) { - for (String authority : authorities) + isGuestUser = true; + + } + + // Check if any of the user's groups are listed as guest groups + if (!isAdminUser && !isGuestUser) + { + for (String authority : guestGroups) { - if (guestGroups.contains(authority) || guestGroups.contains(tenantService.getBaseNameUser(authority))) + if (hasAuthority(currentUserName, authority) || hasAuthority(currentUserName, tenantService.getBaseNameUser(authority))) { isGuestUser = true; break; @@ -274,6 +314,7 @@ { authorities.addAll(guestSet); } + return authorities; } @@ -501,6 +542,12 @@ /** * {@inheritDoc} */ + public Set<String> getContainingAuthoritiesInZone(AuthorityType type, String authority, final String zoneName, AuthorityFilter filter, int size) + { + return authorityDAO.getContainingAuthoritiesInZone(type, authority, zoneName, filter, size); + } + + @Override public void removeAuthority(String parentName, String childName) { authorityDAO.removeAuthority(parentName, childName); @@ -645,4 +692,118 @@ { return authorityDAO.getShortName(name); } + + + /** + * Lazily loaded set of authorities. Try not to iterate or ask for the size. Needed for the case where there + is a large number of sites/groups. + * + * @author David Ward, Alan Davis + */ + public final class UserAuthoritySet extends AbstractSet<String> + { + private final String username; + private Set<String> positiveHits; + private Set<String> negativeHits; + private boolean allAuthoritiesLoaded; + + /** + * @param username + */ + public UserAuthoritySet(String username) + { + this.username = username; + positiveHits = getRoleAuthorities(username); + negativeHits = new TreeSet<String>(); + } + + // Try to avoid evaluating the full set unless we have to! + private Set<String> getAllAuthorities() + { + if (!allAuthoritiesLoaded) + { + allAuthoritiesLoaded = true; + Set<String> tmp = positiveHits; // must add role authorities back in. 
+ positiveHits = getContainingAuthorities(null, username, false); + positiveHits.addAll(tmp); + negativeHits = null; + } + return positiveHits; + } + + @Override + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean add(String e) + { + return positiveHits.add(e); + } + + @Override + public void clear() + { + throw new UnsupportedOperationException(); + } + + @Override + public boolean contains(Object o) + { + if (!(o instanceof String)) + { + return false; + } + if (positiveHits.contains(o)) + { + return true; + } + if (allAuthoritiesLoaded || negativeHits.contains(o)) + { + return false; + } + // Remember positive and negative hits for next time + if (hasAuthority(username, (String) o)) + { + positiveHits.add((String) o); + return true; + } + else + { + negativeHits.add((String)o); + return false; + } + } + + @Override + public boolean remove(Object o) + { + throw new UnsupportedOperationException(); + } + + @Override + public boolean retainAll(Collection c) + { + throw new UnsupportedOperationException(); + } + + @Override + public Iterator iterator() + { + return getAllAuthorities().iterator(); + } + + @Override + public int size() + { + return getAllAuthorities().size(); + } + + public Object getUsername() + { + return username; + } + } } diff --git a/source/java/org/alfresco/repo/security/permissions/impl/PermissionServiceImpl.java b/source/java/org/alfresco/repo/security/permissions/impl/PermissionServiceImpl.java index 8bc51bc97e..8948dc996d 100644 --- a/source/java/org/alfresco/repo/security/permissions/impl/PermissionServiceImpl.java +++ b/source/java/org/alfresco/repo/security/permissions/impl/PermissionServiceImpl.java @@ -39,6 +39,7 @@ import org.alfresco.repo.policy.JavaBehaviour; import org.alfresco.repo.policy.PolicyComponent; import org.alfresco.repo.security.authentication.AuthenticationUtil; import org.alfresco.repo.security.authentication.AuthenticationUtil.RunAsWork; +import org.alfresco.repo.security.authority.AuthorityServiceImpl; import org.alfresco.repo.security.permissions.ACLType; import org.alfresco.repo.security.permissions.AccessControlEntry; import org.alfresco.repo.security.permissions.AccessControlList; @@ -68,11 +69,11 @@ import org.alfresco.service.namespace.NamespaceService; import org.alfresco.service.namespace.QName; import org.alfresco.util.EqualsHelper; import org.alfresco.util.Pair; +import org.alfresco.util.PropertyCheck; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.springframework.context.ApplicationEvent; import org.springframework.extensions.surf.util.AbstractLifecycleBean; -import org.alfresco.util.PropertyCheck; /** * The Alfresco implementation of a permissions service against our APIs for the permissions model and permissions @@ -279,6 +280,26 @@ public class PermissionServiceImpl extends AbstractLifecycleBean implements Perm accessCache.clear(); } + /** + * Cache clear on create of a child association from an authority container. + * + * @param childAssocRef + */ + public void onCreateChildAssociation(ChildAssociationRef childAssocRef) + { + accessCache.clear(); + } + + /** + * Cache clear on delete of a child association from an authority container. 
+ * + * @param childAssocRef + */ + public void beforeDeleteChildAssociation(ChildAssociationRef childAssocRef) + { + accessCache.clear(); + } + @Override protected void onBootstrap(ApplicationEvent event) { @@ -307,6 +328,9 @@ public class PermissionServiceImpl extends AbstractLifecycleBean implements Perm public void init() { policyComponent.bindClassBehaviour(QName.createQName(NamespaceService.ALFRESCO_URI, "onMoveNode"), ContentModel.TYPE_BASE, new JavaBehaviour(this, "onMoveNode")); + + policyComponent.bindClassBehaviour(QName.createQName(NamespaceService.ALFRESCO_URI, "onCreateChildAssociation"), ContentModel.TYPE_AUTHORITY_CONTAINER, new JavaBehaviour(this, "onCreateChildAssociation")); + policyComponent.bindClassBehaviour(QName.createQName(NamespaceService.ALFRESCO_URI, "beforeDeleteChildAssociation"), ContentModel.TYPE_AUTHORITY_CONTAINER, new JavaBehaviour(this, "beforeDeleteChildAssociation")); } // @@ -474,10 +498,13 @@ public class PermissionServiceImpl extends AbstractLifecycleBean implements Perm PermissionContext context = new PermissionContext(typeQname); context.getAspects().addAll(aspectQNames); Authentication auth = AuthenticationUtil.getRunAsAuthentication(); - String user = AuthenticationUtil.getRunAsUser(); - for (String dynamicAuthority : getDynamicAuthorities(auth, nodeRef, perm)) + if (auth != null) { - context.addDynamicAuthorityAssignment(user, dynamicAuthority); + String user = AuthenticationUtil.getRunAsUser(); + for (String dynamicAuthority : getDynamicAuthorities(auth, nodeRef, perm)) + { + context.addDynamicAuthorityAssignment(user, dynamicAuthority); + } } return hasPermission(properties.getId(), context, perm); } @@ -711,12 +738,43 @@ public class PermissionServiceImpl extends AbstractLifecycleBean implements Perm { LinkedHashSet key = new LinkedHashSet(); key.add(perm.toString()); - key.addAll(auths); + // We will just have to key our dynamic sets by username. We wrap it so as not to be confused with a static set + if (auths instanceof AuthorityServiceImpl.UserAuthoritySet) + { + key.add((Serializable)Collections.singleton(((AuthorityServiceImpl.UserAuthoritySet)auths).getUsername())); + } + else + { + key.addAll(auths); + } key.add(nodeRef); key.add(type); return key; } + /** + * Get the core authorisations for this {@code auth}. If {@code null} this + * will be an empty set. Otherwise it will be a Lazy loaded Set of authorities + * from the authority node structure PLUS any granted authorities. + */ + private Set getCoreAuthorisations(Authentication auth) + { + if (auth == null) + { + return Collections.emptySet(); + } + + User user = (User) auth.getPrincipal(); + String username = user.getUsername(); + Set auths = authorityService.getAuthoritiesForUser(username); + + for (GrantedAuthority grantedAuthority : auth.getAuthorities()) + { + auths.add(grantedAuthority.getAuthority()); + } + return auths; + } + /** * Get the authorisations for the currently authenticated user * @@ -725,41 +783,17 @@ public class PermissionServiceImpl extends AbstractLifecycleBean implements Perm */ private Set getAuthorisations(Authentication auth, NodeRef nodeRef, PermissionReference required) { - - HashSet auths = new HashSet(); - // No authenticated user then no permissions - if (auth == null) + Set auths = getCoreAuthorisations(auth); + if (auth != null) { - return auths; + auths.addAll(getDynamicAuthorities(auth, nodeRef, required)); } - // TODO: Refactor and use the authentication service for this. 
- User user = (User) auth.getPrincipal(); - - String username = user.getUsername(); - auths.add(username); - - if (tenantService.getBaseNameUser(username).equalsIgnoreCase(AuthenticationUtil.getGuestUserName())) - { - auths.add(PermissionService.GUEST_AUTHORITY); - } - - for (GrantedAuthority authority : auth.getAuthorities()) - { - auths.add(authority.getAuthority()); - } - auths.addAll(getDynamicAuthorities(auth, nodeRef, required)); - auths.addAll(authorityService.getAuthoritiesForUser(username)); return auths; } - + private Set getDynamicAuthorities(Authentication auth, NodeRef nodeRef, PermissionReference required) { - HashSet auths = new HashSet(64); - - if (auth == null) - { - return auths; - } + Set dynAuths = new HashSet(64); User user = (User) auth.getPrincipal(); String username = user.getUsername(); @@ -775,49 +809,44 @@ public class PermissionServiceImpl extends AbstractLifecycleBean implements Perm { if (da.hasAuthority(nodeRef, username)) { - auths.add(da.getAuthority()); + dynAuths.add(da.getAuthority()); } } } } } - auths.addAll(authorityService.getAuthoritiesForUser(user.getUsername())); - return auths; + return dynAuths; } private Set getAuthorisations(Authentication auth, PermissionContext context) { - HashSet auths = new HashSet(); - // No authenticated user then no permissions - if (auth == null) + Set auths = getCoreAuthorisations(auth); + if (auth != null) { - return auths; - } - // TODO: Refactor and use the authentication service for this. - User user = (User) auth.getPrincipal(); - auths.add(user.getUsername()); - for (GrantedAuthority authority : auth.getAuthorities()) - { - auths.add(authority.getAuthority()); - } - auths.addAll(authorityService.getAuthoritiesForUser(user.getUsername())); - - if (context != null) - { - Map> dynamicAuthorityAssignments = context.getDynamicAuthorityAssignment(); - HashSet dynAuths = new HashSet(); - for (String current : auths) + if (context != null) { - Set dynos = dynamicAuthorityAssignments.get(current); + auths.addAll(getDynamicAuthorities(auth, context, auths)); + } + } + return auths; + } + + private Set getDynamicAuthorities(Authentication auth, PermissionContext context, Set auths) + { + Set dynAuths = new HashSet(); + Map> dynamicAuthorityAssignments = context.getDynamicAuthorityAssignment(); + for (String dynKey : dynamicAuthorityAssignments.keySet()) + { + if (auths.contains(dynKey)) + { + Set dynos = dynamicAuthorityAssignments.get(dynKey); if (dynos != null) { dynAuths.addAll(dynos); } } - auths.addAll(dynAuths); } - - return auths; + return dynAuths; } public NodePermissionEntry explainPermission(NodeRef nodeRef, PermissionReference perm) @@ -1161,25 +1190,11 @@ public class PermissionServiceImpl extends AbstractLifecycleBean implements Perm // test acl readers Set aclReaders = getReaders(aclId); - // both lists are ordered so we can skip scan to find any overlap - if(authorities.size() < aclReaders.size()) + for(String auth : aclReaders) { - for(String auth : authorities) + if(authorities.contains(auth)) { - if(aclReaders.contains(auth)) - { - return AccessStatus.ALLOWED; - } - } - } - else - { - for(String auth : aclReaders) - { - if(authorities.contains(auth)) - { - return AccessStatus.ALLOWED; - } + return AccessStatus.ALLOWED; } } @@ -1641,29 +1656,6 @@ public class PermissionServiceImpl extends AbstractLifecycleBean implements Perm // any deny denies -// if (false) -// { -// if (denied != null) -// { -// for (String auth : authorisations) -// { -// Pair specific = new Pair(auth, required); -// if 
(denied.contains(specific)) -// { -// return false; -// } -// for (PermissionReference perm : granters) -// { -// specific = new Pair(auth, perm); -// if (denied.contains(specific)) -// { -// return false; -// } -// } -// } -// } -// } - // If the permission has a match in both the authorities and // granters list it is allowed // It applies to the current user and it is granted @@ -1918,29 +1910,6 @@ public class PermissionServiceImpl extends AbstractLifecycleBean implements Perm // any deny denies -// if (false) -// { -// if (denied != null) -// { -// for (String auth : authorisations) -// { -// Pair specific = new Pair(auth, required); -// if (denied.contains(specific)) -// { -// return false; -// } -// for (PermissionReference perm : granters) -// { -// specific = new Pair(auth, perm); -// if (denied.contains(specific)) -// { -// return false; -// } -// } -// } -// } -// } - // If the permission has a match in both the authorities and // granters list it is allowed // It applies to the current user and it is granted @@ -2336,34 +2305,19 @@ public class PermissionServiceImpl extends AbstractLifecycleBean implements Perm public Set getAuthorisations() { // Use TX cache - @SuppressWarnings("unchecked") Set auths = (Set) AlfrescoTransactionSupport.getResource("MyAuthCache"); Authentication auth = AuthenticationUtil.getRunAsAuthentication(); - User user = (User) auth.getPrincipal(); - if(auths != null) + if (auths != null) { - if(!auths.contains(user.getUsername())) + if (auth == null || !auths.contains(((User)auth.getPrincipal()).getUsername())) { auths = null; } } if (auths == null) { - auths = new HashSet(); - - // No authenticated user then no permissions - if (auth != null) - { - - auths.add(user.getUsername()); - for (GrantedAuthority authority : auth.getAuthorities()) - { - auths.add(authority.getAuthority()); - } - auths.addAll(authorityService.getAuthoritiesForUser(user.getUsername())); - } - + auths = getCoreAuthorisations(auth); AlfrescoTransactionSupport.bindResource("MyAuthCache", auths); } return Collections.unmodifiableSet(auths); diff --git a/source/java/org/alfresco/repo/site/SiteServiceBootstrap.java b/source/java/org/alfresco/repo/site/SiteServiceBootstrap.java new file mode 100644 index 0000000000..1364844753 --- /dev/null +++ b/source/java/org/alfresco/repo/site/SiteServiceBootstrap.java @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2005-2011 Alfresco Software Limited. + * + * This file is part of Alfresco + * + * Alfresco is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * Alfresco is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with Alfresco. If not, see . 
+ */ +package org.alfresco.repo.site; + +import java.util.List; + +import org.alfresco.repo.security.authentication.AuthenticationUtil; +import org.alfresco.repo.security.authentication.AuthenticationUtil.RunAsWork; +import org.alfresco.repo.tenant.Tenant; +import org.alfresco.repo.tenant.TenantAdminService; +import org.alfresco.service.cmr.site.SiteService; +import org.springframework.context.ApplicationEvent; +import org.springframework.extensions.surf.util.AbstractLifecycleBean; + +/** + * Warms up site zone / authority caches before the first access to a user dashboard + * + * @author dward + */ +public class SiteServiceBootstrap extends AbstractLifecycleBean +{ + private SiteService siteService; + private TenantAdminService tenantAdminService; + + public void setSiteService(SiteService siteService) + { + this.siteService = siteService; + } + + public void setTenantAdminService(TenantAdminService tenantAdminService) + { + this.tenantAdminService = tenantAdminService; + } + + /* + * (non-Javadoc) + * @seeorg.springframework.extensions.surf.util.AbstractLifecycleBean#onBootstrap(org.springframework.context. + * ApplicationEvent) + */ + @Override + protected void onBootstrap(ApplicationEvent event) + { + AuthenticationUtil.runAs(new RunAsWork() + { + public Object doWork() throws Exception + { + siteService.listSites("a"); + return null; + } + }, AuthenticationUtil.getSystemUserName()); + + if (tenantAdminService.isEnabled()) + { + List tenants = tenantAdminService.getAllTenants(); + for (Tenant tenant : tenants) + { + AuthenticationUtil.runAs(new RunAsWork() + { + public Object doWork() throws Exception + { + siteService.listSites("a"); + return null; + } + }, tenantAdminService.getDomainUser(AuthenticationUtil.getSystemUserName(), tenant.getTenantDomain())); + } + } + } + + /* + * (non-Javadoc) + * @seeorg.springframework.extensions.surf.util.AbstractLifecycleBean#onShutdown(org.springframework.context. 
+ * ApplicationEvent) + */ + @Override + protected void onShutdown(ApplicationEvent event) + { + } +} diff --git a/source/java/org/alfresco/repo/site/SiteServiceImpl.java b/source/java/org/alfresco/repo/site/SiteServiceImpl.java index 89965ab0a1..11d6eda5f0 100644 --- a/source/java/org/alfresco/repo/site/SiteServiceImpl.java +++ b/source/java/org/alfresco/repo/site/SiteServiceImpl.java @@ -69,7 +69,9 @@ import org.alfresco.service.cmr.repository.ChildAssociationRef; import org.alfresco.service.cmr.repository.NodeRef; import org.alfresco.service.cmr.repository.NodeService; import org.alfresco.service.cmr.repository.StoreRef; +import org.alfresco.service.cmr.search.LimitBy; import org.alfresco.service.cmr.search.ResultSet; +import org.alfresco.service.cmr.search.SearchParameters; import org.alfresco.service.cmr.search.SearchService; import org.alfresco.service.cmr.security.AccessPermission; import org.alfresco.service.cmr.security.AccessStatus; @@ -78,6 +80,7 @@ import org.alfresco.service.cmr.security.AuthorityType; import org.alfresco.service.cmr.security.NoSuchPersonException; import org.alfresco.service.cmr.security.PermissionService; import org.alfresco.service.cmr.security.PersonService; +import org.alfresco.service.cmr.security.AuthorityService.AuthorityFilter; import org.alfresco.service.cmr.site.SiteInfo; import org.alfresco.service.cmr.site.SiteService; import org.alfresco.service.cmr.site.SiteVisibility; @@ -786,11 +789,16 @@ public class SiteServiceImpl extends AbstractLifecycleBean implements SiteServic query.append(")"); } - ResultSet results = this.searchService.query( - siteRoot.getStoreRef(), - SearchService.LANGUAGE_LUCENE, - query.toString(), - null); + SearchParameters sp = new SearchParameters(); + sp.addStore(siteRoot.getStoreRef()); + sp.setLanguage(SearchService.LANGUAGE_LUCENE); + sp.setQuery(query.toString()); + if (size != 0) + { + sp.setLimit(size); + sp.setLimitBy(LimitBy.FINAL_SIZE); + } + ResultSet results = this.searchService.query(sp); try { result = new ArrayList(results.length()); @@ -798,11 +806,9 @@ public class SiteServiceImpl extends AbstractLifecycleBean implements SiteServic { // Ignore any node type that is not a "site" QName siteClassName = this.nodeService.getType(site); - if (this.dictionaryService.isSubClass(siteClassName, SiteModel.TYPE_SITE) == true) + if (this.dictionaryService.isSubClass(siteClassName, SiteModel.TYPE_SITE)) { result.add(createSiteInfo(site)); - // break on max size limit reached - if (result.size() == size) break; } } } @@ -864,6 +870,14 @@ public class SiteServiceImpl extends AbstractLifecycleBean implements SiteServic * @see org.alfresco.service.cmr.site.SiteService#listSites(java.lang.String) */ public List listSites(final String userName) + { + return listSites(userName, 0); + } + + /** + * @see org.alfresco.service.cmr.site.SiteService#listSites(java.lang.String, int) + */ + public List listSites(final String userName, final int size) { // MT share - for activity service system callback if (tenantService.isEnabled() && (AuthenticationUtil.SYSTEM_USER_NAME.equals(AuthenticationUtil.getRunAsUser())) && tenantService.isTenantUser(userName)) @@ -874,13 +888,13 @@ public class SiteServiceImpl extends AbstractLifecycleBean implements SiteServic { public List doWork() throws Exception { - return listSitesImpl(userName); + return listSitesImpl(userName, size); } }, tenantService.getDomainUser(AuthenticationUtil.getSystemUserName(), tenantDomain)); } else { - return listSitesImpl(userName); + return listSitesImpl(userName, size); 
} } @@ -961,72 +975,62 @@ * @param userName the username * @return a list of {@link SiteInfo site infos}. */ - private List<SiteInfo> listSitesImpl(String userName) + private String resolveSite(String group) { - List<SiteInfo> result = null; - - // get the Groups this user is contained within (at any level) - Set<String> groups = this.authorityService.getContainingAuthorities(null, userName, false); - Set<String> siteNames = new HashSet<String>(groups.size()); - // purge non Site related Groups and strip the group name down to the site "shortName" it relates to - for (String group : groups) + // purge non Site related Groups and strip the group name down to the site "shortName" it relates to + if (group.startsWith(GROUP_SITE_PREFIX)) { - if (group.startsWith(GROUP_SITE_PREFIX)) + int roleIndex = group.lastIndexOf('_'); + if (roleIndex + 1 <= GROUP_SITE_PREFIX_LENGTH) { - int roleIndex = group.lastIndexOf('_'); - String siteName; - if (roleIndex + 1 <= GROUP_SITE_PREFIX_LENGTH) - { - // There is no role associated - siteName = group.substring(GROUP_SITE_PREFIX_LENGTH); - } - else - { - siteName = group.substring(GROUP_SITE_PREFIX_LENGTH, roleIndex); - } - siteNames.add(siteName); + // There is no role associated + return group.substring(GROUP_SITE_PREFIX_LENGTH); + } + else + { + return group.substring(GROUP_SITE_PREFIX_LENGTH, roleIndex); } } - - // retrieve the site nodes based on the list from the containing site groups - NodeRef siteRoot = getSiteRoot(); - if (siteRoot == null) - { - result = Collections.emptyList(); - } - else - { - List<String> siteList = new ArrayList<String>(siteNames); - // ensure we do not trip over the getChildrenByName() 1000 item API limit! - // - // Note the implicit assumption here: that the specified user is not a member of > 1000 sites - // If the user IS a member of more than 1000 sites, then a truncated list of sites will be returned. - // Also, given that the siteNames are a Set, there is no guarantee about which sites would be - // included in the truncated results and which would be excluded. HashSets are unordered. - if (siteList.size() > 1000) - { - siteList = siteList.subList(0, 1000); - } - List<ChildAssociationRef> assocs = this.nodeService.getChildrenByName( - siteRoot, - ContentModel.ASSOC_CONTAINS, - siteList); - result = new ArrayList<SiteInfo>(assocs.size()); - for (ChildAssociationRef assoc : assocs) - { - // Ignore any node that is not a "site" type - NodeRef site = assoc.getChildRef(); - QName siteClassName = this.directNodeService.getType(site); - if (this.dictionaryService.isSubClass(siteClassName, SiteModel.TYPE_SITE)) - { - result.add(createSiteInfo(site)); - } - } - } - - return result; + return null; } + private List<SiteInfo> listSitesImpl(final String userName, int size) + { + final int maxResults = size > 0 ? 
size : 1000; + final Set<String> siteNames = new TreeSet<String>(); + authorityService.getContainingAuthoritiesInZone(AuthorityType.GROUP, userName, AuthorityService.ZONE_APP_SHARE, new AuthorityFilter(){ + @Override + public boolean includeAuthority(String authority) + { + String siteName = resolveSite(authority); + if (siteName == null) + { + return false; + } + return siteNames.add(siteName); + }}, maxResults); + if (siteNames.isEmpty()) + { + return Collections.emptyList(); + } + List<ChildAssociationRef> assocs = this.nodeService.getChildrenByName( + getSiteRoot(), + ContentModel.ASSOC_CONTAINS, + siteNames); + List<SiteInfo> result = new ArrayList<SiteInfo>(assocs.size()); + for (ChildAssociationRef assoc : assocs) + { + // Ignore any node that is not a "site" type + NodeRef site = assoc.getChildRef(); + QName siteClassName = this.directNodeService.getType(site); + if (this.dictionaryService.isSubClass(siteClassName, SiteModel.TYPE_SITE)) + { + result.add(createSiteInfo(site)); + } + } + return result; + } + /** * Creates a site information object given a site node reference * @@ -1683,18 +1687,17 @@ Set<String> roles = this.permissionService.getSettablePermissions(siteType); // First use the authority's cached recursive group memberships to answer the question quickly - Set<String> authorityGroups = this.authorityService.getContainingAuthorities(AuthorityType.GROUP, - authorityName, false); + Set<String> authorities = authorityService.getAuthoritiesForUser(authorityName); for (String role : roles) { String roleGroup = getSiteRoleGroup(siteShortName, role, true); - if (authorityGroups.contains(roleGroup)) + if (authorities.contains(roleGroup)) { fullResult.add(roleGroup); } } - // Unfortunately, due to direct membership taking precendence, we can't answer the question quickly if more than one role has been inherited + // Unfortunately, due to direct membership taking precedence, we can't answer the question quickly if more than one role has been inherited if (fullResult.size() <= 1) { return fullResult; @@ -1702,7 +1705,7 @@ // Check direct group memberships List<String> result = new ArrayList<String>(5); - authorityGroups = this.authorityService.getContainingAuthorities(AuthorityType.GROUP, + Set<String> authorityGroups = this.authorityService.getContainingAuthorities(AuthorityType.GROUP, authorityName, true); for (String role : roles) { diff --git a/source/java/org/alfresco/repo/site/SiteServiceTestHuge.java b/source/java/org/alfresco/repo/site/SiteServiceTestHuge.java new file mode 100644 index 0000000000..32b2d05360 --- /dev/null +++ b/source/java/org/alfresco/repo/site/SiteServiceTestHuge.java @@ -0,0 +1,1108 @@ +/* + * Copyright (C) 2005-2011 Alfresco Software Limited. + * + * This file is part of Alfresco + * + * Alfresco is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * Alfresco is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with Alfresco. If not, see <http://www.gnu.org/licenses/>. + */
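// Sketch of the membership query pattern introduced in listSitesImpl() above: site
// role groups for a user are pulled from the SHARE zone and filtered as they are
// found, so at most maxResults site names are ever resolved. The user name and the
// simplified criterion are illustrative; the real code maps each group back to a
// site short name via resolveSite().
Set<String> siteGroups = authorityService.getContainingAuthoritiesInZone(
        AuthorityType.GROUP, "jbloggs", AuthorityService.ZONE_APP_SHARE,
        new AuthorityFilter()
        {
            public boolean includeAuthority(String authority)
            {
                return authority.startsWith("GROUP_site_");
            }
        }, 100);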
+package org.alfresco.repo.site; + +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.PrintStream; +import java.util.Set; + +import javax.transaction.SystemException; +import javax.transaction.UserTransaction; + +import org.alfresco.model.ContentModel; +import org.alfresco.repo.security.authentication.AuthenticationComponent; +import org.alfresco.repo.security.authentication.AuthenticationUtil; +import org.alfresco.service.cmr.repository.DuplicateChildNodeNameException; +import org.alfresco.service.cmr.security.AuthorityService; +import org.alfresco.service.cmr.security.AuthorityType; +import org.alfresco.service.cmr.security.MutableAuthenticationService; +import org.alfresco.service.cmr.security.PersonService; +import org.alfresco.service.cmr.site.SiteService; +import org.alfresco.service.cmr.site.SiteVisibility; +import org.alfresco.service.transaction.TransactionService; +import org.alfresco.util.ApplicationContextHelper; +import org.alfresco.util.PropertyMap; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.springframework.context.ApplicationContext; + +/** + * Site service unit test that targets HUGE numbers of sites + * + * @author Alan Davis + */ +public class SiteServiceTestHuge +{ + private enum Allocation + { + /** + * Allocate source (groups) to target (sites) on a round robin basis + * until all targets (sites) have been allocated a single source (group). + * + * Some source (groups) might have been allocated to more than one target + * (site) or none at all. + **/ + ROUND_ROBIN_TO_TARGET, + + /** + * Allocate source (users) to target (groups) on a round robin basis + * until all source (users) have been allocated a target (group). + * + * Some target (groups) might have been allocated to more than one source + * (user) or none at all. + **/ + ROUND_ROBIN_FROM_SOURCE, + + /** + * Allocate source (users) to target (groups) on a round robin basis + * until all source (users) AND all target (groups) have been allocated + * to at least one target (group) or one source (user). + * + * If there are more target (groups) than source (users) + * then some source (users) will be in more than one target (group). + * + * If there are more source (users) than target (groups) then some + * target (groups) will contain more than one source (user). + **/ + ROUND_ROBIN_BOTH, + + /** + * Allocate all source (users) to each target (group). + * OR + * Allocate all source (users) to each target (site). + * OR + * Allocate all source (groups) to each target (site). 
+ **/ + ALL_TO_EACH, + + /** No allocation **/ + NONE + } + + private enum OnFailure + { + GIVE_UP, + KEEP_GOING + } + + // Standard numbers of users, groups and sites + private static final int NUM_USERS = 100; + private static final int NUM_GROUPS = 60; + private static int NUM_SITES = 60000; + + private static final String ADMIN_USER = "admin"; + + // Max times in ms for various activities + private static final long SECOND = 1000; + private static final long FIVE_SECONDS = SECOND * 5; + + private static final long MAX_CREATE_USER_MS = FIVE_SECONDS; + private static final long MAX_CREATE_GROUP_MS = FIVE_SECONDS; + private static final long MAX_CREATE_SITE_MS = FIVE_SECONDS; + + private static final long MAX_DELETE_USER_MS = FIVE_SECONDS; + private static final long MAX_DELETE_GROUP_MS = FIVE_SECONDS; + private static final long MAX_DELETE_SITE_MS = FIVE_SECONDS; + + private static final long MAX_USER_TO_GROUP_MS = FIVE_SECONDS; + private static final long MAX_USER_TO_SITE_MS = FIVE_SECONDS; + private static final long MAX_GROUP_TO_SITE_MS = FIVE_SECONDS; + + // Used to save having to check if users, groups and sites exist if already created by this test. + private static int usersCreated = 0; + private static int groupsCreated = 0; + private static int sitesCreated = 0; + + private static ApplicationContext ctx = ApplicationContextHelper.getApplicationContext(); + private static TransactionService transactionService = (TransactionService)ctx.getBean("TransactionService"); + private static AuthenticationComponent authenticationComponent = (AuthenticationComponent)ctx.getBean("authenticationComponent"); + private static MutableAuthenticationService authenticationService = (MutableAuthenticationService)ctx.getBean("authenticationService"); + private static PersonService personService = (PersonService)ctx.getBean("PersonService"); + private static AuthorityService authorityService = (AuthorityService)ctx.getBean("AuthorityService"); + private static SiteService siteService = (SiteService)ctx.getBean("SiteService"); // Big 'S' + + private static String logFilename; + private static PrintStream log; + + @BeforeClass + public static void setUpBeforeClass() throws Exception + { + logFilename = "sites.log"; + } + + @AfterClass + public static void tearDownClass() + { + if (log != null) + { + log.close(); + } + } + + @Before + public void setUp() throws Exception + { + authenticationComponent.setCurrentUser(AuthenticationUtil.getAdminUserName()); + } + + @After + public void tearDown() throws Exception + { + authenticationComponent.clearCurrentSecurityContext(); + } + + private void log(String msg) throws Exception + { + System.out.println(msg); + if (logFilename != null) + { + if (log == null) + { + log = new PrintStream(new File(logFilename)); + } + log.println(msg); + } + } + + // ------------------ main test helper methods -------------------- + + private void createAndAllocate(int userCount, int groupCount, int siteCount, + Allocation usersToGroups, Allocation usersToSites, Allocation groupsToSites) + throws Exception + { + createUsersGroupsAndSites(userCount, groupCount, siteCount); + allocateUsersToGroupsAndSites(userCount, groupCount, siteCount, usersToGroups, + usersToSites, groupsToSites); + } + + private void createUsersGroupsAndSites(int userCount, int groupCount, int siteCount) + throws Exception + { + createUsers(userCount); + createGroups(groupCount); + createSites(siteCount, userCount, 0, OnFailure.GIVE_UP); + } + + private void allocateUsersToGroupsAndSites(int userCount, int 
groupCount, int siteCount, + Allocation usersToGroups, Allocation usersToSites, Allocation groupsToSites) + throws Exception + { + if ((usersToGroups == Allocation.ALL_TO_EACH && + usersToSites == Allocation.NONE && + groupsToSites == Allocation.ROUND_ROBIN_TO_TARGET) + || + (usersToGroups == Allocation.NONE && + usersToSites == Allocation.ALL_TO_EACH && + groupsToSites == Allocation.NONE)) + { + allocateUsersToGroups(userCount, groupCount, usersToGroups); + allocateUsersToSites(userCount, siteCount, usersToSites); + allocateGroupsToSites(groupCount, siteCount, groupsToSites, userCount, 0, OnFailure.GIVE_UP); + } + else + { + fail("Users are not able to see all sites or can see a site directly and via a group"); + } + } + + private void addMoreSites(int sitesToAdd, OnFailure onFailureAction) throws Exception + { + log("\n\n ADD "+sitesToAdd+" MORE SITES AND ADD A GROUP TO EACH"); + Allocation groupsToSites = Allocation.ROUND_ROBIN_TO_TARGET; + int sitesAlreadyCreated = sitesCreated; + int siteCount = sitesAlreadyCreated + sitesToAdd; + + long ms = System.currentTimeMillis(); + createSites(siteCount, NUM_USERS, sitesAlreadyCreated, OnFailure.GIVE_UP); + allocateGroupsToSites(NUM_GROUPS, siteCount, groupsToSites, NUM_USERS, sitesAlreadyCreated, OnFailure.KEEP_GOING); + assertTime("Add more sites", sitesAlreadyCreated+1, sitesToAdd, ms, MAX_CREATE_SITE_MS+MAX_GROUP_TO_SITE_MS, onFailureAction, sitesToAdd); + } + + private void assertTime(String what, int id1, int id2, long start, long max) throws Exception + { + assertTime(what, id1, id2, start, max, OnFailure.GIVE_UP, 1); + } + + private void assertTime(String what, int id1, int id2, long start, long max, OnFailure onFailureAction, int blockSize) throws Exception + { + long ms = (System.currentTimeMillis() - start)/blockSize; + + String msg = what+","+id1+(id2 > 0 ? ","+id2 : "")+","+ms+",ms"+((blockSize == 1) ? 
"" : " average for,"+blockSize); + log(msg); + if (ms > max && onFailureAction == OnFailure.GIVE_UP) + { + fail(msg+" is longer than "+max); + } + } + + // ------------------ create N -------------------- + + // Creates users and removes extra ones + private void createUsers(int userCount) throws Exception + { + for (int userId=1; ; userId++) + { + String userName = getUserName(userId); + boolean exists = (userId <= usersCreated) || personService.personExists(userName); + + if (userId <= userCount) + { + if (!exists) + { + long ms = System.currentTimeMillis(); + createUser(userName); + assertTime("Create user", userId, -1, ms, MAX_CREATE_USER_MS); + } + } + else + { + if (!exists) + { + break; + } + long ms = System.currentTimeMillis(); + deleteUser(userName); + assertTime("Delete user", userId, -1, ms, MAX_DELETE_USER_MS); + } + } + usersCreated = userCount; + } + + // Creates groups and removes extra ones + private void createGroups(int groupCount) throws Exception + { + for (int groupId=1; ; groupId++) + { + String groupName = getGroupName(groupId); + String groupAuthorityName = authorityService.getName(AuthorityType.GROUP, groupName); + boolean exists = (groupId <= groupsCreated) || authorityService.authorityExists(groupAuthorityName); + + if (groupId <= groupCount) + { + if (!exists) + { + long ms = System.currentTimeMillis(); + createGroup(groupName); + assertTime("Create group", groupId, -1, ms, MAX_CREATE_GROUP_MS); + } + } + else + { + if (!exists) + { + break; + } + long ms = System.currentTimeMillis(); + deleteGroup(groupName); + assertTime("Delete group", groupId, -1, ms, MAX_DELETE_GROUP_MS); + } + } + groupsCreated = groupCount; + } + + // Creates sites and removes extra ones + private void createSites(int siteCount, int userCount, int sitesAlreadyCreated, OnFailure onFailureAction) throws Exception + { + for (int siteId=sitesAlreadyCreated+1; ; siteId++) + { + String siteName = getSiteName(siteId); + boolean exists = (siteId <= sitesCreated) || siteService.getSite(siteName) != null; + + if (siteId <= siteCount) + { + if (!exists) + { + String siteOwnerUserName = getSiteOwnerUserName(siteId, userCount); + creatSite(siteId, siteOwnerUserName, onFailureAction); + } + } + else + { + if (!exists) + { + if (siteId >= siteCount + 1) + break; + } + else + { + long ms = System.currentTimeMillis(); + deleteSite(siteName, null); + assertTime("Delete site", siteId, -1, ms, MAX_DELETE_SITE_MS); + } + } + } + sitesCreated = siteCount; + } + + private void deleteSites(int fromSiteId, int toSiteId, OnFailure onFailureAction) throws Exception + { + log("\n\n TIDY UP: DELETE SITES "+fromSiteId+" TO "+toSiteId); + for (int siteId = fromSiteId; siteId <= toSiteId; siteId++) + { + try + { + deleteSite(siteId, null, onFailureAction); + } + catch (Exception e) + { + // move on + } + } + sitesCreated = fromSiteId-1; + } + + // ------------------ create 1 -------------------- + + private void createUser(String userName) throws Exception + { + UserTransaction txn = transactionService.getUserTransaction(); + try + { + txn.begin(); + + authenticationService.createAuthentication(userName, userName.toCharArray()); + + PropertyMap ppOne = new PropertyMap(4); + ppOne.put(ContentModel.PROP_USERNAME, userName); + ppOne.put(ContentModel.PROP_FIRSTNAME, userName.substring(0, userName.length()-4)); + ppOne.put(ContentModel.PROP_LASTNAME, "user"); + ppOne.put(ContentModel.PROP_EMAIL, userName + "@email.com"); + ppOne.put(ContentModel.PROP_JOBTITLE, "jobTitle"); + + personService.createPerson(ppOne); + + 
txn.commit(); + } + catch (Exception e) + { + txn.rollback(); + throw e; + } + } + + private void createGroup(String groupName) throws Exception + { + UserTransaction txn = transactionService.getUserTransaction(); + try + { + txn.begin(); + + authorityService.createAuthority(AuthorityType.GROUP, groupName); + + txn.commit(); + } + catch (Exception e) + { + txn.rollback(); + throw e; + } + } + + private void creatSite(int siteId, String doAsUser, OnFailure onFailureAction) throws Exception + { + String siteName = getSiteName(siteId); + long ms = System.currentTimeMillis(); + creatSite(siteName, doAsUser); + assertTime("Create site", siteId, -1, ms, MAX_CREATE_SITE_MS, onFailureAction, 1); + } + + private void creatSite(String siteName, String doAsUser) throws Exception + { + String currentUser = authenticationComponent.getCurrentUserName(); + UserTransaction txn = transactionService.getUserTransaction(); + try + { + if (doAsUser != null) + authenticationComponent.setCurrentUser(doAsUser); + txn.begin(); + + if (siteService.getSite(siteName) == null) + { + String sitePreset = "site-dashboard"; + siteService.createSite(sitePreset, siteName, "Title for " + siteName, "Description for " + + siteName, SiteVisibility.PUBLIC); + + // TODO Should do the following rather than the createContainers - not sure how +// Map tokens = new HashMap(); +// tokens.put("siteid", siteName); +// presetsManager.constructPreset(tokens, tokens); + siteService.createContainer(siteName, "documentLibrary", ContentModel.TYPE_FOLDER, null); + siteService.createContainer(siteName, "links", ContentModel.TYPE_FOLDER, null); + } + + txn.commit(); + } + catch (Exception e) + { + try + { + txn.rollback(); + } + catch (Exception e2) + { + } + throw e; + } + finally + { + authenticationComponent.setCurrentUser(currentUser); + } + } + + // ------------------ delete 1 -------------------- + + private void deleteUser(String userName) throws Exception + { + UserTransaction txn = transactionService.getUserTransaction(); + try + { + txn.begin(); + + personService.deletePerson(userName); + + txn.commit(); + } + catch (Exception e) + { + txn.rollback(); + throw e; + } + } + + private void deleteGroup(String groupName) throws Exception + { + UserTransaction txn = transactionService.getUserTransaction(); + try + { + txn.begin(); + + String groupAuthorityName = authorityService.getName(AuthorityType.GROUP, groupName); + authorityService.deleteAuthority(groupAuthorityName, true); + + txn.commit(); + } + catch (Exception e) + { + txn.rollback(); + throw e; + } + } + + private void deleteSite(int siteId, String doAsUser, OnFailure onFailureAction) throws Exception + { + String siteName = getSiteName(siteId); + long ms = System.currentTimeMillis(); + deleteSite(siteName, doAsUser); + assertTime("Delete site", siteId, -1, ms, MAX_DELETE_SITE_MS, onFailureAction, 1); + } + + private void deleteSite(String siteName, String doAsUser) throws Exception + { + String currentUser = authenticationComponent.getCurrentUserName(); + UserTransaction txn = transactionService.getUserTransaction(); + try + { + if (doAsUser != null) + authenticationComponent.setCurrentUser(doAsUser); + txn.begin(); + + siteService.deleteSite(siteName); + + txn.commit(); + } + catch (Exception e) + { + try + { + txn.rollback(); + } + catch (Exception e2) + { + } + throw e; + } + finally + { + authenticationComponent.setCurrentUser(currentUser); + } + } + + // ------------------ allocate N -------------------- + + private void allocateUsersToGroups(int userCount, int groupCount, 
Allocation allocation) + throws Exception + { + if (allocation == Allocation.ALL_TO_EACH) + { + for (int userId = 1; userId <= userCount; userId++) + { + UserTransaction txn = transactionService.getUserTransaction(); + try + { + txn.begin(); + Set existingAuthorities = authorityService.getContainingAuthoritiesInZone(AuthorityType.GROUP, + getUserName(userId), AuthorityService.ZONE_APP_DEFAULT, null, -1); + for (int groupId = 1; groupId <= groupCount; groupId++) + { + if (!existingAuthorities.contains(authorityService.getName(AuthorityType.GROUP, + getGroupName(groupId)))) + { + allocateUserToGroup(userId, groupId); + } + } + txn.commit(); + } + catch (Exception e) + { + txn.rollback(); + throw e; + } + } + } + else + { + int iterations, groupIncrement; + if (allocation == Allocation.ROUND_ROBIN_FROM_SOURCE) + { + iterations = userCount; + groupIncrement = 1; + } + else + { + iterations = groupCount; + groupIncrement = userCount; + } + int i=0; + OUTER: while (i < iterations) + { + for (int userId = 1; userId <= userCount; userId++) + { + UserTransaction txn = transactionService.getUserTransaction(); + try + { + txn.begin(); + Set existingAuthorities = authorityService.getContainingAuthoritiesInZone(AuthorityType.GROUP, + getUserName(userId), AuthorityService.ZONE_APP_DEFAULT, null, -1); + for (int groupId = userId; groupId <= groupCount; groupId += groupIncrement) + { + if (!existingAuthorities.contains(authorityService.getName(AuthorityType.GROUP, + getGroupName(groupId)))) + { + allocateUserToGroup(userId, groupId); + } + if (++i >= iterations) + { + txn.commit(); + break OUTER; + } + } + txn.commit(); + } + catch (Exception e) + { + txn.rollback(); + throw e; + } + } + } + } + } + + private void allocateUsersToSites(int userCount, int siteCount, Allocation allocation) + throws Exception + { + if (allocation == Allocation.ALL_TO_EACH) + { + for (int userId = 1; userId <= userCount; userId++) + { + for (int siteId = 1; siteId <= siteCount; siteId++) + { + allocateUserToSite(userId, siteId, userCount); + } + } + } + else if (allocation == Allocation.ROUND_ROBIN_TO_TARGET) + { + boolean sourceEnd = (allocation == Allocation.ROUND_ROBIN_TO_TARGET); + boolean targetEnd = (allocation == Allocation.ROUND_ROBIN_FROM_SOURCE); + for (int siteId = 1, userId = 1; ; siteId++, userId++) + { + if (userId > userCount) + { + if (targetEnd) + break; + sourceEnd = true; + userId = 1; + } + if (siteId > siteCount) + { + if (sourceEnd) + break; + targetEnd = true; + siteId = 1; + } + allocateUserToSite(userId, siteId, userCount); + } + } + } + + private void allocateGroupsToSites(int groupCount, int siteCount, Allocation allocation, int userCount, + int sitesAlreadyCreated, OnFailure onFailureAction) throws Exception + { + if (allocation == Allocation.ALL_TO_EACH) + { + for (int groupId = 1; groupId <= groupCount; groupId++) + { + for (int siteId = sitesAlreadyCreated+1; siteId <= siteCount; siteId++) + { + String doAsUser = getSiteOwnerUserName(siteId, userCount); + allocateGroupToSite(groupId, siteId, userCount, doAsUser, onFailureAction); + } + } + } + else if (allocation == Allocation.ROUND_ROBIN_TO_TARGET) + { + boolean sourceEnd = (allocation == Allocation.ROUND_ROBIN_TO_TARGET); + boolean targetEnd = (allocation == Allocation.ROUND_ROBIN_FROM_SOURCE); + int startGroupId = sitesAlreadyCreated % groupCount + 1; + for (int siteId = sitesAlreadyCreated+1, groupId = startGroupId; ; siteId++, groupId++) + { + if (groupId > groupCount) + { + if (targetEnd) + break; + sourceEnd = true; + groupId = 1; + } + if 
(siteId > siteCount) + { + if (sourceEnd) + break; + targetEnd = true; + siteId = 1; + } + String doAsUser = getSiteOwnerUserName(siteId, userCount); + allocateGroupToSite(groupId, siteId, userCount, doAsUser, onFailureAction); + } + } + } + + private void allocateUserToGroup(int userId, int groupId) throws Exception + { + String userName = getUserName(userId); + String groupName = getGroupName(groupId); + + long ms = System.currentTimeMillis(); + allocateUserToGroup(userName, groupName); + assertTime("Adding a user to a group", userId, groupId, ms, MAX_USER_TO_GROUP_MS); + } + + private void allocateUserToSite(int userId, int siteId, int userCount) throws Exception + { + try + { + String userName = getUserName(userId); + String siteName = getSiteName(siteId); + String siteOwnerUserName = getSiteOwnerUserName(siteId, userCount); + + long ms = System.currentTimeMillis(); + allocateUserToSite(userName, siteName, siteOwnerUserName); + assertTime("Adding a user to a site", userId, siteId, ms, MAX_USER_TO_SITE_MS); + } + catch (DuplicateChildNodeNameException e) + { + // Already allocated. + } + } + + private int getNextSiteToAddGroupTo(int firstSiteIdToCheck) throws Exception + { + String userName = getUserName(1); + Set existingAuthorities = authorityService.getAuthoritiesForUser(userName); + int siteId = firstSiteIdToCheck; + for (; siteId <= NUM_SITES; siteId++) + { + String siteName = getSiteName(siteId); + String groupName = "GROUP_site_"+siteName; + if (!existingAuthorities.contains(groupName)) + { + break; + } + } + log("Next site to add group to is "+siteId); + return siteId; + } + + private void allocateGroupToSite(int firstSiteId, int userCount, String doAsUser, OnFailure onFailureAction, int blockSize) throws Exception + { + String currentUser = authenticationComponent.getCurrentUserName(); + UserTransaction txn = transactionService.getUserTransaction(); + long ms1 = System.currentTimeMillis(); + + try + { + if (doAsUser != null) + authenticationComponent.setCurrentUser(doAsUser); + txn.begin(); + for (int siteId = firstSiteId; siteId < firstSiteId+blockSize; siteId++) + { + try + { + int groupId = (siteId - 1) % NUM_GROUPS + 1; + String groupName = getGroupName(groupId); + String siteName = getSiteName(siteId); + + long ms2 = System.currentTimeMillis(); + String groupAuthorityName = authorityService.getName(AuthorityType.GROUP, + groupName); + siteService.setMembership(siteName, groupAuthorityName, SiteModel.SITE_COLLABORATOR); + assertTime("Adding a group to a site", groupId, siteId, ms2, MAX_GROUP_TO_SITE_MS, onFailureAction, 1); + } + catch (DuplicateChildNodeNameException e) + { + // Already allocated. 
+ } + } + txn.commit(); + assertTime(" Block Add", firstSiteId, 0, ms1, MAX_GROUP_TO_SITE_MS, onFailureAction, blockSize); + } + catch (Exception e) + { + txn.rollback(); + throw e; + } + finally + { + authenticationComponent.setCurrentUser(currentUser); + } + } + + private void allocateGroupToSite(int siteId, int userCount, String doAsUser, OnFailure onFailureAction) throws Exception + { + int groupId = (siteId-1) % NUM_GROUPS + 1; + allocateGroupToSite(groupId, siteId, userCount, doAsUser, onFailureAction); + } + + private void allocateGroupToSite(int groupId, int siteId, int userCount, String doAsUser, OnFailure onFailureAction) throws Exception + { + try + { + String groupName = getGroupName(groupId); + String siteName = getSiteName(siteId); + + long ms = System.currentTimeMillis(); + allocateGroupToSite(groupName, siteName, doAsUser); + assertTime("Adding a group to a site", groupId, siteId, ms, MAX_GROUP_TO_SITE_MS, onFailureAction, 1); + } + catch (DuplicateChildNodeNameException e) + { + // Already allocated. + } + } + + // ------------------ allocate 1 -------------------- + + private void allocateUserToGroup(String userName, String groupName) throws Exception + { + String groupAuthorityName = authorityService.getName(AuthorityType.GROUP, groupName); + authorityService.addAuthority(groupAuthorityName, userName); + } + + private void allocateUserToSite(String userName, String siteName, String doAsUser) + throws Exception + { + setSiteMembership(userName, siteName, doAsUser); + } + + private void allocateGroupToSite(String groupName, String siteName, String doAsUser) + throws Exception + { + String groupAuthorityName = authorityService.getName(AuthorityType.GROUP, groupName); + setSiteMembership(groupAuthorityName, siteName, doAsUser); + } + + private void setSiteMembership(String authority, String siteName, String doAsUser) + throws SystemException, Exception + { + String currentUser = authenticationComponent.getCurrentUserName(); + UserTransaction txn = transactionService.getUserTransaction(); + try + { + if (doAsUser != null) + authenticationComponent.setCurrentUser(doAsUser); + txn.begin(); + + siteService.setMembership(siteName, authority, SiteModel.SITE_COLLABORATOR); + + txn.commit(); + } + catch (Exception e) + { + txn.rollback(); + throw e; + } + finally + { + authenticationComponent.setCurrentUser(currentUser); + } + } + + // ------------------ names -------------------- + + private String getUserName(int userId) + { + return getName("user", userId); + } + + private String getGroupName(int groupId) + { + return getName("group", groupId); + } + + private String getSiteName(int siteId) + { + return getName("site", siteId); + } + + private String getName(String prefix, int id) + { + return new StringBuilder(prefix).append(id).toString(); + } + + private String getSiteOwnerUserName(int siteId, int userCount) + { + int ownerId = (siteId-1)%userCount+1; + return getUserName(ownerId); + } + + // ------------------ Original Tests -------------------- + +// @Test +// public void testSingleGroup() throws Exception +// { +// Allocation usersToGroups = Allocation.ALL_TO_EACH; +// Allocation usersToSites = Allocation.NONE; +// Allocation groupsToSites = Allocation.ROUND_ROBIN_TO_TARGET; +// +// createAndAllocate(NUM_USERS, NUM_GROUPS, NUM_SITES, usersToGroups, usersToSites, groupsToSites); +// } + +// @Test +// public void testMultipleGroups() throws Exception +// { +// Allocation usersToGroups = Allocation.ALL_TO_EACH; +// Allocation usersToSites = Allocation.NONE; +// Allocation 
groupsToSites = Allocation.ROUND_ROBIN_TO_TARGET; +// +// createAndAllocate(NUM_USERS, NUM_GROUPS, NUM_SITES, usersToGroups, usersToSites, groupsToSites); +// } + +// @Test +// public void testNoGroups() throws Exception +// { +// Allocation usersToGroups = Allocation.NONE; +// Allocation usersToSites = Allocation.ALL_TO_EACH; +// Allocation groupsToSites = Allocation.NONE; +// +// createAndAllocate(NUM_USERS, NUM_GROUPS, NUM_SITES, usersToGroups, usersToSites, groupsToSites); +// } + + // ------------------ Initial Data Load Tests -------------------- + +// @Test +// public void testInitClearDownAll() throws Exception +// { +// createUsersGroupsAndSites(0, 0, 0); +// } + +// @Test +// public void testInitCreateUsersAndGroups() throws Exception +// { +// createUsers(NUM_USERS); +// createGroups(NUM_GROUPS); +// allocateUsersToGroups(NUM_USERS, NUM_GROUPS, Allocation.ALL_TO_EACH); +// } + +// @Test +// public void testInit() throws Exception +// { +// createUsers(NUM_USERS); +// createGroups(NUM_GROUPS); +// allocateUsersToGroups(NUM_USERS, NUM_GROUPS, Allocation.ALL_TO_EACH); +// +// createSites(NUM_SITES, NUM_USERS, 0, OnFailure.KEEP_GOING); +// +// int blockSize = 10; +// for (int siteId = getNextSiteToAddGroupTo(1); siteId <= NUM_SITES; siteId += blockSize) +// { +// allocateGroupToSite(siteId, NUM_USERS, ADMIN_USER, OnFailure.KEEP_GOING, blockSize); +// } +// } + + // ------------------ Test to load data from cmd line -------------------- + + @Test + public void commandLine() throws Exception + { + String from = System.getProperty("from"); + String to = System.getProperty("to"); + String restart = System.getProperty("restart"); + String action = System.getProperty("action"); + logFilename = System.getProperty("log", "sites.log"); + + boolean usersOnly = "usersOnly".equalsIgnoreCase(action); + boolean sites = "sites".equalsIgnoreCase(action); + boolean groups = "groups".equalsIgnoreCase(action); + boolean test = "test".equalsIgnoreCase(action); + + if ((usersOnly && (from != null || to != null || restart != null)) || + (!usersOnly && (from == null || to == null || (action != null && !sites && !groups && !test)))) + { + System.err.println( + "Usage: -Dfrom=<fromSiteId> -Dto=<toSiteId> [ -Dlog=<filename> ]\n" + + " -Daction=usersOnly [ -Dlog=<filename> ]\n" + + " -Daction=sites -Dfrom=<fromSiteId> -Dto=<toSiteId> [ -Dlog=<filename> ] [ -Drestart=<siteId> ]\n" + + " -Daction=groups -Dfrom=<fromSiteId> -Dto=<toSiteId> [ -Dlog=<filename> ] [ -Drestart=<siteId> ]\n" + + " -Daction=test -Dfrom=<fromSiteId> -Dto=<toSiteId> [ -Dlog=<filename> ]"); + } + else + { + try + { + int fromId = (from == null) ? 0 : Integer.parseInt(from); + NUM_SITES = (to == null) ? 0 : Integer.parseInt(to); + int restartFromId = (restart == null) ? fromId : Integer.parseInt(restart); + + if (test) + { + testAddingSitesAndDelete(fromId, NUM_SITES); + } + else + { + if (action == null || usersOnly) + { + createUsers(NUM_USERS); + createGroups(NUM_GROUPS); + allocateUsersToGroups(NUM_USERS, NUM_GROUPS, Allocation.ALL_TO_EACH); + if (action == null) + { + sites = true; + } + } + + if (sites) + { + createSites(NUM_SITES, NUM_USERS, restartFromId - 1, OnFailure.KEEP_GOING); + restartFromId = fromId; + groups = true; + } + + if (groups) + { + int blockSize = 10; + for (int siteId = getNextSiteToAddGroupTo(restartFromId); siteId <= NUM_SITES; siteId += blockSize) + { + int size = Math.min(blockSize, NUM_SITES - siteId + 1); + allocateGroupToSite(siteId, NUM_USERS, ADMIN_USER, OnFailure.KEEP_GOING, size); + } + } + } + } + catch (Exception e) + { + e.printStackTrace(); + } + finally + { + log("\n\n DONE"); + } + } + } + + /** + * Simplify running the unit test from the command line.
+ * + * set SITE_CPATH=%TOMCAT_HOME%/lib/*;%TOMCAT_HOME%/endorsed/*;%TOMCAT_HOME%/webapps/alfresco/WEB-INF/lib/*;%TOMCAT_HOME%/webapps/alfresco/WEB-INF/classes;%TOMCAT_HOME%/shared/classes; + * java -Xmx2048m -XX:MaxPermSize=512M -classpath %SITE_CPATH% org.alfresco.repo.site.SiteServiceTestHuge ... + */ + public static void main(String[] args) + { + org.junit.runner.JUnitCore.main(SiteServiceTestHuge.class.getName()); + } + + // ------------------ Tests Once Data Is Loaded -------------------- + +// @Test +// public void testAdding1000SitesInBlocksOf100() throws Exception +// { +// usersCreated = NUM_USERS; +// groupsCreated = NUM_GROUPS; +// sitesCreated = NUM_SITES; +// +// deleteSites(NUM_SITES+1, NUM_SITES+1000, OnFailure.KEEP_GOING); +// +// for (int i=1; i<=10; i++) +// { +// addMoreSites(100, OnFailure.GIVE_UP); +// } +// +// deleteSites(NUM_SITES+1, NUM_SITES+1000, OnFailure.KEEP_GOING); +// } + +// @Test +// public void testAdding4SitesAndDelete() throws Exception +// { +// testAddingSitesAndDelete(NUM_SITES + 1, NUM_SITES + 100); +// } + + public void testAddingSitesAndDelete(int fromSiteId, int toSiteId) throws Exception + { + usersCreated = NUM_USERS; + groupsCreated = NUM_GROUPS; + sitesCreated = fromSiteId - 1; + + deleteSites(fromSiteId, toSiteId, OnFailure.KEEP_GOING); + + log("\n\n CREATE SITES"); + for (int siteId = fromSiteId; siteId <= toSiteId; siteId++) + { + String siteCreatorUser = getSiteOwnerUserName(siteId, NUM_USERS); + creatSite(siteId, siteCreatorUser, OnFailure.KEEP_GOING); + } + + log("\n\n ADD GROUPS"); + for (int siteId = fromSiteId; siteId <= toSiteId; siteId++) + { + String siteCreatorUser = getSiteOwnerUserName(siteId, NUM_USERS); + allocateGroupToSite(siteId, NUM_USERS, siteCreatorUser, OnFailure.KEEP_GOING); + } + + log("\n\n DELETE"); + for (int siteId = fromSiteId; siteId <= toSiteId; siteId++) + { + String siteCreatorUser = getSiteOwnerUserName(siteId, NUM_USERS); + deleteSite(siteId, siteCreatorUser, OnFailure.KEEP_GOING); + } + } +} diff --git a/source/java/org/alfresco/repo/site/script/ScriptSiteService.java b/source/java/org/alfresco/repo/site/script/ScriptSiteService.java index 625c54e037..b9bb7ccb97 100644 --- a/source/java/org/alfresco/repo/site/script/ScriptSiteService.java +++ b/source/java/org/alfresco/repo/site/script/ScriptSiteService.java @@ -273,11 +273,12 @@ * List all the sites that the specified user has an explicit membership to. * * @param userName user name + * @param size maximum list size (zero for all) * @return Site[] a list of sites the user has an explicit membership to */ - public Site[] listUserSites(String userName) + public Site[] listUserSites(String userName, int size) { - List<SiteInfo> siteInfos = this.siteService.listSites(userName); + List<SiteInfo> siteInfos = this.siteService.listSites(userName, size); List<Site> sites = new ArrayList<Site>(siteInfos.size()); for (SiteInfo siteInfo : siteInfos) { @@ -286,6 +287,17 @@ return sites.toArray(new Site[sites.size()]); } + /** + * List all the sites that the specified user has an explicit membership to. + * + * @param userName user name + * @return Site[] a list of sites the user has an explicit membership to + */ + public Site[] listUserSites(String userName) + { + return listUserSites(userName, 0); + } + /** * Get a site for a provided site short name. *

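A hedged usage sketch (not part of the patch) for the size-limited listUserSites overload added above: the old one-argument form now delegates with size 0, which the SiteService javadoc below documents as "all". The scriptSiteService reference and the user name are illustrative assumptions, not taken from the patch.

    // Sketch only: assumes a wired-up ScriptSiteService instance and an existing user.
    Site[] firstTen = scriptSiteService.listUserSites("jbloggs", 10); // at most 10 memberships
    Site[] all = scriptSiteService.listUserSites("jbloggs");          // delegates with size 0 = no limit
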
diff --git a/source/java/org/alfresco/repo/template/People.java b/source/java/org/alfresco/repo/template/People.java index 90a5b4ac67..966da5a660 100644 --- a/source/java/org/alfresco/repo/template/People.java +++ b/source/java/org/alfresco/repo/template/People.java @@ -235,10 +235,10 @@ public class People extends BaseTemplateProcessorExtension implements Initializi { ParameterCheck.mandatory("Person", person); List parents; - Set authorities = this.authorityService.getContainingAuthorities( + Set authorities = this.authorityService.getContainingAuthoritiesInZone( AuthorityType.GROUP, (String)person.getProperties().get(ContentModel.PROP_USERNAME), - false); + AuthorityService.ZONE_APP_DEFAULT, null, 1000); parents = new ArrayList(authorities.size()); for (String authority : authorities) { diff --git a/source/java/org/alfresco/repo/version/NodeServiceImpl.java b/source/java/org/alfresco/repo/version/NodeServiceImpl.java index b06cbf3277..adca764c36 100644 --- a/source/java/org/alfresco/repo/version/NodeServiceImpl.java +++ b/source/java/org/alfresco/repo/version/NodeServiceImpl.java @@ -188,6 +188,16 @@ public class NodeServiceImpl implements NodeService, VersionModel { return dbNodeService.getRootNode(storeRef); } + + + /** + * Delegates to the NodeService used as the version store implementation + */ + @Override + public Set getAllRootNodes(StoreRef storeRef) + { + return dbNodeService.getAllRootNodes(storeRef); + } /** * @throws UnsupportedOperationException always @@ -556,6 +566,18 @@ public class NodeServiceImpl implements NodeService, VersionModel return result; } + + @Override + public List getChildAssocs(NodeRef nodeRef, QName typeQName, QName qname, int maxResults, + boolean preload) throws InvalidNodeRefException + { + List result = getChildAssocs(nodeRef, typeQName, qname); + if (result.size() > maxResults) + { + return result.subList(0, maxResults); + } + return result; + } /** * @throws UnsupportedOperationException always diff --git a/source/java/org/alfresco/repo/workflow/WorkflowServiceImpl.java b/source/java/org/alfresco/repo/workflow/WorkflowServiceImpl.java index 0c858cb4da..e631fe9fe9 100644 --- a/source/java/org/alfresco/repo/workflow/WorkflowServiceImpl.java +++ b/source/java/org/alfresco/repo/workflow/WorkflowServiceImpl.java @@ -699,7 +699,8 @@ public class WorkflowServiceImpl implements WorkflowService // Expand authorities to include associated groups (and parent groups) List authorities = new ArrayList(); authorities.add(authority); - Set parents = authorityService.getContainingAuthorities(AuthorityType.GROUP, authority, false); + Set parents = authorityService.getContainingAuthoritiesInZone(AuthorityType.GROUP, authority, + AuthorityService.ZONE_APP_DEFAULT, null, -1); authorities.addAll(parents); // Retrieve pooled tasks for authorities (from each of the registered diff --git a/source/java/org/alfresco/service/cmr/security/AuthorityService.java b/source/java/org/alfresco/service/cmr/security/AuthorityService.java index bb8eb894fe..b2f5921e05 100644 --- a/source/java/org/alfresco/service/cmr/security/AuthorityService.java +++ b/source/java/org/alfresco/service/cmr/security/AuthorityService.java @@ -277,10 +277,14 @@ public interface AuthorityService public Set getContainedAuthorities(AuthorityType type, String name, boolean immediate); /** - * Get the authorities that contain the given authority + * Get the authorities that contain the given authority, + * but use {@code getAuthoritiesForUser(userName).contains(authority)} rather than + * {@code 
getContainingAuthorities(type, userName, false).contains(authority)} or + * use {@link #getContainingAuthoritiesInZone(AuthorityType, String, String, AuthorityFilter, int)} + * as they will be much faster. * - * For example, this can be used find out all the authorities that contain a - * user. + * For example, this method can be used to find out all the authorities that contain a + * group. * * @param type * - * if not null, limit to the type of authority specified @@ -294,6 +298,31 @@ @Auditable(parameters = {"type", "name", "immediate"}) public Set<String> getContainingAuthorities(AuthorityType type, String name, boolean immediate); + /** + * Get a set of authorities with varying filter criteria + * + * @param type + * authority type or null for all types + * @param name + * if non-null, only return those authorities that contain this authority + * @param zoneName + * if non-null, only include authorities in the named zone + * @param filter + * optional callback to apply further filter criteria or null + * @param size + * if greater than zero, the maximum results to return. The search strategy used varies depending on + * this number. + * @return a set of authorities + */ + @Auditable(parameters = {"type", "name", "zoneName", "filter", "size"}) + public Set<String> getContainingAuthoritiesInZone(AuthorityType type, String name, final String zoneName, + AuthorityFilter filter, int size); + + public interface AuthorityFilter + { + boolean includeAuthority(String authority); + } + /** * Extract the short name of an authority from its full identifier. * diff --git a/source/java/org/alfresco/service/cmr/site/SiteService.java b/source/java/org/alfresco/service/cmr/site/SiteService.java index 8180409d07..a5f7c56437 100644 --- a/source/java/org/alfresco/service/cmr/site/SiteService.java +++ b/source/java/org/alfresco/service/cmr/site/SiteService.java @@ -169,6 +169,16 @@ */ PagingResults<SiteInfo> listSites(List<FilterProp> filterProps, List<Pair<QName, Boolean>> sortProps, PagingRequest pagingRequest); + /** + * List all the sites that the specified user has an explicit membership to. + * + * @param userName user name + * @param size list maximum size or zero for all + * @return List<SiteInfo> list of site information + */ + @NotAuditable + List<SiteInfo> listSites(String userName, int size); + /** * Gets site information based on the short name of a site. *
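A hedged sketch of how a caller might use the new getContainingAuthoritiesInZone together with the AuthorityFilter callback declared above. The injected authorityService field and the user name are assumptions; the zone constant and the 1000-result cap mirror the patched People.java call, and the "GROUP_site_" prefix follows the site-group naming used in SiteServiceTestHuge.

    // Sketch only: collect the site groups that contain the given user, letting the
    // service apply the filter and the size cap as it searches.
    private Set<String> siteGroupsFor(String userName)
    {
        return authorityService.getContainingAuthoritiesInZone(
                AuthorityType.GROUP,                 // only group authorities
                userName,                            // the authority being looked up
                AuthorityService.ZONE_APP_DEFAULT,   // restrict to the default application zone
                new AuthorityService.AuthorityFilter()
                {
                    public boolean includeAuthority(String authority)
                    {
                        return authority.startsWith("GROUP_site_"); // keep site groups only
                    }
                },
                1000);                               // size cap, as People.java now passes
    }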