From ef034e596b33662136a042e22ff07a49cba3fb5c Mon Sep 17 00:00:00 2001 From: cezary-witkowski Date: Tue, 23 Sep 2025 14:10:29 +0200 Subject: [PATCH] [ACS-10041] Repository - CPU spikes and OOM errors with SQL Server 2019 (#3588) --- .../repo/domain/node/AbstractNodeDAOImpl.java | 10272 ++++++++-------- .../node/ReferenceablePropertiesEntity.java | 203 +- .../getchildren/FilterSortNodeEntity.java | 525 +- .../getchildren/GetChildrenCannedQuery.java | 1951 +-- .../node-common-SqlMap.xml | 24 +- 5 files changed, 6538 insertions(+), 6437 deletions(-) diff --git a/repository/src/main/java/org/alfresco/repo/domain/node/AbstractNodeDAOImpl.java b/repository/src/main/java/org/alfresco/repo/domain/node/AbstractNodeDAOImpl.java index 9b223788c5..41156e9734 100644 --- a/repository/src/main/java/org/alfresco/repo/domain/node/AbstractNodeDAOImpl.java +++ b/repository/src/main/java/org/alfresco/repo/domain/node/AbstractNodeDAOImpl.java @@ -1,5113 +1,5159 @@ -/* - * #%L - * Alfresco Repository - * %% - * Copyright (C) 2005 - 2023 Alfresco Software Limited - * %% - * This file is part of the Alfresco software. - * If the software was purchased under a paid Alfresco license, the terms of - * the paid license agreement will prevail. Otherwise, the software is - * provided under the following open source license terms: - * - * Alfresco is free software: you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * Alfresco is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with Alfresco. If not, see . 
- * #L% - */ -package org.alfresco.repo.domain.node; - -import java.io.Serializable; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.sql.Savepoint; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.Date; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Set; -import java.util.SortedSet; -import java.util.Stack; -import java.util.TreeSet; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -import org.alfresco.error.AlfrescoRuntimeException; -import org.alfresco.ibatis.BatchingDAO; -import org.alfresco.ibatis.RetryingCallbackHelper; -import org.alfresco.ibatis.RetryingCallbackHelper.RetryingCallback; -import org.alfresco.model.ContentModel; -import org.alfresco.repo.cache.NullCache; -import org.alfresco.repo.cache.SimpleCache; -import org.alfresco.repo.cache.TransactionalCache; -import org.alfresco.repo.cache.lookup.EntityLookupCache; -import org.alfresco.repo.cache.lookup.EntityLookupCache.EntityLookupCallbackDAOAdaptor; -import org.alfresco.repo.domain.contentdata.ContentDataDAO; -import org.alfresco.repo.domain.control.ControlDAO; -import org.alfresco.repo.domain.locale.LocaleDAO; -import org.alfresco.repo.domain.permissions.AccessControlListDAO; -import org.alfresco.repo.domain.permissions.AclDAO; -import org.alfresco.repo.domain.qname.QNameDAO; -import org.alfresco.repo.domain.usage.UsageDAO; -import org.alfresco.repo.policy.BehaviourFilter; -import org.alfresco.repo.security.permissions.AccessControlListProperties; -import org.alfresco.repo.transaction.AlfrescoTransactionSupport; -import org.alfresco.repo.transaction.AlfrescoTransactionSupport.TxnReadState; -import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback; -import org.alfresco.repo.transaction.TransactionAwareSingleton; -import org.alfresco.repo.transaction.TransactionalDao; -import org.alfresco.repo.transaction.TransactionalResourceHelper; -import org.alfresco.service.cmr.dictionary.DataTypeDefinition; -import org.alfresco.service.cmr.dictionary.DictionaryService; -import org.alfresco.service.cmr.dictionary.InvalidTypeException; -import org.alfresco.service.cmr.dictionary.PropertyDefinition; -import org.alfresco.service.cmr.repository.AssociationExistsException; -import org.alfresco.service.cmr.repository.AssociationRef; -import org.alfresco.service.cmr.repository.ChildAssociationRef; -import org.alfresco.service.cmr.repository.ContentData; -import org.alfresco.service.cmr.repository.CyclicChildRelationshipException; -import org.alfresco.service.cmr.repository.DuplicateChildNodeNameException; -import org.alfresco.service.cmr.repository.InvalidNodeRefException; -import org.alfresco.service.cmr.repository.InvalidStoreRefException; -import org.alfresco.service.cmr.repository.NodeRef; -import org.alfresco.service.cmr.repository.NodeRef.Status; -import org.alfresco.service.cmr.repository.Path; -import org.alfresco.service.cmr.repository.StoreRef; -import org.alfresco.service.cmr.repository.datatype.DefaultTypeConverter; -import org.alfresco.service.namespace.QName; -import org.alfresco.service.transaction.ReadOnlyServerException; -import org.alfresco.service.transaction.TransactionService; -import org.alfresco.util.EqualsHelper; -import org.alfresco.util.EqualsHelper.MapValueComparison; -import 
org.alfresco.util.GUID; -import org.alfresco.util.Pair; -import org.alfresco.util.PropertyCheck; -import org.alfresco.util.ReadWriteLockExecuter; -import org.alfresco.util.ValueProtectingMap; -import org.alfresco.util.transaction.TransactionListenerAdapter; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.springframework.dao.ConcurrencyFailureException; -import org.springframework.dao.DataIntegrityViolationException; -import org.springframework.util.Assert; - -/** - * Abstract implementation for Node DAO. - *
- * This provides basic services such as caching, but defers to the underlying implementation - * for CRUD operations. - * - * @author Derek Hulley - * @since 3.4 - */ -public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO -{ - private static final String CACHE_REGION_ROOT_NODES = "N.RN"; - public static final String CACHE_REGION_NODES = "N.N"; - private static final String CACHE_REGION_ASPECTS = "N.A"; - private static final String CACHE_REGION_PROPERTIES = "N.P"; - - private static final String KEY_LOST_NODE_PAIRS = AbstractNodeDAOImpl.class.getName() + ".lostNodePairs"; - private static final String KEY_DELETED_ASSOCS = AbstractNodeDAOImpl.class.getName() + ".deletedAssocs"; - - protected Log logger = LogFactory.getLog(getClass()); - private Log loggerPaths = LogFactory.getLog(getClass().getName() + ".paths"); - - protected final boolean isDebugEnabled = logger.isDebugEnabled(); - private NodePropertyHelper nodePropertyHelper; - private UpdateTransactionListener updateTransactionListener = new UpdateTransactionListener(); - private RetryingCallbackHelper childAssocRetryingHelper; - - private TransactionService transactionService; - private DictionaryService dictionaryService; - private BehaviourFilter policyBehaviourFilter; - private AclDAO aclDAO; - private AccessControlListDAO accessControlListDAO; - private ControlDAO controlDAO; - private QNameDAO qnameDAO; - private ContentDataDAO contentDataDAO; - private LocaleDAO localeDAO; - private UsageDAO usageDAO; - - private int cachingThreshold = 10; - - /** - * Cache for the Store root nodes by StoreRef:
-     * KEY: StoreRef
-     * VALUE: Node representing the root node
-     * VALUE KEY: IGNORED
-     */
-    private EntityLookupCache<StoreRef, Node, Serializable> rootNodesCache;
-
-
-    /**
-     * Cache for nodes with the root aspect by StoreRef:
-     * KEY: StoreRef
-     * VALUE: A set of nodes with the root aspect
-     */
-    private SimpleCache<StoreRef, Set<NodeRef>> allRootNodesCache;
-
-    /**
-     * Bidirectional cache for the Node ID to Node lookups:
-     * KEY: Node ID
-     * VALUE: Node
-     * VALUE KEY: The Node's NodeRef
-     */
-    private EntityLookupCache<Long, Node, NodeRef> nodesCache;
-    /**
-     * Backing transactional cache to allow read-through requests to be honoured
-     */
-    private TransactionalCache<Serializable, Serializable> nodesTransactionalCache;
-    /**
-     * Cache for the QName values:
-     * KEY: NodeVersionKey
-     * VALUE: Set<QName>
-     * VALUE KEY: None
-     */
-    private EntityLookupCache<NodeVersionKey, Set<QName>, Serializable> aspectsCache;
-    /**
-     * Cache for the Node properties:
-     * KEY: NodeVersionKey
-     * VALUE: Map<QName, Serializable>
-     * VALUE KEY: None
-     */
-    private EntityLookupCache<NodeVersionKey, Map<QName, Serializable>, Serializable> propertiesCache;
-    /**
-     * Non-clustered cache for the Node parent assocs:
-     * KEY: (nodeId, txnId) pair
- * VALUE: ParentAssocs - */ - private ParentAssocsCache parentAssocsCache; - private int parentAssocsCacheSize; - private int parentAssocsCacheLimitFactor = 8; - - /** - * Cache for fast lookups of child nodes by cm:name. - */ - private SimpleCache childByNameCache; - - /** - * Constructor. Set up various instance-specific members such as caches and locks. - */ - public AbstractNodeDAOImpl() - { - childAssocRetryingHelper = new RetryingCallbackHelper(); - childAssocRetryingHelper.setRetryWaitMs(10); - childAssocRetryingHelper.setMaxRetries(5); - // Caches - rootNodesCache = new EntityLookupCache(new RootNodesCacheCallbackDAO()); - nodesCache = new EntityLookupCache(new NodesCacheCallbackDAO()); - aspectsCache = new EntityLookupCache, Serializable>(new AspectsCallbackDAO()); - propertiesCache = new EntityLookupCache, Serializable>(new PropertiesCallbackDAO()); - childByNameCache = new NullCache(); - } - - /** - * @param transactionService the service to start post-txn processes - */ - public void setTransactionService(TransactionService transactionService) - { - this.transactionService = transactionService; - } - - /** - * @param dictionaryService the service help determine cm:auditable characteristics - */ - public void setDictionaryService(DictionaryService dictionaryService) - { - this.dictionaryService = dictionaryService; - } - - public void setCachingThreshold(int cachingThreshold) - { - this.cachingThreshold = cachingThreshold; - } - - /** - * @param policyBehaviourFilter the service to determine the behaviour for cm:auditable and - * other inherent capabilities. - */ - public void setPolicyBehaviourFilter(BehaviourFilter policyBehaviourFilter) - { - this.policyBehaviourFilter = policyBehaviourFilter; - } - - /** - * @param aclDAO used to update permissions during certain operations - */ - public void setAclDAO(AclDAO aclDAO) - { - this.aclDAO = aclDAO; - } - - /** - * @param accessControlListDAO used to update ACL inheritance during node moves - */ - public void setAccessControlListDAO(AccessControlListDAO accessControlListDAO) - { - this.accessControlListDAO = accessControlListDAO; - } - - /** - * @param controlDAO create Savepoints - */ - public void setControlDAO(ControlDAO controlDAO) - { - this.controlDAO = controlDAO; - } - - /** - * @param qnameDAO translates QName IDs into QName instances and vice-versa - */ - public void setQnameDAO(QNameDAO qnameDAO) - { - this.qnameDAO = qnameDAO; - } - - /** - * @param contentDataDAO used to create and delete content references - */ - public void setContentDataDAO(ContentDataDAO contentDataDAO) - { - this.contentDataDAO = contentDataDAO; - } - - /** - * @param localeDAO used to handle MLText properties - */ - public void setLocaleDAO(LocaleDAO localeDAO) - { - this.localeDAO = localeDAO; - } - - /** - * @param usageDAO used to keep content usage calculations in line - */ - public void setUsageDAO(UsageDAO usageDAO) - { - this.usageDAO = usageDAO; - } - - /** - * Set the cache that maintains the Store root node data - * - * @param cache the cache - */ - public void setRootNodesCache(SimpleCache cache) - { - this.rootNodesCache = new EntityLookupCache( - cache, - CACHE_REGION_ROOT_NODES, - new RootNodesCacheCallbackDAO()); - } - - /** - * Set the cache that maintains the extended Store root node data - * - * @param allRootNodesCache the cache - */ - public void setAllRootNodesCache(SimpleCache> allRootNodesCache) - { - this.allRootNodesCache = allRootNodesCache; - } - - /** - * Set the cache that maintains node ID-NodeRef cross 
referencing data - * - * @param cache the cache - */ - public void setNodesCache(SimpleCache cache) - { - this.nodesCache = new EntityLookupCache( - cache, - CACHE_REGION_NODES, - new NodesCacheCallbackDAO()); - if (cache instanceof TransactionalCache) - { - this.nodesTransactionalCache = (TransactionalCache) cache; - } - } - - /** - * Set the cache that maintains the Node QName IDs - * - * @param aspectsCache the cache - */ - public void setAspectsCache(SimpleCache> aspectsCache) - { - this.aspectsCache = new EntityLookupCache, Serializable>( - aspectsCache, - CACHE_REGION_ASPECTS, - new AspectsCallbackDAO()); - } - - /** - * Set the cache that maintains the Node property values - * - * @param propertiesCache the cache - */ - public void setPropertiesCache(SimpleCache> propertiesCache) - { - this.propertiesCache = new EntityLookupCache, Serializable>( - propertiesCache, - CACHE_REGION_PROPERTIES, - new PropertiesCallbackDAO()); - } - - /** - * Sets the maximum capacity of the parent assocs cache - * - * @param parentAssocsCacheSize the cache size - */ - public void setParentAssocsCacheSize(int parentAssocsCacheSize) - { - this.parentAssocsCacheSize = parentAssocsCacheSize; - } - - /** - * Sets the average number of parents expected per cache entry. This parameter is multiplied by the - * {@link #setParentAssocsCacheSize(int)} parameter to compute a limit on the total number of cached parents, which - * will be proportional to the cache's memory usage. The cache will be pruned when this limit is exceeded to avoid - * excessive memory usage. - * - * @param parentAssocsCacheLimitFactor - * the parentAssocsCacheLimitFactor to set - */ - public void setParentAssocsCacheLimitFactor(int parentAssocsCacheLimitFactor) - { - this.parentAssocsCacheLimitFactor = parentAssocsCacheLimitFactor; - } - - /** - * Set the cache that maintains lookups by child cm:name - * - * @param childByNameCache the cache - */ - public void setChildByNameCache(SimpleCache childByNameCache) - { - this.childByNameCache = childByNameCache; - } - - /* - * Initialize - */ - - public void init() - { - PropertyCheck.mandatory(this, "transactionService", transactionService); - PropertyCheck.mandatory(this, "dictionaryService", dictionaryService); - PropertyCheck.mandatory(this, "aclDAO", aclDAO); - PropertyCheck.mandatory(this, "accessControlListDAO", accessControlListDAO); - PropertyCheck.mandatory(this, "qnameDAO", qnameDAO); - PropertyCheck.mandatory(this, "contentDataDAO", contentDataDAO); - PropertyCheck.mandatory(this, "localeDAO", localeDAO); - PropertyCheck.mandatory(this, "usageDAO", usageDAO); - - this.nodePropertyHelper = new NodePropertyHelper(dictionaryService, qnameDAO, localeDAO, contentDataDAO); - this.parentAssocsCache = new ParentAssocsCache(this.parentAssocsCacheSize, this.parentAssocsCacheLimitFactor); - } - - /* - * Cache helpers - */ - - private void clearCaches() - { - nodesCache.clear(); - aspectsCache.clear(); - propertiesCache.clear(); - parentAssocsCache.clear(); - } - - /** - * Invalidate cache entries for all children of a give node. This usually applies - * where the child associations or nodes are modified en-masse. 
- * - * @param parentNodeId the parent node of all child nodes to be invalidated (may be null) - * @param touchNodes true to also touch the nodes - * @return the number of child associations found (might be capped) - */ - private int invalidateNodeChildrenCaches(Long parentNodeId, boolean primary, boolean touchNodes) - { - Long txnId = getCurrentTransaction().getId(); - - int count = 0; - List childNodeIds = new ArrayList(256); - Long minChildNodeIdInclusive = Long.MIN_VALUE; - while (minChildNodeIdInclusive != null) - { - childNodeIds.clear(); - List childAssocs = selectChildNodeIds( - parentNodeId, - Boolean.valueOf(primary), - minChildNodeIdInclusive, - 256); - // Remove the cache entries as we go - for (ChildAssocEntity childAssoc : childAssocs) - { - Long childNodeId = childAssoc.getChildNode().getId(); - if (childNodeId.compareTo(minChildNodeIdInclusive) < 0) - { - throw new RuntimeException("Query results did not increase for child node id ID"); - } - else - { - minChildNodeIdInclusive = Long.valueOf(childNodeId.longValue() + 1L); - } - // Invalidate the node cache - childNodeIds.add(childNodeId); - invalidateNodeCaches(childNodeId); - count++; - } - // Bring all the nodes into the transaction, if required - if (touchNodes) - { - updateNodes(txnId, childNodeIds); - } - // Now break out if we didn't have the full set of results - if (childAssocs.size() < 256) - { - break; - } - } - // Done - return count; - } - - /** - * Invalidates all cached artefacts for a particular node, forcing a refresh. - * - * @param nodeId the node ID - */ - private void invalidateNodeCaches(Long nodeId) - { - // Take the current value from the nodesCache and use that to invalidate the other caches - Node node = nodesCache.getValue(nodeId); - if (node != null) - { - invalidateNodeCaches(node, true, true, true); - } - // Finally remove the node reference - nodesCache.removeByKey(nodeId); - } - - /** - * Invalidate specific node caches using an exact key - * - * @param node the node in question - */ - private void invalidateNodeCaches(Node node, boolean invalidateNodeAspectsCache, - boolean invalidateNodePropertiesCache, boolean invalidateParentAssocsCache) - { - NodeVersionKey nodeVersionKey = node.getNodeVersionKey(); - if (invalidateNodeAspectsCache) - { - aspectsCache.removeByKey(nodeVersionKey); - } - if (invalidateNodePropertiesCache) - { - propertiesCache.removeByKey(nodeVersionKey); - } - if (invalidateParentAssocsCache) - { - invalidateParentAssocsCached(node); - } - } - - /* - * Transactions - */ - - private static final String KEY_TRANSACTION = "node.transaction.id"; - - /** - * Wrapper to update the current transaction to get the change time correct - * - * @author Derek Hulley - * @since 3.4 - */ - private class UpdateTransactionListener implements TransactionalDao - { - /** - * Checks for the presence of a written DB transaction entry - */ - @Override - public boolean isDirty() - { - Long txnId = AbstractNodeDAOImpl.this.getCurrentTransactionId(false); - return txnId != null; - } - - @Override - public void beforeCommit(boolean readOnly) - { - if (readOnly) - { - return; - } - TransactionEntity txn = AlfrescoTransactionSupport.getResource(KEY_TRANSACTION); - Long txnId = txn.getId(); - // Update it - Long now = System.currentTimeMillis(); - txn.setCommitTimeMs(now); - updateTransaction(txnId, now); - } - } - - /** - * @return Returns a new transaction or an existing one if already active - */ - private TransactionEntity getCurrentTransaction() - { - TransactionEntity txn = 
AlfrescoTransactionSupport.getResource(KEY_TRANSACTION); - if (txn != null) - { - // We have been busy here before - return txn; - } - // Check that this is a writable txn - if (AlfrescoTransactionSupport.getTransactionReadState() != TxnReadState.TXN_READ_WRITE) - { - throw new ReadOnlyServerException(); - } - // Have to create a new transaction entry - Long now = System.currentTimeMillis(); - String changeTxnId = AlfrescoTransactionSupport.getTransactionId(); - Long txnId = insertTransaction(changeTxnId, now); - // Store it for later - if (isDebugEnabled) - { - logger.debug("Create txn: " + txnId); - } - txn = new TransactionEntity(); - txn.setId(txnId); - txn.setChangeTxnId(changeTxnId); - txn.setCommitTimeMs(now); - - AlfrescoTransactionSupport.bindResource(KEY_TRANSACTION, txn); - // Listen for the end of the transaction - AlfrescoTransactionSupport.bindDaoService(updateTransactionListener); - // Done - return txn; - } - - public Long getCurrentTransactionCommitTime() - { - Long commitTime = null; - TransactionEntity resource = AlfrescoTransactionSupport.getResource(KEY_TRANSACTION); - if(resource != null) - { - commitTime = resource.getCommitTimeMs(); - } - return commitTime; - } - - public Long getCurrentTransactionId(boolean ensureNew) - { - TransactionEntity txn; - if (ensureNew) - { - txn = getCurrentTransaction(); - } - else - { - txn = AlfrescoTransactionSupport.getResource(KEY_TRANSACTION); - } - return txn == null ? null : txn.getId(); - } - - /* - * Stores - */ - - @Override - public Pair getStore(StoreRef storeRef) - { - Pair rootNodePair = rootNodesCache.getByKey(storeRef); - if (rootNodePair == null) - { - return null; - } - else - { - return new Pair(rootNodePair.getSecond().getStore().getId(), rootNodePair.getFirst()); - } - } - - @Override - public List> getStores() - { - List storeEntities = selectAllStores(); - List> storeRefs = new ArrayList>(storeEntities.size()); - for (StoreEntity storeEntity : storeEntities) - { - storeRefs.add(new Pair(storeEntity.getId(), storeEntity.getStoreRef())); - } - return storeRefs; - } - - /** - * @throws InvalidStoreRefException if the store is invalid - */ - private StoreEntity getStoreNotNull(StoreRef storeRef) - { - Pair rootNodePair = rootNodesCache.getByKey(storeRef); - if (rootNodePair == null) - { - throw new InvalidStoreRefException(storeRef); - } - else - { - return rootNodePair.getSecond().getStore(); - } - } - - @Override - public boolean exists(StoreRef storeRef) - { - Pair rootNodePair = rootNodesCache.getByKey(storeRef); - return rootNodePair != null; - } - - @Override - public Pair getRootNode(StoreRef storeRef) - { - Pair rootNodePair = rootNodesCache.getByKey(storeRef); - if (rootNodePair == null) - { - throw new InvalidStoreRefException(storeRef); - } - else - { - return rootNodePair.getSecond().getNodePair(); - } - } - - @Override - public Set getAllRootNodes(StoreRef storeRef) - { - Set rootNodes = allRootNodesCache.get(storeRef); - if (rootNodes == null) - { - final Map> allRootNodes = new HashMap>(97); - getNodesWithAspects(Collections.singleton(ContentModel.ASPECT_ROOT), 0L, Long.MAX_VALUE, new NodeRefQueryCallback() - { - @Override - public boolean handle(Pair nodePair) - { - NodeRef nodeRef = nodePair.getSecond(); - StoreRef storeRef = nodeRef.getStoreRef(); - Set rootNodes = allRootNodes.get(storeRef); - if (rootNodes == null) - { - rootNodes = new HashSet(97); - allRootNodes.put(storeRef, rootNodes); - } - rootNodes.add(nodeRef); - return true; - } - }); - rootNodes = allRootNodes.get(storeRef); - if 
(rootNodes == null) - { - rootNodes = Collections.emptySet(); - allRootNodes.put(storeRef, rootNodes); - } - for (Map.Entry> entry : allRootNodes.entrySet()) - { - StoreRef entryStoreRef = entry.getKey(); - // Prevent unnecessary cross-invalidation - if (!allRootNodesCache.contains(entryStoreRef)) - { - allRootNodesCache.put(entryStoreRef, entry.getValue()); - } - } - } - return rootNodes; - } - - @Override - public Pair newStore(StoreRef storeRef) - { - // Create the store - StoreEntity store = new StoreEntity(); - store.setProtocol(storeRef.getProtocol()); - store.setIdentifier(storeRef.getIdentifier()); - - Long storeId = insertStore(store); - store.setId(storeId); - - // Get an ACL for the root node - Long aclId = aclDAO.createAccessControlList(); - - // Create a root node - Long nodeTypeQNameId = qnameDAO.getOrCreateQName(ContentModel.TYPE_STOREROOT).getFirst(); - NodeEntity rootNode = newNodeImpl(store, null, nodeTypeQNameId, null, aclId, null, true); - Long rootNodeId = rootNode.getId(); - addNodeAspects(rootNodeId, Collections.singleton(ContentModel.ASPECT_ROOT)); - - // Now update the store with the root node ID - store.setRootNode(rootNode); - updateStoreRoot(store); - - // Push the value into the caches - rootNodesCache.setValue(storeRef, rootNode); - - if (isDebugEnabled) - { - logger.debug("Created store: \n" + " " + store); - } - return new Pair(rootNode.getId(), rootNode.getNodeRef()); - } - - @Override - public void moveStore(StoreRef oldStoreRef, StoreRef newStoreRef) - { - StoreEntity store = getStoreNotNull(oldStoreRef); - store.setProtocol(newStoreRef.getProtocol()); - store.setIdentifier(newStoreRef.getIdentifier()); - // Update it - int count = updateStore(store); - if (count != 1) - { - throw new ConcurrencyFailureException("Store not updated: " + oldStoreRef); - } - // Bring all the associated nodes into the current transaction - Long txnId = getCurrentTransaction().getId(); - Long storeId = store.getId(); - updateNodesInStore(txnId, storeId); - - // All the NodeRef-based caches are invalid. ID-based caches are fine. - rootNodesCache.removeByKey(oldStoreRef); - allRootNodesCache.remove(oldStoreRef); - nodesCache.clear(); - - if (isDebugEnabled) - { - logger.debug("Moved store: " + oldStoreRef + " --> " + newStoreRef); - } - } - - /** - * Callback to cache store root nodes by {@link StoreRef}. - * - * @author Derek Hulley - * @since 3.4 - */ - private class RootNodesCacheCallbackDAO extends EntityLookupCallbackDAOAdaptor - { - /** - * @throws UnsupportedOperationException Stores must be created externally - */ - public Pair createValue(Node value) - { - throw new UnsupportedOperationException("Root node creation is done externally: " + value); - } - - /** - * @param storeRef the store ID - */ - public Pair findByKey(StoreRef storeRef) - { - NodeEntity node = selectStoreRootNode(storeRef); - return node == null ? null : new Pair(storeRef, node); - } - } - - /* - * Nodes - */ - - /** - * Callback to cache nodes by ID and {@link NodeRef}. When looking up objects based on the - * value key, only the referencing properties need be populated. ALL nodes are cached, - * not just live nodes. 
- * - * @see NodeEntity - * - * @author Derek Hulley - * @since 3.4 - */ - private class NodesCacheCallbackDAO extends EntityLookupCallbackDAOAdaptor - { - /** - * @throws UnsupportedOperationException Nodes are created externally - */ - public Pair createValue(Node value) - { - throw new UnsupportedOperationException("Node creation is done externally: " + value); - } - - /** - * @param nodeId the key node ID - */ - public Pair findByKey(Long nodeId) - { - NodeEntity node = selectNodeById(nodeId); - if (node != null) - { - // Lock it to prevent 'accidental' modification - node.lock(); - return new Pair(nodeId, node); - } - else - { - return null; - } - } - - /** - * @return Returns the Node's NodeRef - */ - @Override - public NodeRef getValueKey(Node value) - { - return value.getNodeRef(); - } - - /** - * Looks the node up based on the NodeRef of the given node - */ - @Override - public Pair findByValue(Node node) - { - NodeRef nodeRef = node.getNodeRef(); - node = selectNodeByNodeRef(nodeRef); - if (node != null) - { - // Lock it to prevent 'accidental' modification - node.lock(); - return new Pair(node.getId(), node); - } - else - { - return null; - } - } - } - - public boolean exists(Long nodeId) - { - Pair pair = nodesCache.getByKey(nodeId); - return pair != null && !pair.getSecond().getDeleted(qnameDAO); - } - - public boolean exists(NodeRef nodeRef) - { - NodeEntity node = new NodeEntity(nodeRef); - Pair pair = nodesCache.getByValue(node); - return pair != null && !pair.getSecond().getDeleted(qnameDAO); - } - - @Override - public boolean isInCurrentTxn(Long nodeId) - { - Long currentTxnId = getCurrentTransactionId(false); - if (currentTxnId == null) - { - // No transactional changes have been made to any nodes, therefore the node cannot - // be part of the current transaction - return false; - } - Node node = getNodeNotNull(nodeId, false); - Long nodeTxnId = node.getTransaction().getId(); - return nodeTxnId.equals(currentTxnId); - } - - @Override - public Status getNodeRefStatus(NodeRef nodeRef) - { - Node node = new NodeEntity(nodeRef); - Pair nodePair = nodesCache.getByValue(node); - // The nodesCache gets both live and deleted nodes. - if (nodePair == null) - { - return null; - } - else - { - return nodePair.getSecond().getNodeStatus(qnameDAO); - } - } - - @Override - public Status getNodeIdStatus(Long nodeId) - { - Pair nodePair = nodesCache.getByKey(nodeId); - // The nodesCache gets both live and deleted nodes. - if (nodePair == null) - { - return null; - } - else - { - return nodePair.getSecond().getNodeStatus(qnameDAO); - } - } - - @Override - public Pair getNodePair(NodeRef nodeRef) - { - NodeEntity node = new NodeEntity(nodeRef); - Pair pair = nodesCache.getByValue(node); - // Check it - if (pair == null || pair.getSecond().getDeleted(qnameDAO)) - { - // The cache says that the node is not there or is deleted. - // We double check by going to the DB - Node dbNode = selectNodeByNodeRef(nodeRef); - if (dbNode == null) - { - // The DB agrees. This is an invalid noderef. Why are you trying to use it? - return null; - } - else if (dbNode.getDeleted(qnameDAO)) - { - // We may have reached this deleted node via an invalid association; trigger a post transaction prune of - // any associations that point to this deleted one - pruneDanglingAssocs(dbNode.getId()); - - // The DB agrees. This is a deleted noderef. - return null; - } - else - { - // The cache was wrong, possibly due to it caching negative results earlier. 
- if (isDebugEnabled) - { - logger.debug("Repairing stale cache entry for node: " + nodeRef); - } - Long nodeId = dbNode.getId(); - invalidateNodeCaches(nodeId); - dbNode.lock(); // Prevent unexpected edits of values going into the cache - nodesCache.setValue(nodeId, dbNode); - return dbNode.getNodePair(); - } - } - return pair.getSecond().getNodePair(); - } - - /** - * Trigger a post transaction prune of any associations that point to this deleted one. - * @param nodeId Long - */ - private void pruneDanglingAssocs(Long nodeId) - { - selectChildAssocs(nodeId, null, null, null, null, null, new ChildAssocRefQueryCallback() - { - @Override - public boolean preLoadNodes() - { - return false; - } - - @Override - public boolean orderResults() - { - return false; - } - - @Override - public boolean handle(Pair childAssocPair, Pair parentNodePair, - Pair childNodePair) - { - bindFixAssocAndCollectLostAndFound(childNodePair, "childNodeWithDeletedParent", childAssocPair.getFirst(), childAssocPair.getSecond().isPrimary() && exists(childAssocPair.getFirst())); - return true; - } - - @Override - public void done() - { - } - }); - selectParentAssocs(nodeId, null, null, null, new ChildAssocRefQueryCallback() - { - @Override - public boolean preLoadNodes() - { - return false; - } - - @Override - public boolean orderResults() - { - return false; - } - - @Override - public boolean handle(Pair childAssocPair, Pair parentNodePair, - Pair childNodePair) - { - bindFixAssocAndCollectLostAndFound(childNodePair, "deletedChildWithParents", childAssocPair.getFirst(), false); - return true; - } - - @Override - public void done() - { - } - }); - } - - @Override - public Pair getNodePair(Long nodeId) - { - Pair pair = nodesCache.getByKey(nodeId); - // Check it - if (pair == null || pair.getSecond().getDeleted(qnameDAO)) - { - // The cache says that the node is not there or is deleted. - // We double check by going to the DB - Node dbNode = selectNodeById(nodeId); - if (dbNode == null) - { - // The DB agrees. This is an invalid noderef. Why are you trying to use it? - return null; - } - else if (dbNode.getDeleted(qnameDAO)) - { - // We may have reached this deleted node via an invalid association; trigger a post transaction prune of - // any associations that point to this deleted one - pruneDanglingAssocs(dbNode.getId()); - - // The DB agrees. This is a deleted noderef. - return null; - } - else - { - // The cache was wrong, possibly due to it caching negative results earlier. 
- if (isDebugEnabled) - { - logger.debug("Repairing stale cache entry for node: " + nodeId); - } - invalidateNodeCaches(nodeId); - dbNode.lock(); // Prevent unexpected edits of values going into the cache - nodesCache.setValue(nodeId, dbNode); - return dbNode.getNodePair(); - } - } - else - { - return pair.getSecond().getNodePair(); - } - } - - /** - * Get a node instance regardless of whether it is considered live or deleted - * - * @param nodeId the node ID to look for - * @param liveOnly true to ensure that only live nodes are retrieved - * @return a node that will be live if requested - * @throws ConcurrencyFailureException if a valid node is not found - */ - private Node getNodeNotNull(Long nodeId, boolean liveOnly) - { - Pair pair = nodesCache.getByKey(nodeId); - - if (pair == null) - { - // The node has no entry in the database - NodeEntity dbNode = selectNodeById(nodeId); - nodesCache.removeByKey(nodeId); - throw new ConcurrencyFailureException( - "No node row exists: \n" + - " ID: " + nodeId + "\n" + - " DB row: " + dbNode); - } - else if (pair.getSecond().getDeleted(qnameDAO) && liveOnly) - { - // The node is not 'live' as was requested - NodeEntity dbNode = selectNodeById(nodeId); - nodesCache.removeByKey(nodeId); - // Make absolutely sure that the node is not referenced by any associations - pruneDanglingAssocs(nodeId); - // Force a retry on the transaction - throw new ConcurrencyFailureException( - "No live node exists: \n" + - " ID: " + nodeId + "\n" + - " DB row: " + dbNode); - } - else - { - return pair.getSecond(); - } - } - - @Override - public QName getNodeType(Long nodeId) - { - Node node = getNodeNotNull(nodeId, false); - Long nodeTypeQNameId = node.getTypeQNameId(); - return qnameDAO.getQName(nodeTypeQNameId).getSecond(); - } - - @Override - public Long getNodeAclId(Long nodeId) - { - Node node = getNodeNotNull(nodeId, true); - return node.getAclId(); - } - - @Override - public ChildAssocEntity newNode( - Long parentNodeId, - QName assocTypeQName, - QName assocQName, - StoreRef storeRef, - String uuid, - QName nodeTypeQName, - Locale nodeLocale, - String childNodeName, - Map auditableProperties) throws InvalidTypeException - { - Assert.notNull(parentNodeId, "parentNodeId"); - Assert.notNull(assocTypeQName, "assocTypeQName"); - Assert.notNull(assocQName, "assocQName"); - Assert.notNull(storeRef, "storeRef"); - - if (auditableProperties == null) - { - auditableProperties = Collections.emptyMap(); - } - - // Get the parent node - Node parentNode = getNodeNotNull(parentNodeId, true); - - // Find an initial ACL for the node - Long parentAclId = parentNode.getAclId(); - AccessControlListProperties inheritedAcl = null; - Long childAclId = null; - if (parentAclId != null) - { - try - { - Long inheritedACL = aclDAO.getInheritedAccessControlList(parentAclId); - inheritedAcl = aclDAO.getAccessControlListProperties(inheritedACL); - if (inheritedAcl != null) - { - childAclId = inheritedAcl.getId(); - } - } - catch (RuntimeException e) - { - // The get* calls above actually do writes. So pessimistically get rid of the - // parent node from the cache in case it was wrong somehow. 
- invalidateNodeCaches(parentNodeId); - // Rethrow for a retry (ALF-17286) - throw new RuntimeException( - "Failure while 'getting' inherited ACL or ACL properties: \n" + - " parent ACL ID: " + parentAclId + "\n" + - " inheritied ACL: " + inheritedAcl, - e); - } - } - // Build the cm:auditable properties - AuditablePropertiesEntity auditableProps = new AuditablePropertiesEntity(); - boolean setAuditProps = auditableProps.setAuditValues(null, null, auditableProperties); - if (!setAuditProps) - { - // No cm:auditable properties were supplied - auditableProps = null; - } - - // Get the store - StoreEntity store = getStoreNotNull(storeRef); - // Create the node (it is not a root node) - Long nodeTypeQNameId = qnameDAO.getOrCreateQName(nodeTypeQName).getFirst(); - Long nodeLocaleId = localeDAO.getOrCreateLocalePair(nodeLocale).getFirst(); - NodeEntity node = newNodeImpl(store, uuid, nodeTypeQNameId, nodeLocaleId, childAclId, auditableProps, true); - Long nodeId = node.getId(); - - // Protect the node's cm:auditable if it was explicitly set - if (setAuditProps) - { - NodeRef nodeRef = node.getNodeRef(); - policyBehaviourFilter.disableBehaviour(nodeRef, ContentModel.ASPECT_AUDITABLE); - } - - // Now create a primary association for it - if (childNodeName == null) - { - childNodeName = node.getUuid(); - } - ChildAssocEntity assoc = newChildAssocImpl( - parentNodeId, nodeId, true, assocTypeQName, assocQName, childNodeName, false); - - // There will be no other parent assocs - boolean isRoot = false; - boolean isStoreRoot = nodeTypeQName.equals(ContentModel.TYPE_STOREROOT); - ParentAssocsInfo parentAssocsInfo = new ParentAssocsInfo(isRoot, isStoreRoot, assoc); - setParentAssocsCached(nodeId, parentAssocsInfo); - - if (isDebugEnabled) - { - logger.debug( - "Created new node: \n" + - " Node: " + node + "\n" + - " Assoc: " + assoc); - } - return assoc; - } - - /** - * @param uuid the node UUID, or null to auto-generate - * @param nodeTypeQNameId the node's type - * @param nodeLocaleId the node's locale or null to use the default locale - * @param aclId an ACL ID if available - * @param auditableProps null to auto-generate or provide a value to explicitly set - * @param allowAuditableAspect Should we override the behaviour by potentially not adding the auditable aspect - * @throws NodeExistsException if the target reference is already taken by a live node - */ - private NodeEntity newNodeImpl( - StoreEntity store, - String uuid, - Long nodeTypeQNameId, - Long nodeLocaleId, - Long aclId, - AuditablePropertiesEntity auditableProps, - boolean allowAuditableAspect) throws InvalidTypeException - { - NodeEntity node = new NodeEntity(); - // Store - node.setStore(store); - // UUID - if (uuid == null) - { - node.setUuid(GUID.generate()); - } - else - { - node.setUuid(uuid); - } - // QName - node.setTypeQNameId(nodeTypeQNameId); - QName nodeTypeQName = qnameDAO.getQName(nodeTypeQNameId).getSecond(); - // Locale - if (nodeLocaleId == null) - { - nodeLocaleId = localeDAO.getOrCreateDefaultLocalePair().getFirst(); - } - node.setLocaleId(nodeLocaleId); - // ACL (may be null) - node.setAclId(aclId); - // Transaction - TransactionEntity txn = getCurrentTransaction(); - node.setTransaction(txn); - - // Audit - boolean addAuditableAspect = false; - if (auditableProps != null) - { - // Client-supplied cm:auditable values - node.setAuditableProperties(auditableProps); - addAuditableAspect = true; - } - else if (AuditablePropertiesEntity.hasAuditableAspect(nodeTypeQName, dictionaryService)) - { - // Automatically-generated 
cm:auditable values - auditableProps = new AuditablePropertiesEntity(); - auditableProps.setAuditValues(null, null, true, 0L); - node.setAuditableProperties(auditableProps); - addAuditableAspect = true; - } - - if (!allowAuditableAspect) addAuditableAspect = false; - - Long id = newNodeImplInsert(node); - node.setId(id); - - Set nodeAspects = null; - if (addAuditableAspect) - { - Long auditableAspectQNameId = qnameDAO.getOrCreateQName(ContentModel.ASPECT_AUDITABLE).getFirst(); - insertNodeAspect(id, auditableAspectQNameId); - nodeAspects = Collections.singleton(ContentModel.ASPECT_AUDITABLE); - } - else - { - nodeAspects = Collections.emptySet(); - } - - // Lock the node and cache - node.lock(); - nodesCache.setValue(id, node); - // Pre-populate some of the other caches so that we don't immediately query - setNodeAspectsCached(id, nodeAspects); - setNodePropertiesCached(id, Collections.emptyMap()); - - if (isDebugEnabled) - { - logger.debug("Created new node: \n" + " " + node); - } - return node; - } - - protected Long newNodeImplInsert(NodeEntity node) - { - Long id = null; - Savepoint savepoint = controlDAO.createSavepoint("newNodeImpl"); - try - { - // First try a straight insert and risk the constraint violation if the node exists - id = insertNode(node); - controlDAO.releaseSavepoint(savepoint); - } - catch (Throwable e) - { - controlDAO.rollbackToSavepoint(savepoint); - // This is probably because there is an existing node. We can handle existing deleted nodes. - NodeRef targetNodeRef = node.getNodeRef(); - Node dbTargetNode = selectNodeByNodeRef(targetNodeRef); - if (dbTargetNode == null) - { - // There does not appear to be any row that could prevent an insert - throw new AlfrescoRuntimeException("Failed to insert new node: " + node, e); - } - else if (dbTargetNode.getDeleted(qnameDAO)) - { - Long dbTargetNodeId = dbTargetNode.getId(); - // This is OK. It happens when we create a node that existed in the past. - // Remove the row completely - deleteNodeProperties(dbTargetNodeId, (Set) null); - deleteNodeById(dbTargetNodeId); - // Now repeat the insert but let any further problems just be thrown out - id = insertNode(node); - } - else - { - // A live node exists. 
- throw new NodeExistsException(dbTargetNode.getNodePair(), e); - } - } - - return id; - } - - @Override - public Pair, Pair> moveNode( - final Long childNodeId, - final Long newParentNodeId, - final QName assocTypeQName, - final QName assocQName) - { - final Node newParentNode = getNodeNotNull(newParentNodeId, true); - final StoreEntity newParentStore = newParentNode.getStore(); - final Node childNode = getNodeNotNull(childNodeId, true); - final StoreEntity childStore = childNode.getStore(); - final ChildAssocEntity primaryParentAssoc = getPrimaryParentAssocImpl(childNodeId); - final Long oldParentAclId; - final Long oldParentNodeId; - if (primaryParentAssoc == null) - { - oldParentAclId = null; - oldParentNodeId = null; - } - else - { - if (primaryParentAssoc.getParentNode() == null) - { - oldParentAclId = null; - oldParentNodeId = null; - } - else - { - oldParentNodeId = primaryParentAssoc.getParentNode().getId(); - oldParentAclId = getNodeNotNull(oldParentNodeId, true).getAclId(); - } - } - - // Need the child node's name here in case it gets removed - final String childNodeName = (String) getNodeProperty(childNodeId, ContentModel.PROP_NAME); - - // First attempt to move the node, which may rollback to a savepoint - Node newChildNode = childNode; - // Store - if (!childStore.getId().equals(newParentStore.getId())) - { - - //Delete the ASPECT_AUDITABLE from the source node so it doesn't get copied across - //A new aspect would have already been created in the newNodeImpl method. - // ... make sure we have the cm:auditable data from the originating node - AuditablePropertiesEntity auditableProps = childNode.getAuditableProperties(); - - // Create a new node - newChildNode = newNodeImpl( - newParentStore, - childNode.getUuid(), - childNode.getTypeQNameId(), - childNode.getLocaleId(), - childNode.getAclId(), - auditableProps, - false); - Long newChildNodeId = newChildNode.getId(); - - //copy all the data over to new node - moveNodeData(childNode.getId(), newChildNodeId); - - // The new node will have new data not present in the cache, yet - invalidateNodeCaches(newChildNodeId); - invalidateNodeChildrenCaches(newChildNodeId, true, true); - invalidateNodeChildrenCaches(newChildNodeId, false, true); - // Completely delete the original node but keep the ACL as it's reused - deleteNodeImpl(childNodeId, false); - } - else - { - // Touch the node; make sure parent assocs are invalidated - touchNode(childNodeId, null, null, false, false, true); - } - - final Long newChildNodeId = newChildNode.getId(); - - // Now update the primary parent assoc - updatePrimaryParentAssocs(primaryParentAssoc, - newParentNode, - childNode, - newChildNodeId, - childNodeName, - oldParentNodeId, - assocTypeQName, - assocQName); - - // Optimize for rename case - if (!EqualsHelper.nullSafeEquals(newParentNodeId, oldParentNodeId)) - { - // Check for cyclic relationships - // TODO: This adds a lot of overhead when moving hierarchies. - // While getPaths is faster, it would be better to avoid the parentAssocsCache - // completely. 
- getPaths(newChildNode.getNodePair(), false); -// cycleCheck(newChildNodeId); - - // Update ACLs for moved tree - Long newParentAclId = newParentNode.getAclId(); - - // Verify if parent has aspect applied and ACL's are pending - if (hasNodeAspect(oldParentNodeId, ContentModel.ASPECT_PENDING_FIX_ACL)) - { - Long oldParentSharedAclId = (Long) this.getNodeProperty(oldParentNodeId, ContentModel.PROP_SHARED_ACL_TO_REPLACE); - accessControlListDAO.updateInheritance(newChildNodeId, oldParentSharedAclId, newParentAclId); - } - else - { - accessControlListDAO.updateInheritance(newChildNodeId, oldParentAclId, newParentAclId); - } - } - - // Done - Pair assocPair = getPrimaryParentAssoc(newChildNode.getId()); - Pair nodePair = newChildNode.getNodePair(); - if (isDebugEnabled) - { - logger.debug("Moved node: " + assocPair + " ... " + nodePair); - } - return new Pair, Pair>(assocPair, nodePair); - } - - protected void updatePrimaryParentAssocs( - final ChildAssocEntity primaryParentAssoc, - final Node newParentNode, - final Node childNode, - final Long newChildNodeId, - final String childNodeName, - final Long oldParentNodeId, - final QName assocTypeQName, - final QName assocQName) - { - // Because we are retrying in-transaction i.e. absorbing exceptions, we need partial rollback &/or via savepoint if needed (eg. PostgreSQL) - RetryingCallback callback = new RetryingCallback() - { - public Integer execute() throws Throwable - { - return updatePrimaryParentAssocsImpl(primaryParentAssoc, - newParentNode, - childNode, - newChildNodeId, - childNodeName, - oldParentNodeId, - assocTypeQName, - assocQName); - } - }; - childAssocRetryingHelper.doWithRetry(callback); - } - - protected int updatePrimaryParentAssocsImpl( - ChildAssocEntity primaryParentAssoc, - Node newParentNode, - Node childNode, - Long newChildNodeId, - String childNodeName, - Long oldParentNodeId, - QName assocTypeQName, - QName assocQName) - { - Long newParentNodeId = newParentNode.getId(); - Long childNodeId = childNode.getId(); - - Savepoint savepoint = controlDAO.createSavepoint("DuplicateChildNodeNameException"); - // We use the child node's UUID if there is no cm:name - String childNodeNameToUse = childNodeName == null ? childNode.getUuid() : childNodeName; - - try - { - int updated = updatePrimaryParentAssocs( - newChildNodeId, - newParentNodeId, - assocTypeQName, - assocQName, - childNodeNameToUse); - controlDAO.releaseSavepoint(savepoint); - // Ensure we invalidate the name cache (the child version key might not have been 'bumped' by the last - // 'touch') - if (updated > 0 && primaryParentAssoc != null) - { - Pair oldTypeQnamePair = qnameDAO.getQName( - primaryParentAssoc.getTypeQNameId()); - if (oldTypeQnamePair != null) - { - childByNameCache.remove(new ChildByNameKey(oldParentNodeId, oldTypeQnamePair.getSecond(), - primaryParentAssoc.getChildNodeName())); - } - } - return updated; - } - catch (Throwable e) - { - controlDAO.rollbackToSavepoint(savepoint); - // DuplicateChildNodeNameException implements DoNotRetryException. - // There are some cases - FK violations, specifically - where we DO actually want to retry. 
- // Detecting this is done by looking for the related FK names, 'fk_alf_cass_*' in the error message - String lowerMsg = e.getMessage().toLowerCase(); - if (lowerMsg.contains("fk_alf_cass_")) - { - throw new ConcurrencyFailureException("FK violation updating primary parent association for " + childNodeId, e); - } - // We assume that this is from the child cm:name constraint violation - throw new DuplicateChildNodeNameException( - newParentNode.getNodeRef(), - assocTypeQName, - childNodeName, - e); - } - } - - @Override - public boolean updateNode(Long nodeId, QName nodeTypeQName, Locale nodeLocale) - { - // Get the existing node; we need to check for a change in store or UUID - Node oldNode = getNodeNotNull(nodeId, true); - final Long nodeTypeQNameId; - if (nodeTypeQName == null) - { - nodeTypeQNameId = oldNode.getTypeQNameId(); - } - else - { - nodeTypeQNameId = qnameDAO.getOrCreateQName(nodeTypeQName).getFirst(); - } - final Long nodeLocaleId; - if (nodeLocale == null) - { - nodeLocaleId = oldNode.getLocaleId(); - } - else - { - nodeLocaleId = localeDAO.getOrCreateLocalePair(nodeLocale).getFirst(); - } - - // Wrap all the updates into one - NodeUpdateEntity nodeUpdate = new NodeUpdateEntity(); - nodeUpdate.setId(nodeId); - nodeUpdate.setStore(oldNode.getStore()); // Need node reference - nodeUpdate.setUuid(oldNode.getUuid()); // Need node reference - // TypeQName (if necessary) - if (!nodeTypeQNameId.equals(oldNode.getTypeQNameId())) - { - nodeUpdate.setTypeQNameId(nodeTypeQNameId); - nodeUpdate.setUpdateTypeQNameId(true); - } - // Locale (if necessary) - if (!nodeLocaleId.equals(oldNode.getLocaleId())) - { - nodeUpdate.setLocaleId(nodeLocaleId); - nodeUpdate.setUpdateLocaleId(true); - } - - return updateNodeImpl(oldNode, nodeUpdate, null); - } - - - @Override - public int touchNodes(Long txnId, List nodeIds) - { - // limit in clause to 1000 node ids - int batchSize = 1000; - - int touched = 0; - ArrayList batch = new ArrayList(batchSize); - for(Long nodeId : nodeIds) - { - invalidateNodeCaches(nodeId); - batch.add(nodeId); - if(batch.size() % batchSize == 0) - { - touched += updateNodes(txnId, batch); - batch.clear(); - } - } - if(batch.size() > 0) - { - touched += updateNodes(txnId, batch); - } - return touched; - } - - /** - * Updates the node's transaction and cm:auditable properties while - * providing a convenient method to control cache entry invalidation. - *
-     * Not all 'touch' signals actually produce a change: the node may already have been touched
-     * in the current transaction. In this case, the required caches are explicitly invalidated
-     * as requested.
-     * It is more complicated when the node is modified. If the node is modified against a previous
-     * transaction then all cache entries are left untrusted and not pulled forward. But if the
-     * node is modified in the same transaction, then the cache entries are considered good and
-     * pulled forward against the current version of the node ... unless the cache was specifically
-     * tagged for invalidation.
-     *
- * It is sometime necessary to provide the node's current aspects, particularly during - * changes to the aspect list. If not provided, they will be looked up. - * - * @param nodeId the ID of the node (must refer to a live node) - * @param auditableProps optionally override the cm:auditable values - * @param nodeAspects the node's aspects or null to look them up - * @param invalidateNodeAspectsCache true if the node's cached aspects are unreliable - * @param invalidateNodePropertiesCache true if the node's cached properties are unreliable - * @param invalidateParentAssocsCache true if the node's cached parent assocs are unreliable - * - * @see #updateNodeImpl(Node, NodeUpdateEntity, Set) - */ - private boolean touchNode( - Long nodeId, AuditablePropertiesEntity auditableProps, Set nodeAspects, - boolean invalidateNodeAspectsCache, - boolean invalidateNodePropertiesCache, - boolean invalidateParentAssocsCache) - { - Node node = null; - try - { - node = getNodeNotNull(nodeId, false); - } - catch (DataIntegrityViolationException e) - { - // The ID doesn't reference a live node. - // We do nothing w.r.t. touching - return false; - } - - NodeUpdateEntity nodeUpdate = new NodeUpdateEntity(); - nodeUpdate.setId(nodeId); - nodeUpdate.setAuditableProperties(auditableProps); - // Update it - boolean updatedNode = updateNodeImpl(node, nodeUpdate, nodeAspects); - // Handle the cache invalidation requests - NodeVersionKey nodeVersionKey = node.getNodeVersionKey(); - if (updatedNode) - { - Node newNode = getNodeNotNull(nodeId, false); - NodeVersionKey newNodeVersionKey = newNode.getNodeVersionKey(); - // The version will have moved on, effectively rendering our caches invalid. - // Copy over caches that DON'T need invalidating - if (!invalidateNodeAspectsCache) - { - copyNodeAspectsCached(nodeVersionKey, newNodeVersionKey); - } - if (!invalidateNodePropertiesCache) - { - copyNodePropertiesCached(nodeVersionKey, newNodeVersionKey); - } - if (invalidateParentAssocsCache) - { - // Because we cache parent assocs by transaction, we must manually invalidate on this version change - invalidateParentAssocsCached(node); - } - else - { - copyParentAssocsCached(node); - } - } - else - { - // The node was not touched. By definition it MUST be in the current transaction. - // We invalidate the caches as specifically requested - invalidateNodeCaches( - node, - invalidateNodeAspectsCache, - invalidateNodePropertiesCache, - invalidateParentAssocsCache); - } - - return updatedNode; - } - - /** - * Helper method that updates the node, bringing it into the current transaction with - * the appropriate cm:auditable and transaction behaviour. - *
- * If the NodeRef of the node is changing (usually a store move) then deleted - * nodes are cleaned out where they might exist. - * - * @param oldNode the existing node, fully populated - * @param nodeUpdate the node update with all update elements populated - * @param nodeAspects the node's aspects or null to look them up - * @return true if any updates were made - */ - private boolean updateNodeImpl(Node oldNode, NodeUpdateEntity nodeUpdate, Set nodeAspects) - { - Long nodeId = oldNode.getId(); - - // Make sure that the ID has been populated - if (!EqualsHelper.nullSafeEquals(nodeId, nodeUpdate.getId())) - { - throw new IllegalArgumentException("NodeUpdateEntity node ID is not correct: " + nodeUpdate); - } - - // Copy of the reference data - nodeUpdate.setStore(oldNode.getStore()); - nodeUpdate.setUuid(oldNode.getUuid()); - - // Ensure that other values are set for completeness when caching - if (!nodeUpdate.isUpdateTypeQNameId()) - { - nodeUpdate.setTypeQNameId(oldNode.getTypeQNameId()); - } - if (!nodeUpdate.isUpdateLocaleId()) - { - nodeUpdate.setLocaleId(oldNode.getLocaleId()); - } - if (!nodeUpdate.isUpdateAclId()) - { - nodeUpdate.setAclId(oldNode.getAclId()); - } - - nodeUpdate.setVersion(oldNode.getVersion()); - // Update the transaction - TransactionEntity txn = getCurrentTransaction(); - nodeUpdate.setTransaction(txn); - if (!txn.getId().equals(oldNode.getTransaction().getId())) - { - // Only update if the txn has changed - nodeUpdate.setUpdateTransaction(true); - } - // Update auditable - if (nodeAspects == null) - { - nodeAspects = getNodeAspects(nodeId); - } - if (nodeAspects.contains(ContentModel.ASPECT_AUDITABLE)) - { - NodeRef oldNodeRef = oldNode.getNodeRef(); - if (policyBehaviourFilter.isEnabled(oldNodeRef, ContentModel.ASPECT_AUDITABLE)) - { - // Make sure that auditable properties are present - AuditablePropertiesEntity auditableProps = oldNode.getAuditableProperties(); - if (auditableProps == null) - { - auditableProps = new AuditablePropertiesEntity(); - } - else - { - auditableProps = new AuditablePropertiesEntity(auditableProps); - } - long modifiedDateToleranceMs = 1000L; - - if (nodeUpdate.isUpdateTransaction()) - { - // allow update cm:modified property for new transaction - modifiedDateToleranceMs = 0L; - } - - boolean updateAuditableProperties = auditableProps.setAuditValues(null, null, false, modifiedDateToleranceMs); - nodeUpdate.setAuditableProperties(auditableProps); - nodeUpdate.setUpdateAuditableProperties(updateAuditableProperties); - } - else if (nodeUpdate.getAuditableProperties() == null) - { - // cache the explicit setting of auditable properties when creating node (note: auditable aspect is not yet present) - AuditablePropertiesEntity auditableProps = oldNode.getAuditableProperties(); - if (auditableProps != null) - { - nodeUpdate.setAuditableProperties(auditableProps); // Can reuse the locked instance - nodeUpdate.setUpdateAuditableProperties(true); - } - } - else - { - // ALF-4117: NodeDAO: Allow cm:auditable to be set - // The nodeUpdate had auditable properties set, so we just use that directly - nodeUpdate.setUpdateAuditableProperties(true); - } - } - else - { - // Make sure that any auditable properties are removed - AuditablePropertiesEntity auditableProps = oldNode.getAuditableProperties(); - if (auditableProps != null) - { - nodeUpdate.setAuditableProperties(null); - nodeUpdate.setUpdateAuditableProperties(true); - } - } - - // Just bug out if nothing has changed - if (!nodeUpdate.isUpdateAnything()) - { - return false; - } - - // The 
node is remaining in the current store - int count = 0; - Throwable concurrencyException = null; - try - { - count = updateNode(nodeUpdate); - } - catch (Throwable e) - { - concurrencyException = e; - } - // Do concurrency check - if (count != 1) - { - // Drop the value from the cache in case the cache is stale - nodesCache.removeByKey(nodeId); - nodesCache.removeByValue(nodeUpdate); - - throw new ConcurrencyFailureException("Failed to update node " + nodeId, concurrencyException); - } - else - { - // Check for wrap-around in the version number - if (nodeUpdate.getVersion().equals(LONG_ZERO)) - { - // The version was wrapped back to zero - // The caches that are keyed by version are now unreliable - propertiesCache.clear(); - aspectsCache.clear(); - parentAssocsCache.clear(); - } - // Update the caches - nodeUpdate.lock(); - nodesCache.setValue(nodeId, nodeUpdate); - // The node's version has moved on so no need to invalidate caches - } - - // Done - if (isDebugEnabled) - { - logger.debug( - "Updated Node: \n" + - " OLD: " + oldNode + "\n" + - " NEW: " + nodeUpdate); - } - return true; - } - - @Override - public void setNodeAclId(Long nodeId, Long aclId) - { - Node oldNode = getNodeNotNull(nodeId, true); - NodeUpdateEntity nodeUpdateEntity = new NodeUpdateEntity(); - nodeUpdateEntity.setId(nodeId); - nodeUpdateEntity.setAclId(aclId); - nodeUpdateEntity.setUpdateAclId(true); - updateNodeImpl(oldNode, nodeUpdateEntity, null); - } - - public void setPrimaryChildrenSharedAclId( - Long primaryParentNodeId, - Long optionalOldSharedAlcIdInAdditionToNull, - Long newSharedAclId) - { - Long txnId = getCurrentTransaction().getId(); - updatePrimaryChildrenSharedAclId( - txnId, - primaryParentNodeId, - optionalOldSharedAlcIdInAdditionToNull, - newSharedAclId); - invalidateNodeChildrenCaches(primaryParentNodeId, true, false); - } - - @Override - public void deleteNode(Long nodeId) - { - // Delete and take the ACLs to the grave - deleteNodeImpl(nodeId, true); - } - - /** - * Physical deletion of the node - * - * @param nodeId the node to delete - * @param deleteAcl true to delete any associated ACLs otherwise - * false if the ACLs get reused elsewhere - */ - private void deleteNodeImpl(Long nodeId, boolean deleteAcl) - { - Node node = getNodeNotNull(nodeId, true); - // Gather data for later - Long aclId = node.getAclId(); - Set nodeAspects = getNodeAspects(nodeId); - - // Clean up content data - Set contentQNames = new HashSet(dictionaryService.getAllProperties(DataTypeDefinition.CONTENT)); - Set contentQNamesToRemoveIds = qnameDAO.convertQNamesToIds(contentQNames, false); - contentDataDAO.deleteContentDataForNode(nodeId, contentQNamesToRemoveIds); - - // Delete content usage deltas - usageDAO.deleteDeltas(nodeId); - - // Handle sys:aspect_root - if (nodeAspects.contains(ContentModel.ASPECT_ROOT)) - { - StoreRef storeRef = node.getStore().getStoreRef(); - allRootNodesCache.remove(storeRef); - } - - // Remove child associations (invalidate children) - invalidateNodeChildrenCaches(nodeId, true, true); - invalidateNodeChildrenCaches(nodeId, false, true); - - // Remove aspects - deleteNodeAspects(nodeId, null); - - // Remove properties - deleteNodeProperties(nodeId, (Set) null); - - // Remove subscriptions - deleteSubscriptions(nodeId); - - // Delete the row completely: - // ALF-12358: Concurrency: Possible to create association references to deleted nodes - // There will be no way that any references can be made to a deleted node because we - // are really going to delete it. 
However, for tracking purposes we need to maintain - // a list of nodes deleted in the transaction. We store that information against a - // new node of type 'sys:deleted'. This means that 'deleted' nodes are really just - // orphaned (read standalone) nodes that remain invisible outside of the DAO. - int deleted = deleteNodeById(nodeId); - // We will always have to invalidate the cache for the node - invalidateNodeCaches(nodeId); - // Concurrency check - if (deleted != 1) - { - // We thought that the row existed - throw new ConcurrencyFailureException( - "Failed to delete node: \n" + - " Node: " + node); - } - - // Remove ACLs - if (deleteAcl && aclId != null) - { - aclDAO.deleteAclForNode(aclId); - } - - // The node has been cleaned up. Now we recreate the node for index tracking purposes. - // Use a 'deleted' type QName - StoreEntity store = node.getStore(); - String uuid = node.getUuid(); - Long deletedQNameId = qnameDAO.getOrCreateQName(ContentModel.TYPE_DELETED).getFirst(); - Long defaultLocaleId = localeDAO.getOrCreateDefaultLocalePair().getFirst(); - Node deletedNode = newNodeImpl(store, uuid, deletedQNameId, defaultLocaleId, null, null, true); - Long deletedNodeId = deletedNode.getId(); - // Store the original ID as a property - Map trackingProps = Collections.singletonMap(ContentModel.PROP_ORIGINAL_ID, (Serializable) nodeId); - setNodePropertiesImpl(deletedNodeId, trackingProps, true); - } - - @Override - public int purgeNodes(long fromTxnCommitTimeMs, long toTxnCommitTimeMs) - { - return deleteNodesByCommitTime(fromTxnCommitTimeMs, toTxnCommitTimeMs); - } - - /* - * Node Properties - */ - - public Map getNodeProperties(Long nodeId) - { - Map props = getNodePropertiesCached(nodeId); - // Create a shallow copy to allow additions - props = new HashMap(props); - - Node node = getNodeNotNull(nodeId, false); - // Handle sys:referenceable - ReferenceablePropertiesEntity.addReferenceableProperties(node, props); - // Handle sys:localized - LocalizedPropertiesEntity.addLocalizedProperties(localeDAO, node, props); - // Handle cm:auditable - if (hasNodeAspect(nodeId, ContentModel.ASPECT_AUDITABLE)) - { - AuditablePropertiesEntity auditableProperties = node.getAuditableProperties(); - if (auditableProperties == null) - { - auditableProperties = new AuditablePropertiesEntity(); - } - props.putAll(auditableProperties.getAuditableProperties()); - } - - // Wrap to ensure that we only clone values if the client attempts to modify - // the map or retrieve values that might, themselves, be mutable - props = new ValueProtectingMap(props, NodePropertyValue.IMMUTABLE_CLASSES); - - // Done - if (isDebugEnabled) - { - logger.debug("Fetched properties for Node: \n" + - " Node: " + nodeId + "\n" + - " Props: " + props); - } - return props; - } - - @Override - public Serializable getNodeProperty(Long nodeId, QName propertyQName) - { - Serializable value = null; - // We have to load the node for cm:auditable - if (AuditablePropertiesEntity.isAuditableProperty(propertyQName)) - { - Node node = getNodeNotNull(nodeId, false); - AuditablePropertiesEntity auditableProperties = node.getAuditableProperties(); - if (auditableProperties != null) - { - value = auditableProperties.getAuditableProperty(propertyQName); - } - } - else if (ReferenceablePropertiesEntity.isReferenceableProperty(propertyQName)) // sys:referenceable - { - Node node = getNodeNotNull(nodeId, false); - value = ReferenceablePropertiesEntity.getReferenceableProperty(node, propertyQName); - } - else if 
(LocalizedPropertiesEntity.isLocalizedProperty(propertyQName)) // sys:localized - { - Node node = getNodeNotNull(nodeId, false); - value = LocalizedPropertiesEntity.getLocalizedProperty(localeDAO, node, propertyQName); - } - else - { - Map props = getNodePropertiesCached(nodeId); - // Wrap to ensure that we only clone values if the client attempts to modify - // the map or retrieve values that might, themselves, be mutable - props = new ValueProtectingMap(props, NodePropertyValue.IMMUTABLE_CLASSES); - // The 'get' here will clone the value if it is mutable - value = props.get(propertyQName); - } - // Done - if (isDebugEnabled) - { - logger.debug("Fetched property for Node: \n" + - " Node: " + nodeId + "\n" + - " QName: " + propertyQName + "\n" + - " Value: " + value); - } - return value; - } - - /** - * Does differencing to add and/or remove properties. Internally, the existing properties - * will be retrieved and a difference performed to work out which properties need to be - * created, updated or deleted. - *
- * Note: The cached properties are not updated - * - * @param nodeId the node ID - * @param newProps the properties to add or update - * @param isAddOnly true if the new properties are just an update or - * false if the properties are a complete set - * @return Returns true if any properties were changed - */ - private boolean setNodePropertiesImpl( - Long nodeId, - Map newProps, - boolean isAddOnly) - { - if (isAddOnly && newProps.size() == 0) - { - return false; // No point adding nothing - } - - // Get the current node - Node node = getNodeNotNull(nodeId, false); - // Create an update node - NodeUpdateEntity nodeUpdate = new NodeUpdateEntity(); - nodeUpdate.setId(nodeId); - - // Copy inbound values - newProps = new HashMap(newProps); - - // Copy cm:auditable - if (!policyBehaviourFilter.isEnabled(node.getNodeRef(), ContentModel.ASPECT_AUDITABLE)) - { - // Only bother if cm:auditable properties are present - if (AuditablePropertiesEntity.hasAuditableProperty(newProps.keySet())) - { - AuditablePropertiesEntity auditableProps = node.getAuditableProperties(); - if (auditableProps == null) - { - auditableProps = new AuditablePropertiesEntity(); - } - else - { - auditableProps = new AuditablePropertiesEntity(auditableProps); // Unlocked instance - } - boolean containedAuditProperties = auditableProps.setAuditValues(null, null, newProps); - if (!containedAuditProperties) - { - // Double-check (previous hasAuditableProperty should cover it) - // The behaviour is disabled, but no audit properties were passed in - auditableProps = null; - } - nodeUpdate.setAuditableProperties(auditableProps); - nodeUpdate.setUpdateAuditableProperties(true); - } - } - - // Remove cm:auditable - newProps.keySet().removeAll(AuditablePropertiesEntity.getAuditablePropertyQNames()); - - // Check if the sys:localized property is being changed - Long oldNodeLocaleId = node.getLocaleId(); - Locale newLocale = DefaultTypeConverter.INSTANCE.convert( - Locale.class, - newProps.get(ContentModel.PROP_LOCALE)); - if (newLocale != null) - { - Long newNodeLocaleId = localeDAO.getOrCreateLocalePair(newLocale).getFirst(); - if (!newNodeLocaleId.equals(oldNodeLocaleId)) - { - nodeUpdate.setLocaleId(newNodeLocaleId); - nodeUpdate.setUpdateLocaleId(true); - } - } - // else: a 'null' new locale is completely ignored. This is the behaviour we choose. - - // Remove sys:localized - LocalizedPropertiesEntity.removeLocalizedProperties(node, newProps); - - // Remove sys:referenceable - ReferenceablePropertiesEntity.removeReferenceableProperties(node, newProps); - // Load the current properties. - // This means that we have to go to the DB during cold-write operations, - // but usually a write occurs after a node has been fetched of viewed in - // some way by the client code. Loading the existing properties has the - // advantage that the differencing code can eliminate unnecessary writes - // completely. - Map oldPropsCached = getNodePropertiesCached(nodeId); // Keep pristine for caching - Map oldProps = new HashMap(oldPropsCached); - // If we're adding, remove current properties that are not of interest - if (isAddOnly) - { - oldProps.keySet().retainAll(newProps.keySet()); - } - // We need to convert the new properties to our internally-used format, - // which is compatible with model i.e. people may have passed in data - // which needs to be converted to a model-compliant format. We do this - // before comparisons to avoid false negatives. 
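
A minimal, self-contained sketch of the differencing pattern applied in the code that follows. It is illustrative only: plain String keys stand in for QName, null values and the ContentData special-casing are skipped, and the class and method names are invented. The real implementation performs the same classification via EqualsHelper.getMapComparison over the persistent NodePropertyValue form.

import java.io.Serializable;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

/** Sketch only: classify old vs. new property maps into deletions and additions. */
class PropertyDiffSketch
{
    static void diff(
            Map<String, Serializable> oldProps,
            Map<String, Serializable> newProps,
            Set<String> propsToDelete,
            Map<String, Serializable> propsToAdd)
    {
        for (Map.Entry<String, Serializable> old : oldProps.entrySet())
        {
            Serializable newValue = newProps.get(old.getKey());
            if (newValue == null)
            {
                propsToDelete.add(old.getKey());                    // LEFT_ONLY: property was removed
            }
            else if (!newValue.equals(old.getValue()))
            {
                propsToDelete.add(old.getKey());                    // NOT_EQUAL: delete, then re-add below
                propsToAdd.put(old.getKey(), newValue);
            }
        }
        for (Map.Entry<String, Serializable> entry : newProps.entrySet())
        {
            if (!oldProps.containsKey(entry.getKey()))
            {
                propsToAdd.put(entry.getKey(), entry.getValue());   // RIGHT_ONLY: property was added
            }
        }
    }

    public static void main(String[] args)
    {
        Map<String, Serializable> oldProps = new HashMap<>(Map.of("cm:title", "a", "cm:description", "x"));
        Map<String, Serializable> newProps = new HashMap<>(Map.of("cm:title", "b", "cm:author", "y"));
        Set<String> toDelete = new HashSet<>();
        Map<String, Serializable> toAdd = new HashMap<>();
        diff(oldProps, newProps, toDelete, toAdd);
        // e.g. deletions [cm:description, cm:title], additions {cm:title=b, cm:author=y} (order may vary)
        System.out.println(toDelete + " / " + toAdd);
    }
}
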
- Map newPropsRaw = nodePropertyHelper.convertToPersistentProperties(newProps); - newProps = nodePropertyHelper.convertToPublicProperties(newPropsRaw); - // Now find out what's changed - Map diff = EqualsHelper.getMapComparison( - oldProps, - newProps); - // Keep track of properties to delete and add - Set propsToDelete = new HashSet(oldProps.size()*2); - Map propsToAdd = new HashMap(newProps.size() * 2); - Set contentQNamesToDelete = new HashSet(5); - for (Map.Entry entry : diff.entrySet()) - { - QName qname = entry.getKey(); - - PropertyDefinition removePropDef = dictionaryService.getProperty(qname); - boolean isContent = (removePropDef != null && - removePropDef.getDataType().getName().equals(DataTypeDefinition.CONTENT)); - - switch (entry.getValue()) - { - case EQUAL: - // Ignore - break; - case LEFT_ONLY: - // Not in the new properties - propsToDelete.add(qname); - if (isContent) - { - contentQNamesToDelete.add(qname); - } - break; - case NOT_EQUAL: - // Must remove from the LHS - propsToDelete.add(qname); - if (isContent) - { - contentQNamesToDelete.add(qname); - } - // Fall through to load up the RHS - case RIGHT_ONLY: - // We're adding this - Serializable value = newProps.get(qname); - if (isContent && value != null) - { - ContentData newContentData = (ContentData) value; - Long newContentDataId = contentDataDAO.createContentData(newContentData).getFirst(); - value = new ContentDataWithId(newContentData, newContentDataId); - } - propsToAdd.put(qname, value); - break; - default: - throw new IllegalStateException("Unknown MapValueComparison: " + entry.getValue()); - } - } - - boolean modifyProps = propsToDelete.size() > 0 || propsToAdd.size() > 0; - boolean updated = modifyProps || nodeUpdate.isUpdateAnything(); - - // Bring the node into the current transaction - if (nodeUpdate.isUpdateAnything()) - { - // We have to explicitly update the node (sys:locale or cm:auditable) - if (updateNodeImpl(node, nodeUpdate, null)) - { - // Copy the caches across - NodeVersionKey nodeVersionKey = node.getNodeVersionKey(); - NodeVersionKey newNodeVersionKey = getNodeNotNull(nodeId, false).getNodeVersionKey(); - copyNodeAspectsCached(nodeVersionKey, newNodeVersionKey); - copyNodePropertiesCached(nodeVersionKey, newNodeVersionKey); - copyParentAssocsCached(node); - } - } - else if (modifyProps) - { - // Touch the node; all caches are fine - touchNode(nodeId, null, null, false, false, false); - } - - // Touch to bring into current txn - if (modifyProps) - { - // Clean up content properties - try - { - if (contentQNamesToDelete.size() > 0) - { - Set contentQNameIdsToDelete = qnameDAO.convertQNamesToIds(contentQNamesToDelete, false); - contentDataDAO.deleteContentDataForNode(nodeId, contentQNameIdsToDelete); - } - } - catch (Throwable e) - { - throw new AlfrescoRuntimeException( - "Failed to delete content properties: \n" + - " Node: " + nodeId + "\n" + - " Delete Tried: " + contentQNamesToDelete, - e); - } - - try - { - // Apply deletes - Set propQNameIdsToDelete = qnameDAO.convertQNamesToIds(propsToDelete, true); - deleteNodeProperties(nodeId, propQNameIdsToDelete); - // Now create the raw properties for adding - newPropsRaw = nodePropertyHelper.convertToPersistentProperties(propsToAdd); - insertNodeProperties(nodeId, newPropsRaw); - } - catch (Throwable e) - { - // Don't trust the caches for the node - invalidateNodeCaches(nodeId); - // Focused error - throw new AlfrescoRuntimeException( - "Failed to write property deltas: \n" + - " Node: " + nodeId + "\n" + - " Old: " + oldProps + "\n" + - " New: " 
+ newProps + "\n" + - " Diff: " + diff + "\n" + - " Delete Tried: " + propsToDelete + "\n" + - " Add Tried: " + propsToAdd, - e); - } - - // Build the properties to cache based on whether this is an append or replace - Map propsToCache = null; - if (isAddOnly) - { - // Copy cache properties for additions - propsToCache = new HashMap(oldPropsCached); - // Combine the old and new properties - propsToCache.putAll(propsToAdd); - } - else - { - // Replace old properties - propsToCache = newProps; - propsToCache.putAll(propsToAdd); // Ensure correct types - } - // Update cache - setNodePropertiesCached(nodeId, propsToCache); - } - - // Done - if (isDebugEnabled && updated) - { - logger.debug( - "Modified node properties: " + nodeId + "\n" + - " Removed: " + propsToDelete + "\n" + - " Added: " + propsToAdd + "\n" + - " Node Update: " + nodeUpdate); - } - return updated; - } - - @Override - public boolean setNodeProperties(Long nodeId, Map properties) - { - // Merge with current values - boolean modified = setNodePropertiesImpl(nodeId, properties, false); - - // Done - return modified; - } - - @Override - public boolean addNodeProperty(Long nodeId, QName qname, Serializable value) - { - // Copy inbound values - Map newProps = new HashMap(3); - newProps.put(qname, value); - // Merge with current values - boolean modified = setNodePropertiesImpl(nodeId, newProps, true); - - // Done - return modified; - } - - @Override - public boolean addNodeProperties(Long nodeId, Map properties) - { - // Merge with current values - boolean modified = setNodePropertiesImpl(nodeId, properties, true); - - // Done - return modified; - } - - @Override - public boolean removeNodeProperties(Long nodeId, Set propertyQNames) - { - propertyQNames = new HashSet(propertyQNames); - ReferenceablePropertiesEntity.removeReferenceableProperties(propertyQNames); - if (propertyQNames.size() == 0) - { - return false; // sys:referenceable properties cannot be removed - } - LocalizedPropertiesEntity.removeLocalizedProperties(propertyQNames); - if (propertyQNames.size() == 0) - { - return false; // sys:localized properties cannot be removed - } - Set qnameIds = qnameDAO.convertQNamesToIds(propertyQNames, false); - int deleteCount = deleteNodeProperties(nodeId, qnameIds); - - if (deleteCount > 0) - { - // Touch the node; all caches are fine - touchNode(nodeId, null, null, false, false, false); - // Get cache props - Map cachedProps = getNodePropertiesCached(nodeId); - // Remove deleted properties - Map props = new HashMap(cachedProps); - props.keySet().removeAll(propertyQNames); - // Update cache - setNodePropertiesCached(nodeId, props); - } - // Done - return deleteCount > 0; - } - - @Override - public boolean setModifiedDate(Long nodeId, Date modifiedDate) - { - return setModifiedProperties(nodeId, modifiedDate, null); - } - - @Override - public boolean setModifiedProperties(Long nodeId, Date modifiedDate, String modifiedBy) { - // Do nothing if the node is not cm:auditable - if (!hasNodeAspect(nodeId, ContentModel.ASPECT_AUDITABLE)) - { - return false; - } - // Get the node - Node node = getNodeNotNull(nodeId, false); - NodeRef nodeRef = node.getNodeRef(); - // Get the existing auditable values - AuditablePropertiesEntity auditableProps = node.getAuditableProperties(); - boolean dateChanged = false; - if (auditableProps == null) - { - // The properties should be present - auditableProps = new AuditablePropertiesEntity(); - auditableProps.setAuditValues(modifiedBy, modifiedDate, true, 1000L); - dateChanged = true; - } - else - { - 
auditableProps = new AuditablePropertiesEntity(auditableProps); - dateChanged = auditableProps.setAuditModified(modifiedDate, 1000L); - if (dateChanged) - { - auditableProps.setAuditModifier(modifiedBy); - } - } - if (dateChanged) - { - try - { - policyBehaviourFilter.disableBehaviour(nodeRef, ContentModel.ASPECT_AUDITABLE); - // Touch the node; all caches are fine - return touchNode(nodeId, auditableProps, null, false, false, false); - } - finally - { - policyBehaviourFilter.enableBehaviour(nodeRef, ContentModel.ASPECT_AUDITABLE); - } - } - else - { - // Date did not advance - return false; - } - } - - /** - * @return Returns the read-only cached property map - */ - private Map getNodePropertiesCached(Long nodeId) - { - NodeVersionKey nodeVersionKey = getNodeNotNull(nodeId, false).getNodeVersionKey(); - Pair> cacheEntry = propertiesCache.getByKey(nodeVersionKey); - if (cacheEntry == null) - { - invalidateNodeCaches(nodeId); - throw new DataIntegrityViolationException("Invalid node ID: " + nodeId); - } - // We have the properties from the cache - Map cachedProperties = cacheEntry.getSecond(); - return cachedProperties; - } - - /** - * Update the node properties cache. The incoming properties will be wrapped to be - * unmodifiable. - *
- * NOTE: Incoming properties must exclude the cm:auditable properties - */ - private void setNodePropertiesCached(Long nodeId, Map properties) - { - NodeVersionKey nodeVersionKey = getNodeNotNull(nodeId, false).getNodeVersionKey(); - propertiesCache.setValue(nodeVersionKey, Collections.unmodifiableMap(properties)); - } - - /** - * Helper method to copy cache values from one key to another - */ - private void copyNodePropertiesCached(NodeVersionKey from, NodeVersionKey to) - { - Map cacheEntry = propertiesCache.getValue(from); - if (cacheEntry != null) - { - propertiesCache.setValue(to, cacheEntry); - } - } - - /** - * Callback to cache node properties. The DAO callback only does the simple {@link #findByKey(Serializable)}. - * - * @author Derek Hulley - * @since 3.4 - */ - private class PropertiesCallbackDAO extends EntityLookupCallbackDAOAdaptor, Serializable> - { - public Pair> createValue(Map value) - { - throw new UnsupportedOperationException("A node always has a 'map' of properties."); - } - - public Pair> findByKey(NodeVersionKey nodeVersionKey) - { - Long nodeId = nodeVersionKey.getNodeId(); - Map> propsRawByNodeVersionKey = selectNodeProperties(nodeId); - Map propsRaw = propsRawByNodeVersionKey.get(nodeVersionKey); - if (propsRaw == null) - { - // Didn't find a match. Is this because there are none? - if (propsRawByNodeVersionKey.size() == 0) - { - // This is OK. The node has no properties - propsRaw = Collections.emptyMap(); - } - else - { - // We found properties associated with a different node ID and version - invalidateNodeCaches(nodeId); - throw new DataIntegrityViolationException( - "Detected stale node entry: " + nodeVersionKey + - " (now " + propsRawByNodeVersionKey.keySet() + ")"); - } - } - // Convert to public properties - Map props = nodePropertyHelper.convertToPublicProperties(propsRaw); - // Done - return new Pair>(nodeVersionKey, Collections.unmodifiableMap(props)); - } - } - - /* - * Aspects - */ - - @Override - public Set getNodeAspects(Long nodeId) - { - Set nodeAspects = getNodeAspectsCached(nodeId); - // Nodes are always referenceable - nodeAspects.add(ContentModel.ASPECT_REFERENCEABLE); - // Nodes are always localized - nodeAspects.add(ContentModel.ASPECT_LOCALIZED); - return nodeAspects; - } - - @Override - public boolean hasNodeAspect(Long nodeId, QName aspectQName) - { - if (aspectQName.equals(ContentModel.ASPECT_REFERENCEABLE)) - { - // Nodes are always referenceable - return true; - } - if (aspectQName.equals(ContentModel.ASPECT_LOCALIZED)) - { - // Nodes are always localized - return true; - } - Set nodeAspects = getNodeAspectsCached(nodeId); - return nodeAspects.contains(aspectQName); - } - - @Override - public boolean addNodeAspects(Long nodeId, Set aspectQNames) - { - if (aspectQNames.size() == 0) - { - return false; - } - // Copy the inbound set - Set aspectQNamesToAdd = new HashSet(aspectQNames); - // Get existing - Set existingAspectQNames = getNodeAspectsCached(nodeId); - // Find out what needs adding - aspectQNamesToAdd.removeAll(existingAspectQNames); - aspectQNamesToAdd.remove(ContentModel.ASPECT_REFERENCEABLE); // Implicit - aspectQNamesToAdd.remove(ContentModel.ASPECT_LOCALIZED); // Implicit - if (aspectQNamesToAdd.isEmpty()) - { - // Nothing to do - return false; - } - // Add them - Set aspectQNameIds = qnameDAO.convertQNamesToIds(aspectQNamesToAdd, true); - startBatch(); - try - { - for (Long aspectQNameId : aspectQNameIds) - { - insertNodeAspect(nodeId, aspectQNameId); - } - } - catch (RuntimeException e) - { - // This could be because 
the cache is out of date - invalidateNodeCaches(nodeId); - throw e; - } - finally - { - executeBatch(); - } - - // Collate the new aspect set, so that touch recognizes the addtion of cm:auditable - Set newAspectQNames = new HashSet(existingAspectQNames); - newAspectQNames.addAll(aspectQNamesToAdd); - - // Handle sys:aspect_root - if (aspectQNames.contains(ContentModel.ASPECT_ROOT)) - { - // invalidate root nodes cache for the store - StoreRef storeRef = getNodeNotNull(nodeId, false).getStore().getStoreRef(); - allRootNodesCache.remove(storeRef); - // Touch the node; parent assocs need invalidation - touchNode(nodeId, null, newAspectQNames, false, false, true); - } - else - { - // Touch the node; all caches are fine - touchNode(nodeId, null, newAspectQNames, false, false, false); - } - - // Manually update the cache - setNodeAspectsCached(nodeId, newAspectQNames); - - // Done - return true; - } - - public boolean removeNodeAspects(Long nodeId) - { - Set newAspectQNames = Collections.emptySet(); - - // Touch the node; all caches are fine - touchNode(nodeId, null, newAspectQNames, false, false, false); - - // Just delete all the node's aspects - int deleteCount = deleteNodeAspects(nodeId, null); - - // Manually update the cache - setNodeAspectsCached(nodeId, newAspectQNames); - - // Done - return deleteCount > 0; - } - - @Override - public boolean removeNodeAspects(Long nodeId, Set aspectQNames) - { - if (aspectQNames.size() == 0) - { - return false; - } - // Get the current aspects - Set existingAspectQNames = getNodeAspects(nodeId); - - // Collate the new set of aspects so that touch works correctly against cm:auditable - Set newAspectQNames = new HashSet(existingAspectQNames); - newAspectQNames.removeAll(aspectQNames); - - // Touch the node; all caches are fine - touchNode(nodeId, null, newAspectQNames, false, false, false); - - // Now remove each aspect - Set aspectQNameIdsToRemove = qnameDAO.convertQNamesToIds(aspectQNames, false); - int deleteCount = deleteNodeAspects(nodeId, aspectQNameIdsToRemove); - if (deleteCount == 0) - { - return false; - } - - // Handle sys:aspect_root - if (aspectQNames.contains(ContentModel.ASPECT_ROOT)) - { - // invalidate root nodes cache for the store - StoreRef storeRef = getNodeNotNull(nodeId, false).getStore().getStoreRef(); - allRootNodesCache.remove(storeRef); - // Touch the node; parent assocs need invalidation - touchNode(nodeId, null, newAspectQNames, false, false, true); - } - else - { - // Touch the node; all caches are fine - touchNode(nodeId, null, newAspectQNames, false, false, false); - } - - // Manually update the cache - setNodeAspectsCached(nodeId, newAspectQNames); - - // Done - return deleteCount > 0; - } - - @Override - public void getNodesWithAspects( - Set aspectQNames, - Long minNodeId, Long maxNodeId, - NodeRefQueryCallback resultsCallback) - { - Set qnameIdsSet = qnameDAO.convertQNamesToIds(aspectQNames, false); - if (qnameIdsSet.size() == 0) - { - // No point running a query - return; - } - List qnameIds = new ArrayList(qnameIdsSet); - selectNodesWithAspects(qnameIds, minNodeId, maxNodeId, resultsCallback); - } - - @Override - public void getNodesWithAspects( - Set aspectQNames, - Long minNodeId, Long maxNodeId, boolean ordered, - NodeRefQueryCallback resultsCallback) - { - Set qnameIdsSet = qnameDAO.convertQNamesToIds(aspectQNames, false); - if (qnameIdsSet.size() == 0) - { - // No point running a query - return; - } - List qnameIds = new ArrayList(qnameIdsSet); - selectNodesWithAspects(qnameIds, minNodeId, maxNodeId, ordered, 
resultsCallback); - } - - @Override - public void getNodesWithAspects( - Set aspectQNames, - Long minNodeId, Long maxNodeId, boolean ordered, - int maxResults, - NodeRefQueryCallback resultsCallback) - { - Set qnameIdsSet = qnameDAO.convertQNamesToIds(aspectQNames, false); - if (qnameIdsSet.isEmpty()) - { - // No point running a query - return; - } - List qnameIds = new ArrayList<>(qnameIdsSet); - selectNodesWithAspects(qnameIds, minNodeId, maxNodeId, ordered, maxResults, resultsCallback); - } - - /** - * @return Returns a writable copy of the cached aspects set - */ - private Set getNodeAspectsCached(Long nodeId) - { - NodeVersionKey nodeVersionKey = getNodeNotNull(nodeId, false).getNodeVersionKey(); - Pair> cacheEntry = aspectsCache.getByKey(nodeVersionKey); - if (cacheEntry == null) - { - invalidateNodeCaches(nodeId); - throw new DataIntegrityViolationException("Invalid node ID: " + nodeId); - } - return new HashSet(cacheEntry.getSecond()); - } - - /** - * Update the node aspects cache. The incoming set will be wrapped to be unmodifiable. - */ - private void setNodeAspectsCached(Long nodeId, Set aspects) - { - NodeVersionKey nodeVersionKey = getNodeNotNull(nodeId, false).getNodeVersionKey(); - aspectsCache.setValue(nodeVersionKey, Collections.unmodifiableSet(aspects)); - } - - /** - * Helper method to copy cache values from one key to another - */ - private void copyNodeAspectsCached(NodeVersionKey from, NodeVersionKey to) - { - Set cacheEntry = aspectsCache.getValue(from); - if (cacheEntry != null) - { - aspectsCache.setValue(to, cacheEntry); - } - } - - /** - * Callback to cache node aspects. The DAO callback only does the simple {@link #findByKey(Serializable)}. - * - * @author Derek Hulley - * @since 3.4 - */ - private class AspectsCallbackDAO extends EntityLookupCallbackDAOAdaptor, Serializable> - { - public Pair> createValue(Set value) - { - throw new UnsupportedOperationException("A node always has a 'set' of aspects."); - } - - public Pair> findByKey(NodeVersionKey nodeVersionKey) - { - Long nodeId = nodeVersionKey.getNodeId(); - Set nodeIds = Collections.singleton(nodeId); - Map> nodeAspectQNameIdsByVersionKey = selectNodeAspects(nodeIds); - Set nodeAspectQNames = nodeAspectQNameIdsByVersionKey.get(nodeVersionKey); - if (nodeAspectQNames == null) - { - // Didn't find a match. Is this because there are none? - if (nodeAspectQNameIdsByVersionKey.size() == 0) - { - // This is OK. 
The node has no properties - nodeAspectQNames = Collections.emptySet(); - } - else - { - // We found properties associated with a different node ID and version - invalidateNodeCaches(nodeId); - throw new DataIntegrityViolationException( - "Detected stale node entry: " + nodeVersionKey + - " (now " + nodeAspectQNameIdsByVersionKey.keySet() + ")"); - } - } - // Done - return new Pair>(nodeVersionKey, Collections.unmodifiableSet(nodeAspectQNames)); - } - } - - /* - * Node assocs - */ - - @Override - public Long newNodeAssoc(Long sourceNodeId, Long targetNodeId, QName assocTypeQName, int assocIndex) - { - if (assocIndex == 0) - { - throw new IllegalArgumentException("Index is 1-based, or -1 to indicate 'next value'."); - } - - // Touch the node; all caches are fine - touchNode(sourceNodeId, null, null, false, false, false); - - // Resolve type QName - Long assocTypeQNameId = qnameDAO.getOrCreateQName(assocTypeQName).getFirst(); - - // Get the current max; we will need this no matter what - if (assocIndex <= 0) - { - int maxIndex = selectNodeAssocMaxIndex(sourceNodeId, assocTypeQNameId); - assocIndex = maxIndex + 1; - } - - Long result = null; - Savepoint savepoint = controlDAO.createSavepoint("NodeService.newNodeAssoc"); - try - { - result = insertNodeAssoc(sourceNodeId, targetNodeId, assocTypeQNameId, assocIndex); - controlDAO.releaseSavepoint(savepoint); - return result; - } - catch (Throwable e) - { - controlDAO.rollbackToSavepoint(savepoint); - if (isDebugEnabled) - { - logger.debug( - "Failed to insert node association: \n" + - " sourceNodeId: " + sourceNodeId + "\n" + - " targetNodeId: " + targetNodeId + "\n" + - " assocTypeQName: " + assocTypeQName + "\n" + - " assocIndex: " + assocIndex, - e); - } - throw new AssociationExistsException(sourceNodeId, targetNodeId, assocTypeQName); - } - } - - @Override - public void setNodeAssocIndex(Long id, int assocIndex) - { - int updated = updateNodeAssoc(id, assocIndex); - if (updated != 1) - { - throw new ConcurrencyFailureException("Expected to update exactly one row: " + id); - } - } - - @Override - public int removeNodeAssoc(Long sourceNodeId, Long targetNodeId, QName assocTypeQName) - { - Pair assocTypeQNamePair = qnameDAO.getQName(assocTypeQName); - if (assocTypeQNamePair == null) - { - // Never existed - return 0; - } - - Long assocTypeQNameId = assocTypeQNamePair.getFirst(); - int deleted = deleteNodeAssoc(sourceNodeId, targetNodeId, assocTypeQNameId); - if (deleted > 0) - { - // Touch the node; all caches are fine - touchNode(sourceNodeId, null, null, false, false, false); - } - return deleted; - } - - @Override - public int removeNodeAssocs(List ids) - { - int toDelete = ids.size(); - if (toDelete == 0) - { - return 0; - } - int deleted = deleteNodeAssocs(ids); - if (toDelete != deleted) - { - throw new ConcurrencyFailureException("Deleted " + deleted + " but expected " + toDelete); - } - return deleted; - } - - @Override - public Collection> getNodeAssocsToAndFrom(Long nodeId) - { - List nodeAssocEntities = selectNodeAssocs(nodeId); - List> results = new ArrayList>(nodeAssocEntities.size()); - for (NodeAssocEntity nodeAssocEntity : nodeAssocEntities) - { - Long assocId = nodeAssocEntity.getId(); - AssociationRef assocRef = nodeAssocEntity.getAssociationRef(qnameDAO); - results.add(new Pair(assocId, assocRef)); - } - return results; - } - - @Override - public Collection> getSourceNodeAssocs(Long targetNodeId, QName typeQName) - { - Long typeQNameId = null; - if (typeQName != null) - { - Pair typeQNamePair = qnameDAO.getQName(typeQName); 
- if (typeQNamePair == null) - { - // No such QName - return Collections.emptyList(); - } - typeQNameId = typeQNamePair.getFirst(); - } - List nodeAssocEntities = selectNodeAssocsByTarget(targetNodeId, typeQNameId); - List> results = new ArrayList>(nodeAssocEntities.size()); - for (NodeAssocEntity nodeAssocEntity : nodeAssocEntities) - { - Long assocId = nodeAssocEntity.getId(); - AssociationRef assocRef = nodeAssocEntity.getAssociationRef(qnameDAO); - results.add(new Pair(assocId, assocRef)); - } - return results; - } - - @Override - public Collection> getTargetNodeAssocs(Long sourceNodeId, QName typeQName) - { - Long typeQNameId = null; - if (typeQName != null) - { - Pair typeQNamePair = qnameDAO.getQName(typeQName); - if (typeQNamePair == null) - { - // No such QName - return Collections.emptyList(); - } - typeQNameId = typeQNamePair.getFirst(); - } - List nodeAssocEntities = selectNodeAssocsBySource(sourceNodeId, typeQNameId); - List> results = new ArrayList>(nodeAssocEntities.size()); - for (NodeAssocEntity nodeAssocEntity : nodeAssocEntities) - { - Long assocId = nodeAssocEntity.getId(); - AssociationRef assocRef = nodeAssocEntity.getAssociationRef(qnameDAO); - results.add(new Pair(assocId, assocRef)); - } - return results; - } - - @Override - public Collection> getTargetAssocsByPropertyValue(Long sourceNodeId, QName typeQName, QName propertyQName, Serializable propertyValue) - { - Long typeQNameId = null; - if (typeQName != null) - { - Pair typeQNamePair = qnameDAO.getQName(typeQName); - if (typeQNamePair == null) - { - // No such QName - return Collections.emptyList(); - } - typeQNameId = typeQNamePair.getFirst(); - } - - Long propertyQNameId = null; - NodePropertyValue nodeValue = null; - if (propertyQName != null) - { - - Pair propQNamePair = qnameDAO.getQName(propertyQName); - if (propQNamePair == null) - { - // No such QName - return Collections.emptyList(); - } - - propertyQNameId = propQNamePair.getFirst(); - - PropertyDefinition propertyDef = dictionaryService.getProperty(propertyQName); - - nodeValue = nodePropertyHelper.makeNodePropertyValue(propertyDef, propertyValue); - if (nodeValue != null) - { - switch (nodeValue.getPersistedType()) - { - case 1: // Boolean - case 3: // long - case 5: // double - case 6: // string - // no floats due to the range errors testing equality on a float. 
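                        // Illustrative aside, not from the original source: float equality is unreliable as a
                        // lookup key because distinct values can share one representation, e.g.
                        //     float f = 16777217f;                 // 2^24 + 1 has no exact float representation
                        //     boolean same = (f == 16777216f);     // true - both literals round to the same float
                        // hence only the boolean/long/double/string persisted types are accepted for this query.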
- break; - default: - throw new IllegalArgumentException("method not supported for persisted value type " + nodeValue.getPersistedType()); - } - } - } - - List nodeAssocEntities = selectNodeAssocsBySourceAndPropertyValue(sourceNodeId, typeQNameId, propertyQNameId, nodeValue); - - // Create custom result - List> results = new ArrayList>(nodeAssocEntities.size()); - for (NodeAssocEntity nodeAssocEntity : nodeAssocEntities) - { - Long assocId = nodeAssocEntity.getId(); - AssociationRef assocRef = nodeAssocEntity.getAssociationRef(qnameDAO); - results.add(new Pair(assocId, assocRef)); - } - return results; - } - - @Override - public Pair getNodeAssocOrNull(Long assocId) - { - NodeAssocEntity nodeAssocEntity = selectNodeAssocById(assocId); - if (nodeAssocEntity == null) - { - return null; - } - else - { - AssociationRef assocRef = nodeAssocEntity.getAssociationRef(qnameDAO); - return new Pair(assocId, assocRef); - } - } - - @Override - public Pair getNodeAssoc(Long assocId) - { - Pair ret = getNodeAssocOrNull(assocId); - if (ret == null) - { - throw new ConcurrencyFailureException("Assoc ID does not point to a valid association: " + assocId); - } - else - { - return ret; - } - } - - /* - * Child assocs - */ - - private ChildAssocEntity newChildAssocImpl( - Long parentNodeId, - Long childNodeId, - boolean isPrimary, - final QName assocTypeQName, - QName assocQName, - final String childNodeName, - boolean allowDeletedChild) - { - Assert.notNull(parentNodeId, "parentNodeId"); - Assert.notNull(childNodeId, "childNodeId"); - Assert.notNull(assocTypeQName, "assocTypeQName"); - Assert.notNull(assocQName, "assocQName"); - Assert.notNull(childNodeName, "childNodeName"); - - // Get parent and child nodes. We need them later, so just get them now. - final Node parentNode = getNodeNotNull(parentNodeId, true); - final Node childNode = getNodeNotNull(childNodeId, !allowDeletedChild); - - final ChildAssocEntity assoc = new ChildAssocEntity(); - // Parent node - assoc.setParentNode(new NodeEntity(parentNode)); - // Child node - assoc.setChildNode(new NodeEntity(childNode)); - // Type QName - assoc.setTypeQNameAll(qnameDAO, assocTypeQName, true); - // Child node name - assoc.setChildNodeNameAll(dictionaryService, assocTypeQName, childNodeName); - // QName - assoc.setQNameAll(qnameDAO, assocQName, true); - // Primary - assoc.setPrimary(isPrimary); - // Index - assoc.setAssocIndex(-1); - - Long assocId = newChildAssocInsert(assoc, assocTypeQName, childNodeName); - - // Persist it - assoc.setId(assocId); - - // Primary associations accompany new nodes, so we only have to bring the - // node into the current transaction for secondary associations - if (!isPrimary) - { - updateNode(childNodeId, null, null); - } - - // Done - if (isDebugEnabled) - { - logger.debug("Created child association: " + assoc); - } - return assoc; - } - - protected Long newChildAssocInsert(final ChildAssocEntity assoc, final QName assocTypeQName, final String childNodeName) - { - // Because we are retrying in-transaction i.e. absorbing exceptions, we need partial rollback &/or via savepoint if needed (eg. 
PostgreSQL) - RetryingCallback callback = new RetryingCallback() - { - public Long execute() throws Throwable - { - return newChildAssocInsertImpl(assoc, assocTypeQName, childNodeName); - } - }; - Long assocId = childAssocRetryingHelper.doWithRetry(callback); - return assocId; - } - - protected Long newChildAssocInsertImpl(final ChildAssocEntity assoc, final QName assocTypeQName, final String childNodeName) - { - Savepoint savepoint = controlDAO.createSavepoint("DuplicateChildNodeNameException"); - try - { - Long id = insertChildAssoc(assoc); - controlDAO.releaseSavepoint(savepoint); - return id; - } - catch (Throwable e) - { - controlDAO.rollbackToSavepoint(savepoint); - // DuplicateChildNodeNameException implements DoNotRetryException. - - // Allow real DB concurrency issues (e.g. DeadlockLoserDataAccessException) straight through for a retry - if (e instanceof ConcurrencyFailureException) - { - throw e; - } - - // There are some cases - FK violations, specifically - where we DO actually want to retry. - // Detecting this is done by looking for the related FK names, 'fk_alf_cass_*' in the error message - String lowerMsg = e.getMessage().toLowerCase(); - if (lowerMsg.contains("fk_alf_cass_")) - { - throw new ConcurrencyFailureException("FK violation updating primary parent association:" + assoc, e); - } - - // We assume that this is from the child cm:name constraint violation - throw new DuplicateChildNodeNameException( - assoc.getParentNode().getNodeRef(), - assocTypeQName, - childNodeName, - e); - } - } - - @Override - public Pair newChildAssoc( - Long parentNodeId, - Long childNodeId, - QName assocTypeQName, - QName assocQName, - String childNodeName) - { - ParentAssocsInfo parentAssocInfo = getParentAssocsCached(childNodeId); - // Create it - ChildAssocEntity assoc = newChildAssocImpl( - parentNodeId, childNodeId, false, assocTypeQName, assocQName, childNodeName, false); - Long assocId = assoc.getId(); - // Touch the node; parent assocs have been updated - touchNode(childNodeId, null, null, false, false, true); - // update cache - parentAssocInfo = parentAssocInfo.addAssoc(assocId, assoc); - setParentAssocsCached(childNodeId, parentAssocInfo); - // Done - return assoc.getPair(qnameDAO); - } - - @Override - public void deleteChildAssoc(Long assocId) - { - ChildAssocEntity assoc = selectChildAssoc(assocId); - if (assoc == null) - { - throw new ConcurrencyFailureException( - "Child association not found: " + assocId + ". A concurrency violation is likely.\n" + - "This can also occur if code reacts to 'beforeDelete' callbacks and pre-emptively deletes associations \n" + - "that are about to be cascade-deleted. 
The 'onDelete' phase then fails to delete the association.\n" + - "See links on issue ALF-12358."); // TODO: Get docs URL - } - // Update cache - Long childNodeId = assoc.getChildNode().getId(); - ParentAssocsInfo parentAssocInfo = getParentAssocsCached(childNodeId); - // Delete it - List assocIds = Collections.singletonList(assocId); - int count = deleteChildAssocs(assocIds); - if (count != 1) - { - throw new ConcurrencyFailureException("Child association not deleted: " + assocId); - } - // Touch the node; parent assocs have been updated - touchNode(childNodeId, null, null, false, false, true); - // Update cache - parentAssocInfo = parentAssocInfo.removeAssoc(assocId); - setParentAssocsCached(childNodeId, parentAssocInfo); - } - - @Override - public int setChildAssocIndex(Long parentNodeId, Long childNodeId, QName assocTypeQName, QName assocQName, int index) - { - int count = updateChildAssocIndex(parentNodeId, childNodeId, assocTypeQName, assocQName, index); - if (count > 0) - { - // Touch the node; parent assocs are out of sync - touchNode(childNodeId, null, null, false, false, true); - } - return count; - } - - /** - * TODO: See about pulling automatic cm:name update logic into this DAO - */ - @Override - public void setChildAssocsUniqueName(Long childNodeId, String childName) - { - Integer count = setChildAssocsUniqueNameImpl(childNodeId, childName); - - if (count > 0) - { - // Touch the node; parent assocs are out of sync - touchNode(childNodeId, null, null, false, false, true); - } - - if (isDebugEnabled) - { - logger.debug( - "Updated cm:name to parent assocs: \n" + - " Node: " + childNodeId + "\n" + - " Name: " + childName + "\n" + - " Updated: " + count); - } - } - - protected int setChildAssocsUniqueNameImpl(final Long childNodeId, final String childName) - { - // Because we are retrying in-transaction i.e. absorbing exceptions, we need partial rollback &/or via savepoint if needed (eg. PostgreSQL) - RetryingCallback callback = new RetryingCallback() - { - public Integer execute() throws Throwable - { - return updateChildAssocUniqueNameImpl(childNodeId, childName); - } - }; - return childAssocRetryingHelper.doWithRetry(callback); - } - - protected int updateChildAssocUniqueNameImpl(final Long childNodeId, final String childName) - { - int total = 0; - Savepoint savepoint = controlDAO.createSavepoint("DuplicateChildNodeNameException"); - try - { - for (ChildAssocEntity parentAssoc : getParentAssocsCached(childNodeId).getParentAssocs().values()) - { - // Subtlety: We only update those associations for which name uniqueness checking is enforced. 
- // Such associations have a positive CRC - if (parentAssoc.getChildNodeNameCrc() <= 0) - { - continue; - } - Pair oldTypeQnamePair = qnameDAO.getQName(parentAssoc.getTypeQNameId()); - // Ensure we invalidate the name cache (the child version key might not be 'bumped' by the next - // 'touch') - if (oldTypeQnamePair != null) - { - childByNameCache.remove(new ChildByNameKey(parentAssoc.getParentNode().getId(), - oldTypeQnamePair.getSecond(), parentAssoc.getChildNodeName())); - } - int count = updateChildAssocUniqueName(parentAssoc.getId(), childName); - if (count <= 0) - { - // Should not be attempting to delete a deleted node - throw new ConcurrencyFailureException("Failed to update an existing parent association " - + parentAssoc.getId()); - } - total += count; - } - controlDAO.releaseSavepoint(savepoint); - return total; - } - catch (Throwable e) - { - controlDAO.rollbackToSavepoint(savepoint); - // We assume that this is from the child cm:name constraint violation - throw new DuplicateChildNodeNameException(null, null, childName, e); - } - } - - @Override - public Pair getChildAssoc(Long assocId) - { - ChildAssocEntity assoc = selectChildAssoc(assocId); - if (assoc == null) - { - throw new ConcurrencyFailureException("Child association not found: " + assocId); - } - return assoc.getPair(qnameDAO); - } - - @Override - public List getPrimaryChildrenAcls(Long nodeId) - { - return selectPrimaryChildAcls(nodeId); - } - - @Override - public Pair getChildAssoc( - Long parentNodeId, - Long childNodeId, - QName assocTypeQName, - QName assocQName) - { - List assocs = selectChildAssoc(parentNodeId, childNodeId, assocTypeQName, assocQName); - if (assocs.size() == 0) - { - return null; - } - else if (assocs.size() == 1) - { - return assocs.get(0).getPair(qnameDAO); - } - // Keep the primary association or, if there isn't one, the association with the smallest ID - Map assocsToDeleteById = new HashMap(assocs.size() * 2); - Long minId = null; - Long primaryId = null; - for (ChildAssocEntity assoc : assocs) - { - // First store it - Long assocId = assoc.getId(); - assocsToDeleteById.put(assocId, assoc); - if (minId == null || minId.compareTo(assocId) > 0) - { - minId = assocId; - } - if (assoc.isPrimary()) - { - primaryId = assocId; - } - } - // Remove either the primary or min assoc - Long assocToKeepId = primaryId == null ? minId : primaryId; - ChildAssocEntity assocToKeep = assocsToDeleteById.remove(assocToKeepId); - // If the current transaction allows, remove the other associations - if (AlfrescoTransactionSupport.getTransactionReadState() == TxnReadState.TXN_READ_WRITE) - { - for (Long assocIdToDelete : assocsToDeleteById.keySet()) - { - deleteChildAssoc(assocIdToDelete); - } - } - // Done - return assocToKeep.getPair(qnameDAO); - } - - /** - * Callback that applies node preloading if required. - *
- * Instances must be used and discarded per query. - * - * @author Derek Hulley - * @since 3.4 - */ - private class ChildAssocRefBatchingQueryCallback implements ChildAssocRefQueryCallback - { - private final ChildAssocRefQueryCallback callback; - private final boolean preload; - private final List nodeRefs; - /** - * @param callback the callback to batch around - */ - private ChildAssocRefBatchingQueryCallback(ChildAssocRefQueryCallback callback) - { - this.callback = callback; - this.preload = callback.preLoadNodes(); - if (preload) - { - nodeRefs = new LinkedList(); // No memory required - } - else - { - nodeRefs = null; // No list needed - } - } - /** - * @throws UnsupportedOperationException always - */ - public boolean preLoadNodes() - { - throw new UnsupportedOperationException("Expected to be used internally only."); - } - /** - * Defers to delegate - */ - @Override - public boolean orderResults() - { - return callback.orderResults(); - } - /** - * {@inheritDoc} - */ - public boolean handle( - Pair childAssocPair, - Pair parentNodePair, - Pair childNodePair) - { - if (preload) - { - nodeRefs.add(childNodePair.getSecond()); - } - return callback.handle(childAssocPair, parentNodePair, childNodePair); - } - public void done() - { - // Finish the batch - if (preload && nodeRefs.size() > 0) - { - cacheNodes(nodeRefs); - nodeRefs.clear(); - } - // Done - callback.done(); - } - } - - @Override - public void getChildAssocs( - Long parentNodeId, - Long childNodeId, - QName assocTypeQName, - QName assocQName, - Boolean isPrimary, - Boolean sameStore, - ChildAssocRefQueryCallback resultsCallback) - { - selectChildAssocs( - parentNodeId, childNodeId, - assocTypeQName, assocQName, isPrimary, sameStore, - new ChildAssocRefBatchingQueryCallback(resultsCallback)); - } - - @Override - public void getChildAssocs( - Long parentNodeId, - QName assocTypeQName, - QName assocQName, - int maxResults, - ChildAssocRefQueryCallback resultsCallback) - { - selectChildAssocs( - parentNodeId, - assocTypeQName, - assocQName, - maxResults, - new ChildAssocRefBatchingQueryCallback(resultsCallback)); - } - - @Override - public void getChildAssocs(Long parentNodeId, Set assocTypeQNames, ChildAssocRefQueryCallback resultsCallback) - { - switch (assocTypeQNames.size()) - { - case 0: - return; // No results possible - case 1: - QName assocTypeQName = assocTypeQNames.iterator().next(); - selectChildAssocs( - parentNodeId, null, assocTypeQName, (QName) null, null, null, - new ChildAssocRefBatchingQueryCallback(resultsCallback)); - break; - default: - selectChildAssocs( - parentNodeId, assocTypeQNames, - new ChildAssocRefBatchingQueryCallback(resultsCallback)); - } - } - - /** - * Checks a cache and then queries. - *
- * Note: If we were to cach misses, then we would have to ensure that the cache is - * kept up to date whenever any affection association is changed. This is actually - * not possible without forcing the cache to be fully clustered. So to - * avoid clustering the cache, we instead watch the node child version, - * which relies on a cache that is already clustered. - */ - @Override - public Pair getChildAssoc(Long parentNodeId, QName assocTypeQName, String childName) - { - ChildByNameKey key = new ChildByNameKey(parentNodeId, assocTypeQName, childName); - ChildAssocEntity assoc = childByNameCache.get(key); - boolean query = false; - if (assoc == null) - { - query = true; - } - else - { - // Check that the resultant child node has not moved on - Node childNode = assoc.getChildNode(); - Long childNodeId = childNode.getId(); - NodeVersionKey childNodeVersionKey = childNode.getNodeVersionKey(); - Pair childNodeFromCache = nodesCache.getByKey(childNodeId); - if (childNodeFromCache == null) - { - // Child node no longer exists (or never did) - query = true; - } - else - { - NodeVersionKey childNodeFromCacheVersionKey = childNodeFromCache.getSecond().getNodeVersionKey(); - if (!childNodeFromCacheVersionKey.equals(childNodeVersionKey)) - { - // The child node has moved on. We don't know why, but must query again. - query = true; - } - } - } - if (query) - { - assoc = selectChildAssoc(parentNodeId, assocTypeQName, childName); - if (assoc != null) - { - childByNameCache.put(key, assoc); - } - else - { - // We do not cache misses. See javadoc. - } - } - // Now return, checking the assoc's ID for null - return assoc == null ? null : assoc.getPair(qnameDAO); - } - - @Override - public void getChildAssocs( - Long parentNodeId, - QName assocTypeQName, - Collection childNames, - ChildAssocRefQueryCallback resultsCallback) - { - selectChildAssocs( - parentNodeId, assocTypeQName, childNames, - new ChildAssocRefBatchingQueryCallback(resultsCallback)); - } - - @Override - public void getChildAssocsByPropertyValue( - Long parentNodeId, - QName propertyQName, - Serializable value, - ChildAssocRefQueryCallback resultsCallback) - { - PropertyDefinition propertyDef = dictionaryService.getProperty(propertyQName); - NodePropertyValue nodeValue = nodePropertyHelper.makeNodePropertyValue(propertyDef, value); - - if(nodeValue != null) - { - switch (nodeValue.getPersistedType()) - { - case 1: // Boolean - case 3: // long - case 5: // double - case 6: // string - // no floats due to the range errors testing equality on a float. 
- break; - - default: - throw new IllegalArgumentException("method not supported for persisted value type " + nodeValue.getPersistedType()); - } - - selectChildAssocsByPropertyValue(parentNodeId, - propertyQName, - nodeValue, - new ChildAssocRefBatchingQueryCallback(resultsCallback)); - } - } - - @Override - public void getChildAssocsByChildTypes( - Long parentNodeId, - Set childNodeTypeQNames, - ChildAssocRefQueryCallback resultsCallback) - { - selectChildAssocsByChildTypes( - parentNodeId, childNodeTypeQNames, - new ChildAssocRefBatchingQueryCallback(resultsCallback)); - } - - @Override - public void getChildAssocsWithoutParentAssocsOfType( - Long parentNodeId, - QName assocTypeQName, - ChildAssocRefQueryCallback resultsCallback) - { - selectChildAssocsWithoutParentAssocsOfType( - parentNodeId, assocTypeQName, - new ChildAssocRefBatchingQueryCallback(resultsCallback)); - } - - @Override - public Pair getPrimaryParentAssoc(Long childNodeId) - { - ChildAssocEntity childAssocEntity = getPrimaryParentAssocImpl(childNodeId); - if(childAssocEntity == null) - { - return null; - } - else - { - return childAssocEntity.getPair(qnameDAO); - } - } - - private ChildAssocEntity getPrimaryParentAssocImpl(Long childNodeId) - { - ParentAssocsInfo parentAssocs = getParentAssocsCached(childNodeId); - return parentAssocs.getPrimaryParentAssoc(); - } - - private static final int PARENT_ASSOCS_CACHE_FILTER_THRESHOLD = 2000; - - @Override - public void getParentAssocs( - Long childNodeId, - QName assocTypeQName, - QName assocQName, - Boolean isPrimary, - ChildAssocRefQueryCallback resultsCallback) - { - if (assocTypeQName == null && assocQName == null && isPrimary == null) - { - // Go for the cache (and return all) - ParentAssocsInfo parentAssocs = getParentAssocsCached(childNodeId); - for (ChildAssocEntity assoc : parentAssocs.getParentAssocs().values()) - { - resultsCallback.handle( - assoc.getPair(qnameDAO), - assoc.getParentNode().getNodePair(), - assoc.getChildNode().getNodePair()); - } - resultsCallback.done(); - } - else - { - // Decide whether we query or filter - ParentAssocsInfo parentAssocs = getParentAssocsCached(childNodeId); - if (parentAssocs.getParentAssocs().size() > PARENT_ASSOCS_CACHE_FILTER_THRESHOLD) - { - // Query - selectParentAssocs(childNodeId, assocTypeQName, assocQName, isPrimary, resultsCallback); - } - else - { - // Go for the cache (and filter) - for (ChildAssocEntity assoc : parentAssocs.getParentAssocs().values()) - { - Pair assocPair = assoc.getPair(qnameDAO); - if (((assocTypeQName == null) || (assocPair.getSecond().getTypeQName().equals(assocTypeQName))) && - ((assocQName == null) || (assocPair.getSecond().getQName().equals(assocQName)))) - { - resultsCallback.handle( - assocPair, - assoc.getParentNode().getNodePair(), - assoc.getChildNode().getNodePair()); - } - } - resultsCallback.done(); - } - - } - } - - /** - * Potentially cheaper than evaluating all of a node's paths to check for child association cycles - *
- * TODO: When is it cheaper to go up and when is it cheaper to go down? - * Look at using direct queries to pass through layers both up and down. - * - * @param nodeId the node to start with - */ - @Override - public void cycleCheck(Long nodeId) - { - CycleCallBack callback = new CycleCallBack(); - callback.cycleCheck(nodeId); - if (callback.toThrow != null) - { - throw callback.toThrow; - } - } - - private class CycleCallBack implements ChildAssocRefQueryCallback - { - final Set nodeIds = new HashSet(97); - CyclicChildRelationshipException toThrow; - - @Override - public void done() - { - } - - @Override - public boolean handle( - Pair childAssocPair, - Pair parentNodePair, - Pair childNodePair) - { - Long nodeId = childNodePair.getFirst(); - if (!nodeIds.add(nodeId)) - { - ChildAssociationRef childAssociationRef = childAssocPair.getSecond(); - // Remember exception we want to throw and exit. If we throw within here, it will be wrapped by IBatis - toThrow = new CyclicChildRelationshipException( - "Child Association Cycle detected hitting nodes: " + nodeIds, - childAssociationRef); - return false; - } - cycleCheck(nodeId); - nodeIds.remove(nodeId); - return toThrow == null; - } - - /** - * No preloading required - */ - @Override - public boolean preLoadNodes() - { - return false; - } - - /** - * No ordering required - */ - @Override - public boolean orderResults() - { - return false; - } - - public void cycleCheck(Long nodeId) - { - getChildAssocs(nodeId, null, null, null, null, null, this); - } - }; - - - @Override - public List getPaths(Pair nodePair, boolean primaryOnly) throws InvalidNodeRefException - { - // create storage for the paths - only need 1 bucket if we are looking for the primary path - List paths = new ArrayList(primaryOnly ? 1 : 10); - // create an empty current path to start from - Path currentPath = new Path(); - // create storage for touched associations - Stack assocIdStack = new Stack(); - - // call recursive method to sort it out - prependPaths(nodePair, null, currentPath, paths, assocIdStack, primaryOnly); - - // check that for the primary only case we have exactly one path - if (primaryOnly && paths.size() != 1) - { - throw new RuntimeException("Node has " + paths.size() + " primary paths: " + nodePair); - } - - // done - if (loggerPaths.isDebugEnabled()) - { - StringBuilder sb = new StringBuilder(256); - if (primaryOnly) - { - sb.append("Primary paths"); - } - else - { - sb.append("Paths"); - } - sb.append(" for node ").append(nodePair); - for (Path path : paths) - { - sb.append("\n").append(" ").append(path); - } - loggerPaths.debug(sb); - } - return paths; - } - - private void bindFixAssocAndCollectLostAndFound(final Pair lostNodePair, final String lostName, final Long assocId, final boolean orphanChild) - { - // Remember the items already deleted in inner transactions - final Set> lostNodePairs = TransactionalResourceHelper.getSet(KEY_LOST_NODE_PAIRS); - final Set deletedAssocs = TransactionalResourceHelper.getSet(KEY_DELETED_ASSOCS); - AlfrescoTransactionSupport.bindListener(new TransactionListenerAdapter() - { - @Override - public void afterRollback() - { - afterCommit(); - } - - @Override - public void afterCommit() - { - if (transactionService.getAllowWrite()) - { - // New transaction - RetryingTransactionCallback callback = new RetryingTransactionCallback() - { - public Void execute() throws Throwable - { - if (assocId == null) - { - // 'child' with missing parent assoc => collect lost+found orphan child - if (lostNodePairs.add(lostNodePair)) - { - 
collectLostAndFoundNode(lostNodePair, lostName); - logger.error("ALF-13066: Orphan child node has been re-homed under lost_found: " - + lostNodePair); - } - } - else - { - // 'child' with deleted parent assoc => delete invalid parent assoc and if primary then - // collect lost+found orphan child - if (deletedAssocs.add(assocId)) - { - deleteChildAssoc(assocId); // Can't use caching version or may hit infinite loop - logger.error("ALF-12358: Deleted node - removed child assoc: " + assocId); - } - - if (orphanChild && lostNodePairs.add(lostNodePair)) - { - collectLostAndFoundNode(lostNodePair, lostName); - logger.error("ALF-12358: Orphan child node has been re-homed under lost_found: " - + lostNodePair); - } - } - - return null; - } - }; - transactionService.getRetryingTransactionHelper().doInTransaction(callback, false, true); - } - } - }); - } - - /** - * TODO: Remove once ALF-12358 has been proven to be fixed i.e. no more orphans are created ... ever. - */ - private void collectLostAndFoundNode(Pair lostNodePair, String lostName) - { - Long childNodeId = lostNodePair.getFirst(); - NodeRef lostNodeRef = lostNodePair.getSecond(); - - Long newParentNodeId = getOrCreateLostAndFoundContainer(lostNodeRef.getStoreRef()).getId(); - - String assocName = lostName+"-"+System.currentTimeMillis(); - // Create new primary assoc (re-home the orphan node under lost_found) - ChildAssocEntity assoc = newChildAssocImpl(newParentNodeId, - childNodeId, - true, - ContentModel.ASSOC_CHILDREN, - QName.createQName(assocName), - assocName, - true); - - // Touch the node; all caches are fine - touchNode(childNodeId, null, null, false, false, false); - - // update cache - boolean isRoot = false; - boolean isStoreRoot = false; - ParentAssocsInfo parentAssocInfo = new ParentAssocsInfo(isRoot, isStoreRoot, assoc); - setParentAssocsCached(childNodeId, parentAssocInfo); - - /* - // Update ACLs for moved tree - note: actually a NOOP if oldParentAclId is null - Long newParentAclId = newParentNode.getAclId(); - Long oldParentAclId = null; // unknown - accessControlListDAO.updateInheritance(childNodeId, oldParentAclId, newParentAclId); - */ - } - - private Node getOrCreateLostAndFoundContainer(StoreRef storeRef) - { - Pair rootNodePair = getRootNode(storeRef); - Long rootParentNodeId = rootNodePair.getFirst(); - - final List> nodes = new ArrayList>(1); - NodeDAO.ChildAssocRefQueryCallback callback = new NodeDAO.ChildAssocRefQueryCallback() - { - public boolean handle( - Pair childAssocPair, - Pair parentNodePair, - Pair childNodePair - ) - { - nodes.add(childNodePair); - // More results - return true; - } - - @Override - public boolean preLoadNodes() - { - return false; - } - - @Override - public boolean orderResults() - { - return false; - } - - @Override - public void done() - { - } - }; - Set assocTypeQNames = new HashSet(1); - assocTypeQNames.add(ContentModel.ASSOC_LOST_AND_FOUND); - getChildAssocs(rootParentNodeId, assocTypeQNames, callback); - - Node lostFoundNode = null; - if (nodes.size() > 0) - { - Long lostFoundNodeId = nodes.get(0).getFirst(); - lostFoundNode = getNodeNotNull(lostFoundNodeId, true); - if (nodes.size() > 1) - { - logger.warn("More than one lost_found, using first: " + lostFoundNode.getNodeRef()); - } - } - else - { - Locale locale = localeDAO.getOrCreateDefaultLocalePair().getSecond(); - - lostFoundNode = newNode( - rootParentNodeId, - ContentModel.ASSOC_LOST_AND_FOUND, - ContentModel.ASSOC_LOST_AND_FOUND, - storeRef, - null, - ContentModel.TYPE_LOST_AND_FOUND, - locale, - 
ContentModel.ASSOC_LOST_AND_FOUND.getLocalName(), - null).getChildNode(); - - logger.info("Created lost_found: " + lostFoundNode.getNodeRef()); - } - - return lostFoundNode; - } - - /** - * Build the paths for a node - * - * @param currentNodePair the leave or child node to start with - * @param currentRootNodePair pass in null only - * @param currentPath an empty {@link Path} - * @param completedPaths completed paths i.e. the result - * @param assocIdStack a stack to detected cyclic relationships - * @param primaryOnly true to follow only primary parent associations - * @throws CyclicChildRelationshipException - */ - private void prependPaths( - Pair currentNodePair, - Pair currentRootNodePair, - Path currentPath, - Collection completedPaths, - Stack assocIdStack, - boolean primaryOnly) throws CyclicChildRelationshipException - { - if (isDebugEnabled) - { - logger.debug("\n" + - "Prepending paths: \n" + - " Current node: " + currentNodePair + "\n" + - " Current root: " + currentRootNodePair + "\n" + - " Current path: " + currentPath); - } - Long currentNodeId = currentNodePair.getFirst(); - NodeRef currentNodeRef = currentNodePair.getSecond(); - - // Check if we have changed root nodes - StoreRef currentStoreRef = currentNodeRef.getStoreRef(); - if (currentRootNodePair == null || !currentStoreRef.equals(currentRootNodePair.getFirst())) - { - // We've changed stores - Pair rootNodePair = getRootNode(currentStoreRef); - currentRootNodePair = new Pair(currentStoreRef, rootNodePair.getSecond()); - } - - // get the parent associations of the given node - ParentAssocsInfo parentAssocInfo = getParentAssocsCached(currentNodeId); // note: currently may throw NotLiveNodeException - // bulk load parents as we are certain to hit them in the next call - ArrayList toLoad = new ArrayList(parentAssocInfo.getParentAssocs().size()); - for(Map.Entry entry : parentAssocInfo.getParentAssocs().entrySet()) - { - toLoad.add(entry.getValue().getParentNode().getId()); - } - cacheNodesById(toLoad); - - // does the node have parents - boolean hasParents = parentAssocInfo.getParentAssocs().size() > 0; - // does the current node have a root aspect? - - // look for a root. If we only want the primary root, then ignore all but the top-level root. - if (!(primaryOnly && hasParents) && parentAssocInfo.isRoot()) // exclude primary search with parents present - { - // create a one-sided assoc ref for the root node and prepend to the stack - // this effectively spoofs the fact that the current node is not below the root - // - we put this assoc in as the first assoc in the path must be a one-sided - // reference pointing to the root node - ChildAssociationRef assocRef = new ChildAssociationRef(null, null, null, currentRootNodePair.getSecond()); - // create a path to save and add the 'root' assoc - Path pathToSave = new Path(); - Path.ChildAssocElement first = null; - for (Path.Element element : currentPath) - { - if (first == null) - { - first = (Path.ChildAssocElement) element; - } - else - { - pathToSave.append(element); - } - } - if (first != null) - { - // mimic an association that would appear if the current node was below the root node - // or if first beneath the root node it will make the real thing - ChildAssociationRef updateAssocRef = new ChildAssociationRef( - parentAssocInfo.isStoreRoot() ? 
ContentModel.ASSOC_CHILDREN : first.getRef().getTypeQName(), - currentRootNodePair.getSecond(), - first.getRef().getQName(), - first.getRef().getChildRef()); - Path.Element newFirst = new Path.ChildAssocElement(updateAssocRef); - pathToSave.prepend(newFirst); - } - - Path.Element element = new Path.ChildAssocElement(assocRef); - pathToSave.prepend(element); - - // store the path just built - completedPaths.add(pathToSave); - } - - // walk up each parent association - for (Map.Entry entry : parentAssocInfo.getParentAssocs().entrySet()) - { - Long assocId = entry.getKey(); - ChildAssocEntity assoc = entry.getValue(); - ChildAssociationRef assocRef = assoc.getRef(qnameDAO); - // do we consider only primary assocs? - if (primaryOnly && !assocRef.isPrimary()) - { - continue; - } - // Ordering is meaningless here as we are constructing a path upwards - // and have no idea where the node comes in the sibling order or even - // if there are like-pathed siblings. - assocRef.setNthSibling(-1); - // build a path element - Path.Element element = new Path.ChildAssocElement(assocRef); - // create a new path that builds on the current path - Path path = new Path(); - path.append(currentPath); - // prepend element - path.prepend(element); - // get parent node pair - Pair parentNodePair = new Pair( - assoc.getParentNode().getId(), - assocRef.getParentRef()); - - // does the association already exist in the stack - if (assocIdStack.contains(assocId)) - { - // the association was present already - logger.error( - "Cyclic parent-child relationship detected: \n" + - " current node: " + currentNodeId + "\n" + - " current path: " + currentPath + "\n" + - " next assoc: " + assocId); - throw new CyclicChildRelationshipException("Node has been pasted into its own tree.", assocRef); - } - - if (isDebugEnabled) - { - logger.debug("\n" + - " Prepending path parent: \n" + - " Parent node: " + parentNodePair); - } - - // push the assoc stack, recurse and pop - assocIdStack.push(assocId); - - prependPaths(parentNodePair, currentRootNodePair, path, completedPaths, assocIdStack, primaryOnly); - - assocIdStack.pop(); - } - // done - } - - /** - * A Map-like class for storing ParentAssocsInfos. It prunes its oldest ParentAssocsInfo entries not only when a - * capacity is reached, but also when a total number of cached parents is reached, as this is what dictates the - * overall memory usage. 
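The ParentAssocsCache described above evicts its oldest entries whenever either the entry count or the total number of cached parents exceeds a limit. A minimal sketch of that dual-limit eviction idea follows, using a LinkedHashMap for age ordering instead of the hand-rolled key links and ReadWriteLock used in the class below; every name in the sketch is illustrative, not part of the patch.

import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.ToIntFunction;

class DualLimitCache<K, V>
{
    private final int maxEntries;
    private final int maxTotalWeight;
    private final ToIntFunction<V> weigher;                        // e.g. v -> v.getParentAssocs().size()
    private final LinkedHashMap<K, V> map = new LinkedHashMap<>(); // insertion order == age
    private int totalWeight;

    DualLimitCache(int maxEntries, int limitFactor, ToIntFunction<V> weigher)
    {
        this.maxEntries = maxEntries;
        this.maxTotalWeight = maxEntries * limitFactor;
        this.weigher = weigher;
    }

    synchronized V get(K key)
    {
        return map.get(key);
    }

    synchronized void put(K key, V value)
    {
        V old = map.remove(key);                                   // re-adding moves the key to "youngest"
        if (old != null)
        {
            totalWeight -= weigher.applyAsInt(old);
        }
        map.put(key, value);
        totalWeight += weigher.applyAsInt(value);
        // Prune oldest-first while either the entry count or the total weight exceeds its limit
        Iterator<Map.Entry<K, V>> it = map.entrySet().iterator();
        while ((map.size() > maxEntries || totalWeight > maxTotalWeight) && it.hasNext())
        {
            totalWeight -= weigher.applyAsInt(it.next().getValue());
            it.remove();
        }
    }
}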
- */ - private static class ParentAssocsCache - { - private final ReadWriteLock lock = new ReentrantReadWriteLock(); - private final int size; - private final int maxParentCount; - private final Map, ParentAssocsInfo> cache; - private final Map, Pair > nextKeys; - private final Map, Pair > previousKeys; - private Pair firstKey; - private Pair lastKey; - private int parentCount; - - /** - * @param size int - * @param limitFactor int - */ - public ParentAssocsCache(int size, int limitFactor) - { - this.size = size; - this.maxParentCount = size * limitFactor; - final int mapSize = size * 2; - this.cache = new HashMap, ParentAssocsInfo>(mapSize); - this.nextKeys = new HashMap, Pair >(mapSize); - this.previousKeys = new HashMap, Pair >(mapSize); - } - - private ParentAssocsInfo get(Pair cacheKey) - { - lock.readLock().lock(); - try - { - return cache.get(cacheKey); - } - finally - { - lock.readLock().unlock(); - } - } - - private void put(Pair cacheKey, ParentAssocsInfo parentAssocs) - { - lock.writeLock().lock(); - try - { - // If an entry already exists, remove it and do the necessary housekeeping - if (cache.containsKey(cacheKey)) - { - remove(cacheKey); - } - - // Add the value and prepend the key - cache.put(cacheKey, parentAssocs); - if (firstKey == null) - { - lastKey = cacheKey; - } - else - { - nextKeys.put(cacheKey, firstKey); - previousKeys.put(firstKey, cacheKey); - } - firstKey = cacheKey; - parentCount += parentAssocs.getParentAssocs().size(); - - // Now prune the oldest entries whilst we have more cache entries or cached parents than desired - int currentSize = cache.size(); - while (currentSize > size || parentCount > maxParentCount) - { - remove(lastKey); - currentSize--; - } - } - finally - { - lock.writeLock().unlock(); - } - } - - private ParentAssocsInfo remove(Pair cacheKey) - { - lock.writeLock().lock(); - try - { - // Remove from the map - ParentAssocsInfo oldParentAssocs = cache.remove(cacheKey); - - // If the object didn't exist, we are done - if (oldParentAssocs == null) - { - return null; - } - - // Re-link the list - Pair previousCacheKey = previousKeys.remove(cacheKey); - Pair nextCacheKey = nextKeys.remove(cacheKey); - if (nextCacheKey == null) - { - if (previousCacheKey == null) - { - firstKey = lastKey = null; - } - else - { - lastKey = previousCacheKey; - nextKeys.remove(previousCacheKey); - } - } - else - { - if (previousCacheKey == null) - { - firstKey = nextCacheKey; - previousKeys.remove(nextCacheKey); - } - else - { - nextKeys.put(previousCacheKey, nextCacheKey); - previousKeys.put(nextCacheKey, previousCacheKey); - } - } - // Update the parent count - parentCount -= oldParentAssocs.getParentAssocs().size(); - return oldParentAssocs; - } - finally - { - lock.writeLock().unlock(); - } - } - - private void clear() - { - lock.writeLock().lock(); - try - { - cache.clear(); - nextKeys.clear(); - previousKeys.clear(); - firstKey = lastKey = null; - parentCount = 0; - } - finally - { - lock.writeLock().unlock(); - } - } - } - - /** - * @return Returns a node's parent associations - */ - private ParentAssocsInfo getParentAssocsCached(Long nodeId) - { - Node node = getNodeNotNull(nodeId, false); - Pair cacheKey = new Pair(nodeId, node.getTransaction().getChangeTxnId()); - ParentAssocsInfo value = parentAssocsCache.get(cacheKey); - if (value == null) - { - value = loadParentAssocs(node.getNodeVersionKey()); - parentAssocsCache.put(cacheKey, value); - } - - // We have already validated on loading that we have a list in sync with the child node, so if the list is 
still - // empty we have an integrity problem - if (value.getPrimaryParentAssoc() == null && !node.getDeleted(qnameDAO) && !value.isStoreRoot()) - { - Pair currentNodePair = node.getNodePair(); - // We have a corrupt repository - non-root node has a missing parent ?! - bindFixAssocAndCollectLostAndFound(currentNodePair, "nonRootNodeWithoutParents", null, false); - - // throw - error will be logged and then bound txn listener (afterRollback) will be called - throw new NonRootNodeWithoutParentsException(currentNodePair); - } - - return value; - } - - /** - * Update a node's parent associations. - */ - private void setParentAssocsCached(Long nodeId, ParentAssocsInfo parentAssocs) - { - Node node = getNodeNotNull(nodeId, false); - Pair cacheKey = new Pair(nodeId, node.getTransaction().getChangeTxnId()); - parentAssocsCache.put(cacheKey, parentAssocs); - } - - /** - * Helper method to copy cache values from one key to another - */ - private void copyParentAssocsCached(Node from) - { - String fromTransactionId = from.getTransaction().getChangeTxnId(); - String toTransactionId = getCurrentTransaction().getChangeTxnId(); - // If the node is already in this transaction, there's nothing to do - if (fromTransactionId.equals(toTransactionId)) - { - return; - } - Pair cacheKey = new Pair(from.getId(), fromTransactionId); - ParentAssocsInfo cacheEntry = parentAssocsCache.get(cacheKey); - if (cacheEntry != null) - { - parentAssocsCache.put(new Pair(from.getId(), toTransactionId), cacheEntry); - } - } - - /** - * Helper method to remove associations relating to a cached node - */ - private void invalidateParentAssocsCached(Node node) - { - // Invalidate both the node and current transaction ID, just in case - Long nodeId = node.getId(); - String nodeTransactionId = node.getTransaction().getChangeTxnId(); - parentAssocsCache.remove(new Pair(nodeId, nodeTransactionId)); - if (AlfrescoTransactionSupport.getTransactionReadState() == TxnReadState.TXN_READ_WRITE) - { - String currentTransactionId = getCurrentTransaction().getChangeTxnId(); - if (!currentTransactionId.equals(nodeTransactionId)) - { - parentAssocsCache.remove(new Pair(nodeId, currentTransactionId)); - } - } - } - - private ParentAssocsInfo loadParentAssocs(NodeVersionKey nodeVersionKey) - { - Long nodeId = nodeVersionKey.getNodeId(); - // Find out if it is a root or store root - boolean isRoot = hasNodeAspect(nodeId, ContentModel.ASPECT_ROOT); - boolean isStoreRoot = getNodeType(nodeId).equals(ContentModel.TYPE_STOREROOT); - - // Select all the parent associations - List assocs = selectParentAssocs(nodeId); - - // Build the cache object - ParentAssocsInfo value = new ParentAssocsInfo(isRoot, isStoreRoot, assocs); - - // Now check if we are seeing the correct version of the node - if (assocs.isEmpty()) - { - // No results. - // Nodes without parents are root nodes or deleted nodes. The latter will not normally - // be accessed here but it is possible. - // To match earlier fixes of ALF-12393, we do a double-check of the node's details. - NodeEntity nodeCheckFromDb = selectNodeById(nodeId); - if (nodeCheckFromDb == null || !nodeCheckFromDb.getNodeVersionKey().equals(nodeVersionKey)) - { - // The node is gone or has moved on in version - invalidateNodeCaches(nodeId); - throw new DataIntegrityViolationException( - "Detected stale node entry: " + nodeVersionKey + - " (now " + nodeCheckFromDb + ")"); - } - } - else - { - ChildAssocEntity childAssoc = assocs.get(0); - // What is the real (at least to this txn) version of the child node? 
- NodeVersionKey childNodeVersionKeyFromDb = childAssoc.getChildNode().getNodeVersionKey(); - if (!childNodeVersionKeyFromDb.equals(nodeVersionKey)) - { - // This method was called with a stale version - invalidateNodeCaches(nodeId); - throw new DataIntegrityViolationException( - "Detected stale node entry: " + nodeVersionKey + - " (now " + childNodeVersionKeyFromDb + ")"); - } - } - return value; - } - - /* - * Bulk caching - */ - - @Override - public void setCheckNodeConsistency() - { - if (nodesTransactionalCache != null) - { - nodesTransactionalCache.setDisableSharedCacheReadForTransaction(true); - } - } - - @Override - public Set getCachedAncestors(List nodeIds) - { - // First, make sure 'level 1' nodes and their parents are in the cache - cacheNodesById(nodeIds); - for (Long nodeId : nodeIds) - { - // Filter out deleted nodes - if (exists(nodeId)) - { - getParentAssocsCached(nodeId); - } - } - // Now recurse on all ancestors in the cache - Set ancestors = new TreeSet(); - for (Long nodeId : nodeIds) - { - findCachedAncestors(nodeId, ancestors); - } - return ancestors; - } - - /** - * Uses the node and parent assocs cache content to recursively find the set of currently cached ancestor node IDs - */ - private void findCachedAncestors(Long nodeId, Set ancestors) - { - if (!ancestors.add(nodeId)) - { - return; // Already visited - } - Node node = nodesCache.getValue(nodeId); - if (node == null) - { - return; // Not in cache yet - will load in due course - } - Pair cacheKey = new Pair(nodeId, node.getTransaction().getChangeTxnId()); - ParentAssocsInfo value = parentAssocsCache.get(cacheKey); - if (value == null) - { - return; // Not in cache yet - will load in due course - } - for (ChildAssocEntity childAssoc : value.getParentAssocs().values()) - { - findCachedAncestors(childAssoc.getParentNode().getId(), ancestors); - } - } - - @Override - public void cacheNodesById(List nodeIds) - { - /* - * ALF-2712: Performance degradation from 3.1.0 to 3.1.2 - * ALF-2784: Degradation of performance between 3.1.1 and 3.2x (observed in JSF) - * - * There is an obvious cost associated with querying the database to pull back nodes, - * and there is additional cost associated with putting the resultant entries into the - * caches. It is NO MORE expensive to check the cache than it is to put an entry into it - * - and probably cheaper considering cache replication - so we start checking nodes to see - * if they have entries before passing them over for batch loading. - * - * However, when running against a cold cache or doing a first-time query against some - * part of the repo, we will be checking for entries in the cache and consistently getting - * no results. To avoid unnecessary checking when the cache is PROBABLY cold, we - * examine the ratio of hits/misses at regular intervals. - */ - - boolean disableSharedCacheReadForTransaction = false; - if (nodesTransactionalCache != null) - { - disableSharedCacheReadForTransaction = nodesTransactionalCache.getDisableSharedCacheReadForTransaction(); - } - - if ((disableSharedCacheReadForTransaction == false) && nodeIds.size() < 10) - { - // We only cache where the number of results is potentially - // a problem for the N+1 loading that might result. - return; - } - - int foundCacheEntryCount = 0; - int missingCacheEntryCount = 0; - boolean forceBatch = false; - - List batchLoadNodeIds = new ArrayList(nodeIds.size()); - for (Long nodeId : nodeIds) - { - if (!forceBatch) - { - // Is this node in the cache? 
- if (nodesCache.getValue(nodeId) != null) - { - foundCacheEntryCount++; // Don't add it to the batch - continue; - } - else - { - missingCacheEntryCount++; // Fall through and add it to the batch - } - if (foundCacheEntryCount + missingCacheEntryCount % 100 == 0) - { - // We force the batch if the number of hits drops below the number of misses - forceBatch = foundCacheEntryCount < missingCacheEntryCount; - } - } - - batchLoadNodeIds.add(nodeId); - } - - int size = batchLoadNodeIds.size(); - cacheNodesBatch(batchLoadNodeIds); - - if (logger.isDebugEnabled()) - { - logger.debug("Pre-loaded " + size + " nodes."); - } - } - - /** - * {@inheritDoc} - *

- * Loads properties, aspects, parent associations and the ID-noderef cache. - */ - @Override - public void cacheNodes(List nodeRefs) - { - /* - * ALF-2712: Performance degradation from 3.1.0 to 3.1.2 - * ALF-2784: Degradation of performance between 3.1.1 and 3.2x (observed in JSF) - * - * There is an obvious cost associated with querying the database to pull back nodes, - * and there is additional cost associated with putting the resultant entries into the - * caches. It is NO MORE expensive to check the cache than it is to put an entry into it - * - and probably cheaper considering cache replication - so we start checking nodes to see - * if they have entries before passing them over for batch loading. - * - * However, when running against a cold cache or doing a first-time query against some - * part of the repo, we will be checking for entries in the cache and consistently getting - * no results. To avoid unnecessary checking when the cache is PROBABLY cold, we - * examine the ratio of hits/misses at regular intervals. - */ - if (nodeRefs.size() < cachingThreshold) - { - // We only cache where the number of results is potentially - // a problem for the N+1 loading that might result. - return; - } - int foundCacheEntryCount = 0; - int missingCacheEntryCount = 0; - boolean forceBatch = false; - - // Group the nodes by store so that we don't *have* to eagerly join to store to get query performance - Map> uuidsByStore = new HashMap>(3); - for (NodeRef nodeRef : nodeRefs) - { - if (!forceBatch) - { - // Is this node in the cache? - if (nodesCache.getKey(nodeRef) != null) - { - foundCacheEntryCount++; // Don't add it to the batch - continue; - } - else - { - missingCacheEntryCount++; // Fall through and add it to the batch - } - if (foundCacheEntryCount + missingCacheEntryCount % 100 == 0) - { - // We force the batch if the number of hits drops below the number of misses - forceBatch = foundCacheEntryCount < missingCacheEntryCount; - } - } - - StoreRef storeRef = nodeRef.getStoreRef(); - List uuids = (List) uuidsByStore.get(storeRef); - if (uuids == null) - { - uuids = new ArrayList(nodeRefs.size()); - uuidsByStore.put(storeRef, uuids); - } - uuids.add(nodeRef.getId()); - } - int size = nodeRefs.size(); - nodeRefs = null; - // Now load all the nodes - for (Map.Entry> entry : uuidsByStore.entrySet()) - { - StoreRef storeRef = entry.getKey(); - List uuids = entry.getValue(); - cacheNodes(storeRef, uuids); - } - if (logger.isDebugEnabled()) - { - logger.debug("Pre-loaded " + size + " nodes."); - } - } - - /** - * Loads the nodes into cache using batching. 
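Both cacheNodes and cacheNodesById above use the same warm/cold sampling heuristic: probe the cache per node and, every hundred nodes, compare hits to misses, switching to unconditional batch loading once misses dominate. One detail worth noting is that the check is written as "foundCacheEntryCount + missingCacheEntryCount % 100 == 0", which by operator precedence applies the modulo first; the parenthesised form in the sketch below reflects what appears to be the intent. All names in the sketch are invented.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;

final class PreloadPlanner
{
    private static final int SAMPLE_INTERVAL = 100;

    static <T> List<T> idsToBatchLoad(List<T> ids, Predicate<T> isCached)
    {
        List<T> toLoad = new ArrayList<>(ids.size());
        int hits = 0;
        int misses = 0;
        boolean forceBatch = false;
        for (T id : ids)
        {
            if (!forceBatch)
            {
                if (isCached.test(id))
                {
                    hits++;                 // already cached: skip it
                    continue;
                }
                misses++;                   // not cached: queue it for the batch
                if ((hits + misses) % SAMPLE_INTERVAL == 0)
                {
                    // If misses outnumber hits the cache is probably cold: stop probing
                    forceBatch = hits < misses;
                }
            }
            toLoad.add(id);
        }
        return toLoad;
    }
}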
- */ - private void cacheNodes(StoreRef storeRef, List uuids) - { - StoreEntity store = getStoreNotNull(storeRef); - Long storeId = store.getId(); - - int batchSize = 256; - SortedSet batch = new TreeSet(); - for (String uuid : uuids) - { - batch.add(uuid); - if (batch.size() >= batchSize) - { - // Preload - List nodes = selectNodesByUuids(storeId, batch); - cacheNodesNoBatch(nodes); - batch.clear(); - } - } - // Load any remaining nodes - if (batch.size() > 0) - { - List nodes = selectNodesByUuids(storeId, batch); - cacheNodesNoBatch(nodes); - } - } - - private void cacheNodesBatch(List nodeIds) - { - int batchSize = 256; - SortedSet batch = new TreeSet(); - for (Long nodeId : nodeIds) - { - batch.add(nodeId); - if (batch.size() >= batchSize) - { - // Preload - List nodes = selectNodesByIds(batch); - cacheNodesNoBatch(nodes); - batch.clear(); - } - } - // Load any remaining nodes - if (batch.size() > 0) - { - List nodes = selectNodesByIds(batch); - cacheNodesNoBatch(nodes); - } - } - - /** - * Bulk-fetch the nodes for a given store. All nodes passed in are fetched. - */ - private void cacheNodesNoBatch(List nodes) - { - // Get the nodes - SortedSet aspectNodeIds = new TreeSet(); - SortedSet propertiesNodeIds = new TreeSet(); - Map nodeVersionKeysFromCache = new HashMap(nodes.size()*2); // Keep for quick lookup - for (Node node : nodes) - { - Long nodeId = node.getId(); - NodeVersionKey nodeVersionKey = node.getNodeVersionKey(); - node.lock(); // Prevent unexpected edits of values going into the cache - nodesCache.setValue(nodeId, node); - if (propertiesCache.getValue(nodeVersionKey) == null) - { - propertiesNodeIds.add(nodeId); - } - if (aspectsCache.getValue(nodeVersionKey) == null) - { - aspectNodeIds.add(nodeId); - } - nodeVersionKeysFromCache.put(nodeId, nodeVersionKey); - } - - if(logger.isDebugEnabled()) - { - logger.debug("Pre-loaded " + propertiesNodeIds.size() + " properties"); - logger.debug("Pre-loaded " + propertiesNodeIds.size() + " aspects"); - } - - Map> nodeAspects = selectNodeAspects(aspectNodeIds); - for (Map.Entry> entry : nodeAspects.entrySet()) - { - NodeVersionKey nodeVersionKeyFromDb = entry.getKey(); - Long nodeId = nodeVersionKeyFromDb.getNodeId(); - Set qnames = entry.getValue(); - setNodeAspectsCached(nodeId, qnames); - aspectNodeIds.remove(nodeId); - } - // Cache the absence of aspects too! - for (Long nodeId: aspectNodeIds) - { - setNodeAspectsCached(nodeId, Collections.emptySet()); - } - - // First ensure all content data are pre-cached, so we don't have to load them individually when converting properties - contentDataDAO.cacheContentDataForNodes(propertiesNodeIds); - - // Now bulk load the properties - Map> propsByNodeId = selectNodeProperties(propertiesNodeIds); - for (Map.Entry> entry : propsByNodeId.entrySet()) - { - Long nodeId = entry.getKey().getNodeId(); - Map propertyValues = entry.getValue(); - Map props = nodePropertyHelper.convertToPublicProperties(propertyValues); - setNodePropertiesCached(nodeId, props); - } - } - - /** - * {@inheritDoc} - *

- * Simply clears out all the node-related caches. - */ - @Override - public void clear() - { - clearCaches(); - } - - /* - * Transactions - */ - - public Long getMaxTxnIdByCommitTime(long maxCommitTime) - { - Transaction txn = selectLastTxnBeforeCommitTime(maxCommitTime); - return (txn == null ? null : txn.getId()); - } - - @Override - public int getTransactionCount() - { - return selectTransactionCount(); - } - - @Override - public Transaction getTxnById(Long txnId) - { - return selectTxnById(txnId); - } - - @Override - public List getTxnChanges(Long txnId) - { - return getTxnChangesForStore(null, txnId); - } - - @Override - public List getTxnChangesForStore(StoreRef storeRef, Long txnId) - { - Long storeId = (storeRef == null) ? null : getStoreNotNull(storeRef).getId(); - List nodes = selectTxnChanges(txnId, storeId); - // Convert - List nodeStatuses = new ArrayList(nodes.size()); - for (NodeEntity node : nodes) - { - nodeStatuses.add(node.getNodeStatus(qnameDAO)); - } - - // Done - return nodeStatuses; - } - - @Override - public List getTxnsUnused(Long minTxnId, long maxCommitTime, int count) - { - return selectTxnsUnused(minTxnId, maxCommitTime, count); - } - - @Override - public void purgeTxn(Long txnId) - { - deleteTransaction(txnId); - } - - public static final Long LONG_ZERO = 0L; - - @Override - public Long getMinTxnCommitTime() - { - Long time = selectMinTxnCommitTime(); - return (time == null ? LONG_ZERO : time); - } - - @Override - public Long getMaxTxnCommitTime() - { - Long time = selectMaxTxnCommitTime(); - return (time == null ? LONG_ZERO : time); - } - - public Long getMinTxnCommitTimeForDeletedNodes() - { - Long time = selectMinTxnCommitTimeForDeletedNodes(); - return (time == null ? LONG_ZERO : time); - } - - - @Override - public Long getMinTxnId() - { - Long id = selectMinTxnId(); - return (id == null ? LONG_ZERO : id); - } - - @Override - public Long getMinUnusedTxnCommitTime() - { - Long id = selectMinUnusedTxnCommitTime(); - return (id == null ? LONG_ZERO : id); - } - - @Override - public Long getMaxTxnId() - { - Long id = selectMaxTxnId(); - return (id == null ? 
LONG_ZERO : id); - } - - @Override - public Long getMinTxInNodeIdRange(Long fromNodeId, Long toNodeId) - { - return selectMinTxInNodeIdRange(fromNodeId, toNodeId); - } - - @Override - public Long getMaxTxInNodeIdRange(Long fromNodeId, Long toNodeId) - { - return selectMaxTxInNodeIdRange(fromNodeId, toNodeId); - } - - @Override - public Long getNextTxCommitTime(Long fromCommitTime) - { - return selectNextTxCommitTime(fromCommitTime); - } - - /* - * Abstract methods for underlying CRUD - */ - - protected abstract Long insertTransaction(String changeTxnId, Long commit_time_ms); - protected abstract int updateTransaction(Long txnId, Long commit_time_ms); - protected abstract int deleteTransaction(Long txnId); - protected abstract List selectAllStores(); - protected abstract StoreEntity selectStore(StoreRef storeRef); - protected abstract NodeEntity selectStoreRootNode(StoreRef storeRef); - protected abstract Long insertStore(StoreEntity store); - protected abstract int updateStoreRoot(StoreEntity store); - protected abstract int updateStore(StoreEntity store); - protected abstract int updateNodesInStore(Long txnId, Long storeId); - protected abstract Long insertNode(NodeEntity node); - protected abstract int updateNode(NodeUpdateEntity nodeUpdate); - protected abstract int updateNodes(Long txnId, List nodeIds); - protected abstract void updatePrimaryChildrenSharedAclId( - Long txnId, - Long primaryParentNodeId, - Long optionalOldSharedAlcIdInAdditionToNull, - Long newSharedAlcId); - protected abstract int deleteNodeById(Long nodeId); - protected abstract int deleteNodesByCommitTime(long fromTxnCommitTimeMs, long toTxnCommitTimeMs); - protected abstract NodeEntity selectNodeById(Long id); - protected abstract NodeEntity selectNodeByNodeRef(NodeRef nodeRef); - protected abstract List selectNodesByUuids(Long storeId, SortedSet uuids); - protected abstract List selectNodesByIds(SortedSet ids); - protected abstract Map> selectNodeProperties(Set nodeIds); - protected abstract Map> selectNodeProperties(Long nodeId); - protected abstract Map> selectNodeProperties(Long nodeId, Set qnameIds); - protected abstract int deleteNodeProperties(Long nodeId, Set qnameIds); - protected abstract int deleteNodeProperties(Long nodeId, List propKeys); - protected abstract void insertNodeProperties(Long nodeId, Map persistableProps); - protected abstract Map> selectNodeAspects(Set nodeIds); - protected abstract void insertNodeAspect(Long nodeId, Long qnameId); - protected abstract int deleteNodeAspects(Long nodeId, Set qnameIds); - protected abstract void selectNodesWithAspects( - List qnameIds, - Long minNodeId, Long maxNodeId, - NodeRefQueryCallback resultsCallback); - protected abstract void selectNodesWithAspects( - List qnameIds, - Long minNodeId, Long maxNodeId, boolean ordered, - NodeRefQueryCallback resultsCallback); - protected abstract void selectNodesWithAspects( - List qnameIds, - Long minNodeId, Long maxNodeId, boolean ordered, int maxResults, - NodeRefQueryCallback resultsCallback); - protected abstract Long insertNodeAssoc(Long sourceNodeId, Long targetNodeId, Long assocTypeQNameId, int assocIndex); - protected abstract int updateNodeAssoc(Long id, int assocIndex); - protected abstract int deleteNodeAssoc(Long sourceNodeId, Long targetNodeId, Long assocTypeQNameId); - protected abstract int deleteNodeAssocs(List ids); - protected abstract List selectNodeAssocs(Long nodeId); - protected abstract List selectNodeAssocsBySource(Long sourceNodeId, Long typeQNameId); - protected abstract List 
selectNodeAssocsBySourceAndPropertyValue(Long sourceNodeId, Long typeQNameId, Long propertyQNameId, NodePropertyValue nodeValue); - protected abstract List selectNodeAssocsByTarget(Long targetNodeId, Long typeQNameId); - protected abstract NodeAssocEntity selectNodeAssocById(Long assocId); - protected abstract int selectNodeAssocMaxIndex(Long sourceNodeId, Long assocTypeQNameId); - protected abstract Long insertChildAssoc(ChildAssocEntity assoc); - protected abstract int deleteChildAssocs(List ids); - protected abstract int updateChildAssocIndex( - Long parentNodeId, - Long childNodeId, - QName assocTypeQName, - QName assocQName, - int index); - protected abstract int updateChildAssocUniqueName(Long assocId, String name); -// protected abstract int deleteChildAssocsToAndFrom(Long nodeId); - protected abstract ChildAssocEntity selectChildAssoc(Long assocId); - protected abstract List selectChildNodeIds( - Long nodeId, - Boolean isPrimary, - Long minAssocIdInclusive, - int maxResults); - protected abstract List selectPrimaryChildAcls(Long nodeId); - protected abstract List selectChildAssoc( - Long parentNodeId, - Long childNodeId, - QName assocTypeQName, - QName assocQName); - /** - * Parameters are all optional except the parent node ID and the callback - */ - protected abstract void selectChildAssocs( - Long parentNodeId, - Long childNodeId, - QName assocTypeQName, - QName assocQName, - Boolean isPrimary, - Boolean sameStore, - ChildAssocRefQueryCallback resultsCallback); - protected abstract void selectChildAssocs( - Long parentNodeId, - QName assocTypeQName, - QName assocQName, - int maxResults, - ChildAssocRefQueryCallback resultsCallback); - protected abstract void selectChildAssocs( - Long parentNodeId, - Set assocTypeQNames, - ChildAssocRefQueryCallback resultsCallback); - protected abstract ChildAssocEntity selectChildAssoc( - Long parentNodeId, - QName assocTypeQName, - String childName); - protected abstract void selectChildAssocs( - Long parentNodeId, - QName assocTypeQName, - Collection childNames, - ChildAssocRefQueryCallback resultsCallback); - protected abstract void selectChildAssocsByPropertyValue( - Long parentNodeId, - QName propertyQName, - NodePropertyValue nodeValue, - ChildAssocRefQueryCallback resultsCallback); - protected abstract void selectChildAssocsByChildTypes( - Long parentNodeId, - Set childNodeTypeQNames, - ChildAssocRefQueryCallback resultsCallback); - protected abstract void selectChildAssocsWithoutParentAssocsOfType( - Long parentNodeId, - QName assocTypeQName, - ChildAssocRefQueryCallback resultsCallback); - /** - * Parameters are all optional except the parent node ID and the callback - */ - protected abstract void selectParentAssocs( - Long childNodeId, - QName assocTypeQName, - QName assocQName, - Boolean isPrimary, - ChildAssocRefQueryCallback resultsCallback); - protected abstract List selectParentAssocs(Long childNodeId); - /** - * No DB constraint, so multiple returned - */ - protected abstract List selectPrimaryParentAssocs(Long childNodeId); - protected abstract int updatePrimaryParentAssocs( - Long childNodeId, - Long parentNodeId, - QName assocTypeQName, - QName assocQName, - String childNodeName); - /** - * Moves all node-linked data from one node to another. The source node will be left - * in an orphaned state and without any attached data other than the current transaction. 
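The run of protected abstract select/insert/update/delete methods above and below forms the contract that a concrete subclass (typically the iBatis/MyBatis-backed DAO) implements with mapped SQL statements, while this abstract class keeps the caching and transaction handling. A minimal, hypothetical sketch of that template-method split; the class and method names here are invented and the real class uses EntityLookupCache rather than a plain map.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

abstract class CachingDao<K, V>
{
    private final Map<K, V> cache = new ConcurrentHashMap<>();

    /** Read-through accessor used by callers; caching stays in the abstract layer. */
    public V get(K key)
    {
        return cache.computeIfAbsent(key, this::selectByKey);
    }

    /** Supplied by the concrete DAO, e.g. as a mapped SQL statement. */
    protected abstract V selectByKey(K key);
}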
- * - * @param fromNodeId the source node - * @param toNodeId the target node - */ - protected abstract void moveNodeData(Long fromNodeId, Long toNodeId); - - protected abstract void deleteSubscriptions(Long nodeId); - - protected abstract Transaction selectLastTxnBeforeCommitTime(Long maxCommitTime); - protected abstract int selectTransactionCount(); - protected abstract Transaction selectTxnById(Long txnId); - protected abstract List selectTxnChanges(Long txnId, Long storeId); - // public for testing - public abstract List selectTxns( - Long fromTimeInclusive, - Long toTimeExclusive, - Integer count, - List includeTxnIds, - List excludeTxnIds, - Boolean ascending); - protected abstract List selectTxnsUnused(Long minTxnId, Long maxCommitTime, Integer count); - protected abstract Long selectMinTxnCommitTime(); - protected abstract Long selectMaxTxnCommitTime(); - protected abstract Long selectMinTxnCommitTimeForDeletedNodes(); - protected abstract Long selectMinTxnId(); - protected abstract Long selectMaxTxnId(); - protected abstract Long selectMinUnusedTxnCommitTime(); - protected abstract Long selectMinTxInNodeIdRange(Long fromNodeId, Long toNodeId); - protected abstract Long selectMaxTxInNodeIdRange(Long fromNodeId, Long toNodeId); - protected abstract Long selectNextTxCommitTime(Long fromCommitTime); - -} +/* + * #%L + * Alfresco Repository + * %% + * Copyright (C) 2005 - 2025 Alfresco Software Limited + * %% + * This file is part of the Alfresco software. + * If the software was purchased under a paid Alfresco license, the terms of + * the paid license agreement will prevail. Otherwise, the software is + * provided under the following open source license terms: + * + * Alfresco is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * Alfresco is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with Alfresco. If not, see . 
+ * #L% + */ +package org.alfresco.repo.domain.node; + +import java.io.Serializable; +import java.sql.Savepoint; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.SortedSet; +import java.util.Stack; +import java.util.TreeSet; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.springframework.dao.ConcurrencyFailureException; +import org.springframework.dao.DataIntegrityViolationException; +import org.springframework.util.Assert; + +import org.alfresco.error.AlfrescoRuntimeException; +import org.alfresco.ibatis.BatchingDAO; +import org.alfresco.ibatis.RetryingCallbackHelper; +import org.alfresco.ibatis.RetryingCallbackHelper.RetryingCallback; +import org.alfresco.model.ContentModel; +import org.alfresco.repo.cache.NullCache; +import org.alfresco.repo.cache.SimpleCache; +import org.alfresco.repo.cache.TransactionalCache; +import org.alfresco.repo.cache.lookup.EntityLookupCache; +import org.alfresco.repo.cache.lookup.EntityLookupCache.EntityLookupCallbackDAOAdaptor; +import org.alfresco.repo.domain.contentdata.ContentDataDAO; +import org.alfresco.repo.domain.control.ControlDAO; +import org.alfresco.repo.domain.locale.LocaleDAO; +import org.alfresco.repo.domain.permissions.AccessControlListDAO; +import org.alfresco.repo.domain.permissions.AclDAO; +import org.alfresco.repo.domain.qname.QNameDAO; +import org.alfresco.repo.domain.usage.UsageDAO; +import org.alfresco.repo.policy.BehaviourFilter; +import org.alfresco.repo.security.permissions.AccessControlListProperties; +import org.alfresco.repo.transaction.AlfrescoTransactionSupport; +import org.alfresco.repo.transaction.AlfrescoTransactionSupport.TxnReadState; +import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback; +import org.alfresco.repo.transaction.TransactionalDao; +import org.alfresco.repo.transaction.TransactionalResourceHelper; +import org.alfresco.service.cmr.dictionary.DataTypeDefinition; +import org.alfresco.service.cmr.dictionary.DictionaryService; +import org.alfresco.service.cmr.dictionary.InvalidTypeException; +import org.alfresco.service.cmr.dictionary.PropertyDefinition; +import org.alfresco.service.cmr.repository.AssociationExistsException; +import org.alfresco.service.cmr.repository.AssociationRef; +import org.alfresco.service.cmr.repository.ChildAssociationRef; +import org.alfresco.service.cmr.repository.ContentData; +import org.alfresco.service.cmr.repository.CyclicChildRelationshipException; +import org.alfresco.service.cmr.repository.DuplicateChildNodeNameException; +import org.alfresco.service.cmr.repository.InvalidNodeRefException; +import org.alfresco.service.cmr.repository.InvalidStoreRefException; +import org.alfresco.service.cmr.repository.NodeRef; +import org.alfresco.service.cmr.repository.NodeRef.Status; +import org.alfresco.service.cmr.repository.Path; +import org.alfresco.service.cmr.repository.StoreRef; +import org.alfresco.service.cmr.repository.datatype.DefaultTypeConverter; +import org.alfresco.service.namespace.QName; +import org.alfresco.service.transaction.ReadOnlyServerException; +import org.alfresco.service.transaction.TransactionService; +import 
org.alfresco.util.EqualsHelper; +import org.alfresco.util.EqualsHelper.MapValueComparison; +import org.alfresco.util.GUID; +import org.alfresco.util.Pair; +import org.alfresco.util.PropertyCheck; +import org.alfresco.util.ValueProtectingMap; +import org.alfresco.util.transaction.TransactionListenerAdapter; + +/** + * Abstract implementation for Node DAO. + *

+ * This provides basic services such as caching, but defers to the underlying implementation for CRUD operations. + * + * @author Derek Hulley + * @since 3.4 + */ +public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO +{ + private static final String CACHE_REGION_ROOT_NODES = "N.RN"; + public static final String CACHE_REGION_NODES = "N.N"; + private static final String CACHE_REGION_ASPECTS = "N.A"; + private static final String CACHE_REGION_PROPERTIES = "N.P"; + + private static final String KEY_LOST_NODE_PAIRS = AbstractNodeDAOImpl.class.getName() + ".lostNodePairs"; + private static final String KEY_DELETED_ASSOCS = AbstractNodeDAOImpl.class.getName() + ".deletedAssocs"; + + protected Log logger = LogFactory.getLog(getClass()); + private Log loggerPaths = LogFactory.getLog(getClass().getName() + ".paths"); + + protected final boolean isDebugEnabled = logger.isDebugEnabled(); + private NodePropertyHelper nodePropertyHelper; + private UpdateTransactionListener updateTransactionListener = new UpdateTransactionListener(); + private RetryingCallbackHelper childAssocRetryingHelper; + + private TransactionService transactionService; + private DictionaryService dictionaryService; + private BehaviourFilter policyBehaviourFilter; + private AclDAO aclDAO; + private AccessControlListDAO accessControlListDAO; + private ControlDAO controlDAO; + private QNameDAO qnameDAO; + private ContentDataDAO contentDataDAO; + private LocaleDAO localeDAO; + private UsageDAO usageDAO; + + private int cachingThreshold = 10; + + /** + * Cache for the Store root nodes by StoreRef:
+ * KEY: StoreRef
+ * VALUE: Node representing the root node
+ * VALUE KEY: IGNORED
+ */ + private EntityLookupCache<StoreRef, Node, Serializable> rootNodesCache; + + /** + * Cache for nodes with the root aspect by StoreRef:
+ * KEY: StoreRef
+ * VALUE: A set of nodes with the root aspect
+ */ + private SimpleCache<StoreRef, Set<NodeRef>> allRootNodesCache; + + /** + * Bidirectional cache for the Node ID to Node lookups:
+ * KEY: Node ID
+ * VALUE: Node
+ * VALUE KEY: The Node's NodeRef
+ */ + private EntityLookupCache<Long, Node, NodeRef> nodesCache; + /** + * Backing transactional cache to allow read-through requests to be honoured + */ + private TransactionalCache<Serializable, Serializable> nodesTransactionalCache; + /** + * Cache for the QName values:
+ * KEY: NodeVersionKey
+ * VALUE: Set<QName>
+ * VALUE KEY: None
+ */ + private EntityLookupCache<NodeVersionKey, Set<QName>, Serializable> aspectsCache; + /** + * Cache for the Node properties:
+ * KEY: NodeVersionKey
+ * VALUE: Map<QName, Serializable>
+ * VALUE KEY: None
+ */ + private EntityLookupCache<NodeVersionKey, Map<QName, Serializable>, Serializable> propertiesCache; + /** + * Non-clustered cache for the Node parent assocs:
+ * KEY: (nodeId, txnId) pair
+ * VALUE: ParentAssocs + */ + private ParentAssocsCache parentAssocsCache; + private int parentAssocsCacheSize; + private int parentAssocsCacheLimitFactor = 8; + + /** + * Cache for fast lookups of child nodes by cm:name. + */ + private SimpleCache childByNameCache; + + /** + * Constructor. Set up various instance-specific members such as caches and locks. + */ + public AbstractNodeDAOImpl() + { + childAssocRetryingHelper = new RetryingCallbackHelper(); + childAssocRetryingHelper.setRetryWaitMs(10); + childAssocRetryingHelper.setMaxRetries(5); + // Caches + rootNodesCache = new EntityLookupCache(new RootNodesCacheCallbackDAO()); + nodesCache = new EntityLookupCache(new NodesCacheCallbackDAO()); + aspectsCache = new EntityLookupCache, Serializable>(new AspectsCallbackDAO()); + propertiesCache = new EntityLookupCache, Serializable>(new PropertiesCallbackDAO()); + childByNameCache = new NullCache(); + } + + /** + * @param transactionService + * the service to start post-txn processes + */ + public void setTransactionService(TransactionService transactionService) + { + this.transactionService = transactionService; + } + + /** + * @param dictionaryService + * the service help determine cm:auditable characteristics + */ + public void setDictionaryService(DictionaryService dictionaryService) + { + this.dictionaryService = dictionaryService; + } + + public void setCachingThreshold(int cachingThreshold) + { + this.cachingThreshold = cachingThreshold; + } + + /** + * @param policyBehaviourFilter + * the service to determine the behaviour for cm:auditable and other inherent capabilities. + */ + public void setPolicyBehaviourFilter(BehaviourFilter policyBehaviourFilter) + { + this.policyBehaviourFilter = policyBehaviourFilter; + } + + /** + * @param aclDAO + * used to update permissions during certain operations + */ + public void setAclDAO(AclDAO aclDAO) + { + this.aclDAO = aclDAO; + } + + /** + * @param accessControlListDAO + * used to update ACL inheritance during node moves + */ + public void setAccessControlListDAO(AccessControlListDAO accessControlListDAO) + { + this.accessControlListDAO = accessControlListDAO; + } + + /** + * @param controlDAO + * create Savepoints + */ + public void setControlDAO(ControlDAO controlDAO) + { + this.controlDAO = controlDAO; + } + + /** + * @param qnameDAO + * translates QName IDs into QName instances and vice-versa + */ + public void setQnameDAO(QNameDAO qnameDAO) + { + this.qnameDAO = qnameDAO; + } + + /** + * @param contentDataDAO + * used to create and delete content references + */ + public void setContentDataDAO(ContentDataDAO contentDataDAO) + { + this.contentDataDAO = contentDataDAO; + } + + /** + * @param localeDAO + * used to handle MLText properties + */ + public void setLocaleDAO(LocaleDAO localeDAO) + { + this.localeDAO = localeDAO; + } + + /** + * @param usageDAO + * used to keep content usage calculations in line + */ + public void setUsageDAO(UsageDAO usageDAO) + { + this.usageDAO = usageDAO; + } + + /** + * Set the cache that maintains the Store root node data + * + * @param cache + * the cache + */ + public void setRootNodesCache(SimpleCache cache) + { + this.rootNodesCache = new EntityLookupCache( + cache, + CACHE_REGION_ROOT_NODES, + new RootNodesCacheCallbackDAO()); + } + + /** + * Set the cache that maintains the extended Store root node data + * + * @param allRootNodesCache + * the cache + */ + public void setAllRootNodesCache(SimpleCache> allRootNodesCache) + { + this.allRootNodesCache = allRootNodesCache; + } + + /** + * Set the cache 
that maintains node ID-NodeRef cross referencing data + * + * @param cache + * the cache + */ + public void setNodesCache(SimpleCache cache) + { + this.nodesCache = new EntityLookupCache( + cache, + CACHE_REGION_NODES, + new NodesCacheCallbackDAO()); + if (cache instanceof TransactionalCache) + { + this.nodesTransactionalCache = (TransactionalCache) cache; + } + } + + /** + * Set the cache that maintains the Node QName IDs + * + * @param aspectsCache + * the cache + */ + public void setAspectsCache(SimpleCache> aspectsCache) + { + this.aspectsCache = new EntityLookupCache, Serializable>( + aspectsCache, + CACHE_REGION_ASPECTS, + new AspectsCallbackDAO()); + } + + /** + * Set the cache that maintains the Node property values + * + * @param propertiesCache + * the cache + */ + public void setPropertiesCache(SimpleCache> propertiesCache) + { + this.propertiesCache = new EntityLookupCache, Serializable>( + propertiesCache, + CACHE_REGION_PROPERTIES, + new PropertiesCallbackDAO()); + } + + /** + * Sets the maximum capacity of the parent assocs cache + * + * @param parentAssocsCacheSize + * the cache size + */ + public void setParentAssocsCacheSize(int parentAssocsCacheSize) + { + this.parentAssocsCacheSize = parentAssocsCacheSize; + } + + /** + * Sets the average number of parents expected per cache entry. This parameter is multiplied by the {@link #setParentAssocsCacheSize(int)} parameter to compute a limit on the total number of cached parents, which will be proportional to the cache's memory usage. The cache will be pruned when this limit is exceeded to avoid excessive memory usage. + * + * @param parentAssocsCacheLimitFactor + * the parentAssocsCacheLimitFactor to set + */ + public void setParentAssocsCacheLimitFactor(int parentAssocsCacheLimitFactor) + { + this.parentAssocsCacheLimitFactor = parentAssocsCacheLimitFactor; + } + + /** + * Set the cache that maintains lookups by child cm:name + * + * @param childByNameCache + * the cache + */ + public void setChildByNameCache(SimpleCache childByNameCache) + { + this.childByNameCache = childByNameCache; + } + + /* Initialize */ + + public void init() + { + PropertyCheck.mandatory(this, "transactionService", transactionService); + PropertyCheck.mandatory(this, "dictionaryService", dictionaryService); + PropertyCheck.mandatory(this, "aclDAO", aclDAO); + PropertyCheck.mandatory(this, "accessControlListDAO", accessControlListDAO); + PropertyCheck.mandatory(this, "qnameDAO", qnameDAO); + PropertyCheck.mandatory(this, "contentDataDAO", contentDataDAO); + PropertyCheck.mandatory(this, "localeDAO", localeDAO); + PropertyCheck.mandatory(this, "usageDAO", usageDAO); + + this.nodePropertyHelper = new NodePropertyHelper(dictionaryService, qnameDAO, localeDAO, contentDataDAO); + this.parentAssocsCache = new ParentAssocsCache(this.parentAssocsCacheSize, this.parentAssocsCacheLimitFactor); + } + + /* Cache helpers */ + + private void clearCaches() + { + nodesCache.clear(); + aspectsCache.clear(); + propertiesCache.clear(); + parentAssocsCache.clear(); + } + + /** + * Invalidate cache entries for all children of a give node. This usually applies where the child associations or nodes are modified en-masse. 
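The javadoc above describes invalidateNodeChildrenCaches, whose body follows below: rather than selecting every child in one unbounded statement, it pages through the children in blocks of 256, keyed on the child node id, until a short page is returned. A sketch of that keyset-pagination loop; fetchPageFrom and the visitor are invented stand-ins for selectChildNodeIds and the per-child cache invalidation.

import java.util.List;
import java.util.function.LongConsumer;
import java.util.function.LongFunction;

final class ChildPager
{
    static final int PAGE_SIZE = 256;

    /** Visit every child id, paging by "child id >= minIdInclusive ORDER BY id" with a row limit. */
    static int forEachChild(LongFunction<List<Long>> fetchPageFrom, LongConsumer visitor)
    {
        int count = 0;
        long minIdInclusive = Long.MIN_VALUE;
        while (true)
        {
            List<Long> page = fetchPageFrom.apply(minIdInclusive);  // ordered ascending by id
            for (long childId : page)
            {
                visitor.accept(childId);         // e.g. invalidate that child's cache entries
                minIdInclusive = childId + 1;    // next page starts just after the last id seen
                count++;
            }
            if (page.size() < PAGE_SIZE)
            {
                break;                           // short (or empty) page: no more children
            }
        }
        return count;
    }
}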
+ * + * @param parentNodeId + * the parent node of all child nodes to be invalidated (may be null) + * @param touchNodes + * true to also touch the nodes + * @return the number of child associations found (might be capped) + */ + private int invalidateNodeChildrenCaches(Long parentNodeId, boolean primary, boolean touchNodes) + { + Long txnId = getCurrentTransaction().getId(); + + int count = 0; + List childNodeIds = new ArrayList(256); + Long minChildNodeIdInclusive = Long.MIN_VALUE; + while (minChildNodeIdInclusive != null) + { + childNodeIds.clear(); + List childAssocs = selectChildNodeIds( + parentNodeId, + Boolean.valueOf(primary), + minChildNodeIdInclusive, + 256); + // Remove the cache entries as we go + for (ChildAssocEntity childAssoc : childAssocs) + { + Long childNodeId = childAssoc.getChildNode().getId(); + if (childNodeId.compareTo(minChildNodeIdInclusive) < 0) + { + throw new RuntimeException("Query results did not increase for child node id ID"); + } + else + { + minChildNodeIdInclusive = Long.valueOf(childNodeId.longValue() + 1L); + } + // Invalidate the node cache + childNodeIds.add(childNodeId); + invalidateNodeCaches(childNodeId); + count++; + } + // Bring all the nodes into the transaction, if required + if (touchNodes) + { + updateNodes(txnId, childNodeIds); + } + // Now break out if we didn't have the full set of results + if (childAssocs.size() < 256) + { + break; + } + } + // Done + return count; + } + + /** + * Invalidates all cached artefacts for a particular node, forcing a refresh. + * + * @param nodeId + * the node ID + */ + private void invalidateNodeCaches(Long nodeId) + { + // Take the current value from the nodesCache and use that to invalidate the other caches + Node node = nodesCache.getValue(nodeId); + if (node != null) + { + invalidateNodeCaches(node, true, true, true); + } + // Finally remove the node reference + nodesCache.removeByKey(nodeId); + } + + /** + * Invalidate specific node caches using an exact key + * + * @param node + * the node in question + */ + private void invalidateNodeCaches(Node node, boolean invalidateNodeAspectsCache, + boolean invalidateNodePropertiesCache, boolean invalidateParentAssocsCache) + { + NodeVersionKey nodeVersionKey = node.getNodeVersionKey(); + if (invalidateNodeAspectsCache) + { + aspectsCache.removeByKey(nodeVersionKey); + } + if (invalidateNodePropertiesCache) + { + propertiesCache.removeByKey(nodeVersionKey); + } + if (invalidateParentAssocsCache) + { + invalidateParentAssocsCached(node); + } + } + + /* Transactions */ + + private static final String KEY_TRANSACTION = "node.transaction.id"; + + /** + * Wrapper to update the current transaction to get the change time correct + * + * @author Derek Hulley + * @since 3.4 + */ + private class UpdateTransactionListener implements TransactionalDao + { + /** + * Checks for the presence of a written DB transaction entry + */ + @Override + public boolean isDirty() + { + Long txnId = AbstractNodeDAOImpl.this.getCurrentTransactionId(false); + return txnId != null; + } + + @Override + public void beforeCommit(boolean readOnly) + { + if (readOnly) + { + return; + } + TransactionEntity txn = AlfrescoTransactionSupport.getResource(KEY_TRANSACTION); + Long txnId = txn.getId(); + // Update it + Long now = System.currentTimeMillis(); + txn.setCommitTimeMs(now); + updateTransaction(txnId, now); + } + } + + /** + * @return Returns a new transaction or an existing one if already active + */ + private TransactionEntity getCurrentTransaction() + { + TransactionEntity txn = 
AlfrescoTransactionSupport.getResource(KEY_TRANSACTION); + if (txn != null) + { + // We have been busy here before + return txn; + } + // Check that this is a writable txn + if (AlfrescoTransactionSupport.getTransactionReadState() != TxnReadState.TXN_READ_WRITE) + { + throw new ReadOnlyServerException(); + } + // Have to create a new transaction entry + Long now = System.currentTimeMillis(); + String changeTxnId = AlfrescoTransactionSupport.getTransactionId(); + Long txnId = insertTransaction(changeTxnId, now); + // Store it for later + if (isDebugEnabled) + { + logger.debug("Create txn: " + txnId); + } + txn = new TransactionEntity(); + txn.setId(txnId); + txn.setChangeTxnId(changeTxnId); + txn.setCommitTimeMs(now); + + AlfrescoTransactionSupport.bindResource(KEY_TRANSACTION, txn); + // Listen for the end of the transaction + AlfrescoTransactionSupport.bindDaoService(updateTransactionListener); + // Done + return txn; + } + + public Long getCurrentTransactionCommitTime() + { + Long commitTime = null; + TransactionEntity resource = AlfrescoTransactionSupport.getResource(KEY_TRANSACTION); + if (resource != null) + { + commitTime = resource.getCommitTimeMs(); + } + return commitTime; + } + + public Long getCurrentTransactionId(boolean ensureNew) + { + TransactionEntity txn; + if (ensureNew) + { + txn = getCurrentTransaction(); + } + else + { + txn = AlfrescoTransactionSupport.getResource(KEY_TRANSACTION); + } + return txn == null ? null : txn.getId(); + } + + /* Stores */ + + @Override + public Pair getStore(StoreRef storeRef) + { + Pair rootNodePair = rootNodesCache.getByKey(storeRef); + if (rootNodePair == null) + { + return null; + } + else + { + return new Pair(rootNodePair.getSecond().getStore().getId(), rootNodePair.getFirst()); + } + } + + @Override + public List> getStores() + { + List storeEntities = selectAllStores(); + List> storeRefs = new ArrayList>(storeEntities.size()); + for (StoreEntity storeEntity : storeEntities) + { + storeRefs.add(new Pair(storeEntity.getId(), storeEntity.getStoreRef())); + } + return storeRefs; + } + + /** + * @throws InvalidStoreRefException + * if the store is invalid + */ + private StoreEntity getStoreNotNull(StoreRef storeRef) + { + Pair rootNodePair = rootNodesCache.getByKey(storeRef); + if (rootNodePair == null) + { + throw new InvalidStoreRefException(storeRef); + } + else + { + return rootNodePair.getSecond().getStore(); + } + } + + @Override + public boolean exists(StoreRef storeRef) + { + Pair rootNodePair = rootNodesCache.getByKey(storeRef); + return rootNodePair != null; + } + + @Override + public Pair getRootNode(StoreRef storeRef) + { + Pair rootNodePair = rootNodesCache.getByKey(storeRef); + if (rootNodePair == null) + { + throw new InvalidStoreRefException(storeRef); + } + else + { + return rootNodePair.getSecond().getNodePair(); + } + } + + @Override + public Set getAllRootNodes(StoreRef storeRef) + { + Set rootNodes = allRootNodesCache.get(storeRef); + if (rootNodes == null) + { + final Map> allRootNodes = new HashMap>(97); + getNodesWithAspects(Collections.singleton(ContentModel.ASPECT_ROOT), 0L, Long.MAX_VALUE, new NodeRefQueryCallback() { + @Override + public boolean handle(Pair nodePair) + { + NodeRef nodeRef = nodePair.getSecond(); + StoreRef storeRef = nodeRef.getStoreRef(); + Set rootNodes = allRootNodes.get(storeRef); + if (rootNodes == null) + { + rootNodes = new HashSet(97); + allRootNodes.put(storeRef, rootNodes); + } + rootNodes.add(nodeRef); + return true; + } + }); + rootNodes = allRootNodes.get(storeRef); + if (rootNodes 
== null) + { + rootNodes = Collections.emptySet(); + allRootNodes.put(storeRef, rootNodes); + } + for (Map.Entry> entry : allRootNodes.entrySet()) + { + StoreRef entryStoreRef = entry.getKey(); + // Prevent unnecessary cross-invalidation + if (!allRootNodesCache.contains(entryStoreRef)) + { + allRootNodesCache.put(entryStoreRef, entry.getValue()); + } + } + } + return rootNodes; + } + + @Override + public Pair newStore(StoreRef storeRef) + { + // Create the store + StoreEntity store = new StoreEntity(); + store.setProtocol(storeRef.getProtocol()); + store.setIdentifier(storeRef.getIdentifier()); + + Long storeId = insertStore(store); + store.setId(storeId); + + // Get an ACL for the root node + Long aclId = aclDAO.createAccessControlList(); + + // Create a root node + Long nodeTypeQNameId = qnameDAO.getOrCreateQName(ContentModel.TYPE_STOREROOT).getFirst(); + NodeEntity rootNode = newNodeImpl(store, null, nodeTypeQNameId, null, aclId, null, true); + Long rootNodeId = rootNode.getId(); + addNodeAspects(rootNodeId, Collections.singleton(ContentModel.ASPECT_ROOT)); + + // Now update the store with the root node ID + store.setRootNode(rootNode); + updateStoreRoot(store); + + // Push the value into the caches + rootNodesCache.setValue(storeRef, rootNode); + + if (isDebugEnabled) + { + logger.debug("Created store: \n" + " " + store); + } + return new Pair(rootNode.getId(), rootNode.getNodeRef()); + } + + @Override + public void moveStore(StoreRef oldStoreRef, StoreRef newStoreRef) + { + StoreEntity store = getStoreNotNull(oldStoreRef); + store.setProtocol(newStoreRef.getProtocol()); + store.setIdentifier(newStoreRef.getIdentifier()); + // Update it + int count = updateStore(store); + if (count != 1) + { + throw new ConcurrencyFailureException("Store not updated: " + oldStoreRef); + } + // Bring all the associated nodes into the current transaction + Long txnId = getCurrentTransaction().getId(); + Long storeId = store.getId(); + updateNodesInStore(txnId, storeId); + + // All the NodeRef-based caches are invalid. ID-based caches are fine. + rootNodesCache.removeByKey(oldStoreRef); + allRootNodesCache.remove(oldStoreRef); + nodesCache.clear(); + + if (isDebugEnabled) + { + logger.debug("Moved store: " + oldStoreRef + " --> " + newStoreRef); + } + } + + /** + * Callback to cache store root nodes by {@link StoreRef}. + * + * @author Derek Hulley + * @since 3.4 + */ + private class RootNodesCacheCallbackDAO extends EntityLookupCallbackDAOAdaptor + { + /** + * @throws UnsupportedOperationException + * Stores must be created externally + */ + public Pair createValue(Node value) + { + throw new UnsupportedOperationException("Root node creation is done externally: " + value); + } + + /** + * @param storeRef + * the store ID + */ + public Pair findByKey(StoreRef storeRef) + { + NodeEntity node = selectStoreRootNode(storeRef); + return node == null ? null : new Pair(storeRef, node); + } + } + + /* Nodes */ + + /** + * Callback to cache nodes by ID and {@link NodeRef}. When looking up objects based on the value key, only the referencing properties need be populated. ALL nodes are cached, not just live nodes. 
+ * + * @see NodeEntity + * + * @author Derek Hulley + * @since 3.4 + */ + private class NodesCacheCallbackDAO extends EntityLookupCallbackDAOAdaptor + { + /** + * @throws UnsupportedOperationException + * Nodes are created externally + */ + public Pair createValue(Node value) + { + throw new UnsupportedOperationException("Node creation is done externally: " + value); + } + + /** + * @param nodeId + * the key node ID + */ + public Pair findByKey(Long nodeId) + { + NodeEntity node = selectNodeById(nodeId); + if (node != null) + { + // Lock it to prevent 'accidental' modification + node.lock(); + return new Pair(nodeId, node); + } + else + { + return null; + } + } + + /** + * @return Returns the Node's NodeRef + */ + @Override + public NodeRef getValueKey(Node value) + { + return value.getNodeRef(); + } + + /** + * Looks the node up based on the NodeRef of the given node + */ + @Override + public Pair findByValue(Node node) + { + NodeRef nodeRef = node.getNodeRef(); + node = selectNodeByNodeRef(nodeRef); + if (node != null) + { + // Lock it to prevent 'accidental' modification + node.lock(); + return new Pair(node.getId(), node); + } + else + { + return null; + } + } + } + + public boolean exists(Long nodeId) + { + Pair pair = nodesCache.getByKey(nodeId); + return pair != null && !pair.getSecond().getDeleted(qnameDAO); + } + + public boolean exists(NodeRef nodeRef) + { + NodeEntity node = new NodeEntity(nodeRef); + Pair pair = nodesCache.getByValue(node); + return pair != null && !pair.getSecond().getDeleted(qnameDAO); + } + + @Override + public boolean isInCurrentTxn(Long nodeId) + { + Long currentTxnId = getCurrentTransactionId(false); + if (currentTxnId == null) + { + // No transactional changes have been made to any nodes, therefore the node cannot + // be part of the current transaction + return false; + } + Node node = getNodeNotNull(nodeId, false); + Long nodeTxnId = node.getTransaction().getId(); + return nodeTxnId.equals(currentTxnId); + } + + @Override + public Status getNodeRefStatus(NodeRef nodeRef) + { + Node node = new NodeEntity(nodeRef); + Pair nodePair = nodesCache.getByValue(node); + // The nodesCache gets both live and deleted nodes. + if (nodePair == null) + { + return null; + } + else + { + return nodePair.getSecond().getNodeStatus(qnameDAO); + } + } + + @Override + public Status getNodeIdStatus(Long nodeId) + { + Pair nodePair = nodesCache.getByKey(nodeId); + // The nodesCache gets both live and deleted nodes. + if (nodePair == null) + { + return null; + } + else + { + return nodePair.getSecond().getNodeStatus(qnameDAO); + } + } + + @Override + public Pair getNodePair(NodeRef nodeRef) + { + NodeEntity node = new NodeEntity(nodeRef); + Pair pair = nodesCache.getByValue(node); + // Check it + if (pair == null || pair.getSecond().getDeleted(qnameDAO)) + { + // The cache says that the node is not there or is deleted. + // We double check by going to the DB + Node dbNode = selectNodeByNodeRef(nodeRef); + if (dbNode == null) + { + // The DB agrees. This is an invalid noderef. Why are you trying to use it? + return null; + } + else if (dbNode.getDeleted(qnameDAO)) + { + // We may have reached this deleted node via an invalid association; trigger a post transaction prune of + // any associations that point to this deleted one + pruneDanglingAssocs(dbNode.getId()); + + // The DB agrees. This is a deleted noderef. + return null; + } + else + { + // The cache was wrong, possibly due to it caching negative results earlier. 
+ if (isDebugEnabled) + { + logger.debug("Repairing stale cache entry for node: " + nodeRef); + } + Long nodeId = dbNode.getId(); + invalidateNodeCaches(nodeId); + dbNode.lock(); // Prevent unexpected edits of values going into the cache + nodesCache.setValue(nodeId, dbNode); + return dbNode.getNodePair(); + } + } + return pair.getSecond().getNodePair(); + } + + /** + * Trigger a post transaction prune of any associations that point to this deleted one. + * + * @param nodeId + * Long + */ + private void pruneDanglingAssocs(Long nodeId) + { + selectChildAssocs(nodeId, null, null, null, null, null, new ChildAssocRefQueryCallback() { + @Override + public boolean preLoadNodes() + { + return false; + } + + @Override + public boolean orderResults() + { + return false; + } + + @Override + public boolean handle(Pair childAssocPair, Pair parentNodePair, + Pair childNodePair) + { + bindFixAssocAndCollectLostAndFound(childNodePair, "childNodeWithDeletedParent", childAssocPair.getFirst(), childAssocPair.getSecond().isPrimary() && exists(childAssocPair.getFirst())); + return true; + } + + @Override + public void done() + {} + }); + selectParentAssocs(nodeId, null, null, null, new ChildAssocRefQueryCallback() { + @Override + public boolean preLoadNodes() + { + return false; + } + + @Override + public boolean orderResults() + { + return false; + } + + @Override + public boolean handle(Pair childAssocPair, Pair parentNodePair, + Pair childNodePair) + { + bindFixAssocAndCollectLostAndFound(childNodePair, "deletedChildWithParents", childAssocPair.getFirst(), false); + return true; + } + + @Override + public void done() + {} + }); + } + + @Override + public Pair getNodePair(Long nodeId) + { + Pair pair = nodesCache.getByKey(nodeId); + // Check it + if (pair == null || pair.getSecond().getDeleted(qnameDAO)) + { + // The cache says that the node is not there or is deleted. + // We double check by going to the DB + Node dbNode = selectNodeById(nodeId); + if (dbNode == null) + { + // The DB agrees. This is an invalid noderef. Why are you trying to use it? + return null; + } + else if (dbNode.getDeleted(qnameDAO)) + { + // We may have reached this deleted node via an invalid association; trigger a post transaction prune of + // any associations that point to this deleted one + pruneDanglingAssocs(dbNode.getId()); + + // The DB agrees. This is a deleted noderef. + return null; + } + else + { + // The cache was wrong, possibly due to it caching negative results earlier. 
+ if (isDebugEnabled) + { + logger.debug("Repairing stale cache entry for node: " + nodeId); + } + invalidateNodeCaches(nodeId); + dbNode.lock(); // Prevent unexpected edits of values going into the cache + nodesCache.setValue(nodeId, dbNode); + return dbNode.getNodePair(); + } + } + else + { + return pair.getSecond().getNodePair(); + } + } + + /** + * Get a node instance regardless of whether it is considered live or deleted + * + * @param nodeId + * the node ID to look for + * @param liveOnly + * true to ensure that only live nodes are retrieved + * @return a node that will be live if requested + * @throws ConcurrencyFailureException + * if a valid node is not found + */ + private Node getNodeNotNull(Long nodeId, boolean liveOnly) + { + Pair pair = nodesCache.getByKey(nodeId); + + if (pair == null) + { + // The node has no entry in the database + NodeEntity dbNode = selectNodeById(nodeId); + nodesCache.removeByKey(nodeId); + throw new ConcurrencyFailureException( + "No node row exists: \n" + + " ID: " + nodeId + "\n" + + " DB row: " + dbNode); + } + else if (pair.getSecond().getDeleted(qnameDAO) && liveOnly) + { + // The node is not 'live' as was requested + NodeEntity dbNode = selectNodeById(nodeId); + nodesCache.removeByKey(nodeId); + // Make absolutely sure that the node is not referenced by any associations + pruneDanglingAssocs(nodeId); + // Force a retry on the transaction + throw new ConcurrencyFailureException( + "No live node exists: \n" + + " ID: " + nodeId + "\n" + + " DB row: " + dbNode); + } + else + { + return pair.getSecond(); + } + } + + @Override + public QName getNodeType(Long nodeId) + { + Node node = getNodeNotNull(nodeId, false); + Long nodeTypeQNameId = node.getTypeQNameId(); + return qnameDAO.getQName(nodeTypeQNameId).getSecond(); + } + + @Override + public Long getNodeAclId(Long nodeId) + { + Node node = getNodeNotNull(nodeId, true); + return node.getAclId(); + } + + @Override + public ChildAssocEntity newNode( + Long parentNodeId, + QName assocTypeQName, + QName assocQName, + StoreRef storeRef, + String uuid, + QName nodeTypeQName, + Locale nodeLocale, + String childNodeName, + Map auditableProperties) throws InvalidTypeException + { + Assert.notNull(parentNodeId, "parentNodeId"); + Assert.notNull(assocTypeQName, "assocTypeQName"); + Assert.notNull(assocQName, "assocQName"); + Assert.notNull(storeRef, "storeRef"); + + if (auditableProperties == null) + { + auditableProperties = Collections.emptyMap(); + } + + // Get the parent node + Node parentNode = getNodeNotNull(parentNodeId, true); + + // Find an initial ACL for the node + Long parentAclId = parentNode.getAclId(); + AccessControlListProperties inheritedAcl = null; + Long childAclId = null; + if (parentAclId != null) + { + try + { + Long inheritedACL = aclDAO.getInheritedAccessControlList(parentAclId); + inheritedAcl = aclDAO.getAccessControlListProperties(inheritedACL); + if (inheritedAcl != null) + { + childAclId = inheritedAcl.getId(); + } + } + catch (RuntimeException e) + { + // The get* calls above actually do writes. So pessimistically get rid of the + // parent node from the cache in case it was wrong somehow. 
+ invalidateNodeCaches(parentNodeId); + // Rethrow for a retry (ALF-17286) + throw new RuntimeException( + "Failure while 'getting' inherited ACL or ACL properties: \n" + + " parent ACL ID: " + parentAclId + "\n" + + " inheritied ACL: " + inheritedAcl, + e); + } + } + // Build the cm:auditable properties + AuditablePropertiesEntity auditableProps = new AuditablePropertiesEntity(); + boolean setAuditProps = auditableProps.setAuditValues(null, null, auditableProperties); + if (!setAuditProps) + { + // No cm:auditable properties were supplied + auditableProps = null; + } + + // Get the store + StoreEntity store = getStoreNotNull(storeRef); + // Create the node (it is not a root node) + Long nodeTypeQNameId = qnameDAO.getOrCreateQName(nodeTypeQName).getFirst(); + Long nodeLocaleId = localeDAO.getOrCreateLocalePair(nodeLocale).getFirst(); + NodeEntity node = newNodeImpl(store, uuid, nodeTypeQNameId, nodeLocaleId, childAclId, auditableProps, true); + Long nodeId = node.getId(); + + // Protect the node's cm:auditable if it was explicitly set + if (setAuditProps) + { + NodeRef nodeRef = node.getNodeRef(); + policyBehaviourFilter.disableBehaviour(nodeRef, ContentModel.ASPECT_AUDITABLE); + } + + // Now create a primary association for it + if (childNodeName == null) + { + childNodeName = node.getUuid(); + } + ChildAssocEntity assoc = newChildAssocImpl( + parentNodeId, nodeId, true, assocTypeQName, assocQName, childNodeName, false); + + // There will be no other parent assocs + boolean isRoot = false; + boolean isStoreRoot = nodeTypeQName.equals(ContentModel.TYPE_STOREROOT); + ParentAssocsInfo parentAssocsInfo = new ParentAssocsInfo(isRoot, isStoreRoot, assoc); + setParentAssocsCached(nodeId, parentAssocsInfo); + + if (isDebugEnabled) + { + logger.debug( + "Created new node: \n" + + " Node: " + node + "\n" + + " Assoc: " + assoc); + } + return assoc; + } + + /** + * @param uuid + * the node UUID, or null to auto-generate + * @param nodeTypeQNameId + * the node's type + * @param nodeLocaleId + * the node's locale or null to use the default locale + * @param aclId + * an ACL ID if available + * @param auditableProps + * null to auto-generate or provide a value to explicitly set + * @param allowAuditableAspect + * Should we override the behaviour by potentially not adding the auditable aspect + * @throws NodeExistsException + * if the target reference is already taken by a live node + */ + private NodeEntity newNodeImpl( + StoreEntity store, + String uuid, + Long nodeTypeQNameId, + Long nodeLocaleId, + Long aclId, + AuditablePropertiesEntity auditableProps, + boolean allowAuditableAspect) throws InvalidTypeException + { + NodeEntity node = new NodeEntity(); + // Store + node.setStore(store); + // UUID + if (uuid == null) + { + node.setUuid(GUID.generate()); + } + else + { + node.setUuid(uuid); + } + // QName + node.setTypeQNameId(nodeTypeQNameId); + QName nodeTypeQName = qnameDAO.getQName(nodeTypeQNameId).getSecond(); + // Locale + if (nodeLocaleId == null) + { + nodeLocaleId = localeDAO.getOrCreateDefaultLocalePair().getFirst(); + } + node.setLocaleId(nodeLocaleId); + // ACL (may be null) + node.setAclId(aclId); + // Transaction + TransactionEntity txn = getCurrentTransaction(); + node.setTransaction(txn); + + // Audit + boolean addAuditableAspect = false; + if (auditableProps != null) + { + // Client-supplied cm:auditable values + node.setAuditableProperties(auditableProps); + addAuditableAspect = true; + } + else if (AuditablePropertiesEntity.hasAuditableAspect(nodeTypeQName, dictionaryService)) + { 
+ // Automatically-generated cm:auditable values + auditableProps = new AuditablePropertiesEntity(); + auditableProps.setAuditValues(null, null, true, 0L); + node.setAuditableProperties(auditableProps); + addAuditableAspect = true; + } + + if (!allowAuditableAspect) + addAuditableAspect = false; + + Long id = newNodeImplInsert(node); + node.setId(id); + + Set nodeAspects = null; + if (addAuditableAspect) + { + Long auditableAspectQNameId = qnameDAO.getOrCreateQName(ContentModel.ASPECT_AUDITABLE).getFirst(); + insertNodeAspect(id, auditableAspectQNameId); + nodeAspects = Collections. singleton(ContentModel.ASPECT_AUDITABLE); + } + else + { + nodeAspects = Collections. emptySet(); + } + + // Lock the node and cache + node.lock(); + nodesCache.setValue(id, node); + // Pre-populate some of the other caches so that we don't immediately query + setNodeAspectsCached(id, nodeAspects); + setNodePropertiesCached(id, Collections. emptyMap()); + + if (isDebugEnabled) + { + logger.debug("Created new node: \n" + " " + node); + } + return node; + } + + protected Long newNodeImplInsert(NodeEntity node) + { + Long id = null; + Savepoint savepoint = controlDAO.createSavepoint("newNodeImpl"); + try + { + // First try a straight insert and risk the constraint violation if the node exists + id = insertNode(node); + controlDAO.releaseSavepoint(savepoint); + } + catch (Throwable e) + { + controlDAO.rollbackToSavepoint(savepoint); + // This is probably because there is an existing node. We can handle existing deleted nodes. + NodeRef targetNodeRef = node.getNodeRef(); + Node dbTargetNode = selectNodeByNodeRef(targetNodeRef); + if (dbTargetNode == null) + { + // There does not appear to be any row that could prevent an insert + throw new AlfrescoRuntimeException("Failed to insert new node: " + node, e); + } + else if (dbTargetNode.getDeleted(qnameDAO)) + { + Long dbTargetNodeId = dbTargetNode.getId(); + // This is OK. It happens when we create a node that existed in the past. + // Remove the row completely + deleteNodeProperties(dbTargetNodeId, (Set) null); + deleteNodeById(dbTargetNodeId); + // Now repeat the insert but let any further problems just be thrown out + id = insertNode(node); + } + else + { + // A live node exists. 
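+ // Typical (hypothetical) way to reach this branch: two concurrent transactions race to
+ // create a node with the same store and UUID; the slower insert fails on the unique
+ // constraint, finds the winner's live row here, and reports NodeExistsException rather
+ // than leaking the raw constraint-violation exception.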
+ throw new NodeExistsException(dbTargetNode.getNodePair(), e); + } + } + + return id; + } + + @Override + public Pair, Pair> moveNode( + final Long childNodeId, + final Long newParentNodeId, + final QName assocTypeQName, + final QName assocQName) + { + final Node newParentNode = getNodeNotNull(newParentNodeId, true); + final StoreEntity newParentStore = newParentNode.getStore(); + final Node childNode = getNodeNotNull(childNodeId, true); + final StoreEntity childStore = childNode.getStore(); + final ChildAssocEntity primaryParentAssoc = getPrimaryParentAssocImpl(childNodeId); + final Long oldParentAclId; + final Long oldParentNodeId; + if (primaryParentAssoc == null) + { + oldParentAclId = null; + oldParentNodeId = null; + } + else + { + if (primaryParentAssoc.getParentNode() == null) + { + oldParentAclId = null; + oldParentNodeId = null; + } + else + { + oldParentNodeId = primaryParentAssoc.getParentNode().getId(); + oldParentAclId = getNodeNotNull(oldParentNodeId, true).getAclId(); + } + } + + // Need the child node's name here in case it gets removed + final String childNodeName = (String) getNodeProperty(childNodeId, ContentModel.PROP_NAME); + + // First attempt to move the node, which may rollback to a savepoint + Node newChildNode = childNode; + // Store + if (!childStore.getId().equals(newParentStore.getId())) + { + + // Delete the ASPECT_AUDITABLE from the source node so it doesn't get copied across + // A new aspect would have already been created in the newNodeImpl method. + // ... make sure we have the cm:auditable data from the originating node + AuditablePropertiesEntity auditableProps = childNode.getAuditableProperties(); + + // Create a new node + newChildNode = newNodeImpl( + newParentStore, + childNode.getUuid(), + childNode.getTypeQNameId(), + childNode.getLocaleId(), + childNode.getAclId(), + auditableProps, + false); + Long newChildNodeId = newChildNode.getId(); + + // copy all the data over to new node + moveNodeData(childNode.getId(), newChildNodeId); + + // The new node will have new data not present in the cache, yet + invalidateNodeCaches(newChildNodeId); + invalidateNodeChildrenCaches(newChildNodeId, true, true); + invalidateNodeChildrenCaches(newChildNodeId, false, true); + // Completely delete the original node but keep the ACL as it's reused + deleteNodeImpl(childNodeId, false); + } + else + { + // Touch the node; make sure parent assocs are invalidated + touchNode(childNodeId, null, null, false, false, true); + } + + final Long newChildNodeId = newChildNode.getId(); + + // Now update the primary parent assoc + updatePrimaryParentAssocs(primaryParentAssoc, + newParentNode, + childNode, + newChildNodeId, + childNodeName, + oldParentNodeId, + assocTypeQName, + assocQName); + + // Optimize for rename case + if (!EqualsHelper.nullSafeEquals(newParentNodeId, oldParentNodeId)) + { + // Check for cyclic relationships + // TODO: This adds a lot of overhead when moving hierarchies. + // While getPaths is faster, it would be better to avoid the parentAssocsCache + // completely. 
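+ // Illustrative (hypothetical) case this guards against: moving folder A underneath its
+ // own descendant A/B/C. Walking the new child's paths surfaces the loop as a
+ // CyclicChildRelationshipException, so the move fails before a corrupt hierarchy can be
+ // committed.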
+ getPaths(newChildNode.getNodePair(), false); + // cycleCheck(newChildNodeId); + + // Update ACLs for moved tree + Long newParentAclId = newParentNode.getAclId(); + + // Verify if parent has aspect applied and ACL's are pending + if (hasNodeAspect(oldParentNodeId, ContentModel.ASPECT_PENDING_FIX_ACL)) + { + Long oldParentSharedAclId = (Long) this.getNodeProperty(oldParentNodeId, ContentModel.PROP_SHARED_ACL_TO_REPLACE); + accessControlListDAO.updateInheritance(newChildNodeId, oldParentSharedAclId, newParentAclId); + } + else + { + accessControlListDAO.updateInheritance(newChildNodeId, oldParentAclId, newParentAclId); + } + } + + // Done + Pair assocPair = getPrimaryParentAssoc(newChildNode.getId()); + Pair nodePair = newChildNode.getNodePair(); + if (isDebugEnabled) + { + logger.debug("Moved node: " + assocPair + " ... " + nodePair); + } + return new Pair, Pair>(assocPair, nodePair); + } + + protected void updatePrimaryParentAssocs( + final ChildAssocEntity primaryParentAssoc, + final Node newParentNode, + final Node childNode, + final Long newChildNodeId, + final String childNodeName, + final Long oldParentNodeId, + final QName assocTypeQName, + final QName assocQName) + { + // Because we are retrying in-transaction i.e. absorbing exceptions, we need partial rollback &/or via savepoint if needed (eg. PostgreSQL) + RetryingCallback callback = new RetryingCallback() { + public Integer execute() throws Throwable + { + return updatePrimaryParentAssocsImpl(primaryParentAssoc, + newParentNode, + childNode, + newChildNodeId, + childNodeName, + oldParentNodeId, + assocTypeQName, + assocQName); + } + }; + childAssocRetryingHelper.doWithRetry(callback); + } + + protected int updatePrimaryParentAssocsImpl( + ChildAssocEntity primaryParentAssoc, + Node newParentNode, + Node childNode, + Long newChildNodeId, + String childNodeName, + Long oldParentNodeId, + QName assocTypeQName, + QName assocQName) + { + Long newParentNodeId = newParentNode.getId(); + Long childNodeId = childNode.getId(); + + Savepoint savepoint = controlDAO.createSavepoint("DuplicateChildNodeNameException"); + // We use the child node's UUID if there is no cm:name + String childNodeNameToUse = childNodeName == null ? childNode.getUuid() : childNodeName; + + try + { + int updated = updatePrimaryParentAssocs( + newChildNodeId, + newParentNodeId, + assocTypeQName, + assocQName, + childNodeNameToUse); + controlDAO.releaseSavepoint(savepoint); + // Ensure we invalidate the name cache (the child version key might not have been 'bumped' by the last + // 'touch') + if (updated > 0 && primaryParentAssoc != null) + { + Pair oldTypeQnamePair = qnameDAO.getQName( + primaryParentAssoc.getTypeQNameId()); + if (oldTypeQnamePair != null) + { + childByNameCache.remove(new ChildByNameKey(oldParentNodeId, oldTypeQnamePair.getSecond(), + primaryParentAssoc.getChildNodeName())); + } + } + return updated; + } + catch (Throwable e) + { + controlDAO.rollbackToSavepoint(savepoint); + // DuplicateChildNodeNameException implements DoNotRetryException. + // There are some cases - FK violations, specifically - where we DO actually want to retry. 
+ // Detecting this is done by looking for the related FK names, 'fk_alf_cass_*' in the error message + String lowerMsg = e.getMessage().toLowerCase(); + if (lowerMsg.contains("fk_alf_cass_")) + { + throw new ConcurrencyFailureException("FK violation updating primary parent association for " + childNodeId, e); + } + // We assume that this is from the child cm:name constraint violation + throw new DuplicateChildNodeNameException( + newParentNode.getNodeRef(), + assocTypeQName, + childNodeName, + e); + } + } + + @Override + public boolean updateNode(Long nodeId, QName nodeTypeQName, Locale nodeLocale) + { + // Get the existing node; we need to check for a change in store or UUID + Node oldNode = getNodeNotNull(nodeId, true); + final Long nodeTypeQNameId; + if (nodeTypeQName == null) + { + nodeTypeQNameId = oldNode.getTypeQNameId(); + } + else + { + nodeTypeQNameId = qnameDAO.getOrCreateQName(nodeTypeQName).getFirst(); + } + final Long nodeLocaleId; + if (nodeLocale == null) + { + nodeLocaleId = oldNode.getLocaleId(); + } + else + { + nodeLocaleId = localeDAO.getOrCreateLocalePair(nodeLocale).getFirst(); + } + + // Wrap all the updates into one + NodeUpdateEntity nodeUpdate = new NodeUpdateEntity(); + nodeUpdate.setId(nodeId); + nodeUpdate.setStore(oldNode.getStore()); // Need node reference + nodeUpdate.setUuid(oldNode.getUuid()); // Need node reference + // TypeQName (if necessary) + if (!nodeTypeQNameId.equals(oldNode.getTypeQNameId())) + { + nodeUpdate.setTypeQNameId(nodeTypeQNameId); + nodeUpdate.setUpdateTypeQNameId(true); + } + // Locale (if necessary) + if (!nodeLocaleId.equals(oldNode.getLocaleId())) + { + nodeUpdate.setLocaleId(nodeLocaleId); + nodeUpdate.setUpdateLocaleId(true); + } + + return updateNodeImpl(oldNode, nodeUpdate, null); + } + + @Override + public int touchNodes(Long txnId, List nodeIds) + { + // limit in clause to 1000 node ids + int batchSize = 1000; + + int touched = 0; + ArrayList batch = new ArrayList(batchSize); + for (Long nodeId : nodeIds) + { + invalidateNodeCaches(nodeId); + batch.add(nodeId); + if (batch.size() % batchSize == 0) + { + touched += updateNodes(txnId, batch); + batch.clear(); + } + } + if (batch.size() > 0) + { + touched += updateNodes(txnId, batch); + } + return touched; + } + + /** + * Updates the node's transaction and cm:auditable properties while providing a convenient method to control cache entry invalidation. + *

+ * Not all 'touch' signals actually produce a change: the node may already have been touched in the current transaction. In this case, the required caches are explicitly invalidated as requested.
+ * It is more complicated when the node is modified. If the node is modified against a previous transaction then all cache entries are left untrusted and not pulled forward. But if the node is modified within the same transaction, then the cache entries are considered good and pulled forward against the current version of the node ... unless the cache was specifically tagged for invalidation. + *
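+ * A minimal call, as used elsewhere in this class when only the cached parent
+ * associations should be distrusted and everything else left untouched:
+ *
+ * <pre>
+ * touchNode(nodeId, null, null, false, false, true);
+ * </pre>
+ *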

+ * It is sometime necessary to provide the node's current aspects, particularly during changes to the aspect list. If not provided, they will be looked up. + * + * @param nodeId + * the ID of the node (must refer to a live node) + * @param auditableProps + * optionally override the cm:auditable values + * @param nodeAspects + * the node's aspects or null to look them up + * @param invalidateNodeAspectsCache + * true if the node's cached aspects are unreliable + * @param invalidateNodePropertiesCache + * true if the node's cached properties are unreliable + * @param invalidateParentAssocsCache + * true if the node's cached parent assocs are unreliable + * + * @see #updateNodeImpl(Node, NodeUpdateEntity, Set) + */ + private boolean touchNode( + Long nodeId, AuditablePropertiesEntity auditableProps, Set nodeAspects, + boolean invalidateNodeAspectsCache, + boolean invalidateNodePropertiesCache, + boolean invalidateParentAssocsCache) + { + Node node = null; + try + { + node = getNodeNotNull(nodeId, false); + } + catch (DataIntegrityViolationException e) + { + // The ID doesn't reference a live node. + // We do nothing w.r.t. touching + return false; + } + + NodeUpdateEntity nodeUpdate = new NodeUpdateEntity(); + nodeUpdate.setId(nodeId); + nodeUpdate.setAuditableProperties(auditableProps); + // Update it + boolean updatedNode = updateNodeImpl(node, nodeUpdate, nodeAspects); + // Handle the cache invalidation requests + NodeVersionKey nodeVersionKey = node.getNodeVersionKey(); + if (updatedNode) + { + Node newNode = getNodeNotNull(nodeId, false); + NodeVersionKey newNodeVersionKey = newNode.getNodeVersionKey(); + // The version will have moved on, effectively rendering our caches invalid. + // Copy over caches that DON'T need invalidating + if (!invalidateNodeAspectsCache) + { + copyNodeAspectsCached(nodeVersionKey, newNodeVersionKey); + } + if (!invalidateNodePropertiesCache) + { + copyNodePropertiesCached(nodeVersionKey, newNodeVersionKey); + } + if (invalidateParentAssocsCache) + { + // Because we cache parent assocs by transaction, we must manually invalidate on this version change + invalidateParentAssocsCached(node); + } + else + { + copyParentAssocsCached(node); + } + } + else + { + // The node was not touched. By definition it MUST be in the current transaction. + // We invalidate the caches as specifically requested + invalidateNodeCaches( + node, + invalidateNodeAspectsCache, + invalidateNodePropertiesCache, + invalidateParentAssocsCache); + } + + return updatedNode; + } + + /** + * Helper method that updates the node, bringing it into the current transaction with the appropriate cm:auditable and transaction behaviour. + *

+ * If the NodeRef of the node is changing (usually a store move) then deleted nodes are cleaned out where they might exist. + * + * @param oldNode + * the existing node, fully populated + * @param nodeUpdate + * the node update with all update elements populated + * @param nodeAspects + * the node's aspects or null to look them up + * @return true if any updates were made + */ + private boolean updateNodeImpl(Node oldNode, NodeUpdateEntity nodeUpdate, Set nodeAspects) + { + Long nodeId = oldNode.getId(); + + // Make sure that the ID has been populated + if (!EqualsHelper.nullSafeEquals(nodeId, nodeUpdate.getId())) + { + throw new IllegalArgumentException("NodeUpdateEntity node ID is not correct: " + nodeUpdate); + } + + // Copy of the reference data + nodeUpdate.setStore(oldNode.getStore()); + nodeUpdate.setUuid(oldNode.getUuid()); + + // Ensure that other values are set for completeness when caching + if (!nodeUpdate.isUpdateTypeQNameId()) + { + nodeUpdate.setTypeQNameId(oldNode.getTypeQNameId()); + } + if (!nodeUpdate.isUpdateLocaleId()) + { + nodeUpdate.setLocaleId(oldNode.getLocaleId()); + } + if (!nodeUpdate.isUpdateAclId()) + { + nodeUpdate.setAclId(oldNode.getAclId()); + } + + nodeUpdate.setVersion(oldNode.getVersion()); + // Update the transaction + TransactionEntity txn = getCurrentTransaction(); + nodeUpdate.setTransaction(txn); + if (!txn.getId().equals(oldNode.getTransaction().getId())) + { + // Only update if the txn has changed + nodeUpdate.setUpdateTransaction(true); + } + // Update auditable + if (nodeAspects == null) + { + nodeAspects = getNodeAspects(nodeId); + } + if (nodeAspects.contains(ContentModel.ASPECT_AUDITABLE)) + { + NodeRef oldNodeRef = oldNode.getNodeRef(); + if (policyBehaviourFilter.isEnabled(oldNodeRef, ContentModel.ASPECT_AUDITABLE)) + { + // Make sure that auditable properties are present + AuditablePropertiesEntity auditableProps = oldNode.getAuditableProperties(); + if (auditableProps == null) + { + auditableProps = new AuditablePropertiesEntity(); + } + else + { + auditableProps = new AuditablePropertiesEntity(auditableProps); + } + long modifiedDateToleranceMs = 1000L; + + if (nodeUpdate.isUpdateTransaction()) + { + // allow update cm:modified property for new transaction + modifiedDateToleranceMs = 0L; + } + + boolean updateAuditableProperties = auditableProps.setAuditValues(null, null, false, modifiedDateToleranceMs); + nodeUpdate.setAuditableProperties(auditableProps); + nodeUpdate.setUpdateAuditableProperties(updateAuditableProperties); + } + else if (nodeUpdate.getAuditableProperties() == null) + { + // cache the explicit setting of auditable properties when creating node (note: auditable aspect is not yet present) + AuditablePropertiesEntity auditableProps = oldNode.getAuditableProperties(); + if (auditableProps != null) + { + nodeUpdate.setAuditableProperties(auditableProps); // Can reuse the locked instance + nodeUpdate.setUpdateAuditableProperties(true); + } + } + else + { + // ALF-4117: NodeDAO: Allow cm:auditable to be set + // The nodeUpdate had auditable properties set, so we just use that directly + nodeUpdate.setUpdateAuditableProperties(true); + } + } + else + { + // Make sure that any auditable properties are removed + AuditablePropertiesEntity auditableProps = oldNode.getAuditableProperties(); + if (auditableProps != null) + { + nodeUpdate.setAuditableProperties(null); + nodeUpdate.setUpdateAuditableProperties(true); + } + } + + // Just bug out if nothing has changed + if (!nodeUpdate.isUpdateAnything()) + { + return false; + } + + 
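+ // The update below is guarded by a concurrency check: it must affect exactly one row. A
+ // different count (or an exception) most likely means another transaction has already
+ // moved this node on, so the cached copies are dropped and a ConcurrencyFailureException
+ // is raised, which the retrying transaction machinery treats as retryable.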
// The node is remaining in the current store + int count = 0; + Throwable concurrencyException = null; + try + { + count = updateNode(nodeUpdate); + } + catch (Throwable e) + { + concurrencyException = e; + } + // Do concurrency check + if (count != 1) + { + // Drop the value from the cache in case the cache is stale + nodesCache.removeByKey(nodeId); + nodesCache.removeByValue(nodeUpdate); + + throw new ConcurrencyFailureException("Failed to update node " + nodeId, concurrencyException); + } + else + { + // Check for wrap-around in the version number + if (nodeUpdate.getVersion().equals(LONG_ZERO)) + { + // The version was wrapped back to zero + // The caches that are keyed by version are now unreliable + propertiesCache.clear(); + aspectsCache.clear(); + parentAssocsCache.clear(); + } + // Update the caches + nodeUpdate.lock(); + nodesCache.setValue(nodeId, nodeUpdate); + // The node's version has moved on so no need to invalidate caches + } + + // Done + if (isDebugEnabled) + { + logger.debug( + "Updated Node: \n" + + " OLD: " + oldNode + "\n" + + " NEW: " + nodeUpdate); + } + return true; + } + + @Override + public void setNodeAclId(Long nodeId, Long aclId) + { + Node oldNode = getNodeNotNull(nodeId, true); + NodeUpdateEntity nodeUpdateEntity = new NodeUpdateEntity(); + nodeUpdateEntity.setId(nodeId); + nodeUpdateEntity.setAclId(aclId); + nodeUpdateEntity.setUpdateAclId(true); + updateNodeImpl(oldNode, nodeUpdateEntity, null); + } + + public void setPrimaryChildrenSharedAclId( + Long primaryParentNodeId, + Long optionalOldSharedAlcIdInAdditionToNull, + Long newSharedAclId) + { + Long txnId = getCurrentTransaction().getId(); + updatePrimaryChildrenSharedAclId( + txnId, + primaryParentNodeId, + optionalOldSharedAlcIdInAdditionToNull, + newSharedAclId); + invalidateNodeChildrenCaches(primaryParentNodeId, true, false); + } + + @Override + public void deleteNode(Long nodeId) + { + // Delete and take the ACLs to the grave + deleteNodeImpl(nodeId, true); + } + + /** + * Physical deletion of the node + * + * @param nodeId + * the node to delete + * @param deleteAcl + * true to delete any associated ACLs otherwise false if the ACLs get reused elsewhere + */ + private void deleteNodeImpl(Long nodeId, boolean deleteAcl) + { + Node node = getNodeNotNull(nodeId, true); + // Gather data for later + Long aclId = node.getAclId(); + Set nodeAspects = getNodeAspects(nodeId); + + // Clean up content data + Set contentQNames = new HashSet(dictionaryService.getAllProperties(DataTypeDefinition.CONTENT)); + Set contentQNamesToRemoveIds = qnameDAO.convertQNamesToIds(contentQNames, false); + contentDataDAO.deleteContentDataForNode(nodeId, contentQNamesToRemoveIds); + + // Delete content usage deltas + usageDAO.deleteDeltas(nodeId); + + // Handle sys:aspect_root + if (nodeAspects.contains(ContentModel.ASPECT_ROOT)) + { + StoreRef storeRef = node.getStore().getStoreRef(); + allRootNodesCache.remove(storeRef); + } + + // Remove child associations (invalidate children) + invalidateNodeChildrenCaches(nodeId, true, true); + invalidateNodeChildrenCaches(nodeId, false, true); + + // Remove aspects + deleteNodeAspects(nodeId, null); + + // Remove properties + deleteNodeProperties(nodeId, (Set) null); + + // Remove subscriptions + deleteSubscriptions(nodeId); + + // Delete the row completely: + // ALF-12358: Concurrency: Possible to create association references to deleted nodes + // There will be no way that any references can be made to a deleted node because we + // are really going to delete it. 
However, for tracking purposes we need to maintain + // a list of nodes deleted in the transaction. We store that information against a + // new node of type 'sys:deleted'. This means that 'deleted' nodes are really just + // orphaned (read standalone) nodes that remain invisible outside of the DAO. + int deleted = deleteNodeById(nodeId); + // We will always have to invalidate the cache for the node + invalidateNodeCaches(nodeId); + // Concurrency check + if (deleted != 1) + { + // We thought that the row existed + throw new ConcurrencyFailureException( + "Failed to delete node: \n" + + " Node: " + node); + } + + // Remove ACLs + if (deleteAcl && aclId != null) + { + aclDAO.deleteAclForNode(aclId); + } + + // The node has been cleaned up. Now we recreate the node for index tracking purposes. + // Use a 'deleted' type QName + StoreEntity store = node.getStore(); + String uuid = node.getUuid(); + Long deletedQNameId = qnameDAO.getOrCreateQName(ContentModel.TYPE_DELETED).getFirst(); + Long defaultLocaleId = localeDAO.getOrCreateDefaultLocalePair().getFirst(); + Node deletedNode = newNodeImpl(store, uuid, deletedQNameId, defaultLocaleId, null, null, true); + Long deletedNodeId = deletedNode.getId(); + // Store the original ID as a property + Map trackingProps = Collections.singletonMap(ContentModel.PROP_ORIGINAL_ID, (Serializable) nodeId); + setNodePropertiesImpl(deletedNodeId, trackingProps, true); + } + + @Override + public int purgeNodes(long fromTxnCommitTimeMs, long toTxnCommitTimeMs) + { + return deleteNodesByCommitTime(fromTxnCommitTimeMs, toTxnCommitTimeMs); + } + + /* Node Properties */ + + public Map getNodeProperties(Long nodeId) + { + Map props = getNodePropertiesCached(nodeId); + // Create a shallow copy to allow additions + props = new HashMap(props); + + Node node = getNodeNotNull(nodeId, false); + // Handle sys:referenceable + ReferenceablePropertiesEntity.addReferenceableProperties(node.getId(), node.getNodeRef(), props); + // Handle sys:localized + LocalizedPropertiesEntity.addLocalizedProperties(localeDAO, node, props); + // Handle cm:auditable + if (hasNodeAspect(nodeId, ContentModel.ASPECT_AUDITABLE)) + { + AuditablePropertiesEntity auditableProperties = node.getAuditableProperties(); + if (auditableProperties == null) + { + auditableProperties = new AuditablePropertiesEntity(); + } + props.putAll(auditableProperties.getAuditableProperties()); + } + + // Wrap to ensure that we only clone values if the client attempts to modify + // the map or retrieve values that might, themselves, be mutable + props = new ValueProtectingMap(props, NodePropertyValue.IMMUTABLE_CLASSES); + + // Done + if (isDebugEnabled) + { + logger.debug("Fetched properties for Node: \n" + + " Node: " + nodeId + "\n" + + " Props: " + props); + } + return props; + } + + @Override + public Serializable getNodeProperty(Long nodeId, QName propertyQName) + { + Serializable value = null; + // We have to load the node for cm:auditable + if (AuditablePropertiesEntity.isAuditableProperty(propertyQName)) + { + Node node = getNodeNotNull(nodeId, false); + AuditablePropertiesEntity auditableProperties = node.getAuditableProperties(); + if (auditableProperties != null) + { + value = auditableProperties.getAuditableProperty(propertyQName); + } + } + else if (ReferenceablePropertiesEntity.isReferenceableProperty(propertyQName)) // sys:referenceable + { + Node node = getNodeNotNull(nodeId, false); + value = ReferenceablePropertiesEntity.getReferenceableProperty(node, propertyQName); + } + else if 
(LocalizedPropertiesEntity.isLocalizedProperty(propertyQName)) // sys:localized + { + Node node = getNodeNotNull(nodeId, false); + value = LocalizedPropertiesEntity.getLocalizedProperty(localeDAO, node, propertyQName); + } + else + { + Map props = getNodePropertiesCached(nodeId); + // Wrap to ensure that we only clone values if the client attempts to modify + // the map or retrieve values that might, themselves, be mutable + props = new ValueProtectingMap(props, NodePropertyValue.IMMUTABLE_CLASSES); + // The 'get' here will clone the value if it is mutable + value = props.get(propertyQName); + } + // Done + if (isDebugEnabled) + { + logger.debug("Fetched property for Node: \n" + + " Node: " + nodeId + "\n" + + " QName: " + propertyQName + "\n" + + " Value: " + value); + } + return value; + } + + /** + * Does differencing to add and/or remove properties. Internally, the existing properties will be retrieved and a difference performed to work out which properties need to be created, updated or deleted. + *
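+ * A hypothetical illustration of the differencing: with existing properties
+ * {cm:title=A, cm:description=B} and incoming {cm:title=A, cm:author=C} in replace mode
+ * (isAddOnly == false), cm:title is left alone, cm:description is deleted and cm:author is
+ * inserted; in add-only mode the absent cm:description would simply be ignored.
+ *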

+ * Note: The cached properties are not updated + * + * @param nodeId + * the node ID + * @param newProps + * the properties to add or update + * @param isAddOnly + * true if the new properties are just an update or false if the properties are a complete set + * @return Returns true if any properties were changed + */ + private boolean setNodePropertiesImpl( + Long nodeId, + Map newProps, + boolean isAddOnly) + { + if (isAddOnly && newProps.size() == 0) + { + return false; // No point adding nothing + } + + // Get the current node + Node node = getNodeNotNull(nodeId, false); + // Create an update node + NodeUpdateEntity nodeUpdate = new NodeUpdateEntity(); + nodeUpdate.setId(nodeId); + + // Copy inbound values + newProps = new HashMap(newProps); + + // Copy cm:auditable + if (!policyBehaviourFilter.isEnabled(node.getNodeRef(), ContentModel.ASPECT_AUDITABLE)) + { + // Only bother if cm:auditable properties are present + if (AuditablePropertiesEntity.hasAuditableProperty(newProps.keySet())) + { + AuditablePropertiesEntity auditableProps = node.getAuditableProperties(); + if (auditableProps == null) + { + auditableProps = new AuditablePropertiesEntity(); + } + else + { + auditableProps = new AuditablePropertiesEntity(auditableProps); // Unlocked instance + } + boolean containedAuditProperties = auditableProps.setAuditValues(null, null, newProps); + if (!containedAuditProperties) + { + // Double-check (previous hasAuditableProperty should cover it) + // The behaviour is disabled, but no audit properties were passed in + auditableProps = null; + } + nodeUpdate.setAuditableProperties(auditableProps); + nodeUpdate.setUpdateAuditableProperties(true); + } + } + + // Remove cm:auditable + newProps.keySet().removeAll(AuditablePropertiesEntity.getAuditablePropertyQNames()); + + // Check if the sys:localized property is being changed + Long oldNodeLocaleId = node.getLocaleId(); + Locale newLocale = DefaultTypeConverter.INSTANCE.convert( + Locale.class, + newProps.get(ContentModel.PROP_LOCALE)); + if (newLocale != null) + { + Long newNodeLocaleId = localeDAO.getOrCreateLocalePair(newLocale).getFirst(); + if (!newNodeLocaleId.equals(oldNodeLocaleId)) + { + nodeUpdate.setLocaleId(newNodeLocaleId); + nodeUpdate.setUpdateLocaleId(true); + } + } + // else: a 'null' new locale is completely ignored. This is the behaviour we choose. + + // Remove sys:localized + LocalizedPropertiesEntity.removeLocalizedProperties(node, newProps); + + // Remove sys:referenceable + ReferenceablePropertiesEntity.removeReferenceableProperties(node, newProps); + // Load the current properties. + // This means that we have to go to the DB during cold-write operations, + // but usually a write occurs after a node has been fetched of viewed in + // some way by the client code. Loading the existing properties has the + // advantage that the differencing code can eliminate unnecessary writes + // completely. + Map oldPropsCached = getNodePropertiesCached(nodeId); // Keep pristine for caching + Map oldProps = new HashMap(oldPropsCached); + // If we're adding, remove current properties that are not of interest + if (isAddOnly) + { + oldProps.keySet().retainAll(newProps.keySet()); + } + // We need to convert the new properties to our internally-used format, + // which is compatible with model i.e. people may have passed in data + // which needs to be converted to a model-compliant format. We do this + // before comparisons to avoid false negatives. 
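+ // Hypothetical example of such a false negative: a caller passes the String "42" for a
+ // d:int property whose stored value is the Integer 42. A raw map comparison would flag a
+ // change even though nothing meaningful differs; round-tripping through the persistent
+ // form first means both sides are compared in the same canonical representation.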
+ Map newPropsRaw = nodePropertyHelper.convertToPersistentProperties(newProps); + newProps = nodePropertyHelper.convertToPublicProperties(newPropsRaw); + // Now find out what's changed + Map diff = EqualsHelper.getMapComparison( + oldProps, + newProps); + // Keep track of properties to delete and add + Set propsToDelete = new HashSet(oldProps.size() * 2); + Map propsToAdd = new HashMap(newProps.size() * 2); + Set contentQNamesToDelete = new HashSet(5); + for (Map.Entry entry : diff.entrySet()) + { + QName qname = entry.getKey(); + + PropertyDefinition removePropDef = dictionaryService.getProperty(qname); + boolean isContent = (removePropDef != null && + removePropDef.getDataType().getName().equals(DataTypeDefinition.CONTENT)); + + switch (entry.getValue()) + { + case EQUAL: + // Ignore + break; + case LEFT_ONLY: + // Not in the new properties + propsToDelete.add(qname); + if (isContent) + { + contentQNamesToDelete.add(qname); + } + break; + case NOT_EQUAL: + // Must remove from the LHS + propsToDelete.add(qname); + if (isContent) + { + contentQNamesToDelete.add(qname); + } + // Fall through to load up the RHS + case RIGHT_ONLY: + // We're adding this + Serializable value = newProps.get(qname); + if (isContent && value != null) + { + ContentData newContentData = (ContentData) value; + Long newContentDataId = contentDataDAO.createContentData(newContentData).getFirst(); + value = new ContentDataWithId(newContentData, newContentDataId); + } + propsToAdd.put(qname, value); + break; + default: + throw new IllegalStateException("Unknown MapValueComparison: " + entry.getValue()); + } + } + + boolean modifyProps = propsToDelete.size() > 0 || propsToAdd.size() > 0; + boolean updated = modifyProps || nodeUpdate.isUpdateAnything(); + + // Bring the node into the current transaction + if (nodeUpdate.isUpdateAnything()) + { + // We have to explicitly update the node (sys:locale or cm:auditable) + if (updateNodeImpl(node, nodeUpdate, null)) + { + // Copy the caches across + NodeVersionKey nodeVersionKey = node.getNodeVersionKey(); + NodeVersionKey newNodeVersionKey = getNodeNotNull(nodeId, false).getNodeVersionKey(); + copyNodeAspectsCached(nodeVersionKey, newNodeVersionKey); + copyNodePropertiesCached(nodeVersionKey, newNodeVersionKey); + copyParentAssocsCached(node); + } + } + else if (modifyProps) + { + // Touch the node; all caches are fine + touchNode(nodeId, null, null, false, false, false); + } + + // Touch to bring into current txn + if (modifyProps) + { + // Clean up content properties + try + { + if (contentQNamesToDelete.size() > 0) + { + Set contentQNameIdsToDelete = qnameDAO.convertQNamesToIds(contentQNamesToDelete, false); + contentDataDAO.deleteContentDataForNode(nodeId, contentQNameIdsToDelete); + } + } + catch (Throwable e) + { + throw new AlfrescoRuntimeException( + "Failed to delete content properties: \n" + + " Node: " + nodeId + "\n" + + " Delete Tried: " + contentQNamesToDelete, + e); + } + + try + { + // Apply deletes + Set propQNameIdsToDelete = qnameDAO.convertQNamesToIds(propsToDelete, true); + deleteNodeProperties(nodeId, propQNameIdsToDelete); + // Now create the raw properties for adding + newPropsRaw = nodePropertyHelper.convertToPersistentProperties(propsToAdd); + insertNodeProperties(nodeId, newPropsRaw); + } + catch (Throwable e) + { + // Don't trust the caches for the node + invalidateNodeCaches(nodeId); + // Focused error + throw new AlfrescoRuntimeException( + "Failed to write property deltas: \n" + + " Node: " + nodeId + "\n" + + " Old: " + oldProps + "\n" + + " New: 
" + newProps + "\n" + + " Diff: " + diff + "\n" + + " Delete Tried: " + propsToDelete + "\n" + + " Add Tried: " + propsToAdd, + e); + } + + // Build the properties to cache based on whether this is an append or replace + Map propsToCache = null; + if (isAddOnly) + { + // Copy cache properties for additions + propsToCache = new HashMap(oldPropsCached); + // Combine the old and new properties + propsToCache.putAll(propsToAdd); + } + else + { + // Replace old properties + propsToCache = newProps; + propsToCache.putAll(propsToAdd); // Ensure correct types + } + // Update cache + setNodePropertiesCached(nodeId, propsToCache); + } + + // Done + if (isDebugEnabled && updated) + { + logger.debug( + "Modified node properties: " + nodeId + "\n" + + " Removed: " + propsToDelete + "\n" + + " Added: " + propsToAdd + "\n" + + " Node Update: " + nodeUpdate); + } + return updated; + } + + @Override + public boolean setNodeProperties(Long nodeId, Map properties) + { + // Merge with current values + boolean modified = setNodePropertiesImpl(nodeId, properties, false); + + // Done + return modified; + } + + @Override + public boolean addNodeProperty(Long nodeId, QName qname, Serializable value) + { + // Copy inbound values + Map newProps = new HashMap(3); + newProps.put(qname, value); + // Merge with current values + boolean modified = setNodePropertiesImpl(nodeId, newProps, true); + + // Done + return modified; + } + + @Override + public boolean addNodeProperties(Long nodeId, Map properties) + { + // Merge with current values + boolean modified = setNodePropertiesImpl(nodeId, properties, true); + + // Done + return modified; + } + + @Override + public boolean removeNodeProperties(Long nodeId, Set propertyQNames) + { + propertyQNames = new HashSet(propertyQNames); + ReferenceablePropertiesEntity.removeReferenceableProperties(propertyQNames); + if (propertyQNames.size() == 0) + { + return false; // sys:referenceable properties cannot be removed + } + LocalizedPropertiesEntity.removeLocalizedProperties(propertyQNames); + if (propertyQNames.size() == 0) + { + return false; // sys:localized properties cannot be removed + } + Set qnameIds = qnameDAO.convertQNamesToIds(propertyQNames, false); + int deleteCount = deleteNodeProperties(nodeId, qnameIds); + + if (deleteCount > 0) + { + // Touch the node; all caches are fine + touchNode(nodeId, null, null, false, false, false); + // Get cache props + Map cachedProps = getNodePropertiesCached(nodeId); + // Remove deleted properties + Map props = new HashMap(cachedProps); + props.keySet().removeAll(propertyQNames); + // Update cache + setNodePropertiesCached(nodeId, props); + } + // Done + return deleteCount > 0; + } + + @Override + public boolean setModifiedDate(Long nodeId, Date modifiedDate) + { + return setModifiedProperties(nodeId, modifiedDate, null); + } + + @Override + public boolean setModifiedProperties(Long nodeId, Date modifiedDate, String modifiedBy) + { + // Do nothing if the node is not cm:auditable + if (!hasNodeAspect(nodeId, ContentModel.ASPECT_AUDITABLE)) + { + return false; + } + // Get the node + Node node = getNodeNotNull(nodeId, false); + NodeRef nodeRef = node.getNodeRef(); + // Get the existing auditable values + AuditablePropertiesEntity auditableProps = node.getAuditableProperties(); + boolean dateChanged = false; + if (auditableProps == null) + { + // The properties should be present + auditableProps = new AuditablePropertiesEntity(); + auditableProps.setAuditValues(modifiedBy, modifiedDate, true, 1000L); + dateChanged = true; + } + else + { + 
auditableProps = new AuditablePropertiesEntity(auditableProps); + dateChanged = auditableProps.setAuditModified(modifiedDate, 1000L); + if (dateChanged) + { + auditableProps.setAuditModifier(modifiedBy); + } + } + if (dateChanged) + { + try + { + policyBehaviourFilter.disableBehaviour(nodeRef, ContentModel.ASPECT_AUDITABLE); + // Touch the node; all caches are fine + return touchNode(nodeId, auditableProps, null, false, false, false); + } + finally + { + policyBehaviourFilter.enableBehaviour(nodeRef, ContentModel.ASPECT_AUDITABLE); + } + } + else + { + // Date did not advance + return false; + } + } + + /** + * @return Returns the read-only cached property map + */ + private Map getNodePropertiesCached(Long nodeId) + { + NodeVersionKey nodeVersionKey = getNodeNotNull(nodeId, false).getNodeVersionKey(); + Pair> cacheEntry = propertiesCache.getByKey(nodeVersionKey); + if (cacheEntry == null) + { + invalidateNodeCaches(nodeId); + throw new DataIntegrityViolationException("Invalid node ID: " + nodeId); + } + // We have the properties from the cache + Map cachedProperties = cacheEntry.getSecond(); + return cachedProperties; + } + + /** + * Update the node properties cache. The incoming properties will be wrapped to be unmodifiable. + *
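+ * The cache is keyed by the node's NodeVersionKey, so once the node's version moves on the
+ * stale entry is simply never looked up again (see the version handling in updateNodeImpl).
+ *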

+ * NOTE: Incoming properties must exclude the cm:auditable properties + */ + private void setNodePropertiesCached(Long nodeId, Map properties) + { + NodeVersionKey nodeVersionKey = getNodeNotNull(nodeId, false).getNodeVersionKey(); + propertiesCache.setValue(nodeVersionKey, Collections.unmodifiableMap(properties)); + } + + /** + * Helper method to copy cache values from one key to another + */ + private void copyNodePropertiesCached(NodeVersionKey from, NodeVersionKey to) + { + Map cacheEntry = propertiesCache.getValue(from); + if (cacheEntry != null) + { + propertiesCache.setValue(to, cacheEntry); + } + } + + /** + * Callback to cache node properties. The DAO callback only does the simple {@link #findByKey(Serializable)}. + * + * @author Derek Hulley + * @since 3.4 + */ + private class PropertiesCallbackDAO extends EntityLookupCallbackDAOAdaptor, Serializable> + { + public Pair> createValue(Map value) + { + throw new UnsupportedOperationException("A node always has a 'map' of properties."); + } + + public Pair> findByKey(NodeVersionKey nodeVersionKey) + { + Long nodeId = nodeVersionKey.getNodeId(); + Map> propsRawByNodeVersionKey = selectNodeProperties(nodeId); + Map propsRaw = propsRawByNodeVersionKey.get(nodeVersionKey); + if (propsRaw == null) + { + // Didn't find a match. Is this because there are none? + if (propsRawByNodeVersionKey.size() == 0) + { + // This is OK. The node has no properties + propsRaw = Collections.emptyMap(); + } + else + { + // We found properties associated with a different node ID and version + invalidateNodeCaches(nodeId); + throw new DataIntegrityViolationException( + "Detected stale node entry: " + nodeVersionKey + + " (now " + propsRawByNodeVersionKey.keySet() + ")"); + } + } + // Convert to public properties + Map props = nodePropertyHelper.convertToPublicProperties(propsRaw); + // Done + return new Pair>(nodeVersionKey, Collections.unmodifiableMap(props)); + } + } + + /* Aspects */ + + @Override + public Set getNodeAspects(Long nodeId) + { + Set nodeAspects = getNodeAspectsCached(nodeId); + // Nodes are always referenceable + nodeAspects.add(ContentModel.ASPECT_REFERENCEABLE); + // Nodes are always localized + nodeAspects.add(ContentModel.ASPECT_LOCALIZED); + return nodeAspects; + } + + @Override + public boolean hasNodeAspect(Long nodeId, QName aspectQName) + { + if (aspectQName.equals(ContentModel.ASPECT_REFERENCEABLE)) + { + // Nodes are always referenceable + return true; + } + if (aspectQName.equals(ContentModel.ASPECT_LOCALIZED)) + { + // Nodes are always localized + return true; + } + Set nodeAspects = getNodeAspectsCached(nodeId); + return nodeAspects.contains(aspectQName); + } + + @Override + public boolean addNodeAspects(Long nodeId, Set aspectQNames) + { + if (aspectQNames.size() == 0) + { + return false; + } + // Copy the inbound set + Set aspectQNamesToAdd = new HashSet(aspectQNames); + // Get existing + Set existingAspectQNames = getNodeAspectsCached(nodeId); + // Find out what needs adding + aspectQNamesToAdd.removeAll(existingAspectQNames); + aspectQNamesToAdd.remove(ContentModel.ASPECT_REFERENCEABLE); // Implicit + aspectQNamesToAdd.remove(ContentModel.ASPECT_LOCALIZED); // Implicit + if (aspectQNamesToAdd.isEmpty()) + { + // Nothing to do + return false; + } + // Add them + Set aspectQNameIds = qnameDAO.convertQNamesToIds(aspectQNamesToAdd, true); + startBatch(); + try + { + for (Long aspectQNameId : aspectQNameIds) + { + insertNodeAspect(nodeId, aspectQNameId); + } + } + catch (RuntimeException e) + { + // This could be because the 
cache is out of date + invalidateNodeCaches(nodeId); + throw e; + } + finally + { + executeBatch(); + } + + // Collate the new aspect set, so that touch recognizes the addtion of cm:auditable + Set newAspectQNames = new HashSet(existingAspectQNames); + newAspectQNames.addAll(aspectQNamesToAdd); + + // Handle sys:aspect_root + if (aspectQNames.contains(ContentModel.ASPECT_ROOT)) + { + // invalidate root nodes cache for the store + StoreRef storeRef = getNodeNotNull(nodeId, false).getStore().getStoreRef(); + allRootNodesCache.remove(storeRef); + // Touch the node; parent assocs need invalidation + touchNode(nodeId, null, newAspectQNames, false, false, true); + } + else + { + // Touch the node; all caches are fine + touchNode(nodeId, null, newAspectQNames, false, false, false); + } + + // Manually update the cache + setNodeAspectsCached(nodeId, newAspectQNames); + + // Done + return true; + } + + public boolean removeNodeAspects(Long nodeId) + { + Set newAspectQNames = Collections. emptySet(); + + // Touch the node; all caches are fine + touchNode(nodeId, null, newAspectQNames, false, false, false); + + // Just delete all the node's aspects + int deleteCount = deleteNodeAspects(nodeId, null); + + // Manually update the cache + setNodeAspectsCached(nodeId, newAspectQNames); + + // Done + return deleteCount > 0; + } + + @Override + public boolean removeNodeAspects(Long nodeId, Set aspectQNames) + { + if (aspectQNames.size() == 0) + { + return false; + } + // Get the current aspects + Set existingAspectQNames = getNodeAspects(nodeId); + + // Collate the new set of aspects so that touch works correctly against cm:auditable + Set newAspectQNames = new HashSet(existingAspectQNames); + newAspectQNames.removeAll(aspectQNames); + + // Touch the node; all caches are fine + touchNode(nodeId, null, newAspectQNames, false, false, false); + + // Now remove each aspect + Set aspectQNameIdsToRemove = qnameDAO.convertQNamesToIds(aspectQNames, false); + int deleteCount = deleteNodeAspects(nodeId, aspectQNameIdsToRemove); + if (deleteCount == 0) + { + return false; + } + + // Handle sys:aspect_root + if (aspectQNames.contains(ContentModel.ASPECT_ROOT)) + { + // invalidate root nodes cache for the store + StoreRef storeRef = getNodeNotNull(nodeId, false).getStore().getStoreRef(); + allRootNodesCache.remove(storeRef); + // Touch the node; parent assocs need invalidation + touchNode(nodeId, null, newAspectQNames, false, false, true); + } + else + { + // Touch the node; all caches are fine + touchNode(nodeId, null, newAspectQNames, false, false, false); + } + + // Manually update the cache + setNodeAspectsCached(nodeId, newAspectQNames); + + // Done + return deleteCount > 0; + } + + @Override + public void getNodesWithAspects( + Set aspectQNames, + Long minNodeId, Long maxNodeId, + NodeRefQueryCallback resultsCallback) + { + Set qnameIdsSet = qnameDAO.convertQNamesToIds(aspectQNames, false); + if (qnameIdsSet.size() == 0) + { + // No point running a query + return; + } + List qnameIds = new ArrayList(qnameIdsSet); + selectNodesWithAspects(qnameIds, minNodeId, maxNodeId, resultsCallback); + } + + @Override + public void getNodesWithAspects( + Set aspectQNames, + Long minNodeId, Long maxNodeId, boolean ordered, + NodeRefQueryCallback resultsCallback) + { + Set qnameIdsSet = qnameDAO.convertQNamesToIds(aspectQNames, false); + if (qnameIdsSet.size() == 0) + { + // No point running a query + return; + } + List qnameIds = new ArrayList(qnameIdsSet); + selectNodesWithAspects(qnameIds, minNodeId, maxNodeId, ordered, 
resultsCallback); + } + + @Override + public void getNodesWithAspects( + Set aspectQNames, + Long minNodeId, Long maxNodeId, boolean ordered, + int maxResults, + NodeRefQueryCallback resultsCallback) + { + Set qnameIdsSet = qnameDAO.convertQNamesToIds(aspectQNames, false); + if (qnameIdsSet.isEmpty()) + { + // No point running a query + return; + } + List qnameIds = new ArrayList<>(qnameIdsSet); + selectNodesWithAspects(qnameIds, minNodeId, maxNodeId, ordered, maxResults, resultsCallback); + } + + /** + * @return Returns a writable copy of the cached aspects set + */ + private Set getNodeAspectsCached(Long nodeId) + { + NodeVersionKey nodeVersionKey = getNodeNotNull(nodeId, false).getNodeVersionKey(); + Pair> cacheEntry = aspectsCache.getByKey(nodeVersionKey); + if (cacheEntry == null) + { + invalidateNodeCaches(nodeId); + throw new DataIntegrityViolationException("Invalid node ID: " + nodeId); + } + return new HashSet(cacheEntry.getSecond()); + } + + /** + * Update the node aspects cache. The incoming set will be wrapped to be unmodifiable. + */ + private void setNodeAspectsCached(Long nodeId, Set aspects) + { + NodeVersionKey nodeVersionKey = getNodeNotNull(nodeId, false).getNodeVersionKey(); + aspectsCache.setValue(nodeVersionKey, Collections.unmodifiableSet(aspects)); + } + + /** + * Helper method to copy cache values from one key to another + */ + private void copyNodeAspectsCached(NodeVersionKey from, NodeVersionKey to) + { + Set cacheEntry = aspectsCache.getValue(from); + if (cacheEntry != null) + { + aspectsCache.setValue(to, cacheEntry); + } + } + + /** + * Callback to cache node aspects. The DAO callback only does the simple {@link #findByKey(Serializable)}. + * + * @author Derek Hulley + * @since 3.4 + */ + private class AspectsCallbackDAO extends EntityLookupCallbackDAOAdaptor, Serializable> + { + public Pair> createValue(Set value) + { + throw new UnsupportedOperationException("A node always has a 'set' of aspects."); + } + + public Pair> findByKey(NodeVersionKey nodeVersionKey) + { + Long nodeId = nodeVersionKey.getNodeId(); + Set nodeIds = Collections.singleton(nodeId); + Map> nodeAspectQNameIdsByVersionKey = selectNodeAspects(nodeIds); + Set nodeAspectQNames = nodeAspectQNameIdsByVersionKey.get(nodeVersionKey); + if (nodeAspectQNames == null) + { + // Didn't find a match. Is this because there are none? + if (nodeAspectQNameIdsByVersionKey.size() == 0) + { + // This is OK. 
The node has no properties + nodeAspectQNames = Collections.emptySet(); + } + else + { + // We found properties associated with a different node ID and version + invalidateNodeCaches(nodeId); + throw new DataIntegrityViolationException( + "Detected stale node entry: " + nodeVersionKey + + " (now " + nodeAspectQNameIdsByVersionKey.keySet() + ")"); + } + } + // Done + return new Pair>(nodeVersionKey, Collections.unmodifiableSet(nodeAspectQNames)); + } + } + + /* Node assocs */ + + @Override + public Long newNodeAssoc(Long sourceNodeId, Long targetNodeId, QName assocTypeQName, int assocIndex) + { + if (assocIndex == 0) + { + throw new IllegalArgumentException("Index is 1-based, or -1 to indicate 'next value'."); + } + + // Touch the node; all caches are fine + touchNode(sourceNodeId, null, null, false, false, false); + + // Resolve type QName + Long assocTypeQNameId = qnameDAO.getOrCreateQName(assocTypeQName).getFirst(); + + // Get the current max; we will need this no matter what + if (assocIndex <= 0) + { + int maxIndex = selectNodeAssocMaxIndex(sourceNodeId, assocTypeQNameId); + assocIndex = maxIndex + 1; + } + + Long result = null; + Savepoint savepoint = controlDAO.createSavepoint("NodeService.newNodeAssoc"); + try + { + result = insertNodeAssoc(sourceNodeId, targetNodeId, assocTypeQNameId, assocIndex); + controlDAO.releaseSavepoint(savepoint); + return result; + } + catch (Throwable e) + { + controlDAO.rollbackToSavepoint(savepoint); + if (isDebugEnabled) + { + logger.debug( + "Failed to insert node association: \n" + + " sourceNodeId: " + sourceNodeId + "\n" + + " targetNodeId: " + targetNodeId + "\n" + + " assocTypeQName: " + assocTypeQName + "\n" + + " assocIndex: " + assocIndex, + e); + } + throw new AssociationExistsException(sourceNodeId, targetNodeId, assocTypeQName); + } + } + + @Override + public void setNodeAssocIndex(Long id, int assocIndex) + { + int updated = updateNodeAssoc(id, assocIndex); + if (updated != 1) + { + throw new ConcurrencyFailureException("Expected to update exactly one row: " + id); + } + } + + @Override + public int removeNodeAssoc(Long sourceNodeId, Long targetNodeId, QName assocTypeQName) + { + Pair assocTypeQNamePair = qnameDAO.getQName(assocTypeQName); + if (assocTypeQNamePair == null) + { + // Never existed + return 0; + } + + Long assocTypeQNameId = assocTypeQNamePair.getFirst(); + int deleted = deleteNodeAssoc(sourceNodeId, targetNodeId, assocTypeQNameId); + if (deleted > 0) + { + // Touch the node; all caches are fine + touchNode(sourceNodeId, null, null, false, false, false); + } + return deleted; + } + + @Override + public int removeNodeAssocs(List ids) + { + int toDelete = ids.size(); + if (toDelete == 0) + { + return 0; + } + int deleted = deleteNodeAssocs(ids); + if (toDelete != deleted) + { + throw new ConcurrencyFailureException("Deleted " + deleted + " but expected " + toDelete); + } + return deleted; + } + + @Override + public Collection> getNodeAssocsToAndFrom(Long nodeId) + { + List nodeAssocEntities = selectNodeAssocs(nodeId); + List> results = new ArrayList>(nodeAssocEntities.size()); + for (NodeAssocEntity nodeAssocEntity : nodeAssocEntities) + { + Long assocId = nodeAssocEntity.getId(); + AssociationRef assocRef = nodeAssocEntity.getAssociationRef(qnameDAO); + results.add(new Pair(assocId, assocRef)); + } + return results; + } + + @Override + public Collection> getSourceNodeAssocs(Long targetNodeId, QName typeQName) + { + Long typeQNameId = null; + if (typeQName != null) + { + Pair typeQNamePair = qnameDAO.getQName(typeQName); + if 
(typeQNamePair == null) + { + // No such QName + return Collections.emptyList(); + } + typeQNameId = typeQNamePair.getFirst(); + } + List nodeAssocEntities = selectNodeAssocsByTarget(targetNodeId, typeQNameId); + List> results = new ArrayList>(nodeAssocEntities.size()); + for (NodeAssocEntity nodeAssocEntity : nodeAssocEntities) + { + Long assocId = nodeAssocEntity.getId(); + AssociationRef assocRef = nodeAssocEntity.getAssociationRef(qnameDAO); + results.add(new Pair(assocId, assocRef)); + } + return results; + } + + @Override + public Collection> getTargetNodeAssocs(Long sourceNodeId, QName typeQName) + { + Long typeQNameId = null; + if (typeQName != null) + { + Pair typeQNamePair = qnameDAO.getQName(typeQName); + if (typeQNamePair == null) + { + // No such QName + return Collections.emptyList(); + } + typeQNameId = typeQNamePair.getFirst(); + } + List nodeAssocEntities = selectNodeAssocsBySource(sourceNodeId, typeQNameId); + List> results = new ArrayList>(nodeAssocEntities.size()); + for (NodeAssocEntity nodeAssocEntity : nodeAssocEntities) + { + Long assocId = nodeAssocEntity.getId(); + AssociationRef assocRef = nodeAssocEntity.getAssociationRef(qnameDAO); + results.add(new Pair(assocId, assocRef)); + } + return results; + } + + @Override + public Collection> getTargetAssocsByPropertyValue(Long sourceNodeId, QName typeQName, QName propertyQName, Serializable propertyValue) + { + Long typeQNameId = null; + if (typeQName != null) + { + Pair typeQNamePair = qnameDAO.getQName(typeQName); + if (typeQNamePair == null) + { + // No such QName + return Collections.emptyList(); + } + typeQNameId = typeQNamePair.getFirst(); + } + + Long propertyQNameId = null; + NodePropertyValue nodeValue = null; + if (propertyQName != null) + { + + Pair propQNamePair = qnameDAO.getQName(propertyQName); + if (propQNamePair == null) + { + // No such QName + return Collections.emptyList(); + } + + propertyQNameId = propQNamePair.getFirst(); + + PropertyDefinition propertyDef = dictionaryService.getProperty(propertyQName); + + nodeValue = nodePropertyHelper.makeNodePropertyValue(propertyDef, propertyValue); + if (nodeValue != null) + { + switch (nodeValue.getPersistedType()) + { + case 1: // Boolean + case 3: // long + case 5: // double + case 6: // string + // no floats due to the range errors testing equality on a float. 
+ break; + default: + throw new IllegalArgumentException("method not supported for persisted value type " + nodeValue.getPersistedType()); + } + } + } + + List nodeAssocEntities = selectNodeAssocsBySourceAndPropertyValue(sourceNodeId, typeQNameId, propertyQNameId, nodeValue); + + // Create custom result + List> results = new ArrayList>(nodeAssocEntities.size()); + for (NodeAssocEntity nodeAssocEntity : nodeAssocEntities) + { + Long assocId = nodeAssocEntity.getId(); + AssociationRef assocRef = nodeAssocEntity.getAssociationRef(qnameDAO); + results.add(new Pair(assocId, assocRef)); + } + return results; + } + + @Override + public Pair getNodeAssocOrNull(Long assocId) + { + NodeAssocEntity nodeAssocEntity = selectNodeAssocById(assocId); + if (nodeAssocEntity == null) + { + return null; + } + else + { + AssociationRef assocRef = nodeAssocEntity.getAssociationRef(qnameDAO); + return new Pair(assocId, assocRef); + } + } + + @Override + public Pair getNodeAssoc(Long assocId) + { + Pair ret = getNodeAssocOrNull(assocId); + if (ret == null) + { + throw new ConcurrencyFailureException("Assoc ID does not point to a valid association: " + assocId); + } + else + { + return ret; + } + } + + /* Child assocs */ + + private ChildAssocEntity newChildAssocImpl( + Long parentNodeId, + Long childNodeId, + boolean isPrimary, + final QName assocTypeQName, + QName assocQName, + final String childNodeName, + boolean allowDeletedChild) + { + Assert.notNull(parentNodeId, "parentNodeId"); + Assert.notNull(childNodeId, "childNodeId"); + Assert.notNull(assocTypeQName, "assocTypeQName"); + Assert.notNull(assocQName, "assocQName"); + Assert.notNull(childNodeName, "childNodeName"); + + // Get parent and child nodes. We need them later, so just get them now. + final Node parentNode = getNodeNotNull(parentNodeId, true); + final Node childNode = getNodeNotNull(childNodeId, !allowDeletedChild); + + final ChildAssocEntity assoc = new ChildAssocEntity(); + // Parent node + assoc.setParentNode(new NodeEntity(parentNode)); + // Child node + assoc.setChildNode(new NodeEntity(childNode)); + // Type QName + assoc.setTypeQNameAll(qnameDAO, assocTypeQName, true); + // Child node name + assoc.setChildNodeNameAll(dictionaryService, assocTypeQName, childNodeName); + // QName + assoc.setQNameAll(qnameDAO, assocQName, true); + // Primary + assoc.setPrimary(isPrimary); + // Index + assoc.setAssocIndex(-1); + + Long assocId = newChildAssocInsert(assoc, assocTypeQName, childNodeName); + + // Persist it + assoc.setId(assocId); + + // Primary associations accompany new nodes, so we only have to bring the + // node into the current transaction for secondary associations + if (!isPrimary) + { + updateNode(childNodeId, null, null); + } + + // Done + if (isDebugEnabled) + { + logger.debug("Created child association: " + assoc); + } + return assoc; + } + + protected Long newChildAssocInsert(final ChildAssocEntity assoc, final QName assocTypeQName, final String childNodeName) + { + // Because we are retrying in-transaction i.e. absorbing exceptions, we need partial rollback &/or via savepoint if needed (eg. 
PostgreSQL) + RetryingCallback callback = new RetryingCallback() { + public Long execute() throws Throwable + { + return newChildAssocInsertImpl(assoc, assocTypeQName, childNodeName); + } + }; + Long assocId = childAssocRetryingHelper.doWithRetry(callback); + return assocId; + } + + protected Long newChildAssocInsertImpl(final ChildAssocEntity assoc, final QName assocTypeQName, final String childNodeName) + { + Savepoint savepoint = controlDAO.createSavepoint("DuplicateChildNodeNameException"); + try + { + Long id = insertChildAssoc(assoc); + controlDAO.releaseSavepoint(savepoint); + return id; + } + catch (Throwable e) + { + controlDAO.rollbackToSavepoint(savepoint); + // DuplicateChildNodeNameException implements DoNotRetryException. + + // Allow real DB concurrency issues (e.g. DeadlockLoserDataAccessException) straight through for a retry + if (e instanceof ConcurrencyFailureException) + { + throw e; + } + + // There are some cases - FK violations, specifically - where we DO actually want to retry. + // Detecting this is done by looking for the related FK names, 'fk_alf_cass_*' in the error message + String lowerMsg = e.getMessage().toLowerCase(); + if (lowerMsg.contains("fk_alf_cass_")) + { + throw new ConcurrencyFailureException("FK violation updating primary parent association:" + assoc, e); + } + + // We assume that this is from the child cm:name constraint violation + throw new DuplicateChildNodeNameException( + assoc.getParentNode().getNodeRef(), + assocTypeQName, + childNodeName, + e); + } + } + + @Override + public Pair newChildAssoc( + Long parentNodeId, + Long childNodeId, + QName assocTypeQName, + QName assocQName, + String childNodeName) + { + ParentAssocsInfo parentAssocInfo = getParentAssocsCached(childNodeId); + // Create it + ChildAssocEntity assoc = newChildAssocImpl( + parentNodeId, childNodeId, false, assocTypeQName, assocQName, childNodeName, false); + Long assocId = assoc.getId(); + // Touch the node; parent assocs have been updated + touchNode(childNodeId, null, null, false, false, true); + // update cache + parentAssocInfo = parentAssocInfo.addAssoc(assocId, assoc); + setParentAssocsCached(childNodeId, parentAssocInfo); + // Done + return assoc.getPair(qnameDAO); + } + + @Override + public void deleteChildAssoc(Long assocId) + { + ChildAssocEntity assoc = selectChildAssoc(assocId); + if (assoc == null) + { + throw new ConcurrencyFailureException( + "Child association not found: " + assocId + ". A concurrency violation is likely.\n" + + "This can also occur if code reacts to 'beforeDelete' callbacks and pre-emptively deletes associations \n" + + "that are about to be cascade-deleted. 
The 'onDelete' phase then fails to delete the association.\n" + + "See links on issue ALF-12358."); // TODO: Get docs URL + } + // Update cache + Long childNodeId = assoc.getChildNode().getId(); + ParentAssocsInfo parentAssocInfo = getParentAssocsCached(childNodeId); + // Delete it + List assocIds = Collections.singletonList(assocId); + int count = deleteChildAssocs(assocIds); + if (count != 1) + { + throw new ConcurrencyFailureException("Child association not deleted: " + assocId); + } + // Touch the node; parent assocs have been updated + touchNode(childNodeId, null, null, false, false, true); + // Update cache + parentAssocInfo = parentAssocInfo.removeAssoc(assocId); + setParentAssocsCached(childNodeId, parentAssocInfo); + } + + @Override + public int setChildAssocIndex(Long parentNodeId, Long childNodeId, QName assocTypeQName, QName assocQName, int index) + { + int count = updateChildAssocIndex(parentNodeId, childNodeId, assocTypeQName, assocQName, index); + if (count > 0) + { + // Touch the node; parent assocs are out of sync + touchNode(childNodeId, null, null, false, false, true); + } + return count; + } + + /** + * TODO: See about pulling automatic cm:name update logic into this DAO + */ + @Override + public void setChildAssocsUniqueName(Long childNodeId, String childName) + { + Integer count = setChildAssocsUniqueNameImpl(childNodeId, childName); + + if (count > 0) + { + // Touch the node; parent assocs are out of sync + touchNode(childNodeId, null, null, false, false, true); + } + + if (isDebugEnabled) + { + logger.debug( + "Updated cm:name to parent assocs: \n" + + " Node: " + childNodeId + "\n" + + " Name: " + childName + "\n" + + " Updated: " + count); + } + } + + protected int setChildAssocsUniqueNameImpl(final Long childNodeId, final String childName) + { + // Because we are retrying in-transaction i.e. absorbing exceptions, we need partial rollback &/or via savepoint if needed (eg. PostgreSQL) + RetryingCallback callback = new RetryingCallback() { + public Integer execute() throws Throwable + { + return updateChildAssocUniqueNameImpl(childNodeId, childName); + } + }; + return childAssocRetryingHelper.doWithRetry(callback); + } + + protected int updateChildAssocUniqueNameImpl(final Long childNodeId, final String childName) + { + int total = 0; + Savepoint savepoint = controlDAO.createSavepoint("DuplicateChildNodeNameException"); + try + { + for (ChildAssocEntity parentAssoc : getParentAssocsCached(childNodeId).getParentAssocs().values()) + { + // Subtlety: We only update those associations for which name uniqueness checking is enforced. 
+ // Such associations have a positive CRC + if (parentAssoc.getChildNodeNameCrc() <= 0) + { + continue; + } + Pair oldTypeQnamePair = qnameDAO.getQName(parentAssoc.getTypeQNameId()); + // Ensure we invalidate the name cache (the child version key might not be 'bumped' by the next + // 'touch') + if (oldTypeQnamePair != null) + { + childByNameCache.remove(new ChildByNameKey(parentAssoc.getParentNode().getId(), + oldTypeQnamePair.getSecond(), parentAssoc.getChildNodeName())); + } + int count = updateChildAssocUniqueName(parentAssoc.getId(), childName); + if (count <= 0) + { + // Should not be attempting to delete a deleted node + throw new ConcurrencyFailureException("Failed to update an existing parent association " + + parentAssoc.getId()); + } + total += count; + } + controlDAO.releaseSavepoint(savepoint); + return total; + } + catch (Throwable e) + { + controlDAO.rollbackToSavepoint(savepoint); + // We assume that this is from the child cm:name constraint violation + throw new DuplicateChildNodeNameException(null, null, childName, e); + } + } + + @Override + public Pair getChildAssoc(Long assocId) + { + ChildAssocEntity assoc = selectChildAssoc(assocId); + if (assoc == null) + { + throw new ConcurrencyFailureException("Child association not found: " + assocId); + } + return assoc.getPair(qnameDAO); + } + + @Override + public List getPrimaryChildrenAcls(Long nodeId) + { + return selectPrimaryChildAcls(nodeId); + } + + @Override + public Pair getChildAssoc( + Long parentNodeId, + Long childNodeId, + QName assocTypeQName, + QName assocQName) + { + List assocs = selectChildAssoc(parentNodeId, childNodeId, assocTypeQName, assocQName); + if (assocs.size() == 0) + { + return null; + } + else if (assocs.size() == 1) + { + return assocs.get(0).getPair(qnameDAO); + } + // Keep the primary association or, if there isn't one, the association with the smallest ID + Map assocsToDeleteById = new HashMap(assocs.size() * 2); + Long minId = null; + Long primaryId = null; + for (ChildAssocEntity assoc : assocs) + { + // First store it + Long assocId = assoc.getId(); + assocsToDeleteById.put(assocId, assoc); + if (minId == null || minId.compareTo(assocId) > 0) + { + minId = assocId; + } + if (assoc.isPrimary()) + { + primaryId = assocId; + } + } + // Remove either the primary or min assoc + Long assocToKeepId = primaryId == null ? minId : primaryId; + ChildAssocEntity assocToKeep = assocsToDeleteById.remove(assocToKeepId); + // If the current transaction allows, remove the other associations + if (AlfrescoTransactionSupport.getTransactionReadState() == TxnReadState.TXN_READ_WRITE) + { + for (Long assocIdToDelete : assocsToDeleteById.keySet()) + { + deleteChildAssoc(assocIdToDelete); + } + } + // Done + return assocToKeep.getPair(qnameDAO); + } + + /** + * Callback that applies node preloading if required. + *

+ * Instances must be used and discarded per query. + * + * @author Derek Hulley + * @since 3.4 + */ + private class ChildAssocRefBatchingQueryCallback implements ChildAssocRefQueryCallback + { + private final ChildAssocRefQueryCallback callback; + private final boolean preload; + private final List nodeRefs; + + /** + * @param callback + * the callback to batch around + */ + private ChildAssocRefBatchingQueryCallback(ChildAssocRefQueryCallback callback) + { + this.callback = callback; + this.preload = callback.preLoadNodes(); + if (preload) + { + nodeRefs = new LinkedList(); // No memory required + } + else + { + nodeRefs = null; // No list needed + } + } + + /** + * @throws UnsupportedOperationException + * always + */ + public boolean preLoadNodes() + { + throw new UnsupportedOperationException("Expected to be used internally only."); + } + + /** + * Defers to delegate + */ + @Override + public boolean orderResults() + { + return callback.orderResults(); + } + + /** + * {@inheritDoc} + */ + public boolean handle( + Pair childAssocPair, + Pair parentNodePair, + Pair childNodePair) + { + if (preload) + { + nodeRefs.add(childNodePair.getSecond()); + } + return callback.handle(childAssocPair, parentNodePair, childNodePair); + } + + public void done() + { + // Finish the batch + if (preload && nodeRefs.size() > 0) + { + cacheNodes(nodeRefs); + nodeRefs.clear(); + } + // Done + callback.done(); + } + } + + @Override + public void getChildAssocs( + Long parentNodeId, + Long childNodeId, + QName assocTypeQName, + QName assocQName, + Boolean isPrimary, + Boolean sameStore, + ChildAssocRefQueryCallback resultsCallback) + { + selectChildAssocs( + parentNodeId, childNodeId, + assocTypeQName, assocQName, isPrimary, sameStore, + new ChildAssocRefBatchingQueryCallback(resultsCallback)); + } + + @Override + public void getChildAssocs( + Long parentNodeId, + QName assocTypeQName, + QName assocQName, + int maxResults, + ChildAssocRefQueryCallback resultsCallback) + { + selectChildAssocs( + parentNodeId, + assocTypeQName, + assocQName, + maxResults, + new ChildAssocRefBatchingQueryCallback(resultsCallback)); + } + + @Override + public void getChildAssocs(Long parentNodeId, Set assocTypeQNames, ChildAssocRefQueryCallback resultsCallback) + { + switch (assocTypeQNames.size()) + { + case 0: + return; // No results possible + case 1: + QName assocTypeQName = assocTypeQNames.iterator().next(); + selectChildAssocs( + parentNodeId, null, assocTypeQName, (QName) null, null, null, + new ChildAssocRefBatchingQueryCallback(resultsCallback)); + break; + default: + selectChildAssocs( + parentNodeId, assocTypeQNames, + new ChildAssocRefBatchingQueryCallback(resultsCallback)); + } + } + + /** + * Checks a cache and then queries. + *
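+ * The cache key is (parent node ID, association type, child cm:name); a cached entry is only reused if the child node's version key still matches the entry in the nodes cache.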

+ * Note: If we were to cach misses, then we would have to ensure that the cache is kept up to date whenever any affection association is changed. This is actually not possible without forcing the cache to be fully clustered. So to avoid clustering the cache, we instead watch the node child version, which relies on a cache that is already clustered. + */ + @Override + public Pair getChildAssoc(Long parentNodeId, QName assocTypeQName, String childName) + { + ChildByNameKey key = new ChildByNameKey(parentNodeId, assocTypeQName, childName); + ChildAssocEntity assoc = childByNameCache.get(key); + boolean query = false; + if (assoc == null) + { + query = true; + } + else + { + // Check that the resultant child node has not moved on + Node childNode = assoc.getChildNode(); + Long childNodeId = childNode.getId(); + NodeVersionKey childNodeVersionKey = childNode.getNodeVersionKey(); + Pair childNodeFromCache = nodesCache.getByKey(childNodeId); + if (childNodeFromCache == null) + { + // Child node no longer exists (or never did) + query = true; + } + else + { + NodeVersionKey childNodeFromCacheVersionKey = childNodeFromCache.getSecond().getNodeVersionKey(); + if (!childNodeFromCacheVersionKey.equals(childNodeVersionKey)) + { + // The child node has moved on. We don't know why, but must query again. + query = true; + } + } + } + if (query) + { + assoc = selectChildAssoc(parentNodeId, assocTypeQName, childName); + if (assoc != null) + { + childByNameCache.put(key, assoc); + } + else + { + // We do not cache misses. See javadoc. + } + } + // Now return, checking the assoc's ID for null + return assoc == null ? null : assoc.getPair(qnameDAO); + } + + @Override + public void getChildAssocs( + Long parentNodeId, + QName assocTypeQName, + Collection childNames, + ChildAssocRefQueryCallback resultsCallback) + { + selectChildAssocs( + parentNodeId, assocTypeQName, childNames, + new ChildAssocRefBatchingQueryCallback(resultsCallback)); + } + + @Override + public void getChildAssocsByPropertyValue( + Long parentNodeId, + QName propertyQName, + Serializable value, + ChildAssocRefQueryCallback resultsCallback) + { + PropertyDefinition propertyDef = dictionaryService.getProperty(propertyQName); + NodePropertyValue nodeValue = nodePropertyHelper.makeNodePropertyValue(propertyDef, value); + + if (nodeValue != null) + { + switch (nodeValue.getPersistedType()) + { + case 1: // Boolean + case 3: // long + case 5: // double + case 6: // string + // no floats due to the range errors testing equality on a float. 
+ break; + + default: + throw new IllegalArgumentException("method not supported for persisted value type " + nodeValue.getPersistedType()); + } + + selectChildAssocsByPropertyValue(parentNodeId, + propertyQName, + nodeValue, + new ChildAssocRefBatchingQueryCallback(resultsCallback)); + } + } + + @Override + public void getChildAssocsByChildTypes( + Long parentNodeId, + Set childNodeTypeQNames, + ChildAssocRefQueryCallback resultsCallback) + { + selectChildAssocsByChildTypes( + parentNodeId, childNodeTypeQNames, + new ChildAssocRefBatchingQueryCallback(resultsCallback)); + } + + @Override + public void getChildAssocsWithoutParentAssocsOfType( + Long parentNodeId, + QName assocTypeQName, + ChildAssocRefQueryCallback resultsCallback) + { + selectChildAssocsWithoutParentAssocsOfType( + parentNodeId, assocTypeQName, + new ChildAssocRefBatchingQueryCallback(resultsCallback)); + } + + @Override + public Pair getPrimaryParentAssoc(Long childNodeId) + { + ChildAssocEntity childAssocEntity = getPrimaryParentAssocImpl(childNodeId); + if (childAssocEntity == null) + { + return null; + } + else + { + return childAssocEntity.getPair(qnameDAO); + } + } + + private ChildAssocEntity getPrimaryParentAssocImpl(Long childNodeId) + { + ParentAssocsInfo parentAssocs = getParentAssocsCached(childNodeId); + return parentAssocs.getPrimaryParentAssoc(); + } + + private static final int PARENT_ASSOCS_CACHE_FILTER_THRESHOLD = 2000; + + @Override + public void getParentAssocs( + Long childNodeId, + QName assocTypeQName, + QName assocQName, + Boolean isPrimary, + ChildAssocRefQueryCallback resultsCallback) + { + if (assocTypeQName == null && assocQName == null && isPrimary == null) + { + // Go for the cache (and return all) + ParentAssocsInfo parentAssocs = getParentAssocsCached(childNodeId); + for (ChildAssocEntity assoc : parentAssocs.getParentAssocs().values()) + { + resultsCallback.handle( + assoc.getPair(qnameDAO), + assoc.getParentNode().getNodePair(), + assoc.getChildNode().getNodePair()); + } + resultsCallback.done(); + } + else + { + // Decide whether we query or filter + ParentAssocsInfo parentAssocs = getParentAssocsCached(childNodeId); + if (parentAssocs.getParentAssocs().size() > PARENT_ASSOCS_CACHE_FILTER_THRESHOLD) + { + // Query + selectParentAssocs(childNodeId, assocTypeQName, assocQName, isPrimary, resultsCallback); + } + else + { + // Go for the cache (and filter) + for (ChildAssocEntity assoc : parentAssocs.getParentAssocs().values()) + { + Pair assocPair = assoc.getPair(qnameDAO); + if (((assocTypeQName == null) || (assocPair.getSecond().getTypeQName().equals(assocTypeQName))) && + ((assocQName == null) || (assocPair.getSecond().getQName().equals(assocQName)))) + { + resultsCallback.handle( + assocPair, + assoc.getParentNode().getNodePair(), + assoc.getChildNode().getNodePair()); + } + } + resultsCallback.done(); + } + + } + } + + /** + * Potentially cheaper than evaluating all of a node's paths to check for child association cycles + *
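+ * The check recurses through the child associations of the starting node and fails fast when a node ID is revisited on the current descent path.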

+ * TODO: When is it cheaper to go up and when is it cheaper to go down? Look at using direct queries to pass through layers both up and down. + * + * @param nodeId + * the node to start with + */ + @Override + public void cycleCheck(Long nodeId) + { + CycleCallBack callback = new CycleCallBack(); + callback.cycleCheck(nodeId); + if (callback.toThrow != null) + { + throw callback.toThrow; + } + } + + private class CycleCallBack implements ChildAssocRefQueryCallback + { + final Set nodeIds = new HashSet(97); + CyclicChildRelationshipException toThrow; + + @Override + public void done() + {} + + @Override + public boolean handle( + Pair childAssocPair, + Pair parentNodePair, + Pair childNodePair) + { + Long nodeId = childNodePair.getFirst(); + if (!nodeIds.add(nodeId)) + { + ChildAssociationRef childAssociationRef = childAssocPair.getSecond(); + // Remember exception we want to throw and exit. If we throw within here, it will be wrapped by IBatis + toThrow = new CyclicChildRelationshipException( + "Child Association Cycle detected hitting nodes: " + nodeIds, + childAssociationRef); + return false; + } + cycleCheck(nodeId); + nodeIds.remove(nodeId); + return toThrow == null; + } + + /** + * No preloading required + */ + @Override + public boolean preLoadNodes() + { + return false; + } + + /** + * No ordering required + */ + @Override + public boolean orderResults() + { + return false; + } + + public void cycleCheck(Long nodeId) + { + getChildAssocs(nodeId, null, null, null, null, null, this); + } + }; + + @Override + public List getPaths(Pair nodePair, boolean primaryOnly) throws InvalidNodeRefException + { + // create storage for the paths - only need 1 bucket if we are looking for the primary path + List paths = new ArrayList(primaryOnly ? 1 : 10); + // create an empty current path to start from + Path currentPath = new Path(); + // create storage for touched associations + Stack assocIdStack = new Stack(); + + // call recursive method to sort it out + prependPaths(nodePair, null, currentPath, paths, assocIdStack, primaryOnly); + + // check that for the primary only case we have exactly one path + if (primaryOnly && paths.size() != 1) + { + throw new RuntimeException("Node has " + paths.size() + " primary paths: " + nodePair); + } + + // done + if (loggerPaths.isDebugEnabled()) + { + StringBuilder sb = new StringBuilder(256); + if (primaryOnly) + { + sb.append("Primary paths"); + } + else + { + sb.append("Paths"); + } + sb.append(" for node ").append(nodePair); + for (Path path : paths) + { + sb.append("\n").append(" ").append(path); + } + loggerPaths.debug(sb); + } + return paths; + } + + private void bindFixAssocAndCollectLostAndFound(final Pair lostNodePair, final String lostName, final Long assocId, final boolean orphanChild) + { + // Remember the items already deleted in inner transactions + final Set> lostNodePairs = TransactionalResourceHelper.getSet(KEY_LOST_NODE_PAIRS); + final Set deletedAssocs = TransactionalResourceHelper.getSet(KEY_DELETED_ASSOCS); + AlfrescoTransactionSupport.bindListener(new TransactionListenerAdapter() { + @Override + public void afterRollback() + { + afterCommit(); + } + + @Override + public void afterCommit() + { + if (transactionService.getAllowWrite()) + { + // New transaction + RetryingTransactionCallback callback = new RetryingTransactionCallback() { + public Void execute() throws Throwable + { + if (assocId == null) + { + // 'child' with missing parent assoc => collect lost+found orphan child + if (lostNodePairs.add(lostNodePair)) + { + 
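+ // First time this orphan has been seen in the transaction: re-home it under lost_found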
collectLostAndFoundNode(lostNodePair, lostName); + logger.error("ALF-13066: Orphan child node has been re-homed under lost_found: " + + lostNodePair); + } + } + else + { + // 'child' with deleted parent assoc => delete invalid parent assoc and if primary then + // collect lost+found orphan child + if (deletedAssocs.add(assocId)) + { + deleteChildAssoc(assocId); // Can't use caching version or may hit infinite loop + logger.error("ALF-12358: Deleted node - removed child assoc: " + assocId); + } + + if (orphanChild && lostNodePairs.add(lostNodePair)) + { + collectLostAndFoundNode(lostNodePair, lostName); + logger.error("ALF-12358: Orphan child node has been re-homed under lost_found: " + + lostNodePair); + } + } + + return null; + } + }; + transactionService.getRetryingTransactionHelper().doInTransaction(callback, false, true); + } + } + }); + } + + /** + * TODO: Remove once ALF-12358 has been proven to be fixed i.e. no more orphans are created ... ever. + */ + private void collectLostAndFoundNode(Pair lostNodePair, String lostName) + { + Long childNodeId = lostNodePair.getFirst(); + NodeRef lostNodeRef = lostNodePair.getSecond(); + + Long newParentNodeId = getOrCreateLostAndFoundContainer(lostNodeRef.getStoreRef()).getId(); + + String assocName = lostName + "-" + System.currentTimeMillis(); + // Create new primary assoc (re-home the orphan node under lost_found) + ChildAssocEntity assoc = newChildAssocImpl(newParentNodeId, + childNodeId, + true, + ContentModel.ASSOC_CHILDREN, + QName.createQName(assocName), + assocName, + true); + + // Touch the node; all caches are fine + touchNode(childNodeId, null, null, false, false, false); + + // update cache + boolean isRoot = false; + boolean isStoreRoot = false; + ParentAssocsInfo parentAssocInfo = new ParentAssocsInfo(isRoot, isStoreRoot, assoc); + setParentAssocsCached(childNodeId, parentAssocInfo); + + /* // Update ACLs for moved tree - note: actually a NOOP if oldParentAclId is null Long newParentAclId = newParentNode.getAclId(); Long oldParentAclId = null; // unknown accessControlListDAO.updateInheritance(childNodeId, oldParentAclId, newParentAclId); */ + } + + private Node getOrCreateLostAndFoundContainer(StoreRef storeRef) + { + Pair rootNodePair = getRootNode(storeRef); + Long rootParentNodeId = rootNodePair.getFirst(); + + final List> nodes = new ArrayList>(1); + NodeDAO.ChildAssocRefQueryCallback callback = new NodeDAO.ChildAssocRefQueryCallback() { + public boolean handle( + Pair childAssocPair, + Pair parentNodePair, + Pair childNodePair) + { + nodes.add(childNodePair); + // More results + return true; + } + + @Override + public boolean preLoadNodes() + { + return false; + } + + @Override + public boolean orderResults() + { + return false; + } + + @Override + public void done() + {} + }; + Set assocTypeQNames = new HashSet(1); + assocTypeQNames.add(ContentModel.ASSOC_LOST_AND_FOUND); + getChildAssocs(rootParentNodeId, assocTypeQNames, callback); + + Node lostFoundNode = null; + if (nodes.size() > 0) + { + Long lostFoundNodeId = nodes.get(0).getFirst(); + lostFoundNode = getNodeNotNull(lostFoundNodeId, true); + if (nodes.size() > 1) + { + logger.warn("More than one lost_found, using first: " + lostFoundNode.getNodeRef()); + } + } + else + { + Locale locale = localeDAO.getOrCreateDefaultLocalePair().getSecond(); + + lostFoundNode = newNode( + rootParentNodeId, + ContentModel.ASSOC_LOST_AND_FOUND, + ContentModel.ASSOC_LOST_AND_FOUND, + storeRef, + null, + ContentModel.TYPE_LOST_AND_FOUND, + locale, + 
ContentModel.ASSOC_LOST_AND_FOUND.getLocalName(), + null).getChildNode(); + + logger.info("Created lost_found: " + lostFoundNode.getNodeRef()); + } + + return lostFoundNode; + } + + /** + * Build the paths for a node + * + * @param currentNodePair + * the leave or child node to start with + * @param currentRootNodePair + * pass in null only + * @param currentPath + * an empty {@link Path} + * @param completedPaths + * completed paths i.e. the result + * @param assocIdStack + * a stack to detected cyclic relationships + * @param primaryOnly + * true to follow only primary parent associations + * @throws CyclicChildRelationshipException + */ + private void prependPaths( + Pair currentNodePair, + Pair currentRootNodePair, + Path currentPath, + Collection completedPaths, + Stack assocIdStack, + boolean primaryOnly) throws CyclicChildRelationshipException + { + if (isDebugEnabled) + { + logger.debug("\n" + + "Prepending paths: \n" + + " Current node: " + currentNodePair + "\n" + + " Current root: " + currentRootNodePair + "\n" + + " Current path: " + currentPath); + } + Long currentNodeId = currentNodePair.getFirst(); + NodeRef currentNodeRef = currentNodePair.getSecond(); + + // Check if we have changed root nodes + StoreRef currentStoreRef = currentNodeRef.getStoreRef(); + if (currentRootNodePair == null || !currentStoreRef.equals(currentRootNodePair.getFirst())) + { + // We've changed stores + Pair rootNodePair = getRootNode(currentStoreRef); + currentRootNodePair = new Pair(currentStoreRef, rootNodePair.getSecond()); + } + + // get the parent associations of the given node + ParentAssocsInfo parentAssocInfo = getParentAssocsCached(currentNodeId); // note: currently may throw NotLiveNodeException + // bulk load parents as we are certain to hit them in the next call + ArrayList toLoad = new ArrayList(parentAssocInfo.getParentAssocs().size()); + for (Map.Entry entry : parentAssocInfo.getParentAssocs().entrySet()) + { + toLoad.add(entry.getValue().getParentNode().getId()); + } + cacheNodesById(toLoad); + + // does the node have parents + boolean hasParents = parentAssocInfo.getParentAssocs().size() > 0; + // does the current node have a root aspect? + + // look for a root. If we only want the primary root, then ignore all but the top-level root. + if (!(primaryOnly && hasParents) && parentAssocInfo.isRoot()) // exclude primary search with parents present + { + // create a one-sided assoc ref for the root node and prepend to the stack + // this effectively spoofs the fact that the current node is not below the root + // - we put this assoc in as the first assoc in the path must be a one-sided + // reference pointing to the root node + ChildAssociationRef assocRef = new ChildAssociationRef(null, null, null, currentRootNodePair.getSecond()); + // create a path to save and add the 'root' assoc + Path pathToSave = new Path(); + Path.ChildAssocElement first = null; + for (Path.Element element : currentPath) + { + if (first == null) + { + first = (Path.ChildAssocElement) element; + } + else + { + pathToSave.append(element); + } + } + if (first != null) + { + // mimic an association that would appear if the current node was below the root node + // or if first beneath the root node it will make the real thing + ChildAssociationRef updateAssocRef = new ChildAssociationRef( + parentAssocInfo.isStoreRoot() ? 
ContentModel.ASSOC_CHILDREN : first.getRef().getTypeQName(), + currentRootNodePair.getSecond(), + first.getRef().getQName(), + first.getRef().getChildRef()); + Path.Element newFirst = new Path.ChildAssocElement(updateAssocRef); + pathToSave.prepend(newFirst); + } + + Path.Element element = new Path.ChildAssocElement(assocRef); + pathToSave.prepend(element); + + // store the path just built + completedPaths.add(pathToSave); + } + + // walk up each parent association + for (Map.Entry entry : parentAssocInfo.getParentAssocs().entrySet()) + { + Long assocId = entry.getKey(); + ChildAssocEntity assoc = entry.getValue(); + ChildAssociationRef assocRef = assoc.getRef(qnameDAO); + // do we consider only primary assocs? + if (primaryOnly && !assocRef.isPrimary()) + { + continue; + } + // Ordering is meaningless here as we are constructing a path upwards + // and have no idea where the node comes in the sibling order or even + // if there are like-pathed siblings. + assocRef.setNthSibling(-1); + // build a path element + Path.Element element = new Path.ChildAssocElement(assocRef); + // create a new path that builds on the current path + Path path = new Path(); + path.append(currentPath); + // prepend element + path.prepend(element); + // get parent node pair + Pair parentNodePair = new Pair( + assoc.getParentNode().getId(), + assocRef.getParentRef()); + + // does the association already exist in the stack + if (assocIdStack.contains(assocId)) + { + // the association was present already + logger.error( + "Cyclic parent-child relationship detected: \n" + + " current node: " + currentNodeId + "\n" + + " current path: " + currentPath + "\n" + + " next assoc: " + assocId); + throw new CyclicChildRelationshipException("Node has been pasted into its own tree.", assocRef); + } + + if (isDebugEnabled) + { + logger.debug("\n" + + " Prepending path parent: \n" + + " Parent node: " + parentNodePair); + } + + // push the assoc stack, recurse and pop + assocIdStack.push(assocId); + + prependPaths(parentNodePair, currentRootNodePair, path, completedPaths, assocIdStack, primaryOnly); + + assocIdStack.pop(); + } + // done + } + + /** + * A Map-like class for storing ParentAssocsInfos. It prunes its oldest ParentAssocsInfo entries not only when a capacity is reached, but also when a total number of cached parents is reached, as this is what dictates the overall memory usage. 
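+ * Insertion order is tracked with the nextKeys/previousKeys maps, which act as a doubly-linked list over the cache keys so the oldest entry can be evicted first.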
+ */ + private static class ParentAssocsCache + { + private final ReadWriteLock lock = new ReentrantReadWriteLock(); + private final int size; + private final int maxParentCount; + private final Map, ParentAssocsInfo> cache; + private final Map, Pair> nextKeys; + private final Map, Pair> previousKeys; + private Pair firstKey; + private Pair lastKey; + private int parentCount; + + /** + * @param size + * int + * @param limitFactor + * int + */ + public ParentAssocsCache(int size, int limitFactor) + { + this.size = size; + this.maxParentCount = size * limitFactor; + final int mapSize = size * 2; + this.cache = new HashMap, ParentAssocsInfo>(mapSize); + this.nextKeys = new HashMap, Pair>(mapSize); + this.previousKeys = new HashMap, Pair>(mapSize); + } + + private ParentAssocsInfo get(Pair cacheKey) + { + lock.readLock().lock(); + try + { + return cache.get(cacheKey); + } + finally + { + lock.readLock().unlock(); + } + } + + private void put(Pair cacheKey, ParentAssocsInfo parentAssocs) + { + lock.writeLock().lock(); + try + { + // If an entry already exists, remove it and do the necessary housekeeping + if (cache.containsKey(cacheKey)) + { + remove(cacheKey); + } + + // Add the value and prepend the key + cache.put(cacheKey, parentAssocs); + if (firstKey == null) + { + lastKey = cacheKey; + } + else + { + nextKeys.put(cacheKey, firstKey); + previousKeys.put(firstKey, cacheKey); + } + firstKey = cacheKey; + parentCount += parentAssocs.getParentAssocs().size(); + + // Now prune the oldest entries whilst we have more cache entries or cached parents than desired + int currentSize = cache.size(); + while (currentSize > size || parentCount > maxParentCount) + { + remove(lastKey); + currentSize--; + } + } + finally + { + lock.writeLock().unlock(); + } + } + + private ParentAssocsInfo remove(Pair cacheKey) + { + lock.writeLock().lock(); + try + { + // Remove from the map + ParentAssocsInfo oldParentAssocs = cache.remove(cacheKey); + + // If the object didn't exist, we are done + if (oldParentAssocs == null) + { + return null; + } + + // Re-link the list + Pair previousCacheKey = previousKeys.remove(cacheKey); + Pair nextCacheKey = nextKeys.remove(cacheKey); + if (nextCacheKey == null) + { + if (previousCacheKey == null) + { + firstKey = lastKey = null; + } + else + { + lastKey = previousCacheKey; + nextKeys.remove(previousCacheKey); + } + } + else + { + if (previousCacheKey == null) + { + firstKey = nextCacheKey; + previousKeys.remove(nextCacheKey); + } + else + { + nextKeys.put(previousCacheKey, nextCacheKey); + previousKeys.put(nextCacheKey, previousCacheKey); + } + } + // Update the parent count + parentCount -= oldParentAssocs.getParentAssocs().size(); + return oldParentAssocs; + } + finally + { + lock.writeLock().unlock(); + } + } + + private void clear() + { + lock.writeLock().lock(); + try + { + cache.clear(); + nextKeys.clear(); + previousKeys.clear(); + firstKey = lastKey = null; + parentCount = 0; + } + finally + { + lock.writeLock().unlock(); + } + } + } + + /** + * @return Returns a node's parent associations + */ + private ParentAssocsInfo getParentAssocsCached(Long nodeId) + { + Node node = getNodeNotNull(nodeId, false); + Pair cacheKey = new Pair(nodeId, node.getTransaction().getChangeTxnId()); + ParentAssocsInfo value = parentAssocsCache.get(cacheKey); + if (value == null) + { + value = loadParentAssocs(node.getNodeVersionKey()); + parentAssocsCache.put(cacheKey, value); + } + + // We have already validated on loading that we have a list in sync with the child node, so if the list is 
still + // empty we have an integrity problem + if (value.getPrimaryParentAssoc() == null && !node.getDeleted(qnameDAO) && !value.isStoreRoot()) + { + Pair currentNodePair = node.getNodePair(); + // We have a corrupt repository - non-root node has a missing parent ?! + bindFixAssocAndCollectLostAndFound(currentNodePair, "nonRootNodeWithoutParents", null, false); + + // throw - error will be logged and then bound txn listener (afterRollback) will be called + throw new NonRootNodeWithoutParentsException(currentNodePair); + } + + return value; + } + + /** + * Update a node's parent associations. + */ + private void setParentAssocsCached(Long nodeId, ParentAssocsInfo parentAssocs) + { + Node node = getNodeNotNull(nodeId, false); + Pair cacheKey = new Pair(nodeId, node.getTransaction().getChangeTxnId()); + parentAssocsCache.put(cacheKey, parentAssocs); + } + + /** + * Helper method to copy cache values from one key to another + */ + private void copyParentAssocsCached(Node from) + { + String fromTransactionId = from.getTransaction().getChangeTxnId(); + String toTransactionId = getCurrentTransaction().getChangeTxnId(); + // If the node is already in this transaction, there's nothing to do + if (fromTransactionId.equals(toTransactionId)) + { + return; + } + Pair cacheKey = new Pair(from.getId(), fromTransactionId); + ParentAssocsInfo cacheEntry = parentAssocsCache.get(cacheKey); + if (cacheEntry != null) + { + parentAssocsCache.put(new Pair(from.getId(), toTransactionId), cacheEntry); + } + } + + /** + * Helper method to remove associations relating to a cached node + */ + private void invalidateParentAssocsCached(Node node) + { + // Invalidate both the node and current transaction ID, just in case + Long nodeId = node.getId(); + String nodeTransactionId = node.getTransaction().getChangeTxnId(); + parentAssocsCache.remove(new Pair(nodeId, nodeTransactionId)); + if (AlfrescoTransactionSupport.getTransactionReadState() == TxnReadState.TXN_READ_WRITE) + { + String currentTransactionId = getCurrentTransaction().getChangeTxnId(); + if (!currentTransactionId.equals(nodeTransactionId)) + { + parentAssocsCache.remove(new Pair(nodeId, currentTransactionId)); + } + } + } + + private ParentAssocsInfo loadParentAssocs(NodeVersionKey nodeVersionKey) + { + Long nodeId = nodeVersionKey.getNodeId(); + // Find out if it is a root or store root + boolean isRoot = hasNodeAspect(nodeId, ContentModel.ASPECT_ROOT); + boolean isStoreRoot = getNodeType(nodeId).equals(ContentModel.TYPE_STOREROOT); + + // Select all the parent associations + List assocs = selectParentAssocs(nodeId); + + // Build the cache object + ParentAssocsInfo value = new ParentAssocsInfo(isRoot, isStoreRoot, assocs); + + // Now check if we are seeing the correct version of the node + if (assocs.isEmpty()) + { + // No results. + // Nodes without parents are root nodes or deleted nodes. The latter will not normally + // be accessed here but it is possible. + // To match earlier fixes of ALF-12393, we do a double-check of the node's details. + NodeEntity nodeCheckFromDb = selectNodeById(nodeId); + if (nodeCheckFromDb == null || !nodeCheckFromDb.getNodeVersionKey().equals(nodeVersionKey)) + { + // The node is gone or has moved on in version + invalidateNodeCaches(nodeId); + throw new DataIntegrityViolationException( + "Detected stale node entry: " + nodeVersionKey + + " (now " + nodeCheckFromDb + ")"); + } + } + else + { + ChildAssocEntity childAssoc = assocs.get(0); + // What is the real (at least to this txn) version of the child node? 
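+ // Compare the child node's version key from the selected association with the version we were asked to load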
+ NodeVersionKey childNodeVersionKeyFromDb = childAssoc.getChildNode().getNodeVersionKey(); + if (!childNodeVersionKeyFromDb.equals(nodeVersionKey)) + { + // This method was called with a stale version + invalidateNodeCaches(nodeId); + throw new DataIntegrityViolationException( + "Detected stale node entry: " + nodeVersionKey + + " (now " + childNodeVersionKeyFromDb + ")"); + } + } + return value; + } + + /* Bulk caching */ + + @Override + public void setCheckNodeConsistency() + { + if (nodesTransactionalCache != null) + { + nodesTransactionalCache.setDisableSharedCacheReadForTransaction(true); + } + } + + @Override + public Set getCachedAncestors(List nodeIds) + { + // First, make sure 'level 1' nodes and their parents are in the cache + cacheNodesById(nodeIds); + for (Long nodeId : nodeIds) + { + // Filter out deleted nodes + if (exists(nodeId)) + { + getParentAssocsCached(nodeId); + } + } + // Now recurse on all ancestors in the cache + Set ancestors = new TreeSet(); + for (Long nodeId : nodeIds) + { + findCachedAncestors(nodeId, ancestors); + } + return ancestors; + } + + /** + * Uses the node and parent assocs cache content to recursively find the set of currently cached ancestor node IDs + */ + private void findCachedAncestors(Long nodeId, Set ancestors) + { + if (!ancestors.add(nodeId)) + { + return; // Already visited + } + Node node = nodesCache.getValue(nodeId); + if (node == null) + { + return; // Not in cache yet - will load in due course + } + Pair cacheKey = new Pair(nodeId, node.getTransaction().getChangeTxnId()); + ParentAssocsInfo value = parentAssocsCache.get(cacheKey); + if (value == null) + { + return; // Not in cache yet - will load in due course + } + for (ChildAssocEntity childAssoc : value.getParentAssocs().values()) + { + findCachedAncestors(childAssoc.getParentNode().getId(), ancestors); + } + } + + @Override + public void cacheNodesById(List nodeIds) + { + /* ALF-2712: Performance degradation from 3.1.0 to 3.1.2 ALF-2784: Degradation of performance between 3.1.1 and 3.2x (observed in JSF) + * + * There is an obvious cost associated with querying the database to pull back nodes, and there is additional cost associated with putting the resultant entries into the caches. It is NO MORE expensive to check the cache than it is to put an entry into it - and probably cheaper considering cache replication - so we start checking nodes to see if they have entries before passing them over for batch loading. + * + * However, when running against a cold cache or doing a first-time query against some part of the repo, we will be checking for entries in the cache and consistently getting no results. To avoid unnecessary checking when the cache is PROBABLY cold, we examine the ratio of hits/misses at regular intervals. */ + + boolean disableSharedCacheReadForTransaction = false; + if (nodesTransactionalCache != null) + { + disableSharedCacheReadForTransaction = nodesTransactionalCache.getDisableSharedCacheReadForTransaction(); + } + + if ((disableSharedCacheReadForTransaction == false) && nodeIds.size() < 10) + { + // We only cache where the number of results is potentially + // a problem for the N+1 loading that might result. + return; + } + + int foundCacheEntryCount = 0; + int missingCacheEntryCount = 0; + boolean forceBatch = false; + + List batchLoadNodeIds = new ArrayList(nodeIds.size()); + for (Long nodeId : nodeIds) + { + if (!forceBatch) + { + // Is this node in the cache? 
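+ // A non-null cached value is a hit; otherwise fall through and add the ID to the batch load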
+ if (nodesCache.getValue(nodeId) != null) + { + foundCacheEntryCount++; // Don't add it to the batch + continue; + } + else + { + missingCacheEntryCount++; // Fall through and add it to the batch + } + if (foundCacheEntryCount + missingCacheEntryCount % 100 == 0) + { + // We force the batch if the number of hits drops below the number of misses + forceBatch = foundCacheEntryCount < missingCacheEntryCount; + } + } + + batchLoadNodeIds.add(nodeId); + } + + int size = batchLoadNodeIds.size(); + cacheNodesBatch(batchLoadNodeIds); + + if (logger.isDebugEnabled()) + { + logger.debug("Pre-loaded " + size + " nodes."); + } + } + + /** + * {@inheritDoc} + *

+ * Loads properties, aspects, parent associations and the ID-noderef cache. + */ + @Override + public void cacheNodes(List nodeRefs) + { + /* ALF-2712: Performance degradation from 3.1.0 to 3.1.2 ALF-2784: Degradation of performance between 3.1.1 and 3.2x (observed in JSF) + * + * There is an obvious cost associated with querying the database to pull back nodes, and there is additional cost associated with putting the resultant entries into the caches. It is NO MORE expensive to check the cache than it is to put an entry into it - and probably cheaper considering cache replication - so we start checking nodes to see if they have entries before passing them over for batch loading. + * + * However, when running against a cold cache or doing a first-time query against some part of the repo, we will be checking for entries in the cache and consistently getting no results. To avoid unnecessary checking when the cache is PROBABLY cold, we examine the ratio of hits/misses at regular intervals. */ + if (nodeRefs.size() < cachingThreshold) + { + // We only cache where the number of results is potentially + // a problem for the N+1 loading that might result. + return; + } + int foundCacheEntryCount = 0; + int missingCacheEntryCount = 0; + boolean forceBatch = false; + + // Group the nodes by store so that we don't *have* to eagerly join to store to get query performance + Map> uuidsByStore = new HashMap>(3); + for (NodeRef nodeRef : nodeRefs) + { + if (!forceBatch) + { + // Is this node in the cache? + if (nodesCache.getKey(nodeRef) != null) + { + foundCacheEntryCount++; // Don't add it to the batch + continue; + } + else + { + missingCacheEntryCount++; // Fall through and add it to the batch + } + if (foundCacheEntryCount + missingCacheEntryCount % 100 == 0) + { + // We force the batch if the number of hits drops below the number of misses + forceBatch = foundCacheEntryCount < missingCacheEntryCount; + } + } + + StoreRef storeRef = nodeRef.getStoreRef(); + List uuids = (List) uuidsByStore.get(storeRef); + if (uuids == null) + { + uuids = new ArrayList(nodeRefs.size()); + uuidsByStore.put(storeRef, uuids); + } + uuids.add(nodeRef.getId()); + } + int size = nodeRefs.size(); + nodeRefs = null; + // Now load all the nodes + for (Map.Entry> entry : uuidsByStore.entrySet()) + { + StoreRef storeRef = entry.getKey(); + List uuids = entry.getValue(); + cacheNodes(storeRef, uuids); + } + if (logger.isDebugEnabled()) + { + logger.debug("Pre-loaded " + size + " nodes."); + } + } + + /** + * Loads the nodes into cache using batching. 
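+ * UUIDs are queried in fixed-size batches of 256 so that each select stays bounded.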
+     */
+    private void cacheNodes(StoreRef storeRef, List<String> uuids)
+    {
+        StoreEntity store = getStoreNotNull(storeRef);
+        Long storeId = store.getId();
+
+        int batchSize = 256;
+        SortedSet<String> batch = new TreeSet<String>();
+        for (String uuid : uuids)
+        {
+            batch.add(uuid);
+            if (batch.size() >= batchSize)
+            {
+                // Preload
+                List<Node> nodes = selectNodesByUuids(storeId, batch);
+                cacheNodesNoBatch(nodes);
+                batch.clear();
+            }
+        }
+        // Load any remaining nodes
+        if (batch.size() > 0)
+        {
+            List<Node> nodes = selectNodesByUuids(storeId, batch);
+            cacheNodesNoBatch(nodes);
+        }
+    }
+
+    private void cacheNodesBatch(List<Long> nodeIds)
+    {
+        int batchSize = 256;
+        SortedSet<Long> batch = new TreeSet<Long>();
+        for (Long nodeId : nodeIds)
+        {
+            batch.add(nodeId);
+            if (batch.size() >= batchSize)
+            {
+                // Preload
+                List<Node> nodes = selectNodesByIds(batch);
+                cacheNodesNoBatch(nodes);
+                batch.clear();
+            }
+        }
+        // Load any remaining nodes
+        if (batch.size() > 0)
+        {
+            List<Node> nodes = selectNodesByIds(batch);
+            cacheNodesNoBatch(nodes);
+        }
+    }
+
+    /**
+     * Bulk-fetch the nodes for a given store. All nodes passed in are fetched.
+     */
+    private void cacheNodesNoBatch(List<Node> nodes)
+    {
+        // Get the nodes
+        SortedSet<Long> aspectNodeIds = new TreeSet<Long>();
+        SortedSet<Long> propertiesNodeIds = new TreeSet<Long>();
+        Map<Long, NodeVersionKey> nodeVersionKeysFromCache = new HashMap<Long, NodeVersionKey>(nodes.size() * 2); // Keep for quick lookup
+        for (Node node : nodes)
+        {
+            Long nodeId = node.getId();
+            NodeVersionKey nodeVersionKey = node.getNodeVersionKey();
+            node.lock(); // Prevent unexpected edits of values going into the cache
+            nodesCache.setValue(nodeId, node);
+            if (propertiesCache.getValue(nodeVersionKey) == null)
+            {
+                propertiesNodeIds.add(nodeId);
+            }
+            if (aspectsCache.getValue(nodeVersionKey) == null)
+            {
+                aspectNodeIds.add(nodeId);
+            }
+            nodeVersionKeysFromCache.put(nodeId, nodeVersionKey);
+        }
+
+        if (logger.isDebugEnabled())
+        {
+            logger.debug("Pre-loaded " + propertiesNodeIds.size() + " properties");
+            logger.debug("Pre-loaded " + aspectNodeIds.size() + " aspects");
+        }
+
+        Map<NodeVersionKey, Set<QName>> nodeAspects = selectNodeAspects(aspectNodeIds);
+        for (Map.Entry<NodeVersionKey, Set<QName>> entry : nodeAspects.entrySet())
+        {
+            NodeVersionKey nodeVersionKeyFromDb = entry.getKey();
+            Long nodeId = nodeVersionKeyFromDb.getNodeId();
+            Set<QName> qnames = entry.getValue();
+            setNodeAspectsCached(nodeId, qnames);
+            aspectNodeIds.remove(nodeId);
+        }
+        // Cache the absence of aspects too!
+        for (Long nodeId : aspectNodeIds)
+        {
+            setNodeAspectsCached(nodeId, Collections.<QName> emptySet());
+        }
+
+        // First ensure all content data are pre-cached, so we don't have to load them individually when converting properties
+        contentDataDAO.cacheContentDataForNodes(propertiesNodeIds);
+
+        // Now bulk load the properties
+        Map<NodeVersionKey, Map<NodePropertyKey, NodePropertyValue>> propsByNodeId = selectNodeProperties(propertiesNodeIds);
+        for (Map.Entry<NodeVersionKey, Map<NodePropertyKey, NodePropertyValue>> entry : propsByNodeId.entrySet())
+        {
+            Long nodeId = entry.getKey().getNodeId();
+            Map<NodePropertyKey, NodePropertyValue> propertyValues = entry.getValue();
+            Map<QName, Serializable> props = nodePropertyHelper.convertToPublicProperties(propertyValues);
+            setNodePropertiesCached(nodeId, props);
+        }
+    }
+
+    /**
+     * {@inheritDoc}
+     * <p/>
+ * Simply clears out all the node-related caches. + */ + @Override + public void clear() + { + clearCaches(); + } + + /* Transactions */ + + public Long getMaxTxnIdByCommitTime(long maxCommitTime) + { + Transaction txn = selectLastTxnBeforeCommitTime(maxCommitTime); + return (txn == null ? null : txn.getId()); + } + + @Override + public int getTransactionCount() + { + return selectTransactionCount(); + } + + @Override + public Transaction getTxnById(Long txnId) + { + return selectTxnById(txnId); + } + + @Override + public List getTxnChanges(Long txnId) + { + return getTxnChangesForStore(null, txnId); + } + + @Override + public List getTxnChangesForStore(StoreRef storeRef, Long txnId) + { + Long storeId = (storeRef == null) ? null : getStoreNotNull(storeRef).getId(); + List nodes = selectTxnChanges(txnId, storeId); + // Convert + List nodeStatuses = new ArrayList(nodes.size()); + for (NodeEntity node : nodes) + { + nodeStatuses.add(node.getNodeStatus(qnameDAO)); + } + + // Done + return nodeStatuses; + } + + @Override + public List getTxnsUnused(Long minTxnId, long maxCommitTime, int count) + { + return selectTxnsUnused(minTxnId, maxCommitTime, count); + } + + @Override + public void purgeTxn(Long txnId) + { + deleteTransaction(txnId); + } + + public static final Long LONG_ZERO = 0L; + + @Override + public Long getMinTxnCommitTime() + { + Long time = selectMinTxnCommitTime(); + return (time == null ? LONG_ZERO : time); + } + + @Override + public Long getMaxTxnCommitTime() + { + Long time = selectMaxTxnCommitTime(); + return (time == null ? LONG_ZERO : time); + } + + public Long getMinTxnCommitTimeForDeletedNodes() + { + Long time = selectMinTxnCommitTimeForDeletedNodes(); + return (time == null ? LONG_ZERO : time); + } + + @Override + public Long getMinTxnId() + { + Long id = selectMinTxnId(); + return (id == null ? LONG_ZERO : id); + } + + @Override + public Long getMinUnusedTxnCommitTime() + { + Long id = selectMinUnusedTxnCommitTime(); + return (id == null ? LONG_ZERO : id); + } + + @Override + public Long getMaxTxnId() + { + Long id = selectMaxTxnId(); + return (id == null ? 
LONG_ZERO : id); + } + + @Override + public Long getMinTxInNodeIdRange(Long fromNodeId, Long toNodeId) + { + return selectMinTxInNodeIdRange(fromNodeId, toNodeId); + } + + @Override + public Long getMaxTxInNodeIdRange(Long fromNodeId, Long toNodeId) + { + return selectMaxTxInNodeIdRange(fromNodeId, toNodeId); + } + + @Override + public Long getNextTxCommitTime(Long fromCommitTime) + { + return selectNextTxCommitTime(fromCommitTime); + } + + /* Abstract methods for underlying CRUD */ + + protected abstract Long insertTransaction(String changeTxnId, Long commit_time_ms); + + protected abstract int updateTransaction(Long txnId, Long commit_time_ms); + + protected abstract int deleteTransaction(Long txnId); + + protected abstract List selectAllStores(); + + protected abstract StoreEntity selectStore(StoreRef storeRef); + + protected abstract NodeEntity selectStoreRootNode(StoreRef storeRef); + + protected abstract Long insertStore(StoreEntity store); + + protected abstract int updateStoreRoot(StoreEntity store); + + protected abstract int updateStore(StoreEntity store); + + protected abstract int updateNodesInStore(Long txnId, Long storeId); + + protected abstract Long insertNode(NodeEntity node); + + protected abstract int updateNode(NodeUpdateEntity nodeUpdate); + + protected abstract int updateNodes(Long txnId, List nodeIds); + + protected abstract void updatePrimaryChildrenSharedAclId( + Long txnId, + Long primaryParentNodeId, + Long optionalOldSharedAlcIdInAdditionToNull, + Long newSharedAlcId); + + protected abstract int deleteNodeById(Long nodeId); + + protected abstract int deleteNodesByCommitTime(long fromTxnCommitTimeMs, long toTxnCommitTimeMs); + + protected abstract NodeEntity selectNodeById(Long id); + + protected abstract NodeEntity selectNodeByNodeRef(NodeRef nodeRef); + + protected abstract List selectNodesByUuids(Long storeId, SortedSet uuids); + + protected abstract List selectNodesByIds(SortedSet ids); + + protected abstract Map> selectNodeProperties(Set nodeIds); + + protected abstract Map> selectNodeProperties(Long nodeId); + + protected abstract Map> selectNodeProperties(Long nodeId, Set qnameIds); + + protected abstract int deleteNodeProperties(Long nodeId, Set qnameIds); + + protected abstract int deleteNodeProperties(Long nodeId, List propKeys); + + protected abstract void insertNodeProperties(Long nodeId, Map persistableProps); + + protected abstract Map> selectNodeAspects(Set nodeIds); + + protected abstract void insertNodeAspect(Long nodeId, Long qnameId); + + protected abstract int deleteNodeAspects(Long nodeId, Set qnameIds); + + protected abstract void selectNodesWithAspects( + List qnameIds, + Long minNodeId, Long maxNodeId, + NodeRefQueryCallback resultsCallback); + + protected abstract void selectNodesWithAspects( + List qnameIds, + Long minNodeId, Long maxNodeId, boolean ordered, + NodeRefQueryCallback resultsCallback); + + protected abstract void selectNodesWithAspects( + List qnameIds, + Long minNodeId, Long maxNodeId, boolean ordered, int maxResults, + NodeRefQueryCallback resultsCallback); + + protected abstract Long insertNodeAssoc(Long sourceNodeId, Long targetNodeId, Long assocTypeQNameId, int assocIndex); + + protected abstract int updateNodeAssoc(Long id, int assocIndex); + + protected abstract int deleteNodeAssoc(Long sourceNodeId, Long targetNodeId, Long assocTypeQNameId); + + protected abstract int deleteNodeAssocs(List ids); + + protected abstract List selectNodeAssocs(Long nodeId); + + protected abstract List selectNodeAssocsBySource(Long 
sourceNodeId, Long typeQNameId); + + protected abstract List selectNodeAssocsBySourceAndPropertyValue(Long sourceNodeId, Long typeQNameId, Long propertyQNameId, NodePropertyValue nodeValue); + + protected abstract List selectNodeAssocsByTarget(Long targetNodeId, Long typeQNameId); + + protected abstract NodeAssocEntity selectNodeAssocById(Long assocId); + + protected abstract int selectNodeAssocMaxIndex(Long sourceNodeId, Long assocTypeQNameId); + + protected abstract Long insertChildAssoc(ChildAssocEntity assoc); + + protected abstract int deleteChildAssocs(List ids); + + protected abstract int updateChildAssocIndex( + Long parentNodeId, + Long childNodeId, + QName assocTypeQName, + QName assocQName, + int index); + + protected abstract int updateChildAssocUniqueName(Long assocId, String name); + + // protected abstract int deleteChildAssocsToAndFrom(Long nodeId); + protected abstract ChildAssocEntity selectChildAssoc(Long assocId); + + protected abstract List selectChildNodeIds( + Long nodeId, + Boolean isPrimary, + Long minAssocIdInclusive, + int maxResults); + + protected abstract List selectPrimaryChildAcls(Long nodeId); + + protected abstract List selectChildAssoc( + Long parentNodeId, + Long childNodeId, + QName assocTypeQName, + QName assocQName); + + /** + * Parameters are all optional except the parent node ID and the callback + */ + protected abstract void selectChildAssocs( + Long parentNodeId, + Long childNodeId, + QName assocTypeQName, + QName assocQName, + Boolean isPrimary, + Boolean sameStore, + ChildAssocRefQueryCallback resultsCallback); + + protected abstract void selectChildAssocs( + Long parentNodeId, + QName assocTypeQName, + QName assocQName, + int maxResults, + ChildAssocRefQueryCallback resultsCallback); + + protected abstract void selectChildAssocs( + Long parentNodeId, + Set assocTypeQNames, + ChildAssocRefQueryCallback resultsCallback); + + protected abstract ChildAssocEntity selectChildAssoc( + Long parentNodeId, + QName assocTypeQName, + String childName); + + protected abstract void selectChildAssocs( + Long parentNodeId, + QName assocTypeQName, + Collection childNames, + ChildAssocRefQueryCallback resultsCallback); + + protected abstract void selectChildAssocsByPropertyValue( + Long parentNodeId, + QName propertyQName, + NodePropertyValue nodeValue, + ChildAssocRefQueryCallback resultsCallback); + + protected abstract void selectChildAssocsByChildTypes( + Long parentNodeId, + Set childNodeTypeQNames, + ChildAssocRefQueryCallback resultsCallback); + + protected abstract void selectChildAssocsWithoutParentAssocsOfType( + Long parentNodeId, + QName assocTypeQName, + ChildAssocRefQueryCallback resultsCallback); + + /** + * Parameters are all optional except the parent node ID and the callback + */ + protected abstract void selectParentAssocs( + Long childNodeId, + QName assocTypeQName, + QName assocQName, + Boolean isPrimary, + ChildAssocRefQueryCallback resultsCallback); + + protected abstract List selectParentAssocs(Long childNodeId); + + /** + * No DB constraint, so multiple returned + */ + protected abstract List selectPrimaryParentAssocs(Long childNodeId); + + protected abstract int updatePrimaryParentAssocs( + Long childNodeId, + Long parentNodeId, + QName assocTypeQName, + QName assocQName, + String childNodeName); + + /** + * Moves all node-linked data from one node to another. The source node will be left in an orphaned state and without any attached data other than the current transaction. 
+ * + * @param fromNodeId + * the source node + * @param toNodeId + * the target node + */ + protected abstract void moveNodeData(Long fromNodeId, Long toNodeId); + + protected abstract void deleteSubscriptions(Long nodeId); + + protected abstract Transaction selectLastTxnBeforeCommitTime(Long maxCommitTime); + + protected abstract int selectTransactionCount(); + + protected abstract Transaction selectTxnById(Long txnId); + + protected abstract List selectTxnChanges(Long txnId, Long storeId); + + // public for testing + public abstract List selectTxns( + Long fromTimeInclusive, + Long toTimeExclusive, + Integer count, + List includeTxnIds, + List excludeTxnIds, + Boolean ascending); + + protected abstract List selectTxnsUnused(Long minTxnId, Long maxCommitTime, Integer count); + + protected abstract Long selectMinTxnCommitTime(); + + protected abstract Long selectMaxTxnCommitTime(); + + protected abstract Long selectMinTxnCommitTimeForDeletedNodes(); + + protected abstract Long selectMinTxnId(); + + protected abstract Long selectMaxTxnId(); + + protected abstract Long selectMinUnusedTxnCommitTime(); + + protected abstract Long selectMinTxInNodeIdRange(Long fromNodeId, Long toNodeId); + + protected abstract Long selectMaxTxInNodeIdRange(Long fromNodeId, Long toNodeId); + + protected abstract Long selectNextTxCommitTime(Long fromCommitTime); + +} diff --git a/repository/src/main/java/org/alfresco/repo/domain/node/ReferenceablePropertiesEntity.java b/repository/src/main/java/org/alfresco/repo/domain/node/ReferenceablePropertiesEntity.java index b7ca48f447..c1ff706037 100644 --- a/repository/src/main/java/org/alfresco/repo/domain/node/ReferenceablePropertiesEntity.java +++ b/repository/src/main/java/org/alfresco/repo/domain/node/ReferenceablePropertiesEntity.java @@ -2,7 +2,7 @@ * #%L * Alfresco Repository * %% - * Copyright (C) 2005 - 2016 Alfresco Software Limited + * Copyright (C) 2005 - 2025 Alfresco Software Limited * %% * This file is part of the Alfresco software. * If the software was purchased under a paid Alfresco license, the terms of @@ -23,105 +23,102 @@ * along with Alfresco. If not, see . * #L% */ -package org.alfresco.repo.domain.node; - -import java.io.Serializable; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -import org.alfresco.model.ContentModel; -import org.alfresco.service.cmr.repository.NodeRef; -import org.alfresco.service.cmr.repository.datatype.DefaultTypeConverter; -import org.alfresco.service.namespace.QName; - -/** - * Class holding properties associated with the sys:referenceable aspect. - * This aspect is common enough to warrant direct inclusion on the Node entity. 
- * - * @author Derek Hulley - * @since 3.4 - */ -public class ReferenceablePropertiesEntity -{ - private static final Set REFERENCEABLE_PROP_QNAMES; - static - { - REFERENCEABLE_PROP_QNAMES = new HashSet(8); - REFERENCEABLE_PROP_QNAMES.add(ContentModel.PROP_STORE_PROTOCOL); - REFERENCEABLE_PROP_QNAMES.add(ContentModel.PROP_STORE_IDENTIFIER); - REFERENCEABLE_PROP_QNAMES.add(ContentModel.PROP_NODE_UUID); - REFERENCEABLE_PROP_QNAMES.add(ContentModel.PROP_NODE_DBID); - } - - /** - * @return Returns true if the property belongs to the sys:referenceable aspect - */ - public static boolean isReferenceableProperty(QName qname) - { - return REFERENCEABLE_PROP_QNAMES.contains(qname); - } - - /** - * Remove all {@link ContentModel#ASPECT_REFERENCEABLE referencable} properties - */ - public static void removeReferenceableProperties(Node node, Map properties) - { - properties.keySet().removeAll(REFERENCEABLE_PROP_QNAMES); - String name = DefaultTypeConverter.INSTANCE.convert(String.class, properties.get(ContentModel.PROP_NAME)); - if (name != null && name.equals(node.getUuid())) - { - // The cm:name matches the UUID, so drop it - properties.remove(ContentModel.PROP_NAME); - } - } - - /** - * Remove all {@link ContentModel#ASPECT_REFERENCEABLE referencable} properties - */ - public static void removeReferenceableProperties(Set propertyQNames) - { - propertyQNames.removeAll(REFERENCEABLE_PROP_QNAMES); - } - - /** - * Adds all {@link ContentModel#ASPECT_REFERENCEABLE referencable} properties. - */ - public static void addReferenceableProperties(Node node, Map properties) - { - Long nodeId = node.getId(); - NodeRef nodeRef = node.getNodeRef(); - properties.put(ContentModel.PROP_STORE_PROTOCOL, nodeRef.getStoreRef().getProtocol()); - properties.put(ContentModel.PROP_STORE_IDENTIFIER, nodeRef.getStoreRef().getIdentifier()); - properties.put(ContentModel.PROP_NODE_UUID, nodeRef.getId()); - properties.put(ContentModel.PROP_NODE_DBID, nodeId); - // add the ID as the name, if required - String name = DefaultTypeConverter.INSTANCE.convert(String.class, properties.get(ContentModel.PROP_NAME)); - if (name == null) - { - properties.put(ContentModel.PROP_NAME, nodeRef.getId()); - } - } - - public static Serializable getReferenceableProperty(Node node, QName qname) - { - NodeRef nodeRef = node.getNodeRef(); - if (qname.equals(ContentModel.PROP_STORE_PROTOCOL)) - { - return nodeRef.getStoreRef().getProtocol(); - } - else if (qname.equals(ContentModel.PROP_STORE_IDENTIFIER)) - { - return nodeRef.getStoreRef().getIdentifier(); - } - else if (qname.equals(ContentModel.PROP_NODE_UUID)) - { - return nodeRef.getId(); - } - else if (qname.equals(ContentModel.PROP_NODE_DBID)) - { - return node.getId(); - } - throw new IllegalArgumentException("Not sys:referenceable property: " + qname); - } -} +package org.alfresco.repo.domain.node; + +import java.io.Serializable; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import org.alfresco.model.ContentModel; +import org.alfresco.service.cmr.repository.NodeRef; +import org.alfresco.service.cmr.repository.datatype.DefaultTypeConverter; +import org.alfresco.service.namespace.QName; + +/** + * Class holding properties associated with the sys:referenceable aspect. This aspect is common enough to warrant direct inclusion on the Node entity. 
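+ * <p>
+ * For illustration, the spoofed values resolve directly from the node reference and database ID; the node reference below is a made-up example:
+ *
+ * <pre>{@code
+ * NodeRef nodeRef = new NodeRef("workspace://SpacesStore/abc-123");
+ * // sys:store-protocol   -> "workspace"
+ * // sys:store-identifier -> "SpacesStore"
+ * // sys:node-uuid        -> "abc-123"
+ * // sys:node-dbid        -> the node's database ID
+ * // cm:name              -> "abc-123", but only when no cm:name value is present
+ * }</pre>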
+ * + * @author Derek Hulley + * @since 3.4 + */ +public class ReferenceablePropertiesEntity +{ + private static final Set REFERENCEABLE_PROP_QNAMES; + static + { + REFERENCEABLE_PROP_QNAMES = new HashSet(8); + REFERENCEABLE_PROP_QNAMES.add(ContentModel.PROP_STORE_PROTOCOL); + REFERENCEABLE_PROP_QNAMES.add(ContentModel.PROP_STORE_IDENTIFIER); + REFERENCEABLE_PROP_QNAMES.add(ContentModel.PROP_NODE_UUID); + REFERENCEABLE_PROP_QNAMES.add(ContentModel.PROP_NODE_DBID); + } + + /** + * @return Returns true if the property belongs to the sys:referenceable aspect + */ + public static boolean isReferenceableProperty(QName qname) + { + return REFERENCEABLE_PROP_QNAMES.contains(qname); + } + + /** + * Remove all {@link ContentModel#ASPECT_REFERENCEABLE referencable} properties + */ + public static void removeReferenceableProperties(Node node, Map properties) + { + properties.keySet().removeAll(REFERENCEABLE_PROP_QNAMES); + String name = DefaultTypeConverter.INSTANCE.convert(String.class, properties.get(ContentModel.PROP_NAME)); + if (name != null && name.equals(node.getUuid())) + { + // The cm:name matches the UUID, so drop it + properties.remove(ContentModel.PROP_NAME); + } + } + + /** + * Remove all {@link ContentModel#ASPECT_REFERENCEABLE referencable} properties + */ + public static void removeReferenceableProperties(Set propertyQNames) + { + propertyQNames.removeAll(REFERENCEABLE_PROP_QNAMES); + } + + /** + * Adds all {@link ContentModel#ASPECT_REFERENCEABLE referencable} properties. + */ + public static void addReferenceableProperties(Long nodeId, NodeRef nodeRef, Map properties) + { + properties.put(ContentModel.PROP_STORE_PROTOCOL, nodeRef.getStoreRef().getProtocol()); + properties.put(ContentModel.PROP_STORE_IDENTIFIER, nodeRef.getStoreRef().getIdentifier()); + properties.put(ContentModel.PROP_NODE_UUID, nodeRef.getId()); + properties.put(ContentModel.PROP_NODE_DBID, nodeId); + // add the ID as the name, if required + String name = DefaultTypeConverter.INSTANCE.convert(String.class, properties.get(ContentModel.PROP_NAME)); + if (name == null) + { + properties.put(ContentModel.PROP_NAME, nodeRef.getId()); + } + } + + public static Serializable getReferenceableProperty(Node node, QName qname) + { + NodeRef nodeRef = node.getNodeRef(); + if (qname.equals(ContentModel.PROP_STORE_PROTOCOL)) + { + return nodeRef.getStoreRef().getProtocol(); + } + else if (qname.equals(ContentModel.PROP_STORE_IDENTIFIER)) + { + return nodeRef.getStoreRef().getIdentifier(); + } + else if (qname.equals(ContentModel.PROP_NODE_UUID)) + { + return nodeRef.getId(); + } + else if (qname.equals(ContentModel.PROP_NODE_DBID)) + { + return node.getId(); + } + throw new IllegalArgumentException("Not sys:referenceable property: " + qname); + } +} diff --git a/repository/src/main/java/org/alfresco/repo/node/getchildren/FilterSortNodeEntity.java b/repository/src/main/java/org/alfresco/repo/node/getchildren/FilterSortNodeEntity.java index adc222e8e6..7916f104d0 100644 --- a/repository/src/main/java/org/alfresco/repo/node/getchildren/FilterSortNodeEntity.java +++ b/repository/src/main/java/org/alfresco/repo/node/getchildren/FilterSortNodeEntity.java @@ -2,7 +2,7 @@ * #%L * Alfresco Repository * %% - * Copyright (C) 2005 - 2016 Alfresco Software Limited + * Copyright (C) 2005 - 2025 Alfresco Software Limited * %% * This file is part of the Alfresco software. * If the software was purchased under a paid Alfresco license, the terms of @@ -23,239 +23,290 @@ * along with Alfresco. If not, see . 
* #L% */ -package org.alfresco.repo.node.getchildren; - -import java.util.List; -import java.util.Set; - -import org.alfresco.repo.domain.node.NodeEntity; -import org.alfresco.repo.domain.node.NodePropertyEntity; - -/** - * Filterable/Sortable Node Entity - * - * Can be optionally filtered/sorted by (up to) three properties - note: sort properties are applied in order - * - * @author jan - * @since 4.0 - */ -public class FilterSortNodeEntity -{ - private Long id; // node id - - private NodeEntity node; - private NodePropertyEntity prop1; - private NodePropertyEntity prop2; - private NodePropertyEntity prop3; - - // Supplemental query-related parameters - private Long parentNodeId; - private Long prop1qnameId; - private Long prop2qnameId; - private Long prop3qnameId; - private List childNodeTypeQNameIds; - private Set assocTypeQNameIds; - private String pattern; - private Long namePropertyQNameId; - private boolean auditableProps; - private boolean nodeType; - - private Boolean isPrimary; - - /** - * Default constructor - */ - public FilterSortNodeEntity() - { - auditableProps = false; - } - - public Long getId() - { - return id; - } - - public void setId(Long id) - { - this.id = id; - } - - public String getPattern() - { - return pattern; - } - - protected String escape(String s, char escapeChar) - { - StringBuilder sb = new StringBuilder(); - int idx = -1; - int offset = 0; - do - { - idx = s.indexOf(escapeChar, offset); - if(idx != -1) - { - sb.append(s.substring(offset, idx)); - sb.append("\\"); - sb.append(escapeChar); - offset = idx + 1; - } - } - while(idx != -1); - sb.append(s.substring(offset)); - return sb.toString(); - } - - public void setPattern(String pattern) - { - if(pattern != null) - { - // escape the '%' character with '\' (standard SQL escape character) - pattern = escape(pattern, '%'); - // replace the wildcard character '*' with the one used in database queries i.e. 
'%' - this.pattern = pattern.replace('*', '%'); - } - } - - public void setAssocTypeQNameIds(Set assocTypeQNameIds) - { - this.assocTypeQNameIds = assocTypeQNameIds; - } - - public Set getAssocTypeQNameIds() - { - return assocTypeQNameIds; - } - - public Long getNamePropertyQNameId() - { - return namePropertyQNameId; - } - - public void setNamePropertyQNameId(Long namePropertyQNameId) - { - this.namePropertyQNameId = namePropertyQNameId; - } - - public NodePropertyEntity getProp1() - { - return prop1; - } - - public void setProp1(NodePropertyEntity prop1) - { - this.prop1 = prop1; - } - - public NodePropertyEntity getProp2() - { - return prop2; - } - - public void setProp2(NodePropertyEntity prop2) - { - this.prop2 = prop2; - } - - public NodePropertyEntity getProp3() - { - return prop3; - } - - public void setProp3(NodePropertyEntity prop3) - { - this.prop3 = prop3; - } - - public NodeEntity getNode() - { - return node; - } - - public void setNode(NodeEntity childNode) - { - this.node = childNode; - } - - // Supplemental query-related parameters - - public Long getParentNodeId() - { - return parentNodeId; - } - - public void setParentNodeId(Long parentNodeId) - { - this.parentNodeId = parentNodeId; - } - - public Long getProp1qnameId() - { - return prop1qnameId; - } - - public void setProp1qnameId(Long prop1qnameId) - { - this.prop1qnameId = prop1qnameId; - } - - public Long getProp2qnameId() - { - return prop2qnameId; - } - - public void setProp2qnameId(Long prop2qnameId) - { - this.prop2qnameId = prop2qnameId; - } - - public Long getProp3qnameId() - { - return prop3qnameId; - } - - public void setProp3qnameId(Long prop3qnameId) - { - this.prop3qnameId = prop3qnameId; - } - - public List getChildNodeTypeQNameIds() - { - return childNodeTypeQNameIds; - } - - public void setChildNodeTypeQNameIds(List childNodeTypeQNameIds) - { - this.childNodeTypeQNameIds = childNodeTypeQNameIds; - } - - public boolean isAuditableProps() - { - return auditableProps; - } - - public void setAuditableProps(boolean auditableProps) - { - this.auditableProps = auditableProps; - } - - public boolean isNodeType() - { - return nodeType; - } - - public void setNodeType(boolean nodeType) - { - this.nodeType = nodeType; - } - - public Boolean isPrimary() - { - return isPrimary; - } - - public void setIsPrimary(Boolean isPrimary) - { - this.isPrimary = isPrimary; - } -} \ No newline at end of file +package org.alfresco.repo.node.getchildren; + +import java.util.List; +import java.util.Set; + +import org.alfresco.repo.domain.node.AuditablePropertiesEntity; +import org.alfresco.repo.domain.node.NodePropertyEntity; +import org.alfresco.service.cmr.repository.NodeRef; +import org.alfresco.service.cmr.repository.StoreRef; + +/** + * Filterable/Sortable Node Entity + * + * Can be optionally filtered/sorted by (up to) three properties - note: sort properties are applied in order + * + * @author jan + * @since 4.0 + */ +public class FilterSortNodeEntity +{ + private Long id; // node id + private String nodeUuid; + private Long typeQNameId; + + private AuditablePropertiesEntity auditablePropertiesEntity; + private NodePropertyEntity prop1; + private NodePropertyEntity prop2; + private NodePropertyEntity prop3; + + private String storeProtocol; + private String storeIdentifier; + + // Supplemental query-related parameters + private Long parentNodeId; + private Long prop1qnameId; + private Long prop2qnameId; + private Long prop3qnameId; + private List childNodeTypeQNameIds; + private Set assocTypeQNameIds; + private String 
pattern; + private Long namePropertyQNameId; + private boolean auditableProps; + private boolean nodeType; + + private Boolean isPrimary; + + /** + * Default constructor + */ + public FilterSortNodeEntity() + { + auditableProps = false; + } + + public Long getId() + { + return id; + } + + public void setId(Long id) + { + this.id = id; + } + + public String getNodeUuid() + { + return nodeUuid; + } + + public void setNodeUuid(String nodeUuid) + { + this.nodeUuid = nodeUuid; + } + + public Long getTypeQNameId() + { + return typeQNameId; + } + + public void setTypeQNameId(Long typeQNameId) + { + this.typeQNameId = typeQNameId; + } + + public String getPattern() + { + return pattern; + } + + protected String escape(String s, char escapeChar) + { + StringBuilder sb = new StringBuilder(); + int idx = -1; + int offset = 0; + do + { + idx = s.indexOf(escapeChar, offset); + if (idx != -1) + { + sb.append(s.substring(offset, idx)); + sb.append("\\"); + sb.append(escapeChar); + offset = idx + 1; + } + } while (idx != -1); + sb.append(s.substring(offset)); + return sb.toString(); + } + + public void setPattern(String pattern) + { + if (pattern != null) + { + // escape the '%' character with '\' (standard SQL escape character) + pattern = escape(pattern, '%'); + // replace the wildcard character '*' with the one used in database queries i.e. '%' + this.pattern = pattern.replace('*', '%'); + } + } + + public void setAssocTypeQNameIds(Set assocTypeQNameIds) + { + this.assocTypeQNameIds = assocTypeQNameIds; + } + + public Set getAssocTypeQNameIds() + { + return assocTypeQNameIds; + } + + public Long getNamePropertyQNameId() + { + return namePropertyQNameId; + } + + public void setNamePropertyQNameId(Long namePropertyQNameId) + { + this.namePropertyQNameId = namePropertyQNameId; + } + + public AuditablePropertiesEntity getAuditablePropertiesEntity() + { + return auditablePropertiesEntity; + } + + public void setAuditablePropertiesEntity(AuditablePropertiesEntity auditablePropertiesEntity) + { + this.auditablePropertiesEntity = auditablePropertiesEntity; + } + + public NodePropertyEntity getProp1() + { + return prop1; + } + + public void setProp1(NodePropertyEntity prop1) + { + this.prop1 = prop1; + } + + public NodePropertyEntity getProp2() + { + return prop2; + } + + public void setProp2(NodePropertyEntity prop2) + { + this.prop2 = prop2; + } + + public NodePropertyEntity getProp3() + { + return prop3; + } + + public void setProp3(NodePropertyEntity prop3) + { + this.prop3 = prop3; + } + + public String getStoreProtocol() + { + return storeProtocol; + } + + public void setStoreProtocol(String storeProtocol) + { + this.storeProtocol = storeProtocol; + } + + public String getStoreIdentifier() + { + return storeIdentifier; + } + + public void setStoreIdentifier(String storeIdentifier) + { + this.storeIdentifier = storeIdentifier; + } + + // Supplemental query-related parameters + + public Long getParentNodeId() + { + return parentNodeId; + } + + public void setParentNodeId(Long parentNodeId) + { + this.parentNodeId = parentNodeId; + } + + public Long getProp1qnameId() + { + return prop1qnameId; + } + + public void setProp1qnameId(Long prop1qnameId) + { + this.prop1qnameId = prop1qnameId; + } + + public Long getProp2qnameId() + { + return prop2qnameId; + } + + public void setProp2qnameId(Long prop2qnameId) + { + this.prop2qnameId = prop2qnameId; + } + + public Long getProp3qnameId() + { + return prop3qnameId; + } + + public void setProp3qnameId(Long prop3qnameId) + { + this.prop3qnameId = prop3qnameId; + } + + 
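+    // For illustration: prop1/prop2/prop3 (and the matching propNqnameId parameters above) are populated in
+    // request order by GetChildrenCannedQuery, e.g. a sort on cm:name then cm:title combined with a filter
+    // on cm:description resolves to prop1 = cm:name, prop2 = cm:title and prop3 = cm:description.
+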
public List getChildNodeTypeQNameIds() + { + return childNodeTypeQNameIds; + } + + public void setChildNodeTypeQNameIds(List childNodeTypeQNameIds) + { + this.childNodeTypeQNameIds = childNodeTypeQNameIds; + } + + public boolean isAuditableProps() + { + return auditableProps; + } + + public void setAuditableProps(boolean auditableProps) + { + this.auditableProps = auditableProps; + } + + public boolean isNodeType() + { + return nodeType; + } + + public void setNodeType(boolean nodeType) + { + this.nodeType = nodeType; + } + + public Boolean isPrimary() + { + return isPrimary; + } + + public void setIsPrimary(Boolean isPrimary) + { + this.isPrimary = isPrimary; + } + + public NodeRef createNodeRef() + { + return new NodeRef(new StoreRef(storeProtocol, storeIdentifier), nodeUuid); + } +} diff --git a/repository/src/main/java/org/alfresco/repo/node/getchildren/GetChildrenCannedQuery.java b/repository/src/main/java/org/alfresco/repo/node/getchildren/GetChildrenCannedQuery.java index 511c8f4871..892b0e5775 100644 --- a/repository/src/main/java/org/alfresco/repo/node/getchildren/GetChildrenCannedQuery.java +++ b/repository/src/main/java/org/alfresco/repo/node/getchildren/GetChildrenCannedQuery.java @@ -1,14 +1,14 @@ /* - * #%L - * Alfresco Repository - * %% - * Copyright (C) 2005 - 2016 Alfresco Software Limited - * %% - * This file is part of the Alfresco software. - * If the software was purchased under a paid Alfresco license, the terms of - * the paid license agreement will prevail. Otherwise, the software is - * provided under the following open source license terms: - * + * #%L + * Alfresco Repository + * %% + * Copyright (C) 2005 - 2025 Alfresco Software Limited + * %% + * This file is part of the Alfresco software. + * If the software was purchased under a paid Alfresco license, the terms of + * the paid license agreement will prevail. Otherwise, the software is + * provided under the following open source license terms: + * * Alfresco is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or @@ -23,968 +23,969 @@ * along with Alfresco. If not, see . 
* #L% */ -package org.alfresco.repo.node.getchildren; - -import java.io.Serializable; -import java.text.Collator; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.Date; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.alfresco.error.AlfrescoRuntimeException; -import org.alfresco.model.ContentModel; -import org.alfresco.query.CannedQueryParameters; -import org.alfresco.query.CannedQuerySortDetails; -import org.alfresco.query.CannedQuerySortDetails.SortOrder; -import org.alfresco.repo.domain.node.AuditablePropertiesEntity; -import org.alfresco.repo.domain.node.Node; -import org.alfresco.repo.domain.node.NodeDAO; -import org.alfresco.repo.domain.node.NodeEntity; -import org.alfresco.repo.domain.node.NodePropertyEntity; -import org.alfresco.repo.domain.node.NodePropertyHelper; -import org.alfresco.repo.domain.node.NodePropertyKey; -import org.alfresco.repo.domain.node.NodePropertyValue; -import org.alfresco.repo.domain.node.ReferenceablePropertiesEntity; -import org.alfresco.repo.domain.qname.QNameDAO; -import org.alfresco.repo.domain.query.CannedQueryDAO; -import org.alfresco.repo.node.getchildren.FilterPropString.FilterTypeString; -import org.alfresco.repo.security.permissions.PermissionCheckedValue.PermissionCheckedValueMixin; -import org.alfresco.repo.security.permissions.impl.acegi.AbstractCannedQueryPermissions; -import org.alfresco.repo.security.permissions.impl.acegi.MethodSecurityBean; -import org.alfresco.repo.tenant.TenantService; -import org.alfresco.service.cmr.repository.ContentData; -import org.alfresco.service.cmr.repository.InvalidNodeRefException; -import org.alfresco.service.cmr.repository.MLText; -import org.alfresco.service.cmr.repository.NodeRef; -import org.alfresco.service.cmr.repository.NodeService; -import org.alfresco.service.cmr.repository.datatype.DefaultTypeConverter; -import org.alfresco.service.namespace.QName; -import org.alfresco.util.AlfrescoCollator; -import org.alfresco.util.Pair; -import org.alfresco.util.ParameterCheck; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.springframework.extensions.surf.util.I18NUtil; - -/** - * GetChildren canned query - * - * To get paged list of children of a parent node filtered by child type. - * Also optionally filtered and/or sorted by one or more properties (up to three). 
- * - * @author janv - * @since 4.0 - */ -public class GetChildrenCannedQuery extends AbstractCannedQueryPermissions -{ - private Log logger = LogFactory.getLog(getClass()); - - private static final String QUERY_NAMESPACE = "alfresco.node"; - private static final String QUERY_SELECT_GET_CHILDREN_WITH_PROPS = "select_GetChildrenCannedQueryWithProps"; - private static final String QUERY_SELECT_GET_CHILDREN_WITHOUT_PROPS = "select_GetChildrenCannedQueryWithoutProps"; - - public static final int MAX_FILTER_SORT_PROPS = 3; - - // note: special qnames - originally from Share DocLib default config (however, we do not support arbitrary "fts-alfresco" special sortable fields) - public static final QName SORT_QNAME_CONTENT_SIZE = QName.createQName("http://www.alfresco.org/model/content/1.0", "content.size"); - public static final QName SORT_QNAME_CONTENT_MIMETYPE = QName.createQName("http://www.alfresco.org/model/content/1.0", "content.mimetype"); - public static final QName SORT_QNAME_NODE_TYPE = QName.createQName("", "TYPE"); - public static final QName SORT_QNAME_NODE_IS_FOLDER = QName.createQName("", "IS_FOLDER"); // ALF-13968 - - public static final QName FILTER_QNAME_NODE_IS_PRIMARY = QName.createQName("", "IS_PRIMARY"); - - - private NodeDAO nodeDAO; - private QNameDAO qnameDAO; - private CannedQueryDAO cannedQueryDAO; - private NodePropertyHelper nodePropertyHelper; - private TenantService tenantService; - protected NodeService nodeService; - - private boolean applyPostQueryPermissions = false; // if true, the permissions will be applied post-query (else should be applied as part of the "queryAndFilter") - - public GetChildrenCannedQuery( - NodeDAO nodeDAO, - QNameDAO qnameDAO, - CannedQueryDAO cannedQueryDAO, - NodePropertyHelper nodePropertyHelper, - TenantService tenantService, - NodeService nodeService, - MethodSecurityBean methodSecurity, - CannedQueryParameters params) - { - super(params, methodSecurity); - - this.nodeDAO = nodeDAO; - this.qnameDAO = qnameDAO; - this.cannedQueryDAO = cannedQueryDAO; - this.nodePropertyHelper = nodePropertyHelper; - this.tenantService = tenantService; - this.nodeService = nodeService; - - if ((params.getSortDetails() != null) && (params.getSortDetails().getSortPairs().size() > 0)) - { - applyPostQueryPermissions = true; - } - - // TODO refactor (only apply post query if sorted - as above) - GetChildrenCannedQueryParams paramBean = (GetChildrenCannedQueryParams)params.getParameterBean(); - if ((paramBean.getFilterProps()!= null) && (paramBean.getFilterProps().size() > 0)) - { - applyPostQueryPermissions = true; - } - } - - protected FilterSortChildQueryCallback getFilterSortChildQuery(final List children, final List filterProps, GetChildrenCannedQueryParams paramBean) - { - Set inclusiveAspects = paramBean.getInclusiveAspects(); - Set exclusiveAspects = paramBean.getExclusiveAspects(); - - return new DefaultFilterSortChildQueryCallback(children, filterProps, inclusiveAspects, exclusiveAspects); - } - - protected UnsortedChildQueryCallback getUnsortedChildQueryCallback(final List rawResult, final int requestedCount, GetChildrenCannedQueryParams paramBean) - { - Set inclusiveAspects = paramBean.getInclusiveAspects(); - Set exclusiveAspects = paramBean.getExclusiveAspects(); - return new DefaultUnsortedChildQueryCallback(rawResult, requestedCount, inclusiveAspects, exclusiveAspects); - } - - @Override - protected List queryAndFilter(CannedQueryParameters parameters) - { - Long start = (logger.isDebugEnabled() ? 
System.currentTimeMillis() : null); - - // Get parameters - GetChildrenCannedQueryParams paramBean = (GetChildrenCannedQueryParams)parameters.getParameterBean(); - - // Get parent node - NodeRef parentRef = paramBean.getParentRef(); - ParameterCheck.mandatory("nodeRef", parentRef); - Pair nodePair = nodeDAO.getNodePair(parentRef); - if (nodePair == null) - { - throw new InvalidNodeRefException("Parent node does not exist: " + parentRef, parentRef); - } - Long parentNodeId = nodePair.getFirst(); - - // Set query params - note: currently using SortableChildEntity to hold (supplemental-) query params - FilterSortNodeEntity params = new FilterSortNodeEntity(); - - // Set parent node id - params.setParentNodeId(parentNodeId); - - // Get filter details - Set childNodeTypeQNames = paramBean.getChildTypeQNames(); - Set assocTypeQNames = paramBean.getAssocTypeQNames(); - - final List filterProps = new ArrayList<>(paramBean.getFilterProps().size()); - filterProps.addAll(paramBean.getFilterProps()); // clone (to allow special handling for isPrimary) - - String pattern = paramBean.getPattern(); - - // Get sort details - CannedQuerySortDetails sortDetails = parameters.getSortDetails(); - @SuppressWarnings({ "unchecked", "rawtypes" }) - final List> sortPairs = (List)sortDetails.getSortPairs(); - - if (filterProps.size() > 0) - { - // special handling of isPrimary filter (not counted as a filter/sort "property") - Boolean isPrimary = null; - int idx = 0; - for (FilterProp filter : filterProps) - { - if ((filter instanceof FilterPropBoolean) && - ((FilterPropBoolean)filter).getPropName().equals(FILTER_QNAME_NODE_IS_PRIMARY)) - { - isPrimary = ((FilterPropBoolean)filter).getPropVal(); - break; - } - idx++; - } - if (isPrimary != null) - { - params.setIsPrimary(isPrimary); - filterProps.remove(idx); - } - } - - // Set sort / filter params - // Note - need to keep the sort properties in their requested order - List sortFilterProps = new ArrayList(filterProps.size() + sortPairs.size()); - for (Pair sort : sortPairs) - { - QName sortQName = sort.getFirst(); - if(! sortFilterProps.contains(sortQName)) - { - sortFilterProps.add(sortQName); - } - } - for (FilterProp filter : filterProps) - { - QName filterQName = filter.getPropName(); - if(! 
sortFilterProps.contains(filterQName)) - { - sortFilterProps.add(filterQName); - } - } - - int filterSortPropCnt = sortFilterProps.size(); - - if (filterSortPropCnt > MAX_FILTER_SORT_PROPS) - { - throw new AlfrescoRuntimeException("GetChildren: exceeded maximum number filter/sort properties: (max="+MAX_FILTER_SORT_PROPS+", actual="+filterSortPropCnt); - } - - filterSortPropCnt = setFilterSortParams(sortFilterProps, params); - - - List result = new ArrayList<>(0); - - try - { - if ((childNodeTypeQNames != null) && (childNodeTypeQNames.size() > 0)) - { - // Set child node type qnames (additional filter - performed by DB query) - Set childNodeTypeQNameIds = qnameDAO.convertQNamesToIds(childNodeTypeQNames, false); - if (childNodeTypeQNameIds.size() > 0) - { - params.setChildNodeTypeQNameIds(new ArrayList(childNodeTypeQNameIds)); - } - else - { - // short-circuit - return no results - given node type qname(s) do not exist - return result; - } - } - - if ((assocTypeQNames != null) && (assocTypeQNames.size() > 0)) - { - // Set assoc type qnames (additional filter - performed by DB query) - Set assocTypeQNameIds = qnameDAO.convertQNamesToIds(assocTypeQNames, false); - if (assocTypeQNameIds.size() > 0) - { - params.setAssocTypeQNameIds(assocTypeQNameIds); - } - else - { - // short-circuit - return no results - given assoc type qname(s) do not exist - return result; - } - } - - if (pattern != null) - { - // TODO, check that we should be tied to the content model in this way. Perhaps a configurable property - // name against which compare the pattern? - Pair nameQName = qnameDAO.getQName(ContentModel.PROP_NAME); - if(nameQName == null) - { - throw new AlfrescoRuntimeException("Unable to determine qname id of name property"); - } - params.setNamePropertyQNameId(nameQName.getFirst()); - params.setPattern(pattern); - } - - if (filterSortPropCnt > 0) - { - // filtered and/or sorted - note: permissions will be applied post query - final List children = new ArrayList(100); - final FilterSortChildQueryCallback c = getFilterSortChildQuery(children, filterProps, paramBean); - FilterSortResultHandler resultHandler = new FilterSortResultHandler(c); - cannedQueryDAO.executeQuery(QUERY_NAMESPACE, QUERY_SELECT_GET_CHILDREN_WITH_PROPS, params, 0, Integer.MAX_VALUE, resultHandler); - resultHandler.done(); - - if (sortPairs.size() > 0) - { - Long startSort = (logger.isDebugEnabled() ? 
System.currentTimeMillis() : null); - - // sort - Collections.sort(children, new PropComparatorAsc(sortPairs)); - - if (startSort != null) - { - logger.debug("Post-query sort: "+children.size()+" in "+(System.currentTimeMillis()-startSort)+" msecs"); - } - } - - result = new ArrayList(children.size()); - for (FilterSortNode child : children) - { - result.add(tenantService.getBaseName(child.getNodeRef())); - } - } - else - { - // unsorted (apart from any implicit order) - note: permissions are applied during result handling to allow early cutoff - - final int requestedCount = parameters.getResultsRequired(); - - final List rawResult = new ArrayList(Math.min(1000, requestedCount)); - UnsortedChildQueryCallback callback = getUnsortedChildQueryCallback(rawResult, requestedCount, paramBean); - UnsortedResultHandler resultHandler = new UnsortedResultHandler(callback); - cannedQueryDAO.executeQuery(QUERY_NAMESPACE, QUERY_SELECT_GET_CHILDREN_WITHOUT_PROPS, params, 0, Integer.MAX_VALUE, resultHandler); - resultHandler.done(); - - // permissions have been applied - result = PermissionCheckedValueMixin.create(rawResult); - } - } - finally - { - if (start != null) - { - logger.debug("Base query "+(filterSortPropCnt > 0 ? "(sort=y, perms=n)" : "(sort=n, perms=y)")+": "+result.size()+" in "+(System.currentTimeMillis()-start)+" msecs"); - } - } - - return result; - } - - // Set filter/sort props (between 0 and 3) - private int setFilterSortParams(List filterSortProps, FilterSortNodeEntity params) - { - int cnt = 0; - int propCnt = 0; - - for (QName filterSortProp : filterSortProps) - { - if (AuditablePropertiesEntity.getAuditablePropertyQNames().contains(filterSortProp)) - { - params.setAuditableProps(true); - } - else if (filterSortProp.equals(SORT_QNAME_NODE_TYPE) || filterSortProp.equals(SORT_QNAME_NODE_IS_FOLDER)) - { - params.setNodeType(true); - } - else - { - Long sortQNameId = getQNameId(filterSortProp); - if (sortQNameId != null) - { - if (propCnt == 0) - { - params.setProp1qnameId(sortQNameId); - } - else if (propCnt == 1) - { - params.setProp2qnameId(sortQNameId); - } - else if (propCnt == 2) - { - params.setProp3qnameId(sortQNameId); - } - else - { - // belts and braces - throw new AlfrescoRuntimeException("GetChildren: unexpected - cannot set sort parameter: "+cnt); - } - - propCnt++; - } - else - { - logger.warn("Skipping filter/sort param - cannot find: "+filterSortProp); - break; - } - } - - cnt++; - } - - return cnt; - } - - private Long getQNameId(QName sortPropQName) - { - if (sortPropQName.equals(SORT_QNAME_CONTENT_SIZE) || sortPropQName.equals(SORT_QNAME_CONTENT_MIMETYPE)) - { - sortPropQName = ContentModel.PROP_CONTENT; - } - - Pair qnamePair = qnameDAO.getQName(sortPropQName); - return (qnamePair == null ? 
null : qnamePair.getFirst()); - } - - @Override - protected boolean isApplyPostQuerySorting() - { - // note: sorted as part of the query impl (using SortableNode results) - return false; - } - - private class PropComparatorAsc implements Comparator - { - private List> sortProps; - private Collator collator; - - public PropComparatorAsc(List> sortProps) - { - this.sortProps = sortProps; - // try to overrider collator comparison rules - this.collator = AlfrescoCollator.getInstance(I18NUtil.getContentLocale()); - } - - public int compare(FilterSortNode n1, FilterSortNode n2) - { - return compareImpl(n1, n2, sortProps); - } - - private int compareImpl(FilterSortNode node1In, FilterSortNode node2In, List> sortProps) - { - Object pv1 = null; - Object pv2 = null; - - QName sortPropQName = (QName)sortProps.get(0).getFirst(); - boolean sortAscending = (sortProps.get(0).getSecond() == SortOrder.ASCENDING); - - FilterSortNode node1 = node1In; - FilterSortNode node2 = node2In; - - if (sortAscending == false) - { - node1 = node2In; - node2 = node1In; - } - - int result = 0; - - pv1 = node1.getVal(sortPropQName); - pv2 = node2.getVal(sortPropQName); - - if (pv1 == null) - { - if(pv2 == null && sortProps.size() > 1) - { - return compareImpl(node1In, node2In, sortProps.subList(1, sortProps.size())); - } - else - { - return (pv2 == null ? 0 : -1); - } - } - else if (pv2 == null) - { - return 1; - } - - if (pv1 instanceof String) - { - result = collator.compare((String)pv1, (String)pv2); // TODO use collation keys (re: performance) - } - else if (pv1 instanceof Date) - { - result = (((Date)pv1).compareTo((Date)pv2)); - } - else if (pv1 instanceof Long) - { - result = (((Long)pv1).compareTo((Long)pv2)); - } - else if (pv1 instanceof Integer) - { - result = (((Integer)pv1).compareTo((Integer)pv2)); - } - else if (pv1 instanceof QName) - { - result = (((QName)pv1).compareTo((QName)pv2)); - } - else if (pv1 instanceof Boolean) - { - result = (((Boolean)pv1).compareTo((Boolean)pv2)); - } - else - { - // TODO other comparisons - throw new RuntimeException("Unsupported sort type: "+pv1.getClass().getName()); - } - - if ((result == 0) && (sortProps.size() > 1)) - { - return compareImpl(node1In, node2In, sortProps.subList(1, sortProps.size())); - } - - return result; - } - } - - private boolean includeAspects(NodeRef nodeRef, Set inclusiveAspects, Set exclusiveAspects) - { - if (inclusiveAspects == null && exclusiveAspects == null) - { - return true; - } - - Set nodeAspects = nodeService.getAspects(nodeRef); - if (inclusiveAspects != null) - { - Set includedIntersect = new HashSet(nodeAspects); - includedIntersect.retainAll(inclusiveAspects); - if (includedIntersect.isEmpty()) - { - return false; - } - } - if (exclusiveAspects != null) - { - Set excludedIntersect = new HashSet(nodeAspects); - excludedIntersect.retainAll(exclusiveAspects); - if (excludedIntersect.isEmpty() == false) - { - return false; - } - } - return true; - - } - - // note: currently inclusive and OR-based - private boolean includeFilter(Map propVals, List filterProps) - { - for (FilterProp filterProp : filterProps) - { - Serializable propVal = propVals.get(filterProp.getPropName()); - if (propVal != null) - { - if ((filterProp instanceof FilterPropString) && (propVal instanceof String)) - { - String val = (String)propVal; - String filter = (String)filterProp.getPropVal(); - - switch ((FilterTypeString)filterProp.getFilterType()) - { - case STARTSWITH: - if (val.startsWith(filter)) - { - return true; - } - break; - case STARTSWITH_IGNORECASE: - if 
(val.toLowerCase().startsWith(filter.toLowerCase())) - { - return true; - } - break; - case EQUALS: - if (val.equals(filter)) - { - return true; - } - break; - case EQUALS_IGNORECASE: - if (val.equalsIgnoreCase(filter)) - { - return true; - } - break; - case ENDSWITH: - if (val.endsWith(filter)) - { - return true; - } - break; - case ENDSWITH_IGNORECASE: - if (val.toLowerCase().endsWith(filter.toLowerCase())) - { - return true; - } - break; - case MATCHES: - if (val.matches(filter)) - { - return true; - } - break; - case MATCHES_IGNORECASE: - if (val.toLowerCase().matches(filter.toLowerCase())) - { - return true; - } - break; - default: - } - } - } - - if ((filterProp instanceof FilterPropBoolean) && (propVal instanceof Boolean)) - { - Boolean val = (Boolean)propVal; - Boolean filter = (Boolean)filterProp.getPropVal(); - - return (val == filter); - } - } - - return false; - } - - @Override - protected boolean isApplyPostQueryPermissions() - { - return applyPostQueryPermissions; // true if sorted (if unsorted then permissions are applied as part of the query impl) - } - - @Override - protected List applyPostQueryPermissions(List results, int requestedCount) - { - Long start = (logger.isDebugEnabled() ? System.currentTimeMillis() : null); - - int requestTotalCountMax = getParameters().getTotalResultCountMax(); - int maxChecks = (((requestTotalCountMax > 0) && (requestTotalCountMax > requestedCount)) ? requestTotalCountMax : requestedCount); - int cnt = results.size(); - - int toIdx = (maxChecks > cnt ? cnt : maxChecks); - - // note: assume user has read access to most/majority of the items hence pre-load up to max checks - preload(results.subList(0, toIdx)); - - List ret = super.applyPostQueryPermissions(results, requestedCount); - - if (start != null) - { - logger.debug("Post-query perms: "+ret.size()+" in "+(System.currentTimeMillis()-start)+" msecs"); - } - - return ret; - } - - private void preload(List nodeRefs) - { - Long start = (logger.isTraceEnabled() ? 
System.currentTimeMillis() : null); - - nodeDAO.cacheNodes(nodeRefs); - - if (start != null) - { - logger.trace("Pre-load: "+nodeRefs.size()+" in "+(System.currentTimeMillis()-start)+" msecs"); - } - } - - protected interface FilterSortChildQueryCallback - { - boolean handle(FilterSortNode node); - } - - protected class DefaultFilterSortChildQueryCallback implements FilterSortChildQueryCallback - { - private List children; - private List filterProps; - private boolean applyFilter; - private Set inclusiveAspects; - private Set exclusiveAspects; - - public DefaultFilterSortChildQueryCallback(final List children, final List filterProps) - { - this(children, filterProps, null, null); - } - - public DefaultFilterSortChildQueryCallback(final List children, final List filterProps, Set inclusiveAspects, Set exclusiveAspects) - { - this.children = children; - this.filterProps = filterProps; - this.applyFilter = (filterProps.size() > 0); - this.inclusiveAspects = inclusiveAspects; - this.exclusiveAspects = exclusiveAspects; - } - - @Override - public boolean handle(FilterSortNode node) - { - if(include(node)) - { - children.add(node); - } - - // More results - return true; - } - - protected boolean include(FilterSortNode node) - { - // filter, if needed - return(!applyFilter || includeFilter(node.getPropVals(), filterProps)) && includeAspects(node.getNodeRef(), inclusiveAspects, exclusiveAspects); - } - } - - protected class DefaultUnsortedChildQueryCallback implements UnsortedChildQueryCallback - { - private List rawResult; - private int requestedCount; - private Set inclusiveAspects; - private Set exclusiveAspects; - - public DefaultUnsortedChildQueryCallback(final List rawResult, final int requestedCount, Set inclusiveAspects, Set exclusiveAspects) - { - this.rawResult = rawResult; - this.requestedCount = requestedCount; - this.inclusiveAspects = inclusiveAspects; - this.exclusiveAspects = exclusiveAspects; - } - - @Override - public boolean handle(NodeRef nodeRef) - { - if(include(nodeRef)) - { - rawResult.add(tenantService.getBaseName(nodeRef)); - } - - // More results ? 
- return (rawResult.size() < requestedCount); - } - - protected boolean include(NodeRef nodeRef) - { - return includeAspects(nodeRef, inclusiveAspects, exclusiveAspects); - } - } - - protected interface UnsortedChildQueryCallback - { - boolean handle(NodeRef nodeRef); - } - - protected class FilterSortResultHandler implements CannedQueryDAO.ResultHandler - { - private final FilterSortChildQueryCallback resultsCallback; - private boolean more = true; - - private static final int BATCH_SIZE = 256 * 4; - private final List results; - - private FilterSortResultHandler(FilterSortChildQueryCallback resultsCallback) - { - this.resultsCallback = resultsCallback; - - results = new LinkedList(); - } - - public boolean handleResult(FilterSortNodeEntity result) - { - // Do nothing if no further results are required - if (!more) - { - return false; - } - - if (results.size() >= BATCH_SIZE) - { - // batch - preloadFilterSort(); - } - - results.add(result); - - return more; - } - - public void done() - { - if (results.size() >= 0) - { - // finish batch - preloadFilterSort(); - } - } - - private void preloadFilterSort() - { - List nodeRefs = new ArrayList<>(results.size()); - for (FilterSortNodeEntity result : results) - { - nodeRefs.add(result.getNode().getNodeRef()); - } - - preload(nodeRefs); - - for (FilterSortNodeEntity result : results) - { - Node node = result.getNode(); - NodeRef nodeRef = node.getNodeRef(); - - Map propertyValues = new HashMap(3); - - NodePropertyEntity prop1 = result.getProp1(); - if (prop1 != null) - { - propertyValues.put(prop1.getKey(), prop1.getValue()); - } - - NodePropertyEntity prop2 = result.getProp2(); - if (prop2 != null) - { - propertyValues.put(prop2.getKey(), prop2.getValue()); - } - - NodePropertyEntity prop3 = result.getProp3(); - if (prop3 != null) - { - propertyValues.put(prop3.getKey(), prop3.getValue()); - } - - Map propVals = nodePropertyHelper.convertToPublicProperties(propertyValues); - - // Add referenceable / spoofed properties (including spoofed name if null) - ReferenceablePropertiesEntity.addReferenceableProperties(node, propVals); - - // special cases - - // MLText (eg. cm:title, cm:description, ...) - for (Map.Entry entry : propVals.entrySet()) - { - if (entry.getValue() instanceof MLText) - { - propVals.put(entry.getKey(), DefaultTypeConverter.INSTANCE.convert(String.class, (MLText)entry.getValue())); - } - } - - // ContentData (eg. cm:content.size, cm:content.mimetype) - ContentData contentData = (ContentData)propVals.get(ContentModel.PROP_CONTENT); - if (contentData != null) - { - propVals.put(SORT_QNAME_CONTENT_SIZE, contentData.getSize()); - propVals.put(SORT_QNAME_CONTENT_MIMETYPE, contentData.getMimetype()); - } - - // Auditable props (eg. cm:creator, cm:created, cm:modifier, cm:modified, ...) 
- AuditablePropertiesEntity auditableProps = node.getAuditableProperties(); - if (auditableProps != null) - { - for (Map.Entry entry : auditableProps.getAuditableProperties().entrySet()) - { - propVals.put(entry.getKey(), entry.getValue()); - } - } - - // Node type - Long nodeTypeQNameId = node.getTypeQNameId(); - if (nodeTypeQNameId != null) - { - Pair pair = qnameDAO.getQName(nodeTypeQNameId); - if (pair != null) - { - propVals.put(SORT_QNAME_NODE_TYPE, pair.getSecond()); - } - } - - // Call back - boolean more = resultsCallback.handle(new FilterSortNode(nodeRef, propVals)); - if (!more) - { - this.more = false; - break; - } - } - - results.clear(); - } - } - - protected class FilterSortNode - { - private NodeRef nodeRef; - private Map propVals; // subset of nodes properties - used for filtering and/or sorting - - public FilterSortNode(NodeRef nodeRef, Map propVals) - { - this.nodeRef = nodeRef; - this.propVals = propVals; - } - - @Override - public String toString() - { - return "FilterSortNode [nodeRef=" + nodeRef + ", propVals=" + propVals + "]"; - } - - public NodeRef getNodeRef() - { - return nodeRef; - } - - public Serializable getVal(QName prop) - { - return propVals.get(prop); - } - - public Map getPropVals() - { - return propVals; - } - } - - private class UnsortedResultHandler implements CannedQueryDAO.ResultHandler - { - private final UnsortedChildQueryCallback resultsCallback; - - private boolean more = true; - - private static final int BATCH_SIZE = 256 * 4; - private final List nodeRefs; - - private UnsortedResultHandler(UnsortedChildQueryCallback resultsCallback) - { - this.resultsCallback = resultsCallback; - - nodeRefs = new LinkedList(); - } - - public boolean handleResult(NodeEntity result) - { - // Do nothing if no further results are required - if (!more) - { - return false; - } - - NodeRef nodeRef = result.getNodeRef(); - - if (nodeRefs.size() >= BATCH_SIZE) - { - // batch - preloadAndApplyPermissions(); - } - - nodeRefs.add(nodeRef); - - return more; - } - - private void preloadAndApplyPermissions() - { - preload(nodeRefs); - - // TODO track total time for incremental permission checks ... and cutoff (eg. 
based on some config) - List results = applyPostQueryPermissions(nodeRefs, nodeRefs.size()); - - for (NodeRef nodeRef : results) - { - // Call back - boolean more = resultsCallback.handle(nodeRef); - if (!more) - { - this.more = false; - break; - } - } - - nodeRefs.clear(); - } - - public void done() - { - if (nodeRefs.size() >= 0) - { - // finish batch - preloadAndApplyPermissions(); - } - } - } -} \ No newline at end of file +package org.alfresco.repo.node.getchildren; + +import java.io.Serializable; +import java.text.Collator; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.springframework.extensions.surf.util.I18NUtil; + +import org.alfresco.error.AlfrescoRuntimeException; +import org.alfresco.model.ContentModel; +import org.alfresco.query.CannedQueryParameters; +import org.alfresco.query.CannedQuerySortDetails; +import org.alfresco.query.CannedQuerySortDetails.SortOrder; +import org.alfresco.repo.domain.node.AuditablePropertiesEntity; +import org.alfresco.repo.domain.node.NodeDAO; +import org.alfresco.repo.domain.node.NodeEntity; +import org.alfresco.repo.domain.node.NodePropertyEntity; +import org.alfresco.repo.domain.node.NodePropertyHelper; +import org.alfresco.repo.domain.node.NodePropertyKey; +import org.alfresco.repo.domain.node.NodePropertyValue; +import org.alfresco.repo.domain.node.ReferenceablePropertiesEntity; +import org.alfresco.repo.domain.qname.QNameDAO; +import org.alfresco.repo.domain.query.CannedQueryDAO; +import org.alfresco.repo.node.getchildren.FilterPropString.FilterTypeString; +import org.alfresco.repo.security.permissions.PermissionCheckedValue.PermissionCheckedValueMixin; +import org.alfresco.repo.security.permissions.impl.acegi.AbstractCannedQueryPermissions; +import org.alfresco.repo.security.permissions.impl.acegi.MethodSecurityBean; +import org.alfresco.repo.tenant.TenantService; +import org.alfresco.service.cmr.repository.ContentData; +import org.alfresco.service.cmr.repository.InvalidNodeRefException; +import org.alfresco.service.cmr.repository.MLText; +import org.alfresco.service.cmr.repository.NodeRef; +import org.alfresco.service.cmr.repository.NodeService; +import org.alfresco.service.cmr.repository.datatype.DefaultTypeConverter; +import org.alfresco.service.namespace.QName; +import org.alfresco.util.AlfrescoCollator; +import org.alfresco.util.Pair; +import org.alfresco.util.ParameterCheck; + +/** + * GetChildren canned query + * + * To get paged list of children of a parent node filtered by child type. Also optionally filtered and/or sorted by one or more properties (up to three). 
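+ * <p>
+ * For illustration, at most {@link #MAX_FILTER_SORT_PROPS} distinct filter/sort properties may be named per query: e.g. sorting by cm:name and then cm:title while also filtering on cm:description uses all three, and naming a fourth distinct property fails with an AlfrescoRuntimeException. The IS_PRIMARY filter is handled separately and does not count towards this limit.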
+ * + * @author janv + * @since 4.0 + */ +public class GetChildrenCannedQuery extends AbstractCannedQueryPermissions +{ + private Log logger = LogFactory.getLog(getClass()); + + private static final String QUERY_NAMESPACE = "alfresco.node"; + private static final String QUERY_SELECT_GET_CHILDREN_WITH_PROPS = "select_GetChildrenCannedQueryWithProps"; + private static final String QUERY_SELECT_GET_CHILDREN_WITHOUT_PROPS = "select_GetChildrenCannedQueryWithoutProps"; + + public static final int MAX_FILTER_SORT_PROPS = 3; + + // note: special qnames - originally from Share DocLib default config (however, we do not support arbitrary "fts-alfresco" special sortable fields) + public static final QName SORT_QNAME_CONTENT_SIZE = QName.createQName("http://www.alfresco.org/model/content/1.0", "content.size"); + public static final QName SORT_QNAME_CONTENT_MIMETYPE = QName.createQName("http://www.alfresco.org/model/content/1.0", "content.mimetype"); + public static final QName SORT_QNAME_NODE_TYPE = QName.createQName("", "TYPE"); + public static final QName SORT_QNAME_NODE_IS_FOLDER = QName.createQName("", "IS_FOLDER"); // ALF-13968 + + public static final QName FILTER_QNAME_NODE_IS_PRIMARY = QName.createQName("", "IS_PRIMARY"); + + private NodeDAO nodeDAO; + private QNameDAO qnameDAO; + private CannedQueryDAO cannedQueryDAO; + private NodePropertyHelper nodePropertyHelper; + private TenantService tenantService; + protected NodeService nodeService; + + private boolean applyPostQueryPermissions = false; // if true, the permissions will be applied post-query (else should be applied as part of the "queryAndFilter") + + public GetChildrenCannedQuery( + NodeDAO nodeDAO, + QNameDAO qnameDAO, + CannedQueryDAO cannedQueryDAO, + NodePropertyHelper nodePropertyHelper, + TenantService tenantService, + NodeService nodeService, + MethodSecurityBean methodSecurity, + CannedQueryParameters params) + { + super(params, methodSecurity); + + this.nodeDAO = nodeDAO; + this.qnameDAO = qnameDAO; + this.cannedQueryDAO = cannedQueryDAO; + this.nodePropertyHelper = nodePropertyHelper; + this.tenantService = tenantService; + this.nodeService = nodeService; + + if ((params.getSortDetails() != null) && (params.getSortDetails().getSortPairs().size() > 0)) + { + applyPostQueryPermissions = true; + } + + // TODO refactor (only apply post query if sorted - as above) + GetChildrenCannedQueryParams paramBean = (GetChildrenCannedQueryParams) params.getParameterBean(); + if ((paramBean.getFilterProps() != null) && (paramBean.getFilterProps().size() > 0)) + { + applyPostQueryPermissions = true; + } + } + + protected FilterSortChildQueryCallback getFilterSortChildQuery(final List children, final List filterProps, GetChildrenCannedQueryParams paramBean) + { + Set inclusiveAspects = paramBean.getInclusiveAspects(); + Set exclusiveAspects = paramBean.getExclusiveAspects(); + + return new DefaultFilterSortChildQueryCallback(children, filterProps, inclusiveAspects, exclusiveAspects); + } + + protected UnsortedChildQueryCallback getUnsortedChildQueryCallback(final List rawResult, final int requestedCount, GetChildrenCannedQueryParams paramBean) + { + Set inclusiveAspects = paramBean.getInclusiveAspects(); + Set exclusiveAspects = paramBean.getExclusiveAspects(); + return new DefaultUnsortedChildQueryCallback(rawResult, requestedCount, inclusiveAspects, exclusiveAspects); + } + + @Override + protected List queryAndFilter(CannedQueryParameters parameters) + { + Long start = (logger.isDebugEnabled() ? 
System.currentTimeMillis() : null); + + // Get parameters + GetChildrenCannedQueryParams paramBean = (GetChildrenCannedQueryParams) parameters.getParameterBean(); + + // Get parent node + NodeRef parentRef = paramBean.getParentRef(); + ParameterCheck.mandatory("nodeRef", parentRef); + Pair nodePair = nodeDAO.getNodePair(parentRef); + if (nodePair == null) + { + throw new InvalidNodeRefException("Parent node does not exist: " + parentRef, parentRef); + } + Long parentNodeId = nodePair.getFirst(); + + // Set query params - note: currently using SortableChildEntity to hold (supplemental-) query params + FilterSortNodeEntity params = new FilterSortNodeEntity(); + + // Set parent node id + params.setParentNodeId(parentNodeId); + + // Get filter details + Set childNodeTypeQNames = paramBean.getChildTypeQNames(); + Set assocTypeQNames = paramBean.getAssocTypeQNames(); + + final List filterProps = new ArrayList<>(paramBean.getFilterProps().size()); + filterProps.addAll(paramBean.getFilterProps()); // clone (to allow special handling for isPrimary) + + String pattern = paramBean.getPattern(); + + // Get sort details + CannedQuerySortDetails sortDetails = parameters.getSortDetails(); + @SuppressWarnings({"unchecked", "rawtypes"}) + final List> sortPairs = (List) sortDetails.getSortPairs(); + + if (filterProps.size() > 0) + { + // special handling of isPrimary filter (not counted as a filter/sort "property") + Boolean isPrimary = null; + int idx = 0; + for (FilterProp filter : filterProps) + { + if ((filter instanceof FilterPropBoolean) && + ((FilterPropBoolean) filter).getPropName().equals(FILTER_QNAME_NODE_IS_PRIMARY)) + { + isPrimary = ((FilterPropBoolean) filter).getPropVal(); + break; + } + idx++; + } + if (isPrimary != null) + { + params.setIsPrimary(isPrimary); + filterProps.remove(idx); + } + } + + // Set sort / filter params + // Note - need to keep the sort properties in their requested order + List sortFilterProps = new ArrayList(filterProps.size() + sortPairs.size()); + for (Pair sort : sortPairs) + { + QName sortQName = sort.getFirst(); + if (!sortFilterProps.contains(sortQName)) + { + sortFilterProps.add(sortQName); + } + } + for (FilterProp filter : filterProps) + { + QName filterQName = filter.getPropName(); + if (!sortFilterProps.contains(filterQName)) + { + sortFilterProps.add(filterQName); + } + } + + int filterSortPropCnt = sortFilterProps.size(); + + if (filterSortPropCnt > MAX_FILTER_SORT_PROPS) + { + throw new AlfrescoRuntimeException("GetChildren: exceeded maximum number filter/sort properties: (max=" + MAX_FILTER_SORT_PROPS + ", actual=" + filterSortPropCnt); + } + + filterSortPropCnt = setFilterSortParams(sortFilterProps, params); + + List result = new ArrayList<>(0); + + try + { + if ((childNodeTypeQNames != null) && (childNodeTypeQNames.size() > 0)) + { + // Set child node type qnames (additional filter - performed by DB query) + Set childNodeTypeQNameIds = qnameDAO.convertQNamesToIds(childNodeTypeQNames, false); + if (childNodeTypeQNameIds.size() > 0) + { + params.setChildNodeTypeQNameIds(new ArrayList(childNodeTypeQNameIds)); + } + else + { + // short-circuit - return no results - given node type qname(s) do not exist + return result; + } + } + + if ((assocTypeQNames != null) && (assocTypeQNames.size() > 0)) + { + // Set assoc type qnames (additional filter - performed by DB query) + Set assocTypeQNameIds = qnameDAO.convertQNamesToIds(assocTypeQNames, false); + if (assocTypeQNameIds.size() > 0) + { + params.setAssocTypeQNameIds(assocTypeQNameIds); + } + else + { + // 
short-circuit - return no results - given assoc type qname(s) do not exist + return result; + } + } + + if (pattern != null) + { + // TODO, check that we should be tied to the content model in this way. Perhaps a configurable property + // name against which compare the pattern? + Pair nameQName = qnameDAO.getQName(ContentModel.PROP_NAME); + if (nameQName == null) + { + throw new AlfrescoRuntimeException("Unable to determine qname id of name property"); + } + params.setNamePropertyQNameId(nameQName.getFirst()); + params.setPattern(pattern); + } + + if (filterSortPropCnt > 0) + { + // filtered and/or sorted - note: permissions will be applied post query + final List children = new ArrayList(100); + final FilterSortChildQueryCallback c = getFilterSortChildQuery(children, filterProps, paramBean); + FilterSortResultHandler resultHandler = new FilterSortResultHandler(c); + cannedQueryDAO.executeQuery(QUERY_NAMESPACE, QUERY_SELECT_GET_CHILDREN_WITH_PROPS, params, 0, Integer.MAX_VALUE, resultHandler); + resultHandler.done(); + + if (sortPairs.size() > 0) + { + Long startSort = (logger.isDebugEnabled() ? System.currentTimeMillis() : null); + + // sort + Collections.sort(children, new PropComparatorAsc(sortPairs)); + + if (startSort != null) + { + logger.debug("Post-query sort: " + children.size() + " in " + (System.currentTimeMillis() - startSort) + " msecs"); + } + } + + result = new ArrayList(children.size()); + for (FilterSortNode child : children) + { + result.add(tenantService.getBaseName(child.getNodeRef())); + } + } + else + { + // unsorted (apart from any implicit order) - note: permissions are applied during result handling to allow early cutoff + + final int requestedCount = parameters.getResultsRequired(); + + final List rawResult = new ArrayList(Math.min(1000, requestedCount)); + UnsortedChildQueryCallback callback = getUnsortedChildQueryCallback(rawResult, requestedCount, paramBean); + UnsortedResultHandler resultHandler = new UnsortedResultHandler(callback); + cannedQueryDAO.executeQuery(QUERY_NAMESPACE, QUERY_SELECT_GET_CHILDREN_WITHOUT_PROPS, params, 0, Integer.MAX_VALUE, resultHandler); + resultHandler.done(); + + // permissions have been applied + result = PermissionCheckedValueMixin.create(rawResult); + } + } + finally + { + if (start != null) + { + logger.debug("Base query " + (filterSortPropCnt > 0 ? 
"(sort=y, perms=n)" : "(sort=n, perms=y)") + ": " + result.size() + " in " + (System.currentTimeMillis() - start) + " msecs"); + } + } + + return result; + } + + // Set filter/sort props (between 0 and 3) + private int setFilterSortParams(List filterSortProps, FilterSortNodeEntity params) + { + int cnt = 0; + int propCnt = 0; + + for (QName filterSortProp : filterSortProps) + { + if (AuditablePropertiesEntity.getAuditablePropertyQNames().contains(filterSortProp)) + { + params.setAuditableProps(true); + } + else if (filterSortProp.equals(SORT_QNAME_NODE_TYPE) || filterSortProp.equals(SORT_QNAME_NODE_IS_FOLDER)) + { + params.setNodeType(true); + } + else + { + Long sortQNameId = getQNameId(filterSortProp); + if (sortQNameId != null) + { + if (propCnt == 0) + { + params.setProp1qnameId(sortQNameId); + } + else if (propCnt == 1) + { + params.setProp2qnameId(sortQNameId); + } + else if (propCnt == 2) + { + params.setProp3qnameId(sortQNameId); + } + else + { + // belts and braces + throw new AlfrescoRuntimeException("GetChildren: unexpected - cannot set sort parameter: " + cnt); + } + + propCnt++; + } + else + { + logger.warn("Skipping filter/sort param - cannot find: " + filterSortProp); + break; + } + } + + cnt++; + } + + return cnt; + } + + private Long getQNameId(QName sortPropQName) + { + if (sortPropQName.equals(SORT_QNAME_CONTENT_SIZE) || sortPropQName.equals(SORT_QNAME_CONTENT_MIMETYPE)) + { + sortPropQName = ContentModel.PROP_CONTENT; + } + + Pair qnamePair = qnameDAO.getQName(sortPropQName); + return (qnamePair == null ? null : qnamePair.getFirst()); + } + + @Override + protected boolean isApplyPostQuerySorting() + { + // note: sorted as part of the query impl (using SortableNode results) + return false; + } + + private class PropComparatorAsc implements Comparator + { + private List> sortProps; + private Collator collator; + + public PropComparatorAsc(List> sortProps) + { + this.sortProps = sortProps; + // try to overrider collator comparison rules + this.collator = AlfrescoCollator.getInstance(I18NUtil.getContentLocale()); + } + + public int compare(FilterSortNode n1, FilterSortNode n2) + { + return compareImpl(n1, n2, sortProps); + } + + private int compareImpl(FilterSortNode node1In, FilterSortNode node2In, List> sortProps) + { + Object pv1 = null; + Object pv2 = null; + + QName sortPropQName = (QName) sortProps.get(0).getFirst(); + boolean sortAscending = (sortProps.get(0).getSecond() == SortOrder.ASCENDING); + + FilterSortNode node1 = node1In; + FilterSortNode node2 = node2In; + + if (sortAscending == false) + { + node1 = node2In; + node2 = node1In; + } + + int result = 0; + + pv1 = node1.getVal(sortPropQName); + pv2 = node2.getVal(sortPropQName); + + if (pv1 == null) + { + if (pv2 == null && sortProps.size() > 1) + { + return compareImpl(node1In, node2In, sortProps.subList(1, sortProps.size())); + } + else + { + return (pv2 == null ? 
0 : -1); + } + } + else if (pv2 == null) + { + return 1; + } + + if (pv1 instanceof String) + { + result = collator.compare((String) pv1, (String) pv2); // TODO use collation keys (re: performance) + } + else if (pv1 instanceof Date) + { + result = (((Date) pv1).compareTo((Date) pv2)); + } + else if (pv1 instanceof Long) + { + result = (((Long) pv1).compareTo((Long) pv2)); + } + else if (pv1 instanceof Integer) + { + result = (((Integer) pv1).compareTo((Integer) pv2)); + } + else if (pv1 instanceof QName) + { + result = (((QName) pv1).compareTo((QName) pv2)); + } + else if (pv1 instanceof Boolean) + { + result = (((Boolean) pv1).compareTo((Boolean) pv2)); + } + else + { + // TODO other comparisons + throw new RuntimeException("Unsupported sort type: " + pv1.getClass().getName()); + } + + if ((result == 0) && (sortProps.size() > 1)) + { + return compareImpl(node1In, node2In, sortProps.subList(1, sortProps.size())); + } + + return result; + } + } + + private boolean includeAspects(NodeRef nodeRef, Set inclusiveAspects, Set exclusiveAspects) + { + if (inclusiveAspects == null && exclusiveAspects == null) + { + return true; + } + + Set nodeAspects = nodeService.getAspects(nodeRef); + if (inclusiveAspects != null) + { + Set includedIntersect = new HashSet(nodeAspects); + includedIntersect.retainAll(inclusiveAspects); + if (includedIntersect.isEmpty()) + { + return false; + } + } + if (exclusiveAspects != null) + { + Set excludedIntersect = new HashSet(nodeAspects); + excludedIntersect.retainAll(exclusiveAspects); + if (excludedIntersect.isEmpty() == false) + { + return false; + } + } + return true; + + } + + // note: currently inclusive and OR-based + private boolean includeFilter(Map propVals, List filterProps) + { + for (FilterProp filterProp : filterProps) + { + Serializable propVal = propVals.get(filterProp.getPropName()); + if (propVal != null) + { + if ((filterProp instanceof FilterPropString) && (propVal instanceof String)) + { + String val = (String) propVal; + String filter = (String) filterProp.getPropVal(); + + switch ((FilterTypeString) filterProp.getFilterType()) + { + case STARTSWITH: + if (val.startsWith(filter)) + { + return true; + } + break; + case STARTSWITH_IGNORECASE: + if (val.toLowerCase().startsWith(filter.toLowerCase())) + { + return true; + } + break; + case EQUALS: + if (val.equals(filter)) + { + return true; + } + break; + case EQUALS_IGNORECASE: + if (val.equalsIgnoreCase(filter)) + { + return true; + } + break; + case ENDSWITH: + if (val.endsWith(filter)) + { + return true; + } + break; + case ENDSWITH_IGNORECASE: + if (val.toLowerCase().endsWith(filter.toLowerCase())) + { + return true; + } + break; + case MATCHES: + if (val.matches(filter)) + { + return true; + } + break; + case MATCHES_IGNORECASE: + if (val.toLowerCase().matches(filter.toLowerCase())) + { + return true; + } + break; + default: + } + } + } + + if ((filterProp instanceof FilterPropBoolean) && (propVal instanceof Boolean)) + { + Boolean val = (Boolean) propVal; + Boolean filter = (Boolean) filterProp.getPropVal(); + + return (val == filter); + } + } + + return false; + } + + @Override + protected boolean isApplyPostQueryPermissions() + { + return applyPostQueryPermissions; // true if sorted (if unsorted then permissions are applied as part of the query impl) + } + + @Override + protected List applyPostQueryPermissions(List results, int requestedCount) + { + Long start = (logger.isDebugEnabled() ? 
System.currentTimeMillis() : null); + + int requestTotalCountMax = getParameters().getTotalResultCountMax(); + int maxChecks = (((requestTotalCountMax > 0) && (requestTotalCountMax > requestedCount)) ? requestTotalCountMax : requestedCount); + int cnt = results.size(); + + int toIdx = (maxChecks > cnt ? cnt : maxChecks); + + // note: assume user has read access to most/majority of the items hence pre-load up to max checks + preload(results.subList(0, toIdx)); + + List ret = super.applyPostQueryPermissions(results, requestedCount); + + if (start != null) + { + logger.debug("Post-query perms: " + ret.size() + " in " + (System.currentTimeMillis() - start) + " msecs"); + } + + return ret; + } + + private void preload(List nodeRefs) + { + Long start = (logger.isTraceEnabled() ? System.currentTimeMillis() : null); + + nodeDAO.cacheNodes(nodeRefs); + + if (start != null) + { + logger.trace("Pre-load: " + nodeRefs.size() + " in " + (System.currentTimeMillis() - start) + " msecs"); + } + } + + protected interface FilterSortChildQueryCallback + { + boolean handle(FilterSortNode node); + } + + protected class DefaultFilterSortChildQueryCallback implements FilterSortChildQueryCallback + { + private List children; + private List filterProps; + private boolean applyFilter; + private Set inclusiveAspects; + private Set exclusiveAspects; + + public DefaultFilterSortChildQueryCallback(final List children, final List filterProps) + { + this(children, filterProps, null, null); + } + + public DefaultFilterSortChildQueryCallback(final List children, final List filterProps, Set inclusiveAspects, Set exclusiveAspects) + { + this.children = children; + this.filterProps = filterProps; + this.applyFilter = (filterProps.size() > 0); + this.inclusiveAspects = inclusiveAspects; + this.exclusiveAspects = exclusiveAspects; + } + + @Override + public boolean handle(FilterSortNode node) + { + if (include(node)) + { + children.add(node); + } + + // More results + return true; + } + + protected boolean include(FilterSortNode node) + { + // filter, if needed + return (!applyFilter || includeFilter(node.getPropVals(), filterProps)) && includeAspects(node.getNodeRef(), inclusiveAspects, exclusiveAspects); + } + } + + protected class DefaultUnsortedChildQueryCallback implements UnsortedChildQueryCallback + { + private List rawResult; + private int requestedCount; + private Set inclusiveAspects; + private Set exclusiveAspects; + + public DefaultUnsortedChildQueryCallback(final List rawResult, final int requestedCount, Set inclusiveAspects, Set exclusiveAspects) + { + this.rawResult = rawResult; + this.requestedCount = requestedCount; + this.inclusiveAspects = inclusiveAspects; + this.exclusiveAspects = exclusiveAspects; + } + + @Override + public boolean handle(NodeRef nodeRef) + { + if (include(nodeRef)) + { + rawResult.add(tenantService.getBaseName(nodeRef)); + } + + // More results ? 
+ return (rawResult.size() < requestedCount); + } + + protected boolean include(NodeRef nodeRef) + { + return includeAspects(nodeRef, inclusiveAspects, exclusiveAspects); + } + } + + protected interface UnsortedChildQueryCallback + { + boolean handle(NodeRef nodeRef); + } + + protected class FilterSortResultHandler implements CannedQueryDAO.ResultHandler + { + private final FilterSortChildQueryCallback resultsCallback; + private boolean more = true; + + private static final int BATCH_SIZE = 256 * 4; + private final List results; + + private FilterSortResultHandler(FilterSortChildQueryCallback resultsCallback) + { + this.resultsCallback = resultsCallback; + + results = new LinkedList(); + } + + public boolean handleResult(FilterSortNodeEntity result) + { + // Do nothing if no further results are required + if (!more) + { + return false; + } + + if (results.size() >= BATCH_SIZE) + { + // batch + preloadNodes(); + filterSort(); + } + + results.add(result); + + return more; + } + + public void done() + { + if (results.size() >= 0) + { + // finish batch + preloadNodes(); + filterSort(); + } + } + + private void preloadNodes() + { + List nodeRefs = new ArrayList<>(results.size()); + for (FilterSortNodeEntity result : results) + { + nodeRefs.add(result.createNodeRef()); + } + + preload(nodeRefs); + } + + private void filterSort() + { + for (FilterSortNodeEntity result : results) + { + NodeRef nodeRef = result.createNodeRef(); + + Map propertyValues = new HashMap(3); + + NodePropertyEntity prop1 = result.getProp1(); + if (prop1 != null) + { + propertyValues.put(prop1.getKey(), prop1.getValue()); + } + + NodePropertyEntity prop2 = result.getProp2(); + if (prop2 != null) + { + propertyValues.put(prop2.getKey(), prop2.getValue()); + } + + NodePropertyEntity prop3 = result.getProp3(); + if (prop3 != null) + { + propertyValues.put(prop3.getKey(), prop3.getValue()); + } + + Map propVals = nodePropertyHelper.convertToPublicProperties(propertyValues); + + // Add referenceable / spoofed properties (including spoofed name if null) + ReferenceablePropertiesEntity.addReferenceableProperties(result.getId(), nodeRef, propVals); + + // special cases + + // MLText (eg. cm:title, cm:description, ...) + for (Map.Entry entry : propVals.entrySet()) + { + if (entry.getValue() instanceof MLText) + { + propVals.put(entry.getKey(), DefaultTypeConverter.INSTANCE.convert(String.class, (MLText) entry.getValue())); + } + } + + // ContentData (eg. cm:content.size, cm:content.mimetype) + ContentData contentData = (ContentData) propVals.get(ContentModel.PROP_CONTENT); + if (contentData != null) + { + propVals.put(SORT_QNAME_CONTENT_SIZE, contentData.getSize()); + propVals.put(SORT_QNAME_CONTENT_MIMETYPE, contentData.getMimetype()); + } + + // Auditable props (eg. cm:creator, cm:created, cm:modifier, cm:modified, ...) 
+ AuditablePropertiesEntity auditableProps = result.getAuditablePropertiesEntity(); + if (auditableProps != null) + { + for (Map.Entry entry : auditableProps.getAuditableProperties().entrySet()) + { + propVals.put(entry.getKey(), entry.getValue()); + } + } + + // Node type + Long nodeTypeQNameId = result.getTypeQNameId(); + if (nodeTypeQNameId != null) + { + Pair pair = qnameDAO.getQName(nodeTypeQNameId); + if (pair != null) + { + propVals.put(SORT_QNAME_NODE_TYPE, pair.getSecond()); + } + } + + // Call back + boolean more = resultsCallback.handle(new FilterSortNode(nodeRef, propVals)); + if (!more) + { + this.more = false; + break; + } + } + + results.clear(); + } + } + + protected class FilterSortNode + { + private NodeRef nodeRef; + private Map propVals; // subset of nodes properties - used for filtering and/or sorting + + public FilterSortNode(NodeRef nodeRef, Map propVals) + { + this.nodeRef = nodeRef; + this.propVals = propVals; + } + + @Override + public String toString() + { + return "FilterSortNode [nodeRef=" + nodeRef + ", propVals=" + propVals + "]"; + } + + public NodeRef getNodeRef() + { + return nodeRef; + } + + public Serializable getVal(QName prop) + { + return propVals.get(prop); + } + + public Map getPropVals() + { + return propVals; + } + } + + private class UnsortedResultHandler implements CannedQueryDAO.ResultHandler + { + private final UnsortedChildQueryCallback resultsCallback; + + private boolean more = true; + + private static final int BATCH_SIZE = 256 * 4; + private final List nodeRefs; + + private UnsortedResultHandler(UnsortedChildQueryCallback resultsCallback) + { + this.resultsCallback = resultsCallback; + + nodeRefs = new LinkedList(); + } + + public boolean handleResult(NodeEntity result) + { + // Do nothing if no further results are required + if (!more) + { + return false; + } + + NodeRef nodeRef = result.getNodeRef(); + + if (nodeRefs.size() >= BATCH_SIZE) + { + // batch + preloadAndApplyPermissions(); + } + + nodeRefs.add(nodeRef); + + return more; + } + + private void preloadAndApplyPermissions() + { + preload(nodeRefs); + + // TODO track total time for incremental permission checks ... and cutoff (eg. 
based on some config) + List results = applyPostQueryPermissions(nodeRefs, nodeRefs.size()); + + for (NodeRef nodeRef : results) + { + // Call back + boolean more = resultsCallback.handle(nodeRef); + if (!more) + { + this.more = false; + break; + } + } + + nodeRefs.clear(); + } + + public void done() + { + if (nodeRefs.size() >= 0) + { + // finish batch + preloadAndApplyPermissions(); + } + } + } +} diff --git a/repository/src/main/resources/alfresco/ibatis/org.alfresco.repo.domain.dialect.Dialect/node-common-SqlMap.xml b/repository/src/main/resources/alfresco/ibatis/org.alfresco.repo.domain.dialect.Dialect/node-common-SqlMap.xml index 2b5f825492..cb9eb26508 100644 --- a/repository/src/main/resources/alfresco/ibatis/org.alfresco.repo.domain.dialect.Dialect/node-common-SqlMap.xml +++ b/repository/src/main/resources/alfresco/ibatis/org.alfresco.repo.domain.dialect.Dialect/node-common-SqlMap.xml @@ -133,7 +133,15 @@ - + + + + + + + + + @@ -169,8 +177,9 @@ - - + + + @@ -972,8 +981,8 @@ - + select distinct childNode.id as id, childNode.version as version, childStore.id as store_id, @@ -989,7 +998,7 @@ childNode.audit_created as audit_created, childNode.audit_modifier as audit_modifier, childNode.audit_modified as audit_modified, - childNode.audit_accessed as audit_accessed + childNode.audit_accessed as audit_accessed , prop1.node_id as prop1_node_id, prop1.qname_id as prop1_qname_id, @@ -1067,9 +1076,6 @@ #{item} - - -
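For reference, the two result handlers in the Java diff above (FilterSortResultHandler and UnsortedResultHandler) share the same batch-and-flush pattern, which is what keeps memory use and permission-check cost bounded for large child lists: rows are buffered up to BATCH_SIZE (256 * 4), each full batch is pre-processed in one bulk step (node-cache preload, plus the permission check on the unsorted path), and the callback's boolean return lets the stream stop as soon as the requested page is filled. The sketch below illustrates only that pattern; it is not the shipped handler code, and the names BatchingResultHandlerSketch, batchPreProcessor and resultCallback are hypothetical.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;
import java.util.function.Predicate;

class BatchingResultHandlerSketch<T>
{
    // Same batch size as the handlers in the diff above.
    private static final int BATCH_SIZE = 256 * 4;

    private final List<T> buffer = new ArrayList<>(BATCH_SIZE);
    private final Consumer<List<T>> batchPreProcessor; // e.g. a node-cache preload over the whole batch
    private final Predicate<T> resultCallback;         // returns false to request an early cutoff
    private boolean more = true;

    BatchingResultHandlerSketch(Consumer<List<T>> batchPreProcessor, Predicate<T> resultCallback)
    {
        this.batchPreProcessor = batchPreProcessor;
        this.resultCallback = resultCallback;
    }

    // Mirrors handleResult(...): ignore further rows once cut off, flush a full batch, then buffer the row.
    boolean handleResult(T result)
    {
        if (!more)
        {
            return false;
        }
        if (buffer.size() >= BATCH_SIZE)
        {
            flush();
        }
        buffer.add(result);
        return more;
    }

    // Mirrors done(): flush whatever is still buffered.
    void done()
    {
        flush();
    }

    private void flush()
    {
        batchPreProcessor.accept(buffer);  // one bulk operation per batch, not one per row
        for (T result : buffer)
        {
            if (!resultCallback.test(result))
            {
                more = false;              // callback asked to stop - drop the rest of the stream
                break;
            }
        }
        buffer.clear();
    }

    public static void main(String[] args)
    {
        // Keep only the first 5 results, however many rows the "query" streams in.
        List<Integer> page = new ArrayList<>();
        BatchingResultHandlerSketch<Integer> handler = new BatchingResultHandlerSketch<>(
                batch -> { /* preload / bulk permission check would happen here */ },
                row -> {
                    if (page.size() < 5)
                    {
                        page.add(row);
                    }
                    return page.size() < 5;
                });
        for (int row = 0; row < 10_000; row++)
        {
            if (!handler.handleResult(row))
            {
                break;  // early cutoff propagated back to the producer
            }
        }
        handler.done();
        System.out.println(page); // [0, 1, 2, 3, 4]
    }
}

In the shipped handlers the pre-processing step is preload(...), which calls nodeDAO.cacheNodes(...), so each batch touches the database once before individual nodes are inspected, and the early cutoff avoids permission-checking more children than the requested page actually needs.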