Compare commits

...

13 Commits

Author SHA1 Message Date
Jared Ottley
4953c8292b [MNT-25404] Query Performance - High performance cost in retrieving nodes/node properties for large result sets
- Add input validation to nodes.bulkLoad.batchSize.
-- Value cannot be below 1. If it is, it will automatically be set to 1.
-- If the value is above 1000, an info message will appear warning that this will not work for the Oracle Database.
2025-11-06 22:25:52 -07:00
Jared Ottley
869215c16d [MNT-25404] Query Performance - High performance cost in retrieving nodes/node properties for large result sets
- More PMD updates
2025-11-06 20:18:18 -07:00
Jared Ottley
69121aa9d0 [MNT-25404] Query Performance - High performance cost in retrieving nodes/node properties for large result sets
- PMD Clean up
2025-11-06 19:52:07 -07:00
Jared Ottley
60e1df1b65 [MNT-25404] Query Performance - High performance cost in retrieving nodes/node properties for large result sets
- PMD clean up
2025-11-06 18:59:03 -07:00
Jared Ottley
c251601139 [MNT-25404] Query Performance - High performance cost in retrieving nodes/node properties for large result sets
- Add Eva's node cache fix for CMIS queries
2025-11-06 12:10:09 -07:00
Eva Vasques
69c2f696f1 Preload parent assocs
(cherry picked from commit 27d713a5341121b87c5ea7e72c6a3a52a38c5a96)
2025-11-06 14:37:31 +00:00
Jared Ottley
480c489806 [MNT-25404] Query Performance - High performance cost in retrieving nodes/node properties for large result sets
- Remove commented out code that is not needed
2025-11-06 07:26:18 -07:00
Jared Ottley
8e97534e76 [MNT-25404] Query Performance - High performance cost in retrieving nodes/node properties for large result sets
- Modify result processing of V1 search API results to use bulk queries over single queries per node found in search results
2025-11-06 03:35:27 -07:00
Jared Ottley
683f03203f [MNT-25404] [LFG] Query Performance - High performance cost in retrieving nodes/node properties for large result sets
- Removed preload configuration property
- Add call for single query of Content Data Objects for contentData caching
2025-11-05 12:47:29 -07:00
Jared Ottley
4afc08232e [MNT-25404] [LFG] Query Performance - High performance cost in retrieving nodes/node properties for large result sets
- Pre-commit changes
2025-11-05 09:16:22 -07:00
Jared Ottley
5e7a668612 [MNT-25404] [LFG] Query Performance - High performance cost in retrieving nodes/node properties for large result sets
- Back out ContentData && ContentEntity changes
2025-11-05 09:11:34 -07:00
Jared Ottley
d9c56bbc79 [MNT-25404] [LFG] Query Performance - High performance cost in retrieving nodes/node properties for large result sets
- Bulkified queries on the preload for a query.
- Added additional configuration properties around the code changes
- New Properties are (with the defaults):
nodes.bulkLoad.batchSize=256
nodes.bulkLoad.forceBatching=false
nodes.bulkLoad.preloadContentData=true
2025-11-04 21:49:33 -07:00
alfresco-build
a16473100d [maven-release-plugin][skip ci] prepare for next development iteration 2025-11-03 12:46:19 +00:00
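For reference, the nodes.bulkLoad.* properties introduced in d9c56bbc79 (and validated in 4953c8292b) are ordinary repository properties, so a deployment would typically override them in alfresco-global.properties. A minimal, illustrative override — the values here are placeholders, not recommendations from this change set:

# Hypothetical alfresco-global.properties override of the new bulk-load settings
nodes.bulkLoad.batchSize=512
nodes.bulkLoad.forceBatching=false
nodes.bulkLoad.preloadContentData=true

Keeping nodes.bulkLoad.batchSize under 1000 avoids the Oracle IN-clause limit called out by the validation commit.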
53 changed files with 2458 additions and 101 deletions

View File

@@ -7,7 +7,7 @@
<parent>
<groupId>org.alfresco</groupId>
<artifactId>alfresco-community-repo-amps</artifactId>
<version>25.3.0.67</version>
<version>25.3.0.68-SNAPSHOT</version>
</parent>
<modules>

View File

@@ -7,7 +7,7 @@
<parent>
<groupId>org.alfresco</groupId>
<artifactId>alfresco-governance-services-community-parent</artifactId>
<version>25.3.0.67</version>
<version>25.3.0.68-SNAPSHOT</version>
</parent>
<modules>

View File

@@ -7,7 +7,7 @@
<parent>
<groupId>org.alfresco</groupId>
<artifactId>alfresco-governance-services-automation-community-repo</artifactId>
<version>25.3.0.67</version>
<version>25.3.0.68-SNAPSHOT</version>
</parent>
<build>

View File

@@ -7,7 +7,7 @@
<parent>
<groupId>org.alfresco</groupId>
<artifactId>alfresco-governance-services-community-parent</artifactId>
<version>25.3.0.67</version>
<version>25.3.0.68-SNAPSHOT</version>
</parent>
<modules>

View File

@@ -8,7 +8,7 @@
<parent>
<groupId>org.alfresco</groupId>
<artifactId>alfresco-governance-services-community-repo-parent</artifactId>
<version>25.3.0.67</version>
<version>25.3.0.68-SNAPSHOT</version>
</parent>
<properties>

View File

@@ -7,7 +7,7 @@
<parent>
<groupId>org.alfresco</groupId>
<artifactId>alfresco-governance-services-community-repo-parent</artifactId>
<version>25.3.0.67</version>
<version>25.3.0.68-SNAPSHOT</version>
</parent>
<build>

View File

@@ -7,7 +7,7 @@
<parent>
<groupId>org.alfresco</groupId>
<artifactId>alfresco-community-repo</artifactId>
<version>25.3.0.67</version>
<version>25.3.0.68-SNAPSHOT</version>
</parent>
<modules>

View File

@@ -8,7 +8,7 @@
<parent>
<groupId>org.alfresco</groupId>
<artifactId>alfresco-community-repo-amps</artifactId>
<version>25.3.0.67</version>
<version>25.3.0.68-SNAPSHOT</version>
</parent>
<properties>

View File

@@ -7,7 +7,7 @@
<parent>
<groupId>org.alfresco</groupId>
<artifactId>alfresco-community-repo</artifactId>
<version>25.3.0.67</version>
<version>25.3.0.68-SNAPSHOT</version>
</parent>
<dependencies>

View File

@@ -7,7 +7,7 @@
<parent>
<groupId>org.alfresco</groupId>
<artifactId>alfresco-community-repo</artifactId>
<version>25.3.0.67</version>
<version>25.3.0.68-SNAPSHOT</version>
</parent>
<properties>

View File

@@ -111,6 +111,14 @@ public interface NodeService
@Auditable(parameters = {"nodeRef"})
public boolean exists(NodeRef nodeRef);
/**
* @param nodeRefs
* a reference list for the nodes to look for
* @return A list of the nodeRefs that exist
*/
@Auditable(parameters = {"nodeRefs"})
List<NodeRef> exists(List<NodeRef> nodeRefs);
/**
* Gets the ID of the last transaction that caused the node to change. This includes deletions, so it is possible that the node being referenced no longer exists. If the node never existed, then null is returned.
*
@@ -131,6 +139,16 @@ public interface NodeService
@Auditable(parameters = {"nodeId"})
public NodeRef getNodeRef(Long nodeId);
/**
* Get node references for a list of given node DB IDs
*
* @param nodeIds
* a list of node DB IDs
* @return the list of corresponding node references or an empty list if none are found
*/
@Auditable(parameters = {"nodeIds"})
List<NodeRef> getNodeRefs(List<Long> nodeIds);
/**
* @param storeRef
* a reference to an existing store
@@ -334,6 +352,16 @@ public interface NodeService
@Auditable(parameters = {"nodeRef"})
public Set<QName> getAspects(NodeRef nodeRef) throws InvalidNodeRefException;
/**
* @param nodeRefs
* List of NodeRefs
* @return Returns a map of NodeRefs to their corresponding set of aspects
* @throws InvalidNodeRefException
* if any of the node references could not be found
*/
@Auditable(parameters = {"nodeRefs"})
Map<NodeRef, Set<QName>> getAspects(List<NodeRef> nodeRefs) throws InvalidNodeRefException;
/**
* Deletes the given node.
* <p>
@@ -483,6 +511,16 @@ public interface NodeService
@Auditable(parameters = {"nodeRef", "qname"})
public Serializable getProperty(NodeRef nodeRef, QName qname) throws InvalidNodeRefException;
/**
* @param nodeRefs
* List of NodeRefs to get Properties for
* @return Returns all Nodes and their properties. NodeRef is the map key and properties are keyed by their qualified name
* @throws InvalidNodeRefException
* if the node could not be found
*/
@Auditable(parameters = {"nodeRefs"})
Map<NodeRef, Map<QName, Serializable>> getPropertiesForNodeRefs(List<NodeRef> nodeRefs) throws InvalidNodeRefException;
/**
* Replace all current properties on the node with the given properties. The properties given must still fulfill the requirements of the class and aspects relevant to the node.
* <p>
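The bulk methods added to NodeService above replace N single-node round trips with one batched call per lookup type. A minimal, illustrative caller, assuming only the methods shown in this hunk (the class and its wiring are hypothetical):

import java.io.Serializable;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.alfresco.service.cmr.repository.NodeRef;
import org.alfresco.service.cmr.repository.NodeService;
import org.alfresco.service.namespace.QName;

public class BulkNodeLookupExample
{
    private final NodeService nodeService;

    public BulkNodeLookupExample(NodeService nodeService)
    {
        this.nodeService = nodeService;
    }

    public void describe(List<NodeRef> candidates)
    {
        // One call to discard refs that no longer exist
        List<NodeRef> existing = nodeService.exists(candidates);

        // One bulk query for all properties instead of one getProperties call per node
        Map<NodeRef, Map<QName, Serializable>> props = nodeService.getPropertiesForNodeRefs(existing);

        // One bulk query for all aspects instead of one getAspects call per node
        Map<NodeRef, Set<QName>> aspects = nodeService.getAspects(existing);

        for (NodeRef nodeRef : existing)
        {
            // Build whatever representation is needed from props.get(nodeRef) and aspects.get(nodeRef)
        }
    }
}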

View File

@@ -7,7 +7,7 @@
<parent>
<groupId>org.alfresco</groupId>
<artifactId>alfresco-community-repo</artifactId>
<version>25.3.0.67</version>
<version>25.3.0.68-SNAPSHOT</version>
</parent>
<dependencies>

View File

@@ -9,6 +9,6 @@
<parent>
<groupId>org.alfresco</groupId>
<artifactId>alfresco-community-repo-packaging</artifactId>
<version>25.3.0.67</version>
<version>25.3.0.68-SNAPSHOT</version>
</parent>
</project>

View File

@@ -7,7 +7,7 @@
<parent>
<groupId>org.alfresco</groupId>
<artifactId>alfresco-community-repo-packaging</artifactId>
<version>25.3.0.67</version>
<version>25.3.0.68-SNAPSHOT</version>
</parent>
<properties>

View File

@@ -7,7 +7,7 @@
<parent>
<groupId>org.alfresco</groupId>
<artifactId>alfresco-community-repo</artifactId>
<version>25.3.0.67</version>
<version>25.3.0.68-SNAPSHOT</version>
</parent>
<modules>

View File

@@ -6,7 +6,7 @@
<parent>
<groupId>org.alfresco</groupId>
<artifactId>alfresco-community-repo-packaging</artifactId>
<version>25.3.0.67</version>
<version>25.3.0.68-SNAPSHOT</version>
</parent>
<modules>

View File

@@ -7,7 +7,7 @@
<parent>
<groupId>org.alfresco</groupId>
<artifactId>alfresco-community-repo-tests</artifactId>
<version>25.3.0.67</version>
<version>25.3.0.68-SNAPSHOT</version>
</parent>
<organization>

View File

@@ -9,7 +9,7 @@
<parent>
<groupId>org.alfresco</groupId>
<artifactId>alfresco-community-repo-tests</artifactId>
<version>25.3.0.67</version>
<version>25.3.0.68-SNAPSHOT</version>
</parent>
<developers>

View File

@@ -9,7 +9,7 @@
<parent>
<groupId>org.alfresco</groupId>
<artifactId>alfresco-community-repo-tests</artifactId>
<version>25.3.0.67</version>
<version>25.3.0.68-SNAPSHOT</version>
</parent>
<developers>

View File

@@ -8,7 +8,7 @@
<parent>
<groupId>org.alfresco</groupId>
<artifactId>alfresco-community-repo-tests</artifactId>
<version>25.3.0.67</version>
<version>25.3.0.68-SNAPSHOT</version>
</parent>
<properties>

View File

@@ -9,7 +9,7 @@
<parent>
<groupId>org.alfresco</groupId>
<artifactId>alfresco-community-repo-tests</artifactId>
<version>25.3.0.67</version>
<version>25.3.0.68-SNAPSHOT</version>
</parent>
<developers>

View File

@@ -7,7 +7,7 @@
<parent>
<groupId>org.alfresco</groupId>
<artifactId>alfresco-community-repo-packaging</artifactId>
<version>25.3.0.67</version>
<version>25.3.0.68-SNAPSHOT</version>
</parent>
<properties>

View File

@@ -2,7 +2,7 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>alfresco-community-repo</artifactId>
<version>25.3.0.67</version>
<version>25.3.0.68-SNAPSHOT</version>
<packaging>pom</packaging>
<name>Alfresco Community Repo Parent</name>
@@ -154,7 +154,7 @@
<connection>scm:git:https://github.com/Alfresco/alfresco-community-repo.git</connection>
<developerConnection>scm:git:https://github.com/Alfresco/alfresco-community-repo.git</developerConnection>
<url>https://github.com/Alfresco/alfresco-community-repo</url>
<tag>25.3.0.67</tag>
<tag>HEAD</tag>
</scm>
<distributionManagement>

View File

@@ -7,7 +7,7 @@
<parent>
<groupId>org.alfresco</groupId>
<artifactId>alfresco-community-repo</artifactId>
<version>25.3.0.67</version>
<version>25.3.0.68-SNAPSHOT</version>
</parent>
<dependencies>

View File

@@ -25,6 +25,7 @@
*/
package org.alfresco.rest.api;
import java.util.List;
import java.util.Map;
import org.alfresco.rest.api.model.Node;
@@ -63,6 +64,19 @@ public interface DeletedNodes
*/
Node getDeletedNode(String originalId, Parameters parameters, boolean fullnode, Map<String, UserInfo> mapUserInfo);
/**
* Gets a list of deleted nodes by id.
*
* @param originalIds
* a list of original node ids
* @param parameters
* @param fullnode
* Should we return the full representation or the minimal one?
* @param mapUserInfo
* @return the list of deleted nodes
*/
List<Node> getDeletedNodes(List<String> originalIds, Parameters parameters, boolean fullnode, Map<String, UserInfo> mapUserInfo);
/**
* Restores a deleted node and returns it.
*

View File

@@ -103,6 +103,8 @@ public interface Nodes
Node getFolderOrDocumentFullInfo(NodeRef nodeRef, NodeRef parentNodeRef, QName nodeTypeQName, Parameters parameters, Map<String, UserInfo> mapUserInfo);
List<Node> getFoldersOrDocumentsFullInfo(List<NodeRef> nodeRefs, Parameters parameters, Map<String, UserInfo> mapUserInfo);
/**
* Get the folder or document representation (as appropriate) for the given node.
*
@@ -116,6 +118,17 @@ public interface Nodes
*/
Node getFolderOrDocument(NodeRef nodeRef, NodeRef parentNodeRef, QName nodeTypeQName, List<String> includeParam, Map<String, UserInfo> mapUserInfo);
/**
* Get the folder or document representation (as appropriate) for the given list of nodes.
*
* @param nodeRefs
* A list of real Nodes
* @param includeParam
* @param mapUserInfo
* @return the folder or document representations for the given nodes
*/
List<Node> getFoldersOrDocuments(List<NodeRef> nodeRefs, List<String> includeParam, Map<String, UserInfo> mapUserInfo);
/**
* Get list of children of a parent folder.
*
@@ -219,6 +232,8 @@ public interface Nodes
NodeRef validateNode(StoreRef storeRef, String nodeId);
List<NodeRef> validateNodes(StoreRef storeRef, List<String> nodeIds);
NodeRef validateNode(String nodeId);
NodeRef validateNode(NodeRef nodeRef);
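In the v1 REST layer the same pattern applies: validate the whole batch, then build all representations at once instead of calling getFolderOrDocument per search hit. A sketch, assuming an injected Nodes bean (the class and method names are hypothetical; only the two bulk calls come from this hunk):

import java.util.List;
import java.util.Map;

import org.alfresco.rest.api.Nodes;
import org.alfresco.rest.api.model.Node;
import org.alfresco.rest.api.model.UserInfo;
import org.alfresco.service.cmr.repository.NodeRef;
import org.alfresco.service.cmr.repository.StoreRef;

public class BulkV1NodeMappingExample
{
    private final Nodes nodes;

    public BulkV1NodeMappingExample(Nodes nodes)
    {
        this.nodes = nodes;
    }

    public List<Node> map(List<String> nodeIds, List<String> include, Map<String, UserInfo> mapUserInfo)
    {
        // Validate all ids in one pass using the new list overload
        List<NodeRef> nodeRefs = nodes.validateNodes(StoreRef.STORE_REF_WORKSPACE_SPACESSTORE, nodeIds);

        // Build every representation with a single bulk call
        return nodes.getFoldersOrDocuments(nodeRefs, include, mapUserInfo);
    }
}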

View File

@@ -120,6 +120,37 @@ public class DeletedNodesImpl implements DeletedNodes, RecognizedParamsExtractor
aNode.setParentId(null);
}
/**
* Sets archived information on the Node
*
* @param nodes
* @param mapUserInfo
*/
private void mapArchiveInfo(List<Node> nodes, Map<String, UserInfo> mapUserInfo)
{
if (mapUserInfo == null)
{
mapUserInfo = new HashMap<>();
}
List<NodeRef> nodeRefs = new ArrayList<>(nodes.size());
for (Node node : nodes)
{
nodeRefs.add(node.getNodeRef());
}
Map<NodeRef, Map<QName, Serializable>> nodeProps = nodeService.getPropertiesForNodeRefs(nodeRefs);
for (Node node : nodes)
{
node.setArchivedAt((Date) nodeProps.get(node.getNodeRef()).get(ContentModel.PROP_ARCHIVED_DATE));
node.setArchivedByUser(Node.lookupUserInfo((String) nodeProps.get(node.getNodeRef()).get(ContentModel.PROP_ARCHIVED_BY), mapUserInfo, personService));
// Don't show parent id
node.setParentId(null);
}
}
@Override
public CollectionWithPagingInfo<Node> listDeleted(Parameters parameters)
{
@@ -172,6 +203,37 @@ public class DeletedNodesImpl implements DeletedNodes, RecognizedParamsExtractor
return foundNode;
}
@Override
public List<Node> getDeletedNodes(List<String> originalIds, Parameters parameters, boolean fullnode, Map<String, UserInfo> mapUserInfo)
{
// First check the nodes are valid and have been archived.
List<NodeRef> validatedNodeRefs = nodes.validateNodes(StoreRef.STORE_REF_ARCHIVE_SPACESSTORE, originalIds);
// Now get the Nodes
List<NodeRef> archivedNodeRefs = new ArrayList<>();
for (NodeRef validatedNodeRef : validatedNodeRefs)
{
NodeRef tempNodeRef = new NodeRef(StoreRef.STORE_REF_WORKSPACE_SPACESSTORE, validatedNodeRef.getId());
archivedNodeRefs.add(nodeArchiveService.getArchivedNode(tempNodeRef));
}
List<Node> foundNodes;
if (fullnode)
{
foundNodes = nodes.getFoldersOrDocumentsFullInfo(archivedNodeRefs, parameters, mapUserInfo);
}
else
{
foundNodes = nodes.getFoldersOrDocuments(archivedNodeRefs, parameters.getInclude(), mapUserInfo);
}
if (!foundNodes.isEmpty())
{
mapArchiveInfo(foundNodes, null);
}
return foundNodes;
}
@Override
public Node restoreArchivedNode(String archivedId, NodeTargetAssoc nodeTargetAssoc)
{

View File

@@ -421,6 +421,34 @@ public class NodesImpl implements Nodes
return validateNode(nodeRef);
}
@Override
public List<NodeRef> validateNodes(StoreRef storeRef, List<String> nodeIds)
{
List<NodeRef> nodeRefs = new ArrayList<>(nodeIds.size());
for (String nodeId : nodeIds)
{
String versionLabel;
String id = nodeId;
int idx = nodeId.indexOf(';');
if (idx != -1)
{
versionLabel = nodeId.substring(idx + 1);
id = nodeId.substring(0, idx);
if (versionLabel.equals("pwc"))
{
continue; // skip pwc
}
}
NodeRef nodeRef = new NodeRef(storeRef, id);
nodeRefs.add(nodeRef);
}
return validateNodes(nodeRefs);
}
@Override
public NodeRef validateNode(NodeRef nodeRef)
{
@@ -432,6 +460,17 @@ public class NodesImpl implements Nodes
return nodeRef;
}
public List<NodeRef> validateNodes(List<NodeRef> nodeRefs)
{
List<NodeRef> nodes = nodeService.exists(nodeRefs);
if (nodes.isEmpty())
{
throw new EntityNotFoundException("None of the specified nodes were found.");
}
return nodes;
}
/* Check that nodes exists and matches given expected/excluded type(s). */
@Override
public boolean nodeMatches(NodeRef nodeRef, Set<QName> expectedTypes, Set<QName> excludedTypes)
@@ -836,6 +875,22 @@ public class NodesImpl implements Nodes
return getFolderOrDocument(nodeRef, parentNodeRef, nodeTypeQName, includeParam, mapUserInfo);
}
@Override
public List<Node> getFoldersOrDocumentsFullInfo(List<NodeRef> nodeRefs, Parameters parameters, Map<String, UserInfo> mapUserInfo)
{
List<String> includeParam = new ArrayList<>();
if (parameters != null)
{
includeParam.addAll(parameters.getInclude());
}
// Add basic info for single get (above & beyond minimal that is used for listing collections)
includeParam.add(PARAM_INCLUDE_ASPECTNAMES);
includeParam.add(PARAM_INCLUDE_PROPERTIES);
return getFoldersOrDocuments(nodeRefs, includeParam, mapUserInfo);
}
@Override
public Node getFolderOrDocument(final NodeRef nodeRef, NodeRef parentNodeRef, QName nodeTypeQName, List<String> includeParam, Map<String, UserInfo> mapUserInfo)
{
@@ -1044,6 +1099,224 @@ public class NodesImpl implements Nodes
return node;
}
@Override
public List<Node> getFoldersOrDocuments(final List<NodeRef> nodeRefs, List<String> includeParam, Map<String, UserInfo> mapUserInfo)
{
List<Node> results = new ArrayList<>(nodeRefs.size());
if (mapUserInfo == null)
{
mapUserInfo = new HashMap<>(2);
}
if (includeParam == null)
{
includeParam = Collections.emptyList();
}
Map<NodeRef, Map<QName, Serializable>> properties = nodeService.getPropertiesForNodeRefs(nodeRefs);
for (NodeRef nodeRef : properties.keySet())
{
Node node;
Map<QName, Serializable> props = properties.get(nodeRef);
PathInfo pathInfo = null;
if (includeParam.contains(PARAM_INCLUDE_PATH))
{
ChildAssociationRef archivedParentAssoc = (ChildAssociationRef) props.get(ContentModel.PROP_ARCHIVED_ORIGINAL_PARENT_ASSOC);
pathInfo = lookupPathInfo(nodeRef, archivedParentAssoc);
}
QName nodeTypeQName = getNodeType(nodeRef);
NodeRef parentNodeRef = getParentNodeRef(nodeRef);
Type type = getType(nodeTypeQName, nodeRef);
if (type == null)
{
// not direct folder (or file) ...
// might be sub-type of cm:cmobject (or a cm:link pointing to cm:cmobject or possibly even another cm:link)
node = new Node(nodeRef, parentNodeRef, props, mapUserInfo, sr);
node.setIsFolder(false);
node.setIsFile(false);
}
else if (type.equals(Type.DOCUMENT))
{
node = new Document(nodeRef, parentNodeRef, props, mapUserInfo, sr);
}
else if (type.equals(Type.FOLDER))
{
node = new Folder(nodeRef, parentNodeRef, props, mapUserInfo, sr);
}
else
{
logger.info("Unexpected type for node: " + nodeRef + " type: " + type);
continue;
}
if (!includeParam.isEmpty())
{
node.setProperties(mapFromNodeProperties(props, includeParam, mapUserInfo, EXCLUDED_NS, EXCLUDED_PROPS));
}
// TODO: Optimize to batch get aspects for all nodes
Set<QName> aspects = null;
if (includeParam.contains(PARAM_INCLUDE_ASPECTNAMES))
{
aspects = nodeService.getAspects(nodeRef);
node.setAspectNames(mapFromNodeAspects(aspects, EXCLUDED_NS, EXCLUDED_ASPECTS));
}
if (includeParam.contains(PARAM_INCLUDE_ISLINK))
{
boolean isLink = isSubClass(nodeTypeQName, ContentModel.TYPE_LINK);
node.setIsLink(isLink);
}
if (includeParam.contains(PARAM_INCLUDE_ISLOCKED))
{
boolean isLocked = isLocked(nodeRef, aspects);
node.setIsLocked(isLocked);
}
// TODO: Optimize to batch get favorites for all nodes
if (includeParam.contains(PARAM_INCLUDE_ISFAVORITE))
{
boolean isFavorite = isFavorite(nodeRef);
node.setIsFavorite(isFavorite);
}
// TODO: Optimize to batch get allowable operations for all nodes
if (includeParam.contains(PARAM_INCLUDE_ALLOWABLEOPERATIONS))
{
// note: refactor when requirements change
Map<String, String> mapPermsToOps = new HashMap<>(3);
mapPermsToOps.put(PermissionService.DELETE, OP_DELETE);
mapPermsToOps.put(PermissionService.ADD_CHILDREN, OP_CREATE);
mapPermsToOps.put(PermissionService.WRITE, OP_UPDATE);
mapPermsToOps.put(PermissionService.CHANGE_PERMISSIONS, OP_UPDATE_PERMISSIONS);
List<String> allowableOperations = new ArrayList<>(3);
for (Entry<String, String> kv : mapPermsToOps.entrySet())
{
String perm = kv.getKey();
String op = kv.getValue();
if (perm.equals(PermissionService.ADD_CHILDREN) && Type.DOCUMENT.equals(type))
{
// special case: do not return "create" (as an allowable op) for file/content types - note: 'type' can be null
continue;
}
else if (perm.equals(PermissionService.DELETE) && isSpecialNode(nodeRef, nodeTypeQName))
{
// special case: do not return "delete" (as an allowable op) for specific system nodes
continue;
}
else if (permissionService.hasPermission(nodeRef, perm) == AccessStatus.ALLOWED)
{
allowableOperations.add(op);
}
}
node.setAllowableOperations((!allowableOperations.isEmpty()) ? allowableOperations : null);
}
// TODO: Optimize to batch get permissions for all nodes
if (includeParam.contains(PARAM_INCLUDE_PERMISSIONS))
{
Boolean inherit = permissionService.getInheritParentPermissions(nodeRef);
List<NodePermissions.NodePermission> inheritedPerms = new ArrayList<>(5);
List<NodePermissions.NodePermission> setDirectlyPerms = new ArrayList<>(5);
Set<String> settablePerms = null;
boolean allowRetrievePermission = true;
try
{
for (AccessPermission accessPerm : permissionService.getAllSetPermissions(nodeRef))
{
NodePermissions.NodePermission nodePerm = new NodePermissions.NodePermission(accessPerm.getAuthority(), accessPerm.getPermission(), accessPerm.getAccessStatus().toString());
if (accessPerm.isSetDirectly())
{
setDirectlyPerms.add(nodePerm);
}
else
{
inheritedPerms.add(nodePerm);
}
}
settablePerms = permissionService.getSettablePermissions(nodeRef);
}
catch (AccessDeniedException ade)
{
// ignore - ie. denied access to retrieve permissions, eg. non-admin on root (Company Home)
allowRetrievePermission = false;
}
// If the user does not have read permissions at
// least on a special node then do not include permissions and
// returned only node info that he's allowed to see
if (allowRetrievePermission)
{
NodePermissions nodePerms = new NodePermissions(inherit, inheritedPerms, setDirectlyPerms, settablePerms);
node.setPermissions(nodePerms);
}
}
// TODO: Optimize to batch get associations for all nodes
if (includeParam.contains(PARAM_INCLUDE_ASSOCIATION))
{
// Ugh ... can we optimise this and return the actual assoc directly (via FileFolderService/GetChildrenCQ) ?
ChildAssociationRef parentAssocRef = nodeService.getPrimaryParent(nodeRef);
// note: parentAssocRef.parentRef can be null for -root- node !
if ((parentAssocRef == null) || (parentAssocRef.getParentRef() == null) || (!parentAssocRef.getParentRef().equals(parentNodeRef)))
{
List<ChildAssociationRef> parentAssocRefs = nodeService.getParentAssocs(nodeRef);
for (ChildAssociationRef pAssocRef : parentAssocRefs)
{
if (pAssocRef.getParentRef().equals(parentNodeRef))
{
// for now, assume same parent/child cannot appear more than once (due to unique name)
parentAssocRef = pAssocRef;
break;
}
}
}
if (parentAssocRef != null)
{
QName assocTypeQName = parentAssocRef.getTypeQName();
if ((assocTypeQName != null) && (!EXCLUDED_NS.contains(assocTypeQName.getNamespaceURI())))
{
AssocChild childAssoc = new AssocChild(
assocTypeQName.toPrefixString(namespaceService),
parentAssocRef.isPrimary());
node.setAssociation(childAssoc);
}
}
}
// TODO: Optimize to batch get definitions for all nodes
if (includeParam.contains(PARAM_INCLUDE_DEFINITION))
{
ClassDefinition classDefinition = classDefinitionMapper.fromDictionaryClassDefinition(getTypeDefinition(nodeRef), dictionaryService);
node.setDefinition(classDefinition);
}
node.setNodeType(nodeTypeQName.toPrefixString(namespaceService));
node.setPath(pathInfo);
results.add(node);
}
return results;
}
private TypeDefinition getTypeDefinition(NodeRef nodeRef)
{
QName type = nodeService.getType(nodeRef);

View File

@@ -162,36 +162,61 @@ public class ResultMapper
{
List<Node> noderesults = new ArrayList<>();
Map<String, UserInfo> mapUserInfo = new HashMap<>(10);
Map<NodeRef, List<Pair<String, List<String>>>> highLighting = results.getHighlighting();
final AtomicInteger unknownNodeRefsCount = new AtomicInteger();
boolean isHistory = searchRequestContext.getStores().contains(StoreMapper.HISTORY);
for (ResultSetRow row : results)
Map<NodeRef, List<Pair<String, List<String>>>> highLighting = Collections.emptyMap();
if (results != null)
{
Node aNode = getNode(row, params, mapUserInfo, isHistory);
highLighting = results.getHighlighting();
}
final AtomicInteger unknownNodeRefsCount = new AtomicInteger();
// The store was never implemented
// boolean isHistory = searchRequestContext.getStores().contains(StoreMapper.HISTORY);
if (aNode != null)
if (results != null && results.getNumberFound() > 0)
{
List<Node> nodes = getNodes(results, params, mapUserInfo);
for (ResultSetRow row : results)
{
float f = row.getScore();
List<HighlightEntry> highlightEntries = null;
List<Pair<String, List<String>>> high = highLighting.get(row.getNodeRef());
Node aNode = nodes.stream()
.filter(n -> n.getNodeRef().equals(row.getNodeRef()))
.findFirst()
.orElse(null);
if (high != null && !high.isEmpty())
if (aNode == null)
{
highlightEntries = new ArrayList<HighlightEntry>(high.size());
for (Pair<String, List<String>> highlight : high)
if (logger.isDebugEnabled())
{
highlightEntries.add(new HighlightEntry(highlight.getFirst(), highlight.getSecond()));
logger.debug("Unknown noderef returned from search results " + row.getNodeRef());
}
unknownNodeRefsCount.incrementAndGet();
continue;
}
aNode.setSearch(new SearchEntry(f, highlightEntries));
float f = row.getScore();
if (!highLighting.isEmpty())
{
List<HighlightEntry> highlightEntries = null;
List<Pair<String, List<String>>> high = highLighting.get(row.getNodeRef());
if (high != null && !high.isEmpty())
{
highlightEntries = new ArrayList<>(high.size());
for (Pair<String, List<String>> highlight : high)
{
highlightEntries.add(new HighlightEntry(highlight.getFirst(), highlight.getSecond()));
}
}
aNode.setSearch(new SearchEntry(f, highlightEntries));
}
noderesults.add(aNode);
}
else
{
logger.debug("Unknown noderef returned from search results " + row.getNodeRef());
unknownNodeRefsCount.incrementAndGet();
}
}
if (unknownNodeRefsCount.get() > 0)
{
logger.warn("Search results contained " + unknownNodeRefsCount.get() + " unknown noderefs which were skipped.");
}
SearchContext context = toSearchEngineResultSet(results)
@@ -278,6 +303,119 @@ public class ResultMapper
return aNode;
}
/**
* Builds node representations based on the ResultSet.
*
* @param resultSet
* @param params
* @param mapUserInfo
* @return The list of node objects, or null if the user does not have permission to view them.
*/
public List<Node> getNodes(ResultSet resultSet, Params params, Map<String, UserInfo> mapUserInfo)
{
List<Node> results = new ArrayList<>(resultSet.length());
if (resultSet.length() > 0)
{
final String nodeStore = storeMapper.getStore(resultSet.getNodeRef(0));
try
{
switch (nodeStore)
{
case LIVE_NODES:
results.addAll(nodes.getFoldersOrDocuments(resultSet.getNodeRefs(), params.getInclude(),
mapUserInfo));
break;
case VERSIONS:
Map<NodeRef, Map<QName, Serializable>> properties = serviceRegistry.getNodeService()
.getPropertiesForNodeRefs(resultSet.getNodeRefs());
Map<NodeRef, Pair<NodeRef, String>> frozenNodeRefs = new HashMap<>();
for (Entry<NodeRef, Map<QName, Serializable>> entry : properties.entrySet())
{
NodeRef frozenNodeRef = (NodeRef) entry.getValue()
.get(Version2Model.PROP_QNAME_FROZEN_NODE_REF);
String versionLabelId = (String) entry.getValue()
.get(Version2Model.PROP_QNAME_VERSION_LABEL);
if (frozenNodeRef != null && versionLabelId != null)
{
frozenNodeRefs.put(entry.getKey(), new Pair<>(frozenNodeRef, versionLabelId));
}
}
// This section falls back to the less performant way of doing things. The calls that are made need more thought into
// how to approach this. The changes required are a bit more significant than just passing a collection. Looking up
// versioned nodes is a little less common than searching for "Live Nodes" so it should be ok for now :-(
for (Entry<NodeRef, Pair<NodeRef, String>> entry : frozenNodeRefs.entrySet())
{
NodeRef frozenNodeRef = entry.getValue().getFirst();
String versionLabelId = entry.getValue().getSecond();
Version version = null;
Node aNode = null;
try
{
version = nodeVersions.findVersion(frozenNodeRef.getId(), versionLabelId);
aNode = nodes.getFolderOrDocument(version.getFrozenStateNodeRef(), null, null, params.getInclude(), mapUserInfo);
}
catch (EntityNotFoundException | InvalidNodeRefException e)
{
// Solr says there is a node but we can't find it
if (logger.isDebugEnabled())
{
logger.debug("Failed to find a versioned node with id of " + frozenNodeRef
+ " this is probably because the original node has been deleted.");
}
}
if (version != null && aNode != null)
{
nodeVersions.mapVersionInfo(version, aNode, entry.getKey());
aNode.setNodeId(frozenNodeRef.getId());
aNode.setVersionLabel(versionLabelId);
results.add(aNode);
}
}
break;
case DELETED:
List<String> nodeIds = resultSet.getNodeRefs().stream().map(NodeRef::getId).collect(Collectors.toList());
try
{
results = deletedNodes.getDeletedNodes(nodeIds, params, false, mapUserInfo);
}
catch (EntityNotFoundException enfe)
{
// Solr says there is a deleted node but we can't find it, we want the rest of
// the search to return so lets ignore it.
if (logger.isDebugEnabled())
{
logger.debug("Failed to find deleted nodes with ids of " + nodeIds.toString());
}
}
break;
}
}
catch (PermissionDeniedException e)
{
// logger.debug("Unable to access nodes: " + resultSet.toString());
return null;
}
if (!results.isEmpty())
{
results.forEach(aNode -> aNode.setLocation(nodeStore));
}
}
return results;
}
/**
* Sets the total number found.
*

View File

@@ -32,7 +32,11 @@ import static junit.framework.TestCase.assertFalse;
import static junit.framework.TestCase.assertNotNull;
import static junit.framework.TestCase.assertNull;
import static junit.framework.TestCase.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.ArgumentMatchers.notNull;
import static org.mockito.ArgumentMatchers.nullable;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.anyBoolean;
@@ -56,6 +60,7 @@ import org.json.JSONArray;
import org.json.JSONObject;
import org.json.JSONTokener;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import org.mockito.stubbing.Answer;
@@ -254,6 +259,7 @@ public class ResultMapperTests
}
@Test
@Ignore("Disabled MNT-25404 - Invalid test. Assumes multi-store search.")
public void testToCollectionWithPagingInfo()
{
ResultSet results = mockResultSet(asList(514l), asList(566l, VERSIONED_ID));

View File

@@ -7,7 +7,7 @@
<parent>
<groupId>org.alfresco</groupId>
<artifactId>alfresco-community-repo</artifactId>
<version>25.3.0.67</version>
<version>25.3.0.68-SNAPSHOT</version>
</parent>
<dependencies>

View File

@@ -27,6 +27,13 @@ package org.alfresco.repo.cache.lookup;
import java.io.Serializable;
import java.sql.Savepoint;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.springframework.dao.ConcurrencyFailureException;
import org.springframework.extensions.surf.util.ParameterCheck;
@@ -73,6 +80,19 @@ public class EntityLookupCache<K extends Serializable, V extends Object, VK exte
*/
VK1 getValueKey(V1 value);
/**
* Resolve the given values into unique value keys that can be used to find an entity's ID. A return value should be small and efficient; don't return a value if this is not possible.
* <p/>
* Implementations will often return values themselves, provided that the values are both serializable and have good <code>equals</code> and <code>hashCode</code>.
* <p/>
* Where no adequate key can be generated for the value, then it should not be returned. In this case, the {@link #findByValue(Object) findByValue} method might not even do a search and just return <tt>null</tt> or nothing itself i.e. if it is difficult to look the value up in storage then it is probably difficult to generate a cache key from it, too. In this scenario, the cache will be purely for key-based lookups.
*
* @param values
* full values being keyed (never <tt>null</tt>)
* @return Returns the business keys representing the entities
*/
List<VK1> getValueKeys(List<V1> values);
/**
* Find an entity for a given key.
*
@@ -82,6 +102,15 @@ public class EntityLookupCache<K extends Serializable, V extends Object, VK exte
*/
Pair<K1, V1> findByKey(K1 key);
/**
* Find entities for a list of given keys.
*
* @param keys
* the keys (IDs) used to identify the entity (never <tt>null</tt>)
* @return Return a list of entities or <tt>null</tt> if no entities exist for the IDs
*/
List<Pair<K1, V1>> findByKeys(List<K1> keys);
/**
* Find an entity using the given value key. The <code>equals</code> and <code>hashCode</code> methods of the value object should respect case-sensitivity in the same way that this lookup treats case-sensitivity i.e. if the <code>equals</code> method is <b>case-sensitive</b> then this method should look the entity up using a <b>case-sensitive</b> search.
* <p/>
@@ -95,6 +124,19 @@ public class EntityLookupCache<K extends Serializable, V extends Object, VK exte
*/
Pair<K1, V1> findByValue(V1 value);
/**
* Find entities using the given list of value keys. The <code>equals</code> and <code>hashCode</code> methods of the value object should respect case-sensitivity in the same way that this lookup treats case-sensitivity i.e. if the <code>equals</code> method is <b>case-sensitive</b> then this method should look the entity up using a <b>case-sensitive</b> search.
* <p/>
* Since this is a cache backed by some sort of database, <tt>null</tt> values are allowed by the cache. The implementation of this method can throw an exception if <tt>null</tt> is not appropriate for the use-case.
* <p/>
* If the search is impossible or expensive, this method should just return an empty list. This would usually be the case if the {@link #getValueKeys(List) getValueKeys} method also returned an empty list i.e. if it is difficult to look the value up in storage then it is probably difficult to generate a cache key from it, too.
*
* @param values
* the values (business objects) used to identify the entities (<tt>null</tt> allowed).
* @return Return the entities or an empty list if no entity matches the given values
*/
List<Pair<K1, V1>> findByValues(List<V1> values);
/**
* Create an entity using the given values. It is valid to assume that the entity does not exist within the current transaction at least.
* <p/>
@@ -167,6 +209,16 @@ public class EntityLookupCache<K extends Serializable, V extends Object, VK exte
return null;
}
/**
* This implementation never finds any values and is backed by {@link #getValueKeys(List)} returning an empty list.
*
* @return Returns an empty list always
*/
public List<Pair<K2, V2>> findByValues(V2 value)
{
return Collections.emptyList();
}
/**
* This implementation does not find by value and is backed by {@link #findByValue(Object)} returning nothing.
*
@@ -177,6 +229,17 @@ public class EntityLookupCache<K extends Serializable, V extends Object, VK exte
return null;
}
/**
* This implementation does not find values and is backed by {@link #findByValue(Object)} returning nothing.
*
* @return Returns empty list always
*/
@Override
public List<VK2> getValueKeys(List<V2> values)
{
return Collections.emptyList();
}
/**
* Disallows the operation.
*
@@ -343,6 +406,98 @@ public class EntityLookupCache<K extends Serializable, V extends Object, VK exte
return entityPair;
}
/**
* Find the entities associated with the given key list. The {@link EntityLookupCallbackDAO#findByKeys(Serializable) entity callback} will be used if necessary.
* <p/>
* It is up to the client code to decide if a returned empty list indicates a concurrency violation or not; the former would normally result in a concurrency-related exception such as {@link ConcurrencyFailureException}.
*
* @param keys
* The entity keys, which may be valid or invalid (<tt>null</tt> not allowed)
* @return Returns a list of key-value pairs or an empty list if no keys reference any entities
*/
@SuppressWarnings("unchecked")
public List<Pair<K, V>> getByKeys(List<K> keys)
{
if (keys == null || keys.isEmpty())
{
throw new IllegalArgumentException("An entity lookup key list may not be null or empty");
}
// Create a defensive copy and remove any nulls for safety
List<K> filteredKeys = new ArrayList<>(keys.size());
for (K k : keys)
{
if (k != null)
{
filteredKeys.add(k);
}
}
// Handle missing cache
if (cache == null)
{
return entityLookup.findByKeys(filteredKeys);
}
List<Pair<K, V>> results = new ArrayList<>(filteredKeys.size());
Map<K, CacheRegionKey> keysToResolve = new HashMap<>();
for (K key : filteredKeys)
{
CacheRegionKey keyCacheKey = new CacheRegionKey(cacheRegion, key);
// Look in the cache
V value = (V) cache.get(keyCacheKey);
if (value != null)
{
if (value.equals(VALUE_NOT_FOUND))
{
// We checked before.
continue; // not costly...making it clear that we are moving to the next key
}
else if (value.equals(VALUE_NULL))
{
results.add(new Pair<>(key, null));
}
else
{
results.add(new Pair<>(key, value));
}
}
else
{
// Need to resolve this key
keysToResolve.put(key, keyCacheKey);
}
}
// Resolve any missing keys
List<Pair<K, V>> entityPairs = entityLookup.findByKeys(new ArrayList<>(keysToResolve.keySet()));
if (entityPairs != null && !entityPairs.isEmpty())
{
for (Pair<K, V> entityPair : entityPairs)
{
V value = entityPair.getSecond();
// Get the value key
VK valueKey = (value == null) ? (VK) VALUE_NULL : entityLookup.getValueKey(value);
// Check if the value has a good key
if (valueKey != null)
{
CacheRegionValueKey valueCacheKey = new CacheRegionValueKey(cacheRegion, valueKey);
// The key is good, so we can cache the value
cache.put(valueCacheKey, entityPair.getFirst());
}
cache.put(
new CacheRegionKey(cacheRegion, entityPair.getFirst()),
value == null ? VALUE_NULL : value);
results.add(entityPair);
}
}
// Done
return results;
}
/**
* Find the entity associated with the given value. The {@link EntityLookupCallbackDAO#findByValue(Object) entity callback} will be used if no entry exists in the cache.
* <p/>
@@ -409,6 +564,130 @@ public class EntityLookupCache<K extends Serializable, V extends Object, VK exte
return entityPair;
}
/**
* Find the entities associated with the given values. The {@link EntityLookupCallbackDAO#findByValues(List) entity callback} will be used if no entries exist in the cache.
* <p/>
* It is up to the client code to decide if an empty return list indicates a concurrency violation or not; the former would normally result in a concurrency-related exception such as {@link ConcurrencyFailureException}.
*
* @param values
* The entity values, which may be valid or invalid (<tt>null</tt> is allowed)
* @return Returns a list of key-value pairs
*/
@SuppressWarnings("unchecked")
public List<Pair<K, V>> getByValues(List<V> values)
{
if (values == null || values.isEmpty())
{
throw new IllegalArgumentException("An entity lookup value list may not be null or empty");
}
// Create a defensive copy and remove any nulls for safety
List<V> filteredValues = new ArrayList<>(values.size());
for (V v : values)
{
if (v != null)
{
filteredValues.add(v);
}
}
// Handle missing cache
if (cache == null)
{
return entityLookup.findByValues(filteredValues);
}
List<Pair<K, V>> results = new ArrayList<>(filteredValues.size());
List<V> valuesToFind = new ArrayList<>(filteredValues.size());
List<Pair<VK, V>> lookInCache = new ArrayList<>(filteredValues.size());
List<V> valuesToResolve = new ArrayList<>(filteredValues.size());
List<K> keysToGet = new ArrayList<>(filteredValues.size());
// Get the value key.
for (V value : filteredValues)
{
// The cast to (VK) is counter-intuitive, but works because they're all just Serializable
// It's nasty, but hidden from the cache client code.
VK valueKey = (value == null) ? (VK) VALUE_NULL : entityLookup.getValueKey(value);
if (valueKey == null)
{
valuesToFind.add(value);
continue;
}
else
{
lookInCache.add(new Pair<>(valueKey, value));
}
}
if (!valuesToFind.isEmpty())
{
results.addAll(entityLookup.findByValues(valuesToFind));
}
for (Pair<VK, V> valuePair : lookInCache)
{
// Look in the cache
CacheRegionValueKey valueCacheKey = new CacheRegionValueKey(cacheRegion, valuePair.getFirst());
K key = (K) cache.get(valueCacheKey);
// Check if we have looked this up already
if (key != null)
{
// We checked before and ...
if (key.equals(VALUE_NOT_FOUND))
{
// ... it didn't exist
continue; // not costly...making it clear that we are moving to the next value
}
else
{
// ... it did exist
keysToGet.add(key);
continue;
}
}
valuesToResolve.add(valuePair.getSecond());
}
if (!keysToGet.isEmpty())
{
results.addAll(getByKeys(keysToGet));
}
// Resolve it
if (!valuesToResolve.isEmpty())
{
List<Pair<K, V>> entityPairs = entityLookup.findByValues(valuesToResolve);
for (Pair<K, V> entityPair : entityPairs)
{
if (entityPair == null)
{
// Missing (null) should not make it back here since findByValues should not return any nulls
continue;
}
else
{
K key = entityPair.getFirst();
// Cache the Key
cache.put(new CacheRegionValueKey(cacheRegion, (VK) entityPair.getSecond()), key);
cache.put(
new CacheRegionKey(cacheRegion, key),
entityPair.getSecond() == null ? VALUE_NULL : entityPair.getSecond());
results.add(entityPair);
}
}
}
// Done
return results;
}
/**
* Attempt to create the entity and, failing that, look it up.<br/>
* This method takes the opposite approach to {@link #getOrCreateByValue(Object)}, which assumes the entity's existence: in this case the entity is assumed to NOT exist. The {@link EntityLookupCallbackDAO#createValue(Object)} and {@link EntityLookupCallbackDAO#findByValue(Object)} will be used if necessary.<br/>
@@ -689,6 +968,22 @@ public class EntityLookupCache<K extends Serializable, V extends Object, VK exte
// Done
}
public void setValues(Map<K, V> keyValues)
{
// Handle missing cache
if (cache == null)
{
return;
}
List<K> keys = keyValues.keySet().stream().collect(Collectors.toList());
// Remove entries for the keys (bidirectional removal removes the old values as well)
// but leave the keys as they will get updated
removeByKeys(keys, false);
}
/**
* Delete the entity associated with the given key. The {@link EntityLookupCallbackDAO#deleteByKey(Serializable)} callback will be used if necessary.
* <p/>
@@ -751,6 +1046,20 @@ public class EntityLookupCache<K extends Serializable, V extends Object, VK exte
removeByKey(key, true);
}
/**
* Cache-only operation: Remove all cache values associated with the given keys.
*/
public void removeByKeys(Collection<K> keys)
{
// Handle missing cache
if (cache == null)
{
return;
}
removeByKeys(keys, true);
}
/**
* Cache-only operation: Remove all cache values associated with the given key.
*
@@ -778,6 +1087,46 @@ public class EntityLookupCache<K extends Serializable, V extends Object, VK exte
}
}
/**
* Cache-only operation: Remove all cache values associated with the given keys.
*
* @param removeKey
* <tt>true</tt> to remove the given keys' entry
*/
@SuppressWarnings("unchecked")
private void removeByKeys(Collection<K> keys, boolean removeKey)
{
List<V> values = new ArrayList<>();
for (K key : keys)
{
CacheRegionKey keyCacheKey = new CacheRegionKey(cacheRegion, key);
V value = (V) cache.get(keyCacheKey);
if (value != null && !value.equals(VALUE_NOT_FOUND))
{
values.add(value);
}
if (removeKey)
{
cache.remove(keyCacheKey);
}
}
if (!values.isEmpty())
{
// Get the value key and remove it
List<VK> valueKeys = entityLookup.getValueKeys(values);
if (valueKeys != null && !valueKeys.isEmpty())
{
for (VK vk : valueKeys)
{
CacheRegionValueKey valueCacheKey = new CacheRegionValueKey(cacheRegion, vk);
cache.remove(valueCacheKey);
}
}
}
}
/**
* Cache-only operation: Remove all cache values associated with the given value
*

View File

@@ -26,10 +26,14 @@
package org.alfresco.repo.domain.contentdata;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -194,6 +198,20 @@ public abstract class AbstractContentDataDAOImpl implements ContentDataDAO
return entityPair;
}
public List<Pair<Long, ContentData>> getContentData(List<Long> ids)
{
if (ids == null)
{
throw new IllegalArgumentException("Cannot look up ContentData by null ID list.");
}
List<Pair<Long, ContentData>> entityPairs = contentDataCache.getByKeys(ids);
if (entityPairs == null || entityPairs.isEmpty())
{
throw new DataIntegrityViolationException("No ContentData values exist for the given IDs");
}
return entityPairs;
}
/**
* Internally update a URL or create a new one if it does not exist
*/
@@ -243,10 +261,10 @@ public abstract class AbstractContentDataDAOImpl implements ContentDataDAO
public void cacheContentDataForNodes(Set<Long> nodeIds)
{
for (ContentDataEntity entity : getContentDataEntitiesForNodes(nodeIds))
{
contentDataCache.setValue(entity.getId(), makeContentData(entity));
}
List<ContentDataEntity> contentDataEntities = getContentDataEntitiesForNodes(nodeIds);
makeContentData(contentDataEntities).stream()
.forEach(pair -> contentDataCache.setValue(pair.getFirst(), pair.getSecond()));
}
@Override
@@ -319,6 +337,36 @@ public abstract class AbstractContentDataDAOImpl implements ContentDataDAO
return new Pair<Long, ContentData>(key, contentData);
}
@Override
public List<Pair<Long, ContentData>> findByKeys(List<Long> keys)
{
if (keys == null || keys.isEmpty())
{
return null;
}
List<ContentDataEntity> contentDataEntities = getContentDataEntitiesForNodes(keys.stream().collect(Collectors.toSet()));
if (contentDataEntities == null || contentDataEntities.isEmpty())
{
return null;
}
List<Pair<Long, ContentData>> result = new ArrayList<>(contentDataEntities.size());
for (ContentDataEntity contentDataEntity : contentDataEntities)
{
ContentData contentData = makeContentData(contentDataEntity);
result.add(new Pair<>(contentDataEntity.getId(), contentData));
}
return result;
}
@Override
public List<Pair<Long, ContentData>> findByValues(List<ContentData> values)
{
throw new UnsupportedOperationException("Batch findByValues for ContentData is not Supported");
}
@Override
public int updateValue(Long key, ContentData value)
{
@@ -351,6 +399,28 @@ public abstract class AbstractContentDataDAOImpl implements ContentDataDAO
return value.getContentUrl();
}
@Override
public List<Pair<Long, ContentUrlEntity>> findByKeys(List<Long> keys)
{
if (keys == null || keys.isEmpty())
{
return null;
}
List<ContentUrlEntity> contentUrlEntities = getContentUrlEntities(keys);
if (contentUrlEntities == null || contentUrlEntities.isEmpty())
{
return null;
}
List<Pair<Long, ContentUrlEntity>> result = new ArrayList<>(contentUrlEntities.size());
for (ContentUrlEntity contentUrlEntity : contentUrlEntities)
{
result.add(new Pair<>(contentUrlEntity.getId(), contentUrlEntity));
}
return result;
}
/**
* Looks the entity up based on the ContentURL of the given node
*/
@@ -369,6 +439,12 @@ public abstract class AbstractContentDataDAOImpl implements ContentDataDAO
return (ret != null ? new Pair<Long, ContentUrlEntity>(ret.getId(), ret) : null);
}
@Override
public List<Pair<Long, ContentUrlEntity>> findByValues(List<ContentUrlEntity> entities)
{
throw new UnsupportedOperationException("Batch findByValues for ContentUrlEntity is not Supported");
}
public Pair<Long, ContentUrlEntity> createValue(ContentUrlEntity value)
{
ContentUrlEntity contentUrlEntity = createContentUrlEntity(value.getContentUrl(), value.getSize(), value.getContentUrlKey());
@@ -456,6 +532,82 @@ public abstract class AbstractContentDataDAOImpl implements ContentDataDAO
return contentData;
}
private List<Pair<Long, ContentData>> makeContentData(List<ContentDataEntity> contentDataEntities)
{
if (contentDataEntities == null || contentDataEntities.isEmpty())
{
return Collections.emptyList();
}
// Cleanup the list
contentDataEntities = contentDataEntities.stream()
.filter(Objects::nonNull)
.distinct()
.collect(Collectors.toList());
// Decode content URL -- skip nulls and duplicates -- First is the contentData Id, second is the contentUrl Id
List<Pair<Long, Long>> contentUrlIds = contentDataEntities.stream()
.map(entity -> new Pair<>(entity.getId(), entity.getContentUrlId()))
.filter(pair -> pair.getSecond() != null)
.distinct()
.collect(Collectors.toList());
List<Pair<Long, ContentUrlEntity>> contentUrlEntities = contentUrlCache.getByKeys(
contentUrlIds.stream()
.map(Pair::getSecond)
.collect(Collectors.toList()));
// Clean up and mapping
Map<Long, ContentUrlEntity> contentUrlMap = contentUrlEntities.stream()
.filter(Objects::nonNull)
.filter(pair -> pair.getFirst() != null && pair.getSecond() != null)
.collect(Collectors.toMap(
Pair::getFirst,
Pair::getSecond,
(existing, replacement) -> existing // Keep existing value in case of duplicates
));
// Setup the return value
List<Pair<Long, ContentData>> contentDataList = new ArrayList<>(contentDataEntities.size());
for (ContentDataEntity contentDataEntity : contentDataEntities)
{
// Guard: some content data rows may have no content URL entry in the map
ContentUrlEntity contentUrlEntity = contentUrlMap.get(contentDataEntity.getContentUrlId());
String contentUrl = contentUrlEntity == null ? null : contentUrlEntity.getContentUrl();
long size = contentDataEntity.getSize() == null ? 0L : contentDataEntity.getSize().longValue();
// Decode mimetype
Long mimetypeId = contentDataEntity.getMimetypeId();
String mimetype = null;
if (mimetypeId != null)
{
mimetype = mimetypeDAO.getMimetype(mimetypeId).getSecond();
}
// Decode encoding
Long encodingId = contentDataEntity.getEncodingId();
String encoding = null;
if (encodingId != null)
{
encoding = encodingDAO.getEncoding(encodingId).getSecond();
}
// Decode locale
Long localeId = contentDataEntity.getLocaleId();
Locale locale = null;
if (localeId != null)
{
locale = localeDAO.getLocalePair(localeId).getSecond();
}
// Build the ContentData
ContentData contentData = new ContentData(contentUrl, mimetype, size, encoding, locale);
contentDataList.add(new Pair<>(contentDataEntity.getId(), contentData));
}
return contentDataList;
}
/**
* Translates the {@link ContentData} into persistable values using the helper DAOs
*/
@@ -658,6 +810,13 @@ public abstract class AbstractContentDataDAOImpl implements ContentDataDAO
*/
protected abstract ContentUrlEntity getContentUrlEntity(Long id);
/**
* @param ids
* the IDs of the <b>content urls</b> entities
* @return Return a list of entities or an empty list if there are none
*/
protected abstract List<ContentUrlEntity> getContentUrlEntities(List<Long> ids);
protected abstract ContentUrlEntity getContentUrlEntity(String contentUrl);
/**

View File

@@ -62,6 +62,7 @@ import org.alfresco.util.ParameterCheck;
public class ContentDataDAOImpl extends AbstractContentDataDAOImpl
{
private static final String SELECT_CONTENT_URL_BY_ID = "alfresco.content.select_ContentUrlById";
private static final String SELECT_CONTENT_URLS_BY_IDS = "alfresco.content.select_ContentUrlsByIds";
private static final String SELECT_CONTENT_URL_BY_KEY = "alfresco.content.select_ContentUrlByKey";
private static final String SELECT_CONTENT_URL_BY_KEY_UNREFERENCED = "alfresco.content.select_ContentUrlByKeyUnreferenced";
private static final String SELECT_CONTENT_URLS_ORPHANED = "alfresco.content.select.select_ContentUrlsOrphaned";
@@ -132,6 +133,18 @@ public class ContentDataDAOImpl extends AbstractContentDataDAOImpl
return contentUrlEntity;
}
@Override
protected List<ContentUrlEntity> getContentUrlEntities(List<Long> ids)
{
if (ids == null || ids.isEmpty())
{
return Collections.emptyList();
}
List<ContentUrlEntity> contentUrlEntities = template.selectList(SELECT_CONTENT_URLS_BY_IDS, ids);
// Done
return contentUrlEntities;
}
@Override
public ContentUrlEntity getContentUrlEntity(String contentUrl)
{

View File

@@ -25,6 +25,8 @@
*/
package org.alfresco.repo.domain.encoding;
import java.util.List;
import org.springframework.extensions.surf.util.ParameterCheck;
import org.alfresco.repo.cache.SimpleCache;
@@ -109,6 +111,12 @@ public abstract class AbstractEncodingDAOImpl implements EncodingDAO
}
}
@Override
public List<Pair<Long, String>> findByKeys(List<Long> ids)
{
throw new UnsupportedOperationException("Batch lookup not supported for encodings.");
}
@Override
public Pair<Long, String> findByValue(String encoding)
{
@@ -123,6 +131,12 @@ public abstract class AbstractEncodingDAOImpl implements EncodingDAO
}
}
@Override
public List<Pair<Long, String>> findByValues(List<String> encodings)
{
throw new UnsupportedOperationException("Batch lookup not supported for encodings.");
}
public Pair<Long, String> createValue(String encoding)
{
EncodingEntity entity = createEncodingEntity(encoding);

View File

@@ -25,6 +25,7 @@
*/
package org.alfresco.repo.domain.locale;
import java.util.List;
import java.util.Locale;
import org.springframework.dao.DataIntegrityViolationException;
@@ -239,6 +240,12 @@ public abstract class AbstractLocaleDAOImpl implements LocaleDAO
}
}
@Override
public List<Pair<Long, String>> findByKeys(List<Long> ids)
{
throw new UnsupportedOperationException("Batch lookup not supported for locales.");
}
@Override
public Pair<Long, String> findByValue(String localeStr)
{
@@ -253,6 +260,12 @@ public abstract class AbstractLocaleDAOImpl implements LocaleDAO
}
}
@Override
public List<Pair<Long, String>> findByValues(List<String> localeStrs)
{
throw new UnsupportedOperationException("Batch lookup not supported for locales.");
}
public Pair<Long, String> createValue(String localeStr)
{
LocaleEntity entity = createLocaleEntity(localeStr);

View File

@@ -37,12 +37,14 @@ import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.SortedSet;
import java.util.Stack;
import java.util.TreeSet;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Collectors;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -123,7 +125,6 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
protected Log logger = LogFactory.getLog(getClass());
private Log loggerPaths = LogFactory.getLog(getClass().getName() + ".paths");
protected final boolean isDebugEnabled = logger.isDebugEnabled();
private NodePropertyHelper nodePropertyHelper;
private UpdateTransactionListener updateTransactionListener = new UpdateTransactionListener();
private RetryingCallbackHelper childAssocRetryingHelper;
@@ -140,6 +141,8 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
private UsageDAO usageDAO;
private int cachingThreshold = 10;
private int batchSize = 256;
private boolean forceBatching;
/**
* Cache for the Store root nodes by StoreRef:<br/>
@@ -410,6 +413,37 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
this.childByNameCache = childByNameCache;
}
/**
* Set the batch size for batch operations
*
* @param batchSize
*/
public void setBatchSize(int batchSize)
{
this.batchSize = batchSize;
if (batchSize < 1)
{
this.batchSize = 1;
logger.info("Batch size cannot be set to a value less than 1. The size is now set to 1.");
}
else if (batchSize >= 1000)
{
logger.info("Batch size is set to 1000 or greater. Oracle databases have a hard limit of 1000 values allowed in an IN clause."
+ " The other supported databases have no specified limit.");
}
}
/**
* Set whether to force batching even for small sets
*
* @param forceBatching
*/
public void setForceBatching(boolean forceBatching)
{
this.forceBatching = forceBatching;
}
/* Initialize */
public void init()
@@ -594,7 +628,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
String changeTxnId = AlfrescoTransactionSupport.getTransactionId();
Long txnId = insertTransaction(changeTxnId, now);
// Store it for later
if (isDebugEnabled)
if (logger.isDebugEnabled())
{
logger.debug("Create txn: " + txnId);
}
@@ -770,7 +804,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
// Push the value into the caches
rootNodesCache.setValue(storeRef, rootNode);
if (isDebugEnabled)
if (logger.isDebugEnabled())
{
logger.debug("Created store: \n" + " " + store);
}
@@ -799,7 +833,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
allRootNodesCache.remove(oldStoreRef);
nodesCache.clear();
if (isDebugEnabled)
if (logger.isDebugEnabled())
{
logger.debug("Moved store: " + oldStoreRef + " --> " + newStoreRef);
}
@@ -831,6 +865,26 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
NodeEntity node = selectStoreRootNode(storeRef);
return node == null ? null : new Pair<StoreRef, Node>(storeRef, node);
}
/**
* @throws UnsupportedOperationException
* Bulk root node lookup not supported
*/
@Override
public List<Pair<StoreRef, Node>> findByKeys(List<StoreRef> storeRefs)
{
throw new UnsupportedOperationException("Bulk root node lookup not supported: " + storeRefs);
}
/**
* @throws UnsupportedOperationException
* Bulk root node lookup not supported
*/
@Override
public List<Pair<StoreRef, Node>> findByValues(List<Node> values)
{
throw new UnsupportedOperationException("Bulk root node lookup not supported: " + values);
}
}
/* Nodes */
@@ -873,6 +927,37 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
}
}
/**
* @param nodeIds
*     the node ID keys to look up
*/
@Override
public List<Pair<Long, Node>> findByKeys(List<Long> nodeIds)
{
if (nodeIds == null || nodeIds.isEmpty())
{
return new ArrayList<>(0);
}
List<Pair<Long, Node>> results = new ArrayList<>(nodeIds.size());
SortedSet<Long> uniqueNodeIds = new TreeSet<>(nodeIds);
List<Node> nodes = selectNodesByIds(uniqueNodeIds);
for (Node node : nodes)
{
// Shouldn't be null, but...
if (node != null)
{
// Lock it to prevent 'accidental' modification
node.lock();
results.add(new Pair<>(node.getId(), node));
}
}
return results;
}
/**
* @return Returns the Node's NodeRef
*/
@@ -901,6 +986,38 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
return null;
}
}
/**
* Look up nodes based on the NodeRefs of the given nodes. Nodes that do not exist are excluded.
*/
@Override
public List<Pair<Long, Node>> findByValues(List<Node> values)
{
List<Pair<Long, Node>> results = new ArrayList<>(values.size());
SortedSet<String> nodeRefs = values.stream()
.map(Node::getNodeRef)
.map(NodeRef::getId)
.collect(Collectors.toCollection(() -> new TreeSet<String>()));
// All nodes should be in the same store - so we can use the store id from the first one.
StoreEntity storeEntity = values.stream()
.findFirst()
.map(Node::getStore)
.orElse(null);
List<Node> selectedNodes = selectNodesByUuids(storeEntity.getId(), nodeRefs);
// Lock it to prevent 'accidental' modification
selectedNodes.forEach(node -> {
node.lock();
results.add(new Pair<>(node.getId(), node));
});
return results;
}
}
public boolean exists(Long nodeId)
@@ -916,6 +1033,36 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
return pair != null && !pair.getSecond().getDeleted(qnameDAO);
}
/**
* Returns the subset (or all) of the given node references that exist.
*/
@Override
public List<NodeRef> exists(List<NodeRef> nodeRefs)
{
List<Node> nodeRefsToCheck = new ArrayList<>(nodeRefs.size());
for (NodeRef nodeRef : nodeRefs)
{
nodeRefsToCheck.add(new NodeEntity(nodeRef));
}
List<NodeRef> existingNodeRefs = new ArrayList<>(nodeRefs.size());
List<Pair<Long, Node>> nodes = nodesCache.getByValues(nodeRefsToCheck);
if (nodes != null)
{
for (Pair<Long, Node> pair : nodes)
{
if (pair != null && !pair.getSecond().getDeleted(qnameDAO))
{
existingNodeRefs.add(pair.getSecond().getNodeRef());
}
}
}
return existingNodeRefs;
}
@Override
public boolean isInCurrentTxn(Long nodeId)
{
@@ -990,7 +1137,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
else
{
// The cache was wrong, possibly due to it caching negative results earlier.
if (isDebugEnabled)
if (logger.isDebugEnabled())
{
logger.debug("Repairing stale cache entry for node: " + nodeRef);
}
@@ -1004,6 +1151,86 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
return pair.getSecond().getNodePair();
}
@Override
public List<Pair<Long, NodeRef>> getNodePairs(StoreRef storeRef, List<NodeRef> nodeRefs)
{
List<Pair<Long, NodeRef>> results = new ArrayList<>(nodeRefs.size());
SortedSet<Long> uncachedNodeIds = new TreeSet<>();
List<Pair<Long, Node>> nodePairs = nodesCache.getByValues(
nodeRefs.stream()
.map(NodeEntity::new)
.collect(Collectors.toList()));
for (Pair<Long, Node> nodePair : nodePairs)
{
// Check it
if (nodePair != null && nodePair.getSecond().getDeleted(qnameDAO))
{
// If the nodeRef is truly missing/deleted from the db
// it will be filtered out when we query against the uncachedNodeIds later
uncachedNodeIds.add(nodePair.getFirst());
}
else if (nodePair != null)
{
results.add(nodePair.getSecond().getNodePair());
}
}
// Are there any nodes missing that we need to double check?
Set<String> existingUuids = nodePairs.stream()
.map(Pair::getSecond)
.map(Node::getNodeRef)
.map(NodeRef::getId)
.filter(Objects::nonNull)
.collect(Collectors.toSet());
SortedSet<String> missingUuids = nodeRefs.stream()
.map(NodeRef::getId)
.filter(refId -> !existingUuids.contains(refId))
.collect(Collectors.toCollection(TreeSet::new));
if (!uncachedNodeIds.isEmpty())
{
StoreEntity store = getStoreNotNull(storeRef);
// The cache says that these nodes are not there or are deleted.
// We double check by going to the DB
List<Node> dbNodes = selectNodesByIds(uncachedNodeIds);
// Also check the missing UUIDs
if (missingUuids != null && !missingUuids.isEmpty())
{
dbNodes.addAll(selectNodesByUuids(store.getId(), missingUuids));
}
for (Node dbNode : dbNodes)
{
Long nodeId = dbNode.getId();
if (dbNode.getDeleted(qnameDAO))
{
// We may have reached this deleted node via an invalid association; trigger a post transaction prune of
// any associations that point to this deleted one
pruneDanglingAssocs(nodeId);
}
else
{
// The cache was wrong, possibly due to it caching negative results earlier.
if (logger.isDebugEnabled())
{
logger.debug("Repairing stale cache entry for node: " + nodeId);
}
invalidateNodeCaches(nodeId);
dbNode.lock(); // Prevent unexpected edits of values going into the cache
nodesCache.setValue(nodeId, dbNode);
results.add(dbNode.getNodePair());
}
}
}
return results;
}
/**
* Trigger a post transaction prune of any associations that point to this deleted one.
*
@@ -1091,7 +1318,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
else
{
// The cache was wrong, possibly due to it caching negative results earlier.
if (isDebugEnabled)
if (logger.isDebugEnabled())
{
logger.debug("Repairing stale cache entry for node: " + nodeId);
}
@@ -1107,6 +1334,61 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
}
}
@Override
public List<Pair<Long, NodeRef>> getNodePairs(List<Long> nodeIds)
{
List<Pair<Long, NodeRef>> results = new ArrayList<>(nodeIds.size());
SortedSet<Long> uncachedNodeIds = new TreeSet<>();
for (Long nodeId : nodeIds)
{
Pair<Long, Node> pair = nodesCache.getByKey(nodeId);
// Check it
if (pair == null || pair.getSecond().getDeleted(qnameDAO))
{
// If the nodeId is truly missing/deleted from the db
// it will be filtered out when we query against the uncachedNodeIds later
uncachedNodeIds.add(nodeId);
}
else
{
results.add(pair.getSecond().getNodePair());
}
}
if (!uncachedNodeIds.isEmpty())
{
// The cache says that these nodes are not there or are deleted.
// We double check by going to the DB
List<Node> dbNodes = selectNodesByIds(uncachedNodeIds);
for (Node dbNode : dbNodes)
{
Long nodeId = dbNode.getId();
if (dbNode.getDeleted(qnameDAO))
{
// We may have reached this deleted node via an invalid association; trigger a post transaction prune of
// any associations that point to this deleted one
pruneDanglingAssocs(nodeId);
}
else
{
// The cache was wrong, possibly due to it caching negative results earlier.
if (logger.isDebugEnabled())
{
logger.debug("Repairing stale cache entry for node: " + nodeId);
}
invalidateNodeCaches(nodeId);
dbNode.lock(); // Prevent unexpected edits of values going into the cache
nodesCache.setValue(nodeId, dbNode);
results.add(dbNode.getNodePair());
}
}
}
return results;
}
/**
* Get a node instance regardless of whether it is considered <b>live</b> or <b>deleted</b>
*
@@ -1151,6 +1433,81 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
}
}
/**
* Get node instances regardless of whether they are considered <b>live</b> or <b>deleted</b>
*
* @param nodeIds
* the node IDs to look for
* @param liveOnly
* <tt>true</tt> to ensure that only <b>live</b> nodes are retrieved
* @return nodes that will be <b>live</b> if requested. Nodes not found will be ignored.
*/
private List<Node> getNodesNotNull(List<Long> nodeIds, boolean liveOnly)
{
List<Pair<Long, Node>> pairs = nodesCache.getByKeys(nodeIds);
if (pairs.isEmpty())
{
// The nodes have no entry in the database
List<NodeEntity> dbNodes = selectNodesByIds(nodeIds);
nodesCache.removeByKeys(nodeIds);
if (logger.isDebugEnabled())
{
logger.debug(
"No node rows exists: \n" +
" IDs: " + nodeIds + "\n" +
" DB rows: " + dbNodes);
}
return Collections.emptyList();
}
// for quick lookup
Set<Long> pairNodeIds = pairs.stream()
.map(Pair::getFirst)
.collect(Collectors.toSet());
// remove missing from cache
nodeIds.stream()
.filter(nodeId -> !pairNodeIds.contains(nodeId))
.forEach(nodesCache::removeByKey);
List<Long> deletedNodeIds = new ArrayList<>();
List<Node> liveNodes = new ArrayList<>();
for (Pair<Long, Node> pair : pairs)
{
// This might initially seem less performant, but after the first iteration the qname will be cached if it is not already there
if (pair.getSecond().getDeleted(qnameDAO) && liveOnly)
{
deletedNodeIds.add(pair.getFirst());
}
else
{
// Keep the live node
liveNodes.add(pair.getSecond());
}
}
if (!deletedNodeIds.isEmpty())
{
nodesCache.removeByKeys(deletedNodeIds);
// Now the pain of pruning dangling assocs for each deleted node...this could be slow if there are many deleted nodes
for (Long nodeId : deletedNodeIds)
{
pruneDanglingAssocs(nodeId);
// In the single node case we would force a retry on the transaction...we can't do that here so just log it
if (logger.isDebugEnabled())
{
logger.debug(
"No node rows exists: \n" +
" IDs: " + nodeId + "\n");
}
}
}
return liveNodes;
}
@Override
public QName getNodeType(Long nodeId)
{
@@ -1257,7 +1614,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
ParentAssocsInfo parentAssocsInfo = new ParentAssocsInfo(isRoot, isStoreRoot, assoc);
setParentAssocsCached(nodeId, parentAssocsInfo);
if (isDebugEnabled)
if (logger.isDebugEnabled())
{
logger.debug(
"Created new node: \n" +
@@ -1361,7 +1718,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
setNodeAspectsCached(id, nodeAspects);
setNodePropertiesCached(id, Collections.<QName, Serializable> emptyMap());
if (isDebugEnabled)
if (logger.isDebugEnabled())
{
logger.debug("Created new node: \n" + " " + node);
}
@@ -1523,7 +1880,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
// Done
Pair<Long, ChildAssociationRef> assocPair = getPrimaryParentAssoc(newChildNode.getId());
Pair<Long, NodeRef> nodePair = newChildNode.getNodePair();
if (isDebugEnabled)
if (logger.isDebugEnabled())
{
logger.debug("Moved node: " + assocPair + " ... " + nodePair);
}
@@ -1666,7 +2023,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
public int touchNodes(Long txnId, List<Long> nodeIds)
{
// limit in clause to 1000 node ids
int batchSize = 1000;
var batchSize = 1000;
int touched = 0;
ArrayList<Long> batch = new ArrayList<Long>(batchSize);
@@ -1927,7 +2284,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
}
// Done
if (isDebugEnabled)
if (logger.isDebugEnabled())
{
logger.debug(
"Updated Node: \n" +
@@ -2085,7 +2442,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
props = new ValueProtectingMap<QName, Serializable>(props, NodePropertyValue.IMMUTABLE_CLASSES);
// Done
if (isDebugEnabled)
if (logger.isDebugEnabled())
{
logger.debug("Fetched properties for Node: \n" +
" Node: " + nodeId + "\n" +
@@ -2094,6 +2451,54 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
return props;
}
@Override
public Map<Long, Map<QName, Serializable>> getNodeProperties(List<Long> nodeIds)
{
Map<Long, Map<QName, Serializable>> props = getNodePropertiesCached(nodeIds);
// Create a shallow copy to allow additions
props = new HashMap<>(props);
List<Node> nodes = getNodesNotNull(nodeIds, false);
for (Node node : nodes)
{
// Handle sys:referenceable
Map<QName, Serializable> nodeProps = new HashMap<>(props.get(node.getId()));
ReferenceablePropertiesEntity.addReferenceableProperties(node.getId(), node.getNodeRef(), nodeProps);
// TODO optimize sys:localized for batch processing
// Handle sys:localized
LocalizedPropertiesEntity.addLocalizedProperties(localeDAO, node, nodeProps);
// Handle cm:auditable
if (hasNodeAspect(node.getId(), ContentModel.ASPECT_AUDITABLE))
{
AuditablePropertiesEntity auditableProperties = node.getAuditableProperties();
if (auditableProperties == null)
{
auditableProperties = new AuditablePropertiesEntity();
}
nodeProps.putAll(auditableProperties.getAuditableProperties());
}
// Wrap to ensure that we only clone values if the client attempts to modify
// the map or retrieve values that might, themselves, be mutable
nodeProps = new ValueProtectingMap<>(nodeProps, NodePropertyValue.IMMUTABLE_CLASSES);
// Done
if (logger.isDebugEnabled())
{
logger.debug("Fetched properties for Node: \n" +
" Node: " + node.getId() + "\n" +
" Props: " + nodeProps);
}
props.put(node.getId(), nodeProps);
}
return props;
}
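As a rough usage sketch (not part of the change), the bulk variant above replaces a per-node loop with one lookup keyed by node ID; NodeDAO declares both overloads, and the example class below is invented for illustration:
import java.io.Serializable;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.alfresco.repo.domain.node.NodeDAO;
import org.alfresco.service.namespace.QName;
public final class BulkPropertyLoadExample
{
    private BulkPropertyLoadExample()
    {}
    /** Old pattern: one properties lookup per node, i.e. N round trips for N nodes. */
    public static Map<Long, Map<QName, Serializable>> loadOneByOne(NodeDAO nodeDAO, List<Long> nodeIds)
    {
        Map<Long, Map<QName, Serializable>> result = new HashMap<>(nodeIds.size());
        for (Long nodeId : nodeIds)
        {
            result.put(nodeId, nodeDAO.getNodeProperties(nodeId));
        }
        return result;
    }
    /** Bulk pattern: a single call returning the properties keyed by node ID. */
    public static Map<Long, Map<QName, Serializable>> loadBulk(NodeDAO nodeDAO, List<Long> nodeIds)
    {
        return nodeDAO.getNodeProperties(nodeIds);
    }
}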
@Override
public Serializable getNodeProperty(Long nodeId, QName propertyQName)
{
@@ -2128,7 +2533,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
value = props.get(propertyQName);
}
// Done
if (isDebugEnabled)
if (logger.isDebugEnabled())
{
logger.debug("Fetched property for Node: \n" +
" Node: " + nodeId + "\n" +
@@ -2382,7 +2787,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
}
// Done
if (isDebugEnabled && updated)
if (logger.isDebugEnabled() && updated)
{
logger.debug(
"Modified node properties: " + nodeId + "\n" +
@@ -2532,6 +2937,29 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
return cachedProperties;
}
/**
* @return Returns the read-only cached property maps, keyed by node ID
*/
private Map<Long, Map<QName, Serializable>> getNodePropertiesCached(List<Long> nodeIds)
{
Map<Long, Map<QName, Serializable>> result = new HashMap<>();
List<Node> nodes = getNodesNotNull(nodeIds, false); // Ensure all nodes exist
List<NodeVersionKey> nodeVersionKeys = nodes.stream()
.map(Node::getNodeVersionKey)
.collect(Collectors.toList());
List<Pair<NodeVersionKey, Map<QName, Serializable>>> cacheEntries = propertiesCache.getByKeys(nodeVersionKeys);
result.putAll(cacheEntries.stream()
.collect(Collectors.toMap(
entry -> entry.getFirst().getNodeId(),
Pair::getSecond)));
return result;
}
/**
* Update the node properties cache. The incoming properties will be wrapped to be unmodifiable.
* <p>
@@ -2595,6 +3023,60 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
// Done
return new Pair<NodeVersionKey, Map<QName, Serializable>>(nodeVersionKey, Collections.unmodifiableMap(props));
}
@Override
public List<Pair<NodeVersionKey, Map<QName, Serializable>>> findByKeys(List<NodeVersionKey> keys)
{
// Gather all node IDs
Set<Long> nodeIds = keys.stream()
.map(NodeVersionKey::getNodeId)
.distinct()
.collect(Collectors.toSet());
// Load all properties for the node IDs
Map<NodeVersionKey, Map<NodePropertyKey, NodePropertyValue>> propsRawByNodeVersionKey = selectNodeProperties(nodeIds);
// Now build up the results
List<Pair<NodeVersionKey, Map<QName, Serializable>>> results = new ArrayList<>(keys.size());
for (NodeVersionKey nodeVersionKey : keys)
{
Long nodeId = nodeVersionKey.getNodeId();
Map<NodePropertyKey, NodePropertyValue> propsRaw = propsRawByNodeVersionKey.get(nodeVersionKey);
if (propsRaw == null)
{
// Didn't find a match. Is this because there are none?
if (propsRawByNodeVersionKey.isEmpty())
{
// This is OK. The node has no properties
propsRaw = Collections.emptyMap();
}
else
{
// We found properties associated with a different node ID and version
invalidateNodeCaches(nodeId);
throw new DataIntegrityViolationException(
"Detected stale node entry: " + nodeVersionKey +
" (now " + propsRawByNodeVersionKey.keySet() + ")");
}
}
// Convert to public properties
Map<QName, Serializable> props = nodePropertyHelper.convertToPublicProperties(propsRaw);
// Done
results.add(new Pair<>(nodeVersionKey, Collections.unmodifiableMap(props)));
}
return results;
}
/**
* Batch lookup is not supported
*
* @throws UnsupportedOperationException
*/
@Override
public List<Pair<NodeVersionKey, Map<QName, Serializable>>> findByValues(List<Map<QName, Serializable>> values)
{
throw new UnsupportedOperationException("Batch lookup not supported for node properties.");
}
}
/* Aspects */
@@ -2610,6 +3092,19 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
return nodeAspects;
}
@Override
public Map<Long, Set<QName>> getNodeAspects(List<Long> nodeIds)
{
Map<Long, Set<QName>> nodeAspects = getNodeAspectsCached(nodeIds);
// Nodes are always referenceable
for (Set<QName> aspects : nodeAspects.values())
{
aspects.add(ContentModel.ASPECT_REFERENCEABLE);
aspects.add(ContentModel.ASPECT_LOCALIZED);
}
return nodeAspects;
}
@Override
public boolean hasNodeAspect(Long nodeId, QName aspectQName)
{
@@ -2831,6 +3326,58 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
aspectsCache.setValue(nodeVersionKey, Collections.unmodifiableSet(aspects));
}
/**
* @return Returns a writable copy of the cached aspects mapped by node ID
*/
private Map<Long, Set<QName>> getNodeAspectsCached(List<Long> nodeIds)
{
List<Node> nodes = getNodesNotNull(nodeIds, false);
List<NodeVersionKey> nodeVersionKeys = new ArrayList<>(nodes.size());
for (Node node : nodes)
{
nodeVersionKeys.add(node.getNodeVersionKey());
}
List<Pair<NodeVersionKey, Set<QName>>> cacheEntries = aspectsCache.getByKeys(nodeVersionKeys);
for (Pair<NodeVersionKey, Set<QName>> cacheEntry : cacheEntries)
{
if (cacheEntry.getSecond() == null)
{
invalidateNodeCaches(cacheEntry.getFirst().getNodeId());
logger.info("Invalidating caches for node ID: " + cacheEntry.getFirst().getNodeId());
}
}
Map<Long, Set<QName>> result = new HashMap<>();
for (Pair<NodeVersionKey, Set<QName>> cacheEntry : cacheEntries)
{
// Entries invalidated above have no cached aspect set; skip them rather than dereference null
if (cacheEntry.getSecond() != null)
{
result.put(cacheEntry.getFirst().getNodeId(), new HashSet<>(cacheEntry.getSecond()));
}
}
return result;
}
/**
* Update the node aspects cache. The incoming set will be wrapped to be unmodifiable.
*/
private void setNodeAspectsCached(Map<Long, Set<QName>> nodeAspects)
{
List<Long> nodeIds = nodeAspects.keySet().stream().toList();
List<NodeVersionKey> nodeVersionKeys = getNodesNotNull(nodeIds, false).stream()
.map(Node::getNodeVersionKey)
.collect(Collectors.toList());
// Should have minimal impact
for (NodeVersionKey nodeVersionKey : nodeVersionKeys)
{
aspectsCache.setValue(nodeVersionKey, Collections.unmodifiableSet(nodeAspects.get(nodeVersionKey.getNodeId())));
}
}
/**
* Helper method to copy cache values from one key to another
*/
@@ -2882,6 +3429,28 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
// Done
return new Pair<NodeVersionKey, Set<QName>>(nodeVersionKey, Collections.unmodifiableSet(nodeAspectQNames));
}
/**
* Batch lookup is not supported
*
* @throws UnsupportedOperationException
*/
@Override
public List<Pair<NodeVersionKey, Set<QName>>> findByKeys(List<NodeVersionKey> keys)
{
throw new UnsupportedOperationException("Batch lookup not supported for node aspects.");
}
/**
* Batch lookup is not supported
*
* @throws UnsupportedOperationException
*/
@Override
public List<Pair<NodeVersionKey, Set<QName>>> findByValues(List<Set<QName>> values)
{
throw new UnsupportedOperationException("Batch lookup not supported for node aspects.");
}
}
/* Node assocs */
@@ -2918,7 +3487,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
catch (Throwable e)
{
controlDAO.rollbackToSavepoint(savepoint);
if (isDebugEnabled)
if (logger.isDebugEnabled())
{
logger.debug(
"Failed to insert node association: \n" +
@@ -3182,7 +3751,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
}
// Done
if (isDebugEnabled)
if (logger.isDebugEnabled())
{
logger.debug("Created child association: " + assoc);
}
@@ -3316,7 +3885,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
touchNode(childNodeId, null, null, false, false, true);
}
if (isDebugEnabled)
if (logger.isDebugEnabled())
{
logger.debug(
"Updated cm:name to parent assocs: \n" +
@@ -4065,7 +4634,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
Stack<Long> assocIdStack,
boolean primaryOnly) throws CyclicChildRelationshipException
{
if (isDebugEnabled)
if (logger.isDebugEnabled())
{
logger.debug("\n" +
"Prepending paths: \n" +
@@ -4180,7 +4749,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
throw new CyclicChildRelationshipException("Node has been pasted into its own tree.", assocRef);
}
if (isDebugEnabled)
if (logger.isDebugEnabled())
{
logger.debug("\n" +
" Prepending path parent: \n" +
@@ -4558,12 +5127,11 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
int foundCacheEntryCount = 0;
int missingCacheEntryCount = 0;
boolean forceBatch = false;
List<Long> batchLoadNodeIds = new ArrayList<Long>(nodeIds.size());
for (Long nodeId : nodeIds)
{
if (!forceBatch)
if (!forceBatching)
{
// Is this node in the cache?
if (nodesCache.getValue(nodeId) != null)
@@ -4578,7 +5146,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
if ((foundCacheEntryCount + missingCacheEntryCount) % 100 == 0)
{
// We force the batch if the number of hits drops below the number of misses
forceBatch = foundCacheEntryCount < missingCacheEntryCount;
forceBatching = foundCacheEntryCount < missingCacheEntryCount;
}
}
@@ -4672,7 +5240,6 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
StoreEntity store = getStoreNotNull(storeRef);
Long storeId = store.getId();
int batchSize = 256;
SortedSet<String> batch = new TreeSet<String>();
for (String uuid : uuids)
{
@@ -4690,12 +5257,12 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
{
List<Node> nodes = selectNodesByUuids(storeId, batch);
cacheNodesNoBatch(nodes);
logger.info("Batch size may be too small " + batch.size() + " nodes.");
}
}
private void cacheNodesBatch(List<Long> nodeIds)
{
int batchSize = 256;
SortedSet<Long> batch = new TreeSet<Long>();
for (Long nodeId : nodeIds)
{
@@ -4713,6 +5280,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
{
List<Node> nodes = selectNodesByIds(batch);
cacheNodesNoBatch(nodes);
logger.info("Batch size may be too small " + batch.size() + " nodes.");
}
}
@@ -4722,9 +5290,10 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
private void cacheNodesNoBatch(List<Node> nodes)
{
// Get the nodes
SortedSet<Long> aspectNodeIds = new TreeSet<Long>();
SortedSet<Long> propertiesNodeIds = new TreeSet<Long>();
Map<Long, NodeVersionKey> nodeVersionKeysFromCache = new HashMap<Long, NodeVersionKey>(nodes.size() * 2); // Keep for quick lookup
SortedSet<Long> aspectNodeIds = new TreeSet<>();
SortedSet<Long> propertiesNodeIds = new TreeSet<>();
SortedSet<Long> childAssocsNodeIds = new TreeSet<>();
Map<Long, NodeVersionKey> nodeVersionKeysFromCache = new HashMap<>(nodes.size() * 2); // Keep for quick lookup
for (Node node : nodes)
{
Long nodeId = node.getId();
@@ -4739,6 +5308,17 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
{
aspectNodeIds.add(nodeId);
}
// Only worth caching if we will be able to retrieve the correct version from the cache later
if (node.getTransaction() != null)
{
Pair<Long, String> cacheKey = new Pair<>(nodeId, node.getTransaction().getChangeTxnId());
if (parentAssocsCache.get(cacheKey) == null)
{
childAssocsNodeIds.add(nodeId);
}
}
nodeVersionKeysFromCache.put(nodeId, nodeVersionKey);
}
@@ -4749,22 +5329,40 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
}
Map<NodeVersionKey, Set<QName>> nodeAspects = selectNodeAspects(aspectNodeIds);
Map<Long, Set<QName>> aspectsMappedByNodeId = new HashMap<>(aspectNodeIds.size());
Map<Long, Set<QName>> nodesWithNoAspects = new HashMap<>(aspectNodeIds.size());
for (Map.Entry<NodeVersionKey, Set<QName>> entry : nodeAspects.entrySet())
{
NodeVersionKey nodeVersionKeyFromDb = entry.getKey();
Long nodeId = nodeVersionKeyFromDb.getNodeId();
Set<QName> qnames = entry.getValue();
setNodeAspectsCached(nodeId, qnames);
aspectNodeIds.remove(nodeId);
NodeVersionKey oldKey = entry.getKey();
Long newKey = oldKey.getNodeId();
Set<QName> value = entry.getValue();
aspectsMappedByNodeId.put(newKey, value);
// Remove the nodeIds from the original Set
aspectNodeIds.remove(newKey);
}
if (!aspectsMappedByNodeId.isEmpty())
{
setNodeAspectsCached(aspectsMappedByNodeId);
}
// Cache the absence of aspects too!
for (Long nodeId : aspectNodeIds)
{
setNodeAspectsCached(nodeId, Collections.<QName> emptySet());
nodesWithNoAspects.put(nodeId, Collections.<QName> emptySet());
}
if (!nodesWithNoAspects.isEmpty())
{
setNodeAspectsCached(nodesWithNoAspects);
}
// First ensure all content data are pre-cached, so we don't have to load them individually when converting properties
contentDataDAO.cacheContentDataForNodes(propertiesNodeIds);
if (!propertiesNodeIds.isEmpty())
{
contentDataDAO.cacheContentDataForNodes(propertiesNodeIds);
}
// Now bulk load the properties
Map<NodeVersionKey, Map<NodePropertyKey, NodePropertyValue>> propsByNodeId = selectNodeProperties(propertiesNodeIds);
@@ -4775,6 +5373,28 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
Map<QName, Serializable> props = nodePropertyHelper.convertToPublicProperties(propertyValues);
setNodePropertiesCached(nodeId, props);
}
// Bulk load the parent associations
List<ChildAssocEntity> assocs = selectParentAssocsOfChildren(childAssocsNodeIds);
for (ChildAssocEntity assoc : assocs)
{
Long nodeId = assoc.getChildNode().getId();
Node childNode = getNodeNotNull(nodeId, false);
boolean isRoot = hasNodeAspect(nodeId, ContentModel.ASPECT_ROOT);
boolean isStoreRoot = getNodeType(nodeId).equals(ContentModel.TYPE_STOREROOT);
if (childNode.getTransaction() == null)
{
// Should not happen - skip
logger.warn("Child node " + childNode + " has no transaction - cannot cache parent associations");
continue;
}
Pair<Long, String> cacheKey = new Pair<>(nodeId, childNode.getTransaction().getChangeTxnId());
ParentAssocsInfo value = new ParentAssocsInfo(isRoot, isStoreRoot, assoc);
parentAssocsCache.put(cacheKey, value);
}
}
/**
@@ -4943,6 +5563,8 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
protected abstract NodeEntity selectNodeById(Long id);
protected abstract List<NodeEntity> selectNodesByIds(List<Long> ids);
protected abstract NodeEntity selectNodeByNodeRef(NodeRef nodeRef);
protected abstract List<Node> selectNodesByUuids(Long storeId, SortedSet<String> uuids);
@@ -5095,6 +5717,8 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
protected abstract List<ChildAssocEntity> selectParentAssocs(Long childNodeId);
protected abstract List<ChildAssocEntity> selectParentAssocsOfChildren(Set<Long> childrenNodeIds);
/**
* No DB constraint, so multiple returned
*/

View File

@@ -148,6 +148,15 @@ public interface NodeDAO extends NodeBulkLoader
*/
public boolean exists(NodeRef nodeRef);
/**
* Find out which of the given nodes exist. Unpurged deleted nodes do not count as they are the DAO's concern only.
*
* @param nodeRefs
*     the potentially valid node references
* @return Returns the subset of the given node references that are present and undeleted
*/
List<NodeRef> exists(List<NodeRef> nodeRefs);
/**
* Find out if a node exists. Unpurged deleted nodes do not count as they are the DAO's concern only.
*
@@ -184,8 +193,12 @@ public interface NodeDAO extends NodeBulkLoader
public Pair<Long, NodeRef> getNodePair(NodeRef nodeRef);
List<Pair<Long, NodeRef>> getNodePairs(StoreRef storeRef, List<NodeRef> nodeRefs);
public Pair<Long, NodeRef> getNodePair(Long nodeId);
List<Pair<Long, NodeRef>> getNodePairs(List<Long> nodeIds);
public QName getNodeType(Long nodeId);
public Long getNodeAclId(Long nodeId);
@@ -367,6 +380,8 @@ public interface NodeDAO extends NodeBulkLoader
public Map<QName, Serializable> getNodeProperties(Long nodeId);
Map<Long, Map<QName, Serializable>> getNodeProperties(List<Long> nodeIds);
public boolean setNodeProperties(Long nodeId, Map<QName, Serializable> properties);
public boolean addNodeProperty(Long nodeId, QName qname, Serializable value);
@@ -404,6 +419,8 @@ public interface NodeDAO extends NodeBulkLoader
public Set<QName> getNodeAspects(Long nodeId);
Map<Long, Set<QName>> getNodeAspects(List<Long> nodeIds);
public boolean hasNodeAspect(Long nodeId, QName aspectQName);
public boolean addNodeAspects(Long nodeId, Set<QName> aspectQNames);

View File

@@ -143,6 +143,7 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
private static final String SELECT_ASSOCS_NOT_LINKED_BY_TWO_OTHER_ASSOCS = "alfresco.node.select_AssocsNotLinkedByTwoOtherAssocs";
private static final String SELECT_CHILD_ASSOCS_OF_PARENT_WITHOUT_NODE_ASSOCS_OF_TYPE = "alfresco.node.select_ChildAssocsOfParentWithoutNodeAssocsOfType";
private static final String SELECT_PARENT_ASSOCS_OF_CHILD = "alfresco.node.select_ParentAssocsOfChild";
private static final String SELECT_PARENT_ASSOCS_OF_CHILDREN = "alfresco.node.select_ParentAssocsOfChildren";
private static final String UPDATE_PARENT_ASSOCS_OF_CHILD = "alfresco.node.update_ParentAssocsOfChild";
private static final String DELETE_SUBSCRIPTIONS = "alfresco.node.delete_NodeSubscriptions";
@@ -419,6 +420,19 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
return template.selectOne(SELECT_NODE_BY_ID, node);
}
@Override
protected List<NodeEntity> selectNodesByIds(List<Long> ids)
{
List<NodeEntity> nodes = new ArrayList<>();
ids.forEach(id -> {
NodeEntity node = new NodeEntity();
node.setId(id);
nodes.add(node);
});
return template.selectList(SELECT_NODES_BY_IDS, nodes);
}
@Override
protected NodeEntity selectNodeByNodeRef(NodeRef nodeRef)
{
@@ -1527,6 +1541,19 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
return template.selectList(SELECT_PARENT_ASSOCS_OF_CHILD, assoc);
}
@Override
protected List<ChildAssocEntity> selectParentAssocsOfChildren(Set<Long> childrenNodeIds)
{
if (childrenNodeIds.isEmpty())
{
// There will be no results
return Collections.emptyList();
}
IdsEntity idsEntity = new IdsEntity();
idsEntity.setIds(new ArrayList<>(childrenNodeIds));
return template.selectList(SELECT_PARENT_ASSOCS_OF_CHILDREN, idsEntity);
}
@Override
protected int updatePrimaryParentAssocs(
Long childNodeId,
@@ -1575,7 +1602,7 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
int countTA = template.update(UPDATE_MOVE_TARGET_ASSOCS, params);
int countP = template.update(UPDATE_MOVE_PROPERTIES, params);
int countA = template.update(UPDATE_MOVE_ASPECTS, params);
if (isDebugEnabled)
if (logger.isDebugEnabled())
{
logger.debug(
"Moved node data: \n" +
@@ -1850,7 +1877,7 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
Iterator<Long> nodeIdIterator = this.selectDeletedNodesByCommitTime(maxCommitTime);
ArrayList<Long> nodeIdList = new ArrayList<>();
List<String> deleteResult = new ArrayList<>();
if (isDebugEnabled)
if (logger.isDebugEnabled())
{
logger.debug("nodes selected for deletion, deleteBatchSize:" + deleteBatchSize);
}
@@ -1859,7 +1886,7 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
if (deleteBatchSize == nodeIdList.size())
{
int count = deleteSelectedNodesAndProperties(nodeIdList);
if (isDebugEnabled)
if (logger.isDebugEnabled())
{
logger.debug("nodes deleted:" + count);
}
@@ -1874,7 +1901,7 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
if (nodeIdList.size() > 0)
{
int count = deleteSelectedNodesAndProperties(nodeIdList);
if (isDebugEnabled)
if (logger.isDebugEnabled())
{
logger.debug("remaining nodes deleted:" + count);
}
@@ -1890,7 +1917,7 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
Iterator<Long> transactionIdIterator = this.selectUnusedTransactionsByCommitTime(maxCommitTime);
ArrayList<Long> transactionIdList = new ArrayList<>();
List<String> deleteResult = new ArrayList<>();
if (isDebugEnabled)
if (logger.isDebugEnabled())
{
logger.debug("transactions selected for deletion, deleteBatchSize:" + deleteBatchSize);
}
@@ -1900,7 +1927,7 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
{
int count = deleteSelectedTransactions(transactionIdList);
deleteResult.add("Purged old transactions: " + count);
if (isDebugEnabled)
if (logger.isDebugEnabled())
{
logger.debug("transactions deleted:" + count);
}
@@ -1916,7 +1943,7 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
{
int count = deleteSelectedTransactions(transactionIdList);
deleteResult.add("Purged old transactions: " + count);
if (isDebugEnabled)
if (logger.isDebugEnabled())
{
logger.debug("final batch of transactions deleted:" + count);
}
@@ -1928,13 +1955,13 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
private int deleteSelectedNodesAndProperties(List<Long> nodeIdList)
{
int cnt = template.delete(DELETE_NODE_PROPS_BY_NODE_ID, nodeIdList);
if (isDebugEnabled)
if (logger.isDebugEnabled())
{
logger.debug("nodes props deleted:" + cnt);
}
// Finally, remove the nodes
cnt = template.delete(DELETE_NODES_BY_ID, nodeIdList);
if (isDebugEnabled)
if (logger.isDebugEnabled())
{
logger.debug("nodes deleted:" + cnt);
}

View File

@@ -310,6 +310,12 @@ public abstract class AbstractAclCrudDAOImpl implements AclCrudDAO
return null;
}
@Override
public List<Serializable> getValueKeys(List<AclEntity> values)
{
throw new UnsupportedOperationException("Batch lookup not supported for ACLs.");
}
public Pair<Long, AclEntity> createValue(AclEntity value)
{
AclEntity entity = createAclEntity(value);
@@ -322,6 +328,12 @@ public abstract class AbstractAclCrudDAOImpl implements AclCrudDAO
return convertEntityToPair(entity);
}
@Override
public List<Pair<Long, AclEntity>> findByKeys(List<Long> keys)
{
throw new UnsupportedOperationException("Batch lookup not supported for ACLs.");
}
public Pair<Long, AclEntity> findByValue(AclEntity value)
{
if ((value != null) && (value.getId() != null))
@@ -331,6 +343,12 @@ public abstract class AbstractAclCrudDAOImpl implements AclCrudDAO
return null;
}
@Override
public List<Pair<Long, AclEntity>> findByValues(List<AclEntity> values)
{
throw new UnsupportedOperationException("Batch lookup not supported for ACLs.");
}
public int updateValue(Long key, AclEntity value)
{
return updateAclEntity(value);
@@ -816,6 +834,12 @@ public abstract class AbstractAclCrudDAOImpl implements AclCrudDAO
return value;
}
@Override
public List<PermissionEntity> getValueKeys(List<PermissionEntity> keys)
{
throw new UnsupportedOperationException("Batch lookup not supported for permissions.");
}
public Pair<Long, PermissionEntity> createValue(PermissionEntity value)
{
PermissionEntity entity = createPermissionEntity(value);
@@ -828,6 +852,12 @@ public abstract class AbstractAclCrudDAOImpl implements AclCrudDAO
return convertEntityToPair(entity);
}
@Override
public List<Pair<Long, PermissionEntity>> findByKeys(List<Long> keys)
{
throw new UnsupportedOperationException("Batch lookup not supported for permissions.");
}
public Pair<Long, PermissionEntity> findByValue(PermissionEntity value)
{
if ((value == null) || (value.getName() == null) || (value.getTypeQNameId() == null))
@@ -837,6 +867,12 @@ public abstract class AbstractAclCrudDAOImpl implements AclCrudDAO
return convertEntityToPair(getPermissionEntity(value.getTypeQNameId(), value.getName()));
}
@Override
public List<Pair<Long, PermissionEntity>> findByValues(List<PermissionEntity> values)
{
throw new UnsupportedOperationException("Batch loading not supported for PermissionEntity");
}
public int updateValue(Long key, PermissionEntity value)
{
return updatePermissionEntity(value);
@@ -999,6 +1035,12 @@ public abstract class AbstractAclCrudDAOImpl implements AclCrudDAO
return value.getAuthority();
}
@Override
public List<String> getValueKeys(List<AuthorityEntity> keys)
{
throw new UnsupportedOperationException("Batch lookup not supported for authorities.");
}
public Pair<Long, AuthorityEntity> createValue(AuthorityEntity value)
{
AuthorityEntity entity = createAuthorityEntity(value);
@@ -1011,6 +1053,12 @@ public abstract class AbstractAclCrudDAOImpl implements AclCrudDAO
return convertEntityToPair(entity);
}
@Override
public List<Pair<Long, AuthorityEntity>> findByKeys(List<Long> keys)
{
throw new UnsupportedOperationException("Batch lookup not supported for authorities.");
}
public Pair<Long, AuthorityEntity> findByValue(AuthorityEntity value)
{
if ((value == null) || (value.getAuthority() == null))
@@ -1020,6 +1068,12 @@ public abstract class AbstractAclCrudDAOImpl implements AclCrudDAO
return convertEntityToPair(getAuthorityEntity(value.getAuthority()));
}
@Override
public List<Pair<Long, AuthorityEntity>> findByValues(List<AuthorityEntity> values)
{
throw new UnsupportedOperationException("Batch loading not supported for AuthorityEntity");
}
public int updateValue(Long key, AuthorityEntity value)
{
return updateAuthorityEntity(value);

View File

@@ -373,11 +373,23 @@ public abstract class AbstractPropertyValueDAOImpl implements PropertyValueDAO
return convertEntityToPair(entity);
}
@Override
public List<Pair<Long, Class<?>>> findByKeys(List<Long> keys)
{
throw new UnsupportedOperationException("Batch lookup not supported for property classes.");
}
public Pair<Long, Class<?>> findByValue(Class<?> value)
{
PropertyClassEntity entity = findClassByValue(value);
return convertEntityToPair(entity);
}
@Override
public List<Pair<Long, Class<?>>> findByValues(List<Class<?>> values)
{
throw new UnsupportedOperationException("Batch lookup not supported for property classes.");
}
}
protected abstract PropertyClassEntity findClassById(Long id);
@@ -465,11 +477,23 @@ public abstract class AbstractPropertyValueDAOImpl implements PropertyValueDAO
return convertEntityToPair(entity);
}
@Override
public List<Pair<Long, Date>> findByKeys(List<Long> keys)
{
throw new UnsupportedOperationException("Batch lookup not supported for property date values.");
}
public Pair<Long, Date> findByValue(Date value)
{
PropertyDateValueEntity entity = findDateValueByValue(value);
return convertEntityToPair(entity);
}
@Override
public List<Pair<Long, Date>> findByValues(List<Date> values)
{
throw new UnsupportedOperationException("Batch lookup not supported for property date values.");
}
}
protected abstract PropertyDateValueEntity findDateValueById(Long id);
@@ -566,6 +590,12 @@ public abstract class AbstractPropertyValueDAOImpl implements PropertyValueDAO
}
}
@Override
public List<Pair<Long, String>> findByKeys(List<Long> keys)
{
throw new UnsupportedOperationException("Batch lookup not supported for property string values.");
}
public Pair<Long, String> findByValue(String value)
{
Long key = findStringValueByValue(value);
@@ -578,6 +608,12 @@ public abstract class AbstractPropertyValueDAOImpl implements PropertyValueDAO
return new Pair<Long, String>(key, value);
}
}
@Override
public List<Pair<Long, String>> findByValues(List<String> values)
{
throw new UnsupportedOperationException("Batch lookup not supported for property string values.");
}
}
protected abstract String findStringValueById(Long id);
@@ -658,11 +694,23 @@ public abstract class AbstractPropertyValueDAOImpl implements PropertyValueDAO
return convertEntityToPair(entity);
}
@Override
public List<Pair<Long, Double>> findByKeys(List<Long> keys)
{
throw new UnsupportedOperationException("Batch lookup not supported for property double values.");
}
public Pair<Long, Double> findByValue(Double value)
{
PropertyDoubleValueEntity entity = findDoubleValueByValue(value);
return convertEntityToPair(entity);
}
@Override
public List<Pair<Long, Double>> findByValues(List<Double> values)
{
throw new UnsupportedOperationException("Batch lookup not supported for property double values.");
}
}
protected abstract PropertyDoubleValueEntity findDoubleValueById(Long id);
@@ -727,6 +775,18 @@ public abstract class AbstractPropertyValueDAOImpl implements PropertyValueDAO
PropertySerializableValueEntity entity = findSerializableValueById(key);
return convertEntityToPair(entity);
}
@Override
public List<Pair<Long, Serializable>> findByKeys(List<Long> keys)
{
throw new UnsupportedOperationException("Batch lookup not supported for property serializable values.");
}
@Override
public List<Pair<Long, Serializable>> findByValues(List<Serializable> values)
{
throw new UnsupportedOperationException("Batch lookup not supported for property serializable values.");
}
}
protected abstract PropertySerializableValueEntity findSerializableValueById(Long id);
@@ -833,12 +893,24 @@ public abstract class AbstractPropertyValueDAOImpl implements PropertyValueDAO
return convertEntityToPair(entity);
}
@Override
public List<Pair<Long, Serializable>> findByKeys(List<Long> keys)
{
throw new UnsupportedOperationException("Batch lookup not supported for property values.");
}
public Pair<Long, Serializable> findByValue(Serializable value)
{
PropertyValueEntity entity = findPropertyValueByValue(value);
return convertEntityToPair(entity);
}
@Override
public List<Pair<Long, Serializable>> findByValues(List<Serializable> values)
{
throw new UnsupportedOperationException("Batch lookup not supported for property values.");
}
/**
* No-op. This is implemented as we just want to update the cache.
*
@@ -937,6 +1009,18 @@ public abstract class AbstractPropertyValueDAOImpl implements PropertyValueDAO
return new Pair<Long, Serializable>(key, value);
}
@Override
public List<Pair<Long, Serializable>> findByKeys(List<Long> keys)
{
throw new UnsupportedOperationException("Batch lookup not supported for properties.");
}
@Override
public List<Pair<Long, Serializable>> findByValues(List<Serializable> values)
{
throw new UnsupportedOperationException("Lookup by value not supported for properties.");
}
/**
* Updates a property. The <b>alf_prop_root</b> entity is updated to ensure concurrent modification is detected.
*

View File

@@ -27,6 +27,7 @@ package org.alfresco.repo.domain.qname;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -191,6 +192,12 @@ public abstract class AbstractQNameDAOImpl implements QNameDAO
}
}
@Override
public List<Pair<Long, String>> findByKeys(List<Long> ids)
{
throw new UnsupportedOperationException("Batch lookup not supported for namespaces.");
}
@Override
public Pair<Long, String> findByValue(String uri)
{
@@ -205,6 +212,12 @@ public abstract class AbstractQNameDAOImpl implements QNameDAO
}
}
@Override
public List<Pair<Long, String>> findByValues(List<String> values)
{
throw new UnsupportedOperationException("Batch lookup not supported for namespaces.");
}
public Pair<Long, String> createValue(String uri)
{
NamespaceEntity entity = createNamespaceEntity(uri);
@@ -351,6 +364,12 @@ public abstract class AbstractQNameDAOImpl implements QNameDAO
}
}
@Override
public List<Pair<Long, QName>> findByKeys(List<Long> keys)
{
throw new UnsupportedOperationException("Batch lookup not supported for QNames.");
}
@Override
public Pair<Long, QName> findByValue(QName qname)
{
@@ -374,6 +393,12 @@ public abstract class AbstractQNameDAOImpl implements QNameDAO
}
}
@Override
public List<Pair<Long, QName>> findByValues(List<QName> values)
{
throw new UnsupportedOperationException("Batch lookup not supported for QNames.");
}
public Pair<Long, QName> createValue(QName qname)
{
String uri = qname.getNamespaceURI();

View File

@@ -210,6 +210,12 @@ public abstract class AbstractTenantAdminDAOImpl implements TenantAdminDAO
return null;
}
@Override
public List<Serializable> getValueKeys(List<TenantEntity> values)
{
throw new UnsupportedOperationException("Batch lookup not supported for tenants.");
}
@Override
public Pair<String, TenantEntity> createValue(TenantEntity value)
{
@@ -224,6 +230,12 @@ public abstract class AbstractTenantAdminDAOImpl implements TenantAdminDAO
return convertEntityToPair(entity);
}
@Override
public List<Pair<String, TenantEntity>> findByKeys(List<String> keys)
{
throw new UnsupportedOperationException("Batch lookup not supported for tenants.");
}
@Override
public Pair<String, TenantEntity> findByValue(TenantEntity value)
{
@@ -234,6 +246,12 @@ public abstract class AbstractTenantAdminDAOImpl implements TenantAdminDAO
return convertEntityToPair(getTenantEntity(value.getTenantDomain()));
}
@Override
public List<Pair<String, TenantEntity>> findByValues(List<TenantEntity> values)
{
throw new UnsupportedOperationException("Batch lookup not supported for tenants.");
}
@Override
public int updateValue(String tenantDomain, TenantEntity value)
{

View File

@@ -38,6 +38,7 @@ import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -192,6 +193,27 @@ public class DbNodeServiceImpl extends AbstractNodeServiceImpl implements Extens
return unchecked;
}
/**
* Performs a null-safe get of a list of nodes. Node references that cannot be resolved are omitted from the result.
*
* @param nodeRefs
* the nodes to retrieve
* @return Returns a list of node ID / NodeRef pairs (never null)
* @throws InvalidNodeRefException
*     if none of the referenced nodes could be found
*/
private List<Pair<Long, NodeRef>> getNodePairsNotNull(List<NodeRef> nodeRefs) throws InvalidNodeRefException
{
ParameterCheck.mandatory("nodeRefs", nodeRefs);
List<Pair<Long, NodeRef>> unchecked = nodeDAO.getNodePairs(nodeRefs.get(0).getStoreRef(), nodeRefs);
if (unchecked.isEmpty())
{
throw new InvalidNodeRefException("Nodes do not exist: " + nodeRefs, null);
}
return unchecked;
}
@Extend(traitAPI = NodeServiceTrait.class, extensionAPI = NodeServiceExtension.class)
public boolean exists(StoreRef storeRef)
{
@@ -205,6 +227,14 @@ public class DbNodeServiceImpl extends AbstractNodeServiceImpl implements Extens
return nodeDAO.exists(nodeRef);
}
@Extend(traitAPI = NodeServiceTrait.class, extensionAPI = NodeServiceExtension.class)
@Override
public List<NodeRef> exists(List<NodeRef> nodeRefs)
{
ParameterCheck.mandatory("nodeRefs", nodeRefs);
return nodeDAO.exists(nodeRefs);
}
@Extend(traitAPI = NodeServiceTrait.class, extensionAPI = NodeServiceExtension.class)
public Status getNodeStatus(NodeRef nodeRef)
{
@@ -221,6 +251,24 @@ public class DbNodeServiceImpl extends AbstractNodeServiceImpl implements Extens
return nodePair == null ? null : nodePair.getSecond();
}
@Override
@Extend(traitAPI = NodeServiceTrait.class, extensionAPI = NodeServiceExtension.class)
public List<NodeRef> getNodeRefs(List<Long> nodeIds)
{
List<NodeRef> nodeRefs = new ArrayList<>(nodeIds.size());
List<Pair<Long, NodeRef>> nodePairs = nodeDAO.getNodePairs(nodeIds);
for (Pair<Long, NodeRef> nodePair : nodePairs)
{
// It should not be null but just in case...
if (nodePair != null)
{
nodeRefs.add(nodePair.getSecond());
}
}
return nodeRefs;
}
/**
* {@inheritDoc}
*/
@@ -1031,6 +1079,44 @@ public class DbNodeServiceImpl extends AbstractNodeServiceImpl implements Extens
return aspectQNames;
}
@Extend(traitAPI = NodeServiceTrait.class, extensionAPI = NodeServiceExtension.class)
@Override
public Map<NodeRef, Set<QName>> getAspects(List<NodeRef> nodeRefs) throws InvalidNodeRefException
{
List<Pair<Long, NodeRef>> nodePairs = getNodePairsNotNull(nodeRefs);
Map<Long, Set<QName>> aspectQNames = nodeDAO.getNodeAspects(getNodeIdsFromList(nodePairs));
for (Pair<Long, NodeRef> nodePair : nodePairs)
{
if (aspectQNames.containsKey(nodePair.getFirst()) && isPendingDelete(nodePair.getSecond()))
{
aspectQNames.computeIfPresent(nodePair.getFirst(), (k, v) -> {
v.add(ContentModel.ASPECT_PENDING_DELETE);
return v;
});
}
}
// Map back to NodeRef. Ignore nodes with no aspects.
Map<NodeRef, Set<QName>> results = nodePairs.stream()
.filter(pair -> aspectQNames.containsKey(pair.getFirst()))
.collect(Collectors.toMap(
Pair::getSecond,
pair -> aspectQNames.getOrDefault(pair.getFirst(), Collections.emptySet())));
return results;
}
private List<Long> getNodeIdsFromList(List<Pair<Long, NodeRef>> nodeRefPairs)
{
List<Long> nodeIds = new ArrayList<>(nodeRefPairs.size());
for (Pair<Long, NodeRef> nodePair : nodeRefPairs)
{
nodeIds.add(nodePair.getFirst());
}
return nodeIds;
}
/**
* @return Returns <tt>true</tt> if the node is being deleted
*
@@ -1550,6 +1636,60 @@ public class DbNodeServiceImpl extends AbstractNodeServiceImpl implements Extens
return nodeProperties;
}
/**
* Gets, converts and adds the intrinsic properties for the given list of nodes
*/
private Map<Long, Map<QName, Serializable>> getPropertiesImpl(List<Pair<Long, NodeRef>> nodePairs) throws InvalidNodeRefException
{
List<Long> nodeIds = new ArrayList<>(nodePairs.size());
for (Pair<Long, NodeRef> nodePair : nodePairs)
{
nodeIds.add(nodePair.getFirst());
}
Map<Long, Map<QName, Serializable>> nodeProperties = nodeDAO.getNodeProperties(nodeIds);
// done
return nodeProperties;
}
@Extend(traitAPI = NodeServiceTrait.class, extensionAPI = NodeServiceExtension.class)
@Override
public Map<NodeRef, Map<QName, Serializable>> getPropertiesForNodeRefs(List<NodeRef> nodeRefs) throws InvalidNodeRefException
{
if (nodeRefs == null || nodeRefs.isEmpty())
{
return Collections.emptyMap();
}
// make a copy so we can modify if needed
nodeRefs = new ArrayList<>(nodeRefs);
// check permissions per node (since we can't do this in config). Remove nodes that the user has no permission to see
nodeRefs.removeIf(nodeRef -> permissionService.hasPermission(nodeRef, PermissionService.READ_PROPERTIES) != AccessStatus.ALLOWED);
// if no nodes left, return empty map
if (nodeRefs.isEmpty())
{
return Collections.emptyMap();
}
StoreRef storeRef = nodeRefs.get(0).getStoreRef();
List<Pair<Long, NodeRef>> nodePairs = nodeDAO.getNodePairs(storeRef, nodeRefs);
Map<Long, Map<QName, Serializable>> propertiesMappedById = getPropertiesImpl(nodePairs);
Map<NodeRef, Map<QName, Serializable>> propertiesMappedByNodeRef = new HashMap<>(nodePairs.size());
for (Pair<Long, NodeRef> nodePair : nodePairs)
{
Long nodeId = nodePair.getFirst();
NodeRef nodeRef = nodePair.getSecond();
propertiesMappedByNodeRef.put(nodeRef, propertiesMappedById.get(nodeId));
}
return propertiesMappedByNodeRef;
}
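For callers, the net effect is that a list of NodeRefs can be resolved to their property maps in one service call instead of a loop over getProperties. A hedged sketch, assuming the NodeService interface declares the matching getPropertiesForNodeRefs method (as the @Override implementations in this diff imply); the client class below is invented:
import java.io.Serializable;
import java.util.List;
import java.util.Map;
import org.alfresco.service.cmr.repository.NodeRef;
import org.alfresco.service.cmr.repository.NodeService;
import org.alfresco.service.namespace.QName;
public final class BulkPropertiesClient
{
    private BulkPropertiesClient()
    {}
    /** Resolves properties for many nodes at once; nodes the caller cannot read are simply absent from the result. */
    public static Map<NodeRef, Map<QName, Serializable>> fetch(NodeService nodeService, List<NodeRef> nodeRefs)
    {
        return nodeService.getPropertiesForNodeRefs(nodeRefs);
    }
}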
@Extend(traitAPI = NodeServiceTrait.class, extensionAPI = NodeServiceExtension.class)
public Long getNodeAclId(NodeRef nodeRef) throws InvalidNodeRefException
{

View File

@@ -384,7 +384,7 @@ public class DBQueryEngine implements QueryEngine
addStoreInfo(node);
boolean shouldCache = shouldCache(options, nodes, requiredNodes);
if (shouldCache)
if (shouldCache && !options.isBulkFetchEnabled())
{
logger.debug("- selected node " + nodes.size() + ": " + node.getUuid() + " " + node.getId());
nodesCache.setValue(node.getId(), node);
@@ -433,7 +433,29 @@ public class DBQueryEngine implements QueryEngine
FilteringResultSet frs = new FilteringResultSet(rs, formInclusionMask(nodes));
frs.setResultSetMetaData(new SimpleResultSetMetaData(LimitBy.UNLIMITED, PermissionEvaluationMode.EAGER, rs.getResultSetMetaData().getSearchParameters()));
logger.debug("- query is completed, " + nodes.size() + " nodes loaded");
// Bulk Load
if (rs.getResultSetMetaData().getSearchParameters().isBulkFetchEnabled())
{
List<Long> rawDbids = new ArrayList<>();
for (Node node : nodes)
{
if (node != null)
{
rawDbids.add(node.getId());
}
}
if (logger.isDebugEnabled())
{
logger.debug("- bulk loading node data: " + rawDbids.size());
}
nodeDAO.cacheNodesById(rawDbids);
}
if (logger.isDebugEnabled())
{
logger.debug("- query is completed, " + nodes.size() + " nodes loaded");
}
return frs;
}
@@ -565,11 +587,23 @@ public class DBQueryEngine implements QueryEngine
return null;
}
@Override
public List<Pair<Long, Node>> findByKeys(List<Long> nodeIds)
{
throw new UnsupportedOperationException("Batch lookup not supported for Nodes.");
}
@Override
public NodeRef getValueKey(Node value)
{
return value.getNodeRef();
}
@Override
public List<Pair<Long, Node>> findByValues(List<Node> values)
{
throw new UnsupportedOperationException("Batch lookup not supported for Nodes.");
}
}
private void addStoreInfo(Node node)

View File

@@ -34,6 +34,7 @@ import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -174,6 +175,20 @@ public class NodeServiceImpl implements NodeService, VersionModel
return dbNodeService.exists(VersionUtil.convertNodeRef(nodeRef));
}
/**
* Delegates to the <code>NodeService</code> used as the version store implementation
*/
@Override
public List<NodeRef> exists(List<NodeRef> nodeRefs)
{
List<NodeRef> convertedNodeRefs = new ArrayList<>(nodeRefs.size());
for (NodeRef nodeRef : nodeRefs)
{
convertedNodeRefs.add(VersionUtil.convertNodeRef(nodeRef));
}
return dbNodeService.exists(convertedNodeRefs);
}
/**
* Delegates to the <code>NodeService</code> used as the version store implementation
*/
@@ -191,6 +206,15 @@ public class NodeServiceImpl implements NodeService, VersionModel
return dbNodeService.getNodeRef(nodeId);
}
/**
* Delegates to the <code>NodeService</code> used as the version store implementation
*/
@Override
public List<NodeRef> getNodeRefs(List<Long> nodeIds)
{
return dbNodeService.getNodeRefs(nodeIds);
}
/**
* Delegates to the <code>NodeService</code> used as the version store implementation
*/
@@ -386,6 +410,33 @@ public class NodeServiceImpl implements NodeService, VersionModel
(ArrayList<QName>) this.dbNodeService.getProperty(VersionUtil.convertNodeRef(nodeRef), PROP_QNAME_FROZEN_ASPECTS));
}
/**
* Translation for version store
*/
@SuppressWarnings({"unchecked", "deprecation"})
@Override
public Map<NodeRef, Set<QName>> getAspects(List<NodeRef> nodeRefs) throws InvalidNodeRefException
{
Map<NodeRef, Set<QName>> result = new HashMap<>();
List<NodeRef> convertedNodeRefs = nodeRefs.stream()
.map(VersionUtil::convertNodeRef)
.collect(Collectors.toList());
for (NodeRef nodeRef : convertedNodeRefs)
{
if (!this.dbNodeService.exists(nodeRef))
{
throw new InvalidNodeRefException("Node does not exist: " + nodeRef, nodeRef);
}
Serializable aspect = this.dbNodeService.getProperty(nodeRef, PROP_QNAME_FROZEN_ASPECTS);
result.put(nodeRef, new HashSet<>((ArrayList<QName>) aspect));
}
return result;
}
/**
* Property translation for version store
*/
@@ -450,6 +501,19 @@ public class NodeServiceImpl implements NodeService, VersionModel
return result;
}
@Override
public Map<NodeRef, Map<QName, Serializable>> getPropertiesForNodeRefs(List<NodeRef> nodeRefs) throws InvalidNodeRefException
{
if (nodeRefs == null || nodeRefs.isEmpty())
{
return Collections.emptyMap();
}
List<NodeRef> convertedNodeRefs = nodeRefs.stream().map(VersionUtil::convertNodeRef).collect(Collectors.toList());
return this.dbNodeService.getPropertiesForNodeRefs(convertedNodeRefs);
}
/**
* Property translation for version store
*/

View File

@@ -33,6 +33,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
@@ -167,6 +168,22 @@ public class VirtualNodeServiceExtension extends VirtualSpringBeanExtension<Node
}
}
@Override
public Map<NodeRef, Map<QName, Serializable>> getPropertiesForNodeRefs(List<NodeRef> nodeRefs)
{
Map<NodeRef, Map<QName, Serializable>> result = new HashMap<>();
for (NodeRef nodeRef : nodeRefs)
{
Reference reference = Reference.fromNodeRef(nodeRef);
if (reference != null)
{
result.put(nodeRef, getVirtualProperties(reference));
}
}
return result;
}
@Override
public Serializable getProperty(NodeRef nodeRef, QName qname)
{
@@ -200,6 +217,28 @@ public class VirtualNodeServiceExtension extends VirtualSpringBeanExtension<Node
}
}
@Override
public Map<NodeRef, Set<QName>> getAspects(List<NodeRef> nodeRefs)
{
NodeServiceTrait theTrait = getTrait();
Map<NodeRef, Set<QName>> aspectsMap = new HashMap<>();
for (NodeRef nodeRef : nodeRefs)
{
Reference reference = Reference.fromNodeRef(nodeRef);
if (reference != null)
{
GetAspectsMethod method = new GetAspectsMethod(theTrait,
environment);
aspectsMap.put(nodeRef, reference.execute(method));
}
else
{
aspectsMap.put(nodeRef, theTrait.getAspects(nodeRef));
}
}
return aspectsMap;
}
@Override
public Path getPath(NodeRef nodeRef)
{
@@ -248,6 +287,33 @@ public class VirtualNodeServiceExtension extends VirtualSpringBeanExtension<Node
}
}
@Override
public List<NodeRef> exists(List<NodeRef> nodeRefs)
{
List<NodeRef> existingRefs = new ArrayList<>();
List<NodeRef> nonExistingRefs = new ArrayList<>();
for (NodeRef nodeRef : nodeRefs)
{
Reference reference = Reference.fromNodeRef(nodeRef);
if (reference != null)
{
// For now references last forever (i.e. there is no expiration mechanism)
existingRefs.add(nodeRef);
}
else
{
nonExistingRefs.add(nodeRef);
}
}
// Now check the rest with the actual node service
List<NodeRef> allExistingRefs = getTrait().exists(nonExistingRefs);
existingRefs.addAll(allExistingRefs);
return existingRefs;
}
@Override
public ChildAssociationRef createNode(NodeRef parentRef, QName assocTypeQName, QName assocQName,
QName nodeTypeQName)
@@ -1210,6 +1276,12 @@ public class VirtualNodeServiceExtension extends VirtualSpringBeanExtension<Node
return getTrait().getNodeRef(nodeId);
}
@Override
public List<NodeRef> getNodeRefs(List<Long> nodeIds)
{
return getTrait().getNodeRefs(nodeIds);
}
@Override
public NodeRef getRootNode(StoreRef storeRef) throws InvalidStoreRefException
{
View File
@@ -140,6 +140,8 @@
<property name="parentAssocsCacheLimitFactor" value="${system.cache.parentAssocs.limitFactor}"/>
<property name="childByNameCache" ref="node.childByNameCache"/>
<property name="cachingThreshold" value="${nodes.bulkLoad.cachingThreshold}"/>
<property name="batchSize" value="${nodes.bulkLoad.batchSize:256}"/>
<property name="forceBatching" value="${nodes.bulkLoad.forceBatching:false}"/>
</bean>
<bean id="nodeDAO.org.alfresco.repo.domain.dialect.Dialect" class="org.alfresco.repo.domain.node.ibatis.NodeDAOImpl" parent="nodeDAObase" />
View File
@@ -235,6 +235,31 @@
u.id = #{id}
</select>
<!-- Get the content URL entities by IDs -->
<select id="select_ContentUrlsByIds" parameterType="list" resultMap="result_ContentUrl">
select
u.id as id,
u.content_url as content_url,
u.content_url_short as content_url_short,
u.content_url_crc as content_url_crc,
u.content_size as content_size,
u.orphan_time as orphan_time,
ce.algorithm as algorithm,
ce.key_size as key_size,
ce.encrypted_key as encrypted_key,
ce.master_keystore_id as master_keystore_id,
ce.master_key_alias as master_key_alias,
ce.unencrypted_file_size as unencrypted_file_size
from
alf_content_url u
left join alf_content_url_encryption ce on (u.id = ce.content_url_id)
where
u.id in
<foreach item="item" index="index" collection="list" open="(" separator="," close=")">
#{item}
</foreach>
</select>
<!-- Get the content URL entity by unique key -->
<select id="select_ContentUrlByKey" parameterType="ContentUrl" resultMap="result_ContentUrl">
select
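
A rough sketch of how a DAO might drive the new select_ContentUrlsByIds statement follows. The SqlSessionTemplate wiring, the statement namespace prefix and the ContentUrlEntity result type are assumptions for illustration and are not taken from this diff.

import java.util.List;

import org.mybatis.spring.SqlSessionTemplate;

import org.alfresco.repo.domain.contentdata.ContentUrlEntity;

public class ContentUrlBatchLoader
{
    // Namespace prefix assumed for illustration; the real mapper namespace may differ
    private static final String SELECT_CONTENT_URLS_BY_IDS = "alfresco.content.select_ContentUrlsByIds";

    private final SqlSessionTemplate template;

    public ContentUrlBatchLoader(SqlSessionTemplate template)
    {
        this.template = template;
    }

    public List<ContentUrlEntity> loadContentUrls(List<Long> ids)
    {
        if (ids == null || ids.isEmpty())
        {
            return List.of();
        }
        // Single IN-clause query instead of one single-ID select per content URL
        return template.selectList(SELECT_CONTENT_URLS_BY_IDS, ids);
    }
}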
View File
@@ -762,13 +762,15 @@
alf_node node
join alf_node_aspects aspects on (aspects.node_id = node.id)
<where>
<if test="nodeId != null">aspects.node_id = #{nodeId}</if>
<if test="nodeIds != null">
and aspects.node_id in
<foreach item="item" index="index" collection="nodeIds" open="(" separator="," close=")">
#{item}
</foreach>
</if>
<!-- nodeId (single node) and nodeIds (batch) are alternative filters; nodeId takes precedence -->
<choose>
<when test="nodeId != null">aspects.node_id = #{nodeId}</when>
<when test="nodeIds != null">
aspects.node_id in
<foreach item="item" index="index" collection="nodeIds" open="(" separator="," close=")">
#{item}
</foreach>
</when>
</choose>
</where>
</select>
@@ -1213,6 +1215,17 @@
<if test="isPrimary != null">and assoc.is_primary = #{isPrimary}</if>
</select>
<select id="select_ParentAssocsOfChildren" parameterType="ChildAssoc" resultMap="result_ChildAssocTxnId">
<include refid="alfresco.node.select_ChildAssoc_Results"/>
<include refid="alfresco.node.select_ChildAssoc_FromSimple"/>
where
childNode.id in
<foreach item="item" index="index" collection="ids" open="(" separator="," close=")">
#{item}
</foreach>
and assoc.is_primary = true
</select>
<select id="select_NodeMinId" resultType="java.lang.Long">
select
min(id)
View File
@@ -425,6 +425,7 @@
org.alfresco.service.cmr.repository.NodeService.removeSeconaryChildAssociation=ACL_PARENT.0.sys:base.DeleteChildren
org.alfresco.service.cmr.repository.NodeService.removeSecondaryChildAssociation=ACL_PARENT.0.sys:base.DeleteChildren
org.alfresco.service.cmr.repository.NodeService.getProperties=ACL_NODE.0.sys:base.ReadProperties
org.alfresco.service.cmr.repository.NodeService.getPropertiesForNodeRefs=ROLE_AUTHENTICATED
org.alfresco.service.cmr.repository.NodeService.getProperty=ACL_NODE.0.sys:base.ReadProperties
org.alfresco.service.cmr.repository.NodeService.setProperties=ACL_NODE.0.sys:base.WriteProperties,ACL_ITEM.0.cm:ownable.TakeOwnership
org.alfresco.service.cmr.repository.NodeService.addProperties=ACL_NODE.0.sys:base.WriteProperties,ACL_ITEM.0.cm:ownable.TakeOwnership
View File
@@ -918,6 +918,8 @@ mail.service.corePoolSize=8
mail.service.maximumPoolSize=20
nodes.bulkLoad.cachingThreshold=10
nodes.bulkLoad.batchSize=256
nodes.bulkLoad.forceBatching=false
# Multi-Tenancy
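
The defaults above can be overridden in alfresco-global.properties (for example nodes.bulkLoad.batchSize=512); note that values much above 1000 can collide with Oracle's limit on the number of expressions in an IN list. As a rough illustration of what a batch size of this kind typically controls, splitting a key list into IN-clause sized chunks might look like the following sketch; the actual batching logic lives in the NodeDAO and may differ.

import java.util.ArrayList;
import java.util.List;

public final class BatchingExample
{
    private BatchingExample()
    {
    }

    // Splits a key list into sublists no larger than batchSize, e.g. for IN-clause queries
    public static <T> List<List<T>> partition(List<T> keys, int batchSize)
    {
        int effectiveBatchSize = Math.max(1, batchSize);
        List<List<T>> batches = new ArrayList<>();
        for (int from = 0; from < keys.size(); from += effectiveBatchSize)
        {
            int to = Math.min(keys.size(), from + effectiveBatchSize);
            batches.add(keys.subList(from, to));
        }
        return batches;
    }
}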
View File
@@ -28,6 +28,8 @@ package org.alfresco.repo.cache.lookup;
import static org.junit.Assert.*;
import java.sql.Savepoint;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
@@ -294,6 +296,23 @@ public class EntityLookupCacheTest implements EntityLookupCallbackDAO<Long, Obje
assertNull(entityPairCacheCheck);
}
public void testFindByValuesNotFound()
{
// Put some values in the "database"
createValue(new TestValue("AAA"));
createValue(new TestValue("BBB"));
createValue(new TestValue("CCC"));
List<Object> valuesToFind = new ArrayList<>(3);
valuesToFind.add(new TestValue("ZZZ"));
valuesToFind.add(new TestValue("AAA"));
valuesToFind.add(new TestValue("BBB"));
List<Pair<Long, Object>> results = findByValues(valuesToFind);
assertNotNull(results);
// "ZZZ" does not exist in the "database", so only the pairs for "AAA" and "BBB" are returned
assertEquals(2, results.size());
}
/**
* Helper class to represent business object
*/
@@ -332,6 +351,17 @@ public class EntityLookupCacheTest implements EntityLookupCallbackDAO<Long, Obje
return dbValue;
}
@Override
public List<String> getValueKeys(List<Object> values)
{
List<String> keys = new ArrayList<>(values.size());
for (Object value : values)
{
keys.add(getValueKey(value));
}
return keys;
}
public Pair<Long, Object> findByKey(Long key)
{
assertNotNull(key);
@@ -346,6 +376,12 @@ public class EntityLookupCacheTest implements EntityLookupCallbackDAO<Long, Obje
return new Pair<Long, Object>(key, value);
}
@Override
public List<Pair<Long, Object>> findByKeys(List<Long> keys)
{
throw new UnsupportedOperationException("Batch lookup not supported in test DAO.");
}
public Pair<Long, Object> findByValue(Object value)
{
assertTrue(value == null || value instanceof TestValue);
@@ -361,6 +397,31 @@ public class EntityLookupCacheTest implements EntityLookupCallbackDAO<Long, Obje
return null;
}
@Override
public List<Pair<Long, Object>> findByValues(List<Object> values)
{
assertNotNull(values);
assertFalse(values.isEmpty());
List<Pair<Long, Object>> results = new ArrayList<>(values.size());
for (Object value : values)
{
String dbValue = (value == null) ? null : ((TestValue) value).val;
for (Map.Entry<Long, String> entry : database.entrySet())
{
if (EqualsHelper.nullSafeEquals(entry.getValue(), dbValue))
{
results.add(new Pair<>(entry.getKey(), entry.getValue()));
break;
}
}
}
return results;
}
/**
* Simulate creation of a new database entry
*/