SAIL-234: Partially implemented NodeBulkLoader interface

- Aspects still need to be pre-loaded to eliminate the N+1 queries on hasAspect during directory listings


git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/alfresco/HEAD/root@20929 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
Derek Hulley
2010-07-03 12:47:50 +00:00
parent 8f681ca8d3
commit 5ad8b75240
8 changed files with 426 additions and 21 deletions

View File

@@ -15,6 +15,7 @@
<typeAlias alias="TransactionQuery" type="org.alfresco.repo.domain.node.TransactionQueryEntity"/>
<typeAlias alias="Store" type="org.alfresco.repo.domain.node.StoreEntity"/>
<typeAlias alias="Node" type="org.alfresco.repo.domain.node.NodeEntity"/>
<typeAlias alias="NodeBatchLoad" type="org.alfresco.repo.domain.node.ibatis.NodeBatchLoadEntity"/>
<typeAlias alias="NodeUpdate" type="org.alfresco.repo.domain.node.NodeUpdateEntity"/>
<typeAlias alias="AuditProps" type="org.alfresco.repo.domain.node.AuditablePropertiesEntity"/>
<typeAlias alias="NodePropertyKey" type="org.alfresco.repo.domain.node.NodePropertyKey"/>
@@ -547,6 +548,17 @@
<isNotNull property="deleted">and node.node_deleted = #deleted#</isNotNull>
</select>
<select id="select_NodesByUuids" parameterClass="NodeBatchLoad" resultMap="result_Node">
<include refid="alfresco.node.select_Node_Results"/>
from
alf_node node
join alf_store store on (store.id = node.store_id)
join alf_transaction txn on (txn.id = node.transaction_id)
where
node.store_id = #storeId# and
node.uuid in <iterate property="uuids" open="(" close=")" conjunction=",">#uuids[]#</iterate>
</select>
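
For context, the &lt;iterate&gt; tag expands the uuids list into the IN (...) clause at query time, and the statement is bound from a NodeBatchLoadEntity parameter. A minimal sketch of the Java call site (mirroring NodeDAOImpl.selectNodesByUuids further down in this change set; fragment only, not a full class):

// Sketch: parameterise and run select_NodesByUuids via the iBATIS template.
NodeBatchLoadEntity nodeBatchLoadEntity = new NodeBatchLoadEntity();
nodeBatchLoadEntity.setStoreId(storeId);                        // bound to #storeId#
nodeBatchLoadEntity.setUuids(new ArrayList<String>(uuids));     // expanded into the IN (...) list
List<NodeEntity> nodes =
        (List<NodeEntity>) template.queryForList("alfresco.node.select_NodesByUuids", nodeBatchLoadEntity);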
<sql id="select_NodeProperty_Results">
select
prop.node_id as node_id,
@@ -568,7 +580,12 @@
from
alf_node_properties prop
where
prop.node_id = #nodeId#
<dynamic>
<isNotNull property="nodeId">prop.node_id = #nodeId#</isNotNull>
<isNotNull property="nodeIds">
node_id in <iterate property="nodeIds" open="(" close=")" conjunction=",">#nodeIds[]#</iterate>
</isNotNull>
</dynamic>
<isNotNull property="key.qnameId">and qname_id = #key.qnameId#</isNotNull>
<isNotNull property="qnameIds">
and qname_id in

View File

@@ -528,11 +528,31 @@ public class EntityLookupCache<K extends Serializable, V extends Object, VK exte
return updateCount;
}
/**
* Cache-only operation: Get the key for a given value key (note: not 'value' but 'value key').
*
* @param valueKey The entity value key, which must be valid (<tt>null</tt> not allowed)
* @return The entity key (may be <tt>null</tt>)
*/
@SuppressWarnings("unchecked")
public K getKey(VK valueKey)
{
// Look the key up in the cache using the value key
CacheRegionValueKey valueCacheKey = new CacheRegionValueKey(cacheRegion, valueKey);
K key = (K) cache.get(valueCacheKey);
// Check if we have looked this up already
if (key != null && key.equals(VALUE_NOT_FOUND))
{
key = null;
}
return key;
}
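
This cache-only lookup is what lets AbstractNodeDAOImpl.cacheNodes (below) test for an existing entry without touching the database. A minimal usage sketch (the collection name is illustrative):

// Cache-only: a null result means "not cached yet", not "does not exist".
Long nodeId = nodesCache.getKey(nodeRef);
if (nodeId == null)
{
    nodeRefsToLoad.add(nodeRef);    // queue the node for bulk loading
}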
/**
* Cache-only operation: Get the value for a given key
*
* @param key The entity key, which may be valid or invalid (<tt>null</tt> not allowed)
* @return The new entity value (may be <tt>null</tt>)
* @return The entity value (may be <tt>null</tt>)
*/
@SuppressWarnings("unchecked")
public V getValue(K key)

View File

@@ -33,10 +33,13 @@ import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedSet;
import java.util.Stack;
import java.util.TreeSet;
import org.alfresco.error.AlfrescoRuntimeException;
import org.alfresco.ibatis.BatchingDAO;
@@ -478,6 +481,10 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
}
return true;
}
public void done()
{
}
};
selectChildAssocs(parentNodeId, null, null, null, null, null, callback);
}
@@ -2368,6 +2375,76 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
return assocToKeep.getPair(qnameDAO);
}
/**
* Callback that applies node preloading. Instances must be used and discarded per query.
*
* @author Derek Hulley
* @since 3.4
*/
private class ChildAssocRefBatchingQueryCallback implements ChildAssocRefQueryCallback
{
private static final int BATCH_SIZE = 256 * 4;
private final ChildAssocRefQueryCallback callback;
private final boolean preload;
private final List<NodeRef> nodeRefs;
/**
* @param callback the callback to batch around
*/
private ChildAssocRefBatchingQueryCallback(ChildAssocRefQueryCallback callback)
{
this.callback = callback;
this.preload = callback.preLoadNodes();
if (preload)
{
nodeRefs = new LinkedList<NodeRef>(); // Size is not known up front
}
else
{
nodeRefs = null; // No list needed
}
}
/**
* @return Always returns <tt>false</tt>, since batching is handled by this wrapper
*/
public boolean preLoadNodes()
{
return false;
}
/**
* {@inheritDoc}
*/
public boolean handle(
Pair<Long, ChildAssociationRef> childAssocPair,
Pair<Long, NodeRef> parentNodePair,
Pair<Long, NodeRef> childNodePair)
{
if (!preload)
{
return callback.handle(childAssocPair, parentNodePair, childNodePair);
}
// Batch it
if (nodeRefs.size() >= BATCH_SIZE)
{
cacheNodes(nodeRefs);
nodeRefs.clear();
}
nodeRefs.add(childNodePair.getSecond());
return callback.handle(childAssocPair, parentNodePair, childNodePair);
}
public void done()
{
// Finish the batch
if (preload && nodeRefs.size() > 0)
{
cacheNodes(nodeRefs);
nodeRefs.clear();
}
callback.done();
}
}
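
Note the two levels of batching at work: the wrapper collects up to 1024 child NodeRefs before each cacheNodes call, and cacheNodes (further down) re-batches those into IN-lists of at most 256 UUIDs per query, keeping the SQL parameter count bounded. The wrapper also reports preLoadNodes() as false so that the wrapped query does no preloading of its own; the DAO simply wraps every caller-supplied callback, as the overloads below show.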
public void getChildAssocs(
Long parentNodeId,
Long childNodeId,
@@ -2380,7 +2457,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
selectChildAssocs(
parentNodeId, childNodeId,
assocTypeQName, assocQName, isPrimary, sameStore,
resultsCallback);
new ChildAssocRefBatchingQueryCallback(resultsCallback));
}
public void getChildAssocs(Long parentNodeId, Set<QName> assocTypeQNames, ChildAssocRefQueryCallback resultsCallback)
@@ -2391,10 +2468,14 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
return; // No results possible
case 1:
QName assocTypeQName = assocTypeQNames.iterator().next();
selectChildAssocs(parentNodeId, null, assocTypeQName, (QName) null, null, null, resultsCallback);
selectChildAssocs(
parentNodeId, null, assocTypeQName, (QName) null, null, null,
new ChildAssocRefBatchingQueryCallback(resultsCallback));
break;
default:
selectChildAssocs(parentNodeId, assocTypeQNames, resultsCallback);
selectChildAssocs(
parentNodeId, assocTypeQNames,
new ChildAssocRefBatchingQueryCallback(resultsCallback));
}
}
@@ -2410,7 +2491,9 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
Collection<String> childNames,
ChildAssocRefQueryCallback resultsCallback)
{
selectChildAssocs(parentNodeId, assocTypeQName, childNames, resultsCallback);
selectChildAssocs(
parentNodeId, assocTypeQName, childNames,
new ChildAssocRefBatchingQueryCallback(resultsCallback));
}
public void getChildAssocsByChildTypes(
@@ -2418,7 +2501,9 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
Set<QName> childNodeTypeQNames,
ChildAssocRefQueryCallback resultsCallback)
{
selectChildAssocsByChildTypes(parentNodeId, childNodeTypeQNames, resultsCallback);
selectChildAssocsByChildTypes(
parentNodeId, childNodeTypeQNames,
new ChildAssocRefBatchingQueryCallback(resultsCallback));
}
public void getChildAssocsWithoutParentAssocsOfType(
@@ -2426,7 +2511,9 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
QName assocTypeQName,
ChildAssocRefQueryCallback resultsCallback)
{
selectChildAssocsWithoutParentAssocsOfType(parentNodeId, assocTypeQName, resultsCallback);
selectChildAssocsWithoutParentAssocsOfType(
parentNodeId, assocTypeQName,
new ChildAssocRefBatchingQueryCallback(resultsCallback));
}
public Pair<Long, ChildAssociationRef> getPrimaryParentAssoc(Long childNodeId)
@@ -2698,9 +2785,137 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
* Bulk caching
*/
/**
* {@inheritDoc}
* <p/>
* Loads properties, aspects, parent associations and the ID-noderef cache.
*/
public void cacheNodes(List<NodeRef> nodeRefs)
{
/*
* ALF-2712: Performance degradation from 3.1.0 to 3.1.2
* ALF-2784: Degradation of performance between 3.1.1 and 3.2x (observed in JSF)
*
* There is an obvious cost associated with querying the database to pull back nodes,
* and there is additional cost associated with putting the resultant entries into the
* caches. It is NO MORE expensive to check the cache than it is to put an entry into it
* - and probably cheaper considering cache replication - so we start checking nodes to see
* if they have entries before passing them over for batch loading.
*
* However, when running against a cold cache or doing a first-time query against some
* part of the repo, we will be checking for entries in the cache and consistently getting
* no results. To avoid unnecessary checking when the cache is PROBABLY cold, we
* examine the ratio of hits/misses at regular intervals.
*/
if (nodeRefs.size() < 10)
{
// Only preload when the result set is large enough for
// N+1 loading to be a real problem.
return;
}
int foundCacheEntryCount = 0;
int missingCacheEntryCount = 0;
boolean forceBatch = false;
// Group the nodes by store so that the query can filter on store_id directly rather than relying on the store join for performance
Map<StoreRef, List<String>> uuidsByStore = new HashMap<StoreRef, List<String>>(3);
for (NodeRef nodeRef : nodeRefs)
{
if (!forceBatch)
{
// Is this node in the cache?
if (nodesCache.getKey(nodeRef) != null)
{
foundCacheEntryCount++; // Don't add it to the batch
continue;
}
else
{
missingCacheEntryCount++; // Fall through and add it to the batch
}
if ((foundCacheEntryCount + missingCacheEntryCount) % 100 == 0)    // check the ratio every 100 nodes
{
// We force the batch if the number of hits drops below the number of misses
forceBatch = foundCacheEntryCount < missingCacheEntryCount;
}
}
StoreRef storeRef = nodeRef.getStoreRef();
List<String> uuids = (List<String>) uuidsByStore.get(storeRef);
if (uuids == null)
{
uuids = new ArrayList<String>(nodeRefs.size());
uuidsByStore.put(storeRef, uuids);
}
uuids.add(nodeRef.getId());
}
int size = nodeRefs.size();
nodeRefs = null;
// Now load all the nodes
for (Map.Entry<StoreRef, List<String>> entry : uuidsByStore.entrySet())
{
StoreRef storeRef = entry.getKey();
List<String> uuids = entry.getValue();
cacheNodes(storeRef, uuids);
}
if (logger.isDebugEnabled())
{
logger.debug("Pre-loaded " + size + " nodes.");
}
}
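
As a worked example of the warm-cache heuristic above (illustrative numbers only):

// After checking 100 NodeRefs: 20 cache hits vs 80 misses means this part of
// the repository is probably not cached yet, so stop checking and just batch.
int foundCacheEntryCount = 20;
int missingCacheEntryCount = 80;
boolean forceBatch = false;
if ((foundCacheEntryCount + missingCacheEntryCount) % 100 == 0)     // 100 % 100 == 0
{
    forceBatch = foundCacheEntryCount < missingCacheEntryCount;     // 20 < 80, so true
}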
/**
* Loads the nodes into cache using batching.
*/
private void cacheNodes(StoreRef storeRef, List<String> uuids)
{
StoreEntity store = getStoreNotNull(storeRef);
Long storeId = store.getId();
int batchSize = 256;
SortedSet<String> batch = new TreeSet<String>();
for (String uuid : uuids)
{
batch.add(uuid);
if (batch.size() >= batchSize)
{
// Preload
cacheNodesNoBatch(storeId, batch);
batch.clear();
}
}
// Load any remaining nodes
if (batch.size() > 0)
{
cacheNodesNoBatch(storeId, batch);
}
}
/**
* Bulk-fetch the nodes for a given store. All nodes passed in are fetched.
*/
private void cacheNodesNoBatch(Long storeId, SortedSet<String> uuids)
{
// Get the nodes
List<NodeEntity> nodes = selectNodesByUuids(storeId, uuids);
SortedSet<Long> nodeIds = new TreeSet<Long>();
for (NodeEntity node : nodes)
{
Long nodeId = node.getId();
nodesCache.setValue(nodeId, node);
if (propertiesCache.getValue(nodeId) == null)
{
nodeIds.add(nodeId);
}
}
Map<Long, Map<NodePropertyKey, NodePropertyValue>> propsByNodeId = selectNodeProperties(nodeIds);
for (Map.Entry<Long, Map<NodePropertyKey, NodePropertyValue>> entry : propsByNodeId.entrySet())
{
Long nodeId = entry.getKey();
Map<NodePropertyKey, NodePropertyValue> propertyValues = entry.getValue();
Map<QName, Serializable> props = nodePropertyHelper.convertToPublicProperties(propertyValues);
propertiesCache.setValue(nodeId, props);
}
}
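
The commit message notes that aspects are not yet preloaded, so hasAspect still causes N+1 queries during directory listings. A rough sketch of how cacheNodesNoBatch might be extended, assuming a bulk aspect select, an aspects cache and a QNameDAO id lookup analogous to the property handling above (all three are assumptions, not part of this commit):

// Hypothetical: bulk-select aspect QName IDs for the same node IDs and prime
// an aspects cache so that later hasAspect() calls avoid per-node queries.
Map<Long, Set<Long>> aspectIdsByNodeId = selectNodeAspects(nodeIds);        // assumed bulk query
for (Map.Entry<Long, Set<Long>> entry : aspectIdsByNodeId.entrySet())
{
    Set<QName> aspectQNames = new HashSet<QName>(entry.getValue().size());
    for (Long qnameId : entry.getValue())
    {
        aspectQNames.add(qnameDAO.getQName(qnameId).getSecond());           // assumed QNameDAO lookup
    }
    aspectsCache.setValue(entry.getKey(), aspectQNames);                    // assumed cache field
}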
/**
@@ -2850,6 +3065,8 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
protected abstract int deleteNodeById(Long nodeId, boolean deletedOnly);
protected abstract NodeEntity selectNodeById(Long id, Boolean deleted);
protected abstract NodeEntity selectNodeByNodeRef(NodeRef nodeRef, Boolean deleted);
protected abstract List<NodeEntity> selectNodesByUuids(Long storeId, SortedSet<String> uuids);
protected abstract Map<Long, Map<NodePropertyKey, NodePropertyValue>> selectNodeProperties(Set<Long> nodeIds);
protected abstract Map<NodePropertyKey, NodePropertyValue> selectNodeProperties(Long nodeId);
protected abstract Map<NodePropertyKey, NodePropertyValue> selectNodeProperties(Long nodeId, Set<Long> qnameIds);
protected abstract int deleteNodeProperties(Long nodeId, Set<Long> qnameIds);

View File

@@ -310,6 +310,11 @@ public interface NodeDAO extends NodeBulkLoader
* @return Return <tt>true</tt> if caching of the results is required
*/
boolean preLoadNodes();
/**
* Called once the iteration of results has concluded
*/
void done();
}
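
With done() added to the callback contract (and preLoadNodes() already present), a caller that wants its child nodes bulk-cached only has to say so; the batching wrapper in AbstractNodeDAOImpl takes care of the rest. A minimal sketch (the results collection is illustrative):

final List<ChildAssociationRef> results = new ArrayList<ChildAssociationRef>();
ChildAssocRefQueryCallback callback = new ChildAssocRefQueryCallback()
{
    public boolean preLoadNodes()
    {
        return true;                                    // ask the DAO to bulk-cache the child nodes
    }
    public boolean handle(
            Pair<Long, ChildAssociationRef> childAssocPair,
            Pair<Long, NodeRef> parentNodePair,
            Pair<Long, NodeRef> childNodePair)
    {
        results.add(childAssocPair.getSecond());        // collect the association refs
        return true;                                    // keep iterating
    }
    public void done()
    {
        // Nothing to do; preloading happens in the batching wrapper
    }
};
nodeDAO.getChildAssocs(parentNodeId, null, null, null, null, null, callback);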
/**

View File

@@ -39,6 +39,8 @@ public class NodePropertyEntity
private NodePropertyValue value;
/** Carries data for queries and updates */
private List<Long> qnameIds;
/** Carries data for queries */
private List<Long> nodeIds;
/**
* Required default constructor
@@ -92,4 +94,14 @@ public class NodePropertyEntity
{
this.qnameIds = qnameIds;
}
public List<Long> getNodeIds()
{
return nodeIds;
}
public void setNodeIds(List<Long> nodeIds)
{
this.nodeIds = nodeIds;
}
}

View File

@@ -0,0 +1,57 @@
/*
* Copyright (C) 2005-2010 Alfresco Software Limited.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
* As a special exception to the terms and conditions of version 2.0 of
* the GPL, you may redistribute this Program in connection with Free/Libre
* and Open Source Software ("FLOSS") applications as described in Alfresco's
* FLOSS exception. You should have received a copy of the text describing
* the FLOSS exception, and it is also available here:
* http://www.alfresco.com/legal/licensing"
*/
package org.alfresco.repo.domain.node.ibatis;
import java.util.List;
/**
* Bean to carry query information for node batch loading.
*
* @author Derek Hulley
* @since 3.4
*/
public class NodeBatchLoadEntity
{
private Long storeId;
private List<String> uuids;
public Long getStoreId()
{
return storeId;
}
public void setStoreId(Long storeId)
{
this.storeId = storeId;
}
public List<String> getUuids()
{
return uuids;
}
public void setUuids(List<String> uuids)
{
this.uuids = uuids;
}
}

View File

@@ -33,6 +33,7 @@ import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedSet;
import org.alfresco.repo.domain.node.AbstractNodeDAOImpl;
import org.alfresco.repo.domain.node.ChildAssocEntity;
@@ -86,6 +87,7 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
private static final String DELETE_NODE_BY_ID = "alfresco.node.delete_NodeById";
private static final String SELECT_NODE_BY_ID = "alfresco.node.select_NodeById";
private static final String SELECT_NODE_BY_NODEREF = "alfresco.node.select_NodeByNodeRef";
private static final String SELECT_NODES_BY_UUIDS = "alfresco.node.select_NodesByUuids";
private static final String SELECT_NODE_PROPERTIES = "alfresco.node.select_NodeProperties";
private static final String INSERT_NODE_PROPERTY = "alfresco.node.insert_NodeProperty";
private static final String UPDATE_PRIMARY_CHILDREN_SHARED_ACL = "alfresco.node.update_PrimaryChildrenSharedAcl";
@@ -338,31 +340,40 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
return (NodeEntity) template.queryForObject(SELECT_NODE_BY_NODEREF, node);
}
@SuppressWarnings("unchecked")
@Override
protected List<NodeEntity> selectNodesByUuids(Long storeId, SortedSet<String> uuids)
{
NodeBatchLoadEntity nodeBatchLoadEntity = new NodeBatchLoadEntity();
nodeBatchLoadEntity.setStoreId(storeId);
nodeBatchLoadEntity.setUuids(new ArrayList<String>(uuids));
return (List<NodeEntity>) template.queryForList(SELECT_NODES_BY_UUIDS, nodeBatchLoadEntity);
}
/**
* Pull out the key-value pairs from the rows
*/
private Map<NodePropertyKey, NodePropertyValue> makePersistentPropertiesMap(List<NodePropertyEntity> rows)
private Map<Long, Map<NodePropertyKey, NodePropertyValue>> makePersistentPropertiesMap(List<NodePropertyEntity> rows)
{
Long nodeId = null;
Map<NodePropertyKey, NodePropertyValue> props = new HashMap<NodePropertyKey, NodePropertyValue>(27);
Map<Long, Map<NodePropertyKey, NodePropertyValue>> results = new HashMap<Long, Map<NodePropertyKey, NodePropertyValue>>(3);
for (NodePropertyEntity row : rows)
{
if (row.getNodeId() == null)
Long nodeId = row.getNodeId();
if (nodeId == null)
{
throw new RuntimeException("Expect results with a Node ID: " + row);
}
if (nodeId == null)
Map<NodePropertyKey, NodePropertyValue> props = results.get(nodeId);
if (props == null)
{
nodeId = row.getNodeId();
}
else if (!nodeId.equals(row.getNodeId()))
{
throw new RuntimeException("Results can only be interpreted for a single node.");
props = new HashMap<NodePropertyKey, NodePropertyValue>(17);
results.put(nodeId, props);
}
props.put(row.getKey(), row.getValue());
}
// Done
return props;
return results;
}
/**
@@ -383,12 +394,27 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
return rows;
}
@Override
@SuppressWarnings("unchecked")
protected Map<Long, Map<NodePropertyKey, NodePropertyValue>> selectNodeProperties(Set<Long> nodeIds)
{
if (nodeIds.size() == 0)
{
return Collections.emptyMap();
}
NodePropertyEntity prop = new NodePropertyEntity();
prop.setNodeIds(new ArrayList<Long>(nodeIds));
List<NodePropertyEntity> rows = template.queryForList(SELECT_NODE_PROPERTIES, prop);
return makePersistentPropertiesMap(rows);
}
@Override
protected Map<NodePropertyKey, NodePropertyValue> selectNodeProperties(Long nodeId)
{
return selectNodeProperties(nodeId, Collections.<Long>emptySet());
}
@SuppressWarnings("unchecked")
@Override
@SuppressWarnings("unchecked")
protected Map<NodePropertyKey, NodePropertyValue> selectNodeProperties(Long nodeId, Set<Long> qnameIds)
{
NodePropertyEntity prop = new NodePropertyEntity();
@@ -409,7 +435,16 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
}
List<NodePropertyEntity> rows = template.queryForList(SELECT_NODE_PROPERTIES, prop);
return makePersistentPropertiesMap(rows);
Map<Long, Map<NodePropertyKey, NodePropertyValue>> results = makePersistentPropertiesMap(rows);
Map<NodePropertyKey, NodePropertyValue> props = results.get(nodeId);
if (props == null)
{
return Collections.emptyMap();
}
else
{
return props;
}
}
@Override
@@ -862,6 +897,7 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
ChildAssocRowHandler rowHandler = new ChildAssocRowHandler(resultsCallback);
template.queryWithRowHandler(SELECT_CHILD_ASSOCS_OF_PARENT, assoc, rowHandler);
resultsCallback.done();
}
@Override
@@ -885,6 +921,7 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
ChildAssocRowHandler rowHandler = new ChildAssocRowHandler(resultsCallback);
template.queryWithRowHandler(SELECT_CHILD_ASSOCS_OF_PARENT, assoc, rowHandler);
resultsCallback.done();
}
@Override
@@ -960,6 +997,7 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
ChildAssocRowHandler rowHandler = new ChildAssocRowHandler(filter, resultsCallback);
template.queryWithRowHandler(SELECT_CHILD_ASSOCS_OF_PARENT, assoc, rowHandler);
resultsCallback.done();
}
@Override
@@ -983,6 +1021,7 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
ChildAssocRowHandler rowHandler = new ChildAssocRowHandler(resultsCallback);
template.queryWithRowHandler(SELECT_CHILD_ASSOCS_OF_PARENT, assoc, rowHandler);
resultsCallback.done();
}
@Override
@@ -1004,6 +1043,7 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
ChildAssocRowHandler rowHandler = new ChildAssocRowHandler(resultsCallback);
template.queryWithRowHandler(SELECT_CHILD_ASSOCS_OF_PARENT_WITHOUT_PARENT_ASSOCS_OF_TYPE, assoc, rowHandler);
resultsCallback.done();
}
@SuppressWarnings("unchecked")
@@ -1058,6 +1098,7 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl
ChildAssocRowHandler rowHandler = new ChildAssocRowHandler(resultsCallback);
template.queryWithRowHandler(SELECT_PARENT_ASSOCS_OF_CHILD, assoc, rowHandler);
resultsCallback.done();
}
@SuppressWarnings("unchecked")

View File

@@ -792,6 +792,10 @@ public class DbNodeServiceImpl extends AbstractNodeServiceImpl
{
return true;
}
public void done()
{
}
};
// Get all the QNames to remove
Set<QName> assocTypeQNamesToRemove = new HashSet<QName>(aspectDef.getChildAssociations().keySet());
@@ -987,6 +991,10 @@ public class DbNodeServiceImpl extends AbstractNodeServiceImpl
{
return true;
}
public void done()
{
}
};
// Get all the QNames to remove
@@ -1134,6 +1142,10 @@ public class DbNodeServiceImpl extends AbstractNodeServiceImpl
{
return true;
}
public void done()
{
}
};
nodeDAO.getChildAssocs(parentNodeId, childNodeId, null, null, null, null, callback);
@@ -1482,6 +1494,10 @@ public class DbNodeServiceImpl extends AbstractNodeServiceImpl
results.add(childAssocPair.getSecond());
return true;
}
public void done()
{
}
};
// Get the assocs pointing to it
@@ -1539,6 +1555,10 @@ public class DbNodeServiceImpl extends AbstractNodeServiceImpl
results.add(childAssocPair.getSecond());
return true;
}
public void done()
{
}
};
// Get the assocs pointing to it
@@ -1576,6 +1596,10 @@ public class DbNodeServiceImpl extends AbstractNodeServiceImpl
{
return true;
}
public void done()
{
}
};
// Get all child associations with the specific qualified name
nodeDAO.getChildAssocsByChildTypes(nodeId, childNodeTypeQNames, callback);
@@ -1650,6 +1674,10 @@ public class DbNodeServiceImpl extends AbstractNodeServiceImpl
{
return true;
}
public void done()
{
}
};
// Get all child associations with the specific qualified name
nodeDAO.getChildAssocs(nodeId, assocTypeQName, childNames, callback);
@@ -1724,6 +1752,10 @@ public class DbNodeServiceImpl extends AbstractNodeServiceImpl
{
return false;
}
public void done()
{
}
};
// Get the child associations that meet the criteria
@@ -2076,6 +2108,10 @@ public class DbNodeServiceImpl extends AbstractNodeServiceImpl
{
return true;
}
public void done()
{
}
};
// We only need to move child nodes that are not already in the same store
nodeDAO.getChildAssocs(nodeId, null, null, null, Boolean.TRUE, Boolean.FALSE, callback);