Merged V3.2 to HEAD

16937: Merged V3.2 to V3.1
      16891: Merged V2.2 to V3.1
         16772: Fix unreported bugs found by new code (ArgumentHelper)
         16773: AlfrescoJobExecutor thread is now a 'daemon' thread
         16774: Increases sizes of 'parent assocs' and 'NodeRef-ID' caches
         16775: Session L1 cache size improvements
         16777: Transactional cache issues warning when it overflows
         16779: Fixed ETHREEOH-2657: Performance: slow answers to directory listings
         16797: Set AVM L1 Hibernate object retention to 0
         16829: Read vs Write split in Session size management
         16834: Build fix for SessionSizeManagementTest
___________________________________________________________________
Modified: svn:mergeinfo
   Merged /alfresco/BRANCHES/V2.2:r16772-16775,16777,16779,16797,16829,16834
   Merged /alfresco/BRANCHES/V3.1:r16891
   Merged /alfresco/BRANCHES/V3.2:r16937


git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/alfresco/HEAD/root@17018 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
Author: Derek Hulley
Date:   2009-10-19 11:48:23 +00:00
parent d809498e5a
commit 733d27742b
16 changed files with 621 additions and 99 deletions

TransactionalCache.java

@@ -405,6 +405,27 @@ public class TransactionalCache<K extends Serializable, V extends Object>
" value: " + value);
}
}
// we have a transaction - add the item into the updated cache for this transaction
// are we in an overflow condition?
if (txnData.updatedItemsCache.getMemoryStoreSize() >= maxCacheSize)
{
// overflow about to occur or has occurred - we can only guarantee non-stale
// data by clearing the shared cache after the transaction. Also, the
// shared cache needs to be ignored for the rest of the transaction.
txnData.isClearOn = true;
if (!txnData.haveIssuedFullWarning && logger.isWarnEnabled())
{
logger.warn("Transactional update cache '" + name + "' is full (" + maxCacheSize + ").");
txnData.haveIssuedFullWarning = true;
}
}
CacheBucket<V> bucket = null;
if (sharedCache.contains(key))
{
V existingValue = sharedCache.get(key);
// The value needs to be kept for later checks
bucket = new UpdateCacheBucket<V>(existingValue, value);
}
else
{
// we have an active transaction - add the item into the updated cache for this transaction
@@ -416,7 +437,6 @@ public class TransactionalCache<K extends Serializable, V extends Object>
// shared cache needs to be ignored for the rest of the transaction.
txnData.isClearOn = true;
}
CacheBucket<V> bucket = null;
if (sharedCache.contains(key))
{
V existingValue = sharedCache.get(key);
@@ -484,7 +504,15 @@ public class TransactionalCache<K extends Serializable, V extends Object>
// is the shared cache going to be cleared?
if (txnData.isClearOn)
{
// don't store removals if we're just going to clear it all out later
// overflow about to occur or has occurred - we can only guarantee non-stale
// data by clearing the shared cache after the transaction. Also, the
// shared cache needs to be ignored for the rest of the transaction.
txnData.isClearOn = true;
if (!txnData.haveIssuedFullWarning && logger.isWarnEnabled())
{
logger.warn("Transactional update cache '" + name + "' is full (" + maxCacheSize + ").");
txnData.haveIssuedFullWarning = true;
}
}
else
{
@@ -806,6 +834,7 @@ public class TransactionalCache<K extends Serializable, V extends Object>
{
private Cache updatedItemsCache;
private Cache removedItemsCache;
private boolean haveIssuedFullWarning;
private boolean isClearOn;
private boolean isClosed;
}
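
The guard added above trips once per transaction: when the per-transaction update cache fills, the shared cache is marked for a post-commit clear and a single warning is logged. A standalone sketch of that warn-once overflow pattern; TxnData and the tiny cache here are stand-ins, not the Alfresco classes:

import java.util.HashMap;
import java.util.Map;

public class OverflowGuardSketch
{
    static class TxnData
    {
        final Map<String, Object> updatedItems = new HashMap<String, Object>();
        boolean isClearOn;              // shared cache must be cleared after the txn
        boolean haveIssuedFullWarning;  // warn once per transaction
    }

    private final int maxCacheSize = 2; // deliberately tiny for the demo
    private final TxnData txnData = new TxnData();

    public void put(String key, Object value)
    {
        if (txnData.updatedItems.size() >= maxCacheSize)
        {
            // Overflow: updates can no longer be tracked reliably, so the
            // shared cache is ignored for the rest of the transaction.
            txnData.isClearOn = true;
            if (!txnData.haveIssuedFullWarning)
            {
                System.err.println("Transactional update cache is full (" + maxCacheSize + ").");
                txnData.haveIssuedFullWarning = true;
            }
            return;
        }
        txnData.updatedItems.put(key, value);
    }

    public static void main(String[] args)
    {
        OverflowGuardSketch cache = new OverflowGuardSketch();
        for (int i = 0; i < 5; i++)
        {
            cache.put("key" + i, i); // the warning fires exactly once
        }
    }
}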

Node.hbm.xml

@@ -58,9 +58,9 @@
class="org.alfresco.repo.domain.hibernate.StoreImpl"
column="store_id"
not-null="true"
lazy="false"
foreign-key="fk_alf_node_store"
fetch="join" />
lazy="proxy"
fetch="select" />
<!-- the store-unique identifier -->
<property name="uuid" column="uuid" type="string" length="36" />
</natural-id>
@@ -97,9 +97,8 @@
<map
name="properties"
table="alf_node_properties"
lazy="true"
fetch="select"
batch-size="128"
lazy="false"
fetch="join"
sort="unsorted"
inverse="false"
optimistic-lock="false"
@@ -125,9 +124,8 @@
<set
name="aspects"
table="alf_node_aspects"
lazy="true"
fetch="select"
batch-size="128"
lazy="false"
fetch="join"
inverse="false"
sort="unsorted"
optimistic-lock="false"
@@ -270,9 +268,6 @@
join assoc.child as child
where
child.id = :childId
order by
assoc.index,
assoc.id
</query>
<query name="node.DeleteParentAssocs">

SessionSizeResourceManager.java

@@ -30,6 +30,7 @@ import java.util.Map;
import java.util.Set;
import org.alfresco.repo.transaction.AlfrescoTransactionSupport;
import org.alfresco.repo.transaction.AlfrescoTransactionSupport.TxnReadState;
import org.alfresco.util.resource.MethodResourceManager;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -60,7 +61,11 @@ public class SessionSizeResourceManager extends HibernateDaoSupport implements M
private static Log logger = LogFactory.getLog(SessionSizeResourceManager.class);
/** Default 1000 */
private int threshold;
private int writeThreshold;
/** Default 10000 */
private int readThreshold;
/** Default 3 */
private int retentionFactor;
/**
* Disable resource management for the duration of the current transaction. This is temporary
@@ -104,21 +109,58 @@ public class SessionSizeResourceManager extends HibernateDaoSupport implements M
*/
public SessionSizeResourceManager()
{
this.threshold = 1000;
this.writeThreshold = 1000;
this.readThreshold = 10000;
this.retentionFactor = 3;
}
/**
* Set the {@link Session#clear()} threshold. If the number of entities and collections in the
* current session exceeds this number, then the session will be cleared. Have you read the
* disclaimer?
*
* @param threshold the maximum number of entities and associations to keep in memory
*
* @see #threshold
* Set the number of entities retained in the session for each one flushed; default 3.
* Set this to zero to remove all entities when the session is trimmed.
*
* @param retentionFactor the number of entities to keep for each entity removed
*/
public void setThreshold(int threshold)
public void setRetentionFactor(int retentionFactor)
{
this.threshold = threshold;
this.retentionFactor = retentionFactor;
}
/**
* Set the {@link Session#clear()} threshold for read-only transactions.
* If the number of entities and collections in the current session exceeds this number,
* then the session will be cleared.
* <p/>
* Have you read the disclaimer?
*
* @param threshold the maximum number of entities and associations to keep in memory during read-only operations
*
* @see #writeThreshold
*/
public void setReadThreshold(int threshold)
{
this.readThreshold = threshold;
}
/**
* Set the {@link Session#clear()} threshold for read-write transactions.
* If the number of entities and collections in the current session exceeds this number,
* then the session will be cleared.
* <p/>
* Have you read the disclaimer?
*
* @param threshold the maximum number of entities and associations to keep in memory during write operations
*
* @see #writeThreshold
*/
public void setWriteThreshold(int threshold)
{
this.writeThreshold = threshold;
}
public static final String KEY_COMMIT_STARTED = "SessionSizeResourceManager.commitStarted";
public static void setCommitStarted()
{
AlfrescoTransactionSupport.bindResource(KEY_COMMIT_STARTED, Boolean.TRUE);
}
public void manageResources(
@@ -131,6 +173,15 @@ public class SessionSizeResourceManager extends HibernateDaoSupport implements M
// Don't do anything
return;
}
int threshold = writeThreshold;
int retentionFactor = 0;
Boolean commitStarted = (Boolean) AlfrescoTransactionSupport.getResource(KEY_COMMIT_STARTED);
if (commitStarted != null ||
AlfrescoTransactionSupport.getTransactionReadState() == TxnReadState.TXN_READ_ONLY)
{
threshold = readThreshold;
retentionFactor = this.retentionFactor; // Retain objects during read-only phase only
}
// We are go for interfering
Session session = getSession(false);
SessionStatistics stats = session.getStatistics();
@@ -139,14 +190,15 @@ public class SessionSizeResourceManager extends HibernateDaoSupport implements M
if ((entityCount + collectionCount) > threshold)
{
DirtySessionMethodInterceptor.flushSession(session, true);
selectivelyClear(session, stats);
selectivelyClear(session, stats, retentionFactor);
// session.clear();
if (logger.isDebugEnabled())
{
String msg = String.format(
"Cleared %5d entities and %5d collections from Hibernate Session",
"Cleared %5d entities and %5d collections from Hibernate Session (threshold %5d)",
entityCount,
collectionCount);
collectionCount,
threshold);
logger.debug(msg);
}
}
@@ -160,27 +212,51 @@ public class SessionSizeResourceManager extends HibernateDaoSupport implements M
public static void clear(Session session)
{
SessionStatistics stats = session.getStatistics();
selectivelyClear(session, stats);
selectivelyClear(session, stats, 0);
}
@SuppressWarnings("unchecked")
private static void selectivelyClear(Session session, SessionStatistics stats)
private static void selectivelyClear(Session session, SessionStatistics stats, int retentionFactor)
{
if (logger.isDebugEnabled())
{
logger.debug(stats);
}
Set<EntityKey> keys = new HashSet<EntityKey>((Set<EntityKey>)stats.getEntityKeys());
int retentionCount = 0;
for (EntityKey key : keys)
{
// This should probably be configurable but frankly the nauseous extrusion of Gavin King's
// programmatic alimentary tract (hibernate) will go away before this could make a difference.
String entityName = key.getEntityName();
if (!entityName.startsWith("org.alfresco") || entityName.startsWith("org.alfresco.repo.workflow.jbpm"))
if (!entityName.startsWith("org.alfresco"))
{
// Leave non-Alfresco entities alone. JBPM bugs arise due to inconsistent flushing here.
continue;
}
else if (entityName.startsWith("org.alfresco.repo.workflow.jbpm"))
{
// Once again, JBPM flushing issues prevent us from throwing related entities away
continue;
}
else if (entityName.startsWith("org.alfresco.repo.domain.hibernate.QName"))
{
// QNames are heavily used
continue;
}
else if (entityName.startsWith("org.alfresco.repo.domain.hibernate.Store"))
{
// So are Stores
continue;
}
// Do we evict or retain?
if (retentionCount < retentionFactor)
{
retentionCount++;
continue;
}
retentionCount = 0;
// Evict this instance from the session
Object val = session.get(key.getEntityName(), key.getIdentifier());
if (val != null)
{
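
The eviction loop above retains retentionFactor entities for every one it evicts, so the default of 3 trims roughly a quarter of the eligible entities on each pass (and a factor of 0 evicts them all). A standalone sketch of just that cadence; the entities are simulated strings, but the counter logic mirrors the code above:

import java.util.ArrayList;
import java.util.List;

public class RetentionCadenceSketch
{
    public static void main(String[] args)
    {
        int retentionFactor = 3; // keep 3, evict 1, repeat
        int retentionCount = 0;
        List<String> evicted = new ArrayList<String>();
        for (int i = 0; i < 12; i++)
        {
            String entity = "entity-" + i;
            if (retentionCount < retentionFactor)
            {
                retentionCount++; // retain this one in the session
                continue;
            }
            retentionCount = 0;
            evicted.add(entity);  // evict this one
        }
        // Prints 3: with retentionFactor=3, a quarter of the entities go
        System.out.println("Evicted " + evicted.size() + " of 12");
    }
}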

FileFolderPerformanceTester.java

@@ -25,6 +25,7 @@
package org.alfresco.repo.model.filefolder;
import java.io.File;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
@@ -35,6 +36,8 @@ import org.alfresco.error.AlfrescoRuntimeException;
import org.alfresco.model.ContentModel;
import org.alfresco.repo.content.transform.AbstractContentTransformerTest;
import org.alfresco.repo.security.authentication.AuthenticationComponent;
import org.alfresco.repo.security.authentication.AuthenticationUtil;
import org.alfresco.repo.security.authentication.AuthenticationUtil.RunAsWork;
import org.alfresco.repo.transaction.RetryingTransactionHelper;
import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
import org.alfresco.service.ServiceRegistry;
@@ -47,11 +50,17 @@ import org.alfresco.service.cmr.repository.NodeService;
import org.alfresco.service.cmr.repository.StoreRef;
import org.alfresco.service.cmr.search.ResultSet;
import org.alfresco.service.cmr.search.SearchService;
import org.alfresco.service.cmr.security.AuthenticationService;
import org.alfresco.service.cmr.security.PermissionService;
import org.alfresco.service.namespace.QName;
import org.alfresco.service.transaction.TransactionService;
import org.alfresco.util.ApplicationContextHelper;
import org.alfresco.util.ArgumentHelper;
import org.alfresco.util.GUID;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ConfigurableApplicationContext;
/**
* Tests around some of the data structures that lead to performance
@@ -375,4 +384,187 @@ public class FileFolderPerformanceTester extends TestCase
// 50000,
// new double[] {0.01, 0.02, 0.03, 0.04, 0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90});
// }
/**
* Create a bunch of files and folders in a folder and then run multi-threaded directory
* listings against it.
*
* @param args --files=<x> --threads=<y> where 'x' is the number of files in a folder
*             and 'y' is the number of threads listing it
*/
public static void main(String ... args)
{
ConfigurableApplicationContext ctx = (ConfigurableApplicationContext) ApplicationContextHelper.getApplicationContext();
try
{
run(ctx, args);
}
catch (Throwable e)
{
System.out.println("Failed to run CifsHelper performance test");
e.printStackTrace();
}
finally
{
ctx.close();
}
}
private static void run(ApplicationContext ctx, String ... args) throws Throwable
{
ArgumentHelper argHelper = new ArgumentHelper(getUsage(), args);
final int fileCount = argHelper.getIntegerValue("files", true, 1, 10000);
final String folderRefStr = argHelper.getStringValue("folder", false, true);
final int threadCount = argHelper.getIntegerValue("threads", false, 1, 100);
final NodeRef selectedFolderNodeRef = folderRefStr == null ? null : new NodeRef(folderRefStr);
ServiceRegistry serviceRegistry = (ServiceRegistry) ctx.getBean(ServiceRegistry.SERVICE_REGISTRY);
final AuthenticationService authenticationService = serviceRegistry.getAuthenticationService();
final PermissionService permissionService = serviceRegistry.getPermissionService();
final NodeService nodeService = serviceRegistry.getNodeService();
final TransactionService transactionService = serviceRegistry.getTransactionService();
final FileFolderService fileFolderService = serviceRegistry.getFileFolderService();
RunAsWork<String> createUserRunAs = new RunAsWork<String>()
{
public String doWork() throws Exception
{
String user = GUID.generate();
authenticationService.createAuthentication(user, user.toCharArray());
return user;
}
};
final String user = AuthenticationUtil.runAs(createUserRunAs, AuthenticationUtil.getSystemUserName());
// Create the files
final RetryingTransactionCallback<NodeRef> createCallback = new RetryingTransactionCallback<NodeRef>()
{
public NodeRef execute() throws Throwable
{
AuthenticationUtil.pushAuthentication();
NodeRef folderNodeRef = null;
try
{
AuthenticationUtil.setFullyAuthenticatedUser(AuthenticationUtil.getSystemUserName());
if (selectedFolderNodeRef == null)
{
// Create a new store
StoreRef storeRef = nodeService.createStore(StoreRef.PROTOCOL_WORKSPACE, GUID.generate());
NodeRef rootNodeRef = nodeService.getRootNode(storeRef);
// Create a folder
folderNodeRef = nodeService.createNode(
rootNodeRef,
ContentModel.ASSOC_CHILDREN,
ContentModel.ASSOC_CHILDREN,
ContentModel.TYPE_FOLDER,
Collections.<QName, Serializable>singletonMap(ContentModel.PROP_NAME, "TOP FOLDER")
).getChildRef();
// Grant permissions
permissionService.setPermission(folderNodeRef, user, PermissionService.ALL_PERMISSIONS, true);
}
else
{
folderNodeRef = selectedFolderNodeRef;
// Grant permissions
permissionService.setPermission(folderNodeRef, user, PermissionService.ALL_PERMISSIONS, true);
System.out.println("Reusing folder " + folderNodeRef);
}
}
finally
{
AuthenticationUtil.popAuthentication();
}
if (selectedFolderNodeRef == null)
{
// Create the files
for (int i = 0; i < fileCount; i++)
{
fileFolderService.create(
folderNodeRef,
String.format("FILE-%4d", i),
ContentModel.TYPE_CONTENT);
}
System.out.println("Created " + fileCount + " files in folder " + folderNodeRef);
}
// Done
return folderNodeRef;
}
};
RunAsWork<NodeRef> createRunAs = new RunAsWork<NodeRef>()
{
public NodeRef doWork() throws Exception
{
return transactionService.getRetryingTransactionHelper().doInTransaction(createCallback);
}
};
final NodeRef folderNodeRef = AuthenticationUtil.runAs(createRunAs, user);
// Now wait for some input before commencing the read run
System.out.print("Hit any key to commence directory listing ...");
System.in.read();
final RunAsWork<List<FileInfo>> readRunAs = new RunAsWork<List<FileInfo>>()
{
public List<FileInfo> doWork() throws Exception
{
return fileFolderService.search(folderNodeRef, "*", false);
}
};
Thread[] threads = new Thread[threadCount];
for (int i = 0; i < threadCount; i++)
{
Thread readThread = new Thread("FolderList-" + i)
{
int iteration = 0;
public void run()
{
while(++iteration <= 2)
{
runImpl();
}
}
private void runImpl()
{
String threadName = Thread.currentThread().getName();
long start = System.currentTimeMillis();
List<FileInfo> nodeRefs = AuthenticationUtil.runAs(readRunAs, user);
long time = System.currentTimeMillis() - start;
double average = (double) time / (double) (fileCount);
// Make sure that we have the correct number of entries
if (folderRefStr != null && nodeRefs.size() != fileCount)
{
System.err.println(
"WARNING: Thread " + threadName + " got " + nodeRefs.size() +
" but expected " + fileCount);
}
System.out.print("\n" +
"Thread " + threadName + ": \n" +
" Read " + String.format("%4d", fileCount) + " files \n" +
" Average: " + String.format("%10.2f", average) + " ms per file \n" +
" Average: " + String.format("%10.2f", 1000.0/average) + " files per second");
}
};
readThread.start();
threads[i] = readThread;
}
for (int i = 0; i < threads.length; i++)
{
threads[i].join();
}
}
private static String getUsage()
{
StringBuilder sb = new StringBuilder();
sb.append("FileFolderPerformanceTester usage: ").append("\n");
sb.append(" FileFolderPerformanceTester --files=<filecount> --threads=<threadcount> --folder=<folderref>").append("\n");
sb.append(" filecount: number of files in the folder").append("\n");
sb.append(" threadcount: number of threads to do the directory listing").append("\n");
return sb.toString();
}
}
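
Given the usage string above and the ranges ArgumentHelper enforces (1 to 10000 files, 1 to 100 threads, with --folder optional and taking the NodeRef of an existing folder), a run might be launched as below. The classpath placeholder is an assumption; the actual launch command depends on the local build:

java -cp <alfresco-classpath> org.alfresco.repo.model.filefolder.FileFolderPerformanceTester --files=1000 --threads=10

Each listing thread runs the directory search twice, reports per-file and files-per-second averages, and warns when a reused folder returns an unexpected entry count.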

NodeBulkLoader.java

@@ -0,0 +1,49 @@
/*
* Copyright (C) 2005-2007 Alfresco Software Limited.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
* As a special exception to the terms and conditions of version 2.0 of
* the GPL, you may redistribute this Program in connection with Free/Libre
* and Open Source Software ("FLOSS") applications as described in Alfresco's
* FLOSS exception. You should have received a copy of the text describing
* the FLOSS exception, and it is also available here:
* http://www.alfresco.com/legal/licensing"
*/
package org.alfresco.repo.node;
import java.util.List;
import org.alfresco.service.cmr.repository.NodeRef;
/**
* A component that pre-fetches cached data for the given nodes. Client code can use
* this component when a list of <code>NodeRef</code> instances will be processed in
* a data-intensive manner.
*
* @author Andy Hind
* @author Derek Hulley
*/
public interface NodeBulkLoader
{
/**
* Pre-cache data relevant to the given nodes. There is no need to split the collection
* up before calling this method; it is up to the implementations to ensure that batching
* is done where necessary.
*
* @param nodeRefs the nodes that will be cached.
*/
public void cacheNodes(List<NodeRef> nodeRefs);
}
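
A consumer simply hands over the whole list and lets the implementation batch. A hypothetical caller sketch; the injection wiring and processNode() are assumptions for illustration:

import java.util.List;
import org.alfresco.repo.node.NodeBulkLoader;
import org.alfresco.service.cmr.repository.NodeRef;

public class BulkLoadExample
{
    private NodeBulkLoader bulkLoader; // injected; wiring is assumed

    public void processAll(List<NodeRef> nodeRefs)
    {
        bulkLoader.cacheNodes(nodeRefs); // no need to split the list; implementations batch
        for (NodeRef nodeRef : nodeRefs)
        {
            processNode(nodeRef); // data-intensive work now hits warm caches
        }
    }

    private void processNode(NodeRef nodeRef)
    {
        // placeholder for the real per-node processing
    }
}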

HibernateNodeDaoServiceImpl.java

@@ -73,6 +73,7 @@ import org.alfresco.repo.domain.hibernate.ServerImpl;
import org.alfresco.repo.domain.hibernate.SessionSizeResourceManager;
import org.alfresco.repo.domain.hibernate.StoreImpl;
import org.alfresco.repo.domain.hibernate.TransactionImpl;
import org.alfresco.repo.node.NodeBulkLoader;
import org.alfresco.repo.node.db.NodeDaoService;
import org.alfresco.repo.policy.BehaviourFilter;
import org.alfresco.repo.security.authentication.AuthenticationUtil;
@@ -116,7 +117,9 @@ import org.alfresco.util.GUID;
import org.alfresco.util.Pair;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.hibernate.CacheMode;
import org.hibernate.Criteria;
import org.hibernate.FlushMode;
import org.hibernate.HibernateException;
import org.hibernate.LockMode;
import org.hibernate.ObjectNotFoundException;
@@ -136,7 +139,9 @@ import org.springframework.orm.hibernate3.support.HibernateDaoSupport;
*
* @author Derek Hulley
*/
public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements NodeDaoService, TransactionalDao
public class HibernateNodeDaoServiceImpl
extends HibernateDaoSupport
implements NodeDaoService, TransactionalDao, NodeBulkLoader
{
private static final String QUERY_GET_STORE_BY_ALL = "store.GetStoreByAll";
private static final String QUERY_GET_ALL_STORES = "store.GetAllStores";
@@ -2050,20 +2055,24 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
childNameUnique.getFirst());
// Add it to the cache
Set<Long> oldParentAssocIds = parentAssocsCache.get(childNode.getId());
if (oldParentAssocIds != null)
Set<Long> parentAssocIds = parentAssocsCache.get(childNode.getId());
if (parentAssocIds == null)
{
Set<Long> newParentAssocIds = new HashSet<Long>(oldParentAssocIds);
newParentAssocIds.add(assocId);
parentAssocsCache.put(childNodeId, newParentAssocIds);
if (isDebugParentAssocCacheEnabled)
{
loggerParentAssocsCache.debug("\n" +
"Parent associations cache - Updating entry: \n" +
" Node: " + childNodeId + "\n" +
" Before: " + oldParentAssocIds + "\n" +
" After: " + newParentAssocIds);
}
parentAssocIds = new HashSet<Long>(3);
}
else
{
// Copy the list when we add to it
parentAssocIds = new HashSet<Long>(parentAssocIds);
}
parentAssocIds.add(assocId);
parentAssocsCache.put(childNodeId, parentAssocIds);
if (isDebugParentAssocCacheEnabled)
{
loggerParentAssocsCache.debug("\n" +
"Parent associations cache - Updating entry: \n" +
" Node: " + childNodeId + "\n" +
" Assocs: " + parentAssocIds);
}
// If this is a primary association then update the permissions
@@ -2909,11 +2918,16 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
9 child.uuid
* </pre>
*/
@SuppressWarnings("unchecked")
private void convertToChildAssocRefs(Node parentNode, ScrollableResults results, ChildAssocRefQueryCallback resultsCallback)
{
Long parentNodeId = parentNode.getId();
NodeRef parentNodeRef = parentNode.getNodeRef();
Pair<Long, NodeRef> parentNodePair = new Pair<Long, NodeRef>(parentNodeId, parentNodeRef);
List<Object[]> callbackResults = new ArrayList<Object[]>(128);
List<NodeRef> childNodeRefs = new ArrayList<NodeRef>(128);
while (results.next())
{
Object[] row = results.get();
@@ -2956,8 +2970,134 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
continue;
}
}
// Call back
resultsCallback.handle(assocPair, parentNodePair, childNodePair);
callbackResults.add(new Object[] {assocPair, parentNodePair, childNodePair});
childNodeRefs.add(childNodeRef);
}
// Cache the nodes
cacheNodes(childNodeRefs);
// Pass results to callback
for (Object[] callbackResult : callbackResults)
{
resultsCallback.handle(
(Pair<Long, ChildAssociationRef>) callbackResult[0],
(Pair<Long, NodeRef>) callbackResult[1],
(Pair<Long, NodeRef>) callbackResult[2]);
}
}
/**
* {@inheritDoc}
* <p/>
* Loads properties, aspects, parent associations and the ID-noderef cache
*/
public void cacheNodes(List<NodeRef> nodeRefs)
{
// Group the nodes by store so that we don't *have* to eagerly join to store to get query performance
Map<StoreRef, List<String>> uuidsByStore = new HashMap<StoreRef, List<String>>(3);
for (NodeRef nodeRef : nodeRefs)
{
StoreRef storeRef = nodeRef.getStoreRef();
List<String> uuids = (List<String>) uuidsByStore.get(storeRef);
if (uuids == null)
{
uuids = new ArrayList<String>(nodeRefs.size());
uuidsByStore.put(storeRef, uuids);
}
uuids.add(nodeRef.getId());
}
int size = nodeRefs.size();
nodeRefs = null;
// Now load all the nodes
for (Map.Entry<StoreRef, List<String>> entry : uuidsByStore.entrySet())
{
StoreRef storeRef = entry.getKey();
List<String> uuids = entry.getValue();
cacheNodes(storeRef, uuids);
}
if (logger.isDebugEnabled())
{
logger.debug("Pre-loaded " + size + " nodes.");
}
}
/**
* Loads the nodes into cache using batching.
*/
private void cacheNodes(StoreRef storeRef, List<String> uuids)
{
Store store = getStore(storeRef); // Will be fetched from local caches
int batchSize = 256;
List<String> batch = new ArrayList<String>(128);
for (String uuid : uuids)
{
batch.add(uuid);
if (batch.size() >= batchSize)
{
// Preload
cacheNodesNoBatch(store, batch);
batch.clear();
}
}
// Load any remaining nodes
if (batch.size() > 0)
{
cacheNodesNoBatch(store, batch);
}
}
/**
* Uses a Criteria to preload the nodes without batching
*/
@SuppressWarnings("unchecked")
private void cacheNodesNoBatch(Store store, List<String> uuids)
{
Criteria criteria = getSession().createCriteria(NodeImpl.class, "node");
criteria.setResultTransformer(Criteria.ROOT_ENTITY);
criteria.add(Restrictions.eq("store.id", store.getId()));
criteria.add(Restrictions.in("uuid", uuids));
criteria.setCacheMode(CacheMode.PUT);
criteria.setFlushMode(FlushMode.MANUAL);
List<Node> nodeList = criteria.list();
List<Long> nodeIds = new ArrayList<Long>(nodeList.size());
for (Node node : nodeList)
{
Long nodeId = node.getId();
storeAndNodeIdCache.put(node.getNodeRef(), nodeId);
nodeIds.add(nodeId);
}
criteria = getSession().createCriteria(ChildAssocImpl.class, "parentAssoc");
criteria.setResultTransformer(Criteria.ROOT_ENTITY);
criteria.add(Restrictions.in("child.id", nodeIds));
criteria.setCacheMode(CacheMode.PUT);
criteria.setFlushMode(FlushMode.MANUAL);
List<ChildAssoc> parentAssocs = criteria.list();
for (ChildAssoc parentAssoc : parentAssocs)
{
Long nodeId = parentAssoc.getChild().getId();
Set<Long> parentAssocsOfNode = parentAssocsCache.get(nodeId);
if (parentAssocsOfNode == null)
{
parentAssocsOfNode = new HashSet<Long>(3);
}
else
{
parentAssocsOfNode = new HashSet<Long>(parentAssocsOfNode);
}
parentAssocsOfNode.add(parentAssoc.getId());
parentAssocsCache.put(nodeId, parentAssocsOfNode);
if (isDebugParentAssocCacheEnabled)
{
loggerParentAssocsCache.debug("\n" +
"Parent associations cache - Adding entry: \n" +
" Node: " + nodeId + "\n" +
" Assocs: " + parentAssocsOfNode);
}
}
}
@@ -3199,11 +3339,14 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
Set<Long> newParentAssocIds = new HashSet<Long>(oldParentAssocIds);
newParentAssocIds.remove(childAssocId);
parentAssocsCache.put(childNodeId, newParentAssocIds);
loggerParentAssocsCache.debug("\n" +
"Parent associations cache - Updating entry: \n" +
" Node: " + childNodeId + "\n" +
" Before: " + oldParentAssocIds + "\n" +
" After: " + newParentAssocIds);
if (this.isDebugParentAssocCacheEnabled)
{
loggerParentAssocsCache.debug("\n" +
"Parent associations cache - Updating entry: \n" +
" Node: " + childNodeId + "\n" +
" Before: " + oldParentAssocIds + "\n" +
" After: " + newParentAssocIds);
}
}
// maintain inverse association sets
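
cacheNodesNoBatch() above warms several caches in one pass: Hibernate's second-level cache via CacheMode.PUT, the NodeRef-to-ID cache, and the parent-assocs cache. The surrounding grouping and batching is a general pattern; a minimal, Hibernate-free sketch of the batching half, with illustrative names:

import java.util.ArrayList;
import java.util.List;

public class BatchSketch
{
    interface BatchWorker<T>
    {
        void process(List<T> batch);
    }

    // Mirrors cacheNodes(StoreRef, List<String>) above: fill a fixed-size
    // batch, flush it, and flush the remainder at the end.
    static <T> void inBatches(List<T> items, int batchSize, BatchWorker<T> worker)
    {
        List<T> batch = new ArrayList<T>(batchSize);
        for (T item : items)
        {
            batch.add(item);
            if (batch.size() >= batchSize)
            {
                worker.process(batch);
                batch.clear();
            }
        }
        if (!batch.isEmpty())
        {
            worker.process(batch); // remainder
        }
    }

    public static void main(String[] args)
    {
        List<Integer> ids = new ArrayList<Integer>();
        for (int i = 0; i < 600; i++)
        {
            ids.add(i);
        }
        inBatches(ids, 256, new BatchWorker<Integer>()
        {
            public void process(List<Integer> batch)
            {
                System.out.println("batch of " + batch.size()); // 256, 256, 88
            }
        });
    }
}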

AlfrescoTransactionSupport.java

@@ -36,6 +36,7 @@ import java.util.Set;
import org.alfresco.error.AlfrescoRuntimeException;
import org.alfresco.repo.cache.TransactionalCache;
import org.alfresco.repo.domain.hibernate.DirtySessionMethodInterceptor;
import org.alfresco.repo.domain.hibernate.SessionSizeResourceManager;
import org.alfresco.repo.node.integrity.IntegrityChecker;
import org.alfresco.repo.search.impl.lucene.LuceneIndexerAndSearcher;
import org.alfresco.util.GUID;
@@ -677,6 +678,10 @@ public abstract class AlfrescoTransactionSupport
// These are still considered part of the transaction so are executed here
doBeforeCommit(readOnly);
// HACK: In order to control Hibernate's flush behaviour, we mark the point at which
// we start read-only operations during a commit
SessionSizeResourceManager.setCommitStarted();
// Check integrity
for (IntegrityChecker integrityChecker : integrityCheckers)
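
The marker travels through the transaction resource map: setCommitStarted() binds KEY_COMMIT_STARTED, and manageResources() in the SessionSizeResourceManager diff above reads it back to switch to the read threshold and retention factor. A condensed, non-runnable view of that handshake, assembled from the two diffs:

// Producer: AlfrescoTransactionSupport, once beforeCommit work has run
SessionSizeResourceManager.setCommitStarted();
// which internally binds the flag:
//   AlfrescoTransactionSupport.bindResource(KEY_COMMIT_STARTED, Boolean.TRUE);

// Consumer: SessionSizeResourceManager.manageResources(...)
Boolean commitStarted = (Boolean) AlfrescoTransactionSupport.getResource(KEY_COMMIT_STARTED);
if (commitStarted != null ||
    AlfrescoTransactionSupport.getTransactionReadState() == TxnReadState.TXN_READ_ONLY)
{
    // read-only trimming applies: readThreshold and retentionFactor take effect
}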

AlfrescoJobExecutor.java

@@ -80,6 +80,7 @@ public class AlfrescoJobExecutor extends JobExecutor
Thread thread = new AlfrescoJobExecutorThread(threadName, this, jbpmConfiguration, getIdleInterval(), getMaxIdleInterval(), getMaxLockTime(), getHistoryMaxSize());
getThreads().put(threadName, thread);
log.debug("starting new job executor thread '" + threadName + "'");
thread.setDaemon(true);
thread.start();
}
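
Making the executor thread a daemon means it no longer blocks JVM shutdown: the JVM exits as soon as all non-daemon threads finish, instead of hanging on an idle job poller. A tiny standalone illustration of the difference:

public class DaemonSketch
{
    public static void main(String[] args)
    {
        Thread worker = new Thread("job-executor-sketch")
        {
            public void run()
            {
                while (true)
                {
                    try
                    {
                        Thread.sleep(1000); // simulate an idle polling loop
                    }
                    catch (InterruptedException e)
                    {
                        return;
                    }
                }
            }
        };
        worker.setDaemon(true); // must be called before start()
        worker.start();
        // main() returns here: with the daemon flag the JVM exits immediately;
        // without it, the idle worker would keep the JVM alive forever.
    }
}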