Merged V3.2 to HEAD

16937: Merged V3.2 to V3.1
      16891: Merged V2.2 to V3.1
         16772: Fix unreported bugs found by new code (ArgumentHelper)
         16773: AlfrescoJobExecutor thread is now a 'daemon' thread
         16774: Increases sizes of 'parent assocs' and 'NodeRef-ID' caches
         16775: Session L1 cache size improvements
         16777: Transactional cache issues warning when it overflows
         16779: Fixed ETHREEOH-2657: Performance: slow answers to directory listings
         16797: Set AVM L1 Hibernate object retention to 0
         16829: Read vs Write split in Session size management
         16834: Build fix for SessionSizeManagementTest
___________________________________________________________________
Modified: svn:mergeinfo
   Merged /alfresco/BRANCHES/V2.2:r16772-16775,16777,16779,16797,16829,16834
   Merged /alfresco/BRANCHES/V3.1:r16891
   Merged /alfresco/BRANCHES/V3.2:r16937


git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/alfresco/HEAD/root@17018 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
Author: Derek Hulley
Date:   2009-10-19 11:48:23 +00:00
Parent: d809498e5a
Commit: 733d27742b

16 changed files with 621 additions and 99 deletions

View File

@@ -22,9 +22,15 @@
         <property name="sessionFactory">
             <ref bean="sessionFactory" />
         </property>
-        <property name="threshold">
+        <property name="writeThreshold">
             <value>100</value>
         </property>
+        <property name="readThreshold">
+            <value>100</value>
+        </property>
+        <property name="retentionFactor">
+            <value>0</value>
+        </property>
     </bean>
     
     <bean id="attributeService" class="org.springframework.aop.framework.ProxyFactoryBean">

View File

@@ -274,5 +274,33 @@
             <value>workspace://SpacesStore</value>
         </property>
     </bean>
     
+    <bean id="avmSessionSizeResourceInterceptor" class="org.alfresco.repo.transaction.SingleEntryTransactionResourceInterceptor" >
+        <property name="methodResourceManagers">
+            <list>
+                <ref bean="avmSessionSizeResourceManager"></ref>
+            </list>
+        </property>
+        <property name="elapsedTimeBeforeActivationMillis">
+            <value>500</value>
+        </property>
+        <property name="resourceManagerCallFrequencyMillis">
+            <value>250</value>
+        </property>
+    </bean>
+    <bean id="avmSessionSizeResourceManager" class="org.alfresco.repo.domain.hibernate.SessionSizeResourceManager">
+        <property name="sessionFactory">
+            <ref bean="sessionFactory" />
+        </property>
+        <property name="writeThreshold">
+            <value>100</value>
+        </property>
+        <property name="readThreshold">
+            <value>100</value>
+        </property>
+        <property name="retentionFactor">
+            <value>0</value>
+        </property>
+    </bean>
 </beans>

View File

@@ -59,7 +59,7 @@
             <value>org.alfresco.cache.qnameEntityTransactionalCache</value>
         </property>
         <property name="maxCacheSize">
-            <value>100</value>
+            <value>500</value>
         </property>
     </bean>
@@ -275,7 +275,7 @@
             <value>org.alfresco.storeAndNodeIdTransactionalCache</value>
         </property>
         <property name="maxCacheSize">
-            <value>500</value>
+            <value>10000</value>
         </property>
     </bean>
@@ -311,7 +311,7 @@
             <value>org.alfresco.parentAssocsTransactionalCache</value>
         </property>
         <property name="maxCacheSize">
-            <value>1000</value>
+            <value>10000</value>
         </property>
     </bean>

View File

@@ -277,13 +277,13 @@
             />
     <cache
         name="org.alfresco.cache.storeAndNodeIdCache"
-        maxElementsInMemory="10000"
+        maxElementsInMemory="50000"
         eternal="true"
         overflowToDisk="false"
         />
     <cache
         name="org.alfresco.cache.parentAssocsCache"
-        maxElementsInMemory="10000"
+        maxElementsInMemory="50000"
         eternal="true"
         overflowToDisk="false"
         />

View File

@@ -537,7 +537,7 @@
     <cache
         name="org.alfresco.cache.storeAndNodeIdCache"
-        maxElementsInMemory="10000"
+        maxElementsInMemory="50000"
         eternal="true"
         overflowToDisk="false">
@@ -642,7 +642,7 @@
     <cache
         name="org.alfresco.cache.parentAssocsCache"
-        maxElementsInMemory="10000"
+        maxElementsInMemory="50000"
         eternal="true"
         overflowToDisk="false">

View File

@@ -410,6 +410,34 @@
         </property>
     </bean>
     
+    <bean id="sessionSizeResourceInterceptor" class="org.alfresco.repo.transaction.SingleEntryTransactionResourceInterceptor" >
+        <property name="methodResourceManagers">
+            <list>
+                <ref bean="sessionSizeResourceManager"></ref>
+            </list>
+        </property>
+        <property name="elapsedTimeBeforeActivationMillis">
+            <value>10000</value>
+        </property>
+        <property name="resourceManagerCallFrequencyMillis">
+            <value>5000</value>
+        </property>
+    </bean>
+    <bean id="sessionSizeResourceManager" class="org.alfresco.repo.domain.hibernate.SessionSizeResourceManager">
+        <property name="sessionFactory">
+            <ref bean="sessionFactory" />
+        </property>
+        <property name="writeThreshold">
+            <value>2000</value>
+        </property>
+        <property name="readThreshold">
+            <value>50000</value>
+        </property>
+        <property name="retentionFactor">
+            <value>3</value>
+        </property>
+    </bean>
     <bean id="dbNodeDaoServiceTxnRegistration" class="org.alfresco.repo.transaction.TransactionalDaoInterceptor" >
         <property name="daoService">
             <ref bean="nodeDaoServiceImpl" />
@@ -424,6 +452,7 @@
         </property>
         <property name="interceptorNames">
             <list>
+                <value>sessionSizeResourceInterceptor</value>
                 <value>daoServiceDirtySessionInterceptor</value>
                 <value>dbNodeDaoServiceTxnRegistration</value>
             </list>
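
Note: interceptor order matters in the second hunk; the session-size interceptor is listed first so session trimming is considered before the dirty-session check and transaction registration on every DAO call. A hedged Java equivalent of the proxy definition (the target and interceptor bean names are taken from the XML and assumed to exist in the same context):

    // Sketch: ProxyFactoryBean equivalent of the interceptor wiring above.
    ProxyFactoryBean proxyFactory = new ProxyFactoryBean();
    proxyFactory.setTargetName("nodeDaoServiceImpl");
    proxyFactory.setInterceptorNames(
            "sessionSizeResourceInterceptor",    // applied first: trim the session if needed
            "daoServiceDirtySessionInterceptor",
            "dbNodeDaoServiceTxnRegistration");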

View File

@@ -190,7 +190,7 @@
             <ref bean="transactionService" />
         </property>
         <property name="dbNodeService">
-            <ref bean="dbNodeServiceImpl" />
+            <ref bean="dbNodeService" />
         </property>
         <property name="nodeDaoService">
             <ref bean="nodeDaoService" />
@@ -210,21 +210,8 @@
         </property>
     </bean>
     
-    <!-- NodeService implemented to persist to Database. Resource management enabled. -->
-    <bean id="dbNodeService" class="org.springframework.aop.framework.ProxyFactoryBean">
-        <property name="proxyInterfaces">
-            <value>org.alfresco.service.cmr.repository.NodeService</value>
-        </property>
-        <property name="target">
-            <ref bean="dbNodeServiceImpl" />
-        </property>
-        <property name="interceptorNames">
-            <list>
-                <value>sessionSizeResourceInterceptor</value>
-            </list>
-        </property>
-    </bean>
-    <bean id="dbNodeServiceImpl" class="org.alfresco.repo.node.db.DbNodeServiceImpl" init-method="init" >
+    <alias name="dbNodeService" alias="dbNodeServiceImpl" />
+    <bean id="dbNodeService" class="org.alfresco.repo.node.db.DbNodeServiceImpl" init-method="init" >
         <property name="dictionaryService">
             <ref bean="dictionaryService" />
         </property>
@@ -250,26 +237,5 @@
             <value>${system.cascadeDeleteInTransaction}</value>
         </property>
     </bean>
     
-    <bean id="sessionSizeResourceInterceptor" class="org.alfresco.repo.transaction.SingleEntryTransactionResourceInterceptor" >
-        <property name="methodResourceManagers">
-            <list>
-                <ref bean="sessionSizeResourceManager"></ref>
-            </list>
-        </property>
-        <property name="elapsedTimeBeforeActivationMillis">
-            <value>10000</value>
-        </property>
-        <property name="resourceManagerCallFrequencyMillis">
-            <value>5000</value>
-        </property>
-    </bean>
-    <bean id="sessionSizeResourceManager" class="org.alfresco.repo.domain.hibernate.SessionSizeResourceManager">
-        <property name="sessionFactory">
-            <ref bean="sessionFactory" />
-        </property>
-        <property name="threshold">
-            <value>5000</value>
-        </property>
-    </bean>
 </beans>
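
Note: the proxy indirection is gone here; dbNodeService is now the concrete DbNodeServiceImpl, and the old bean id survives only as an alias, so existing <ref bean="dbNodeServiceImpl"/> references keep resolving. A hypothetical check of that equivalence ('ctx' is any ApplicationContext loaded with this file; the check itself is illustrative):

    // Both names resolve to the same DbNodeServiceImpl singleton.
    Object byNewName = ctx.getBean("dbNodeService");
    Object byOldName = ctx.getBean("dbNodeServiceImpl");
    assert byNewName == byOldName : "alias must point at the same singleton";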

View File

@@ -405,6 +405,27 @@ public class TransactionalCache<K extends Serializable, V extends Object>
                         " value: " + value);
             }
         }
+        // we have a transaction - add the item into the updated cache for this transaction
+        // are we in an overflow condition?
+        if (txnData.updatedItemsCache.getMemoryStoreSize() >= maxCacheSize)
+        {
+            // overflow about to occur or has occured - we can only guarantee non-stale
+            // data by clearing the shared cache after the transaction.  Also, the
+            // shared cache needs to be ignored for the rest of the transaction.
+            txnData.isClearOn = true;
+            if (!txnData.haveIssuedFullWarning && logger.isWarnEnabled())
+            {
+                logger.warn("Transactional update cache '" + name + "' is full (" + maxCacheSize + ").");
+                txnData.haveIssuedFullWarning = true;
+            }
+        }
+        CacheBucket<V> bucket = null;
+        if (sharedCache.contains(key))
+        {
+            V existingValue = sharedCache.get(key);
+            // The value needs to be kept for later checks
+            bucket = new UpdateCacheBucket<V>(existingValue, value);
+        }
         else
         {
             // we have an active transaction - add the item into the updated cache for this transaction
@@ -416,7 +437,6 @@ public class TransactionalCache<K extends Serializable, V extends Object>
             // shared cache needs to be ignored for the rest of the transaction.
             txnData.isClearOn = true;
         }
-        CacheBucket<V> bucket = null;
         if (sharedCache.contains(key))
         {
             V existingValue = sharedCache.get(key);
@@ -484,7 +504,15 @@ public class TransactionalCache<K extends Serializable, V extends Object>
         // is the shared cache going to be cleared?
         if (txnData.isClearOn)
         {
-            // don't store removals if we're just going to clear it all out later
+            // overflow about to occur or has occured - we can only guarantee non-stale
+            // data by clearing the shared cache after the transaction.  Also, the
+            // shared cache needs to be ignored for the rest of the transaction.
+            txnData.isClearOn = true;
+            if (!txnData.haveIssuedFullWarning && logger.isWarnEnabled())
+            {
+                logger.warn("Transactional update cache '" + name + "' is full (" + maxCacheSize + ").");
+                txnData.haveIssuedFullWarning = true;
+            }
         }
         else
         {
@@ -806,6 +834,7 @@ public class TransactionalCache<K extends Serializable, V extends Object>
     {
         private Cache updatedItemsCache;
         private Cache removedItemsCache;
+        private boolean haveIssuedFullWarning;
         private boolean isClearOn;
         private boolean isClosed;
     }
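
Note: once a transaction's update cache fills, the cache can no longer tell which shared entries the transaction touched, so correctness falls back to clearing the shared cache at commit, and the new haveIssuedFullWarning flag keeps the log to one warning per transaction. The guard in isolation, as a sketch (names as in the hunk; txnData is the per-transaction state):

    // Warn-once overflow guard, isolated for clarity.
    if (txnData.updatedItemsCache.getMemoryStoreSize() >= maxCacheSize)
    {
        txnData.isClearOn = true;  // correctness now depends on a shared-cache clear at commit
        if (!txnData.haveIssuedFullWarning && logger.isWarnEnabled())
        {
            logger.warn("Transactional update cache '" + name + "' is full (" + maxCacheSize + ").");
            txnData.haveIssuedFullWarning = true;  // each overflowing transaction warns exactly once
        }
    }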

View File

@@ -58,9 +58,9 @@
                     class="org.alfresco.repo.domain.hibernate.StoreImpl"
                     column="store_id"
                     not-null="true"
-                    lazy="false"
                     foreign-key="fk_alf_node_store"
-                    fetch="join" />
+                    lazy="proxy"
+                    fetch="select" />
             <!-- the store-unique identifier -->
             <property name="uuid" column="uuid" type="string" length="36" />
         </natural-id>
@@ -97,9 +97,8 @@
         <map
                 name="properties"
                 table="alf_node_properties"
-                lazy="true"
-                fetch="select"
-                batch-size="128"
+                lazy="false"
+                fetch="join"
                 sort="unsorted"
                 inverse="false"
                 optimistic-lock="false"
@@ -125,9 +124,8 @@
         <set
                 name="aspects"
                 table="alf_node_aspects"
-                lazy="true"
-                fetch="select"
-                batch-size="128"
+                lazy="false"
+                fetch="join"
                 inverse="false"
                 sort="unsorted"
                 optimistic-lock="false"
@@ -270,9 +268,6 @@
             join assoc.child as child
         where
             child.id = :childId
-        order by
-            assoc.index,
-            assoc.id
     </query>
     
     <query name="node.DeleteParentAssocs">

View File

@@ -30,6 +30,7 @@ import java.util.Map;
 import java.util.Set;
 
 import org.alfresco.repo.transaction.AlfrescoTransactionSupport;
+import org.alfresco.repo.transaction.AlfrescoTransactionSupport.TxnReadState;
 import org.alfresco.util.resource.MethodResourceManager;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -60,7 +61,11 @@ public class SessionSizeResourceManager extends HibernateDaoSupport implements M
     private static Log logger = LogFactory.getLog(SessionSizeResourceManager.class);
     
     /** Default 1000 */
-    private int threshold;
+    private int writeThreshold;
+    /** Default 10000 */
+    private int readThreshold;
+    /** Default 3 */
+    private int retentionFactor;
     
     /**
      * Disable resource management for the duration of the current transaction.  This is temporary
@@ -104,21 +109,58 @@ public class SessionSizeResourceManager extends HibernateDaoSupport implements M
      */
     public SessionSizeResourceManager()
     {
-        this.threshold = 1000;
+        this.writeThreshold = 1000;
+        this.readThreshold = 10000;
+        this.retentionFactor = 3;
     }
     
     /**
-     * Set the {@link Session#clear()} threshold.  If the number of entities and collections in the
-     * current session exceeds this number, then the session will be cleared.  Have you read the
-     * disclaimer?
-     * 
-     * @param threshold the maximum number of entities and associations to keep in memory
-     * 
-     * @see #threshold
+     * Set the number of entities retained in the session for each one flushed; default 3.
+     * Set this to zero to remove all entities when the session is trimmed.
+     * 
+     * @param retentionFactor the number of entities to keep for each entity removed
      */
-    public void setThreshold(int threshold)
+    public void setRetentionFactor(int retentionFactor)
     {
-        this.threshold = threshold;
+        this.retentionFactor = retentionFactor;
+    }
+    
+    /**
+     * Set the {@link Session#clear()} threshold for read-only transactions.
+     * If the number of entities and collections in the current session exceeds this number,
+     * then the session will be cleared.
+     * <p/>
+     * Have you read the disclaimer?
+     * 
+     * @param threshold the maximum number of entities and associations to keep in memory during read-only operations
+     * 
+     * @see #writeThreshold
+     */
+    public void setReadThreshold(int threshold)
+    {
+        this.readThreshold = threshold;
+    }
+    
+    /**
+     * Set the {@link Session#clear()} threshold for read-write transactions.
+     * If the number of entities and collections in the current session exceeds this number,
+     * then the session will be cleared.
+     * <p/>
+     * Have you read the disclaimer?
+     * 
+     * @param threshold the maximum number of entities and associations to keep in memory during write operations
+     * 
+     * @see #writeThreshold
+     */
+    public void setWriteThreshold(int threshold)
+    {
+        this.writeThreshold = threshold;
+    }
+    
+    public static final String KEY_COMMIT_STARTED = "SessionSizeResourceManager.commitStarted";
+    public static void setCommitStarted()
+    {
+        AlfrescoTransactionSupport.bindResource(KEY_COMMIT_STARTED, Boolean.TRUE);
     }
@@ -131,6 +173,15 @@ public class SessionSizeResourceManager extends HibernateDaoSupport implements M
             // Don't do anything
             return;
         }
+        int threshold = writeThreshold;
+        int retentionFactor = 0;
+        Boolean commitStarted = (Boolean) AlfrescoTransactionSupport.getResource(KEY_COMMIT_STARTED);
+        if (commitStarted != null ||
+                AlfrescoTransactionSupport.getTransactionReadState() == TxnReadState.TXN_READ_ONLY)
+        {
+            threshold = readThreshold;
+            retentionFactor = this.retentionFactor;     // Retain objects during read-only phase only
+        }
         // We are go for interfering
         Session session = getSession(false);
         SessionStatistics stats = session.getStatistics();
@@ -139,14 +190,15 @@ public class SessionSizeResourceManager extends HibernateDaoSupport implements M
         if ((entityCount + collectionCount) > threshold)
         {
             DirtySessionMethodInterceptor.flushSession(session, true);
-            selectivelyClear(session, stats);
+            selectivelyClear(session, stats, retentionFactor);
             // session.clear();
             if (logger.isDebugEnabled())
             {
                 String msg = String.format(
-                        "Cleared %5d entities and %5d collections from Hibernate Session",
+                        "Cleared %5d entities and %5d collections from Hibernate Session (threshold %5d)",
                         entityCount,
-                        collectionCount);
+                        collectionCount,
+                        threshold);
                 logger.debug(msg);
             }
         }
@@ -160,27 +212,51 @@ public class SessionSizeResourceManager extends HibernateDaoSupport implements M
     public static void clear(Session session)
     {
         SessionStatistics stats = session.getStatistics();
-        selectivelyClear(session, stats);
+        selectivelyClear(session, stats, 0);
     }
     
     @SuppressWarnings("unchecked")
-    private static void selectivelyClear(Session session, SessionStatistics stats)
+    private static void selectivelyClear(Session session, SessionStatistics stats, int retentionFactor)
     {
         if (logger.isDebugEnabled())
         {
             logger.debug(stats);
         }
         Set<EntityKey> keys = new HashSet<EntityKey>((Set<EntityKey>)stats.getEntityKeys());
+        int retentionCount = 0;
         for (EntityKey key : keys)
         {
             // This should probably be configurable but frankly the nauseous extrusion of Gavin King's
             // programmatic alimentary tract (hibernate) will go away before this could make a difference.
             String entityName = key.getEntityName();
-            if (!entityName.startsWith("org.alfresco") || entityName.startsWith("org.alfresco.repo.workflow.jbpm"))
+            if (!entityName.startsWith("org.alfresco"))
             {
+                // Leave non-Alfresco entities alone.  JBPM bugs arise due to inconsistent flushing here.
                 continue;
             }
+            else if (entityName.startsWith("org.alfresco.repo.workflow.jbpm"))
+            {
+                // Once again, JBPM flushing issue prevent us from throwing related entities away
+                continue;
+            }
+            else if (entityName.startsWith("org.alfresco.repo.domain.hibernate.QName"))
+            {
+                // QNames are heavily used
+                continue;
+            }
+            else if (entityName.startsWith("org.alfresco.repo.domain.hibernate.Store"))
+            {
+                // So are Stores
+                continue;
+            }
+            // Do we evict or retain?
+            if (retentionCount < retentionFactor)
+            {
+                retentionCount++;
+                continue;
+            }
+            retentionCount = 0;
+            // Flush every other instance
             Object val = session.get(key.getEntityName(), key.getIdentifier());
             if (val != null)
             {
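
Note: the retention loop keeps retentionFactor eligible entities and then evicts one, so roughly 1/(retentionFactor + 1) of the eligible entities leave the session per trim. A quick standalone check of that arithmetic:

    // With the default retentionFactor of 3, the keep-keep-keep-evict cycle
    // evicts a quarter of the eligible entities: 40 in -> 30 retained, 10 evicted.
    int retentionFactor = 3;
    int retentionCount = 0, retained = 0, evicted = 0;
    for (int i = 0; i < 40; i++)
    {
        if (retentionCount < retentionFactor)
        {
            retentionCount++;
            retained++;         // keep this entity in the session
            continue;
        }
        retentionCount = 0;
        evicted++;              // evict this one, then start retaining again
    }
    // retained == 30, evicted == 10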

View File

@@ -25,6 +25,7 @@
 package org.alfresco.repo.model.filefolder;
 
 import java.io.File;
+import java.io.Serializable;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
@@ -35,6 +36,8 @@ import org.alfresco.error.AlfrescoRuntimeException;
 import org.alfresco.model.ContentModel;
 import org.alfresco.repo.content.transform.AbstractContentTransformerTest;
 import org.alfresco.repo.security.authentication.AuthenticationComponent;
+import org.alfresco.repo.security.authentication.AuthenticationUtil;
+import org.alfresco.repo.security.authentication.AuthenticationUtil.RunAsWork;
 import org.alfresco.repo.transaction.RetryingTransactionHelper;
 import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
 import org.alfresco.service.ServiceRegistry;
@@ -47,11 +50,17 @@ import org.alfresco.service.cmr.repository.NodeService;
 import org.alfresco.service.cmr.repository.StoreRef;
 import org.alfresco.service.cmr.search.ResultSet;
 import org.alfresco.service.cmr.search.SearchService;
+import org.alfresco.service.cmr.security.AuthenticationService;
+import org.alfresco.service.cmr.security.PermissionService;
+import org.alfresco.service.namespace.QName;
+import org.alfresco.service.transaction.TransactionService;
 import org.alfresco.util.ApplicationContextHelper;
+import org.alfresco.util.ArgumentHelper;
 import org.alfresco.util.GUID;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.springframework.context.ApplicationContext;
+import org.springframework.context.ConfigurableApplicationContext;
 
 /**
  * Tests around some of the data structures that lead to performance
@@ -375,4 +384,187 @@ public class FileFolderPerformanceTester extends TestCase
 //                50000,
 //                new double[] {0.01, 0.02, 0.03, 0.04, 0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90});
 //    }
+    
+    /**
+     * Create a bunch of files and folders in a folder and then run multi-threaded directory
+     * listings against it.
+     * 
+     * @param args          <x> <y> where 'x' is the number of files in a folder and 'y' is the
+     *                      number of threads to list
+     */
+    public static void main(String ... args)
+    {
+        ConfigurableApplicationContext ctx = (ConfigurableApplicationContext) ApplicationContextHelper.getApplicationContext();
+        try
+        {
+            run(ctx, args);
+        }
+        catch (Throwable e)
+        {
+            System.out.println("Failed to run CifsHelper performance test");
+            e.printStackTrace();
+        }
+        finally
+        {
+            ctx.close();
+        }
+    }
+    
+    private static void run(ApplicationContext ctx, String ... args) throws Throwable
+    {
+        ArgumentHelper argHelper = new ArgumentHelper(getUsage(), args);
+        final int fileCount = argHelper.getIntegerValue("files", true, 1, 10000);
+        final String folderRefStr = argHelper.getStringValue("folder", false, true);
+        final int threadCount = argHelper.getIntegerValue("threads", false, 1, 100);
+        final NodeRef selectedFolderNodeRef = folderRefStr == null ? null : new NodeRef(folderRefStr);
+        
+        ServiceRegistry serviceRegistry = (ServiceRegistry) ctx.getBean(ServiceRegistry.SERVICE_REGISTRY);
+        final AuthenticationService authenticationService = serviceRegistry.getAuthenticationService();
+        final PermissionService permissionService = serviceRegistry.getPermissionService();
+        final NodeService nodeService = serviceRegistry.getNodeService();
+        final TransactionService transactionService = serviceRegistry.getTransactionService();
+        final FileFolderService fileFolderService = serviceRegistry.getFileFolderService();
+        
+        RunAsWork<String> createUserRunAs = new RunAsWork<String>()
+        {
+            public String doWork() throws Exception
+            {
+                String user = GUID.generate();
+                authenticationService.createAuthentication(user, user.toCharArray());
+                return user;
+            }
+        };
+        final String user = AuthenticationUtil.runAs(createUserRunAs, AuthenticationUtil.getSystemUserName());
+        
+        // Create the files
+        final RetryingTransactionCallback<NodeRef> createCallback = new RetryingTransactionCallback<NodeRef>()
+        {
+            public NodeRef execute() throws Throwable
+            {
+                AuthenticationUtil.pushAuthentication();
+                NodeRef folderNodeRef = null;
+                try
+                {
+                    AuthenticationUtil.setFullyAuthenticatedUser(AuthenticationUtil.getSystemUserName());
+                    if (selectedFolderNodeRef == null)
+                    {
+                        // Create a new store
+                        StoreRef storeRef = nodeService.createStore(StoreRef.PROTOCOL_WORKSPACE, GUID.generate());
+                        NodeRef rootNodeRef = nodeService.getRootNode(storeRef);
+                        // Create a folder
+                        folderNodeRef = nodeService.createNode(
+                                rootNodeRef,
+                                ContentModel.ASSOC_CHILDREN,
+                                ContentModel.ASSOC_CHILDREN,
+                                ContentModel.TYPE_FOLDER,
+                                Collections.<QName, Serializable>singletonMap(ContentModel.PROP_NAME, "TOP FOLDER")
+                                ).getChildRef();
+                        // Grant permissions
+                        permissionService.setPermission(folderNodeRef, user, PermissionService.ALL_PERMISSIONS, true);
+                    }
+                    else
+                    {
+                        folderNodeRef = selectedFolderNodeRef;
+                        // Grant permissions
+                        permissionService.setPermission(folderNodeRef, user, PermissionService.ALL_PERMISSIONS, true);
+                        System.out.println("Reusing folder " + folderNodeRef);
+                    }
+                }
+                finally
+                {
+                    AuthenticationUtil.popAuthentication();
+                }
+                if (selectedFolderNodeRef == null)
+                {
+                    // Create the files
+                    for (int i = 0; i < fileCount; i++)
+                    {
+                        fileFolderService.create(
+                                folderNodeRef,
+                                String.format("FILE-%4d", i),
+                                ContentModel.TYPE_CONTENT);
+                    }
+                    System.out.println("Created " + fileCount + " files in folder " + folderNodeRef);
+                }
+                // Done
+                return folderNodeRef;
+            }
+        };
+        RunAsWork<NodeRef> createRunAs = new RunAsWork<NodeRef>()
+        {
+            public NodeRef doWork() throws Exception
+            {
+                return transactionService.getRetryingTransactionHelper().doInTransaction(createCallback);
+            }
+        };
+        final NodeRef folderNodeRef = AuthenticationUtil.runAs(createRunAs, user);
+        
+        // Now wait for some input before commencing the read run
+        System.out.print("Hit any key to commence directory listing ...");
+        System.in.read();
+        
+        final RunAsWork<List<FileInfo>> readRunAs = new RunAsWork<List<FileInfo>>()
+        {
+            public List<FileInfo> doWork() throws Exception
+            {
+                return fileFolderService.search(folderNodeRef, "*", false);
+            }
+        };
+        
+        Thread[] threads = new Thread[threadCount];
+        for (int i = 0; i < threadCount; i++)
+        {
+            Thread readThread = new Thread("FolderList-" + i)
+            {
+                int iteration = 0;
+                public void run()
+                {
+                    while (++iteration <= 2)
+                    {
+                        runImpl();
+                    }
+                }
+                private void runImpl()
+                {
+                    String threadName = Thread.currentThread().getName();
+                    long start = System.currentTimeMillis();
+                    List<FileInfo> nodeRefs = AuthenticationUtil.runAs(readRunAs, user);
+                    long time = System.currentTimeMillis() - start;
+                    double average = (double) time / (double) (fileCount);
+                    // Make sure that we have the correct number of entries
+                    if (folderRefStr != null && nodeRefs.size() != fileCount)
+                    {
+                        System.err.println(
+                                "WARNING: Thread " + threadName + " got " + nodeRefs.size() +
+                                " but expected " + fileCount);
+                    }
+                    System.out.print("\n" +
+                            "Thread " + threadName + ": \n" +
+                            "   Read " + String.format("%4d", fileCount) + " files \n" +
+                            "   Average: " + String.format("%10.2f", average) + " ms per file \n" +
+                            "   Average: " + String.format("%10.2f", 1000.0/average) + " files per second");
+                }
+            };
+            readThread.start();
+            threads[i] = readThread;
+        }
+        for (int i = 0; i < threads.length; i++)
+        {
+            threads[i].join();
+        }
+    }
+    
+    private static String getUsage()
+    {
+        StringBuilder sb = new StringBuilder();
+        sb.append("FileFolderPerformanceTester usage: ").append("\n");
+        sb.append("   FileFolderPerformanceTester --files=<filecount> --threads=<threadcount> --folder=<folderref>").append("\n");
+        sb.append("      filecount: number of files in the folder").append("\n");
+        sb.append("      threadcount: number of threads to do the directory listing").append("\n");
+        return sb.toString();
+    }
 }
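
Note: per getUsage() above, the tester takes --files and --threads, plus an optional --folder to reuse an existing folder. A hypothetical invocation (argument values are examples only; --folder is omitted, so a fresh store and folder are created):

    // Example run: list a folder of 1000 files from 10 concurrent threads.
    // ArgumentHelper parses the --name=value pairs.
    FileFolderPerformanceTester.main(
            "--files=1000",
            "--threads=10");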

View File

@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2005-2007 Alfresco Software Limited.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * As a special exception to the terms and conditions of version 2.0 of
+ * the GPL, you may redistribute this Program in connection with Free/Libre
+ * and Open Source Software ("FLOSS") applications as described in Alfresco's
+ * FLOSS exception.  You should have recieved a copy of the text describing
+ * the FLOSS exception, and it is also available here:
+ *   http://www.alfresco.com/legal/licensing"
+ */
+package org.alfresco.repo.node;
+
+import java.util.List;
+
+import org.alfresco.service.cmr.repository.NodeRef;
+
+/**
+ * A component that pre-fetches cached data for the given nodes.  Client code can use
+ * this component when a list of <code>NodeRef</code> instances will be processed in
+ * a data-intensive manner.
+ * 
+ * @author Andy Hind
+ * @author Derek Hulley
+ */
+public interface NodeBulkLoader
+{
+    /**
+     * Pre-cache data relevant to the given nodes.  There is no need to split the collection
+     * up before calling this method;  it is up to the implementations to ensure that batching
+     * is done where necessary.
+     * 
+     * @param nodeRefs          the nodes that will be cached.
+     */
+    public void cacheNodes(List<NodeRef> nodeRefs);
+}
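
Note: a sketch of the intended call pattern, with the node DAO cast to the new interface (the cast and helper method are illustrative; in the repository the DAO implements the interface directly, as the next file shows):

    // Pre-load everything a listing is about to touch, then iterate cheaply.
    void listWithWarmCaches(NodeBulkLoader bulkLoader, NodeService nodeService, List<NodeRef> children)
    {
        bulkLoader.cacheNodes(children);        // one batched pre-load instead of N lazy loads
        for (NodeRef child : children)
        {
            nodeService.getProperties(child);   // now served from the warmed caches
        }
    }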

View File

@@ -73,6 +73,7 @@ import org.alfresco.repo.domain.hibernate.ServerImpl;
 import org.alfresco.repo.domain.hibernate.SessionSizeResourceManager;
 import org.alfresco.repo.domain.hibernate.StoreImpl;
 import org.alfresco.repo.domain.hibernate.TransactionImpl;
+import org.alfresco.repo.node.NodeBulkLoader;
 import org.alfresco.repo.node.db.NodeDaoService;
 import org.alfresco.repo.policy.BehaviourFilter;
 import org.alfresco.repo.security.authentication.AuthenticationUtil;
@@ -116,7 +117,9 @@ import org.alfresco.util.GUID;
 import org.alfresco.util.Pair;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.hibernate.CacheMode;
 import org.hibernate.Criteria;
+import org.hibernate.FlushMode;
 import org.hibernate.HibernateException;
 import org.hibernate.LockMode;
 import org.hibernate.ObjectNotFoundException;
@@ -136,7 +139,9 @@ import org.springframework.orm.hibernate3.support.HibernateDaoSupport;
  * 
  * @author Derek Hulley
  */
-public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements NodeDaoService, TransactionalDao
+public class HibernateNodeDaoServiceImpl
+        extends HibernateDaoSupport
+        implements NodeDaoService, TransactionalDao, NodeBulkLoader
 {
     private static final String QUERY_GET_STORE_BY_ALL = "store.GetStoreByAll";
     private static final String QUERY_GET_ALL_STORES = "store.GetAllStores";
@@ -2050,20 +2055,24 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
                 childNameUnique.getFirst());
         // Add it to the cache
-        Set<Long> oldParentAssocIds = parentAssocsCache.get(childNode.getId());
-        if (oldParentAssocIds != null)
+        Set<Long> parentAssocIds = parentAssocsCache.get(childNode.getId());
+        if (parentAssocIds == null)
         {
-            Set<Long> newParentAssocIds = new HashSet<Long>(oldParentAssocIds);
-            newParentAssocIds.add(assocId);
-            parentAssocsCache.put(childNodeId, newParentAssocIds);
-            if (isDebugParentAssocCacheEnabled)
-            {
-                loggerParentAssocsCache.debug("\n" +
-                        "Parent associations cache - Updating entry: \n" +
-                        "   Node:   " + childNodeId + "\n" +
-                        "   Before: " + oldParentAssocIds + "\n" +
-                        "   After:  " + newParentAssocIds);
-            }
+            parentAssocIds = new HashSet<Long>(3);
+        }
+        else
+        {
+            // Copy the list when we add to it
+            parentAssocIds = new HashSet<Long>(parentAssocIds);
+        }
+        parentAssocIds.add(assocId);
+        parentAssocsCache.put(childNodeId, parentAssocIds);
+        if (isDebugParentAssocCacheEnabled)
+        {
+            loggerParentAssocsCache.debug("\n" +
+                    "Parent associations cache - Updating entry: \n" +
+                    "   Node:   " + childNodeId + "\n" +
+                    "   Assocs: " + parentAssocIds);
         }
         
         // If this is a primary association then update the permissions
@@ -2909,11 +2918,16 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
      *        9 child.uuid
      * </pre>
      */
+    @SuppressWarnings("unchecked")
     private void convertToChildAssocRefs(Node parentNode, ScrollableResults results, ChildAssocRefQueryCallback resultsCallback)
     {
         Long parentNodeId = parentNode.getId();
         NodeRef parentNodeRef = parentNode.getNodeRef();
         Pair<Long, NodeRef> parentNodePair = new Pair<Long, NodeRef>(parentNodeId, parentNodeRef);
+        List<Object[]> callbackResults = new ArrayList<Object[]>(128);
+        List<NodeRef> childNodeRefs = new ArrayList<NodeRef>(128);
         while (results.next())
         {
             Object[] row = results.get();
@@ -2956,8 +2970,134 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
                     continue;
                 }
             }
-            // Call back
-            resultsCallback.handle(assocPair, parentNodePair, childNodePair);
+            callbackResults.add(new Object[] {assocPair, parentNodePair, childNodePair});
+            childNodeRefs.add(childNodeRef);
+        }
+        // Cache the nodes
+        cacheNodes(childNodeRefs);
+        // Pass results to callback
+        for (Object[] callbackResult : callbackResults)
+        {
+            resultsCallback.handle(
+                    (Pair<Long, ChildAssociationRef>) callbackResult[0],
+                    (Pair<Long, NodeRef>) callbackResult[1],
+                    (Pair<Long, NodeRef>) callbackResult[2]);
+        }
+    }
+    
+    /**
+     * {@inheritDoc}
+     * <p/>
+     * Loads properties, aspects, parent associations and the ID-noderef cache
+     */
+    public void cacheNodes(List<NodeRef> nodeRefs)
+    {
+        // Group the nodes by store so that we don't *have* to eagerly join to store to get query performance
+        Map<StoreRef, List<String>> uuidsByStore = new HashMap<StoreRef, List<String>>(3);
+        for (NodeRef nodeRef : nodeRefs)
+        {
+            StoreRef storeRef = nodeRef.getStoreRef();
+            List<String> uuids = (List<String>) uuidsByStore.get(storeRef);
+            if (uuids == null)
+            {
+                uuids = new ArrayList<String>(nodeRefs.size());
+                uuidsByStore.put(storeRef, uuids);
+            }
+            uuids.add(nodeRef.getId());
+        }
+        int size = nodeRefs.size();
+        nodeRefs = null;
+        // Now load all the nodes
+        for (Map.Entry<StoreRef, List<String>> entry : uuidsByStore.entrySet())
+        {
+            StoreRef storeRef = entry.getKey();
+            List<String> uuids = entry.getValue();
+            cacheNodes(storeRef, uuids);
+        }
+        if (logger.isDebugEnabled())
+        {
+            logger.debug("Pre-loaded " + size + " nodes.");
+        }
+    }
+    
+    /**
+     * Loads the nodes into cache using batching.
+     */
+    private void cacheNodes(StoreRef storeRef, List<String> uuids)
+    {
+        Store store = getStore(storeRef);           // Be fetched from local caches
+        int batchSize = 256;
+        List<String> batch = new ArrayList<String>(128);
+        for (String uuid : uuids)
+        {
+            batch.add(uuid);
+            if (batch.size() >= batchSize)
+            {
+                // Preload
+                cacheNodesNoBatch(store, batch);
+                batch.clear();
+            }
+        }
+        // Load any remaining nodes
+        if (batch.size() > 0)
+        {
+            cacheNodesNoBatch(store, batch);
+        }
+    }
+    
+    /**
+     * Uses a Critera to preload the nodes without batching
+     */
+    @SuppressWarnings("unchecked")
+    private void cacheNodesNoBatch(Store store, List<String> uuids)
+    {
+        Criteria criteria = getSession().createCriteria(NodeImpl.class, "node");
+        criteria.setResultTransformer(Criteria.ROOT_ENTITY);
+        criteria.add(Restrictions.eq("store.id", store.getId()));
+        criteria.add(Restrictions.in("uuid", uuids));
+        criteria.setCacheMode(CacheMode.PUT);
+        criteria.setFlushMode(FlushMode.MANUAL);
+        
+        List<Node> nodeList = criteria.list();
+        List<Long> nodeIds = new ArrayList<Long>(nodeList.size());
+        for (Node node : nodeList)
+        {
+            Long nodeId = node.getId();
+            storeAndNodeIdCache.put(node.getNodeRef(), nodeId);
+            nodeIds.add(nodeId);
+        }
+        
+        criteria = getSession().createCriteria(ChildAssocImpl.class, "parentAssoc");
+        criteria.setResultTransformer(Criteria.ROOT_ENTITY);
+        criteria.add(Restrictions.in("child.id", nodeIds));
+        criteria.setCacheMode(CacheMode.PUT);
+        criteria.setFlushMode(FlushMode.MANUAL);
+        
+        List<ChildAssoc> parentAssocs = criteria.list();
+        for (ChildAssoc parentAssoc : parentAssocs)
+        {
+            Long nodeId = parentAssoc.getChild().getId();
+            Set<Long> parentAssocsOfNode = parentAssocsCache.get(nodeId);
+            if (parentAssocsOfNode == null)
+            {
+                parentAssocsOfNode = new HashSet<Long>(3);
+            }
+            else
+            {
+                parentAssocsOfNode = new HashSet<Long>(parentAssocsOfNode);
+            }
+            parentAssocsOfNode.add(parentAssoc.getId());
+            parentAssocsCache.put(nodeId, parentAssocsOfNode);
+            if (isDebugParentAssocCacheEnabled)
+            {
+                loggerParentAssocsCache.debug("\n" +
+                        "Parent associations cache - Adding entry: \n" +
+                        "   Node:   " + nodeId + "\n" +
+                        "   Assocs: " + parentAssocsOfNode);
+            }
+        }
     }
 }
@@ -3199,11 +3339,14 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
             Set<Long> newParentAssocIds = new HashSet<Long>(oldParentAssocIds);
             newParentAssocIds.remove(childAssocId);
             parentAssocsCache.put(childNodeId, newParentAssocIds);
-            loggerParentAssocsCache.debug("\n" +
-                    "Parent associations cache - Updating entry: \n" +
-                    "   Node:   " + childNodeId + "\n" +
-                    "   Before: " + oldParentAssocIds + "\n" +
-                    "   After:  " + newParentAssocIds);
+            if (this.isDebugParentAssocCacheEnabled)
+            {
+                loggerParentAssocsCache.debug("\n" +
+                        "Parent associations cache - Updating entry: \n" +
+                        "   Node:   " + childNodeId + "\n" +
+                        "   Before: " + oldParentAssocIds + "\n" +
+                        "   After:  " + newParentAssocIds);
+            }
         }
         
         // maintain inverse association sets
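
Note: cacheNodes groups the refs by store and then pre-loads in batches of 256 so the generated IN lists stay bounded. The batching pattern in general form, as a sketch:

    // General shape of the fixed-size batching used by cacheNodes(StoreRef, List):
    // each sub-list becomes one IN (...) restriction of at most batchSize elements.
    static List<List<String>> toBatches(List<String> items, int batchSize)
    {
        List<List<String>> batches = new ArrayList<List<String>>();
        for (int from = 0; from < items.size(); from += batchSize)
        {
            int to = Math.min(from + batchSize, items.size());
            batches.add(items.subList(from, to));
        }
        return batches;
    }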

View File

@@ -36,6 +36,7 @@ import java.util.Set;
 import org.alfresco.error.AlfrescoRuntimeException;
 import org.alfresco.repo.cache.TransactionalCache;
 import org.alfresco.repo.domain.hibernate.DirtySessionMethodInterceptor;
+import org.alfresco.repo.domain.hibernate.SessionSizeResourceManager;
 import org.alfresco.repo.node.integrity.IntegrityChecker;
 import org.alfresco.repo.search.impl.lucene.LuceneIndexerAndSearcher;
 import org.alfresco.util.GUID;
@@ -677,6 +678,10 @@ public abstract class AlfrescoTransactionSupport
         // These are still considered part of the transaction so are executed here
         doBeforeCommit(readOnly);
         
+        // HACK: In order to control Hibernate's flush behaviour, we mark the point at which
+        //       we start read-only operations during a commit
+        SessionSizeResourceManager.setCommitStarted();
+        
         // Check integrity
         for (IntegrityChecker integrityChecker : integrityCheckers)

View File

@@ -80,6 +80,7 @@ public class AlfrescoJobExecutor extends JobExecutor
             Thread thread = new AlfrescoJobExecutorThread(threadName, this, jbpmConfiguration, getIdleInterval(), getMaxIdleInterval(), getMaxLockTime(), getHistoryMaxSize());
             getThreads().put(threadName, thread);
             log.debug("starting new job executor thread '" + threadName + "'");
+            thread.setDaemon(true);
             thread.start();
         }
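
Note: marking the executor thread as a daemon means an otherwise-idle JVM can exit without waiting for the jBPM job poller. The flag must be set before start(); a minimal illustration (the Runnable is hypothetical):

    // setDaemon must precede start(); calling it on a live thread throws
    // IllegalThreadStateException. Daemon threads do not block JVM shutdown.
    Thread poller = new Thread(pollLoopRunnable, "JbpmJobExecutor-1");
    poller.setDaemon(true);
    poller.start();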

View File

@@ -24,9 +24,12 @@
         <property name="sessionFactory">
             <ref bean="sessionFactory" />
         </property>
-        <property name="threshold">
+        <property name="writeThreshold">
             <value>2000</value>
         </property>
+        <property name="readThreshold">
+            <value>50000</value>
+        </property>
     </bean>
     
     <bean id="testSessionSizeDbNodeService" class="org.springframework.aop.framework.ProxyFactoryBean">