Merged DEV/DEREK_2.1 to HEAD

- Removed Node.parentAssocs mapping
- Added parentAssocs transactional cache to NodeDAO
- Added concurrency detection to TransactionalCache
- Fixed cluster sample config


git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/alfresco/HEAD/root@5948 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
Derek Hulley
2007-06-13 23:50:14 +00:00
parent 36ea25f822
commit fb069d1680
19 changed files with 940 additions and 186 deletions
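
The concurrency detection added to TransactionalCache works by comparing, at commit
time, the values a transaction wrote against what is now in the shared cache, and by
raising Spring's ConcurrencyFailureException when the shared copy has moved on. A
minimal illustrative sketch (the key "A" and the values are hypothetical; the real
checks live in the CacheBucket classes in the TransactionalCache diff below):

    txn.begin();
    transactionalCache.put("A", "AAA");   // staged in the transaction-local cache
    backingCache.put("A", "aaa");         // a concurrent writer hits the shared cache
    txn.commit();                         // beforeCommit() detects the conflict and
                                          // throws ConcurrencyFailureException

This is exactly the scenario exercised by testConcurrentAddAgainstAdd in the new
CacheTest methods further down.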

View File

@@ -24,6 +24,9 @@
     </bean>
     <bean id="asynchronousActionExecutionQueue" class="org.alfresco.repo.action.AsynchronousActionExecutionQueueImpl">
+        <property name="threadPoolExecutor">
+            <ref bean="threadPoolExecutor"/>
+        </property>
         <property name="transactionService">
             <ref bean="transactionService"/>
         </property>

View File

@@ -27,6 +27,42 @@
     <bean name="internalEHCacheManager" class="org.alfresco.repo.cache.InternalEhCacheManagerFactoryBean" />
 
+    <!-- ===================================== -->
+    <!-- Parent Associations lookup for nodes  -->
+    <!-- ===================================== -->
+
+    <!-- The cross-transaction shared cache for Parent Associations -->
+    <bean name="parentAssocsSharedCache" class="org.alfresco.repo.cache.EhCacheAdapter">
+        <property name="cache">
+            <bean class="org.springframework.cache.ehcache.EhCacheFactoryBean" >
+                <property name="cacheManager">
+                    <ref bean="internalEHCacheManager" />
+                </property>
+                <property name="cacheName">
+                    <value>org.alfresco.cache.parentAssocsCache</value>
+                </property>
+            </bean>
+        </property>
+    </bean>
+
+    <!-- The transactional cache for Parent Associations -->
+    <bean name="parentAssocsCache" class="org.alfresco.repo.cache.TransactionalCache">
+        <property name="sharedCache">
+            <ref bean="parentAssocsSharedCache" />
+        </property>
+        <property name="cacheManager" >
+            <ref bean="transactionalEHCacheManager" />
+        </property>
+        <property name="name">
+            <value>org.alfresco.parentAssocsTransactionalCache</value>
+        </property>
+        <property name="maxCacheSize">
+            <value>1000</value>
+        </property>
+    </bean>
+
     <!-- ===================================== -->
     <!-- Authority container look up for users -->
     <!-- ===================================== -->
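
For reference, the XML wiring above corresponds to the following programmatic
construction (a sketch only: the setter names are implied by the bean properties,
which Spring invokes reflectively, and the generic types match the DAO usage shown
later in this commit):

    TransactionalCache<Long, Set<Long>> cache = new TransactionalCache<Long, Set<Long>>();
    cache.setSharedCache(parentAssocsSharedCache);       // the EhCacheAdapter bean above
    cache.setCacheManager(transactionalEhCacheManager);  // manager for in-transaction caches
    cache.setName("org.alfresco.parentAssocsTransactionalCache");
    cache.setMaxCacheSize(1000);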

View File

@@ -75,13 +75,6 @@
         eternal="true"
         overflowToDisk="false"
     />
-    <!-- approx 10MB memory required -->
-    <cache
-        name="org.alfresco.repo.domain.hibernate.NodeImpl.parentAssocs"
-        maxElementsInMemory="10000"
-        eternal="true"
-        overflowToDisk="false"
-    />
     <!-- approx 250MB memory required -->
     <cache
         name="org.alfresco.repo.domain.hibernate.ChildAssocImpl"
@@ -92,7 +85,7 @@
     <!-- general use node associations are not common -->
     <cache
         name="org.alfresco.repo.domain.hibernate.NodeAssocImpl"
-        maxElementsInMemory="1000"
+        maxElementsInMemory="5000"
         eternal="true"
         overflowToDisk="false"
     />
@@ -263,6 +256,12 @@
     />
     <!-- Internally used caches -->
+    <cache
+        name="org.alfresco.cache.parentAssocsCache"
+        maxElementsInMemory="10000"
+        eternal="true"
+        overflowToDisk="false"
+    />
     <cache
         name="org.alfresco.cache.userToAuthorityCache"
         maxElementsInMemory="10000"

View File

@@ -176,25 +176,6 @@
             replicateAsynchronously = false"/>
-    </cache>
-    <cache
-        name="org.alfresco.repo.domain.hibernate.NodeImpl.parentAssocs"
-        maxElementsInMemory="10000"
-        eternal="true"
-        timeToIdleSeconds="0"
-        timeToLiveSeconds="0"
-        overflowToDisk="false">
-        <cacheEventListenerFactory
-            class="net.sf.ehcache.distribution.RMICacheReplicatorFactory"
-            properties="replicatePuts = false,
-                replicateUpdates = true,
-                replicateRemovals = true,
-                replicateUpdatesViaCopy = false,
-                replicateAsynchronously = false"/>
     </cache>
     <cache
@@ -434,6 +415,23 @@
     <!-- Non-Hibernate -->
+    <cache
+        name="org.alfresco.cache.parentAssocsCache"
+        maxElementsInMemory="10000"
+        eternal="true"
+        overflowToDisk="false">
+        <cacheEventListenerFactory
+            class="net.sf.ehcache.distribution.RMICacheReplicatorFactory"
+            properties="replicatePuts = false,
+                replicateUpdates = true,
+                replicateRemovals = true,
+                replicateUpdatesViaCopy = false,
+                replicateAsynchronously = false"/>
+    </cache>
     <cache
         name="org.alfresco.cache.userToAuthorityCache"
         maxElementsInMemory="10000"

View File

@@ -172,7 +172,9 @@
             <props>
                 <prop key="org.alfresco.repo.domain.hibernate.NodeImpl.properties">${cache.strategy}</prop>
                 <prop key="org.alfresco.repo.domain.hibernate.NodeImpl.aspects">${cache.strategy}</prop>
+                <!--
                 <prop key="org.alfresco.repo.domain.hibernate.NodeImpl.parentAssocs">${cache.strategy}</prop>
+                -->
                 <prop key="org.alfresco.repo.domain.hibernate.DbAccessControlListImpl.entries">${cache.strategy}</prop>
                 <prop key="org.alfresco.repo.domain.hibernate.DbAuthorityImpl.externalKeys">${cache.strategy}</prop>
@@ -231,6 +233,9 @@
         <property name="sessionFactory">
             <ref bean="sessionFactory" />
         </property>
+        <property name="parentAssocsCache">
+            <ref bean="parentAssocsCache"/>
+        </property>
     </bean>
     <bean id="dbNodeDaoServiceTxnRegistration" class="org.alfresco.repo.transaction.TransactionalDaoInterceptor" >

View File

@@ -223,7 +223,7 @@ public class UniqueChildNamePatch extends AbstractPatch
                     {
                         // Try removing the secondary parent associations
                         writeLine("   Removing secondary parents of node " + childNode.getId());
-                        Collection<ChildAssoc> parentAssocs = childNode.getParentAssocs();
+                        Collection<ChildAssoc> parentAssocs = nodeDaoService.getParentAssocs(childNode);
                         for (ChildAssoc parentAssoc : parentAssocs)
                         {
                             if (!parentAssoc.getIsPrimary())
@@ -241,7 +241,7 @@ public class UniqueChildNamePatch extends AbstractPatch
                 else if (duplicateNumber > 10)
                 {
                     // after 10 attempts, we have to admit defeat.  Perhaps there is a larger issue.
-                    Collection<ChildAssoc> parentAssocs = childNode.getParentAssocs();
+                    Collection<ChildAssoc> parentAssocs = nodeDaoService.getParentAssocs(childNode);
                     write("   Unable to set child name '" + usedChildName + "' for node " + childNode.getId());
                     writeLine("   with parent associations:");
                     for (ChildAssoc parentAssoc : parentAssocs)

View File

@@ -30,15 +30,17 @@ import java.util.Collection;
 import javax.transaction.Status;
 import javax.transaction.UserTransaction;
 
+import junit.framework.TestCase;
 import net.sf.ehcache.CacheManager;
 
+import org.alfresco.error.ExceptionStackUtil;
+import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
 import org.alfresco.service.ServiceRegistry;
 import org.alfresco.service.transaction.TransactionService;
 import org.alfresco.util.ApplicationContextHelper;
 import org.springframework.context.ApplicationContext;
 import org.springframework.context.support.ClassPathXmlApplicationContext;
-import junit.framework.TestCase;
+import org.springframework.dao.ConcurrencyFailureException;
 
 /**
  * @see org.alfresco.repo.cache.EhCacheAdapter
@@ -133,7 +135,7 @@ public class CacheTest extends TestCase
         assertNull("Non-transactional remove didn't go to backing cache", backingCache.get(key));
     }
     
-    public void testTransactionalCacheWithTxn() throws Throwable
+    public void testTransactionalCacheWithSingleTxn() throws Throwable
     {
         String newGlobalOne = "new_global_one";
         String newGlobalTwo = "new_global_two";
@@ -253,4 +255,269 @@ public class CacheTest extends TestCase
                 "   transaction: " + timeTxn/((long)count) + " ns\\count");
         }
     }
+
+    /**
+     * Starts off with a <tt>null</tt> in the backing cache and adds a value to the
+     * transactional cache.  There should be no problem with this.
+     */
+    public void testNullValue() throws Exception
+    {
+        TransactionService transactionService = serviceRegistry.getTransactionService();
+        UserTransaction txn = transactionService.getUserTransaction();
+        try
+        {
+            txn.begin();
+            backingCache.put("A", null);
+            transactionalCache.put("A", "AAA");
+            txn.commit();
+        }
+        finally
+        {
+            try { txn.rollback(); } catch (Throwable ee) {}
+        }
+    }
+
+    private static final Class[] CONCURRENCY_EXCEPTIONS = {ConcurrencyFailureException.class};
+    /** Execute the callback and ensure that the concurrent condition is detected */
+    private void executeAndCheck(RetryingTransactionCallback callback) throws Exception
+    {
+        TransactionService transactionService = serviceRegistry.getTransactionService();
+        UserTransaction txn = transactionService.getUserTransaction();
+        try
+        {
+            txn.begin();
+            callback.execute();
+            txn.commit();
+            fail("Failed to detect concurrent modification");
+        }
+        catch (Throwable e)
+        {
+            assertNotNull(
+                    "Expected a concurrency failure",
+                    ExceptionStackUtil.getCause(e, CONCURRENCY_EXCEPTIONS));
+        }
+        finally
+        {
+            try { txn.rollback(); } catch (Throwable ee) {}
+        }
+    }
+
+    /**
+     * <ul>
+     *   <li>Add to the transaction cache</li>
+     *   <li>Add to the backing cache</li>
+     *   <li>Commit</li>
+     * </ul>
+     */
+    public void testConcurrentAddAgainstAdd() throws Exception
+    {
+        RetryingTransactionCallback callback = new RetryingTransactionCallback()
+        {
+            public Object execute() throws Throwable
+            {
+                transactionalCache.put("A", "AAA");
+                backingCache.put("A", "aaa");
+                return null;
+            }
+        };
+        executeAndCheck(callback);
+    }
+
+    /**
+     * <ul>
+     *   <li>Add to the transaction cache</li>
+     *   <li>Clear the backing cache</li>
+     *   <li>Commit</li>
+     * </ul>
+     */
+    public void testConcurrentAddAgainstClear() throws Exception
+    {
+        RetryingTransactionCallback callback = new RetryingTransactionCallback()
+        {
+            public Object execute() throws Throwable
+            {
+                transactionalCache.put("A", "AAA");
+                backingCache.clear();
+                return null;
+            }
+        };
+        executeAndCheck(callback);
+    }
+
+    /**
+     * <ul>
+     *   <li>Add to the backing cache</li>
+     *   <li>Update the transactional cache</li>
+     *   <li>Update the backing cache</li>
+     *   <li>Commit</li>
+     * </ul>
+     */
+    public void testConcurrentUpdateAgainstUpdate() throws Exception
+    {
+        RetryingTransactionCallback callback = new RetryingTransactionCallback()
+        {
+            public Object execute() throws Throwable
+            {
+                backingCache.put("A", "aaa1");
+                transactionalCache.put("A", "AAA");
+                backingCache.put("A", "aaa2");
+                return null;
+            }
+        };
+        executeAndCheck(callback);
+    }
+
+    /**
+     * <ul>
+     *   <li>Add to the backing cache</li>
+     *   <li>Update the transactional cache</li>
+     *   <li>Update the backing cache with a <tt>null</tt> value</li>
+     *   <li>Commit</li>
+     * </ul>
+     */
+    public void testConcurrentUpdateAgainstUpdateNull() throws Exception
+    {
+        RetryingTransactionCallback callback = new RetryingTransactionCallback()
+        {
+            public Object execute() throws Throwable
+            {
+                backingCache.put("A", "aaa1");
+                transactionalCache.put("A", "AAA");
+                backingCache.put("A", null);
+                return null;
+            }
+        };
+        executeAndCheck(callback);
+    }
+
+    /**
+     * <ul>
+     *   <li>Add to the backing cache</li>
+     *   <li>Update the transactional cache with a <tt>null</tt> value</li>
+     *   <li>Update the backing cache</li>
+     *   <li>Commit</li>
+     * </ul>
+     */
+    public void testConcurrentUpdateNullAgainstUpdate() throws Exception
+    {
+        RetryingTransactionCallback callback = new RetryingTransactionCallback()
+        {
+            public Object execute() throws Throwable
+            {
+                backingCache.put("A", "aaa1");
+                transactionalCache.put("A", null);
+                backingCache.put("A", "aaa2");
+                return null;
+            }
+        };
+        executeAndCheck(callback);
+    }
+
+    /**
+     * <ul>
+     *   <li>Add to the backing cache</li>
+     *   <li>Update the transactional cache</li>
+     *   <li>Remove from the backing cache</li>
+     *   <li>Commit</li>
+     * </ul>
+     */
+    public void testConcurrentUpdateAgainstRemove() throws Exception
+    {
+        RetryingTransactionCallback callback = new RetryingTransactionCallback()
+        {
+            public Object execute() throws Throwable
+            {
+                backingCache.put("A", "aaa1");
+                transactionalCache.put("A", "AAA");
+                backingCache.remove("A");
+                return null;
+            }
+        };
+        executeAndCheck(callback);
+    }
+
+    /**
+     * <ul>
+     *   <li>Add to the backing cache</li>
+     *   <li>Update the transactional cache</li>
+     *   <li>Clear the backing cache</li>
+     *   <li>Commit</li>
+     * </ul>
+     */
+    public void testConcurrentUpdateAgainstClear() throws Exception
+    {
+        RetryingTransactionCallback callback = new RetryingTransactionCallback()
+        {
+            public Object execute() throws Throwable
+            {
+                backingCache.put("A", "aaa1");
+                transactionalCache.put("A", "AAA");
+                backingCache.clear();
+                return null;
+            }
+        };
+        executeAndCheck(callback);
+    }
+
+    /**
+     * <ul>
+     *   <li>Add to the backing cache</li>
+     *   <li>Remove from the transactional cache</li>
+     *   <li>Add to the backing cache</li>
+     *   <li>Commit</li>
+     * </ul>
+     */
+    public void testConcurrentRemoveAgainstUpdate() throws Exception
+    {
+        RetryingTransactionCallback callback = new RetryingTransactionCallback()
+        {
+            public Object execute() throws Throwable
+            {
+                backingCache.put("A", "aaa1");
+                transactionalCache.remove("A");
+                backingCache.put("A", "aaa2");
+                return null;
+            }
+        };
+        executeAndCheck(callback);
+    }
+
+    /**
+     * <ul>
+     *   <li>Add to the backing cache</li>
+     *   <li>Remove from the transactional cache</li>
+     *   <li>Remove from the backing cache</li>
+     *   <li>Commit</li>
+     * </ul>
+     */
+    public void testConcurrentRemoveAgainstRemove() throws Exception
+    {
+        RetryingTransactionCallback callback = new RetryingTransactionCallback()
+        {
+            public Object execute() throws Throwable
+            {
+                backingCache.put("A", "aaa1");
+                transactionalCache.remove("A");
+                backingCache.remove("A");
+                return null;
+            }
+        };
+        executeAndCheck(callback);
+    }
+
+    /**
+     * <ul>
+     *   <li>Add to the backing cache</li>
+     *   <li>Remove from the transactional cache</li>
+     *   <li>Clear the backing cache</li>
+     *   <li>Commit</li>
+     * </ul>
+     */
+    public void testConcurrentRemoveAgainstClear() throws Exception
+    {
+        RetryingTransactionCallback callback = new RetryingTransactionCallback()
+        {
+            public Object execute() throws Throwable
+            {
+                backingCache.put("A", "aaa1");
+                transactionalCache.remove("A");
+                backingCache.clear();
+                return null;
+            }
+        };
+        executeAndCheck(callback);
+    }
 }

View File

@@ -34,19 +34,48 @@ import java.util.Collection;
  * <tt>Serializable</tt> for both keys and values ensures that the underlying
  * cache implementations can support both clustered caches as well as persistent
  * caches.
+ * <p>
+ * All implementations must support <tt>null</tt> values.  It therefore follows
+ * that
+ * <pre>
+ *    (simpleCache.contains(key) == true) does not imply (simpleCache.get(key) != null)
+ * </pre>
+ * but
+ * <pre>
+ *    (simpleCache.contains(key) == false) implies (simpleCache.get(key) == null)
+ * </pre>
  * 
  * @author Derek Hulley
  */
 public interface SimpleCache<K extends Serializable, V extends Object>
 {
+    /**
+     * @param key       the cache key to check up on
+     * @return          Returns <tt>true</tt> if there is a cache entry,
+     *                  regardless of whether the value itself is <tt>null</tt>
+     */
     public boolean contains(K key);
     
     public Collection<K> getKeys();
     
+    /**
+     * @param key       the key
+     * @return          Returns the value associated with the key.  It will be <tt>null</tt>
+     *                  if the value is <tt>null</tt> or if the cache doesn't have an entry.
+     */
     public V get(K key);
     
+    /**
+     * @param key       the key against which to store the value
+     * @param value     the value to store.  <tt>null</tt> is allowed.
+     */
     public void put(K key, V value);
     
+    /**
+     * Removes the cache entry whether or not the value stored against it is <tt>null</tt>.
+     * 
+     * @param key       the key value to remove
+     */
     public void remove(K key);
     
     public void clear();
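
A short illustration of this contract (hypothetical key and values, valid for any
SimpleCache implementation):

    SimpleCache<String, String> cache = ...;   // any implementation
    cache.put("k", null);                      // storing null is allowed
    assert cache.contains("k");                // the entry exists...
    assert cache.get("k") == null;             // ...but its value is null
    cache.remove("k");                         // removes the entry itself
    assert !cache.contains("k");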

View File

@@ -41,6 +41,7 @@ import org.alfresco.util.EqualsHelper;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.springframework.beans.factory.InitializingBean;
+import org.springframework.dao.ConcurrencyFailureException;
 import org.springframework.util.Assert;
 
 /**
@@ -54,12 +55,13 @@ import org.springframework.util.Assert;
  * directly with the shared cache when no transaction is present.  There is
  * virtually no overhead when running out-of-transaction.
  * <p>
- * 3 caches are maintained.
- * <ul>
- *   <li>Shared backing cache that should only be accessed by instances of this class</li>
- *   <li>Lazily created cache of updates made during the transaction</li>
- *   <li>Lazily created cache of deletions made during the transaction</li>
- * </ul>
+ * The first phase of the commit ensures that any values written to the cache in the
+ * current transaction are not already superseded by values in the shared cache.  In
+ * this case, the transaction is failed for concurrency reasons and will have to retry.
+ * The second phase occurs post-commit.  We are sure that the transaction committed
+ * correctly, but things may have changed in the cache between the commit and post-commit.
+ * If this is the case, then the offending values are merely removed from the shared
+ * cache.
  * <p>
  * When the cache is {@link #clear() cleared}, a flag is set on the transaction.
  * The shared cache, instead of being cleared itself, is just ignored for the remainder
@@ -69,7 +71,8 @@ import org.springframework.util.Assert;
  * Because there is a limited amount of space available to the in-transaction caches,
  * when either of these becomes full, the cleared flag is set.  This ensures that
  * the shared cache will not have stale data in the event of the transaction-local
- * caches dropping items.
+ * caches dropping items.  It is therefore important to size the transactional caches
+ * correctly.
  * 
 * @author Derek Hulley
 */
@@ -77,15 +80,15 @@ public class TransactionalCache<K extends Serializable, V extends Object>
     implements SimpleCache<K, V>, TransactionListener, InitializingBean
 {
     private static final String RESOURCE_KEY_TXN_DATA = "TransactionalCache.TxnData";
-    private static final String VALUE_DELETE = "TransactionalCache.DeleteMarker";
     
     private static Log logger = LogFactory.getLog(TransactionalCache.class);
+    private static boolean isDebugEnabled = logger.isDebugEnabled();
     
     /** a name used to uniquely identify the transactional caches */
     private String name;
     
     /** the shared cache that will get updated after commits */
-    private SimpleCache<Serializable, Object> sharedCache;
+    private SimpleCache<Serializable, V> sharedCache;
     
     /** the manager to control Ehcache caches */
     private CacheManager cacheManager;
@@ -96,6 +99,13 @@ public class TransactionalCache<K extends Serializable, V extends Object>
     /** a unique string identifying this instance when binding resources */
     private String resourceKeyTxnData;
     
+    /**
+     * Public constructor.
+     */
+    public TransactionalCache()
+    {
+    }
+    
     /**
      * @see #setName(String)
      */
@@ -133,7 +143,7 @@ public class TransactionalCache<K extends Serializable, V extends Object>
      * 
      * @param sharedCache
      */
-    public void setSharedCache(SimpleCache<Serializable, Object> sharedCache)
+    public void setSharedCache(SimpleCache<Serializable, V> sharedCache)
     {
         this.sharedCache = sharedCache;
     }
@@ -284,13 +294,13 @@ public class TransactionalCache<K extends Serializable, V extends Object>
             TransactionData txnData = getTransactionData();
             try
             {
-                if (!txnData.isClearOn)   // deletions cache is still reliable
+                if (!txnData.isClearOn)   // deletions cache only useful before a clear
                 {
                     // check to see if the key is present in the transaction's removed items
                     if (txnData.removedItemsCache.get(key) != null)
                     {
                         // it has been removed in this transaction
-                        if (logger.isDebugEnabled())
+                        if (isDebugEnabled)
                         {
                             logger.debug("get returning null - item has been removed from transactional cache: \n" +
                                     "   cache: " + this + "\n" +
@@ -304,15 +314,17 @@ public class TransactionalCache<K extends Serializable, V extends Object>
                 Element element = txnData.updatedItemsCache.get(key);
                 if (element != null)
                 {
+                    CacheBucket<V> bucket = (CacheBucket<V>) element.getValue();
+                    V value = bucket.getValue();
                     // element was found in transaction-specific updates/additions
-                    if (logger.isDebugEnabled())
+                    if (isDebugEnabled)
                     {
                         logger.debug("Found item in transactional cache: \n" +
                                 "   cache: " + this + "\n" +
                                 "   key: " + key + "\n" +
-                                "   value: " + element.getValue());
+                                "   value: " + value);
                     }
-                    return (V) element.getValue();
+                    return value;
                 }
             }
             catch (CacheException e)
@@ -325,19 +337,20 @@ public class TransactionalCache<K extends Serializable, V extends Object>
             // no value found - must we ignore the shared cache?
             if (!ignoreSharedCache)
             {
+                V value = sharedCache.get(key);
                 // go to the shared cache
-                if (logger.isDebugEnabled())
+                if (isDebugEnabled)
                 {
                     logger.debug("No value found in transaction - fetching instance from shared cache: \n" +
                             "   cache: " + this + "\n" +
                             "   key: " + key + "\n" +
-                            "   value: " + sharedCache.get(key));
+                            "   value: " + value);
                 }
-                return (V) sharedCache.get(key);
+                return value;
             }
             else        // ignore shared cache
             {
-                if (logger.isDebugEnabled())
+                if (isDebugEnabled)
                 {
                     logger.debug("No value found in transaction and ignoring shared cache: \n" +
                             "   cache: " + this + "\n" +
@@ -361,7 +374,7 @@ public class TransactionalCache<K extends Serializable, V extends Object>
             // no transaction
             sharedCache.put(key, value);
             // done
-            if (logger.isDebugEnabled())
+            if (isDebugEnabled)
             {
                 logger.debug("No transaction - adding item direct to shared cache: \n" +
                         "   cache: " + this + "\n" +
@@ -381,12 +394,24 @@ public class TransactionalCache<K extends Serializable, V extends Object>
                 // shared cache needs to be ignored for the rest of the transaction.
                 txnData.isClearOn = true;
             }
-            Element element = new Element(key, value);
+            CacheBucket<V> bucket = null;
+            if (sharedCache.contains(key))
+            {
+                V existingValue = sharedCache.get(key);
+                // The value needs to be kept for later checks
+                bucket = new UpdateCacheBucket<V>(existingValue, value);
+            }
+            else
+            {
+                // The value didn't exist before
+                bucket = new NewCacheBucket<V>(value);
+            }
+            Element element = new Element(key, bucket);
             txnData.updatedItemsCache.put(element);
             // remove the item from the removed cache, if present
             txnData.removedItemsCache.remove(key);
             // done
-            if (logger.isDebugEnabled())
+            if (isDebugEnabled)
             {
                 logger.debug("In transaction - adding item direct to transactional update cache: \n" +
                         "   cache: " + this + "\n" +
@@ -410,7 +435,7 @@ public class TransactionalCache<K extends Serializable, V extends Object>
             // no transaction
             sharedCache.remove(key);
             // done
-            if (logger.isDebugEnabled())
+            if (isDebugEnabled)
            {
                 logger.debug("No transaction - removing item from shared cache: \n" +
                         "   cache: " + this + "\n" +
@@ -423,7 +448,7 @@ public class TransactionalCache<K extends Serializable, V extends Object>
             // is the shared cache going to be cleared?
             if (txnData.isClearOn)
             {
-                // don't store removals
+                // don't store removals if we're just going to clear it all out later
             }
             else
             {
@@ -434,7 +459,7 @@ public class TransactionalCache<K extends Serializable, V extends Object>
                     // data by clearing the shared cache after the transaction.  Also, the
                     // shared cache needs to be ignored for the rest of the transaction.
                     txnData.isClearOn = true;
-                    if (logger.isDebugEnabled())
+                    if (isDebugEnabled)
                     {
                         logger.debug("In transaction - removal cache reach capacity reached: \n" +
                                 "   cache: " + this + "\n" +
@@ -443,15 +468,24 @@ public class TransactionalCache<K extends Serializable, V extends Object>
                 }
                 else
                 {
-                    // add it from the removed cache for this txn
-                    Element element = new Element(key, VALUE_DELETE);
-                    txnData.removedItemsCache.put(element);
+                    V existingValue = sharedCache.get(key);
+                    if (existingValue == null)
+                    {
+                        // There is no point doing a remove for a value that doesn't exist
+                    }
+                    else
+                    {
+                        // Create a bucket to remove the value from the shared cache
+                        CacheBucket<V> removeBucket = new RemoveCacheBucket<V>(existingValue);
+                        Element element = new Element(key, removeBucket);
+                        txnData.removedItemsCache.put(element);
+                    }
                 }
             }
             // remove the item from the udpated cache, if present
             txnData.updatedItemsCache.remove(key);
             // done
-            if (logger.isDebugEnabled())
+            if (isDebugEnabled)
             {
                 logger.debug("In transaction - adding item direct to transactional removed cache: \n" +
                         "   cache: " + this + "\n" +
@@ -468,7 +502,7 @@ public class TransactionalCache<K extends Serializable, V extends Object>
         // clear local caches
         if (AlfrescoTransactionSupport.getTransactionId() != null)
         {
-            if (logger.isDebugEnabled())
+            if (isDebugEnabled)
             {
                 logger.debug("In transaction clearing cache: \n" +
                         "   cache: " + this + "\n" +
@@ -485,7 +519,7 @@ public class TransactionalCache<K extends Serializable, V extends Object>
         }
         else            // no transaction
         {
-            if (logger.isDebugEnabled())
+            if (isDebugEnabled)
             {
                 logger.debug("No transaction - clearing shared cache");
             }
@@ -501,12 +535,40 @@ public class TransactionalCache<K extends Serializable, V extends Object>
     {
     }
 
-    public void beforeCommit(boolean readOnly)
+    public void beforeCompletion()
     {
     }
 
-    public void beforeCompletion()
+    /**
+     * Check that the cache used is not out of date with the shared cache.
+     * Note that the clear flag is ignored as this would have removed all
+     * entries from the local caches anyway.
+     */
+    @SuppressWarnings("unchecked")
+    public void beforeCommit(boolean readOnly)
     {
+        if (isDebugEnabled)
+        {
+            logger.debug("Processing pre-commit");
+        }
+        TransactionData txnData = getTransactionData();
+        // Process all the updates or new entries
+        for (Object obj : txnData.updatedItemsCache.getKeys())
+        {
+            Serializable key = (Serializable) obj;
+            Element element = txnData.updatedItemsCache.get(key);
+            CacheBucket<V> bucket = (CacheBucket<V>) element.getValue();
+            bucket.doPreCommit(sharedCache, key);
+        }
+        // Process all the removals
+        for (Object obj : txnData.removedItemsCache.getKeys())
+        {
+            Serializable key = (Serializable) obj;
+            Element element = txnData.removedItemsCache.get(key);
+            CacheBucket<V> bucket = (CacheBucket<V>) element.getValue();
+            bucket.doPreCommit(sharedCache, key);
+        }
     }
/** /**
@@ -515,9 +577,9 @@ public class TransactionalCache<K extends Serializable, V extends Object>
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
public void afterCommit() public void afterCommit()
{ {
if (logger.isDebugEnabled()) if (isDebugEnabled)
{ {
logger.debug("Processing end of transaction commit"); logger.debug("Processing post-commit");
} }
TransactionData txnData = getTransactionData(); TransactionData txnData = getTransactionData();
@@ -527,7 +589,7 @@ public class TransactionalCache<K extends Serializable, V extends Object>
{ {
// clear shared cache // clear shared cache
sharedCache.clear(); sharedCache.clear();
if (logger.isDebugEnabled()) if (isDebugEnabled)
{ {
logger.debug("Clear notification recieved at end of transaction - clearing shared cache"); logger.debug("Clear notification recieved at end of transaction - clearing shared cache");
} }
@@ -542,7 +604,7 @@ public class TransactionalCache<K extends Serializable, V extends Object>
{ {
sharedCache.remove(key); sharedCache.remove(key);
} }
if (logger.isDebugEnabled()) if (isDebugEnabled)
{ {
logger.debug("Removed " + keys.size() + " values from shared cache"); logger.debug("Removed " + keys.size() + " values from shared cache");
} }
@@ -553,9 +615,10 @@ public class TransactionalCache<K extends Serializable, V extends Object>
for (Serializable key : keys) for (Serializable key : keys)
{ {
Element element = txnData.updatedItemsCache.get(key); Element element = txnData.updatedItemsCache.get(key);
sharedCache.put(key, element.getValue()); CacheBucket<V> bucket = (CacheBucket<V>) element.getObjectValue();
sharedCache.put(key, bucket.getValue());
} }
if (logger.isDebugEnabled()) if (isDebugEnabled)
{ {
logger.debug("Added " + keys.size() + " values to shared cache"); logger.debug("Added " + keys.size() + " values to shared cache");
} }
@@ -591,6 +654,206 @@ public class TransactionalCache<K extends Serializable, V extends Object>
         cacheManager.removeCache(txnData.removedItemsCache.getName());
     }
 
+    /**
+     * Interface for the transactional cache buckets.  These hold the actual values along
+     * with some state and behaviour around writing from the in-transaction caches to the
+     * shared.
+     * 
+     * @author Derek Hulley
+     */
+    private interface CacheBucket<BV extends Object> extends Serializable
+    {
+        /**
+         * @return  Returns the bucket's value
+         */
+        BV getValue();
+        /**
+         * Check that any new cache values have not been superseded by new values in the shared cache.
+         * 
+         * @param sharedCache   the cache to check
+         * @param key           the key that the bucket was stored against
+         */
+        void doPreCommit(SimpleCache<Serializable, BV> sharedCache, Serializable key);
+        /**
+         * Flush the current bucket to the shared cache as far as possible.
+         * 
+         * @param sharedCache   the cache to flush to
+         * @param key           the key that the bucket was stored against
+         */
+        public void doPostCommit(SimpleCache<Serializable, BV> sharedCache, Serializable key);
+    }
+
+    /**
+     * A bucket class to hold values for the caches.<br/>
+     * The cache ID and timestamp of the bucket is stored to ensure cache consistency.
+     * 
+     * @author Derek Hulley
+     */
+    private class NewCacheBucket<BV> implements CacheBucket<BV>
+    {
+        private static final long serialVersionUID = -8536386687213957425L;
+        
+        private final BV value;
+        public NewCacheBucket(BV value)
+        {
+            this.value = value;
+        }
+        public BV getValue()
+        {
+            return value;
+        }
+        public void doPreCommit(SimpleCache<Serializable, BV> sharedCache, Serializable key)
+        {
+            if (sharedCache.contains(key))
+            {
+                // The shared cache has a value where there wasn't one before.  More than likely,
+                // this transaction would have used or modified that shared value.
+                throw new ConcurrencyFailureException(
+                        "New cache bucket found new shared cache entry: \n" +
+                        "   Cache: " + name + "\n" +
+                        "   Key:   " + key);
+            }
+        }
+        public void doPostCommit(SimpleCache<Serializable, BV> sharedCache, Serializable key)
+        {
+            if (sharedCache.contains(key))
+            {
+                // The shared cache has a value where there wasn't one before.
+                // Just lose both of them.
+                sharedCache.remove(key);
+            }
+            else
+            {
+                // There is nothing in the shared cache, so add this entry
+                sharedCache.put(key, getValue());
+            }
+        }
+    }
+
+    /**
+     * Data holder to keep track of a cached value's timestamps in order to detect stale
+     * shared cache values.  This bucket assumes the presence of a pre-existing entry in
+     * the shared cache.
+     */
+    private class UpdateCacheBucket<BV> extends NewCacheBucket<BV>
+    {
+        private static final long serialVersionUID = 7885689778259779578L;
+        
+        private final BV originalValue;
+        public UpdateCacheBucket(BV originalValue, BV value)
+        {
+            super(value);
+            this.originalValue = originalValue;
+        }
+        protected BV getOriginalValue()
+        {
+            return originalValue;
+        }
+        public void doPreCommit(SimpleCache<Serializable, BV> sharedCache, Serializable key)
+        {
+            if (sharedCache.contains(key))
+            {
+                BV sharedValue = sharedCache.get(key);
+                if (sharedValue == getOriginalValue())
+                {
+                    // The cache entry is safe for writing
+                }
+                else
+                {
+                    // The shared value has moved on since
+                    throw new ConcurrencyFailureException(
+                            "Update cache bucket found newer shared cache entry: \n" +
+                            "   Cache: " + name + "\n" +
+                            "   Key:   " + key);
+                }
+            }
+            else
+            {
+                // The key was removed from the shared cache.  This instance cannot update the entry.
+                throw new ConcurrencyFailureException(
+                        "Update cache bucket couldn't find entry to update: \n" +
+                        "   Cache: " + name + "\n" +
+                        "   Key:   " + key);
+            }
+        }
+        public void doPostCommit(SimpleCache<Serializable, BV> sharedCache, Serializable key)
+        {
+            BV sharedValue = sharedCache.get(key);
+            if (sharedValue != null)
+            {
+                if (sharedValue == getOriginalValue())
+                {
+                    // The cache entry is safe for writing
+                    sharedCache.put(key, getValue());
+                }
+                else
+                {
+                    // The shared value has moved on since
+                    sharedCache.remove(key);
+                }
+            }
+            else
+            {
+                // The shared cache no longer has a value
+            }
+        }
+    }
+
+    /**
+     * Data holder to keep track of cache removals.  This bucket assumes the previous existence
+     * of an entry in the shared cache.
+     */
+    private class RemoveCacheBucket<BV> extends UpdateCacheBucket<BV>
+    {
+        private static final long serialVersionUID = -7736719065158540252L;
+        
+        public RemoveCacheBucket(BV originalValue)
+        {
+            super(originalValue, null);
+        }
+        public void doPreCommit(SimpleCache<Serializable, BV> sharedCache, Serializable key)
+        {
+            BV sharedValue = sharedCache.get(key);
+            if (sharedValue != null)
+            {
+                if (sharedValue == getOriginalValue())
+                {
+                    // The shared cache entry is the same as the one we were flagged to remove
+                }
+                else
+                {
+                    // The shared value has moved on since
+                    throw new ConcurrencyFailureException(
+                            "Remove cache bucket found newer shared cache entry: \n" +
+                            "   Cache: " + name + "\n" +
+                            "   Key:   " + key);
+                }
+            }
+            else
+            {
+                // The shared cache no longer has the value.  It is possible that the removal of the
+                // item is something that would have affected the behaviour of the current transaction.
+                throw new ConcurrencyFailureException(
+                        "Remove cache bucket found new shared cache entry: \n" +
+                        "   Cache: " + name + "\n" +
+                        "   Key:   " + key);
+            }
+        }
+        public void doPostCommit(SimpleCache<Serializable, BV> sharedCache, Serializable key)
+        {
+            if (sharedCache.contains(key))
+            {
+                // We remove the shared entry whether it has moved on or not
+                sharedCache.remove(key);
+            }
+            else
+            {
+                // The shared cache no longer has a value
+            }
+        }
+    }
+
     /** Data holder to bind data to the transaction */
     private class TransactionData
     {
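
One subtlety worth noting: the bucket checks above compare object identity (==)
rather than equals(). An illustration of the consequence (hypothetical values):

    sharedCache.put("A", "aaa");               // pre-existing shared entry
    transactionalCache.put("A", "AAA");        // UpdateCacheBucket captures "aaa" by reference
    sharedCache.put("A", new String("aaa"));   // an equal value, but a different instance
    // At commit time doPreCommit sees sharedValue != originalValue and throws
    // ConcurrencyFailureException, even though the two values are equals().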

View File

@@ -71,7 +71,7 @@ public interface Node
     public Set<QName> getAspects();
     
-    public Collection<ChildAssoc> getParentAssocs();
+//    public Collection<ChildAssoc> getParentAssocs();
     
     public Map<QName, PropertyValue> getProperties();

View File

@@ -71,13 +71,13 @@ public class ChildAssocImpl implements ChildAssoc, Serializable
         // add the forward associations
         this.setParent(parentNode);
         this.setChild(childNode);
-        childNode.getParentAssocs().add(this);
+//        childNode.getParentAssocs().add(this);
     }
 
     public void removeAssociation()
     {
-        // maintain inverse assoc from child node to this instance
-        this.getChild().getParentAssocs().remove(this);
+//        // maintain inverse assoc from child node to this instance
+//        this.getChild().getParentAssocs().remove(this);
     }
 
     public ChildAssociationRef getChildAssocRef()

View File

@@ -45,6 +45,7 @@ import org.alfresco.repo.domain.Server;
 import org.alfresco.repo.domain.Store;
 import org.alfresco.repo.domain.StoreKey;
 import org.alfresco.repo.domain.Transaction;
+import org.alfresco.repo.node.db.hibernate.HibernateNodeDaoServiceImpl;
 import org.alfresco.repo.transaction.AlfrescoTransactionSupport;
 import org.alfresco.repo.transaction.TransactionListenerAdapter;
 import org.alfresco.service.cmr.dictionary.DataTypeDefinition;
@@ -55,9 +56,11 @@ import org.alfresco.service.transaction.TransactionService;
 import org.alfresco.util.BaseSpringTest;
 import org.alfresco.util.GUID;
 import org.hibernate.CacheMode;
+import org.hibernate.Query;
 import org.hibernate.Session;
 import org.hibernate.exception.ConstraintViolationException;
 import org.hibernate.exception.GenericJDBCException;
+import org.springframework.orm.hibernate3.HibernateCallback;
 
 /**
  * Test persistence and retrieval of Hibernate-specific implementations of the
@@ -292,7 +295,7 @@ public class HibernateNodeTest extends BaseSpringTest
         assertNotNull("Node not found", containerNode);
         // check that we can traverse the association from the child
-        Collection<ChildAssoc> parentAssocs = contentNode.getParentAssocs();
+        Collection<ChildAssoc> parentAssocs = getParentAssocs(contentNode);
         assertEquals("Expected exactly 2 parent assocs", 2, parentAssocs.size());
         parentAssocs = new HashSet<ChildAssoc>(parentAssocs);
         for (ChildAssoc assoc : parentAssocs)
@@ -304,10 +307,25 @@ public class HibernateNodeTest extends BaseSpringTest
         }
         
         // check that the child now has zero parents
-        parentAssocs = contentNode.getParentAssocs();
+        parentAssocs = getParentAssocs(contentNode);
         assertEquals("Expected exactly 0 parent assocs", 0, parentAssocs.size());
     }
 
+    @SuppressWarnings("unchecked")
+    private List<ChildAssoc> getParentAssocs(final Node childNode)
+    {
+        Query query = getSession()
+                .createQuery(
+                        "select assoc from org.alfresco.repo.domain.hibernate.ChildAssocImpl as assoc " +
+                        "where " +
+                        "   assoc.child.id = :childId " +
+                        "order by " +
+                        "   assoc.index, assoc.id")
+                .setLong("childId", childNode.getId());
+        List<ChildAssoc> parentAssocs = query.list();
+        return parentAssocs;
+    }
+
     /**
      * Allows tracing of L2 cache
      */

View File

@@ -91,6 +91,7 @@
             <element column="qname" type="QName" length="200"/>
         </set>
         <!-- inverse assoc to parent childassocs -->
+        <!--
         <set
                 name="parentAssocs"
                 inverse="true"
@@ -101,6 +102,7 @@
             <key column="child_node_id" />
             <one-to-many class="org.alfresco.repo.domain.hibernate.ChildAssocImpl" />
         </set>
+        -->
     </class>
     
     <class
@@ -236,6 +238,18 @@
             assoc.id = :childAssocId
     </query>
     
+    <query name="node.GetParentAssocs">
+        select
+            assoc
+        from
+            org.alfresco.repo.domain.hibernate.ChildAssocImpl as assoc
+        where
+            assoc.child.id = :childId
+        order by
+            assoc.index,
+            assoc.id
+    </query>
+    
     <query name="node.GetPrimaryChildNodeStatuses">
         select
             status
@@ -283,9 +297,9 @@
             assoc.id
     </query>
     
-    <query name="node.GetChildAssocByShortName">
+    <query name="node.GetChildAssocIdByShortName">
         select
-            assoc
+            assoc.id
         from
             org.alfresco.repo.domain.hibernate.ChildAssocImpl as assoc
         where
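
The new node.GetParentAssocs named query is what backs the DAO's cache-safe parent
lookup; a sketch of how it is executed through the Hibernate session API (variable
names are hypothetical):

    Query query = session.getNamedQuery("node.GetParentAssocs")
                         .setLong("childId", childNode.getId());
    List<ChildAssoc> parentAssocs = (List<ChildAssoc>) query.list();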

View File

@@ -217,18 +217,18 @@ public interface NodeDaoService
     /**
      * Finds the association between the node's primary parent and the node itself
      * 
-     * @param node the child node
+     * @param childNode the child node
      * @return Returns the primary <code>ChildAssoc</code> instance where the given node is the child.
      *      The return value could be null for a root node - but ONLY a root node
      */
-    public ChildAssoc getPrimaryParentAssoc(Node node);
+    public ChildAssoc getPrimaryParentAssoc(Node childNode);
     
     /**
-     * @param node the child node
+     * Get all parent associations for the node.  This method includes a cache safety check.
+     * @param childNode the child node
      * @return Returns all parent associations for the node.
      */
-    public Collection<ChildAssoc> getParentAssocs(Node node);
+    public Collection<ChildAssoc> getParentAssocs(Node childNode);
     
     /**
      * @return Returns the persisted and filled association
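
Client code now obtains parent associations through the DAO rather than through the
removed Node.getParentAssocs() collection, as in the UniqueChildNamePatch change
above:

    Collection<ChildAssoc> parentAssocs = nodeDaoService.getParentAssocs(childNode);
    for (ChildAssoc parentAssoc : parentAssocs)
    {
        if (!parentAssoc.getIsPrimary())
        {
            // e.g. act only on the secondary parents
        }
    }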

View File

@@ -37,6 +37,7 @@ import java.util.zip.CRC32;
 import org.alfresco.error.AlfrescoRuntimeException;
 import org.alfresco.model.ContentModel;
+import org.alfresco.repo.cache.SimpleCache;
 import org.alfresco.repo.domain.ChildAssoc;
 import org.alfresco.repo.domain.Node;
 import org.alfresco.repo.domain.NodeAssoc;
@@ -92,10 +93,11 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
     private static final String QUERY_GET_PRIMARY_CHILD_NODE_STATUSES = "node.GetPrimaryChildNodeStatuses";
     private static final String QUERY_GET_CHILD_ASSOCS = "node.GetChildAssocs";
     private static final String QUERY_GET_CHILD_ASSOCS_BY_ALL = "node.GetChildAssocsByAll";
-    private static final String QUERY_GET_CHILD_ASSOC_BY_NAME = "node.GetChildAssocByShortName";
+    private static final String QUERY_GET_CHILD_ASSOC_ID_BY_NAME = "node.GetChildAssocIdByShortName";
     private static final String QUERY_GET_CHILD_ASSOC_BY_TYPE_AND_NAME = "node.GetChildAssocByTypeAndName";
     private static final String QUERY_GET_CHILD_ASSOC_REFS = "node.GetChildAssocRefs";
     private static final String QUERY_GET_CHILD_ASSOC_REFS_BY_QNAME = "node.GetChildAssocRefsByQName";
+    private static final String QUERY_GET_PARENT_ASSOCS = "node.GetParentAssocs";
     private static final String QUERY_GET_NODE_ASSOC = "node.GetNodeAssoc";
     private static final String QUERY_GET_NODE_ASSOCS_TO_AND_FROM = "node.GetNodeAssocsToAndFrom";
     private static final String QUERY_GET_TARGET_ASSOCS = "node.GetTargetAssocs";
@@ -104,7 +106,13 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
     private static final String QUERY_GET_SERVER_BY_IPADDRESS = "server.getServerByIpAddress";
     
     private static Log logger = LogFactory.getLog(HibernateNodeDaoServiceImpl.class);
-    private static Log loggerChildAssoc = LogFactory.getLog(HibernateNodeDaoServiceImpl.class.getName() + ".ChildAssoc");
+    /** Log to trace parent association caching: <b>classname + .ParentAssocsCache</b> */
+    private static Log loggerParentAssocsCache = LogFactory.getLog(HibernateNodeDaoServiceImpl.class.getName() + ".ParentAssocsCache");
+    
+    /** A cache for more performant lookups of the parent associations */
+    private SimpleCache<Long, Set<Long>> parentAssocsCache;
+    
+    private boolean isDebugEnabled = logger.isDebugEnabled();
+    private boolean isDebugParentAssocCacheEnabled = loggerParentAssocsCache.isDebugEnabled();
     
     /** a uuid identifying this unique instance */
     private final String uuid;
@@ -158,6 +166,16 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
         return uuid.hashCode();
     }
     
+    /**
+     * Set the transaction-aware cache to store parent associations by child node id
+     * 
+     * @param parentAssocsCache     the cache
+     */
+    public void setParentAssocsCache(SimpleCache<Long, Set<Long>> parentAssocsCache)
+    {
+        this.parentAssocsCache = parentAssocsCache;
+    }
+    
     /**
      * Gets/creates the <b>server</b> instance to use for the life of this instance
     */
@@ -232,7 +250,7 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
             // bind the id
             AlfrescoTransactionSupport.bindResource(RESOURCE_KEY_TRANSACTION_ID, txnId);
-            if (logger.isDebugEnabled())
+            if (isDebugEnabled)
             {
                 if (!changeTxnIdSet.add(changeTxnId))
                 {
@@ -245,7 +263,7 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
         else
         {
             transaction = (Transaction) getHibernateTemplate().get(TransactionImpl.class, txnId);
-            if (logger.isDebugEnabled())
+            if (isDebugEnabled)
             {
                 logger.debug("Using existing transaction: " + transaction);
             }
@@ -476,19 +494,19 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
     private void deleteNodeInternal(Node node, boolean cascade, Set<Long> deletedChildAssocIds)
     {
         // delete all parent assocs
-        if (logger.isDebugEnabled())
+        if (isDebugEnabled)
         {
             logger.debug("Deleting parent assocs of node " + node.getId());
         }
-        Collection<ChildAssoc> parentAssocs = node.getParentAssocs();
+        Collection<ChildAssoc> parentAssocs = getParentAssocsInternal(node);
         parentAssocs = new ArrayList<ChildAssoc>(parentAssocs);
         for (ChildAssoc assoc : parentAssocs)
         {
             deleteChildAssocInternal(assoc, false, deletedChildAssocIds);   // we don't cascade upwards
         }
         // delete all child assocs
-        if (logger.isDebugEnabled())
+        if (isDebugEnabled)
         {
             logger.debug("Deleting child assocs of node " + node.getId());
         }
@@ -499,7 +517,7 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
             deleteChildAssocInternal(assoc, cascade, deletedChildAssocIds);   // potentially cascade downwards
         }
         // delete all node associations to and from
-        if (logger.isDebugEnabled())
+        if (isDebugEnabled)
         {
             logger.debug("Deleting source and target assocs of node " + node.getId());
         }
@@ -514,9 +532,16 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
         nodeStatus.setNode(null);
         nodeStatus.getTransaction().setChangeTxnId(AlfrescoTransactionSupport.getTransactionId());
         // finally delete the node
+        Long nodeId = node.getId();
         getHibernateTemplate().delete(node);
-//        // flush to ensure constraints can't be violated
-//        getSession().flush();
+        // Remove node from cache
+        parentAssocsCache.remove(nodeId);
+        if (isDebugParentAssocCacheEnabled)
+        {
+            loggerParentAssocsCache.debug("\n" +
+                    "Parent associations cache - Removing entry: \n" +
+                    "   Node:   " + nodeId);
+        }
         // done
     }
@@ -550,20 +575,6 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
             QName assocTypeQName,
             QName qname)
     {
-        /*
-         * This initial child association creation will fail IFF there is already
-         * an association of the given type and name between the two nodes.  For new association
-         * creation, this can only occur if two transactions attempt to create a secondary
-         * child association between the same two nodes.  As this is unlikely, it is
-         * appropriate to just throw a runtime exception and let the second transaction
-         * fail.
-         * 
-         * We don't need to flush the session beforehand as there won't be any deletions
-         * of the assocation pending.  The association deletes, on the other hand, have
-         * to flush early in order to ensure that the database unique index isn't violated
-         * if the association is recreated subsequently.
-         */
         // assign a random name to the node
         String randomName = GUID.generate();
@@ -576,7 +587,24 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
         // maintain inverse sets
         assoc.buildAssociation(parentNode, childNode);
         // persist it
-        getHibernateTemplate().save(assoc);
+        Long assocId = (Long) getHibernateTemplate().save(assoc);
+        // Add it to the cache
+        Long childNodeId = childNode.getId();
+        Set<Long> oldParentAssocIds = parentAssocsCache.get(childNode.getId());
+        if (oldParentAssocIds != null)
+        {
+            Set<Long> newParentAssocIds = new HashSet<Long>(oldParentAssocIds);
+            newParentAssocIds.add(assocId);
+            parentAssocsCache.put(childNodeId, newParentAssocIds);
+            if (isDebugParentAssocCacheEnabled)
+            {
+                loggerParentAssocsCache.debug("\n" +
+                        "Parent associations cache - Updating entry: \n" +
+                        "   Node:   " + childNodeId + "\n" +
+                        "   Before: " + oldParentAssocIds + "\n" +
+                        "   After:  " + newParentAssocIds);
+            }
+        }
         // done
         return assoc;
     }
@@ -622,7 +650,7 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
          * transactions modifying the unique index without this transaction's knowledge.
          */
         final Node parentNode = childAssoc.getParent();
-//        if (loggerChildAssoc.isDebugEnabled())
+//        if (isDebugEnabled)
 //        {
 //            loggerChildAssoc.debug(
 //                    "Locking parent node for modifying child assoc: \n" +
@@ -649,23 +677,23 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
             public Object doInHibernate(Session session)
             {
                 Query query = session
-                    .getNamedQuery(HibernateNodeDaoServiceImpl.QUERY_GET_CHILD_ASSOC_BY_NAME)
+                    .getNamedQuery(HibernateNodeDaoServiceImpl.QUERY_GET_CHILD_ASSOC_ID_BY_NAME)
                     .setLong("parentId", parentNode.getId())
                     .setParameter("childNodeName", childNameNewShort)
                     .setLong("childNodeNameCrc", childNameNewCrc);
                 return query.uniqueResult();
             }
         };
-        ChildAssoc childAssocExisting = (ChildAssoc) getHibernateTemplate().execute(callback);
-        if (childAssocExisting != null)
+        Long childAssocIdExisting = (Long) getHibernateTemplate().execute(callback);
+        if (childAssocIdExisting != null)
         {
             // There is already an entity
-            if (loggerChildAssoc.isDebugEnabled())
+            if (isDebugEnabled)
             {
-                loggerChildAssoc.debug(
+                logger.debug(
                     "Duplicate child association detected: \n" +
-                    "   Child Assoc: " + childAssoc + "\n" +
-                    "   Existing Child Assoc: " + childName);
+                    "   Existing Child Assoc: " + childAssocIdExisting + "\n" +
+                    "   Assoc Name: " + childName);
             }
             throw new DuplicateChildNodeNameException(
                 parentNode.getNodeRef(),
@@ -678,9 +706,9 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
         childAssoc.setChildNodeNameCrc(childNameNewCrc);
         // Done
-        if (loggerChildAssoc.isDebugEnabled())
+        if (isDebugEnabled)
         {
-            loggerChildAssoc.debug(
+            logger.debug(
                 "Updated child association: \n" +
                 "   Parent: " + parentNode + "\n" +
                 "   Child Assoc: " + childAssoc);
@@ -881,33 +909,50 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
      * @param cascade true to cascade to the child node of the association
      * @param deletedChildAssocIds already-deleted associations
      */
-    private void deleteChildAssocInternal(ChildAssoc assoc, boolean cascade, Set<Long> deletedChildAssocIds)
+    private void deleteChildAssocInternal(final ChildAssoc assoc, boolean cascade, Set<Long> deletedChildAssocIds)
     {
         Long childAssocId = assoc.getId();
         if (deletedChildAssocIds.contains(childAssocId))
         {
-            if (logger.isDebugEnabled())
+            if (isDebugEnabled)
             {
                 logger.debug("Ignoring parent-child association " + assoc.getId());
             }
             return;
         }
-        if (loggerChildAssoc.isDebugEnabled())
+        if (isDebugEnabled)
         {
-            loggerChildAssoc.debug(
+            logger.debug(
                 "Deleting parent-child association " + assoc.getId() +
                 (cascade ? " with" : " without") + " cascade:" +
                 assoc.getParent().getId() + " -> " + assoc.getChild().getId());
         }
         Node childNode = assoc.getChild();
+        Long childNodeId = childNode.getId();
+        // Remove the child association from the cache
+        Set<Long> oldParentAssocIds = parentAssocsCache.get(childNodeId);
+        if (oldParentAssocIds != null)
+        {
+            Set<Long> newParentAssocIds = new HashSet<Long>(oldParentAssocIds);
+            newParentAssocIds.remove(childAssocId);
+            parentAssocsCache.put(childNodeId, newParentAssocIds);
+            if (isDebugParentAssocCacheEnabled)
+            {
+                loggerParentAssocsCache.debug("\n" +
+                        "Parent associations cache - Updating entry: \n" +
+                        "   Node:   " + childNodeId + "\n" +
+                        "   Before: " + oldParentAssocIds + "\n" +
+                        "   After:  " + newParentAssocIds);
+            }
+        }
         // maintain inverse association sets
         assoc.removeAssociation();
         // remove instance
         getHibernateTemplate().delete(assoc);
-        deletedChildAssocIds.add(childAssocId);     // ensure that we don't attempt to delete it twice
+        // ensure that we don't attempt to delete it twice
+        deletedChildAssocIds.add(childAssocId);
         if (cascade && assoc.getIsPrimary())   // the assoc is primary
         {
@@ -919,25 +964,97 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
              * duplicate call will be received to do this
              */
         }
-//        }
-//        // To ensure the validity of the constraint enforcement by the database,
-//        // we have to flush here.  It is possible to delete and recreate the instance
-//        getSession().flush();
+    }
+
+    /**
+     * @param childNode the child node
+     * @return Returns the parent associations without any interpretation
+     */
+    @SuppressWarnings("unchecked")
+    private Collection<ChildAssoc> getParentAssocsInternal(Node childNode)
+    {
+        final Long childNodeId = childNode.getId();
+        List<ChildAssoc> parentAssocs = null;
+        // First check the cache
+        Set<Long> parentAssocIds = parentAssocsCache.get(childNodeId);
+        if (parentAssocIds != null)
+        {
+            if (isDebugParentAssocCacheEnabled)
+            {
+                loggerParentAssocsCache.debug("\n" +
+                        "Parent associations cache - Hit: \n" +
+                        "   Node:   " + childNodeId + "\n" +
+                        "   Assocs: " + parentAssocIds);
+            }
+            parentAssocs = new ArrayList<ChildAssoc>(parentAssocIds.size());
+            for (Long parentAssocId : parentAssocIds)
+            {
+                ChildAssoc parentAssoc = (ChildAssoc) getSession().get(ChildAssocImpl.class, parentAssocId);
+                if (parentAssoc == null)
+                {
+                    // The cache is out of date, so just repopulate it
+                    parentAssocs = null;
+                    break;
+                }
+                else
+                {
+                    parentAssocs.add(parentAssoc);
+                }
+            }
+        }
+        // Did we manage to get the parent assocs?
+        if (parentAssocs == null)
+        {
+            if (isDebugParentAssocCacheEnabled)
+            {
+                loggerParentAssocsCache.debug("\n" +
+                        "Parent associations cache - Miss: \n" +
+                        "   Node:   " + childNodeId + "\n" +
+                        "   Assocs: " + parentAssocIds);
+            }
+            HibernateCallback callback = new HibernateCallback()
+            {
+                public Object doInHibernate(Session session)
+                {
+                    Query query = session
+                        .getNamedQuery(HibernateNodeDaoServiceImpl.QUERY_GET_PARENT_ASSOCS)
+                        .setLong("childId", childNodeId);
+                    return query.list();
+                }
+            };
+            parentAssocs = (List) getHibernateTemplate().execute(callback);
+            // Populate the cache
+            parentAssocIds = new HashSet<Long>(parentAssocs.size());
+            for (ChildAssoc parentAssoc : parentAssocs)
+            {
+                parentAssocIds.add(parentAssoc.getId());
+            }
+            parentAssocsCache.put(childNodeId, parentAssocIds);
+            if (isDebugParentAssocCacheEnabled)
+            {
+                loggerParentAssocsCache.debug("\n" +
+                        "Parent associations cache - Adding entry: \n" +
+                        "   Node:   " + childNodeId + "\n" +
+                        "   Assocs: " + parentAssocIds);
+            }
+        }
+        // Done
+        return parentAssocs;
     }
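Two details of getParentAssocsInternal are worth noting. The cache holds Long IDs rather than ChildAssoc instances, which keeps session-bound Hibernate objects out of the shared cross-transaction cache; each session re-resolves the IDs through Session.get(). And a null resolution for any cached ID is treated as a stale entry: the loop bails out and falls through to the named query, which repopulates the cache with fresh IDs.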
     /**
      * @inheritDoc
      *
-     * This method includes a check to ensure that the <b>parentAssocs</b> cache
-     * isn't incorrectly empty.
+     * This includes a check to ensure that only root nodes may have no primary parent.
      */
-    public Collection<ChildAssoc> getParentAssocs(Node node)
+    public Collection<ChildAssoc> getParentAssocs(final Node childNode)
     {
-        Collection<ChildAssoc> parentAssocs = node.getParentAssocs();
+        Collection<ChildAssoc> parentAssocs = getParentAssocsInternal(childNode);
         if (parentAssocs.size() == 0)
         {
             // the only condition where this is allowed is if the given node is a root node
-            Store store = node.getStore();
+            Store store = childNode.getStore();
             Node rootNode = store.getRootNode();
             if (rootNode == null)
             {
@@ -945,22 +1062,23 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
                 throw new DataIntegrityViolationException("Store has no root node: \n" +
                         "   store: " + store);
             }
-            if (!rootNode.equals(node))
+            if (!rootNode.equals(childNode))
             {
-                // Reload the node to ensure that it is properly initialized
-                getSession().refresh(node);
+                parentAssocsCache.remove(childNode.getId());
+                if (isDebugParentAssocCacheEnabled)
+                {
+                    loggerParentAssocsCache.debug("\n" +
+                            "Parent associations cache - Removing entry: \n" +
+                            "   Node: " + childNode.getId());
+                }
+                parentAssocs = getParentAssocsInternal(childNode);
                 // Check if it has any parents yet.
-                if (node.getParentAssocs().size() == 0)
+                if (parentAssocs.size() == 0)
                 {
                     // It wasn't the root node and definitely has no parent
                     throw new DataIntegrityViolationException(
                             "Non-root node has no primary parent: \n" +
-                            "   child: " + node);
-                }
-                else
-                {
-                    // Repeat this method with confidence
-                    return getParentAssocs(node);
+                            "   child: " + childNode);
                 }
             }
         }
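The invalidate-and-reread above replaces the earlier refresh-and-recurse approach: the cache entry is dropped, the parents are read once more from the database, and an empty result on the second pass is reported as a data integrity error rather than being retried indefinitely.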
@@ -977,10 +1095,10 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
      * the error.  It is up to the administrator to fix the issue at the moment, but
      * the server will not stop working.
      */
-    public ChildAssoc getPrimaryParentAssoc(Node node)
+    public ChildAssoc getPrimaryParentAssoc(Node childNode)
     {
         // get the assocs pointing to the node
-        Collection<ChildAssoc> parentAssocs = node.getParentAssocs();
+        Collection<ChildAssoc> parentAssocs = getParentAssocs(childNode);
         ChildAssoc primaryAssoc = null;
         for (ChildAssoc assoc : parentAssocs)
         {
@@ -994,7 +1112,7 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
             // We have found one already.
             synchronized(warnedDuplicateParents)
             {
-                NodeRef childNodeRef = node.getNodeRef();
+                NodeRef childNodeRef = childNode.getNodeRef();
                 boolean added = warnedDuplicateParents.add(childNodeRef);
                 if (added)
                 {
@@ -1009,37 +1127,6 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
             primaryAssoc = assoc;
             // we keep looping to hunt out data integrity issues
         }
-        // did we find a primary assoc?
-        if (primaryAssoc == null)
-        {
-            // the only condition where this is allowed is if the given node is a root node
-            Store store = node.getStore();
-            Node rootNode = store.getRootNode();
-            if (rootNode == null)
-            {
-                // a store without a root node - the entire store is hosed
-                throw new DataIntegrityViolationException("Store has no root node: \n" +
-                        "   store: " + store);
-            }
-            if (!rootNode.equals(node))
-            {
-                // Reload the node to ensure that it is properly initialized
-                getSession().refresh(node);
-                // Check if it has any parents yet.
-                if (node.getParentAssocs().size() == 0)
-                {
-                    // It wasn't the root node and definitely has no parent
-                    throw new DataIntegrityViolationException(
-                            "Non-root node has no primary parent: \n" +
-                            "   child: " + node);
-                }
-                else
-                {
-                    // Repeat this method with confidence
-                    primaryAssoc = getPrimaryParentAssoc(node);
-                }
-            }
-        }
         // done
         return primaryAssoc;
     }
@@ -1142,9 +1229,6 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
     {
         // Remove instance
         getHibernateTemplate().delete(assoc);
-//        // Flush to ensure that the database constraints aren't violated if the assoc
-//        // is recreated in the transaction
-//        getSession().flush();
     }

     public List<Serializable> getPropertyValuesByActualType(DataTypeDefinition actualDataTypeDefinition)
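The commented-out session flushes removed throughout this file existed to force unique-index violations to surface inside the transaction. With BatchUpdateException added to the retry list in the next file, a lock-wait or constraint collision at commit time can instead be retried by RetryingTransactionHelper, which appears to be what makes the early flushes unnecessary.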

View File

@@ -24,6 +24,7 @@
  */
 package org.alfresco.repo.transaction;

+import java.sql.BatchUpdateException;
 import java.util.Random;

 import javax.transaction.Status;
@@ -62,7 +63,8 @@ public class RetryingTransactionHelper
             ConcurrencyFailureException.class,
             DeadlockLoserDataAccessException.class,
             StaleObjectStateException.class,
-            LockAcquisitionException.class
+            LockAcquisitionException.class,
+            BatchUpdateException.class
             };
     }
@@ -274,7 +276,7 @@ public class RetryingTransactionHelper
                 lastException = (e instanceof RuntimeException) ?
                         (RuntimeException)e : new AlfrescoRuntimeException("Unknown Exception in Transaction.", e);
                 // Check if there is a cause for retrying
-                Throwable retryCause = ExceptionStackUtil.getCause(e, RETRY_EXCEPTIONS);
+                Throwable retryCause = extractRetryCause(e);
                 if (retryCause != null)
                 {
                     // Sleep a random amount of time before retrying.
@@ -301,4 +303,36 @@ public class RetryingTransactionHelper
         // So, fail.
         throw lastException;
     }

+    /**
+     * Sometimes an exception means retry and sometimes it doesn't.
+     *
+     * @param cause the cause to examine
+     * @return Returns the original cause if it is a valid retry cause, otherwise <tt>null</tt>
+     */
+    private Throwable extractRetryCause(Throwable cause)
+    {
+        Throwable retryCause = ExceptionStackUtil.getCause(cause, RETRY_EXCEPTIONS);
+        if (retryCause == null)
+        {
+            return null;
+        }
+        else if (retryCause instanceof BatchUpdateException)
+        {
+            String msg = retryCause.getMessage();
+            if (msg != null && msg.contains("Lock wait"))
+            {
+                // It is valid
+                return retryCause;
+            }
+            else
+            {
+                // Not valid
+                return null;
+            }
+        }
+        else
+        {
+            return retryCause;
+        }
+    }
 }
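For context, here is a minimal usage sketch of the retry path: the helper re-runs the callback whenever extractRetryCause() finds a retryable cause, which now includes a BatchUpdateException whose message mentions a lock wait. The txnHelper wiring is an assumption here, mirroring the test changes below; only doInTransaction() and the RetryingTransactionCallback shape from this commit are used.

```java
import org.alfresco.repo.transaction.RetryingTransactionHelper;
import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;

public class RetryUsageSketch
{
    private RetryingTransactionHelper txnHelper;   // assumed to be injected, e.g. via Spring

    public void setTxnHelper(RetryingTransactionHelper txnHelper)
    {
        this.txnHelper = txnHelper;
    }

    public void doContendedWork()
    {
        RetryingTransactionCallback<Object> work = new RetryingTransactionCallback<Object>()
        {
            public Object execute() throws Exception
            {
                // Contended database work goes here.  It must be safe to re-run,
                // because a deadlock or lock-wait timeout triggers a retry.
                return null;
            }
        };
        txnHelper.doInTransaction(work);
    }
}
```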

View File

@@ -203,7 +203,7 @@ public class TransactionUtil
         {
             // commit failed
             throw new AlfrescoRuntimeException(
-                    "Unexpected rollback of exception: \n" + exception.getMessage(),
+                    "Unexpected rollback exception: \n" + exception.getMessage(),
                     exception);
         }
         catch (Throwable exception)

View File

@@ -36,6 +36,7 @@ import org.alfresco.repo.dictionary.DictionaryDAO;
 import org.alfresco.repo.dictionary.M2Model;
 import org.alfresco.repo.node.archive.NodeArchiveService;
 import org.alfresco.repo.security.authentication.MutableAuthenticationDao;
+import org.alfresco.repo.transaction.RetryingTransactionHelper;
 import org.alfresco.repo.version.common.counter.VersionCounterService;
 import org.alfresco.repo.version.common.versionlabel.SerialVersionLabelPolicy;
 import org.alfresco.service.cmr.repository.ContentData;
@@ -64,6 +65,7 @@ public abstract class BaseVersionStoreTest extends BaseSpringTest
     protected DictionaryDAO dictionaryDAO;
     protected AuthenticationService authenticationService;
     protected TransactionService transactionService;
+    protected RetryingTransactionHelper txnHelper;
     protected MutableAuthenticationDao authenticationDAO;
     protected NodeArchiveService nodeArchiveService;
     protected NodeService nodeService;
@@ -146,6 +148,7 @@ public abstract class BaseVersionStoreTest extends BaseSpringTest
         this.contentService = (ContentService)applicationContext.getBean("contentService");
         this.authenticationService = (AuthenticationService)applicationContext.getBean("authenticationService");
         this.transactionService = (TransactionService)this.applicationContext.getBean("transactionComponent");
+        this.txnHelper = (RetryingTransactionHelper) applicationContext.getBean("retryingTransactionHelper");
         this.authenticationDAO = (MutableAuthenticationDao) applicationContext.getBean("alfDaoImpl");
         this.nodeArchiveService = (NodeArchiveService) applicationContext.getBean("nodeArchiveService");
         this.nodeService = (NodeService)applicationContext.getBean("nodeService");

View File

@@ -35,6 +35,7 @@ import org.alfresco.model.ApplicationModel;
 import org.alfresco.model.ContentModel;
 import org.alfresco.repo.security.authentication.AuthenticationComponent;
 import org.alfresco.repo.transaction.TransactionUtil;
+import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
 import org.alfresco.service.ServiceRegistry;
 import org.alfresco.service.cmr.model.FileFolderService;
 import org.alfresco.service.cmr.model.FileInfo;
@@ -256,7 +257,7 @@ public class VersionServiceImplTest extends BaseVersionStoreTest
      * Test revert
      */
     @SuppressWarnings("unused")
-    public void xtestRevert()
+    public void testRevert()
     {
         // Create a versionable node
         NodeRef versionableNode = createNewVersionableNode();
@@ -688,9 +689,9 @@ public class VersionServiceImplTest extends BaseVersionStoreTest
             }
         });

-        TransactionUtil.executeInUserTransaction(this.transactionService, new TransactionUtil.TransactionWork<Object>()
+        txnHelper.doInTransaction(new RetryingTransactionCallback<Object>()
         {
-            public Object doWork() throws Exception
+            public Object execute() throws Exception
             {
                 // Get the archived noderef
                 NodeRef archivedNodeRef = VersionServiceImplTest.this.nodeArchiveService.getArchivedNode(nodeRef);
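The switch from TransactionUtil.executeInUserTransaction to txnHelper.doInTransaction means this post-delete check now runs inside a retrying transaction: a transient concurrency failure while reading the archive store re-executes the callback instead of failing the test outright.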