Added 'version' column to ADM entities

 - A patch will assign initial version values to the entities
 - Deprecated TransactionUtil in favour of the RetryingTransactionHelper (migration sketched below)
 - Renamed RetryingTransactionHelper.Callback to RetryingTransactionHelper.RetryingTransactionCallback
   because the name Callback clashes with many other classes on the classpath
 - Moved many more components over to the retry behaviour
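   A minimal before/after sketch of the migration (illustration only, not part of the
   change set; nodeService, nodeRef, transactionService and retryingTransactionHelper
   are assumed to be in scope, as in the tests below):

       // Before: deprecated TransactionUtil
       TransactionUtil.executeInUserTransaction(transactionService, new TransactionUtil.TransactionWork<Object>()
       {
           public Object doWork() throws Exception
           {
               nodeService.setProperty(nodeRef, ContentModel.PROP_CREATED, new Date());
               return null;
           }
       });

       // After: RetryingTransactionHelper with the renamed RetryingTransactionCallback
       RetryingTransactionCallback<Object> callback = new RetryingTransactionCallback<Object>()
       {
           public Object execute() throws Exception
           {
               nodeService.setProperty(nodeRef, ContentModel.PROP_CREATED, new Date());
               return null;
           }
       };
       retryingTransactionHelper.doInTransaction(callback);
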
Duplicate name checks
 - The check is now done with a query; the entity update is no longer flushed to the database early (outlined below)
 - Concurrent adds of a same-named child node will therefore only fail at the end of the transaction
 - TODO: Detect the duplicate violation during transaction retrying
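   In outline, the new check in HibernateNodeDaoServiceImpl.setChildNameUnique behaves as in
   this condensed paraphrase of the diff below (not additional code; the unique database
   constraint still acts as the backstop at commit time):

       // Query for an existing child association with the same name under the same parent
       ChildAssoc existing = (ChildAssoc) getHibernateTemplate().execute(new HibernateCallback()
       {
           public Object doInHibernate(Session session)
           {
               session.flush();
               Query query = session
                       .getNamedQuery(HibernateNodeDaoServiceImpl.QUERY_GET_CHILD_ASSOC_BY_NAME)
                       .setLong("parentId", parentNode.getId())
                       .setParameter("childNodeName", childNameNewShort)
                       .setLong("childNodeNameCrc", childNameNewCrc);
               return query.uniqueResult();
           }
       });
       if (existing != null)
       {
           // A committed duplicate is already visible: fail immediately
           throw new DuplicateChildNodeNameException(parentNode.getNodeRef(), childAssoc.getTypeQName(), childName);
       }
       // Otherwise update the entity in memory only; a concurrent, uncommitted insert of the same
       // name is only caught by the database constraint when this transaction commits
       childAssoc.setChildNodeName(childNameNewShort);
       childAssoc.setChildNodeNameCrc(childNameNewCrc);
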
Workaround for ADMLuceneTest
 - Disable session size resource management during tests


git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/alfresco/HEAD/root@5823 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
Derek Hulley
2007-06-01 12:40:17 +00:00
parent bbbd18923f
commit 819c7084a2
45 changed files with 818 additions and 230 deletions

View File

@@ -51,6 +51,7 @@ import org.alfresco.repo.policy.JavaBehaviour;
import org.alfresco.repo.policy.PolicyComponent;
import org.alfresco.repo.security.authentication.AuthenticationComponent;
import org.alfresco.repo.transaction.AlfrescoTransactionSupport;
import org.alfresco.repo.transaction.RetryingTransactionHelper;
import org.alfresco.service.cmr.dictionary.ClassDefinition;
import org.alfresco.service.cmr.dictionary.DictionaryException;
import org.alfresco.service.cmr.dictionary.DictionaryService;
@@ -140,6 +141,7 @@ public abstract class BaseNodeServiceTest extends BaseSpringTest
protected PolicyComponent policyComponent;
protected DictionaryService dictionaryService;
protected TransactionService transactionService;
protected RetryingTransactionHelper retryingTransactionHelper;
protected AuthenticationComponent authenticationComponent;
protected NodeDaoService nodeDaoService;
protected NodeService nodeService;
@@ -151,6 +153,7 @@ public abstract class BaseNodeServiceTest extends BaseSpringTest
{
super.onSetUpInTransaction();
transactionService = (TransactionService) applicationContext.getBean("transactionComponent");
retryingTransactionHelper = (RetryingTransactionHelper) applicationContext.getBean("retryingTransactionHelper");
policyComponent = (PolicyComponent) applicationContext.getBean("policyComponent");
authenticationComponent = (AuthenticationComponent) applicationContext.getBean("authenticationComponent");

View File

@@ -27,15 +27,14 @@ package org.alfresco.repo.node;
import java.io.InputStream;
import java.util.Map;
import javax.transaction.UserTransaction;
import junit.framework.TestCase;
import org.alfresco.repo.dictionary.DictionaryDAO;
import org.alfresco.repo.dictionary.M2Model;
import org.alfresco.repo.search.impl.lucene.fts.FullTextSearchIndexer;
import org.alfresco.repo.security.authentication.AuthenticationComponent;
-import org.alfresco.repo.transaction.TransactionUtil;
+import org.alfresco.repo.transaction.RetryingTransactionHelper;
+import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
import org.alfresco.service.ServiceRegistry;
import org.alfresco.service.cmr.repository.ChildAssociationRef;
import org.alfresco.service.cmr.repository.NodeRef;
@@ -78,6 +77,7 @@ public class ConcurrentNodeServiceTest extends TestCase
private NodeService nodeService;
private TransactionService transactionService;
private RetryingTransactionHelper retryingTransactionHelper;
private NodeRef rootNodeRef;
@@ -107,23 +107,24 @@ public class ConcurrentNodeServiceTest extends TestCase
nodeService = (NodeService) ctx.getBean("dbNodeService");
transactionService = (TransactionService) ctx.getBean("transactionComponent");
retryingTransactionHelper = (RetryingTransactionHelper) ctx.getBean("retryingTransactionHelper");
luceneFTS = (FullTextSearchIndexer) ctx.getBean("LuceneFullTextSearchIndexer");
this.authenticationComponent = (AuthenticationComponent) ctx.getBean("authenticationComponent");
this.authenticationComponent.setSystemUserAsCurrentUser();
// create a first store directly
-TransactionUtil.executeInUserTransaction(transactionService, new TransactionUtil.TransactionWork<Object>()
+RetryingTransactionCallback<Object> createRootNodeCallback = new RetryingTransactionCallback<Object>()
{
-public Object doWork() throws Exception
+public Object execute() throws Exception
{
StoreRef storeRef = nodeService.createStore(StoreRef.PROTOCOL_WORKSPACE, "Test_" + System.currentTimeMillis());
rootNodeRef = nodeService.getRootNode(storeRef);
return null;
}
-});
+};
+retryingTransactionHelper.doInTransaction(createRootNodeCallback);
}
@Override
@@ -140,17 +141,17 @@ public class ConcurrentNodeServiceTest extends TestCase
protected Map<QName, ChildAssociationRef> commitNodeGraph() throws Exception
{
-return TransactionUtil.executeInUserTransaction(transactionService,
-new TransactionUtil.TransactionWork<Map<QName, ChildAssociationRef>>()
-{
-public Map<QName, ChildAssociationRef> doWork() throws Exception
-{
-Map<QName, ChildAssociationRef> answer = buildNodeGraph();
-return answer;
-}
-});
+RetryingTransactionCallback<Map<QName, ChildAssociationRef>> buildGraphCallback =
+new RetryingTransactionCallback<Map<QName, ChildAssociationRef>>()
+{
+public Map<QName, ChildAssociationRef> execute() throws Exception
+{
+Map<QName, ChildAssociationRef> answer = buildNodeGraph();
+return answer;
+}
+};
+return retryingTransactionHelper.doInTransaction(buildGraphCallback);
}
public void xtest1() throws Exception
@@ -231,10 +232,10 @@ public class ConcurrentNodeServiceTest extends TestCase
}
}
-TransactionUtil.executeInUserTransaction(transactionService, new TransactionUtil.TransactionWork<Object>()
+// Test it
+RetryingTransactionCallback<Object> testCallback = new RetryingTransactionCallback<Object>()
{
-public Object doWork() throws Exception
+public Object execute() throws Exception
{
// There are two nodes at the base level in each test
assertEquals(2 * ((COUNT * REPEATS) + 1), nodeService.getChildAssocs(rootNodeRef).size());
@@ -276,9 +277,8 @@ public class ConcurrentNodeServiceTest extends TestCase
return null;
}
-});
+};
+retryingTransactionHelper.doInTransaction(testCallback);
}
/**

View File

@@ -40,8 +40,7 @@ import org.alfresco.repo.domain.Node;
import org.alfresco.repo.domain.NodeStatus;
import org.alfresco.repo.node.BaseNodeServiceTest;
import org.alfresco.repo.transaction.AlfrescoTransactionSupport;
-import org.alfresco.repo.transaction.TransactionUtil;
-import org.alfresco.repo.transaction.TransactionUtil.TransactionWork;
+import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
import org.alfresco.service.cmr.dictionary.DataTypeDefinition;
import org.alfresco.service.cmr.dictionary.DictionaryService;
import org.alfresco.service.cmr.repository.ChildAssociationRef;
@@ -143,9 +142,9 @@ public class DbNodeServiceImplTest extends BaseNodeServiceTest
endTransaction();
// change property - check status
-TransactionWork<Object> changePropertiesWork = new TransactionWork<Object>()
+RetryingTransactionCallback<Object> changePropertiesWork = new RetryingTransactionCallback<Object>()
{
-public Object doWork()
+public Object execute()
{
nodeService.setProperty(n6Ref, ContentModel.PROP_CREATED, new Date());
return null;
@@ -154,9 +153,9 @@ public class DbNodeServiceImplTest extends BaseNodeServiceTest
executeAndCheck(n6Ref, changePropertiesWork);
// add an aspect
-TransactionWork<Object> addAspectWork = new TransactionWork<Object>()
+RetryingTransactionCallback<Object> addAspectWork = new RetryingTransactionCallback<Object>()
{
-public Object doWork()
+public Object execute()
{
nodeService.addAspect(n6Ref, ASPECT_QNAME_TEST_MARKER, null);
return null;
@@ -165,9 +164,9 @@ public class DbNodeServiceImplTest extends BaseNodeServiceTest
executeAndCheck(n6Ref, addAspectWork);
// remove an aspect
-TransactionWork<Object> removeAspectWork = new TransactionWork<Object>()
+RetryingTransactionCallback<Object> removeAspectWork = new RetryingTransactionCallback<Object>()
{
-public Object doWork()
+public Object execute()
{
nodeService.removeAspect(n6Ref, ASPECT_QNAME_TEST_MARKER);
return null;
@@ -176,9 +175,9 @@ public class DbNodeServiceImplTest extends BaseNodeServiceTest
executeAndCheck(n6Ref, removeAspectWork);
// move the node
-TransactionWork<Object> moveNodeWork = new TransactionWork<Object>()
+RetryingTransactionCallback<Object> moveNodeWork = new RetryingTransactionCallback<Object>()
{
-public Object doWork()
+public Object execute()
{
nodeService.moveNode(
n6Ref,
@@ -191,9 +190,9 @@ public class DbNodeServiceImplTest extends BaseNodeServiceTest
executeAndCheck(n6Ref, moveNodeWork);
// delete the node
-TransactionWork<Object> deleteNodeWork = new TransactionWork<Object>()
+RetryingTransactionCallback<Object> deleteNodeWork = new RetryingTransactionCallback<Object>()
{
-public Object doWork()
+public Object execute()
{
nodeService.deleteNode(n6Ref);
return null;
@@ -202,9 +201,9 @@ public class DbNodeServiceImplTest extends BaseNodeServiceTest
executeAndCheck(n6Ref, deleteNodeWork);
// check cascade-deleted nodes
-TransactionWork<Object> checkCascadeWork = new TransactionWork<Object>()
+RetryingTransactionCallback<Object> checkCascadeCallback = new RetryingTransactionCallback<Object>()
{
-public Object doWork()
+public Object execute()
{
// check n6
NodeStatus n6Status = nodeDaoService.getNodeStatus(n6Ref, false);
@@ -221,12 +220,12 @@ public class DbNodeServiceImplTest extends BaseNodeServiceTest
return null;
}
};
-TransactionUtil.executeInUserTransaction(txnService, checkCascadeWork);
+retryingTransactionHelper.doInTransaction(checkCascadeCallback);
// check node recreation
-TransactionWork<Object> checkRecreateWork = new TransactionWork<Object>()
+RetryingTransactionCallback<Object> checkRecreateCallback = new RetryingTransactionCallback<Object>()
{
-public Object doWork()
+public Object execute()
{
properties.put(ContentModel.PROP_STORE_PROTOCOL, n6Ref.getStoreRef().getProtocol());
properties.put(ContentModel.PROP_STORE_IDENTIFIER, n6Ref.getStoreRef().getIdentifier());
@@ -242,10 +241,10 @@ public class DbNodeServiceImplTest extends BaseNodeServiceTest
return null;
}
};
-TransactionUtil.executeInUserTransaction(txnService, checkRecreateWork);
+retryingTransactionHelper.doInTransaction(checkRecreateCallback);
}
-private void executeAndCheck(NodeRef nodeRef, TransactionWork<Object> work) throws Throwable
+private void executeAndCheck(NodeRef nodeRef, RetryingTransactionCallback<Object> callback) throws Throwable
{
UserTransaction txn = txnService.getUserTransaction();
txn.begin();
@@ -257,7 +256,7 @@ public class DbNodeServiceImplTest extends BaseNodeServiceTest
assertNotSame(currentTxnId, currentStatus.getChangeTxnId());
try
{
-work.doWork();
+callback.execute();
// get the status
NodeRef.Status newStatus = nodeService.getNodeStatus(nodeRef);
assertNotNull(newStatus);
@@ -362,4 +361,20 @@ public class DbNodeServiceImplTest extends BaseNodeServiceTest
// Get it again
nodeService.getPrimaryParent(n8Ref);
}
/**
* It would appear that an issue has arisen with creating and deleting nodes
* in the same transaction.
*/
public void testInTransactionCreateAndDelete() throws Exception
{
// Create a node
NodeRef nodeRef = nodeService.createNode(
rootNodeRef,
ASSOC_TYPE_QNAME_TEST_CHILDREN,
QName.createQName(NAMESPACE, this.getName()),
TYPE_QNAME_TEST_CONTENT).getChildRef();
// Delete the node
nodeService.deleteNode(nodeRef);
}
}

View File

@@ -32,6 +32,7 @@ import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.zip.CRC32;
@@ -72,13 +73,17 @@ import org.alfresco.util.GUID;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.hibernate.FlushMode;
import org.hibernate.LockMode;
import org.hibernate.ObjectDeletedException;
import org.hibernate.Query;
import org.hibernate.ScrollMode;
import org.hibernate.ScrollableResults;
import org.hibernate.Session;
import org.hibernate.exception.LockAcquisitionException;
import org.springframework.dao.ConcurrencyFailureException;
import org.springframework.dao.DataAccessException;
import org.springframework.dao.DataIntegrityViolationException;
import org.springframework.dao.DeadlockLoserDataAccessException;
import org.springframework.orm.hibernate3.HibernateCallback;
import org.springframework.orm.hibernate3.support.HibernateDaoSupport;
@@ -94,6 +99,7 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
private static final String QUERY_GET_PRIMARY_CHILD_NODE_STATUSES = "node.GetPrimaryChildNodeStatuses";
private static final String QUERY_GET_CHILD_ASSOCS = "node.GetChildAssocs";
private static final String QUERY_GET_CHILD_ASSOCS_BY_ALL = "node.GetChildAssocsByAll";
private static final String QUERY_GET_CHILD_ASSOC_BY_NAME = "node.GetChildAssocByShortName";
private static final String QUERY_GET_CHILD_ASSOC_BY_TYPE_AND_NAME = "node.GetChildAssocByTypeAndName";
private static final String QUERY_GET_CHILD_ASSOC_REFS = "node.GetChildAssocRefs";
private static final String QUERY_GET_CHILD_ASSOC_REFS_BY_QNAME = "node.GetChildAssocRefsByQName";
@@ -105,12 +111,16 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
private static final String QUERY_GET_SERVER_BY_IPADDRESS = "server.getServerByIpAddress";
private static Log logger = LogFactory.getLog(HibernateNodeDaoServiceImpl.class);
private static Log loggerChildAssoc = LogFactory.getLog(HibernateNodeDaoServiceImpl.class.getName() + ".ChildAssoc");
/** a uuid identifying this unique instance */
private final String uuid;
/** the number of lock retries against the parent node to ensure child uniqueness */
private int maxLockRetries;
private static TransactionAwareSingleton<Long> serverIdSingleton = new TransactionAwareSingleton<Long>();
private final String ipAddress;
private Random randomWaitTime;
/** used for debugging */
private Set<String> changeTxnIdSet;
@@ -121,6 +131,7 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
public HibernateNodeDaoServiceImpl()
{
this.uuid = GUID.generate();
this.maxLockRetries = 20;
try
{
ipAddress = InetAddress.getLocalHost().getHostAddress();
@@ -129,6 +140,7 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
{
throw new AlfrescoRuntimeException("Failed to get server IP address", e);
}
randomWaitTime = new Random(System.currentTimeMillis());
changeTxnIdSet = new HashSet<String>(0);
}
@@ -158,6 +170,16 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
return uuid.hashCode();
}
/**
* Set the maximum number of retries when attempting to get a lock on a parent node
*
* @param maxLockRetries the retry count
*/
public void setMaxLockRetries(int maxLockRetries)
{
this.maxLockRetries = maxLockRetries;
}
/**
* Gets/creates the <b>server</b> instance to use for the life of this instance
*/
@@ -515,8 +537,8 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
nodeStatus.getTransaction().setChangeTxnId(AlfrescoTransactionSupport.getTransactionId());
// finally delete the node
getHibernateTemplate().delete(node);
-// flush to ensure constraints can't be violated
-getSession().flush();
+// // flush to ensure constraints can't be violated
+// getSession().flush();
// done
}
@@ -584,13 +606,7 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
public void setChildNameUnique(final ChildAssoc childAssoc, String childName)
{
/*
-* As the Hibernate session is rendered useless when an exception is
-* bubbled up, we go direct to the database to update the child association.
-* This preserves the session and client code can catch the resulting
-* exception and react to it whilst in the same transaction.
-*
-* We ensure that case-insensitivity is maintained by persisting
-* the lowercase version of the child node name.
+* Work out if there has been any change in the name
*/
String childNameNew = null;
@@ -621,40 +637,75 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
}
}
+/*
+ * The parent node is explicitly locked. A query is then issued to ensure that there
+ * are no duplicates in the index on the child assoc table. The child association is
+ * then modified, although not directly in the database. The lock guards against other
+ * transactions modifying the unique index without this transaction's knowledge.
+ */
+final Node parentNode = childAssoc.getParent();
+// if (loggerChildAssoc.isDebugEnabled())
+// {
+// loggerChildAssoc.debug(
+// "Locking parent node for modifying child assoc: \n" +
+// " Parent: " + parentNode + "\n" +
+// " Child Assoc: " + childAssoc + "\n" +
+// " New Name: " + childNameNew);
+// }
+// for (int i = 0; i < maxLockRetries; i++)
+// {
+// try
+// {
+// getSession().lock(parentNode, LockMode.UPGRADE);
+// // The lock was good, proceed
+// break;
+// }
+// catch (LockAcquisitionException e) {}
+// catch (ConcurrencyFailureException e) {}
+// // We can retry after a short pause that gets potentially longer each time
+// try { Thread.sleep(randomWaitTime.nextInt(500 * i + 500)); } catch (InterruptedException ee) {}
+// }
+// We have the lock, so issue the query to check
HibernateCallback callback = new HibernateCallback()
{
public Object doInHibernate(Session session)
{
session.flush();
Query query = session
-.getNamedQuery(HibernateNodeDaoServiceImpl.UPDATE_SET_CHILD_ASSOC_NAME)
-.setString("newName", childNameNewShort)
-.setLong("newNameCrc", childNameNewCrc)
-.setLong("childAssocId", childAssoc.getId());
-return (Integer) query.executeUpdate();
+.getNamedQuery(HibernateNodeDaoServiceImpl.QUERY_GET_CHILD_ASSOC_BY_NAME)
+.setLong("parentId", parentNode.getId())
+.setParameter("childNodeName", childNameNewShort)
+.setLong("childNodeNameCrc", childNameNewCrc);
+return query.uniqueResult();
}
};
-try
+ChildAssoc childAssocExisting = (ChildAssoc) getHibernateTemplate().execute(callback);
+if (childAssocExisting != null)
{
-Integer count = (Integer) getHibernateTemplate().execute(callback);
-// refresh the entity directly
-if (count.intValue() == 0)
+// There is already an entity
+if (loggerChildAssoc.isDebugEnabled())
{
-if (logger.isDebugEnabled())
-{
-logger.debug("ChildAssoc not updated: " + childAssoc.getId());
-}
-}
-else
-{
-getHibernateTemplate().refresh(childAssoc);
+loggerChildAssoc.debug(
+"Duplicate child association detected: \n" +
+" Child Assoc: " + childAssoc + "\n" +
+" Existing Child Assoc: " + childName);
}
+throw new DuplicateChildNodeNameException(
+parentNode.getNodeRef(),
+childAssoc.getTypeQName(),
+childName);
}
-catch (DataIntegrityViolationException e)
+// We got past that, so we can just update the entity and know that no other transaction
+// can lock the parent.
+childAssoc.setChildNodeName(childNameNewShort);
+childAssoc.setChildNodeNameCrc(childNameNewCrc);
+// Done
+if (loggerChildAssoc.isDebugEnabled())
{
-NodeRef parentNodeRef = childAssoc.getParent().getNodeRef();
-QName assocTypeQName = childAssoc.getTypeQName();
-throw new DuplicateChildNodeNameException(parentNodeRef, assocTypeQName, childName);
+loggerChildAssoc.debug(
+"Updated child association: \n" +
+" Parent: " + parentNode + "\n" +
+" Child Assoc: " + childAssoc);
}
}
}
@@ -868,9 +919,9 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
return;
}
-if (logger.isDebugEnabled())
+if (loggerChildAssoc.isDebugEnabled())
{
-logger.debug(
+loggerChildAssoc.debug(
"Deleting parent-child association " + assoc.getId() +
(cascade ? " with" : " without") + " cascade:" +
assoc.getParent().getId() + " -> " + assoc.getChild().getId());
@@ -894,10 +945,10 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
* duplicate call will be received to do this
*/
}
-// To ensure the validity of the constraint enforcement by the database,
-// we have to flush here
-getSession().flush();
+//
+// // To ensure the validity of the constraint enforcement by the database,
+// // we have to flush here. It is possible to delete and recreate the instance
+// getSession().flush();
}
/**
@@ -1117,9 +1168,9 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
{
// Remove instance
getHibernateTemplate().delete(assoc);
-// Flush to ensure that the database constraints aren't violated if the assoc
-// is recreated in the transaction
-getSession().flush();
+// // Flush to ensure that the database constraints aren't violated if the assoc
+// // is recreated in the transaction
+// getSession().flush();
}
public List<Serializable> getPropertyValuesByActualType(DataTypeDefinition actualDataTypeDefinition)

View File

@@ -136,9 +136,6 @@ public class SessionSizeManagementTest extends BaseNodeServiceTest
}
createNodes(nodeService, LOAD_COUNT, true);
-// Check the session size
-int entityCount = getSession().getStatistics().getEntityCount();
-assertTrue("Manual flush: Entity count should be less than " + LOAD_COUNT, entityCount < LOAD_COUNT);
// Now flush integrity to be sure things are not broken
AlfrescoTransactionSupport.flush();