Merged DEV to HEAD: Cache write optimizations

- Refix ALF-10665: Caches that use @@VALUE_NOT_FOUND@@ are not really immutable
 - Add NodeBulkLoader.setCheckNodeConsistency
   - Use in a transaction to ensure that the node cache views are consistent with
     the database views.
 - Increase size of contentDataCache and make it support equals checking
 - Details:
   32162: Read-through cache changes
   32163: TransactionalCache changes to support more efficient consistency guarantees
          - 'allowEqualsCheck' property allows cache to do a full equals check against changed shared cache values
           - In-transaction option 'setDisableSharedCacheReadForTransaction'.  Values are cached in-transaction
            and written back at the end of the transaction (subject to collision rules) but the first read will
            not go to the shared cache.
          - Drop optimistic write-through in read-only transactions; they are equally likely to want to flush
            stale data.
          - Add simpler logic for mutable and allowEqualsCheck and make sure all conditions are covered by tests
    32164: Cache node entity supports TransactionalCache's allowEqualsCheck
   32165: Add NodeDAO.setCheckNodeConsistency() method
          - Allows code to request that node metadata is consistent with whatever view the DB is providing
          - Incorporate into node concurrency tests without issue
          - Only one cache is affected (nodesCache) and it is enhanced by having 'allowEqualsCheck' to prevent
            massive flushing when multiple read transactions are all trying to push data into the shared caches,
            particularly during (re)indexing operations.
          - Further reduces the cache invalidation messages required in order to maintain consistency across
            the cluster
   32166: Make Lucene reindex work (trackers and FTS) use enforced node consistency
          - bulkLoader.setCheckNodeConsistency() incorporated where 'isReadThrough' is on
   32167: SOLR tracking uses NodeDAO.setCheckNodeConsistency() during node metadata retrieval
          - Ensures that any stale node metadata does not find its way into indexed SOLR node metadata
   32207: Fix ALF-11644: AVM cleanup jobs run when WCM is not installed
          - Moved scheduled jobs to installable wcm-bootstrap-context.xml
          - Also got rid of orphan reaper warnings when running in a cluster
   32208: Better hashcode for NodeVersionKey
   32209: RECORD ONLY
   32210: RECORD ONLY
   32212: Proper fix for ALF-10665: Immutable caches do not respond well to null (=> @@VALUE_NOT_FOUND@@)
          - The following caches were incorrectly classed as 'immutable':
               propertyValueCache
               immutableEntityCache
               rootNodesCache
               allRootNodesCache
               authorityCache
               tagscopeSummaryCache
               imapMessageCache
          - The 'immutable' caches are:
               node.aspectsCache
               node.propertiesCache
               node.parentAssocsCache
          - The following caches support equals checks:
               node.nodesCache
               authorityLookupCache
   32213: Fixed getNodeRefStatus(): nodesCache caches deleted entries as well.


git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/alfresco/HEAD/root@32657 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
This commit is contained in:
Derek Hulley
2011-12-08 23:51:36 +00:00
parent 8700e0698b
commit 25382b991f
22 changed files with 536 additions and 265 deletions

View File

@@ -44,6 +44,7 @@ import org.alfresco.ibatis.RetryingCallbackHelper.RetryingCallback;
import org.alfresco.model.ContentModel;
import org.alfresco.repo.cache.NullCache;
import org.alfresco.repo.cache.SimpleCache;
import org.alfresco.repo.cache.TransactionalCache;
import org.alfresco.repo.cache.lookup.EntityLookupCache;
import org.alfresco.repo.cache.lookup.EntityLookupCache.EntityLookupCallbackDAOAdaptor;
import org.alfresco.repo.domain.contentdata.ContentDataDAO;
@@ -152,6 +153,10 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
* VALUE KEY: The Node's NodeRef<br/>
*/
private EntityLookupCache<Long, Node, NodeRef> nodesCache;
/**
* Backing transactional cache to allow read-through requests to be honoured
*/
private TransactionalCache<Serializable, Serializable> nodesTransactionalCache;
/**
* Cache for the QName values:<br/>
* KEY: NodeVersionKey<br/>
@@ -311,7 +316,10 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
cache,
CACHE_REGION_NODES,
new NodesCacheCallbackDAO());
if (cache instanceof TransactionalCache)
{
this.nodesTransactionalCache = (TransactionalCache<Serializable, Serializable>) cache;
}
}
/**
@@ -629,9 +637,17 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
return txn;
}
public Long getCurrentTransactionId()
public Long getCurrentTransactionId(boolean ensureNew)
{
TransactionEntity txn = AlfrescoTransactionSupport.getResource(KEY_TRANSACTION);
TransactionEntity txn;
if (ensureNew)
{
txn = getCurrentTransaction();
}
else
{
txn = AlfrescoTransactionSupport.getResource(KEY_TRANSACTION);
}
return txn == null ? null : txn.getId();
}
@@ -815,8 +831,8 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
/**
* Callback to cache nodes by ID and {@link NodeRef}. When looking up objects based on the
* value key, only the referencing properties need be populated. <b>ONLY</b> live nodes are
* cached.
* value key, only the referencing properties need be populated. <b>ALL</b> nodes are cached,
* not just live nodes.
*
* @see NodeEntity
*
@@ -838,7 +854,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
*/
public Pair<Long, Node> findByKey(Long nodeId)
{
NodeEntity node = selectNodeById(nodeId, Boolean.FALSE);
NodeEntity node = selectNodeById(nodeId, null);
if (node != null)
{
// Lock it to prevent 'accidental' modification
@@ -867,7 +883,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
public Pair<Long, Node> findByValue(Node node)
{
NodeRef nodeRef = node.getNodeRef();
node = selectNodeByNodeRef(nodeRef, Boolean.FALSE);
node = selectNodeByNodeRef(nodeRef, null);
if (node != null)
{
// Lock it to prevent 'accidental' modification
@@ -897,7 +913,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
@Override
public boolean isInCurrentTxn(Long nodeId)
{
Long currentTxnId = getCurrentTransactionId();
Long currentTxnId = getCurrentTransactionId(false);
if (currentTxnId == null)
{
// No transactional changes have been made to any nodes, therefore the node cannot
@@ -909,59 +925,19 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
return nodeTxnId.equals(currentTxnId);
}
// TODO: Restore to simple version
// TODO: Add read-through option for caches
public Status getNodeRefStatus(NodeRef nodeRef)
{
Node node = null;
// Stage 1: check the cache without reading through
Long nodeId = nodesCache.getKey(nodeRef);
if (nodeId != null)
{
node = nodesCache.getValue(nodeId);
// If the node isn't for the current transaction, we are probably reindexing. So invalidate the cache,
// forcing a read through and a repeatable read on this noderef
if (node == null || AlfrescoTransactionSupport.getTransactionReadState() != TxnReadState.TXN_READ_WRITE
|| !getCurrentTransaction().getId().equals(node.getTransaction().getId())
|| !node.getNodeRef().equals(nodeRef))
{
invalidateNodeCaches(nodeId);
node = null;
}
}
// Stage 2, read through to the database, caching results if appropriate
if (node == null)
{
Node nodeEntity = new NodeEntity(nodeRef);
// Explicitly remove this noderef from the cache, forcing a 'repeatable read' on this noderef from now on.
nodesCache.removeByValue(nodeEntity);
Pair<Long, Node> pair = nodesCache.getByValue(nodeEntity);
if (pair == null)
{
// It's not there, so select ignoring the 'deleted' flag
node = selectNodeByNodeRef(nodeRef, null);
if (node != null)
{
// Invalidate anything cached for this node ID, just in case it has moved store, etc.
invalidateNodeCaches(node.getId());
}
}
else
{
// We have successfully populated the cache
node = pair.getSecond();
}
}
if (node == null)
Node node = new NodeEntity(nodeRef);
Pair<Long, Node> nodePair = nodesCache.getByValue(node);
// The nodesCache gets both live and deleted nodes.
if (nodePair == null)
{
return null;
}
Transaction txn = node.getTransaction();
return new NodeRef.Status(nodeRef, txn.getChangeTxnId(), txn.getId(), node.getDeleted());
else
{
return nodePair.getSecond().getNodeStatus();
}
}
public Pair<Long, NodeRef> getNodePair(NodeRef nodeRef)
@@ -989,6 +965,8 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
Pair<Long, Node> pair = nodesCache.getByKey(nodeId);
if (pair == null || pair.getSecond().getDeleted())
{
// Force a removal from the cache
nodesCache.removeByKey(nodeId);
// Go back to the database and get what is there
NodeEntity dbNode = selectNodeById(nodeId, null);
if (pair == null)
@@ -3679,6 +3657,16 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO
* Bulk caching
*/
@Override
public void setCheckNodeConsistency()
{
if (nodesTransactionalCache != null)
{
nodesTransactionalCache.setDisableSharedCacheReadForTransaction(true);
}
}
@Override
public void cacheNodesById(List<Long> nodeIds)
{
/*