This gives the AVM lookup cache a transaction isolation level of read committed, more or less. It was effectively read uncommitted before, so this change makes me more comfortable. I hope it will also reduce optimistic locking failures under concurrent load.


git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/alfresco/HEAD/root@4600 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
Author: Britt Park
Date:   2006-12-14 03:23:20 +00:00
Parent: cc94478e72
Commit: e70798e59f

2 changed files with 69 additions and 18 deletions
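
For orientation, here is a condensed, self-contained sketch of the scheme the diff below implements. The class and member names are invented and the real LookupCache's LRU bookkeeping is omitted; the point is just the shape: lookups made inside a transaction are staged in a thread-local map and published to the shared cache only after that transaction commits, so other transactions never see uncommitted results.

// Hypothetical, condensed sketch of the staging/publish pattern; not the
// actual Alfresco classes.
import java.util.HashMap;
import java.util.Map;

class StagedLookupCache<K, V>
{
    // Shared cache, visible to every transaction once entries are published.
    private final Map<K, V> fCache = new HashMap<K, V>();

    // Per-thread (i.e. per-transaction) staging area, invisible to other threads.
    private final ThreadLocal<Map<K, V>> fToBeAdded = new ThreadLocal<Map<K, V>>();

    // Reads prefer this transaction's own staged entries, then the shared cache.
    public synchronized V find(K key)
    {
        Map<K, V> staged = fToBeAdded.get();
        V found = (staged != null) ? staged.get(key) : null;
        return (found != null) ? found : fCache.get(key);
    }

    // Writes only stage; nothing becomes visible to other transactions here.
    public void update(K key, V value)
    {
        Map<K, V> staged = fToBeAdded.get();
        if (staged == null)
        {
            staged = new HashMap<K, V>();
            fToBeAdded.set(staged);
        }
        staged.put(key, value);
    }

    // After a successful commit: publish the staged entries to the shared cache.
    public synchronized void commitLookups()
    {
        Map<K, V> staged = fToBeAdded.get();
        if (staged != null)
        {
            fCache.putAll(staged);
            fToBeAdded.set(null);
        }
    }

    // On rollback: throw the staged entries away, leaving the shared cache untouched.
    public void onRollback()
    {
        fToBeAdded.set(null);
    }
}

Note that update is not synchronized, for the same reason the diff drops synchronized from updateCache: it only touches the calling thread's own staging map.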

AVMLookupCacheListener.java

@@ -41,4 +41,13 @@ public class AVMLookupCacheListener extends TransactionListenerAdapter
     {
         fLookupCache.onRollback();
     }
+
+    /* (non-Javadoc)
+     * @see org.alfresco.repo.transaction.TransactionListenerAdapter#afterCommit()
+     */
+    @Override
+    public void afterCommit()
+    {
+        fLookupCache.commitLookups();
+    }
 }
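
The hunk above is the listener side of the scheme: on commit it publishes the transaction's staged lookups, on rollback it discards them. A minimal sketch of that wiring, using stand-ins (CacheLike for the real LookupCache, TxnCallback for TransactionListenerAdapter); how the listener is registered with the transaction is not part of this diff and is not shown.

// Hypothetical stand-ins for the real classes, for illustration only.
interface CacheLike
{
    void commitLookups();   // publish this transaction's staged lookups
    void onRollback();      // discard this transaction's staged lookups
}

abstract class TxnCallback
{
    public void afterCommit() { }
    public void afterRollback() { }
}

class CacheTxnListener extends TxnCallback
{
    private final CacheLike fLookupCache;

    CacheTxnListener(CacheLike cache)
    {
        fLookupCache = cache;
    }

    @Override
    public void afterCommit()
    {
        // Only now do other transactions get to see these lookups.
        fLookupCache.commitLookups();
    }

    @Override
    public void afterRollback()
    {
        // A failed transaction leaves the shared cache untouched.
        fLookupCache.onRollback();
    }
}

Hooking afterCommit rather than beforeCommit is what keeps entries from transactions that ultimately fail out of the shared cache.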

LookupCache.java

@@ -21,6 +21,12 @@ public class LookupCache
 {
     private static Logger fgLogger = Logger.getLogger(LookupCache.class);
 
+    /**
+     * Per transaction lookup results to be added to the cache on successful
+     * commit.
+     */
+    private ThreadLocal<Map<LookupKey, Lookup>> fToBeAdded;
+
     /**
      * The Map of of keys to lookups.
      */
@@ -64,6 +70,7 @@ public class LookupCache
         fCache = new HashMap<LookupKey, Lookup>();
         fTimeStamps = new TreeMap<Long, LookupKey>();
         fInverseTimeStamps = new HashMap<LookupKey, Long>();
+        fToBeAdded = new ThreadLocal<Map<LookupKey, Lookup>>();
         fTimeStamp = 0L;
         fMaxSize = 100;
     }
@@ -179,7 +186,12 @@ public class LookupCache
      */
     private synchronized Lookup findInCache(LookupKey key)
     {
-        Lookup found = fCache.get(key);
+        Map<LookupKey, Lookup> map = fToBeAdded.get();
+        Lookup found = (map != null) ? map.get(key) : null;
+        if (found == null)
+        {
+            found = fCache.get(key);
+        }
         if (found != null)
         {
             Lookup result = new Lookup(found, fAVMNodeDAO, fAVMStoreDAO);
@@ -197,7 +209,11 @@ public class LookupCache
         {
             LookupKey newKey = new LookupKey(key);
             newKey.setWrite(true);
-            found = fCache.get(newKey);
+            found = (map != null) ? map.get(newKey) : null;
+            if (found == null)
+            {
+                found = fCache.get(newKey);
+            }
             if (found != null)
             {
                 Lookup result = new Lookup(found, fAVMNodeDAO, fAVMStoreDAO);
@@ -219,27 +235,52 @@ public class LookupCache
      * @param key
      * @param lookup
      */
-    private synchronized void updateCache(LookupKey key, Lookup lookup)
+    private void updateCache(LookupKey key, Lookup lookup)
     {
-        if (fCache.containsKey(key))
+        Map<LookupKey, Lookup> map = fToBeAdded.get();
+        if (map == null)
         {
-            fCache.remove(key);
-            Long oldTime = fInverseTimeStamps.get(key);
-            fInverseTimeStamps.remove(key);
-            fTimeStamps.remove(oldTime);
+            map = new HashMap<LookupKey, Lookup>();
         }
-        long timeStamp = fTimeStamp++;
-        fTimeStamps.put(timeStamp, key);
-        fInverseTimeStamps.put(key, timeStamp);
-        fCache.put(key, lookup);
-        if (fCache.size() > fMaxSize)
+        map.put(key, lookup);
+    }
+
+    /**
+     * Called when a transaction has successfully committed,
+     * to make lookups from the transaction available to other transactions.
+     */
+    public synchronized void commitLookups()
+    {
+        Map<LookupKey, Lookup> map = fToBeAdded.get();
+        if (map == null)
         {
-            // Get rid of the oldest entry.
-            Long oldTime = fTimeStamps.firstKey();
-            LookupKey old = fTimeStamps.remove(oldTime);
-            fInverseTimeStamps.remove(old);
-            fCache.remove(old);
+            return;
         }
+        for (Map.Entry<LookupKey, Lookup> entry : map.entrySet())
+        {
+            LookupKey key = entry.getKey();
+            Lookup lookup = entry.getValue();
+            if (fCache.containsKey(key))
+            {
+                fCache.remove(key);
+                Long oldTime = fInverseTimeStamps.get(key);
+                fInverseTimeStamps.remove(key);
+                fTimeStamps.remove(oldTime);
+            }
+            long timeStamp = fTimeStamp++;
+            fTimeStamps.put(timeStamp, key);
+            fInverseTimeStamps.put(key, timeStamp);
+            fCache.put(key, lookup);
+            if (fCache.size() > fMaxSize)
+            {
+                // Get rid of the oldest entry.
+                Long oldTime = fTimeStamps.firstKey();
+                LookupKey old = fTimeStamps.remove(oldTime);
+                fInverseTimeStamps.remove(old);
+                fCache.remove(old);
+            }
+        }
+        fToBeAdded.set(null);
     }
 
     /**
@@ -323,5 +364,6 @@ public class LookupCache
         fCache.clear();
         fTimeStamps.clear();
         fInverseTimeStamps.clear();
+        fToBeAdded.set(null);
     }
 }
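
The loop in commitLookups() also carries forward the cache's LRU bookkeeping: a TreeMap keyed by timestamp makes the oldest entry cheap to find, while an inverse HashMap from key to timestamp lets a refreshed entry retire its old timestamp. A stripped-down, hypothetical sketch of just that eviction scheme (generic names; maxSize plays the role of fMaxSize):

// Hypothetical, stripped-down version of the timestamp-based LRU bookkeeping;
// only the eviction logic is shown.
import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;

class LruIndex<K, V>
{
    private final Map<K, V> cache = new HashMap<K, V>();
    private final TreeMap<Long, K> byTime = new TreeMap<Long, K>(); // timestamp -> key, oldest first
    private final Map<K, Long> byKey = new HashMap<K, Long>();      // key -> timestamp
    private long clock = 0L;
    private final int maxSize;

    LruIndex(int maxSize)
    {
        this.maxSize = maxSize;
    }

    synchronized void put(K key, V value)
    {
        // Refresh: retire the entry's old timestamp so it is not evicted early.
        Long oldTime = byKey.remove(key);
        if (oldTime != null)
        {
            byTime.remove(oldTime);
        }
        long now = clock++;
        byTime.put(now, key);
        byKey.put(key, now);
        cache.put(key, value);

        if (cache.size() > maxSize)
        {
            // Drop the entry with the smallest (oldest) timestamp.
            Long oldestTime = byTime.firstKey();
            K evicted = byTime.remove(oldestTime);
            byKey.remove(evicted);
            cache.remove(evicted);
        }
    }

    synchronized V get(K key)
    {
        return cache.get(key);
    }
}

In the diff this bookkeeping now runs only inside the synchronized commitLookups(), so staging a lookup in the middle of a transaction never touches the shared maps at all.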