Mirror of https://github.com/Alfresco/alfresco-community-repo.git (synced 2025-07-24 17:32:48 +00:00)
Merged V2.1 to HEAD
6486: Fix for AWC-1134 (made minimum length for username and password configurable)
6487: Added "copy.verbose" property to the build script to list all files being copied in the 'deploy-tomcat-exploded' and 'deploy-webclient-changes' targets
6488: Changed the default CIFS server name to remove the underscore, as it can cause problems with OpenOffice
6489: Fix for WCM-498 (expired items that are deleted are not represented as such in the change request dialog or submit dialog)
6490: Fix for WCM-446 and WCM-624: multi-select and All operations in the Modified Files list respect items in workflow and ignore them
6491: Fixes profoundly brain-dead behavior in AVMLockingService.init()
6492: Refactoring of LookupCache to be clustering-compatible
6493: Staging and locking changes
6494: Moved the WCM Revert action to the end of all action lists to avoid accidentally clicking it (as it has no confirmation screen!)
6496: Fixes for WCM-746 and WCM-747: Edit File Properties and Edit Folder Properties work correctly again for AVM objects
6497: Enabled the virtualization server to be (re)started at any point prior to or during startup of the alfresco webapp (fixes WCM-750)
6498: WCM-742: moved locks from the preview sandbox to the main user sandbox

git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/alfresco/HEAD/root@6739 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
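Of the changes above, 6492 produces the diff that follows: LookupCache stops maintaining its own bounded LRU map (fCache, fTimeStamps, fInverseTimeStamps, fMaxSize) and its per-transaction ThreadLocal state, delegating storage to an injected SimpleCache that can be replicated across a cluster. Since such a cache exposes a snapshot of keys rather than a live entry set, invalidation now lists the keys first and tolerates entries vanishing mid-iteration. Below is a minimal, self-contained sketch of that idiom; it uses a plain ConcurrentHashMap as a stand-in for Alfresco's SimpleCache, and every name in it is illustrative rather than the repository's actual API.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;

public class InvalidationSketch
{
    public static void main(String[] args)
    {
        // Stand-in for the injected SimpleCache<LookupKey, Lookup>.
        ConcurrentHashMap<String, String> cache = new ConcurrentHashMap<String, String>();
        cache.put("main:/a", "read lookup in main");
        cache.put("other:/b", "read lookup in other");

        String storeName = "main";
        // 1. Snapshot the keys, as the new code does with fCache.getKeys():
        //    a clustered cache has no stable entrySet() to iterate.
        List<String> keys = new ArrayList<String>(cache.keySet());
        // 2. Re-read each value and invalidate; a vanished entry (null value)
        //    counts as stale, like the 'value == null' checks in the diff.
        for (String key : keys)
        {
            String value = cache.get(key);
            if (value == null || key.startsWith(storeName + ":"))
            {
                cache.remove(key);
            }
        }
        System.out.println(cache); // prints {other:/b=read lookup in other}
    }
}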
@@ -3,14 +3,11 @@
  */
 package org.alfresco.repo.avm;
 
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.TreeMap;
+import java.util.ArrayList;
+import java.util.List;
 
 import org.alfresco.repo.avm.util.SimplePath;
+import org.alfresco.repo.cache.SimpleCache;
 import org.apache.log4j.Logger;
 
 /**
@@ -21,42 +18,10 @@ public class LookupCache
 {
     private static Logger fgLogger = Logger.getLogger(LookupCache.class);
 
 
-    /**
-     * Per transaction lookup results to be added to the cache on successful
-     * commit.
-     */
-    private ThreadLocal<Map<LookupKey, Lookup>> fToBeAdded;
-
-    /**
-     * Per transaction set of invalidated lookup keys.
-     */
-    private ThreadLocal<Set<LookupKey>> fToBePurged;
-
-    /**
-     * The Map of of keys to lookups.
-     */
-    private Map<LookupKey, Lookup> fCache;
-
-    /**
-     * The Map of time stamps to keys.
-     */
-    private SortedMap<Long, LookupKey> fTimeStamps;
-
-    /**
-     * The inverse map of keys to timestamps.
-     */
-    private Map<LookupKey, Long> fInverseTimeStamps;
-
-    /**
-     * The timestamp to next issue.
-     */
-    private long fTimeStamp;
-
-    /**
-     * The maximum number of lines to have in the cache.
-     */
-    private int fMaxSize;
+    private SimpleCache<LookupKey, Lookup> fCache;
 
     /**
      * Reference to the Node DAO.
@@ -73,13 +38,6 @@ public class LookupCache
      */
     public LookupCache()
     {
-        fCache = new HashMap<LookupKey, Lookup>();
-        fTimeStamps = new TreeMap<Long, LookupKey>();
-        fInverseTimeStamps = new HashMap<LookupKey, Long>();
-        fToBeAdded = new ThreadLocal<Map<LookupKey, Lookup>>();
-        fToBePurged = new ThreadLocal<Set<LookupKey>>();
-        fTimeStamp = 0L;
-        fMaxSize = 100;
     }
 
     /**
@@ -100,13 +58,9 @@ public class LookupCache
         fAVMStoreDAO = dao;
     }
 
-    /**
-     * Set the maximum cache size.
-     * @param maxSize
-     */
-    public void setMaxSize(int maxSize)
+    public void setTransactionalCache(SimpleCache<LookupKey, Lookup> cache)
     {
-        fMaxSize = maxSize;
+        fCache = cache;
     }
 
     /**
@@ -125,8 +79,14 @@ public class LookupCache
         LookupKey key = new LookupKey(version, path, store.getName(), write, includeDeleted);
         // Is it in the cache?
         Lookup found = findInCache(key);
+        // TODO Testing.
+        // found = null;
         if (found != null)
         {
+            if (fgLogger.isDebugEnabled())
+            {
+                fgLogger.debug("Cache Hit: " + key + ", " + found.getCurrentNode().getId());
+            }
             return found;
         }
         // Make up a Lookup to hold the results.
@@ -155,7 +115,7 @@ public class LookupCache
             dir = (DirectoryNode)result.getCurrentNode();
             if (path.size() == 1 && path.get(0).equals(""))
             {
-                updateCache(key, result);
+                fCache.put(key, result);
                 return result;
             }
             // Now look up each path element in sequence up to one
@@ -184,7 +144,7 @@ public class LookupCache
             return null;
         }
         result.add(child, path.get(path.size() - 1), write);
-        updateCache(key, result);
+        fCache.put(key, result);
         return result;
     }
 
@@ -195,24 +155,7 @@ public class LookupCache
      */
     private synchronized Lookup findInCache(LookupKey key)
     {
-        // Get the current transaction's purged set.
-        Set<LookupKey> purged = fToBePurged.get();
-        // Get the current transaction's added map.
-        Map<LookupKey, Lookup> added = fToBeAdded.get();
-        // See if it's cached in the transaction.
-        Lookup found = (added != null) ? added.get(key) : null;
-        // It's not.
-        if (found == null)
-        {
-            // If it's been purged in the transaction it is
-            // a miss.
-            if (purged != null && purged.contains(key))
-            {
-                return null;
-            }
-            found = fCache.get(key);
-        }
-        // Despite the odds, we found a hit.
+        Lookup found = fCache.get(key);
         if (found != null)
         {
             // Get a freshened Lookup.
@@ -224,8 +167,6 @@ public class LookupCache
                 fgLogger.error("Invalid entry in cache: " + key);
                 return null;
             }
-            // Prepare the cache for a timestamp update on commit.
-            updateCache(key, found);
             return result;
         }
         // Alternatively for a read lookup a write can match.
@@ -234,18 +175,7 @@ public class LookupCache
             // Make a copy of the key and set it to 'write'
             LookupKey newKey = new LookupKey(key);
             newKey.setWrite(true);
-            // Is it in the transaction's cache?
-            found = (added != null) ? added.get(newKey) : null;
-            // If not.
-            if (found == null)
-            {
-                // If it's been purged it's a miss.
-                if (purged != null && purged.contains(newKey))
-                {
-                    return null;
-                }
-                found = fCache.get(newKey);
-            }
+            found = fCache.get(newKey);
             if (found != null)
             {
                 // We found it. Make a freshened copy of the Lookup.
@@ -257,103 +187,12 @@ public class LookupCache
                     fgLogger.error("Invalid entry in cache: " + newKey);
                     return null;
                 }
-                // Prepare the cache to update time stamp.
-                updateCache(newKey, found);
                 return result;
             }
         }
         return null;
     }
-
-    /**
-     * Add or update an entry in the cache.
-     * @param key
-     * @param lookup
-     */
-    private void updateCache(LookupKey key, Lookup lookup)
-    {
-        // First, put it in the transaction scoped cache.
-        Map<LookupKey, Lookup> map = fToBeAdded.get();
-        if (map == null)
-        {
-            map = new HashMap<LookupKey, Lookup>();
-        }
-        map.put(key, lookup);
-        // Remove any corresponding entry from the purge list.
-        Set<LookupKey> purged = fToBePurged.get();
-        if (purged == null)
-        {
-            return;
-        }
-        purged.remove(key);
-    }
-
-    /**
-     * Called when a transaction has successfully committed,
-     * to make lookups from the transaction available to other transactions.
-     */
-    public synchronized void onCommit()
-    {
-        // First get rid of all entries purged by the transaction.
-        Set<LookupKey> purged = fToBePurged.get();
-        if (purged != null)
-        {
-            purgeEntries(purged);
-        }
-        // Null out the thread local.
-        fToBePurged.set(null);
-        // Get and go through the transaction's added list.
-        Map<LookupKey, Lookup> added = fToBeAdded.get();
-        if (added == null)
-        {
-            return;
-        }
-        for (Map.Entry<LookupKey, Lookup> entry : added.entrySet())
-        {
-            LookupKey key = entry.getKey();
-            Lookup lookup = entry.getValue();
-            // If the cache already has the key, remove it.
-            if (fCache.containsKey(key))
-            {
-                fCache.remove(key);
-                Long oldTime = fInverseTimeStamps.get(key);
-                fInverseTimeStamps.remove(key);
-                fTimeStamps.remove(oldTime);
-            }
-            // Add the entry.
-            long timeStamp = fTimeStamp++;
-            fTimeStamps.put(timeStamp, key);
-            fInverseTimeStamps.put(key, timeStamp);
-            fCache.put(key, lookup);
-            // Check if we're over the limit and purge the
-            // LRU entry if we are.
-            if (fCache.size() > fMaxSize)
-            {
-                // Get rid of the oldest entry.
-                Long oldTime = fTimeStamps.firstKey();
-                LookupKey old = fTimeStamps.remove(oldTime);
-                fInverseTimeStamps.remove(old);
-                fCache.remove(old);
-            }
-        }
-        // Null out the ThreadLocal.
-        fToBeAdded.set(null);
-    }
-
-    /**
-     * Remove a Set of entries.
-     * @param keys The Set of entries.
-     */
-    private void purgeEntries(Set<LookupKey> keys)
-    {
-        for (LookupKey key : keys)
-        {
-            fCache.remove(key);
-            Long time = fInverseTimeStamps.remove(key);
-            fTimeStamps.remove(time);
-        }
-    }
 
     // Following are the cache invalidation calls.
 
     /**
@@ -362,37 +201,25 @@ public class LookupCache
      */
     public synchronized void onWrite(String storeName)
     {
-        // Get or make up the purged Set for this transaction.
-        Set<LookupKey> purged = fToBePurged.get();
-        if (purged == null)
-        {
-            purged = new HashSet<LookupKey>();
-            fToBePurged.set(purged);
-        }
         // Invalidate if it's a read lookup in the store, or
         // any read lookup is it's layered.
-        for (Map.Entry<LookupKey, Lookup> entry : fCache.entrySet())
+        List<LookupKey> keys = new ArrayList<LookupKey>();
+        for (LookupKey key : fCache.getKeys())
         {
-            if ((entry.getKey().getStoreName().equals(storeName) &&
-                 !entry.getKey().isWrite()) ||
-                (!entry.getKey().isWrite() && entry.getValue().isLayered()))
-            {
-                purged.add(entry.getKey());
-            }
+            keys.add(key);
         }
-        // Remove entries from the added set using the same criteria.
-        Map<LookupKey, Lookup> added = fToBeAdded.get();
-        if (added == null)
+        for (LookupKey key : keys)
        {
-            return;
-        }
-        for (Map.Entry<LookupKey, Lookup> entry : added.entrySet())
-        {
-            if ((entry.getKey().getStoreName().equals(storeName) &&
-                 !entry.getKey().isWrite()) ||
-                (!entry.getKey().isWrite() && entry.getValue().isLayered()))
+            Lookup value = fCache.get(key);
+            if ((key.getStoreName().equals(storeName) &&
+                 !key.isWrite()) || value == null ||
+                (!key.isWrite() && value.isLayered()))
             {
-                added.remove(entry.getKey());
+                if (fgLogger.isDebugEnabled())
+                {
+                    fgLogger.debug("Invalidating: " + key + ", " + (value != null ? value.getCurrentNode().getId() : -2));
+                }
+                fCache.remove(key);
             }
         }
     }
@@ -403,34 +230,23 @@ public class LookupCache
      */
     public synchronized void onDelete(String storeName)
     {
-        // Get or make up a fresh purged Set.
-        Set<LookupKey> purged = fToBePurged.get();
-        if (purged == null)
-        {
-            purged = new HashSet<LookupKey>();
-            fToBePurged.set(purged);
-        }
         // Invalidate any entries that are in the store or are layered lookups.
-        for (Map.Entry<LookupKey, Lookup> entry : fCache.entrySet())
+        List<LookupKey> keys = new ArrayList<LookupKey>();
+        for (LookupKey key : fCache.getKeys())
         {
-            if (entry.getKey().getStoreName().equals(storeName) ||
-                entry.getValue().isLayered())
-            {
-                purged.add(entry.getKey());
-            }
+            keys.add(key);
         }
-        // Get rid of any similarly matching elements in the added list.
-        Map<LookupKey, Lookup> added = fToBeAdded.get();
-        if (added == null)
+        for (LookupKey key : keys)
         {
-            return;
-        }
-        for (Map.Entry<LookupKey, Lookup> entry : added.entrySet())
-        {
-            if (entry.getKey().getStoreName().equals(storeName) ||
-                entry.getValue().isLayered())
+            Lookup value = fCache.get(key);
+            if (key.getStoreName().equals(storeName) ||
+                value == null || value.isLayered())
             {
-                added.remove(entry.getKey());
+                if (fgLogger.isDebugEnabled())
+                {
+                    fgLogger.debug("Invalidating: " + key + ", " + (value != null ? value.getCurrentNode().getId() : -2));
+                }
+                fCache.remove(key);
             }
         }
     }
@@ -441,48 +257,31 @@ public class LookupCache
      */
     public synchronized void onSnapshot(String storeName)
     {
-        // Get or make up a purged set.
-        Set<LookupKey> purged = fToBePurged.get();
-        if (purged == null)
-        {
-            purged = new HashSet<LookupKey>();
-            fToBePurged.set(purged);
-        }
-        // Invalidate any entries that in the store and writes or
-        // any layered lookups.
-        for (Map.Entry<LookupKey, Lookup> entry : fCache.entrySet())
+        List<LookupKey> keys = new ArrayList<LookupKey>();
+        for (LookupKey key : fCache.getKeys())
        {
-            if ((entry.getKey().getStoreName().equals(storeName) &&
-                 entry.getKey().isWrite()) ||
-                entry.getValue().isLayered())
-            {
-                purged.add(entry.getKey());
-            }
+            keys.add(key);
        }
-        // Remove from the added list by the same criteria.
-        Map<LookupKey, Lookup> added = fToBeAdded.get();
-        if (added == null)
+        for (LookupKey key : keys)
        {
-            return;
-        }
-        for (Map.Entry<LookupKey, Lookup> entry : added.entrySet())
-        {
-            if ((entry.getKey().getStoreName().equals(storeName) &&
-                 entry.getKey().isWrite()) ||
-                entry.getValue().isLayered())
+            Lookup value = fCache.get(key);
+            if ((key.getStoreName().equals(storeName) &&
+                 key.isWrite()) ||
+                value == null || value.isLayered())
             {
-                added.remove(entry.getKey());
+                if (fgLogger.isDebugEnabled())
+                {
+                    fgLogger.debug("Invalidating: " + key + ", " + (value != null ? value.getCurrentNode().getId() : -2));
+                }
+                fCache.remove(key);
             }
         }
     }
 
     /**
      * Called when a rollback has occurred.
      */
-    public synchronized void onRollback()
+    public synchronized void reset()
     {
-        // Just toss the transaction level changes.
-        fToBeAdded.set(null);
-        fToBePurged.set(null);
+        fCache.clear();
     }
 }
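For context on how the refactored class is configured after this change: eviction and replication become the cache provider's concern, wired in through setTransactionalCache(...) instead of the removed setMaxSize(int), and reset() (which replaces onRollback()) clears the shared cache wholesale, a coarser but cluster-safe substitute for discarding per-thread state. The sketch below is hypothetical wiring under those assumptions; SimpleCacheStandIn and InMemoryCache are invented for illustration, while the real repository injects its cluster-replicated SimpleCache through Spring configuration.

import java.util.ArrayList;
import java.util.Collection;
import java.util.concurrent.ConcurrentHashMap;

public class LookupCacheWiring
{
    // Stand-in mirroring the calls visible in the diff: get/put/remove/clear/getKeys.
    interface SimpleCacheStandIn<K, V>
    {
        V get(K key);
        void put(K key, V value);
        void remove(K key);
        void clear();
        Collection<K> getKeys();
    }

    static class InMemoryCache<K, V> implements SimpleCacheStandIn<K, V>
    {
        private final ConcurrentHashMap<K, V> map = new ConcurrentHashMap<K, V>();
        public V get(K key) { return map.get(key); }
        public void put(K key, V value) { map.put(key, value); }
        public void remove(K key) { map.remove(key); }
        public void clear() { map.clear(); }
        public Collection<K> getKeys() { return new ArrayList<K>(map.keySet()); }
    }

    public static void main(String[] args)
    {
        // Injection takes the place of the removed setMaxSize(int): sizing and
        // replication policy now live in the cache bean, not in LookupCache.
        SimpleCacheStandIn<String, String> cache = new InMemoryCache<String, String>();
        cache.put("main:/www/index.html", "lookup result");

        // reset() semantics: on rollback, drop everything that might be stale.
        cache.clear();
        System.out.println("entries after reset: " + cache.getKeys().size());
    }
}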