Mirror of https://github.com/Alfresco/alfresco-community-repo.git (synced 2025-07-31 17:39:05 +00:00)
Merged BRANCHES/DEV/THOR1 to HEAD:
30458: ALF-10100: need to be able to apply percentage or absolute disk space usage constraints on ${dir.cachedcontent}
30573: ALF-9613: Add min age of files checking to cached content cleaner
30594: ALF-10100: added more sensible default in sample config for quota size (4GB)
30695: ALF-10391, ALF-10392: Added MBeans and improved logging for monitoring purposes.
30850: THOR-202: CachingContentStore quota manager should reject large files
30901: Added warn-level logging about failure to cache content item
30951: THOR-217: when the quota is met or exceeded, the next time the cleaner runs it must use some strategy to make some space.

git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/alfresco/HEAD/root@30956 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
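The central mechanism in this change is a pair of quota hooks around cache file writes: CachingContentStore calls beforeWritingCacheFile(...) to ask whether content may be cached at all, and afterWritingCacheFile(...) once a cache file has been written, at which point the strategy can ask for the file to be discarded. A minimal custom strategy might look like the following sketch; the interface name and the two hook methods are taken from this diff, while the fixed threshold and the assumption that no further methods need implementing are illustrative only.

import org.alfresco.repo.content.caching.quota.QuotaManagerStrategy;

/**
 * Illustrative sketch only: vetoes caching of files larger than a fixed threshold.
 * The two hooks mirror the calls made by CachingContentStore in this change; the
 * real QuotaManagerStrategy interface may declare additional methods.
 */
public class MaxFileSizeQuotaStrategy implements QuotaManagerStrategy
{
    private static final long MAX_FILE_BYTES = 100L * 1024L * 1024L; // assumed threshold, for the example only

    @Override
    public boolean beforeWritingCacheFile(long contentSizeBytes)
    {
        // contentSizeBytes is 0 when the store writes straight to the cache and the
        // final size is not yet known (see getWriter(...) further down in this diff).
        return contentSizeBytes <= MAX_FILE_BYTES;
    }

    @Override
    public boolean afterWritingCacheFile(long contentSizeBytes)
    {
        // Returning false makes CachingContentStore delete the new cache file and
        // remove its URL from the in-memory lookup table.
        return contentSizeBytes <= MAX_FILE_BYTES;
    }
}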
@@ -15,10 +15,11 @@
</bean>

<bean id="cachingContentStore" class="org.alfresco.repo.content.caching.CachingContentStore">
<bean id="cachingContentStore" class="org.alfresco.repo.content.caching.CachingContentStore" init-method="init">
<property name="backingStore" ref="backingStore"/>
<property name="cache" ref="contentCache"/>
<property name="cacheOnInbound" value="${system.content.caching.cacheOnInbound}"/>
<property name="quota" ref="standardQuotaManager"/>
</bean>

@@ -42,6 +43,25 @@
</bean>

<bean
id="standardQuotaManager"
class="org.alfresco.repo.content.caching.quota.StandardQuotaStrategy"
init-method="init"
destroy-method="shutdown">
<!-- maxUsageMB: the maximum disk usage that should be used for cached content files -->
<property name="maxUsageMB" value="4096"/>

<!-- maxFileSizeMB: files larger than this size will not be kept in the cache -->
<property name="maxFileSizeMB" value="0"/>

<property name="cache" ref="contentCache"/>
<property name="cleaner" ref="cachedContentCleaner"/>
</bean>

<bean id="unlimitedQuotaManager" class="org.alfresco.repo.content.caching.quota.UnlimitedQuotaStrategy"/>

<bean id="cachingContentStoreCache" class="org.alfresco.repo.cache.EhCacheAdapter">
<property name="cache">
<bean class="org.springframework.cache.ehcache.EhCacheFactoryBean">

@@ -76,12 +96,15 @@
</property>
</bean>

<bean id="cachedContentCleaner" class="org.alfresco.repo.content.caching.cleanup.CachedContentCleaner">
<bean id="cachedContentCleaner"
class="org.alfresco.repo.content.caching.cleanup.CachedContentCleaner"
init-method="init">
<property name="minFileAgeMillis" value="${system.content.caching.minFileAgeMillis}"/>
<property name="maxDeleteWatchCount" value="${system.content.caching.maxDeleteWatchCount}"/>
<property name="cache" ref="contentCache"/>
<property name="usageTracker" ref="standardQuotaManager"/>
</bean>

<bean id="cachingContentStoreCleanerTrigger" class="org.alfresco.util.CronTriggerBean">
<property name="jobDetail">
<ref bean="cachingContentStoreCleanerJobDetail" />

@@ -93,5 +116,4 @@
<value>${system.content.caching.contentCleanup.cronExpression}</value>
</property>
</bean>

</beans>
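Read together, the wiring above gives cachingContentStore a standardQuotaManager, which in turn points back at the contentCache and at the cachedContentCleaner it can call on when space runs out. For tests or embedded use, roughly the same graph can be assembled programmatically, as sketched below; the CachingContentStore constructor and the setQuota call appear in this diff, while the StandardQuotaStrategy setters are assumed from the XML property names and Spring's usual property-to-setter convention.

import org.alfresco.repo.content.ContentStore;
import org.alfresco.repo.content.caching.CachingContentStore;
import org.alfresco.repo.content.caching.ContentCache;
import org.alfresco.repo.content.caching.cleanup.CachedContentCleaner;
import org.alfresco.repo.content.caching.quota.StandardQuotaStrategy;

public class CachingStoreWiringSketch
{
    /**
     * Builds roughly the same object graph as the Spring XML above.
     * Setter names on StandardQuotaStrategy are inferred from the XML properties and are assumptions.
     */
    public static CachingContentStore wire(ContentStore backingStore, ContentCache cache, CachedContentCleaner cleaner)
    {
        StandardQuotaStrategy quota = new StandardQuotaStrategy();
        quota.setMaxUsageMB(4096);   // mirrors <property name="maxUsageMB" value="4096"/>
        quota.setMaxFileSizeMB(0);   // 0 means no per-file size limit
        quota.setCache(cache);
        quota.setCleaner(cleaner);
        quota.init();                // matches init-method="init" in the XML

        CachingContentStore store = new CachingContentStore(backingStore, cache, false);
        store.setQuota(quota);
        // Under Spring, init-method="init" runs after the ApplicationEventPublisher has been
        // injected and publishes CachingContentStoreCreatedEvent; it is not called here because
        // there is no publisher outside a Spring context.
        return store;
    }
}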
@@ -780,3 +780,4 @@ system.content.caching.timeToLiveSeconds=0
system.content.caching.timeToIdleSeconds=86400
system.content.caching.maxElementsInMemory=5000
system.content.caching.maxElementsOnDisk=10000
system.content.caching.minFileAgeMillis=60000
@@ -135,6 +135,16 @@ public class CacheFileProps
return propsFile.exists();
}

/**
* Size of the properties file or 0 if it does not exist.
*
* @return file size in bytes.
*/
public long fileSize()
{
return propsFile.length();
}

/**
* Set the value of the contentUrl property.
*
@@ -25,11 +25,18 @@ import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;

import org.alfresco.repo.content.ContentContext;
import org.alfresco.repo.content.ContentStore;
import org.alfresco.repo.content.caching.quota.QuotaManagerStrategy;
import org.alfresco.repo.content.caching.quota.UnlimitedQuotaStrategy;
import org.alfresco.service.cmr.repository.ContentIOException;
import org.alfresco.service.cmr.repository.ContentReader;
import org.alfresco.service.cmr.repository.ContentStreamListener;
import org.alfresco.service.cmr.repository.ContentWriter;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.springframework.beans.factory.BeanNameAware;
import org.springframework.beans.factory.annotation.Required;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.context.ApplicationEventPublisherAware;

/**
* Implementation of ContentStore that wraps any other ContentStore (the backing store)

@@ -43,15 +50,19 @@ import org.springframework.beans.factory.annotation.Required;
*
* @author Matt Ward
*/
public class CachingContentStore implements ContentStore
public class CachingContentStore implements ContentStore, ApplicationEventPublisherAware, BeanNameAware
{
private final static Log log = LogFactory.getLog(CachingContentStore.class);
// NUM_LOCKS absolutely must be a power of 2 for the use of locks to be evenly balanced
private final static int numLocks = 32;
private final static ReentrantReadWriteLock[] locks;
private ContentStore backingStore;
private ContentCache cache;
private QuotaManagerStrategy quota = new UnlimitedQuotaStrategy();
private boolean cacheOnInbound;
private int maxCacheTries = 2;
private ApplicationEventPublisher eventPublisher;
private String beanName;

static
{

@@ -73,6 +84,13 @@ public class CachingContentStore implements ContentStore
this.cacheOnInbound = cacheOnInbound;
}

/**
* Initialisation method, should be called once the CachingContentStore has been constructed.
*/
public void init()
{
eventPublisher.publishEvent(new CachingContentStoreCreatedEvent(this));
}

/*
* @see org.alfresco.repo.content.ContentStore#isContentUrlSupported(java.lang.String)

@@ -185,14 +203,32 @@ public class CachingContentStore implements ContentStore
{
for (int i = 0; i < maxCacheTries; i++)
{
ContentReader backingStoreReader = backingStore.getReader(url);
long contentSize = backingStoreReader.getSize();

if (!quota.beforeWritingCacheFile(contentSize))
{
return backingStoreReader;
}

ContentReader reader = attemptCacheAndRead(url);

if (reader != null)
{
quota.afterWritingCacheFile(contentSize);
return reader;
}
}
// Have tried multiple times to cache the item and read it back from the cache
// but there is a recurring problem - give up and return the item from the backing store.
if (log.isWarnEnabled())
{
log.warn("Attempted " + maxCacheTries + " times to cache content item and failed - "
+ "returning reader from backing store instead [" +
"backingStore=" + backingStore +
", url=" + url +
"]");
}
return backingStore.getReader(url);
}
finally

@@ -201,6 +237,18 @@ public class CachingContentStore implements ContentStore
}
}
/**
* Attempt to read content into a cached file and return a reader onto it. If the content is
* already in the cache (possibly due to a race condition between the read/write locks) then
* a reader onto that content is returned.
* <p>
* If it is not possible to cache the content and/or get a reader onto the cached content then
* <code>null</code> is returned and the method ensures that the URL is not stored in the cache.
*
* @param url URL to cache.
* @return A reader onto the cached content file or null if unable to provide one.
*/
private ContentReader attemptCacheAndRead(String url)
{
ContentReader reader = null;
@@ -236,9 +284,16 @@ public class CachingContentStore implements ContentStore
{
final ContentWriter bsWriter = backingStore.getWriter(context);

// write to cache
final ContentWriter cacheWriter = cache.getWriter(bsWriter.getContentUrl());
if (!quota.beforeWritingCacheFile(0))
{
return bsWriter;
}

// Writing will be performed straight to the cache.
final String url = bsWriter.getContentUrl();
final ContentWriter cacheWriter = cache.getWriter(url);

// When finished writing perform these actions.
cacheWriter.addListener(new ContentStreamListener()
{
@Override

@@ -250,6 +305,13 @@ public class CachingContentStore implements ContentStore
bsWriter.setLocale(cacheWriter.getLocale());
bsWriter.setMimetype(cacheWriter.getMimetype());
bsWriter.putContent(cacheWriter.getReader());

if (!quota.afterWritingCacheFile(cacheWriter.getSize()))
{
// Quota manager has requested that the new cache file is not kept.
cache.deleteFile(url);
cache.remove(url);
}
}
});

@@ -357,14 +419,77 @@ public class CachingContentStore implements ContentStore
this.backingStore = backingStore;
}

public String getBackingStoreType()
{
return backingStore.getClass().getName();
}

public String getBackingStoreDescription()
{
return backingStore.toString();
}

@Required
public void setCache(ContentCache cache)
{
this.cache = cache;
}

public ContentCache getCache()
{
return this.cache;
}

public void setCacheOnInbound(boolean cacheOnInbound)
{
this.cacheOnInbound = cacheOnInbound;
}

public boolean isCacheOnInbound()
{
return this.cacheOnInbound;
}

public int getMaxCacheTries()
{
return this.maxCacheTries;
}

public void setMaxCacheTries(int maxCacheTries)
{
this.maxCacheTries = maxCacheTries;
}

/**
* Sets the QuotaManagerStrategy that will be used.
*
* @param quota
*/
@Required
public void setQuota(QuotaManagerStrategy quota)
{
this.quota = quota;
}

public QuotaManagerStrategy getQuota()
{
return this.quota;
}

@Override
public void setApplicationEventPublisher(ApplicationEventPublisher applicationEventPublisher)
{
this.eventPublisher = applicationEventPublisher;
}

@Override
public void setBeanName(String name)
{
this.beanName = name;
}

public String getBeanName()
{
return this.beanName;
}
}
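The ApplicationEventPublisherAware and BeanNameAware plumbing added above exists so that other components can find out about store instances without direct wiring; the commit message mentions MBeans for monitoring, and the event classes added below carry the store reference for exactly that purpose. A listener could be as small as the following sketch; the listener class itself is hypothetical, while the event type and its accessors come from this commit.

import org.alfresco.repo.content.caching.CachingContentStoreCreatedEvent;
import org.springframework.context.ApplicationListener;

/**
 * Hypothetical monitoring hook: reacts to the creation event published by
 * CachingContentStore.init() and could, for example, register an MBean for the store.
 */
public class CachingContentStoreMonitor implements ApplicationListener<CachingContentStoreCreatedEvent>
{
    @Override
    public void onApplicationEvent(CachingContentStoreCreatedEvent event)
    {
        // The event carries the fully wired store, its Spring bean name and backing store type.
        System.out.println("CachingContentStore created: " + event.getCachingContentStore().getBeanName()
                + ", backing store: " + event.getCachingContentStore().getBackingStoreType());
    }
}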
@@ -0,0 +1,39 @@
/*
* Copyright (C) 2005-2011 Alfresco Software Limited.
*
* This file is part of Alfresco
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
*/
package org.alfresco.repo.content.caching;

/**
* Event fired when a CachingContentStore instance is created.
*
* @author Matt Ward
*/
public class CachingContentStoreCreatedEvent extends CachingContentStoreEvent
{
private static final long serialVersionUID = 1L;

public CachingContentStoreCreatedEvent(CachingContentStore source)
{
super(source);
}

public CachingContentStore getCachingContentStore()
{
return (CachingContentStore) source;
}
}
@@ -0,0 +1,52 @@
/*
* Copyright (C) 2005-2011 Alfresco Software Limited.
*
* This file is part of Alfresco
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
*/
package org.alfresco.repo.content.caching;

import org.springframework.context.ApplicationEvent;

/**
* Abstract base class for CachingContentStore related application events.
*
* @author Matt Ward
*/
public abstract class CachingContentStoreEvent extends ApplicationEvent
{
private static final long serialVersionUID = 1L;

/**
* Constructor that captures the source of the event.
*
* @param source
*/
public CachingContentStoreEvent(Object source)
{
super(source);
}

/**
* Is the event an instance of the specified type (or subclass)?
*
* @param type
* @return
*/
public boolean isType(Class<?> type)
{
return type.isAssignableFrom(getClass());
}
}
@@ -29,6 +29,9 @@ import org.alfresco.repo.content.ContentStore;
import org.alfresco.repo.content.filestore.FileContentStore;
import org.alfresco.service.cmr.repository.ContentWriter;
import org.alfresco.util.TempFileProvider;
import org.junit.internal.runners.JUnit38ClassRunner;
import org.junit.runner.RunWith;

/**
* Tests for the CachingContentStore that benefit from a full set of tests

@@ -36,6 +39,7 @@ import org.alfresco.util.TempFileProvider;
*
* @author Matt Ward
*/
@RunWith(JUnit38ClassRunner.class)
public class CachingContentStoreSpringTest extends AbstractWritableContentStoreTest
{
private static final String EHCACHE_NAME = "cache.test.cachingContentStoreCache";

@@ -44,6 +48,7 @@ public class CachingContentStoreSpringTest extends AbstractWritableContentStoreT
private FileContentStore backingStore;
private ContentCacheImpl cache;

@Override
public void setUp() throws Exception
{

@@ -62,7 +67,6 @@ public class CachingContentStoreSpringTest extends AbstractWritableContentStoreT
store = new CachingContentStore(backingStore, cache, false);
}

private EhCacheAdapter<Key, String> createMemoryStore()
{
CacheManager manager = CacheManager.getInstance();

@@ -152,7 +156,7 @@ public class CachingContentStoreSpringTest extends AbstractWritableContentStoreT
assertEquals(content, retrievedContent);

// Remove the cached disk file
File cacheFile = new File(cache.cacheFileLocation(contentUrl));
File cacheFile = new File(cache.getCacheFilePath(contentUrl));
cacheFile.delete();
assertTrue("Cached content should have been deleted", !cacheFile.exists());
@@ -24,7 +24,9 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;

@@ -38,6 +40,8 @@ import java.util.Locale;
import org.alfresco.repo.content.ContentContext;
import org.alfresco.repo.content.ContentStore;
import org.alfresco.repo.content.ContentStore.ContentUrlHandler;
import org.alfresco.repo.content.caching.quota.QuotaManagerStrategy;
import org.alfresco.repo.content.caching.quota.UnlimitedQuotaStrategy;
import org.alfresco.service.cmr.repository.ContentIOException;
import org.alfresco.service.cmr.repository.ContentReader;
import org.alfresco.service.cmr.repository.ContentStreamListener;

@@ -47,7 +51,6 @@ import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.runners.MockitoJUnitRunner;

/**

@@ -59,6 +62,8 @@ import org.mockito.runners.MockitoJUnitRunner;
public class CachingContentStoreTest
{
private CachingContentStore cachingStore;
private ContentReader sourceContent;
private ContentReader cachedContent;

@Mock
private ContentStore backingStore;

@@ -71,26 +76,49 @@ public class CachingContentStoreTest
public void setUp() throws Exception
{
cachingStore = new CachingContentStore(backingStore, cache, false);
cachingStore.setQuota(new UnlimitedQuotaStrategy());

sourceContent = mock(ContentReader.class, "sourceContent");
cachedContent = mock(ContentReader.class, "cachedContent");
}

@Test
public void getReaderForItemInCache()
{
ContentReader cachedContentReader = mock(ContentReader.class);
when(cache.getReader("url")).thenReturn(cachedContentReader);
when(cache.contains("url")).thenReturn(true);
when(cache.getReader("url")).thenReturn(cachedContent);

ContentReader returnedReader = cachingStore.getReader("url");

assertSame(returnedReader, cachedContentReader);
assertSame(returnedReader, cachedContent);
verify(backingStore, never()).getReader(anyString());
}

@Test
// Item isn't in cache, so will be cached and returned.
public void getReaderForItemMissingFromCache()
{
when(cache.getReader("url")).thenReturn(cachedContent);
when(backingStore.getReader("url")).thenReturn(sourceContent);
when(sourceContent.getSize()).thenReturn(1274L);
when(cache.put("url", sourceContent)).thenReturn(true);

QuotaManagerStrategy quota = mock(QuotaManagerStrategy.class);
cachingStore.setQuota(quota);
when(quota.beforeWritingCacheFile(1274L)).thenReturn(true);

ContentReader returnedReader = cachingStore.getReader("url");

assertSame(returnedReader, cachedContent);
verify(quota).afterWritingCacheFile(1274L);
}

@Test
public void getReaderForItemMissingFromCacheWillGiveUpAfterRetrying()
{
ContentReader sourceContent = mock(ContentReader.class);
when(cache.getReader("url")).thenThrow(new CacheMissException("url"));
when(backingStore.getReader("url")).thenReturn(sourceContent);
when(cache.put("url", sourceContent)).thenReturn(true);

@@ -98,7 +126,7 @@ public class CachingContentStoreTest
ContentReader returnedReader = cachingStore.getReader("url");

// Upon failure, item is removed from cache
verify(cache, Mockito.atLeastOnce()).remove("url");
verify(cache, atLeastOnce()).remove("url");

// The content comes direct from the backing store
assertSame(returnedReader, sourceContent);

@@ -108,8 +136,6 @@ public class CachingContentStoreTest
@Test
public void getReaderForItemMissingFromCacheWillRetryAndCanSucceed()
{
ContentReader sourceContent = mock(ContentReader.class);
ContentReader cachedContent = mock(ContentReader.class);
when(cache.getReader("url")).
thenThrow(new CacheMissException("url")).
thenReturn(cachedContent);

@@ -125,7 +151,6 @@ public class CachingContentStoreTest
@Test
public void getReaderForItemMissingFromCacheButNoContentToCache()
{
ContentReader sourceContent = mock(ContentReader.class);
when(cache.getReader("url")).thenThrow(new CacheMissException("url"));
when(backingStore.getReader("url")).thenReturn(sourceContent);
when(cache.put("url", sourceContent)).thenReturn(false);

@@ -134,14 +159,38 @@ public class CachingContentStoreTest
}

@Test
// When attempting to read uncached content.
public void quotaManagerCanVetoCacheFileWriting()
{
when(backingStore.getReader("url")).thenReturn(sourceContent);
QuotaManagerStrategy quota = mock(QuotaManagerStrategy.class);
cachingStore.setQuota(quota);
when(sourceContent.getSize()).thenReturn(1274L);
when(quota.beforeWritingCacheFile(1274L)).thenReturn(false);

ContentReader returnedReader = cachingStore.getReader("url");

verify(cache, never()).put("url", sourceContent);
assertSame(returnedReader, sourceContent);
verify(quota, never()).afterWritingCacheFile(anyLong());
}

@Test
public void getWriterWhenNotCacheOnInbound()
{
QuotaManagerStrategy quota = mock(QuotaManagerStrategy.class);
cachingStore.setQuota(quota);

ContentContext ctx = ContentContext.NULL_CONTEXT;

cachingStore.getWriter(ctx);

verify(backingStore).getWriter(ctx);
// No quota manager interaction - as no caching happening.
verify(quota, never()).beforeWritingCacheFile(anyLong());
verify(quota, never()).afterWritingCacheFile(anyLong());
}

@@ -157,7 +206,12 @@ public class CachingContentStoreTest
when(cache.getWriter("url")).thenReturn(cacheWriter);
ContentReader readerFromCacheWriter = mock(ContentReader.class);
when(cacheWriter.getReader()).thenReturn(readerFromCacheWriter);
when(cacheWriter.getSize()).thenReturn(54321L);
QuotaManagerStrategy quota = mock(QuotaManagerStrategy.class);
cachingStore.setQuota(quota);

// Quota manager interceptor is fired.
when(quota.beforeWritingCacheFile(0L)).thenReturn(true);

cachingStore.getWriter(ctx);

@@ -168,8 +222,68 @@ public class CachingContentStoreTest
arg.getValue().contentStreamClosed();
// Check behaviour of the listener
verify(bsWriter).putContent(readerFromCacheWriter);
// Post caching quota manager hook is fired.
verify(quota).afterWritingCacheFile(54321L);
}

verify(backingStore).getWriter(ctx);

@Test
// When attempting to perform write-through caching, i.e. cacheOnInbound = true
public void quotaManagerCanVetoInboundCaching()
{
cachingStore = new CachingContentStore(backingStore, cache, true);
QuotaManagerStrategy quota = mock(QuotaManagerStrategy.class);
cachingStore.setQuota(quota);

ContentContext ctx = ContentContext.NULL_CONTEXT;
ContentWriter backingStoreWriter = mock(ContentWriter.class);
when(backingStore.getWriter(ctx)).thenReturn(backingStoreWriter);
when(quota.beforeWritingCacheFile(0L)).thenReturn(false);

ContentWriter returnedWriter = cachingStore.getWriter(ctx);

assertSame("Should be writing direct to backing store", backingStoreWriter, returnedWriter);
verify(quota, never()).afterWritingCacheFile(anyLong());
}

@Test
public void quotaManagerCanRequestFileDeletionFromCacheAfterWrite()
{
cachingStore = new CachingContentStore(backingStore, cache, true);
ContentContext ctx = ContentContext.NULL_CONTEXT;
ContentWriter bsWriter = mock(ContentWriter.class);
when(backingStore.getWriter(ctx)).thenReturn(bsWriter);
when(bsWriter.getContentUrl()).thenReturn("url");
ContentWriter cacheWriter = mock(ContentWriter.class);
when(cache.getWriter("url")).thenReturn(cacheWriter);
ContentReader readerFromCacheWriter = mock(ContentReader.class);
when(cacheWriter.getReader()).thenReturn(readerFromCacheWriter);
when(cacheWriter.getSize()).thenReturn(54321L);
QuotaManagerStrategy quota = mock(QuotaManagerStrategy.class);
cachingStore.setQuota(quota);

// Quota manager interceptor is fired.
when(quota.beforeWritingCacheFile(0L)).thenReturn(true);

cachingStore.getWriter(ctx);

// Check that a listener was attached to cacheWriter with the correct behaviour
ArgumentCaptor<ContentStreamListener> arg = ArgumentCaptor.forClass(ContentStreamListener.class);
verify(cacheWriter).addListener(arg.capture());

// Don't keep the new cache file
when(quota.afterWritingCacheFile(54321L)).thenReturn(false);

// Simulate a stream close
arg.getValue().contentStreamClosed();
// Check behaviour of the listener
verify(bsWriter).putContent(readerFromCacheWriter);
// Post caching quota manager hook is fired.
verify(quota).afterWritingCacheFile(54321L);
// The item should be deleted from the cache (lookup table and content cache file)
verify(cache).deleteFile("url");
verify(cache).remove("url");
}
@@ -0,0 +1,54 @@
/*
* Copyright (C) 2005-2011 Alfresco Software Limited.
*
* This file is part of Alfresco
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
*/
package org.alfresco.repo.content.caching;

import org.alfresco.repo.content.caching.cleanup.CachedContentCleanupJobTest;
import org.alfresco.repo.content.caching.quota.StandardQuotaStrategyMockTest;
import org.alfresco.repo.content.caching.quota.StandardQuotaStrategyTest;
import org.alfresco.repo.content.caching.quota.UnlimitedQuotaStrategyTest;
import org.alfresco.repo.content.caching.test.ConcurrentCachingStoreTest;
import org.alfresco.repo.content.caching.test.SlowContentStoreTest;
import org.junit.runner.RunWith;
import org.junit.runners.Suite;

/**
* Test suite for all the CachingContentStore test classes.
*
* @author Matt Ward
*/
@RunWith(Suite.class)
@Suite.SuiteClasses(
{
CachedContentCleanupJobTest.class,
StandardQuotaStrategyMockTest.class,
StandardQuotaStrategyTest.class,
UnlimitedQuotaStrategyTest.class,
ConcurrentCachingStoreTest.class,
SlowContentStoreTest.class,
// TODO: CachingContentStoreSpringTest doesn't seem to like being run in a suite,
// will fix later but please run separately for now.
//CachingContentStoreSpringTest.class,
CachingContentStoreTest.class,
ContentCacheImplTest.class,
FullTest.class
})
public class CachingContentStoreTestSuite
{
}
@@ -18,6 +18,8 @@
*/
package org.alfresco.repo.content.caching;

import java.io.File;

import org.alfresco.service.cmr.repository.ContentReader;
import org.alfresco.service.cmr.repository.ContentWriter;

@@ -30,6 +32,14 @@ import org.alfresco.service.cmr.repository.ContentWriter;
*/
public interface ContentCache
{
/**
* Returns the location where cache files will be written (cacheRoot) - implementation
* dependent and may be null.
*
* @return cacheRoot
*/
public File getCacheRoot();

/**
* Check to see if the content - specified by URL - exists in the cache.
* <p>

@@ -66,12 +76,21 @@ public interface ContentCache

/**
* Remove a cached item from the in-memory lookup table. Implementation should not remove
* the actual cached content (file) - this should be left to the clean-up process.
* the actual cached content (file) - this should be left to the clean-up process or can
* be deleted with {@link #deleteFile(String)}.
*
* @param contentUrl
*/
void remove(String contentUrl);

/**
* Deletes the cached content file for the specified URL. To remove the item from the
* lookup table also, use {@link #remove(String)} after calling this method.
*
* @param url
*/
void deleteFile(String url);

/**
* Retrieve a ContentWriter to write content to a cache file. Upon closing the stream
* a listener will add the new content file to the in-memory lookup table.
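The two methods documented above are deliberately split: remove(String) only clears the in-memory lookup entry, while deleteFile(String) only deletes the content file on disk. Evicting an item completely therefore takes both calls, which is exactly what CachingContentStore does when the quota manager rejects a freshly written cache file. A small helper, shown here purely as an illustration, makes the pairing explicit.

import org.alfresco.repo.content.caching.ContentCache;

public final class CacheEviction
{
    private CacheEviction() {}

    /**
     * Illustrative helper: fully evicts one URL from the cache by combining the
     * two ContentCache operations described above.
     */
    public static void evict(ContentCache cache, String contentUrl)
    {
        cache.deleteFile(contentUrl); // delete the cached content file from disk
        cache.remove(contentUrl);     // then drop the URL from the in-memory lookup table
    }
}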
@@ -19,8 +19,12 @@
package org.alfresco.repo.content.caching;

import java.io.File;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collections;
import java.util.Comparator;
import java.util.GregorianCalendar;
import java.util.List;

import org.alfresco.repo.cache.SimpleCache;
import org.alfresco.repo.content.filestore.FileContentReader;

@@ -169,6 +173,21 @@ public class ContentCacheImpl implements ContentCache
memoryStore.remove(Key.forCacheFile(path));
}

/**
* Remove all items from the lookup table. Cached content files are not removed.
*/
public void removeAll()
{
memoryStore.clear();
}

@Override
public void deleteFile(String url)
{
File cacheFile = new File(getCacheFilePath(url));
cacheFile.delete();
}

@Override
public ContentWriter getWriter(final String url)
{

@@ -215,6 +234,7 @@ public class ContentCacheImpl implements ContentCache
return sb.toString();
}

/**
* Configure ContentCache with a memory store - an EhCacheAdapter.
*

@@ -232,6 +252,14 @@ public class ContentCacheImpl implements ContentCache
*/
public void setCacheRoot(File cacheRoot)
{
if (cacheRoot == null)
{
throw new IllegalArgumentException("cacheRoot cannot be null.");
}
if (!cacheRoot.exists())
{
cacheRoot.mkdirs();
}
this.cacheRoot = cacheRoot;
}

@@ -240,21 +268,15 @@ public class ContentCacheImpl implements ContentCache
*
* @return cacheRoot
*/
@Override
public File getCacheRoot()
{
return this.cacheRoot;
}

// Not part of the ContentCache interface as this breaks encapsulation.
// Handy method for tests though, since it allows us to find out where
// the content was cached.
protected String cacheFileLocation(String url)
{
return memoryStore.get(Key.forUrl(url));
}

/**
* @param cachedContentCleaner
* Ask the ContentCacheImpl to visit all the content files in the cache.
* @param handler
*/
public void processFiles(FileHandler handler)
{

@@ -272,7 +294,8 @@ public class ContentCacheImpl implements ContentCache
{
if (dir.isDirectory())
{
File[] files = dir.listFiles();
File[] files = sortFiles(dir);

for (File file : files)
{
if (file.isDirectory())

@@ -290,4 +313,77 @@ public class ContentCacheImpl implements ContentCache
throw new IllegalArgumentException("handleDir() called with non-directory: " + dir.getAbsolutePath());
}
}

/**
* Sort files ready for a FileHandler to visit them. This sorts them based on the structure
* created by the {@link #createNewCacheFilePath()} method. Knowing that the directories are all
* numeric date/time components, if they are sorted in ascending order then the oldest
* directories will be visited first.
* <p>
* The returned array contains the (numerically sorted) directories first followed by the (unsorted) plain files.
*
* @param dir
* @return
*/
private File[] sortFiles(File dir)
{
List<File> dirs = new ArrayList<File>();
List<File> files = new ArrayList<File>();

for (File item : dir.listFiles())
{
if (item.isDirectory())
{
dirs.add(item);
}
else
{
files.add(item);
}
}

// Sort directories as numbers - as for structure produced by ContentCacheImpl
Collections.sort(dirs, new NumericFileNameComparator());

// Concatenation of elements in dirs followed by elements in files
List<File> all = new ArrayList<File>();
all.addAll(dirs);
all.addAll(files);

return all.toArray(new File[]{});
}

protected static class NumericFileNameComparator implements Comparator<File>
{
@Override
public int compare(File o1, File o2)
{
Integer n1 = parse(o1.getName());
Integer n2 = parse(o2.getName());
return n1.compareTo(n2);
}

/**
* If unable to parse a String numerically then Integer.MAX_VALUE is returned. This
* results in unexpected directories or files in the structure appearing after the
* expected directories - so the files we know ought to be older will appear first
* in a sorted collection.
*
* @param s String to parse
* @return Numeric form of s
*/
private int parse(String s)
{
try
{
return Integer.parseInt(s);
}
catch(NumberFormatException e)
{
return Integer.MAX_VALUE;
}
}
}
}
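The comparator above is what lets the cleaner visit the oldest cache files first: directory names under cacheRoot are date and time components, so sorting them as integers (with non-numeric names pushed to the end) gives a chronological walk. The short demonstration below shows the resulting order; the demo class is hypothetical and sits in the same package because NumericFileNameComparator is protected, just as the test class later in this commit does.

package org.alfresco.repo.content.caching;

import java.io.File;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class NumericFileNameComparatorDemo
{
    public static void main(String[] args)
    {
        // "3" sorts before "20" because names are compared numerically, and the
        // non-numeric name falls to the end (it parses to Integer.MAX_VALUE).
        List<File> dirs = Arrays.asList(new File("20"), new File("3"), new File("1"), new File("props"));
        Collections.sort(dirs, new ContentCacheImpl.NumericFileNameComparator());
        System.out.println(dirs); // prints [1, 3, 20, props]
    }
}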
@@ -24,16 +24,20 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;

import org.alfresco.repo.cache.SimpleCache;
import org.alfresco.repo.content.caching.ContentCacheImpl.NumericFileNameComparator;
import org.alfresco.repo.content.filestore.FileContentReader;
import org.alfresco.repo.content.filestore.FileContentWriter;
import org.alfresco.service.cmr.repository.ContentReader;
import org.alfresco.util.GUID;
import org.alfresco.util.TempFileProvider;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.ArgumentCaptor;
import org.mockito.InOrder;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.runners.MockitoJUnitRunner;

@@ -59,6 +63,26 @@ public class ContentCacheImplTest
}

@Test(expected=IllegalArgumentException.class)
public void cannotSetNullCacheRoot()
{
contentCache.setCacheRoot(null);
}

@Test
public void willCreateNonExistentCacheRoot()
{
File cacheRoot = new File(TempFileProvider.getTempDir(), GUID.generate());
cacheRoot.deleteOnExit();
assertFalse("Pre-condition of test is that cacheRoot does not exist", cacheRoot.exists());

contentCache.setCacheRoot(cacheRoot);

assertTrue("cacheRoot should have been created", cacheRoot.exists());
}

@Test
public void canGetReaderForItemInCacheHavingLiveFile()
{

@@ -98,14 +122,6 @@ public class ContentCacheImplTest
}

private File tempfile()
{
File file = TempFileProvider.createTempFile("cached-content", ".bin");
file.deleteOnExit();
return file;
}

@Test(expected=CacheMissException.class)
public void getReaderWhenItemNotInCache()
{

@@ -211,6 +227,17 @@ public class ContentCacheImplTest
Mockito.verify(lookupTable).remove(Key.forCacheFile(path));
}

@Test
public void deleteFile()
{
File cacheFile = tempfile();
assertTrue("Temp file should have been written", cacheFile.exists());
Mockito.when(contentCache.getCacheFilePath("url")).thenReturn(cacheFile.getAbsolutePath());

contentCache.deleteFile("url");

assertFalse("File should have been deleted", cacheFile.exists());
}

@Test
public void getWriter()

@@ -225,4 +252,81 @@ public class ContentCacheImplTest
Mockito.verify(lookupTable).put(Key.forUrl(url), writer.getFile().getAbsolutePath());
Mockito.verify(lookupTable).put(Key.forCacheFile(writer.getFile().getAbsolutePath()), url);
}

@Test
public void compareNumericFileNames()
{
NumericFileNameComparator comparator = new NumericFileNameComparator();
assertEquals(-1, comparator.compare(new File("1"), new File("2")));
assertEquals(0, comparator.compare(new File("2"), new File("2")));
assertEquals(1, comparator.compare(new File("2"), new File("1")));

// Make sure that ordering is numeric and not by string value
assertEquals(-1, comparator.compare(new File("3"), new File("20")));
assertEquals(1, comparator.compare(new File("20"), new File("3")));

assertEquals(-1, comparator.compare(new File("3"), new File("non-numeric")));
assertEquals(1, comparator.compare(new File("non-numeric"), new File("3")));
}

@Test
public void canVisitOldestDirsFirst()
{
File cacheRoot = new File(TempFileProvider.getTempDir(), GUID.generate());
cacheRoot.deleteOnExit();
contentCache.setCacheRoot(cacheRoot);

File f1 = tempfile(createDirs("2000/3/30/17/45/31"), "files-are-unsorted.bin");
File f2 = tempfile(createDirs("2000/3/4/17/45/31"), "another-file.bin");
File f3 = tempfile(createDirs("2010/12/24/23/59/58"), "a-second-before.bin");
File f4 = tempfile(createDirs("2010/12/24/23/59/59"), "last-one.bin");
File f5 = tempfile(createDirs("2000/1/7/2/7/12"), "first-one.bin");

// Check that directories and files are visited in correct order
FileHandler handler = Mockito.mock(FileHandler.class);
contentCache.processFiles(handler);

InOrder inOrder = Mockito.inOrder(handler);
inOrder.verify(handler).handle(f5);
inOrder.verify(handler).handle(f2);
inOrder.verify(handler).handle(f1);
inOrder.verify(handler).handle(f3);
inOrder.verify(handler).handle(f4);
}

private File tempfile()
{
return tempfile("cached-content", ".bin");
}

private File tempfile(String name, String suffix)
{
File file = TempFileProvider.createTempFile(name, suffix);
file.deleteOnExit();
return file;
}

private File tempfile(File dir, String name)
{
File f = new File(dir, name);
try
{
f.createNewFile();
}
catch (IOException error)
{
throw new RuntimeException(error);
}
f.deleteOnExit();
return f;
}

private File createDirs(String path)
{
File f = new File(contentCache.getCacheRoot(), path);
f.mkdirs();
return f;
}
}
@@ -27,6 +27,7 @@ import org.alfresco.service.cmr.repository.ContentReader;
import org.alfresco.service.cmr.repository.ContentWriter;
import org.alfresco.util.ApplicationContextHelper;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.springframework.context.ApplicationContext;

@@ -37,15 +38,19 @@ import org.springframework.context.ApplicationContext;
*/
public class FullTest
{
private ApplicationContext ctx;
private static ApplicationContext ctx;
private CachingContentStore store;

@BeforeClass
public static void beforeClass()
{
String conf = "classpath:cachingstore/test-context.xml";
ctx = ApplicationContextHelper.getApplicationContext(new String[] { conf });
}

@Before
public void setUp()
{
String conf = "classpath:cachingstore/test-context.xml";
ctx = ApplicationContextHelper.getApplicationContext(new String[] { conf });

store = (CachingContentStore) ctx.getBean("cachingContentStore");
store.setCacheOnInbound(true);
}
@@ -19,14 +19,20 @@
package org.alfresco.repo.content.caching.cleanup;

import java.io.File;
import java.util.Date;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.alfresco.repo.content.caching.CacheFileProps;
import org.alfresco.repo.content.caching.ContentCacheImpl;
import org.alfresco.repo.content.caching.FileHandler;
import org.alfresco.repo.content.caching.quota.UsageTracker;
import org.alfresco.util.Deleter;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.springframework.beans.factory.annotation.Required;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.context.ApplicationEventPublisherAware;

/**
* Cleans up redundant cache files from the cached content file store. Once references to cache files are

@@ -34,17 +40,113 @@ import org.springframework.beans.factory.annotation.Required;
*
* @author Matt Ward
*/
public class CachedContentCleaner implements FileHandler
public class CachedContentCleaner implements FileHandler, ApplicationEventPublisherAware
{
private static final Log log = LogFactory.getLog(CachedContentCleaner.class);
private ContentCacheImpl cache; // impl specific functionality required
private long minFileAgeMillis = 0;
private Integer maxDeleteWatchCount = 1;
private ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
private boolean running;
private UsageTracker usageTracker;
private long newDiskUsage;
private long numFilesSeen;
private long numFilesDeleted;
private long sizeFilesDeleted;
private long numFilesMarked;
private Date timeStarted;
private Date timeFinished;
private ApplicationEventPublisher eventPublisher;
private long targetReductionBytes;

/**
* This method should be called after the cleaner has been fully constructed
* to notify interested parties that the cleaner exists.
*/
public void init()
{
eventPublisher.publishEvent(new CachedContentCleanerCreatedEvent(this));
}

public void execute()
{
cache.processFiles(this);
execute("none specified");
}

public void executeAggressive(String reason, long targetReductionBytes)
{
this.targetReductionBytes = targetReductionBytes;
execute(reason);
this.targetReductionBytes = 0;
}

public void execute(String reason)
{
lock.readLock().lock();
try
{
if (running)
{
// Do nothing - we only want one cleaner running at a time.
return;
}
}
finally
{
lock.readLock().unlock();
}
lock.writeLock().lock();
try
{
if (!running)
{
if (log.isInfoEnabled())
{
log.info("Starting cleaner, reason: " + reason);
}
running = true;
resetStats();
timeStarted = new Date();
cache.processFiles(this);
timeFinished = new Date();

if (usageTracker != null)
{
usageTracker.setCurrentUsageBytes(newDiskUsage);
}

running = false;
if (log.isInfoEnabled())
{
log.info("Finished, duration: " + getDurationSeconds() + "s, seen: " + numFilesSeen +
", marked: " + numFilesMarked +
", deleted: " + numFilesDeleted +
" (" + String.format("%.2f", getSizeFilesDeletedMB()) + "MB, " +
sizeFilesDeleted + " bytes)" +
", target: " + targetReductionBytes + " bytes");
}
}
}
finally
{
lock.writeLock().unlock();
}
}

/**
*
*/
private void resetStats()
{
newDiskUsage = 0;
numFilesSeen = 0;
numFilesDeleted = 0;
sizeFilesDeleted = 0;
numFilesMarked = 0;
}

@Override
public void handle(File cachedContentFile)
{

@@ -52,37 +154,84 @@ public class CachedContentCleaner implements FileHandler
{
log.debug("handle file: " + cachedContentFile);
}
numFilesSeen++;
CacheFileProps props = null;
boolean deleted = false;

CacheFileProps props = null; // don't load unless required
if (targetReductionBytes > 0 && sizeFilesDeleted < targetReductionBytes)
{
// Aggressive clean mode, delete file straight away.
deleted = deleteFilesNow(cachedContentFile);
}
else
{
if (oldEnoughForCleanup(cachedContentFile))
{
if (log.isDebugEnabled())
{
log.debug("File is older than " + minFileAgeMillis +
"ms - considering for cleanup: " + cachedContentFile);
}
props = new CacheFileProps(cachedContentFile);
String url = cache.getContentUrl(cachedContentFile);
if (url == null)
{
// Not in the cache, check the properties file
props = new CacheFileProps(cachedContentFile);
props.load();
url = props.getContentUrl();
}

if (url != null && !cache.contains(url))
if (url == null || !cache.contains(url))
{
// If the url is null, it might still be in the cache, but we were unable to determine it
// from the reverse lookup or the properties file. Delete the file as it is most likely orphaned.
// If for some reason it is still in the cache, cache.getReader(url) must re-cache it.
deleted = markOrDelete(cachedContentFile, props);
}
}
else
{
if (log.isDebugEnabled())
{
log.debug("File too young for cleanup - ignoring " + cachedContentFile);
}
}
}

if (!deleted)
{
if (props == null)
{
props = new CacheFileProps(cachedContentFile);
props.load();
}
markOrDelete(cachedContentFile, props);
}
else if (url == null)
{
// It might still be in the cache, but we were unable to determine it from the reverse lookup
// or the properties file. Delete the file as it is most likely orphaned. If for some reason it is
// still in the cache, cache.getReader(url) must re-cache it.
markOrDelete(cachedContentFile, props);
long size = cachedContentFile.length() + props.fileSize();
newDiskUsage += size;
}
}

/**
* Is the file old enough to be considered for cleanup/deletion? The file must be older than minFileAgeMillis
* to be considered for deletion - the state of the cache and the file's associated properties file will not
* be examined unless the file is old enough.
*
* @return true if the file is older than minFileAgeMillis, false otherwise.
*/
private boolean oldEnoughForCleanup(File file)
{
if (minFileAgeMillis == 0)
{
return true;
}
else
{
long now = System.currentTimeMillis();
return (file.lastModified() < (now - minFileAgeMillis));
}
}

/**
* Marks a file for deletion by a future run of the CachedContentCleaner. Each time a file is observed
* by the cleaner as being ready for deletion, the deleteWatchCount is incremented until it reaches

@@ -99,8 +248,9 @@ public class CachedContentCleaner implements FileHandler
*
* @param file
* @param props
* @return true if the content file was deleted, false otherwise.
*/
private void markOrDelete(File file, CacheFileProps props)
private boolean markOrDelete(File file, CacheFileProps props)
{
Integer deleteWatchCount = props.getDeleteWatchCount();

@@ -108,16 +258,30 @@ public class CachedContentCleaner implements FileHandler
if (deleteWatchCount < 0)
deleteWatchCount = 0;

boolean deleted = false;

if (deleteWatchCount < maxDeleteWatchCount)
{
deleteWatchCount++;

if (log.isDebugEnabled())
{
log.debug("Marking file for deletion, deleteWatchCount=" + deleteWatchCount + ", file: "+ file);
}
props.setDeleteWatchCount(deleteWatchCount);
props.store();
numFilesMarked++;
}
else
{
deleteFilesNow(file);
if (log.isDebugEnabled())
{
log.debug("Deleting cache file " + file);
}
deleted = deleteFilesNow(file);
}

return deleted;
}

/**

@@ -125,15 +289,24 @@ public class CachedContentCleaner implements FileHandler
* original content URL and deletion marker information.
*
* @param cacheFile Location of cached content file.
* @return true if the content file was deleted, false otherwise.
*/
private void deleteFilesNow(File cacheFile)
private boolean deleteFilesNow(File cacheFile)
{
CacheFileProps props = new CacheFileProps(cacheFile);
props.delete();
cacheFile.delete();
long fileSize = cacheFile.length();
boolean deleted = cacheFile.delete();
if (deleted)
{
numFilesDeleted++;
sizeFilesDeleted += fileSize;
Deleter.deleteEmptyParents(cacheFile, cache.getCacheRoot());
}

return deleted;
}

@Required

@@ -142,6 +315,24 @@ public class CachedContentCleaner implements FileHandler
this.cache = cache;
}

/**
* Sets the minimum age of a cache file before it will be considered for deletion.
* @see #oldEnoughForCleanup(File)
* @param minFileAgeMillis
*/
public void setMinFileAgeMillis(long minFileAgeMillis)
{
this.minFileAgeMillis = minFileAgeMillis;
}

/**
* Sets the maxDeleteWatchCount value.
*
* @see #markOrDelete(File, CacheFileProps)
* @param maxDeleteWatchCount
*/
public void setMaxDeleteWatchCount(Integer maxDeleteWatchCount)
{
if (maxDeleteWatchCount < 0)

@@ -150,4 +341,86 @@ public class CachedContentCleaner implements FileHandler
}
this.maxDeleteWatchCount = maxDeleteWatchCount;
}

/**
* @param usageTracker the usageTracker to set
*/
public void setUsageTracker(UsageTracker usageTracker)
{
this.usageTracker = usageTracker;
}

public boolean isRunning()
{
lock.readLock().lock();
try
{
return running;
}
finally
{
lock.readLock().unlock();
}
}

public long getNumFilesSeen()
{
return this.numFilesSeen;
}

public long getNumFilesDeleted()
{
return this.numFilesDeleted;
}

public long getSizeFilesDeleted()
{
return this.sizeFilesDeleted;
}

public double getSizeFilesDeletedMB()
{
return (double) getSizeFilesDeleted() / FileUtils.ONE_MB;
}

public long getNumFilesMarked()
{
return numFilesMarked;
}

public Date getTimeStarted()
{
return this.timeStarted;
}

public Date getTimeFinished()
{
return this.timeFinished;
}

public long getDurationSeconds()
{
return getDurationMillis() / 1000;
}

public long getDurationMillis()
{
return timeFinished.getTime() - timeStarted.getTime();
}

@Override
public void setApplicationEventPublisher(ApplicationEventPublisher eventPublisher)
{
this.eventPublisher = eventPublisher;
}

/**
* Returns the cacheRoot that this cleaner is responsible for.
* @return File
*/
public File getCacheRoot()
{
return cache.getCacheRoot();
}
}
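Aggressive cleaning is the cleaner's answer to THOR-217 in the commit message: when a quota strategy decides that the configured limit has been breached, it can call executeAggressive(reason, targetReductionBytes), and the cleaner deletes files immediately, oldest first, until the target has been reached, after which it falls back to the normal mark-and-delete behaviour for the rest of the walk. The sketch below shows the shape of such a call; the surrounding class and the way the target is computed are assumptions, only the executeAggressive signature comes from this diff.

import org.alfresco.repo.content.caching.cleanup.CachedContentCleaner;

/**
 * Hypothetical caller: reacts to a quota breach by asking the cleaner to free
 * enough bytes to get back under the configured limit.
 */
public class QuotaBreachHandler
{
    private final CachedContentCleaner cleaner;

    public QuotaBreachHandler(CachedContentCleaner cleaner)
    {
        this.cleaner = cleaner;
    }

    public void onQuotaExceeded(long currentUsageBytes, long maxUsageBytes)
    {
        long targetReductionBytes = currentUsageBytes - maxUsageBytes;
        cleaner.executeAggressive("quota exceeded", targetReductionBytes);
    }
}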
@@ -0,0 +1,44 @@
/*
* Copyright (C) 2005-2011 Alfresco Software Limited.
*
* This file is part of Alfresco
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
*/
package org.alfresco.repo.content.caching.cleanup;

import org.alfresco.repo.content.caching.CachingContentStoreEvent;

/**
* Event fired when CachedContentCleaner instances are created.
*
* @author Matt Ward
*/
public class CachedContentCleanerCreatedEvent extends CachingContentStoreEvent
{
private static final long serialVersionUID = 1L;

/**
* @param source
*/
public CachedContentCleanerCreatedEvent(CachedContentCleaner cleaner)
{
super(cleaner);
}

public CachedContentCleaner getCleaner()
{
return (CachedContentCleaner) source;
}
}
@@ -37,7 +37,7 @@ public class CachedContentCleanupJob implements Job
    {
        JobDataMap jobData = context.getJobDetail().getJobDataMap();
        CachedContentCleaner cachedContentCleaner = cachedContentCleaner(jobData);
        cachedContentCleaner.execute();
        cachedContentCleaner.execute("scheduled");
    }

@@ -25,6 +25,7 @@ import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintWriter;

import org.alfresco.repo.content.caching.CacheFileProps;
@@ -33,7 +34,11 @@ import org.alfresco.repo.content.caching.ContentCacheImpl;
import org.alfresco.repo.content.caching.Key;
import org.alfresco.service.cmr.repository.ContentReader;
import org.alfresco.util.ApplicationContextHelper;
import org.alfresco.util.GUID;
import org.apache.commons.io.FileUtils;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import org.springframework.context.ApplicationContext;

@@ -45,25 +50,35 @@ import org.springframework.context.ApplicationContext;
public class CachedContentCleanupJobTest
{
    private enum UrlSource { PROPS_FILE, REVERSE_CACHE_LOOKUP, NOT_PRESENT };
    private ApplicationContext ctx;
    private static ApplicationContext ctx;
    private CachingContentStore cachingStore;
    private ContentCacheImpl cache;
    private File cacheRoot;
    private CachedContentCleaner cleaner;

    @Before
    public void setUp()

    @BeforeClass
    public static void beforeClass()
    {
        String conf = "classpath:cachingstore/test-context.xml";
        String cleanerConf = "classpath:cachingstore/test-cleaner-context.xml";
        ctx = ApplicationContextHelper.getApplicationContext(new String[] { conf, cleanerConf });
    }


    @Before
    public void setUp() throws IOException
    {
        cachingStore = (CachingContentStore) ctx.getBean("cachingContentStore");

        cache = (ContentCacheImpl) ctx.getBean("contentCache");
        cacheRoot = cache.getCacheRoot();

        cleaner = (CachedContentCleaner) ctx.getBean("cachedContentCleaner");
        cleaner.setMinFileAgeMillis(0);
        cleaner.setMaxDeleteWatchCount(0);

        // Clear the cache from disk and memory
        cache.removeAll();
        FileUtils.cleanDirectory(cacheRoot);
    }


@@ -72,7 +87,8 @@ public class CachedContentCleanupJobTest
    {
        cleaner.setMaxDeleteWatchCount(0);
        int numFiles = 300; // Must be a multiple of number of UrlSource types being tested
        File[] files = new File[300];
        long totalSize = 0; // what is the total size of the sample files?
        File[] files = new File[numFiles];
        for (int i = 0; i < numFiles; i++)
        {
            // Testing with a number of files. The cached file cleaner will be able to determine the 'original'
@@ -80,8 +96,9 @@ public class CachedContentCleanupJobTest
            // a 'reverse lookup' in the cache (i.e. cache.contains(Key.forCacheFile(...))), or there will be no
            // URL determinable for the file.
            UrlSource urlSource = UrlSource.values()[i % UrlSource.values().length];
            File cacheFile = createCacheFile(urlSource, i);
            File cacheFile = createCacheFile(urlSource, i, false);
            files[i] = cacheFile;
            totalSize += cacheFile.length();
        }

        // Run cleaner
@@ -92,6 +109,165 @@ public class CachedContentCleanupJobTest
        {
            assertFalse("File should have been deleted: " + file, file.exists());
        }

        assertEquals("Incorrect number of deleted files", numFiles, cleaner.getNumFilesDeleted());
        assertEquals("Incorrect total size of files deleted", totalSize, cleaner.getSizeFilesDeleted());
    }


    @Test
    public void filesNewerThanMinFileAgeMillisAreNotDeleted() throws InterruptedException
    {
        final long minFileAge = 1000;
        cleaner.setMinFileAgeMillis(minFileAge);
        cleaner.setMaxDeleteWatchCount(0);
        int numFiles = 10;

        File[] oldFiles = new File[numFiles];
        for (int i = 0; i < numFiles; i++)
        {
            oldFiles[i] = createCacheFile(UrlSource.REVERSE_CACHE_LOOKUP, i, false);
        }

        // Sleep to make sure 'old' files really are older than minFileAgeMillis
        Thread.sleep(minFileAge);

        File[] newFiles = new File[numFiles];
        long newFilesTotalSize = 0;
        for (int i = 0; i < numFiles; i++)
        {
            newFiles[i] = createCacheFile(UrlSource.REVERSE_CACHE_LOOKUP, i, false);
            newFilesTotalSize += newFiles[i].length();
        }


        // The cleaner must finish before any of the newFiles are older than minFileAge. If the files are too
        // old the test will fail and it will be necessary to rethink how to test this.
        cleaner.execute();

        // check all 'old' files deleted
        for (File file : oldFiles)
        {
            assertFalse("File should have been deleted: " + file, file.exists());
        }
        // check all 'new' files still present
        for (File file : newFiles)
        {
            assertTrue("File should not have been deleted: " + file, file.exists());
        }

        assertEquals("Incorrect number of deleted files", newFiles.length, cleaner.getNumFilesDeleted());
        assertEquals("Incorrect total size of files deleted", newFilesTotalSize, cleaner.getSizeFilesDeleted());
    }

    @Test
    public void aggressiveCleanReclaimsTargetSpace() throws InterruptedException
    {
        int numFiles = 30;
        File[] files = new File[numFiles];
        for (int i = 0; i < numFiles; i++)
        {
            // Make sure it's in the cache - all the files will be in the cache, so the
            // cleaner won't clean any up once it has finished aggressively reclaiming space.
            files[i] = createCacheFile(UrlSource.REVERSE_CACHE_LOOKUP, i, true);
        }

        // How much space to reclaim - seven files worth (all files are same size)
        long fileSize = files[0].length();
        long sevenFilesSize = 7 * fileSize;

        // We'll get it to clean seven files worth aggressively and then it will continue non-aggressively.
        // It will delete the older files aggressively (i.e. the ones prior to the two second sleep) and
        // then will examine the new files for potential deletion.
        // Since some of the newer files are not in the cache, it will delete those.
        cleaner.executeAggressive("aggressiveCleanReclaimsTargetSpace()", sevenFilesSize);

        int numDeleted = 0;

        for (File f : files)
        {
            if (!f.exists())
            {
                numDeleted++;
            }
        }
        // How many were definitely deleted?
        assertEquals("Wrong number of files deleted", 7 , numDeleted);

        // The cleaner should have recorded the correct number of deletions
        assertEquals("Incorrect number of deleted files", 7, cleaner.getNumFilesDeleted());
        assertEquals("Incorrect total size of files deleted", sevenFilesSize, cleaner.getSizeFilesDeleted());
    }

    @Ignore()
    @Test
    public void standardCleanAfterAggressiveFinished() throws InterruptedException
    {
        int numFiles = 30;
        int newerFilesIndex = 14;
        File[] files = new File[numFiles];

        for (int i = 0; i < numFiles; i++)
        {
            if (i == newerFilesIndex)
            {
                // Files after this sleep will definitely be in 'newer' directories.
                Thread.sleep(2000);
            }

            if (i >= 21 && i <= 24)
            {
                // 21 to 24 will be deleted after the aggressive deletions (once the cleaner has returned
                // to normal cleaning), because they are not in the cache.
                files[i] = createCacheFile(UrlSource.NOT_PRESENT, i, false);
            }
            else
            {
                // All other files will be in the cache
                files[i] = createCacheFile(UrlSource.REVERSE_CACHE_LOOKUP, i, true);
            }
        }

        // How much space to reclaim - seven files worth (all files are same size)
        long fileSize = files[0].length();
        long sevenFilesSize = 7 * fileSize;

        // We'll get it to clean seven files worth aggressively and then it will continue non-aggressively.
        // It will delete the older files aggressively (i.e. the ones prior to the two second sleep) and
        // then will examine the new files for potential deletion.
        // Since some of the newer files are not in the cache, it will delete those.
        cleaner.executeAggressive("standardCleanAfterAggressiveFinished()", sevenFilesSize);

        for (int i = 0; i < numFiles; i++)
        {
            File f = files[i];
            String newerOrOlder = ((i >= newerFilesIndex) ? "newer" : "older");
            System.out.println("files[" + i + "] = " + newerOrOlder + " file, exists=" + f.exists());
        }

        int numOlderFilesDeleted = 0;
        for (int i = 0; i < newerFilesIndex; i++)
        {
            if (!files[i].exists())
            {
                numOlderFilesDeleted++;
            }
        }
        assertEquals("Wrong number of older files deleted", 7, numOlderFilesDeleted);

        int numNewerFilesDeleted = 0;
        for (int i = newerFilesIndex; i < numFiles; i++)
        {
            if (!files[i].exists())
            {
                numNewerFilesDeleted++;
            }
        }
        assertEquals("Wrong number of newer files deleted", 4, numNewerFilesDeleted);

        // The cleaner should have recorded the correct number of deletions
        assertEquals("Incorrect number of deleted files", 11, cleaner.getNumFilesDeleted());
        assertEquals("Incorrect total size of files deleted", (11*fileSize), cleaner.getSizeFilesDeleted());
    }

    @Test
@@ -116,14 +292,14 @@ public class CachedContentCleanupJobTest
        // A non-advisable setting but useful for testing, maxDeleteWatchCount of zero
        // which should result in immediate deletion upon discovery of content no longer in the cache.
        cleaner.setMaxDeleteWatchCount(0);
        File file = createCacheFile(UrlSource.NOT_PRESENT, 0);
        File file = createCacheFile(UrlSource.NOT_PRESENT, 0, false);

        cleaner.handle(file);
        checkFilesDeleted(file);

        // Anticipated to be the most common setting: maxDeleteWatchCount of 1.
        cleaner.setMaxDeleteWatchCount(1);
        file = createCacheFile(UrlSource.NOT_PRESENT, 0);
        file = createCacheFile(UrlSource.NOT_PRESENT, 0, false);

        cleaner.handle(file);
        checkWatchCountForCacheFile(file, 1);
@@ -133,7 +309,7 @@ public class CachedContentCleanupJobTest

        // Check that some other arbitrary figure for maxDeleteWatchCount works correctly.
        cleaner.setMaxDeleteWatchCount(3);
        file = createCacheFile(UrlSource.NOT_PRESENT, 0);
        file = createCacheFile(UrlSource.NOT_PRESENT, 0, false);

        cleaner.handle(file);
        checkWatchCountForCacheFile(file, 1);
@@ -173,10 +349,11 @@ public class CachedContentCleanupJobTest

        // The SlowContentStore will always give out content when asked,
        // so asking for any content will cause something to be cached.
        String url = makeContentUrl();
        int numFiles = 50;
        for (int i = 0; i < numFiles; i++)
        {
            ContentReader reader = cachingStore.getReader(String.format("store://caching/store/url-%03d.bin", i));
            ContentReader reader = cachingStore.getReader(url);
            reader.getContentString();
        }

@@ -184,18 +361,23 @@ public class CachedContentCleanupJobTest

        for (int i = 0; i < numFiles; i++)
        {
            File cacheFile = new File(cache.getCacheFilePath(String.format("store://caching/store/url-%03d.bin", i)));
            File cacheFile = new File(cache.getCacheFilePath(url));
            assertTrue("File should exist", cacheFile.exists());
        }
    }


    private File createCacheFile(UrlSource urlSource, int fileNum)
    private File createCacheFile(UrlSource urlSource, int fileNum, boolean putInCache)
    {
        File file = new File(cacheRoot, ContentCacheImpl.createNewCacheFilePath());
        file.getParentFile().mkdirs();
        writeSampleContent(file);
        String contentUrl = String.format("protocol://some/made/up/url-%03d.bin", fileNum);
        String contentUrl = makeContentUrl();

        if (putInCache)
        {
            cache.putIntoLookup(Key.forUrl(contentUrl), file.getAbsolutePath());
        }

        switch(urlSource)
        {
@@ -217,12 +399,19 @@ public class CachedContentCleanupJobTest
        }


    private String makeContentUrl()
    {
        return "protocol://some/made/up/url/" + GUID.generate();
    }


    private void writeSampleContent(File file)
    {
        try
        {
            PrintWriter writer = new PrintWriter(file);
            writer.println("Content for sample file in " + getClass().getName());
            writer.close();
        }
        catch (Throwable e)
        {
@@ -0,0 +1,51 @@
/*
 * Copyright (C) 2005-2011 Alfresco Software Limited.
 *
 * This file is part of Alfresco
 *
 * Alfresco is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Alfresco is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
 */
package org.alfresco.repo.content.caching.quota;


/**
 * Disk quota managers for the CachingContentStore must implement this interface.
 *
 * @author Matt Ward
 */
public interface QuotaManagerStrategy
{
    /**
     * Called immediately before writing a cache file or (when cacheOnInBound is set to true
     * for the CachingContentStore) before handing a ContentWriter to a content producer.
     * <p>
     * In the latter case, the contentSize will be unknown (0), since the content
     * length hasn't been established yet.
     *
     * @param contentSize The size of the content that will be written or 0 if not known.
     * @return true to allow the cache file to be written, false to veto.
     */
    boolean beforeWritingCacheFile(long contentSize);


    /**
     * Called immediately after writing a cache file - specifying the size of the file that was written.
     * The return value allows implementations control over whether the new cache file is kept (true) or
     * immediately removed (false).
     *
     * @param contentSize The size of the content that was written.
     * @return true to allow the cache file to remain, false to immediately delete.
     */
    boolean afterWritingCacheFile(long contentSize);
}
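To illustrate the contract described in the javadoc above, here is a minimal hypothetical implementation that enforces only a fixed file-size cap. It is not part of this change; the StandardQuotaStrategy that follows is the real implementation:

    // Hypothetical sketch: vetoes caching of files larger than a fixed size.
    public class MaxFileSizeOnlyStrategy implements QuotaManagerStrategy
    {
        private final long maxFileSizeBytes;

        public MaxFileSizeOnlyStrategy(long maxFileSizeBytes)
        {
            this.maxFileSizeBytes = maxFileSizeBytes;
        }

        @Override
        public boolean beforeWritingCacheFile(long contentSize)
        {
            // A contentSize of 0 means the size is not yet known - allow the write and decide afterwards.
            return contentSize == 0 || contentSize <= maxFileSizeBytes;
        }

        @Override
        public boolean afterWritingCacheFile(long contentSize)
        {
            // Returning false causes the newly written cache file to be removed immediately.
            return contentSize <= maxFileSizeBytes;
        }
    }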
@@ -0,0 +1,386 @@
/*
 * Copyright (C) 2005-2011 Alfresco Software Limited.
 *
 * This file is part of Alfresco
 *
 * Alfresco is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Alfresco is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
 */
package org.alfresco.repo.content.caching.quota;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.concurrent.atomic.AtomicLong;

import org.alfresco.repo.content.caching.ContentCacheImpl;
import org.alfresco.repo.content.caching.cleanup.CachedContentCleaner;
import org.alfresco.util.PropertyCheck;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.springframework.beans.factory.annotation.Required;

/**
 * Quota manager for the CachingContentStore that has the following characteristics:
 * <p>
 * When a cache file has been written that results in cleanThresholdPct (default 80%) of maxUsageBytes
 * being exceeded then the cached content cleaner is invoked (if not already running) in a new thread.
 * <p>
 * When the CachingContentStore is about to write a cache file but the disk usage is in excess of panicThresholdPct
 * (default 90%) then the cache file is not written and the cleaner is started (if not already running) in a new thread.
 * <p>
 * This quota manager works in conjunction with the cleaner to update disk usage levels in memory. When the quota
 * manager shuts down the current disk usage is saved to disk in {ContentCacheImpl.cacheRoot}/cache-usage.ser
 * <p>
 * Upon startup, if the cache-usage.ser file exists then the current usage is seeded with that value and the cleaner
 * is invoked in a new thread so that the value can be updated more accurately (perhaps some files were deleted
 * manually after shutdown for example).
 *
 * @author Matt Ward
 */
public class StandardQuotaStrategy implements QuotaManagerStrategy, UsageTracker
{
    private static final String CACHE_USAGE_FILENAME = "cache-usage.ser";
    private final static Log log = LogFactory.getLog(StandardQuotaStrategy.class);
    private static final long DEFAULT_DISK_USAGE_ESTIMATE = 0L;
    private int panicThresholdPct = 90;
    private int cleanThresholdPct = 80;
    private int targetUsagePct = 70;
    private long maxUsageBytes = 0;
    private AtomicLong currentUsageBytes = new AtomicLong(0);
    private CachedContentCleaner cleaner;
    private ContentCacheImpl cache; // impl specific functionality required
    private int maxFileSizeMB = 0;

    /**
     * Lifecycle method. Should be called immediately after constructing objects of this type (e.g. by the
     * Spring framework's application context).
     */
    public void init()
    {
        if (log.isDebugEnabled())
        {
            log.debug("Starting quota strategy.");
        }
        PropertyCheck.mandatory(this, "cleaner", cleaner);
        PropertyCheck.mandatory(this, "cache", cache);

        if (maxUsageBytes < (10 * FileUtils.ONE_MB))
        {
            if (log.isWarnEnabled())
            {
                log.warn("Low maxUsageBytes of " + maxUsageBytes + "bytes - did you mean to specify in MB?");
            }
        }

        loadDiskUsage();
        // Run the cleaner thread so that it can update the disk usage more accurately.
        runCleanerThread("quota (init)");
    }


    /**
     * Lifecycle method. Should be called when finished using an object of this type and before the application
     * container is shutdown (e.g. using a Spring framework destroy method).
     */
    public void shutdown()
    {
        if (log.isDebugEnabled())
        {
            log.debug("Shutting down quota strategy.");
        }
        saveDiskUsage();
    }


    private void loadDiskUsage()
    {
        // Load the last known disk usage value.
        try
        {
            FileInputStream fis = new FileInputStream(new File(cache.getCacheRoot(), CACHE_USAGE_FILENAME));
            ObjectInputStream ois = new ObjectInputStream(fis);
            currentUsageBytes.set(ois.readLong());
            ois.close();
            if (log.isInfoEnabled())
            {
                log.info("Using last known disk usage estimate: " + getCurrentUsageBytes());
            }
        }
        catch (Throwable e)
        {
            // Assume disk usage
            setCurrentUsageBytes(DEFAULT_DISK_USAGE_ESTIMATE);

            if (log.isInfoEnabled())
            {
                log.info("Unable to load last known disk usage estimate so assuming: " + getCurrentUsageBytes());
            }
        }
    }


    private void saveDiskUsage()
    {
        // Persist the last known disk usage value.
        try
        {
            FileOutputStream fos = new FileOutputStream(new File(cache.getCacheRoot(), CACHE_USAGE_FILENAME));
            ObjectOutputStream out = new ObjectOutputStream(fos);
            out.writeObject(currentUsageBytes);
            out.close();
        }
        catch (Throwable e)
        {
            throw new RuntimeException("Unable to save content cache disk usage statistics.", e);
        }
    }


    @Override
    public boolean beforeWritingCacheFile(long contentSizeBytes)
    {
        long maxFileSizeBytes = getMaxFileSizeBytes();
        if (maxFileSizeBytes > 0 && contentSizeBytes > maxFileSizeBytes)
        {
            if (log.isDebugEnabled())
            {
                log.debug("File too large (" + contentSizeBytes + " bytes, max allowed is " +
                            getMaxFileSizeBytes() + ") - vetoing disk write.");
            }
            return false;
        }
        else if (usageWillReach(panicThresholdPct, contentSizeBytes))
        {
            if (log.isDebugEnabled())
            {
                log.debug("Panic threshold reached (" + panicThresholdPct +
                            "%) - vetoing disk write and starting cached content cleaner.");
            }
            runCleanerThread("quota (panic threshold)");
            return false;
        }

        return true;
    }


    @Override
    public boolean afterWritingCacheFile(long contentSizeBytes)
    {
        boolean keepNewFile = true;

        long maxFileSizeBytes = getMaxFileSizeBytes();
        if (maxFileSizeBytes > 0 && contentSizeBytes > maxFileSizeBytes)
        {
            keepNewFile = false;
        }
        else
        {
            // The file has just been written so update the usage stats.
            addUsageBytes(contentSizeBytes);
        }

        if (getCurrentUsageBytes() >= maxUsageBytes)
        {
            // Reached quota limit - time to aggressively recover some space to make sure that
            // new requests to cache a file are likely to be honoured.
            if (log.isDebugEnabled())
            {
                log.debug("Usage has reached or exceeded quota limit, limit: " + maxUsageBytes +
                            " bytes, current usage: " + getCurrentUsageBytes() + " bytes.");
            }
            runAggressiveCleanerThread("quota (limit reached)");
        }
        else if (usageHasReached(cleanThresholdPct))
        {
            // If usage has reached the clean threshold, start the cleaner
            if (log.isDebugEnabled())
            {
                log.debug("Usage has reached " + cleanThresholdPct + "% - starting cached content cleaner.");
            }

            runCleanerThread("quota (clean threshold)");
        }

        return keepNewFile;
    }


    /**
     * Run the cleaner in a new thread.
     */
    private void runCleanerThread(final String reason, final boolean aggressive)
    {
        Runnable cleanerRunner = new Runnable()
        {
            @Override
            public void run()
            {
                if (aggressive)
                {
                    long targetReductionBytes = (long) (((double) targetUsagePct / 100) * maxUsageBytes);
                    cleaner.executeAggressive(reason, targetReductionBytes);
                }
                else
                {
                    cleaner.execute(reason);
                }
            }
        };
        Thread cleanerThread = new Thread(cleanerRunner, getClass().getSimpleName() + " cleaner");
        cleanerThread.run();
    }

    /**
     * Run a non-aggressive clean up job in a new thread.
     *
     * @param reason
     */
    private void runCleanerThread(final String reason)
    {
        runCleanerThread(reason, false);
    }

    /**
     * Run an aggressive clean up job in a new thread.
     *
     * @param reason
     */
    private void runAggressiveCleanerThread(final String reason)
    {
        runCleanerThread(reason, true);
    }


    /**
     * Will an increase in disk usage of <code>contentSize</code> bytes result in the specified
     * <code>threshold</code> (percentage of maximum allowed usage) being reached or exceeded?
     *
     * @param threshold
     * @param contentSize
     * @return true if additional content will reach <code>threshold</code>.
     */
    private boolean usageWillReach(int threshold, long contentSize)
    {
        long potentialUsage = getCurrentUsageBytes() + contentSize;
        double pctOfMaxAllowed = ((double) potentialUsage / maxUsageBytes) * 100;
        return pctOfMaxAllowed >= threshold;
    }

    private boolean usageHasReached(int threshold)
    {
        return usageWillReach(threshold, 0);
    }


    public void setMaxUsageMB(long maxUsageMB)
    {
        setMaxUsageBytes(maxUsageMB * FileUtils.ONE_MB);
    }

    public void setMaxUsageBytes(long maxUsageBytes)
    {
        this.maxUsageBytes = maxUsageBytes;
    }


    public void setPanicThresholdPct(int panicThresholdPct)
    {
        this.panicThresholdPct = panicThresholdPct;
    }


    public void setCleanThresholdPct(int cleanThresholdPct)
    {
        this.cleanThresholdPct = cleanThresholdPct;
    }


    @Required
    public void setCache(ContentCacheImpl cache)
    {
        this.cache = cache;
    }


    @Required
    public void setCleaner(CachedContentCleaner cleaner)
    {
        this.cleaner = cleaner;
    }


    @Override
    public long getCurrentUsageBytes()
    {
        return currentUsageBytes.get();
    }


    public double getCurrentUsageMB()
    {
        return (double) getCurrentUsageBytes() / FileUtils.ONE_MB;
    }

    public long getMaxUsageBytes()
    {
        return maxUsageBytes;
    }

    public long getMaxUsageMB()
    {
        return maxUsageBytes / FileUtils.ONE_MB;
    }

    public int getMaxFileSizeMB()
    {
        return this.maxFileSizeMB;
    }

    protected long getMaxFileSizeBytes()
    {
        return maxFileSizeMB * FileUtils.ONE_MB;
    }

    public void setMaxFileSizeMB(int maxFileSizeMB)
    {
        this.maxFileSizeMB = maxFileSizeMB;
    }


    @Override
    public long addUsageBytes(long sizeDelta)
    {
        long newUsage = currentUsageBytes.addAndGet(sizeDelta);
        if (log.isDebugEnabled())
        {
            log.debug(String.format("Disk usage changed by %d to %d bytes", sizeDelta, newUsage));
        }
        return newUsage;
    }


    @Override
    public void setCurrentUsageBytes(long newDiskUsage)
    {
        if (log.isInfoEnabled())
        {
            log.info(String.format("Setting disk usage to %d bytes", newDiskUsage));
        }
        currentUsageBytes.set(newDiskUsage);
    }
}
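The threshold arithmetic above is easiest to see with concrete numbers. Using the default percentages (clean at 80%, panic at 90%, aggressive target of 70%) and the 1000-byte quota that the mock test below configures:

    // Illustrative arithmetic only - these are the figures the mock test asserts against.
    long maxUsageBytes = 1000;
    long cleanTrigger  = maxUsageBytes * 80 / 100;  // 800: a write taking usage to 800 or more starts the cleaner
    long panicTrigger  = maxUsageBytes * 90 / 100;  // 900: writes that would reach 900 or more are vetoed
    long targetBytes   = maxUsageBytes * 70 / 100;  // 700: target passed to executeAggressive() at the quota limit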
@@ -0,0 +1,207 @@
/*
 * Copyright (C) 2005-2011 Alfresco Software Limited.
 *
 * This file is part of Alfresco
 *
 * Alfresco is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Alfresco is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
 */
package org.alfresco.repo.content.caching.quota;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import org.alfresco.repo.content.caching.cleanup.CachedContentCleaner;
import org.apache.commons.lang.reflect.FieldUtils;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.runners.MockitoJUnitRunner;

/**
 * Tests for the StandardQuotaStrategy.
 * @author Matt Ward
 */
@RunWith(MockitoJUnitRunner.class)
public class StandardQuotaStrategyMockTest
{
    private StandardQuotaStrategy quota;

    @Mock
    private CachedContentCleaner cleaner;

    @Before
    public void setUp() throws Exception
    {
        quota = new StandardQuotaStrategy();
        // 1000 Bytes max. - unrealistic value but makes the figures easier.
        quota.setMaxUsageBytes(1000);
        quota.setMaxFileSizeMB(100);
        quota.setCleaner(cleaner);
    }

    @Test
    public void testCanSetMaxUsageInMB() throws IllegalAccessException
    {
        quota.setMaxUsageMB(0);
        assertEquals(0, ((Long) FieldUtils.readDeclaredField(quota, "maxUsageBytes", true)).longValue());

        quota.setMaxUsageMB(500);
        assertEquals(524288000, ((Long) FieldUtils.readDeclaredField(quota, "maxUsageBytes", true)).longValue());

        // 1 GB
        quota.setMaxUsageMB(1024);
        assertEquals(1073741824, ((Long) FieldUtils.readDeclaredField(quota, "maxUsageBytes", true)).longValue());
    }

    @Test
    public void testPanicThresholdForBeforeWritingCacheFile()
    {
        quota.setCurrentUsageBytes(0);
        assertTrue("Should allow writing of cache file", quota.beforeWritingCacheFile(899));
        assertFalse("Should not allow writing of cache file", quota.beforeWritingCacheFile(900));

        quota.setCurrentUsageBytes(890);
        assertTrue("Should allow writing of cache file", quota.beforeWritingCacheFile(9));
        assertFalse("Should not allow writing of cache file", quota.beforeWritingCacheFile(10));

        quota.setCurrentUsageBytes(600);
        assertTrue("Should allow writing of cache file", quota.beforeWritingCacheFile(299));
        assertFalse("Should not allow writing of cache file", quota.beforeWritingCacheFile(300));

        quota.setCurrentUsageBytes(899);
        assertTrue("Should allow writing of cache file", quota.beforeWritingCacheFile(0));
        assertFalse("Should not allow writing of cache file", quota.beforeWritingCacheFile(1));


        // When the usage is already exceeding 100% of what is allowed
        quota.setCurrentUsageBytes(2345);
        assertFalse("Should not allow writing of cache file", quota.beforeWritingCacheFile(0));
        assertFalse("Should not allow writing of cache file", quota.beforeWritingCacheFile(1));
        assertFalse("Should not allow writing of cache file", quota.beforeWritingCacheFile(12300));
    }


    @Test
    public void afterWritingCacheFileDiskUsageUpdatedCorrectly()
    {
        quota.setCurrentUsageBytes(410);
        quota.afterWritingCacheFile(40);
        assertEquals("Incorrect usage estimate", 450, quota.getCurrentUsageBytes());

        quota.afterWritingCacheFile(150);
        assertEquals("Incorrect usage estimate", 600, quota.getCurrentUsageBytes());
    }


    @Test
    // Is the cleaner started when disk usage is over correct threshold?
    public void testThresholdsAfterWritingCacheFile()
    {
        quota.setCurrentUsageBytes(0);
        quota.afterWritingCacheFile(700);
        Mockito.verify(cleaner, Mockito.never()).execute("quota (clean threshold)");

        quota.setCurrentUsageBytes(700);
        quota.afterWritingCacheFile(100);
        Mockito.verify(cleaner).execute("quota (clean threshold)");

        quota.setCurrentUsageBytes(999);
        quota.afterWritingCacheFile(1);
        Mockito.verify(cleaner).executeAggressive("quota (limit reached)", 700);
    }


    @Test
    public void testThresholdsBeforeWritingCacheFile()
    {
        quota.setCurrentUsageBytes(800);
        quota.beforeWritingCacheFile(0);
        Mockito.verify(cleaner, Mockito.never()).execute("quota (clean threshold)");

        quota.setCurrentUsageBytes(900);
        quota.beforeWritingCacheFile(0);
        Mockito.verify(cleaner).execute("quota (panic threshold)");
    }

    @Test
    public void canGetMaxFileSizeBytes()
    {
        quota.setMaxFileSizeMB(1024);
        assertEquals("1GB incorrect", 1073741824L, quota.getMaxFileSizeBytes());

        quota.setMaxFileSizeMB(0);
        assertEquals("0MB incorrect", 0L, quota.getMaxFileSizeBytes());
    }

    @Test
    public void attemptToWriteFileExceedingMaxFileSizeIsVetoed()
    {
        // Make sure the maxUsageMB doesn't interfere with the tests - set large value.
        quota.setMaxUsageMB(4096);

        // Zero for no max file size
        quota.setMaxFileSizeMB(0);
        assertTrue("File should be written", quota.beforeWritingCacheFile(1));
        assertTrue("File should be written", quota.beforeWritingCacheFile(20971520));

        // Anything > 0 should result in limit being applied
        quota.setMaxFileSizeMB(1);
        assertTrue("File should be written", quota.beforeWritingCacheFile(1048576));
        assertFalse("File should be vetoed - too large", quota.beforeWritingCacheFile(1048577));

        // Realistic scenario, 20 MB cutoff.
        quota.setMaxFileSizeMB(20);
        assertTrue("File should be written", quota.beforeWritingCacheFile(20971520));
        assertFalse("File should be vetoed - too large", quota.beforeWritingCacheFile(20971521));
        // Unknown (in advance) file size should always result in write
        assertTrue("File should be written", quota.beforeWritingCacheFile(0));
    }

    @Test
    public void afterFileWrittenExceedingMaxFileSizeFileIsDeleted()
    {
        // Zero for no max file size
        quota.setMaxFileSizeMB(0);
        assertTrue("File should be kept", quota.afterWritingCacheFile(1));
        assertTrue("File should be kept", quota.afterWritingCacheFile(20971520));
        // Both files were kept
        assertEquals("Incorrect usage estimate", 20971521, quota.getCurrentUsageBytes());

        // Realistic scenario, 20 MB cutoff.
        quota.setMaxFileSizeMB(20);
        quota.setCurrentUsageBytes(0);
        assertTrue("File should be kept", quota.afterWritingCacheFile(20971520));
        assertFalse("File should be removed", quota.afterWritingCacheFile(20971521));
        // Only the first file was kept
        assertEquals("Incorrect usage estimate", 20971520, quota.getCurrentUsageBytes());
    }

    @Test
    public void testCurrentUsageMB()
    {
        quota.setCurrentUsageBytes(524288);
        assertEquals(0.5f, quota.getCurrentUsageMB(), 0);

        quota.setCurrentUsageBytes(1048576);
        assertEquals(1.0f, quota.getCurrentUsageMB(), 0);

        quota.setCurrentUsageBytes(53262546);
        assertEquals(50.795f, quota.getCurrentUsageMB(), 0.001);
    }
}
@@ -0,0 +1,187 @@
/*
 * Copyright (C) 2005-2011 Alfresco Software Limited.
 *
 * This file is part of Alfresco
 *
 * Alfresco is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Alfresco is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
 */
package org.alfresco.repo.content.caching.quota;


import static org.junit.Assert.assertEquals;

import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

import org.alfresco.repo.content.ContentContext;
import org.alfresco.repo.content.caching.CachingContentStore;
import org.alfresco.repo.content.caching.ContentCacheImpl;
import org.alfresco.service.cmr.repository.ContentWriter;
import org.alfresco.util.ApplicationContextHelper;
import org.alfresco.util.GUID;
import org.alfresco.util.TempFileProvider;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.comparator.SizeFileComparator;
import org.apache.commons.io.filefilter.SuffixFileFilter;
import org.apache.commons.io.filefilter.TrueFileFilter;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mozilla.javascript.ObjToIntMap.Iterator;
import org.springframework.context.ApplicationContext;

/**
 * Tests for the StandardQuotaStrategy.
 * @author Matt Ward
 */
public class StandardQuotaStrategyTest
{
    private static ApplicationContext ctx;
    private CachingContentStore store;
    private static byte[] aKB;
    private ContentCacheImpl cache;
    private File cacheRoot;
    private StandardQuotaStrategy quota;


    @BeforeClass
    public static void beforeClass()
    {
        ctx = ApplicationContextHelper.getApplicationContext(new String[]
        {
            "classpath:cachingstore/test-std-quota-context.xml"
        });

        aKB = new byte[1024];
        Arrays.fill(aKB, (byte) 36);
    }


    @AfterClass
    public static void afterClass()
    {
        ApplicationContextHelper.closeApplicationContext();
    }


    @Before
    public void setUp() throws Exception
    {
        store = (CachingContentStore) ctx.getBean("cachingContentStore");
        store.setCacheOnInbound(true);
        cache = (ContentCacheImpl) ctx.getBean("contentCache");
        cacheRoot = cache.getCacheRoot();
        quota = (StandardQuotaStrategy) ctx.getBean("quotaManager");
        quota.setCurrentUsageBytes(0);

        // Empty the in-memory cache
        cache.removeAll();

        FileUtils.cleanDirectory(cacheRoot);
    }


    @Test
    public void cleanerWillTriggerAtCorrectThreshold() throws IOException
    {
        // Write 15 x 1MB files. This will not trigger any quota related actions.
        // Quota is 20MB. The quota manager will...
        // * start the cleaner at 16MB (80% of 20MB)
        // * refuse to cache any more files at 18MB (90% of 20MB)
        for (int i = 0; i < 15; i++)
        {
            writeSingleFileInMB(1);
        }
        // All 15 should be retained.
        assertEquals(15, findCacheFiles().size());

        // Writing one more file should trigger a clean.
        writeSingleFileInMB(1);

        // As the cache is set to contain a max of 12 items in-memory (see cachingContentStoreCache
        // definition in test-std-quota-context.xml) and 2 cache items are required per cached content URL
        // then after the cleaner has processed the tree there will be 6 items left on disk (12/2).
        assertEquals(6, findCacheFiles().size());
    }


    @Test
    public void cachingIsDisabledAtCorrectThreshold() throws IOException
    {
        // Write 4 x 6MB files.
        for (int i = 0; i < 4; i++)
        {
            writeSingleFileInMB(6);
        }

        // Only the first 3 are cached - caching is disabled after that as
        // the panic threshold has been reached.
        assertEquals(3, findCacheFiles().size());
    }

    @SuppressWarnings("unchecked")
    @Test
    public void largeContentCacheFilesAreNotKeptOnDisk() throws IOException
    {
        quota.setMaxFileSizeMB(3);
        writeSingleFileInMB(1);
        writeSingleFileInMB(2);
        writeSingleFileInMB(3);
        writeSingleFileInMB(4);

        List<File> files = new ArrayList<File>(findCacheFiles());
        assertEquals(3, files.size());
        Collections.sort(files,SizeFileComparator.SIZE_COMPARATOR);
        assertEquals(1, files.get(0).length() / FileUtils.ONE_MB);
        assertEquals(2, files.get(1).length() / FileUtils.ONE_MB);
        assertEquals(3, files.get(2).length() / FileUtils.ONE_MB);
    }

    private void writeSingleFileInMB(int sizeInMb) throws IOException
    {
        ContentWriter writer = store.getWriter(ContentContext.NULL_CONTEXT);
        File content = createFileOfSize(sizeInMb * 1024);
        writer.putContent(content);
    }

    private File createFileOfSize(long sizeInKB) throws IOException
    {
        File file = new File(TempFileProvider.getSystemTempDir(), GUID.generate() + ".generated");
        file.deleteOnExit();
        BufferedOutputStream os = new BufferedOutputStream(new FileOutputStream(file));
        for (long i = 0; i < sizeInKB; i++)
        {
            os.write(aKB);
        }
        os.close();

        return file;
    }


    @SuppressWarnings("unchecked")
    private Collection<File> findCacheFiles()
    {
        return FileUtils.listFiles(cacheRoot, new SuffixFileFilter(".bin"), TrueFileFilter.INSTANCE);
    }
}
@@ -0,0 +1,43 @@
/*
 * Copyright (C) 2005-2011 Alfresco Software Limited.
 *
 * This file is part of Alfresco
 *
 * Alfresco is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Alfresco is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
 */
package org.alfresco.repo.content.caching.quota;

/**
 * QuotaManagerStrategy that doesn't enforce any quota limits whatsoever.
 *
 * @author Matt Ward
 */
public class UnlimitedQuotaStrategy implements QuotaManagerStrategy
{
    @Override
    public boolean beforeWritingCacheFile(long contentSize)
    {
        // Always write cache files.
        return true;
    }

    @Override
    public boolean afterWritingCacheFile(long contentSize)
    {
        // Always allow cache files to remain.
        return true;
    }

}
@@ -0,0 +1,55 @@
/*
 * Copyright (C) 2005-2011 Alfresco Software Limited.
 *
 * This file is part of Alfresco
 *
 * Alfresco is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Alfresco is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
 */
package org.alfresco.repo.content.caching.quota;

import static org.junit.Assert.*;

import org.junit.Before;
import org.junit.Test;


/**
 * Tests for the UnlimitedQuotaStrategy class.
 *
 * @author Matt Ward
 */
public class UnlimitedQuotaStrategyTest
{
    private UnlimitedQuotaStrategy quota;

    @Before
    public void setUp()
    {
        quota = new UnlimitedQuotaStrategy();
    }

    @Test
    public void beforeWritingCacheFile()
    {
        assertTrue("Should always allow caching", quota.beforeWritingCacheFile(0));
        assertTrue("Should always allow caching", quota.beforeWritingCacheFile(Long.MAX_VALUE));
    }

    @Test
    public void afterWritingCacheFile()
    {
        assertTrue("Should always allow cache file to remain", quota.afterWritingCacheFile(0));
        assertTrue("Should always allow cache file to remain", quota.afterWritingCacheFile(Long.MAX_VALUE));
    }
}
@@ -0,0 +1,31 @@
/*
 * Copyright (C) 2005-2011 Alfresco Software Limited.
 *
 * This file is part of Alfresco
 *
 * Alfresco is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Alfresco is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
 */
package org.alfresco.repo.content.caching.quota;

/**
 * Interface through which disk usage levels can be set and queried.
 *
 * @author Matt Ward
 */
public interface UsageTracker
{
    long getCurrentUsageBytes();
    void setCurrentUsageBytes(long newDiskUsage);
    long addUsageBytes(long sizeDelta);
}
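StandardQuotaStrategy is the UsageTracker wired into the cleaner in this change; a stand-alone implementation of the interface (hypothetical, shown only to illustrate the contract) need be no more than an AtomicLong wrapper:

    import java.util.concurrent.atomic.AtomicLong;

    // Hypothetical minimal tracker - not part of this commit.
    public class SimpleUsageTracker implements UsageTracker
    {
        private final AtomicLong usage = new AtomicLong(0);

        @Override
        public long getCurrentUsageBytes()
        {
            return usage.get();
        }

        @Override
        public void setCurrentUsageBytes(long newDiskUsage)
        {
            usage.set(newDiskUsage);
        }

        @Override
        public long addUsageBytes(long sizeDelta)
        {
            return usage.addAndGet(sizeDelta);
        }
    }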
@@ -65,9 +65,6 @@ class SlowContentStore extends AbstractContentStore
    @Override
    public ContentReader getReader(String contentUrl)
    {
        urlHits.putIfAbsent(contentUrl, new AtomicLong(0));
        urlHits.get(contentUrl).incrementAndGet();

        return new SlowReader(contentUrl);
    }

@@ -190,6 +187,19 @@ class SlowContentStore extends AbstractContentStore
        private final byte[] content = "This is the content for my slow ReadableByteChannel".getBytes();
        private int index = 0;
        private boolean closed = false;
        private boolean readCounted = false;

        private synchronized void registerReadAttempt()
        {
            if (!readCounted)
            {
                // A true attempt to read from this ContentReader - update statistics.
                String url = getContentUrl();
                urlHits.putIfAbsent(url, new AtomicLong(0));
                urlHits.get(url).incrementAndGet();
                readCounted = true;
            }
        }

        @Override
        public boolean isOpen()
@@ -206,6 +216,8 @@ class SlowContentStore extends AbstractContentStore
        @Override
        public int read(ByteBuffer dst) throws IOException
        {
            registerReadAttempt();

            if (index < content.length)
            {
                try
@@ -9,20 +9,6 @@
        <property name="pauseMillis" value="0"/>
    </bean>


    <bean id="cachingContentStoreCleanerJobDetail" class="org.springframework.scheduling.quartz.JobDetailBean">
        <property name="jobClass">
            <value>org.alfresco.repo.content.caching.cleanup.CachedContentCleanupJob</value>
        </property>
        <property name="jobDataAsMap">
            <map>
                <entry key="cachedContentCleaner">
                    <ref bean="cachedContentCleaner" />
                </entry>
            </map>
        </property>
    </bean>

    <bean id="cachedContentCleaner" class="org.alfresco.repo.content.caching.cleanup.CachedContentCleaner">
        <property name="maxDeleteWatchCount" value="1"/>
        <property name="cache" ref="contentCache"/>
@@ -0,0 +1,65 @@
<?xml version='1.0' encoding='UTF-8'?>
<!DOCTYPE beans PUBLIC '-//SPRING//DTD BEAN//EN' 'http://www.springframework.org/dtd/spring-beans.dtd'>

<beans>
    <import resource="classpath:alfresco/application-context.xml" />


    <bean id="cachingContentStore" class="org.alfresco.repo.content.caching.CachingContentStore">
        <property name="backingStore" ref="backingStore"/>
        <property name="cache" ref="contentCache"/>
        <property name="cacheOnInbound" value="true"/>
        <property name="quota" ref="quotaManager"/>
    </bean>

    <bean id="quotaManager"
          class="org.alfresco.repo.content.caching.quota.StandardQuotaStrategy"
          init-method="init"
          destroy-method="shutdown">
        <property name="maxUsageBytes" value="20971520"/><!-- 20971520 = 20MB -->
        <property name="cleaner" ref="cachedContentCleaner"/>
        <property name="cache" ref="contentCache"/>
    </bean>


    <bean id="backingStore" class="org.alfresco.repo.content.filestore.FileContentStore">
        <constructor-arg>
            <value>${dir.contentstore}</value>
        </constructor-arg>
    </bean>


    <bean id="contentCache" class="org.alfresco.repo.content.caching.ContentCacheImpl">
        <property name="memoryStore" ref="cachingContentStoreCache"/>
        <property name="cacheRoot" value="${dir.cachedcontent}/quota_test"/>
    </bean>


    <bean id="cachingContentStoreCache" class="org.alfresco.repo.cache.EhCacheAdapter">
        <property name="cache">
            <bean class="org.springframework.cache.ehcache.EhCacheFactoryBean">
                <property name="cacheManager">
                    <ref bean="internalEHCacheManager" />
                </property>
                <property name="cacheName">
                    <value>org.alfresco.cache.cachingContentStoreCache</value>
                </property>
                <property name="eternal" value="false"/>
                <property name="timeToLive" value="0"/>
                <property name="timeToIdle" value="0"/>
                <property name="maxElementsInMemory" value="12"/>
                <property name="maxElementsOnDisk" value="0"/>
                <property name="overflowToDisk" value="false"/>
                <property name="diskPersistent" value="false"/>
            </bean>
        </property>
    </bean>


    <bean id="cachedContentCleaner" class="org.alfresco.repo.content.caching.cleanup.CachedContentCleaner">
        <property name="maxDeleteWatchCount" value="0"/><!-- zero is NOT recommended in production -->
        <property name="cache" ref="contentCache"/>
        <property name="usageTracker" ref="quotaManager"/>
    </bean>

</beans>
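With the 20MB quota configured above and the default threshold percentages, the expectations in StandardQuotaStrategyTest follow directly; a sketch of the numbers (illustrative only):

    // Thresholds implied by maxUsageBytes=20971520 (20MB) and the default percentages.
    long maxUsageBytes = 20971520L;
    long cleanTrigger  = maxUsageBytes * 80 / 100;  // 16777216 bytes (16MB): cleaner starts
    long panicTrigger  = maxUsageBytes * 90 / 100;  // 18874368 bytes (18MB): further caching refused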