Merged HEAD-QA to HEAD (4.2) (including moving test classes into separate folders)

Merged revisions 51903 to 54309.


git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/alfresco/HEAD/root@54310 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
This commit is contained in:
Samuel Langlois
2013-08-20 17:17:31 +00:00
parent 0a36e2af67
commit ab4ca7177f
1576 changed files with 36419 additions and 8603 deletions

View File

@@ -1,163 +0,0 @@
/*
* Copyright (C) 2005-2011 Alfresco Software Limited.
*
* This file is part of Alfresco
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
*/
package org.alfresco.repo.content.caching;
import java.io.File;
import org.alfresco.repo.cache.DefaultSimpleCache;
import org.alfresco.repo.cache.SimpleCache;
import org.alfresco.repo.content.AbstractWritableContentStoreTest;
import org.alfresco.repo.content.ContentContext;
import org.alfresco.repo.content.ContentStore;
import org.alfresco.repo.content.filestore.FileContentStore;
import org.alfresco.service.cmr.repository.ContentWriter;
import org.alfresco.util.TempFileProvider;
import org.junit.internal.runners.JUnit38ClassRunner;
import org.junit.runner.RunWith;
/**
 * Exercises the CachingContentStore against the full battery of writable
 * content store tests inherited from AbstractWritableContentStoreTest,
 * plus a few caching-specific integration scenarios.
 *
 * @author Matt Ward
 */
@RunWith(JUnit38ClassRunner.class)
public class CachingContentStoreSpringTest extends AbstractWritableContentStoreTest
{
    private CachingContentStore store;
    private FileContentStore backingStore;
    private ContentCacheImpl cache;

    @Override
    public void setUp() throws Exception
    {
        super.setUp();

        // Each test gets its own backing store directory, named after the current test.
        File tempDir = TempFileProvider.getTempDir();
        String backingStoreDir = tempDir.getAbsolutePath() + File.separatorChar + getName();
        backingStore = new FileContentStore(ctx, backingStoreDir);

        // Disk cache under a long-life temp dir, with an in-memory lookup table.
        cache = new ContentCacheImpl();
        cache.setCacheRoot(TempFileProvider.getLongLifeTempDir("cached_content_test"));
        cache.setMemoryStore(createMemoryStore());

        // cacheOnInbound is disabled here; testCacheOnInbound builds its own store.
        store = new CachingContentStore(backingStore, cache, false);
    }

    /**
     * A plain in-memory cache is all the lookup table needs for these tests.
     */
    private SimpleCache<Key, String> createMemoryStore()
    {
        return new DefaultSimpleCache<Key, String>();
    }

    public void testStoreWillReadFromCacheWhenAvailable()
    {
        final String content = "Content for " + getName() + " test.";

        // Put some content directly into the backing store.
        ContentWriter backingWriter = backingStore.getWriter(ContentContext.NULL_CONTEXT);
        backingWriter.putContent(content);
        final String contentUrl = backingWriter.getContentUrl();

        // Reading through the caching store causes the content to be cached.
        assertEquals(content, store.getReader(contentUrl).getContentString());

        // Delete the original from the backing store...
        backingStore.delete(contentUrl);
        assertFalse("Original content should have been deleted", backingStore.exists(contentUrl));

        // ...yet the cached copy can still be read through the caching store.
        assertEquals(content, store.getReader(contentUrl).getContentString());
    }

    public void testCacheOnInbound()
    {
        // Use a store with write-through (inbound) caching enabled.
        store = new CachingContentStore(backingStore, cache, true);
        final String content = "Content for " + getName() + " test.";
        final String contentUrl = FileContentStore.createNewFileStoreUrl();

        assertFalse("Content shouldn't be cached yet", cache.contains(contentUrl));

        // Writing through the caching store should populate the cache...
        ContentWriter writer = store.getWriter(new ContentContext(null, contentUrl));
        writer.putContent(content);
        assertTrue("Cache should contain content after write", cache.contains(contentUrl));

        // Ask the cache DIRECTLY: a getReader() on the CachingContentStore would itself
        // cache the item, but here we must prove the write operation did the caching.
        assertEquals(content, cache.getReader(contentUrl).getContentString());

        // ...and must also have written through to the backing store.
        String fromBackingStore = backingStore.getReader(contentUrl).getContentString();
        assertEquals("Content should be in backing store", content, fromBackingStore);

        // Delete the original from the backing store.
        backingStore.delete(contentUrl);
        assertFalse("Original content should have been deleted", backingStore.exists(contentUrl));

        // The cached copy remains readable through the caching store.
        assertEquals(content, store.getReader(contentUrl).getContentString());
    }

    public void testStoreWillRecoverFromDeletedCacheFile()
    {
        final String content = "Content for " + getName() + " test.";

        // Put some content directly into the backing store.
        ContentWriter backingWriter = backingStore.getWriter(ContentContext.NULL_CONTEXT);
        backingWriter.putContent(content);
        final String contentUrl = backingWriter.getContentUrl();

        // Reading through the caching store causes the content to be cached.
        assertEquals(content, store.getReader(contentUrl).getContentString());

        // Remove the cached disk file behind the cache's back.
        File cacheFile = new File(cache.getCacheFilePath(contentUrl));
        cacheFile.delete();
        assertFalse("Cached content should have been deleted", cacheFile.exists());

        // The content must still be retrievable even though the in-memory lookup
        // still records the (now missing) cache file.
        assertEquals(content, store.getReader(contentUrl).getContentString());
    }

    /*
     * @see org.alfresco.repo.content.AbstractReadOnlyContentStoreTest#getStore()
     */
    @Override
    protected ContentStore getStore()
    {
        return store;
    }
}

View File

@@ -1,502 +0,0 @@
/*
* Copyright (C) 2005-2011 Alfresco Software Limited.
*
* This file is part of Alfresco
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
*/
package org.alfresco.repo.content.caching;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.util.Date;
import java.util.Locale;
import org.alfresco.repo.content.ContentContext;
import org.alfresco.repo.content.ContentStore;
import org.alfresco.repo.content.ContentStore.ContentUrlHandler;
import org.alfresco.repo.content.caching.quota.QuotaManagerStrategy;
import org.alfresco.repo.content.caching.quota.UnlimitedQuotaStrategy;
import org.alfresco.service.cmr.repository.ContentIOException;
import org.alfresco.service.cmr.repository.ContentReader;
import org.alfresco.service.cmr.repository.ContentStreamListener;
import org.alfresco.service.cmr.repository.ContentWriter;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
/**
 * Unit tests for the CachingContentStore class. The backing store and the cache
 * are both Mockito mocks, so these tests pin down the exact interactions the
 * caching store makes with its collaborators.
 *
 * @author Matt Ward
 */
@RunWith(MockitoJUnitRunner.class)
public class CachingContentStoreTest
{
    private CachingContentStore cachingStore;
    private ContentReader sourceContent;
    private ContentReader cachedContent;

    @Mock
    private ContentStore backingStore;

    @Mock
    private ContentCache cache;

    @Before
    public void setUp() throws Exception
    {
        // Default configuration: no write-through caching, unlimited quota.
        // Individual tests replace the store and/or the quota where needed.
        cachingStore = new CachingContentStore(backingStore, cache, false);
        cachingStore.setQuota(new UnlimitedQuotaStrategy());

        sourceContent = mock(ContentReader.class, "sourceContent");
        cachedContent = mock(ContentReader.class, "cachedContent");
    }

    @Test
    public void getReaderForItemInCache()
    {
        when(cache.contains("url")).thenReturn(true);
        when(cache.getReader("url")).thenReturn(cachedContent);

        ContentReader returnedReader = cachingStore.getReader("url");

        // A cache hit must never touch the backing store.
        assertSame(returnedReader, cachedContent);
        verify(backingStore, never()).getReader(anyString());
    }

    @Test
    // Item isn't in cache, so will be cached and returned.
    public void getReaderForItemMissingFromCache()
    {
        when(cache.getReader("url")).thenReturn(cachedContent);
        when(backingStore.getReader("url")).thenReturn(sourceContent);
        when(sourceContent.getSize()).thenReturn(1274L);
        when(cache.put("url", sourceContent)).thenReturn(true);
        QuotaManagerStrategy quota = mock(QuotaManagerStrategy.class);
        cachingStore.setQuota(quota);
        when(quota.beforeWritingCacheFile(1274L)).thenReturn(true);
        when(quota.afterWritingCacheFile(1274L)).thenReturn(true);

        ContentReader returnedReader = cachingStore.getReader("url");

        assertSame(returnedReader, cachedContent);
        // The post-write quota hook must fire with the cached content's size.
        verify(quota).afterWritingCacheFile(1274L);
    }

    @Test
    public void getReaderForItemMissingFromCacheWillGiveUpAfterRetrying()
    {
        // The cache keeps reporting a miss even after the item has been put.
        when(cache.getReader("url")).thenThrow(new CacheMissException("url"));
        when(backingStore.getReader("url")).thenReturn(sourceContent);
        when(cache.put("url", sourceContent)).thenReturn(true);

        ContentReader returnedReader = cachingStore.getReader("url");

        // Upon failure, item is removed from cache
        verify(cache, atLeastOnce()).remove("url");
        // The content comes direct from the backing store
        assertSame(returnedReader, sourceContent);
    }

    @Test
    public void getReaderForItemMissingFromCacheWillRetryAndCanSucceed()
    {
        // First read misses; the retry succeeds.
        when(cache.getReader("url")).
            thenThrow(new CacheMissException("url")).
            thenReturn(cachedContent);
        when(backingStore.getReader("url")).thenReturn(sourceContent);
        when(cache.put("url", sourceContent)).thenReturn(true);

        ContentReader returnedReader = cachingStore.getReader("url");

        assertSame(returnedReader, cachedContent);
    }

    @Test
    public void getReaderForItemMissingFromCacheButNoContentToCache()
    {
        // put() returning false means "nothing was cached" (e.g. no content);
        // getReader() must still complete without error.
        when(cache.getReader("url")).thenThrow(new CacheMissException("url"));
        when(backingStore.getReader("url")).thenReturn(sourceContent);
        when(cache.put("url", sourceContent)).thenReturn(false);

        cachingStore.getReader("url");
    }

    @Test
    // When attempting to read uncached content.
    public void quotaManagerCanVetoCacheFileWriting()
    {
        when(backingStore.getReader("url")).thenReturn(sourceContent);
        QuotaManagerStrategy quota = mock(QuotaManagerStrategy.class);
        cachingStore.setQuota(quota);
        when(sourceContent.getSize()).thenReturn(1274L);
        // Veto the cache file write...
        when(quota.beforeWritingCacheFile(1274L)).thenReturn(false);

        ContentReader returnedReader = cachingStore.getReader("url");

        // ...so nothing is cached, the reader comes straight from the backing
        // store, and the post-write hook never fires.
        verify(cache, never()).put("url", sourceContent);
        assertSame(returnedReader, sourceContent);
        verify(quota, never()).afterWritingCacheFile(anyLong());
    }

    @Test
    public void getWriterWhenNotCacheOnInbound()
    {
        QuotaManagerStrategy quota = mock(QuotaManagerStrategy.class);
        cachingStore.setQuota(quota);
        ContentContext ctx = ContentContext.NULL_CONTEXT;

        cachingStore.getWriter(ctx);

        verify(backingStore).getWriter(ctx);
        // No quota manager interaction - as no caching happening.
        verify(quota, never()).beforeWritingCacheFile(anyLong());
        verify(quota, never()).afterWritingCacheFile(anyLong());
    }

    @Test
    public void getWriterWhenCacheOnInbound() throws ContentIOException, IOException
    {
        cachingStore = new CachingContentStore(backingStore, cache, true);
        ContentContext ctx = ContentContext.NULL_CONTEXT;
        ContentWriter bsWriter = mock(ContentWriter.class);
        when(backingStore.getWriter(ctx)).thenReturn(bsWriter);
        when(bsWriter.getContentUrl()).thenReturn("url");
        ContentWriter cacheWriter = mock(ContentWriter.class);
        when(cache.getWriter("url")).thenReturn(cacheWriter);
        ContentReader readerFromCacheWriter = mock(ContentReader.class);
        when(cacheWriter.getReader()).thenReturn(readerFromCacheWriter);
        when(cacheWriter.getSize()).thenReturn(54321L);

        QuotaManagerStrategy quota = mock(QuotaManagerStrategy.class);
        cachingStore.setQuota(quota);
        // Quota manager interceptor is fired (size is 0 before anything is written).
        when(quota.beforeWritingCacheFile(0L)).thenReturn(true);

        cachingStore.getWriter(ctx);

        // Check that a listener was attached to cacheWriter with the correct behaviour
        ArgumentCaptor<ContentStreamListener> arg = ArgumentCaptor.forClass(ContentStreamListener.class);
        verify(cacheWriter).addListener(arg.capture());

        // Simulate a stream close
        arg.getValue().contentStreamClosed();
        // Check behaviour of the listener: content is copied through to the backing store.
        verify(bsWriter).putContent(readerFromCacheWriter);
        // Post caching quota manager hook is fired with the final cache file size.
        verify(quota).afterWritingCacheFile(54321L);
    }

    @Test
    // When attempting to perform write-through caching, i.e. cacheOnInbound = true
    public void quotaManagerCanVetoInboundCaching()
    {
        cachingStore = new CachingContentStore(backingStore, cache, true);
        QuotaManagerStrategy quota = mock(QuotaManagerStrategy.class);
        cachingStore.setQuota(quota);
        ContentContext ctx = ContentContext.NULL_CONTEXT;
        ContentWriter backingStoreWriter = mock(ContentWriter.class);
        when(backingStore.getWriter(ctx)).thenReturn(backingStoreWriter);
        // Veto inbound caching before anything is written.
        when(quota.beforeWritingCacheFile(0L)).thenReturn(false);

        ContentWriter returnedWriter = cachingStore.getWriter(ctx);

        assertSame("Should be writing direct to backing store", backingStoreWriter, returnedWriter);
        verify(quota, never()).afterWritingCacheFile(anyLong());
    }

    @Test
    public void quotaManagerCanRequestFileDeletionFromCacheAfterWrite()
    {
        cachingStore = new CachingContentStore(backingStore, cache, true);
        ContentContext ctx = ContentContext.NULL_CONTEXT;
        ContentWriter bsWriter = mock(ContentWriter.class);
        when(backingStore.getWriter(ctx)).thenReturn(bsWriter);
        when(bsWriter.getContentUrl()).thenReturn("url");
        ContentWriter cacheWriter = mock(ContentWriter.class);
        when(cache.getWriter("url")).thenReturn(cacheWriter);
        ContentReader readerFromCacheWriter = mock(ContentReader.class);
        when(cacheWriter.getReader()).thenReturn(readerFromCacheWriter);
        when(cacheWriter.getSize()).thenReturn(54321L);

        QuotaManagerStrategy quota = mock(QuotaManagerStrategy.class);
        cachingStore.setQuota(quota);
        // Quota manager interceptor is fired.
        when(quota.beforeWritingCacheFile(0L)).thenReturn(true);

        cachingStore.getWriter(ctx);

        // Check that a listener was attached to cacheWriter with the correct behaviour
        ArgumentCaptor<ContentStreamListener> arg = ArgumentCaptor.forClass(ContentStreamListener.class);
        verify(cacheWriter).addListener(arg.capture());

        // Don't keep the new cache file
        when(quota.afterWritingCacheFile(54321L)).thenReturn(false);

        // Simulate a stream close
        arg.getValue().contentStreamClosed();
        // Check behaviour of the listener
        verify(bsWriter).putContent(readerFromCacheWriter);
        // Post caching quota manager hook is fired.
        verify(quota).afterWritingCacheFile(54321L);
        // The item should be deleted from the cache (lookup table and content cache file)
        verify(cache).deleteFile("url");
        verify(cache).remove("url");
    }

    @Test
    public void quotaManagerCanRequestFileDeletionFromCacheAfterWriteWhenNotCacheOnInbound()
    {
        when(cache.getReader("url")).thenReturn(cachedContent);
        when(backingStore.getReader("url")).thenReturn(sourceContent);
        when(sourceContent.getSize()).thenReturn(1274L);
        when(cache.put("url", sourceContent)).thenReturn(true);
        QuotaManagerStrategy quota = mock(QuotaManagerStrategy.class);
        cachingStore.setQuota(quota);

        // Don't veto writing the cache file.
        when(quota.beforeWritingCacheFile(1274L)).thenReturn(true);
        // Do request cache file deletion. NOTE: the stubbed size must match the
        // content size above (1274L). This previously stubbed 1234L and only
        // passed because Mockito returns false for unstubbed boolean calls.
        when(quota.afterWritingCacheFile(1274L)).thenReturn(false);

        ContentReader returnedReader = cachingStore.getReader("url");

        // Was the file deleted?
        verify(cache).deleteFile("url");
        verify(cache).remove("url");
        // As the cache file has been deleted, the reader must come from the backing store
        // rather than the cache.
        assertSame(returnedReader, sourceContent);
    }

    @Test(expected=RuntimeException.class)
    // Check that exceptions raised by the backing store's putContent(ContentReader)
    // aren't swallowed and can therefore cause the transaction to fail.
    public void exceptionRaisedWhenCopyingTempToBackingStoreIsPropogatedCorrectly()
        throws ContentIOException, IOException
    {
        cachingStore = new CachingContentStore(backingStore, cache, true);
        ContentContext ctx = ContentContext.NULL_CONTEXT;
        ContentWriter bsWriter = mock(ContentWriter.class);
        when(backingStore.getWriter(ctx)).thenReturn(bsWriter);
        when(bsWriter.getContentUrl()).thenReturn("url");
        ContentWriter cacheWriter = mock(ContentWriter.class);
        when(cache.getWriter("url")).thenReturn(cacheWriter);
        ContentReader readerFromCacheWriter = mock(ContentReader.class);
        when(cacheWriter.getReader()).thenReturn(readerFromCacheWriter);
        // The copy to the backing store will blow up...
        doThrow(new RuntimeException()).when(bsWriter).putContent(any(ContentReader.class));

        cachingStore.getWriter(ctx);

        // Get the stream listener and trigger it
        ArgumentCaptor<ContentStreamListener> arg = ArgumentCaptor.forClass(ContentStreamListener.class);
        verify(cacheWriter).addListener(arg.capture());

        // Simulate a stream close: the RuntimeException must propagate out.
        arg.getValue().contentStreamClosed();
    }

    @Test
    public void encodingAttrsCopiedToBackingStoreWriter()
    {
        cachingStore = new CachingContentStore(backingStore, cache, true);
        ContentContext ctx = ContentContext.NULL_CONTEXT;
        ContentWriter bsWriter = mock(ContentWriter.class);
        when(backingStore.getWriter(ctx)).thenReturn(bsWriter);
        when(bsWriter.getContentUrl()).thenReturn("url");
        ContentWriter cacheWriter = mock(ContentWriter.class);
        when(cache.getWriter("url")).thenReturn(cacheWriter);
        ContentReader readerFromCacheWriter = mock(ContentReader.class);
        when(cacheWriter.getReader()).thenReturn(readerFromCacheWriter);
        when(cacheWriter.getEncoding()).thenReturn("UTF-8");
        when(cacheWriter.getLocale()).thenReturn(Locale.UK);
        when(cacheWriter.getMimetype()).thenReturn("not/real/mimetype");

        cachingStore.getWriter(ctx);

        // Get the stream listener and trigger it
        ArgumentCaptor<ContentStreamListener> arg = ArgumentCaptor.forClass(ContentStreamListener.class);
        verify(cacheWriter).addListener(arg.capture());

        // Simulate a stream close
        arg.getValue().contentStreamClosed();

        // Encoding, locale and mimetype must all be copied to the backing store writer.
        verify(bsWriter).setEncoding("UTF-8");
        verify(bsWriter).setLocale(Locale.UK);
        verify(bsWriter).setMimetype("not/real/mimetype");
    }

    ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // Tests for delegated methods follow...
    ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

    @Test
    public void delegatedIsContentUrlSupported()
    {
        when(backingStore.isContentUrlSupported("url")).thenReturn(true);
        assertTrue(cachingStore.isContentUrlSupported("url"));

        when(backingStore.isContentUrlSupported("url")).thenReturn(false);
        assertFalse(cachingStore.isContentUrlSupported("url"));
    }

    @Test
    public void delegatedIsWriteSupported()
    {
        when(backingStore.isWriteSupported()).thenReturn(true);
        assertTrue(cachingStore.isWriteSupported());

        when(backingStore.isWriteSupported()).thenReturn(false);
        assertFalse(cachingStore.isWriteSupported());
    }

    @Test
    public void delegatedGetTotalSize()
    {
        when(backingStore.getTotalSize()).thenReturn(234L);
        assertEquals(234L, cachingStore.getTotalSize());
    }

    @Test
    public void delegatedGetSpaceUsed()
    {
        when(backingStore.getSpaceUsed()).thenReturn(453L);
        assertEquals(453L, cachingStore.getSpaceUsed());
    }

    @Test
    public void delegatedGetSpaceFree()
    {
        when(backingStore.getSpaceFree()).thenReturn(124L);
        assertEquals(124L, cachingStore.getSpaceFree());
    }

    @Test
    public void delegatedGetSpaceTotal()
    {
        when(backingStore.getSpaceTotal()).thenReturn(4234L);
        assertEquals(4234L, cachingStore.getSpaceTotal());
    }

    @Test
    public void delegatedGetRootLocation()
    {
        when(backingStore.getRootLocation()).thenReturn("/random/root/dir");
        assertEquals("/random/root/dir", cachingStore.getRootLocation());
    }

    @Test
    public void delegatedExists()
    {
        when(backingStore.exists("url")).thenReturn(true);
        assertTrue(cachingStore.exists("url"));

        when(backingStore.exists("url")).thenReturn(false);
        assertFalse(cachingStore.exists("url"));
    }

    @Test
    public void delegatedGetUrls1()
    {
        ContentUrlHandler handler = createDummyUrlHandler();

        cachingStore.getUrls(handler);

        verify(backingStore).getUrls(handler);
    }

    @Test
    public void delegatedGetUrls2()
    {
        ContentUrlHandler handler = createDummyUrlHandler();
        Date after = new Date(123L);
        Date before = new Date(456L);

        cachingStore.getUrls(after, before, handler);

        verify(backingStore).getUrls(after, before, handler);
    }

    @Test
    public void delegatedDelete()
    {
        when(backingStore.delete("url")).thenReturn(true);
        assertTrue(cachingStore.delete("url"));

        when(backingStore.delete("url")).thenReturn(false);
        assertFalse(cachingStore.delete("url"));
    }

    /**
     * Create a stub handler - just so we can check it has been passed around correctly.
     *
     * @return ContentUrlHandler
     */
    private ContentUrlHandler createDummyUrlHandler()
    {
        ContentUrlHandler handler = new ContentUrlHandler()
        {
            @Override
            public void handle(String contentUrl)
            {
            }
        };
        return handler;
    }
}

View File

@@ -1,54 +0,0 @@
/*
* Copyright (C) 2005-2011 Alfresco Software Limited.
*
* This file is part of Alfresco
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
*/
package org.alfresco.repo.content.caching;
import org.alfresco.repo.content.caching.cleanup.CachedContentCleanupJobTest;
import org.alfresco.repo.content.caching.quota.StandardQuotaStrategyMockTest;
import org.alfresco.repo.content.caching.quota.StandardQuotaStrategyTest;
import org.alfresco.repo.content.caching.quota.UnlimitedQuotaStrategyTest;
import org.alfresco.repo.content.caching.test.ConcurrentCachingStoreTest;
import org.alfresco.repo.content.caching.test.SlowContentStoreTest;
import org.junit.runner.RunWith;
import org.junit.runners.Suite;
/**
 * Test suite for all the CachingContentStore test classes.
 *
 * @author Matt Ward
 */
@RunWith(Suite.class)
@Suite.SuiteClasses(
{
CachedContentCleanupJobTest.class,
StandardQuotaStrategyMockTest.class,
StandardQuotaStrategyTest.class,
UnlimitedQuotaStrategyTest.class,
ConcurrentCachingStoreTest.class,
SlowContentStoreTest.class,
// TODO: CachingContentStoreSpringTest doesn't seem to like being run in a suite;
// will fix later but please run it separately for now.
//CachingContentStoreSpringTest.class,
CachingContentStoreTest.class,
ContentCacheImplTest.class,
FullTest.class
})
// Marker class only: the @RunWith/@SuiteClasses annotations carry all the configuration.
public class CachingContentStoreTestSuite
{
}

View File

@@ -41,7 +41,7 @@ import org.apache.commons.logging.LogFactory;
* The one and only implementation of the ContentCache class. Binary content data itself
* is stored on disk in the location specified by {@link cacheRoot}.
* <p>
* The in-memory lookup table is provided by Ehcache.
* The in-memory lookup table is provided by a SimpleCache implementation.
*
* @author Matt Ward
*/
@@ -292,7 +292,7 @@ public class ContentCacheImpl implements ContentCache
/**
* Configure ContentCache with a memory store - an EhCacheAdapter.
* Configure ContentCache with a memory store.
*
* @param memoryStore the memoryStore to set
*/

View File

@@ -1,358 +0,0 @@
/*
* Copyright (C) 2005-2011 Alfresco Software Limited.
*
* This file is part of Alfresco
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
*/
package org.alfresco.repo.content.caching;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import org.alfresco.repo.cache.SimpleCache;
import org.alfresco.repo.content.caching.ContentCacheImpl.NumericFileNameComparator;
import org.alfresco.repo.content.filestore.FileContentReader;
import org.alfresco.repo.content.filestore.FileContentWriter;
import org.alfresco.service.cmr.repository.ContentReader;
import org.alfresco.service.cmr.repository.ContentWriter;
import org.alfresco.util.GUID;
import org.alfresco.util.TempFileProvider;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.ArgumentCaptor;
import org.mockito.InOrder;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.runners.MockitoJUnitRunner;
import org.mockito.stubbing.Answer;
/**
* Tests for the ContentCacheImpl class.
*
* @author Matt Ward
*/
@RunWith(MockitoJUnitRunner.class)
public class ContentCacheImplTest
{
// Class under test: a REAL ContentCacheImpl (not a mock).
private ContentCacheImpl contentCache;
// Mocked in-memory lookup table (URL <-> cache file path, in both directions).
private @Mock SimpleCache<Key, String> lookupTable;

@Before
public void setUp() throws Exception
{
    // Real cache with a mocked lookup table; cache files go under the temp dir.
    contentCache = new ContentCacheImpl();
    contentCache.setMemoryStore(lookupTable);
    contentCache.setCacheRoot(TempFileProvider.getTempDir());
}
// A null cache root is a programming error and must be rejected immediately.
@Test(expected=IllegalArgumentException.class)
public void cannotSetNullCacheRoot()
{
    contentCache.setCacheRoot(null);
}
@Test
public void willCreateNonExistentCacheRoot()
{
    // Point the cache at a directory that does not exist yet...
    File cacheRoot = new File(TempFileProvider.getTempDir(), GUID.generate());
    cacheRoot.deleteOnExit();
    assertFalse("Pre-condition of test is that cacheRoot does not exist", cacheRoot.exists());

    // ...setCacheRoot() is expected to create it on demand.
    contentCache.setCacheRoot(cacheRoot);
    assertTrue("cacheRoot should have been created", cacheRoot.exists());
}
@Test
public void canGetReaderForItemInCacheHavingLiveFile()
{
    final String url = "store://content/url.bin";
    Mockito.when(lookupTable.contains(Key.forUrl(url))).thenReturn(true);
    // tempfile() supplies a real file on disk to back the cached item.
    final String path = tempfile().getAbsolutePath();
    Mockito.when(lookupTable.get(Key.forUrl(url))).thenReturn(path);

    FileContentReader reader = (FileContentReader) contentCache.getReader(url);

    assertEquals("Reader should have correct URL", url, reader.getContentUrl());
    assertEquals("Reader should be for correct cached content file", path, reader.getFile().getAbsolutePath());
    // Important the get(path) was called, so that the timeToIdle is reset
    // for the 'reverse lookup' as well as the URL to path mapping.
    Mockito.verify(lookupTable).get(Key.forCacheFile(path));
}
// The lookup table knows the URL but the disk file has gone: a CacheMissException
// is expected, and the reverse-lookup entry must still be touched regardless.
@Test(expected=CacheMissException.class)
public void getReaderForItemInCacheButMissingContentFile()
{
    final String url = "store://content/url.bin";
    Mockito.when(lookupTable.contains(Key.forUrl(url))).thenReturn(true);
    final String path = "/no/content/file/at/this/path.bin";
    Mockito.when(lookupTable.get(Key.forUrl(url))).thenReturn(path);

    try
    {
        contentCache.getReader(url);
    }
    finally
    {
        // Important the get(path) was called, so that the timeToIdle is reset
        // for the 'reverse lookup' as well as the URL to path mapping.
        Mockito.verify(lookupTable).get(Key.forCacheFile(path));
    }
}
// Unknown URL: getReader() must fail fast with a CacheMissException.
@Test(expected=CacheMissException.class)
public void getReaderWhenItemNotInCache()
{
    final String url = "store://content/url.bin";
    Mockito.when(lookupTable.contains(Key.forUrl(url))).thenReturn(false);
    contentCache.getReader(url);
}
@Test
public void contains()
{
    final String url = "store://content/url.bin";

    // Both overloads of contains() (Key and String) simply reflect the lookup table.
    Mockito.when(lookupTable.contains(Key.forUrl(url))).thenReturn(true);
    assertTrue(contentCache.contains(Key.forUrl(url)));
    assertTrue(contentCache.contains(url));

    Mockito.when(lookupTable.contains(Key.forUrl(url))).thenReturn(false);
    assertFalse(contentCache.contains(Key.forUrl(url)));
    assertFalse(contentCache.contains(url));
}
@Test
public void putIntoLookup()
{
    final Key key = Key.forUrl("store://some/url");
    final String value = "/some/path";

    // putIntoLookup() is a straight delegation to the lookup table.
    contentCache.putIntoLookup(key, value);

    Mockito.verify(lookupTable).put(key, value);
}
@Test
public void getCacheFilePath()
{
    final String url = "store://some/url.bin";
    final String expectedPath = "/some/cache/file/path";
    // The path is resolved via the forward (URL -> path) lookup direction.
    Mockito.when(lookupTable.get(Key.forUrl(url))).thenReturn(expectedPath);

    String path = contentCache.getCacheFilePath(url);

    assertEquals("Paths must match", expectedPath, path);
}
@Test
public void getContentUrl()
{
    final File cacheFile = new File("/some/path");
    final String expectedUrl = "store://some/url";
    // The URL is resolved via the reverse (path -> URL) lookup direction.
    Mockito.when(lookupTable.get(Key.forCacheFile(cacheFile))).thenReturn(expectedUrl);

    String url = contentCache.getContentUrl(cacheFile);

    assertEquals("Content URLs should match", expectedUrl, url);
}
@Test
public void putForZeroLengthFile()
{
    // A reader reporting zero bytes must not result in a cached item.
    ContentReader contentReader = Mockito.mock(ContentReader.class);
    Mockito.when(contentReader.getSize()).thenReturn(0L);

    boolean putResult = contentCache.put("", contentReader);

    assertFalse("Zero length files should not be cached", putResult);
}
@Test
public void putForNonEmptyFile()
{
    ContentReader contentReader = Mockito.mock(ContentReader.class);
    Mockito.when(contentReader.getSize()).thenReturn(999000L);

    // Capture the temp file that the cache asks the reader to write to, and
    // create it on disk, since put() renames it into its final location.
    // Answer is parameterised (Answer<Object>) to avoid the raw-type unchecked warning.
    ArgumentCaptor<File> cacheFileArg = ArgumentCaptor.forClass(File.class);
    Mockito.doAnswer(new Answer<Object>() {
        public Object answer(InvocationOnMock invocation) {
            Object[] args = invocation.getArguments();
            File f = (File) args[0];
            // The file must be present for the rename to work
            // in ContentCacheImpl.put()
            try
            {
                f.createNewFile();
            }
            catch (IOException error)
            {
                error.printStackTrace();
            }
            return null;
        }})
    .when(contentReader).getContent(cacheFileArg.capture());

    final String url = "store://some/url.bin";
    boolean putResult = contentCache.put(url, contentReader);
    assertTrue("Non-empty files should be cached", putResult);

    // The rename will have taken effect (.tmp becomes .bin)
    String cacheFilePath = cacheFileArg.getValue().getAbsolutePath().replace(".tmp", ".bin");
    // Check the cached item is recorded in the lookup table, in both directions.
    Mockito.verify(lookupTable).put(Key.forUrl(url), cacheFilePath);
    Mockito.verify(lookupTable).put(Key.forCacheFile(cacheFilePath), url);
}
@Test
public void remove()
{
    final String url = "store://some/url.bin";
    final String path = "/some/path";
    Mockito.when(lookupTable.get(Key.forUrl(url))).thenReturn(path);

    contentCache.remove(url);

    // Both the forward (URL -> path) and reverse (path -> URL) entries must be removed.
    Mockito.verify(lookupTable).remove(Key.forUrl(url));
    Mockito.verify(lookupTable).remove(Key.forCacheFile(path));
}
@Test
public void deleteFile()
{
    File cacheFile = tempfile();
    assertTrue("Temp file should have been written", cacheFile.exists());
    // Stub the lookup table mock directly rather than calling Mockito.when() on
    // the real (non-mock) contentCache: getCacheFilePath(url) resolves through
    // lookupTable.get(Key.forUrl(url)) — see the getCacheFilePath test — so this
    // is equivalent, and avoids relying on Mockito's fragile handling of when()
    // applied to a call on a non-mock object.
    Mockito.when(lookupTable.get(Key.forUrl("url"))).thenReturn(cacheFile.getAbsolutePath());

    contentCache.deleteFile("url");

    assertFalse("File should have been deleted", cacheFile.exists());
}
@Test
public void getWriter()
{
    final String url = "store://some/url.bin";

    FileContentWriter writer = (FileContentWriter) contentCache.getWriter(url);
    writer.putContent("Some test content for " + getClass().getName());

    assertEquals(url, writer.getContentUrl());
    // Completing the write must record the item in the lookup table, in both directions.
    Mockito.verify(lookupTable).put(Key.forUrl(url), writer.getFile().getAbsolutePath());
    Mockito.verify(lookupTable).put(Key.forCacheFile(writer.getFile().getAbsolutePath()), url);
}
@Test
public void compareNumericFileNames()
{
    NumericFileNameComparator comparator = new NumericFileNameComparator();

    // Basic ordering of purely numeric names.
    File one = new File("1");
    File two = new File("2");
    assertEquals(-1, comparator.compare(one, two));
    assertEquals(0, comparator.compare(two, two));
    assertEquals(1, comparator.compare(two, one));

    // Make sure that ordering is numeric and not by string value
    File three = new File("3");
    File twenty = new File("20");
    assertEquals(-1, comparator.compare(three, twenty));
    assertEquals(1, comparator.compare(twenty, three));

    // Non-numeric names sort after numeric ones.
    File nonNumeric = new File("non-numeric");
    assertEquals(-1, comparator.compare(three, nonNumeric));
    assertEquals(1, comparator.compare(nonNumeric, three));
}
/**
 * Files are laid out under the cache root in year/month/day/hour/minute
 * directories; processFiles(...) must visit the oldest directories first,
 * regardless of the order the files were created in.
 */
@Test
public void canVisitOldestDirsFirst()
{
// Use a fresh, unique cache root so no other test's files interfere.
File cacheRoot = new File(TempFileProvider.getTempDir(), GUID.generate());
cacheRoot.deleteOnExit();
contentCache.setCacheRoot(cacheRoot);
// Created deliberately out of chronological order.
File f1 = tempfile(createDirs("2000/3/30/17/45/31"), "files-are-unsorted.bin");
File f2 = tempfile(createDirs("2000/3/4/17/45/31"), "another-file.bin");
File f3 = tempfile(createDirs("2010/12/24/23/59/58"), "a-second-before.bin");
File f4 = tempfile(createDirs("2010/12/24/23/59/59"), "last-one.bin");
File f5 = tempfile(createDirs("2000/1/7/2/7/12"), "first-one.bin");
// Check that directories and files are visited in correct order
FileHandler handler = Mockito.mock(FileHandler.class);
contentCache.processFiles(handler);
// Expected visiting order is chronological: f5, f2, f1, f3, f4.
InOrder inOrder = Mockito.inOrder(handler);
inOrder.verify(handler).handle(f5);
inOrder.verify(handler).handle(f2);
inOrder.verify(handler).handle(f1);
inOrder.verify(handler).handle(f3);
inOrder.verify(handler).handle(f4);
}
/** Creates a throwaway cached-content file with the default name and suffix. */
private File tempfile()
{
    return tempfile("cached-content", ".bin");
}
/** Creates a temp file (deleted on JVM exit) with the given name and suffix. */
private File tempfile(String name, String suffix)
{
    File created = TempFileProvider.createTempFile(name, suffix);
    created.deleteOnExit();
    return created;
}
/**
 * Creates an empty file with the given name in the given directory.
 * The file is deleted on JVM exit; creation failure aborts the test.
 */
private File tempfile(File dir, String name)
{
    File newFile = new File(dir, name);
    try
    {
        newFile.createNewFile();
    }
    catch (IOException error)
    {
        throw new RuntimeException(error);
    }
    newFile.deleteOnExit();
    return newFile;
}
/** Creates (on disk) the given directory path under the cache root and returns it. */
private File createDirs(String path)
{
    File dir = new File(contentCache.getCacheRoot(), path);
    dir.mkdirs();
    return dir;
}
}

View File

@@ -1,116 +0,0 @@
/*
* Copyright (C) 2005-2011 Alfresco Software Limited.
*
* This file is part of Alfresco
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
*/
package org.alfresco.repo.content.caching;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import org.alfresco.repo.content.ContentContext;
import org.alfresco.repo.content.filestore.FileContentStore;
import org.alfresco.service.cmr.repository.ContentReader;
import org.alfresco.service.cmr.repository.ContentWriter;
import org.alfresco.util.ApplicationContextHelper;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.springframework.context.ApplicationContext;
/**
* Tests for CachingContentStore where all the main collaborators are defined as Spring beans.
*
* @author Matt Ward
*/
public class FullTest
{
// Shared Spring context for all tests in this class (built once in beforeClass).
private static ApplicationContext ctx;
// The store under test, wired up from cachingstore/test-context.xml.
private CachingContentStore store;
@BeforeClass
public static void beforeClass()
{
String conf = "classpath:cachingstore/test-context.xml";
ctx = ApplicationContextHelper.getApplicationContext(new String[] { conf });
}
@Before
public void setUp()
{
store = (CachingContentStore) ctx.getBean("cachingContentStore");
// Cache content as it is written, not only on read.
store.setCacheOnInbound(true);
}
/**
 * Round-trip: content written through the store can be read back with
 * the same URL and the same content.
 */
@Test
public void canUseCachingContentStore()
{
// Write through the caching content store - cache during the process.
ContentWriter writer = store.getWriter(ContentContext.NULL_CONTEXT);
final String content = makeContent();
writer.putContent(content);
ContentReader reader = store.getReader(writer.getContentUrl());
assertEquals("Reader and writer should have same URLs", writer.getContentUrl(), reader.getContentUrl());
assertEquals("Reader should get correct content", content, reader.getContentString());
}
/**
 * A caller-proposed content URL (via ContentContext) must be honoured by
 * the writer and usable to read the content back.
 */
@Test
public void writeToCacheWithContentContext()
{
// Write through the caching content store - cache during the process.
final String proposedUrl = FileContentStore.createNewFileStoreUrl();
ContentWriter writer = store.getWriter(new ContentContext(null, proposedUrl));
final String content = makeContent();
writer.putContent(content);
assertEquals("Writer should have correct URL", proposedUrl, writer.getContentUrl());
ContentReader reader = store.getReader(writer.getContentUrl());
assertEquals("Reader and writer should have same URLs", writer.getContentUrl(), reader.getContentUrl());
assertEquals("Reader should get correct content", content, reader.getContentString());
}
/**
 * Supplying an existing reader in the ContentContext must not cause the
 * new writer to reuse the old content URL.
 */
@Test
public void writeToCacheWithExistingReader()
{
// First write some 'old' content whose reader we can pass in below.
ContentWriter oldWriter = store.getWriter(ContentContext.NULL_CONTEXT);
oldWriter.putContent("Old content for " + getClass().getSimpleName());
ContentReader existingReader = oldWriter.getReader();
// Write through the caching content store - cache during the process.
final String proposedUrl = FileContentStore.createNewFileStoreUrl();
ContentWriter writer = store.getWriter(new ContentContext(existingReader, proposedUrl));
final String content = makeContent();
writer.putContent(content);
assertEquals("Writer should have correct URL", proposedUrl, writer.getContentUrl());
assertFalse("Old and new writers must have different URLs",
oldWriter.getContentUrl().equals(writer.getContentUrl()));
ContentReader reader = store.getReader(writer.getContentUrl());
assertEquals("Reader and writer should have same URLs", writer.getContentUrl(), reader.getContentUrl());
assertEquals("Reader should get correct content", content, reader.getContentString());
}
// Sample content string, unique to this test class.
private String makeContent()
{
return "Example content for " + getClass().getSimpleName();
}
}

View File

@@ -1,468 +0,0 @@
/*
* Copyright (C) 2005-2011 Alfresco Software Limited.
*
* This file is part of Alfresco
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
*/
package org.alfresco.repo.content.caching.cleanup;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.Calendar;
import java.util.Date;
import java.util.GregorianCalendar;
import org.alfresco.repo.content.caching.CacheFileProps;
import org.alfresco.repo.content.caching.CachingContentStore;
import org.alfresco.repo.content.caching.ContentCacheImpl;
import org.alfresco.repo.content.caching.Key;
import org.alfresco.service.cmr.repository.ContentReader;
import org.alfresco.util.ApplicationContextHelper;
import org.alfresco.util.GUID;
import org.apache.commons.io.FileUtils;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import org.springframework.context.ApplicationContext;
/**
* Tests for the CachedContentCleanupJob
*
* @author Matt Ward
*/
public class CachedContentCleanupJobTest
{
// How the cleaner can discover the original content URL for a cache file:
// from the companion properties file, from a reverse lookup in the cache,
// or not at all.
private enum UrlSource { PROPS_FILE, REVERSE_CACHE_LOOKUP, NOT_PRESENT };
// Shared Spring context, built once in beforeClass.
private static ApplicationContext ctx;
private CachingContentStore cachingStore;
private ContentCacheImpl cache;
// Root directory under which cache files are written (cleaned before each test).
private File cacheRoot;
// The cleaner under test.
private CachedContentCleaner cleaner;
@BeforeClass
public static void beforeClass()
{
String conf = "classpath:cachingstore/test-context.xml";
String cleanerConf = "classpath:cachingstore/test-cleaner-context.xml";
ctx = ApplicationContextHelper.getApplicationContext(new String[] { conf, cleanerConf });
}
@Before
public void setUp() throws IOException
{
cachingStore = (CachingContentStore) ctx.getBean("cachingContentStore");
cache = (ContentCacheImpl) ctx.getBean("contentCache");
cacheRoot = cache.getCacheRoot();
cleaner = (CachedContentCleaner) ctx.getBean("cachedContentCleaner");
// Aggressive defaults for testing: no minimum age, immediate deletion.
cleaner.setMinFileAgeMillis(0);
cleaner.setMaxDeleteWatchCount(0);
// Clear the cache from disk and memory
cache.removeAll();
FileUtils.cleanDirectory(cacheRoot);
}
/**
 * Files on disk that are not (forward-)referenced by the cache must all be
 * deleted, and the cleaner must report correct deletion counts and sizes.
 */
@Test
public void filesNotInCacheAreDeleted() throws InterruptedException
{
cleaner.setMaxDeleteWatchCount(0);
int numFiles = 300; // Must be a multiple of number of UrlSource types being tested
long totalSize = 0; // what is the total size of the sample files?
File[] files = new File[numFiles];
for (int i = 0; i < numFiles; i++)
{
// Testing with a number of files. The cached file cleaner will be able to determine the 'original'
// content URL for each file by either retrieving from the companion properties file, or performing
// a 'reverse lookup' in the cache (i.e. cache.contains(Key.forCacheFile(...))), or there will be no
// URL determinable for the file.
UrlSource urlSource = UrlSource.values()[i % UrlSource.values().length];
File cacheFile = createCacheFile(urlSource, false);
files[i] = cacheFile;
totalSize += cacheFile.length();
}
// Run cleaner
cleaner.execute();
// Poll until the cleaner's (asynchronous) run has finished.
Thread.sleep(400);
while (cleaner.isRunning())
{
Thread.sleep(200);
}
// check all files deleted
for (File file : files)
{
assertFalse("File should have been deleted: " + file, file.exists());
}
assertEquals("Incorrect number of deleted files", numFiles, cleaner.getNumFilesDeleted());
assertEquals("Incorrect total size of files deleted", totalSize, cleaner.getSizeFilesDeleted());
}
/**
 * Files younger than minFileAgeMillis must be left alone; only the older
 * batch may be deleted. NOTE(review): the assertions at the end compare the
 * deletion counts against the NEW files' figures, relying on old/new batches
 * being the same size — timing-sensitive, see inline comment.
 */
@Test
public void filesNewerThanMinFileAgeMillisAreNotDeleted() throws InterruptedException
{
final long minFileAge = 1000;
cleaner.setMinFileAgeMillis(minFileAge);
cleaner.setMaxDeleteWatchCount(0);
int numFiles = 10;
File[] oldFiles = new File[numFiles];
for (int i = 0; i < numFiles; i++)
{
oldFiles[i] = createCacheFile(UrlSource.REVERSE_CACHE_LOOKUP, false);
}
// Sleep to make sure 'old' files really are older than minFileAgeMillis
Thread.sleep(minFileAge);
File[] newFiles = new File[numFiles];
long newFilesTotalSize = 0;
for (int i = 0; i < numFiles; i++)
{
newFiles[i] = createCacheFile(UrlSource.REVERSE_CACHE_LOOKUP, false);
newFilesTotalSize += newFiles[i].length();
}
// The cleaner must finish before any of the newFiles are older than minFileAge. If the files are too
// old the test will fail and it will be necessary to rethink how to test this.
cleaner.execute();
Thread.sleep(400);
while (cleaner.isRunning())
{
Thread.sleep(200);
}
// check all 'old' files deleted
for (File file : oldFiles)
{
assertFalse("File should have been deleted: " + file, file.exists());
}
// check all 'new' files still present
for (File file : newFiles)
{
assertTrue("File should not have been deleted: " + file, file.exists());
}
assertEquals("Incorrect number of deleted files", newFiles.length, cleaner.getNumFilesDeleted());
assertEquals("Incorrect total size of files deleted", newFilesTotalSize, cleaner.getSizeFilesDeleted());
}
/**
 * An aggressive clean must reclaim exactly the requested amount of space
 * (here: seven files' worth), even for files that are still in the cache.
 */
@Test
public void aggressiveCleanReclaimsTargetSpace() throws InterruptedException
{
int numFiles = 30;
File[] files = new File[numFiles];
for (int i = 0; i < numFiles; i++)
{
// Make sure it's in the cache - all the files will be in the cache, so the
// cleaner won't clean any up once it has finished aggressively reclaiming space.
files[i] = createCacheFile(UrlSource.REVERSE_CACHE_LOOKUP, true);
}
// How much space to reclaim - seven files worth (all files are same size)
long fileSize = files[0].length();
long sevenFilesSize = 7 * fileSize;
// We'll get it to clean seven files worth aggressively and then it will continue non-aggressively.
// It will delete the older files aggressively (i.e. the ones prior to the two second sleep) and
// then will examine the new files for potential deletion.
// Since some of the newer files are not in the cache, it will delete those.
cleaner.executeAggressive("aggressiveCleanReclaimsTargetSpace()", sevenFilesSize);
// Poll until the cleaner's (asynchronous) run has finished.
Thread.sleep(400);
while (cleaner.isRunning())
{
Thread.sleep(200);
}
int numDeleted = 0;
for (File f : files)
{
if (!f.exists())
{
numDeleted++;
}
}
// How many were definitely deleted?
assertEquals("Wrong number of files deleted", 7 , numDeleted);
// The cleaner should have recorded the correct number of deletions
assertEquals("Incorrect number of deleted files", 7, cleaner.getNumFilesDeleted());
assertEquals("Incorrect total size of files deleted", sevenFilesSize, cleaner.getSizeFilesDeleted());
}
/**
 * After the aggressive phase has reclaimed its target, the cleaner must
 * revert to standard behaviour: only files not in the cache get deleted.
 * Files are timestamped via their directory path (minute = array index).
 */
@Test
public void standardCleanAfterAggressiveFinished() throws InterruptedException
{
// Don't use numFiles > 59! as we're using this for the minute element in the cache file path.
final int numFiles = 30;
File[] files = new File[numFiles];
for (int i = 0; i < numFiles; i++)
{
Calendar calendar = new GregorianCalendar(2010, 11, 2, 17, i);
if (i >= 21 && i <= 24)
{
// 21 to 24 will be deleted after the aggressive deletions (once the cleaner has returned
// to normal cleaning), because they are not in the cache.
files[i] = createCacheFile(calendar, UrlSource.NOT_PRESENT, false);
}
else
{
// All other files will be in the cache
files[i] = createCacheFile(calendar, UrlSource.REVERSE_CACHE_LOOKUP, true);
}
}
// How much space to reclaim - seven files worth (all files are same size)
long fileSize = files[0].length();
long sevenFilesSize = 7 * fileSize;
// We'll get it to clean seven files worth aggressively and then it will continue non-aggressively.
// It will delete the older files aggressively (i.e. even if they are actively in the cache) and
// then will examine the new files for potential deletion.
// Since some of the newer files are not in the cache, it will delete those too.
cleaner.executeAggressive("standardCleanAfterAggressiveFinished()", sevenFilesSize);
Thread.sleep(400);
while (cleaner.isRunning())
{
Thread.sleep(200);
}
for (int i = 0; i < numFiles; i++)
{
if (i < 7)
{
assertFalse("First 7 files should have been aggressively cleaned", files[i].exists());
}
if (i >= 21 && i <= 24)
{
assertFalse("Files with indexes 21-24 should have been deleted", files[i].exists());
}
}
// 7 aggressive deletions + 4 standard deletions (indexes 21-24) = 11.
assertEquals("Incorrect number of deleted files", 11, cleaner.getNumFilesDeleted());
assertEquals("Incorrect total size of files deleted", (11*fileSize), cleaner.getSizeFilesDeleted());
}
/**
 * Deleting the last file in a directory chain must remove the now-empty
 * parent directories all the way up.
 */
@Test
public void emptyParentDirectoriesAreDeleted() throws FileNotFoundException
{
cleaner.setMaxDeleteWatchCount(0);
File file = new File(cacheRoot, "243235984/a/b/c/d.bin");
file.getParentFile().mkdirs();
PrintWriter writer = new PrintWriter(file);
writer.println("Content for emptyParentDirectoriesAreDeleted");
writer.close();
assertTrue("Directory should exist", new File(cacheRoot, "243235984/a/b/c").exists());
// handle(file) is invoked directly - no need to run the full cleaner.
cleaner.handle(file);
assertFalse("Directory should have been deleted", new File(cacheRoot, "243235984").exists());
}
/**
 * A file is only deleted once it has been seen (and marked) by the cleaner
 * maxDeleteWatchCount times; watch counts are persisted in the companion
 * properties file.
 */
@Test
public void markedFilesHaveDeletionDeferredUntilCorrectPassOfCleaner()
{
// A non-advisable setting but useful for testing, maxDeleteWatchCount of zero
// which should result in immediate deletion upon discovery of content no longer in the cache.
cleaner.setMaxDeleteWatchCount(0);
File file = createCacheFile(UrlSource.NOT_PRESENT, false);
cleaner.handle(file);
checkFilesDeleted(file);
// Anticipated to be the most common setting: maxDeleteWatchCount of 1.
cleaner.setMaxDeleteWatchCount(1);
file = createCacheFile(UrlSource.NOT_PRESENT, false);
cleaner.handle(file);
checkWatchCountForCacheFile(file, 1);
cleaner.handle(file);
checkFilesDeleted(file);
// Check that some other arbitrary figure for maxDeleteWatchCount works correctly.
cleaner.setMaxDeleteWatchCount(3);
file = createCacheFile(UrlSource.NOT_PRESENT, false);
cleaner.handle(file);
checkWatchCountForCacheFile(file, 1);
cleaner.handle(file);
checkWatchCountForCacheFile(file, 2);
cleaner.handle(file);
checkWatchCountForCacheFile(file, 3);
cleaner.handle(file);
checkFilesDeleted(file);
}
// Asserts that both the cache file and its companion properties file are gone.
private void checkFilesDeleted(File file)
{
assertFalse("File should have been deleted: " + file, file.exists());
CacheFileProps props = new CacheFileProps(file);
assertFalse("Properties file should have been deleted, cache file: " + file, props.exists());
}
// Asserts that the file survived and its properties record the expected watch count.
private void checkWatchCountForCacheFile(File file, Integer expectedWatchCount)
{
assertTrue("File should still exist: " + file, file.exists());
CacheFileProps props = new CacheFileProps(file);
props.load();
assertEquals("File should contain correct deleteWatchCount", expectedWatchCount, props.getDeleteWatchCount());
}
/**
 * Content that IS in the cache (forward lookup present) must survive a
 * standard cleaner run.
 */
@Test
public void filesInCacheAreNotDeleted() throws InterruptedException
{
cleaner.setMaxDeleteWatchCount(0);
// The SlowContentStore will always give out content when asked,
// so asking for any content will cause something to be cached.
String url = makeContentUrl();
int numFiles = 50;
for (int i = 0; i < numFiles; i++)
{
ContentReader reader = cachingStore.getReader(url);
reader.getContentString();
}
cleaner.execute();
Thread.sleep(400);
while (cleaner.isRunning())
{
Thread.sleep(200);
}
for (int i = 0; i < numFiles; i++)
{
File cacheFile = new File(cache.getCacheFilePath(url));
assertTrue("File should exist", cacheFile.exists());
}
}
// Creates a cache file timestamped 'now'.
private File createCacheFile(UrlSource urlSource, boolean putInCache)
{
Calendar calendar = new GregorianCalendar();
return createCacheFile(calendar, urlSource, putInCache);
}
/**
 * Creates a sample cache file under a date-derived path, optionally registers
 * it in the cache's forward lookup, and records its URL according to urlSource.
 */
private File createCacheFile(Calendar calendar, /*int year, int month, int day, int hour, int minute,*/
UrlSource urlSource, boolean putInCache)
{
File file = new File(cacheRoot, createNewCacheFilePath(calendar));
file.getParentFile().mkdirs();
writeSampleContent(file);
String contentUrl = makeContentUrl();
if (putInCache)
{
cache.putIntoLookup(Key.forUrl(contentUrl), file.getAbsolutePath());
}
switch(urlSource)
{
case NOT_PRESENT:
// cache won't be able to determine original content URL for the file
break;
case PROPS_FILE:
// file with content URL in properties file
CacheFileProps props = new CacheFileProps(file);
props.setContentUrl(contentUrl);
props.store();
break;
case REVERSE_CACHE_LOOKUP:
// file with content URL in reverse lookup cache - but not 'in the cache' (forward lookup).
cache.putIntoLookup(Key.forCacheFile(file), contentUrl);
}
assertTrue("File should exist", file.exists());
return file;
}
/**
 * Mimick functionality of ContentCacheImpl.createNewCacheFilePath()
 * but allowing a specific date (rather than 'now') to be used.
 *
 * @param calendar
 * @return Path to use for cache file.
 */
private String createNewCacheFilePath(Calendar calendar)
{
int year = calendar.get(Calendar.YEAR);
int month = calendar.get(Calendar.MONTH) + 1; // 0-based
int day = calendar.get(Calendar.DAY_OF_MONTH);
int hour = calendar.get(Calendar.HOUR_OF_DAY);
int minute = calendar.get(Calendar.MINUTE);
// create the URL
StringBuilder sb = new StringBuilder(20);
sb.append(year).append('/')
.append(month).append('/')
.append(day).append('/')
.append(hour).append('/')
.append(minute).append('/')
.append(GUID.generate()).append(".bin");
return sb.toString();
}
// A made-up, unique content URL.
private String makeContentUrl()
{
return "protocol://some/made/up/url/" + GUID.generate();
}
// Writes a small line of sample text to the given file.
private void writeSampleContent(File file)
{
try
{
PrintWriter writer = new PrintWriter(file);
writer.println("Content for sample file in " + getClass().getName());
writer.close();
}
catch (Throwable e)
{
throw new RuntimeException("Couldn't write file: " + file, e);
}
}
}

View File

@@ -1,209 +0,0 @@
/*
* Copyright (C) 2005-2011 Alfresco Software Limited.
*
* This file is part of Alfresco
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
*/
package org.alfresco.repo.content.caching.quota;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import org.alfresco.repo.content.caching.cleanup.CachedContentCleaner;
import org.apache.commons.lang.reflect.FieldUtils;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.runners.MockitoJUnitRunner;
/**
* Tests for the StandardQuotaStrategy.
* @author Matt Ward
*/
@RunWith(MockitoJUnitRunner.class)
public class StandardQuotaStrategyMockTest
{
// The quota strategy under test, configured with a tiny 1000-byte limit.
private StandardQuotaStrategy quota;
@Mock
private CachedContentCleaner cleaner;
@Before
public void setUp() throws Exception
{
quota = new StandardQuotaStrategy();
// 1000 Bytes max. - unrealistic value but makes the figures easier.
quota.setMaxUsageBytes(1000);
quota.setMaxFileSizeMB(100);
quota.setCleaner(cleaner);
}
/**
 * setMaxUsageMB(n) must store n * 1024 * 1024 in the private maxUsageBytes
 * field (read back via reflection).
 */
@Test
public void testCanSetMaxUsageInMB() throws IllegalAccessException
{
quota.setMaxUsageMB(0);
assertEquals(0, ((Long) FieldUtils.readDeclaredField(quota, "maxUsageBytes", true)).longValue());
quota.setMaxUsageMB(500);
assertEquals(524288000, ((Long) FieldUtils.readDeclaredField(quota, "maxUsageBytes", true)).longValue());
// 1 GB
quota.setMaxUsageMB(1024);
assertEquals(1073741824, ((Long) FieldUtils.readDeclaredField(quota, "maxUsageBytes", true)).longValue());
}
/**
 * Writes are vetoed once (current usage + incoming size) would reach 900
 * bytes - i.e. 90% of the 1000-byte limit (the panic threshold).
 */
@Test
public void testPanicThresholdForBeforeWritingCacheFile()
{
quota.setCurrentUsageBytes(0);
assertTrue("Should allow writing of cache file", quota.beforeWritingCacheFile(899));
assertFalse("Should not allow writing of cache file", quota.beforeWritingCacheFile(900));
quota.setCurrentUsageBytes(890);
assertTrue("Should allow writing of cache file", quota.beforeWritingCacheFile(9));
assertFalse("Should not allow writing of cache file", quota.beforeWritingCacheFile(10));
quota.setCurrentUsageBytes(600);
assertTrue("Should allow writing of cache file", quota.beforeWritingCacheFile(299));
assertFalse("Should not allow writing of cache file", quota.beforeWritingCacheFile(300));
quota.setCurrentUsageBytes(899);
assertTrue("Should allow writing of cache file", quota.beforeWritingCacheFile(0));
assertFalse("Should not allow writing of cache file", quota.beforeWritingCacheFile(1));
// When the usage is already exceeding 100% of what is allowed
quota.setCurrentUsageBytes(2345);
assertFalse("Should not allow writing of cache file", quota.beforeWritingCacheFile(0));
assertFalse("Should not allow writing of cache file", quota.beforeWritingCacheFile(1));
assertFalse("Should not allow writing of cache file", quota.beforeWritingCacheFile(12300));
}
/**
 * afterWritingCacheFile(n) must add n bytes to the running usage estimate.
 */
@Test
public void afterWritingCacheFileDiskUsageUpdatedCorrectly()
{
quota.setCurrentUsageBytes(410);
quota.afterWritingCacheFile(40);
assertEquals("Incorrect usage estimate", 450, quota.getCurrentUsageBytes());
quota.afterWritingCacheFile(150);
assertEquals("Incorrect usage estimate", 600, quota.getCurrentUsageBytes());
}
@Ignore//cleaner.execute() happens async (in a new thread) so the verify fails
@Test
// Is the cleaner started when disk usage is over correct threshold?
public void testThresholdsAfterWritingCacheFile()
{
quota.setCurrentUsageBytes(0);
quota.afterWritingCacheFile(700);
Mockito.verify(cleaner, Mockito.never()).execute("quota (clean threshold)");
quota.setCurrentUsageBytes(700);
quota.afterWritingCacheFile(100);
Mockito.verify(cleaner).execute("quota (clean threshold)");
quota.setCurrentUsageBytes(999);
quota.afterWritingCacheFile(1);
Mockito.verify(cleaner).executeAggressive("quota (limit reached)", 700);
}
@Ignore//cleaner.execute() happens async (in a new thread) so the verify fails
@Test
public void testThresholdsBeforeWritingCacheFile()
{
quota.setCurrentUsageBytes(800);
quota.beforeWritingCacheFile(0);
Mockito.verify(cleaner, Mockito.never()).execute("quota (clean threshold)");
quota.setCurrentUsageBytes(900);
quota.beforeWritingCacheFile(0);
Mockito.verify(cleaner).execute("quota (panic threshold)");
}
/**
 * getMaxFileSizeBytes() must reflect setMaxFileSizeMB(n) as n * 1024 * 1024.
 */
@Test
public void canGetMaxFileSizeBytes()
{
quota.setMaxFileSizeMB(1024);
assertEquals("1GB incorrect", 1073741824L, quota.getMaxFileSizeBytes());
quota.setMaxFileSizeMB(0);
assertEquals("0MB incorrect", 0L, quota.getMaxFileSizeBytes());
}
/**
 * beforeWritingCacheFile must veto files larger than maxFileSizeMB
 * (0 disables the per-file limit; an unknown size of 0 is always allowed).
 */
@Test
public void attemptToWriteFileExceedingMaxFileSizeIsVetoed()
{
// Make sure the maxUsageMB doesn't interfere with the tests - set large value.
quota.setMaxUsageMB(4096);
// Zero for no max file size
quota.setMaxFileSizeMB(0);
assertTrue("File should be written", quota.beforeWritingCacheFile(1));
assertTrue("File should be written", quota.beforeWritingCacheFile(20971520));
// Anything > 0 should result in limit being applied
quota.setMaxFileSizeMB(1);
assertTrue("File should be written", quota.beforeWritingCacheFile(1048576));
assertFalse("File should be vetoed - too large", quota.beforeWritingCacheFile(1048577));
// Realistic scenario, 20 MB cutoff.
quota.setMaxFileSizeMB(20);
assertTrue("File should be written", quota.beforeWritingCacheFile(20971520));
assertFalse("File should be vetoed - too large", quota.beforeWritingCacheFile(20971521));
// Unknown (in advance) file size should always result in write
assertTrue("File should be written", quota.beforeWritingCacheFile(0));
}
/**
 * afterWritingCacheFile must report removal of files exceeding the max file
 * size, and only kept files count towards the usage estimate.
 */
@Test
public void afterFileWrittenExceedingMaxFileSizeFileIsDeleted()
{
// Zero for no max file size
quota.setMaxFileSizeMB(0);
assertTrue("File should be kept", quota.afterWritingCacheFile(1));
assertTrue("File should be kept", quota.afterWritingCacheFile(20971520));
// Both files were kept
assertEquals("Incorrect usage estimate", 20971521, quota.getCurrentUsageBytes());
// Realistic scenario, 20 MB cutoff.
quota.setMaxFileSizeMB(20);
quota.setCurrentUsageBytes(0);
assertTrue("File should be kept", quota.afterWritingCacheFile(20971520));
assertFalse("File should be removed", quota.afterWritingCacheFile(20971521));
// Only the first file was kept
assertEquals("Incorrect usage estimate", 20971520, quota.getCurrentUsageBytes());
}
/**
 * getCurrentUsageMB() must convert the byte count to megabytes.
 */
@Test
public void testCurrentUsageMB()
{
quota.setCurrentUsageBytes(524288);
assertEquals(0.5f, quota.getCurrentUsageMB(), 0);
quota.setCurrentUsageBytes(1048576);
assertEquals(1.0f, quota.getCurrentUsageMB(), 0);
quota.setCurrentUsageBytes(53262546);
assertEquals(50.795f, quota.getCurrentUsageMB(), 0.001);
}
}

View File

@@ -1,268 +0,0 @@
/*
* Copyright (C) 2005-2011 Alfresco Software Limited.
*
* This file is part of Alfresco
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
*/
package org.alfresco.repo.content.caching.quota;
import static org.junit.Assert.assertEquals;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import org.alfresco.repo.content.ContentContext;
import org.alfresco.repo.content.caching.CachingContentStore;
import org.alfresco.repo.content.caching.ContentCacheImpl;
import org.alfresco.repo.content.caching.cleanup.CachedContentCleaner;
import org.alfresco.service.cmr.repository.ContentWriter;
import org.alfresco.util.ApplicationContextHelper;
import org.alfresco.util.GUID;
import org.alfresco.util.TempFileProvider;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.comparator.SizeFileComparator;
import org.apache.commons.io.filefilter.SuffixFileFilter;
import org.apache.commons.io.filefilter.TrueFileFilter;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.springframework.context.ApplicationContext;
/**
* Tests for the StandardQuotaStrategy.
* @author Matt Ward
*/
public class StandardQuotaStrategyTest
{
private static ApplicationContext ctx;
private CachingContentStore store;
private static byte[] aKB;
private ContentCacheImpl cache;
private File cacheRoot;
private StandardQuotaStrategy quota;
private CachedContentCleaner cleaner;
@BeforeClass
public static void beforeClass()
{
ctx = ApplicationContextHelper.getApplicationContext(new String[]
{
"classpath:cachingstore/test-std-quota-context.xml"
});
aKB = new byte[1024];
Arrays.fill(aKB, (byte) 36);
}
@AfterClass
public static void afterClass()
{
ApplicationContextHelper.closeApplicationContext();
}
@Before
public void setUp() throws Exception
{
store = (CachingContentStore) ctx.getBean("cachingContentStore");
store.setCacheOnInbound(true);
cache = (ContentCacheImpl) ctx.getBean("contentCache");
cacheRoot = cache.getCacheRoot();
quota = (StandardQuotaStrategy) ctx.getBean("quotaManager");
quota.setCurrentUsageBytes(0);
cleaner = (CachedContentCleaner) ctx.getBean("cachedContentCleaner");
// Empty the in-memory cache
cache.removeAll();
FileUtils.cleanDirectory(cacheRoot);
}
@Test
public void cleanerWillTriggerAtCorrectThreshold() throws IOException, InterruptedException
{
// Write 15 x 1MB files. This will not trigger any quota related actions.
// Quota is 20MB. The quota manager will...
// * start the cleaner at 16MB (80% of 20MB)
// * refuse to cache any more files at 18MB (90% of 20MB)
for (int i = 0; i < 15; i++)
{
writeSingleFileInMB(1);
}
// All 15 should be retained.
assertEquals(15, findCacheFiles().size());
// Writing one more file should trigger a clean.
writeSingleFileInMB(1);
Thread.sleep(200);
while (cleaner.isRunning())
{
Thread.sleep(50);
}
// As the cache is set to contain a max of 12 items in-memory (see cachingContentStoreCache
// definition in test-std-quota-context.xml) and 2 cache items are required per cached content URL
// then after the cleaner has processed the tree there will 6 items left on disk (12/2).
assertEquals(6, findCacheFiles().size());
}
@Test
public void cachingIsDisabledAtCorrectThreshold() throws IOException
{
// Write 4 x 6MB files.
for (int i = 0; i < 4; i++)
{
writeSingleFileInMB(6);
}
// Only the first 3 are cached - caching is disabled after that as
// the panic threshold has been reached.
assertEquals(3, findCacheFiles().size());
}
@SuppressWarnings("unchecked")
@Test
public void largeContentCacheFilesAreNotKeptOnDisk() throws IOException
{
    // Files larger than this limit must not survive in the disk cache.
    quota.setMaxFileSizeMB(3);

    // Write files of 1, 2, 3 and 4 MB - only the last exceeds the limit.
    for (int sizeMB = 1; sizeMB <= 4; sizeMB++)
    {
        writeSingleFileInMB(sizeMB);
    }

    List<File> files = new ArrayList<File>(findCacheFiles());
    assertEquals(3, files.size());

    // Sort smallest-first so the surviving sizes can be checked deterministically.
    Collections.sort(files, SizeFileComparator.SIZE_COMPARATOR);
    for (int i = 0; i < files.size(); i++)
    {
        assertEquals(i + 1, files.get(i).length() / FileUtils.ONE_MB);
    }
}
private void writeSingleFileInMB(int sizeInMb) throws IOException
{
    // Generate a temporary file of the requested size and stream it into the
    // caching content store.
    File content = createFileOfSize(sizeInMb * 1024);
    ContentWriter writer = store.getWriter(ContentContext.NULL_CONTEXT);
    writer.putContent(content);
}
/**
 * Creates a temporary file of the given size, filled with the contents of the
 * shared 1KB buffer {@code aKB}. The file is marked for deletion on JVM exit.
 *
 * @param sizeInKB desired file size in kilobytes
 * @return the generated temporary file
 * @throws IOException if the file cannot be created or written
 */
private File createFileOfSize(long sizeInKB) throws IOException
{
    File file = new File(TempFileProvider.getSystemTempDir(), GUID.generate() + ".generated");
    file.deleteOnExit();
    BufferedOutputStream os = new BufferedOutputStream(new FileOutputStream(file));
    // Close in a finally block so the stream (and file handle) is not leaked
    // if a write fails part-way through.
    try
    {
        for (long i = 0; i < sizeInKB; i++)
        {
            os.write(aKB);
        }
    }
    finally
    {
        os.close();
    }
    return file;
}
@SuppressWarnings("unchecked")
private Collection<File> findCacheFiles()
{
// Recursively list every cached content file (*.bin) under the cache root.
return FileUtils.listFiles(cacheRoot, new SuffixFileFilter(".bin"), TrueFileFilter.INSTANCE);
}
/**
 * Not a unit test, but useful to fire up a lot of writers that will push the
 * CachingContentStore's StandardQuotaStrategy beyond the panic threshold. The
 * behaviour can then be monitored with, for example, a profiler.
 * <p>
 * NOTE(review): the writer threads loop forever and are never joined or
 * stopped - this method returns while they are still running. That is
 * acceptable for a manually-driven smoke test but would be a defect anywhere else.
 *
 * @throws Exception
 */
private void concurrencySmokeTest() throws Exception
{
// Reuse the normal test fixture set-up (Spring context, clean cache, etc.).
StandardQuotaStrategyTest.beforeClass();
setUp();
// Need to set maxDeleteWatch count to > 0
// (0 is useful in unit tests, but for real usage must not be used)
cleaner.setMaxDeleteWatchCount(1);
final int numThreads = 100;
// Kept for reference only; the threads are deliberately never joined.
Thread[] writers = new Thread[numThreads];
for (int i = 0; i < numThreads; i++)
{
final String threadName = "WriterThread[" + i + "]";
Runnable runnable = new Runnable()
{
@Override
public void run()
{
// Write 1MB files indefinitely, pausing a random interval between writes.
while (true)
{
writeFile();
pause();
}
}
// Write a single 1MB file through the caching store; any IO failure aborts the thread.
private void writeFile()
{
try
{
writeSingleFileInMB(1);
}
catch (IOException error)
{
throw new RuntimeException(threadName + " couldn't write file.", error);
}
}
// Sleep for a random 0-2s interval to stagger the writers.
private void pause()
{
long pauseTimeMillis = Math.round(Math.random() * 2000);
try
{
Thread.sleep(pauseTimeMillis);
}
catch (InterruptedException error)
{
// Swallow the exception and carry on.
System.out.println(threadName + " InterruptedException.");
}
}
};
Thread writerThread = new Thread(runnable);
writerThread.setName(threadName);
writers[i] = writerThread;
writerThread.start();
}
}
// Entry point for running the concurrency smoke test manually (e.g. under a
// profiler); not executed as part of the automated unit test suite.
public static void main(String[] args) throws Exception
{
StandardQuotaStrategyTest test = new StandardQuotaStrategyTest();
test.concurrencySmokeTest();
}
}

View File

@@ -1,55 +0,0 @@
/*
* Copyright (C) 2005-2011 Alfresco Software Limited.
*
* This file is part of Alfresco
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
*/
package org.alfresco.repo.content.caching.quota;
import static org.junit.Assert.*;
import org.junit.Before;
import org.junit.Test;
/**
* Tests for the UnlimitedQuotaStrategy class.
*
* @author Matt Ward
*/
public class UnlimitedQuotaStrategyTest
{
    private UnlimitedQuotaStrategy quota;

    @Before
    public void setUp()
    {
        // Fresh instance per test; the strategy is stateless so this is purely hygiene.
        quota = new UnlimitedQuotaStrategy();
    }

    @Test
    public void beforeWritingCacheFile()
    {
        // An unlimited quota must never veto a cache write, whatever the usage figure.
        assertTrue("Should always allow caching", quota.beforeWritingCacheFile(0));
        assertTrue("Should always allow caching", quota.beforeWritingCacheFile(Long.MAX_VALUE));
    }

    @Test
    public void afterWritingCacheFile()
    {
        // Likewise, a file that has been written must always be allowed to remain.
        assertTrue("Should always allow cache file to remain", quota.afterWritingCacheFile(0));
        assertTrue("Should always allow cache file to remain", quota.afterWritingCacheFile(Long.MAX_VALUE));
    }
}

View File

@@ -1,133 +0,0 @@
/*
* Copyright (C) 2005-2011 Alfresco Software Limited.
*
* This file is part of Alfresco
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
*/
package org.alfresco.repo.content.caching.test;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import org.alfresco.repo.content.caching.CachingContentStore;
import org.alfresco.service.cmr.repository.ContentReader;
import org.alfresco.util.ApplicationContextHelper;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.springframework.context.ApplicationContext;
/**
* Tests to ensure that the CachingContentStore works as expected under highly concurrent load.
*
* @author Matt Ward
*/
public class ConcurrentCachingStoreTest
{
    private static final Log log = LogFactory.getLog(ConcurrentCachingStoreTest.class);

    // NUM_THREADS must be at least 2 x NUM_URLS to ensure each URL is accessed by more than one thread.
    private static final int NUM_THREADS = 200;
    private static final int NUM_URLS = 40;

    private ApplicationContext ctx;
    private CachingContentStore store;
    private SlowContentStore backingStore;

    @Before
    public void setUp()
    {
        ctx = ApplicationContextHelper.getApplicationContext(new String[]
                {
                    "classpath:cachingstore/test-context.xml",
                    "classpath:cachingstore/test-slow-context.xml"
                });
        store = (CachingContentStore) ctx.getBean("cachingContentStore");
        store.setCacheOnInbound(false);
        backingStore = (SlowContentStore) ctx.getBean("backingStore");
    }

    @Test
    public void concurrentReadsWillReadCacheOncePerURL() throws InterruptedException
    {
        // Attack with multiple threads, several of which share each URL.
        Thread[] threads = new Thread[NUM_THREADS];
        for (int threadNum = 0; threadNum < NUM_THREADS; threadNum++)
        {
            threads[threadNum] = new CacheReaderThread(threadNum, NUM_URLS);
            threads[threadNum].start();
        }
        for (Thread thread : threads)
        {
            thread.join();
        }

        log.debug("\nResults:");
        // Count URLs that reached the slow backing store more than once.
        int failedURLs = 0;
        for (Map.Entry<String, AtomicLong> entry : backingStore.getUrlHits().entrySet())
        {
            String url = entry.getKey();
            long numHits = entry.getValue().get();
            log.debug("URL: " + url + ", hits: " + numHits);
            if (numHits > 1)
            {
                failedURLs++;
            }
        }
        // If any of the URLs were accessed more than once, then the test will fail.
        if (failedURLs > 0)
        {
            Assert.fail(failedURLs + " URLs were requested more than once.");
        }
    }

    private class CacheReaderThread extends Thread
    {
        private final int threadNum;
        private final int numUrls;
        private int reads = 50;

        CacheReaderThread(int threadNum, int numUrls)
        {
            super(CacheReaderThread.class.getSimpleName() + "-" + threadNum);
            this.threadNum = threadNum;
            this.numUrls = numUrls;
        }

        @Override
        public void run()
        {
            // Perform a fixed number of reads; this thread always hits the same URL.
            for (; reads > 0; reads--)
            {
                String url = generateUrlToRead();
                String content = store.getReader(url).getContentString();
                log.debug("Thread: " + getName() + ", URL: " + url + ", content: " + content);
            }
        }

        private String generateUrlToRead()
        {
            // threadNum % numUrls guarantees multiple threads map onto each URL.
            return "store://2010/11/5/17/33/" + (threadNum % numUrls) + ".bin";
        }
    }
}

View File

@@ -1,264 +0,0 @@
/*
* Copyright (C) 2005-2011 Alfresco Software Limited.
*
* This file is part of Alfresco
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
*/
package org.alfresco.repo.content.caching.test;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.WritableByteChannel;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;
import org.alfresco.repo.content.AbstractContentReader;
import org.alfresco.repo.content.AbstractContentStore;
import org.alfresco.repo.content.AbstractContentWriter;
import org.alfresco.repo.content.filestore.FileContentStore;
import org.alfresco.service.cmr.repository.ContentIOException;
import org.alfresco.service.cmr.repository.ContentReader;
import org.alfresco.service.cmr.repository.ContentWriter;
/**
* Package-private class used only for testing the CachingContentStore.
* <p>
* Simulates a slow content store such as Amazon S3 or XAM. The ContentStore does not provide
* genuine facilities to store or retrieve content.
* <p>
* Asking for content using {@link #getReader(String)} will result in (generated) content
* being retrieved for any URL. A counter records how many times each arbitrary URL has been asked for.
* <p>
* Attempts to write content using any of the getWriter() methods will succeed. Though the content does not actually
* get stored anywhere.
* <p>
* Both reads and writes are slow - the readers and writers returned by this class sleep for {@link #setPauseMillis(int) pauseMillis} after
* each operation.
*
* @author Matt Ward
*/
class SlowContentStore extends AbstractContentStore
{
// Per-URL read counters; see registerReadAttempt() in SlowReader.
private ConcurrentMap<String, AtomicLong> urlHits = new ConcurrentHashMap<String, AtomicLong>();
// Sleep (ms) applied on every single-byte channel read/write; see setPauseMillis(int).
private int pauseMillis = 50;
@Override
public boolean isWriteSupported()
{
return true;
}
@Override
public ContentReader getReader(String contentUrl)
{
// Any URL "exists" as far as reading goes - content is generated on demand.
return new SlowReader(contentUrl);
}
@Override
protected ContentWriter getWriterInternal(ContentReader existingContentReader, String newContentUrl)
{
if (newContentUrl == null)
newContentUrl = FileContentStore.createNewFileStoreUrl() + ".slow";
return new SlowWriter(newContentUrl, existingContentReader);
}
@Override
public boolean exists(String contentUrl)
{
// Deliberately always false: nothing is genuinely stored by this test store.
return false;
}
// Writer that accepts up to 200 bytes, sleeping pauseMillis per byte; the data is discarded.
private class SlowWriter extends AbstractContentWriter
{
protected SlowWriter(String contentUrl, ContentReader existingContentReader)
{
super(contentUrl, existingContentReader);
}
@Override
public long getSize()
{
// Fixed fake size; no content is actually retained.
return 20;
}
@Override
protected ContentReader createReader() throws ContentIOException
{
return new SlowReader(getContentUrl());
}
@Override
protected WritableByteChannel getDirectWritableChannel() throws ContentIOException
{
return new WritableByteChannel()
{
private boolean closed = false;
// Remaining capacity in bytes; writes report 0 consumed once exhausted.
private int left = 200;
@Override
public boolean isOpen()
{
return !closed;
}
@Override
public void close() throws IOException
{
closed = true;
}
@Override
public int write(ByteBuffer src) throws IOException
{
// Simulate a slow remote store: sleep before consuming each byte.
try
{
Thread.sleep(pauseMillis);
}
catch (InterruptedException error)
{
throw new RuntimeException(error);
}
if (left > 0)
{
// Consume exactly one byte per call and report it written.
src.get();
left--;
return 1;
}
return 0;
}
};
}
}
// Reader that serves a fixed generated string one byte at a time, sleeping pauseMillis per byte.
private class SlowReader extends AbstractContentReader
{
protected SlowReader(String contentUrl)
{
super(contentUrl);
}
@Override
public boolean exists()
{
return true;
}
@Override
public long getLastModified()
{
return 0L;
}
@Override
public long getSize()
{
// Fixed fake size - intentionally unrelated to the generated content length.
return 20;
}
@Override
protected ContentReader createReader() throws ContentIOException
{
return new SlowReader(getContentUrl());
}
@Override
protected ReadableByteChannel getDirectReadableChannel() throws ContentIOException
{
return new ReadableByteChannel()
{
private final byte[] content = "This is the content for my slow ReadableByteChannel".getBytes();
private int index = 0;
private boolean closed = false;
// Ensures each channel counts at most one "hit" against its URL.
private boolean readCounted = false;
private synchronized void registerReadAttempt()
{
if (!readCounted)
{
// A true attempt to read from this ContentReader - update statistics.
String url = getContentUrl();
urlHits.putIfAbsent(url, new AtomicLong(0));
urlHits.get(url).incrementAndGet();
readCounted = true;
}
}
@Override
public boolean isOpen()
{
return !closed;
}
@Override
public void close() throws IOException
{
closed = true;
}
@Override
public int read(ByteBuffer dst) throws IOException
{
registerReadAttempt();
if (index < content.length)
{
// Simulate slowness: sleep before delivering each byte.
try
{
Thread.sleep(pauseMillis);
}
catch (InterruptedException error)
{
throw new RuntimeException(error);
}
dst.put(content[index++]);
return 1;
}
else
{
// End of generated content.
return 0;
}
}
};
}
}
/**
 * Get statistics for which URLs have been asked for and the frequencies.
 *
 * @return Map of URL to frequency
 */
public ConcurrentMap<String, AtomicLong> getUrlHits()
{
return this.urlHits;
}
/**
 * Length of time in milliseconds that ReadableByteChannel and WriteableByteChannel objects returned
 * by SlowContentStore will pause for during read and write operations respectively.
 *
 * @param pauseMillis
 */
public void setPauseMillis(int pauseMillis)
{
this.pauseMillis = pauseMillis;
}
}

View File

@@ -1,139 +0,0 @@
/*
* Copyright (C) 2005-2011 Alfresco Software Limited.
*
* This file is part of Alfresco
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
*/
package org.alfresco.repo.content.caching.test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import org.alfresco.repo.content.ContentContext;
import org.alfresco.repo.content.caching.CachingContentStore;
import org.alfresco.util.ApplicationContextHelper;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.Test;
import org.springframework.context.support.ClassPathXmlApplicationContext;
/**
* Tests that exercise the CachingContentStore in conjunction with a backing store
* that runs deliberately slowly.
*
* @author Matt Ward
*/
public class SlowContentStoreTest
{
    private ClassPathXmlApplicationContext ctx;
    private CachingContentStore cachingStore;
    private static final Log logger = LogFactory.getLog(SlowContentStoreTest.class);

    public SlowContentStoreTest()
    {
        // Wire the caching store in front of the deliberately slow backing store.
        String conf = "classpath:cachingstore/test-context.xml";
        String slowconf = "classpath:cachingstore/test-slow-context.xml";
        ctx = (ClassPathXmlApplicationContext) ApplicationContextHelper.getApplicationContext(new String[] { conf, slowconf });
        cachingStore = (CachingContentStore) ctx.getBean("cachingContentStore");
        cachingStore.setCacheOnInbound(false);
    }

    /**
     * The first read must go to the slow backing store; subsequent reads of the
     * same URL must be served quickly from the cache.
     */
    @Test
    public void readsAreFasterFromCache()
    {
        // First read will hit the SLOW backing store
        TimedStoreReader storeReader = new TimedStoreReader();
        storeReader.execute();
        assertTrue("First read should take a while", storeReader.timeTakenMillis() > 1000);
        // timeTakenMillis() is in milliseconds, hence the "ms" unit in the message.
        logger.debug(String.format("First read took %dms", storeReader.timeTakenMillis()));
        // The content came from the slow backing store...
        assertEquals("This is the content for my slow ReadableByteChannel", storeReader.content);

        // Subsequent reads will hit the cache
        for (int i = 0; i < 5; i++)
        {
            storeReader = new TimedStoreReader();
            storeReader.execute();
            assertTrue("Subsequent reads should be fast", storeReader.timeTakenMillis() < 100);
            logger.debug(String.format("Cache read took %dms", storeReader.timeTakenMillis()));
            // The content came from the slow backing store, but was cached...
            assertEquals("This is the content for my slow ReadableByteChannel", storeReader.content);
        }
    }

    /**
     * With cache-on-inbound enabled, content written through the caching store is
     * cached immediately, so even the very first read is fast.
     */
    @Test
    public void writeThroughCacheResultsInFastReadFirstTime()
    {
        cachingStore.setCacheOnInbound(true);

        // This content will be cached on the way in
        cachingStore.getWriter(new ContentContext(null, "any-url")).
            putContent("Content written from " + getClass().getSimpleName());

        // First read will hit cache
        TimedStoreReader storeReader = new TimedStoreReader();
        storeReader.execute();
        assertTrue("First read should be fast", storeReader.timeTakenMillis() < 100);
        logger.debug(String.format("First read took %dms", storeReader.timeTakenMillis()));
        assertEquals("Content written from " + getClass().getSimpleName(), storeReader.content);

        // Subsequent reads will also hit the cache
        for (int i = 0; i < 5; i++)
        {
            storeReader = new TimedStoreReader();
            storeReader.execute();
            assertTrue("Subsequent reads should be fast", storeReader.timeTakenMillis() < 100);
            logger.debug(String.format("Cache read took %dms", storeReader.timeTakenMillis()));
            // The original cached content, still cached...
            assertEquals("Content written from " + getClass().getSimpleName(), storeReader.content);
        }
    }

    // Reads the fixed test URL from the caching store, timing the operation.
    private class TimedStoreReader extends TimedExecutor
    {
        String content;

        @Override
        protected void doExecute()
        {
            content = cachingStore.getReader("any-url").getContentString();
            logger.debug("Read content: " + content);
        }
    }

    // Template for timing a single operation with System.currentTimeMillis().
    private abstract class TimedExecutor
    {
        private long start;
        private long finish;

        public void execute()
        {
            start = System.currentTimeMillis();
            doExecute();
            finish = System.currentTimeMillis();
        }

        public long timeTakenMillis()
        {
            return finish - start;
        }

        protected abstract void doExecute();
    }
}