diff --git a/config/alfresco/avm-services-context.xml b/config/alfresco/avm-services-context.xml
index 3168fd869c..67bb69c61f 100644
--- a/config/alfresco/avm-services-context.xml
+++ b/config/alfresco/avm-services-context.xml
@@ -147,7 +147,7 @@
-
+
diff --git a/config/alfresco/cache-context.xml b/config/alfresco/cache-context.xml
index 1f8f04b81e..85beadddb6 100644
--- a/config/alfresco/cache-context.xml
+++ b/config/alfresco/cache-context.xml
@@ -245,9 +245,13 @@
-
+
+
+
+
+
-
+
@@ -260,9 +264,11 @@
-
+
+
+
-
+
@@ -273,7 +279,44 @@
50
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ org.alfresco.repo.webservices.querySessionSharedCache
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ org.alfresco.repo.webservices.querySessionTransactionalCache
+
+
+ 50
+
+
+
diff --git a/config/alfresco/core-services-context.xml b/config/alfresco/core-services-context.xml
index ed29ca3e85..5ef697b7fc 100644
--- a/config/alfresco/core-services-context.xml
+++ b/config/alfresco/core-services-context.xml
@@ -360,6 +360,15 @@
+
+
+ 10
+
+
+ 5
+
+
+
EXACT_LANGUAGE_AND_ALL
-
+
+
+
@@ -481,7 +492,9 @@
EXACT_LANGUAGE_AND_ALL
-
+
+
+
diff --git a/config/alfresco/ehcache-default.xml b/config/alfresco/ehcache-default.xml
index cd018c0a48..7718efcfd2 100644
--- a/config/alfresco/ehcache-default.xml
+++ b/config/alfresco/ehcache-default.xml
@@ -256,6 +256,19 @@
/>
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/source/java/org/alfresco/filesys/smb/SMBErrorText.java b/source/java/org/alfresco/filesys/smb/SMBErrorText.java
index dcc3e97622..3c8e7f128a 100644
--- a/source/java/org/alfresco/filesys/smb/SMBErrorText.java
+++ b/source/java/org/alfresco/filesys/smb/SMBErrorText.java
@@ -740,6 +740,9 @@ public final class SMBErrorText
case SMBStatus.NTPipeBusy:
errtext = "Pipe is busy";
break;
+ case SMBStatus.NTInvalidLevel:
+ errtext = "Invalid information level";
+ break;
default:
errtext = "Unknown NT status 0x" + Integer.toHexString(errcode);
break;
diff --git a/source/java/org/alfresco/filesys/smb/SMBStatus.java b/source/java/org/alfresco/filesys/smb/SMBStatus.java
index 5ba4c50155..5c3d93f2d4 100644
--- a/source/java/org/alfresco/filesys/smb/SMBStatus.java
+++ b/source/java/org/alfresco/filesys/smb/SMBStatus.java
@@ -252,6 +252,7 @@ public final class SMBStatus
public static final int NTNoSuchDomain = 0xC00000DF;
public static final int NTTooManyOpenFiles = 0xC000011F;
public static final int NTCancelled = 0xC0000120;
+ public static final int NTInvalidLevel = 0xC0000148;
public static final int NTFileOffline = 0xC0000267;
public static final int Win32FileNotFound = 2;
diff --git a/source/java/org/alfresco/filesys/smb/server/NTProtocolHandler.java b/source/java/org/alfresco/filesys/smb/server/NTProtocolHandler.java
index ed2a78f731..ed2c98c236 100644
--- a/source/java/org/alfresco/filesys/smb/server/NTProtocolHandler.java
+++ b/source/java/org/alfresco/filesys/smb/server/NTProtocolHandler.java
@@ -3477,7 +3477,7 @@ public class NTProtocolHandler extends CoreProtocolHandler
// Requested information level is not supported
- m_sess.sendErrorResponseSMB(SMBStatus.SRVNotSupported, SMBStatus.ErrSrv);
+ m_sess.sendErrorResponseSMB(SMBStatus.NTInvalidLevel, SMBStatus.SRVNotSupported, SMBStatus.ErrSrv);
}
}
@@ -3723,7 +3723,7 @@ public class NTProtocolHandler extends CoreProtocolHandler
// Requested information level is not supported
- m_sess.sendErrorResponseSMB(SMBStatus.SRVNotSupported, SMBStatus.ErrSrv);
+ m_sess.sendErrorResponseSMB(SMBStatus.NTInvalidLevel, SMBStatus.SRVNotSupported, SMBStatus.ErrSrv);
}
}
@@ -4197,7 +4197,7 @@ public class NTProtocolHandler extends CoreProtocolHandler
// Requested information level is not supported
- m_sess.sendErrorResponseSMB(SMBStatus.NTInvalidParameter, SMBStatus.SRVNonSpecificError, SMBStatus.ErrSrv);
+ m_sess.sendErrorResponseSMB(SMBStatus.NTInvalidLevel, SMBStatus.SRVNonSpecificError, SMBStatus.ErrSrv);
return;
}
}
@@ -4412,7 +4412,7 @@ public class NTProtocolHandler extends CoreProtocolHandler
// Requested information level is not supported
- m_sess.sendErrorResponseSMB(SMBStatus.NTInvalidParameter, SMBStatus.SRVNonSpecificError, SMBStatus.ErrSrv);
+ m_sess.sendErrorResponseSMB(SMBStatus.NTInvalidLevel, SMBStatus.SRVNonSpecificError, SMBStatus.ErrSrv);
return;
}
}
diff --git a/source/java/org/alfresco/repo/node/BaseNodeServiceTest.java b/source/java/org/alfresco/repo/node/BaseNodeServiceTest.java
index 7be168d986..edf13d0522 100644
--- a/source/java/org/alfresco/repo/node/BaseNodeServiceTest.java
+++ b/source/java/org/alfresco/repo/node/BaseNodeServiceTest.java
@@ -150,6 +150,7 @@ public abstract class BaseNodeServiceTest extends BaseSpringTest
protected NodeService nodeService;
/** populated during setup */
protected NodeRef rootNodeRef;
+ private NodeRef cat;
@Override
protected void onSetUpInTransaction() throws Exception
@@ -187,6 +188,13 @@ public abstract class BaseNodeServiceTest extends BaseSpringTest
"Test_" + System.currentTimeMillis());
rootNodeRef = nodeService.getRootNode(storeRef);
+ StoreRef catStoreRef = nodeService.createStore(
+ StoreRef.PROTOCOL_WORKSPACE,
+ "Test_cat_" + System.currentTimeMillis());
+ NodeRef catRootNodeRef = nodeService.getRootNode(catStoreRef);
+
+ cat = nodeService.createNode(catRootNodeRef, ContentModel.ASSOC_CHILDREN, QName.createQName("{namespace}cat"), ContentModel.TYPE_CATEGORY).getChildRef();
+
// downgrade integrity checks
IntegrityChecker.setWarnInTransaction();
}
@@ -1142,7 +1150,7 @@ public abstract class BaseNodeServiceTest extends BaseSpringTest
properties.put(PROP_QNAME_QNAME_VALUE, TYPE_QNAME_TEST_CONTENT);
properties.put(PROP_QNAME_PATH_VALUE, pathProperty);
properties.put(PROP_QNAME_CONTENT_VALUE, new ContentData("url", "text/plain", 88L, "UTF-8"));
- properties.put(PROP_QNAME_CATEGORY_VALUE, rootNodeRef);
+ properties.put(PROP_QNAME_CATEGORY_VALUE, cat);
properties.put(PROP_QNAME_LOCALE_VALUE, Locale.CHINESE);
properties.put(PROP_QNAME_NULL_VALUE, null);
properties.put(PROP_QNAME_MULTI_VALUE, listProperty);
@@ -1180,7 +1188,7 @@ public abstract class BaseNodeServiceTest extends BaseSpringTest
properties.put(PROP_QNAME_QNAME_VALUE, TYPE_QNAME_TEST_CONTENT);
properties.put(PROP_QNAME_PATH_VALUE, pathProperty);
properties.put(PROP_QNAME_CONTENT_VALUE, new ContentData("url", "text/plain", 88L, "UTF-8"));
- properties.put(PROP_QNAME_CATEGORY_VALUE, rootNodeRef);
+ properties.put(PROP_QNAME_CATEGORY_VALUE, cat);
properties.put(PROP_QNAME_LOCALE_VALUE, Locale.CHINESE);
properties.put(PROP_QNAME_NULL_VALUE, null);
properties.put(PROP_QNAME_MULTI_VALUE, listProperty);
diff --git a/source/java/org/alfresco/repo/search/impl/lucene/AbstractLuceneIndexerAndSearcherFactory.java b/source/java/org/alfresco/repo/search/impl/lucene/AbstractLuceneIndexerAndSearcherFactory.java
index ce69c4f7d9..d6041ec7b1 100644
--- a/source/java/org/alfresco/repo/search/impl/lucene/AbstractLuceneIndexerAndSearcherFactory.java
+++ b/source/java/org/alfresco/repo/search/impl/lucene/AbstractLuceneIndexerAndSearcherFactory.java
@@ -29,6 +29,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.ThreadPoolExecutor;
import javax.transaction.RollbackException;
import javax.transaction.SystemException;
@@ -128,6 +129,8 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
private MLAnalysisMode defaultMLSearchAnalysisMode = MLAnalysisMode.EXACT_LANGUAGE_AND_ALL;
+ private ThreadPoolExecutor threadPoolExecutor;
+
/**
* Private constructor for the singleton TODO: FIt in with IOC
*/
@@ -137,11 +140,11 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
super();
}
- /**
- * Set the directory that contains the indexes
- *
- * @param indexRootLocation
- */
+ /**
+ * Set the directory that contains the indexes
+ *
+ * @param indexRootLocation
+ */
public void setIndexRootLocation(String indexRootLocation)
{
@@ -838,6 +841,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
/**
* Set the lucene write lock timeout
+ *
* @param timeout
*/
public void setWriteLockTimeout(long timeout)
@@ -847,6 +851,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
/**
* Set the lucene commit lock timeout (no longer used with lucene 2.1)
+ *
* @param timeout
*/
public void setCommitLockTimeout(long timeout)
@@ -856,6 +861,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
/**
* Get the commit lock timout.
+ *
* @return - the timeout
*/
public long getCommitLockTimeout()
@@ -864,7 +870,8 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
}
/**
- * Get the write lock timeout
+ * Get the write lock timeout
+ *
* @return - the timeout in ms
*/
public long getWriteLockTimeout()
@@ -884,6 +891,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
/**
* Get the max number of tokens in the field
+ *
* @return - the max tokens considered.
*/
public int getIndexerMaxFieldLength()
@@ -893,6 +901,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
/**
* Set the max field length.
+ *
* @param indexerMaxFieldLength
*/
public void setIndexerMaxFieldLength(int indexerMaxFieldLength)
@@ -900,6 +909,16 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
this.indexerMaxFieldLength = indexerMaxFieldLength;
}
+ public ThreadPoolExecutor getThreadPoolExecutor()
+ {
+ return this.threadPoolExecutor;
+ }
+
+ public void setThreadPoolExecutor(ThreadPoolExecutor threadPoolExecutor)
+ {
+ this.threadPoolExecutor = threadPoolExecutor;
+ }
+
/**
* This component is able to safely perform backups of the Lucene indexes while the server is running.
*
@@ -922,7 +941,6 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
/**
* Default constructor
- *
*/
public LuceneIndexBackupComponent()
{
@@ -1082,8 +1100,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
// make sure the rename worked
if (!targetDir.exists())
{
- throw new AlfrescoRuntimeException(
- "Failed to rename temporary directory to target backup directory");
+ throw new AlfrescoRuntimeException("Failed to rename temporary directory to target backup directory");
}
}
}
@@ -1105,8 +1122,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
public void execute(JobExecutionContext context) throws JobExecutionException
{
JobDataMap jobData = context.getJobDetail().getJobDataMap();
- LuceneIndexBackupComponent backupComponent = (LuceneIndexBackupComponent) jobData
- .get(KEY_LUCENE_INDEX_BACKUP_COMPONENT);
+ LuceneIndexBackupComponent backupComponent = (LuceneIndexBackupComponent) jobData.get(KEY_LUCENE_INDEX_BACKUP_COMPONENT);
if (backupComponent == null)
{
throw new JobExecutionException("Missing job data: " + KEY_LUCENE_INDEX_BACKUP_COMPONENT);
@@ -1139,6 +1155,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
/**
* Set the ML analysis mode at search time
+ *
* @param mode
*/
public void setDefaultMLSearchAnalysisMode(MLAnalysisMode mode)
diff --git a/source/java/org/alfresco/repo/search/impl/lucene/LuceneConfig.java b/source/java/org/alfresco/repo/search/impl/lucene/LuceneConfig.java
index d2a39e2ebe..1630dede63 100644
--- a/source/java/org/alfresco/repo/search/impl/lucene/LuceneConfig.java
+++ b/source/java/org/alfresco/repo/search/impl/lucene/LuceneConfig.java
@@ -24,6 +24,8 @@
*/
package org.alfresco.repo.search.impl.lucene;
+import java.util.concurrent.ThreadPoolExecutor;
+
import org.alfresco.repo.search.MLAnalysisMode;
public interface LuceneConfig
@@ -73,5 +75,12 @@ public interface LuceneConfig
* @return
*/
public int getIndexerMaxFieldLength();
+
+ /**
+ * Get the thread pool for index merging etc
+ *
+ * @return
+ */
+ public ThreadPoolExecutor getThreadPoolExecutor();
}
diff --git a/source/java/org/alfresco/repo/search/impl/lucene/index/IndexInfo.java b/source/java/org/alfresco/repo/search/impl/lucene/index/IndexInfo.java
index e7812fb037..7ce7348788 100644
--- a/source/java/org/alfresco/repo/search/impl/lucene/index/IndexInfo.java
+++ b/source/java/org/alfresco/repo/search/impl/lucene/index/IndexInfo.java
@@ -49,7 +49,12 @@ import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.Timer;
+import java.util.TimerTask;
import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.zip.CRC32;
@@ -59,6 +64,7 @@ import org.alfresco.repo.search.impl.lucene.FilterIndexReaderByStringId;
import org.alfresco.repo.search.impl.lucene.LuceneConfig;
import org.alfresco.repo.search.impl.lucene.analysis.AlfrescoStandardAnalyser;
import org.alfresco.util.GUID;
+import org.alfresco.util.TraceableThreadFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.lucene.analysis.Analyzer;
@@ -114,6 +120,8 @@ import org.apache.lucene.store.RAMDirectory;
*/
public class IndexInfo
{
+ private static Timer timer = new Timer();
+
/**
* The logger.
*/
@@ -214,8 +222,7 @@ public class IndexInfo
/**
* Map of state transitions
*/
- private EnumMap transitions = new EnumMap(
- TransactionStatus.class);
+ private EnumMap transitions = new EnumMap(TransactionStatus.class);
/**
* The queue of files and folders to delete
@@ -236,8 +243,7 @@ public class IndexInfo
/**
* The thread that deletes old index data
*/
- private Thread cleanerThread;
-
+ // private Thread cleanerThread;
/**
* The class the supports index merging and applying deletions from deltas to indexes and deltas that go before it.
*/
@@ -247,8 +253,7 @@ public class IndexInfo
* The thread that carries out index merging and applying deletions from deltas to indexes and deltas that go before
* it.
*/
- private Thread mergerThread;
-
+ // private Thread mergerThread;
/**
* A shared empty index to use if non exist.
*/
@@ -298,13 +303,15 @@ public class IndexInfo
* Control if the cleaner thread is active
*/
- private boolean enableCleanerThread = true;
+ private boolean enableCleaner = true;
/**
* Control if the merger thread is active
*/
- private boolean enableMergerThread = true;
+ private boolean enableMerger = true;
+
+ private ThreadPoolExecutor threadPoolExecutor;
static
{
@@ -358,13 +365,22 @@ public class IndexInfo
{
super();
initialiseTransitions();
-
- if(config != null)
+
+ if (config != null)
{
this.maxFieldLength = config.getIndexerMaxFieldLength();
+ this.threadPoolExecutor = config.getThreadPoolExecutor();
+ }
+ else
+ {
+ // need a default thread pool ....
+ TraceableThreadFactory threadFactory = new TraceableThreadFactory();
+ threadFactory.setThreadDaemon(true);
+ threadFactory.setThreadPriority(5);
+
+ threadPoolExecutor = new ThreadPoolExecutor(10, 10, 90, TimeUnit.SECONDS, new LinkedBlockingQueue(), threadFactory, new ThreadPoolExecutor.CallerRunsPolicy());
}
-
// Create an empty in memory index
IndexWriter writer;
try
@@ -383,7 +399,7 @@ public class IndexInfo
{
throw new IndexerException("Failed to create an empty in memory index!");
}
-
+
this.indexDirectory = indexDirectory;
// Make sure the directory exists
@@ -446,15 +462,13 @@ public class IndexInfo
long docs = writer.docCount();
writer.close();
- IndexEntry entry = new IndexEntry(IndexType.INDEX, OLD_INDEX, "",
- TransactionStatus.COMMITTED, "", docs, 0, false);
+ IndexEntry entry = new IndexEntry(IndexType.INDEX, OLD_INDEX, "", TransactionStatus.COMMITTED, "", docs, 0, false);
indexEntries.put(OLD_INDEX, entry);
writeStatus();
-
+
// The index exists and we should initialise the single reader
- registerReferenceCountingIndexReader(entry.getName(),
- buildReferenceCountingIndexReader(entry.getName()));
+ registerReferenceCountingIndexReader(entry.getName(), buildReferenceCountingIndexReader(entry.getName()));
}
catch (IOException e)
{
@@ -523,8 +537,7 @@ public class IndexInfo
s_logger.info("Resetting merge to committed " + entry);
}
entry.setStatus(TransactionStatus.COMMITTED);
- registerReferenceCountingIndexReader(entry.getName(),
- buildReferenceCountingIndexReader(entry.getName()));
+ registerReferenceCountingIndexReader(entry.getName(), buildReferenceCountingIndexReader(entry.getName()));
break;
// Complete committing (which is post database
// commit)
@@ -535,14 +548,12 @@ public class IndexInfo
s_logger.info("Committing " + entry);
}
entry.setStatus(TransactionStatus.COMMITTED);
- registerReferenceCountingIndexReader(entry.getName(),
- buildReferenceCountingIndexReader(entry.getName()));
+ registerReferenceCountingIndexReader(entry.getName(), buildReferenceCountingIndexReader(entry.getName()));
mainIndexReader = null;
break;
// States that require no action
case COMMITTED:
- registerReferenceCountingIndexReader(entry.getName(),
- buildReferenceCountingIndexReader(entry.getName()));
+ registerReferenceCountingIndexReader(entry.getName(), buildReferenceCountingIndexReader(entry.getName()));
break;
default:
// nothing to do
@@ -555,14 +566,11 @@ public class IndexInfo
indexEntries.remove(id);
}
clearOldReaders();
- synchronized (cleaner)
- {
- cleaner.notify();
- }
- synchronized (merger)
- {
- merger.notify();
- }
+
+ cleaner.schedule();
+
+ merger.schedule();
+
// persist the new state
writeStatus();
}
@@ -576,25 +584,16 @@ public class IndexInfo
releaseWriteLock();
}
}
- // TODO: Add unrecognised folders for deletion.
- if (enableCleanerThread)
+ // Run the cleaner around every 20 seconds - this just makes the request to the thread pool
+ timer.schedule(new TimerTask()
{
- cleanerThread = new Thread(cleaner);
- cleanerThread.setDaemon(true);
- cleanerThread.setName("Index cleaner thread " + indexDirectory);
- cleanerThread.start();
- }
-
- if (enableMergerThread)
- {
- mergerThread = new Thread(merger);
- mergerThread.setDaemon(true);
- mergerThread.setName("Index merger thread " + indexDirectory);
- mergerThread.start();
- }
-
-
+ @Override
+ public void run()
+ {
+ cleaner.schedule();
+ }
+ }, 0, 20000);
}
@@ -674,8 +673,7 @@ public class IndexInfo
// Make sure the index exists
if (!indexEntries.containsKey(id))
{
- indexEntries.put(id, new IndexEntry(IndexType.DELTA, id, "", TransactionStatus.ACTIVE, "", 0,
- 0, false));
+ indexEntries.put(id, new IndexEntry(IndexType.DELTA, id, "", TransactionStatus.ACTIVE, "", 0, 0, false));
}
}
finally
@@ -868,8 +866,7 @@ public class IndexInfo
* should deletions on apply to nodes (ie not to containers)
* @throws IOException
*/
- public void setPreparedState(String id, Set toDelete, long documents, boolean deleteNodesOnly)
- throws IOException
+ public void setPreparedState(String id, Set toDelete, long documents, boolean deleteNodesOnly) throws IOException
{
if (id == null)
{
@@ -887,8 +884,7 @@ public class IndexInfo
}
}
// Write deletions
- DataOutputStream os = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(new File(location,
- INDEX_INFO_DELETIONS).getCanonicalFile())));
+ DataOutputStream os = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(new File(location, INDEX_INFO_DELETIONS).getCanonicalFile())));
os.writeInt(toDelete.size());
for (String ref : toDelete)
{
@@ -905,8 +901,7 @@ public class IndexInfo
{
throw new IndexerException("Invalid index delta id " + id);
}
- if ((entry.getStatus() != TransactionStatus.PREPARING)
- && (entry.getStatus() != TransactionStatus.COMMITTING))
+ if ((entry.getStatus() != TransactionStatus.PREPARING) && (entry.getStatus() != TransactionStatus.COMMITTING))
{
throw new IndexerException("Deletes and doc count can only be set on a preparing index");
}
@@ -1000,8 +995,7 @@ public class IndexInfo
* @return
* @throws IOException
*/
- public IndexReader getMainIndexReferenceCountingReadOnlyIndexReader(String id, Set deletions,
- boolean deleteOnlyNodes) throws IOException
+ public IndexReader getMainIndexReferenceCountingReadOnlyIndexReader(String id, Set deletions, boolean deleteOnlyNodes) throws IOException
{
if (id == null)
{
@@ -1073,9 +1067,7 @@ public class IndexInfo
}
else
{
- reader = new MultiReader(new IndexReader[] {
- new FilterIndexReaderByStringId("main+id", mainIndexReader, deletions, deleteOnlyNodes),
- deltaReader });
+ reader = new MultiReader(new IndexReader[] { new FilterIndexReaderByStringId("main+id", mainIndexReader, deletions, deleteOnlyNodes), deltaReader });
}
reader = ReferenceCountingReadOnlyIndexReaderFactory.createReader("MainReader" + id, reader);
ReferenceCounting refCounting = (ReferenceCounting) reader;
@@ -1089,8 +1081,7 @@ public class IndexInfo
}
}
- public void setStatus(final String id, final TransactionStatus state, final Set toDelete, final Set read)
- throws IOException
+ public void setStatus(final String id, final TransactionStatus state, final Set toDelete, final Set read) throws IOException
{
if (id == null)
{
@@ -1229,8 +1220,7 @@ public class IndexInfo
}
else
{
- throw new IndexerException("Invalid transition for "
- + id + " from " + entry.getStatus() + " to " + TransactionStatus.PREPARING);
+ throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.PREPARING);
}
}
@@ -1303,8 +1293,7 @@ public class IndexInfo
}
else
{
- throw new IndexerException("Invalid transition for "
- + id + " from " + entry.getStatus() + " to " + TransactionStatus.PREPARED);
+ throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.PREPARED);
}
}
@@ -1335,8 +1324,7 @@ public class IndexInfo
}
else
{
- throw new IndexerException("Invalid transition for "
- + id + " from " + entry.getStatus() + " to " + TransactionStatus.COMMITTING);
+ throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.COMMITTING);
}
}
@@ -1391,17 +1379,13 @@ public class IndexInfo
mainIndexReader = null;
}
- synchronized (merger)
- {
- merger.notify();
- }
+ merger.schedule();
}
}
else
{
- throw new IndexerException("Invalid transition for "
- + id + " from " + entry.getStatus() + " to " + TransactionStatus.COMMITTED);
+ throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.COMMITTED);
}
}
@@ -1433,8 +1417,7 @@ public class IndexInfo
}
else
{
- throw new IndexerException("Invalid transition for "
- + id + " from " + entry.getStatus() + " to " + TransactionStatus.ROLLINGBACK);
+ throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.ROLLINGBACK);
}
}
@@ -1466,8 +1449,7 @@ public class IndexInfo
}
else
{
- throw new IndexerException("Invalid transition for "
- + id + " from " + entry.getStatus() + " to " + TransactionStatus.ROLLEDBACK);
+ throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.ROLLEDBACK);
}
}
@@ -1495,17 +1477,13 @@ public class IndexInfo
if (TransactionStatus.DELETABLE.follows(entry.getStatus()))
{
indexEntries.remove(id);
- synchronized (cleaner)
- {
- cleaner.notify();
- }
+ cleaner.schedule();
writeStatus();
clearOldReaders();
}
else
{
- throw new IndexerException("Invalid transition for "
- + id + " from " + entry.getStatus() + " to " + TransactionStatus.DELETABLE);
+ throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.DELETABLE);
}
}
@@ -1535,13 +1513,11 @@ public class IndexInfo
if (TransactionStatus.ACTIVE.follows(null))
{
- indexEntries
- .put(id, new IndexEntry(IndexType.DELTA, id, "", TransactionStatus.ACTIVE, "", 0, 0, false));
+ indexEntries.put(id, new IndexEntry(IndexType.DELTA, id, "", TransactionStatus.ACTIVE, "", 0, 0, false));
}
else
{
- throw new IndexerException("Invalid transition for "
- + id + " from " + entry.getStatus() + " to " + TransactionStatus.ACTIVE);
+ throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.ACTIVE);
}
}
@@ -1695,9 +1671,8 @@ public class IndexInfo
{
try
{
- reader = new MultiReader(new IndexReader[] {
- new FilterIndexReaderByStringId(id, reader, getDeletions(entry.getName()), entry
- .isDeletOnlyNodes()), subReader });
+ reader = new MultiReader(new IndexReader[] { new FilterIndexReaderByStringId(id, reader, getDeletions(entry.getName()), entry.isDeletOnlyNodes()),
+ subReader });
}
catch (IOException ioe)
{
@@ -1731,9 +1706,9 @@ public class IndexInfo
private void registerReferenceCountingIndexReader(String id, IndexReader reader)
{
ReferenceCounting referenceCounting = (ReferenceCounting) reader;
- if(!referenceCounting.getId().equals(id))
+ if (!referenceCounting.getId().equals(id))
{
- throw new IllegalStateException("Registering "+referenceCounting.getId()+ " as "+id);
+ throw new IllegalStateException("Registering " + referenceCounting.getId() + " as " + id);
}
referenceCountingReadOnlyIndexReaders.put(id, reader);
}
@@ -1879,8 +1854,7 @@ public class IndexInfo
if (!status.isTransient())
{
- newIndexEntries.put(name, new IndexEntry(indexType, name, parentName, status, mergeId,
- documentCount, deletions, isDeletOnlyNodes));
+ newIndexEntries.put(name, new IndexEntry(indexType, name, parentName, status, mergeId, documentCount, deletions, isDeletOnlyNodes));
}
}
long onDiskCRC32 = buffer.getLong();
@@ -2113,7 +2087,7 @@ public class IndexInfo
* Helper to print out index information
*
* @param args
- * @throws Throwable
+ * @throws Throwable
*/
public static void main(String[] args) throws Throwable
{
@@ -2139,44 +2113,42 @@ public class IndexInfo
}
IndexReader reader = ii.getMainIndexReferenceCountingReadOnlyIndexReader();
TermEnum terms = reader.terms(new Term("@{archiweb.model}instance", ""));
- while(terms.next() && terms.term().field().equals("@{archiweb.model}instance"))
+ while (terms.next() && terms.term().field().equals("@{archiweb.model}instance"))
{
- System.out.println("F = " +terms.term().field() + " V = "+terms.term().text() + " F = "+terms.docFreq());
+ System.out.println("F = " + terms.term().field() + " V = " + terms.term().text() + " F = " + terms.docFreq());
}
terms.close();
long start = System.currentTimeMillis();
TermDocs termDocs = reader.termDocs(new Term("@{archiweb.model}instance", "tfl"));
- while(termDocs.next())
+ while (termDocs.next())
{
- //System.out.println("Doc = " + termDocs.doc());
+ // System.out.println("Doc = " + termDocs.doc());
Document doc = reader.document(termDocs.doc());
doc.getField("ID");
- //System.out.println("Ref = "+doc.getField("ID"));
+ // System.out.println("Ref = "+doc.getField("ID"));
}
termDocs.close();
- System.out.println("Time = "+((System.currentTimeMillis() - start)/1000.0f));
-
-
-
+ System.out.println("Time = " + ((System.currentTimeMillis() - start) / 1000.0f));
+
terms = reader.terms(new Term("TYPE", ""));
- while(terms.next() && terms.term().field().equals("TYPE"))
+ while (terms.next() && terms.term().field().equals("TYPE"))
{
- System.out.println("F = " +terms.term().field() + " V = "+terms.term().text() + " F = "+terms.docFreq());
+ System.out.println("F = " + terms.term().field() + " V = " + terms.term().text() + " F = " + terms.docFreq());
}
terms.close();
start = System.currentTimeMillis();
- termDocs = reader.termDocs(new Term("TYPE","{archiweb.model}tfdoc"));
- while(termDocs.next())
+ termDocs = reader.termDocs(new Term("TYPE", "{archiweb.model}tfdoc"));
+ while (termDocs.next())
{
- //System.out.println("Doc = " + termDocs.doc());
+ // System.out.println("Doc = " + termDocs.doc());
Document doc = reader.document(termDocs.doc());
doc.getField("ID");
- //System.out.println("Ref = "+doc.getField("ID"));
+ // System.out.println("Ref = "+doc.getField("ID"));
}
termDocs.close();
- System.out.println("Time = "+((System.currentTimeMillis() - start)/1000.0f));
-
- //+@\{archiweb.model\}instance:TFL*
+ System.out.println("Time = " + ((System.currentTimeMillis() - start) / 1000.0f));
+
+ // +@\{archiweb.model\}instance:TFL*
}
}
@@ -2185,102 +2157,79 @@ public class IndexInfo
*
* @author Andy Hind
*/
- private class Cleaner implements Runnable
+ private class Cleaner extends AbstractSchedulable
{
public void run()
{
- boolean runnable = true;
- while (runnable)
- {
- // Add any closed index readers we were waiting for
- HashSet waiting = new HashSet();
- IndexReader reader;
- while ((reader = deletableReaders.poll()) != null)
- {
- ReferenceCounting refCounting = (ReferenceCounting) reader;
- if (refCounting.getReferenceCount() == 0)
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("Deleting no longer referenced " + refCounting.getId());
- s_logger.debug("... queued delete for " + refCounting.getId());
- s_logger.debug("... "
- + ReferenceCountingReadOnlyIndexReaderFactory.getState(refCounting.getId()));
- }
- getReadLock();
- try
- {
- if (indexEntries.containsKey(refCounting.getId()))
- {
- s_logger.error("ERROR - deleting live reader - " + refCounting.getId());
- }
- }
- finally
- {
- releaseReadLock();
- }
- deleteQueue.add(refCounting.getId());
- }
- else
- {
- waiting.add(reader);
- }
- }
- deletableReaders.addAll(waiting);
- String id = null;
- HashSet fails = new HashSet();
- while ((id = deleteQueue.poll()) != null)
+ // Add any closed index readers we were waiting for
+ HashSet waiting = new HashSet();
+ IndexReader reader;
+ while ((reader = deletableReaders.poll()) != null)
+ {
+ ReferenceCounting refCounting = (ReferenceCounting) reader;
+ if (refCounting.getReferenceCount() == 0)
{
+ if (s_logger.isDebugEnabled())
+ {
+ s_logger.debug("Deleting no longer referenced " + refCounting.getId());
+ s_logger.debug("... queued delete for " + refCounting.getId());
+ s_logger.debug("... " + ReferenceCountingReadOnlyIndexReaderFactory.getState(refCounting.getId()));
+ }
+ getReadLock();
try
{
- if (s_logger.isDebugEnabled())
+ if (indexEntries.containsKey(refCounting.getId()))
{
- s_logger.debug("Expunging " + id + " remaining " + deleteQueue.size());
- s_logger.debug("... " + ReferenceCountingReadOnlyIndexReaderFactory.getState(id));
- }
- // try and delete
- File location = new File(indexDirectory, id).getCanonicalFile();
- if (!deleteDirectory(location))
- {
- if (s_logger.isDebugEnabled())
- {
- s_logger.debug("DELETE FAILED");
- }
- // try again later
- fails.add(id);
+ s_logger.error("ERROR - deleting live reader - " + refCounting.getId());
}
}
- catch (IOException ioe)
+ finally
{
- s_logger.warn("Failed to delete file - invalid canonical file", ioe);
+ releaseReadLock();
+ }
+ deleteQueue.add(refCounting.getId());
+ }
+ else
+ {
+ waiting.add(reader);
+ }
+ }
+ deletableReaders.addAll(waiting);
+
+ String id = null;
+ HashSet fails = new HashSet();
+ while ((id = deleteQueue.poll()) != null)
+ {
+ try
+ {
+ if (s_logger.isDebugEnabled())
+ {
+ s_logger.debug("Expunging " + id + " remaining " + deleteQueue.size());
+ s_logger.debug("... " + ReferenceCountingReadOnlyIndexReaderFactory.getState(id));
+ }
+ // try and delete
+ File location = new File(indexDirectory, id).getCanonicalFile();
+ if (!deleteDirectory(location))
+ {
+ if (s_logger.isDebugEnabled())
+ {
+ s_logger.debug("DELETE FAILED");
+ }
+ // try again later
fails.add(id);
}
}
- deleteQueue.addAll(fails);
- synchronized (this)
+ catch (IOException ioe)
{
- try
- {
- // wait for more deletes
- if (deleteQueue.size() > 0)
- {
- this.wait(20000);
- }
- else
- {
- this.wait();
- }
- }
- catch (InterruptedException e)
- {
- runnable = false;
- s_logger.warn("Cleaner thread for " + indexDirectory + "stopped by interruption.");
- }
+ s_logger.warn("Failed to delete file - invalid canonical file", ioe);
+ fails.add(id);
}
}
+ deleteQueue.addAll(fails);
+ done();
}
private boolean deleteDirectory(File file)
@@ -2329,146 +2278,179 @@ public class IndexInfo
NONE, MERGE_INDEX, APPLY_DELTA_DELETION, MERGE_DELTA
}
- private class Merger implements Runnable
+ private abstract class AbstractSchedulable implements Schedulable, Runnable
{
+
+ boolean scheduled = false;
+
+ public synchronized void schedule()
+ {
+ if (!scheduled)
+ {
+ threadPoolExecutor.execute(this);
+ scheduled = true;
+ }
+ else
+ {
+ // already scheduled - the queued run will pick up the latest state
+ }
+ }
+
+ public synchronized void done()
+ {
+ if (scheduled)
+ {
+ scheduled = false;
+ }
+ else
+ {
+ throw new IllegalStateException();
+ }
+ }
+
+ public synchronized void reschedule()
+ {
+ if (scheduled)
+ {
+ threadPoolExecutor.execute(this);
+ }
+ else
+ {
+ throw new IllegalStateException();
+ }
+ }
+ }
+
+ private class Merger extends AbstractSchedulable
+ {
+
public void run()
{
- boolean running = true;
- while (running)
+ try
{
+ // Get the read local to decide what to do
+ // Single JVM to start with
+ MergeAction action = MergeAction.NONE;
+
+ getReadLock();
try
{
- // Get the read local to decide what to do
- // Single JVM to start with
- MergeAction action = MergeAction.NONE;
-
- getReadLock();
- try
+ if (indexIsShared && !checkVersion())
{
- if (indexIsShared && !checkVersion())
+ releaseReadLock();
+ getWriteLock();
+ try
+ {
+ // Sync with disk image if required
+ doWithFileLock(new LockWork