Merged V2.1 to HEAD

6515: Fix for AWC-1362 (system error page when clicking on a space that doesn't exist in the navigator)
6516: Fix for AR-1688 (Vista)
6518: Fix for AWC-1479, AWC-1199 and AWC-426 (JavaScript insertion into forum posts; security-related fixes); limit posting to a subset of safe tags
6519: Fix for AR-1690 (Web Scripts url.args is missing even though it's documented in the wiki)
6520: Fix for AWC-1271 (component generator config ignored for associations)
6521: Fix for AWC-1492 (some included JavaScript files in template/webscripts use the wrong app context path, e.g. /alfresco when the app is called /alfzip)
6522: Build fix
6523: Fix rendering of tasks with no description in the Office portlets
6524: Added thread pool for index merging (AR-1633, AR-1579)
6525: One more fix for rendering of tasks with no description in the Office portlets
6527: Renamed Axis JAR to reflect the version number
6528: WebServices query cache refactoring


git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/alfresco/HEAD/root@6741 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
Derek Hulley
2007-09-10 23:44:07 +00:00
parent d0e64d06b4
commit e02f50bd08
13 changed files with 460 additions and 391 deletions

View File

@@ -147,7 +147,7 @@
<ref bean="avmStoreDAO"/>
</property>
<property name="transactionalCache">
<ref bean="lookupTransactionalCache"/>
<ref bean="avmLookupCache"/>
</property>
</bean>

View File

@@ -245,9 +245,13 @@
</property>
</bean>
- <!-- Transactional caches setup for LookupCache -->
+ <!-- ===================================== -->
+ <!-- AVM Lookup Cache -->
+ <!-- ===================================== -->
- <bean name="lookupSharedCache" class="org.alfresco.repo.cache.EhCacheAdapter">
+ <!-- The cross-transaction shared cache for AVM lookups -->
+ <bean name="avmLookupSharedCache" class="org.alfresco.repo.cache.EhCacheAdapter">
<property name="cache">
<bean class="org.springframework.cache.ehcache.EhCacheFactoryBean">
<property name="cacheManager">
@@ -260,9 +264,11 @@
</property>
</bean>
<bean name="lookupTransactionalCache" class="org.alfresco.repo.cache.TransactionalCache">
<!-- Transactional cache for AVM lookups -->
<bean name="avmLookupCache" class="org.alfresco.repo.cache.TransactionalCache">
<property name="sharedCache">
<ref bean="lookupSharedCache"/>
<ref bean="avmLookupSharedCache"/>
</property>
<property name="cacheManager">
<ref bean="transactionalEHCacheManager"/>
@@ -274,6 +280,43 @@
<value>50</value>
</property>
</bean>
+ <!-- ===================================== -->
+ <!-- WebServices Query Session Cache -->
+ <!-- ===================================== -->
+ <!-- The cross-transaction shared cache for WebService query sessions -->
+ <bean name="webServicesQuerySessionSharedCache" class="org.alfresco.repo.cache.EhCacheAdapter">
+ <property name="cache">
+ <bean class="org.springframework.cache.ehcache.EhCacheFactoryBean">
+ <property name="cacheManager">
+ <ref bean="internalEHCacheManager"/>
+ </property>
+ <property name="cacheName">
+ <value>org.alfresco.repo.webservices.querySessionSharedCache</value>
+ </property>
+ </bean>
+ </property>
+ </bean>
+ <!-- Transactional cache for WebService query sessions -->
+ <bean name="webServicesQuerySessionCache" class="org.alfresco.repo.cache.TransactionalCache">
+ <property name="sharedCache">
+ <ref bean="webServicesQuerySessionSharedCache"/>
+ </property>
+ <property name="cacheManager">
+ <ref bean="transactionalEHCacheManager"/>
+ </property>
+ <property name="name">
+ <value>org.alfresco.repo.webservices.querySessionTransactionalCache</value>
+ </property>
+ <property name="maxCacheSize">
+ <value>50</value>
+ </property>
+ </bean>
+ <!-- ===================================== -->
+ <!-- Messages Caches -->
+ <!-- ===================================== -->
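
The wiring above follows Alfresco's usual two-tier pattern: a shared, EhCache-backed cache that survives across transactions, fronted by a TransactionalCache that isolates in-flight changes. As a rough illustration of how such a bean pair might be consumed, here is a minimal sketch; it assumes Alfresco's SimpleCache interface and the ApplicationContextHelper utility, and the key and value used are purely illustrative:

import java.io.Serializable;
import org.alfresco.repo.cache.SimpleCache;
import org.alfresco.util.ApplicationContextHelper;
import org.springframework.context.ApplicationContext;

public class QuerySessionCacheUsage
{
    @SuppressWarnings("unchecked")
    public static void main(String[] args)
    {
        ApplicationContext ctx = ApplicationContextHelper.getApplicationContext();
        // Bean name taken from the configuration above
        SimpleCache<Serializable, Object> cache =
                (SimpleCache<Serializable, Object>) ctx.getBean("webServicesQuerySessionCache");
        // Writes are held in the transaction-local tier and promoted to the
        // shared EhCache-backed tier on commit; reads fall through to it.
        cache.put("query-session-id", "session-state");
        Object state = cache.get("query-session-id");
    }
}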

View File

@@ -360,6 +360,15 @@
</property>
</bean>
<bean id="indexThreadPoolExecutor" class="org.alfresco.util.ThreadPoolExecutorFactoryBean" singleton="true">
<property name="corePoolSize">
<value>10</value>
</property>
<property name="threadPriority">
<value>5</value>
</property>
</bean>
<!-- Indexer and searchers for lucene -->
<bean id="admLuceneIndexerAndSearcherFactory"
@@ -415,7 +424,9 @@
<property name="defaultMLSearchAnalysisMode">
<value>EXACT_LANGUAGE_AND_ALL</value>
</property>
<property name="threadPoolExecutor">
<ref bean="indexThreadPoolExecutor"></ref>
</property>
</bean>
<!-- Indexer and searchers for lucene -->
@@ -481,7 +492,9 @@
<property name="defaultMLSearchAnalysisMode">
<value>EXACT_LANGUAGE_AND_ALL</value>
</property>
<property name="threadPoolExecutor">
<ref bean="indexThreadPoolExecutor"></ref>
</property>
</bean>
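
The indexThreadPoolExecutor bean above feeds the Lucene indexer factories via the new threadPoolExecutor property. For orientation, a rough programmatic equivalent is sketched below; the queue, keep-alive, and rejection policy are copied from the fallback pool that IndexInfo constructs when no LuceneConfig is supplied (see the IndexInfo diff further down), so the factory bean's actual defaults may differ:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.alfresco.util.TraceableThreadFactory;

public class IndexThreadPool
{
    public static ThreadPoolExecutor create()
    {
        TraceableThreadFactory threadFactory = new TraceableThreadFactory();
        threadFactory.setThreadDaemon(true);
        threadFactory.setThreadPriority(5); // matches the threadPriority property above
        return new ThreadPoolExecutor(
                10, 10,                     // matches corePoolSize
                90, TimeUnit.SECONDS,       // keep-alive, from the IndexInfo fallback
                new LinkedBlockingQueue<Runnable>(),
                threadFactory,
                new ThreadPoolExecutor.CallerRunsPolicy());
    }
}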

View File

@@ -256,6 +256,19 @@
/>
<!-- Internally used caches -->
+ <cache
+ name="org.alfresco.repo.avm.lookupSharedCache"
+ maxElementsInMemory="10000"
+ eternal="true"
+ overflowToDisk="false"
+ />
+ <cache
+ name="org.alfresco.repo.webservices.querySessionSharedCache"
+ maxElementsInMemory="1000"
+ eternal="false"
+ timeToLiveSeconds="300"
+ overflowToDisk="false"
+ />
<cache
name="org.alfresco.cache.parentAssocsCache"
maxElementsInMemory="10000"

View File

@@ -45,7 +45,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</defaultCache>
<cache
@@ -62,8 +61,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>
<cache
@@ -79,8 +76,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>
<cache
@@ -98,8 +93,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>
<cache
@@ -117,8 +110,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>
<cache
@@ -136,8 +127,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>
<cache
@@ -155,8 +144,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>
<cache
@@ -174,8 +161,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>
<cache
@@ -193,8 +178,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>
<cache
@@ -212,8 +195,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>
<cache
@@ -231,8 +212,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>
<cache
@@ -250,8 +229,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>
<cache
@@ -269,8 +246,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>
<cache
@@ -288,8 +263,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>
<cache
@@ -307,8 +280,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>
<cache
@@ -324,8 +295,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>
<cache
@@ -341,8 +310,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>
<cache
@@ -358,8 +325,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>
<cache
@@ -375,8 +340,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>
<cache
@@ -392,8 +355,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>
<cache
@@ -409,12 +370,41 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>
+ <!-- Non-Hibernate -->
+ <cache
+ name="org.alfresco.repo.avm.lookupSharedCache"
+ maxElementsInMemory="10000"
+ eternal="true"
+ overflowToDisk="false">
+ <cacheEventListenerFactory
+ class="net.sf.ehcache.distribution.RMICacheReplicatorFactory"
+ properties="replicatePuts = false,
+ replicateUpdates = true,
+ replicateRemovals = true,
+ replicateUpdatesViaCopy = false,
+ replicateAsynchronously = false"/>
+ </cache>
+ <cache
+ name="org.alfresco.repo.webservices.querySessionSharedCache"
+ maxElementsInMemory="1000"
+ eternal="false"
+ timeToLiveSeconds="300"
+ overflowToDisk="false">
+ <cacheEventListenerFactory
+ class="net.sf.ehcache.distribution.RMICacheReplicatorFactory"
+ properties="replicatePuts = false,
+ replicateUpdates = true,
+ replicateRemovals = true,
+ replicateUpdatesViaCopy = false,
+ replicateAsynchronously = false"/>
+ </cache>
<cache
name="org.alfresco.cache.parentAssocsCache"
maxElementsInMemory="10000"
@@ -428,8 +418,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>
<cache
@@ -445,8 +433,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>
<cache
@@ -462,8 +448,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>
<cache
@@ -479,8 +463,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>
<cache
@@ -496,8 +478,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>
<cache
@@ -513,8 +493,6 @@
replicateRemovals = true,
replicateUpdatesViaCopy = false,
replicateAsynchronously = false"/>
</cache>

View File

@@ -740,6 +740,9 @@ public final class SMBErrorText
case SMBStatus.NTPipeBusy:
errtext = "Pipe is busy";
break;
+ case SMBStatus.NTInvalidLevel:
+ errtext = "Invalid information level";
+ break;
default:
errtext = "Unknown NT status 0x" + Integer.toHexString(errcode);
break;

View File

@@ -252,6 +252,7 @@ public final class SMBStatus
public static final int NTNoSuchDomain = 0xC00000DF;
public static final int NTTooManyOpenFiles = 0xC000011F;
public static final int NTCancelled = 0xC0000120;
+ public static final int NTInvalidLevel = 0xC0000148;
public static final int NTFileOffline = 0xC0000267;
public static final int Win32FileNotFound = 2;

View File

@@ -3477,7 +3477,7 @@ public class NTProtocolHandler extends CoreProtocolHandler
// Requested information level is not supported
- m_sess.sendErrorResponseSMB(SMBStatus.SRVNotSupported, SMBStatus.ErrSrv);
+ m_sess.sendErrorResponseSMB(SMBStatus.NTInvalidLevel, SMBStatus.SRVNotSupported, SMBStatus.ErrSrv);
}
}
@@ -3723,7 +3723,7 @@ public class NTProtocolHandler extends CoreProtocolHandler
// Requested information level is not supported
- m_sess.sendErrorResponseSMB(SMBStatus.SRVNotSupported, SMBStatus.ErrSrv);
+ m_sess.sendErrorResponseSMB(SMBStatus.NTInvalidLevel, SMBStatus.SRVNotSupported, SMBStatus.ErrSrv);
}
}
@@ -4197,7 +4197,7 @@ public class NTProtocolHandler extends CoreProtocolHandler
// Requested information level is not supported
- m_sess.sendErrorResponseSMB(SMBStatus.NTInvalidParameter, SMBStatus.SRVNonSpecificError, SMBStatus.ErrSrv);
+ m_sess.sendErrorResponseSMB(SMBStatus.NTInvalidLevel, SMBStatus.SRVNonSpecificError, SMBStatus.ErrSrv);
return;
}
}
@@ -4412,7 +4412,7 @@ public class NTProtocolHandler extends CoreProtocolHandler
// Requested information level is not supported
- m_sess.sendErrorResponseSMB(SMBStatus.NTInvalidParameter, SMBStatus.SRVNonSpecificError, SMBStatus.ErrSrv);
+ m_sess.sendErrorResponseSMB(SMBStatus.NTInvalidLevel, SMBStatus.SRVNonSpecificError, SMBStatus.ErrSrv);
return;
}
}

View File

@@ -150,6 +150,7 @@ public abstract class BaseNodeServiceTest extends BaseSpringTest
protected NodeService nodeService;
/** populated during setup */
protected NodeRef rootNodeRef;
+ private NodeRef cat;
@Override
protected void onSetUpInTransaction() throws Exception
@@ -187,6 +188,13 @@ public abstract class BaseNodeServiceTest extends BaseSpringTest
"Test_" + System.currentTimeMillis());
rootNodeRef = nodeService.getRootNode(storeRef);
+ StoreRef catStoreRef = nodeService.createStore(
+ StoreRef.PROTOCOL_WORKSPACE,
+ "Test_cat_" + System.currentTimeMillis());
+ NodeRef catRootNodeRef = nodeService.getRootNode(catStoreRef);
+ cat = nodeService.createNode(catRootNodeRef, ContentModel.ASSOC_CHILDREN, QName.createQName("{namespace}cat"), ContentModel.TYPE_CATEGORY).getChildRef();
// downgrade integrity checks
IntegrityChecker.setWarnInTransaction();
}
@@ -1142,7 +1150,7 @@ public abstract class BaseNodeServiceTest extends BaseSpringTest
properties.put(PROP_QNAME_QNAME_VALUE, TYPE_QNAME_TEST_CONTENT);
properties.put(PROP_QNAME_PATH_VALUE, pathProperty);
properties.put(PROP_QNAME_CONTENT_VALUE, new ContentData("url", "text/plain", 88L, "UTF-8"));
- properties.put(PROP_QNAME_CATEGORY_VALUE, rootNodeRef);
+ properties.put(PROP_QNAME_CATEGORY_VALUE, cat);
properties.put(PROP_QNAME_LOCALE_VALUE, Locale.CHINESE);
properties.put(PROP_QNAME_NULL_VALUE, null);
properties.put(PROP_QNAME_MULTI_VALUE, listProperty);
@@ -1180,7 +1188,7 @@ public abstract class BaseNodeServiceTest extends BaseSpringTest
properties.put(PROP_QNAME_QNAME_VALUE, TYPE_QNAME_TEST_CONTENT);
properties.put(PROP_QNAME_PATH_VALUE, pathProperty);
properties.put(PROP_QNAME_CONTENT_VALUE, new ContentData("url", "text/plain", 88L, "UTF-8"));
- properties.put(PROP_QNAME_CATEGORY_VALUE, rootNodeRef);
+ properties.put(PROP_QNAME_CATEGORY_VALUE, cat);
properties.put(PROP_QNAME_LOCALE_VALUE, Locale.CHINESE);
properties.put(PROP_QNAME_NULL_VALUE, null);
properties.put(PROP_QNAME_MULTI_VALUE, listProperty);

View File

@@ -29,6 +29,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
+ import java.util.concurrent.ThreadPoolExecutor;
import javax.transaction.RollbackException;
import javax.transaction.SystemException;
@@ -128,6 +129,8 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
private MLAnalysisMode defaultMLSearchAnalysisMode = MLAnalysisMode.EXACT_LANGUAGE_AND_ALL;
+ private ThreadPoolExecutor threadPoolExecutor;
/**
* Private constructor for the singleton TODO: FIt in with IOC
*/
@@ -838,6 +841,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
/**
* Set the lucene write lock timeout
*
* @param timeout
*/
public void setWriteLockTimeout(long timeout)
@@ -847,6 +851,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
/**
* Set the lucene commit lock timeout (no longer used with lucene 2.1)
*
* @param timeout
*/
public void setCommitLockTimeout(long timeout)
@@ -856,6 +861,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
/**
* Get the commit lock timout.
*
* @return - the timeout
*/
public long getCommitLockTimeout()
@@ -865,6 +871,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
/**
* Get the write lock timeout
*
* @return - the timeout in ms
*/
public long getWriteLockTimeout()
@@ -884,6 +891,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
/**
* Get the max number of tokens in the field
*
* @return - the max tokens considered.
*/
public int getIndexerMaxFieldLength()
@@ -893,6 +901,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
/**
* Set the max field length.
*
* @param indexerMaxFieldLength
*/
public void setIndexerMaxFieldLength(int indexerMaxFieldLength)
@@ -900,6 +909,16 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
this.indexerMaxFieldLength = indexerMaxFieldLength;
}
+ public ThreadPoolExecutor getThreadPoolExecutor()
+ {
+ return this.threadPoolExecutor;
+ }
+ public void setThreadPoolExecutor(ThreadPoolExecutor threadPoolExecutor)
+ {
+ this.threadPoolExecutor = threadPoolExecutor;
+ }
/**
* This component is able to <i>safely</i> perform backups of the Lucene indexes while the server is running.
* <p>
@@ -922,7 +941,6 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
/**
* Default constructor
*
*/
public LuceneIndexBackupComponent()
{
@@ -1082,8 +1100,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
// make sure the rename worked
if (!targetDir.exists())
{
- throw new AlfrescoRuntimeException(
- "Failed to rename temporary directory to target backup directory");
+ throw new AlfrescoRuntimeException("Failed to rename temporary directory to target backup directory");
}
}
}
@@ -1105,8 +1122,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
public void execute(JobExecutionContext context) throws JobExecutionException
{
JobDataMap jobData = context.getJobDetail().getJobDataMap();
- LuceneIndexBackupComponent backupComponent = (LuceneIndexBackupComponent) jobData
- .get(KEY_LUCENE_INDEX_BACKUP_COMPONENT);
+ LuceneIndexBackupComponent backupComponent = (LuceneIndexBackupComponent) jobData.get(KEY_LUCENE_INDEX_BACKUP_COMPONENT);
if (backupComponent == null)
{
throw new JobExecutionException("Missing job data: " + KEY_LUCENE_INDEX_BACKUP_COMPONENT);
@@ -1139,6 +1155,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
/**
* Set the ML analysis mode at search time
*
* @param mode
*/
public void setDefaultMLSearchAnalysisMode(MLAnalysisMode mode)

View File

@@ -24,6 +24,8 @@
*/
package org.alfresco.repo.search.impl.lucene;
+ import java.util.concurrent.ThreadPoolExecutor;
import org.alfresco.repo.search.MLAnalysisMode;
public interface LuceneConfig
@@ -74,4 +76,11 @@ public interface LuceneConfig
*/
public int getIndexerMaxFieldLength();
+ /**
+ * Get the thread pool for index merging etc
+ *
+ * @return
+ */
+ public ThreadPoolExecutor getThreadPoolExecutor();
}

View File

@@ -49,7 +49,12 @@ import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
+ import java.util.Timer;
+ import java.util.TimerTask;
import java.util.concurrent.ConcurrentLinkedQueue;
+ import java.util.concurrent.LinkedBlockingQueue;
+ import java.util.concurrent.ThreadPoolExecutor;
+ import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.zip.CRC32;
@@ -59,6 +64,7 @@ import org.alfresco.repo.search.impl.lucene.FilterIndexReaderByStringId;
import org.alfresco.repo.search.impl.lucene.LuceneConfig;
import org.alfresco.repo.search.impl.lucene.analysis.AlfrescoStandardAnalyser;
import org.alfresco.util.GUID;
+ import org.alfresco.util.TraceableThreadFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.lucene.analysis.Analyzer;
@@ -114,6 +120,8 @@ import org.apache.lucene.store.RAMDirectory;
*/
public class IndexInfo
{
+ private static Timer timer = new Timer();
/**
* The logger.
*/
@@ -214,8 +222,7 @@ public class IndexInfo
/**
* Map of state transitions
*/
- private EnumMap<TransactionStatus, Transition> transitions = new EnumMap<TransactionStatus, Transition>(
- TransactionStatus.class);
+ private EnumMap<TransactionStatus, Transition> transitions = new EnumMap<TransactionStatus, Transition>(TransactionStatus.class);
/**
* The queue of files and folders to delete
@@ -236,8 +243,7 @@ public class IndexInfo
/**
* The thread that deletes old index data
*/
- private Thread cleanerThread;
+ // private Thread cleanerThread;
/**
* The class the supports index merging and applying deletions from deltas to indexes and deltas that go before it.
*/
@@ -247,8 +253,7 @@ public class IndexInfo
* The thread that carries out index merging and applying deletions from deltas to indexes and deltas that go before
* it.
*/
- private Thread mergerThread;
+ // private Thread mergerThread;
/**
* A shared empty index to use if non exist.
*/
@@ -298,13 +303,15 @@ public class IndexInfo
* Control if the cleaner thread is active
*/
- private boolean enableCleanerThread = true;
+ private boolean enableCleaner = true;
/**
* Control if the merger thread is active
*/
- private boolean enableMergerThread = true;
+ private boolean enableMerger = true;
+ private ThreadPoolExecutor threadPoolExecutor;
static
{
@@ -362,8 +369,17 @@ public class IndexInfo
if (config != null)
{
this.maxFieldLength = config.getIndexerMaxFieldLength();
+ this.threadPoolExecutor = config.getThreadPoolExecutor();
}
+ else
+ {
+ // need a default thread pool ....
+ TraceableThreadFactory threadFactory = new TraceableThreadFactory();
+ threadFactory.setThreadDaemon(true);
+ threadFactory.setThreadPriority(5);
+ threadPoolExecutor = new ThreadPoolExecutor(10, 10, 90, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), threadFactory, new ThreadPoolExecutor.CallerRunsPolicy());
+ }
// Create an empty in memory index
IndexWriter writer;
@@ -446,15 +462,13 @@ public class IndexInfo
long docs = writer.docCount();
writer.close();
- IndexEntry entry = new IndexEntry(IndexType.INDEX, OLD_INDEX, "",
- TransactionStatus.COMMITTED, "", docs, 0, false);
+ IndexEntry entry = new IndexEntry(IndexType.INDEX, OLD_INDEX, "", TransactionStatus.COMMITTED, "", docs, 0, false);
indexEntries.put(OLD_INDEX, entry);
writeStatus();
// The index exists and we should initialise the single reader
- registerReferenceCountingIndexReader(entry.getName(),
- buildReferenceCountingIndexReader(entry.getName()));
+ registerReferenceCountingIndexReader(entry.getName(), buildReferenceCountingIndexReader(entry.getName()));
}
catch (IOException e)
{
@@ -523,8 +537,7 @@ public class IndexInfo
s_logger.info("Resetting merge to committed " + entry);
}
entry.setStatus(TransactionStatus.COMMITTED);
- registerReferenceCountingIndexReader(entry.getName(),
- buildReferenceCountingIndexReader(entry.getName()));
+ registerReferenceCountingIndexReader(entry.getName(), buildReferenceCountingIndexReader(entry.getName()));
break;
// Complete committing (which is post database
// commit)
@@ -535,14 +548,12 @@ public class IndexInfo
s_logger.info("Committing " + entry);
}
entry.setStatus(TransactionStatus.COMMITTED);
- registerReferenceCountingIndexReader(entry.getName(),
- buildReferenceCountingIndexReader(entry.getName()));
+ registerReferenceCountingIndexReader(entry.getName(), buildReferenceCountingIndexReader(entry.getName()));
mainIndexReader = null;
break;
// States that require no action
case COMMITTED:
- registerReferenceCountingIndexReader(entry.getName(),
- buildReferenceCountingIndexReader(entry.getName()));
+ registerReferenceCountingIndexReader(entry.getName(), buildReferenceCountingIndexReader(entry.getName()));
break;
default:
// nothing to do
@@ -555,14 +566,11 @@ public class IndexInfo
indexEntries.remove(id);
}
clearOldReaders();
- synchronized (cleaner)
- {
- cleaner.notify();
- }
- synchronized (merger)
- {
- merger.notify();
- }
+ cleaner.schedule();
+ merger.schedule();
// persist the new state
writeStatus();
}
@@ -576,25 +584,16 @@ public class IndexInfo
releaseWriteLock();
}
}
// TODO: Add unrecognised folders for deletion.
- if (enableCleanerThread)
- {
- cleanerThread = new Thread(cleaner);
- cleanerThread.setDaemon(true);
- cleanerThread.setName("Index cleaner thread " + indexDirectory);
- cleanerThread.start();
- }
- if (enableMergerThread)
- {
- mergerThread = new Thread(merger);
- mergerThread.setDaemon(true);
- mergerThread.setName("Index merger thread " + indexDirectory);
- mergerThread.start();
- }
+ // Run the cleaner around every 20 secods - this just makes the request to the thread pool
+ timer.schedule(new TimerTask()
+ {
+ @Override
+ public void run()
+ {
+ cleaner.schedule();
+ }
+ }, 0, 20000);
}
@@ -674,8 +673,7 @@ public class IndexInfo
// Make sure the index exists
if (!indexEntries.containsKey(id))
{
- indexEntries.put(id, new IndexEntry(IndexType.DELTA, id, "", TransactionStatus.ACTIVE, "", 0,
- 0, false));
+ indexEntries.put(id, new IndexEntry(IndexType.DELTA, id, "", TransactionStatus.ACTIVE, "", 0, 0, false));
}
}
finally
@@ -868,8 +866,7 @@ public class IndexInfo
* should deletions on apply to nodes (ie not to containers)
* @throws IOException
*/
- public void setPreparedState(String id, Set<String> toDelete, long documents, boolean deleteNodesOnly)
- throws IOException
+ public void setPreparedState(String id, Set<String> toDelete, long documents, boolean deleteNodesOnly) throws IOException
{
if (id == null)
{
@@ -887,8 +884,7 @@ public class IndexInfo
}
}
// Write deletions
- DataOutputStream os = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(new File(location,
- INDEX_INFO_DELETIONS).getCanonicalFile())));
+ DataOutputStream os = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(new File(location, INDEX_INFO_DELETIONS).getCanonicalFile())));
os.writeInt(toDelete.size());
for (String ref : toDelete)
{
@@ -905,8 +901,7 @@ public class IndexInfo
{
throw new IndexerException("Invalid index delta id " + id);
}
- if ((entry.getStatus() != TransactionStatus.PREPARING)
- && (entry.getStatus() != TransactionStatus.COMMITTING))
+ if ((entry.getStatus() != TransactionStatus.PREPARING) && (entry.getStatus() != TransactionStatus.COMMITTING))
{
throw new IndexerException("Deletes and doc count can only be set on a preparing index");
}
@@ -1000,8 +995,7 @@ public class IndexInfo
* @return
* @throws IOException
*/
- public IndexReader getMainIndexReferenceCountingReadOnlyIndexReader(String id, Set<String> deletions,
- boolean deleteOnlyNodes) throws IOException
+ public IndexReader getMainIndexReferenceCountingReadOnlyIndexReader(String id, Set<String> deletions, boolean deleteOnlyNodes) throws IOException
{
if (id == null)
{
@@ -1073,9 +1067,7 @@ public class IndexInfo
}
else
{
- reader = new MultiReader(new IndexReader[] {
- new FilterIndexReaderByStringId("main+id", mainIndexReader, deletions, deleteOnlyNodes),
- deltaReader });
+ reader = new MultiReader(new IndexReader[] { new FilterIndexReaderByStringId("main+id", mainIndexReader, deletions, deleteOnlyNodes), deltaReader });
}
reader = ReferenceCountingReadOnlyIndexReaderFactory.createReader("MainReader" + id, reader);
ReferenceCounting refCounting = (ReferenceCounting) reader;
@@ -1089,8 +1081,7 @@ public class IndexInfo
}
}
- public void setStatus(final String id, final TransactionStatus state, final Set<Term> toDelete, final Set<Term> read)
- throws IOException
+ public void setStatus(final String id, final TransactionStatus state, final Set<Term> toDelete, final Set<Term> read) throws IOException
{
if (id == null)
{
@@ -1229,8 +1220,7 @@ public class IndexInfo
}
else
{
throw new IndexerException("Invalid transition for "
+ id + " from " + entry.getStatus() + " to " + TransactionStatus.PREPARING);
throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.PREPARING);
}
}
@@ -1303,8 +1293,7 @@ public class IndexInfo
}
else
{
throw new IndexerException("Invalid transition for "
+ id + " from " + entry.getStatus() + " to " + TransactionStatus.PREPARED);
throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.PREPARED);
}
}
@@ -1335,8 +1324,7 @@ public class IndexInfo
}
else
{
throw new IndexerException("Invalid transition for "
+ id + " from " + entry.getStatus() + " to " + TransactionStatus.COMMITTING);
throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.COMMITTING);
}
}
@@ -1391,17 +1379,13 @@ public class IndexInfo
mainIndexReader = null;
}
- synchronized (merger)
- {
- merger.notify();
- }
+ merger.schedule();
}
}
else
{
throw new IndexerException("Invalid transition for "
+ id + " from " + entry.getStatus() + " to " + TransactionStatus.COMMITTED);
throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.COMMITTED);
}
}
@@ -1433,8 +1417,7 @@ public class IndexInfo
}
else
{
throw new IndexerException("Invalid transition for "
+ id + " from " + entry.getStatus() + " to " + TransactionStatus.ROLLINGBACK);
throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.ROLLINGBACK);
}
}
@@ -1466,8 +1449,7 @@ public class IndexInfo
}
else
{
throw new IndexerException("Invalid transition for "
+ id + " from " + entry.getStatus() + " to " + TransactionStatus.ROLLEDBACK);
throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.ROLLEDBACK);
}
}
@@ -1495,17 +1477,13 @@ public class IndexInfo
if (TransactionStatus.DELETABLE.follows(entry.getStatus()))
{
indexEntries.remove(id);
- synchronized (cleaner)
- {
- cleaner.notify();
- }
+ cleaner.schedule();
writeStatus();
clearOldReaders();
}
else
{
throw new IndexerException("Invalid transition for "
+ id + " from " + entry.getStatus() + " to " + TransactionStatus.DELETABLE);
throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.DELETABLE);
}
}
@@ -1535,13 +1513,11 @@ public class IndexInfo
if (TransactionStatus.ACTIVE.follows(null))
{
- indexEntries
- .put(id, new IndexEntry(IndexType.DELTA, id, "", TransactionStatus.ACTIVE, "", 0, 0, false));
+ indexEntries.put(id, new IndexEntry(IndexType.DELTA, id, "", TransactionStatus.ACTIVE, "", 0, 0, false));
}
else
{
throw new IndexerException("Invalid transition for "
+ id + " from " + entry.getStatus() + " to " + TransactionStatus.ACTIVE);
throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.ACTIVE);
}
}
@@ -1695,9 +1671,8 @@ public class IndexInfo
{
try
{
- reader = new MultiReader(new IndexReader[] {
- new FilterIndexReaderByStringId(id, reader, getDeletions(entry.getName()), entry
- .isDeletOnlyNodes()), subReader });
+ reader = new MultiReader(new IndexReader[] { new FilterIndexReaderByStringId(id, reader, getDeletions(entry.getName()), entry.isDeletOnlyNodes()),
+ subReader });
}
catch (IOException ioe)
{
@@ -1879,8 +1854,7 @@ public class IndexInfo
if (!status.isTransient())
{
- newIndexEntries.put(name, new IndexEntry(indexType, name, parentName, status, mergeId,
- documentCount, deletions, isDeletOnlyNodes));
+ newIndexEntries.put(name, new IndexEntry(indexType, name, parentName, status, mergeId, documentCount, deletions, isDeletOnlyNodes));
}
}
long onDiskCRC32 = buffer.getLong();
@@ -2156,8 +2130,6 @@ public class IndexInfo
termDocs.close();
System.out.println("Time = " + ((System.currentTimeMillis() - start) / 1000.0f));
terms = reader.terms(new Term("TYPE", ""));
while (terms.next() && terms.term().field().equals("TYPE"))
{
@@ -2185,14 +2157,12 @@ public class IndexInfo
*
* @author Andy Hind
*/
- private class Cleaner implements Runnable
+ private class Cleaner extends AbstractSchedulable
{
public void run()
{
- boolean runnable = true;
- while (runnable)
- {
// Add any closed index readers we were waiting for
HashSet<IndexReader> waiting = new HashSet<IndexReader>();
IndexReader reader;
@@ -2205,8 +2175,7 @@ public class IndexInfo
{
s_logger.debug("Deleting no longer referenced " + refCounting.getId());
s_logger.debug("... queued delete for " + refCounting.getId());
s_logger.debug("... "
+ ReferenceCountingReadOnlyIndexReaderFactory.getState(refCounting.getId()));
s_logger.debug("... " + ReferenceCountingReadOnlyIndexReaderFactory.getState(refCounting.getId()));
}
getReadLock();
try
@@ -2259,28 +2228,8 @@ public class IndexInfo
}
}
deleteQueue.addAll(fails);
- synchronized (this)
- {
- try
- {
- // wait for more deletes
- if (deleteQueue.size() > 0)
- {
- this.wait(20000);
- }
- else
- {
- this.wait();
- }
- }
- catch (InterruptedException e)
- {
- runnable = false;
- s_logger.warn("Cleaner thread for " + indexDirectory + "stopped by interruption.");
- }
- }
- }
+ done();
}
private boolean deleteDirectory(File file)
@@ -2329,14 +2278,55 @@ public class IndexInfo
NONE, MERGE_INDEX, APPLY_DELTA_DELETION, MERGE_DELTA
}
- private class Merger implements Runnable
+ private abstract class AbstractSchedulable implements Schedulable, Runnable
+ {
+ boolean scheduled = false;
+ public synchronized void schedule()
+ {
+ if (!scheduled)
+ {
+ threadPoolExecutor.execute(this);
+ scheduled = true;
+ }
+ else
+ {
+ // already done
+ }
+ }
+ public synchronized void done()
+ {
+ if (scheduled)
+ {
+ scheduled = false;
+ }
+ else
+ {
+ throw new IllegalStateException();
+ }
+ }
+ public synchronized void reschedule()
+ {
+ if (scheduled)
+ {
+ threadPoolExecutor.execute(this);
+ }
+ else
+ {
+ throw new IllegalStateException();
+ }
+ }
+ }
+ private class Merger extends AbstractSchedulable
{
public void run()
{
- boolean running = true;
- while (running)
- {
try
{
// Get the read local to decide what to do
@@ -2448,19 +2438,13 @@ public class IndexInfo
mergeIndexes();
}
- synchronized (this)
- {
- try
- {
if (action == MergeAction.NONE)
{
- this.wait();
+ done();
}
- }
- catch (InterruptedException e)
+ else
{
- // No action - could signal thread termination
- }
+ reschedule();
}
}
catch (Throwable t)
@@ -2469,8 +2453,6 @@ public class IndexInfo
}
}
}
void mergeDeletions()
{
if (s_logger.isDebugEnabled())
@@ -2496,8 +2478,7 @@ public class IndexInfo
{
return set;
}
- if ((entry.getType() == IndexType.DELTA)
- && (entry.getStatus() == TransactionStatus.COMMITTED_DELETING))
+ if ((entry.getType() == IndexType.DELTA) && (entry.getStatus() == TransactionStatus.COMMITTED_DELETING))
{
return set;
}
@@ -2631,9 +2612,7 @@ public class IndexInfo
{
if (s_logger.isDebugEnabled())
{
s_logger.debug("Deleted "
+ deletedCount + " from " + key + " for id " + stringRef
+ " remaining docs " + reader.numDocs());
s_logger.debug("Deleted " + deletedCount + " from " + key + " for id " + stringRef + " remaining docs " + reader.numDocs());
}
invalidIndexes.add(key);
}
@@ -2812,8 +2791,7 @@ public class IndexInfo
{
return set;
}
- if ((entry.getType() == IndexType.DELTA)
- && (entry.getStatus() == TransactionStatus.COMMITTED_DELETING))
+ if ((entry.getType() == IndexType.DELTA) && (entry.getStatus() == TransactionStatus.COMMITTED_DELETING))
{
return set;
}
@@ -2822,8 +2800,7 @@ public class IndexInfo
ArrayList<IndexEntry> mergeList = new ArrayList<IndexEntry>();
for (IndexEntry entry : indexEntries.values())
{
- if ((entry.getType() == IndexType.INDEX)
- && (entry.getStatus() == TransactionStatus.COMMITTED))
+ if ((entry.getType() == IndexType.INDEX) && (entry.getStatus() == TransactionStatus.COMMITTED))
{
mergeList.add(entry);
}
@@ -2849,8 +2826,7 @@ public class IndexInfo
if (set.size() > 0)
{
- IndexEntry target = new IndexEntry(IndexType.INDEX, guid, "",
- TransactionStatus.MERGE_TARGET, guid, count, 0, false);
+ IndexEntry target = new IndexEntry(IndexType.INDEX, guid, "", TransactionStatus.MERGE_TARGET, guid, count, 0, false);
set.put(guid, target);
// rebuild merged index elements
LinkedHashMap<String, IndexEntry> reordered = new LinkedHashMap<String, IndexEntry>();
@@ -3056,10 +3032,7 @@ public class IndexInfo
clearOldReaders();
- synchronized (cleaner)
- {
- cleaner.notify();
- }
+ cleaner.schedule();
return null;
}
@@ -3102,6 +3075,7 @@ public class IndexInfo
}
return 0;
}
}
private void dumpInfo()
@@ -3194,22 +3168,22 @@ public class IndexInfo
public boolean isEnableCleanerThread()
{
- return enableCleanerThread;
+ return enableCleaner;
}
- public void setEnableCleanerThread(boolean enableCleanerThread)
+ public void setEnableCleanerThread(boolean enableCleaner)
{
- this.enableCleanerThread = enableCleanerThread;
+ this.enableCleaner = enableCleaner;
}
- public boolean isEnableMergerThread()
+ public boolean isEnableMerger()
{
- return enableMergerThread;
+ return enableMerger;
}
- public void setEnableMergerThread(boolean enableMergerThread)
+ public void setEnableMerger(boolean enableMerger)
{
- this.enableMergerThread = enableMergerThread;
+ this.enableMerger = enableMerger;
}
public boolean isIndexIsShared()
@@ -3322,4 +3296,14 @@ public class IndexInfo
this.writerUseCompoundFile = writerUseCompoundFile;
}
+ interface Schedulable
+ {
+ void schedule();
+ public void done();
+ public void reschedule();
+ }
}
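
The net effect of the IndexInfo changes: the Cleaner and Merger no longer own dedicated threads; they are Schedulable tasks whose schedule() requests are coalesced until the running pass calls done(), with a Timer re-requesting the cleaner every 20 seconds. The self-contained sketch below restates that lifecycle outside IndexInfo; the PrintingWorker class and the fixed-size pool are hypothetical stand-ins, not part of the commit:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class SchedulableDemo
{
    interface Schedulable
    {
        void schedule();
        void done();
        void reschedule();
    }

    /** Mirrors AbstractSchedulable above: at most one pending execution at a time. */
    static abstract class AbstractSchedulable implements Schedulable, Runnable
    {
        private final ExecutorService pool;
        private boolean scheduled = false;

        AbstractSchedulable(ExecutorService pool)
        {
            this.pool = pool;
        }

        public synchronized void schedule()
        {
            if (!scheduled)
            {
                pool.execute(this);
                scheduled = true;
            }
            // else: already queued or running, so the request is coalesced
        }

        public synchronized void done()
        {
            if (!scheduled)
            {
                throw new IllegalStateException();
            }
            scheduled = false;
        }

        public synchronized void reschedule()
        {
            if (!scheduled)
            {
                throw new IllegalStateException();
            }
            pool.execute(this); // queue another pass without clearing the flag
        }
    }

    /** Hypothetical worker used only to exercise the lifecycle. */
    static class PrintingWorker extends AbstractSchedulable
    {
        PrintingWorker(ExecutorService pool)
        {
            super(pool);
        }

        public void run()
        {
            System.out.println("work pass on " + Thread.currentThread().getName());
            done(); // finished; the next schedule() call will queue us again
        }
    }

    public static void main(String[] args) throws InterruptedException
    {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        PrintingWorker worker = new PrintingWorker(pool);
        worker.schedule();
        worker.schedule(); // usually coalesced with the first request (races against done())
        Thread.sleep(200);
        worker.schedule(); // queues a second pass
        Thread.sleep(200);
        pool.shutdown();
    }
}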

View File

@@ -60,7 +60,7 @@ public class TraceableThreadFactory implements ThreadFactory
private int threadPriority;
- TraceableThreadFactory()
+ public TraceableThreadFactory()
{
this.group = new ThreadGroup("TraceableThreadGroup-" + factoryNumber.getAndIncrement());
TraceableThreadFactory.activeThreadGroups.add(this.group);