Merged V2.1 to HEAD

   6515: Fix for AWC-1362 (system error page when clicking on a space that doesn't exist in the navigator)
   6516: Fix for AR-1688 - Vista
   6518: Fix for AWC-1479, AWC-1199 and AWC-426 (JavaScript insertion into forum posts; security-related fixes) - posting is limited to a subset of safe tags
   6519: Fix for AR-1690 - Web Scripts url.args is missing even though it is documented in the wiki
   6520: Fix for AWC-1271 (component generator config ignored for associations)
   6521: Fix for AWC-1492 - some included JavaScript files in template/webscripts use the wrong app context path, i.e. /alfresco when the app is called /alfzip
   6522: Build fix
   6523: Fix for rendering of tasks with no description in Office portlets
   6524: Added thread pool for index merging (AR-1633, AR-1579)
   6525: One more fix for rendering of tasks with no description in Office portlets
   6527: Renamed Axis jar to reflect the version number.
   6528: WebServices query cache refactoring


git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/alfresco/HEAD/root@6741 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
Derek Hulley
2007-09-10 23:44:07 +00:00
parent d0e64d06b4
commit e02f50bd08
13 changed files with 460 additions and 391 deletions

View File

@@ -147,7 +147,7 @@
             <ref bean="avmStoreDAO"/>
         </property>
         <property name="transactionalCache">
-            <ref bean="lookupTransactionalCache"/>
+            <ref bean="avmLookupCache"/>
         </property>
     </bean>

View File

@@ -245,9 +245,13 @@
         </property>
     </bean>
-    <!-- Transactional caches setup for LookupCache -->
-    <bean name="lookupSharedCache" class="org.alfresco.repo.cache.EhCacheAdapter">
+    <!-- ===================================== -->
+    <!-- AVM Lookup Cache                      -->
+    <!-- ===================================== -->
+    <!-- The cross-transaction shared cache for AVM lookups -->
+    <bean name="avmLookupSharedCache" class="org.alfresco.repo.cache.EhCacheAdapter">
         <property name="cache">
             <bean class="org.springframework.cache.ehcache.EhCacheFactoryBean">
                 <property name="cacheManager">
@@ -260,9 +264,11 @@
         </property>
     </bean>
-    <bean name="lookupTransactionalCache" class="org.alfresco.repo.cache.TransactionalCache">
+    <!-- Transactional cache for AVM lookups -->
+    <bean name="avmLookupCache" class="org.alfresco.repo.cache.TransactionalCache">
         <property name="sharedCache">
-            <ref bean="lookupSharedCache"/>
+            <ref bean="avmLookupSharedCache"/>
         </property>
         <property name="cacheManager">
             <ref bean="transactionalEHCacheManager"/>
@@ -274,6 +280,43 @@
             <value>50</value>
         </property>
     </bean>
+    <!-- ===================================== -->
+    <!-- WebServices Query Session Cache       -->
+    <!-- ===================================== -->
+    <!-- The cross-transaction shared cache for WebService query sessions -->
+    <bean name="webServicesQuerySessionSharedCache" class="org.alfresco.repo.cache.EhCacheAdapter">
+        <property name="cache">
+            <bean class="org.springframework.cache.ehcache.EhCacheFactoryBean">
+                <property name="cacheManager">
+                    <ref bean="internalEHCacheManager"/>
+                </property>
+                <property name="cacheName">
+                    <value>org.alfresco.repo.webservices.querySessionSharedCache</value>
+                </property>
+            </bean>
+        </property>
+    </bean>
+    <!-- Transactional cache for WebService query sessions -->
+    <bean name="webServicesQuerySessionCache" class="org.alfresco.repo.cache.TransactionalCache">
+        <property name="sharedCache">
+            <ref bean="webServicesQuerySessionSharedCache"/>
+        </property>
+        <property name="cacheManager">
+            <ref bean="transactionalEHCacheManager"/>
+        </property>
+        <property name="name">
+            <value>org.alfresco.repo.webservices.querySessionTransactionalCache</value>
+        </property>
+        <property name="maxCacheSize">
+            <value>50</value>
+        </property>
+    </bean>
     <!-- ===================================== -->
     <!-- Messages Caches                       -->
     <!-- ===================================== -->
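
A note on the pattern above: each cache is wired as a pair, a cross-transaction shared EhCache (the EhCacheAdapter bean) plus a TransactionalCache wrapper that buffers changes until the enclosing transaction commits. A toy sketch of that visibility rule follows; it is an illustration only, not Alfresco source, and every name in it is hypothetical:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Toy model of the shared-cache/transactional-cache split configured above.
// Alfresco's real TransactionalCache is bound to the running transaction;
// this sketch only shows the commit-time publication rule.
public class TxCacheSketch<K, V>
{
    private final Map<K, V> sharedCache = new ConcurrentHashMap<K, V>(); // cross-transaction
    private final Map<K, V> txUpdates = new HashMap<K, V>(); // per-transaction buffer

    public V get(K key)
    {
        V local = txUpdates.get(key);
        return local != null ? local : sharedCache.get(key);
    }

    public void put(K key, V value)
    {
        txUpdates.put(key, value); // invisible to other transactions until commit
    }

    public void commit()
    {
        sharedCache.putAll(txUpdates); // publish to the shared cache
        txUpdates.clear();
    }

    public void rollback()
    {
        txUpdates.clear(); // discard buffered writes
    }
}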

View File

@@ -360,6 +360,15 @@
         </property>
     </bean>

+    <bean id="indexThreadPoolExecutor" class="org.alfresco.util.ThreadPoolExecutorFactoryBean" singleton="true">
+        <property name="corePoolSize">
+            <value>10</value>
+        </property>
+        <property name="threadPriority">
+            <value>5</value>
+        </property>
+    </bean>
+
     <!-- Indexer and searchers for lucene -->
     <bean id="admLuceneIndexerAndSearcherFactory"
@@ -415,7 +424,9 @@
         <property name="defaultMLSearchAnalysisMode">
             <value>EXACT_LANGUAGE_AND_ALL</value>
         </property>
+        <property name="threadPoolExecutor">
+            <ref bean="indexThreadPoolExecutor"></ref>
+        </property>
     </bean>

     <!-- Indexer and searchers for lucene -->
@@ -481,7 +492,9 @@
         <property name="defaultMLSearchAnalysisMode">
             <value>EXACT_LANGUAGE_AND_ALL</value>
         </property>
+        <property name="threadPoolExecutor">
+            <ref bean="indexThreadPoolExecutor"></ref>
+        </property>
     </bean>
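
For reference, the indexThreadPoolExecutor bean amounts to roughly the following plain Java. This is a sketch assuming the factory bean maps corePoolSize and threadPriority onto a standard ThreadPoolExecutor; the keep-alive and rejection policy below mirror the defaults IndexInfo falls back to later in this commit, not verified factory internals:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class IndexPoolSketch
{
    public static ThreadPoolExecutor newIndexPool()
    {
        ThreadFactory factory = new ThreadFactory()
        {
            public Thread newThread(Runnable r)
            {
                Thread t = new Thread(r);
                t.setDaemon(true); // index work must not block JVM shutdown
                t.setPriority(5);  // the threadPriority property above
                return t;
            }
        };
        // corePoolSize 10, as in the bean definition
        return new ThreadPoolExecutor(10, 10, 90, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>(), factory,
                new ThreadPoolExecutor.CallerRunsPolicy());
    }
}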

View File

@@ -256,6 +256,19 @@
     />
     <!-- Internally used caches -->
+    <cache
+        name="org.alfresco.repo.avm.lookupSharedCache"
+        maxElementsInMemory="10000"
+        eternal="true"
+        overflowToDisk="false"
+    />
+    <cache
+        name="org.alfresco.repo.webservices.querySessionSharedCache"
+        maxElementsInMemory="1000"
+        eternal="false"
+        timeToLiveSeconds="300"
+        overflowToDisk="false"
+    />
     <cache
         name="org.alfresco.cache.parentAssocsCache"
         maxElementsInMemory="10000"

View File

@@ -45,7 +45,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </defaultCache>
     <cache
@@ -62,8 +61,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>
     <cache
@@ -79,8 +76,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>
     <cache
@@ -98,8 +93,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>
     <cache
@@ -117,8 +110,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>
     <cache
@@ -136,8 +127,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>
     <cache
@@ -155,8 +144,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>
     <cache
@@ -174,8 +161,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>
     <cache
@@ -193,8 +178,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>
     <cache
@@ -212,8 +195,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>
     <cache
@@ -231,8 +212,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>
     <cache
@@ -250,8 +229,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>
     <cache
@@ -269,8 +246,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>
     <cache
@@ -288,8 +263,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>
     <cache
@@ -307,8 +280,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>
     <cache
@@ -324,8 +295,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>
     <cache
@@ -341,8 +310,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>
     <cache
@@ -358,8 +325,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>
     <cache
@@ -375,8 +340,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>
     <cache
@@ -392,8 +355,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>
     <cache
@@ -409,12 +370,41 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>

     <!-- Non-Hibernate -->
+    <cache
+        name="org.alfresco.repo.avm.lookupSharedCache"
+        maxElementsInMemory="10000"
+        eternal="true"
+        overflowToDisk="false">
+        <cacheEventListenerFactory
+            class="net.sf.ehcache.distribution.RMICacheReplicatorFactory"
+            properties="replicatePuts = false,
+                        replicateUpdates = true,
+                        replicateRemovals = true,
+                        replicateUpdatesViaCopy = false,
+                        replicateAsynchronously = false"/>
+    </cache>
+    <cache
+        name="org.alfresco.repo.webservices.querySessionSharedCache"
+        maxElementsInMemory="1000"
+        eternal="false"
+        timeToLiveSeconds="300"
+        overflowToDisk="false">
+        <cacheEventListenerFactory
+            class="net.sf.ehcache.distribution.RMICacheReplicatorFactory"
+            properties="replicatePuts = false,
+                        replicateUpdates = true,
+                        replicateRemovals = true,
+                        replicateUpdatesViaCopy = false,
+                        replicateAsynchronously = false"/>
+    </cache>
     <cache
         name="org.alfresco.cache.parentAssocsCache"
         maxElementsInMemory="10000"
@@ -428,8 +418,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>
     <cache
@@ -445,8 +433,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>
     <cache
@@ -462,8 +448,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>
     <cache
@@ -479,8 +463,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>
     <cache
@@ -496,8 +478,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>
     <cache
@@ -513,8 +493,6 @@
                             replicateRemovals = true,
                             replicateUpdatesViaCopy = false,
                             replicateAsynchronously = false"/>
     </cache>

View File

@@ -740,6 +740,9 @@ public final class SMBErrorText
             case SMBStatus.NTPipeBusy:
                 errtext = "Pipe is busy";
                 break;
+            case SMBStatus.NTInvalidLevel:
+                errtext = "Invalid information level";
+                break;
             default:
                 errtext = "Unknown NT status 0x" + Integer.toHexString(errcode);
                 break;

View File

@@ -252,6 +252,7 @@ public final class SMBStatus
     public static final int NTNoSuchDomain = 0xC00000DF;
     public static final int NTTooManyOpenFiles = 0xC000011F;
     public static final int NTCancelled = 0xC0000120;
+    public static final int NTInvalidLevel = 0xC0000148;
     public static final int NTFileOffline = 0xC0000267;

     public static final int Win32FileNotFound = 2;

View File

@@ -3477,7 +3477,7 @@ public class NTProtocolHandler extends CoreProtocolHandler
                 // Requested information level is not supported
-                m_sess.sendErrorResponseSMB(SMBStatus.SRVNotSupported, SMBStatus.ErrSrv);
+                m_sess.sendErrorResponseSMB(SMBStatus.NTInvalidLevel, SMBStatus.SRVNotSupported, SMBStatus.ErrSrv);
             }
         }
@@ -3723,7 +3723,7 @@ public class NTProtocolHandler extends CoreProtocolHandler
                 // Requested information level is not supported
-                m_sess.sendErrorResponseSMB(SMBStatus.SRVNotSupported, SMBStatus.ErrSrv);
+                m_sess.sendErrorResponseSMB(SMBStatus.NTInvalidLevel, SMBStatus.SRVNotSupported, SMBStatus.ErrSrv);
             }
         }
@@ -4197,7 +4197,7 @@ public class NTProtocolHandler extends CoreProtocolHandler
                 // Requested information level is not supported
-                m_sess.sendErrorResponseSMB(SMBStatus.NTInvalidParameter, SMBStatus.SRVNonSpecificError, SMBStatus.ErrSrv);
+                m_sess.sendErrorResponseSMB(SMBStatus.NTInvalidLevel, SMBStatus.SRVNonSpecificError, SMBStatus.ErrSrv);
                 return;
             }
         }
@@ -4412,7 +4412,7 @@ public class NTProtocolHandler extends CoreProtocolHandler
                 // Requested information level is not supported
-                m_sess.sendErrorResponseSMB(SMBStatus.NTInvalidParameter, SMBStatus.SRVNonSpecificError, SMBStatus.ErrSrv);
+                m_sess.sendErrorResponseSMB(SMBStatus.NTInvalidLevel, SMBStatus.SRVNonSpecificError, SMBStatus.ErrSrv);
                 return;
             }
         }
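
All four hunks make the same change: the error response now carries the precise NT status (NTInvalidLevel) alongside the DOS class/code pair, rather than only a generic DOS error. A hedged sketch of the dispatch this implies follows; the real JLAN session API is not shown in this diff, so every name here is an illustrative stand-in:

// Illustration only: NT-dialect clients get the exact NT status, while
// older clients still receive the DOS error class/code pair.
public class ErrorResponseSketch
{
    private final boolean clientUsesNTStatus;

    public ErrorResponseSketch(boolean clientUsesNTStatus)
    {
        this.clientUsesNTStatus = clientUsesNTStatus;
    }

    public void sendErrorResponse(int ntError, int dosError, int dosErrorClass)
    {
        if (clientUsesNTStatus)
        {
            System.out.printf("NT status 0x%08X%n", ntError); // e.g. 0xC0000148 (NTInvalidLevel)
        }
        else
        {
            System.out.printf("DOS error class=%d code=%d%n", dosErrorClass, dosError);
        }
    }
}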

View File

@@ -150,6 +150,7 @@ public abstract class BaseNodeServiceTest extends BaseSpringTest
     protected NodeService nodeService;
     /** populated during setup */
     protected NodeRef rootNodeRef;
+    private NodeRef cat;

     @Override
     protected void onSetUpInTransaction() throws Exception
@@ -187,6 +188,13 @@ public abstract class BaseNodeServiceTest extends BaseSpringTest
                 "Test_" + System.currentTimeMillis());
         rootNodeRef = nodeService.getRootNode(storeRef);

+        StoreRef catStoreRef = nodeService.createStore(
+                StoreRef.PROTOCOL_WORKSPACE,
+                "Test_cat_" + System.currentTimeMillis());
+        NodeRef catRootNodeRef = nodeService.getRootNode(catStoreRef);
+
+        cat = nodeService.createNode(catRootNodeRef, ContentModel.ASSOC_CHILDREN, QName.createQName("{namespace}cat"), ContentModel.TYPE_CATEGORY).getChildRef();
+
         // downgrade integrity checks
         IntegrityChecker.setWarnInTransaction();
     }
@@ -1142,7 +1150,7 @@ public abstract class BaseNodeServiceTest extends BaseSpringTest
         properties.put(PROP_QNAME_QNAME_VALUE, TYPE_QNAME_TEST_CONTENT);
         properties.put(PROP_QNAME_PATH_VALUE, pathProperty);
         properties.put(PROP_QNAME_CONTENT_VALUE, new ContentData("url", "text/plain", 88L, "UTF-8"));
-        properties.put(PROP_QNAME_CATEGORY_VALUE, rootNodeRef);
+        properties.put(PROP_QNAME_CATEGORY_VALUE, cat);
         properties.put(PROP_QNAME_LOCALE_VALUE, Locale.CHINESE);
         properties.put(PROP_QNAME_NULL_VALUE, null);
         properties.put(PROP_QNAME_MULTI_VALUE, listProperty);
@@ -1180,7 +1188,7 @@ public abstract class BaseNodeServiceTest extends BaseSpringTest
         properties.put(PROP_QNAME_QNAME_VALUE, TYPE_QNAME_TEST_CONTENT);
         properties.put(PROP_QNAME_PATH_VALUE, pathProperty);
         properties.put(PROP_QNAME_CONTENT_VALUE, new ContentData("url", "text/plain", 88L, "UTF-8"));
-        properties.put(PROP_QNAME_CATEGORY_VALUE, rootNodeRef);
+        properties.put(PROP_QNAME_CATEGORY_VALUE, cat);
         properties.put(PROP_QNAME_LOCALE_VALUE, Locale.CHINESE);
         properties.put(PROP_QNAME_NULL_VALUE, null);
         properties.put(PROP_QNAME_MULTI_VALUE, listProperty);

View File

@@ -29,6 +29,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.ThreadPoolExecutor;

 import javax.transaction.RollbackException;
 import javax.transaction.SystemException;
@@ -128,6 +129,8 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
     private MLAnalysisMode defaultMLSearchAnalysisMode = MLAnalysisMode.EXACT_LANGUAGE_AND_ALL;

+    private ThreadPoolExecutor threadPoolExecutor;
+
     /**
      * Private constructor for the singleton TODO: FIt in with IOC
      */
@@ -838,6 +841,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
     /**
      * Set the lucene write lock timeout
+     *
      * @param timeout
      */
     public void setWriteLockTimeout(long timeout)
@@ -847,6 +851,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
     /**
      * Set the lucene commit lock timeout (no longer used with lucene 2.1)
+     *
      * @param timeout
      */
     public void setCommitLockTimeout(long timeout)
@@ -856,6 +861,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
     /**
      * Get the commit lock timout.
+     *
      * @return - the timeout
      */
     public long getCommitLockTimeout()
@@ -865,6 +871,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
     /**
      * Get the write lock timeout
+     *
      * @return - the timeout in ms
      */
     public long getWriteLockTimeout()
@@ -884,6 +891,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
     /**
      * Get the max number of tokens in the field
+     *
      * @return - the max tokens considered.
      */
     public int getIndexerMaxFieldLength()
@@ -893,6 +901,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
     /**
      * Set the max field length.
+     *
      * @param indexerMaxFieldLength
      */
     public void setIndexerMaxFieldLength(int indexerMaxFieldLength)
@@ -900,6 +909,16 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
         this.indexerMaxFieldLength = indexerMaxFieldLength;
     }

+    public ThreadPoolExecutor getThreadPoolExecutor()
+    {
+        return this.threadPoolExecutor;
+    }
+
+    public void setThreadPoolExecutor(ThreadPoolExecutor threadPoolExecutor)
+    {
+        this.threadPoolExecutor = threadPoolExecutor;
+    }
+
     /**
      * This component is able to <i>safely</i> perform backups of the Lucene indexes while the server is running.
      * <p>
@@ -922,7 +941,6 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
         /**
          * Default constructor
-         *
          */
         public LuceneIndexBackupComponent()
         {
@@ -1082,8 +1100,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
                 // make sure the rename worked
                 if (!targetDir.exists())
                 {
-                    throw new AlfrescoRuntimeException(
-                            "Failed to rename temporary directory to target backup directory");
+                    throw new AlfrescoRuntimeException("Failed to rename temporary directory to target backup directory");
                 }
             }
         }
@@ -1105,8 +1122,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
         public void execute(JobExecutionContext context) throws JobExecutionException
         {
             JobDataMap jobData = context.getJobDetail().getJobDataMap();
-            LuceneIndexBackupComponent backupComponent = (LuceneIndexBackupComponent) jobData
-                    .get(KEY_LUCENE_INDEX_BACKUP_COMPONENT);
+            LuceneIndexBackupComponent backupComponent = (LuceneIndexBackupComponent) jobData.get(KEY_LUCENE_INDEX_BACKUP_COMPONENT);
             if (backupComponent == null)
             {
                 throw new JobExecutionException("Missing job data: " + KEY_LUCENE_INDEX_BACKUP_COMPONENT);
@@ -1139,6 +1155,7 @@ public abstract class AbstractLuceneIndexerAndSearcherFactory implements LuceneI
     /**
      * Set the ML analysis mode at search time
+     *
      * @param mode
      */
     public void setDefaultMLSearchAnalysisMode(MLAnalysisMode mode)

View File

@@ -24,6 +24,8 @@
  */
 package org.alfresco.repo.search.impl.lucene;

+import java.util.concurrent.ThreadPoolExecutor;
+
 import org.alfresco.repo.search.MLAnalysisMode;

 public interface LuceneConfig
@@ -74,4 +76,11 @@ public interface LuceneConfig
      */
     public int getIndexerMaxFieldLength();

+    /**
+     * Get the thread pool for index merging etc
+     *
+     * @return
+     */
+    public ThreadPoolExecutor getThreadPoolExecutor();
+
 }

View File

@@ -49,7 +49,12 @@ import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.Timer;
+import java.util.TimerTask;
 import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.zip.CRC32;
@@ -59,6 +64,7 @@ import org.alfresco.repo.search.impl.lucene.FilterIndexReaderByStringId;
 import org.alfresco.repo.search.impl.lucene.LuceneConfig;
 import org.alfresco.repo.search.impl.lucene.analysis.AlfrescoStandardAnalyser;
 import org.alfresco.util.GUID;
+import org.alfresco.util.TraceableThreadFactory;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.lucene.analysis.Analyzer;
@@ -114,6 +120,8 @@ import org.apache.lucene.store.RAMDirectory;
  */
 public class IndexInfo
 {
+    private static Timer timer = new Timer();
+
     /**
      * The logger.
      */
@@ -214,8 +222,7 @@ public class IndexInfo
     /**
      * Map of state transitions
      */
-    private EnumMap<TransactionStatus, Transition> transitions = new EnumMap<TransactionStatus, Transition>(
-            TransactionStatus.class);
+    private EnumMap<TransactionStatus, Transition> transitions = new EnumMap<TransactionStatus, Transition>(TransactionStatus.class);

     /**
      * The queue of files and folders to delete
@@ -236,8 +243,7 @@
     /**
      * The thread that deletes old index data
      */
-    private Thread cleanerThread;
+    // private Thread cleanerThread;

     /**
      * The class the supports index merging and applying deletions from deltas to indexes and deltas that go before it.
      */
@@ -247,8 +253,7 @@
     /**
      * The thread that carries out index merging and applying deletions from deltas to indexes and deltas that go before
      * it.
      */
-    private Thread mergerThread;
+    // private Thread mergerThread;

     /**
      * A shared empty index to use if non exist.
      */
@@ -298,13 +303,15 @@
     /**
      * Control if the cleaner thread is active
      */
-    private boolean enableCleanerThread = true;
+    private boolean enableCleaner = true;

     /**
      * Control if the merger thread is active
      */
-    private boolean enableMergerThread = true;
+    private boolean enableMerger = true;
+
+    private ThreadPoolExecutor threadPoolExecutor;

     static
     {
@@ -359,11 +366,20 @@
         super();
         initialiseTransitions();
-        if(config != null)
+        if (config != null)
         {
             this.maxFieldLength = config.getIndexerMaxFieldLength();
+            this.threadPoolExecutor = config.getThreadPoolExecutor();
         }
+        else
+        {
+            // need a default thread pool ....
+            TraceableThreadFactory threadFactory = new TraceableThreadFactory();
+            threadFactory.setThreadDaemon(true);
+            threadFactory.setThreadPriority(5);
+
+            threadPoolExecutor = new ThreadPoolExecutor(10, 10, 90, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), threadFactory, new ThreadPoolExecutor.CallerRunsPolicy());
+        }

         // Create an empty in memory index
         IndexWriter writer;
@@ -446,15 +462,13 @@
                 long docs = writer.docCount();
                 writer.close();

-                IndexEntry entry = new IndexEntry(IndexType.INDEX, OLD_INDEX, "",
-                        TransactionStatus.COMMITTED, "", docs, 0, false);
+                IndexEntry entry = new IndexEntry(IndexType.INDEX, OLD_INDEX, "", TransactionStatus.COMMITTED, "", docs, 0, false);
                 indexEntries.put(OLD_INDEX, entry);
                 writeStatus();

                 // The index exists and we should initialise the single reader
-                registerReferenceCountingIndexReader(entry.getName(),
-                        buildReferenceCountingIndexReader(entry.getName()));
+                registerReferenceCountingIndexReader(entry.getName(), buildReferenceCountingIndexReader(entry.getName()));
             }
             catch (IOException e)
             {
@@ -523,8 +537,7 @@
                                     s_logger.info("Resetting merge to committed " + entry);
                                 }
                                 entry.setStatus(TransactionStatus.COMMITTED);
-                                registerReferenceCountingIndexReader(entry.getName(),
-                                        buildReferenceCountingIndexReader(entry.getName()));
+                                registerReferenceCountingIndexReader(entry.getName(), buildReferenceCountingIndexReader(entry.getName()));
                                 break;
                             // Complete committing (which is post database
                             // commit)
@@ -535,14 +548,12 @@
                                     s_logger.info("Committing " + entry);
                                 }
                                 entry.setStatus(TransactionStatus.COMMITTED);
-                                registerReferenceCountingIndexReader(entry.getName(),
-                                        buildReferenceCountingIndexReader(entry.getName()));
+                                registerReferenceCountingIndexReader(entry.getName(), buildReferenceCountingIndexReader(entry.getName()));
                                 mainIndexReader = null;
                                 break;
                             // States that require no action
                             case COMMITTED:
-                                registerReferenceCountingIndexReader(entry.getName(),
-                                        buildReferenceCountingIndexReader(entry.getName()));
+                                registerReferenceCountingIndexReader(entry.getName(), buildReferenceCountingIndexReader(entry.getName()));
                                 break;
                             default:
                                 // nothing to do
@@ -555,14 +566,11 @@
                         indexEntries.remove(id);
                     }
                     clearOldReaders();
-                    synchronized (cleaner)
-                    {
-                        cleaner.notify();
-                    }
-                    synchronized (merger)
-                    {
-                        merger.notify();
-                    }
+
+                    cleaner.schedule();
+
+                    merger.schedule();
+
                     // persist the new state
                     writeStatus();
                 }
@@ -576,25 +584,16 @@
                 releaseWriteLock();
             }
         }
-        // TODO: Add unrecognised folders for deletion.
-        if (enableCleanerThread)
-        {
-            cleanerThread = new Thread(cleaner);
-            cleanerThread.setDaemon(true);
-            cleanerThread.setName("Index cleaner thread " + indexDirectory);
-            cleanerThread.start();
-        }
-        if (enableMergerThread)
-        {
-            mergerThread = new Thread(merger);
-            mergerThread.setDaemon(true);
-            mergerThread.setName("Index merger thread " + indexDirectory);
-            mergerThread.start();
-        }
+        // Run the cleaner around every 20 secods - this just makes the request to the thread pool
+        timer.schedule(new TimerTask()
+        {
+            @Override
+            public void run()
+            {
+                cleaner.schedule();
+            }
+        }, 0, 20000);
     }
@@ -674,8 +673,7 @@
             // Make sure the index exists
             if (!indexEntries.containsKey(id))
             {
-                indexEntries.put(id, new IndexEntry(IndexType.DELTA, id, "", TransactionStatus.ACTIVE, "", 0,
-                        0, false));
+                indexEntries.put(id, new IndexEntry(IndexType.DELTA, id, "", TransactionStatus.ACTIVE, "", 0, 0, false));
             }
         }
         finally
@@ -868,8 +866,7 @@
      *            should deletions on apply to nodes (ie not to containers)
      * @throws IOException
      */
-    public void setPreparedState(String id, Set<String> toDelete, long documents, boolean deleteNodesOnly)
-            throws IOException
+    public void setPreparedState(String id, Set<String> toDelete, long documents, boolean deleteNodesOnly) throws IOException
     {
         if (id == null)
         {
@@ -887,8 +884,7 @@
             }
         }
         // Write deletions
-        DataOutputStream os = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(new File(location,
-                INDEX_INFO_DELETIONS).getCanonicalFile())));
+        DataOutputStream os = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(new File(location, INDEX_INFO_DELETIONS).getCanonicalFile())));
         os.writeInt(toDelete.size());
         for (String ref : toDelete)
         {
@@ -905,8 +901,7 @@
             {
                 throw new IndexerException("Invalid index delta id " + id);
             }
-            if ((entry.getStatus() != TransactionStatus.PREPARING)
-                    && (entry.getStatus() != TransactionStatus.COMMITTING))
+            if ((entry.getStatus() != TransactionStatus.PREPARING) && (entry.getStatus() != TransactionStatus.COMMITTING))
             {
                 throw new IndexerException("Deletes and doc count can only be set on a preparing index");
             }
@@ -1000,8 +995,7 @@
      * @return
      * @throws IOException
      */
-    public IndexReader getMainIndexReferenceCountingReadOnlyIndexReader(String id, Set<String> deletions,
-            boolean deleteOnlyNodes) throws IOException
+    public IndexReader getMainIndexReferenceCountingReadOnlyIndexReader(String id, Set<String> deletions, boolean deleteOnlyNodes) throws IOException
     {
         if (id == null)
         {
@@ -1073,9 +1067,7 @@
             }
             else
             {
-                reader = new MultiReader(new IndexReader[] {
-                        new FilterIndexReaderByStringId("main+id", mainIndexReader, deletions, deleteOnlyNodes),
-                        deltaReader });
+                reader = new MultiReader(new IndexReader[] { new FilterIndexReaderByStringId("main+id", mainIndexReader, deletions, deleteOnlyNodes), deltaReader });
             }
             reader = ReferenceCountingReadOnlyIndexReaderFactory.createReader("MainReader" + id, reader);
             ReferenceCounting refCounting = (ReferenceCounting) reader;
@@ -1089,8 +1081,7 @@
         }
     }

-    public void setStatus(final String id, final TransactionStatus state, final Set<Term> toDelete, final Set<Term> read)
-            throws IOException
+    public void setStatus(final String id, final TransactionStatus state, final Set<Term> toDelete, final Set<Term> read) throws IOException
     {
         if (id == null)
         {
@@ -1229,8 +1220,7 @@
                 }
                 else
                 {
-                    throw new IndexerException("Invalid transition for "
-                            + id + " from " + entry.getStatus() + " to " + TransactionStatus.PREPARING);
+                    throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.PREPARING);
                 }
             }
@@ -1303,8 +1293,7 @@
                 }
                 else
                 {
-                    throw new IndexerException("Invalid transition for "
-                            + id + " from " + entry.getStatus() + " to " + TransactionStatus.PREPARED);
+                    throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.PREPARED);
                 }
             }
@@ -1335,8 +1324,7 @@
                 }
                 else
                 {
-                    throw new IndexerException("Invalid transition for "
-                            + id + " from " + entry.getStatus() + " to " + TransactionStatus.COMMITTING);
+                    throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.COMMITTING);
                 }
             }
@@ -1391,17 +1379,13 @@
                         mainIndexReader = null;
                     }

-                    synchronized (merger)
-                    {
-                        merger.notify();
-                    }
+                    merger.schedule();
                 }
             }
             else
             {
-                throw new IndexerException("Invalid transition for "
-                        + id + " from " + entry.getStatus() + " to " + TransactionStatus.COMMITTED);
+                throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.COMMITTED);
             }
         }
@@ -1433,8 +1417,7 @@
                 }
                 else
                 {
-                    throw new IndexerException("Invalid transition for "
-                            + id + " from " + entry.getStatus() + " to " + TransactionStatus.ROLLINGBACK);
+                    throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.ROLLINGBACK);
                 }
             }
@@ -1466,8 +1449,7 @@
                 }
                 else
                 {
-                    throw new IndexerException("Invalid transition for "
-                            + id + " from " + entry.getStatus() + " to " + TransactionStatus.ROLLEDBACK);
+                    throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.ROLLEDBACK);
                }
             }
@@ -1495,17 +1477,13 @@
             if (TransactionStatus.DELETABLE.follows(entry.getStatus()))
             {
                 indexEntries.remove(id);
-                synchronized (cleaner)
-                {
-                    cleaner.notify();
-                }
+
+                cleaner.schedule();
+
                 writeStatus();
                 clearOldReaders();
             }
             else
             {
-                throw new IndexerException("Invalid transition for "
-                        + id + " from " + entry.getStatus() + " to " + TransactionStatus.DELETABLE);
+                throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.DELETABLE);
             }
         }
@@ -1535,13 +1513,11 @@
             if (TransactionStatus.ACTIVE.follows(null))
             {
-                indexEntries
-                        .put(id, new IndexEntry(IndexType.DELTA, id, "", TransactionStatus.ACTIVE, "", 0, 0, false));
+                indexEntries.put(id, new IndexEntry(IndexType.DELTA, id, "", TransactionStatus.ACTIVE, "", 0, 0, false));
             }
             else
             {
-                throw new IndexerException("Invalid transition for "
-                        + id + " from " + entry.getStatus() + " to " + TransactionStatus.ACTIVE);
+                throw new IndexerException("Invalid transition for " + id + " from " + entry.getStatus() + " to " + TransactionStatus.ACTIVE);
             }
         }
@@ -1695,9 +1671,8 @@
                 {
                     try
                     {
-                        reader = new MultiReader(new IndexReader[] {
-                                new FilterIndexReaderByStringId(id, reader, getDeletions(entry.getName()), entry
-                                        .isDeletOnlyNodes()), subReader });
+                        reader = new MultiReader(new IndexReader[] { new FilterIndexReaderByStringId(id, reader, getDeletions(entry.getName()), entry.isDeletOnlyNodes()),
+                                subReader });
                     }
                     catch (IOException ioe)
                     {
@@ -1731,9 +1706,9 @@
     private void registerReferenceCountingIndexReader(String id, IndexReader reader)
     {
         ReferenceCounting referenceCounting = (ReferenceCounting) reader;
-        if(!referenceCounting.getId().equals(id))
+        if (!referenceCounting.getId().equals(id))
         {
-            throw new IllegalStateException("Registering "+referenceCounting.getId()+ " as "+id);
+            throw new IllegalStateException("Registering " + referenceCounting.getId() + " as " + id);
         }
         referenceCountingReadOnlyIndexReaders.put(id, reader);
     }
@@ -1879,8 +1854,7 @@
                     if (!status.isTransient())
                     {
-                        newIndexEntries.put(name, new IndexEntry(indexType, name, parentName, status, mergeId,
-                                documentCount, deletions, isDeletOnlyNodes));
+                        newIndexEntries.put(name, new IndexEntry(indexType, name, parentName, status, mergeId, documentCount, deletions, isDeletOnlyNodes));
                     }
                 }
                 long onDiskCRC32 = buffer.getLong();
@@ -2139,44 +2113,42 @@
             }
             IndexReader reader = ii.getMainIndexReferenceCountingReadOnlyIndexReader();

             TermEnum terms = reader.terms(new Term("@{archiweb.model}instance", ""));
-            while(terms.next() && terms.term().field().equals("@{archiweb.model}instance"))
+            while (terms.next() && terms.term().field().equals("@{archiweb.model}instance"))
             {
-                System.out.println("F = " +terms.term().field() + " V = "+terms.term().text() + " F = "+terms.docFreq());
+                System.out.println("F = " + terms.term().field() + " V = " + terms.term().text() + " F = " + terms.docFreq());
             }
             terms.close();
             long start = System.currentTimeMillis();
             TermDocs termDocs = reader.termDocs(new Term("@{archiweb.model}instance", "tfl"));
-            while(termDocs.next())
+            while (termDocs.next())
             {
-                //System.out.println("Doc = " + termDocs.doc());
+                // System.out.println("Doc = " + termDocs.doc());
                 Document doc = reader.document(termDocs.doc());
                 doc.getField("ID");
-                //System.out.println("Ref = "+doc.getField("ID"));
+                // System.out.println("Ref = "+doc.getField("ID"));
             }
             termDocs.close();
-            System.out.println("Time = "+((System.currentTimeMillis() - start)/1000.0f));
+            System.out.println("Time = " + ((System.currentTimeMillis() - start) / 1000.0f));

             terms = reader.terms(new Term("TYPE", ""));
-            while(terms.next() && terms.term().field().equals("TYPE"))
+            while (terms.next() && terms.term().field().equals("TYPE"))
             {
-                System.out.println("F = " +terms.term().field() + " V = "+terms.term().text() + " F = "+terms.docFreq());
+                System.out.println("F = " + terms.term().field() + " V = " + terms.term().text() + " F = " + terms.docFreq());
             }
             terms.close();
             start = System.currentTimeMillis();
-            termDocs = reader.termDocs(new Term("TYPE","{archiweb.model}tfdoc"));
-            while(termDocs.next())
+            termDocs = reader.termDocs(new Term("TYPE", "{archiweb.model}tfdoc"));
+            while (termDocs.next())
             {
-                //System.out.println("Doc = " + termDocs.doc());
+                // System.out.println("Doc = " + termDocs.doc());
                 Document doc = reader.document(termDocs.doc());
                 doc.getField("ID");
-                //System.out.println("Ref = "+doc.getField("ID"));
+                // System.out.println("Ref = "+doc.getField("ID"));
             }
             termDocs.close();
-            System.out.println("Time = "+((System.currentTimeMillis() - start)/1000.0f));
+            System.out.println("Time = " + ((System.currentTimeMillis() - start) / 1000.0f));

-            //+@\{archiweb.model\}instance:TFL*
+            // +@\{archiweb.model\}instance:TFL*
         }
     }
@@ -2185,14 +2157,12 @@
      *
      * @author Andy Hind
      */
-    private class Cleaner implements Runnable
+    private class Cleaner extends AbstractSchedulable
     {
         public void run()
         {
-            boolean runnable = true;
-            while (runnable)
-            {
             // Add any closed index readers we were waiting for
             HashSet<IndexReader> waiting = new HashSet<IndexReader>();
             IndexReader reader;
@@ -2205,8 +2175,7 @@
                 {
                     s_logger.debug("Deleting no longer referenced " + refCounting.getId());
                     s_logger.debug("... queued delete for " + refCounting.getId());
-                    s_logger.debug("... "
-                            + ReferenceCountingReadOnlyIndexReaderFactory.getState(refCounting.getId()));
+                    s_logger.debug("... " + ReferenceCountingReadOnlyIndexReaderFactory.getState(refCounting.getId()));
                 }
                 getReadLock();
                 try
@@ -2259,28 +2228,8 @@
                 }
             }
             deleteQueue.addAll(fails);
-            synchronized (this)
-            {
-                try
-                {
-                    // wait for more deletes
-                    if (deleteQueue.size() > 0)
-                    {
-                        this.wait(20000);
-                    }
-                    else
-                    {
-                        this.wait();
-                    }
-                }
-                catch (InterruptedException e)
-                {
-                    runnable = false;
-                    s_logger.warn("Cleaner thread for " + indexDirectory + "stopped by interruption.");
-                }
-            }
-        }
+
+            done();
         }

         private boolean deleteDirectory(File file)
@@ -2329,14 +2278,55 @@
         NONE, MERGE_INDEX, APPLY_DELTA_DELETION, MERGE_DELTA
     }

-    private class Merger implements Runnable
+    private abstract class AbstractSchedulable implements Schedulable, Runnable
     {
+        boolean scheduled = false;
+
+        public synchronized void schedule()
+        {
+            if (!scheduled)
+            {
+                threadPoolExecutor.execute(this);
+                scheduled = true;
+            }
+            else
+            {
+                // already done
+            }
+        }
+
+        public synchronized void done()
+        {
+            if (scheduled)
+            {
+                scheduled = false;
+            }
+            else
+            {
+                throw new IllegalStateException();
+            }
+        }
+
+        public synchronized void reschedule()
+        {
+            if (scheduled)
+            {
+                threadPoolExecutor.execute(this);
+            }
+            else
+            {
+                throw new IllegalStateException();
+            }
+        }
+    }
+
+    private class Merger extends AbstractSchedulable
+    {
         public void run()
         {
-            boolean running = true;
-
-            while (running)
-            {
             try
             {
                 // Get the read local to decide what to do
@@ -2448,19 +2438,13 @@
                     mergeIndexes();
                 }

-                synchronized (this)
-                {
-                    try
-                    {
-                        if (action == MergeAction.NONE)
-                        {
-                            this.wait();
-                        }
-                    }
-                    catch (InterruptedException e)
-                    {
-                        // No action - could signal thread termination
-                    }
-                }
+                if (action == MergeAction.NONE)
+                {
+                    done();
+                }
+                else
+                {
+                    reschedule();
+                }
             }
             catch (Throwable t)
             {
@@ -2469,8 +2453,6 @@
                 }
             }
-        }

         void mergeDeletions()
         {
             if (s_logger.isDebugEnabled())
@@ -2496,8 +2478,7 @@
                 {
                     return set;
                 }
-                if ((entry.getType() == IndexType.DELTA)
-                        && (entry.getStatus() == TransactionStatus.COMMITTED_DELETING))
+                if ((entry.getType() == IndexType.DELTA) && (entry.getStatus() == TransactionStatus.COMMITTED_DELETING))
                 {
                     return set;
                 }
@@ -2631,9 +2612,7 @@
                         {
                             if (s_logger.isDebugEnabled())
                             {
-                                s_logger.debug("Deleted "
-                                        + deletedCount + " from " + key + " for id " + stringRef
-                                        + " remaining docs " + reader.numDocs());
+                                s_logger.debug("Deleted " + deletedCount + " from " + key + " for id " + stringRef + " remaining docs " + reader.numDocs());
                             }
                             invalidIndexes.add(key);
                         }
@@ -2812,8 +2791,7 @@
                 {
                     return set;
                 }
-                if ((entry.getType() == IndexType.DELTA)
-                        && (entry.getStatus() == TransactionStatus.COMMITTED_DELETING))
+                if ((entry.getType() == IndexType.DELTA) && (entry.getStatus() == TransactionStatus.COMMITTED_DELETING))
                 {
                     return set;
                 }
@@ -2822,8 +2800,7 @@
             ArrayList<IndexEntry> mergeList = new ArrayList<IndexEntry>();
             for (IndexEntry entry : indexEntries.values())
             {
-                if ((entry.getType() == IndexType.INDEX)
-                        && (entry.getStatus() == TransactionStatus.COMMITTED))
+                if ((entry.getType() == IndexType.INDEX) && (entry.getStatus() == TransactionStatus.COMMITTED))
                 {
                     mergeList.add(entry);
                 }
@@ -2849,8 +2826,7 @@
             if (set.size() > 0)
             {
-                IndexEntry target = new IndexEntry(IndexType.INDEX, guid, "",
-                        TransactionStatus.MERGE_TARGET, guid, count, 0, false);
+                IndexEntry target = new IndexEntry(IndexType.INDEX, guid, "", TransactionStatus.MERGE_TARGET, guid, count, 0, false);
                 set.put(guid, target);
                 // rebuild merged index elements
                 LinkedHashMap<String, IndexEntry> reordered = new LinkedHashMap<String, IndexEntry>();
@@ -3056,10 +3032,7 @@
                     clearOldReaders();

-                    synchronized (cleaner)
-                    {
-                        cleaner.notify();
-                    }
+                    cleaner.schedule();

                     return null;
                 }
@@ -3102,6 +3075,7 @@
             }
             return 0;
         }
+    }

     private void dumpInfo()
@@ -3194,22 +3168,22 @@
     public boolean isEnableCleanerThread()
     {
-        return enableCleanerThread;
+        return enableCleaner;
     }

-    public void setEnableCleanerThread(boolean enableCleanerThread)
+    public void setEnableCleanerThread(boolean enableCleaner)
     {
-        this.enableCleanerThread = enableCleanerThread;
+        this.enableCleaner = enableCleaner;
     }

-    public boolean isEnableMergerThread()
+    public boolean isEnableMerger()
     {
-        return enableMergerThread;
+        return enableMerger;
     }

-    public void setEnableMergerThread(boolean enableMergerThread)
+    public void setEnableMerger(boolean enableMerger)
     {
-        this.enableMergerThread = enableMergerThread;
+        this.enableMerger = enableMerger;
    }

     public boolean isIndexIsShared()
@@ -3322,4 +3296,14 @@
         this.writerUseCompoundFile = writerUseCompoundFile;
     }

+    interface Schedulable
+    {
+        void schedule();
+
+        public void done();
+
+        public void reschedule();
+    }
+
 }
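
The net effect of the IndexInfo changes above: the dedicated cleaner and merger daemon threads, driven by wait()/notify(), are replaced by Schedulable workers that submit themselves to the shared thread pool, with a Timer re-requesting the cleaner every 20 seconds. A minimal standalone sketch of the schedule/done/reschedule pattern, using only java.util.concurrent (an illustration, not the Alfresco class):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class SchedulableSketch implements Runnable
{
    private final ThreadPoolExecutor pool = new ThreadPoolExecutor(
            2, 2, 90, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>());
    private boolean scheduled = false;

    public synchronized void schedule()
    {
        if (!scheduled) // collapse duplicate requests into a single pending run
        {
            scheduled = true;
            pool.execute(this);
        }
    }

    private synchronized void done()
    {
        scheduled = false; // the next schedule() may submit again
    }

    public void run()
    {
        if (doUnitOfWork())
        {
            pool.execute(this); // reschedule: stay marked as scheduled
        }
        else
        {
            done();
        }
    }

    private boolean doUnitOfWork()
    {
        return false; // placeholder for merge/clean work
    }
}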

View File

@@ -60,7 +60,7 @@ public class TraceableThreadFactory implements ThreadFactory
     private int threadPriority;

-    TraceableThreadFactory()
+    public TraceableThreadFactory()
     {
         this.group = new ThreadGroup("TraceableThreadGroup-" + factoryNumber.getAndIncrement());
         TraceableThreadFactory.activeThreadGroups.add(this.group);