Merged V2.9 to HEAD

9845: Merged V2.2 to V2.9
      9733: Merged V2.1 to V2.2
         9281: Improvements to index AUTO recovery
         9316: Fixed ETWOONE-193: Transactional caches not being cleaned up after rollback (2.1.4 regression)
         9317: Fixed ETWOONE-194: Faster void handling during index tracking
         9365: Improved performance for finding which snapshots have been indexed
         9413: Support to retrieve read/write state of the transaction and ensure Lucene commits are handled last
         9414: ACT-3245: Updating node properties and aspects doesn't bump the alf_node.version value
         9415: Code cleanup: Removed unnecessary empty methods
         9416: Fixed creation of multiple thread pools
         9417: Full index recovery absorbs indexing exceptions by default
         9418: Added AUTO index recovery option to sample in line with Wiki docs
         9419: ETWOONE-194: Index tracking is too slow
         9420: Fixed ETWOONE-201: Better logging and configurability for RetryingTransactionHelper
         9421: Fixed ETWOONE-202: Split person cleanup doesn't break read-only transactions
         9422: Follow up on CHK-3317: Removed use of JDK 1.6 NavigableMap interface
         9423: Fixed unit test after CHK-3317
         9424: More test fixes after CHK-3317
         9425: Ensure that index tracking tests don't run too long.
         9426: Made concurrent reindexing optional.  It is on by default.
         9509: ACT-3539: Mid-transaction locking on Lucene resources
         9547: Multithreaded index tracking startup: Handle previously lagging single-threaded rebuilds


git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/alfresco/HEAD/root@10592 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
This commit is contained in:
Derek Hulley
2008-08-30 03:11:18 +00:00
parent 75646b4234
commit 0ac7884d1b
31 changed files with 1934 additions and 536 deletions

View File

@@ -213,6 +213,9 @@
<!-- The person service. --> <!-- The person service. -->
<bean id="personService" class="org.alfresco.repo.security.person.PersonServiceImpl" init-method="init"> <bean id="personService" class="org.alfresco.repo.security.person.PersonServiceImpl" init-method="init">
<property name="transactionService">
<ref bean="transactionService" />
</property>
<property name="nodeService"> <property name="nodeService">
<ref bean="nodeService" /> <ref bean="nodeService" />
</property> </property>

View File

@@ -381,6 +381,21 @@
<import resource="classpath*:alfresco/extension/bootstrap/*-context.xml" /> <import resource="classpath*:alfresco/extension/bootstrap/*-context.xml" />
<!-- Perform index recovery before applying any patches -->
<!-- rebuild the index if required - before we check that it is there -->
<bean id="indexRecoveryBootstrap" class="org.alfresco.repo.node.index.IndexRecoveryBootstrapBean" >
<property name="indexRecoveryComponent">
<ref bean="indexRecoveryComponent"/>
</property>
</bean>
<bean id="avmIndexRecoveryBootstrap" class="org.alfresco.repo.node.index.IndexRecoveryBootstrapBean" >
<property name="indexRecoveryComponent">
<ref bean="avmIndexRecoveryComponent"/>
</property>
</bean>
<!-- Descriptor Service --> <!-- Descriptor Service -->
<bean id="descriptorComponent" class="org.alfresco.repo.descriptor.DescriptorServiceImpl"> <bean id="descriptorComponent" class="org.alfresco.repo.descriptor.DescriptorServiceImpl">
@@ -404,21 +419,6 @@
</property> </property>
</bean> </bean>
<!-- Perform index recovery before applying any patches -->
<!-- rebuild the index if required - before we check that it is there -->
<bean id="indexRecoveryBootstrap" class="org.alfresco.repo.node.index.IndexRecoveryBootstrapBean" >
<property name="indexRecoveryComponent">
<ref bean="indexRecoveryComponent"/>
</property>
</bean>
<bean id="avmIndexRecoveryBootstrap" class="org.alfresco.repo.node.index.IndexRecoveryBootstrapBean" >
<property name="indexRecoveryComponent">
<ref bean="avmIndexRecoveryComponent"/>
</property>
</bean>
<!-- This component checks the interconnection between the metadata, indexes and content --> <!-- This component checks the interconnection between the metadata, indexes and content -->
<bean id="configurationChecker" class="org.alfresco.repo.admin.ConfigurationChecker"> <bean id="configurationChecker" class="org.alfresco.repo.admin.ConfigurationChecker">
<property name="strict"> <property name="strict">

View File

@@ -267,6 +267,18 @@
<property name="allowWrite"> <property name="allowWrite">
<value>${server.transaction.allow-writes}</value> <value>${server.transaction.allow-writes}</value>
</property> </property>
<property name="maxRetries">
<value>${server.transaction.max-retries}</value>
</property>
<property name="minRetryWaitMs">
<value>${server.transaction.min-retry-wait-ms}</value>
</property>
<property name="maxRetryWaitMs">
<value>${server.transaction.max-retry-wait-ms}</value>
</property>
<property name="retryWaitIncrementMs">
<value>${server.transaction.wait-increment-ms}</value>
</property>
</bean> </bean>
<bean id="retryingTransactionHelper" class="org.alfresco.repo.transaction.RetryingTransactionHelper"> <bean id="retryingTransactionHelper" class="org.alfresco.repo.transaction.RetryingTransactionHelper">
@@ -276,6 +288,15 @@
<property name="maxRetries"> <property name="maxRetries">
<value>${server.transaction.max-retries}</value> <value>${server.transaction.max-retries}</value>
</property> </property>
<property name="minRetryWaitMs">
<value>${server.transaction.min-retry-wait-ms}</value>
</property>
<property name="maxRetryWaitMs">
<value>${server.transaction.max-retry-wait-ms}</value>
</property>
<property name="retryWaitIncrementMs">
<value>${server.transaction.wait-increment-ms}</value>
</property>
</bean> </bean>
<bean id="readWriteTransactionDefinition" class="org.springframework.transaction.support.DefaultTransactionDefinition"> <bean id="readWriteTransactionDefinition" class="org.springframework.transaction.support.DefaultTransactionDefinition">
@@ -459,12 +480,15 @@
</property> </property>
</bean> </bean>
<bean id="indexThreadPoolExecutor" class="org.alfresco.util.ThreadPoolExecutorFactoryBean" singleton="true"> <bean id="indexThreadPoolExecutor" class="org.alfresco.util.ThreadPoolExecutorFactoryBean">
<property name="corePoolSize"> <property name="corePoolSize">
<value>10</value> <value>10</value>
</property> </property>
<property name="maximumPoolSize">
<value>20</value>
</property>
<property name="threadPriority"> <property name="threadPriority">
<value>5</value> <value>7</value>
</property> </property>
</bean> </bean>
@@ -999,10 +1023,13 @@
<!-- Thread Pool --> <!-- Thread Pool -->
<!-- --> <!-- -->
<bean id="threadPoolExecutor" class="org.alfresco.util.ThreadPoolExecutorFactoryBean" singleton="true"> <bean id="threadPoolExecutor" class="org.alfresco.util.ThreadPoolExecutorFactoryBean">
<property name="corePoolSize"> <property name="corePoolSize">
<value>2</value> <value>2</value>
</property> </property>
<property name="maximumPoolSize">
<value>10</value>
</property>
</bean> </bean>
<!-- Query Register Component --> <!-- Query Register Component -->

View File

@@ -9,3 +9,6 @@ server.transaction.mode.default=PROPAGATION_REQUIRED
server.transaction.allow-writes=true server.transaction.allow-writes=true
server.transaction.max-retries=20 server.transaction.max-retries=20
server.transaction.min-retry-wait-ms=100
server.transaction.max-retry-wait-ms=2000
server.transaction.wait-increment-ms=100

View File

@@ -16,9 +16,10 @@
#db.pool.max=100 #db.pool.max=100
# #
# Sample index tracking frequency # Activate index tracking and recovery
# #
#index.tracking.cronExpression=0/5 * * * * ? #index.tracking.cronExpression=0/5 * * * * ?
#index.recovery.mode=AUTO
# #
# Property to control whether schema updates are performed automatically. # Property to control whether schema updates are performed automatically.

View File

@@ -3,7 +3,25 @@
<beans> <beans>
<!--
The thread pool to use for index rebuilding and recovery
-->
<bean id="indexTrackerThreadPoolExecutor" class="org.alfresco.util.ThreadPoolExecutorFactoryBean">
<property name="corePoolSize">
<value>${index.recovery.maximumPoolSize}</value>
</property>
<property name="maximumPoolSize">
<value>${index.recovery.maximumPoolSize}</value>
</property>
<property name="workQueueSize">
<value>100</value>
</property>
</bean>
<bean id="indexRecoveryComponentBase" abstract="true" > <bean id="indexRecoveryComponentBase" abstract="true" >
<property name="threadPoolExecutor">
<ref bean="indexTrackerThreadPoolExecutor" />
</property>
<property name="authenticationComponent"> <property name="authenticationComponent">
<ref bean="authenticationComponent" /> <ref bean="authenticationComponent" />
</property> </property>
@@ -38,6 +56,9 @@
<property name="stopOnError"> <property name="stopOnError">
<value>${index.recovery.stopOnError}</value> <value>${index.recovery.stopOnError}</value>
</property> </property>
<property name="maxTransactionsPerLuceneCommit">
<value>${index.tracking.maxTransactionsPerLuceneCommit}</value>
</property>
<property name="indexTracker"> <property name="indexTracker">
<ref bean="admIndexTrackerComponent" /> <ref bean="admIndexTrackerComponent" />
</property> </property>
@@ -68,6 +89,9 @@
id="admIndexTrackerComponent" id="admIndexTrackerComponent"
class="org.alfresco.repo.node.index.IndexTransactionTracker" class="org.alfresco.repo.node.index.IndexTransactionTracker"
parent="indexRecoveryComponentBase"> parent="indexRecoveryComponentBase">
<property name="nodeIndexer">
<ref bean="nodeIndexer"/>
</property>
<property name="maxTxnDurationMinutes"> <property name="maxTxnDurationMinutes">
<value>${index.tracking.maxTxnDurationMinutes}</value> <value>${index.tracking.maxTxnDurationMinutes}</value>
</property> </property>
@@ -77,6 +101,12 @@
<property name="maxRecordSetSize"> <property name="maxRecordSetSize">
<value>${index.tracking.maxRecordSetSize}</value> <value>${index.tracking.maxRecordSetSize}</value>
</property> </property>
<property name="maxTransactionsPerLuceneCommit">
<value>${index.tracking.maxTransactionsPerLuceneCommit}</value>
</property>
<property name="disableInTransactionIndexing">
<value>${index.tracking.disableInTransactionIndexing}</value>
</property>
</bean> </bean>
<!-- Schedule index tracking for ADM --> <!-- Schedule index tracking for ADM -->

View File

@@ -26,8 +26,9 @@ dir.indexes.lock=${dir.indexes}/locks
# AUTO: Validates and auto-recovers if validation fails # AUTO: Validates and auto-recovers if validation fails
# FULL: Full index rebuild, processing all transactions in order. The server is temporarily suspended. # FULL: Full index rebuild, processing all transactions in order. The server is temporarily suspended.
index.recovery.mode=VALIDATE index.recovery.mode=VALIDATE
# Force FULL recovery to stop when encountering errors # FULL recovery continues when encountering errors
index.recovery.stopOnError=true index.recovery.stopOnError=false
index.recovery.maximumPoolSize=5
# Set the frequency with which the index tracking is triggered. # Set the frequency with which the index tracking is triggered.
# For more information on index tracking in a cluster: # For more information on index tracking in a cluster:
# http://wiki.alfresco.com/wiki/High_Availability_Configuration_V1.4_to_V2.1#Version_1.4.5.2C_2.1.1_and_later # http://wiki.alfresco.com/wiki/High_Availability_Configuration_V1.4_to_V2.1#Version_1.4.5.2C_2.1.1_and_later
@@ -40,9 +41,11 @@ index.tracking.cronExpression=* * * * * ? 2099
index.tracking.adm.cronExpression=${index.tracking.cronExpression} index.tracking.adm.cronExpression=${index.tracking.cronExpression}
index.tracking.avm.cronExpression=${index.tracking.cronExpression} index.tracking.avm.cronExpression=${index.tracking.cronExpression}
# Other properties. # Other properties.
index.tracking.maxTxnDurationMinutes=60 index.tracking.maxTxnDurationMinutes=10
index.tracking.reindexLagMs=1000 index.tracking.reindexLagMs=1000
index.tracking.maxRecordSetSize=1000 index.tracking.maxRecordSetSize=1000
index.tracking.maxTransactionsPerLuceneCommit=100
index.tracking.disableInTransactionIndexing=false
# Change the failure behaviour of the configuration checker # Change the failure behaviour of the configuration checker
system.bootstrap.config_check.strict=true system.bootstrap.config_check.strict=true

View File

@@ -25,6 +25,7 @@
package org.alfresco.repo.cache; package org.alfresco.repo.cache;
import java.io.Serializable; import java.io.Serializable;
import java.sql.SQLException;
import java.util.Collection; import java.util.Collection;
import javax.transaction.Status; import javax.transaction.Status;
@@ -36,12 +37,14 @@ import net.sf.ehcache.CacheManager;
import org.alfresco.error.AlfrescoRuntimeException; import org.alfresco.error.AlfrescoRuntimeException;
import org.alfresco.repo.transaction.AlfrescoTransactionSupport; import org.alfresco.repo.transaction.AlfrescoTransactionSupport;
import org.alfresco.repo.transaction.TransactionListenerAdapter; import org.alfresco.repo.transaction.TransactionListenerAdapter;
import org.alfresco.repo.transaction.RetryingTransactionHelper;
import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback; import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
import org.alfresco.service.ServiceRegistry; import org.alfresco.service.ServiceRegistry;
import org.alfresco.service.transaction.TransactionService; import org.alfresco.service.transaction.TransactionService;
import org.alfresco.util.ApplicationContextHelper; import org.alfresco.util.ApplicationContextHelper;
import org.springframework.context.ApplicationContext; import org.springframework.context.ApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext; import org.springframework.context.support.ClassPathXmlApplicationContext;
import org.springframework.dao.DataAccessException;
/** /**
* @see org.alfresco.repo.cache.EhCacheAdapter * @see org.alfresco.repo.cache.EhCacheAdapter
@@ -136,6 +139,32 @@ public class CacheTest extends TestCase
assertNull("Non-transactional remove didn't go to backing cache", backingCache.get(key)); assertNull("Non-transactional remove didn't go to backing cache", backingCache.get(key));
} }
/**
 * Regression test for ETWOONE-193 (transactional caches not cleaned up after
 * rollback): the callback fails several times inside the retrying helper so
 * that each failed attempt is rolled back, then finally succeeds.  Stale
 * cache state surviving a rollback would surface on the retry attempts.
 */
public void testRollbackCleanup() throws Exception
{
TransactionService transactionService = serviceRegistry.getTransactionService();
RetryingTransactionHelper txnHelper = transactionService.getRetryingTransactionHelper();
RetryingTransactionCallback<Object> callback = new RetryingTransactionCallback<Object>()
{
// Number of attempts deliberately failed so far; bumped on each throw
private int throwCount = 0;
public Object execute() throws Throwable
{
String key = "B";
String value = "BBB";
// no transaction - do a put
transactionalCache.put(key, value);
// Blow up on the first five attempts to force rollback + retry.
// NOTE(review): assumes the helper treats SQLException as retryable
// and its max-retries setting exceeds 5 — confirm against
// RetryingTransactionHelper configuration.
if (throwCount < 5)
{
throwCount++;
throw new SQLException("Dummy");
}
return null;
}
};
// Must eventually succeed; rollbacks along the way must not leak cache state
txnHelper.doInTransaction(callback);
}
public void testTransactionalCacheWithSingleTxn() throws Throwable public void testTransactionalCacheWithSingleTxn() throws Throwable
{ {
String newGlobalOne = "new_global_one"; String newGlobalOne = "new_global_one";

View File

@@ -43,7 +43,7 @@ import org.alfresco.repo.tenant.Tenant;
import org.alfresco.repo.tenant.TenantDeployerService; import org.alfresco.repo.tenant.TenantDeployerService;
import org.alfresco.repo.tenant.TenantService; import org.alfresco.repo.tenant.TenantService;
import org.alfresco.repo.transaction.AlfrescoTransactionSupport; import org.alfresco.repo.transaction.AlfrescoTransactionSupport;
import org.alfresco.repo.transaction.TransactionListener; import org.alfresco.repo.transaction.TransactionListenerAdapter;
import org.alfresco.repo.workflow.BPMEngineRegistry; import org.alfresco.repo.workflow.BPMEngineRegistry;
import org.alfresco.service.cmr.dictionary.AspectDefinition; import org.alfresco.service.cmr.dictionary.AspectDefinition;
import org.alfresco.service.cmr.dictionary.ClassDefinition; import org.alfresco.service.cmr.dictionary.ClassDefinition;
@@ -388,7 +388,7 @@ public class DictionaryModelType implements ContentServicePolicies.OnContentUpda
/** /**
* Dictionary model type transaction listener class. * Dictionary model type transaction listener class.
*/ */
public class DictionaryModelTypeTransactionListener implements TransactionListener public class DictionaryModelTypeTransactionListener extends TransactionListenerAdapter
{ {
/** /**
* Id used in equals and hash * Id used in equals and hash
@@ -404,17 +404,11 @@ public class DictionaryModelType implements ContentServicePolicies.OnContentUpda
this.contentService = contentService; this.contentService = contentService;
} }
/**
* @see org.alfresco.repo.transaction.TransactionListener#flush()
*/
public void flush()
{
}
/** /**
* @see org.alfresco.repo.transaction.TransactionListener#beforeCommit(boolean) * @see org.alfresco.repo.transaction.TransactionListener#beforeCommit(boolean)
*/ */
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
@Override
public void beforeCommit(boolean readOnly) public void beforeCommit(boolean readOnly)
{ {
Set<NodeRef> pendingModels = (Set<NodeRef>)AlfrescoTransactionSupport.getResource(KEY_PENDING_MODELS); Set<NodeRef> pendingModels = (Set<NodeRef>)AlfrescoTransactionSupport.getResource(KEY_PENDING_MODELS);
@@ -515,27 +509,6 @@ public class DictionaryModelType implements ContentServicePolicies.OnContentUpda
} }
} }
/**
* @see org.alfresco.repo.transaction.TransactionListener#beforeCompletion()
*/
public void beforeCompletion()
{
}
/**
* @see org.alfresco.repo.transaction.TransactionListener#afterCommit()
*/
public void afterCommit()
{
}
/**
* @see org.alfresco.repo.transaction.TransactionListener#afterRollback()
*/
public void afterRollback()
{
}
/** /**
* @see java.lang.Object#equals(java.lang.Object) * @see java.lang.Object#equals(java.lang.Object)
*/ */

View File

@@ -72,7 +72,7 @@
batch-size="128" batch-size="128"
sort="unsorted" sort="unsorted"
inverse="false" inverse="false"
optimistic-lock="true" optimistic-lock="false"
cascade="delete" > cascade="delete" >
<key column="node_id" foreign-key="fk_alf_n_prop" not-null="true" /> <key column="node_id" foreign-key="fk_alf_n_prop" not-null="true" />
<map-key column="qname_id" type="long" /> <map-key column="qname_id" type="long" />
@@ -98,7 +98,7 @@
batch-size="128" batch-size="128"
inverse="false" inverse="false"
sort="unsorted" sort="unsorted"
optimistic-lock="true" optimistic-lock="false"
cascade="delete" > cascade="delete" >
<key column="node_id" foreign-key="fk_alf_n_asp" not-null="true" /> <key column="node_id" foreign-key="fk_alf_n_asp" not-null="true" />
<element column="qname_id" type="long" not-null="true" /> <element column="qname_id" type="long" not-null="true" />

View File

@@ -72,8 +72,6 @@
org.alfresco.repo.domain.hibernate.TransactionImpl as txn org.alfresco.repo.domain.hibernate.TransactionImpl as txn
where where
txn.id = :txnId txn.id = :txnId
order by
txn.commitTimeMs
</query> </query>
<query name="txn.GetTxnsByCommitTimeAsc"> <query name="txn.GetTxnsByCommitTimeAsc">
@@ -85,9 +83,11 @@
where where
txn.commitTimeMs >= :fromTimeInclusive and txn.commitTimeMs >= :fromTimeInclusive and
txn.commitTimeMs < :toTimeExclusive and txn.commitTimeMs < :toTimeExclusive and
txn.id not in (:excludeTxnIds) txn.id not in (:excludeTxnIds) and
txn.server.id not in (:excludeServerIds)
order by order by
txn.commitTimeMs txn.commitTimeMs asc,
txn.id asc
]]> ]]>
</query> </query>
@@ -100,9 +100,25 @@
where where
txn.commitTimeMs >= :fromTimeInclusive and txn.commitTimeMs >= :fromTimeInclusive and
txn.commitTimeMs < :toTimeExclusive and txn.commitTimeMs < :toTimeExclusive and
txn.id not in (:excludeTxnIds) txn.id not in (:excludeTxnIds) and
txn.server.id not in (:excludeServerIds)
order by order by
txn.commitTimeMs desc txn.commitTimeMs desc,
txn.id desc
]]>
</query>
<query name="txn.GetSelectedTxnsByCommitAsc">
<![CDATA[
select
txn
from
org.alfresco.repo.domain.hibernate.TransactionImpl as txn
where
txn.id in (:includeTxnIds)
order by
txn.commitTimeMs asc,
txn.id asc
]]> ]]>
</query> </query>

View File

@@ -318,23 +318,34 @@ public interface NodeDaoService
* for any given millisecond, a list of optional exclusions may be provided. * for any given millisecond, a list of optional exclusions may be provided.
* *
* @param excludeTxnIds a list of txn IDs to ignore. <tt>null</tt> is allowed. * @param excludeTxnIds a list of txn IDs to ignore. <tt>null</tt> is allowed.
* @param remoteOnly <tt>true</tt> if locally-written transactions must be ignored
*/ */
public List<Transaction> getTxnsByCommitTimeAscending( public List<Transaction> getTxnsByCommitTimeAscending(
long fromTimeInclusive, long fromTimeInclusive,
long toTimeExclusive, long toTimeExclusive,
int count, int count,
List<Long> excludeTxnIds); List<Long> excludeTxnIds,
boolean remoteOnly);
/** /**
* Get all transactions in a given time range. Since time-based retrieval doesn't guarantee uniqueness * Get all transactions in a given time range. Since time-based retrieval doesn't guarantee uniqueness
* for any given millisecond, a list of optional exclusions may be provided. * for any given millisecond, a list of optional exclusions may be provided.
* *
* @param excludeTxnIds a list of txn IDs to ignore. <tt>null</tt> is allowed. * @param excludeTxnIds a list of txn IDs to ignore. <tt>null</tt> is allowed.
* @param remoteOnly <tt>true</tt> if locally-written transactions must be ignored
*/ */
public List<Transaction> getTxnsByCommitTimeDescending( public List<Transaction> getTxnsByCommitTimeDescending(
long fromTimeInclusive, long fromTimeInclusive,
long toTimeExclusive, long toTimeExclusive,
int count, int count,
List<Long> excludeTxnIds); List<Long> excludeTxnIds,
boolean remoteOnly);
/**
* Get the lowest commit time for a set of transactions
*
* @param includeTxnIds a list of transaction IDs to search for
* @return Returns the transactions by commit time for the given IDs
*/
public List<Transaction> getTxnsByMinCommitTime(List<Long> includeTxnIds);
public int getTxnUpdateCount(final long txnId); public int getTxnUpdateCount(final long txnId);
public int getTxnDeleteCount(final long txnId); public int getTxnDeleteCount(final long txnId);
public int getTransactionCount(); public int getTransactionCount();

View File

@@ -215,6 +215,40 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
this.parentAssocsCache = parentAssocsCache; this.parentAssocsCache = parentAssocsCache;
} }
/**
 * Returns the ID of this instance's <b>server</b> row, or <tt>null</tt> if no
 * <b>server</b> row exists yet for this machine's IP address (i.e. no
 * transactions have yet been written by this server).
 * <p>
 * The cached value in {@code serverIdSingleton} is used when available;
 * otherwise the row is looked up by IP address.
 *
 * @return the server ID, or <tt>null</tt> if the server row does not exist
 */
private Long getServerIdOrNull()
{
Long serverId = serverIdSingleton.get();
if (serverId != null)
{
// Fast path: already cached for the life of this instance
return serverId;
}
// Query for it
// Not cached yet: look the server row up by this machine's IP address.
// NOTE(review): the looked-up ID is not written back to serverIdSingleton
// here, so this query repeats until something else populates the cache —
// confirm whether that is intentional.
HibernateCallback callback = new HibernateCallback()
{
public Object doInHibernate(Session session)
{
Query query = session
.getNamedQuery(HibernateNodeDaoServiceImpl.QUERY_GET_SERVER_BY_IPADDRESS)
.setString("ipAddress", ipAddress);
return query.uniqueResult();
}
};
Server server = (Server) getHibernateTemplate().execute(callback);
if (server != null)
{
// It exists, so just return the ID
return server.getId();
}
else
{
// No server row for this IP address yet
return null;
}
}
/** /**
* Gets/creates the <b>server</b> instance to use for the life of this instance * Gets/creates the <b>server</b> instance to use for the life of this instance
*/ */
@@ -1522,6 +1556,7 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
private static final String QUERY_GET_TXN_BY_ID = "txn.GetTxnById"; private static final String QUERY_GET_TXN_BY_ID = "txn.GetTxnById";
private static final String QUERY_GET_TXNS_BY_COMMIT_TIME_ASC = "txn.GetTxnsByCommitTimeAsc"; private static final String QUERY_GET_TXNS_BY_COMMIT_TIME_ASC = "txn.GetTxnsByCommitTimeAsc";
private static final String QUERY_GET_TXNS_BY_COMMIT_TIME_DESC = "txn.GetTxnsByCommitTimeDesc"; private static final String QUERY_GET_TXNS_BY_COMMIT_TIME_DESC = "txn.GetTxnsByCommitTimeDesc";
private static final String QUERY_GET_SELECTED_TXNS_BY_COMMIT_TIME_ASC = "txn.GetSelectedTxnsByCommitAsc";
private static final String QUERY_GET_TXN_UPDATE_COUNT_FOR_STORE = "txn.GetTxnUpdateCountForStore"; private static final String QUERY_GET_TXN_UPDATE_COUNT_FOR_STORE = "txn.GetTxnUpdateCountForStore";
private static final String QUERY_GET_TXN_DELETE_COUNT_FOR_STORE = "txn.GetTxnDeleteCountForStore"; private static final String QUERY_GET_TXN_DELETE_COUNT_FOR_STORE = "txn.GetTxnDeleteCountForStore";
private static final String QUERY_COUNT_TRANSACTIONS = "txn.CountTransactions"; private static final String QUERY_COUNT_TRANSACTIONS = "txn.CountTransactions";
@@ -1545,6 +1580,28 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
return txn; return txn;
} }
/**
 * Fetches the given transactions ordered by commit time (ascending, then by
 * ID) using the named query <tt>txn.GetSelectedTxnsByCommitAsc</tt>.
 *
 * @param includeTxnIds the transaction IDs to fetch; must not be empty
 * @return the matching transactions in commit-time order, or <tt>null</tt>
 *         if the ID list is empty
 *         — NOTE(review): returning null rather than an empty list forces
 *         every caller to null-check; confirm callers expect this.
 */
@SuppressWarnings("unchecked")
public List<Transaction> getTxnsByMinCommitTime(final List<Long> includeTxnIds)
{
if (includeTxnIds.size() == 0)
{
// Hibernate cannot bind an empty parameter list; short-circuit
return null;
}
HibernateCallback callback = new HibernateCallback()
{
public Object doInHibernate(Session session)
{
Query query = session.getNamedQuery(QUERY_GET_SELECTED_TXNS_BY_COMMIT_TIME_ASC);
query.setParameterList("includeTxnIds", includeTxnIds)
.setReadOnly(true);
return query.list();
}
};
List<Transaction> txns = (List<Transaction>) getHibernateTemplate().execute(callback);
// done
return txns;
}
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
public int getTxnUpdateCount(final long txnId) public int getTxnUpdateCount(final long txnId)
{ {
@@ -1600,12 +1657,14 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
} }
private static final Long TXN_ID_DUD = Long.valueOf(-1L); private static final Long TXN_ID_DUD = Long.valueOf(-1L);
private static final Long SERVER_ID_DUD = Long.valueOf(-1L);
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
public List<Transaction> getTxnsByCommitTimeAscending( public List<Transaction> getTxnsByCommitTimeAscending(
final long fromTimeInclusive, final long fromTimeInclusive,
final long toTimeExclusive, final long toTimeExclusive,
final int count, final int count,
List<Long> excludeTxnIds) List<Long> excludeTxnIds,
boolean remoteOnly)
{ {
// Make sure that we have at least one entry in the exclude list // Make sure that we have at least one entry in the exclude list
final List<Long> excludeTxnIdsInner = new ArrayList<Long>(excludeTxnIds == null ? 1 : excludeTxnIds.size()); final List<Long> excludeTxnIdsInner = new ArrayList<Long>(excludeTxnIds == null ? 1 : excludeTxnIds.size());
@@ -1617,6 +1676,25 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
{ {
excludeTxnIdsInner.addAll(excludeTxnIds); excludeTxnIdsInner.addAll(excludeTxnIds);
} }
final List<Long> excludeServerIds = new ArrayList<Long>(1);
if (remoteOnly)
{
// Get the current server ID. This can be null if no transactions have been written by
// a server with this IP address.
Long serverId = getServerIdOrNull();
if (serverId == null)
{
excludeServerIds.add(SERVER_ID_DUD);
}
else
{
excludeServerIds.add(serverId);
}
}
else
{
excludeServerIds.add(SERVER_ID_DUD);
}
HibernateCallback callback = new HibernateCallback() HibernateCallback callback = new HibernateCallback()
{ {
public Object doInHibernate(Session session) public Object doInHibernate(Session session)
@@ -1625,6 +1703,7 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
query.setLong("fromTimeInclusive", fromTimeInclusive) query.setLong("fromTimeInclusive", fromTimeInclusive)
.setLong("toTimeExclusive", toTimeExclusive) .setLong("toTimeExclusive", toTimeExclusive)
.setParameterList("excludeTxnIds", excludeTxnIdsInner) .setParameterList("excludeTxnIds", excludeTxnIdsInner)
.setParameterList("excludeServerIds", excludeServerIds)
.setMaxResults(count) .setMaxResults(count)
.setReadOnly(true); .setReadOnly(true);
return query.list(); return query.list();
@@ -1640,7 +1719,8 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
final long fromTimeInclusive, final long fromTimeInclusive,
final long toTimeExclusive, final long toTimeExclusive,
final int count, final int count,
List<Long> excludeTxnIds) List<Long> excludeTxnIds,
boolean remoteOnly)
{ {
// Make sure that we have at least one entry in the exclude list // Make sure that we have at least one entry in the exclude list
final List<Long> excludeTxnIdsInner = new ArrayList<Long>(excludeTxnIds == null ? 1 : excludeTxnIds.size()); final List<Long> excludeTxnIdsInner = new ArrayList<Long>(excludeTxnIds == null ? 1 : excludeTxnIds.size());
@@ -1652,6 +1732,25 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
{ {
excludeTxnIdsInner.addAll(excludeTxnIds); excludeTxnIdsInner.addAll(excludeTxnIds);
} }
final List<Long> excludeServerIds = new ArrayList<Long>(1);
if (remoteOnly)
{
// Get the current server ID. This can be null if no transactions have been written by
// a server with this IP address.
Long serverId = getServerIdOrNull();
if (serverId == null)
{
excludeServerIds.add(SERVER_ID_DUD);
}
else
{
excludeServerIds.add(serverId);
}
}
else
{
excludeServerIds.add(SERVER_ID_DUD);
}
HibernateCallback callback = new HibernateCallback() HibernateCallback callback = new HibernateCallback()
{ {
public Object doInHibernate(Session session) public Object doInHibernate(Session session)
@@ -1660,6 +1759,7 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
query.setLong("fromTimeInclusive", fromTimeInclusive) query.setLong("fromTimeInclusive", fromTimeInclusive)
.setLong("toTimeExclusive", toTimeExclusive) .setLong("toTimeExclusive", toTimeExclusive)
.setParameterList("excludeTxnIds", excludeTxnIdsInner) .setParameterList("excludeTxnIds", excludeTxnIdsInner)
.setParameterList("excludeServerIds", excludeServerIds)
.setMaxResults(count) .setMaxResults(count)
.setReadOnly(true); .setReadOnly(true);
return query.list(); return query.list();

View File

@@ -24,6 +24,8 @@
*/ */
package org.alfresco.repo.node.index; package org.alfresco.repo.node.index;
import java.util.concurrent.ThreadPoolExecutor;
import javax.transaction.Status; import javax.transaction.Status;
import javax.transaction.UserTransaction; import javax.transaction.UserTransaction;
@@ -69,6 +71,8 @@ public class AVMRemoteSnapshotTrackerTest extends BaseSpringTest
private NodeDaoService nodeDaoService; private NodeDaoService nodeDaoService;
private ThreadPoolExecutor threadPoolExecutor;
public AVMRemoteSnapshotTrackerTest() public AVMRemoteSnapshotTrackerTest()
{ {
// TODO Auto-generated constructor stub // TODO Auto-generated constructor stub
@@ -89,6 +93,7 @@ public class AVMRemoteSnapshotTrackerTest extends BaseSpringTest
ftsIndexer = (FullTextSearchIndexer) applicationContext.getBean("LuceneFullTextSearchIndexer"); ftsIndexer = (FullTextSearchIndexer) applicationContext.getBean("LuceneFullTextSearchIndexer");
indexer = (Indexer) applicationContext.getBean("indexerComponent"); indexer = (Indexer) applicationContext.getBean("indexerComponent");
nodeDaoService = (NodeDaoService) applicationContext.getBean("nodeDaoService"); nodeDaoService = (NodeDaoService) applicationContext.getBean("nodeDaoService");
threadPoolExecutor = (ThreadPoolExecutor) applicationContext.getBean("indexTrackerThreadPoolExecutor");
testTX = transactionService.getUserTransaction(); testTX = transactionService.getUserTransaction();
@@ -190,6 +195,7 @@ public class AVMRemoteSnapshotTrackerTest extends BaseSpringTest
tracker.setNodeDaoService(nodeDaoService); tracker.setNodeDaoService(nodeDaoService);
tracker.setNodeService(nodeService); tracker.setNodeService(nodeService);
tracker.setSearcher(searchService); tracker.setSearcher(searchService);
tracker.setThreadPoolExecutor(threadPoolExecutor);
tracker.reindex(); tracker.reindex();

View File

@@ -24,12 +24,17 @@
*/ */
package org.alfresco.repo.node.index; package org.alfresco.repo.node.index;
import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import net.sf.acegisecurity.Authentication; import net.sf.acegisecurity.Authentication;
import org.alfresco.error.AlfrescoRuntimeException;
import org.alfresco.model.ContentModel; import org.alfresco.model.ContentModel;
import org.alfresco.repo.domain.Transaction; import org.alfresco.repo.domain.Transaction;
import org.alfresco.repo.node.db.NodeDaoService; import org.alfresco.repo.node.db.NodeDaoService;
@@ -38,7 +43,10 @@ import org.alfresco.repo.search.impl.lucene.LuceneQueryParser;
import org.alfresco.repo.search.impl.lucene.fts.FullTextSearchIndexer; import org.alfresco.repo.search.impl.lucene.fts.FullTextSearchIndexer;
import org.alfresco.repo.security.authentication.AuthenticationComponent; import org.alfresco.repo.security.authentication.AuthenticationComponent;
import org.alfresco.repo.security.authentication.AuthenticationUtil; import org.alfresco.repo.security.authentication.AuthenticationUtil;
import org.alfresco.repo.transaction.AlfrescoTransactionSupport;
import org.alfresco.repo.transaction.TransactionListenerAdapter;
import org.alfresco.repo.transaction.TransactionServiceImpl; import org.alfresco.repo.transaction.TransactionServiceImpl;
import org.alfresco.repo.transaction.AlfrescoTransactionSupport.TxnReadState;
import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback; import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
import org.alfresco.service.cmr.repository.ChildAssociationRef; import org.alfresco.service.cmr.repository.ChildAssociationRef;
import org.alfresco.service.cmr.repository.NodeRef; import org.alfresco.service.cmr.repository.NodeRef;
@@ -48,6 +56,7 @@ import org.alfresco.service.cmr.repository.NodeRef.Status;
import org.alfresco.service.cmr.search.ResultSet; import org.alfresco.service.cmr.search.ResultSet;
import org.alfresco.service.cmr.search.SearchParameters; import org.alfresco.service.cmr.search.SearchParameters;
import org.alfresco.service.cmr.search.SearchService; import org.alfresco.service.cmr.search.SearchService;
import org.alfresco.util.ParameterCheck;
import org.alfresco.util.PropertyCheck; import org.alfresco.util.PropertyCheck;
import org.alfresco.util.VmShutdownListener; import org.alfresco.util.VmShutdownListener;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
@@ -65,6 +74,7 @@ import org.apache.commons.logging.LogFactory;
public abstract class AbstractReindexComponent implements IndexRecovery public abstract class AbstractReindexComponent implements IndexRecovery
{ {
private static Log logger = LogFactory.getLog(AbstractReindexComponent.class); private static Log logger = LogFactory.getLog(AbstractReindexComponent.class);
private static Log loggerOnThread = LogFactory.getLog(AbstractReindexComponent.class.getName() + ".threads");
/** kept to notify the thread that it should quit */ /** kept to notify the thread that it should quit */
private static VmShutdownListener vmShutdownListener = new VmShutdownListener("IndexRecovery"); private static VmShutdownListener vmShutdownListener = new VmShutdownListener("IndexRecovery");
@@ -82,6 +92,8 @@ public abstract class AbstractReindexComponent implements IndexRecovery
protected NodeService nodeService; protected NodeService nodeService;
/** the component giving direct access to <b>transaction</b> instances */ /** the component giving direct access to <b>transaction</b> instances */
protected NodeDaoService nodeDaoService; protected NodeDaoService nodeDaoService;
/** the component that holds the reindex worker threads */
private ThreadPoolExecutor threadPoolExecutor;
private volatile boolean shutdown; private volatile boolean shutdown;
private final WriteLock indexerWriteLock; private final WriteLock indexerWriteLock;
@@ -180,6 +192,30 @@ public abstract class AbstractReindexComponent implements IndexRecovery
this.nodeDaoService = nodeDaoService; this.nodeDaoService = nodeDaoService;
} }
/**
* Set the thread pool to use when doing asynchronous reindexing. Use <tt>null</tt>
* to have the calling thread do the indexing.
*
* @param threadPoolExecutor a pre-configured thread pool for the reindex work
*
* @since 2.1.4
*/
public void setThreadPoolExecutor(ThreadPoolExecutor threadPoolExecutor)
{
this.threadPoolExecutor = threadPoolExecutor;
}
/**
* Determines if calls to {@link #reindexImpl()} should be wrapped in a transaction or not.
* The default is <b>true</b>.
*
* @return Returns <tt>true</tt> if an existing transaction is required for reindexing.
*/
protected boolean requireTransaction()
{
return true;
}
/** /**
* Perform the actual work. This method will be called as the system user * Perform the actual work. This method will be called as the system user
* and within an existing transaction. This thread will only ever be accessed * and within an existing transaction. This thread will only ever be accessed
@@ -217,8 +253,19 @@ public abstract class AbstractReindexComponent implements IndexRecovery
return null; return null;
} }
}; };
if (requireTransaction())
{
transactionService.getRetryingTransactionHelper().doInTransaction(reindexWork, true); transactionService.getRetryingTransactionHelper().doInTransaction(reindexWork, true);
} }
else
{
reindexWork.execute();
}
}
catch (Throwable e)
{
throw new AlfrescoRuntimeException("Reindex failure for " + this.getClass().getName(), e);
}
finally finally
{ {
try { indexerWriteLock.unlock(); } catch (Throwable e) {} try { indexerWriteLock.unlock(); } catch (Throwable e) {}
@@ -247,56 +294,131 @@ public abstract class AbstractReindexComponent implements IndexRecovery
YES, NO, INDETERMINATE; YES, NO, INDETERMINATE;
} }
private static final String KEY_STORE_REFS = "StoreRefCacheMethodInterceptor.StoreRefs";
@SuppressWarnings("unchecked")
/** /**
* Determines if a given transaction is definitely in the index or not. * Helper method that caches ADM store references to prevent repeated and unnecessary calls to the
* * NodeService for this list.
* @param txnId a specific transaction
* @return Returns <tt>true</tt> if the transaction is definitely in the index
*/ */
protected InIndex isTxnIdPresentInIndex(long txnId) private List<StoreRef> getAdmStoreRefs()
{ {
if (logger.isDebugEnabled()) List<StoreRef> storeRefs = (List<StoreRef>) AlfrescoTransactionSupport.getResource(KEY_STORE_REFS);
if (storeRefs != null)
{ {
logger.debug("Checking for transaction in index: " + txnId); return storeRefs;
}
else
{
storeRefs = nodeService.getStores();
Iterator<StoreRef> storeRefsIterator = storeRefs.iterator();
while (storeRefsIterator.hasNext())
{
// Remove AVM stores
StoreRef storeRef = storeRefsIterator.next();
if (storeRef.getProtocol().equals(StoreRef.PROTOCOL_AVM))
{
storeRefsIterator.remove();
}
}
// Change the ordering to favour the most common stores
if (storeRefs.contains(StoreRef.STORE_REF_ARCHIVE_SPACESSTORE))
{
storeRefs.remove(StoreRef.STORE_REF_ARCHIVE_SPACESSTORE);
storeRefs.add(0, StoreRef.STORE_REF_ARCHIVE_SPACESSTORE);
}
if (storeRefs.contains(StoreRef.STORE_REF_WORKSPACE_SPACESSTORE))
{
storeRefs.remove(StoreRef.STORE_REF_WORKSPACE_SPACESSTORE);
storeRefs.add(0, StoreRef.STORE_REF_WORKSPACE_SPACESSTORE);
}
// Bind it in
AlfrescoTransactionSupport.bindResource(KEY_STORE_REFS, storeRefs);
}
return storeRefs;
} }
protected InIndex isTxnIdPresentInIndex(long txnId)
{
Transaction txn = nodeDaoService.getTxnById(txnId); Transaction txn = nodeDaoService.getTxnById(txnId);
if (txn == null) if (txn == null)
{ {
return InIndex.YES; return InIndex.YES;
} }
return isTxnPresentInIndex(txn);
}
// count the changes in the transaction /**
int updateCount = nodeDaoService.getTxnUpdateCount(txnId); * Determines if a given transaction is definitely in the index or not.
int deleteCount = nodeDaoService.getTxnDeleteCount(txnId); *
* @param txn a specific transaction
* @return Returns <tt>true</tt> if the transaction is definitely in the index
*/
protected InIndex isTxnPresentInIndex(final Transaction txn)
{
if (txn == null)
{
return InIndex.YES;
}
final Long txnId = txn.getId();
if (logger.isDebugEnabled()) if (logger.isDebugEnabled())
{ {
logger.debug("Transaction " + txnId + " has " + updateCount + " updates and " + deleteCount + " deletes."); logger.debug("Checking for transaction in index: " + txnId);
} }
// Check if the txn ID is present in any store's index
InIndex result = InIndex.NO; boolean foundInIndex = false;
if (updateCount == 0 && deleteCount == 0) List<StoreRef> storeRefs = getAdmStoreRefs();
{
// If there are no update or deletes, then it is impossible to know if the transaction was removed
// from the index or was never there in the first place.
result = InIndex.INDETERMINATE;
}
else
{
// get the stores
List<StoreRef> storeRefs = nodeService.getStores();
for (StoreRef storeRef : storeRefs) for (StoreRef storeRef : storeRefs)
{ {
boolean inStore = isTxnIdPresentInIndex(storeRef, txn, updateCount, deleteCount); boolean inStore = isTxnIdPresentInIndex(storeRef, txn);
if (inStore) if (inStore)
{ {
// found in a particular store // found in a particular store
foundInIndex = true;
break;
}
}
InIndex result = InIndex.NO;
if (!foundInIndex)
{
// If none of the stores have the transaction, then that might be because it consists of 0 modifications
int updateCount = nodeDaoService.getTxnUpdateCount(txnId);
if (updateCount > 0)
{
// There were updates, but there is no sign in the indexes
result = InIndex.NO;
}
else
{
// We're now in the case where there were no updates
int deleteCount = nodeDaoService.getTxnDeleteCount(txnId);
if (deleteCount == 0)
{
// There are no updates or deletes and no entry in the indexes.
// There are outdated nodes in the index.
result = InIndex.YES; result = InIndex.YES;
}
else
{
// There were deleted nodes only. Check that all the deleted nodes were
// removed from the index otherwise it is out of date.
for (StoreRef storeRef : storeRefs)
{
if (!haveNodesBeenRemovedFromIndex(storeRef, txn))
{
result = InIndex.NO;
break; break;
} }
} }
} }
}
}
else
{
result = InIndex.YES;
}
// done // done
if (logger.isDebugEnabled()) if (logger.isDebugEnabled())
{ {
@@ -306,18 +428,13 @@ public abstract class AbstractReindexComponent implements IndexRecovery
} }
/** /**
* @param updateCount the number of node updates in the transaction * @return Returns true if the given transaction is present in the index
* @param deleteCount the number of node deletions in the transaction
* @return Returns true if the given transaction is indexed,
* or if there are no updates or deletes
*/ */
private boolean isTxnIdPresentInIndex(StoreRef storeRef, Transaction txn, int updateCount, int deleteCount) private boolean isTxnIdPresentInIndex(StoreRef storeRef, Transaction txn)
{ {
long txnId = txn.getId(); long txnId = txn.getId();
String changeTxnId = txn.getChangeTxnId(); String changeTxnId = txn.getChangeTxnId();
// do the most update check, which is most common // do the most update check, which is most common
if (updateCount > 0)
{
ResultSet results = null; ResultSet results = null;
try try
{ {
@@ -352,11 +469,14 @@ public abstract class AbstractReindexComponent implements IndexRecovery
if (results != null) { results.close(); } if (results != null) { results.close(); }
} }
} }
else if (deleteCount > 0)
private boolean haveNodesBeenRemovedFromIndex(final StoreRef storeRef, final Transaction txn)
{ {
final Long txnId = txn.getId();
// there have been deletes, so we have to ensure that none of the nodes deleted are present in the index // there have been deletes, so we have to ensure that none of the nodes deleted are present in the index
// get all node refs for the transaction // get all node refs for the transaction
List<NodeRef> nodeRefs = nodeDaoService.getTxnChangesForStore(storeRef, txnId); List<NodeRef> nodeRefs = nodeDaoService.getTxnChangesForStore(storeRef, txnId);
boolean foundNodeRef = false;
for (NodeRef nodeRef : nodeRefs) for (NodeRef nodeRef : nodeRefs)
{ {
if (logger.isDebugEnabled()) if (logger.isDebugEnabled())
@@ -378,22 +498,10 @@ public abstract class AbstractReindexComponent implements IndexRecovery
results = searcher.query(sp); results = searcher.query(sp);
if (results.length() == 0) if (results.length() > 0)
{ {
// no results, as expected foundNodeRef = true;
if (logger.isDebugEnabled()) break;
{
logger.debug(" --> Node not found (OK)");
}
continue;
}
else
{
if (logger.isDebugEnabled())
{
logger.debug(" --> Node found (Index out of date)");
}
return false;
} }
} }
finally finally
@@ -401,15 +509,22 @@ public abstract class AbstractReindexComponent implements IndexRecovery
if (results != null) { results.close(); } if (results != null) { results.close(); }
} }
} }
} if (foundNodeRef)
// else -> The fallthrough case where there are no updates or deletes {
// all tests passed
if (logger.isDebugEnabled()) if (logger.isDebugEnabled())
{ {
logger.debug("Index is in synch with transaction: " + txnId); logger.debug(" --> Node found (Index out of date)");
} }
return true; }
else
{
// No nodes found
if (logger.isDebugEnabled())
{
logger.debug(" --> Node not found (OK)");
}
}
return !foundNodeRef;
} }
/** /**
@@ -419,8 +534,7 @@ public abstract class AbstractReindexComponent implements IndexRecovery
{ {
for (Transaction txn : txns) for (Transaction txn : txns)
{ {
long txnId = txn.getId().longValue(); if (isTxnPresentInIndex(txn) == InIndex.NO)
if (isTxnIdPresentInIndex(txnId) == InIndex.NO)
{ {
// Missing txn // Missing txn
return false; return false;
@@ -430,25 +544,58 @@ public abstract class AbstractReindexComponent implements IndexRecovery
} }
/** /**
* Perform a full reindexing of the given transaction in the context of a completely * Marker exception to neatly handle VM-driven termination of a reindex
* new transaction. *
* @author Derek Hulley
* @since 2.1.4
*/
public static class ReindexTerminatedException extends RuntimeException
{
private static final long serialVersionUID = -7928720932368892814L;
}
/**
* Callback to notify caller whenever a node has been indexed
*
* @see
* @author Derek Hulley
* @since 2.1.4
*/
protected interface ReindexNodeCallback
{
void reindexedNode(NodeRef nodeRef);
}
protected void reindexTransaction(Long txnId)
{
reindexTransaction(txnId, null);
}
/**
* Perform a full reindexing of the given transaction on the current thread.
* The calling thread must be in the context of a read-only transaction.
* *
* @param txnId the transaction identifier * @param txnId the transaction identifier
* @param callback the callback to notify of each node indexed
*
* @throws ReindexTerminatedException if the VM is shutdown during the reindex
*/ */
protected void reindexTransaction(final long txnId) protected void reindexTransaction(final long txnId, ReindexNodeCallback callback)
{ {
ParameterCheck.mandatory("txnId", txnId);
if (logger.isDebugEnabled()) if (logger.isDebugEnabled())
{ {
logger.debug("Reindexing transaction: " + txnId); logger.debug("Reindexing transaction: " + txnId);
} }
if (AlfrescoTransactionSupport.getTransactionReadState() != TxnReadState.TXN_READ_ONLY)
{
throw new AlfrescoRuntimeException("Reindex work must be done in the context of a read-only transaction");
}
RetryingTransactionCallback<Object> reindexWork = new RetryingTransactionCallback<Object>()
{
public Object execute() throws Exception
{
// get the node references pertinent to the transaction // get the node references pertinent to the transaction
List<NodeRef> nodeRefs = nodeDaoService.getTxnChanges(txnId); List<NodeRef> nodeRefs = nodeDaoService.getTxnChanges(txnId);
// reindex each node // reindex each node
int nodeCount = 0;
for (NodeRef nodeRef : nodeRefs) for (NodeRef nodeRef : nodeRefs)
{ {
Status nodeStatus = nodeService.getNodeStatus(nodeRef); Status nodeStatus = nodeService.getNodeStatus(nodeRef);
@@ -472,12 +619,426 @@ public abstract class AbstractReindexComponent implements IndexRecovery
// reindex // reindex
indexer.updateNode(nodeRef); indexer.updateNode(nodeRef);
} }
// Make the callback
if (callback != null)
{
callback.reindexedNode(nodeRef);
}
// Check for VM shutdown every 100 nodes
if (++nodeCount % 100 == 0 && isShuttingDown())
{
// We can't fail gracefully and run the risk of committing a half-baked transaction
logger.info("Reindexing of transaction " + txnId + " terminated by VM shutdown.");
throw new ReindexTerminatedException();
}
} }
// done // done
}
private static final AtomicInteger ID_GENERATOR = new AtomicInteger();
/**
* Runnable that does reindex work for a given transaction but waits on a queue before
* triggering the commit phase.
* <p>
* This class uses <code>Object</code>'s default equality and hashcode generation.
*
* @author Derek Hulley
* @since 2.1.4
*/
private class ReindexWorkerRunnable extends TransactionListenerAdapter implements Runnable, ReindexNodeCallback
{
private final int id;
private final int uidHashCode;
private final List<Long> txnIds;
private long lastIndexedTimestamp;
private boolean atHeadOfQueue;
private boolean killed;
private ReindexWorkerRunnable(List<Long> txnIds)
{
this.id = ID_GENERATOR.addAndGet(1);
if (ID_GENERATOR.get() > 1000)
{
ID_GENERATOR.set(0);
}
this.uidHashCode = id * 13 + 11;
this.txnIds = txnIds;
this.atHeadOfQueue = false;
this.killed = false;
recordTimestamp();
}
@Override
public String toString()
{
StringBuilder sb = new StringBuilder(128);
sb.append("ReindexWorkerRunnable")
.append("[id=").append(id)
.append("[txnIds=").append(txnIds)
.append("]");
return sb.toString();
}
@Override
public boolean equals(Object obj)
{
if (!(obj instanceof ReindexWorkerRunnable))
{
return false;
}
ReindexWorkerRunnable that = (ReindexWorkerRunnable) obj;
return this.id == that.id;
}
@Override
public int hashCode()
{
return uidHashCode;
}
public synchronized void kill()
{
this.killed = true;
}
private synchronized boolean isKilled()
{
return killed;
}
/**
* @return the time that the last node was indexed (nanoseconds)
*/
public synchronized long getLastIndexedTimestamp()
{
return lastIndexedTimestamp;
}
private synchronized void recordTimestamp()
{
this.lastIndexedTimestamp = System.nanoTime();
}
private synchronized boolean isAtHeadOfQueue()
{
return atHeadOfQueue;
}
private synchronized void waitForHeadOfQueue()
{
try { wait(100L); } catch (InterruptedException e) {}
}
public synchronized void setAtHeadOfQueue()
{
this.notifyAll();
this.atHeadOfQueue = true;
}
public void run()
{
RetryingTransactionCallback<Object> reindexCallback = new RetryingTransactionCallback<Object>()
{
public Object execute() throws Throwable
{
// The first thing is to ensure that beforeCommit will be called
AlfrescoTransactionSupport.bindListener(ReindexWorkerRunnable.this);
// Now reindex
for (Long txnId : txnIds)
{
if (loggerOnThread.isDebugEnabled())
{
String msg = String.format(
" -> Reindexer %5d reindexing %10d",
id, txnId.longValue());
loggerOnThread.debug(msg);
}
reindexTransaction(txnId, ReindexWorkerRunnable.this);
}
// Done
return null; return null;
} }
}; };
transactionService.getRetryingTransactionHelper().doInTransaction(reindexWork, true); // Timestamp for when we start
// done recordTimestamp();
try
{
if (loggerOnThread.isDebugEnabled())
{
int txnIdsSize = txnIds.size();
String msg = String.format(
"Reindexer %5d starting [%10d, %10d] on %s.",
id,
(txnIdsSize == 0 ? -1 : txnIds.get(0)),
(txnIdsSize == 0 ? -1 : txnIds.get(txnIdsSize-1)),
Thread.currentThread().getName());
loggerOnThread.debug(msg);
}
// Do the work
transactionService.getRetryingTransactionHelper().doInTransaction(reindexCallback, true, true);
}
catch (ReindexTerminatedException e)
{
// This is OK
String msg = String.format(
"Reindexer %5d terminated: %s.",
id,
e.getMessage());
loggerOnThread.warn(msg);
}
catch (Throwable e)
{
String msg = String.format(
"Reindexer %5d failed with error: %s.",
id,
e.getMessage());
loggerOnThread.error(msg);
}
finally
{
// Triple check that we get the queue state right
removeFromQueueAndProdHead();
}
}
public synchronized void reindexedNode(NodeRef nodeRef)
{
// Check for forced kill
if (isKilled())
{
throw new ReindexTerminatedException();
}
recordTimestamp();
}
/**
* Removes this instance from the queue and notifies the HEAD
*/
private void removeFromQueueAndProdHead()
{
try
{
reindexThreadLock.writeLock().lock();
// Remove self from head of queue
reindexThreadQueue.remove(this);
}
finally
{
reindexThreadLock.writeLock().unlock();
}
// Now notify the new head object
ReindexWorkerRunnable newPeek = peekHeadReindexWorker();
if (newPeek != null)
{
newPeek.setAtHeadOfQueue();
}
if (loggerOnThread.isDebugEnabled())
{
String msg = String.format(
"Reindexer %5d removed from queue. Current HEAD is %s.",
id, newPeek);
loggerOnThread.debug(msg);
}
}
@Override
public void afterCommit()
{
handleQueue();
}
@Override
public void afterRollback()
{
handleQueue();
}
/**
* Lucene will do its final commit once this has been allowed to proceed.
*/
private void handleQueue()
{
while (true)
{
// Quick check to see if we're at the head of the queue
ReindexWorkerRunnable peek = peekHeadReindexWorker();
// Release the current queue head to finish (this might be this instance)
if (peek != null)
{
peek.setAtHeadOfQueue();
}
// Check kill switch
if (peek == null || isKilled() || isAtHeadOfQueue())
{
// Going to close
break;
}
else
{
// This thread is not at the head of the queue and has not been flagged
// for death, so just wait until someone notifies us to carry on
waitForHeadOfQueue();
// Loop again
}
}
// Lucene can now get on with the commit. We didn't have ordering at this level
// and the IndexInfo locks are handled by Lucene. So we let the thread go and
// the other worker threads can get on with it.
// Record the fact that the thread is on the final straight. From here on, no
// more work notifications will be possible so the timestamp needs to spoof it.
recordTimestamp();
}
}
/**
* FIFO queue to control the ordering of transaction commits. Synchronization around this object is
* controlled by the read-write lock.
*/
private LinkedBlockingQueue<ReindexWorkerRunnable> reindexThreadQueue = new LinkedBlockingQueue<ReindexWorkerRunnable>();
private ReentrantReadWriteLock reindexThreadLock = new ReentrantReadWriteLock(true);
/**
* Read-safe method to peek at the head of the queue
*/
private ReindexWorkerRunnable peekHeadReindexWorker()
{
try
{
reindexThreadLock.readLock().lock();
return reindexThreadQueue.peek();
}
finally
{
reindexThreadLock.readLock().unlock();
}
}
/**
* Performs indexing off the current thread, which may return quickly if there are threads immediately
* available in the thread pool.
* <p>
* Commits are guaranteed to occur in the order in which this reindex jobs are added to the queue.
*
* @see #reindexTransaction(long)
* @see #waitForAsynchronousReindexing()
* @since 2.1.4
*/
protected void reindexTransactionAsynchronously(final List<Long> txnIds)
{
// Bypass if there is no thread pool
if (threadPoolExecutor == null || threadPoolExecutor.getMaximumPoolSize() < 2)
{
if (loggerOnThread.isDebugEnabled())
{
String msg = String.format(
"Reindexing inline: %s.",
txnIds.toString());
loggerOnThread.debug(msg);
}
RetryingTransactionCallback<Object> reindexCallback = new RetryingTransactionCallback<Object>()
{
public Object execute() throws Throwable
{
for (Long txnId : txnIds)
{
if (loggerOnThread.isDebugEnabled())
{
String msg = String.format(
"Reindex %10d.",
txnId.longValue());
loggerOnThread.debug(msg);
}
reindexTransaction(txnId, null);
}
return null;
}
};
transactionService.getRetryingTransactionHelper().doInTransaction(reindexCallback, true, true);
return;
}
ReindexWorkerRunnable runnable = new ReindexWorkerRunnable(txnIds);
try
{
reindexThreadLock.writeLock().lock();
// Add the runnable to the queue to ensure ordering
reindexThreadQueue.add(runnable);
}
finally
{
reindexThreadLock.writeLock().unlock();
}
// Ship it to a thread.
// We don't do this in the lock - but the situation should be avoided by having the blocking
// queue size less than the maximum pool size
threadPoolExecutor.execute(runnable);
}
/**
* Wait for all asynchronous indexing to finish before returning. This is useful if the calling thread
* wants to ensure that all reindex work has finished before continuing.
*/
protected synchronized void waitForAsynchronousReindexing()
{
ReindexWorkerRunnable lastRunnable = null;
long lastTimestamp = Long.MAX_VALUE;
ReindexWorkerRunnable currentRunnable = peekHeadReindexWorker();
while (currentRunnable != null && !isShuttingDown())
{
// Notify the runnable that it is at the head of the queue
currentRunnable.setAtHeadOfQueue();
// Give the thread chance to commit
synchronized(this)
{
try { wait(100); } catch (InterruptedException e) {}
}
long currentTimestamp = currentRunnable.getLastIndexedTimestamp();
// The head of the queue holds proceedings, so it can't be allowed to continue forever
// Allow 60s of inactivity. We don't anticipate more than a few milliseconds between
// timestamp advances for the reindex threads so this checking is just for emergencies
// to prevent the queue from getting locked up.
if (lastRunnable == currentRunnable)
{
if (currentTimestamp - lastTimestamp > 60E9)
{
try
{
reindexThreadLock.writeLock().lock();
// Double check
ReindexWorkerRunnable checkCurrentRunnable = reindexThreadQueue.peek();
if (lastRunnable != checkCurrentRunnable)
{
// It's moved on - just in time
}
else
{
loggerOnThread.info("Terminating reindex thread for inactivity: " + currentRunnable);
reindexThreadQueue.remove(currentRunnable);
currentRunnable.kill();
}
// Reset
lastRunnable = null;
lastTimestamp = Long.MAX_VALUE;
// Peek at the queue and check again
currentRunnable = reindexThreadQueue.peek();
}
finally
{
reindexThreadLock.writeLock().unlock();
}
continue;
}
// Swap timestamps
lastRunnable = currentRunnable;
lastTimestamp = currentTimestamp;
}
else
{
// Swap timestamps
lastRunnable = currentRunnable;
lastTimestamp = currentTimestamp;
}
currentRunnable = peekHeadReindexWorker();
}
} }
} }

View File

@@ -27,6 +27,7 @@ package org.alfresco.repo.node.index;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
import java.util.Date; import java.util.Date;
import java.util.Iterator;
import java.util.List; import java.util.List;
import org.alfresco.i18n.I18NUtil; import org.alfresco.i18n.I18NUtil;
@@ -94,6 +95,7 @@ public class FullIndexRecoveryComponent extends AbstractReindexComponent
private boolean lockServer; private boolean lockServer;
private IndexTransactionTracker indexTracker; private IndexTransactionTracker indexTracker;
private boolean stopOnError; private boolean stopOnError;
private int maxTransactionsPerLuceneCommit;
/** /**
* <ul> * <ul>
@@ -105,6 +107,7 @@ public class FullIndexRecoveryComponent extends AbstractReindexComponent
public FullIndexRecoveryComponent() public FullIndexRecoveryComponent()
{ {
recoveryMode = RecoveryMode.VALIDATE; recoveryMode = RecoveryMode.VALIDATE;
maxTransactionsPerLuceneCommit = 100;
} }
/** /**
@@ -118,6 +121,15 @@ public class FullIndexRecoveryComponent extends AbstractReindexComponent
this.recoveryMode = RecoveryMode.valueOf(recoveryMode); this.recoveryMode = RecoveryMode.valueOf(recoveryMode);
} }
/**
* Set the number of transactions to process per Lucene write.
* Larger values generate less contention on the Lucene IndexInfo files.
*/
public void setMaxTransactionsPerLuceneCommit(int maxTransactionsPerLuceneCommit)
{
this.maxTransactionsPerLuceneCommit = maxTransactionsPerLuceneCommit;
}
/** /**
* Set this on to put the server into READ-ONLY mode for the duration of the index recovery. * Set this on to put the server into READ-ONLY mode for the duration of the index recovery.
* The default is <tt>true</tt>, i.e. the server will be locked against further updates. * The default is <tt>true</tt>, i.e. the server will be locked against further updates.
@@ -178,10 +190,10 @@ public class FullIndexRecoveryComponent extends AbstractReindexComponent
// Check that the first and last meaningful transactions are indexed // Check that the first and last meaningful transactions are indexed
List<Transaction> startTxns = nodeDaoService.getTxnsByCommitTimeAscending( List<Transaction> startTxns = nodeDaoService.getTxnsByCommitTimeAscending(
Long.MIN_VALUE, Long.MAX_VALUE, 10, null); Long.MIN_VALUE, Long.MAX_VALUE, 10, null, false);
boolean startAllPresent = areTxnsInIndex(startTxns); boolean startAllPresent = areTxnsInIndex(startTxns);
List<Transaction> endTxns = nodeDaoService.getTxnsByCommitTimeDescending( List<Transaction> endTxns = nodeDaoService.getTxnsByCommitTimeDescending(
Long.MIN_VALUE, Long.MAX_VALUE, 10, null); Long.MIN_VALUE, Long.MAX_VALUE, 10, null, false);
boolean endAllPresent = areTxnsInIndex(endTxns); boolean endAllPresent = areTxnsInIndex(endTxns);
// check the level of cover required // check the level of cover required
@@ -275,12 +287,16 @@ public class FullIndexRecoveryComponent extends AbstractReindexComponent
fromTimeInclusive, fromTimeInclusive,
toTimeExclusive, toTimeExclusive,
MAX_TRANSACTIONS_PER_ITERATION, MAX_TRANSACTIONS_PER_ITERATION,
lastTxnIds); lastTxnIds,
false);
lastTxnIds = new ArrayList<Long>(nextTxns.size()); lastTxnIds = new ArrayList<Long>(nextTxns.size());
// reindex each transaction // reindex each transaction
for (Transaction txn : nextTxns) List<Long> txnIdBuffer = new ArrayList<Long>(maxTransactionsPerLuceneCommit);
Iterator<Transaction> txnIterator = nextTxns.iterator();
while (txnIterator.hasNext())
{ {
Transaction txn = txnIterator.next();
Long txnId = txn.getId(); Long txnId = txn.getId();
// Keep it to ensure we exclude it from the next iteration // Keep it to ensure we exclude it from the next iteration
lastTxnIds.add(txnId); lastTxnIds.add(txnId);
@@ -297,16 +313,24 @@ public class FullIndexRecoveryComponent extends AbstractReindexComponent
reindexTransaction(txnId); reindexTransaction(txnId);
} }
else else
{
// Add the transaction ID to the buffer
txnIdBuffer.add(txnId);
// Reindex if the buffer is full or if there are no more transactions
if (!txnIterator.hasNext() || txnIdBuffer.size() >= maxTransactionsPerLuceneCommit)
{ {
try try
{ {
reindexTransaction(txnId); reindexTransactionAsynchronously(txnIdBuffer);
} }
catch (Throwable e) catch (Throwable e)
{ {
String msgError = I18NUtil.getMessage(MSG_RECOVERY_ERROR, txnId, e.getMessage()); String msgError = I18NUtil.getMessage(MSG_RECOVERY_ERROR, txnId, e.getMessage());
logger.info(msgError, e); logger.info(msgError, e);
} }
// Clear the buffer
txnIdBuffer = new ArrayList<Long>(maxTransactionsPerLuceneCommit);
}
} }
// Although we use the same time as this transaction for the next iteration, we also // Although we use the same time as this transaction for the next iteration, we also
// make use of the exclusion list to ensure that it doesn't get pulled back again. // make use of the exclusion list to ensure that it doesn't get pulled back again.
@@ -324,6 +348,9 @@ public class FullIndexRecoveryComponent extends AbstractReindexComponent
} }
} }
// Wait for the asynchronous process to catch up
waitForAsynchronousReindexing();
// have we finished? // have we finished?
if (nextTxns.size() == 0) if (nextTxns.size() == 0)
{ {
@@ -337,8 +364,8 @@ public class FullIndexRecoveryComponent extends AbstractReindexComponent
} }
/** /**
* Perform a full reindexing of the given transaction in the context of a completely * Perform full reindexing of the given transaction. A read-only transaction is created
* new transaction. * <b>if one doesn't already exist</b>.
* *
* @param txnId the transaction identifier * @param txnId the transaction identifier
*/ */
@@ -384,7 +411,7 @@ public class FullIndexRecoveryComponent extends AbstractReindexComponent
return null; return null;
} }
}; };
transactionService.getRetryingTransactionHelper().doInTransaction(reindexWork, true, true); transactionService.getRetryingTransactionHelper().doInTransaction(reindexWork, true, false);
// done // done
} }
} }

View File

@@ -19,14 +19,15 @@ package org.alfresco.repo.node.index;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
import java.util.Date; import java.util.Date;
import java.util.HashSet; import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Set;
import java.util.TreeMap; import java.util.TreeMap;
import org.alfresco.error.AlfrescoRuntimeException; import org.alfresco.error.AlfrescoRuntimeException;
import org.alfresco.repo.domain.Transaction; import org.alfresco.repo.domain.Transaction;
import org.alfresco.repo.transaction.RetryingTransactionHelper;
import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
@@ -40,16 +41,20 @@ public class IndexTransactionTracker extends AbstractReindexComponent
private static Log logger = LogFactory.getLog(IndexTransactionTracker.class); private static Log logger = LogFactory.getLog(IndexTransactionTracker.class);
private IndexTransactionTrackerListener listener; private IndexTransactionTrackerListener listener;
private NodeIndexer nodeIndexer;
private long maxTxnDurationMs; private long maxTxnDurationMs;
private long reindexLagMs; private long reindexLagMs;
private int maxRecordSetSize; private int maxRecordSetSize;
private int maxTransactionsPerLuceneCommit;
private boolean disableInTransactionIndexing;
private boolean started; private boolean started;
private List<Long> previousTxnIds; private List<Long> previousTxnIds;
private long lastMaxTxnId; private Long lastMaxTxnId;
private long fromTimeInclusive; private long fromTimeInclusive;
private Map<Long, TxnRecord> voids; private Map<Long, TxnRecord> voids;
private boolean forceReindex;
/** /**
* Set the defaults. * Set the defaults.
@@ -57,6 +62,8 @@ public class IndexTransactionTracker extends AbstractReindexComponent
* <li><b>Maximum transaction duration:</b> 1 hour</li> * <li><b>Maximum transaction duration:</b> 1 hour</li>
* <li><b>Reindex lag:</b> 1 second</li> * <li><b>Reindex lag:</b> 1 second</li>
* <li><b>Maximum recordset size:</b> 1000</li> * <li><b>Maximum recordset size:</b> 1000</li>
* <li><b>Maximum transactions per Lucene commit:</b> 100</li>
* <li><b>Disable in-transaction indexing:</b> false</li>
* </ul> * </ul>
*/ */
public IndexTransactionTracker() public IndexTransactionTracker()
@@ -64,10 +71,13 @@ public class IndexTransactionTracker extends AbstractReindexComponent
maxTxnDurationMs = 3600L * 1000L; maxTxnDurationMs = 3600L * 1000L;
reindexLagMs = 1000L; reindexLagMs = 1000L;
maxRecordSetSize = 1000; maxRecordSetSize = 1000;
maxTransactionsPerLuceneCommit = 100;
disableInTransactionIndexing = false;
previousTxnIds = Collections.<Long>emptyList(); previousTxnIds = Collections.<Long>emptyList();
lastMaxTxnId = Long.MAX_VALUE; lastMaxTxnId = Long.MAX_VALUE;
fromTimeInclusive = -1L; fromTimeInclusive = -1L;
voids = new TreeMap<Long, TxnRecord>(); voids = new TreeMap<Long, TxnRecord>();
forceReindex = false;
} }
public synchronized void setListener(IndexTransactionTrackerListener listener) public synchronized void setListener(IndexTransactionTrackerListener listener)
@@ -75,6 +85,11 @@ public class IndexTransactionTracker extends AbstractReindexComponent
this.listener = listener; this.listener = listener;
} }
public void setNodeIndexer(NodeIndexer nodeIndexer)
{
this.nodeIndexer = nodeIndexer;
}
/** /**
* Set the expected maximum duration of transaction supported. This value is used to adjust the * Set the expected maximum duration of transaction supported. This value is used to adjust the
* look-back used to detect transactions that committed. Values must be greater than zero. * look-back used to detect transactions that committed. Values must be greater than zero.
@@ -118,29 +133,116 @@ public class IndexTransactionTracker extends AbstractReindexComponent
this.maxRecordSetSize = maxRecordSetSize; this.maxRecordSetSize = maxRecordSetSize;
} }
/**
* Set the number of transactions to process per Lucene write.
* Larger values generate less contention on the Lucene IndexInfo files.
*/
public void setMaxTransactionsPerLuceneCommit(int maxTransactionsPerLuceneCommit)
{
this.maxTransactionsPerLuceneCommit = maxTransactionsPerLuceneCommit;
}
/**
* Enable or disabled in-transaction indexing. Under certain circumstances, the system
* can run with only index tracking enabled - in-transaction indexing is not always
* required. The {@link NodeIndexer} is disabled when this component initialises.
*/
public void setDisableInTransactionIndexing(boolean disableInTransactionIndexing)
{
this.disableInTransactionIndexing = disableInTransactionIndexing;
}
/**
* @return Returns <tt>false</tt> always. Transactions are handled internally.
*/
@Override
protected boolean requireTransaction()
{
return false;
}
/** Worker callback for transactional use */
RetryingTransactionCallback<Long> getStartingCommitTimeWork = new RetryingTransactionCallback<Long>()
{
public Long execute() throws Exception
{
return getStartingTxnCommitTime();
}
};
/** Worker callback for transactional use */
RetryingTransactionCallback<Boolean> reindexWork = new RetryingTransactionCallback<Boolean>()
{
public Boolean execute() throws Exception
{
return reindexInTransaction();
}
};
@Override @Override
protected void reindexImpl() protected void reindexImpl()
{ {
RetryingTransactionHelper retryingTransactionHelper = transactionService.getRetryingTransactionHelper();
if (!started) if (!started)
{ {
// Disable in-transaction indexing
if (disableInTransactionIndexing && nodeIndexer != null)
{
logger.warn("In-transaction indexing is being disabled.");
nodeIndexer.setEnabled(false);
}
// Make sure that we start clean // Make sure that we start clean
voids.clear(); voids.clear();
previousTxnIds = new ArrayList<Long>(maxRecordSetSize); previousTxnIds = new ArrayList<Long>(maxRecordSetSize);
lastMaxTxnId = Long.MAX_VALUE; // So that it is ignored at first lastMaxTxnId = null; // So that it is ignored at first
fromTimeInclusive = getStartingTxnCommitTime(); fromTimeInclusive = retryingTransactionHelper.doInTransaction(getStartingCommitTimeWork, true, true);
started = true; started = true;
} }
while (true) while (true)
{
Boolean repeat = retryingTransactionHelper.doInTransaction(reindexWork, true, true);
// Only break out if there isn't any more work to do (for now)
if (repeat == null || repeat.booleanValue() == false)
{
break;
}
}
// Wait for the asynchronous reindexing to complete
waitForAsynchronousReindexing();
}
/**
* @return Returns <tt>true</tt> if the reindex process can exit otherwise <tt>false</tt> if
* a new transaction should be created and the process kicked off again
*/
private boolean reindexInTransaction()
{ {
long toTimeExclusive = System.currentTimeMillis() - reindexLagMs; long toTimeExclusive = System.currentTimeMillis() - reindexLagMs;
// Check that the voids haven't been filled // Check that the voids haven't been filled
fromTimeInclusive = checkVoids(fromTimeInclusive); long minLiveVoidTime = checkVoids();
if (minLiveVoidTime <= fromTimeInclusive)
{
// A void was discovered.
// We need to adjust the search time for transactions, i.e. hop back in time but
// this also entails a full build from that point on. So all previous transactions
// need to be reindexed.
fromTimeInclusive = minLiveVoidTime;
previousTxnIds.clear();
}
// get next transactions to index // get next transactions to index
List<Transaction> txns = getNextTransactions(fromTimeInclusive, toTimeExclusive, previousTxnIds); List<Transaction> txns = getNextTransactions(fromTimeInclusive, toTimeExclusive, previousTxnIds);
// If there are no transactions, then all the work is done
if (txns.size() == 0)
{
// We have caught up.
// There is no need to force reindexing until the next unindex transactions appear.
forceReindex = false;
return false;
}
if (logger.isDebugEnabled()) if (logger.isDebugEnabled())
{ {
String msg = String.format( String msg = String.format(
@@ -154,44 +256,50 @@ public class IndexTransactionTracker extends AbstractReindexComponent
// Reindex the transactions. Voids between the last set of transactions and this // Reindex the transactions. Voids between the last set of transactions and this
// set will be detected as well. Additionally, the last max transaction will be // set will be detected as well. Additionally, the last max transaction will be
// updated by this method. // updated by this method.
reindexTransactions(txns); long maxProcessedTxnCommitTime = reindexTransactions(txns);
// Call the listener // Call the listener
synchronized (this) synchronized (this)
{ {
if (listener != null) if (listener != null)
{ {
listener.indexedTransactions(fromTimeInclusive, toTimeExclusive); listener.indexedTransactions(fromTimeInclusive, maxProcessedTxnCommitTime);
} }
} }
// Move the time on. // Move the time on.
// Note the subtraction here. Yes, it's odd. But the results of the getNextTransactions // The next fromTimeInclusive may well pull back transactions that have just been
// may be limited by recordset size and it is possible to have multiple transactions share // processed. But we keep track of those and exclude them from the results.
// the same commit time. If these txns get split up and we exclude the time period, then if (fromTimeInclusive == maxProcessedTxnCommitTime)
// they won't be requeried. The list of previously used transaction IDs is passed back to {
// be exluded from the next query. // The time didn't advance. If no new transaction appear, we could spin on
fromTimeInclusive = toTimeExclusive - 1L; // two or more transactions with the same commit time. So we DON'T clear
// the list of previous transactions and we allow them to live on.
}
else
{
// The processing time has moved on
fromTimeInclusive = maxProcessedTxnCommitTime;
previousTxnIds.clear(); previousTxnIds.clear();
}
for (Transaction txn : txns) for (Transaction txn : txns)
{ {
previousTxnIds.add(txn.getId()); previousTxnIds.add(txn.getId());
} }
// Break out if there were no transactions processed
if (previousTxnIds.isEmpty())
{
break;
}
// break out if the VM is shutting down
if (isShuttingDown()) if (isShuttingDown())
{ {
break; // break out if the VM is shutting down
return false;
} }
else
{
// There is more work to do and we should be called back right away
return true;
} }
} }
private static final long ONE_HOUR_MS = 3600*1000;
/** /**
* Find a transaction time to start indexing from (inclusive). The last recorded transaction by ID * Find a transaction time to start indexing from (inclusive). The last recorded transaction by ID
* is taken and the max transaction duration substracted from its commit time. A transaction is * is taken and the max transaction duration substracted from its commit time. A transaction is
@@ -200,11 +308,34 @@ public class IndexTransactionTracker extends AbstractReindexComponent
* or a transaction is found in the index. * or a transaction is found in the index.
*/ */
protected long getStartingTxnCommitTime() protected long getStartingTxnCommitTime()
{
long now = System.currentTimeMillis();
// Get the last indexed transaction for all transactions
long lastIndexedAllCommitTimeMs = getLastIndexedCommitTime(now, false);
// Now check back from this time to make sure there are no remote transactions that weren't indexed
long lastIndexedRemoteCommitTimeMs = getLastIndexedCommitTime(now, true);
// The one to start at is the least of the two times
long startTime = Math.min(lastIndexedAllCommitTimeMs, lastIndexedRemoteCommitTimeMs);
// Done
return startTime;
}
/**
* Gets the commit time for the last indexed transaction. If there are no transactions, then the
* current time is returned.
*
* @param maxCommitTimeMs the largest commit time to consider
* @param remoteOnly <tt>true</tt> to only look at remotely-committed transactions
* @return Returns the last indexed transaction commit time for all or
* remote-only transactions.
*/
private long getLastIndexedCommitTime(long maxCommitTimeMs, boolean remoteOnly)
{ {
// Look back in time by the maximum transaction duration // Look back in time by the maximum transaction duration
long toTimeExclusive = System.currentTimeMillis() - maxTxnDurationMs; long maxToTimeExclusive = maxCommitTimeMs - maxTxnDurationMs;
long toTimeExclusive = maxToTimeExclusive;
long fromTimeInclusive = 0L; long fromTimeInclusive = 0L;
double stepFactor = 1.0D; double stepFactor = 1.0D;
boolean firstWasInIndex = true;
found: found:
while (true) while (true)
{ {
@@ -213,7 +344,8 @@ found:
0L, 0L,
toTimeExclusive, toTimeExclusive,
1, 1,
null); null,
remoteOnly);
// There are no transactions in that time range // There are no transactions in that time range
if (nextTransactions.size() == 0) if (nextTransactions.size() == 0)
{ {
@@ -221,93 +353,119 @@ found:
} }
// We found a transaction // We found a transaction
Transaction txn = nextTransactions.get(0); Transaction txn = nextTransactions.get(0);
Long txnId = txn.getId();
long txnCommitTime = txn.getCommitTimeMs(); long txnCommitTime = txn.getCommitTimeMs();
// Check that it is in the index // Check that it is in the index
InIndex txnInIndex = isTxnIdPresentInIndex(txnId); InIndex txnInIndex = isTxnPresentInIndex(txn);
switch (txnInIndex) switch (txnInIndex)
{ {
case YES: case YES:
fromTimeInclusive = txnCommitTime; fromTimeInclusive = txnCommitTime;
break found; break found;
default: default:
// Look further back in time. Step back by the maximum transaction duration and firstWasInIndex = false;
// increase this step back by a factor of 10% each iteration. // Look further back in time. Step back by 60 seconds each time, increasing
toTimeExclusive = txnCommitTime - (long)(maxTxnDurationMs * stepFactor); // the step by 10% each iteration.
// Don't step back by more than a day
long decrement = Math.min(ONE_HOUR_MS, (long) (60000.0D * stepFactor));
toTimeExclusive = txnCommitTime - decrement;
stepFactor *= 1.1D; stepFactor *= 1.1D;
continue; continue;
} }
} }
// We have a starting value // If the last transaction (given the max txn duration) was in the index, then we used the
// maximum commit time i.e. the indexes were up to date up until the most recent time.
if (firstWasInIndex)
{
return maxToTimeExclusive;
}
else
{
return fromTimeInclusive; return fromTimeInclusive;
} }
}
private static final int VOID_BATCH_SIZE = 100;
/** /**
* Voids - otherwise known as 'holes' - in the transaction sequence are timestamped when they are * Voids - otherwise known as 'holes' - in the transaction sequence are timestamped when they are
* discovered. This method discards voids that were timestamped before the given date. It checks * discovered. This method discards voids that were timestamped before the given date. It checks
* all remaining voids, passing back the transaction time for the newly-filled void. Otherwise * all remaining voids, passing back the transaction time for the newly-filled void. Otherwise
* the value passed in is passed back. * the value passed in is passed back.
* *
* @param fromTimeInclusive the oldest void to consider
* @return Returns an adjused start position based on any voids being filled * @return Returns an adjused start position based on any voids being filled
* or <b>Long.MAX_VALUE</b> if no new voids were found
*/ */
private long checkVoids(long fromTimeInclusive) private long checkVoids()
{ {
long maxHistoricalTime = (fromTimeInclusive - maxTxnDurationMs); long maxHistoricalTime = (fromTimeInclusive - maxTxnDurationMs);
long fromTimeAdjusted = fromTimeInclusive; long fromTimeAdjusted = Long.MAX_VALUE;
List<Long> toExpireTxnIds = new ArrayList<Long>(1); List<Long> toExpireTxnIds = new ArrayList<Long>(1);
// The voids are stored in a sorted map, sorted by the txn ID Iterator<Long> voidTxnIdIterator = voids.keySet().iterator();
for (Long voidTxnId : voids.keySet()) List<Long> voidTxnIdBatch = new ArrayList<Long>(VOID_BATCH_SIZE);
while (voidTxnIdIterator.hasNext())
{ {
TxnRecord voidTxnRecord = voids.get(voidTxnId); Long voidTxnId = voidTxnIdIterator.next();
// Is the transaction around, yet? // Add it to the batch
Transaction voidTxn = nodeDaoService.getTxnById(voidTxnId); voidTxnIdBatch.add(voidTxnId);
if (voidTxn == null) // If the batch is full or if there are no more voids, fire the query
if (voidTxnIdBatch.size() == VOID_BATCH_SIZE || !voidTxnIdIterator.hasNext())
{ {
// It's still just a void. Shall we expire it? List<Transaction> filledTxns = nodeDaoService.getTxnsByMinCommitTime(voidTxnIdBatch);
if (voidTxnRecord.txnCommitTime < maxHistoricalTime) for (Transaction txn : filledTxns)
{
if (txn.getCommitTimeMs() == null) // Just coping with Hibernate mysteries
{ {
// It's too late for this void
toExpireTxnIds.add(voidTxnId);
}
continue; continue;
} }
else if (voidTxn.getCommitTimeMs() == null) else if (isTxnPresentInIndex(txn) != InIndex.NO)
{ {
// http://issues.alfresco.com/browse/AR-2041 // It is in the index so expire it from the voids.
// An object was found, but sometimes it is still not fully formed. // This can happen if void was committed locally.
// Perhaps it's the direct request by ID that gives back an uncommitted transaction. toExpireTxnIds.add(txn.getId());
// So this transaction is very likely to become live soon but we just leave it until it does.
// When the issue has been seen, there have not been any committed transactions with null commit times.
if (logger.isDebugEnabled())
{
logger.debug("Void is visible but not live: " + voidTxn);
}
} }
else else
{ {
if (logger.isDebugEnabled()) // It's not in the index so we have a timespamp from which to kick off
// It is a bone fide first transaction. A void has been filled.
long txnCommitTimeMs = txn.getCommitTimeMs().longValue();
// If the value is lower than our current one we keep it
if (txnCommitTimeMs < fromTimeAdjusted)
{ {
logger.debug("Void has become live: " + voidTxn); fromTimeAdjusted = txnCommitTimeMs;
} }
// We found one that has become a real transaction. // The query selected them in timestamp order so there is no need to process
// We don't throw the other voids away. // the remaining transactions in this batch - we have our minimum.
fromTimeAdjusted = voidTxn.getCommitTimeMs();
// Break out as sequential rebuilding is required
break; break;
} }
} }
// Throw away all the expired ones // Wipe the batch clean
for (Long toExpireTxnId : toExpireTxnIds) voidTxnIdBatch.clear();
{
voids.remove(toExpireTxnId);
if (logger.isDebugEnabled())
{
logger.debug("Void has expired: " + toExpireTxnId);
} }
// Check if the void must be expired or not
TxnRecord voidTxnRecord = voids.get(voidTxnId);
if (voidTxnRecord.txnCommitTime < maxHistoricalTime)
{
// It's too late for this void whether or not it has become live
toExpireTxnIds.add(voidTxnId);
}
}
// Throw away all the expired or removable voids
int voidCountBefore = voids.size();
for (Long toRemoveTxnId : toExpireTxnIds)
{
voids.remove(toRemoveTxnId);
}
int voidCountAfter = voids.size();
if (logger.isDebugEnabled() && voidCountBefore != voidCountAfter)
{
logger.debug("Void count " + voidCountBefore + " -> " + voidCountAfter);
} }
// Done // Done
if (logger.isDebugEnabled() && fromTimeAdjusted < Long.MAX_VALUE)
{
logger.debug("Returning to void time " + fromTimeAdjusted);
}
return fromTimeAdjusted; return fromTimeAdjusted;
} }
@@ -317,7 +475,8 @@ found:
fromTimeInclusive, fromTimeInclusive,
toTimeExclusive, toTimeExclusive,
maxRecordSetSize, maxRecordSetSize,
previousTxnIds); previousTxnIds,
false);
// done // done
return txns; return txns;
} }
@@ -328,61 +487,53 @@ found:
* of transaction IDs will be examined for any voids. These will be recorded. * of transaction IDs will be examined for any voids. These will be recorded.
* *
* @param txns transactions ordered by time ascending * @param txns transactions ordered by time ascending
* @return returns the * @return returns the commit time of the last transaction in the list
* @throws IllegalArgumentException if there are no transactions
*/ */
private void reindexTransactions(List<Transaction> txns) private long reindexTransactions(List<Transaction> txns)
{ {
if (txns.isEmpty()) if (txns.isEmpty())
{ {
return; throw new IllegalArgumentException("There are no transactions to process");
} }
Set<Long> processedTxnIds = new HashSet<Long>(13); // Determines the window for void retention
long now = System.currentTimeMillis();
long oldestVoidRetentionTime = (now - maxTxnDurationMs);
boolean forceReindex = false; // Keep an ordered map of IDs that we process along with their commit times
long minNewTxnId = Long.MAX_VALUE; Map<Long, TxnRecord> processedTxnRecords = new TreeMap<Long, TxnRecord>();
long maxNewTxnId = Long.MIN_VALUE;
long maxNewTxnCommitTime = System.currentTimeMillis(); List<Long> txnIdBuffer = new ArrayList<Long>(maxTransactionsPerLuceneCommit);
for (Transaction txn : txns) Iterator<Transaction> txnIterator = txns.iterator();
while (txnIterator.hasNext())
{ {
Transaction txn = txnIterator.next();
Long txnId = txn.getId(); Long txnId = txn.getId();
long txnIdLong = txnId.longValue(); Long txnCommitTimeMs = txn.getCommitTimeMs();
if (txnIdLong < minNewTxnId) if (txnCommitTimeMs == null)
{ {
minNewTxnId = txnIdLong; // What? But let's be cautious and treat this as a void
continue;
} }
if (txnIdLong > maxNewTxnId) // Keep a record of it
{ TxnRecord processedTxnRecord = new TxnRecord();
maxNewTxnId = txnIdLong; processedTxnRecord.txnCommitTime = txnCommitTimeMs;
maxNewTxnCommitTime = txn.getCommitTimeMs(); processedTxnRecords.put(txnId, processedTxnRecord);
}
// Keep track of it for void checking
processedTxnIds.add(txnId);
// Remove this entry from the void list - it is not void // Remove this entry from the void list - it is not void
voids.remove(txnId); voids.remove(txnId);
// Reindex the transaction if we are forcing it or if it isn't in the index already // Reindex the transaction if we are forcing it or if it isn't in the index already
if (forceReindex || isTxnIdPresentInIndex(txnId) == InIndex.NO) if (forceReindex || isTxnPresentInIndex(txn) == InIndex.NO)
{ {
// Any indexing means that all the next transactions have to be indexed // From this point on, until the tracker has caught up, all transactions need to be indexed
forceReindex = true; forceReindex = true;
try // Add the transaction to the buffer of transactions that need processing
{ txnIdBuffer.add(txnId);
if (logger.isDebugEnabled()) if (logger.isDebugEnabled())
{ {
logger.debug("Reindexing transaction: " + txn); logger.debug("Reindexing transaction: " + txn);
} }
// We try the reindex, but for the sake of continuity, have to let it run on
reindexTransaction(txnId);
}
catch (Throwable e)
{
logger.warn("\n" +
"Reindex of transaction failed: \n" +
" Transaction ID: " + txnId + "\n" +
" Error: " + e.getMessage(),
e);
}
} }
else else
{ {
@@ -391,50 +542,82 @@ found:
logger.debug("Reindex skipping transaction: " + txn); logger.debug("Reindex skipping transaction: " + txn);
} }
} }
}
// We have to search for voids now. Don't start at the min transaction,
// but start at the least of the lastMaxTxnId and minNewTxnId
long voidCheckStartTxnId = (lastMaxTxnId < minNewTxnId ? lastMaxTxnId : minNewTxnId) + 1;
long voidCheckEndTxnId = maxNewTxnId;
// Check for voids in new transactions
for (long i = voidCheckStartTxnId; i <= voidCheckEndTxnId; i++)
{
Long txnId = Long.valueOf(i);
if (processedTxnIds.contains(txnId))
{
// It is there
continue;
}
// First make sure that it is a real void. Sometimes, transactions are in the table but don't if (isShuttingDown())
// fall within the commit time window that we queried. If they're in the DB AND in the index,
// then they're not really voids and don't need further checks. If they're missing from either,
// then they're voids and must be processed.
Transaction voidTxn = nodeDaoService.getTxnById(txnId);
if (voidTxn != null && isTxnIdPresentInIndex(txnId) != InIndex.NO)
{ {
// It is a real transaction (not a void) and is already in the index, so just ignore it. break;
continue;
} }
// Flush the reindex buffer, if it is full or if we are on the last transaction and there are no more
// Calculate an age for the void. We can't use the current time as that will mean we keep all if (txnIdBuffer.size() >= maxTransactionsPerLuceneCommit || (!txnIterator.hasNext() && txnIdBuffer.size() > 0))
// discovered voids, even if they are very old. Rather, we use the commit time of the last transaction {
// in the set as it represents the query time for this iteration. try
{
// We try the reindex, but for the sake of continuity, have to let it run on
reindexTransactionAsynchronously(txnIdBuffer);
}
catch (Throwable e)
{
logger.warn("\n" +
"Reindex of transactions failed: \n" +
" Transaction IDs: " + txnIdBuffer + "\n" +
" Error: " + e.getMessage(),
e);
}
// Clear the buffer
txnIdBuffer = new ArrayList<Long>(maxTransactionsPerLuceneCommit);
}
}
// Use the last ID from the previous iteration as our starting point
Long lastId = lastMaxTxnId;
long lastCommitTime = -1L;
// Walk the processed txn IDs
for (Map.Entry<Long, TxnRecord> entry : processedTxnRecords.entrySet())
{
Long processedTxnId = entry.getKey();
TxnRecord processedTxnRecord = entry.getValue();
boolean voidsAreYoungEnough = processedTxnRecord.txnCommitTime >= oldestVoidRetentionTime;
if (lastId != null && voidsAreYoungEnough)
{
int voidCount = 0;
// Iterate BETWEEN the last ID and the current one to find voids
// Only enter the loop if the current upper limit transaction is young enough to
// consider for voids.
for (long i = lastId.longValue() + 1; i < processedTxnId; i++)
{
// The voids are optimistically given the same transaction time as transaction with the
// largest ID. We only bother w
TxnRecord voidRecord = new TxnRecord(); TxnRecord voidRecord = new TxnRecord();
voidRecord.txnCommitTime = maxNewTxnCommitTime; voidRecord.txnCommitTime = processedTxnRecord.txnCommitTime;
voids.put(txnId, voidRecord); voids.put(new Long(i), voidRecord);
if (logger.isDebugEnabled()) voidCount++;
{
logger.debug("Void detected: " + txnId);
} }
if (logger.isDebugEnabled()&& voidCount > 0)
{
logger.debug("Voids detected: " + voidCount + " in range [" + lastId + ", " + processedTxnId + "]");
}
}
lastId = processedTxnId;
lastCommitTime = processedTxnRecord.txnCommitTime;
} }
// Having searched for the nodes, we've recorded all the voids. So move the lastMaxTxnId up. // Having searched for the nodes, we've recorded all the voids. So move the lastMaxTxnId up.
lastMaxTxnId = voidCheckEndTxnId; lastMaxTxnId = lastId;
// Done
return lastCommitTime;
} }
private class TxnRecord private class TxnRecord
{ {
private long txnCommitTime; private long txnCommitTime;
@Override
public String toString()
{
StringBuilder sb = new StringBuilder(128);
sb.append("TxnRecord")
.append("[time=").append(txnCommitTime <= 0 ? "---" : new Date(txnCommitTime))
.append("]");
return sb.toString();
}
} }
/** /**

View File

@@ -16,6 +16,8 @@
*/ */
package org.alfresco.repo.node.index; package org.alfresco.repo.node.index;
import java.util.concurrent.ThreadPoolExecutor;
import junit.framework.TestCase; import junit.framework.TestCase;
import org.alfresco.model.ContentModel; import org.alfresco.model.ContentModel;
@@ -52,6 +54,7 @@ public class IndexTransactionTrackerTest extends TestCase
private AuthenticationComponent authenticationComponent; private AuthenticationComponent authenticationComponent;
private SearchService searchService; private SearchService searchService;
private NodeService nodeService; private NodeService nodeService;
private ThreadPoolExecutor threadPoolExecutor;
private FileFolderService fileFolderService; private FileFolderService fileFolderService;
private ContentStore contentStore; private ContentStore contentStore;
private FullTextSearchIndexer ftsIndexer; private FullTextSearchIndexer ftsIndexer;
@@ -65,6 +68,7 @@ public class IndexTransactionTrackerTest extends TestCase
ServiceRegistry serviceRegistry = (ServiceRegistry) ctx.getBean(ServiceRegistry.SERVICE_REGISTRY); ServiceRegistry serviceRegistry = (ServiceRegistry) ctx.getBean(ServiceRegistry.SERVICE_REGISTRY);
searchService = serviceRegistry.getSearchService(); searchService = serviceRegistry.getSearchService();
nodeService = serviceRegistry.getNodeService(); nodeService = serviceRegistry.getNodeService();
threadPoolExecutor = (ThreadPoolExecutor) ctx.getBean("indexTrackerThreadPoolExecutor");
fileFolderService = serviceRegistry.getFileFolderService(); fileFolderService = serviceRegistry.getFileFolderService();
authenticationComponent = (AuthenticationComponent) ctx.getBean("authenticationComponent"); authenticationComponent = (AuthenticationComponent) ctx.getBean("authenticationComponent");
contentStore = (ContentStore) ctx.getBean("fileContentStore"); contentStore = (ContentStore) ctx.getBean("fileContentStore");
@@ -79,6 +83,7 @@ public class IndexTransactionTrackerTest extends TestCase
indexTracker.setIndexer(indexer); indexTracker.setIndexer(indexer);
indexTracker.setNodeDaoService(nodeDaoService); indexTracker.setNodeDaoService(nodeDaoService);
indexTracker.setNodeService(nodeService); indexTracker.setNodeService(nodeService);
indexTracker.setThreadPoolExecutor(threadPoolExecutor);
indexTracker.setSearcher(searchService); indexTracker.setSearcher(searchService);
indexTracker.setTransactionService((TransactionServiceImpl)transactionService); indexTracker.setTransactionService((TransactionServiceImpl)transactionService);
@@ -117,8 +122,20 @@ public class IndexTransactionTrackerTest extends TestCase
} }
public synchronized void testStartup() throws Exception public synchronized void testStartup() throws Exception
{
Thread reindexThread = new Thread()
{
public void run()
{ {
indexTracker.reindex(); indexTracker.reindex();
indexTracker.reindex(); indexTracker.reindex();
} }
};
reindexThread.setDaemon(true);
reindexThread.start();
// wait a bit and then terminate
wait(20000);
indexTracker.setShutdown(true);
wait(20000);
}
} }

View File

@@ -24,6 +24,8 @@
*/ */
package org.alfresco.repo.node.index; package org.alfresco.repo.node.index;
import java.util.concurrent.ThreadPoolExecutor;
import junit.framework.TestCase; import junit.framework.TestCase;
import org.alfresco.model.ContentModel; import org.alfresco.model.ContentModel;
@@ -64,6 +66,7 @@ public class MissingContentReindexComponentTest extends TestCase
private AuthenticationComponent authenticationComponent; private AuthenticationComponent authenticationComponent;
private SearchService searchService; private SearchService searchService;
private NodeService nodeService; private NodeService nodeService;
private ThreadPoolExecutor threadPoolExecutor;
private FileFolderService fileFolderService; private FileFolderService fileFolderService;
private ContentStore contentStore; private ContentStore contentStore;
private FullTextSearchIndexer ftsIndexer; private FullTextSearchIndexer ftsIndexer;
@@ -76,6 +79,7 @@ public class MissingContentReindexComponentTest extends TestCase
ServiceRegistry serviceRegistry = (ServiceRegistry) ctx.getBean(ServiceRegistry.SERVICE_REGISTRY); ServiceRegistry serviceRegistry = (ServiceRegistry) ctx.getBean(ServiceRegistry.SERVICE_REGISTRY);
searchService = serviceRegistry.getSearchService(); searchService = serviceRegistry.getSearchService();
nodeService = serviceRegistry.getNodeService(); nodeService = serviceRegistry.getNodeService();
threadPoolExecutor = (ThreadPoolExecutor) ctx.getBean("indexTrackerThreadPoolExecutor");
fileFolderService = serviceRegistry.getFileFolderService(); fileFolderService = serviceRegistry.getFileFolderService();
authenticationComponent = (AuthenticationComponent) ctx.getBean("authenticationComponent"); authenticationComponent = (AuthenticationComponent) ctx.getBean("authenticationComponent");
contentStore = (ContentStore) ctx.getBean("fileContentStore"); contentStore = (ContentStore) ctx.getBean("fileContentStore");
@@ -90,6 +94,7 @@ public class MissingContentReindexComponentTest extends TestCase
reindexer.setIndexer(indexer); reindexer.setIndexer(indexer);
reindexer.setNodeDaoService(nodeDaoService); reindexer.setNodeDaoService(nodeDaoService);
reindexer.setNodeService(nodeService); reindexer.setNodeService(nodeService);
reindexer.setThreadPoolExecutor(threadPoolExecutor);
reindexer.setSearcher(searchService); reindexer.setSearcher(searchService);
reindexer.setTransactionService((TransactionServiceImpl)transactionService); reindexer.setTransactionService((TransactionServiceImpl)transactionService);

View File

@@ -52,6 +52,13 @@ public class NodeIndexer
/** the component to index the node hierarchy */ /** the component to index the node hierarchy */
private Indexer indexer; private Indexer indexer;
private TenantService tenantService; private TenantService tenantService;
/** enabled or disabled */
private boolean enabled;
public NodeIndexer()
{
enabled = true;
}
/** /**
* @param policyComponent used for registrations * @param policyComponent used for registrations
@@ -74,6 +81,11 @@ public class NodeIndexer
this.tenantService = tenantService; this.tenantService = tenantService;
} }
/* package */ void setEnabled(boolean enabled)
{
this.enabled = enabled;
}
/** /**
* Registers the policy behaviour methods * Registers the policy behaviour methods
*/ */
@@ -102,30 +114,42 @@ public class NodeIndexer
} }
public void onCreateNode(ChildAssociationRef childAssocRef) public void onCreateNode(ChildAssociationRef childAssocRef)
{
if (enabled)
{ {
indexer.createNode(tenantService.getName(childAssocRef)); indexer.createNode(tenantService.getName(childAssocRef));
} }
}
public void onUpdateNode(NodeRef nodeRef) public void onUpdateNode(NodeRef nodeRef)
{
if (enabled)
{ {
indexer.updateNode(tenantService.getName(nodeRef)); indexer.updateNode(tenantService.getName(nodeRef));
} }
}
public void onDeleteNode(ChildAssociationRef childAssocRef, boolean isArchivedNode) public void onDeleteNode(ChildAssociationRef childAssocRef, boolean isArchivedNode)
{
if (enabled)
{ {
indexer.deleteNode(tenantService.getName(childAssocRef)); indexer.deleteNode(tenantService.getName(childAssocRef));
} }
}
public void onCreateChildAssociation(ChildAssociationRef childAssocRef, boolean isNew) public void onCreateChildAssociation(ChildAssociationRef childAssocRef, boolean isNew)
{ {
if (!isNew) if (!isNew && enabled)
{ {
indexer.createChildRelationship(tenantService.getName(childAssocRef)); indexer.createChildRelationship(tenantService.getName(childAssocRef));
} }
} }
public void onDeleteChildAssociation(ChildAssociationRef childAssocRef) public void onDeleteChildAssociation(ChildAssociationRef childAssocRef)
{
if (enabled)
{ {
indexer.deleteChildRelationship(tenantService.getName(childAssocRef)); indexer.deleteChildRelationship(tenantService.getName(childAssocRef));
} }
}
} }

View File

@@ -24,11 +24,11 @@
*/ */
package org.alfresco.repo.rule; package org.alfresco.repo.rule;
import org.alfresco.repo.rule.ruletrigger.RuleTriggerTest;
import junit.framework.Test; import junit.framework.Test;
import junit.framework.TestSuite; import junit.framework.TestSuite;
import org.alfresco.repo.rule.ruletrigger.RuleTriggerTest;
/** /**
* Version test suite * Version test suite

View File

@@ -24,7 +24,7 @@
*/ */
package org.alfresco.repo.rule; package org.alfresco.repo.rule;
import org.alfresco.repo.transaction.TransactionListener; import org.alfresco.repo.transaction.TransactionListenerAdapter;
import org.alfresco.util.GUID; import org.alfresco.util.GUID;
/** /**
@@ -32,7 +32,7 @@ import org.alfresco.util.GUID;
* *
* @author Roy Wetherall * @author Roy Wetherall
*/ */
public class RuleTransactionListener implements TransactionListener public class RuleTransactionListener extends TransactionListenerAdapter
{ {
/** /**
* Id used in equals and hash * Id used in equals and hash
@@ -54,42 +54,15 @@ public class RuleTransactionListener implements TransactionListener
this.ruleService = ruleService; this.ruleService = ruleService;
} }
/**
* @see org.alfresco.repo.transaction.TransactionListener#flush()
*/
public void flush()
{
}
/** /**
* @see org.alfresco.repo.transaction.TransactionListener#beforeCommit(boolean) * @see org.alfresco.repo.transaction.TransactionListener#beforeCommit(boolean)
*/ */
@Override
public void beforeCommit(boolean readOnly) public void beforeCommit(boolean readOnly)
{ {
this.ruleService.executePendingRules(); this.ruleService.executePendingRules();
} }
/**
* @see org.alfresco.repo.transaction.TransactionListener#beforeCompletion()
*/
public void beforeCompletion()
{
}
/**
* @see org.alfresco.repo.transaction.TransactionListener#afterCommit()
*/
public void afterCommit()
{
}
/**
* @see org.alfresco.repo.transaction.TransactionListener#afterRollback()
*/
public void afterRollback()
{
}
/** /**
* @see java.lang.Object#hashCode() * @see java.lang.Object#hashCode()
*/ */

View File

@@ -254,11 +254,9 @@ public class AVMLuceneIndexerImpl extends AbstractLuceneIndexerImpl<String> impl
} }
catch (AVMSyncException e) catch (AVMSyncException e)
{ {
s_logger.warn("\n" + s_logger.warn("\n"
"Unable to generate change list for synchronous indexing: \n" + + "Unable to generate change list for synchronous indexing: \n" + " Store: " + store + "\n" + " Start version: " + srcVersion + "\n"
" Store: " + store + "\n" + + " End version: " + endVersion);
" Start version: " + srcVersion + "\n" +
" End version: " + endVersion);
return; return;
} }
for (AVMDifference difference : changeList) for (AVMDifference difference : changeList)
@@ -1420,11 +1418,9 @@ public class AVMLuceneIndexerImpl extends AbstractLuceneIndexerImpl<String> impl
TermEnum terms = null; TermEnum terms = null;
try try
{ {
terms = reader.terms(); terms = reader.terms(new Term("ID", prefix));
if (terms.term() != null)
if (terms.skipTo(new Term("ID", prefix)))
{ {
do do
{ {
Term term = terms.term(); Term term = terms.term();
@@ -1459,7 +1455,6 @@ public class AVMLuceneIndexerImpl extends AbstractLuceneIndexerImpl<String> impl
} }
while (terms.next()); while (terms.next());
} }
} }
finally finally
{ {
@@ -1531,9 +1526,8 @@ public class AVMLuceneIndexerImpl extends AbstractLuceneIndexerImpl<String> impl
TermEnum terms = null; TermEnum terms = null;
try try
{ {
terms = reader.terms(); terms = reader.terms(new Term("ID", prefix));
if (terms.term() != null)
if (terms.skipTo(new Term("ID", prefix)))
{ {
do do
{ {

View File

@@ -41,6 +41,10 @@ import org.alfresco.repo.policy.JavaBehaviour;
import org.alfresco.repo.policy.PolicyComponent; import org.alfresco.repo.policy.PolicyComponent;
import org.alfresco.repo.security.permissions.PermissionServiceSPI; import org.alfresco.repo.security.permissions.PermissionServiceSPI;
import org.alfresco.repo.tenant.TenantService; import org.alfresco.repo.tenant.TenantService;
import org.alfresco.repo.transaction.AlfrescoTransactionSupport;
import org.alfresco.repo.transaction.TransactionListenerAdapter;
import org.alfresco.repo.transaction.AlfrescoTransactionSupport.TxnReadState;
import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
import org.alfresco.service.cmr.dictionary.DictionaryService; import org.alfresco.service.cmr.dictionary.DictionaryService;
import org.alfresco.service.cmr.repository.ChildAssociationRef; import org.alfresco.service.cmr.repository.ChildAssociationRef;
import org.alfresco.service.cmr.repository.NodeRef; import org.alfresco.service.cmr.repository.NodeRef;
@@ -57,12 +61,15 @@ import org.alfresco.service.cmr.security.PersonService;
import org.alfresco.service.namespace.NamespacePrefixResolver; import org.alfresco.service.namespace.NamespacePrefixResolver;
import org.alfresco.service.namespace.NamespaceService; import org.alfresco.service.namespace.NamespaceService;
import org.alfresco.service.namespace.QName; import org.alfresco.service.namespace.QName;
import org.alfresco.service.transaction.TransactionService;
import org.alfresco.util.GUID; import org.alfresco.util.GUID;
import org.alfresco.util.PropertyCheck;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
public class PersonServiceImpl implements PersonService, public class PersonServiceImpl
NodeServicePolicies.OnCreateNodePolicy, NodeServicePolicies.BeforeDeleteNodePolicy extends TransactionListenerAdapter
implements PersonService, NodeServicePolicies.OnCreateNodePolicy, NodeServicePolicies.BeforeDeleteNodePolicy
{ {
private static Log s_logger = LogFactory.getLog(PersonServiceImpl.class); private static Log s_logger = LogFactory.getLog(PersonServiceImpl.class);
@@ -80,6 +87,8 @@ public class PersonServiceImpl implements PersonService,
private StoreRef storeRef; private StoreRef storeRef;
private TransactionService transactionService;
private NodeService nodeService; private NodeService nodeService;
private TenantService tenantService; private TenantService tenantService;
@@ -127,11 +136,32 @@ public class PersonServiceImpl implements PersonService,
mutableProperties = Collections.unmodifiableSet(props); mutableProperties = Collections.unmodifiableSet(props);
} }
@Override
public boolean equals(Object obj)
{
return this == obj;
}
@Override
public int hashCode()
{
return 1;
}
/** /**
* Spring bean init method * Spring bean init method
*/ */
public void init() public void init()
{ {
PropertyCheck.mandatory(this, "storeUrl", storeRef);
PropertyCheck.mandatory(this, "transactionService", transactionService);
PropertyCheck.mandatory(this, "nodeService", nodeService);
PropertyCheck.mandatory(this, "searchService", searchService);
PropertyCheck.mandatory(this, "permissionServiceSPI", permissionServiceSPI);
PropertyCheck.mandatory(this, "authorityService", authorityService);
PropertyCheck.mandatory(this, "namespacePrefixResolver", namespacePrefixResolver);
PropertyCheck.mandatory(this, "policyComponent", policyComponent);
PropertyCheck.mandatory(this, "personCache", personCache);
this.policyComponent.bindClassBehaviour( this.policyComponent.bindClassBehaviour(
QName.createQName(NamespaceService.ALFRESCO_URI, "onCreateNode"), QName.createQName(NamespaceService.ALFRESCO_URI, "onCreateNode"),
ContentModel.TYPE_PERSON, ContentModel.TYPE_PERSON,
@@ -202,8 +232,10 @@ public class PersonServiceImpl implements PersonService,
NodeRef personNode = getPersonOrNull(userName); NodeRef personNode = getPersonOrNull(userName);
if (personNode == null) if (personNode == null)
{ {
if (createMissingPeople()) TxnReadState txnReadState = AlfrescoTransactionSupport.getTransactionReadState();
if (createMissingPeople() && txnReadState == TxnReadState.TXN_READ_WRITE)
{ {
// We create missing people AND are in a read-write txn
return createMissingPerson(userName); return createMissingPerson(userName);
} }
else else
@@ -289,11 +321,7 @@ public class PersonServiceImpl implements PersonService,
rs.close(); rs.close();
} }
} }
if (singleton) if (!singleton)
{
returnRef = returnRef;
}
else
{ {
returnRef = handleDuplicates(searchUserName); returnRef = handleDuplicates(searchUserName);
} }
@@ -303,29 +331,12 @@ public class PersonServiceImpl implements PersonService,
} }
return returnRef; return returnRef;
} }
private NodeRef handleDuplicates(String searchUserName) private NodeRef handleDuplicates(String searchUserName)
{ {
if (processDuplicates) if (processDuplicates)
{ {
NodeRef best = findBest(searchUserName); NodeRef best = findBest(searchUserName);
if (duplicateMode.equalsIgnoreCase(SPLIT)) addDuplicateUserNameToHandle(searchUserName, best);
{
split(searchUserName, best);
s_logger.info("Split duplicate person objects for uid " + searchUserName);
}
else if (duplicateMode.equalsIgnoreCase(DELETE))
{
delete(searchUserName, best);
s_logger.info("Deleted duplicate person objects for uid " + searchUserName);
}
else
{
if (s_logger.isDebugEnabled())
{
s_logger.debug("Duplicate person objects exist for uid " + searchUserName);
}
}
return best; return best;
} }
else else
@@ -343,6 +354,74 @@ public class PersonServiceImpl implements PersonService,
} }
} }
private static final String KEY_POST_TXN_DUPLICATES = "PersonServiceImpl.KEY_POST_TXN_DUPLICATES";
/**
* Get the txn-bound usernames that need cleaning up
*/
private Map<String, NodeRef> getPostTxnDuplicates()
{
@SuppressWarnings("unchecked")
Map<String, NodeRef> postTxnDuplicates = (Map<String, NodeRef>) AlfrescoTransactionSupport.getResource(KEY_POST_TXN_DUPLICATES);
if (postTxnDuplicates == null)
{
postTxnDuplicates = new HashMap<String, NodeRef>(7);
AlfrescoTransactionSupport.bindResource(KEY_POST_TXN_DUPLICATES, postTxnDuplicates);
}
return postTxnDuplicates;
}
/**
* Flag a username for cleanup after the transaction.
*/
private void addDuplicateUserNameToHandle(String searchUserName, NodeRef best)
{
// Firstly, bind this service to the transaction
AlfrescoTransactionSupport.bindListener(this);
// Now get the post txn duplicate list
Map<String, NodeRef> postTxnDuplicates = getPostTxnDuplicates();
postTxnDuplicates.put(searchUserName, best);
}
/**
* Process clean up any duplicates that were flagged during the transaction.
*/
@Override
public void afterCommit()
{
// Get the duplicates in a form that can be read by the transaction work anonymous instance
final Map<String, NodeRef> postTxnDuplicates = getPostTxnDuplicates();
RetryingTransactionCallback<Object> processDuplicateWork = new RetryingTransactionCallback<Object>()
{
public Object execute() throws Throwable
{
for (Map.Entry<String, NodeRef> entry : postTxnDuplicates.entrySet())
{
String username = entry.getKey();
NodeRef best = entry.getValue();
if (duplicateMode.equalsIgnoreCase(SPLIT))
{
split(username, best);
s_logger.info("Split duplicate person objects for uid " + username);
}
else if (duplicateMode.equalsIgnoreCase(DELETE))
{
delete(username, best);
s_logger.info("Deleted duplicate person objects for uid " + username);
}
else
{
if (s_logger.isDebugEnabled())
{
s_logger.debug("Duplicate person objects exist for uid " + username);
}
}
}
// Done
return null;
}
};
transactionService.getRetryingTransactionHelper().doInTransaction(processDuplicateWork, false, true);
}
private void delete(String searchUserName, NodeRef best) private void delete(String searchUserName, NodeRef best)
{ {
SearchParameters sp = new SearchParameters(); SearchParameters sp = new SearchParameters();
@@ -799,6 +878,11 @@ public class PersonServiceImpl implements PersonService,
this.permissionServiceSPI = permissionServiceSPI; this.permissionServiceSPI = permissionServiceSPI;
} }
public void setTransactionService(TransactionService transactionService)
{
this.transactionService = transactionService;
}
public void setNodeService(NodeService nodeService) public void setNodeService(NodeService nodeService)
{ {
this.nodeService = nodeService; this.nodeService = nodeService;

View File

@@ -27,19 +27,26 @@ package org.alfresco.repo.security.person;
import java.io.Serializable; import java.io.Serializable;
import java.util.HashMap; import java.util.HashMap;
import java.util.Map; import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.alfresco.model.ContentModel; import org.alfresco.model.ContentModel;
import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
import org.alfresco.service.cmr.repository.NodeRef; import org.alfresco.service.cmr.repository.NodeRef;
import org.alfresco.service.cmr.repository.NodeService; import org.alfresco.service.cmr.repository.NodeService;
import org.alfresco.service.cmr.repository.StoreRef; import org.alfresco.service.cmr.repository.StoreRef;
import org.alfresco.service.cmr.repository.datatype.DefaultTypeConverter; import org.alfresco.service.cmr.repository.datatype.DefaultTypeConverter;
import org.alfresco.service.cmr.security.NoSuchPersonException;
import org.alfresco.service.cmr.security.PersonService; import org.alfresco.service.cmr.security.PersonService;
import org.alfresco.service.namespace.QName; import org.alfresco.service.namespace.QName;
import org.alfresco.service.transaction.TransactionService;
import org.alfresco.util.BaseSpringTest; import org.alfresco.util.BaseSpringTest;
import org.alfresco.util.EqualsHelper; import org.alfresco.util.EqualsHelper;
import org.alfresco.util.GUID;
public class PersonTest extends BaseSpringTest public class PersonTest extends BaseSpringTest
{ {
private TransactionService transactionService;
private PersonService personService; private PersonService personService;
@@ -55,6 +62,7 @@ public class PersonTest extends BaseSpringTest
protected void onSetUpInTransaction() throws Exception protected void onSetUpInTransaction() throws Exception
{ {
transactionService = (TransactionService) applicationContext.getBean("transactionService");
personService = (PersonService) applicationContext.getBean("personService"); personService = (PersonService) applicationContext.getBean("personService");
nodeService = (NodeService) applicationContext.getBean("nodeService"); nodeService = (NodeService) applicationContext.getBean("nodeService");
@@ -66,6 +74,7 @@ public class PersonTest extends BaseSpringTest
nodeService.deleteNode(nodeRef); nodeService.deleteNode(nodeRef);
} }
personService.setCreateMissingPeople(true);
} }
protected void onTearDownInTransaction() throws Exception protected void onTearDownInTransaction() throws Exception
@@ -410,4 +419,142 @@ public class PersonTest extends BaseSpringTest
personService.getPerson("Derek"); personService.getPerson("Derek");
} }
public void testReadOnlyTransactionHandling() throws Exception
{
// Kill the annoying Spring-managed txn
super.setComplete();
super.endTransaction();
boolean createMissingPeople = personService.createMissingPeople();
assertTrue("Default should be to create missing people", createMissingPeople);
final String username = "Derek";
// Make sure that the person is missing
RetryingTransactionCallback<Object> deletePersonWork = new RetryingTransactionCallback<Object>()
{
public Object execute() throws Throwable
{
personService.deletePerson(username);
return null;
}
};
transactionService.getRetryingTransactionHelper().doInTransaction(deletePersonWork, false, true);
// Make a read-only transaction and check that we get NoSuchPersonException
RetryingTransactionCallback<NodeRef> getMissingPersonWork = new RetryingTransactionCallback<NodeRef>()
{
public NodeRef execute() throws Throwable
{
return personService.getPerson(username);
}
};
try
{
transactionService.getRetryingTransactionHelper().doInTransaction(getMissingPersonWork, true, true);
fail("Expected auto-creation of person to fail gracefully");
}
catch (NoSuchPersonException e)
{
// Expected
}
// It should work in a write transaction, though
transactionService.getRetryingTransactionHelper().doInTransaction(getMissingPersonWork, false, true);
}
public void testSplitPersonCleanup() throws Exception
{
// Kill the annoying Spring-managed txn
super.setComplete();
super.endTransaction();
boolean createMissingPeople = personService.createMissingPeople();
assertTrue("Default should be to create missing people", createMissingPeople);
PersonServiceImpl personServiceImpl = (PersonServiceImpl) personService;
personServiceImpl.setDuplicateMode("LEAVE");
// The user to duplicate
final String duplicateUsername = GUID.generate();
// Make sure that the person is missing
RetryingTransactionCallback<Object> deletePersonWork = new RetryingTransactionCallback<Object>()
{
public Object execute() throws Throwable
{
personService.deletePerson(duplicateUsername);
return null;
}
};
transactionService.getRetryingTransactionHelper().doInTransaction(deletePersonWork, false, true);
// Fire off 10 threads to create the same person
int threadCount = 10;
final CountDownLatch startLatch = new CountDownLatch(threadCount);
final CountDownLatch endLatch = new CountDownLatch(threadCount);
final Map<String, NodeRef> cleanableNodeRefs = new HashMap<String, NodeRef>(17);
Runnable createPersonRunnable = new Runnable()
{
public void run()
{
final RetryingTransactionCallback<NodeRef> createPersonWork = new RetryingTransactionCallback<NodeRef>()
{
public NodeRef execute() throws Throwable
{
// Wait for the trigger to start
try { startLatch.await(); } catch (InterruptedException e) {}
// Trigger
NodeRef personNodeRef = personService.getPerson(duplicateUsername);
return personNodeRef;
}
};
startLatch.countDown();
try
{
NodeRef nodeRef = transactionService.getRetryingTransactionHelper().doInTransaction(createPersonWork, false, true);
// Store the noderef for later checking
String threadName = Thread.currentThread().getName();
cleanableNodeRefs.put(threadName, nodeRef);
}
catch (Throwable e)
{
// Errrm
e.printStackTrace();
}
endLatch.countDown();
}
};
// Fire the threads
for (int i = 0; i < threadCount; i++)
{
Thread thread = new Thread(createPersonRunnable);
thread.setName(getName() + "-" + i);
thread.setDaemon(true);
thread.start();
}
// Wait for the threads to have finished
try { endLatch.await(60, TimeUnit.SECONDS); } catch (InterruptedException e) {}
// Now, get the user with full split person handling
personServiceImpl.setDuplicateMode("DELETE");
RetryingTransactionCallback<NodeRef> getPersonWork = new RetryingTransactionCallback<NodeRef>()
{
public NodeRef execute() throws Throwable
{
return personService.getPerson(duplicateUsername);
}
};
NodeRef remainingNodeRef = transactionService.getRetryingTransactionHelper().doInTransaction(getPersonWork, false, true);
// Should all be cleaned up now, but no way to check
for (NodeRef nodeRef : cleanableNodeRefs.values())
{
if (nodeRef.equals(remainingNodeRef))
{
// This one should still be around
continue;
}
if (nodeService.exists(nodeRef))
{
fail("Expected unused person noderef to have been cleaned up: " + nodeRef);
}
}
}
} }

View File

@@ -134,6 +134,42 @@ public abstract class AlfrescoTransactionSupport
} }
} }
/**
*
* @author Derek Hulley
* @since 2.1.4
*/
public static enum TxnReadState
{
/** No transaction is active */
TXN_NONE,
/** The current transaction is read-only */
TXN_READ_ONLY,
/** The current transaction supports writes */
TXN_READ_WRITE
}
/**
* @return Returns the read-write state of the current transaction
* @since 2.1.4
*/
public static TxnReadState getTransactionReadState()
{
if (!TransactionSynchronizationManager.isSynchronizationActive())
{
return TxnReadState.TXN_NONE;
}
// Find the read-write state of the txn
if (TransactionSynchronizationManager.isCurrentTransactionReadOnly())
{
return TxnReadState.TXN_READ_ONLY;
}
else
{
return TxnReadState.TXN_READ_WRITE;
}
}
/** /**
* Are there any pending changes which must be synchronized with the store? * Are there any pending changes which must be synchronized with the store?
* *
@@ -710,26 +746,6 @@ public abstract class AlfrescoTransactionSupport
logger.debug("After completion (" + statusStr + "): " + this); logger.debug("After completion (" + statusStr + "): " + this);
} }
// commit/rollback Lucene
for (LuceneIndexerAndSearcher lucene : lucenes)
{
try
{
if (status == TransactionSynchronization.STATUS_COMMITTED)
{
lucene.commit();
}
else
{
lucene.rollback();
}
}
catch (RuntimeException e)
{
logger.error("After completion (" + statusStr + ") Lucene exception", e);
}
}
List<TransactionListener> iterableListeners = getListenersIterable(); List<TransactionListener> iterableListeners = getListenersIterable();
// notify listeners // notify listeners
if (status == TransactionSynchronization.STATUS_COMMITTED) if (status == TransactionSynchronization.STATUS_COMMITTED)
@@ -765,6 +781,46 @@ public abstract class AlfrescoTransactionSupport
} }
} }
// commit/rollback Lucene
for (LuceneIndexerAndSearcher lucene : lucenes)
{
try
{
if (status == TransactionSynchronization.STATUS_COMMITTED)
{
lucene.commit();
}
else
{
lucene.rollback();
}
}
catch (RuntimeException e)
{
logger.error("After completion (" + statusStr + ") Lucene exception", e);
}
}
// Clean up the transactional caches
for (TransactionalCache<Serializable, Object> cache : transactionalCaches)
{
try
{
if (status == TransactionSynchronization.STATUS_COMMITTED)
{
cache.afterCommit();
}
else
{
cache.afterRollback();
}
}
catch (RuntimeException e)
{
logger.error("After completion (" + statusStr + ") TransactionalCache exception", e);
}
}
// clear the thread's registrations and synchronizations // clear the thread's registrations and synchronizations
AlfrescoTransactionSupport.clearSynchronization(); AlfrescoTransactionSupport.clearSynchronization();
} }

View File

@@ -31,6 +31,7 @@ import javax.transaction.UserTransaction;
import junit.framework.TestCase; import junit.framework.TestCase;
import org.alfresco.repo.transaction.AlfrescoTransactionSupport.TxnReadState;
import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback; import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
import org.alfresco.service.ServiceRegistry; import org.alfresco.service.ServiceRegistry;
import org.alfresco.service.transaction.TransactionService; import org.alfresco.service.transaction.TransactionService;
@@ -236,4 +237,25 @@ public class AlfrescoTransactionSupportTest extends TestCase
// make sure that the binding all worked // make sure that the binding all worked
assertTrue("Expected callbacks not all processed: " + testList, testList.size() == 0); assertTrue("Expected callbacks not all processed: " + testList, testList.size() == 0);
} }
public void testReadWriteStateRetrieval() throws Exception
{
RetryingTransactionCallback<TxnReadState> getReadStateWork = new RetryingTransactionCallback<TxnReadState>()
{
public TxnReadState execute() throws Exception
{
return AlfrescoTransactionSupport.getTransactionReadState();
}
};
// Check TXN_NONE
TxnReadState checkTxnReadState = AlfrescoTransactionSupport.getTransactionReadState();
assertEquals("Expected 'no transaction'", TxnReadState.TXN_NONE, checkTxnReadState);
// Check TXN_READ_ONLY
checkTxnReadState = transactionService.getRetryingTransactionHelper().doInTransaction(getReadStateWork, true);
assertEquals("Expected 'read-only transaction'", TxnReadState.TXN_READ_ONLY, checkTxnReadState);
// check TXN_READ_WRITE
checkTxnReadState = transactionService.getRetryingTransactionHelper().doInTransaction(getReadStateWork, false);
assertEquals("Expected 'read-write transaction'", TxnReadState.TXN_READ_WRITE, checkTxnReadState);
}
} }

View File

@@ -54,8 +54,21 @@ import org.springframework.jdbc.UncategorizedSQLException;
* A helper that runs a unit of work inside a UserTransaction, * A helper that runs a unit of work inside a UserTransaction,
* transparently retrying the unit of work if the cause of * transparently retrying the unit of work if the cause of
* failure is an optimistic locking or deadlock condition. * failure is an optimistic locking or deadlock condition.
* <p>
* Defaults:
* <ul>
* <li><b>maxRetries: 20</b></li>
* <li><b>minRetryWaitMs: 100</b></li>
* <li><b>maxRetryWaitMs: 2000</b></li>
* <li><b>retryWaitIncrementMs: 100</b></li>
* </ul>
* <p>
* To get details of 'why' transactions are retried use the following log level:<br>
* <b>Summary: log4j.logger.org.alfresco.repo.transaction.RetryingTransactionHelper=INFO</b><br>
* <b>Details: log4j.logger.org.alfresco.repo.transaction.RetryingTransactionHelper=DEBUG</b><br>
* *
* @author britt *
* @author Derek Hulley
*/ */
public class RetryingTransactionHelper public class RetryingTransactionHelper
{ {
@@ -90,10 +103,14 @@ public class RetryingTransactionHelper
*/ */
private TransactionService txnService; private TransactionService txnService;
/** /** The maximum number of retries. -1 for infinity. */
* The maximum number of retries. -1 for infinity.
*/
private int maxRetries; private int maxRetries;
/** The minimum time to wait between retries. */
private int minRetryWaitMs;
/** The maximum time to wait between retries. */
private int maxRetryWaitMs;
/** How much to increase the wait time with each retry. */
private int retryWaitIncrementMs;
/** /**
* Whether the the transactions may only be reads * Whether the the transactions may only be reads
@@ -126,6 +143,10 @@ public class RetryingTransactionHelper
public RetryingTransactionHelper() public RetryingTransactionHelper()
{ {
this.random = new Random(System.currentTimeMillis()); this.random = new Random(System.currentTimeMillis());
this.maxRetries = 20;
this.minRetryWaitMs = 100;
this.maxRetryWaitMs = 2000;
this.retryWaitIncrementMs = 100;
} }
// Setters. // Setters.
@@ -145,6 +166,21 @@ public class RetryingTransactionHelper
this.maxRetries = maxRetries; this.maxRetries = maxRetries;
} }
public void setMinRetryWaitMs(int minRetryWaitMs)
{
this.minRetryWaitMs = minRetryWaitMs;
}
public void setMaxRetryWaitMs(int maxRetryWaitMs)
{
this.maxRetryWaitMs = maxRetryWaitMs;
}
public void setRetryWaitIncrementMs(int retryWaitIncrementMs)
{
this.retryWaitIncrementMs = retryWaitIncrementMs;
}
/** /**
* Set whether this helper only supports read transactions. * Set whether this helper only supports read transactions.
*/ */
@@ -332,9 +368,22 @@ public class RetryingTransactionHelper
{ {
// Sleep a random amount of time before retrying. // Sleep a random amount of time before retrying.
// The sleep interval increases with the number of retries. // The sleep interval increases with the number of retries.
int sleepIntervalRandom = count > 0 ? random.nextInt(count * retryWaitIncrementMs) : minRetryWaitMs;
int sleepInterval = Math.min(maxRetryWaitMs, sleepIntervalRandom);
sleepInterval = Math.max(sleepInterval, minRetryWaitMs);
if (logger.isInfoEnabled() && !logger.isDebugEnabled())
{
String msg = String.format(
"Retrying %s: count %2d; wait: %1.1fs; msg: \"%s\"; exception: (%s)",
Thread.currentThread().getName(),
count, (double)sleepInterval/1000D,
retryCause.getMessage(),
retryCause.getClass().getName());
logger.info(msg);
}
try try
{ {
Thread.sleep(random.nextInt(500 * count + 500)); Thread.sleep(sleepInterval);
} }
catch (InterruptedException ie) catch (InterruptedException ie)
{ {

View File

@@ -33,14 +33,19 @@ import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.TransactionDefinition; import org.springframework.transaction.TransactionDefinition;
/** /**
* Default implementation of Transaction Service * Default implementation of Transaction Service.
* <p>
* Default retry behaviour: see {@link RetryingTransactionHelper#RetryingTransactionHelper()}
* *
* @author David Caruana * @author David Caruana
*/ */
public class TransactionServiceImpl implements TransactionService public class TransactionServiceImpl implements TransactionService
{ {
private PlatformTransactionManager transactionManager; private PlatformTransactionManager transactionManager;
private int maxRetries = 20; private int maxRetries = -1;
private int minRetryWaitMs = -1;
private int maxRetryWaitMs = -1;
private int retryWaitIncrementMs = -1;
// SysAdmin cache - used to cluster certain JMX operations // SysAdmin cache - used to cluster certain JMX operations
private SimpleCache<String, Object> sysAdminCache; private SimpleCache<String, Object> sysAdminCache;
@@ -79,16 +84,37 @@ public class TransactionServiceImpl implements TransactionService
} }
/** /**
* Set the maximum number of retries that will be done by the * @see RetryingTransactionHelper#setMaxRetries(int)
* {@link RetryingTransactionHelper transaction helper}.
*
* @param maxRetries the maximum transaction retries
*/ */
public void setMaxRetries(int maxRetries) public void setMaxRetries(int maxRetries)
{ {
this.maxRetries = maxRetries; this.maxRetries = maxRetries;
} }
/**
* @see RetryingTransactionHelper#setMinRetryWaitMs(int)
*/
public void setMinRetryWaitMs(int minRetryWaitMs)
{
this.minRetryWaitMs = minRetryWaitMs;
}
/**
* @see RetryingTransactionHelper#setMaxRetryWaitMs(int)
*/
public void setMaxRetryWaitMs(int maxRetryWaitMs)
{
this.maxRetryWaitMs = maxRetryWaitMs;
}
/**
* @see RetryingTransactionHelper#setRetryWaitIncrementMs(int)
*/
public void setRetryWaitIncrementMs(int retryWaitIncrementMs)
{
this.retryWaitIncrementMs = retryWaitIncrementMs;
}
/** /**
* @see org.springframework.transaction.TransactionDefinition#PROPAGATION_REQUIRED * @see org.springframework.transaction.TransactionDefinition#PROPAGATION_REQUIRED
*/ */
@@ -146,14 +172,30 @@ public class TransactionServiceImpl implements TransactionService
} }
/** /**
* Creates a new helper instance. It can be reused. * Creates a new helper instance. It can be reused or customized by the client code:
* each instance is new and initialized afresh.
*/ */
public RetryingTransactionHelper getRetryingTransactionHelper() public RetryingTransactionHelper getRetryingTransactionHelper()
{ {
RetryingTransactionHelper helper = new RetryingTransactionHelper(); RetryingTransactionHelper helper = new RetryingTransactionHelper();
helper.setMaxRetries(maxRetries);
helper.setTransactionService(this); helper.setTransactionService(this);
helper.setReadOnly(isReadOnly()); helper.setReadOnly(isReadOnly());
if (maxRetries >= 0)
{
helper.setMaxRetries(maxRetries);
}
if (minRetryWaitMs > 0)
{
helper.setMinRetryWaitMs(minRetryWaitMs);
}
if (maxRetryWaitMs > 0)
{
helper.setMaxRetryWaitMs(maxRetryWaitMs);
}
if (retryWaitIncrementMs > 0)
{
helper.setRetryWaitIncrementMs(retryWaitIncrementMs);
}
return helper; return helper;
} }
} }

View File

@@ -38,8 +38,12 @@ public final class StoreRef implements EntityRef, Serializable
private static final long serialVersionUID = 3905808565129394486L; private static final long serialVersionUID = 3905808565129394486L;
public static final String PROTOCOL_WORKSPACE = "workspace"; public static final String PROTOCOL_WORKSPACE = "workspace";
public static final String PROTOCOL_ARCHIVE = "archive";
public static final String PROTOCOL_AVM = "avm"; public static final String PROTOCOL_AVM = "avm";
public static final StoreRef STORE_REF_WORKSPACE_SPACESSTORE = new StoreRef(PROTOCOL_WORKSPACE, "SpacesStore");
public static final StoreRef STORE_REF_ARCHIVE_SPACESSTORE = new StoreRef(PROTOCOL_ARCHIVE, "SpacesStore");
public static final String URI_FILLER = "://"; public static final String URI_FILLER = "://";
private final String protocol; private final String protocol;

View File

@@ -51,11 +51,11 @@ import org.springframework.beans.factory.InitializingBean;
* <li><b>{@link #setKeepAliveTime(int) keepAliveTime}: </b> * <li><b>{@link #setKeepAliveTime(int) keepAliveTime}: </b>
* 90 seconds</li> * 90 seconds</li>
* <li><b>{@link #setThreadPriority(int) threadPriority}: </b> * <li><b>{@link #setThreadPriority(int) threadPriority}: </b>
* 1 (LOWEST)</li> * 5 (NORM)</li>
* <li><b>{@link #setThreadDaemon(boolean) threadDaemon}: </b> * <li><b>{@link #setThreadDaemon(boolean) threadDaemon}: </b>
* true</li> * true</li>
* <li><b>{@link #setWorkQueue(BlockingQueue) workQueue}: </b> * <li><b>{@link #setWorkQueueSize(int) workQueueSize}: </b>
* An unbounded <code>LinkedBlockingQueue</code></li> * -1 or less (No upper bound)</li>
* <li><b>{@link #setRejectedExecutionHandler(RejectedExecutionHandler) rejectedExecutionHandler: </b> * <li><b>{@link #setRejectedExecutionHandler(RejectedExecutionHandler) rejectedExecutionHandler: </b>
* <code>ThreadPoolExecutor.CallerRunsPolicy</code></li> * <code>ThreadPoolExecutor.CallerRunsPolicy</code></li>
* </ul> * </ul>
@@ -67,9 +67,9 @@ public class ThreadPoolExecutorFactoryBean implements FactoryBean, InitializingB
private static final int DEFAULT_CORE_POOL_SIZE = 20; private static final int DEFAULT_CORE_POOL_SIZE = 20;
private static final int DEFAULT_MAXIMUM_POOL_SIZE = -1; // -1 is a sign that it must match the core pool size private static final int DEFAULT_MAXIMUM_POOL_SIZE = -1; // -1 is a sign that it must match the core pool size
private static final int DEFAULT_KEEP_ALIVE_TIME = 90; // seconds private static final int DEFAULT_KEEP_ALIVE_TIME = 90; // seconds
private static final int DEFAULT_THREAD_PRIORITY = Thread.MIN_PRIORITY; private static final int DEFAULT_THREAD_PRIORITY = Thread.NORM_PRIORITY;
private static final boolean DEFAULT_THREAD_DAEMON = Boolean.TRUE; private static final boolean DEFAULT_THREAD_DAEMON = Boolean.TRUE;
private static final BlockingQueue<Runnable> DEFAULT_WORK_QUEUE = new LinkedBlockingQueue<Runnable>(); private static final int DEFAULT_WORK_QUEUE_SIZE = -1;
private static final RejectedExecutionHandler DEFAULT_REJECTED_EXECUTION_HANDLER = new ThreadPoolExecutor.CallerRunsPolicy(); private static final RejectedExecutionHandler DEFAULT_REJECTED_EXECUTION_HANDLER = new ThreadPoolExecutor.CallerRunsPolicy();
private int corePoolSize; private int corePoolSize;
@@ -77,7 +77,7 @@ public class ThreadPoolExecutorFactoryBean implements FactoryBean, InitializingB
private int keepAliveTime; private int keepAliveTime;
private int threadPriority; private int threadPriority;
private boolean threadDaemon; private boolean threadDaemon;
private BlockingQueue<Runnable> workQueue; private int workQueueSize;
private RejectedExecutionHandler rejectedExecutionHandler; private RejectedExecutionHandler rejectedExecutionHandler;
/** the instance that will be given out by the factory */ /** the instance that will be given out by the factory */
private ThreadPoolExecutor instance; private ThreadPoolExecutor instance;
@@ -92,7 +92,7 @@ public class ThreadPoolExecutorFactoryBean implements FactoryBean, InitializingB
keepAliveTime = DEFAULT_KEEP_ALIVE_TIME; keepAliveTime = DEFAULT_KEEP_ALIVE_TIME;
threadPriority = DEFAULT_THREAD_PRIORITY; threadPriority = DEFAULT_THREAD_PRIORITY;
threadDaemon = DEFAULT_THREAD_DAEMON; threadDaemon = DEFAULT_THREAD_DAEMON;
workQueue = DEFAULT_WORK_QUEUE; workQueueSize = DEFAULT_WORK_QUEUE_SIZE;
rejectedExecutionHandler = DEFAULT_REJECTED_EXECUTION_HANDLER; rejectedExecutionHandler = DEFAULT_REJECTED_EXECUTION_HANDLER;
} }
@@ -148,13 +148,15 @@ public class ThreadPoolExecutorFactoryBean implements FactoryBean, InitializingB
} }
/** /**
* The optional queue instance to use * The maximum number of queued work instances to keep before blocking
* against further adds.
* *
* @param workQueue optional queue implementation * @param size the queue size before blocks, or <b>-1</b> default
* to indicate no upper bound
*/ */
public void setWorkQueue(BlockingQueue<Runnable> workQueue) public void setWorkQueueSize(int workQueueSize)
{ {
this.workQueue = workQueue; this.workQueueSize = workQueueSize;
} }
/** /**
@@ -181,6 +183,12 @@ public class ThreadPoolExecutorFactoryBean implements FactoryBean, InitializingB
threadFactory.setThreadDaemon(threadDaemon); threadFactory.setThreadDaemon(threadDaemon);
threadFactory.setThreadPriority(threadPriority); threadFactory.setThreadPriority(threadPriority);
if (workQueueSize < 0)
{
workQueueSize = Integer.MAX_VALUE;
}
BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<Runnable>(workQueueSize);
// construct the instance // construct the instance
instance = new ThreadPoolExecutor( instance = new ThreadPoolExecutor(
corePoolSize, corePoolSize,
@@ -197,7 +205,7 @@ public class ThreadPoolExecutorFactoryBean implements FactoryBean, InitializingB
*/ */
public boolean isSingleton() public boolean isSingleton()
{ {
return true; return false;
} }
/** /**