Merged V2.1 to HEAD

   6950: Fix for forum issue (6111) when using xsl:include
   6951: Partial fix for WCM-862
   6952: Merged V1.4 to V2.1
      6921: Reindex tracking refactoring.
   6954: Merged V1.4 to V2.1
      6927: Config and startup changes for index tracking


git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/alfresco/HEAD/root@7369 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
Derek Hulley 2007-11-12 23:52:46 +00:00
parent 209dd85a0d
commit e82c2cd946
22 changed files with 824 additions and 456 deletions

View File

@ -51,6 +51,7 @@
</property>
<property name="postUpdateScriptPatches">
<list>
<ref bean="patch.db-V1.4-TxnCommitTimeIndex" />
<ref bean="patch.db-V2.1-FKIndexes" />
<ref bean="patch.db-V2.1-ExplicitIndexes" />
<ref bean="patch.db-V2.1-JBPMData" />

View File

@ -0,0 +1,18 @@
--
-- Explicit index for alf_transaction.commit_time_ms (Generic Schema 1.4)
--
CREATE INDEX idx_commit_time_ms ON alf_transaction (commit_time_ms);
UPDATE alf_transaction SET commit_time_ms = id WHERE commit_time_ms IS NULL;
--
-- Record script finish
--
delete from alf_applied_patch where id = 'patch.db-V1.4-TxnCommitTimeIndex';
insert into alf_applied_patch
(id, description, fixes_from_schema, fixes_to_schema, applied_to_schema, target_schema, applied_on_date, applied_to_server, was_executed, succeeded, report)
values
(
'patch.db-V1.4-TxnCommitTimeIndex', 'Executed script AlfrescoSchemaUpdate-1.4-TxnCommitTimeIndex.sql',
0, 75, -1, 76, null, 'UNKNOWN', 1, 1, 'Script completed'
);

View File

@ -15,6 +15,11 @@
#db.pool.initial=10
#db.pool.max=100
#
# Sample index tracking frequency
#
#index.tracking.cronExpression=0/5 * * * * ?
#
# Property to control whether schema updates are performed automatically.
# Updates must be enabled during upgrades as, apart from the static upgrade scripts,

View File

@ -1,83 +1,4 @@
<?xml version='1.0' encoding='UTF-8'?>
<!DOCTYPE beans PUBLIC '-//SPRING//DTD BEAN//EN' 'http://www.springframework.org/dtd/spring-beans.dtd'>
<beans>
Index tracking is now controlled using core properties.
See 'alfresco/repository.properties' and 'alfresco/extension/custom-repository.properties.sample'.
<!--===========================-->
<!-- ADM index tracking -->
<!--===========================-->
<!-- Schedule index tracking for ADM every 10s -->
<bean id="admIndexTrackerTrigger" class="org.alfresco.util.CronTriggerBean">
<property name="jobDetail">
<bean class="org.springframework.scheduling.quartz.JobDetailBean">
<property name="jobClass">
<value>org.alfresco.repo.node.index.IndexRecoveryJob</value>
</property>
<property name="jobDataAsMap">
<map>
<entry key="indexRecoveryComponent">
<ref bean="admIndexTrackerComponent" />
</entry>
</map>
</property>
</bean>
</property>
<property name="scheduler">
<ref bean="schedulerFactory" />
</property>
<property name="cronExpression">
<value>0,10,20,30,40,50 * * * * ?</value>
</property>
</bean>
<bean
id="admIndexTrackerComponent"
class="org.alfresco.repo.node.index.IndexRemoteTransactionTracker"
parent="indexRecoveryComponentBase">
<property name="remoteOnly">
<value>true</value>
</property>
</bean>
<!--===========================-->
<!-- AVM (WCM) index tracking -->
<!--===========================-->
<!-- Schedule index tracking for AVM every 60s -->
<bean id="indexTrackerTrigger" class="org.alfresco.util.CronTriggerBean">
<property name="jobDetail">
<bean class="org.springframework.scheduling.quartz.JobDetailBean">
<property name="jobClass">
<value>org.alfresco.repo.node.index.IndexRecoveryJob</value>
</property>
<property name="jobDataAsMap">
<map>
<entry key="indexRecoveryComponent">
<ref bean="avmIndexTrackerComponent" />
</entry>
</map>
</property>
</bean>
</property>
<property name="scheduler">
<ref bean="schedulerFactory" />
</property>
<property name="cronExpression">
<value>15 * * * * ?</value>
</property>
</bean>
<bean
id="avmIndexTrackerComponent"
class="org.alfresco.repo.node.index.AVMRemoteSnapshotTracker"
parent="indexRecoveryComponentBase">
<property name="avmService">
<ref bean="avmService" />
</property>
<property name="avmSnapShotTriggeredIndexingMethodInterceptor">
<ref bean="avmSnapShotTriggeredIndexingMethodInterceptor" />
</property>
</bean>
</beans>

View File

@ -28,13 +28,6 @@
</bean>
<!-- index recovery and validation -->
<!--
Recovery types are:
NONE: Ignore
VALIDATE: Checks that the last transaction for each store is represented in the indexes
AUTO: Validates and auto-recovers if validation fails
FULL: Full index rebuild, processing all transactions in order. The server is temporarily suspended.
-->
<bean
id="indexRecoveryComponent"
class="org.alfresco.repo.node.index.FullIndexRecoveryComponent"
@ -42,6 +35,12 @@
<property name="recoveryMode">
<value>${index.recovery.mode}</value>
</property>
<property name="stopOnError">
<value>${index.recovery.stopOnError}</value>
</property>
<property name="indexTracker">
<ref bean="admIndexTrackerComponent" />
</property>
</bean>
<bean
@ -60,6 +59,50 @@
</bean>
<!-- Index tracking -->
<!--
This is the default index tracker component. It is used during bootstrap when incremental recovery
is required. It is also used, by default, in the clustered index tracking sample.
-->
<bean
id="admIndexTrackerComponent"
class="org.alfresco.repo.node.index.IndexTransactionTracker"
parent="indexRecoveryComponentBase">
<property name="maxTxnDurationMinutes">
<value>${index.tracking.maxTxnDurationMinutes}</value>
</property>
<property name="reindexLagMs">
<value>${index.tracking.reindexLagMs}</value>
</property>
<property name="maxRecordSetSize">
<value>${index.tracking.maxRecordSetSize}</value>
</property>
</bean>
<!-- Schedule index tracking for ADM -->
<bean id="admIndexTrackerTrigger" class="org.alfresco.util.CronTriggerBean">
<property name="jobDetail">
<bean class="org.springframework.scheduling.quartz.JobDetailBean">
<property name="jobClass">
<value>org.alfresco.repo.node.index.IndexRecoveryJob</value>
</property>
<property name="jobDataAsMap">
<map>
<entry key="indexRecoveryComponent">
<ref bean="admIndexTrackerComponent" />
</entry>
</map>
</property>
</bean>
</property>
<property name="scheduler">
<ref bean="schedulerFactory" />
</property>
<property name="cronExpression">
<value>${index.tracking.cronExpression}</value>
</property>
</bean>
<!-- Bean that attempts to index content that was previously missing -->
<bean
id="missingContentReindexComponent"

View File

@ -1077,4 +1077,21 @@
</property>
</bean>
<bean id="patch.db-V1.4-TxnCommitTimeIndex" class="org.alfresco.repo.admin.patch.impl.SchemaUpgradeScriptPatch" parent="basePatch">
<property name="id"><value>patch.db-V1.4-TxnCommitTimeIndex</value></property>
<property name="description"><value>patch.schemaUpgradeScript.description</value></property>
<property name="fixesFromSchema"><value>0</value></property>
<property name="fixesToSchema"><value>110</value></property>
<property name="targetSchema"><value>111</value></property>
<property name="scriptUrl">
<value>classpath:alfresco/dbscripts/upgrade/1.4/${db.script.dialect}/AlfrescoSchemaUpdate-1.4-TxnCommitTimeIndex.sql</value>
</property>
<!-- dependent on upgrade script 1.4-2 having been run -->
<property name="dependsOn" >
<list>
<ref bean="patch.schemaUpdateScript-V1.4-2" />
</list>
</property>
</bean>
</beans>

View File

@ -13,8 +13,29 @@ dir.indexes=${dir.root}/lucene-indexes
# The location for lucene index locks
dir.indexes.lock=${dir.indexes}/locks
# The index recovery mode (NONE, VALIDATE, AUTO, FULL)
# ######################################### #
# Index Recovery and Tracking Configuration #
# ######################################### #
#
# Recovery types are:
# NONE: Ignore
# VALIDATE: Checks that the first and last transactions for each store are represented in the indexes
# AUTO: Validates and auto-recovers if validation fails
# FULL: Full index rebuild, processing all transactions in order. The server is temporarily suspended.
index.recovery.mode=VALIDATE
# Force FULL recovery to stop when encountering errors
index.recovery.stopOnError=true
# Set the frequency with which the index tracking is triggered.
# By default, this is effectively never, but can be modified as required.
# Examples:
# Once every five seconds: 0/5 * * * * ?
# Once every two seconds : 0/2 * * * * ?
# See http://quartz.sourceforge.net/javadoc/org/quartz/CronTrigger.html
index.tracking.cronExpression=* * * * * ? 2099
# Other properties.
index.tracking.maxTxnDurationMinutes=60
index.tracking.reindexLagMs=50
index.tracking.maxRecordSetSize=1000
# Change the failure behaviour of the configuration checker
system.bootstrap.config_check.strict=true
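A note on the cron syntax used above: the fields are second, minute, hour, day-of-month, month, day-of-week and an optional year, which is how '* * * * * ? 2099' amounts to "never" in practice. Below is a small sketch for sanity-checking an expression before putting it in custom-repository.properties; it assumes Quartz's org.quartz.CronExpression (available from Quartz 1.5) is on the classpath, and the CronCheck class is made up for illustration:

import java.util.Date;
import org.quartz.CronExpression;

public class CronCheck
{
    public static void main(String[] args) throws Exception
    {
        // "Once every five seconds", as suggested in the examples above
        CronExpression every5s = new CronExpression("0/5 * * * * ?");
        System.out.println("Next tracking run: " + every5s.getNextValidTimeAfter(new Date()));

        // The shipped default only matches in the year 2099, i.e. effectively never
        CronExpression dflt = new CronExpression("* * * * * ? 2099");
        System.out.println("Default would first run: " + dflt.getNextValidTimeAfter(new Date()));
    }
}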

View File

@ -19,4 +19,4 @@ version.build=@build-number@
# Schema number
version.schema=110
version.schema=111

View File

@ -484,6 +484,13 @@ public class HibernateAuditDAO extends HibernateDaoSupport implements AuditDAO,
getSession().flush();
}
/**
* NO-OP
*/
public void beforeCommit()
{
}
static class SourceKey
{
String application;

View File

@ -39,6 +39,10 @@ public interface Transaction
public void setChangeTxnId(String changeTxnId);
public Long getCommitTimeMs();
public void setCommitTimeMs(Long commitTimeMs);
public Server getServer();
public void setServer(Server server);

View File

@ -150,6 +150,13 @@ public class PermissionsDaoComponentImpl extends HibernateDaoSupport implements
getSession().flush();
}
/**
* NO-OP
*/
public void beforeCommit()
{
}
public void setProtocolToACLDAO(Map<String, AccessControlListDAO> map)
{
fProtocolToACLDAO = map;

View File

@ -32,6 +32,7 @@
not-null="false"
cascade="none" />
<property name="changeTxnId" column="change_txn_id" type="string" length="56" not-null="true" />
<property name="commitTimeMs" column="commit_time_ms" type="long" not-null="false" index="idx_commit_time_ms" />
</class>
<class
@ -63,34 +64,34 @@
server.ipAddress = :ipAddress
</query>
<query name="txn.GetLastTxnIdForStore">
<query name="txn.GetTxnsByCommitTimeAsc">
<![CDATA[
select
max(txn.id)
txn
from
org.alfresco.repo.domain.hibernate.NodeStatusImpl as status
join status.transaction as txn
org.alfresco.repo.domain.hibernate.TransactionImpl as txn
where
status.key.protocol = :protocol and
status.key.identifier = :identifier
txn.commitTimeMs >= :fromTimeInclusive and
txn.commitTimeMs < :toTimeExclusive and
txn.id not in (:excludeTxnIds)
order by
txn.commitTimeMs
]]>
</query>
<query name="txn.GetLastTxnId">
<query name="txn.GetTxnsByCommitTimeDesc">
<![CDATA[
select
max(txn.id)
txn
from
org.alfresco.repo.domain.hibernate.NodeStatusImpl as status
join status.transaction as txn
</query>
<query name="txn.GetLastRemoteTxnId">
select
max(txn.id)
from
org.alfresco.repo.domain.hibernate.NodeStatusImpl as status
join status.transaction as txn
join txn.server as server
org.alfresco.repo.domain.hibernate.TransactionImpl as txn
where
server.ipAddress != :serverIpAddress
txn.commitTimeMs >= :fromTimeInclusive and
txn.commitTimeMs < :toTimeExclusive and
txn.id not in (:excludeTxnIds)
order by
txn.commitTimeMs desc
]]>
</query>
<query name="txn.CountTransactions">
@ -100,34 +101,6 @@
org.alfresco.repo.domain.hibernate.TransactionImpl as txn
</query>
<query name="txn.GetNextTxns">
<![CDATA[
select
txn
from
org.alfresco.repo.domain.hibernate.TransactionImpl as txn
where
txn.id > :lastTxnId
order by
txn.id
]]>
</query>
<query name="txn.GetNextRemoteTxns">
<![CDATA[
select
txn
from
org.alfresco.repo.domain.hibernate.TransactionImpl as txn
join txn.server as server
where
txn.id > :lastTxnId and
server.ipAddress != :serverIpAddress
order by
txn.id
]]>
</query>
<query name="txn.GetTxnUpdateCountForStore">
select
count(status.key.guid)

View File

@ -25,6 +25,7 @@
package org.alfresco.repo.domain.hibernate;
import java.io.Serializable;
import java.util.Date;
import org.alfresco.repo.domain.Server;
import org.alfresco.repo.domain.Transaction;
@ -44,6 +45,7 @@ public class TransactionImpl extends LifecycleAdapter implements Transaction, Se
private Long id;
private Long version;
private String changeTxnId;
private Long commitTimeMs;
private Server server;
public TransactionImpl()
@ -56,6 +58,7 @@ public class TransactionImpl extends LifecycleAdapter implements Transaction, Se
StringBuilder sb = new StringBuilder(50);
sb.append("Transaction")
.append("[id=").append(id)
.append(", txnTimeMs=").append(new Date(commitTimeMs))
.append(", changeTxnId=").append(changeTxnId)
.append("]");
return sb.toString();
@ -99,6 +102,16 @@ public class TransactionImpl extends LifecycleAdapter implements Transaction, Se
this.changeTxnId = changeTransactionId;
}
public Long getCommitTimeMs()
{
return commitTimeMs;
}
public void setCommitTimeMs(Long commitTimeMs)
{
this.commitTimeMs = commitTimeMs;
}
public Server getServer()
{
return server;

View File

@ -294,14 +294,31 @@ public interface NodeDaoService
public int getNodeCount(final StoreRef storeRef);
public Transaction getTxnById(long txnId);
public Transaction getLastTxn();
public Transaction getLastRemoteTxn();
public Transaction getLastTxnForStore(final StoreRef storeRef);
/**
* Get all transactions in a given time range. Since time-based retrieval doesn't guarantee uniqueness
* for any given millisecond, a list of optional exclusions may be provided.
*
* @param excludeTxnIds a list of txn IDs to ignore. <tt>null</tt> is allowed.
*/
public List<Transaction> getTxnsByCommitTimeAscending(
long fromTimeInclusive,
long toTimeExclusive,
int count,
List<Long> excludeTxnIds);
/**
* Get all transactions in a given time range. Since time-based retrieval doesn't guarantee uniqueness
* for any given millisecond, a list of optional exclusions may be provided.
*
* @param excludeTxnIds a list of txn IDs to ignore. <tt>null</tt> is allowed.
*/
public List<Transaction> getTxnsByCommitTimeDescending(
long fromTimeInclusive,
long toTimeExclusive,
int count,
List<Long> excludeTxnIds);
public int getTxnUpdateCount(final long txnId);
public int getTxnDeleteCount(final long txnId);
public int getTransactionCount();
public List<Transaction> getNextTxns(final long lastTxnId, final int count);
public List<Transaction> getNextRemoteTxns(final long lastTxnId, final int count);
public List<NodeRef> getTxnChangesForStore(final StoreRef storeRef, final long txnId);
public List<NodeRef> getTxnChanges(final long txnId);
}
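These two methods replace the ID-based getNextTxns/getNextRemoteTxns paging. A minimal sketch of the intended calling pattern, mirroring the window-plus-exclusion loop that FullIndexRecoveryComponent.performFullRecovery() uses further down; the walker class itself is hypothetical and assumes a NodeDaoService instance has been injected (and that the sketch sits in the same package as the interface):

import java.util.ArrayList;
import java.util.List;
import org.alfresco.repo.domain.Transaction;

public class TxnTimeWalkerSketch
{
    private NodeDaoService nodeDaoService;   // hypothetical wiring

    public void walk()
    {
        long fromTimeInclusive = Long.MIN_VALUE;
        List<Long> excludeTxnIds = new ArrayList<Long>();
        while (true)
        {
            List<Transaction> txns = nodeDaoService.getTxnsByCommitTimeAscending(
                    fromTimeInclusive, Long.MAX_VALUE, 1000, excludeTxnIds);
            if (txns.isEmpty())
            {
                break;                       // caught up
            }
            excludeTxnIds.clear();
            for (Transaction txn : txns)
            {
                // ... process txn ...
                // Requery from this same commit time next time round: several
                // transactions can share one millisecond, so the exclusion list,
                // not the time window, is what prevents re-processing.
                fromTimeInclusive = txn.getCommitTimeMs();
                excludeTxnIds.add(txn.getId());
            }
        }
    }
}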

View File

@ -287,6 +287,20 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
return transaction;
}
/**
* Ensure that any transaction that might be present is updated to reflect the current time.
*/
public void beforeCommit()
{
Serializable txnId = (Serializable) AlfrescoTransactionSupport.getResource(RESOURCE_KEY_TRANSACTION_ID);
if (txnId != null)
{
// A write was done during the current transaction
Transaction transaction = (Transaction) getHibernateTemplate().get(TransactionImpl.class, txnId);
transaction.setCommitTimeMs(System.currentTimeMillis());
}
}
/**
* Does this <tt>Session</tt> contain any changes which must be
* synchronized with the store?
@ -1417,14 +1431,11 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
/*
* Queries for transactions
*/
private static final String QUERY_GET_LAST_TXN_ID = "txn.GetLastTxnId";
private static final String QUERY_GET_LAST_REMOTE_TXN_ID = "txn.GetLastRemoteTxnId";
private static final String QUERY_GET_LAST_TXN_ID_FOR_STORE = "txn.GetLastTxnIdForStore";
private static final String QUERY_GET_TXNS_BY_COMMIT_TIME_ASC = "txn.GetTxnsByCommitTimeAsc";
private static final String QUERY_GET_TXNS_BY_COMMIT_TIME_DESC = "txn.GetTxnsByCommitTimeDesc";
private static final String QUERY_GET_TXN_UPDATE_COUNT_FOR_STORE = "txn.GetTxnUpdateCountForStore";
private static final String QUERY_GET_TXN_DELETE_COUNT_FOR_STORE = "txn.GetTxnDeleteCountForStore";
private static final String QUERY_COUNT_TRANSACTIONS = "txn.CountTransactions";
private static final String QUERY_GET_NEXT_TXNS = "txn.GetNextTxns";
private static final String QUERY_GET_NEXT_REMOTE_TXNS = "txn.GetNextRemoteTxns";
private static final String QUERY_GET_TXN_CHANGES_FOR_STORE = "txn.GetTxnChangesForStore";
private static final String QUERY_GET_TXN_CHANGES = "txn.GetTxnChanges";
@ -1433,78 +1444,6 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
return (Transaction) getSession().get(TransactionImpl.class, new Long(txnId));
}
@SuppressWarnings("unchecked")
public Transaction getLastTxn()
{
HibernateCallback callback = new HibernateCallback()
{
public Object doInHibernate(Session session)
{
Query query = session.getNamedQuery(QUERY_GET_LAST_TXN_ID);
query.setMaxResults(1)
.setReadOnly(true);
return query.uniqueResult();
}
};
Long txnId = (Long) getHibernateTemplate().execute(callback);
Transaction txn = null;
if (txnId != null)
{
txn = (Transaction) getSession().get(TransactionImpl.class, txnId);
}
// done
return txn;
}
@SuppressWarnings("unchecked")
public Transaction getLastRemoteTxn()
{
HibernateCallback callback = new HibernateCallback()
{
public Object doInHibernate(Session session)
{
Query query = session.getNamedQuery(QUERY_GET_LAST_REMOTE_TXN_ID);
query.setString("serverIpAddress", ipAddress)
.setMaxResults(1)
.setReadOnly(true);
return query.uniqueResult();
}
};
Long txnId = (Long) getHibernateTemplate().execute(callback);
Transaction txn = null;
if (txnId != null)
{
txn = (Transaction) getSession().get(TransactionImpl.class, txnId);
}
// done
return txn;
}
@SuppressWarnings("unchecked")
public Transaction getLastTxnForStore(final StoreRef storeRef)
{
HibernateCallback callback = new HibernateCallback()
{
public Object doInHibernate(Session session)
{
Query query = session.getNamedQuery(QUERY_GET_LAST_TXN_ID_FOR_STORE);
query.setString("protocol", storeRef.getProtocol())
.setString("identifier", storeRef.getIdentifier())
.setMaxResults(1)
.setReadOnly(true);
return query.uniqueResult();
}
};
Long txnId = (Long) getHibernateTemplate().execute(callback);
Transaction txn = null;
if (txnId != null)
{
txn = (Transaction) getSession().get(TransactionImpl.class, txnId);
}
// done
return txn;
}
@SuppressWarnings("unchecked")
public int getTxnUpdateCount(final long txnId)
{
@ -1559,15 +1498,32 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
return count.intValue();
}
private static final Long TXN_ID_DUD = Long.valueOf(-1L);
@SuppressWarnings("unchecked")
public List<Transaction> getNextTxns(final long lastTxnId, final int count)
public List<Transaction> getTxnsByCommitTimeAscending(
final long fromTimeInclusive,
final long toTimeExclusive,
final int count,
List<Long> excludeTxnIds)
{
// Make sure that we have at least one entry in the exclude list
final List<Long> excludeTxnIdsInner = new ArrayList<Long>(excludeTxnIds == null ? 1 : excludeTxnIds.size());
if (excludeTxnIds == null || excludeTxnIds.isEmpty())
{
excludeTxnIdsInner.add(TXN_ID_DUD);
}
else
{
excludeTxnIdsInner.addAll(excludeTxnIds);
}
HibernateCallback callback = new HibernateCallback()
{
public Object doInHibernate(Session session)
{
Query query = session.getNamedQuery(QUERY_GET_NEXT_TXNS);
query.setLong("lastTxnId", lastTxnId)
Query query = session.getNamedQuery(QUERY_GET_TXNS_BY_COMMIT_TIME_ASC);
query.setLong("fromTimeInclusive", fromTimeInclusive)
.setLong("toTimeExclusive", toTimeExclusive)
.setParameterList("excludeTxnIds", excludeTxnIdsInner)
.setMaxResults(count)
.setReadOnly(true);
return query.list();
@ -1579,15 +1535,30 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
}
@SuppressWarnings("unchecked")
public List<Transaction> getNextRemoteTxns(final long lastTxnId, final int count)
public List<Transaction> getTxnsByCommitTimeDescending(
final long fromTimeInclusive,
final long toTimeExclusive,
final int count,
List<Long> excludeTxnIds)
{
// Make sure that we have at least one entry in the exclude list
final List<Long> excludeTxnIdsInner = new ArrayList<Long>(excludeTxnIds == null ? 1 : excludeTxnIds.size());
if (excludeTxnIds == null || excludeTxnIds.isEmpty())
{
excludeTxnIdsInner.add(TXN_ID_DUD);
}
else
{
excludeTxnIdsInner.addAll(excludeTxnIds);
}
HibernateCallback callback = new HibernateCallback()
{
public Object doInHibernate(Session session)
{
Query query = session.getNamedQuery(QUERY_GET_NEXT_REMOTE_TXNS);
query.setLong("lastTxnId", lastTxnId)
.setString("serverIpAddress", ipAddress)
Query query = session.getNamedQuery(QUERY_GET_TXNS_BY_COMMIT_TIME_DESC);
query.setLong("fromTimeInclusive", fromTimeInclusive)
.setLong("toTimeExclusive", toTimeExclusive)
.setParameterList("excludeTxnIds", excludeTxnIdsInner)
.setMaxResults(count)
.setReadOnly(true);
return query.list();

View File

@ -412,6 +412,23 @@ public abstract class AbstractReindexComponent implements IndexRecovery
return true;
}
/**
* @return Returns <tt>false</tt> if any one of the transactions isn't in the index.
*/
protected boolean areTxnsInIndex(List<Transaction> txns)
{
for (Transaction txn : txns)
{
long txnId = txn.getId().longValue();
if (isTxnIdPresentInIndex(txnId) == InIndex.NO)
{
// Missing txn
return false;
}
}
return true;
}
/**
* Perform a full reindexing of the given transaction in the context of a completely
* new transaction.

View File

@ -24,6 +24,8 @@
*/
package org.alfresco.repo.node.index;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.alfresco.i18n.I18NUtil;
@ -41,6 +43,9 @@ import org.apache.commons.logging.LogFactory;
* put into read-only mode during the reindex process in order to prevent metadata changes.
* This is not critical and can be {@link #setLockServer(boolean) switched off} if the
* server is required immediately.
* <p>
*
* @see RecoveryMode
*
* @author Derek Hulley
*/
@ -51,6 +56,7 @@ public class FullIndexRecoveryComponent extends AbstractReindexComponent
private static final String MSG_RECOVERY_COMPLETE = "index.recovery.complete";
private static final String MSG_RECOVERY_PROGRESS = "index.recovery.progress";
private static final String MSG_RECOVERY_TERMINATED = "index.recovery.terminated";
private static final String MSG_RECOVERY_ERROR = "index.recovery.error";
private static Log logger = LogFactory.getLog(FullIndexRecoveryComponent.class);
@ -59,11 +65,17 @@ public class FullIndexRecoveryComponent extends AbstractReindexComponent
/** Do nothing - not even a check. */
NONE,
/**
* Perform a quick check on the state of the indexes only.
* Perform a quick check on the state of the indexes only. This only checks that the
* first N transactions are present in the index and doesn't guarantee that the indexes
* are wholly consistent. Normally, the indexes are consistent up to a certain time.
* The system does a precautionary index top-up by default, so the last transactions are
* not validated.
*/
VALIDATE,
/**
* Performs a validation and starts a quick recovery, if necessary.
* Performs a validation and starts a recovery if necessary. In this mode, if start
* transactions are missing then FULL mode is enabled. If end transactions are missing
* then the indexes will be "topped up" to bring them up to date.
*/
AUTO,
/**
@ -75,7 +87,16 @@ public class FullIndexRecoveryComponent extends AbstractReindexComponent
private RecoveryMode recoveryMode;
private boolean lockServer;
private IndexTransactionTracker indexTracker;
private boolean stopOnError;
/**
* Set the defaults:
* <ul>
* <li><b>recoveryMode:</b> VALIDATE</li>
* <li><b>stopOnError:</b> true</li>
* </ul>
*/
public FullIndexRecoveryComponent()
{
recoveryMode = RecoveryMode.VALIDATE;
@ -103,6 +124,29 @@ public class FullIndexRecoveryComponent extends AbstractReindexComponent
this.lockServer = lockServer;
}
/**
* Set the tracker that will be used for AUTO mode.
*
* @param indexTracker an index tracker component
*/
public void setIndexTracker(IndexTransactionTracker indexTracker)
{
this.indexTracker = indexTracker;
}
/**
* Set whether a full rebuild should stop in the event of encountering an error. The default is
* to stop reindexing, and this will lead to the server startup failing when index recovery mode
* is <b>FULL</b>. Sometimes, it is necessary to start the server up regardless of any errors
* with particular nodes.
*
* @param stopOnError <tt>true</tt> to stop reindexing when an error is encountered.
*/
public void setStopOnError(boolean stopOnError)
{
this.stopOnError = stopOnError;
}
@Override
protected void reindexImpl()
{
@ -111,42 +155,11 @@ public class FullIndexRecoveryComponent extends AbstractReindexComponent
logger.debug("Performing index recovery for type: " + recoveryMode);
}
// do we just ignore
// Ignore when NONE
if (recoveryMode == RecoveryMode.NONE)
{
return;
}
// check the level of cover required
boolean fullRecoveryRequired = false;
if (recoveryMode == RecoveryMode.FULL) // no validate required
{
fullRecoveryRequired = true;
}
else // validate first
{
Transaction txn = nodeDaoService.getLastTxn();
if (txn == null)
{
// no transactions - just bug out
return;
}
long txnId = txn.getId();
InIndex txnInIndex = isTxnIdPresentInIndex(txnId);
if (txnInIndex != InIndex.YES)
{
String msg = I18NUtil.getMessage(ERR_INDEX_OUT_OF_DATE);
logger.warn(msg);
// this store isn't up to date
if (recoveryMode == RecoveryMode.VALIDATE)
{
// the store is out of date - validation failed
}
else if (recoveryMode == RecoveryMode.AUTO)
{
fullRecoveryRequired = true;
}
}
}
// put the server into read-only mode for the duration
boolean allowWrite = !transactionService.isReadOnly();
@ -158,11 +171,41 @@ public class FullIndexRecoveryComponent extends AbstractReindexComponent
transactionService.setAllowWrite(false);
}
// do we need to perform a full recovery
if (fullRecoveryRequired)
// Check that the first and last meaningful transactions are indexed
List<Transaction> startTxns = nodeDaoService.getTxnsByCommitTimeAscending(
Long.MIN_VALUE, Long.MAX_VALUE, 10, null);
boolean startAllPresent = areTxnsInIndex(startTxns);
List<Transaction> endTxns = nodeDaoService.getTxnsByCommitTimeDescending(
Long.MIN_VALUE, Long.MAX_VALUE, 10, null);
boolean endAllPresent = areTxnsInIndex(endTxns);
// check the level of cover required
switch (recoveryMode)
{
case AUTO:
if (!startAllPresent)
{
// Initial transactions are missing - rebuild
performFullRecovery();
}
else if (!endAllPresent)
{
// Trigger the tracker, which will top up the indexes
indexTracker.reindex();
}
break;
case VALIDATE:
// Check
if (!startAllPresent || !endAllPresent)
{
// Index is out of date
logger.warn(I18NUtil.getMessage(ERR_INDEX_OUT_OF_DATE));
}
break;
case FULL:
performFullRecovery();
break;
}
}
finally
{
@ -182,18 +225,24 @@ public class FullIndexRecoveryComponent extends AbstractReindexComponent
// count the transactions
int processedCount = 0;
Transaction lastTxn = null;
long fromTimeInclusive = Long.MIN_VALUE;
long toTimeExclusive = Long.MAX_VALUE;
List<Long> lastTxnIds = Collections.<Long>emptyList();
while(true)
{
long lastTxnId = (lastTxn == null) ? -1L : lastTxn.getId().longValue();
List<Transaction> nextTxns = nodeDaoService.getNextTxns(
lastTxnId,
MAX_TRANSACTIONS_PER_ITERATION);
List<Transaction> nextTxns = nodeDaoService.getTxnsByCommitTimeAscending(
fromTimeInclusive,
toTimeExclusive,
MAX_TRANSACTIONS_PER_ITERATION,
lastTxnIds);
lastTxnIds = new ArrayList<Long>(nextTxns.size());
// reindex each transaction
for (Transaction txn : nextTxns)
{
Long txnId = txn.getId();
// Keep it to ensure we exclude it from the next iteration
lastTxnIds.add(txnId);
// check if we have to terminate
if (isShuttingDown())
{
@ -201,8 +250,26 @@ public class FullIndexRecoveryComponent extends AbstractReindexComponent
logger.warn(msgTerminated);
return;
}
// Allow exception to bubble out or not
if (stopOnError)
{
reindexTransaction(txnId);
}
else
{
try
{
reindexTransaction(txnId);
}
catch (Throwable e)
{
String msgError = I18NUtil.getMessage(MSG_RECOVERY_ERROR, txnId, e.getMessage());
logger.info(msgError, e);
}
}
// Although we use the same time as this transaction for the next iteration, we also
// make use of the exclusion list to ensure that it doesn't get pulled back again.
fromTimeInclusive = txn.getCommitTimeMs();
// dump a progress report every 10% of the way
double before = (double) processedCount / (double) txnCount * 10.0; // 0 - 10
@ -222,7 +289,6 @@ public class FullIndexRecoveryComponent extends AbstractReindexComponent
// there are no more
break;
}
lastTxn = nextTxns.get(nextTxns.size() - 1);
}
// done
String msgDone = I18NUtil.getMessage(MSG_RECOVERY_COMPLETE);

View File

@ -24,186 +24,41 @@
*/
package org.alfresco.repo.node.index;
import java.util.List;
import org.alfresco.repo.domain.Transaction;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* Component to check and recover the indexes.
*
* @deprecated As of 1.4.5, use {@linkplain IndexTransactionTracker} instead.
*
* @author Derek Hulley
*/
public class IndexRemoteTransactionTracker extends AbstractReindexComponent
{
private static Log logger = LogFactory.getLog(IndexRemoteTransactionTracker.class);
private boolean remoteOnly;
private boolean started;
private long currentTxnId;
/**
* Dumps an error message.
*/
public IndexRemoteTransactionTracker()
{
remoteOnly = true;
currentTxnId = -1L;
logger.warn(
"The component 'org.alfresco.repo.node.index.IndexRemoteTransactionTracker' " +
"has been replaced by 'org.alfresco.repo.node.index.IndexTransactionTracker' \n" +
"See the extension sample file 'index-tracking-context.xml.sample'. \n" +
"See http://wiki.alfresco.com/wiki/High_Availability_Configuration_V1.4_to_V2.1#Lucene_Index_Synchronization.");
}
/**
* Set whether or not this component should only track remote transactions.
* By default, it is <tt>true</tt>, but under certain test conditions, it may
* be desirable to track local transactions too; e.g. during testing of clustering
* when running multiple instances on the same machine.
*
* @param remoteOnly <tt>true</tt> to reindex only those transactions that were
* committed to the database by a remote server.
* As of release 1.4.5, 2.0.5 and 2.1.1, this property is no longer in use.
*/
public void setRemoteOnly(boolean remoteOnly)
{
this.remoteOnly = remoteOnly;
}
@Override
protected void reindexImpl()
{
if (!started)
{
// Initialize the starting point
currentTxnId = getLastIndexedTxn();
started = true;
}
if (logger.isDebugEnabled())
{
logger.debug("Performing index tracking from txn " + currentTxnId);
}
while (true)
{
// get next transactions to index
List<Transaction> txns = getNextTransactions(currentTxnId);
if (txns.size() == 0)
{
// we've caught up
break;
}
// break out if the VM is shutting down
if (isShuttingDown())
{
break;
}
// reindex all "foreign" or "local" transactions, one at a time
for (Transaction txn : txns)
{
long txnId = txn.getId();
reindexTransaction(txnId);
currentTxnId = txnId;
// break out if the VM is shutting down
if (isShuttingDown())
{
break;
}
}
}
}
private static final long DECREMENT_COUNT = 10L;
/**
* Finds the last indexed transaction. It works backwards from the
* last index in increments, respecting the {@link #setRemoteOnly(boolean) remoteOnly}
* flag.
*
* @return Returns the last index transaction or -1 if there is none
*/
protected long getLastIndexedTxn()
{
// get the last transaction
Transaction txn = null;
if (remoteOnly)
{
txn = nodeDaoService.getLastRemoteTxn();
}
else
{
txn = nodeDaoService.getLastTxn();
}
if (txn == null)
{
// There is no last transaction to use
return -1L;
}
long currentTxnId = txn.getId();
while (currentTxnId >= 0L)
{
// Check if the current txn is in the index
InIndex txnInIndex = isTxnIdPresentInIndex(currentTxnId);
if (txnInIndex == InIndex.YES)
{
// We found somewhere to start
break;
}
// Get back in time
long lastCheckTxnId = currentTxnId;
currentTxnId -= DECREMENT_COUNT;
if (currentTxnId < 0L)
{
currentTxnId = -1L;
}
// We don't know if this number we have is a local or remote txn, so get the very next one
Transaction nextTxn = null;
if (remoteOnly)
{
List<Transaction> nextTxns = nodeDaoService.getNextRemoteTxns(currentTxnId, 1);
if (nextTxns.size() > 0)
{
nextTxn = nextTxns.get(0);
}
}
else
{
List<Transaction> nextTxns = nodeDaoService.getNextTxns(currentTxnId, 1);
if (nextTxns.size() > 0)
{
nextTxn = nextTxns.get(0);
}
}
if (nextTxn == null)
{
// There was nothing relevant after this, so keep going back in time
continue;
}
else if (nextTxn.getId() >= lastCheckTxnId)
{
// Decrementing by DECREMENT_COUNT was not enough
continue;
}
// Adjust the last one we looked at to reflect the correct txn id
currentTxnId = nextTxn.getId();
}
// We are close enough to the beginning, so just go for the first transaction
if (currentTxnId < 0L)
{
currentTxnId = -1L;
}
return currentTxnId;
}
private static final int MAX_TXN_COUNT = 1000;
private List<Transaction> getNextTransactions(long currentTxnId)
{
List<Transaction> txns = null;
if (remoteOnly)
{
txns = nodeDaoService.getNextRemoteTxns(currentTxnId, MAX_TXN_COUNT);
}
else
{
txns = nodeDaoService.getNextTxns(currentTxnId, MAX_TXN_COUNT);
}
// done
return txns;
}
}

View File

@ -0,0 +1,407 @@
/*
* Copyright (C) 2005-2006 Alfresco, Inc.
*
* Licensed under the Mozilla Public License version 1.1
* with a permitted attribution clause. You may obtain a
* copy of the License at
*
* http://www.alfresco.org/legal/license.txt
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the
* License.
*/
package org.alfresco.repo.node.index;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import org.alfresco.error.AlfrescoRuntimeException;
import org.alfresco.repo.domain.Transaction;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* Component to check and recover the indexes.
*
* @author Derek Hulley
*/
public class IndexTransactionTracker extends AbstractReindexComponent
{
private static Log logger = LogFactory.getLog(IndexTransactionTracker.class);
private long maxTxnDurationMs;
private long reindexLagMs;
private int maxRecordSetSize;
private boolean started;
private List<Long> previousTxnIds;
private long lastMaxTxnId;
private long fromTimeInclusive;
private Map<Long, TxnRecord> voids;
/**
* Set the defaults.
* <ul>
* <li><b>Maximum transaction duration:</b> 1 hour</li>
* <li><b>Reindex lag:</b> 1 second</li>
* <li><b>Maximum recordset size:</b> 1000</li>
* </ul>
*/
public IndexTransactionTracker()
{
maxTxnDurationMs = 3600L * 1000L;
reindexLagMs = 1000L;
maxRecordSetSize = 1000;
previousTxnIds = Collections.<Long>emptyList();
lastMaxTxnId = Long.MAX_VALUE;
fromTimeInclusive = -1L;
voids = new TreeMap<Long, TxnRecord>();
}
/**
* Set the expected maximum duration of a transaction. This value is used to adjust the
* look-back used to detect transactions that have committed. Values must be greater than zero.
*
* @param maxTxnDurationMinutes the maximum length of time a transaction will take in minutes
*
* @since 1.4.5, 2.0.5, 2.1.1
*/
public void setMaxTxnDurationMinutes(long maxTxnDurationMinutes)
{
if (maxTxnDurationMinutes < 1)
{
throw new AlfrescoRuntimeException("Maximum transaction duration must be at least one minute.");
}
this.maxTxnDurationMs = maxTxnDurationMinutes * 60L * 1000L;
}
/**
* Transaction tracking should lag by the average commit time for a transaction. This will minimize
* the number of holes in the transaction sequence. Values must be greater than zero.
*
* @param reindexLagMs the minimum age of a transaction to be considered by
* the index transaction tracking
*
* @since 1.4.5, 2.0.5, 2.1.1
*/
public void setReindexLagMs(long reindexLagMs)
{
if (reindexLagMs < 1)
{
throw new AlfrescoRuntimeException("Reindex lag must be at least 1 millisecond.");
}
this.reindexLagMs = reindexLagMs;
}
/**
* Set the number of transactions to request per query.
*/
public void setMaxRecordSetSize(int maxRecordSetSize)
{
this.maxRecordSetSize = maxRecordSetSize;
}
@Override
protected void reindexImpl()
{
if (!started)
{
// Make sure that we start clean
voids.clear();
previousTxnIds = new ArrayList<Long>(maxRecordSetSize);
lastMaxTxnId = Long.MAX_VALUE; // So that it is ignored at first
fromTimeInclusive = getStartingTxnCommitTime();
started = true;
}
while (true)
{
long toTimeExclusive = System.currentTimeMillis() - reindexLagMs;
// Check that the voids haven't been filled
fromTimeInclusive = checkVoids(fromTimeInclusive);
// get next transactions to index
List<Transaction> txns = getNextTransactions(fromTimeInclusive, toTimeExclusive, previousTxnIds);
if (logger.isDebugEnabled())
{
String msg = String.format(
"Reindexing %d transactions from %s (%s)",
txns.size(),
(new Date(fromTimeInclusive)).toString(),
txns.isEmpty() ? "---" : txns.get(0).getId().toString());
logger.debug(msg);
}
// Reindex the transactions. Voids between the last set of transactions and this
// set will be detected as well. Additionally, the last max transaction will be
// updated by this method.
reindexTransactions(txns);
// Move the time on.
// Note the subtraction here. Yes, it's odd. But the results of the getNextTransactions
// may be limited by recordset size and it is possible to have multiple transactions share
// the same commit time. If these txns get split up and we exclude the time period, then
// they won't be requeried. The list of previously used transaction IDs is passed back to
// be excluded from the next query.
fromTimeInclusive = toTimeExclusive - 1L;
previousTxnIds.clear();
for (Transaction txn : txns)
{
previousTxnIds.add(txn.getId());
}
// Break out if there were no transactions processed
if (previousTxnIds.isEmpty())
{
break;
}
// break out if the VM is shutting down
if (isShuttingDown())
{
break;
}
}
}
/**
* Find a transaction time to start indexing from (inclusive). The last recorded transaction by ID
* is taken and the max transaction duration subtracted from its commit time. A transaction is
* retrieved for this time and checked for indexing. If it is present, then that value is chosen.
* If not, a step back in time is taken again. This goes on until there are no more transactions
* or a transaction is found in the index.
*/
protected long getStartingTxnCommitTime()
{
// Look back in time by the maximum transaction duration
long toTimeExclusive = System.currentTimeMillis() - maxTxnDurationMs;
long fromTimeInclusive = 0L;
double stepFactor = 1.0D;
found:
while (true)
{
// Get the most recent transaction before the given look-back
List<Transaction> nextTransactions = nodeDaoService.getTxnsByCommitTimeDescending(
0L,
toTimeExclusive,
1,
null);
// There are no transactions in that time range
if (nextTransactions.size() == 0)
{
break found;
}
// We found a transaction
Transaction txn = nextTransactions.get(0);
Long txnId = txn.getId();
long txnCommitTime = txn.getCommitTimeMs();
// Check that it is in the index
InIndex txnInIndex = isTxnIdPresentInIndex(txnId);
switch (txnInIndex)
{
case YES:
fromTimeInclusive = txnCommitTime;
break found;
default:
// Look further back in time. Step back by the maximum transaction duration and
// increase this step back by a factor of 10% each iteration.
toTimeExclusive = txnCommitTime - (long)(maxTxnDurationMs * stepFactor);
stepFactor *= 1.1D;
continue;
}
}
// We have a starting value
return fromTimeInclusive;
}
/**
* Voids - otherwise known as 'holes' - in the transaction sequence are timestamped when they are
* discovered. This method discards voids that were timestamped before the given date. It checks
* all remaining voids, passing back the transaction time for the newly-filled void. Otherwise
* the value passed in is passed back.
*
* @param fromTimeInclusive the oldest void to consider
* @return Returns an adjusted start position based on any voids being filled
*/
private long checkVoids(long fromTimeInclusive)
{
long maxHistoricalTime = (fromTimeInclusive - maxTxnDurationMs);
long fromTimeAdjusted = fromTimeInclusive;
List<Long> toExpireTxnIds = new ArrayList<Long>(1);
// The voids are stored in a sorted map, sorted by the txn ID
for (Long voidTxnId : voids.keySet())
{
TxnRecord voidTxnRecord = voids.get(voidTxnId);
// Is the transaction around yet?
Transaction voidTxn = nodeDaoService.getTxnById(voidTxnId);
if (voidTxn == null)
{
// It's still just a void. Shall we expire it?
if (voidTxnRecord.txnCommitTime < maxHistoricalTime)
{
// It's too late for this void
toExpireTxnIds.add(voidTxnId);
}
continue;
}
else
{
if (logger.isDebugEnabled())
{
logger.debug("Void has become live: " + voidTxn);
}
// We found one that has become a real transaction.
// We don't throw the other voids away.
fromTimeAdjusted = voidTxn.getCommitTimeMs();
// Break out as sequential rebuilding is required
break;
}
}
// Throw away all the expired ones
for (Long toExpireTxnId : toExpireTxnIds)
{
voids.remove(toExpireTxnId);
}
// Done
return fromTimeAdjusted;
}
private List<Transaction> getNextTransactions(long fromTimeInclusive, long toTimeExclusive, List<Long> previousTxnIds)
{
List<Transaction> txns = nodeDaoService.getTxnsByCommitTimeAscending(
fromTimeInclusive,
toTimeExclusive,
maxRecordSetSize,
previousTxnIds);
// done
return txns;
}
/**
* Checks that each of the transactions is present in the index. As soon as one is found that
* isn't, all the following transactions will be reindexed. After the reindexing, the sequence
* of transaction IDs will be examined for any voids. These will be recorded.
*
* @param txns transactions ordered by time ascending
*/
private void reindexTransactions(List<Transaction> txns)
{
if (txns.isEmpty())
{
return;
}
Set<Long> processedTxnIds = new HashSet<Long>(13);
boolean forceReindex = false;
long minNewTxnId = Long.MAX_VALUE;
long maxNewTxnId = Long.MIN_VALUE;
long maxNewTxnCommitTime = System.currentTimeMillis();
for (Transaction txn : txns)
{
Long txnId = txn.getId();
long txnIdLong = txnId.longValue();
if (txnIdLong < minNewTxnId)
{
minNewTxnId = txnIdLong;
}
if (txnIdLong > maxNewTxnId)
{
maxNewTxnId = txnIdLong;
maxNewTxnCommitTime = txn.getCommitTimeMs();
}
// Keep track of it for void checking
processedTxnIds.add(txnId);
// Remove this entry from the void list - it is not void
voids.remove(txnId);
// Reindex the transaction if we are forcing it or if it isn't in the index already
if (forceReindex || isTxnIdPresentInIndex(txnId) == InIndex.NO)
{
// Any indexing means that all the next transactions have to be indexed
forceReindex = true;
try
{
if (logger.isDebugEnabled())
{
logger.debug("Reindexing transaction: " + txn);
}
// We try the reindex, but for the sake of continuity, have to let it run on
reindexTransaction(txnId);
}
catch (Throwable e)
{
logger.warn("\n" +
"Reindex of transaction failed: \n" +
" Transaction ID: " + txnId + "\n" +
" Error: " + e.getMessage(),
e);
}
}
else
{
if (logger.isDebugEnabled())
{
logger.debug("Reindex skipping transaction: " + txn);
}
}
}
// We have to search for voids now. Don't start at the min transaction,
// but start at the lesser of lastMaxTxnId and minNewTxnId
long voidCheckStartTxnId = (lastMaxTxnId < minNewTxnId ? lastMaxTxnId : minNewTxnId) + 1;
long voidCheckEndTxnId = maxNewTxnId;
// Check for voids in new transactions
for (long i = voidCheckStartTxnId; i <= voidCheckEndTxnId; i++)
{
Long txnId = Long.valueOf(i);
if (processedTxnIds.contains(txnId))
{
// It is there
continue;
}
// First make sure that it is a real void. Sometimes, transactions are in the table but don't
// fall within the commit time window that we queried. If they're in the DB AND in the index,
// then they're not really voids and don't need further checks. If they're missing from either,
// then they're voids and must be processed.
Transaction voidTxn = nodeDaoService.getTxnById(txnId);
if (voidTxn != null && isTxnIdPresentInIndex(txnId) != InIndex.NO)
{
// It is a real transaction (not a void) and is already in the index, so just ignore it.
continue;
}
// Calculate an age for the void. We can't use the current time as that will mean we keep all
// discovered voids, even if they are very old. Rather, we use the commit time of the last transaction
// in the set as it represents the query time for this iteration.
TxnRecord voidRecord = new TxnRecord();
voidRecord.txnCommitTime = maxNewTxnCommitTime;
voids.put(txnId, voidRecord);
if (logger.isDebugEnabled())
{
logger.debug("Void detected: " + txnId);
}
}
// Having searched for the nodes, we've recorded all the voids. So move the lastMaxTxnId up.
lastMaxTxnId = voidCheckEndTxnId;
}
private class TxnRecord
{
private long txnCommitTime;
}
}

View File

@ -1,26 +1,18 @@
/*
* Copyright (C) 2005-2007 Alfresco Software Limited.
* Copyright (C) 2005-2006 Alfresco, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
* As a special exception to the terms and conditions of version 2.0 of
* the GPL, you may redistribute this Program in connection with Free/Libre
* and Open Source Software ("FLOSS") applications as described in Alfresco's
* FLOSS exception. You should have received a copy of the text describing
* the FLOSS exception, and it is also available here:
* http://www.alfresco.com/legal/licensing"
* Licensed under the Mozilla Public License version 1.1
* with a permitted attribution clause. You may obtain a
* copy of the License at
*
* http://www.alfresco.org/legal/license.txt
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the
* License.
*/
package org.alfresco.repo.node.index;
@ -53,7 +45,7 @@ import org.springframework.context.ApplicationContext;
* @author Derek Hulley
*/
@SuppressWarnings("unused")
public class IndexRemoteTransactionTrackerTest extends TestCase
public class IndexTransactionTrackerTest extends TestCase
{
private static ApplicationContext ctx = ApplicationContextHelper.getApplicationContext();
@ -66,7 +58,7 @@ public class IndexRemoteTransactionTrackerTest extends TestCase
private Indexer indexer;
private NodeRef rootNodeRef;
private IndexRemoteTransactionTracker indexTracker;
private IndexTransactionTracker indexTracker;
public void setUp() throws Exception
{
@ -74,14 +66,14 @@ public class IndexRemoteTransactionTrackerTest extends TestCase
searchService = serviceRegistry.getSearchService();
nodeService = serviceRegistry.getNodeService();
fileFolderService = serviceRegistry.getFileFolderService();
authenticationComponent = (AuthenticationComponent) ctx.getBean("authenticationComponent");
authenticationComponent = (AuthenticationComponent) ctx.getBean("authenticationComponentImpl");
contentStore = (ContentStore) ctx.getBean("fileContentStore");
ftsIndexer = (FullTextSearchIndexer) ctx.getBean("LuceneFullTextSearchIndexer");
indexer = (Indexer) ctx.getBean("indexerComponent");
NodeDaoService nodeDaoService = (NodeDaoService) ctx.getBean("nodeDaoService");
TransactionService transactionService = serviceRegistry.getTransactionService();
indexTracker = new IndexRemoteTransactionTracker();
indexTracker = new IndexTransactionTracker();
indexTracker.setAuthenticationComponent(authenticationComponent);
indexTracker.setFtsIndexer(ftsIndexer);
indexTracker.setIndexer(indexer);
@ -116,7 +108,7 @@ public class IndexRemoteTransactionTrackerTest extends TestCase
return childAssocRef;
}
};
ChildAssociationRef childAssocRef = transactionService.getRetryingTransactionHelper().doInTransaction(createNodeWork);
ChildAssociationRef childAssocRef = transactionService.getRetryingTransactionHelper().doInTransaction(createNodeWork, true);
}
public void testSetup() throws Exception

View File

@ -602,6 +602,12 @@ public abstract class AlfrescoTransactionSupport
{
lucene.prepare();
}
// Flush the DAOs
for (TransactionalDao dao : daoServices)
{
dao.beforeCommit();
}
}
/**

View File

@ -23,4 +23,11 @@ public interface TransactionalDao
* @return true => changes are pending
*/
public boolean isDirty();
/**
* This callback provides a chance for the DAO to do any pre-commit work.
*
* @since 1.4.5
*/
public void beforeCommit();
}
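For context, a minimal sketch of an implementor honouring the extended contract; the class is hypothetical, and it assumes isDirty() and the new beforeCommit() are the interface's only methods, as the hunk above suggests:

public class InMemoryCounterDao implements TransactionalDao
{
    private int pendingChanges;

    public void recordChange()
    {
        pendingChanges++;
    }

    public boolean isDirty()
    {
        return pendingChanges > 0;
    }

    public void beforeCommit()
    {
        // Called by AlfrescoTransactionSupport just before commit, after the
        // Lucene prepare() calls (see the loop added earlier in this commit).
        // HibernateNodeDaoServiceImpl uses this hook to stamp
        // alf_transaction.commit_time_ms; here we simply reset the counter.
        pendingChanges = 0;
    }
}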