Merged V1.4 to HEAD

svn merge svn://svn.alfresco.com:3691/alfresco/BRANCHES/V1.4@3925 svn://svn.alfresco.com:3691/alfresco/BRANCHES/V1.4@3965 .


git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/alfresco/HEAD/root@3966 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
This commit is contained in:
Derek Hulley
2006-09-29 07:45:33 +00:00
parent d4947ef511
commit cf29ca2343
51 changed files with 2076 additions and 1985 deletions

View File

@@ -0,0 +1,227 @@
/*
* Copyright (C) 2005-2006 Alfresco, Inc.
*
* Licensed under the Mozilla Public License version 1.1
* with a permitted attribution clause. You may obtain a
* copy of the License at
*
* http://www.alfresco.org/legal/license.txt
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the
* License.
*/
package org.alfresco.repo.node.index;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import net.sf.acegisecurity.Authentication;
import org.alfresco.repo.node.db.NodeDaoService;
import org.alfresco.repo.search.Indexer;
import org.alfresco.repo.search.impl.lucene.fts.FullTextSearchIndexer;
import org.alfresco.repo.security.authentication.AuthenticationComponent;
import org.alfresco.repo.security.authentication.AuthenticationUtil;
import org.alfresco.repo.transaction.TransactionComponent;
import org.alfresco.repo.transaction.TransactionUtil;
import org.alfresco.repo.transaction.TransactionUtil.TransactionWork;
import org.alfresco.service.cmr.repository.NodeService;
import org.alfresco.service.cmr.search.SearchService;
import org.alfresco.util.PropertyCheck;
import org.alfresco.util.VmShutdownListener;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* Abstract helper for reindexing.
*
* @see #reindexImpl()
* @see #getIndexerWriteLock()
* @see #isShuttingDown()
*
* @author Derek Hulley
*/
/**
 * Abstract helper for reindexing.  Subclasses implement {@link #reindexImpl()} to
 * perform the actual work; this class supplies the shared infrastructure: a write
 * lock that prevents concurrent reindex runs, VM/programmatic shutdown detection,
 * execution as the system user and transaction wrapping.
 *
 * @see #reindexImpl()
 * @see #getIndexerWriteLock()
 * @see #isShuttingDown()
 *
 * @author Derek Hulley
 */
public abstract class AbstractReindexComponent implements IndexRecovery
{
    private static final Log logger = LogFactory.getLog(AbstractReindexComponent.class);
    
    /**
     * Kept to notify the thread that it should quit.
     * Fix: the listener label was previously "MissingContentReindexComponent" —
     * a copy-paste slip from a subclass; the name is only a label for the listener.
     */
    private static VmShutdownListener vmShutdownListener = new VmShutdownListener("AbstractReindexComponent");
    
    private AuthenticationComponent authenticationComponent;
    /** provides transactions to atomically index each missed transaction */
    protected TransactionComponent transactionService;
    /** the component to index the node hierarchy */
    protected Indexer indexer;
    /** the FTS indexer that we will prompt to pick up on any un-indexed text */
    protected FullTextSearchIndexer ftsIndexer;
    /** the component providing searches of the indexed nodes */
    protected SearchService searcher;
    /** the component giving direct access to <b>store</b> instances */
    protected NodeService nodeService;
    /** the component giving direct access to <b>transaction</b> instances */
    protected NodeDaoService nodeDaoService;
    
    /** set when a shutdown has been requested programmatically */
    private boolean shutdown;
    /** guards against concurrent execution of the reindex work */
    private final WriteLock indexerWriteLock;
    
    public AbstractReindexComponent()
    {
        shutdown = false;
        ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock();
        indexerWriteLock = readWriteLock.writeLock();
    }
    
    /**
     * Convenience method to get a common write lock.  This can be used to avoid
     * concurrent access to the work methods.
     *
     * @return Returns the write lock shared by all work methods of this instance
     */
    protected WriteLock getIndexerWriteLock()
    {
        return indexerWriteLock;
    }
    
    /**
     * Programmatically notify a reindex thread to terminate
     *
     * @param shutdown true to shutdown, false to reset
     */
    public void setShutdown(boolean shutdown)
    {
        this.shutdown = shutdown;
    }
    
    /**
     * @return Returns true if the VM shutdown hook has been triggered, or the instance
     *      was programmatically {@link #setShutdown(boolean) shut down}
     */
    protected boolean isShuttingDown()
    {
        return shutdown || vmShutdownListener.isVmShuttingDown();
    }
    
    /**
     * @param authenticationComponent ensures that reindexing operates as system user
     */
    public void setAuthenticationComponent(AuthenticationComponent authenticationComponent)
    {
        this.authenticationComponent = authenticationComponent;
    }
    
    /**
     * Set the low-level transaction component to use
     *
     * @param transactionComponent provide transactions to index each missed transaction
     */
    public void setTransactionComponent(TransactionComponent transactionComponent)
    {
        this.transactionService = transactionComponent;
    }
    
    /**
     * @param indexer the indexer that will do the indexing
     */
    public void setIndexer(Indexer indexer)
    {
        this.indexer = indexer;
    }
    
    /**
     * @param ftsIndexer the FTS background indexer
     */
    public void setFtsIndexer(FullTextSearchIndexer ftsIndexer)
    {
        this.ftsIndexer = ftsIndexer;
    }
    
    /**
     * @param searcher component providing index searches
     */
    public void setSearcher(SearchService searcher)
    {
        this.searcher = searcher;
    }
    
    /**
     * @param nodeService provides information about nodes for indexing
     */
    public void setNodeService(NodeService nodeService)
    {
        this.nodeService = nodeService;
    }
    
    /**
     * @param nodeDaoService provides access to transaction-related queries
     */
    public void setNodeDaoService(NodeDaoService nodeDaoService)
    {
        this.nodeDaoService = nodeDaoService;
    }
    
    /**
     * Perform the actual work.  This method will be called as the system user
     * and within an existing transaction.  It will only ever be accessed
     * by a single thread per instance (guarded by the write lock).
     */
    protected abstract void reindexImpl();
    
    /**
     * Checks that all mandatory properties are set, then runs {@link #reindexImpl()}
     * as the system user inside a transaction.  If this object is currently busy
     * (i.e. the write lock is already held) then the call does nothing.
     */
    public final void reindex()
    {
        PropertyCheck.mandatory(this, "authenticationComponent", this.authenticationComponent);
        PropertyCheck.mandatory(this, "ftsIndexer", this.ftsIndexer);
        PropertyCheck.mandatory(this, "indexer", this.indexer);
        PropertyCheck.mandatory(this, "searcher", this.searcher);
        PropertyCheck.mandatory(this, "nodeService", this.nodeService);
        PropertyCheck.mandatory(this, "nodeDaoService", this.nodeDaoService);
        PropertyCheck.mandatory(this, "transactionComponent", this.transactionService);
        
        // a failed tryLock means another thread is already doing the work
        if (indexerWriteLock.tryLock())
        {
            Authentication auth = null;
            try
            {
                auth = AuthenticationUtil.getCurrentAuthentication();
                // authenticate as the system user
                authenticationComponent.setSystemUserAsCurrentUser();
                TransactionWork<Object> reindexWork = new TransactionWork<Object>()
                {
                    public Object doWork() throws Exception
                    {
                        reindexImpl();
                        return null;
                    }
                };
                TransactionUtil.executeInUserTransaction(transactionService, reindexWork);
            }
            finally
            {
                // release the lock first; the unlock cannot fail here, but stay defensive
                try { indexerWriteLock.unlock(); } catch (Throwable e) {}
                if (auth != null)
                {
                    authenticationComponent.setCurrentAuthentication(auth);
                }
                // NOTE(review): if there was no prior authentication, the system-user
                // authentication set above is left in place — confirm this is intended
            }
            // done
            if (logger.isDebugEnabled())
            {
                logger.debug("Reindex work completed: " + this);
            }
        }
        else
        {
            if (logger.isDebugEnabled())
            {
                logger.debug("Bypassed reindex work - already busy: " + this);
            }
        }
    }
}

View File

@@ -1,134 +0,0 @@
/*
* Copyright (C) 2005 Alfresco, Inc.
*
* Licensed under the Mozilla Public License version 1.1
* with a permitted attribution clause. You may obtain a
* copy of the License at
*
* http://www.alfresco.org/legal/license.txt
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the
* License.
*/
package org.alfresco.repo.node.index;
import java.util.ArrayList;
import java.util.List;
import org.alfresco.repo.search.impl.lucene.fts.FullTextSearchIndexer;
import org.alfresco.repo.transaction.TransactionUtil;
import org.alfresco.repo.transaction.TransactionUtil.TransactionWork;
import org.alfresco.service.cmr.repository.NodeService;
import org.alfresco.service.cmr.repository.StoreRef;
import org.alfresco.service.transaction.TransactionService;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* Ensures that the FTS indexing picks up on any outstanding documents that
* require indexing.
* <p>
* FTS indexing is a background process. It is therefore possible that
* certain documents don't get indexed when the server shuts down.
*
* @author Derek Hulley
*/
/**
 * Ensures that the FTS indexing picks up on any outstanding documents that
 * require indexing.
 * <p>
 * FTS indexing is a background process.  It is therefore possible that
 * certain documents don't get indexed when the server shuts down.
 *
 * @author Derek Hulley
 */
public class FtsIndexRecoveryComponent implements IndexRecovery
{
    private static Log logger = LogFactory.getLog(FtsIndexRecoveryComponent.class);
    
    /** provides transactions to atomically index each missed transaction */
    private TransactionService txnService;
    /** the FTS indexer that we will prompt to pick up on any un-indexed text */
    private FullTextSearchIndexer fullTextSearchIndexer;
    /** the component giving direct access to <b>node</b> instances */
    private NodeService nodeSvc;
    /** the workspaces to reindex */
    private List<StoreRef> stores;
    
    public FtsIndexRecoveryComponent()
    {
        this.stores = new ArrayList<StoreRef>(2);
    }
    
    /**
     * @param transactionService provide transactions to index each missed transaction
     */
    public void setTransactionService(TransactionService transactionService)
    {
        this.txnService = transactionService;
    }
    
    /**
     * @param ftsIndexer the FTS background indexer
     */
    public void setFtsIndexer(FullTextSearchIndexer ftsIndexer)
    {
        this.fullTextSearchIndexer = ftsIndexer;
    }
    
    /**
     * @param nodeService provides information about nodes for indexing
     */
    public void setNodeService(NodeService nodeService)
    {
        this.nodeSvc = nodeService;
    }
    
    /**
     * Set the workspaces that need reindexing
     *
     * @param storeRefStrings a list of strings representing store references
     */
    public void setStores(List<String> storeRefStrings)
    {
        // rebuild the store list from scratch
        stores.clear();
        for (String storeRefStr : storeRefStrings)
        {
            stores.add(new StoreRef(storeRefStr));
        }
    }
    
    /**
     * Ensures that the FTS indexing is activated for any outstanding full text searches.
     */
    public void reindex()
    {
        TransactionWork<Object> promptFtsWork = new TransactionWork<Object>()
        {
            public Object doWork()
            {
                // walk the configured stores, prompting FTS for each one present
                for (StoreRef storeRef : stores)
                {
                    if (nodeSvc.exists(storeRef))
                    {
                        // prompt FTS to reindex the store
                        fullTextSearchIndexer.requiresIndex(storeRef);
                    }
                    else if (logger.isDebugEnabled())
                    {
                        // store does not exist
                        logger.debug("Skipping reindex of non-existent store: " + storeRef);
                    }
                }
                // done
                return null;
            }
        };
        TransactionUtil.executeInUserTransaction(txnService, promptFtsWork);
        // done
        if (logger.isDebugEnabled())
        {
            logger.debug("Prompted FTS index on stores: " + stores);
        }
    }
}

View File

@@ -1,60 +0,0 @@
/*
* Copyright (C) 2005 Alfresco, Inc.
*
* Licensed under the Mozilla Public License version 1.1
* with a permitted attribution clause. You may obtain a
* copy of the License at
*
* http://www.alfresco.org/legal/license.txt
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the
* License.
*/
package org.alfresco.repo.node.index;
import junit.framework.TestCase;
import org.alfresco.repo.transaction.TransactionUtil;
import org.alfresco.repo.transaction.TransactionUtil.TransactionWork;
import org.alfresco.service.transaction.TransactionService;
import org.alfresco.util.ApplicationContextHelper;
import org.springframework.context.ApplicationContext;
/**
* Checks that the FTS index recovery component is working
*
* @author Derek Hulley
*/
/**
 * Checks that the FTS index recovery component is working
 *
 * @author Derek Hulley
 */
public class FtsIndexRecoveryComponentTest extends TestCase
{
    private static ApplicationContext ctx = ApplicationContextHelper.getApplicationContext();
    
    private IndexRecovery indexRecoverer;
    private TransactionService txnService;
    
    public void setUp() throws Exception
    {
        // fetch the components under test from the application context
        indexRecoverer = (IndexRecovery) ctx.getBean("indexRecoveryComponent");
        txnService = (TransactionService) ctx.getBean("transactionComponent");
    }
    
    public void testReindexing() throws Exception
    {
        // kick off the recovery inside its own, non-propagating transaction
        TransactionUtil.executeInNonPropagatingUserTransaction(
                txnService,
                new TransactionWork<Object>()
                {
                    public Object doWork()
                    {
                        indexRecoverer.reindex();
                        return null;
                    }
                });
    }
}

View File

@@ -16,660 +16,365 @@
*/
package org.alfresco.repo.node.index;
import java.util.ArrayList;
import java.util.List;
import org.alfresco.error.AlfrescoRuntimeException;
import org.alfresco.i18n.I18NUtil;
import org.alfresco.model.ContentModel;
import org.alfresco.repo.domain.NodeStatus;
import org.alfresco.repo.search.Indexer;
import org.alfresco.repo.search.impl.lucene.LuceneIndexerImpl;
import org.alfresco.repo.search.impl.lucene.fts.FullTextSearchIndexer;
import org.alfresco.repo.domain.Transaction;
import org.alfresco.repo.search.impl.lucene.LuceneQueryParser;
import org.alfresco.repo.transaction.TransactionUtil;
import org.alfresco.repo.transaction.TransactionUtil.TransactionWork;
import org.alfresco.service.cmr.repository.ChildAssociationRef;
import org.alfresco.service.cmr.repository.NodeRef;
import org.alfresco.service.cmr.repository.NodeService;
import org.alfresco.service.cmr.repository.StoreRef;
import org.alfresco.service.cmr.repository.NodeRef.Status;
import org.alfresco.service.cmr.search.ResultSet;
import org.alfresco.service.cmr.search.SearchParameters;
import org.alfresco.service.cmr.search.SearchService;
import org.alfresco.service.transaction.TransactionService;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.hibernate.CacheMode;
import org.hibernate.Query;
import org.hibernate.Session;
import org.springframework.orm.hibernate3.HibernateCallback;
import org.springframework.orm.hibernate3.support.HibernateDaoSupport;
/**
* Ensures that the FTS indexing picks up on any outstanding documents that
* require indexing.
* <p>
* This component must be used as a singleton (one per VM) and may only be
* called to reindex once. It will start a thread that processes all available
* transactions and keeps checking to ensure that the index is up to date with
* the latest database changes.
* <p>
* <b>The following points are important:</b>
* <ul>
* <li>
* By default, the Hibernate L2 cache is used during processing.
* This can be disabled by either disabling the L2 cache globally
* for the server (not recommended) or by setting the
* {@link #setL2CacheMode(String) l2CacheMode} property. If the
* database is static then the L2 cache usage can be set to use
* the <code>NORMAL</code> mode. <code>REFRESH</code> should be
* used where the server will still be accessed from some clients
* despite the database changing. <code>NORMAL</code> can be used
* in the case of the caches being clustered, i.e. the caches will
* not be out of date w.r.t. the database.
* </li>
* <li>
* This process should only be used continuously where the index
* transactions are following the database transactions. Use the
* {@link #setRunContinuously(boolean) runContinuously} property
* to change this behaviour.
* </li>
* </ul>
* Component to check and recover the indexes.
*
* @author Derek Hulley
*/
public class FullIndexRecoveryComponent extends HibernateDaoSupport implements IndexRecovery
public class FullIndexRecoveryComponent extends AbstractReindexComponent
{
public static final String QUERY_GET_NEXT_CHANGE_TXN_IDS = "node.GetNextChangeTxnIds";
public static final String QUERY_GET_CHANGED_NODE_STATUSES = "node.GetChangedNodeStatuses";
public static final String QUERY_GET_DELETED_NODE_STATUSES = "node.GetDeletedNodeStatuses";
public static final String QUERY_GET_CHANGED_NODE_STATUSES_COUNT = "node.GetChangedNodeStatusesCount";
private static final String START_TXN_ID = "000";
private static final String ERR_STORE_NOT_UP_TO_DATE = "index.recovery.store_not_up_to_date";
private static final String MSG_RECOVERY_STARTING = "index.recovery.starting";
private static final String MSG_RECOVERY_COMPLETE = "index.recovery.complete";
private static final String MSG_RECOVERY_PROGRESS = "index.recovery.progress";
private static final String MSG_RECOVERY_TERMINATED = "index.recovery.terminated";
private static Log logger = LogFactory.getLog(FullIndexRecoveryComponent.class);
/** ensures that this process is kicked off once per VM */
private static boolean started = false;
/** The current transaction ID being processed */
private static String currentTxnId = START_TXN_ID;
/** kept to notify the thread that it should quit */
private boolean killThread = false;
/** provides transactions to atomically index each missed transaction */
private TransactionService transactionService;
/** the component to index the node hierarchy */
private Indexer indexer;
/** the FTS indexer that we will prompt to pick up on any un-indexed text */
private FullTextSearchIndexer ftsIndexer;
/** the component providing searches of the indexed nodes */
private SearchService searcher;
/** the component giving direct access to <b>node</b> instances */
private NodeService nodeService;
/** set this to run the index recovery component */
private boolean executeFullRecovery;
/** set this on to keep checking for new transactions and never stop */
private boolean runContinuously;
/** set the time to wait between checking indexes */
private long waitTime;
/** controls how the L2 cache is used */
private CacheMode l2CacheMode;
/**
* @return Returns the ID of the current (or last) transaction processed
*/
public static String getCurrentTransactionId()
public static enum RecoveryMode
{
return currentTxnId;
/** Do nothing - not even a check */
NONE,
/** Perform a quick check on the state of the indexes only */
VALIDATE,
/** Performs a quick validation and then starts a full pass-through on failure */
AUTO,
/** Performs a full pass-through of all recorded transactions to ensure that the indexes are up to date */
FULL;
}
private RecoveryMode recoveryMode;
public FullIndexRecoveryComponent()
{
this.killThread = false;
this.executeFullRecovery = false;
this.runContinuously = false;
this.waitTime = 1000L;
this.l2CacheMode = CacheMode.REFRESH;
// ensure that we kill the thread when the VM is shutting down
Runnable shutdownRunnable = new Runnable()
{
public void run()
{
killThread = true;
};
};
Thread shutdownThread = new Thread(shutdownRunnable);
Runtime.getRuntime().addShutdownHook(shutdownThread);
recoveryMode = RecoveryMode.VALIDATE;
}
/**
* @return Returns true if the component has already been started
* Set the type of recovery to perform.
*
* @param recoveryMode one of the {@link RecoveryMode } values
*/
public static boolean isStarted()
public void setRecoveryMode(String recoveryMode)
{
return started;
}
/**
* @param transactionService provide transactions to index each missed transaction
*/
public void setTransactionService(TransactionService transactionService)
{
this.transactionService = transactionService;
}
/**
* @param indexer the indexer that will be index
*/
public void setIndexer(Indexer indexer)
{
this.indexer = indexer;
this.recoveryMode = RecoveryMode.valueOf(recoveryMode);
}
/**
* @param ftsIndexer the FTS background indexer
*/
public void setFtsIndexer(FullTextSearchIndexer ftsIndexer)
@Override
protected void reindexImpl()
{
this.ftsIndexer = ftsIndexer;
}
/**
* @param searcher component providing index searches
*/
public void setSearcher(SearchService searcher)
{
this.searcher = searcher;
}
/**
* @param nodeService provides information about nodes for indexing
*/
public void setNodeService(NodeService nodeService)
{
this.nodeService = nodeService;
}
/**
* Set this to <code>true</code> to initiate the full index recovery.
* <p>
* This used to default to <code>true</code> but is now false. Set this
* if the potentially long-running process of checking and fixing the
* indexes must be started.
*
* @param executeFullRecovery
*/
public void setExecuteFullRecovery(boolean executeFullRecovery)
{
this.executeFullRecovery = executeFullRecovery;
}
/**
* Set this to ensure that the process continuously checks for new transactions.
* If not, it will permanently terminate once it catches up with the current
* transactions.
*
* @param runContinuously true to never cease looking for new transactions
*/
public void setRunContinuously(boolean runContinuously)
{
this.runContinuously = runContinuously;
}
/**
* Set the time to wait between checking for new transaction changes in the database.
*
* @param waitTime the time to wait in milliseconds
*/
public void setWaitTime(long waitTime)
{
this.waitTime = waitTime;
}
/**
* Set the hibernate cache mode by name
*
* @see org.hibernate.CacheMode
*/
public void setL2CacheMode(String l2CacheModeStr)
{
if (l2CacheModeStr.equals("GET"))
if (logger.isDebugEnabled())
{
l2CacheMode = CacheMode.GET;
}
else if (l2CacheModeStr.equals("IGNORE"))
{
l2CacheMode = CacheMode.IGNORE;
}
else if (l2CacheModeStr.equals("NORMAL"))
{
l2CacheMode = CacheMode.NORMAL;
}
else if (l2CacheModeStr.equals("PUT"))
{
l2CacheMode = CacheMode.PUT;
}
else if (l2CacheModeStr.equals("REFRESH"))
{
l2CacheMode = CacheMode.REFRESH;
}
else
{
throw new IllegalArgumentException("Unrecognised Hibernate L2 cache mode: " + l2CacheModeStr);
}
}
/**
* Ensure that the index is up to date with the current state of the persistence layer.
* The full list of unique transaction change IDs is retrieved and used to detect
* which are not present in the index. All the node changes and deletions for the
* remaining transactions are then indexed.
*/
public synchronized void reindex()
{
if (FullIndexRecoveryComponent.started)
{
throw new AlfrescoRuntimeException
("Only one FullIndexRecoveryComponent may be used per VM and it may only be called once");
logger.debug("Performing index recovery for type: " + recoveryMode);
}
// ensure that we don't redo this work
FullIndexRecoveryComponent.started = true;
// work to mark the stores for full text reindexing
TransactionWork<Object> ftsReindexWork = new TransactionWork<Object>()
// do we just ignore
if (recoveryMode == RecoveryMode.NONE)
{
public Object doWork()
{
List<StoreRef> storeRefs = nodeService.getStores();
// reindex each store
for (StoreRef storeRef : storeRefs)
{
// check if the store exists
if (!nodeService.exists(storeRef))
{
// store does not exist
if (logger.isDebugEnabled())
{
logger.debug("Skipping reindex of non-existent store: " + storeRef);
}
continue;
}
// prompt FTS to reindex the store
ftsIndexer.requiresIndex(storeRef);
}
// done
if (logger.isDebugEnabled())
{
logger.debug("Prompted FTS index on stores: " + storeRefs);
}
return null;
}
};
TransactionUtil.executeInNonPropagatingUserTransaction(transactionService, ftsReindexWork);
// start full index recovery, if necessary
if (!this.executeFullRecovery)
return;
}
// check the level of cover required
boolean fullRecoveryRequired = false;
if (recoveryMode == RecoveryMode.FULL) // no validate required
{
if (logger.isDebugEnabled())
fullRecoveryRequired = true;
}
else // validate first
{
List<StoreRef> storeRefs = nodeService.getStores();
for (StoreRef storeRef : storeRefs)
{
logger.debug("Full index recovery is off - quitting");
// get the last txn ID in the database
Transaction txn = nodeDaoService.getLastTxn(storeRef);
boolean lastChangeTxnIdInIndex = isTxnIdPresentInIndex(storeRef, txn);
if (lastChangeTxnIdInIndex)
{
// this store is good
continue;
}
// this store isn't up to date
String msg = I18NUtil.getMessage(ERR_STORE_NOT_UP_TO_DATE, storeRef);
logger.warn(msg);
// the store is out of date - validation failed
if (recoveryMode == RecoveryMode.VALIDATE)
{
// next store
continue;
}
else if (recoveryMode == RecoveryMode.AUTO)
{
fullRecoveryRequired = true;
}
}
}
else
// put the server into read-only mode for the duration
boolean allowWrite = !transactionService.isReadOnly();
try
{
// set the state of the reindex
FullIndexRecoveryComponent.currentTxnId = START_TXN_ID;
// set the server into read-only mode
transactionService.setAllowWrite(false);
// start a stateful thread that will begin processing the reindexing the transactions
Runnable runnable = new ReindexRunner();
Thread reindexThread = new Thread(runnable);
// make it a daemon thread
reindexThread.setDaemon(true);
// it should not be a high priority
reindexThread.setPriority(Thread.MIN_PRIORITY);
// start it
reindexThread.start();
if (logger.isDebugEnabled())
// do we need to perform a full recovery
if (fullRecoveryRequired)
{
logger.debug("Full index recovery thread started: \n" +
" continuous: " + runContinuously);
performFullRecovery();
}
}
finally
{
// restore read-only state
transactionService.setAllowWrite(allowWrite);
}
}
/**
* Stateful thread runnable that executes reindex calls.
*
* @see FullIndexRecoveryComponent#reindexNodes()
*
* @author Derek Hulley
*/
private class ReindexRunner implements Runnable
private static final int MAX_TRANSACTIONS_PER_ITERATION = 1000;
private void performFullRecovery()
{
public void run()
int txnCount = nodeDaoService.getTransactionCount();
// starting
String msgStart = I18NUtil.getMessage(MSG_RECOVERY_STARTING, txnCount);
logger.info(msgStart);
// count the transactions
int processedCount = 0;
Transaction lastTxn = null;
while(true)
{
// keep this thread going permanently
while (!killThread)
List<Transaction> nextTxns = nodeDaoService.getNextTxns(
lastTxn,
MAX_TRANSACTIONS_PER_ITERATION);
// reindex each transaction
for (Transaction txn : nextTxns)
{
try
Long txnId = txn.getId();
// check if we have to terminate
if (isShuttingDown())
{
// reindex nodes
List<String> txnsIndexed = FullIndexRecoveryComponent.this.reindexNodes();
// reindex missing content
// @SuppressWarnings("unused")
// int missingContentCount = FullIndexRecoveryComponent.this.reindexMissingContent();
// check if the process should terminate
if (txnsIndexed.size() == 0 && !runContinuously)
{
// the thread has caught up with all the available work and should not
// run continuously
if (logger.isDebugEnabled())
{
logger.debug("Thread quitting - no more available indexing to do: \n" +
" last txn: " + FullIndexRecoveryComponent.getCurrentTransactionId());
}
break;
}
// brief pause
synchronized(FullIndexRecoveryComponent.this)
{
FullIndexRecoveryComponent.this.wait(waitTime);
}
String msgTerminated = I18NUtil.getMessage(MSG_RECOVERY_TERMINATED);
logger.warn(msgTerminated);
return;
}
catch (InterruptedException e)
reindexTransaction(txnId);
// dump a progress report every 10% of the way
double before = (double) processedCount / (double) txnCount * 10.0; // 0 - 10
processedCount++;
double after = (double) processedCount / (double) txnCount * 10.0; // 0 - 10
if (Math.floor(before) < Math.floor(after)) // crossed a 0 - 10 integer boundary
{
// ignore
}
catch (Throwable e)
{
if (killThread)
{
// the shutdown may have caused the exception - ignore it
}
else
{
// we are still a go; report it
logger.error("Reindex failure", e);
}
int complete = ((int)Math.floor(after))*10;
String msgProgress = I18NUtil.getMessage(MSG_RECOVERY_PROGRESS, complete);
logger.info(msgProgress);
}
}
// have we finished?
if (nextTxns.size() == 0)
{
// there are no more
break;
}
lastTxn = nextTxns.get(nextTxns.size() - 1);
}
}
/**
* @return Returns the transaction ID just reindexed, i.e. where some work was performed
*/
private List<String> reindexNodes()
{
// get a list of all transactions still requiring a check
List<String> txnsToCheck = getNextChangeTxnIds(FullIndexRecoveryComponent.currentTxnId);
// loop over each transaction
for (String changeTxnId : txnsToCheck)
{
reindexNodes(changeTxnId);
}
// done
return txnsToCheck;
String msgDone = I18NUtil.getMessage(MSG_RECOVERY_COMPLETE);
logger.info(msgDone);
}
/**
* Reindexes changes specific to the change transaction ID.
* <p>
* <b>All exceptions are absorbed.</b>
* Perform a full reindexing of the given transaction in the context of a completely
* new transaction.
*
* @param txnId the transaction identifier
*/
private void reindexNodes(final String changeTxnId)
public void reindexTransaction(final long txnId)
{
/*
* This must execute each within its own transaction.
* The cache size is therefore not an issue.
*/
if (logger.isDebugEnabled())
{
logger.debug("Reindexing transaction: " + txnId);
}
TransactionWork<Object> reindexWork = new TransactionWork<Object>()
{
public Object doWork() throws Exception
{
// perform the work in a Hibernate callback
HibernateCallback callback = new ReindexCallback(changeTxnId);
getHibernateTemplate().execute(callback);
// get the node references pertinent to the transaction
List<NodeRef> nodeRefs = nodeDaoService.getTxnChanges(txnId);
// reindex each node
for (NodeRef nodeRef : nodeRefs)
{
Status nodeStatus = nodeService.getNodeStatus(nodeRef);
if (nodeStatus == null)
{
// it's not there any more
continue;
}
if (nodeStatus.isDeleted()) // node deleted
{
// only the child node ref is relevant
ChildAssociationRef assocRef = new ChildAssociationRef(
ContentModel.ASSOC_CHILDREN,
null,
null,
nodeRef);
indexer.deleteNode(assocRef);
}
else // node created
{
// get the primary assoc for the node
ChildAssociationRef primaryAssocRef = nodeService.getPrimaryParent(nodeRef);
// reindex
indexer.createNode(primaryAssocRef);
}
}
// done
return null;
}
};
try
{
TransactionUtil.executeInNonPropagatingUserTransaction(transactionService, reindexWork);
}
catch (Throwable e)
{
logger.error("Transaction reindex failed: \n" +
" txn: " + changeTxnId,
e);
}
finally
{
// Up the current transaction now, in case the process fails at this point.
// This will prevent the transaction from being processed again.
// This applies to failures as well, which should be dealt with externally
// and having the entire process start again, e.g. such as a system reboot
currentTxnId = changeTxnId;
}
TransactionUtil.executeInNonPropagatingUserTransaction(transactionService, reindexWork, true);
// done
}
/**
* Stateful inner class that implements a single reindex call for a given store
* and transaction.
* <p>
* It must be called within its own transaction.
*
* @author Derek Hulley
*/
private class ReindexCallback implements HibernateCallback
private boolean isTxnIdPresentInIndex(StoreRef storeRef, Transaction txn)
{
private final String changeTxnId;
public ReindexCallback(String changeTxnId)
if (logger.isDebugEnabled())
{
this.changeTxnId = changeTxnId;
logger.debug("Checking for transaction in index: \n" +
" store: " + storeRef + "\n" +
" txn: " + txn);
}
/**
* Changes the L2 cache usage before reindexing for each store
*
* @see #reindexNodes(StoreRef, String)
*/
public Object doInHibernate(Session session)
String changeTxnId = txn.getChangeTxnId();
// count the changes in the transaction
int updateCount = nodeDaoService.getTxnUpdateCountForStore(storeRef, txn.getId());
int deleteCount = nodeDaoService.getTxnDeleteCountForStore(storeRef, txn.getId());
if (logger.isDebugEnabled())
{
// set the way the L2 cache is used
getSession().setCacheMode(l2CacheMode);
// reindex each store
// for (StoreRef storeRef : storeRefs)
// {
// if (!nodeService.exists(storeRef))
// {
// // the store is not present
// continue;
// }
// // reindex for store
// reindexNodes(storeRef, changeTxnId);
// }
// done
return null;
logger.debug("Transaction has " + updateCount + " updates and " + deleteCount + " deletes: " + txn);
}
private void reindexNodes(StoreRef storeRef, String changeTxnId)
// do the most update check, which is most common
if (deleteCount == 0 && updateCount == 0)
{
if (logger.isDebugEnabled())
{
logger.debug("No changes in transaction: " + txn);
}
// there's nothing to check for
return true;
}
else if (updateCount > 0)
{
// check if we need to perform this operation
SearchParameters sp = new SearchParameters();
sp.addStore(storeRef);
// search for it in the index
String query = "TX:\"" + changeTxnId + "\"";
sp.setLanguage(SearchService.LANGUAGE_LUCENE);
sp.setQuery(query);
ResultSet results = null;
try
{
SearchParameters sp = new SearchParameters();
sp.addStore(storeRef);
// search for it in the index, sorting with youngest first, fetching only 1
sp.setLanguage(SearchService.LANGUAGE_LUCENE);
sp.setQuery("TX:" + LuceneQueryParser.escape(changeTxnId));
sp.setLimit(1);
results = searcher.query(sp);
// did the index have any of these changes?
if (results.length() > 0)
{
// the transaction has an entry in the index - assume that it was
// atomically correct
if (logger.isDebugEnabled())
{
logger.debug("Transaction present in index - no indexing required: \n" +
" store: " + storeRef + "\n" +
" txn: " + changeTxnId);
logger.debug("Index has results for txn (OK): " + txn);
}
return;
return true; // there were updates/creates and results for the txn were found
}
else
{
if (logger.isDebugEnabled())
{
logger.debug("Index has no results for txn (Index out of date): " + txn);
}
return false;
}
}
finally
{
if (results != null)
{
results.close();
}
}
// the index has no record of this
// were there any changes, or is it all just deletions?
int changedCount = getChangedNodeStatusesCount(storeRef, changeTxnId);
if (changedCount == 0)
{
// no nodes were changed in the transaction, i.e. they are only deletions
// the index is quite right not to have any entries for the transaction
if (logger.isDebugEnabled())
{
logger.debug("Transaction only has deletions - no indexing required: \n" +
" store: " + storeRef + "\n" +
" txn: " + changeTxnId);
}
return;
}
// process the deletions relevant to the txn and the store
List<NodeStatus> deletedNodeStatuses = getDeletedNodeStatuses(storeRef, changeTxnId);
for (NodeStatus status : deletedNodeStatuses)
{
NodeRef nodeRef = new NodeRef(storeRef, status.getKey().getGuid());
// only the child node ref is relevant
ChildAssociationRef assocRef = new ChildAssociationRef(
ContentModel.ASSOC_CHILDREN,
null,
null,
nodeRef);
indexer.deleteNode(assocRef);
}
// process additions
List<NodeStatus> changedNodeStatuses = getChangedNodeStatuses(storeRef, changeTxnId);
for (NodeStatus status : changedNodeStatuses)
{
NodeRef nodeRef = new NodeRef(storeRef, status.getKey().getGuid());
// get the primary assoc for the node
ChildAssociationRef primaryAssocRef = nodeService.getPrimaryParent(nodeRef);
// reindex
indexer.createNode(primaryAssocRef);
}
// done
if (logger.isDebugEnabled())
{
logger.debug("Transaction reindexed: \n" +
" store: " + storeRef + "\n" +
" txn: " + changeTxnId + "\n" +
" deletions: " + deletedNodeStatuses.size() + "\n" +
" modifications: " + changedNodeStatuses.size());
if (results != null) { results.close(); }
}
}
};
/**
* Retrieve next 50 transaction IDs that are greater than the given transaction ID.
*
* @param currentTxnId the transaction ID that must be less than all returned results
* @return Returns an ordered list of the next 50 transaction IDs
*/
@SuppressWarnings("unchecked")
public List<String> getNextChangeTxnIds(final String currentTxnId)
{
HibernateCallback callback = new HibernateCallback()
// there have been deletes, so we have to ensure that none of the nodes deleted are present in the index
// get all node refs for the transaction
Long txnId = txn.getId();
List<NodeRef> nodeRefs = nodeDaoService.getTxnChangesForStore(storeRef, txnId);
for (NodeRef nodeRef : nodeRefs)
{
public Object doInHibernate(Session session)
if (logger.isDebugEnabled())
{
Query query = session.getNamedQuery(QUERY_GET_NEXT_CHANGE_TXN_IDS);
query.setString("currentTxnId", currentTxnId)
.setMaxResults(50)
.setReadOnly(true);
return query.list();
logger.debug("Searching for node in index: \n" +
" node: " + nodeRef + "\n" +
" txn: " + txn);
}
};
List<String> queryResults = (List<String>) getHibernateTemplate().execute(callback);
// done
return queryResults;
}
@SuppressWarnings("unchecked")
public int getChangedNodeStatusesCount(final StoreRef storeRef, final String changeTxnId)
{
HibernateCallback callback = new HibernateCallback()
// we know that these are all deletions
ResultSet results = null;
try
{
SearchParameters sp = new SearchParameters();
sp.addStore(storeRef);
// search for it in the index, sorting with youngest first, fetching only 1
sp.setLanguage(SearchService.LANGUAGE_LUCENE);
sp.setQuery("ID:" + LuceneQueryParser.escape(nodeRef.toString()));
sp.setLimit(1);
results = searcher.query(sp);
if (results.length() == 0)
{
// no results, as expected
if (logger.isDebugEnabled())
{
logger.debug(" --> Node not found (OK)");
}
continue;
}
else
{
if (logger.isDebugEnabled())
{
logger.debug(" --> Node found (Index out of date)");
}
return false;
}
}
finally
{
if (results != null) { results.close(); }
}
}
// all tests passed
if (logger.isDebugEnabled())
{
public Object doInHibernate(Session session)
{
Query query = session.getNamedQuery(QUERY_GET_CHANGED_NODE_STATUSES_COUNT);
query.setString("storeProtocol", storeRef.getProtocol())
.setString("storeIdentifier", storeRef.getIdentifier())
.setString("changeTxnId", changeTxnId)
.setReadOnly(true);
return query.uniqueResult();
}
};
Integer changeCount = (Integer) getHibernateTemplate().execute(callback);
// done
return changeCount.intValue();
}
/**
 * Retrieve the statuses of all nodes changed by the given change
 * transaction within the given store.
 *
 * @param storeRef the store to restrict the results to
 * @param changeTxnId the change transaction ID
 * @return Returns the node statuses changed by the transaction
 */
@SuppressWarnings("unchecked")
public List<NodeStatus> getChangedNodeStatuses(final StoreRef storeRef, final String changeTxnId)
{
    HibernateCallback callback = new HibernateCallback()
    {
        public Object doInHibernate(Session session)
        {
            Query query = session.getNamedQuery(QUERY_GET_CHANGED_NODE_STATUSES);
            query.setString("storeProtocol", storeRef.getProtocol())
                 .setString("storeIdentifier", storeRef.getIdentifier())
                 .setString("changeTxnId", changeTxnId)
                 .setReadOnly(true);
            return query.list();
        }
    };
    // typed cast (was a raw List cast) to match the declared return type
    List<NodeStatus> queryResults = (List<NodeStatus>) getHibernateTemplate().execute(callback);
    // done
    return queryResults;
}
/**
 * Retrieve the statuses of all nodes deleted by the given change
 * transaction within the given store.
 *
 * @param storeRef the store to restrict the results to
 * @param changeTxnId the change transaction ID
 * @return Returns the statuses of nodes deleted by the transaction
 */
@SuppressWarnings("unchecked")
public List<NodeStatus> getDeletedNodeStatuses(final StoreRef storeRef, final String changeTxnId)
{
    HibernateCallback callback = new HibernateCallback()
    {
        public Object doInHibernate(Session session)
        {
            Query query = session.getNamedQuery(QUERY_GET_DELETED_NODE_STATUSES);
            query.setString("storeProtocol", storeRef.getProtocol())
                 .setString("storeIdentifier", storeRef.getIdentifier())
                 .setString("changeTxnId", changeTxnId)
                 .setReadOnly(true);
            return query.list();
        }
    };
    // typed cast (was a raw List cast); the stray unreachable logger.debug
    // line spliced in after the return by the bad merge has been removed
    List<NodeStatus> queryResults = (List<NodeStatus>) getHibernateTemplate().execute(callback);
    // done
    return queryResults;
}
return true;
}
}

View File

@@ -16,24 +16,8 @@
*/
package org.alfresco.repo.node.index;
import java.util.ArrayList;
import java.util.List;
import junit.framework.TestCase;
import org.alfresco.model.ContentModel;
import org.alfresco.repo.search.Indexer;
import org.alfresco.repo.transaction.AlfrescoTransactionSupport;
import org.alfresco.repo.transaction.TransactionUtil;
import org.alfresco.repo.transaction.TransactionUtil.TransactionWork;
import org.alfresco.service.cmr.repository.ChildAssociationRef;
import org.alfresco.service.cmr.repository.InvalidStoreRefException;
import org.alfresco.service.cmr.repository.NodeRef;
import org.alfresco.service.cmr.repository.NodeService;
import org.alfresco.service.cmr.repository.StoreRef;
import org.alfresco.service.namespace.NamespaceService;
import org.alfresco.service.namespace.QName;
import org.alfresco.service.transaction.TransactionService;
import org.alfresco.util.ApplicationContextHelper;
import org.springframework.context.ApplicationContext;
@@ -46,114 +30,35 @@ public class FullIndexRecoveryComponentTest extends TestCase
{
private static ApplicationContext ctx = ApplicationContextHelper.getApplicationContext();
private TransactionService transactionService;
private FullIndexRecoveryComponent indexRecoverer;
private NodeService nodeService;
private TransactionService txnService;
private Indexer indexer;
private List<StoreRef> storeRefs;
public void setUp() throws Exception
{
transactionService = (TransactionService) ctx.getBean("transactionComponent");
indexRecoverer = (FullIndexRecoveryComponent) ctx.getBean("indexRecoveryComponent");
txnService = (TransactionService) ctx.getBean("transactionComponent");
nodeService = (NodeService) ctx.getBean("nodeService");
indexer = (Indexer) ctx.getBean("indexerComponent");
// create 2 stores
TransactionWork<List<StoreRef>> createStoresWork = new TransactionWork<List<StoreRef>>()
{
public List<StoreRef> doWork() throws Exception
{
List<StoreRef> storeRefs = new ArrayList<StoreRef>(2);
storeRefs.add(nodeService.createStore(StoreRef.PROTOCOL_WORKSPACE, getName() + System.nanoTime()));
storeRefs.add(nodeService.createStore(StoreRef.PROTOCOL_WORKSPACE, getName() + System.nanoTime()));
return storeRefs;
}
};
storeRefs = TransactionUtil.executeInUserTransaction(transactionService, createStoresWork);
}
public void testNothing() throws Exception
public void testSetup() throws Exception
{
}
public void xtestReindexing() throws Exception
public synchronized void testReindexing() throws Exception
{
// don't do anything if the component has already started
if (FullIndexRecoveryComponent.isStarted())
{
return;
}
// deletes a content node from the index
final List<String> storeRefStrings = new ArrayList<String>(2);
TransactionWork<String> dropNodeIndexWork = new TransactionWork<String>()
{
public String doWork()
{
// create a node in each store and drop it from the index
for (StoreRef storeRef : storeRefs)
{
try
{
NodeRef rootNodeRef = nodeService.getRootNode(storeRef);
ChildAssociationRef assocRef = nodeService.createNode(
rootNodeRef,
ContentModel.ASSOC_CONTAINS,
QName.createQName(NamespaceService.ALFRESCO_URI, "unindexedChild" + System.currentTimeMillis()),
ContentModel.TYPE_BASE);
// this will have indexed it, so remove it from the index
indexer.deleteNode(assocRef);
// make the string version of the storeRef
storeRefStrings.add(storeRef.toString());
}
catch (InvalidStoreRefException e)
{
// just ignore stores that are invalid
}
}
return AlfrescoTransactionSupport.getTransactionId();
}
};
// create un-indexed nodes
String txnId = TransactionUtil.executeInNonPropagatingUserTransaction(txnService, dropNodeIndexWork);
indexRecoverer.setExecuteFullRecovery(true);
// indexRecoverer.setStores(storeRefStrings);
indexRecoverer.setRecoveryMode(FullIndexRecoveryComponent.RecoveryMode.FULL.name());
// reindex
indexRecoverer.reindex();
// check that reindexing fails
try
Thread reindexThread = new Thread()
{
indexRecoverer.reindex();
fail("Reindexer failed to prevent reindex from being called twice");
}
catch (RuntimeException e)
{
// expected
}
public void run()
{
indexRecoverer.reindex();
}
};
reindexThread.setDaemon(true);
reindexThread.start();
// reindexThread.run();
// loop for some time, giving it a chance to do its thing
String lastProcessedTxnId = null;
for (int i = 0; i < 60; i++)
{
lastProcessedTxnId = FullIndexRecoveryComponent.getCurrentTransactionId();
if (lastProcessedTxnId.equals(txnId))
{
break;
}
// wait for a second
synchronized(this)
{
this.wait(1000L);
}
}
// check that the index was recovered
assertEquals("Index transaction not up to date", txnId, lastProcessedTxnId);
// wait a bit and then terminate
wait(10000);
indexRecoverer.setShutdown(true);
wait(10000);
}
}

View File

@@ -1,741 +1,141 @@
///*
// * Copyright (C) 2005-2006 Alfresco, Inc.
// *
// * Licensed under the Mozilla Public License version 1.1
// * with a permitted attribution clause. You may obtain a
// * copy of the License at
// *
// * http://www.alfresco.org/legal/license.txt
// *
// * Unless required by applicable law or agreed to in writing,
// * software distributed under the License is distributed on an
// * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// * either express or implied. See the License for the specific
// * language governing permissions and limitations under the
// * License.
// */
//package org.alfresco.repo.node.index;
//
//import java.util.ArrayList;
//import java.util.List;
//
//import org.alfresco.error.AlfrescoRuntimeException;
//import org.alfresco.model.ContentModel;
//import org.alfresco.repo.domain.NodeStatus;
//import org.alfresco.repo.search.Indexer;
//import org.alfresco.repo.search.impl.lucene.LuceneIndexerImpl;
//import org.alfresco.repo.search.impl.lucene.fts.FullTextSearchIndexer;
//import org.alfresco.repo.transaction.TransactionUtil;
//import org.alfresco.repo.transaction.TransactionUtil.TransactionWork;
//import org.alfresco.service.cmr.repository.ChildAssociationRef;
//import org.alfresco.service.cmr.repository.NodeRef;
//import org.alfresco.service.cmr.repository.NodeService;
//import org.alfresco.service.cmr.repository.StoreRef;
//import org.alfresco.service.cmr.search.ResultSet;
//import org.alfresco.service.cmr.search.SearchParameters;
//import org.alfresco.service.cmr.search.SearchService;
//import org.alfresco.service.transaction.TransactionService;
//import org.apache.commons.logging.Log;
//import org.apache.commons.logging.LogFactory;
//import org.hibernate.CacheMode;
//import org.hibernate.Query;
//import org.hibernate.Session;
//import org.springframework.orm.hibernate3.HibernateCallback;
//import org.springframework.orm.hibernate3.support.HibernateDaoSupport;
//
///**
// * Ensures that the FTS indexing picks up on any outstanding documents that
// * require indexing.
// * <p>
// * This component must be used as a singleton (one per VM) and may only be
// * called to reindex once. It will start a thread that processes all available
// * transactions and keeps checking to ensure that the index is up to date with
// * the latest database changes.
// * <p>
// * <b>The following points are important:</b>
// * <ul>
// * <li>
// * By default, the Hibernate L2 cache is used during processing.
// * This can be disabled by either disabling the L2 cache globally
// * for the server (not recommended) or by setting the
// * {@link #setL2CacheMode(String) l2CacheMode} property. If the
// * database is static then the L2 cache usage can be set to use
// * the <code>NORMAL</code> mode. <code>REFRESH</code> should be
// * used where the server will still be accessed from some clients
// * despite the database changing. <code>NORMAL</code> can be used
// * in the case of the caches being clustered, i.e. the caches will
// * not be out of date w.r.t. the database.
// * </li>
// * <li>
// * This process should only be used continuously where the index
// * transactions are following the database transactions. Use the
// * {@link #setRunContinuously(boolean) runContinuously} property
// * to change this behaviour.
// * </li>
// * </ul>
// *
// * @author Derek Hulley
// */
//public class MissingContentReindexComponent extends HibernateDaoSupport implements IndexRecovery
//{
// public static final String QUERY_GET_NEXT_CHANGE_TXN_IDS = "node.GetNextChangeTxnIds";
// public static final String QUERY_GET_CHANGED_NODE_STATUSES = "node.GetChangedNodeStatuses";
// public static final String QUERY_GET_DELETED_NODE_STATUSES = "node.GetDeletedNodeStatuses";
// public static final String QUERY_GET_CHANGED_NODE_STATUSES_COUNT = "node.GetChangedNodeStatusesCount";
//
// private static final String START_TXN_ID = "000";
//
// private static Log logger = LogFactory.getLog(FullIndexRecoveryComponent.class);
//
// /** ensures that this process is kicked off once per VM */
// private static boolean started = false;
// /** The current transaction ID being processed */
// private static String currentTxnId = START_TXN_ID;
// /** kept to notify the thread that it should quite */
// private boolean killThread = false;
//
// /** provides transactions to atomically index each missed transaction */
// private TransactionService transactionService;
// /** the component to index the node hierarchy */
// private Indexer indexer;
// /** the FTS indexer that we will prompt to pick up on any un-indexed text */
// private FullTextSearchIndexer ftsIndexer;
// /** the component providing searches of the indexed nodes */
// private SearchService searcher;
// /** the component giving direct access to <b>node</b> instances */
// private NodeService nodeService;
// /** set this to run the index recovery component */
// private boolean executeFullRecovery;
// /** set this on to keep checking for new transactions and never stop */
// private boolean runContinuously;
// /** set the time to wait between checking indexes */
// private long waitTime;
// /** controls how the L2 cache is used */
// private CacheMode l2CacheMode;
//
// /**
// * @return Returns the ID of the current (or last) transaction processed
// */
// public static String getCurrentTransactionId()
// {
// return currentTxnId;
// }
//
// public FullIndexRecoveryComponent()
// {
// this.killThread = false;
// this.executeFullRecovery = false;
// this.runContinuously = false;
// this.waitTime = 1000L;
// this.l2CacheMode = CacheMode.REFRESH;
//
// // ensure that we kill the thread when the VM is shutting down
// Runnable shutdownRunnable = new Runnable()
// {
// public void run()
// {
// killThread = true;
// };
// };
// Thread shutdownThread = new Thread(shutdownRunnable);
// Runtime.getRuntime().addShutdownHook(shutdownThread);
// }
//
// /**
// * @return Returns true if the component has already been started
// */
// public static boolean isStarted()
// {
// return started;
// }
//
// /**
// * @param transactionService provide transactions to index each missed transaction
// */
// public void setTransactionService(TransactionService transactionService)
// {
// this.transactionService = transactionService;
// }
//
// /**
// * @param indexer the indexer that will be index
// */
// public void setIndexer(Indexer indexer)
// {
// this.indexer = indexer;
// }
//
// /**
// * @param ftsIndexer the FTS background indexer
// */
// public void setFtsIndexer(FullTextSearchIndexer ftsIndexer)
// {
// this.ftsIndexer = ftsIndexer;
// }
//
// /**
// * @param searcher component providing index searches
// */
// public void setSearcher(SearchService searcher)
// {
// this.searcher = searcher;
// }
//
// /**
// * @param nodeService provides information about nodes for indexing
// */
// public void setNodeService(NodeService nodeService)
// {
// this.nodeService = nodeService;
// }
//
// /**
// * Set this to <code>true</code> to initiate the full index recovery.
// * <p>
// * This used to default to <code>true</code> but is now false. Set this
// * if the potentially long-running process of checking and fixing the
// * indexes must be started.
// *
// * @param executeFullRecovery
// */
// public void setExecuteFullRecovery(boolean executeFullRecovery)
// {
// this.executeFullRecovery = executeFullRecovery;
// }
//
// /**
// * Set this to ensure that the process continuously checks for new transactions.
// * If not, it will permanently terminate once it catches up with the current
// * transactions.
// *
// * @param runContinuously true to never cease looking for new transactions
// */
// public void setRunContinuously(boolean runContinuously)
// {
// this.runContinuously = runContinuously;
// }
//
// /**
// * Set the time to wait between checking for new transaction changes in the database.
// *
// * @param waitTime the time to wait in milliseconds
// */
// public void setWaitTime(long waitTime)
// {
// this.waitTime = waitTime;
// }
//
// /**
// * Set the hibernate cache mode by name
// *
// * @see org.hibernate.CacheMode
// */
// public void setL2CacheMode(String l2CacheModeStr)
// {
// if (l2CacheModeStr.equals("GET"))
// {
// l2CacheMode = CacheMode.GET;
// }
// else if (l2CacheModeStr.equals("IGNORE"))
// {
// l2CacheMode = CacheMode.IGNORE;
// }
// else if (l2CacheModeStr.equals("NORMAL"))
// {
// l2CacheMode = CacheMode.NORMAL;
// }
// else if (l2CacheModeStr.equals("PUT"))
// {
// l2CacheMode = CacheMode.PUT;
// }
// else if (l2CacheModeStr.equals("REFRESH"))
// {
// l2CacheMode = CacheMode.REFRESH;
// }
// else
// {
// throw new IllegalArgumentException("Unrecognised Hibernate L2 cache mode: " + l2CacheModeStr);
// }
// }
//
// /**
// * Ensure that the index is up to date with the current state of the persistence layer.
// * The full list of unique transaction change IDs is retrieved and used to detect
// * which are not present in the index. All the node changes and deletions for the
// * remaining transactions are then indexed.
// */
// public synchronized void reindex()
// {
// if (FullIndexRecoveryComponent.started)
// {
// throw new AlfrescoRuntimeException
// ("Only one FullIndexRecoveryComponent may be used per VM and it may only be called once");
// }
//
// // ensure that we don't redo this work
// FullIndexRecoveryComponent.started = true;
//
// // work to mark the stores for full text reindexing
// TransactionWork<Object> ftsReindexWork = new TransactionWork<Object>()
// {
// public Object doWork()
// {
// List<StoreRef> storeRefs = nodeService.getStores();
// // reindex each store
// for (StoreRef storeRef : storeRefs)
// {
// // check if the store exists
// if (!nodeService.exists(storeRef))
// {
// // store does not exist
// if (logger.isDebugEnabled())
// {
// logger.debug("Skipping reindex of non-existent store: " + storeRef);
// }
// continue;
// }
//
// // prompt FTS to reindex the store
// ftsIndexer.requiresIndex(storeRef);
// }
// // done
// if (logger.isDebugEnabled())
// {
// logger.debug("Prompted FTS index on stores: " + storeRefs);
// }
// return null;
// }
// };
// TransactionUtil.executeInNonPropagatingUserTransaction(transactionService, ftsReindexWork);
//
// // start full index recovery, if necessary
// if (!this.executeFullRecovery)
// {
// if (logger.isDebugEnabled())
// {
// logger.debug("Full index recovery is off - quitting");
// }
// }
// else
// {
// // set the state of the reindex
// FullIndexRecoveryComponent.currentTxnId = START_TXN_ID;
//
// // start a stateful thread that will begin processing the reindexing the transactions
// Runnable runnable = new ReindexRunner();
// Thread reindexThread = new Thread(runnable);
// // make it a daemon thread
// reindexThread.setDaemon(true);
// // it should not be a high priority
// reindexThread.setPriority(Thread.MIN_PRIORITY);
// // start it
// reindexThread.start();
//
// if (logger.isDebugEnabled())
// {
// logger.debug("Full index recovery thread started: \n" +
// " continuous: " + runContinuously);
// }
// }
// }
//
// /**
// * Stateful thread runnable that executes reindex calls.
// *
// * @see FullIndexRecoveryComponent#reindexNodes()
// *
// * @author Derek Hulley
// */
// private class ReindexRunner implements Runnable
// {
// public void run()
// {
// // keep this thread going permanently
// while (!killThread)
// {
// try
// {
// // reindex nodes
// List<String> txnsIndexed = FullIndexRecoveryComponent.this.reindexNodes();
// // reindex missing content
// @SuppressWarnings("unused")
// int missingContentCount = FullIndexRecoveryComponent.this.reindexMissingContent();
// // check if the process should terminate
// if (txnsIndexed.size() == 0 && !runContinuously)
// {
// // the thread has caught up with all the available work and should not
// // run continuously
// if (logger.isDebugEnabled())
// {
// logger.debug("Thread quitting - no more available indexing to do: \n" +
// " last txn: " + FullIndexRecoveryComponent.getCurrentTransactionId());
// }
// break;
// }
// // brief pause
// synchronized(FullIndexRecoveryComponent.this)
// {
// FullIndexRecoveryComponent.this.wait(waitTime);
// }
// }
// catch (InterruptedException e)
// {
// // ignore
// }
// catch (Throwable e)
// {
// if (killThread)
// {
// // the shutdown may have caused the exception - ignore it
// }
// else
// {
// // we are still a go; report it
// logger.error("Reindex failure", e);
// }
// }
// }
// }
// }
//
// /**
// * @return Returns the number of documents reindexed
// */
// private int reindexMissingContent()
// {
// int count = 0;
// for (StoreRef storeRef : storeRefs)
// {
// count += reindexMissingContent(storeRef);
// }
// return count;
// }
//
// /**
// * @param storeRef the store to check for missing content
// * @return Returns the number of documents reindexed
// */
// private int reindexMissingContent(StoreRef storeRef)
// {
// SearchParameters sp = new SearchParameters();
// sp.addStore(storeRef);
//
// // search for it in the index
// String query = "TEXT:" + LuceneIndexerImpl.NOT_INDEXED_CONTENT_MISSING;
// sp.setLanguage(SearchService.LANGUAGE_LUCENE);
// sp.setQuery(query);
// ResultSet results = null;
// try
// {
// results = searcher.query(sp);
//
// int count = 0;
// // loop over the results and get the details of the nodes that have missing content
// List<ChildAssociationRef> assocRefs = results.getChildAssocRefs();
// for (ChildAssociationRef assocRef : assocRefs)
// {
// final NodeRef childNodeRef = assocRef.getChildRef();
// // prompt for a reindex - it might fail again, but we just keep plugging away
// TransactionWork<Object> reindexWork = new TransactionWork<Object>()
// {
// public Object doWork()
// {
// indexer.updateNode(childNodeRef);
// return null;
// }
// };
// TransactionUtil.executeInNonPropagatingUserTransaction(transactionService, reindexWork);
// count++;
// }
// // done
// if (logger.isDebugEnabled())
// {
// logger.debug("Reindexed missing content: \n" +
// " store: " + storeRef + "\n" +
// " node count: " + count);
// }
// return count;
// }
// finally
// {
// if (results != null)
// {
// results.close();
// }
// }
// }
//
// /**
// * @return Returns the transaction ID just reindexed, i.e. where some work was performed
// */
// private List<String> reindexNodes()
// {
// // get a list of all transactions still requiring a check
// List<String> txnsToCheck = getNextChangeTxnIds(FullIndexRecoveryComponent.currentTxnId);
//
// // loop over each transaction
// for (String changeTxnId : txnsToCheck)
// {
// reindexNodes(changeTxnId);
// }
//
// // done
// return txnsToCheck;
// }
//
// /**
// * Reindexes changes specific to the change transaction ID.
// * <p>
// * <b>All exceptions are absorbed.</b>
// */
// private void reindexNodes(final String changeTxnId)
// {
// /*
// * This must execute each within its own transaction.
// * The cache size is therefore not an issue.
// */
// TransactionWork<Object> reindexWork = new TransactionWork<Object>()
// {
// public Object doWork() throws Exception
// {
// // perform the work in a Hibernate callback
// HibernateCallback callback = new ReindexCallback(changeTxnId);
// getHibernateTemplate().execute(callback);
// // done
// return null;
// }
// };
// try
// {
// TransactionUtil.executeInNonPropagatingUserTransaction(transactionService, reindexWork);
// }
// catch (Throwable e)
// {
// logger.error("Transaction reindex failed: \n" +
// " txn: " + changeTxnId,
// e);
// }
// finally
// {
// // Up the current transaction now, in case the process fails at this point.
// // This will prevent the transaction from being processed again.
// // This applies to failures as well, which should be dealt with externally
// // and having the entire process start again, e.g. such as a system reboot
// currentTxnId = changeTxnId;
// }
// }
//
// /**
// * Stateful inner class that implements a single reindex call for a given store
// * and transaction.
// * <p>
// * It must be called within its own transaction.
// *
// * @author Derek Hulley
// */
// private class ReindexCallback implements HibernateCallback
// {
// private final String changeTxnId;
//
// public ReindexCallback(String changeTxnId)
// {
// this.changeTxnId = changeTxnId;
// }
//
// /**
// * Changes the L2 cache usage before reindexing for each store
// *
// * @see #reindexNodes(StoreRef, String)
// */
// public Object doInHibernate(Session session)
// {
// // set the way the L2 cache is used
// getSession().setCacheMode(l2CacheMode);
//
// // reindex each store
// for (StoreRef storeRef : storeRefs)
// {
// if (!nodeService.exists(storeRef))
// {
// // the store is not present
// continue;
// }
// // reindex for store
// reindexNodes(storeRef, changeTxnId);
// }
// // done
// return null;
// }
//
// private void reindexNodes(StoreRef storeRef, String changeTxnId)
// {
// // check if we need to perform this operation
// SearchParameters sp = new SearchParameters();
// sp.addStore(storeRef);
//
// // search for it in the index
// String query = "TX:\"" + changeTxnId + "\"";
// sp.setLanguage(SearchService.LANGUAGE_LUCENE);
// sp.setQuery(query);
// ResultSet results = null;
// try
// {
// results = searcher.query(sp);
// // did the index have any of these changes?
// if (results.length() > 0)
// {
// // the transaction has an entry in the index - assume that it was
// // atomically correct
// if (logger.isDebugEnabled())
// {
// logger.debug("Transaction present in index - no indexing required: \n" +
// " store: " + storeRef + "\n" +
// " txn: " + changeTxnId);
// }
// return;
// }
// }
// finally
// {
// if (results != null)
// {
// results.close();
// }
// }
// // the index has no record of this
// // were there any changes, or is it all just deletions?
// int changedCount = getChangedNodeStatusesCount(storeRef, changeTxnId);
// if (changedCount == 0)
// {
// // no nodes were changed in the transaction, i.e. they are only deletions
// // the index is quite right not to have any entries for the transaction
// if (logger.isDebugEnabled())
// {
// logger.debug("Transaction only has deletions - no indexing required: \n" +
// " store: " + storeRef + "\n" +
// " txn: " + changeTxnId);
// }
// return;
// }
//
// // process the deletions relevant to the txn and the store
// List<NodeStatus> deletedNodeStatuses = getDeletedNodeStatuses(storeRef, changeTxnId);
// for (NodeStatus status : deletedNodeStatuses)
// {
// NodeRef nodeRef = new NodeRef(storeRef, status.getKey().getGuid());
// // only the child node ref is relevant
// ChildAssociationRef assocRef = new ChildAssociationRef(
// ContentModel.ASSOC_CHILDREN,
// null,
// null,
// nodeRef);
// indexer.deleteNode(assocRef);
// }
//
// // process additions
// List<NodeStatus> changedNodeStatuses = getChangedNodeStatuses(storeRef, changeTxnId);
// for (NodeStatus status : changedNodeStatuses)
// {
// NodeRef nodeRef = new NodeRef(storeRef, status.getKey().getGuid());
// // get the primary assoc for the node
// ChildAssociationRef primaryAssocRef = nodeService.getPrimaryParent(nodeRef);
// // reindex
// indexer.createNode(primaryAssocRef);
// }
//
// // done
// if (logger.isDebugEnabled())
// {
// logger.debug("Transaction reindexed: \n" +
// " store: " + storeRef + "\n" +
// " txn: " + changeTxnId + "\n" +
// " deletions: " + deletedNodeStatuses.size() + "\n" +
// " modifications: " + changedNodeStatuses.size());
// }
// }
// };
//
// /**
// * Retrieve all transaction IDs that are greater than the given transaction ID.
// *
// * @param currentTxnId the transaction ID that must be less than all returned results
// * @return Returns an ordered list of transaction IDs
// */
// @SuppressWarnings("unchecked")
// public List<String> getNextChangeTxnIds(final String currentTxnId)
// {
// HibernateCallback callback = new HibernateCallback()
// {
// public Object doInHibernate(Session session)
// {
// Query query = session.getNamedQuery(QUERY_GET_NEXT_CHANGE_TXN_IDS);
// query.setString("currentTxnId", currentTxnId)
// .setReadOnly(true);
// return query.list();
// }
// };
// List<String> queryResults = (List<String>) getHibernateTemplate().execute(callback);
// // done
// return queryResults;
// }
//
// @SuppressWarnings("unchecked")
// public int getChangedNodeStatusesCount(final StoreRef storeRef, final String changeTxnId)
// {
// HibernateCallback callback = new HibernateCallback()
// {
// public Object doInHibernate(Session session)
// {
// Query query = session.getNamedQuery(QUERY_GET_CHANGED_NODE_STATUSES_COUNT);
// query.setString("storeProtocol", storeRef.getProtocol())
// .setString("storeIdentifier", storeRef.getIdentifier())
// .setString("changeTxnId", changeTxnId)
// .setReadOnly(true);
// return query.uniqueResult();
// }
// };
// Integer changeCount = (Integer) getHibernateTemplate().execute(callback);
// // done
// return changeCount.intValue();
// }
//
// @SuppressWarnings("unchecked")
// public List<NodeStatus> getChangedNodeStatuses(final StoreRef storeRef, final String changeTxnId)
// {
// HibernateCallback callback = new HibernateCallback()
// {
// public Object doInHibernate(Session session)
// {
// Query query = session.getNamedQuery(QUERY_GET_CHANGED_NODE_STATUSES);
// query.setString("storeProtocol", storeRef.getProtocol())
// .setString("storeIdentifier", storeRef.getIdentifier())
// .setString("changeTxnId", changeTxnId)
// .setReadOnly(true);
// return query.list();
// }
// };
// List<NodeStatus> queryResults = (List) getHibernateTemplate().execute(callback);
// // done
// return queryResults;
// }
//
// @SuppressWarnings("unchecked")
// public List<NodeStatus> getDeletedNodeStatuses(final StoreRef storeRef, final String changeTxnId)
// {
// HibernateCallback callback = new HibernateCallback()
// {
// public Object doInHibernate(Session session)
// {
// Query query = session.getNamedQuery(QUERY_GET_DELETED_NODE_STATUSES);
// query.setString("storeProtocol", storeRef.getProtocol())
// .setString("storeIdentifier", storeRef.getIdentifier())
// .setString("changeTxnId", changeTxnId)
// .setReadOnly(true);
// return query.list();
// }
// };
// List<NodeStatus> queryResults = (List) getHibernateTemplate().execute(callback);
// // done
// return queryResults;
// }
//}
/*
* Copyright (C) 2005-2006 Alfresco, Inc.
*
* Licensed under the Mozilla Public License version 1.1
* with a permitted attribution clause. You may obtain a
* copy of the License at
*
* http://www.alfresco.org/legal/license.txt
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the
* License.
*/
package org.alfresco.repo.node.index;
import java.util.List;
import org.alfresco.repo.search.impl.lucene.LuceneIndexerImpl;
import org.alfresco.repo.transaction.TransactionUtil;
import org.alfresco.repo.transaction.TransactionUtil.TransactionWork;
import org.alfresco.service.cmr.repository.NodeRef;
import org.alfresco.service.cmr.repository.StoreRef;
import org.alfresco.service.cmr.search.ResultSet;
import org.alfresco.service.cmr.search.ResultSetRow;
import org.alfresco.service.cmr.search.SearchParameters;
import org.alfresco.service.cmr.search.SearchService;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
 * This component attempts to reindex content nodes that were flagged during indexing
 * because their content was missing at the time, once the content becomes available.
 *
 * @author Derek Hulley
 */
public class MissingContentReindexComponent extends AbstractReindexComponent
{
    private static Log logger = LogFactory.getLog(MissingContentReindexComponent.class);

    /** keep track of whether the FTS indexer thread has been poked */
    private boolean ftsIndexerCalled;

    public MissingContentReindexComponent()
    {
        ftsIndexerCalled = false;
    }

    /**
     * Iterates over all stores, prompting the FTS indexer once and reindexing any
     * nodes whose content was flagged as missing at index time.  If this object is
     * currently busy, then it just does nothing.
     */
    @Override
    public void reindexImpl()
    {
        List<StoreRef> storeRefs = nodeService.getStores();
        int count = 0;
        for (StoreRef storeRef : storeRefs)
        {
            // prompt the FTS reindexing
            if (!ftsIndexerCalled)
            {
                ftsIndexer.requiresIndex(storeRef);
            }
            // reindex missing content
            count += reindexMissingContent(storeRef);
            // check if we have to break out
            if (isShuttingDown())
            {
                break;
            }
        }
        // The FTS indexer only needs to be prompted once
        ftsIndexerCalled = true;
        // done
        if (logger.isDebugEnabled())
        {
            logger.debug("Missing content indexing touched " + count + " content nodes");
        }
    }

    /**
     * Searches the store's index for documents flagged as having missing content and
     * prompts a reindex of each one in its own non-propagating transaction.
     *
     * @param storeRef the store to check for missing content
     * @return Returns the number of documents reindexed
     */
    private int reindexMissingContent(StoreRef storeRef)
    {
        SearchParameters sp = new SearchParameters();
        sp.addStore(storeRef);
        // search for it in the index, sorting with youngest first
        sp.setLanguage(SearchService.LANGUAGE_LUCENE);
        sp.setQuery("TEXT:" + LuceneIndexerImpl.NOT_INDEXED_CONTENT_MISSING);
        sp.addSort(SearchParameters.SORT_IN_DOCUMENT_ORDER_DESCENDING);
        ResultSet results = null;
        try
        {
            results = searcher.query(sp);
            int count = 0;
            // iterate over the nodes and prompt for reindexing
            for (ResultSetRow row : results)
            {
                final NodeRef childNodeRef = row.getNodeRef();
                // prompt for a reindex - it might fail again, but we just keep plugging away
                TransactionWork<Object> reindexWork = new TransactionWork<Object>()
                {
                    public Object doWork()
                    {
                        indexer.updateNode(childNodeRef);
                        return null;
                    }
                };
                TransactionUtil.executeInNonPropagatingUserTransaction(transactionService, reindexWork);
                // BUG FIX: count was declared, logged and returned but never incremented,
                // so this method always reported zero reindexed documents
                count++;
                // check if we have to break out
                if (isShuttingDown())
                {
                    break;
                }
            }
            // done
            if (logger.isDebugEnabled())
            {
                logger.debug(
                        "Reindexed missing content: \n" +
                        "   store: " + storeRef + "\n" +
                        "   node count: " + count);
            }
            return count;
        }
        finally
        {
            // always release the underlying Lucene resources
            if (results != null)
            {
                results.close();
            }
        }
    }
}

View File

@@ -0,0 +1,172 @@
/*
* Copyright (C) 2005-2006 Alfresco, Inc.
*
* Licensed under the Mozilla Public License version 1.1
* with a permitted attribution clause. You may obtain a
* copy of the License at
*
* http://www.alfresco.org/legal/license.txt
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the
* License.
*/
package org.alfresco.repo.node.index;
import junit.framework.TestCase;
import org.alfresco.model.ContentModel;
import org.alfresco.repo.content.AbstractContentStore;
import org.alfresco.repo.content.ContentStore;
import org.alfresco.repo.node.db.NodeDaoService;
import org.alfresco.repo.search.Indexer;
import org.alfresco.repo.search.impl.lucene.LuceneIndexerImpl;
import org.alfresco.repo.search.impl.lucene.fts.FullTextSearchIndexer;
import org.alfresco.repo.security.authentication.AuthenticationComponent;
import org.alfresco.repo.transaction.TransactionComponent;
import org.alfresco.service.ServiceRegistry;
import org.alfresco.service.cmr.model.FileFolderService;
import org.alfresco.service.cmr.repository.ContentData;
import org.alfresco.service.cmr.repository.ContentWriter;
import org.alfresco.service.cmr.repository.NodeRef;
import org.alfresco.service.cmr.repository.NodeService;
import org.alfresco.service.cmr.repository.StoreRef;
import org.alfresco.service.cmr.search.ResultSet;
import org.alfresco.service.cmr.search.SearchParameters;
import org.alfresco.service.cmr.search.SearchService;
import org.alfresco.service.namespace.QName;
import org.alfresco.service.transaction.TransactionService;
import org.alfresco.util.ApplicationContextHelper;
import org.springframework.context.ApplicationContext;
/**
* @see org.alfresco.repo.node.index.MissingContentReindexComponent
*
* @author Derek Hulley
*/
public class MissingContentReindexComponentTest extends TestCase
{
    private static ApplicationContext ctx = ApplicationContextHelper.getApplicationContext();

    private AuthenticationComponent authenticationComponent;
    private SearchService searchService;
    private NodeService nodeService;
    private FileFolderService fileFolderService;
    private ContentStore contentStore;
    private FullTextSearchIndexer ftsIndexer;
    private NodeRef rootNodeRef;
    private MissingContentReindexComponent reindexer;

    @Override
    protected void setUp() throws Exception
    {
        ServiceRegistry serviceRegistry = (ServiceRegistry) ctx.getBean(ServiceRegistry.SERVICE_REGISTRY);
        searchService = serviceRegistry.getSearchService();
        nodeService = serviceRegistry.getNodeService();
        fileFolderService = serviceRegistry.getFileFolderService();
        authenticationComponent = (AuthenticationComponent) ctx.getBean("authenticationComponentImpl");
        contentStore = (ContentStore) ctx.getBean("fileContentStore");
        ftsIndexer = (FullTextSearchIndexer) ctx.getBean("LuceneFullTextSearchIndexer");
        Indexer indexer = (Indexer) ctx.getBean("indexerComponent");
        NodeDaoService nodeDaoService = (NodeDaoService) ctx.getBean("nodeDaoService");
        TransactionService transactionService = serviceRegistry.getTransactionService();
        // wire up the component under test by hand rather than from the context
        reindexer = new MissingContentReindexComponent();
        reindexer.setAuthenticationComponent(authenticationComponent);
        reindexer.setFtsIndexer(ftsIndexer);
        reindexer.setIndexer(indexer);
        reindexer.setNodeDaoService(nodeDaoService);
        reindexer.setNodeService(nodeService);
        reindexer.setSearcher(searchService);
        reindexer.setTransactionComponent((TransactionComponent)transactionService);
        // authenticate
        authenticationComponent.setSystemUserAsCurrentUser();
        // create a root node for the test
        StoreRef storeRef = nodeService.createStore("test", getName() + "-" + System.nanoTime());
        rootNodeRef = nodeService.getRootNode(storeRef);
        rootNodeRef = nodeService.createNode(
                rootNodeRef,
                ContentModel.ASSOC_CHILDREN,
                QName.createQName("cm:x"),
                ContentModel.TYPE_FOLDER).getChildRef();
    }

    @Override
    protected void tearDown() throws Exception
    {
        authenticationComponent.clearCurrentSecurityContext();
    }

    /**
     * Create a node with a content URL that points to missing content.  It then
     * checks that the indexing flagged it, prompts a reindex of missing content
     * and checks that the text was properly indexed.
     */
    public synchronized void testReindex() throws Exception
    {
        // create a node with missing content
        String contentUrl = AbstractContentStore.createNewUrl();
        ContentData contentData = new ContentData(contentUrl, "text/plain", 0L, "UTF8");
        // create the file node
        NodeRef nodeRef = fileFolderService.create(rootNodeRef, "myfile", ContentModel.TYPE_CONTENT).getNodeRef();
        // add the content
        nodeService.setProperty(nodeRef, ContentModel.PROP_CONTENT, contentData);
        // wait a bit for the indexing
        ftsIndexer.index();
        wait(1000);
        // check that the node was indexed, but flagged as having missing content
        SearchParameters sp = new SearchParameters();
        sp.addStore(rootNodeRef.getStoreRef());
        sp.setLanguage(SearchService.LANGUAGE_LUCENE);
        sp.setQuery("TEXT:" + LuceneIndexerImpl.NOT_INDEXED_CONTENT_MISSING);
        sp.addSort(SearchParameters.SORT_IN_DOCUMENT_ORDER_DESCENDING);
        ResultSet results = null;
        try
        {
            results = searchService.query(sp);
            assertTrue("Content missing NICM not found", results.length() == 1);
        }
        finally
        {
            if (results != null) { results.close(); }
        }
        // now put some content in the store
        ContentWriter writer = contentStore.getWriter(null, contentUrl);
        writer.setMimetype("text/plain");
        writer.setEncoding("UTF8");
        writer.putContent("123abc456def");
        // prompt for reindex
        reindexer.reindex();
        // wait for it to have been indexed again
        ftsIndexer.index();
        wait(1000);
        // search for the text
        sp = new SearchParameters();
        sp.addStore(rootNodeRef.getStoreRef());
        sp.setLanguage(SearchService.LANGUAGE_LUCENE);
        sp.setQuery("TEXT:" + "123abc456def");
        sp.addSort("@" + ContentModel.PROP_CREATED, false);
        results = null;
        try
        {
            results = searchService.query(sp);
            // BUG FIX: the assertion message prints on failure, so it must describe
            // the failure ("not found"), not the expected success
            assertTrue("Indexed content node not found", results.length() == 1);
        }
        finally
        {
            if (results != null) { results.close(); }
        }
    }
}