More build related stuff

git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/alfresco/HEAD/root@4858 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
Andrew Hind
2007-01-17 12:46:22 +00:00
parent b15a708086
commit de338f9d28
2 changed files with 386 additions and 89 deletions

ConcurrentNodeServiceTest.java

@@ -27,6 +27,7 @@ import org.alfresco.repo.dictionary.DictionaryDAO;
 import org.alfresco.repo.dictionary.M2Model;
 import org.alfresco.repo.search.impl.lucene.fts.FullTextSearchIndexer;
 import org.alfresco.repo.security.authentication.AuthenticationComponent;
+import org.alfresco.repo.transaction.TransactionUtil;
 import org.alfresco.service.ServiceRegistry;
 import org.alfresco.service.cmr.repository.ChildAssociationRef;
 import org.alfresco.service.cmr.repository.NodeRef;
@@ -43,26 +44,37 @@ import org.alfresco.util.ApplicationContextHelper;
 import org.springframework.context.ApplicationContext;
 
 /**
+ *
  * @author Andy Hind
  */
 @SuppressWarnings("unused")
 public class ConcurrentNodeServiceTest extends TestCase
 {
     public static final String NAMESPACE = "http://www.alfresco.org/test/BaseNodeServiceTest";
     public static final String TEST_PREFIX = "test";
     public static final QName TYPE_QNAME_TEST_CONTENT = QName.createQName(NAMESPACE, "content");
     public static final QName ASPECT_QNAME_TEST_TITLED = QName.createQName(NAMESPACE, "titled");
     public static final QName PROP_QNAME_TEST_TITLE = QName.createQName(NAMESPACE, "title");
     public static final QName PROP_QNAME_TEST_MIMETYPE = QName.createQName(NAMESPACE, "mimetype");
+
+    public static final int COUNT = 10;
+
+    public static final int REPEATS = 10;
 
     static ApplicationContext ctx = ApplicationContextHelper.getApplicationContext();
     private NodeService nodeService;
     private TransactionService transactionService;
     private NodeRef rootNodeRef;
     private FullTextSearchIndexer luceneFTS;
     private AuthenticationComponent authenticationComponent;
 
     public ConcurrentNodeServiceTest()
@@ -88,8 +100,8 @@ public class ConcurrentNodeServiceTest extends TestCase
         nodeService = (NodeService) ctx.getBean("dbNodeService");
         transactionService = (TransactionService) ctx.getBean("transactionComponent");
         luceneFTS = (FullTextSearchIndexer) ctx.getBean("LuceneFullTextSearchIndexer");
-        this.authenticationComponent = (AuthenticationComponent)ctx.getBean("authenticationComponent");
+        this.authenticationComponent = (AuthenticationComponent) ctx.getBean("authenticationComponent");
         this.authenticationComponent.setSystemUserAsCurrentUser();
 
         // create a first store directly
@@ -106,7 +118,7 @@ public class ConcurrentNodeServiceTest extends TestCase
         authenticationComponent.clearCurrentSecurityContext();
         super.tearDown();
     }
 
     protected Map<QName, ChildAssociationRef> buildNodeGraph() throws Exception
     {
         return BaseNodeServiceTest.buildNodeGraph(nodeService, rootNodeRef);
@@ -122,20 +134,60 @@ public class ConcurrentNodeServiceTest extends TestCase
         return null;// answer;
     }
 
+    public void test1() throws Exception
+    {
+        testConcurrent();
+    }
+
+    public void test2() throws Exception
+    {
+        testConcurrent();
+    }
+
+    public void test3() throws Exception
+    {
+        testConcurrent();
+    }
+
+    public void test4() throws Exception
+    {
+        testConcurrent();
+    }
+
+    public void test5() throws Exception
+    {
+        testConcurrent();
+    }
+
+    public void test6() throws Exception
+    {
+        testConcurrent();
+    }
+
+    public void test7() throws Exception
+    {
+        testConcurrent();
+    }
+
+    public void test8() throws Exception
+    {
+        testConcurrent();
+    }
+
+    public void test9() throws Exception
+    {
+        testConcurrent();
+    }
+
+    public void test10() throws Exception
+    {
+        testConcurrent();
+    }
+
     public void testConcurrent() throws Exception
     {
         luceneFTS.pause();
         // TODO: LUCENE UPDATE ISSUE fix commit lock time out
         // IndexWriter.COMMIT_LOCK_TIMEOUT = 100000;
-        int count = 10;
-        int repeats = 10;
 
         Map<QName, ChildAssociationRef> assocRefs = commitNodeGraph();
         Thread runner = null;
 
-        for (int i = 0; i < count; i++)
+        for (int i = 0; i < COUNT; i++)
         {
-            runner = new Nester("Concurrent-" + i, runner, repeats);
+            runner = new Nester("Concurrent-" + i, runner, REPEATS);
         }
         if (runner != null)
         {
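The loop above chains the worker threads: each Nester is handed the previously built one, starts it, and joins on it before it finishes (the "Starting"/join lines appear in the later hunks), so starting and joining the last thread drains the whole chain. A minimal self-contained sketch of the pattern; the class and member names here are illustrative, not the actual Nester internals:

    // Each thread starts its predecessor, does its own work, then waits for it.
    class Chained extends Thread
    {
        private final Thread waiter; // previously created thread, may be null

        Chained(String name, Thread waiter)
        {
            super(name);
            this.waiter = waiter;
        }

        public void run()
        {
            if (waiter != null)
            {
                waiter.start();
            }
            // ... per-thread work would go here ...
            if (waiter != null)
            {
                try
                {
                    waiter.join();
                }
                catch (InterruptedException e)
                {
                    // ignored, as in the test
                }
            }
        }
    }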
@@ -152,14 +204,62 @@ public class ConcurrentNodeServiceTest extends TestCase
             }
         }
 
-        SearchService searcher = (SearchService) ctx.getBean(ServiceRegistry.SEARCH_SERVICE.getLocalName());
-        assertEquals(2 * ((count * repeats) + 1), searcher.selectNodes(rootNodeRef, "/*", null,
-                getNamespacePrefixReolsver(""), false).size());
-        ResultSet results = searcher.query(rootNodeRef.getStoreRef(), "lucene", "PATH:\"/*\"");
-        // n6 has root aspect - there are three things at the root level in the
-        // index
-        assertEquals(3 * ((count * repeats) + 1), results.length());
-        results.close();
+        TransactionUtil.executeInUserTransaction(transactionService, new TransactionUtil.TransactionWork<Object>()
+        {
+            public Object doWork() throws Exception
+            {
+                // There are two nodes at the base level in each test
+                assertEquals(2 * ((COUNT * REPEATS) + 1), nodeService.getChildAssocs(rootNodeRef).size());
+
+                SearchService searcher = (SearchService) ctx.getBean(ServiceRegistry.SEARCH_SERVICE.getLocalName());
+                assertEquals(2 * ((COUNT * REPEATS) + 1), searcher.selectNodes(rootNodeRef, "/*", null,
+                        getNamespacePrefixReolsver(""), false).size());
+
+                ResultSet results = null;
+                try
+                {
+                    results = searcher.query(rootNodeRef.getStoreRef(), "lucene", "PATH:\"/*\"");
+                    // n6 has root aspect - there are three things at the root level in the
+                    // index
+                    assertEquals(3 * ((COUNT * REPEATS) + 1), results.length());
+                    results.close();
+
+                    results = searcher.query(rootNodeRef.getStoreRef(), "lucene", "PATH:\"/*/*\"");
+                    // n6 has root aspect - there are three things at the root level in the
+                    // index
+                    assertEquals(3 * ((COUNT * REPEATS) + 1), results.length());
+                    results.close();
+
+                    results = searcher.query(rootNodeRef.getStoreRef(), "lucene", "PATH:\"/*/*/*\"");
+                    // n6 has root aspect - there are three things at the root level in the
+                    // index
+                    assertEquals(2 * ((COUNT * REPEATS) + 1), results.length());
+                    results.close();
+
+                    results = searcher.query(rootNodeRef.getStoreRef(), "lucene", "PATH:\"/*/*/*/*\"");
+                    // n6 has root aspect - there are three things at the root level in the
+                    // index
+                    assertEquals(1 * ((COUNT * REPEATS) + 1), results.length());
+                    results.close();
+
+                    results = searcher.query(rootNodeRef.getStoreRef(), "lucene", "PATH:\"/*/*/*/*/*\"");
+                    // n6 has root aspect - there are three things at the root level in the
+                    // index
+                    assertEquals(0 * ((COUNT * REPEATS) + 1), results.length());
+                    results.close();
+                }
+                finally
+                {
+                    if (results != null)
+                    {
+                        results.close();
+                    }
+                }
+                return null;
+            }
+        });
     }
 
     /**
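The substantive change in the hunk above: the post-run assertions now execute inside a single user transaction via TransactionUtil.executeInUserTransaction, so the NodeService and SearchService reads share one consistent view instead of each opening its own transaction. A minimal sketch of the callback pattern as the test uses it (the Integer return type is just for illustration; the test itself returns null from an Object-typed work unit):

    Integer childCount = TransactionUtil.executeInUserTransaction(transactionService,
            new TransactionUtil.TransactionWork<Integer>()
            {
                public Integer doWork() throws Exception
                {
                    // everything in doWork() runs inside one user transaction
                    return nodeService.getChildAssocs(rootNodeRef).size();
                }
            });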
@@ -182,7 +282,7 @@ public class ConcurrentNodeServiceTest extends TestCase
         public void run()
         {
             authenticationComponent.setSystemUserAsCurrentUser();
 
             if (waiter != null)
             {
                 System.out.println("Starting " + waiter.getName());
@@ -207,7 +307,8 @@ public class ConcurrentNodeServiceTest extends TestCase
                 try
                 {
                     waiter.join();
-                    System.out.println("Thread " + this.getName() + " has waited for " +(waiter == null ? "null" : waiter.getName()));
+                    System.out.println("Thread "
+                            + this.getName() + " has waited for " + (waiter == null ? "null" : waiter.getName()));
                 }
                 catch (InterruptedException e)
                 {

IndexInfo.java

@@ -86,13 +86,31 @@ import org.apache.lucene.store.RAMDirectory;
  * methods lock as required, the private methods assume that the appropriate locks have been obtained. TODO: Write
  * element status into individual directories. This would be enough for recovery if both index files are lost or
  * corrupted. TODO: Tidy up index status at start up or after some time. How long would you leave a merge to run?
+ * <p>
+ * The index structure is duplicated to two files. If one is corrupted the second is used.
+ * <p>
+ * TODO:
+ * <p>
+ * <ol>
+ * <li> make the index sharing configurable
+ * <li> use a thread pool for deletions, merging and index deletions
+ * <li> something to control the maximum number of overlays to limit the number of things layered together for searching
+ * <li> look at lucene locking again post 2.0, to see if it is improved
+ * <li> clean up old data files (that are not old index entries) - should be a config option
+ * </ol>
  *
  * @author Andy Hind
  */
 public class IndexInfo
 {
+    /**
+     * The logger.
+     */
     private static Logger s_logger = Logger.getLogger(IndexInfo.class);
 
+    /**
+     * Use NIO memory mapping to write the index control file.
+     */
     private static final boolean useNIOMemoryMapping = true;
 
     /**
@@ -105,8 +123,14 @@ public class IndexInfo
      */
     private static String INDEX_INFO_BACKUP = "IndexInfoBackup";
 
+    /**
+     * The default name for the index deletions file.
+     */
     private static String INDEX_INFO_DELETIONS = "IndexInfoDeletions";
 
+    /**
+     * What to look for to detect the previous index implementation.
+     */
     private static String OLD_INDEX = "index";
 
     /**
@@ -182,22 +206,53 @@ public class IndexInfo
     private EnumMap<TransactionStatus, Transition> transitions = new EnumMap<TransactionStatus, Transition>(
             TransactionStatus.class);
 
+    /**
+     * The queue of files and folders to delete.
+     */
     private ConcurrentLinkedQueue<String> deleteQueue = new ConcurrentLinkedQueue<String>();
 
+    /**
+     * A queue of reference counting index readers. We wait for these to become unused (ref count falls to zero) then
+     * the data can be removed.
+     */
     private ConcurrentLinkedQueue<IndexReader> deletableReaders = new ConcurrentLinkedQueue<IndexReader>();
 
+    /**
+     * The class that is responsible for deleting old index information from disk.
+     */
     private Cleaner cleaner = new Cleaner();
 
+    /**
+     * The thread that deletes old index data.
+     */
     private Thread cleanerThread;
 
+    /**
+     * The class that supports index merging and applying deletions from deltas to indexes and deltas that go before it.
+     */
     private Merger merger = new Merger();
 
+    /**
+     * The thread that carries out index merging and applying deletions from deltas to indexes and deltas that go
+     * before it.
+     */
     private Thread mergerThread;
 
+    /**
+     * A shared empty index to use if none exist.
+     */
     private Directory emptyIndex = new RAMDirectory();
 
+    /**
+     * The index info files that make up the index.
+     */
     private static HashMap<File, IndexInfo> indexInfos = new HashMap<File, IndexInfo>();
 
+    // Properties that control lucene indexing
+    // --------------------------------------
+
+    // Properties for indexes that are created by transactions ...
+
     private int maxDocsForInMemoryMerge = 10000;
 
     private int writerMinMergeDocs = 1000;
@@ -208,6 +263,8 @@ public class IndexInfo
     private boolean writerUseCompoundFile = true;
 
+    // Properties for indexes created by merging
+
     private int mergerMinMergeDocs = 1000;
 
     private int mergerMergeFactor = 5;
@@ -218,6 +275,8 @@ public class IndexInfo
     private int mergerTargetOverlays = 5;
 
+    // Common properties for indexers
+
     private long writeLockTimeout = IndexWriter.WRITE_LOCK_TIMEOUT;
 
     private long commitLockTimeout = IndexWriter.COMMIT_LOCK_TIMEOUT;
@@ -226,26 +285,49 @@ public class IndexInfo
     private int termIndexInterval = IndexWriter.DEFAULT_TERM_INDEX_INTERVAL;
 
-    // TODO: Something to control the maximum number of overlays
+    /**
+     * Control if the cleaner thread is active.
+     */
     private boolean enableCleanerThread = true;
 
+    /**
+     * Control if the merger thread is active.
+     */
     private boolean enableMergerThread = true;
 
     static
     {
+        // We do not require any of the lucene in-built locking.
         System.setProperty("disableLuceneLocks", "true");
     }
 
-    public static synchronized IndexInfo getIndexInfo(File file)
+    /**
+     * Get the IndexInfo object based on the given directory. There is only one object per directory per JVM.
+     *
+     * @param file
+     * @return
+     * @throws IndexerException
+     */
+    public static synchronized IndexInfo getIndexInfo(File file) throws IndexerException
     {
-        IndexInfo indexInfo = indexInfos.get(file);
-        if (indexInfo == null)
-        {
-            indexInfo = new IndexInfo(file);
-            indexInfos.put(file, indexInfo);
-        }
-        return indexInfo;
+        File canonicalFile;
+        try
+        {
+            canonicalFile = file.getCanonicalFile();
+            IndexInfo indexInfo = indexInfos.get(canonicalFile);
+            if (indexInfo == null)
+            {
+                indexInfo = new IndexInfo(canonicalFile);
+                indexInfos.put(canonicalFile, indexInfo);
+            }
+            return indexInfo;
+        }
+        catch (IOException e)
+        {
+            throw new IndexerException("Failed to transform a file into its canonical form", e);
+        }
     }
 
     /**
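getIndexInfo now keys the static indexInfos map by the canonical file rather than the raw File argument. The reason is worth spelling out: java.io.File equality compares path strings, so two different spellings of the same directory would otherwise produce two IndexInfo instances, and two sets of file channels, over one physical index. A small self-contained illustration (the paths are hypothetical):

    import java.io.File;
    import java.io.IOException;

    public class CanonicalKeyDemo
    {
        public static void main(String[] args) throws IOException
        {
            File a = new File("indexes/store");
            File b = new File("indexes/./store"); // same directory on disk

            System.out.println(a.equals(b)); // false: raw Files compare by path string
            System.out.println(a.getCanonicalFile().equals(b.getCanonicalFile())); // true: "." and ".." resolved
        }
    }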
@@ -278,19 +360,18 @@ public class IndexInfo
         File indexInfoBackupFile = new File(this.indexDirectory, INDEX_INFO_BACKUP);
         if (createFile(indexInfoFile) && createFile(indexInfoBackupFile))
         {
-            // a spanking new index
+            // If both files required creation this is a new index
             version = 0;
         }
 
-        // Open the files and channels
+        // Open the files and channels for the index info file and the backup
         this.indexInfoRAF = openFile(indexInfoFile);
         this.indexInfoChannel = this.indexInfoRAF.getChannel();
 
         this.indexInfoBackupRAF = openFile(indexInfoBackupFile);
         this.indexInfoBackupChannel = this.indexInfoBackupRAF.getChannel();
 
-        // Read info from disk if this is not a new index.
+        // If the index found no info files (i.e. it is new), check if there is an old style index and convert it.
         if (version == 0)
         {
             // Check if an old style index exists
@@ -342,6 +423,7 @@
             }
         }
+        // The index exists
         else if (version == -1)
         {
             getWriteLock();
@@ -353,6 +435,7 @@
                 {
                     setStatusFromFile();
 
+                    // If the index is not shared we can do some easy clean up
                     if (!indexIsShared)
                     {
                         HashSet<String> deletable = new HashSet<String>();
@@ -361,6 +444,8 @@
                         {
                             switch (entry.getStatus())
                             {
+                            // states which can be deleted
+                            // We could check prepared states can be committed.
                             case ACTIVE:
                             case MARKED_ROLLBACK:
                             case NO_TRANSACTION:
@@ -378,6 +463,7 @@
                                 entry.setStatus(TransactionStatus.DELETABLE);
                                 deletable.add(entry.getName());
                                 break;
+                            // States which are in mid-transition which we can roll back to the committed state
                             case COMMITTED_DELETING:
                             case MERGE:
                                 if (s_logger.isInfoEnabled())
@@ -386,6 +472,7 @@
                                 }
                                 entry.setStatus(TransactionStatus.COMMITTED);
                                 break;
+                            // Complete committing (which is post database commit)
                             case COMMITTING:
                                 // do the commit
                                 if (s_logger.isInfoEnabled())
@@ -395,12 +482,14 @@
                                 entry.setStatus(TransactionStatus.COMMITTED);
                                 mainIndexReader = null;
                                 break;
+                            // States that require no action
                             case COMMITTED:
                             default:
                                 // nothing to do
                                 break;
                             }
                         }
+                        // Delete entries that are not required
                         for (String id : deletable)
                         {
                             indexEntries.remove(id);
@@ -414,6 +503,7 @@
                         {
                             merger.notify();
                         }
+                        // persist the new state
                         writeStatus();
                     }
                     return null;
@@ -444,6 +534,7 @@
             mergerThread.start();
         }
 
+        // Create an empty in memory index
         IndexWriter writer;
         try
         {
@@ -456,6 +547,7 @@
             writer.setWriteLockTimeout(writeLockTimeout);
             writer.setMaxFieldLength(maxFieldLength);
             writer.setTermIndexInterval(termIndexInterval);
+            writer.close();
         }
         catch (IOException e)
         {
@@ -465,7 +557,7 @@
     }
 
     /**
-     * This method should only be called from one thread.
+     * This method should only be called from one thread as it is bound to a transaction.
      *
      * @param id
     * @return
@@ -524,7 +616,7 @@
         // A write lock is required if we have to update the local index entries.
         // There should only be one thread trying to access this delta.
-        File location = new File(indexDirectory, id);
+        File location = new File(indexDirectory, id).getCanonicalFile();
         getReadLock();
         try
         {
@@ -558,24 +650,46 @@
         return location;
     }
 
+    /**
+     * Make a lucene index writer.
+     *
+     * @param location
+     * @param analyzer
+     * @return
+     * @throws IOException
+     */
     private IndexWriter makeDeltaIndexWriter(File location, Analyzer analyzer) throws IOException
     {
+        IndexWriter writer;
         if (!IndexReader.indexExists(location))
         {
-            IndexWriter creator = new IndexWriter(location, analyzer, true);
-            creator.setUseCompoundFile(writerUseCompoundFile);
-            creator.setMaxBufferedDocs(writerMinMergeDocs);
-            creator.setMergeFactor(writerMergeFactor);
-            creator.setMaxMergeDocs(writerMaxMergeDocs);
-            creator.setCommitLockTimeout(commitLockTimeout);
-            creator.setWriteLockTimeout(writeLockTimeout);
-            creator.setMaxFieldLength(maxFieldLength);
-            creator.setTermIndexInterval(termIndexInterval);
-            return creator;
+            writer = new IndexWriter(location, analyzer, true);
         }
-        return null;
+        else
+        {
+            writer = new IndexWriter(location, analyzer, false);
+        }
+        writer.setUseCompoundFile(writerUseCompoundFile);
+        writer.setMaxBufferedDocs(writerMinMergeDocs);
+        writer.setMergeFactor(writerMergeFactor);
+        writer.setMaxMergeDocs(writerMaxMergeDocs);
+        writer.setCommitLockTimeout(commitLockTimeout);
+        writer.setWriteLockTimeout(writeLockTimeout);
+        writer.setMaxFieldLength(maxFieldLength);
+        writer.setTermIndexInterval(termIndexInterval);
+        return writer;
     }
 
+    /**
+     * Manage getting a lucene index writer for transactional data - looks after registration and checking there is no
+     * active reader.
+     *
+     * @param id
+     * @param analyzer
+     * @return
+     * @throws IOException
+     */
     public IndexWriter getDeltaIndexWriter(String id, Analyzer analyzer) throws IOException
     {
         if (id == null)
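makeDeltaIndexWriter now builds the writer for both the create and open cases and applies a single block of settings, where previously the same eight setter calls were duplicated here and in getDeltaIndexWriter (the next hunk deletes that copy). If one wanted to push the cleanup a step further, the shared configuration could live in a helper; a hypothetical sketch, where applyWriterSettings is not part of the commit:

    private void applyWriterSettings(IndexWriter writer)
    {
        // the same settings the commit applies inline, gathered in one place
        writer.setUseCompoundFile(writerUseCompoundFile);
        writer.setMaxBufferedDocs(writerMinMergeDocs);
        writer.setMergeFactor(writerMergeFactor);
        writer.setMaxMergeDocs(writerMaxMergeDocs);
        writer.setCommitLockTimeout(commitLockTimeout);
        writer.setWriteLockTimeout(writeLockTimeout);
        writer.setMaxFieldLength(maxFieldLength);
        writer.setTermIndexInterval(termIndexInterval);
    }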
@@ -591,23 +705,17 @@
             closeDeltaIndexReader(id);
             File location = ensureDeltaIsRegistered(id);
             writer = makeDeltaIndexWriter(location, analyzer);
-            if (writer == null)
-            {
-                writer = new IndexWriter(location, analyzer, false);
-                writer.setUseCompoundFile(writerUseCompoundFile);
-                writer.setMaxBufferedDocs(writerMinMergeDocs);
-                writer.setMergeFactor(writerMergeFactor);
-                writer.setMaxMergeDocs(writerMaxMergeDocs);
-                writer.setCommitLockTimeout(commitLockTimeout);
-                writer.setWriteLockTimeout(writeLockTimeout);
-                writer.setMaxFieldLength(maxFieldLength);
-                writer.setTermIndexInterval(termIndexInterval);
-            }
             indexWriters.put(id, writer);
         }
         return writer;
     }
 
+    /**
+     * Manage closing and unregistering an index reader.
+     *
+     * @param id
+     * @throws IOException
+     */
     public void closeDeltaIndexReader(String id) throws IOException
     {
         if (id == null)
@@ -623,6 +731,12 @@
         }
     }
 
+    /**
+     * Manage closing and unregistering an index writer.
+     *
+     * @param id
+     * @throws IOException
+     */
     public void closeDeltaIndexWriter(String id) throws IOException
     {
         if (id == null)
@@ -638,6 +752,12 @@
         }
     }
 
+    /**
+     * Make sure the writer and reader for TX data are closed.
+     *
+     * @param id
+     * @throws IOException
+     */
     public void closeDelta(String id) throws IOException
     {
         if (id == null)
@@ -648,6 +768,14 @@
         closeDeltaIndexWriter(id);
     }
 
+    /**
+     * Get the deletions for a given index (there is no check whether they should be applied; that is up to the
+     * calling layer).
+     *
+     * @param id
+     * @return
+     * @throws IOException
+     */
     public Set<NodeRef> getDeletions(String id) throws IOException
     {
         if (id == null)
@@ -656,11 +784,11 @@
         }
         // Check state
         Set<NodeRef> deletions = new HashSet<NodeRef>();
-        File location = new File(indexDirectory, id);
-        File file = new File(location, INDEX_INFO_DELETIONS);
+        File location = new File(indexDirectory, id).getCanonicalFile();
+        File file = new File(location, INDEX_INFO_DELETIONS).getCanonicalFile();
         if (!file.exists())
         {
-            if(s_logger.isDebugEnabled())
+            if (s_logger.isDebugEnabled())
             {
                 s_logger.debug("No deletions for " + id);
             }
@@ -674,14 +802,27 @@
             deletions.add(new NodeRef(ref));
         }
         is.close();
-        if(s_logger.isDebugEnabled())
+        if (s_logger.isDebugEnabled())
         {
-            s_logger.debug("There are "+deletions.size()+ " deletions for " + id);
+            s_logger.debug("There are " + deletions.size() + " deletions for " + id);
         }
         return deletions;
     }
 
+    /**
+     * Set the aux data for the index entry for a transactional unit of work.
+     *
+     * @param id -
+     *            the tx id
+     * @param toDelete -
+     *            noderefs that should be deleted from previous indexes (not this one)
+     * @param documents -
+     *            the number of docs in the index
+     * @param deleteNodesOnly -
+     *            should deletions only apply to nodes (i.e. not to containers)
+     * @throws IOException
+     */
     public void setPreparedState(String id, Set<NodeRef> toDelete, long documents, boolean deleteNodesOnly)
             throws IOException
     {
@@ -692,7 +833,7 @@
         // Check state
         if (toDelete.size() > 0)
         {
-            File location = new File(indexDirectory, id);
+            File location = new File(indexDirectory, id).getCanonicalFile();
             if (!location.exists())
             {
                 if (!location.mkdirs())
@@ -700,8 +841,9 @@
                     throw new IndexerException("Failed to make index directory " + location);
                 }
             }
+            // Write deletions
             DataOutputStream os = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(new File(location,
-                    INDEX_INFO_DELETIONS))));
+                    INDEX_INFO_DELETIONS).getCanonicalFile())));
             os.writeInt(toDelete.size());
             for (NodeRef ref : toDelete)
             {
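Between this hunk and the getDeletions hunk above, the on-disk layout of the IndexInfoDeletions file is visible: an int count followed by one string-serialised NodeRef per deletion. A self-contained sketch of the read side under that assumption (readUTF is inferred, since only writeInt appears in the diff, and plain Strings stand in for NodeRefs to avoid the Alfresco dependency):

    import java.io.BufferedInputStream;
    import java.io.DataInputStream;
    import java.io.File;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.util.HashSet;
    import java.util.Set;

    class DeletionsFileSketch
    {
        static Set<String> readDeletions(File file) throws IOException
        {
            Set<String> refs = new HashSet<String>();
            DataInputStream is = new DataInputStream(new BufferedInputStream(new FileInputStream(file)));
            try
            {
                int size = is.readInt(); // count written by setPreparedState
                for (int i = 0; i < size; i++)
                {
                    refs.add(is.readUTF()); // the real code rebuilds a NodeRef from each string
                }
            }
            finally
            {
                is.close();
            }
            return refs;
        }
    }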
@@ -733,11 +875,19 @@
         }
     }
 
+    /**
+     * Get the main reader for committed index data.
+     *
+     * @return
+     * @throws IOException
+     */
     public IndexReader getMainIndexReferenceCountingReadOnlyIndexReader() throws IOException
     {
         getReadLock();
         try
         {
+            // Check if we need to rebuild the main indexer as it is invalid.
+            // (it is shared and quick version check fails)
             if (indexIsShared && !checkVersion())
             {
                 releaseReadLock();
@@ -753,6 +903,7 @@
                 }
             }
 
+            // Build if required
             if (mainIndexReader == null)
             {
                 releaseReadLock();
@@ -780,6 +931,7 @@
                     releaseWriteLock();
                 }
             }
+            // Manage reference counting
             ReferenceCounting refCount = (ReferenceCounting) mainIndexReader;
             refCount.incrementReferenceCount();
             if (s_logger.isDebugEnabled())
@@ -794,6 +946,15 @@
         }
     }
 
+    /**
+     * Get the main index reader augmented with the specified TX data. As above, but we add the TX data.
+     *
+     * @param id
+     * @param deletions
+     * @param deleteOnlyNodes
+     * @return
+     * @throws IOException
+     */
     public IndexReader getMainIndexReferenceCountingReadOnlyIndexReader(String id, Set<NodeRef> deletions,
             boolean deleteOnlyNodes) throws IOException
     {
@@ -965,6 +1126,9 @@
     }
 
+    /**
+     * Initialise the definitions for the available transitions.
+     */
     private void initialiseTransitions()
     {
@@ -978,6 +1142,11 @@
         transitions.put(TransactionStatus.ACTIVE, new ActiveTransition());
     }
 
+    /**
+     * API for transitions.
+     *
+     * @author andyh
+     */
     private interface Transition
     {
         void beforeWithReadLock(String id, Set<Term> toDelete, Set<Term> read) throws IOException;
@@ -987,11 +1156,16 @@
         boolean requiresFileLock();
     }
 
+    /**
+     * Transition to the preparing state.
+     *
+     * @author andyh
+     */
     private class PreparingTransition implements Transition
     {
         public void beforeWithReadLock(String id, Set<Term> toDelete, Set<Term> read) throws IOException
         {
+            // Nothing to do
         }
 
         public void transition(String id, Set<Term> toDelete, Set<Term> read) throws IOException
@@ -1019,6 +1193,11 @@
         }
     }
 
+    /**
+     * Transition to the prepared state.
+     *
+     * @author andyh
+     */
     private class PreparedTransition implements Transition
     {
         public void beforeWithReadLock(String id, Set<Term> toDelete, Set<Term> read) throws IOException
@@ -1396,17 +1575,17 @@
             {
                 if (!indexEntries.containsKey(id))
                 {
-                    if(s_logger.isDebugEnabled())
+                    if (s_logger.isDebugEnabled())
                     {
-                        s_logger.debug(id+ " is now INVALID ");
+                        s_logger.debug(id + " is now INVALID ");
                     }
                     inValid.add(id);
                 }
                 else
                 {
-                    if(s_logger.isDebugEnabled())
+                    if (s_logger.isDebugEnabled())
                     {
-                        s_logger.debug(id+ " is still part of the index ");
+                        s_logger.debug(id + " is still part of the index ");
                     }
                 }
             }
@@ -1484,7 +1663,7 @@
         IndexReader reader = referenceCountingReadOnlyIndexReaders.get(id);
         if (reader == null)
         {
-            File location = new File(indexDirectory, id);
+            File location = new File(indexDirectory, id).getCanonicalFile();
             if (IndexReader.indexExists(location))
             {
                 reader = IndexReader.open(location);
@@ -1902,11 +2081,14 @@
             IndexReader reader;
             while ((reader = deletableReaders.poll()) != null)
             {
-                ReferenceCounting refCounting = (ReferenceCounting)reader;
-                if(refCounting.getReferenceCount() == 0)
+                ReferenceCounting refCounting = (ReferenceCounting) reader;
+                if (refCounting.getReferenceCount() == 0)
                 {
-                    s_logger.debug("Deleting no longer referenced "+refCounting.getId());
-                    s_logger.debug("... queued delete for "+refCounting.getId());
+                    if (s_logger.isDebugEnabled())
+                    {
+                        s_logger.debug("Deleting no longer referenced " + refCounting.getId());
+                        s_logger.debug("... queued delete for " + refCounting.getId());
+                    }
                     deleteQueue.add(refCounting.getId());
                 }
                 else
@@ -1915,24 +2097,32 @@
                 }
             }
             deletableReaders.addAll(waiting);
 
            String id = null;
             HashSet<String> fails = new HashSet<String>();
             while ((id = deleteQueue.poll()) != null)
             {
-                if (s_logger.isDebugEnabled())
-                {
-                    s_logger.debug("Expunging " + id + " remaining " + deleteQueue.size());
-                }
-                // try and delete
-                File location = new File(indexDirectory, id);
-                if (!deleteDirectory(location))
-                {
-                    if (s_logger.isDebugEnabled())
-                    {
-                        s_logger.debug("DELETE FAILED");
-                    }
-                    // try again later
-                    fails.add(id);
-                }
+                try
+                {
+                    if (s_logger.isDebugEnabled())
+                    {
+                        s_logger.debug("Expunging " + id + " remaining " + deleteQueue.size());
+                    }
+                    // try and delete
+                    File location = new File(indexDirectory, id).getCanonicalFile();
+                    if (!deleteDirectory(location))
+                    {
+                        if (s_logger.isDebugEnabled())
+                        {
+                            s_logger.debug("DELETE FAILED");
+                        }
+                        // try again later
+                        fails.add(id);
+                    }
+                }
+                catch (IOException ioe)
+                {
+                    s_logger.warn("Failed to delete file - invalid canonical file", ioe);
+                    fails.add(id);
+                }
             }
         }
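Read together, the last two hunks show the Cleaner's two-stage design: readers are polled off deletableReaders and promoted to deleteQueue only once their reference count reaches zero; the directory delete itself may still fail (files can be held open or mapped), in which case the id lands in fails and is retried on a later pass. A condensed restatement of the first stage, with the waiting set (visible above only as deletableReaders.addAll(waiting)) filled in:

    // Stage one: still-referenced readers are parked and re-queued for a later pass.
    HashSet<IndexReader> waiting = new HashSet<IndexReader>();
    IndexReader reader;
    while ((reader = deletableReaders.poll()) != null)
    {
        ReferenceCounting refCounting = (ReferenceCounting) reader;
        if (refCounting.getReferenceCount() == 0)
        {
            deleteQueue.add(refCounting.getId()); // stage two may now delete the files
        }
        else
        {
            waiting.add(reader); // a searcher still holds this data
        }
    }
    deletableReaders.addAll(waiting);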
@@ -2123,7 +2313,10 @@
                 {
                     try
                     {
-                        this.wait();
+                        if (action == MergeAction.NONE)
+                        {
+                            this.wait();
+                        }
                     }
                     catch (InterruptedException e)
                     {
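The guard added around this.wait() stops the merger from parking itself when an action was already queued before it re-took the monitor; it re-checks the condition instead of assuming a notify is still coming. The textbook form of the idiom re-checks in a loop, since waits can also wake spuriously; a sketch under that assumption (awaitAction is a hypothetical helper, while action and MergeAction.NONE are taken from the hunk):

    private synchronized void awaitAction() throws InterruptedException
    {
        while (action == MergeAction.NONE) // loop, not just if: also guards against spurious wakeups
        {
            wait();
        }
    }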
@@ -2232,7 +2425,7 @@
             LinkedHashMap<String, IndexReader> readers = new LinkedHashMap<String, IndexReader>();
             for (IndexEntry entry : indexes.values())
             {
-                File location = new File(indexDirectory, entry.getName());
+                File location = new File(indexDirectory, entry.getName()).getCanonicalFile();
                 IndexReader reader;
                 if (IndexReader.indexExists(location))
                 {
@@ -2286,7 +2479,7 @@
                 }
             }
 
-            File location = new File(indexDirectory, currentDelete.getName());
+            File location = new File(indexDirectory, currentDelete.getName()).getCanonicalFile();
             IndexReader reader;
             if (IndexReader.indexExists(location))
             {
@@ -2507,7 +2700,7 @@
             File outputLocation = null;
             for (IndexEntry entry : toMerge.values())
             {
-                File location = new File(indexDirectory, entry.getName());
+                File location = new File(indexDirectory, entry.getName()).getCanonicalFile();
                 if (entry.getStatus() == TransactionStatus.MERGE)
                 {
                     IndexReader reader;
@@ -2637,7 +2830,10 @@
                 // Only delete if there is no existing ref counting reader
                 if (!referenceCountingReadOnlyIndexReaders.containsKey(id))
                 {
-                    s_logger.debug("... queued delete for "+id);
+                    if (s_logger.isDebugEnabled())
+                    {
+                        s_logger.debug("... queued delete for " + id);
+                    }
                     deleteQueue.add(id);
                 }
             }
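A recurring edit in this commit (here and in the Cleaner hunk) wraps s_logger.debug in an isDebugEnabled() check. The point is cost rather than correctness: the caller builds the argument string, concatenation included, before debug() can test the level, so the guard skips that work entirely when DEBUG is off. A minimal self-contained form of the pattern with a log4j 1.x Logger, as the class uses:

    import org.apache.log4j.Logger;

    class GuardedLogging
    {
        private static Logger s_logger = Logger.getLogger(GuardedLogging.class);

        void queueDelete(String id)
        {
            if (s_logger.isDebugEnabled())
            {
                // the concatenation only happens when DEBUG is actually enabled
                s_logger.debug("... queued delete for " + id);
            }
        }
    }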