Merged BRANCHES/DEV/V3.4-BUG-FIX to HEAD

28236: ALF-8810: Removed trailing space from discussion.discussion_for Italian translation
   28241: Incremented version revision for 3.4.4
   28284: ALF-835 - WCM/AVM: copy (empty) folder into itself
   28285: ALF-6863: More than one cifs device breaks the web UI (explorer)
   28290: ALF-8840: user-*.atomentry.ftl
   28291: ALF-6863: Continuation of fix by Arseny
   28336: ALF-8768: Fixed typo in a comment in wcm-bootstrap-context.xml
   28363: Merged DEV to V3.4-BUG-FIX
      28262: ALF-8847: WCM: OrphanReaper contention throws error after 39 retries.
             Checkin Comment:
                Use JobLockService to make sure that only one OrphanReaper job is working.
                Generate list of nodes that must be processed in OrphanReaper.doBatch() transaction. 
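
             A minimal sketch of that locking pattern, assuming a hypothetical lock
             name, TTL and class shape (JobLockService.getLock/releaseLock and
             LockAcquisitionException are the real Alfresco API):

                import org.alfresco.repo.lock.JobLockService;
                import org.alfresco.repo.lock.LockAcquisitionException;
                import org.alfresco.service.namespace.NamespaceService;
                import org.alfresco.service.namespace.QName;

                public class OrphanReaperLockSketch
                {
                    // Hypothetical lock name and time-to-live
                    private static final QName LOCK_QNAME = QName.createQName(
                            NamespaceService.SYSTEM_MODEL_1_0_URI, "OrphanReaper");
                    private static final long LOCK_TTL_MS = 60000L;

                    private JobLockService jobLockService;

                    public void executePass()
                    {
                        String lockToken;
                        try
                        {
                            // Fails if another cluster node already holds the lock
                            lockToken = jobLockService.getLock(LOCK_QNAME, LOCK_TTL_MS);
                        }
                        catch (LockAcquisitionException e)
                        {
                            return; // another OrphanReaper is working; skip this run
                        }
                        try
                        {
                            doBatch(); // placeholder for the real batch work
                        }
                        finally
                        {
                            jobLockService.releaseLock(lockToken, LOCK_QNAME);
                        }
                    }

                    private void doBatch() { /* process the pre-generated node list */ }
                }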
   28386: ALF-9100: Merged PATCHES/V3.4.1 to V3.4-BUG-FIX
      28249: ALF-8946: Avoid one full table scan per batch in full reindex
         - Now each batch scans a single time sample, dynamically adjusted based on the number
           of transactions in the previous sample, always aiming for 1000 transactions per sample.
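           (Illustrative arithmetic, using the constants from the patch below: if the previous
           sample spanned 60 000 ms and contained 3 000 transactions, the next window is
           max(10 000, 1000 * 60 000 / 3 000) = 20 000 ms, so dense periods shrink the window
           and sparse periods stretch it, with a 10-second floor.)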
   28394: Fixed ALF-9090: NPE during inter-cluster subsystem messaging
    - Bean ID is a List<String> and might not be recognized on receiving machine
    - Log warning when bean ID is not available (unsymmetrical configuration, perhaps?)
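
    A hedged sketch of the receive-side guard (method shape, message key and logger
    are illustrative; containsBean() is the standard Spring check):

        import java.util.List;
        import org.apache.commons.logging.Log;
        import org.apache.commons.logging.LogFactory;
        import org.springframework.context.ApplicationContext;

        public class SubsystemMessageGuardSketch
        {
            private static final Log logger =
                    LogFactory.getLog(SubsystemMessageGuardSketch.class);

            private ApplicationContext applicationContext;

            // The bean ID arrives over the wire as a List<String>
            public void onMessage(List<String> beanId)
            {
                String beanName = (beanId == null || beanId.isEmpty())
                        ? null : beanId.get(beanId.size() - 1);
                if (beanName == null || !applicationContext.containsBean(beanName))
                {
                    logger.warn("Ignoring cluster message for unknown bean " + beanId
                            + " (unsymmetrical configuration, perhaps?)");
                    return; // previously this fell through and caused the NPE
                }
                // ... dispatch to the named subsystem bean ...
            }
        }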
   28396: Merged DEV to V3.4-BUG-FIX
      28384: ALF-6150: Initial state lost when non-versionable document is saved for the first time
             Creation of a new document version before its content is written was added to:
             - AbstractAlfrescoMethodHandler->putDocument (used by Office 2003 and 2007)
             - VtiIfHeaderAction->doPut (used by Office 2007 and 2010 on Windows 7)
             The version creation appears twice in AbstractAlfrescoMethodHandler to avoid affecting
             the initial version when the transaction is committed.
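
             A minimal sketch of the added step, assuming a hypothetical helper name
             (VersionService.createVersion and the versionable-aspect check are the
             real Alfresco API):

                import java.io.Serializable;
                import java.util.HashMap;
                import java.util.Map;
                import org.alfresco.model.ContentModel;
                import org.alfresco.repo.version.VersionModel;
                import org.alfresco.service.cmr.repository.NodeRef;
                import org.alfresco.service.cmr.repository.NodeService;
                import org.alfresco.service.cmr.version.VersionService;
                import org.alfresco.service.cmr.version.VersionType;

                public class VersionBeforeWriteSketch
                {
                    private NodeService nodeService;
                    private VersionService versionService;

                    // Snapshot the current state before the new content overwrites it
                    public void createVersionBeforeWrite(NodeRef nodeRef)
                    {
                        if (nodeService.hasAspect(nodeRef, ContentModel.ASPECT_VERSIONABLE))
                        {
                            Map<String, Serializable> props =
                                    new HashMap<String, Serializable>(1);
                            props.put(VersionModel.PROP_VERSION_TYPE, VersionType.MINOR);
                            versionService.createVersion(nodeRef, props);
                        }
                    }
                }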
   28432: Merged DEV to V3.4-BUG-FIX
      28431: ALF-8530: Pressing the info icon creates an unrecorded file in the ContentStore
             Use ContentService.getTempWriter() in BaseContentNode$TemplateContentData.getContentAsText() method. 
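
             The point of getTempWriter(): a writer obtained this way is backed by
             temporary space rather than the ContentStore, so rendering content to
             text no longer leaves an unrecorded file behind. A sketch (class shape
             is illustrative; the ContentService calls are the real API):

                import org.alfresco.model.ContentModel;
                import org.alfresco.repo.content.MimetypeMap;
                import org.alfresco.service.cmr.repository.ContentReader;
                import org.alfresco.service.cmr.repository.ContentService;
                import org.alfresco.service.cmr.repository.ContentWriter;
                import org.alfresco.service.cmr.repository.NodeRef;

                public class ContentAsTextSketch
                {
                    private ContentService contentService;

                    public String getContentAsText(NodeRef nodeRef)
                    {
                        ContentReader reader =
                                contentService.getReader(nodeRef, ContentModel.PROP_CONTENT);
                        ContentWriter writer = contentService.getTempWriter();
                        writer.setMimetype(MimetypeMap.MIMETYPE_TEXT_PLAIN);
                        writer.setEncoding("UTF-8");
                        contentService.transform(reader, writer);
                        return writer.getReader().getContentString();
                    }
                }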
   28435: Merged DEV/TEMPORARY to V3.4-BUG-FIX
      28428: ALF-9015: cm:modifier not updated when document is updated via CIFS
         In ContentDiskDriver.closeFile() added ContentModel.PROP_MODIFIER property update.
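
         The likely shape of that addition (surrounding CIFS code omitted; the property
         update itself is the real API — apparently the CIFS write path did not trigger
         the usual cm:auditable update, so cm:modifier is set explicitly):

            import org.alfresco.model.ContentModel;
            import org.alfresco.repo.security.authentication.AuthenticationUtil;
            import org.alfresco.service.cmr.repository.NodeRef;
            import org.alfresco.service.cmr.repository.NodeService;

            public class CifsModifierUpdateSketch
            {
                private NodeService nodeService;

                // Called from closeFile() after the content has been written
                public void updateModifier(NodeRef nodeRef)
                {
                    String userName = AuthenticationUtil.getFullyAuthenticatedUser();
                    nodeService.setProperty(nodeRef, ContentModel.PROP_MODIFIER, userName);
                }
            }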
   28436: ALF-8550: Number of http requests (currentThreadsBusy) increases when session times out during creation of webform
   - Corrected use of read and write locks
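
    A generic sketch of the lock discipline at issue (hypothetical cache and loader;
    not the actual patch): ReentrantReadWriteLock cannot upgrade a read lock to a
    write lock, so a thread that tries blocks forever and its HTTP request stays busy.

        import java.util.HashMap;
        import java.util.Map;
        import java.util.concurrent.locks.ReentrantReadWriteLock;

        public class GuardedCacheSketch<K, V>
        {
            private final Map<K, V> cache = new HashMap<K, V>();
            private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

            public V get(K key)
            {
                lock.readLock().lock();
                try
                {
                    V value = cache.get(key);
                    if (value != null)
                    {
                        return value;
                    }
                }
                finally
                {
                    lock.readLock().unlock(); // release before taking the write lock
                }
                lock.writeLock().lock();
                try
                {
                    V value = cache.get(key); // re-check: another thread may have won
                    if (value == null)
                    {
                        value = load(key);
                        cache.put(key, value);
                    }
                    return value;
                }
                finally
                {
                    lock.writeLock().unlock();
                }
            }

            protected V load(K key) { return null; /* placeholder loader */ }
        }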
   28465: Fix for ALF-8023: Share preview doesn't work if...
      Fixed as outlined by Dmitry.
   28478: Merged BRANCHES/DEV/ALAN/AUDIT to BRANCHES/DEV/V3.4-BUG-FIX:
      28062-28477 (28062,28063,28080,28081,28302,28303,28334,28340,28464,28469,28477) ALF-8438 Need higher level audit of user actions

git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/alfresco/HEAD/root@28481 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
Author: Alan Davis
Date:   2011-06-20 12:42:10 +00:00
Parent: 1714397cac
Commit: 29f7f5d073
81 changed files with 3808 additions and 418 deletions

org/alfresco/repo/node/index/FullIndexRecoveryComponent.java

@@ -19,7 +19,6 @@
 package org.alfresco.repo.node.index;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.Date;
 import java.util.Iterator;
 import java.util.List;
@@ -319,6 +318,7 @@ public class FullIndexRecoveryComponent extends AbstractReindexComponent
 }
 private static final int MAX_TRANSACTIONS_PER_ITERATION = 1000;
+private static final long MIN_SAMPLE_TIME = 10000L;
 private void performFullRecovery()
 {
 RetryingTransactionCallback<Void> deleteWork = new RetryingTransactionCallback<Void>()
@@ -346,11 +346,19 @@ public class FullIndexRecoveryComponent extends AbstractReindexComponent
 // count the transactions
 int processedCount = 0;
-long fromTimeInclusive = Long.MIN_VALUE;
-long toTimeExclusive = Long.MAX_VALUE;
-List<Long> lastTxnIds = Collections.<Long>emptyList();
+long fromTimeInclusive = nodeDAO.getMinTxnCommitTime();
+long maxToTimeExclusive = nodeDAO.getMaxTxnCommitTime() + 1;
+// Our first sample will be 10 seconds long (as we often hit 'fake' transactions with time zero). We'll rebalance intervals from there...
+long toTimeExclusive = fromTimeInclusive + MIN_SAMPLE_TIME;
+long sampleStartTimeInclusive = fromTimeInclusive;
+long sampleEndTimeExclusive = -1;
+long txnsPerSample = 0;
+List<Long> lastTxnIds = new ArrayList<Long>(MAX_TRANSACTIONS_PER_ITERATION);
 while(true)
 {
+boolean startedSampleForQuery = false;
 List<Transaction> nextTxns = nodeDAO.getTxnsByCommitTimeAscending(
 fromTimeInclusive,
 toTimeExclusive,
@@ -358,7 +366,16 @@ public class FullIndexRecoveryComponent extends AbstractReindexComponent
 lastTxnIds,
 false);
-lastTxnIds = new ArrayList<Long>(nextTxns.size());
+// have we finished?
+if (nextTxns.size() == 0)
+{
+if (toTimeExclusive >= maxToTimeExclusive)
+{
+// there are no more
+break;
+}
+}
 // reindex each transaction
 List<Long> txnIdBuffer = new ArrayList<Long>(maxTransactionsPerLuceneCommit);
 Iterator<Transaction> txnIterator = nextTxns.iterator();
@@ -366,8 +383,27 @@ public class FullIndexRecoveryComponent extends AbstractReindexComponent
 {
 Transaction txn = txnIterator.next();
 Long txnId = txn.getId();
-// Keep it to ensure we exclude it from the next iteration
-lastTxnIds.add(txnId);
+// Remember the IDs of the last simultaneous transactions so they can be excluded from the next query
+long txnCommitTime = txn.getCommitTimeMs();
+if (lastTxnIds.isEmpty() || txnCommitTime != fromTimeInclusive)
+{
+if (!startedSampleForQuery)
+{
+sampleStartTimeInclusive = txnCommitTime;
+sampleEndTimeExclusive = -1;
+txnsPerSample = 0;
+startedSampleForQuery = true;
+}
+else
+{
+txnsPerSample += lastTxnIds.size();
+sampleEndTimeExclusive = txnCommitTime;
+}
+lastTxnIds.clear();
+fromTimeInclusive = txnCommitTime;
+}
+lastTxnIds.add(txnId);
 // check if we have to terminate
 if (isShuttingDown())
 {
@@ -399,11 +435,8 @@ public class FullIndexRecoveryComponent extends AbstractReindexComponent
 // Clear the buffer
 txnIdBuffer = new ArrayList<Long>(maxTransactionsPerLuceneCommit);
 }
 }
-// Although we use the same time as this transaction for the next iteration, we also
-// make use of the exclusion list to ensure that it doesn't get pulled back again.
-fromTimeInclusive = txn.getCommitTimeMs();
 }
 // dump a progress report every 10% of the way
 double before = (double) processedCount / (double) txnCount * 10.0; // 0 - 10
 processedCount++;
@@ -419,12 +452,31 @@ public class FullIndexRecoveryComponent extends AbstractReindexComponent
 // Wait for the asynchronous process to catch up
 waitForAsynchronousReindexing();
-// have we finished?
-if (nextTxns.size() == 0)
+// Move the start marker on and extend the sample time if we have completed results
+if (nextTxns.size() < MAX_TRANSACTIONS_PER_ITERATION)
 {
-// there are no more
-break;
+// Move past the query end
+if (!lastTxnIds.isEmpty())
+{
+txnsPerSample += lastTxnIds.size();
+lastTxnIds.clear();
+}
+fromTimeInclusive = toTimeExclusive;
+sampleEndTimeExclusive = toTimeExclusive;
 }
+// Move the end marker on based on the current transaction rate
+long sampleTime;
+if (txnsPerSample == 0)
+{
+sampleTime = MIN_SAMPLE_TIME;
+}
+else
+{
+sampleTime = Math.max(MIN_SAMPLE_TIME, MAX_TRANSACTIONS_PER_ITERATION
+* (sampleEndTimeExclusive - sampleStartTimeInclusive) / txnsPerSample);
+}
+toTimeExclusive = fromTimeInclusive + sampleTime;
 }
 // done
 String msgDone = I18NUtil.getMessage(MSG_RECOVERY_COMPLETE);