Merged V3.1 to HEAD

    14326: Fix ETHREEOH-2091 - version migrator should not skip subsequent nodes (after error), also add unit tests
    14420: Fix ETHREEOH-1832 - VersionMigrator optimisations (disable indexing of version2Store, if first migration then don't check if already migrated)


git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/alfresco/HEAD/root@14961 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
Author: Jan Vonka
Date:   2009-06-26 13:26:49 +00:00
parent 6763cfabc1
commit b527d6d66c
4 changed files with 274 additions and 67 deletions
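
Before the diffs, a rough illustration of the behaviour the two fixes aim for. This is not the Alfresco code: BatchMigratorSketch, migrateOne and alreadyMigrated are hypothetical stand-ins (plain Java predicates in place of the node service and the retrying transaction helper). It only sketches the batching pattern the patch adopts: a failed batch is counted and cleared so later batches still run (ETHREEOH-2091), and the already-migrated check is bypassed when the new version store is still empty (ETHREEOH-1832).

// A minimal, self-contained sketch with hypothetical names; not the Alfresco API.
import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;

public class BatchMigratorSketch
{
    /** Returns the number of histories migrated; prints a summary like the patch's log line. */
    public static int migrate(List<String> histories, int batchSize, boolean firstMigration,
                              Predicate<String> alreadyMigrated, Predicate<String> migrateOne)
    {
        int migrated = 0, failed = 0, skipped = 0;
        List<String> batch = new ArrayList<>(batchSize);

        for (int i = 0; i < histories.size(); i++)
        {
            String vh = histories.get(i);

            // ETHREEOH-1832: on the first migration the new store is empty,
            // so the "already migrated" lookup can be skipped entirely.
            if (!firstMigration && alreadyMigrated.test(vh))
            {
                skipped++;
            }
            else if (batch.size() < batchSize)
            {
                batch.add(vh);
            }

            boolean lastItem = (i == histories.size() - 1);
            if (!batch.isEmpty() && (batch.size() == batchSize || lastItem))
            {
                try
                {
                    // The real patch runs this loop inside one retrying transaction;
                    // migrateOne stands in for migrating a single version history.
                    for (String item : batch)
                    {
                        if (!migrateOne.test(item))
                        {
                            throw new IllegalStateException("migration failed for " + item);
                        }
                    }
                    migrated += batch.size();
                }
                catch (RuntimeException e)
                {
                    // ETHREEOH-2091: count the whole batch as failed and carry on with
                    // the next batch, instead of aborting the rest of the run.
                    failed += batch.size();
                }
                batch.clear();
            }
        }

        System.out.println("Completed migration of " + migrated + " (out of "
                + (histories.size() - skipped) + ") version histories; failed=" + failed);
        return migrated;
    }

    public static void main(String[] args)
    {
        // With batch size 2 and a failure on "vh3", only that batch of two is lost;
        // "vh5" in the next batch is still migrated.
        migrate(List.of("vh1", "vh2", "vh3", "vh4", "vh5"), 2, true, vh -> false,
                vh -> !vh.equals("vh3"));
    }
}

Running main prints "Completed migration of 3 (out of 5) version histories; failed=2", which mirrors the non-aborting behaviour that test_ETHREEOH_2091 below exercises against the real repository.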

View File

@@ -476,6 +476,9 @@
             <entry key="workspace://lightWeightVersionStore">
                 <ref bean="admLuceneUnIndexedIndexerAndSearcherFactory"></ref>
             </entry>
+            <entry key="workspace://version2Store">
+                <ref bean="admLuceneUnIndexedIndexerAndSearcherFactory"></ref>
+            </entry>
         </map>
     </property>
 </bean>

View File

@@ -9,7 +9,7 @@ version_service.err_revert_mismatch=The version provided to revert to does not c
 version_service.migration.patch.noop=Nothing to do (no version histories found in old version store)
-version_service.migration.patch.complete=Completed migration of {0} old version histories (to new version store) in {1} secs
+version_service.migration.patch.complete=Completed migration of {0} (out of {1}) old version histories (to new version store) in {2} secs
 version_service.migration.patch.warn.skip1=Skipped migration of {0} old version histories (migrate failed)
 version_service.migration.patch.warn.skip2=Skipped migration of {0} old version histories (already migrated)

View File

@@ -302,7 +302,9 @@ public class VersionMigrator
     public int migrateVersions(final int batchSize, final boolean deleteImmediately)
     {
         final NodeRef oldRootNodeRef = dbNodeService.getRootNode(VersionMigrator.VERSION_STORE_REF_OLD);
+        final NodeRef newRootNodeRef = dbNodeService.getRootNode(VersionMigrator.VERSION_STORE_REF_NEW);

+        long splitTime = System.currentTimeMillis();
         final List<ChildAssociationRef> childAssocRefs = getVersionHistories(oldRootNodeRef);
         int toDo = childAssocRefs.size();
@@ -313,9 +315,21 @@ public class VersionMigrator
             return 0;
         }

-        if (logger.isDebugEnabled())
+        if (logger.isInfoEnabled())
         {
-            logger.debug("Found "+toDo+" version histories in old version store");
+            logger.info("Found "+childAssocRefs.size()+" version histories in old version store (in "+((System.currentTimeMillis()-splitTime)/1000)+" secs)");
+        }
+
+        splitTime = System.currentTimeMillis();
+        final List<ChildAssociationRef> newChildAssocRefs = getVersionHistories(newRootNodeRef);
+        final boolean firstMigration = (newChildAssocRefs.size() == 0);
+
+        if (logger.isInfoEnabled())
+        {
+            if (! firstMigration)
+            {
+                logger.warn("This is not the first migration attempt. Found "+newChildAssocRefs.size()+" version histories in new version store (in "+((System.currentTimeMillis()-splitTime)/1000)+" secs)");
+            }
         }

         // note: assumes patch runs before cleanup starts
@@ -333,7 +347,6 @@ public class VersionMigrator
         try
         {
-            int batchCount = 0;
             int totalCount = 0;

             final List<NodeRef> tmpBatch = new ArrayList<NodeRef>(batchSize);
@@ -343,80 +356,89 @@ public class VersionMigrator
                 reportProgress(MSG_PATCH_PROGRESS, toDo, totalCount);
                 totalCount++;

-                if (((String)dbNodeService.getProperty(childAssocRef.getChildRef(), ContentModel.PROP_NAME)).startsWith(VersionMigrator.PREFIX_MIGRATED))
-                {
-                    // skip - already migrated
-                    alreadyMigratedCount++;
-                    continue;
-                }
-
-                if (batchCount < batchSize)
-                {
-                    tmpBatch.add(childAssocRef.getChildRef());
-                    batchCount++;
-                }
-
-                if ((batchCount == batchSize) || (totalCount == childAssocRefs.size()))
-                {
-                    while (tmpBatch.size() != 0)
-                    {
-                        txHelper.setMaxRetries(1);
-                        NodeRef failed = txHelper.doInTransaction(new RetryingTransactionCallback<NodeRef>()
-                        {
-                            public NodeRef execute() throws Throwable
-                            {
-                                if (logger.isTraceEnabled())
-                                {
-                                    logger.trace("Attempt to migrate batch of "+tmpBatch.size()+" version histories");
-                                }
-
-                                long startTime = System.currentTimeMillis();
-
-                                for (NodeRef oldVHNodeRef : tmpBatch)
-                                {
-                                    try
-                                    {
-                                        NodeRef versionedNodeRef = v1GetVersionedNodeRef(oldVHNodeRef);
-                                        migrateVersionHistory(oldVHNodeRef, versionedNodeRef);
-
-                                        if (deleteImmediately)
-                                        {
-                                            // delete old version history node
-                                            v1DeleteVersionHistory(oldVHNodeRef);
-                                        }
-                                        else
-                                        {
-                                            // mark old version history node for later cleanup
-                                            v1MarkVersionHistory(oldVHNodeRef);
-                                        }
-                                    }
-                                    catch (Throwable t)
-                                    {
-                                        logger.error("Skipping migration of: " + oldVHNodeRef, t);
-                                        return oldVHNodeRef;
-                                    }
-                                }
-
-                                if (logger.isDebugEnabled())
-                                {
-                                    logger.debug("Migrated batch of "+tmpBatch.size()+" version histories in "+(System.currentTimeMillis()-startTime)+ " ms");
-                                }
-
-                                return null;
-                            }
-                        }, false, true);
-
-                        if (failed != null)
-                        {
-                            tmpBatch.remove(failed); // retry batch without the failed node
-                            failCount++;
-                        }
-                        else
-                        {
-                            vhCount = vhCount + tmpBatch.size();
-                            tmpBatch.clear();
-                            batchCount = 0;
+                // short-cut if first migration
+                if (!firstMigration)
+                {
+                    if (isMigrated(childAssocRef))
+                    {
+                        // skip - already migrated
+                        alreadyMigratedCount++;
+                        continue;
+                    }
+                }
+
+                if (tmpBatch.size() < batchSize)
+                {
+                    tmpBatch.add(childAssocRef.getChildRef());
+                }
+
+                if ((tmpBatch.size() == batchSize) || (totalCount == childAssocRefs.size()))
+                {
+                    while (tmpBatch.size() != 0)
+                    {
+                        txHelper.setMaxRetries(1);
+                        try
+                        {
+                            txHelper.doInTransaction(new RetryingTransactionCallback<NodeRef>()
+                            {
+                                public NodeRef execute() throws Throwable
+                                {
+                                    if (logger.isTraceEnabled())
+                                    {
+                                        logger.trace("Attempt to migrate batch of "+tmpBatch.size()+" version histories");
+                                    }
+
+                                    long startTime = System.currentTimeMillis();
+
+                                    for (NodeRef oldVHNodeRef : tmpBatch)
+                                    {
+                                        try
+                                        {
+                                            NodeRef versionedNodeRef = v1GetVersionedNodeRef(oldVHNodeRef);
+                                            migrateVersionHistory(oldVHNodeRef, versionedNodeRef);
+
+                                            if (deleteImmediately)
+                                            {
+                                                // delete old version history node
+                                                v1DeleteVersionHistory(oldVHNodeRef);
+                                            }
+                                            else
+                                            {
+                                                // mark old version history node for later cleanup
+                                                v1MarkVersionHistory(oldVHNodeRef);
+                                            }
+                                        }
+                                        catch (Throwable t)
+                                        {
+                                            logger.error("Skipping migration of: " + oldVHNodeRef, t);
+                                            throw t;
+                                        }
+                                    }
+
+                                    if (logger.isDebugEnabled())
+                                    {
+                                        logger.debug("Migrated batch of "+tmpBatch.size()+" version histories in "+(System.currentTimeMillis()-startTime)+ " ms");
+                                    }
+
+                                    return null;
+                                }
+                            }, false, true);
+
+                            // batch successful
+                            vhCount = vhCount + tmpBatch.size();
+                            tmpBatch.clear();
+                        }
+                        catch (Throwable t)
+                        {
+                            // TODO if batchSize > 1 then could switch into batchSize=1 mode, and re-try one-by-one
+                            // in theory, could fail on commit (although integrity checks are disabled by default) hence don't know which nodes failed
+                            logger.error("Skipping migration of batch size ("+tmpBatch.size()+"): "+t);
+
+                            // batch failed
+                            failCount = failCount + tmpBatch.size();
+                            tmpBatch.clear();
                         }
                     }
                 }
@@ -437,7 +459,16 @@ public class VersionMigrator
                 logger.warn(I18NUtil.getMessage(MSG_PATCH_SKIP2, alreadyMigratedCount));
             }

-            logger.info(I18NUtil.getMessage(MSG_PATCH_COMPLETE, vhCount, ((System.currentTimeMillis()-startTime)/1000)));
+            toDo = toDo - alreadyMigratedCount;
+
+            if (vhCount != toDo)
+            {
+                logger.warn(I18NUtil.getMessage(MSG_PATCH_COMPLETE, vhCount, toDo, ((System.currentTimeMillis()-startTime)/1000)));
+            }
+            else
+            {
+                logger.info(I18NUtil.getMessage(MSG_PATCH_COMPLETE, vhCount, toDo, ((System.currentTimeMillis()-startTime)/1000)));
+            }

             return vhCount;
         }
@@ -492,7 +523,7 @@ public class VersionMigrator
                 reportProgress(MSG_DELETE_PROGRESS, toDo, totalCount);
                 totalCount++;

-                if (((String)dbNodeService.getProperty(childAssocRef.getChildRef(), ContentModel.PROP_NAME)).startsWith(VersionMigrator.PREFIX_MIGRATED))
+                if (isMigrated(childAssocRef))
                 {
                     if (batchCount < batchSize)
                     {
@@ -596,6 +627,11 @@ public class VersionMigrator
         }
     }

+    protected boolean isMigrated(ChildAssociationRef vhChildAssocRef)
+    {
+        return (((String)dbNodeService.getProperty(vhChildAssocRef.getChildRef(), ContentModel.PROP_NAME)).startsWith(VersionMigrator.PREFIX_MIGRATED));
+    }
+
     /**
      * Support to report % completion and estimated completion time.
      *

View File

@@ -31,7 +31,10 @@ import java.util.Map;
 import java.util.Set;

 import org.alfresco.model.ContentModel;
+import org.alfresco.repo.node.integrity.IntegrityChecker;
 import org.alfresco.repo.policy.PolicyComponent;
+import org.alfresco.repo.transaction.RetryingTransactionHelper;
+import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
 import org.alfresco.repo.version.common.counter.VersionCounterService;
 import org.alfresco.service.cmr.coci.CheckOutCheckInService;
 import org.alfresco.service.cmr.dictionary.DictionaryService;
@@ -41,6 +44,7 @@ import org.alfresco.service.cmr.repository.NodeService;
 import org.alfresco.service.cmr.repository.StoreRef;
 import org.alfresco.service.cmr.version.Version;
 import org.alfresco.service.cmr.version.VersionHistory;
+import org.alfresco.service.namespace.NamespaceService;
 import org.alfresco.service.namespace.QName;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -62,6 +66,7 @@ public class VersionMigratorTest extends BaseVersionStoreTest
     protected DictionaryService dictionaryService;
     protected CheckOutCheckInService cociService;
     protected VersionCounterService versionCounterService;
+    protected IntegrityChecker integrityChecker;

     public VersionMigratorTest()
     {
@@ -81,6 +86,8 @@ public class VersionMigratorTest extends BaseVersionStoreTest
         this.cociService = (CheckOutCheckInService)applicationContext.getBean("CheckoutCheckinService");
         this.versionCounterService = (VersionCounterService)applicationContext.getBean("versionCounterService");
+        this.integrityChecker = (IntegrityChecker)applicationContext.getBean("integrityChecker");
+
         // Version1Service is used to create the version nodes in Version1Store (workspace://lightWeightVersionStore)
         version1Service.setDbNodeService(dbNodeService);
         version1Service.setNodeService(dbNodeService);
@@ -125,7 +132,7 @@ public class VersionMigratorTest extends BaseVersionStoreTest
         Map<QName, Serializable> oldVersionProps = versionNodeService.getProperties(oldVersion.getFrozenStateNodeRef());

         logger.info("oldVersion props: " + oldVersion);
-        logger.info("oldVersion created: " + oldVersion.getCreatedDate() + " [" + oldVersion.getCreatedDate().getTime()+"]");
+        logger.info("oldVersion created: " + oldVersion.getFrozenModifiedDate() + " [" + oldVersion.getFrozenModifiedDate().getTime()+"]");
         logger.info("oldVersion props via versionNodeService: " + oldVersionProps);
@@ -147,7 +154,7 @@ public class VersionMigratorTest extends BaseVersionStoreTest
         Version newVersion = vh2.getRootVersion();

         logger.info("newVersion props: " + newVersion);
-        logger.info("newVersion created: " + newVersion.getCreatedDate() + " [" + newVersion.getCreatedDate().getTime()+"]");
+        logger.info("newVersion created: " + newVersion.getFrozenModifiedDate() + " [" + newVersion.getFrozenModifiedDate().getTime()+"]");

         // check new version - switch to new version service to do the check
         super.setVersionService(version2Service);
@@ -260,6 +267,167 @@ public class VersionMigratorTest extends BaseVersionStoreTest
         logger.info("testMigrateMultipleVersions: Migrated from oldVHNodeRef = " + oldVHNodeRef + " to newVHNodeRef = " + newVHNodeRef);
     }

+    public void testMigrateMultipleNodesSuccessful() throws Exception
+    {
+        testMigrateMultipleNodes(false);
+    }
+
+    public void test_ETHREEOH_2091() throws Exception
+    {
+        // test partial migration (with skipped nodes)
+        testMigrateMultipleNodes(true);
+    }
+
+    /**
+     * Test migration of a multiple nodes (each with one version)
+     */
+    private void testMigrateMultipleNodes(final boolean withSkip)
+    {
+        if (version2Service.useDeprecatedV1 == true)
+        {
+            logger.info("testMigrateOneVersion: skip");
+            return;
+        }
+
+        final int nodeCount = 5;
+        assert(nodeCount > 3);
+
+        final NodeRef[] versionableNodes = new NodeRef[nodeCount];
+
+        setComplete();
+        endTransaction();
+
+        RetryingTransactionHelper txHelper = transactionService.getRetryingTransactionHelper();
+
+        for (int i = 0; i < nodeCount; i++)
+        {
+            final int idx = i;
+            txHelper.doInTransaction(new RetryingTransactionCallback<NodeRef>()
+            {
+                public NodeRef execute() throws Throwable
+                {
+                    NodeRef versionableNode = null;
+                    if ((idx % 2) == 0)
+                    {
+                        versionableNode = createNewVersionableNode();
+                    }
+                    else
+                    {
+                        versionableNode = createNewVersionableContentNode(true);
+                    }
+
+                    createVersion(versionableNode);
+
+                    versionableNodes[idx] = versionableNode;
+
+                    return null;
+                }
+            });
+        }
+
+        setComplete();
+        endTransaction();
+
+        txHelper.doInTransaction(new RetryingTransactionCallback<NodeRef>()
+        {
+            public NodeRef execute() throws Throwable
+            {
+                // check old version histories
+                for (int i = 0; i < nodeCount; i++)
+                {
+                    VersionHistory vh1 = version1Service.getVersionHistory(versionableNodes[i]);
+                    assertNotNull(vh1);
+                    assertEquals(1, vh1.getAllVersions().size());
+                }
+
+                return null;
+            }
+        });
+
+        setComplete();
+        endTransaction();
+
+        if (withSkip)
+        {
+            // remove test model - those nodes should fail - currently all - add separate create ...
+            // TODO ...
+            dictionaryDAO.removeModel(QName.createQName("http://www.alfresco.org/test/versionstorebasetest/1.0", "versionstorebasetestmodel"));
+        }
+
+        txHelper = transactionService.getRetryingTransactionHelper();
+        txHelper.doInTransaction(new RetryingTransactionCallback<NodeRef>()
+        {
+            public NodeRef execute() throws Throwable
+            {
+                // Migrate (and don't delete old version history) !
+                versionMigrator.migrateVersions(1, false);
+
+                return null;
+            }
+        });
+
+        setComplete();
+        endTransaction();
+
+        txHelper.doInTransaction(new RetryingTransactionCallback<NodeRef>()
+        {
+            public NodeRef execute() throws Throwable
+            {
+                // check new version histories
+                for (int i = 0; i < nodeCount; i++)
+                {
+                    VersionHistory vh2 = version2Service.getVersionHistory(versionableNodes[i]);
+
+                    if (withSkip && ((i % 2) == 0))
+                    {
+                        assertNull(vh2);
+                    }
+                    else
+                    {
+                        assertNotNull(vh2);
+                        assertEquals(1, vh2.getAllVersions().size());
+                    }
+                }
+
+                return null;
+            }
+        });
+    }
+
+    private NodeRef createNewVersionableContentNode(boolean versionable)
+    {
+        // Use this map to retrieve the versionable nodes in later tests
+        this.versionableNodes = new HashMap<String, NodeRef>();
+
+        // Create node (this node has some content)
+        NodeRef nodeRef = this.dbNodeService.createNode(
+                rootNodeRef,
+                ContentModel.ASSOC_CHILDREN,
+                QName.createQName(NamespaceService.CONTENT_MODEL_1_0_URI, "myNode"),
+                ContentModel.TYPE_CONTENT,
+                this.nodeProperties).getChildRef();
+
+        if (versionable)
+        {
+            this.dbNodeService.addAspect(nodeRef, ContentModel.ASPECT_VERSIONABLE, new HashMap<QName, Serializable>());
+        }
+
+        assertNotNull(nodeRef);
+        this.versionableNodes.put(nodeRef.getId(), nodeRef);
+
+        // Add the content to the node
+        ContentWriter contentWriter = this.contentService.getWriter(nodeRef, ContentModel.PROP_CONTENT, true);
+        contentWriter.putContent(TEST_CONTENT);
+
+        // Set author
+        Map<QName, Serializable> authorProps = new HashMap<QName, Serializable>(1, 1.0f);
+        authorProps.put(ContentModel.PROP_AUTHOR, "Charles Dickens");
+        this.dbNodeService.addAspect(nodeRef, ContentModel.ASPECT_AUTHOR, authorProps);
+
+        return nodeRef;
+    }
+
     public void test_ETHREEOH_1540() throws Exception
     {
         // Create the node used for tests