diff --git a/config/alfresco/content-services-context.xml b/config/alfresco/content-services-context.xml
index f9c73f1b76..744ceda35b 100644
--- a/config/alfresco/content-services-context.xml
+++ b/config/alfresco/content-services-context.xml
@@ -299,6 +299,17 @@
+
+
+
+
+
+
+
+ parent="baseComplexContentTransformer" >
@@ -477,12 +488,14 @@
-
+
+ parent="baseComplexContentTransformer">
-
+
@@ -495,7 +508,7 @@
+ parent="baseComplexContentTransformer" >
@@ -535,7 +548,7 @@
+ parent="baseComplexContentTransformer" >
@@ -565,7 +578,7 @@
class="org.alfresco.repo.content.transform.PoiContentTransformer"
parent="baseContentTransformer" />
-
+
@@ -692,7 +705,7 @@
+ parent="baseComplexContentTransformer" >
diff --git a/config/alfresco/mt/mt-contentstore-context.xml b/config/alfresco/mt/mt-contentstore-context.xml
index eee37fd75e..9362c1ca1d 100644
--- a/config/alfresco/mt/mt-contentstore-context.xml
+++ b/config/alfresco/mt/mt-contentstore-context.xml
@@ -8,7 +8,7 @@
-
+
diff --git a/config/alfresco/subsystems/Synchronization/default/default-synchronization-context.xml b/config/alfresco/subsystems/Synchronization/default/default-synchronization-context.xml
index 9272d63016..951ec57d8e 100644
--- a/config/alfresco/subsystems/Synchronization/default/default-synchronization-context.xml
+++ b/config/alfresco/subsystems/Synchronization/default/default-synchronization-context.xml
@@ -74,6 +74,9 @@
${synchronization.workerThreads}
+
+ ${synchronization.allowDeletions}
+
diff --git a/config/alfresco/subsystems/Synchronization/default/default-synchronization.properties b/config/alfresco/subsystems/Synchronization/default/default-synchronization.properties
index 36d8417e29..f73e71c637 100644
--- a/config/alfresco/subsystems/Synchronization/default/default-synchronization.properties
+++ b/config/alfresco/subsystems/Synchronization/default/default-synchronization.properties
@@ -27,4 +27,7 @@ synchronization.autoCreatePeopleOnLogin=true
synchronization.loggingInterval=100
# The number of threads to use when doing a batch (scheduled or startup) sync
-synchronization.workerThreads=2
\ No newline at end of file
+synchronization.workerThreads=2
+
+# Synchronization with deletions
+synchronization.allowDeletions=true
\ No newline at end of file
diff --git a/config/alfresco/swf-transform-context.xml b/config/alfresco/swf-transform-context.xml
index 30ea5141bd..2097bcb09a 100644
--- a/config/alfresco/swf-transform-context.xml
+++ b/config/alfresco/swf-transform-context.xml
@@ -28,7 +28,7 @@
+ parent="baseComplexContentTransformer" >
@@ -84,7 +84,7 @@
+ parent="baseComplexContentTransformer" >
@@ -100,7 +100,7 @@
+ parent="baseComplexContentTransformer" >
@@ -123,7 +123,7 @@
+ parent="baseComplexContentTransformer" >
@@ -147,7 +147,7 @@
+ parent="baseComplexContentTransformer" >
diff --git a/source/meta-inf/bm-remote-manifest.mf b/source/META-INF/bm-remote-manifest.mf
similarity index 100%
rename from source/meta-inf/bm-remote-manifest.mf
rename to source/META-INF/bm-remote-manifest.mf
diff --git a/source/meta-inf/mmt-manifest.mf b/source/META-INF/mmt-manifest.mf
similarity index 100%
rename from source/meta-inf/mmt-manifest.mf
rename to source/META-INF/mmt-manifest.mf
diff --git a/source/java/org/alfresco/filesys/auth/cifs/EnterpriseCifsAuthenticator.java b/source/java/org/alfresco/filesys/auth/cifs/EnterpriseCifsAuthenticator.java
index d1adc9a7c8..0236119e67 100644
--- a/source/java/org/alfresco/filesys/auth/cifs/EnterpriseCifsAuthenticator.java
+++ b/source/java/org/alfresco/filesys/auth/cifs/EnterpriseCifsAuthenticator.java
@@ -609,7 +609,7 @@ public class EnterpriseCifsAuthenticator extends CifsAuthenticatorBase implement
{
return Capability.Unicode + Capability.RemoteAPIs + Capability.NTSMBs + Capability.NTFind +
Capability.NTStatus + Capability.LargeFiles + Capability.LargeRead + Capability.LargeWrite +
- Capability.ExtendedSecurity;
+ Capability.ExtendedSecurity + Capability.InfoPassthru + Capability.Level2Oplocks;
}
/**
diff --git a/source/java/org/alfresco/filesys/repo/CacheLookupSearchContext.java b/source/java/org/alfresco/filesys/repo/CacheLookupSearchContext.java
index af0bbfed6d..24b89c5db6 100644
--- a/source/java/org/alfresco/filesys/repo/CacheLookupSearchContext.java
+++ b/source/java/org/alfresco/filesys/repo/CacheLookupSearchContext.java
@@ -104,8 +104,11 @@ public class CacheLookupSearchContext extends DotDotContentSearchContext {
if ( fstate.hasModifyDateTime())
info.setModifyDateTime( fstate.getModifyDateTime());
- // File allocation size
+ // File used/allocation size
+ if ( fstate.hasFileSize())
+ info.setFileSize( fstate.getFileSize());
+
if ( fstate.hasAllocationSize() && fstate.getAllocationSize() > info.getSize())
info.setAllocationSize( fstate.getAllocationSize());
diff --git a/source/java/org/alfresco/filesys/repo/ContentDiskDriver.java b/source/java/org/alfresco/filesys/repo/ContentDiskDriver.java
index 03400ef54b..965b0ba7f7 100644
--- a/source/java/org/alfresco/filesys/repo/ContentDiskDriver.java
+++ b/source/java/org/alfresco/filesys/repo/ContentDiskDriver.java
@@ -1353,8 +1353,12 @@ public class ContentDiskDriver extends AlfrescoTxDiskDriver implements DiskInter
}
}
}
- else
- searchCtx = new ContentSearchContext(cifsHelper, results, searchFileSpec, pseudoList, paths[0]);
+ else {
+ if ( ctx.hasStateCache())
+ searchCtx = new CacheLookupSearchContext(cifsHelper, results, searchFileSpec, pseudoList, paths[0], ctx.getStateCache());
+ else
+ searchCtx = new ContentSearchContext(cifsHelper, results, searchFileSpec, pseudoList, paths[0]);
+ }
// Debug
diff --git a/source/java/org/alfresco/filesys/repo/ContentNetworkFile.java b/source/java/org/alfresco/filesys/repo/ContentNetworkFile.java
index 602c559700..8a1bf5e0dc 100644
--- a/source/java/org/alfresco/filesys/repo/ContentNetworkFile.java
+++ b/source/java/org/alfresco/filesys/repo/ContentNetworkFile.java
@@ -661,10 +661,12 @@ public class ContentNetworkFile extends NodeRefNetworkFile
setFileSize(channel.size());
- // Update the modification date/time
+ // Update the modification date/time and live file size
- if ( getFileState() != null)
+ if ( getFileState() != null) {
getFileState().updateModifyDateTime();
+ getFileState().setFileSize( getFileSize());
+ }
// DEBUG
diff --git a/source/java/org/alfresco/repo/content/ContentServiceImpl.java b/source/java/org/alfresco/repo/content/ContentServiceImpl.java
index 2d8763e59d..b537e192a8 100644
--- a/source/java/org/alfresco/repo/content/ContentServiceImpl.java
+++ b/source/java/org/alfresco/repo/content/ContentServiceImpl.java
@@ -738,6 +738,15 @@ public class ContentServiceImpl implements ContentService, ApplicationContextAwa
* @see org.alfresco.service.cmr.repository.ContentService#getTransformer(String, java.lang.String, long, java.lang.String, org.alfresco.service.cmr.repository.TransformationOptions)
*/
public ContentTransformer getTransformer(String sourceUrl, String sourceMimetype, long sourceSize, String targetMimetype, TransformationOptions options)
+ {
+ List transformers = getTransformers(sourceUrl, sourceMimetype, sourceSize, targetMimetype, options);
+ return (transformers == null) ? null : transformers.get(0);
+ }
+
+ /**
+ * @see org.alfresco.service.cmr.repository.ContentService#getTransformers(String, java.lang.String, long, java.lang.String, org.alfresco.service.cmr.repository.TransformationOptions)
+ */
+ public List getTransformers(String sourceUrl, String sourceMimetype, long sourceSize, String targetMimetype, TransformationOptions options)
{
try
{
@@ -745,7 +754,7 @@ public class ContentServiceImpl implements ContentService, ApplicationContextAwa
transformerDebug.pushAvailable(sourceUrl, sourceMimetype, targetMimetype, options);
List transformers = getActiveTransformers(sourceMimetype, sourceSize, targetMimetype, options);
transformerDebug.availableTransformers(transformers, sourceSize, "ContentService.getTransformer(...)");
- return (transformers.isEmpty()) ? null : transformers.get(0);
+ return transformers.isEmpty() ? null : transformers;
}
finally
{
diff --git a/source/java/org/alfresco/repo/content/replication/ReplicatingContentStore.java b/source/java/org/alfresco/repo/content/replication/ReplicatingContentStore.java
index dde3eae186..7f3d7a1e8d 100644
--- a/source/java/org/alfresco/repo/content/replication/ReplicatingContentStore.java
+++ b/source/java/org/alfresco/repo/content/replication/ReplicatingContentStore.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005-2010 Alfresco Software Limited.
+ * Copyright (C) 2005-2012 Alfresco Software Limited.
*
* This file is part of Alfresco
*
@@ -213,6 +213,15 @@ public class ReplicatingContentStore extends AbstractContentStore
{
return primaryStore.isContentUrlSupported(contentUrl);
}
+
+ /**
+ * @return the primary store root location
+ */
+ @Override
+ public String getRootLocation()
+ {
+ return primaryStore.getRootLocation();
+ }
/**
* Forwards the call directly to the first store in the list of stores.
diff --git a/source/java/org/alfresco/repo/content/transform/AbstractContentTransformerLimits.java b/source/java/org/alfresco/repo/content/transform/AbstractContentTransformerLimits.java
index ea441098cd..f9531c8651 100644
--- a/source/java/org/alfresco/repo/content/transform/AbstractContentTransformerLimits.java
+++ b/source/java/org/alfresco/repo/content/transform/AbstractContentTransformerLimits.java
@@ -62,7 +62,7 @@ public abstract class AbstractContentTransformerLimits extends ContentTransforme
* Indicates if 'page' limits are supported.
* @return false by default.
*/
- protected boolean isPageLimitSupported()
+ protected boolean isPageLimitSupported(String sourceMimetype, String targetMimetype, TransformationOptions options)
{
return pageLimitsSupported;
}
@@ -98,6 +98,10 @@ public abstract class AbstractContentTransformerLimits extends ContentTransforme
@Override
public boolean isTransformable(String sourceMimetype, long sourceSize, String targetMimetype, TransformationOptions options)
{
+ // To make TransformerDebug output clearer, check the mimetypes and then the sizes.
+ // If not done, 'unavailable' transformers due to size might be reported even
+ // though they cannot transform the source to the target mimetype.
+
return
isTransformableMimetype(sourceMimetype, targetMimetype, options) &&
isTransformableSize(sourceMimetype, sourceSize, targetMimetype, options);
@@ -152,7 +156,7 @@ public abstract class AbstractContentTransformerLimits extends ContentTransforme
// of icons. Note the readLimitKBytes value is not checked as the combined limits
// only have the max or limit KBytes value set (the smaller value is returned).
TransformationOptionLimits limits = getLimits(sourceMimetype, targetMimetype, options);
- if (!isPageLimitSupported() || limits.getPageLimit() <= 0)
+ if (!isPageLimitSupported(sourceMimetype, targetMimetype, options) || limits.getPageLimit() <= 0)
{
maxSourceSizeKBytes = limits.getMaxSourceSizeKBytes();
}
diff --git a/source/java/org/alfresco/repo/content/transform/ComplexContentTransformer.java b/source/java/org/alfresco/repo/content/transform/ComplexContentTransformer.java
index aedde22ce3..6ffc4e7273 100644
--- a/source/java/org/alfresco/repo/content/transform/ComplexContentTransformer.java
+++ b/source/java/org/alfresco/repo/content/transform/ComplexContentTransformer.java
@@ -22,7 +22,10 @@ import java.beans.PropertyDescriptor;
import java.io.File;
import java.io.Serializable;
import java.lang.reflect.InvocationTargetException;
+import java.util.ArrayDeque;
+import java.util.ArrayList;
import java.util.Collections;
+import java.util.Deque;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -32,6 +35,7 @@ import javax.faces.el.MethodNotFoundException;
import org.alfresco.error.AlfrescoRuntimeException;
import org.alfresco.repo.content.filestore.FileContentWriter;
import org.alfresco.service.cmr.repository.ContentReader;
+import org.alfresco.service.cmr.repository.ContentService;
import org.alfresco.service.cmr.repository.ContentWriter;
import org.alfresco.service.cmr.repository.NodeRef;
import org.alfresco.service.cmr.repository.TransformationOptionLimits;
@@ -57,20 +61,37 @@ public class ComplexContentTransformer extends AbstractContentTransformer2 imple
*/
private static Log logger = LogFactory.getLog(ComplexContentTransformer.class);
+ /**
+ * Complex transformers contain lower level transformers. In order to find dynamic
+ * (defined as null) child transformers to use, they recursively check available
+ * transformers. It makes no sense to have a transformer that is its own child.
+ */
+ static final ThreadLocal> parentTransformers = new ThreadLocal>() {
+ @Override
+ protected Deque initialValue() {
+ return new ArrayDeque();
+ }
+ };
+
private List transformers;
private List intermediateMimetypes;
private Map transformationOptionOverrides;
-
+ private ContentService contentService;
+
public ComplexContentTransformer()
{
}
/**
- * The list of transformers to use.
+ * The list of transformers to use. If any element is null,
+ * all possible transformers will be considered; in that case
+ * the contentService property must be set.
*
* If a single transformer is supplied, then it will still be used.
*
* @param transformers list of at least one transformer
+ *
+ * @see #setContentService(ContentService)
*/
public void setTransformers(List transformers)
{
@@ -107,6 +128,16 @@ public class ComplexContentTransformer extends AbstractContentTransformer2 imple
this.transformationOptionOverrides = transformationOptionOverrides;
}
+ /**
+ * Sets the ContentService. Only required if {@code null} transformers
+ * are provided to {@link #setTransformers(List)}.
+ * @param contentService
+ */
+ public void setContentService(ContentService contentService)
+ {
+ this.contentService = contentService;
+ }
+
/**
* Ensures that required properties have been set
*/
@@ -125,25 +156,35 @@ public class ComplexContentTransformer extends AbstractContentTransformer2 imple
{
throw new AlfrescoRuntimeException("'mimetypeService' is a required property");
}
+ for (ContentTransformer transformer: transformers)
+ {
+ if (transformer == null)
+ {
+ if (contentService == null)
+ {
+ throw new AlfrescoRuntimeException("'contentService' is a required property if " +
+ "there are any null (dynamic) transformers");
+ }
+ break;
+ }
+ }
}
- /**
- * Overrides this method to avoid calling
- * {@link #isTransformableMimetype(String, String, TransformationOptions)}
- * twice on each transformer in the list, as
- * {@link #isTransformableSize(String, long, String, TransformationOptions)}
- * in this class must check the mimetype too.
- */
@Override
public boolean isTransformable(String sourceMimetype, long sourceSize, String targetMimetype,
TransformationOptions options)
{
+ // Don't allow transformer to be its own child.
+ if (parentTransformers.get().contains(this))
+ {
+ return false;
+ }
+
overrideTransformationOptions(options);
- // To make TransformerDebug output clearer, check the mimetypes and then the sizes.
- // If not done, 'unavailable' transformers due to size might be reported even
- // though they cannot transform the source to the target mimetype.
-
+ // Can use super isTransformableSize as it indirectly calls getLimits in this class
+ // which combines the limits from the first transformer. Other transformers in the chain
+ // are not checked as sizes are unknown.
return
isTransformableMimetype(sourceMimetype, targetMimetype, options) &&
isTransformableSize(sourceMimetype, sourceSize, targetMimetype, options);
@@ -200,73 +241,42 @@ public class ComplexContentTransformer extends AbstractContentTransformer2 imple
@Override
public boolean isTransformableMimetype(String sourceMimetype, String targetMimetype, TransformationOptions options)
- {
- return isTransformableMimetypeAndSize(sourceMimetype, -1, targetMimetype, options);
- }
-
- @Override
- public boolean isTransformableSize(String sourceMimetype, long sourceSize, String targetMimetype, TransformationOptions options)
- {
- return (sourceSize < 0) ||
- super.isTransformableSize(sourceMimetype, sourceSize, targetMimetype, options) &&
- isTransformableMimetypeAndSize(sourceMimetype, sourceSize, targetMimetype, options);
- }
-
- private boolean isTransformableMimetypeAndSize(String sourceMimetype, long sourceSize,
- String targetMimetype, TransformationOptions options)
{
boolean result = true;
String currentSourceMimetype = sourceMimetype;
-
Iterator transformerIterator = transformers.iterator();
Iterator intermediateMimetypeIterator = intermediateMimetypes.iterator();
while (transformerIterator.hasNext())
{
ContentTransformer transformer = transformerIterator.next();
- // determine the target mimetype. This is the final target if we are on the last transformation
- String currentTargetMimetype = null;
- if (!transformerIterator.hasNext())
+
+ // determine the target mimetype. This is the final target if we are on the last transformation
+ String currentTargetMimetype = transformerIterator.hasNext() ? intermediateMimetypeIterator.next() : targetMimetype;
+ if (transformer == null)
{
- currentTargetMimetype = targetMimetype;
- }
- else
- {
- // use an intermediate transformation mimetype
- currentTargetMimetype = intermediateMimetypeIterator.next();
- }
-
- if (sourceSize < 0)
- {
- // check we can transform the current stage's mimetypes
- if (transformer.isTransformableMimetype(currentSourceMimetype, currentTargetMimetype, options) == false)
- {
- result = false;
- break;
- }
- }
- else
- {
- // check we can transform the current stage's sizes
try
{
- transformerDebug.pushIsTransformableSize(this);
- // (using -1 if not the first stage as we can't know the size)
- if (transformer.isTransformableSize(currentSourceMimetype, sourceSize, currentTargetMimetype, options) == false)
+ parentTransformers.get().push(this);
+ @SuppressWarnings("deprecation")
+ List firstTansformers = contentService.getActiveTransformers(
+ currentSourceMimetype, -1, currentTargetMimetype, options);
+ if (firstTansformers.isEmpty())
{
result = false;
break;
}
-
- // As the size is unknown for the next stages stop.
- // In future we might guess sizes such as excl to pdf
- // is about 110% of the original size, in which case
- // we would continue.
- break;
- // sourceSize += sourceSize * 10 / 100;
}
finally
{
- transformerDebug.popIsTransformableSize();
+ parentTransformers.get().pop();
+ }
+ }
+ else
+ {
+ if (transformer.isTransformableMimetype(currentSourceMimetype, currentTargetMimetype, options) == false)
+ {
+ result = false;
+ break;
}
}
@@ -279,30 +289,111 @@ public class ComplexContentTransformer extends AbstractContentTransformer2 imple
/**
* Indicates if 'page' limits are supported by the first transformer in the chain.
+ * If the first transformer is dynamic, all possible first transformers must support it.
* @return true if the first transformer supports them.
*/
- protected boolean isPageLimitSupported()
+ @Override
+ protected boolean isPageLimitSupported(String sourceMimetype, String targetMimetype,
+ TransformationOptions options)
{
- ContentTransformer firstTransformer = transformers.iterator().next();
- return (firstTransformer instanceof AbstractContentTransformerLimits)
- ? ((AbstractContentTransformerLimits)firstTransformer).isPageLimitSupported()
+ boolean pageLimitSupported;
+ ContentTransformer firstTransformer = transformers.get(0);
+ String firstTargetMimetype = intermediateMimetypes.get(0);
+ if (firstTransformer == null)
+ {
+ try
+ {
+ parentTransformers.get().push(this);
+ @SuppressWarnings("deprecation")
+ List firstTansformers = contentService.getActiveTransformers(
+ sourceMimetype, -1, firstTargetMimetype, options);
+ pageLimitSupported = !firstTansformers.isEmpty();
+ if (pageLimitSupported)
+ {
+ for (ContentTransformer transformer: firstTansformers)
+ {
+ if (!isPageLimitSupported(transformer, sourceMimetype, targetMimetype, options))
+ {
+ pageLimitSupported = false;
+ break;
+ }
+ }
+ }
+ }
+ finally
+ {
+ parentTransformers.get().pop();
+ }
+ }
+ else
+ {
+ pageLimitSupported = isPageLimitSupported(firstTransformer, sourceMimetype, targetMimetype, options);
+ }
+ return pageLimitSupported;
+ }
+
+ private boolean isPageLimitSupported(ContentTransformer transformer, String sourceMimetype,
+ String targetMimetype, TransformationOptions options)
+ {
+ return (transformer instanceof AbstractContentTransformerLimits)
+ ? ((AbstractContentTransformerLimits)transformer).isPageLimitSupported(sourceMimetype, targetMimetype, options)
: false;
}
/**
* Returns the limits from this transformer combined with those of the first transformer in the chain.
+ * If the first transformer is dynamic, the lowest common denominator between all possible first transformers
+ * is combined.
*/
protected TransformationOptionLimits getLimits(String sourceMimetype, String targetMimetype,
TransformationOptions options)
{
+ TransformationOptionLimits firstTransformerLimits = null;
TransformationOptionLimits limits = super.getLimits(sourceMimetype, targetMimetype, options);
ContentTransformer firstTransformer = transformers.get(0);
- if (firstTransformer instanceof AbstractContentTransformerLimits)
+ String firstTargetMimetype = intermediateMimetypes.get(0);
+ if (firstTransformer == null)
{
- String firstTargetMimetype = intermediateMimetypes.get(0);
- limits = limits.combine(((AbstractContentTransformerLimits) firstTransformer).
- getLimits(sourceMimetype, firstTargetMimetype, options));
+ try
+ {
+ parentTransformers.get().push(this);
+ @SuppressWarnings("deprecation")
+ List firstTansformers = contentService.getActiveTransformers(
+ sourceMimetype, -1, firstTargetMimetype, options);
+ if (!firstTansformers.isEmpty())
+ {
+ for (ContentTransformer transformer: firstTansformers)
+ {
+ if (transformer instanceof AbstractContentTransformerLimits)
+ {
+ TransformationOptionLimits transformerLimits = ((AbstractContentTransformerLimits)transformer).
+ getLimits(sourceMimetype, firstTargetMimetype, options);
+ firstTransformerLimits = (firstTransformerLimits == null)
+ ? transformerLimits
+ : firstTransformerLimits.combineUpper(transformerLimits);
+ }
+ }
+ }
+ }
+ finally
+ {
+ parentTransformers.get().pop();
+ }
}
+ else
+ {
+ if (firstTransformer instanceof AbstractContentTransformerLimits)
+ {
+ firstTransformerLimits = ((AbstractContentTransformerLimits)firstTransformer).
+ getLimits(sourceMimetype, firstTargetMimetype, options);
+ }
+ }
+
+ if (firstTransformerLimits != null)
+ {
+ limits = limits.combine(firstTransformerLimits);
+ }
+
return limits;
}
@@ -345,7 +436,22 @@ public class ComplexContentTransformer extends AbstractContentTransformer2 imple
}
// transform
- transformer.transform(currentReader, currentWriter, options);
+ if (transformer == null)
+ {
+ try
+ {
+ parentTransformers.get().push(this);
+ contentService.transform(currentReader, currentWriter, options);
+ }
+ finally
+ {
+ parentTransformers.get().pop();
+ }
+ }
+ else
+ {
+ transformer.transform(currentReader, currentWriter, options);
+ }
// Must clear the sourceNodeRef after the first transformation to avoid later
// transformers thinking the intermediate file is the original node. However as
diff --git a/source/java/org/alfresco/repo/content/transform/ContentTransformerRegistry.java b/source/java/org/alfresco/repo/content/transform/ContentTransformerRegistry.java
index df6c47acb7..205b7f13fb 100644
--- a/source/java/org/alfresco/repo/content/transform/ContentTransformerRegistry.java
+++ b/source/java/org/alfresco/repo/content/transform/ContentTransformerRegistry.java
@@ -109,20 +109,11 @@ public class ContentTransformerRegistry
{
// Get the list of transformers
List transformers = findTransformers(sourceMimetype, sourceSize, targetMimetype, options);
-
final Map activeTransformers = new HashMap();
// identify the performance of all the transformers
for (ContentTransformer transformer : transformers)
{
- // Transformability can be dynamic, i.e. it may have become unusable
- // Don't know why we do this test as it has already been done by findTransformers(...)
- if (transformer.isTransformable(sourceMimetype, sourceSize, targetMimetype, options) == false)
- {
- // It is unreliable now.
- continue;
- }
-
long transformationTime = transformer.getTransformationTime();
activeTransformers.put(transformer, transformationTime);
}
@@ -151,34 +142,6 @@ public class ContentTransformerRegistry
*/
private List findTransformers(String sourceMimetype, long sourceSize, String targetMimetype, TransformationOptions options)
{
- // search for a simple transformer that can do the job
- List transformers = findDirectTransformers(sourceMimetype, sourceSize, targetMimetype, options);
- // get the complex transformers that can do the job
- List complexTransformers = findComplexTransformer(sourceMimetype, targetMimetype, options);
- transformers.addAll(complexTransformers);
- // done
- if (logger.isDebugEnabled())
- {
- logger.debug("Searched for transformer: \n" +
- " source mimetype: " + sourceMimetype + "\n" +
- " target mimetype: " + targetMimetype + "\n" +
- " transformers: " + transformers);
- }
- return transformers;
- }
-
- /**
- * Loops through the content transformers and picks the ones with the highest reliabilities.
- *
- * Where there are several transformers that are equally reliable, they are all returned.
- *
- * @return Returns the most reliable transformers for the translation - empty list if there
- * are none.
- */
- private List findDirectTransformers(String sourceMimetype, long sourceSize, String targetMimetype, TransformationOptions options)
- {
- //double maxReliability = 0.0;
-
List transformers = new ArrayList(2);
boolean foundExplicit = false;
@@ -206,19 +169,16 @@ public class ContentTransformerRegistry
}
}
// done
+ if (logger.isDebugEnabled())
+ {
+ logger.debug("Searched for transformer: \n" +
+ " source mimetype: " + sourceMimetype + "\n" +
+ " target mimetype: " + targetMimetype + "\n" +
+ " transformers: " + transformers);
+ }
return transformers;
}
- /**
- * Uses a list of known mimetypes to build transformations from several direct transformations.
- */
- private List findComplexTransformer(String sourceMimetype, String targetMimetype, TransformationOptions options)
- {
- // get a complete list of mimetypes
- // TODO: Build complex transformers by searching for transformations by mimetype
- return Collections.emptyList();
- }
-
/**
* Recursive method to build up a list of content transformers
*/
diff --git a/source/java/org/alfresco/repo/domain/schema/SchemaBootstrap.java b/source/java/org/alfresco/repo/domain/schema/SchemaBootstrap.java
index b6d9267c3c..f114ded2d0 100644
--- a/source/java/org/alfresco/repo/domain/schema/SchemaBootstrap.java
+++ b/source/java/org/alfresco/repo/domain/schema/SchemaBootstrap.java
@@ -783,6 +783,9 @@ public class SchemaBootstrap extends AbstractLifecycleBean
final Dialect dialect = Dialect.getDialect(cfg.getProperties());
String dialectStr = dialect.getClass().getSimpleName();
+ // Initialise Activiti DB, using an unclosable connection.
+ initialiseActivitiDBSchema(new UnclosableConnection(connection));
+
if (create)
{
// execute pre-create scripts (not patches)
@@ -865,9 +868,6 @@ public class SchemaBootstrap extends AbstractLifecycleBean
checkSchemaPatchScripts(cfg, connection, postUpdateScriptPatches, true);
}
- // Initialise Activiti DB, using an unclosable connection
- initialiseActivitiDBSchema(new UnclosableConnection(connection));
-
return create;
}
diff --git a/source/java/org/alfresco/repo/remoteconnector/RemoteConnectorResponseImpl.java b/source/java/org/alfresco/repo/remoteconnector/RemoteConnectorResponseImpl.java
index b926974784..6b0726fcd3 100644
--- a/source/java/org/alfresco/repo/remoteconnector/RemoteConnectorResponseImpl.java
+++ b/source/java/org/alfresco/repo/remoteconnector/RemoteConnectorResponseImpl.java
@@ -42,6 +42,7 @@ public class RemoteConnectorResponseImpl implements RemoteConnectorResponse
private String contentType;
private String charset;
+ private int status;
private Header[] headers;
private InputStream bodyStream;
@@ -53,21 +54,28 @@ public class RemoteConnectorResponseImpl implements RemoteConnectorResponse
* InputStream shouldn't be used as cleanup is needed
*/
public RemoteConnectorResponseImpl(RemoteConnectorRequest request, String contentType,
- String charset, Header[] headers, InputStream response)
+ String charset, int status, Header[] headers, InputStream response)
{
this.request = request;
this.contentType = contentType;
this.charset = charset;
this.headers = headers;
+ this.status = status;
this.bodyStream = response;
this.bodyBytes = null;
}
public RemoteConnectorResponseImpl(RemoteConnectorRequest request, String contentType,
- String charset, Header[] headers, byte[] response)
+ String charset, int status, Header[] headers, byte[] response)
{
- this(request, contentType, charset, headers, new ByteArrayInputStream(response));
+ this(request, contentType, charset, status, headers, new ByteArrayInputStream(response));
this.bodyBytes = response;
}
+
+ @Override
+ public int getStatus()
+ {
+ return status;
+ }
@Override
public String getCharset()
diff --git a/source/java/org/alfresco/repo/remoteconnector/RemoteConnectorServiceImpl.java b/source/java/org/alfresco/repo/remoteconnector/RemoteConnectorServiceImpl.java
index 9c50dd6029..12d234daf0 100644
--- a/source/java/org/alfresco/repo/remoteconnector/RemoteConnectorServiceImpl.java
+++ b/source/java/org/alfresco/repo/remoteconnector/RemoteConnectorServiceImpl.java
@@ -24,8 +24,10 @@ import java.io.InputStream;
import org.alfresco.repo.content.MimetypeMap;
import org.alfresco.repo.security.authentication.AuthenticationException;
+import org.alfresco.service.cmr.remoteconnector.RemoteConnectorClientException;
import org.alfresco.service.cmr.remoteconnector.RemoteConnectorRequest;
import org.alfresco.service.cmr.remoteconnector.RemoteConnectorResponse;
+import org.alfresco.service.cmr.remoteconnector.RemoteConnectorServerException;
import org.alfresco.service.cmr.remoteconnector.RemoteConnectorService;
import org.alfresco.util.HttpClientHelper;
import org.apache.commons.httpclient.Header;
@@ -79,7 +81,8 @@ public class RemoteConnectorServiceImpl implements RemoteConnectorService
/**
* Executes the specified request, and return the response
*/
- public RemoteConnectorResponse executeRequest(RemoteConnectorRequest request) throws IOException, AuthenticationException
+ public RemoteConnectorResponse executeRequest(RemoteConnectorRequest request) throws IOException, AuthenticationException,
+ RemoteConnectorClientException, RemoteConnectorServerException
{
RemoteConnectorRequestImpl reqImpl = (RemoteConnectorRequestImpl)request;
HttpMethodBase httpRequest = reqImpl.getMethodInstance();
@@ -134,13 +137,13 @@ public class RemoteConnectorServiceImpl implements RemoteConnectorService
// Now build the response
response = new RemoteConnectorResponseImpl(request, responseContentType, responseCharSet,
- responseHdrs, wrappedStream);
+ status, responseHdrs, wrappedStream);
}
else
{
// Fairly small response, just keep the bytes and make life simple
response = new RemoteConnectorResponseImpl(request, responseContentType, responseCharSet,
- responseHdrs, httpRequest.getResponseBody());
+ status, responseHdrs, httpRequest.getResponseBody());
// Now we have the bytes, we can close the HttpClient resources
httpRequest.releaseConnection();
@@ -164,26 +167,42 @@ public class RemoteConnectorServiceImpl implements RemoteConnectorService
logger.debug("Response was " + status + " " + statusText);
// Decide if we should throw an exception
- if (status == Status.STATUS_FORBIDDEN)
+ if (status >= 300)
{
// Tidy if needed
if (httpRequest != null)
httpRequest.releaseConnection();
- // Then report the error
- throw new AuthenticationException(statusText);
+
+ // Specific exceptions
+ if (status == Status.STATUS_FORBIDDEN ||
+ status == Status.STATUS_UNAUTHORIZED)
+ {
+ throw new AuthenticationException(statusText);
+ }
+
+ // Server side exceptions
+ if (status >= 500 && status <= 599)
+ {
+ throw new RemoteConnectorServerException(status, statusText);
+ }
+ else
+ {
+ // Client request exceptions
+ if (httpRequest != null)
+ {
+ // Response wasn't too big and is available, supply it
+ throw new RemoteConnectorClientException(status, statusText, response);
+ }
+ else
+ {
+ // Response was too large, report without it
+ throw new RemoteConnectorClientException(status, statusText, null);
+ }
+ }
}
- if (status == Status.STATUS_INTERNAL_SERVER_ERROR)
- {
- // Tidy if needed
- if (httpRequest != null)
- httpRequest.releaseConnection();
- // Then report the error
- throw new IOException(statusText);
- }
- // TODO Handle the rest of the different status codes
-
- // Return our created response
+ // If we get here, then the request/response was all fine
+ // So, return our created response
return response;
}
diff --git a/source/java/org/alfresco/repo/remoteticket/RemoteAlfrescoTicketServiceImpl.java b/source/java/org/alfresco/repo/remoteticket/RemoteAlfrescoTicketServiceImpl.java
index 5151529807..7b6a8c0a73 100644
--- a/source/java/org/alfresco/repo/remoteticket/RemoteAlfrescoTicketServiceImpl.java
+++ b/source/java/org/alfresco/repo/remoteticket/RemoteAlfrescoTicketServiceImpl.java
@@ -404,7 +404,16 @@ public class RemoteAlfrescoTicketServiceImpl implements RemoteAlfrescoTicketServ
// If the credentials indicate the previous attempt failed, record as now working
if (! credentials.getLastAuthenticationSucceeded())
{
- remoteCredentialsService.updateCredentialsAuthenticationSucceeded(true, credentials);
+ retryingTransactionHelper.doInTransaction(
+ new RetryingTransactionCallback<Void>()
+ {
+ public Void execute()
+ {
+ remoteCredentialsService.updateCredentialsAuthenticationSucceeded(true, credentials);
+ return null;
+ }
+ }, false, true
+ );
}
// Wrap and return
diff --git a/source/java/org/alfresco/repo/security/sync/ChainingUserRegistrySynchronizer.java b/source/java/org/alfresco/repo/security/sync/ChainingUserRegistrySynchronizer.java
index fe9e80bf51..1df3de8633 100644
--- a/source/java/org/alfresco/repo/security/sync/ChainingUserRegistrySynchronizer.java
+++ b/source/java/org/alfresco/repo/security/sync/ChainingUserRegistrySynchronizer.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005-2011 Alfresco Software Limited.
+ * Copyright (C) 2005-2012 Alfresco Software Limited.
*
* This file is part of Alfresco
*
@@ -18,7 +18,10 @@
*/
package org.alfresco.repo.security.sync;
+import java.io.IOException;
import java.io.Serializable;
+import java.io.UnsupportedEncodingException;
+import java.net.URLDecoder;
import java.text.DateFormat;
import java.util.Collection;
import java.util.Collections;
@@ -38,6 +41,17 @@ import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
+import javax.management.AttributeNotFoundException;
+import javax.management.InstanceNotFoundException;
+import javax.management.IntrospectionException;
+import javax.management.MBeanAttributeInfo;
+import javax.management.MBeanException;
+import javax.management.MBeanInfo;
+import javax.management.MBeanServerConnection;
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
+import javax.management.ReflectionException;
+
import org.alfresco.model.ContentModel;
import org.alfresco.repo.batch.BatchProcessor;
import org.alfresco.repo.batch.BatchProcessor.BatchProcessWorker;
@@ -161,6 +175,11 @@ public class ChainingUserRegistrySynchronizer extends AbstractLifecycleBean impl
/** The number of worker threads. */
private int workerThreads = 2;
+
+ private MBeanServerConnection mbeanServer;
+
+ /** Allow a full sync to perform deletions? */
+ private boolean allowDeletions = true;
/**
* Sets the application context manager.
@@ -315,13 +334,51 @@ public class ChainingUserRegistrySynchronizer extends AbstractLifecycleBean impl
{
this.workerThreads = workerThreads;
}
-
+
+ /**
+ * Fullsync is run with deletions. By default is set to true.
+ *
+ * @param allowDeletions
+ */
+ public void setAllowDeletions(boolean allowDeletions)
+ {
+ this.allowDeletions = allowDeletions;
+ }
+
/*
* (non-Javadoc)
* @see org.alfresco.repo.security.sync.UserRegistrySynchronizer#synchronize(boolean, boolean, boolean)
*/
- public void synchronize(boolean forceUpdate, boolean allowDeletions, final boolean splitTxns)
+ public void synchronize(boolean forceUpdate, boolean isFullSync, final boolean splitTxns)
{
+ if (ChainingUserRegistrySynchronizer.logger.isDebugEnabled())
+ {
+
+ if (forceUpdate)
+ {
+ ChainingUserRegistrySynchronizer.logger.debug("Running a full sync.");
+ }
+ else
+ {
+ ChainingUserRegistrySynchronizer.logger.debug("Running a differential sync.");
+ }
+ if (allowDeletions)
+ {
+ ChainingUserRegistrySynchronizer.logger.debug("deletions are allowed");
+ }
+ else
+ {
+ ChainingUserRegistrySynchronizer.logger.debug("deletions are not allowed");
+ }
+ }
+
// Don't proceed with the sync if the repository is read only
if (this.transactionService.isReadOnly())
{
@@ -414,17 +471,112 @@ public class ChainingUserRegistrySynchronizer extends AbstractLifecycleBean impl
UserRegistry plugin = (UserRegistry) context.getBean(this.sourceBeanName);
if (!(plugin instanceof ActivateableBean) || ((ActivateableBean) plugin).isActive())
{
+ if (ChainingUserRegistrySynchronizer.logger.isDebugEnabled())
+ {
+ mbeanServer = (MBeanServerConnection) getApplicationContext().getBean("alfrescoMBeanServer");
+ try
+ {
+ StringBuilder nameBuff = new StringBuilder(200).append("Alfresco:Type=Configuration,Category=Authentication,id1=managed,id2=").append(
+ URLDecoder.decode(id, "UTF-8"));
+ ObjectName name = new ObjectName(nameBuff.toString());
+ if (mbeanServer != null && mbeanServer.isRegistered(name))
+ {
+ MBeanInfo info = mbeanServer.getMBeanInfo(name);
+ MBeanAttributeInfo[] attributes = info.getAttributes();
+ ChainingUserRegistrySynchronizer.logger.debug(id + " attributes:");
+ for (MBeanAttributeInfo attribute : attributes)
+ {
+ Object value = mbeanServer.getAttribute(name, attribute.getName());
+ ChainingUserRegistrySynchronizer.logger.debug(attribute.getName() + " = " + value);
+ }
+ }
+ }
+ catch(UnsupportedEncodingException e)
+ {
+ if (ChainingUserRegistrySynchronizer.logger.isWarnEnabled())
+ {
+ ChainingUserRegistrySynchronizer.logger
+ .warn("Exception during logging", e);
+ }
+ }
+ catch (MalformedObjectNameException e)
+ {
+ if (ChainingUserRegistrySynchronizer.logger.isWarnEnabled())
+ {
+ ChainingUserRegistrySynchronizer.logger
+ .warn("Exception during logging", e);
+ }
+ }
+ catch (InstanceNotFoundException e)
+ {
+ if (ChainingUserRegistrySynchronizer.logger.isWarnEnabled())
+ {
+ ChainingUserRegistrySynchronizer.logger
+ .warn("Exception during logging", e);
+ }
+ }
+ catch (IntrospectionException e)
+ {
+ if (ChainingUserRegistrySynchronizer.logger.isWarnEnabled())
+ {
+ ChainingUserRegistrySynchronizer.logger
+ .warn("Exception during logging", e);
+ }
+ }
+ catch (AttributeNotFoundException e)
+ {
+ if (ChainingUserRegistrySynchronizer.logger.isWarnEnabled())
+ {
+ ChainingUserRegistrySynchronizer.logger
+ .warn("Exception during logging", e);
+ }
+ }
+ catch (ReflectionException e)
+ {
+ if (ChainingUserRegistrySynchronizer.logger.isWarnEnabled())
+ {
+ ChainingUserRegistrySynchronizer.logger
+ .warn("Exception during logging", e);
+ }
+ }
+ catch (MBeanException e)
+ {
+ if (ChainingUserRegistrySynchronizer.logger.isWarnEnabled())
+ {
+ ChainingUserRegistrySynchronizer.logger
+ .warn("Exception during logging", e);
+ }
+ }
+ catch (IOException e)
+ {
+ if (ChainingUserRegistrySynchronizer.logger.isWarnEnabled())
+ {
+ ChainingUserRegistrySynchronizer.logger
+ .warn("Exception during logging", e);
+ }
+ }
+
+ }
if (ChainingUserRegistrySynchronizer.logger.isInfoEnabled())
{
ChainingUserRegistrySynchronizer.logger
.info("Synchronizing users and groups with user registry '" + id + "'");
}
- if (allowDeletions && ChainingUserRegistrySynchronizer.logger.isWarnEnabled())
+ if (isFullSync && ChainingUserRegistrySynchronizer.logger.isWarnEnabled())
{
ChainingUserRegistrySynchronizer.logger
.warn("Full synchronization with user registry '"
- + id
- + "'; some users and groups previously created by synchronization with this user registry may be removed.");
+ + id + "'");
+ if (allowDeletions)
+ {
+ ChainingUserRegistrySynchronizer.logger
+ .warn("Some users and groups previously created by synchronization with this user registry may be removed.");
+ }
+ else
+ {
+ ChainingUserRegistrySynchronizer.logger
+ .warn("Deletions are disabled. Users and groups removed from this registry will be logged only and will remain in the repository. Users previously found in a different registry will be moved in the repository rather than recreated.");
+ }
}
// Work out whether we should do the work in a separate transaction (it's most performant if we
// bunch it into small transactions, but if we are doing a sync on login, it has to be the same
@@ -432,13 +584,14 @@ public class ChainingUserRegistrySynchronizer extends AbstractLifecycleBean impl
boolean requiresNew = splitTxns
|| AlfrescoTransactionSupport.getTransactionReadState() == TxnReadState.TXN_READ_ONLY;
- syncWithPlugin(id, plugin, forceUpdate, allowDeletions, requiresNew, visitedZoneIds, allZoneIds);
+ syncWithPlugin(id, plugin, forceUpdate, isFullSync, requiresNew, visitedZoneIds, allZoneIds);
}
}
catch (NoSuchBeanDefinitionException e)
{
// Ignore and continue
}
+
}
}
catch (RuntimeException e)
@@ -583,7 +736,7 @@ public class ChainingUserRegistrySynchronizer extends AbstractLifecycleBean impl
* the user registry and updated locally. When false then each source is only queried for
* those users and groups modified since the most recent modification date of all the objects last
* queried from that same source.
- * @param allowDeletions
+ * @param isFullSync
* Should a complete set of user and group IDs be queried from the user registries in order to determine
* deletions? This parameter is independent of force as a separate query is run to process
* updates.
@@ -602,7 +755,7 @@ public class ChainingUserRegistrySynchronizer extends AbstractLifecycleBean impl
* or group needs to be 're-zoned'.
*/
private void syncWithPlugin(final String zone, UserRegistry userRegistry, boolean forceUpdate,
- boolean allowDeletions, boolean splitTxns, final Set visitedZoneIds, final Set allZoneIds)
+ boolean isFullSync, boolean splitTxns, final Set visitedZoneIds, final Set allZoneIds)
{
// Create a prefixed zone ID for use with the authority service
final String zoneId = AuthorityService.ZONE_AUTH_EXT_PREFIX + zone;
@@ -685,10 +838,24 @@ public class ChainingUserRegistrySynchronizer extends AbstractLifecycleBean impl
// Check whether the group is in any of the authentication chain zones
Set intersection = new TreeSet(groupZones);
intersection.retainAll(allZoneIds);
- if (intersection.isEmpty())
+ // Check whether the group is in any of the higher priority authentication chain zones
+ Set visited = new TreeSet(intersection);
+ visited.retainAll(visitedZoneIds);
+
+ if (groupZones.contains(zoneId))
{
- // The group exists, but not in a zone that's in the authentication chain. May be due to
- // upgrade or zone changes. Let's re-zone them
+ // The group already existed in this zone: update the group
+ updateGroup(group, true);
+ }
+ else if (!visited.isEmpty())
+ {
+ // A group that exists in a different zone with higher precedence
+ return;
+ }
+ else if (!allowDeletions || intersection.isEmpty())
+ {
+ // Deletions are disallowed or the group exists, but not in a zone that's in the authentication
+ // chain. May be due to upgrade or zone changes. Let's re-zone them
if (ChainingUserRegistrySynchronizer.logger.isWarnEnabled())
{
ChainingUserRegistrySynchronizer.logger.warn("Updating group '" + groupShortName
@@ -698,21 +865,12 @@ public class ChainingUserRegistrySynchronizer extends AbstractLifecycleBean impl
ChainingUserRegistrySynchronizer.this.authorityService.removeAuthorityFromZones(groupName,
groupZones);
ChainingUserRegistrySynchronizer.this.authorityService.addAuthorityToZones(groupName, zoneSet);
- }
- if (groupZones.contains(zoneId) || intersection.isEmpty())
- {
- // The group already existed in this zone or no valid zone: update the group
+
+ // The group now exists in this zone: update the group
updateGroup(group, true);
}
else
{
- // Check whether the group is in any of the higher priority authentication chain zones
- intersection.retainAll(visitedZoneIds);
- if (!intersection.isEmpty())
- {
- // A group that exists in a different zone with higher precedence
- return;
- }
// The group existed, but in a zone with lower precedence
if (ChainingUserRegistrySynchronizer.logger.isWarnEnabled())
{
@@ -824,8 +982,6 @@ public class ChainingUserRegistrySynchronizer extends AbstractLifecycleBean impl
{
if (!newChildPersons.remove(child))
{
- // Make sure each person with association changes features as a key in the creation map
- recordParentAssociationCreation(child, null);
recordParentAssociationDeletion(child, groupName);
}
}
@@ -849,10 +1005,14 @@ public class ChainingUserRegistrySynchronizer extends AbstractLifecycleBean impl
// Create new associations
for (String child : newChildPersons)
{
+ // Make sure each person with association changes features as a key in the deletion map
+ recordParentAssociationDeletion(child, null);
recordParentAssociationCreation(child, groupName);
}
for (String child : newChildGroups)
{
+ // Make sure each group with association changes features as a key in the deletion map
+ recordParentAssociationDeletion(child, null);
recordParentAssociationCreation(child, groupName);
}
}
@@ -1094,11 +1254,11 @@ public class ChainingUserRegistrySynchronizer extends AbstractLifecycleBean impl
}
}
- public void processGroups(UserRegistry userRegistry, boolean allowDeletions, boolean splitTxns)
+ public void processGroups(UserRegistry userRegistry, boolean isFullSync, boolean splitTxns)
{
// If we got back some groups, we have to cross reference them with the set of known authorities
- if (allowDeletions || !this.groupParentAssocsToCreate.isEmpty()
- || !this.personParentAssocsToCreate.isEmpty())
+ if (isFullSync || !this.groupParentAssocsToDelete.isEmpty()
+ || !this.personParentAssocsToDelete.isEmpty())
{
final Set allZonePersons = newPersonSet();
final Set allZoneGroups = new TreeSet();
@@ -1117,17 +1277,19 @@ public class ChainingUserRegistrySynchronizer extends AbstractLifecycleBean impl
}
}, true, splitTxns);
- final Set personDeletionCandidates = newPersonSet();
- personDeletionCandidates.addAll(allZonePersons);
-
- final Set groupDeletionCandidates = new TreeSet();
- groupDeletionCandidates.addAll(allZoneGroups);
-
allZoneGroups.addAll(this.groupsToCreate.keySet());
// Prune our set of authorities according to deletions
- if (allowDeletions)
+ if (isFullSync)
{
+ final Set personDeletionCandidates = newPersonSet();
+ personDeletionCandidates.addAll(allZonePersons);
+
+ final Set groupDeletionCandidates = new TreeSet();
+ groupDeletionCandidates.addAll(allZoneGroups);
+
+ this.deletionCandidates = new TreeSet();
+
for (String person : userRegistry.getPersonNames())
{
personDeletionCandidates.remove(person);
@@ -1141,14 +1303,80 @@ public class ChainingUserRegistrySynchronizer extends AbstractLifecycleBean impl
this.deletionCandidates = new TreeSet();
this.deletionCandidates.addAll(personDeletionCandidates);
this.deletionCandidates.addAll(groupDeletionCandidates);
+ if (allowDeletions)
+ {
+ allZonePersons.removeAll(personDeletionCandidates);
+ allZoneGroups.removeAll(groupDeletionCandidates);
+ }
+ else
+ {
+ if (!personDeletionCandidates.isEmpty())
+ {
+ ChainingUserRegistrySynchronizer.logger.warn("The following missing users are not being deleted as allowDeletions == false");
+ for (String person : personDeletionCandidates)
+ {
+ ChainingUserRegistrySynchronizer.logger.warn(" " + person);
+ }
+ }
+ if (!groupDeletionCandidates.isEmpty())
+ {
+ ChainingUserRegistrySynchronizer.logger.warn("The following missing groups are not being deleted as allowDeletions == false");
+ for (String group : groupDeletionCandidates)
+ {
+ ChainingUserRegistrySynchronizer.logger.warn(" " + group);
+ }
+ }
+
+ // Complete association deletion information by scanning deleted groups
+ BatchProcessor groupScanner = new BatchProcessor(zone
+ + " Missing Authority Scanning",
+ ChainingUserRegistrySynchronizer.this.transactionService
+ .getRetryingTransactionHelper(), this.deletionCandidates,
+ ChainingUserRegistrySynchronizer.this.workerThreads, 20,
+ ChainingUserRegistrySynchronizer.this.applicationEventPublisher,
+ ChainingUserRegistrySynchronizer.logger,
+ ChainingUserRegistrySynchronizer.this.loggingInterval);
+ groupScanner.process(new BaseBatchProcessWorker()
+ {
- allZonePersons.removeAll(personDeletionCandidates);
- allZoneGroups.removeAll(groupDeletionCandidates);
+ @Override
+ public String getIdentifier(String entry)
+ {
+ return entry;
+ }
+
+ @Override
+ public void process(String authority) throws Throwable
+ {
+ // Disassociate it from this zone, allowing it to be reclaimed by something further down the chain
+ ChainingUserRegistrySynchronizer.this.authorityService.removeAuthorityFromZones(authority,
+ Collections.singleton(zoneId));
+
+ // For groups, remove all members
+ if (AuthorityType.getAuthorityType(authority) != AuthorityType.USER)
+ {
+ String groupShortName = ChainingUserRegistrySynchronizer.this.authorityService
+ .getShortName(authority);
+ String groupDisplayName = ChainingUserRegistrySynchronizer.this.authorityService
+ .getAuthorityDisplayName(authority);
+ NodeDescription dummy = new NodeDescription(groupShortName + " (Deleted)");
+ PropertyMap dummyProperties = dummy.getProperties();
+ dummyProperties.put(ContentModel.PROP_AUTHORITY_NAME, authority);
+ if (groupDisplayName != null)
+ {
+ dummyProperties.put(ContentModel.PROP_AUTHORITY_DISPLAY_NAME, groupDisplayName);
+ }
+ updateGroup(dummy, true);
+ }
+ }
+ }, splitTxns);
+
+ }
}
// Prune the group associations now that we have complete information
this.groupParentAssocsToCreate.keySet().retainAll(allZoneGroups);
- logRetainParentAssociations(this.groupParentAssocsToDelete, allZoneGroups);
+ logRetainParentAssociations(this.groupParentAssocsToCreate, allZoneGroups);
this.finalGroupChildAssocs.keySet().retainAll(allZoneGroups);
// Pruning person associations will have to wait until we have passed over all persons and built up
@@ -1234,17 +1462,17 @@ public class ChainingUserRegistrySynchronizer extends AbstractLifecycleBean impl
}
// Remove all the associations we have already dealt with
- this.personParentAssocsToCreate.keySet().removeAll(this.personsProcessed);
+ this.personParentAssocsToDelete.keySet().removeAll(this.personsProcessed);
// Filter out associations to authorities that simply can't exist (and log if debugging is enabled)
logRetainParentAssociations(this.personParentAssocsToCreate, this.allZonePersons);
// Update associations to persons not updated themselves
- if (!this.personParentAssocsToCreate.isEmpty())
+ if (!this.personParentAssocsToDelete.isEmpty())
{
BatchProcessor>> groupCreator = new BatchProcessor>>(
zone + " Person Association", ChainingUserRegistrySynchronizer.this.transactionService
- .getRetryingTransactionHelper(), this.personParentAssocsToCreate.entrySet(),
+ .getRetryingTransactionHelper(), this.personParentAssocsToDelete.entrySet(),
ChainingUserRegistrySynchronizer.this.workerThreads, 20,
ChainingUserRegistrySynchronizer.this.applicationEventPublisher,
ChainingUserRegistrySynchronizer.logger,
@@ -1340,7 +1568,7 @@ public class ChainingUserRegistrySynchronizer extends AbstractLifecycleBean impl
final Analyzer groupAnalyzer = new Analyzer(lastModifiedMillis);
int groupProcessedCount = groupProcessor.process(groupAnalyzer, splitTxns);
- groupAnalyzer.processGroups(userRegistry, allowDeletions, splitTxns);
+ groupAnalyzer.processGroups(userRegistry, isFullSync, splitTxns);
// Process persons and their parent associations
@@ -1413,10 +1641,19 @@ public class ChainingUserRegistrySynchronizer extends AbstractLifecycleBean impl
// Check whether the user is in any of the authentication chain zones
Set intersection = new TreeSet(zones);
intersection.retainAll(allZoneIds);
- if (intersection.size() == 0)
+ // Check whether the user is in any of the higher priority authentication chain zones
+ Set visited = new TreeSet(intersection);
+ visited.retainAll(visitedZoneIds);
+ if (visited.size() > 0)
{
- // The person exists, but not in a zone that's in the authentication chain. May be due
- // to upgrade or zone changes. Let's re-zone them
+ // A person that exists in a different zone with higher precedence - ignore
+ return;
+ }
+
+ else if (!allowDeletions || intersection.isEmpty())
+ {
+ // The person exists, but in a different zone. Either deletions are disallowed or the zone is
+ // not in the authentication chain. May be due to upgrade or zone changes. Let's re-zone them
if (ChainingUserRegistrySynchronizer.logger.isWarnEnabled())
{
ChainingUserRegistrySynchronizer.logger.warn("Updating user '" + personName
@@ -1431,14 +1668,6 @@ public class ChainingUserRegistrySynchronizer extends AbstractLifecycleBean impl
}
else
{
- // Check whether the user is in any of the higher priority authentication chain zones
- intersection.retainAll(visitedZoneIds);
- if (intersection.size() > 0)
- {
- // A person that exists in a different zone with higher precedence - ignore
- return;
- }
-
// The person existed, but in a zone with lower precedence
if (ChainingUserRegistrySynchronizer.logger.isWarnEnabled())
{
@@ -1491,7 +1720,7 @@ public class ChainingUserRegistrySynchronizer extends AbstractLifecycleBean impl
// Delete authorities if we have complete information for the zone
Set deletionCandidates = groupAnalyzer.getDeletionCandidates();
- if (allowDeletions && !deletionCandidates.isEmpty())
+ if (isFullSync && allowDeletions && !deletionCandidates.isEmpty())
{
BatchProcessor authorityDeletionProcessor = new BatchProcessor(
zone + " Authority Deletion", this.transactionService.getRetryingTransactionHelper(),
diff --git a/source/java/org/alfresco/repo/security/sync/ChainingUserRegistrySynchronizerTest.java b/source/java/org/alfresco/repo/security/sync/ChainingUserRegistrySynchronizerTest.java
index 3da10f46d9..ae5d5545ff 100644
--- a/source/java/org/alfresco/repo/security/sync/ChainingUserRegistrySynchronizerTest.java
+++ b/source/java/org/alfresco/repo/security/sync/ChainingUserRegistrySynchronizerTest.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005-2010 Alfresco Software Limited.
+ * Copyright (C) 2005-2012 Alfresco Software Limited.
*
* This file is part of Alfresco
*
@@ -208,7 +208,19 @@ public class ChainingUserRegistrySynchronizerTest extends TestCase
*/
public void tearDownTestUsersAndGroups() throws Exception
{
- // Wipe out everything that was in Z1 and Z2
+ // Re-zone everything that may have gone astray
+ this.applicationContextManager.setUserRegistries(new MockUserRegistry("Z0", new NodeDescription[]
+ {
+ newPerson("U1"), newPerson("U2"), newPerson("U3"), newPerson("U4"), newPerson("U5"), newPerson("U6"),
+ newPerson("U7")
+ }, new NodeDescription[]
+ {
+ newGroup("G1"), newGroup("G2"), newGroup("G3"), newGroup("G4"), newGroup("G5"), newGroup("G6"),
+ newGroup("G7")
+ }), new MockUserRegistry("Z1", new NodeDescription[] {}, new NodeDescription[] {}), new MockUserRegistry("Z2",
+ new NodeDescription[] {}, new NodeDescription[] {}));
+ this.synchronizer.synchronize(true, true, true);
+ // Wipe out everything that was in Z0 - Z2
this.applicationContextManager.setUserRegistries(new MockUserRegistry("Z0", new NodeDescription[] {},
new NodeDescription[] {}), new MockUserRegistry("Z1", new NodeDescription[] {},
new NodeDescription[] {}), new MockUserRegistry("Z2", new NodeDescription[] {},
@@ -382,6 +394,53 @@ public class ChainingUserRegistrySynchronizerTest extends TestCase
tearDownTestUsersAndGroups();
}
+ /**
+ * Tests a forced update of the test users and groups with deletions disabled. No users or groups should be deleted,
+ * whether or not they move registry. Groups that would have been deleted should have no members and should only be
+ * in the default zone.
+ *
+ * @throws Exception
+ * the exception
+ */
+ public void testForcedUpdateWithoutDeletions() throws Exception
+ {
+ UserRegistrySynchronizer synchronizer = (UserRegistrySynchronizer) ChainingUserRegistrySynchronizerTest.context
+ .getBean("testUserRegistrySynchronizerPreventDeletions");
+ setUpTestUsersAndGroups();
+ this.applicationContextManager.setUserRegistries(new MockUserRegistry("Z0", new NodeDescription[]
+ {
+ newPerson("U2"), newPerson("U3"), newPerson("U4"),
+ }, new NodeDescription[]
+ {
+ newGroup("G1"), newGroup("G2"),
+ }), new MockUserRegistry("Z1", new NodeDescription[]
+ {
+ newPerson("U5"), newPerson("u6"),
+ }, new NodeDescription[] {}), new MockUserRegistry("Z2", new NodeDescription[]
+ {
+ newPerson("U6"),
+ }, new NodeDescription[] {}));
+ synchronizer.synchronize(true, true, true);
+ this.retryingTransactionHelper.doInTransaction(new RetryingTransactionCallback