Merge merge-2.4/RM-2526_BlendedDispositionSchedule into release/V2.4.

This commit is contained in:
Tom Page
2016-10-24 21:23:09 +01:00
16 changed files with 1065 additions and 459 deletions

View File

@@ -389,6 +389,11 @@
<type>d:date</type>
<mandatory>false</mandatory>
</property>
<property name="rma:manuallySetAsOf">
<title>Manually Set Disposition Date Flag</title>
<type>d:boolean</type>
<default>false</default>
</property>
<property name="rma:dispositionEventsEligible">
<title>Disposition Events Eligible</title>
<type>d:boolean</type>

View File

@@ -39,12 +39,6 @@
<bean id="RecordsManagementServiceRegistry" class="org.alfresco.module.org_alfresco_module_rm.RecordsManagementServiceRegistryImpl" />
<!-- Disposition selection strategy -->
<bean id="org_alfresco_module_rm_dispositionSelectionStrategy"
class="org.alfresco.module.org_alfresco_module_rm.disposition.DispositionSelectionStrategy" >
<property name="dispositionService" ref="dispositionService"/>
</bean>
<!-- Vital Record Service -->
<bean id="vitalRecordService" parent="baseService" class="org.alfresco.module.org_alfresco_module_rm.vital.VitalRecordServiceImpl">
@@ -96,9 +90,6 @@
<property name="recordFolderService" ref="RecordFolderService"/>
<property name="recordService" ref="RecordService"/>
<property name="freezeService" ref="FreezeService"/>
<property name="dispositionSelectionStrategy">
<ref local="org_alfresco_module_rm_dispositionSelectionStrategy" />
</property>
</bean>
<bean id="DispositionService" class="org.springframework.aop.framework.ProxyFactoryBean">
@@ -139,6 +130,7 @@
org.alfresco.module.org_alfresco_module_rm.disposition.DispositionService.registerDispositionProperty=RM_ALLOW
org.alfresco.module.org_alfresco_module_rm.disposition.DispositionService.getDispositionProperties=RM_ALLOW
org.alfresco.module.org_alfresco_module_rm.disposition.DispositionService.getDispositionSchedule=RM.Read.0
org.alfresco.module.org_alfresco_module_rm.disposition.DispositionService.getOriginDispositionSchedule=RM.Read.0
org.alfresco.module.org_alfresco_module_rm.disposition.DispositionService.getAssociatedDispositionSchedule=RM.Read.0
org.alfresco.module.org_alfresco_module_rm.disposition.DispositionService.getAssociatedRecordsManagementContainer=RM_ALLOW
org.alfresco.module.org_alfresco_module_rm.disposition.DispositionService.hasDisposableItems=RM_ALLOW

View File

@@ -5,10 +5,10 @@
URL parameter batchsize is mandatory, and represents the maximum number of records that can be processed in one transaction.<br/>
URL parameter maxProcessedRecords is optional, and represents the maximum number of records that will be processed in one request.<br/>
URL parameter export is optional, and if its value is true, will export the processed records into a csv file.<br/>
URL parameter parentNodeRef is optional, and represents the nodeRef of the folder that contains the records to be processed.<br/>
URL parameter parentNodeRef is optional, and represents the nodeRef of the folder that contains the records to be processed.<br/>
]]>
</description>
<url>/api/rm/rm-dynamicauthorities?batchsize={batchsize}&amp;maxProcessedRecords={maxProcessedRecords?}&amp;export={export?}&amp;parentNodeRef={parentNodeRef?}</url>
<url>/api/rm/rm-dynamicauthorities?batchsize={batchsize}&amp;maxProcessedRecords={maxProcessedRecords?}&amp;export={export?}&amp;parentNodeRef={parentNodeRef?}</url>
<format default="json">argument</format>
<authentication>admin</authentication>
<transaction allow="readonly">required</transaction>

View File

@@ -27,6 +27,8 @@
package org.alfresco.module.org_alfresco_module_rm.action.impl;
import static org.apache.commons.lang3.BooleanUtils.isNotTrue;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Date;
@@ -194,7 +196,8 @@ public class BroadcastDispositionActionDefinitionUpdateAction extends RMActionEx
{
// the change does effect the nextAction for this node
// so go ahead and determine what needs updating
if (changedProps.contains(PROP_DISPOSITION_PERIOD))
if (changedProps.contains(PROP_DISPOSITION_PERIOD)
&& isNotTrue((Boolean) getNodeService().getProperty(nextAction.getNodeRef(), PROP_MANUALLY_SET_AS_OF)))
{
persistPeriodChanges(dispositionActionDef, nextAction);
}

View File

@@ -74,6 +74,7 @@ public class EditDispositionActionAsOfDateAction extends RMActionExecuterAbstrac
if (da != null)
{
getNodeService().setProperty(da.getNodeRef(), PROP_DISPOSITION_AS_OF, asOfDate);
getNodeService().setProperty(da.getNodeRef(), PROP_MANUALLY_SET_AS_OF, true);
}
}
else

View File

@@ -242,6 +242,17 @@ public interface DispositionService
* @param nodeRef node reference
*/
void refreshDispositionAction(NodeRef nodeRef);
/**
* Gets date of the disposition action for the given
* disposition schedule with the given action name
*
* @param record
* @param dispositionSchedule nodeRef
* @param dispositionActionName
* @return date
*/
Date getDispositionActionDate(NodeRef record, NodeRef dispositionSchedule, String dispositionActionName);
/**
* Compute the "disposition as of" date (if necessary) for a disposition action and a node.
@@ -253,4 +264,13 @@ public interface DispositionService
*/
Date calculateAsOfDate(NodeRef nodeRef, DispositionActionDefinition dispositionActionDefinition,
boolean allowContextFromAsOf);
/**
* Gets the origin disposition schedule for the record, not the calculated one
* in case of multiple dispositions applied to record
*
* @param nodeRef record
* @return the initial disposition
*/
DispositionSchedule getOriginDispositionSchedule(NodeRef nodeRef);
}

View File

@@ -27,6 +27,8 @@
package org.alfresco.module.org_alfresco_module_rm.disposition;
import static org.apache.commons.lang3.BooleanUtils.isNotTrue;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collection;
@@ -36,6 +38,7 @@ import java.util.List;
import java.util.Map;
import org.alfresco.error.AlfrescoRuntimeException;
import org.alfresco.model.ContentModel;
import org.alfresco.module.org_alfresco_module_rm.RecordsManagementPolicies;
import org.alfresco.module.org_alfresco_module_rm.RecordsManagementServiceRegistry;
import org.alfresco.module.org_alfresco_module_rm.disposition.property.DispositionProperty;
@@ -47,6 +50,7 @@ import org.alfresco.module.org_alfresco_module_rm.model.RecordsManagementModel;
import org.alfresco.module.org_alfresco_module_rm.record.RecordService;
import org.alfresco.module.org_alfresco_module_rm.recordfolder.RecordFolderService;
import org.alfresco.module.org_alfresco_module_rm.util.ServiceBaseImpl;
import org.alfresco.repo.dictionary.types.period.Immediately;
import org.alfresco.repo.policy.BehaviourFilter;
import org.alfresco.repo.policy.annotation.Behaviour;
import org.alfresco.repo.policy.annotation.BehaviourBean;
@@ -79,15 +83,26 @@ public class DispositionServiceImpl extends ServiceBaseImpl
/** Logger */
private static final Logger LOGGER = LoggerFactory.getLogger(DispositionServiceImpl.class);
/** Transaction mode for setting next action */
public enum WriteMode
{
/** Do not update any data. */
READ_ONLY,
/** Only set the "disposition as of" date. */
DATE_ONLY,
/**
* Set the "disposition as of" date and the name of the next action. This only happens during the creation of a
* disposition schedule impl node under a record or folder.
*/
DATE_AND_NAME
};
/** Behaviour filter */
private BehaviourFilter behaviourFilter;
/** Records management service registry */
private RecordsManagementServiceRegistry serviceRegistry;
/** Disposition selection strategy */
private DispositionSelectionStrategy dispositionSelectionStrategy;
/** File plan service */
private FilePlanService filePlanService;
@@ -177,16 +192,6 @@ public class DispositionServiceImpl extends ServiceBaseImpl
this.freezeService = freezeService;
}
/**
* Set the dispositionSelectionStrategy bean.
*
* @param dispositionSelectionStrategy
*/
public void setDispositionSelectionStrategy(DispositionSelectionStrategy dispositionSelectionStrategy)
{
this.dispositionSelectionStrategy = dispositionSelectionStrategy;
}
/**
* Behavior to initialize the disposition schedule of a newly filed record.
*
@@ -276,31 +281,68 @@ public class DispositionServiceImpl extends ServiceBaseImpl
* @see org.alfresco.module.org_alfresco_module_rm.disposition.DispositionService#getDispositionSchedule(org.alfresco.service.cmr.repository.NodeRef)
*/
@Override
public DispositionSchedule getDispositionSchedule(NodeRef nodeRef)
public DispositionSchedule getDispositionSchedule(final NodeRef nodeRef)
{
DispositionSchedule di = null;
NodeRef diNodeRef = null;
DispositionSchedule ds = null;
NodeRef dsNodeRef = null;
if (isRecord(nodeRef))
{
// Get the record folders for the record
List<NodeRef> recordFolders = recordFolderService.getRecordFolders(nodeRef);
// At this point, we may have disposition instruction objects from 1..n folders.
diNodeRef = dispositionSelectionStrategy.selectDispositionScheduleFrom(recordFolders);
DispositionSchedule originDispositionSchedule = getOriginDispositionSchedule(nodeRef);
// if the initial disposition schedule of the record is folder based
if (originDispositionSchedule == null ||
isNotTrue(originDispositionSchedule.isRecordLevelDisposition()))
{
return null;
}
final NextActionFromDisposition dsNextAction = getDispositionActionByNameForRecord(nodeRef);
if (dsNextAction != null)
{
final NodeRef action = dsNextAction.getNextActionNodeRef();
if (isNotTrue((Boolean)nodeService.getProperty(action, PROP_MANUALLY_SET_AS_OF)))
{
if (!dsNextAction.getWriteMode().equals(WriteMode.READ_ONLY))
{
final String dispositionActionName = dsNextAction.getNextActionName();
final Date dispositionActionDate = dsNextAction.getNextActionDateAsOf();
AuthenticationUtil.runAsSystem(new RunAsWork<Void>()
{
@Override
public Void doWork()
{
nodeService.setProperty(action, PROP_DISPOSITION_AS_OF, dispositionActionDate);
if (dsNextAction.getWriteMode().equals(WriteMode.DATE_AND_NAME))
{
nodeService.setProperty(action, PROP_DISPOSITION_ACTION_NAME, dispositionActionName);
}
return null;
}
});
}
}
dsNodeRef = dsNextAction.getDispositionNodeRef();
}
}
else
{
// Get the disposition instructions for the node reference provided
diNodeRef = getDispositionScheduleImpl(nodeRef);
dsNodeRef = getDispositionScheduleImpl(nodeRef);
}
if (diNodeRef != null)
if (dsNodeRef != null)
{
di = new DispositionScheduleImpl(serviceRegistry, nodeService, diNodeRef);
ds = new DispositionScheduleImpl(serviceRegistry, nodeService, dsNodeRef);
}
return di;
return ds;
}
/**
* This method returns a NodeRef
* Gets the disposition instructions
@@ -322,6 +364,28 @@ public class DispositionServiceImpl extends ServiceBaseImpl
}
return result;
}
/**
 * Walks up the primary-parent chain from the given node until a record category is
 * found, then returns the disposition schedule associated with that category. This is
 * the record's "origin" schedule, as opposed to the one selected when multiple
 * dispositions apply to the record.
 *
 * @param nodeRef the record (or intermediate container) to start from
 * @return the origin disposition schedule, or null if no ancestor record category
 *         with an associated schedule exists
 */
public DispositionSchedule getOriginDispositionSchedule(NodeRef nodeRef)
{
    NodeRef ancestor = this.nodeService.getPrimaryParent(nodeRef).getParentRef();
    while (ancestor != null)
    {
        if (filePlanService.isRecordCategory(ancestor))
        {
            NodeRef scheduleNode = getAssociatedDispositionScheduleImpl(ancestor);
            if (scheduleNode == null)
            {
                return null;
            }
            return new DispositionScheduleImpl(serviceRegistry, nodeService, scheduleNode);
        }
        ancestor = this.nodeService.getPrimaryParent(ancestor).getParentRef();
    }
    return null;
}
/**
* @see org.alfresco.module.org_alfresco_module_rm.disposition.DispositionService#getAssociatedDispositionSchedule(org.alfresco.service.cmr.repository.NodeRef)
@@ -629,8 +693,14 @@ public class DispositionServiceImpl extends ServiceBaseImpl
* @param dispositionActionDefinition disposition action definition
* @param allowContextFromAsOf true if the context date is allowed to be obtained from the disposition "as of" property.
*/
private void initialiseDispositionAction(NodeRef nodeRef, DispositionActionDefinition dispositionActionDefinition, boolean allowContextFromAsOf)
private DispositionAction initialiseDispositionAction(NodeRef nodeRef, DispositionActionDefinition dispositionActionDefinition, boolean allowContextFromAsOf)
{
List<ChildAssociationRef> childAssocs = nodeService.getChildAssocs(nodeRef, ASSOC_NEXT_DISPOSITION_ACTION, ASSOC_NEXT_DISPOSITION_ACTION, 1, true);
if (childAssocs != null && childAssocs.size() > 0)
{
return new DispositionActionImpl(serviceRegistry, childAssocs.get(0).getChildRef());
}
// Create the properties
Map<QName, Serializable> props = new HashMap<QName, Serializable>(10);
@@ -660,6 +730,7 @@ public class DispositionServiceImpl extends ServiceBaseImpl
// For every event create an entry on the action
da.addEventCompletionDetails(event);
}
return da;
}
/**
@@ -692,9 +763,16 @@ public class DispositionServiceImpl extends ServiceBaseImpl
}
else
{
// for now use 'NOW' as the default context date
// TODO set the default period property ... cut off date or last disposition date depending on context
contextDate = new Date();
if (period.getPeriodType().equals(Immediately.PERIOD_TYPE))
{
contextDate = (Date)nodeService.getProperty(nodeRef, ContentModel.PROP_CREATED);
}
else
{
// for now use 'NOW' as the default context date
// TODO set the default period property ... cut off date or last disposition date depending on context
contextDate = new Date();
}
}
// Calculate the as of date
@@ -905,6 +983,7 @@ public class DispositionServiceImpl extends ServiceBaseImpl
// Get the current action
String currentADId = (String) nodeService.getProperty(currentDispositionAction, PROP_DISPOSITION_ACTION_ID);
currentDispositionActionDefinition = di.getDispositionActionDefinition(currentADId);
// When the record has multiple disposition schedules the current disposition action may not be found by id
// In this case it will be searched by name
if(currentDispositionActionDefinition == null)
@@ -999,6 +1078,24 @@ public class DispositionServiceImpl extends ServiceBaseImpl
}
}
/**
 * Gets the "as of" date that the named disposition action would have for the given
 * record, calculated against the supplied disposition schedule.
 *
 * @param record the record node to calculate the date for
 * @param dispositionSchedule node reference of the disposition schedule to inspect
 * @param dispositionActionName name of the disposition action to look up
 * @return the calculated "as of" date, or null if no matching action definition exists
 */
public Date getDispositionActionDate(NodeRef record, NodeRef dispositionSchedule, String dispositionActionName)
{
DispositionSchedule ds = new DispositionScheduleImpl(serviceRegistry, nodeService, dispositionSchedule);
// Scan the schedule's child associations for the action definition with this name.
List<ChildAssociationRef> assocs = nodeService.getChildAssocs(dispositionSchedule);
if (assocs != null && assocs.size() > 0)
{
for (ChildAssociationRef assoc : assocs)
{
// NOTE(review): contains() rather than equals() — presumably the child assoc local
// name embeds the action name; a substring match could hit the wrong action when
// one action name is a prefix of another. Confirm this is intended.
if (assoc != null && assoc.getQName().getLocalName().contains(dispositionActionName))
{
DispositionActionDefinition actionDefinition = ds.getDispositionActionDefinition(assoc.getChildRef().getId());
return calculateAsOfDate(record, actionDefinition, true);
}
}
}
return null;
}
/**
* Helper method to determine if a node is frozen or has frozen children
*
@@ -1046,4 +1143,169 @@ public class DispositionServiceImpl extends ServiceBaseImpl
}
});
}
/**
 * Calculates the next disposition action for a record.
 *
 * @param record the record node
 * @return the next disposition action (name, date) together with the disposition
 *         schedule it was derived from, or null if all actions on the record have
 *         been completed or no schedule applies
 */
protected NextActionFromDisposition getDispositionActionByNameForRecord(NodeRef record)
{
    List<NodeRef> recordFolders = recordFolderService.getRecordFolders(record);
    DispositionAction nextDispositionAction = getNextDispositionAction(record);
    if (nextDispositionAction != null)
    {
        // The record already has a next action: recalculate it against all applicable schedules.
        return getNextDispositionAction(record, recordFolders, nextDispositionAction);
    }
    if (getLastCompletedDispostionAction(record) != null)
    {
        // No next action but at least one completed one: the schedule has run its course.
        return null;
    }
    // No disposition action on the record yet: derive the first one.
    return getFirstDispositionAction(record, recordFolders);
}
/**
 * Recalculates the next disposition action when the record already has one, taking the
 * latest "disposition as of" date found across the schedules of all record folders the
 * record is filed in.
 *
 * @param record the record node
 * @param recordFolders the record folders the record is filed in
 * @param nextDispositionAction the record's existing next disposition action
 * @return the next disposition action together with the disposition schedule that
 *         supplied the latest date, or null if no folder had an applicable schedule
 */
private NextActionFromDisposition getNextDispositionAction(NodeRef record, List<NodeRef> recordFolders, DispositionAction nextDispositionAction)
{
String recordNextDispositionActionName = nextDispositionAction.getName();
Date recordNextDispositionActionDate = nextDispositionAction.getAsOfDate();
// We're looking for the latest date, so initially start with a very early one.
Date nextDispositionActionDate = new Date(Long.MIN_VALUE);
NodeRef dispositionNodeRef = null;
// Find the latest "disposition as of" date from all the schedules this record is subject to.
for (NodeRef folder : recordFolders)
{
NodeRef dsNodeRef = getDispositionScheduleImpl(folder);
if (dsNodeRef != null)
{
Date dispActionDate = getDispositionActionDate(record, dsNodeRef, recordNextDispositionActionName);
// A null calculated date counts as "latest possible"; otherwise keep the later date.
if (dispActionDate == null || (nextDispositionActionDate != null
&& nextDispositionActionDate.before(dispActionDate)))
{
nextDispositionActionDate = dispActionDate;
dispositionNodeRef = dsNodeRef;
if (dispActionDate == null)
{
// Treat null as the latest date possible (so stop searching further).
break;
}
}
}
}
if (dispositionNodeRef == null)
{
// No folder carried a disposition schedule.
return null;
}
// Only write the date back if the existing one is earlier than the calculated one.
WriteMode mode = determineWriteMode(recordNextDispositionActionDate, nextDispositionActionDate);
return new NextActionFromDisposition(dispositionNodeRef, nextDispositionAction.getNodeRef(),
recordNextDispositionActionName, nextDispositionActionDate, mode);
}
/**
 * Determines what should be updated for an existing disposition schedule impl. The
 * "as of" date is only updated when the existing date is earlier than the calculated
 * one; a null date on either side is treated as the latest possible date.
 *
 * @param recordNextDispositionActionDate the next action date found on the record node
 *                                        (or folder node)
 * @param nextDispositionActionDate the next action date calculated from the current
 *                                  disposition schedule(s) affecting the node
 * @return READ_ONLY if nothing should be updated, or DATE_ONLY if the date needs updating
 */
private WriteMode determineWriteMode(Date recordNextDispositionActionDate, Date nextDispositionActionDate)
{
    // A null date stands in for "as late as possible".
    final Date latestPossible = new Date(Long.MAX_VALUE);
    final Date currentDate = (recordNextDispositionActionDate == null) ? latestPossible : recordNextDispositionActionDate;
    final Date calculatedDate = (nextDispositionActionDate == null) ? latestPossible : nextDispositionActionDate;
    // Only an existing date that is too early requires an update.
    return currentDate.before(calculatedDate) ? WriteMode.DATE_ONLY : WriteMode.READ_ONLY;
}
/**
 * Calculates the first disposition action for a record that does not yet have one, by
 * inspecting the disposition schedules of all record folders the record is filed in
 * and keeping the latest "as of" date found.
 *
 * @param record the record node
 * @param recordFolders the record folders the record is filed in
 * @return the first disposition action together with the associated disposition
 *         schedule, or null if no applicable schedule/action could be found
 */
private NextActionFromDisposition getFirstDispositionAction(NodeRef record, List<NodeRef> recordFolders)
{
NodeRef newAction = null;
String newDispositionActionName = null;
// We're looking for the latest date, so start with a very early one.
Date newDispositionActionDateAsOf = new Date(Long.MIN_VALUE);
NodeRef dispositionNodeRef = null;
for (NodeRef folder : recordFolders)
{
NodeRef folderDS = getDispositionScheduleImpl(folder);
if (folderDS != null)
{
DispositionSchedule ds = new DispositionScheduleImpl(serviceRegistry, nodeService, folderDS);
List<DispositionActionDefinition> dispositionActionDefinitions = ds.getDispositionActionDefinitions();
if (dispositionActionDefinitions != null && dispositionActionDefinitions.size() > 0)
{
DispositionActionDefinition firstDispositionActionDef = dispositionActionDefinitions.get(0);
// NOTE(review): dispositionNodeRef is overwritten for every folder that has action
// definitions, so the returned schedule may not be the one whose date/name is
// ultimately selected below — confirm this is intended.
dispositionNodeRef = folderDS;
if (newAction == null)
{
// First applicable schedule: create (or fetch) the next-action node itself.
NodeRef recordOrFolder = record;
if (!ds.isRecordLevelDisposition())
{
recordOrFolder = folder;
}
DispositionAction firstDispositionAction = initialiseDispositionAction(recordOrFolder, firstDispositionActionDef, true);
newAction = firstDispositionAction.getNodeRef();
newDispositionActionName = (String)nodeService.getProperty(newAction, PROP_DISPOSITION_ACTION_NAME);
newDispositionActionDateAsOf = firstDispositionAction.getAsOfDate();
}
else if (firstDispositionActionDef.getPeriod() != null)
{
// Subsequent schedules: keep the latest calculated "as of" date (null counts as latest).
Date firstActionDate = calculateAsOfDate(record, firstDispositionActionDef, true);
if (firstActionDate == null || (newDispositionActionDateAsOf != null
&& newDispositionActionDateAsOf.before(firstActionDate)))
{
newDispositionActionName = firstDispositionActionDef.getName();
newDispositionActionDateAsOf = firstActionDate;
if (firstActionDate == null)
{
// Treat null as the latest date possible, so there's no point searching further.
break;
}
}
}
}
}
}
if (newDispositionActionName == null || dispositionNodeRef == null || newAction == null)
{
return null;
}
return new NextActionFromDisposition(dispositionNodeRef, newAction,
newDispositionActionName, newDispositionActionDateAsOf, WriteMode.DATE_AND_NAME);
}
}

View File

@@ -0,0 +1,106 @@
/*
* #%L
* Alfresco Records Management Module
* %%
* Copyright (C) 2005 - 2016 Alfresco Software Limited
* %%
* This file is part of the Alfresco software.
* -
* If the software was purchased under a paid Alfresco license, the terms of
* the paid license agreement will prevail. Otherwise, the software is
* provided under the following open source license terms:
* -
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* -
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
* -
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
* #L%
*/
package org.alfresco.module.org_alfresco_module_rm.disposition;
import java.util.Date;
import org.alfresco.module.org_alfresco_module_rm.disposition.DispositionServiceImpl.WriteMode;
import org.alfresco.service.cmr.repository.NodeRef;
/**
 * Value object describing the next disposition action calculated for a node: the
 * disposition schedule it came from, the next-action node itself, the action's name,
 * its "as of" date, and how much of that information may be written back.
 */
public class NextActionFromDisposition
{
    /** The disposition schedule node the next action was derived from. */
    private NodeRef dispositionNodeRef;

    /** The node representing the next disposition action. */
    private NodeRef nextActionNodeRef;

    /** The name of the next disposition action. */
    private String nextActionName;

    /** The "disposition as of" date of the next action. */
    private Date nextActionDateAsOf;

    /** What the disposition service is allowed to persist for this action. */
    private WriteMode writeMode;

    public NextActionFromDisposition(NodeRef dispositionNodeRef, NodeRef nextActionNodeRef, String nextActionName, Date nextActionDateAsOf,
        WriteMode writeMode)
    {
        this.dispositionNodeRef = dispositionNodeRef;
        this.nextActionNodeRef = nextActionNodeRef;
        this.nextActionName = nextActionName;
        this.nextActionDateAsOf = nextActionDateAsOf;
        this.writeMode = writeMode;
    }

    public NodeRef getDispositionNodeRef()
    {
        return dispositionNodeRef;
    }

    public void setDispositionNodeRef(NodeRef dispositionNodeRef)
    {
        this.dispositionNodeRef = dispositionNodeRef;
    }

    public NodeRef getNextActionNodeRef()
    {
        return nextActionNodeRef;
    }

    public void setNextActionNodeRef(NodeRef nextActionNodeRef)
    {
        this.nextActionNodeRef = nextActionNodeRef;
    }

    public String getNextActionName()
    {
        return nextActionName;
    }

    public void setNextActionName(String nextActionName)
    {
        this.nextActionName = nextActionName;
    }

    public Date getNextActionDateAsOf()
    {
        return nextActionDateAsOf;
    }

    public void setNextActionDateAsOf(Date nextActionDateAsOf)
    {
        this.nextActionDateAsOf = nextActionDateAsOf;
    }

    public WriteMode getWriteMode()
    {
        return writeMode;
    }

    public void setWriteMode(WriteMode writeMode)
    {
        this.writeMode = writeMode;
    }
}

View File

@@ -27,6 +27,8 @@
package org.alfresco.module.org_alfresco_module_rm.disposition.property;
import static org.apache.commons.lang3.BooleanUtils.isNotTrue;
import java.io.Serializable;
import java.util.Date;
import java.util.Map;
@@ -220,7 +222,11 @@ public class DispositionProperty extends BaseBehaviourBean
// update asOf date on the disposition action based on the new property value
NodeRef daNodeRef = dispositionAction.getNodeRef();
nodeService.setProperty(daNodeRef, PROP_DISPOSITION_AS_OF, updatedAsOf);
// Don't overwrite a manually set "disposition as of" date.
if (isNotTrue((Boolean) nodeService.getProperty(daNodeRef, PROP_MANUALLY_SET_AS_OF)))
{
nodeService.setProperty(daNodeRef, PROP_DISPOSITION_AS_OF, updatedAsOf);
}
}
}
}

View File

@@ -158,6 +158,8 @@ public interface RecordsManagementModel extends RecordsManagementCustomModel
QName PROP_DISPOSITION_ACTION_ID = QName.createQName(RM_URI, "dispositionActionId");
QName PROP_DISPOSITION_ACTION = QName.createQName(RM_URI, "dispositionAction");
QName PROP_DISPOSITION_AS_OF = QName.createQName(RM_URI, "dispositionAsOf");
/** A flag indicating that the "disposition as of" date has been manually set and shouldn't be changed. */
QName PROP_MANUALLY_SET_AS_OF = QName.createQName(RM_URI, "manuallySetAsOf");
QName PROP_DISPOSITION_EVENTS_ELIGIBLE = QName.createQName(RM_URI, "dispositionEventsEligible");
QName PROP_DISPOSITION_ACTION_STARTED_AT = QName.createQName(RM_URI, "dispositionActionStartedAt");
QName PROP_DISPOSITION_ACTION_STARTED_BY = QName.createQName(RM_URI, "dispositionActionStartedBy");

View File

@@ -45,12 +45,14 @@ import java.util.Set;
import org.alfresco.error.AlfrescoRuntimeException;
import org.alfresco.model.ContentModel;
import org.alfresco.module.org_alfresco_module_rm.RecordsManagementServiceRegistry;
import org.alfresco.module.org_alfresco_module_rm.RecordsManagementPolicies.BeforeFileRecord;
import org.alfresco.module.org_alfresco_module_rm.RecordsManagementPolicies.OnFileRecord;
import org.alfresco.module.org_alfresco_module_rm.capability.Capability;
import org.alfresco.module.org_alfresco_module_rm.capability.CapabilityService;
import org.alfresco.module.org_alfresco_module_rm.capability.RMPermissionModel;
import org.alfresco.module.org_alfresco_module_rm.disposition.DispositionSchedule;
import org.alfresco.module.org_alfresco_module_rm.disposition.DispositionScheduleImpl;
import org.alfresco.module.org_alfresco_module_rm.disposition.DispositionService;
import org.alfresco.module.org_alfresco_module_rm.dod5015.DOD5015Model;
import org.alfresco.module.org_alfresco_module_rm.fileplan.FilePlanService;
@@ -241,7 +243,7 @@ public class RecordServiceImpl extends BaseBehaviourBean
/** recordable version service */
private RecordableVersionService recordableVersionService;
/** list of available record meta-data aspects and the file plan types the are applicable to */
private Map<QName, Set<QName>> recordMetaDataAspects;
@@ -398,7 +400,7 @@ public class RecordServiceImpl extends BaseBehaviourBean
{
this.recordableVersionService = recordableVersionService;
}
/**
* Init method
*/
@@ -1753,7 +1755,10 @@ public class RecordServiceImpl extends BaseBehaviourBean
private void validateLinkConditions(NodeRef record, NodeRef recordFolder)
{
// ensure that the linking record folders have compatible disposition schedules
DispositionSchedule recordDispositionSchedule = dispositionService.getDispositionSchedule(record);
// get the origin disposition schedule for the record, not the calculated one
DispositionSchedule recordDispositionSchedule = dispositionService.getOriginDispositionSchedule(record);
if (recordDispositionSchedule != null)
{
DispositionSchedule recordFolderDispositionSchedule = dispositionService.getDispositionSchedule(recordFolder);

View File

@@ -44,22 +44,22 @@
*/
package org.alfresco.repo.web.scripts.roles;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.io.Writer;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.io.Writer;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Set;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpServletResponse;
import org.alfresco.model.ContentModel;
import org.alfresco.module.org_alfresco_module_rm.model.RecordsManagementModel;
import org.alfresco.module.org_alfresco_module_rm.security.ExtendedReaderDynamicAuthority;
@@ -69,26 +69,26 @@ import org.alfresco.repo.domain.node.NodeDAO;
import org.alfresco.repo.domain.patch.PatchDAO;
import org.alfresco.repo.domain.qname.QNameDAO;
import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
import org.alfresco.repo.web.scripts.content.ContentStreamer;
import org.alfresco.service.cmr.model.FileFolderService;
import org.alfresco.service.cmr.model.FileInfo;
import org.alfresco.repo.web.scripts.content.ContentStreamer;
import org.alfresco.service.cmr.model.FileFolderService;
import org.alfresco.service.cmr.model.FileInfo;
import org.alfresco.service.cmr.repository.NodeRef;
import org.alfresco.service.cmr.repository.NodeService;
import org.alfresco.service.cmr.security.PermissionService;
import org.alfresco.service.namespace.QName;
import org.alfresco.service.transaction.TransactionService;
import org.alfresco.util.Pair;
import org.alfresco.util.TempFileProvider;
import org.alfresco.util.TempFileProvider;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.springframework.extensions.webscripts.AbstractWebScript;
import org.springframework.extensions.webscripts.AbstractWebScript;
import org.springframework.extensions.webscripts.Cache;
import org.springframework.extensions.webscripts.Format;
import org.springframework.extensions.webscripts.Format;
import org.springframework.extensions.webscripts.Status;
import org.springframework.extensions.webscripts.WebScriptException;
import org.springframework.extensions.webscripts.WebScriptException;
import org.springframework.extensions.webscripts.WebScriptRequest;
import org.springframework.extensions.webscripts.WebScriptResponse;
import org.springframework.extensions.webscripts.WebScriptResponse;
/**
* Webscript used for removing dynamic authorities from the records.
@@ -97,7 +97,7 @@ import org.springframework.extensions.webscripts.WebScriptResponse;
* @since 2.3.0.7
*/
@SuppressWarnings("deprecation")
public class DynamicAuthoritiesGet extends AbstractWebScript implements RecordsManagementModel
public class DynamicAuthoritiesGet extends AbstractWebScript implements RecordsManagementModel
{
private static final String MESSAGE_PARAMETER_BATCHSIZE_GREATER_THAN_ZERO = "Parameter batchsize should be a number greater than 0.";
private static final String MESSAGE_PROCESSING_BEGIN = "Processing - BEGIN";
@@ -106,7 +106,7 @@ public class DynamicAuthoritiesGet extends AbstractWebScript implements RecordsM
private static final String MESSAGE_PROCESSING_RECORD_BEGIN_TEMPLATE = "Processing record {0} - BEGIN";
private static final String MESSAGE_BATCHSIZE_IS_INVALID = "Parameter batchsize is invalid.";
private static final String MESSAGE_BATCHSIZE_IS_MANDATORY = "Parameter batchsize is mandatory";
private static final String MESSAGE_NODE_REF_DOES_NOT_EXIST_TEMPLATE = "Parameter parentNodeRef = {0} does not exist.";
private static final String MESSAGE_NODE_REF_DOES_NOT_EXIST_TEMPLATE = "Parameter parentNodeRef = {0} does not exist.";
private static final String SUCCESS_STATUS = "success";
/**
* The logger
@@ -114,8 +114,8 @@ public class DynamicAuthoritiesGet extends AbstractWebScript implements RecordsM
private static Log logger = LogFactory.getLog(DynamicAuthoritiesGet.class);
private static final String BATCH_SIZE = "batchsize";
private static final String TOTAL_NUMBER_TO_PROCESS = "maxProcessedRecords";
private static final String PARAM_EXPORT = "export";
private static final String PARAM_PARENT_NODE_REF = "parentNodeRef";
private static final String PARAM_EXPORT = "export";
private static final String PARAM_PARENT_NODE_REF = "parentNodeRef";
private static final String MODEL_STATUS = "responsestatus";
private static final String MODEL_MESSAGE = "message";
private static final String MESSAGE_ALL_TEMPLATE = "Processed {0} records.";
@@ -131,60 +131,60 @@ public class DynamicAuthoritiesGet extends AbstractWebScript implements RecordsM
private PermissionService permissionService;
private ExtendedSecurityService extendedSecurityService;
private TransactionService transactionService;
/** Content Streamer */
protected ContentStreamer contentStreamer;
private FileFolderService fileFolderService;
/** service setters */
public void setPatchDAO(PatchDAO patchDAO)
{
this.patchDAO = patchDAO;
}
/** Content Streamer */
protected ContentStreamer contentStreamer;
private FileFolderService fileFolderService;
public void setNodeDAO(NodeDAO nodeDAO)
{
this.nodeDAO = nodeDAO;
}
/** service setters */
public void setPatchDAO(PatchDAO patchDAO)
{
this.patchDAO = patchDAO;
}
public void setQnameDAO(QNameDAO qnameDAO)
public void setNodeDAO(NodeDAO nodeDAO)
{
this.qnameDAO = qnameDAO;
}
this.nodeDAO = nodeDAO;
}
public void setNodeService(NodeService nodeService)
public void setQnameDAO(QNameDAO qnameDAO)
{
this.nodeService = nodeService;
this.qnameDAO = qnameDAO;
}
public void setPermissionService(PermissionService permissionService)
public void setNodeService(NodeService nodeService)
{
this.permissionService = permissionService;
}
public void setExtendedSecurityService(ExtendedSecurityService extendedSecurityService)
this.nodeService = nodeService;
}
public void setPermissionService(PermissionService permissionService)
{
this.extendedSecurityService = extendedSecurityService;
this.permissionService = permissionService;
}
public void setTransactionService(TransactionService transactionService)
{
this.transactionService = transactionService;
}
public void setContentStreamer(ContentStreamer contentStreamer)
public void setExtendedSecurityService(ExtendedSecurityService extendedSecurityService)
{
this.contentStreamer = contentStreamer;
this.extendedSecurityService = extendedSecurityService;
}
public void setFileFolderService(FileFolderService fileFolderService)
{
this.fileFolderService = fileFolderService;
}
protected Map<String, Object> buildModel(WebScriptRequest req, WebScriptResponse res) throws IOException
{
Map<String, Object> model = new HashMap<String, Object>();
final Long batchSize = getBatchSizeParameter(req);
public void setTransactionService(TransactionService transactionService)
{
this.transactionService = transactionService;
}
public void setContentStreamer(ContentStreamer contentStreamer)
{
this.contentStreamer = contentStreamer;
}
public void setFileFolderService(FileFolderService fileFolderService)
{
this.fileFolderService = fileFolderService;
}
protected Map<String, Object> buildModel(WebScriptRequest req, WebScriptResponse res) throws IOException
{
Map<String, Object> model = new HashMap<String, Object>();
final Long batchSize = getBatchSizeParameter(req);
// get the max node id and the extended security aspect
Long maxNodeId = patchDAO.getMaxAdmNodeID();
final Pair<Long, QName> recordAspectPair = qnameDAO.getQName(ASPECT_EXTENDED_SECURITY);
@@ -196,201 +196,201 @@ public class DynamicAuthoritiesGet extends AbstractWebScript implements RecordsM
return model;
}
Long totalNumberOfRecordsToProcess = getMaxToProccessParameter(req, batchSize);
boolean attach = getExportParameter(req);
File file = TempFileProvider.createTempFile("processedNodes_", ".csv");
FileWriter writer = new FileWriter(file);
BufferedWriter out = new BufferedWriter(writer);
List<NodeRef> processedNodes = new ArrayList<NodeRef>();
try
{
NodeRef parentNodeRef = getParentNodeRefParameter(req);
if (parentNodeRef != null)
{
processedNodes = processChildrenNodes(parentNodeRef, batchSize.intValue(), recordAspectPair,
totalNumberOfRecordsToProcess.intValue(), out, attach);
}
else
{
processedNodes = processNodes(batchSize, maxNodeId, recordAspectPair, totalNumberOfRecordsToProcess,
out, attach);
}
}
finally
{
out.close();
}
int processedNodesSize = processedNodes.size();
String message = "";
if (totalNumberOfRecordsToProcess == 0
|| (totalNumberOfRecordsToProcess > 0 && processedNodesSize < totalNumberOfRecordsToProcess))
{
message = MessageFormat.format(MESSAGE_ALL_TEMPLATE, processedNodesSize);
}
if (totalNumberOfRecordsToProcess > 0 && totalNumberOfRecordsToProcess == processedNodesSize)
{
message = MessageFormat.format(MESSAGE_PARTIAL_TEMPLATE, totalNumberOfRecordsToProcess);
}
model.put(MODEL_STATUS, SUCCESS_STATUS);
model.put(MODEL_MESSAGE, message);
logger.info(message);
if (attach)
{
try
{
String fileName = file.getName();
contentStreamer.streamContent(req, res, file, null, attach, fileName, model);
model = null;
}
finally
{
if (file != null)
{
file.delete();
}
}
}
return model;
}
/**
* Get export parameter from the request
*
* @param req
* @return
*/
/**
 * Reads the {@code export} request parameter.
 *
 * @param req the web script request
 * @return {@code true} only when the parameter is present and parses as {@code true};
 *         any other value (including absence) yields {@code false}
 */
protected boolean getExportParameter(WebScriptRequest req)
{
    // Boolean.parseBoolean is null-safe and returns false for null or any
    // value other than (case-insensitive) "true", which matches the
    // "present and true" contract exactly.
    return Boolean.parseBoolean(req.getParameter(PARAM_EXPORT));
}
/*
* (non-Javadoc)
* @see org.alfresco.repo.web.scripts.content.StreamContent#execute(org.springframework.extensions.webscripts.
* WebScriptRequest, org.springframework.extensions.webscripts.WebScriptResponse)
*/
@Override
public void execute(WebScriptRequest req, WebScriptResponse res) throws IOException
{
// retrieve requested format
String format = req.getFormat();
try
{
String mimetype = getContainer().getFormatRegistry().getMimeType(req.getAgent(), format);
Long totalNumberOfRecordsToProcess = getMaxToProccessParameter(req, batchSize);
boolean attach = getExportParameter(req);
File file = TempFileProvider.createTempFile("processedNodes_", ".csv");
FileWriter writer = new FileWriter(file);
BufferedWriter out = new BufferedWriter(writer);
List<NodeRef> processedNodes = new ArrayList<NodeRef>();
try
{
NodeRef parentNodeRef = getParentNodeRefParameter(req);
if (parentNodeRef != null)
{
processedNodes = processChildrenNodes(parentNodeRef, batchSize.intValue(), recordAspectPair,
totalNumberOfRecordsToProcess.intValue(), out, attach);
}
else
{
processedNodes = processNodes(batchSize, maxNodeId, recordAspectPair, totalNumberOfRecordsToProcess,
out, attach);
}
}
finally
{
out.close();
}
int processedNodesSize = processedNodes.size();
String message = "";
if (totalNumberOfRecordsToProcess == 0
|| (totalNumberOfRecordsToProcess > 0 && processedNodesSize < totalNumberOfRecordsToProcess))
{
message = MessageFormat.format(MESSAGE_ALL_TEMPLATE, processedNodesSize);
}
if (totalNumberOfRecordsToProcess > 0 && totalNumberOfRecordsToProcess == processedNodesSize)
{
message = MessageFormat.format(MESSAGE_PARTIAL_TEMPLATE, totalNumberOfRecordsToProcess);
}
model.put(MODEL_STATUS, SUCCESS_STATUS);
model.put(MODEL_MESSAGE, message);
logger.info(message);
if (attach)
{
try
{
String fileName = file.getName();
contentStreamer.streamContent(req, res, file, null, attach, fileName, model);
model = null;
}
finally
{
if (file != null)
{
file.delete();
}
}
}
return model;
}
/**
* Get export parameter from the request
*
* @param req
* @return
*/
protected boolean getExportParameter(WebScriptRequest req)
{
boolean attach = false;
String export = req.getParameter(PARAM_EXPORT);
if (export != null && Boolean.parseBoolean(export))
{
attach = true;
}
return attach;
}
/*
* (non-Javadoc)
* @see org.alfresco.repo.web.scripts.content.StreamContent#execute(org.springframework.extensions.webscripts.
* WebScriptRequest, org.springframework.extensions.webscripts.WebScriptResponse)
*/
@Override
public void execute(WebScriptRequest req, WebScriptResponse res) throws IOException
{
    // Entry point for the web script: builds the model, then renders it with the
    // format-specific template. NOTE(review): the ordering below (status, location,
    // cache, content type, template) mirrors the framework's own rendering sequence
    // and must not be rearranged.
    // retrieve requested format
    String format = req.getFormat();
    try
    {
        // Resolve the MIME type registered for the requested format; unknown
        // formats are rejected outright.
        String mimetype = getContainer().getFormatRegistry().getMimeType(req.getAgent(), format);
        if (mimetype == null)
        {
            throw new WebScriptException("Web Script format '" + format + "' is not registered");
        }
        // construct model for script / template
        Status status = new Status();
        Cache cache = new Cache(getDescription().getRequiredCache());
        Map<String, Object> model = buildModel(req, res);
        // A null model means buildModel already streamed the response (export mode);
        // nothing further to render.
        if (model == null) { return; }
        model.put("status", status);
        model.put("cache", cache);
        Map<String, Object> templateModel = createTemplateParameters(req, res, model);
        // render output
        int statusCode = status.getCode();
        if (statusCode != HttpServletResponse.SC_OK && !req.forceSuccessStatus())
        {
            if (logger.isDebugEnabled())
            {
                logger.debug("Force success status header in response: " + req.forceSuccessStatus());
                logger.debug("Setting status " + statusCode);
            }
            res.setStatus(statusCode);
        }
        // apply location
        String location = status.getLocation();
        if (location != null && location.length() > 0)
        {
            if (logger.isDebugEnabled()) logger.debug("Setting location to " + location);
            res.setHeader(WebScriptResponse.HEADER_LOCATION, location);
        }
        // apply cache
        res.setCache(cache);
        // JSONP support: only honour a callback when the container allows it.
        String callback = null;
        if (getContainer().allowCallbacks())
        {
            callback = req.getJSONCallback();
        }
        if (format.equals(WebScriptResponse.JSON_FORMAT) && callback != null)
        {
            if (logger.isDebugEnabled()) logger.debug("Rendering JSON callback response: content type="
                    + Format.JAVASCRIPT.mimetype() + ", status=" + statusCode + ", callback=" + callback);
            // NOTE: special case for wrapping JSON results in a javascript function callback
            res.setContentType(Format.JAVASCRIPT.mimetype() + ";charset=UTF-8");
            res.getWriter().write((callback + "("));
        }
        else
        {
            if (logger.isDebugEnabled())
                logger.debug("Rendering response: content type=" + mimetype + ", status=" + statusCode);
            res.setContentType(mimetype + ";charset=UTF-8");
        }
        // render response according to requested format
        renderFormatTemplate(format, templateModel, res.getWriter());
        if (format.equals(WebScriptResponse.JSON_FORMAT) && callback != null)
        {
            // NOTE: special case for wrapping JSON results in a javascript function callback
            res.getWriter().write(")");
        }
    }
    catch (Throwable e)
    {
        // Delegate all failures to the framework's status-template machinery so the
        // client receives a properly formatted error response.
        if (logger.isDebugEnabled())
        {
            StringWriter stack = new StringWriter();
            e.printStackTrace(new PrintWriter(stack));
            logger.debug("Caught exception; decorating with appropriate status template : " + stack.toString());
        }
        throw createStatusException(e, req, res);
    }
}
/**
 * Renders the response template selected by the requested format.
 * The template path is the web script description id suffixed with the format
 * (an empty suffix when no format was supplied).
 *
 * @param format the requested response format, may be {@code null}
 * @param model  the template model to render
 * @param writer the writer to render the template to
 */
protected void renderFormatTemplate(String format, Map<String, Object> model, Writer writer)
{
    if (format == null)
    {
        format = "";
    }
    final String path = getDescription().getId() + "." + format;
    if (logger.isDebugEnabled())
    {
        logger.debug("Rendering template '" + path + "'");
    }
    renderTemplate(path, model, writer);
}
/**
* Obtain maximum of the records to be processed from the request if it is specified or bachsize value otherwise
*
* @param req
* @return maximum of the records to be processed from the request if it is specified or bachsize value otherwise
*/
protected Long getMaxToProccessParameter(WebScriptRequest req, final Long batchSize)
{
String totalToBeProcessedRecordsStr = req.getParameter(TOTAL_NUMBER_TO_PROCESS);
}
// construct model for script / template
Status status = new Status();
Cache cache = new Cache(getDescription().getRequiredCache());
Map<String, Object> model = buildModel(req, res);
if (model == null) { return; }
model.put("status", status);
model.put("cache", cache);
Map<String, Object> templateModel = createTemplateParameters(req, res, model);
// render output
int statusCode = status.getCode();
if (statusCode != HttpServletResponse.SC_OK && !req.forceSuccessStatus())
{
if (logger.isDebugEnabled())
{
logger.debug("Force success status header in response: " + req.forceSuccessStatus());
logger.debug("Setting status " + statusCode);
}
res.setStatus(statusCode);
}
// apply location
String location = status.getLocation();
if (location != null && location.length() > 0)
{
if (logger.isDebugEnabled()) logger.debug("Setting location to " + location);
res.setHeader(WebScriptResponse.HEADER_LOCATION, location);
}
// apply cache
res.setCache(cache);
String callback = null;
if (getContainer().allowCallbacks())
{
callback = req.getJSONCallback();
}
if (format.equals(WebScriptResponse.JSON_FORMAT) && callback != null)
{
if (logger.isDebugEnabled()) logger.debug("Rendering JSON callback response: content type="
+ Format.JAVASCRIPT.mimetype() + ", status=" + statusCode + ", callback=" + callback);
// NOTE: special case for wrapping JSON results in a javascript function callback
res.setContentType(Format.JAVASCRIPT.mimetype() + ";charset=UTF-8");
res.getWriter().write((callback + "("));
}
else
{
if (logger.isDebugEnabled())
logger.debug("Rendering response: content type=" + mimetype + ", status=" + statusCode);
res.setContentType(mimetype + ";charset=UTF-8");
}
// render response according to requested format
renderFormatTemplate(format, templateModel, res.getWriter());
if (format.equals(WebScriptResponse.JSON_FORMAT) && callback != null)
{
// NOTE: special case for wrapping JSON results in a javascript function callback
res.getWriter().write(")");
}
}
catch (Throwable e)
{
if (logger.isDebugEnabled())
{
StringWriter stack = new StringWriter();
e.printStackTrace(new PrintWriter(stack));
logger.debug("Caught exception; decorating with appropriate status template : " + stack.toString());
}
throw createStatusException(e, req, res);
}
}
protected void renderFormatTemplate(String format, Map<String, Object> model, Writer writer)
{
format = (format == null) ? "" : format;
String templatePath = getDescription().getId() + "." + format;
if (logger.isDebugEnabled()) logger.debug("Rendering template '" + templatePath + "'");
renderTemplate(templatePath, model, writer);
}
/**
 * Obtain the maximum number of records to be processed from the request if it is specified, or the batchsize value otherwise.
 *
 * @param req
 * @return the maximum number of records to be processed from the request if it is specified, or the batchsize value otherwise
 */
protected Long getMaxToProccessParameter(WebScriptRequest req, final Long batchSize)
{
String totalToBeProcessedRecordsStr = req.getParameter(TOTAL_NUMBER_TO_PROCESS);
//default total number of records to be processed to batch size value
Long totalNumberOfRecordsToProcess = batchSize;
if (StringUtils.isNotBlank(totalToBeProcessedRecordsStr))
@@ -404,77 +404,77 @@ public class DynamicAuthoritiesGet extends AbstractWebScript implements RecordsM
//do nothing here, the value will remain 0L in this case
}
}
return totalNumberOfRecordsToProcess;
}
/**
* Obtain batchsize parameter from the request.
*
* @param req
* @return batchsize parameter from the request
*/
/**
 * Obtains and validates the mandatory {@code batchsize} request parameter.
 *
 * @param req the web script request
 * @return the batch size, guaranteed to be a positive number
 * @throws WebScriptException with {@code 400 Bad Request} when the parameter is
 *         missing, not a number, or not greater than zero
 */
protected Long getBatchSizeParameter(WebScriptRequest req)
{
    final String rawValue = req.getParameter(BATCH_SIZE);
    if (StringUtils.isBlank(rawValue))
    {
        logger.info(MESSAGE_BATCHSIZE_IS_MANDATORY);
        throw new WebScriptException(Status.STATUS_BAD_REQUEST, MESSAGE_BATCHSIZE_IS_MANDATORY);
    }
    long parsed;
    try
    {
        parsed = Long.parseLong(rawValue);
    }
    catch (NumberFormatException ex)
    {
        logger.info(MESSAGE_BATCHSIZE_IS_INVALID);
        throw new WebScriptException(Status.STATUS_BAD_REQUEST, MESSAGE_BATCHSIZE_IS_INVALID);
    }
    // Zero or negative batch sizes are rejected after parsing; this check is kept
    // outside the try block as it is unrelated to number-format failures.
    if (parsed <= 0)
    {
        logger.info(MESSAGE_PARAMETER_BATCHSIZE_GREATER_THAN_ZERO);
        throw new WebScriptException(Status.STATUS_BAD_REQUEST, MESSAGE_PARAMETER_BATCHSIZE_GREATER_THAN_ZERO);
    }
    return parsed;
}
return totalNumberOfRecordsToProcess;
}
/**
* Get parentNodeRef parameter from the request
*
* @param req
* @return
*/
/**
 * Obtains the optional {@code parentNodeRef} request parameter.
 *
 * @param req the web script request
 * @return the parent node reference, or {@code null} when the parameter is absent or blank
 * @throws WebScriptException with {@code 400 Bad Request} when the referenced node does not exist
 */
protected NodeRef getParentNodeRefParameter(WebScriptRequest req)
{
    final String rawNodeRef = req.getParameter(PARAM_PARENT_NODE_REF);
    if (StringUtils.isBlank(rawNodeRef))
    {
        // Parameter is optional: callers fall back to processing all nodes.
        return null;
    }
    final NodeRef candidate = new NodeRef(rawNodeRef);
    if (!nodeService.exists(candidate))
    {
        String message = MessageFormat.format(MESSAGE_NODE_REF_DOES_NOT_EXIST_TEMPLATE, candidate.toString());
        logger.info(message);
        throw new WebScriptException(Status.STATUS_BAD_REQUEST, message);
    }
    return candidate;
}
/**
* Process nodes all nodes or the maximum number of nodes specified by batchsize or totalNumberOfRecordsToProcess
* parameters
*
* @param batchSize
* @param maxNodeId
* @param recordAspectPair
* @param totalNumberOfRecordsToProcess
* @return the list of processed nodes
*/
protected List<NodeRef> processNodes(final Long batchSize, Long maxNodeId, final Pair<Long, QName> recordAspectPair,
Long totalNumberOfRecordsToProcess, final BufferedWriter out, final boolean attach)
{
/**
* Obtain batchsize parameter from the request.
*
* @param req
* @return batchsize parameter from the request
*/
protected Long getBatchSizeParameter(WebScriptRequest req)
{
String batchSizeStr = req.getParameter(BATCH_SIZE);
Long size = 0L;
if (StringUtils.isBlank(batchSizeStr))
{
logger.info(MESSAGE_BATCHSIZE_IS_MANDATORY);
throw new WebScriptException(Status.STATUS_BAD_REQUEST, MESSAGE_BATCHSIZE_IS_MANDATORY);
}
try
{
size = Long.parseLong(batchSizeStr);
if (size <= 0)
{
logger.info(MESSAGE_PARAMETER_BATCHSIZE_GREATER_THAN_ZERO);
throw new WebScriptException(Status.STATUS_BAD_REQUEST, MESSAGE_PARAMETER_BATCHSIZE_GREATER_THAN_ZERO);
}
}
catch (NumberFormatException ex)
{
logger.info(MESSAGE_BATCHSIZE_IS_INVALID);
throw new WebScriptException(Status.STATUS_BAD_REQUEST, MESSAGE_BATCHSIZE_IS_INVALID);
}
return size;
}
/**
* Get parentNodeRef parameter from the request
*
* @param req
* @return
*/
protected NodeRef getParentNodeRefParameter(WebScriptRequest req)
{
String parentNodeRefStr = req.getParameter(PARAM_PARENT_NODE_REF);
NodeRef parentNodeRef = null;
if (StringUtils.isNotBlank(parentNodeRefStr))
{
parentNodeRef = new NodeRef(parentNodeRefStr);
if(!nodeService.exists(parentNodeRef))
{
String message = MessageFormat.format(MESSAGE_NODE_REF_DOES_NOT_EXIST_TEMPLATE, parentNodeRef.toString());
logger.info(message);
throw new WebScriptException(Status.STATUS_BAD_REQUEST, message);
}
}
return parentNodeRef;
}
/**
* Process nodes all nodes or the maximum number of nodes specified by batchsize or totalNumberOfRecordsToProcess
* parameters
*
* @param batchSize
* @param maxNodeId
* @param recordAspectPair
* @param totalNumberOfRecordsToProcess
* @return the list of processed nodes
*/
protected List<NodeRef> processNodes(final Long batchSize, Long maxNodeId, final Pair<Long, QName> recordAspectPair,
Long totalNumberOfRecordsToProcess, final BufferedWriter out, final boolean attach)
{
final Long maxRecordsToProcess = totalNumberOfRecordsToProcess;
final List<NodeRef> processedNodes = new ArrayList<NodeRef>();
logger.info(MESSAGE_PROCESSING_BEGIN);
@@ -492,8 +492,8 @@ public class DynamicAuthoritiesGet extends AbstractWebScript implements RecordsM
public Void execute() throws Throwable
{
// get the nodes with the extended security aspect applied
List<Long> nodeIds = patchDAO.getNodesByAspectQNameId(recordAspectPair.getFirst(), currentIndex,
currentIndex + batchSize);
List<Long> nodeIds = patchDAO.getNodesByAspectQNameId(recordAspectPair.getFirst(), currentIndex,
currentIndex + batchSize);
// process each one
for (Long nodeId : nodeIds)
@@ -508,79 +508,79 @@ public class DynamicAuthoritiesGet extends AbstractWebScript implements RecordsM
processNode(record);
logger.info(MessageFormat.format(MESSAGE_PROCESSING_RECORD_END_TEMPLATE, recordName));
processedNodes.add(record);
if (attach)
{
out.write(recordName);
out.write(",");
out.write(record.toString());
out.write("\n");
if (attach)
{
out.write(recordName);
out.write(",");
out.write(record.toString());
out.write("\n");
}
}
}
return null;
}
}, false, // read only
}, false, // read only
true); // requires new
}
logger.info(MESSAGE_PROCESSING_END);
return processedNodes;
return processedNodes;
}
protected List<NodeRef> processChildrenNodes(NodeRef parentNodeRef, final int batchSize,
final Pair<Long, QName> recordAspectPair, final int maxRecordsToProcess, final BufferedWriter out,
final boolean attach)
{
final List<NodeRef> processedNodes = new ArrayList<NodeRef>();
final List<FileInfo> children = fileFolderService.search(parentNodeRef, "*", /*filesSearch*/true, /*folderSearch*/true, /*includeSubfolders*/true);
logger.info(MESSAGE_PROCESSING_BEGIN);
// by batch size
for (int i = 0; i < children.size(); i += batchSize)
{
if (maxRecordsToProcess != 0 && processedNodes.size() >= maxRecordsToProcess)
{
break;
}
final int currentIndex = i;
transactionService.getRetryingTransactionHelper().doInTransaction(new RetryingTransactionCallback<Void>()
{
public Void execute() throws Throwable
{
List<FileInfo> nodes = children.subList(currentIndex, Math.min(currentIndex + batchSize, children.size()));
// process each one
for (FileInfo node : nodes)
{
if (maxRecordsToProcess != 0 && processedNodes.size() >= maxRecordsToProcess)
{
break;
}
NodeRef record = node.getNodeRef();
if (nodeService.hasAspect(record, recordAspectPair.getSecond()))
{
String recordName = (String) nodeService.getProperty(record, ContentModel.PROP_NAME);
logger.info(MessageFormat.format(MESSAGE_PROCESSING_RECORD_BEGIN_TEMPLATE, recordName));
processNode(record);
logger.info(MessageFormat.format(MESSAGE_PROCESSING_RECORD_END_TEMPLATE, recordName));
processedNodes.add(record);
if (attach)
{
out.write(recordName);
out.write(",");
out.write(record.toString());
out.write("\n");
}
}
}
return null;
}
}, false, // read only
true); // requires new
}
logger.info(MESSAGE_PROCESSING_END);
return processedNodes;
}
/**
 * Processes the children of the given parent node in batches, each batch inside its
 * own (new, writable) transaction. Only children carrying the record aspect identified
 * by {@code recordAspectPair} are processed; each processed node is optionally logged
 * to the CSV writer.
 *
 * @param parentNodeRef        the node whose descendants are processed
 * @param batchSize            number of children handled per transaction
 * @param recordAspectPair     (id, QName) pair of the aspect that marks a processable record
 * @param maxRecordsToProcess  upper bound on processed nodes; 0 means no limit
 * @param out                  CSV output writer, written to only when {@code attach} is true
 * @param attach               whether to write "name,nodeRef" lines for each processed node
 * @return the list of processed node references
 */
protected List<NodeRef> processChildrenNodes(NodeRef parentNodeRef, final int batchSize,
        final Pair<Long, QName> recordAspectPair, final int maxRecordsToProcess, final BufferedWriter out,
        final boolean attach)
{
    final List<NodeRef> processedNodes = new ArrayList<NodeRef>();
    // Deep search for all files and folders beneath the parent.
    final List<FileInfo> children = fileFolderService.search(parentNodeRef, "*", /*filesSearch*/true, /*folderSearch*/true, /*includeSubfolders*/true);
    logger.info(MESSAGE_PROCESSING_BEGIN);
    // by batch size
    for (int i = 0; i < children.size(); i += batchSize)
    {
        // Stop starting new batches once the processing cap is reached (0 = unlimited).
        if (maxRecordsToProcess != 0 && processedNodes.size() >= maxRecordsToProcess)
        {
            break;
        }
        final int currentIndex = i;
        transactionService.getRetryingTransactionHelper().doInTransaction(new RetryingTransactionCallback<Void>()
        {
            public Void execute() throws Throwable
            {
                // Slice out this batch; Math.min guards the final, possibly short, batch.
                List<FileInfo> nodes = children.subList(currentIndex, Math.min(currentIndex + batchSize, children.size()));
                // process each one
                for (FileInfo node : nodes)
                {
                    // Re-check the cap inside the batch so a partial batch stops mid-way.
                    if (maxRecordsToProcess != 0 && processedNodes.size() >= maxRecordsToProcess)
                    {
                        break;
                    }
                    NodeRef record = node.getNodeRef();
                    // Only nodes with the record aspect are processed.
                    if (nodeService.hasAspect(record, recordAspectPair.getSecond()))
                    {
                        String recordName = (String) nodeService.getProperty(record, ContentModel.PROP_NAME);
                        logger.info(MessageFormat.format(MESSAGE_PROCESSING_RECORD_BEGIN_TEMPLATE, recordName));
                        processNode(record);
                        logger.info(MessageFormat.format(MESSAGE_PROCESSING_RECORD_END_TEMPLATE, recordName));
                        processedNodes.add(record);
                        if (attach)
                        {
                            // CSV row: record name, node reference.
                            out.write(recordName);
                            out.write(",");
                            out.write(record.toString());
                            out.write("\n");
                        }
                    }
                }
                return null;
            }
        }, false, // read only
        true); // requires new
    }
    logger.info(MESSAGE_PROCESSING_END);
    return processedNodes;
}
/**
* Process each node
*
@@ -601,20 +601,20 @@ public class DynamicAuthoritiesGet extends AbstractWebScript implements RecordsM
permissionService.clearPermission(nodeRef, ExtendedWriterDynamicAuthority.EXTENDED_WRITER);
// if record then ...
if (nodeService.hasAspect(nodeRef, ASPECT_RECORD))
{
Set<String> readersKeySet = null;
if (readers != null)
if (nodeService.hasAspect(nodeRef, ASPECT_RECORD))
{
readersKeySet = readers.keySet();
}
Set<String> writersKeySet = null;
if (writers != null)
{
writersKeySet = writers.keySet();
}
Set<String> readersKeySet = null;
if (readers != null)
{
readersKeySet = readers.keySet();
}
Set<String> writersKeySet = null;
if (writers != null)
{
writersKeySet = writers.keySet();
}
// re-set extended security via API
extendedSecurityService.set(nodeRef, readersKeySet, writersKeySet);
extendedSecurityService.set(nodeRef, readersKeySet, writersKeySet);
}
}
}

View File

@@ -0,0 +1,208 @@
/*
* #%L
* Alfresco Records Management Module
* %%
* Copyright (C) 2005 - 2016 Alfresco Software Limited
* %%
* This file is part of the Alfresco software.
* -
* If the software was purchased under a paid Alfresco license, the terms of
* the paid license agreement will prevail. Otherwise, the software is
* provided under the following open source license terms:
* -
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* -
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
* -
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
* #L%
*/
package org.alfresco.module.org_alfresco_module_rm.test.integration.disposition;
import static org.alfresco.module.org_alfresco_module_rm.test.util.bdt.BehaviourTest.test;
import java.io.Serializable;
import java.util.Calendar;
import java.util.Date;
import java.util.Map;
import org.alfresco.model.ContentModel;
import org.alfresco.module.org_alfresco_module_rm.action.impl.CutOffAction;
import org.alfresco.module.org_alfresco_module_rm.action.impl.DestroyAction;
import org.alfresco.module.org_alfresco_module_rm.disposition.DispositionSchedule;
import org.alfresco.module.org_alfresco_module_rm.disposition.DispositionService;
import org.alfresco.module.org_alfresco_module_rm.test.util.BaseRMTestCase;
import org.alfresco.module.org_alfresco_module_rm.test.util.CommonRMTestUtils;
import org.alfresco.module.org_alfresco_module_rm.test.util.bdt.BehaviourTest;
import org.alfresco.service.cmr.repository.NodeRef;
import org.alfresco.service.namespace.QName;
import org.alfresco.util.ApplicationContextHelper;
import org.springframework.extensions.webscripts.GUID;
import com.google.common.collect.ImmutableMap;
/**
* Integration tests for records linked to multiple disposition schedules.
*
* @author Tom Page
* @since 2.3.1
*/
public class MultipleSchedulesTest extends BaseRMTestCase
{
    /** A unique prefix for the constants in this test. */
    protected static final String TEST_PREFIX = MultipleSchedulesTest.class.getName() + GUID.generate() + "_";
    /** The name to use for the first category. */
    protected static final String CATEGORY_A_NAME = TEST_PREFIX + "CategoryA";
    /** The name to use for the folder within the first category. */
    protected static final String FOLDER_A_NAME = TEST_PREFIX + "FolderA";
    /** The name to use for the second category. */
    protected static final String CATEGORY_B_NAME = TEST_PREFIX + "CategoryB";
    /** The name to use for the folder within the second category. */
    protected static final String FOLDER_B_NAME = TEST_PREFIX + "FolderB";
    /** The name to use for the record. */
    protected static final String RECORD_NAME = TEST_PREFIX + "Record";

    /** The internal disposition service is used to avoid permissions issues when updating the record. */
    private DispositionService internalDispositionService;

    /** The first category node. */
    private NodeRef categoryA;
    /** The folder node within the first category. */
    private NodeRef folderA;
    /** The second category node. */
    private NodeRef categoryB;
    /** The folder node within the second category. */
    private NodeRef folderB;
    /** The record node. */
    private NodeRef record;

    /**
     * Initialises the behaviour test framework and looks up the internal (non-security-wrapped)
     * disposition service from the application context.
     */
    @Override
    protected void setUp() throws Exception
    {
        super.setUp();
        BehaviourTest.initBehaviourTests(retryingTransactionHelper);
        // Get the application context
        applicationContext = ApplicationContextHelper.getApplicationContext(getConfigLocations());
        internalDispositionService = (DispositionService) applicationContext.getBean("dispositionService");
        // Ensure different records are used for each test.
        record = null;
    }

    /**
     * Create two categories each containing a folder. Set up a schedule on category A that applies to records (cutoff
     * immediately, destroy immediately). Set up a schedule on category B that is the same, but with a week delay before
     * destroy becomes eligible.
     */
    private void setUpFilePlan()
    {
        // Only set up the file plan if it hasn't already been done.
        if (categoryA != null)
        {
            return;
        }
        // Create two categories.
        categoryA = filePlanService.createRecordCategory(filePlan, CATEGORY_A_NAME);
        categoryB = filePlanService.createRecordCategory(filePlan, CATEGORY_B_NAME);
        // Create a disposition schedule for category A (Cut off immediately, then Destroy immediately).
        DispositionSchedule dispSchedA = utils.createBasicDispositionSchedule(categoryA, "instructions", "authority", true, false);
        Map<QName, Serializable> cutOffParamsA = ImmutableMap.of(PROP_DISPOSITION_ACTION_NAME, CutOffAction.NAME,
                    PROP_DISPOSITION_DESCRIPTION, "description",
                    PROP_DISPOSITION_PERIOD, CommonRMTestUtils.PERIOD_IMMEDIATELY);
        dispositionService.addDispositionActionDefinition(dispSchedA, cutOffParamsA);
        Map<QName, Serializable> destroyParamsA = ImmutableMap.of(PROP_DISPOSITION_ACTION_NAME, DestroyAction.NAME,
                    PROP_DISPOSITION_DESCRIPTION, "description",
                    PROP_DISPOSITION_PERIOD, CommonRMTestUtils.PERIOD_IMMEDIATELY);
        dispositionService.addDispositionActionDefinition(dispSchedA, destroyParamsA);
        // Create a disposition schedule for category B (Cut off immediately, then Destroy one week after cutoff).
        DispositionSchedule dispSchedB = utils.createBasicDispositionSchedule(categoryB, "instructions", "authority", true, false);
        Map<QName, Serializable> cutOffParamsB = ImmutableMap.of(PROP_DISPOSITION_ACTION_NAME, CutOffAction.NAME,
                    PROP_DISPOSITION_DESCRIPTION, "description",
                    PROP_DISPOSITION_PERIOD, CommonRMTestUtils.PERIOD_IMMEDIATELY);
        dispositionService.addDispositionActionDefinition(dispSchedB, cutOffParamsB);
        Map<QName, Serializable> destroyParamsB = ImmutableMap.of(PROP_DISPOSITION_ACTION_NAME, DestroyAction.NAME,
                    PROP_DISPOSITION_DESCRIPTION, "description",
                    PROP_DISPOSITION_PERIOD, CommonRMTestUtils.PERIOD_ONE_WEEK,
                    PROP_DISPOSITION_PERIOD_PROPERTY, PROP_CUT_OFF_DATE);
        dispositionService.addDispositionActionDefinition(dispSchedB, destroyParamsB);
        // Create a folder within each category.
        folderA = recordFolderService.createRecordFolder(categoryA, FOLDER_A_NAME);
        folderB = recordFolderService.createRecordFolder(categoryB, FOLDER_B_NAME);
    }

    /**
     * <a href="https://issues.alfresco.com/jira/browse/RM-2526">RM-2526</a>
     * <p><pre>
     * Given a record subject to a disposition schedule
     * And it is linked to a disposition schedule with the same step order, but a longer destroy step
     * When the record is moved onto the destroy step
     * Then the "as of" date is calculated using the longer period.
     * </pre>
     */
    public void testLinkedToLongerSchedule()
    {
        Calendar calendar = Calendar.getInstance();
        test()
            .given(() -> {
                setUpFilePlan();
                // Create a record filed under category A and linked to category B.
                record = fileFolderService.create(folderA, RECORD_NAME, ContentModel.TYPE_CONTENT).getNodeRef();
                recordService.link(record, folderB);
            })
            .when(() -> {
                // Cut off the record.
                dispositionService.cutoffDisposableItem(record);
                // Ensure the update has been applied to the record.
                internalDispositionService.updateNextDispositionAction(record);
                // Expected "as of" date is one week after the recorded cut off date.
                calendar.setTime((Date) nodeService.getProperty(record, PROP_CUT_OFF_DATE));
                calendar.add(Calendar.WEEK_OF_YEAR, 1);
            })
            .then()
                .expect(calendar.getTime())
                    .from(() -> dispositionService.getNextDispositionAction(record).getAsOfDate())
                    .because("Record should follow largest retention schedule period, which is one week.");
    }

    /**
     * <a href="https://issues.alfresco.com/jira/browse/RM-2526">RM-2526</a>
     * <p><pre>
     * Given a record subject to a disposition schedule
     * And it is linked to a disposition schedule with the same step order, but a shorter destroy step
     * When the record is moved onto the destroy step
     * Then the "as of" date is calculated using the longer period.
     * </pre>
     */
    public void testLinkedToShorterSchedule()
    {
        Calendar calendar = Calendar.getInstance();
        test()
            .given(() -> {
                setUpFilePlan();
                // Create a record filed under category B and linked to category A.
                record = fileFolderService.create(folderB, RECORD_NAME, ContentModel.TYPE_CONTENT).getNodeRef();
                recordService.link(record, folderA);
            })
            .when(() -> {
                // Cut off the record.
                dispositionService.cutoffDisposableItem(record);
                // Ensure the update has been applied to the record.
                internalDispositionService.updateNextDispositionAction(record);
                // Expected "as of" date is one week after the recorded cut off date.
                calendar.setTime((Date) nodeService.getProperty(record, PROP_CUT_OFF_DATE));
                calendar.add(Calendar.WEEK_OF_YEAR, 1);
            })
            .then()
                .expect(calendar.getTime())
                    .from(() -> dispositionService.getNextDispositionAction(record).getAsOfDate())
                    .because("Record should follow largest retention schedule period, which is one week.");
    }
}

View File

@@ -44,6 +44,7 @@ import org.alfresco.module.org_alfresco_module_rm.action.impl.EditDispositionAct
import org.alfresco.module.org_alfresco_module_rm.action.impl.TransferAction;
import org.alfresco.module.org_alfresco_module_rm.disposition.DispositionSchedule;
import org.alfresco.module.org_alfresco_module_rm.test.util.BaseRMTestCase;
import org.alfresco.module.org_alfresco_module_rm.test.util.CommonRMTestUtils;
import org.alfresco.service.cmr.repository.NodeRef;
import org.alfresco.service.namespace.QName;
@@ -51,7 +52,7 @@ import org.alfresco.service.namespace.QName;
* Update next disposition step integration tests.
*
* @author Roxana Lucanu
* @since 2.4.1
* @since 2.3.1
*/
public class UpdateNextDispositionActionTest extends BaseRMTestCase
{
@@ -103,11 +104,6 @@ public class UpdateNextDispositionActionTest extends BaseRMTestCase
// complete record
utils.completeRecord(record);
// set the disposition as of date to now on the record
rmActionService.executeRecordsManagementAction(record,
EditDispositionActionAsOfDateAction.NAME,
Collections.singletonMap(EditDispositionActionAsOfDateAction.PARAM_AS_OF_DATE, new Date()));
// cut off
rmActionService.executeRecordsManagementAction(record, CutOffAction.NAME, null);
}
@@ -115,7 +111,7 @@ public class UpdateNextDispositionActionTest extends BaseRMTestCase
@Override
public void then() throws Exception
{
assertTrue(nodeService.hasAspect(record, ASPECT_CUT_OFF));
assertTrue("Record " + record + " doesn't have the cutOff aspect.", nodeService.hasAspect(record, ASPECT_CUT_OFF));
}
});
}
@@ -128,7 +124,7 @@ public class UpdateNextDispositionActionTest extends BaseRMTestCase
Map<QName, Serializable> cutOff = new HashMap<QName, Serializable>(3);
cutOff.put(PROP_DISPOSITION_ACTION_NAME, CutOffAction.NAME);
cutOff.put(PROP_DISPOSITION_DESCRIPTION, generate());
cutOff.put(PROP_DISPOSITION_PERIOD, PERIOD_ONE_WEEK);
cutOff.put(PROP_DISPOSITION_PERIOD, CommonRMTestUtils.PERIOD_IMMEDIATELY);
dispositionService.addDispositionActionDefinition(ds, cutOff);
// create the properties for TRANSFER action and add it to the disposition action definition

View File

@@ -666,7 +666,7 @@ public class DispositionServiceImplTest extends BaseRMTestCase
checkDisposableItemChanged(mhRecordFolder42);
checkDisposableItemChanged(record43);
checkDisposableItemUnchanged(mhRecordFolder44);
checkDisposableItemUnchanged(record45);;
checkDisposableItemUnchanged(record45);
}
});

View File

@@ -274,7 +274,7 @@ public class RecordServiceImplUnitTest extends BaseUnitTest
DispositionSchedule recordDispositionSchedule = mock(DispositionSchedule.class);
when(recordDispositionSchedule.isRecordLevelDisposition())
.thenReturn(true);
when(mockedDispositionService.getDispositionSchedule(record))
when(mockedDispositionService.getOriginDispositionSchedule(record))
.thenReturn(recordDispositionSchedule);
DispositionSchedule recordFolderDispositionSchedule = mock(DispositionSchedule.class);