APPS-1833 added null checks in service

Author: bdwiwedi
Date: 2023-01-16 14:26:27 +05:30
parent 8b16951754
commit 59ef8cf488
3 changed files with 33 additions and 14 deletions


@@ -18,5 +18,6 @@
<property name="behaviourFilter" ref="policyBehaviourFilter" /> <property name="behaviourFilter" ref="policyBehaviourFilter" />
<property name="recordsManagementQueryDAO" ref="recordsManagementQueryDAO"/> <property name="recordsManagementQueryDAO" ref="recordsManagementQueryDAO"/>
<property name="recordsManagementSearchBehaviour" ref="recordsManagementSearchBehaviour"/> <property name="recordsManagementSearchBehaviour" ref="recordsManagementSearchBehaviour"/>
<property name="dispositionService" ref="dispositionService"/>
</bean> </bean>
</beans> </beans>
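
Note: the new dispositionService property relies on Spring setter injection and is matched by the setDispositionService(...) setter added in the patch class below. A minimal, self-contained sketch of that wiring pattern (DemoService and DemoPatch are illustrative stand-ins, not classes from this commit):

// Sketch of Spring-style setter injection: the container reads
// <property name="dispositionService" ref="dispositionService"/> and calls
// the matching setter. DemoService/DemoPatch are hypothetical stand-ins.
interface DemoService
{
    String describe();
}

class DemoPatch
{
    private DemoService dispositionService;

    // Invoked by the container for the <property> element (simulated in main below).
    public void setDispositionService(DemoService dispositionService)
    {
        this.dispositionService = dispositionService;
    }

    public void apply()
    {
        System.out.println("Patch running with " + dispositionService.describe());
    }
}

public class SetterInjectionSketch
{
    public static void main(String[] args)
    {
        DemoPatch patch = new DemoPatch();
        patch.setDispositionService(() -> "a disposition service");
        patch.apply();
    }
}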


@@ -25,10 +25,10 @@
  * #L%
  */
 package org.alfresco.module.org_alfresco_module_rm.patch.v74;
 import java.util.List;
 import org.alfresco.model.ContentModel;
+import org.alfresco.module.org_alfresco_module_rm.disposition.DispositionSchedule;
+import org.alfresco.module.org_alfresco_module_rm.disposition.DispositionService;
 import org.alfresco.module.org_alfresco_module_rm.model.behaviour.RecordsManagementSearchBehaviour;
 import org.alfresco.module.org_alfresco_module_rm.patch.AbstractModulePatch;
 import org.alfresco.module.org_alfresco_module_rm.query.RecordsManagementQueryDAO;
@@ -47,14 +47,11 @@ public class RMv74UpdateDispositionPropertiesPatch extends AbstractModulePatch
 {
     private static final Logger LOGGER = LoggerFactory.getLogger(RMv74UpdateDispositionPropertiesPatch.class);
     private NodeDAO nodeDAO;
     private NodeService nodeService;
     private BehaviourFilter behaviourFilter;
     private RecordsManagementQueryDAO recordsManagementQueryDAO;
     private RecordsManagementSearchBehaviour recordsManagementSearchBehaviour;
+    private DispositionService dispositionService;
     /** How many operations in a transaction */
     private int batchSize = 1000;
     /** How many nodes do we query each time */
@@ -87,7 +84,10 @@ public class RMv74UpdateDispositionPropertiesPatch extends AbstractModulePatch
     {
         this.recordsManagementSearchBehaviour = recordsManagementSearchBehaviour;
     }
+    public void setDispositionService(DispositionService dispositionService)
+    {
+        this.dispositionService = dispositionService;
+    }
     @Override
     public void applyInternal()
     {
@@ -145,17 +145,24 @@ public class RMv74UpdateDispositionPropertiesPatch extends AbstractModulePatch
         resetCounter();
         transactionService.getRetryingTransactionHelper().doInTransaction(() -> {
+            LOGGER.debug("^^^^^^^^^^^^^^^^^^^^^^^^^^ calling process ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^");
             Long currentNodeId = nextNodeId;
             // While we haven't reached our batchSize and still have nodes to verify, keep processing
             while (counter < batchSize && nextNodeId <= maxNodeId)
             {
                 // Set upper value for query
                 Long upperNodeId = nextNodeId + querySize;
+                LOGGER.debug("calling update disposition batch process, batch start node {} and upper node {}",
+                        nextNodeId, upperNodeId);
                 // Get nodes with aspects from node id nextNodeId to upperNodeId, ordered by node id and add/remove the aspect
                 updateDispositionPropertiesInFolders(currentNodeId, upperNodeId);
                 setNextNodeId();
+                if (nextNodeId >= maxNodeId)
+                {
+                    // stop processing since we have met our limit
+                    break;
+                }
             }
             LOGGER.debug("Processed batch [{},{}]. Changed nodes: {}", currentNodeId, lastNodeProcessed, counter);
@@ -168,7 +175,13 @@ public class RMv74UpdateDispositionPropertiesPatch extends AbstractModulePatch
             List<NodeRef> folders = recordsManagementQueryDAO.getRecordFoldersWithSchedules(currentNode, upperNodeId);
             for (NodeRef folder : folders)
             {
-                recordsManagementSearchBehaviour.onAddDispositionLifecycleAspect(folder, null);
+                DispositionSchedule schedule = dispositionService.getDispositionSchedule(folder);
+                if (schedule != null && !schedule.isRecordLevelDisposition())
+                {
+                    LOGGER.debug("Processing folder with node ref: {}", folder);
+                    recordsManagementSearchBehaviour.onAddDispositionLifecycleAspect(folder, null);
+                    LOGGER.debug("Processed folder with node ref: {}", folder);
+                }
                 lastNodeProcessed = nodeDAO.getNodePair(folder).getFirst();
                 incrementCounter();
             }
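
The new guard is the heart of the commit: the lifecycle aspect is only re-applied when a disposition schedule exists for the folder and disposition is not applied at record level. A compact sketch of that null-check-plus-flag guard (DemoSchedule and the sample data are illustrative, not the Alfresco DispositionSchedule API):

// Illustrative guard mirroring the patch: skip folders with no schedule
// or with record-level disposition. DemoSchedule stands in for DispositionSchedule.
import java.util.Arrays;
import java.util.List;

public class ScheduleGuardSketch
{
    record DemoSchedule(boolean recordLevelDisposition) { }

    public static void main(String[] args)
    {
        List<DemoSchedule> schedules = Arrays.asList(
                new DemoSchedule(false),   // folder-level disposition: processed
                new DemoSchedule(true),    // record-level disposition: skipped
                null);                     // no schedule at all: skipped

        for (DemoSchedule schedule : schedules)
        {
            if (schedule != null && !schedule.recordLevelDisposition())
            {
                System.out.println("Re-applying disposition lifecycle aspect");
            }
            else
            {
                System.out.println("Skipping folder");
            }
        }
    }
}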


@@ -265,10 +265,15 @@ public class RecordsManagementQueryDAOImpl implements RecordsManagementQueryDAO,
     public List<NodeRef> getRecordFoldersWithSchedules(Long start, Long end)
     {
         Map<String, Object> params = new HashMap<>(2);
-        params.put("processed", qnameDAO.getQName(ASPECT_DISPOSITION_PROCESSED)
-                .getFirst());
-        params.put("folderQnameId", qnameDAO.getQName(TYPE_RECORD_FOLDER)
-                .getFirst());
+        Pair<Long, QName> aspectPair = qnameDAO.getQName(ASPECT_DISPOSITION_PROCESSED);
+        if (aspectPair != null)
+        {
+            params.put("processed", aspectPair.getFirst());
+        }
+        Pair<Long, QName> recordFolderPair = qnameDAO.getQName(TYPE_RECORD_FOLDER);
+        if (recordFolderPair != null)
+        {
+            params.put("folderQnameId", recordFolderPair.getFirst());
+        }
         params.put("start", start);
         params.put("end", end);