Fixed critical issues reported by sonar (Performance - Method invokes inefficient Number constructor; use static valueOf instead)

git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/modules/recordsmanagement/HEAD@63805 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
Tuna Aksoy
2014-03-09 21:54:42 +00:00
parent 3810d137d7
commit b64f1467b4
4 changed files with 56 additions and 56 deletions
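For context, the pattern Sonar flags is boxing through the wrapper-type constructors, which always allocate a new object; the static valueOf factories can return cached instances. A minimal standalone sketch of the difference (illustration only, not code from this commit):

public class ValueOfSketch
{
    public static void main(String[] args)
    {
        // Constructor form: always allocates a fresh wrapper (and is deprecated in later JDKs).
        Integer viaConstructor = new Integer(100);

        // Factory form: may reuse a cached instance. Integer.valueOf guarantees the
        // cache for -128..127; Long.valueOf typically caches the same range in practice.
        Integer viaValueOf = Integer.valueOf(100);

        System.out.println(viaConstructor.equals(viaValueOf));            // true  (same value)
        System.out.println(Integer.valueOf(100) == Integer.valueOf(100)); // true  (cached instance)
        System.out.println(new Integer(100) == new Integer(100));         // false (two allocations)

        // Same idea for the Long timestamps in RecordsManagementAuditServiceImpl.
        Long fromDateTime = Long.valueOf(System.currentTimeMillis());
        System.out.println(fromDateTime);
    }
}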

View File

@@ -1057,7 +1057,7 @@ public class RecordsManagementAuditServiceImpl extends AbstractLifecycleBean
Date fromDate = getFromDate(date);
if (fromDate != null)
{
-fromDateTime = new Long(fromDate.getTime());
+fromDateTime = Long.valueOf(fromDate.getTime());
}
return fromDateTime;
}
@@ -1099,7 +1099,7 @@ public class RecordsManagementAuditServiceImpl extends AbstractLifecycleBean
Date toDate = getToDate(date);
if (toDate != null)
{
-toDateTime = new Long(toDate.getTime());
+toDateTime = Long.valueOf(toDate.getTime());
}
return toDateTime;
}

View File

@@ -285,7 +285,7 @@ public class RMAfterInvocationProvider extends RMSecurityCommon
{
for (ConfigAttributeDefintion cad : supportedDefinitions)
{
-if (cad.parent == true && parentResult == AccessDecisionVoter.ACCESS_DENIED)
+if (cad.parent && parentResult == AccessDecisionVoter.ACCESS_DENIED)
{
throw new AccessDeniedException("Access Denied");
}
@@ -344,7 +344,7 @@ public class RMAfterInvocationProvider extends RMSecurityCommon
{
NodeRef testNodeRef = null;
-if (cad.typeString.equals(cad.parent) == true)
+if (cad.typeString.equals(cad.parent))
{
testNodeRef = returnedObject.getParentRef();
}
@@ -360,7 +360,7 @@ public class RMAfterInvocationProvider extends RMSecurityCommon
continue;
}
-if (cad.typeString.equals(cad.parent) == true && parentReadCheck != AccessDecisionVoter.ACCESS_GRANTED)
+if (cad.typeString.equals(cad.parent) && parentReadCheck != AccessDecisionVoter.ACCESS_GRANTED)
{
throw new AccessDeniedException("Access Denied");
}
@@ -441,16 +441,16 @@ public class RMAfterInvocationProvider extends RMSecurityCommon
Integer maxSize = null;
if (returnedObject.getResultSetMetaData().getSearchParameters().getMaxItems() >= 0)
{
-maxSize = new Integer(returnedObject.getResultSetMetaData().getSearchParameters().getMaxItems());
+maxSize = Integer.valueOf(returnedObject.getResultSetMetaData().getSearchParameters().getMaxItems());
}
if ((maxSize == null) && (returnedObject.getResultSetMetaData().getSearchParameters().getLimitBy() == LimitBy.FINAL_SIZE))
{
-maxSize = new Integer(returnedObject.getResultSetMetaData().getSearchParameters().getLimit());
+maxSize = Integer.valueOf(returnedObject.getResultSetMetaData().getSearchParameters().getLimit());
}
// Allow for skip
if ((maxSize != null) && (returnedObject.getResultSetMetaData().getSearchParameters().getSkipCount() >= 0))
{
-maxSize = new Integer(maxSize + returnedObject.getResultSetMetaData().getSearchParameters().getSkipCount());
+maxSize = Integer.valueOf(maxSize + returnedObject.getResultSetMetaData().getSearchParameters().getSkipCount());
}
// int maxChecks = maxPermissionChecks;
@@ -867,7 +867,7 @@ public class RMAfterInvocationProvider extends RMSecurityCommon
}
int readCheck = childReadChek;
-if (cad.parent == true)
+if (cad.parent)
{
readCheck = parentReadCheck;
}
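For the @@ -441,16 +441,16 @@ hunk above, the "Allow for skip" step means the permission check has to cover the skipped rows as well as the requested page. A standalone sketch of that arithmetic (illustration only, with plain ints standing in for the SearchParameters accessors):

public class MaxSizeSketch
{
    public static void main(String[] args)
    {
        int maxItems = 50;   // requested page size
        int skipCount = 10;  // rows skipped before the page starts

        Integer maxSize = null;
        if (maxItems >= 0)
        {
            maxSize = Integer.valueOf(maxItems);
        }
        // Allow for skip: the skipped rows must also pass the check.
        if (maxSize != null && skipCount >= 0)
        {
            maxSize = Integer.valueOf(maxSize + skipCount);
        }

        System.out.println(maxSize); // 60 rows to permission-check
    }
}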

View File

@@ -36,41 +36,41 @@ import org.apache.commons.logging.LogFactory;
* disposition schedule for a record when there is more than one which is applicable.
* An example of where this strategy might be used would be in the case of a record
* which was multiply filed.
*
* @author neilm
*/
public class DispositionSelectionStrategy implements RecordsManagementModel
{
/** Logger */
private static Log logger = LogFactory.getLog(DispositionSelectionStrategy.class);
/** Disposition service */
private DispositionService dispositionService;
/** File plan authentication service */
private FilePlanAuthenticationService filePlanAuthenticationService;
/**
* Set the disposition service
*
* @param dispositionService disposition service
*/
public void setDispositionService(DispositionService dispositionService)
{
this.dispositionService = dispositionService;
}
/**
* @param filePlanAuthenticationService file plan authentication service
*/
public void setFilePlanAuthenticationService(FilePlanAuthenticationService filePlanAuthenticationService)
{
this.filePlanAuthenticationService = filePlanAuthenticationService;
}
/**
* Select the disposition schedule to use given there is more than one
*
* @param recordFolders
* @return
*/
@@ -82,21 +82,21 @@ public class DispositionSelectionStrategy implements RecordsManagementModel
}
else
{
// 46 CHAPTER 2
// Records assigned more than 1 disposition must be retained and linked to the record folder (category) with the longest
// retention period.
// Assumption: an event-based disposition action has a longer retention
// period than a time-based one - as we cannot know when an event will occur
// TODO Automatic events?
NodeRef recordFolder = null;
if (recordFolders.size() == 1)
{
recordFolder = recordFolders.get(0);
}
else
{
SortedSet<NodeRef> sortedFolders = new TreeSet<NodeRef>(new DispositionableNodeRefComparator());
for (NodeRef f : recordFolders)
{
@@ -104,14 +104,14 @@ public class DispositionSelectionStrategy implements RecordsManagementModel
}
recordFolder = sortedFolders.first();
}
DispositionSchedule dispSchedule = dispositionService.getDispositionSchedule(recordFolder);
if (logger.isDebugEnabled())
{
logger.debug("Selected disposition schedule: " + dispSchedule);
}
NodeRef result = null;
if (dispSchedule != null)
{
@@ -137,19 +137,19 @@ public class DispositionSelectionStrategy implements RecordsManagementModel
{
public Integer doWork() throws Exception
{
-return new Integer(compareImpl(f1, f2));
+return Integer.valueOf(compareImpl(f1, f2));
}
}).intValue();
}
private int compareImpl(NodeRef f1, NodeRef f2)
{
//TODO Check the nodeRefs have the correct aspect
DispositionAction da1 = dispositionService.getNextDispositionAction(f1);
DispositionAction da2 = dispositionService.getNextDispositionAction(f2);
if (da1 != null && da2 != null)
{
Date asOfDate1 = da1.getAsOfDate();
@@ -172,7 +172,7 @@ public class DispositionSelectionStrategy implements RecordsManagementModel
DispositionActionDefinition dad2 = da2.getDispositionActionDefinition();
int eventsCount1 = 0;
int eventsCount2 = 0;
if (dad1 != null)
{
eventsCount1 = dad1.getEvents().size();
@@ -181,7 +181,7 @@ public class DispositionSelectionStrategy implements RecordsManagementModel
{
eventsCount2 = dad2.getEvents().size();
}
-return new Integer(eventsCount1).compareTo(eventsCount2);
+return Integer.valueOf(eventsCount1).compareTo(eventsCount2);
}
}
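A note on the compareImpl change above: Integer.valueOf keeps the boxed comparison's behaviour, and since Java 7 the same result is also available without any boxing via Integer.compare. A standalone sketch (illustration only, not code from this commit):

public class CompareSketch
{
    public static void main(String[] args)
    {
        int eventsCount1 = 3;
        int eventsCount2 = 5;

        // Boxed comparison, as in compareImpl: compareTo autoboxes its int argument.
        int boxed = Integer.valueOf(eventsCount1).compareTo(eventsCount2);

        // Primitive comparison (Java 7+): same sign, no allocation.
        int primitive = Integer.compare(eventsCount1, eventsCount2);

        System.out.println(boxed);     // negative: eventsCount1 < eventsCount2
        System.out.println(primitive); // negative as well
    }
}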

View File

@@ -32,31 +32,31 @@ import org.apache.commons.logging.LogFactory;
/**
* Module patch executer base implementation
*
* @author Roy Wetherall
* @since 2.2
*/
public class ModulePatchExecuterImpl extends AbstractModuleComponent
implements ModulePatchExecuter
{
/** logger */
protected static Log logger = LogFactory.getLog(ModulePatchExecuterImpl.class);
/** default start schema */
private static final int START_SCHEMA = 0;
/** attribute key */
private static final String KEY_MODULE_SCHEMA = "module-schema";
/** configured module schema version */
protected int moduleSchema = START_SCHEMA;
/** attribute service */
protected AttributeService attributeService;
/** module patches */
protected Map<String, ModulePatch> modulePatches = new HashMap<String, ModulePatch>(21);
/**
* @param attributeService attribute service
*/
@@ -72,7 +72,7 @@ public class ModulePatchExecuterImpl extends AbstractModuleComponent
{
this.moduleSchema = moduleSchema;
}
/**
* @see org.alfresco.module.org_alfresco_module_rm.patch.ModulePatchExecuter#register(org.alfresco.module.org_alfresco_module_rm.patch.ModulePatch)
*/
@@ -84,12 +84,12 @@ public class ModulePatchExecuterImpl extends AbstractModuleComponent
{
throw new AlfrescoRuntimeException("Unable to register module patch, becuase module id is invalid.");
}
if (logger.isDebugEnabled() == true)
{
logger.debug("Registering module patch " + modulePatch.getId() + " for module " + getModuleId());
}
modulePatches.put(modulePatch.getId(), modulePatch);
}
@@ -101,16 +101,16 @@ public class ModulePatchExecuterImpl extends AbstractModuleComponent
{
// get current schema version
int currentSchema = getCurrentSchema();
if (logger.isDebugEnabled() == true)
{
logger.debug("Running module patch executer (currentSchema=" + currentSchema + ", configuredSchema=" + moduleSchema + ")");
}
if (moduleSchema > currentSchema)
{
// determine what patches should be applied
List<ModulePatch> patchesToApply = new ArrayList<ModulePatch>(13);
for (ModulePatch modulePatch : modulePatches.values())
{
if (modulePatch.getFixesFromSchema() <= currentSchema &&
@@ -119,22 +119,22 @@ public class ModulePatchExecuterImpl extends AbstractModuleComponent
patchesToApply.add(modulePatch);
}
}
// apply the patches in the correct order
Collections.sort(patchesToApply);
for (ModulePatch patchToApply : patchesToApply)
{
patchToApply.apply();
}
// update the schema
updateSchema(moduleSchema);
}
}
/**
* Get the currently recorded schema version for the module
*
* @return int currently recorded schema version
*/
protected int getCurrentSchema()
@@ -146,15 +146,15 @@ public class ModulePatchExecuterImpl extends AbstractModuleComponent
}
return result;
}
/**
* Update the recorded schema version for the module.
*
* @param newSchema new schema version
*/
protected void updateSchema(int newSchema)
{
-attributeService.setAttribute(new Integer(newSchema), KEY_MODULE_SCHEMA, getModuleId());
+attributeService.setAttribute(Integer.valueOf(newSchema), KEY_MODULE_SCHEMA, getModuleId());
}
/**