diff --git a/config/alfresco/bootstrap/example_javascripts.acp b/config/alfresco/bootstrap/example_javascripts.acp index 1ce3c1bee7..9669f0f533 100644 Binary files a/config/alfresco/bootstrap/example_javascripts.acp and b/config/alfresco/bootstrap/example_javascripts.acp differ diff --git a/config/alfresco/core-services-context.xml b/config/alfresco/core-services-context.xml index d8253d8b57..9bca7d1851 100644 --- a/config/alfresco/core-services-context.xml +++ b/config/alfresco/core-services-context.xml @@ -151,6 +151,9 @@ ${alfresco.udp.mcast_port} + + ${alfresco.fping.shared.dir} + diff --git a/config/alfresco/dao/dao-context.xml b/config/alfresco/dao/dao-context.xml index f1f8a5df5e..9612dd559a 100644 --- a/config/alfresco/dao/dao-context.xml +++ b/config/alfresco/dao/dao-context.xml @@ -114,6 +114,7 @@ + diff --git a/config/alfresco/ibatis/org.hibernate.dialect.Dialect/node-common-SqlMap.xml b/config/alfresco/ibatis/org.hibernate.dialect.Dialect/node-common-SqlMap.xml index e9ba0d7e26..e5663140df 100644 --- a/config/alfresco/ibatis/org.hibernate.dialect.Dialect/node-common-SqlMap.xml +++ b/config/alfresco/ibatis/org.hibernate.dialect.Dialect/node-common-SqlMap.xml @@ -599,10 +599,10 @@ id = #{id} - + delete from alf_child_assoc where - (parent_node_id = #{parentNode.id} or child_node_id = #{childNode.id}) + child_node_id = #{childNode.id} and type_qname_id in diff --git a/config/alfresco/repository.properties b/config/alfresco/repository.properties index c4cdd10d36..89fb8e1ff4 100644 --- a/config/alfresco/repository.properties +++ b/config/alfresco/repository.properties @@ -1,20 +1,20 @@ -# Repository configuration - -repository.name=Main Repository - -# Directory configuration - -dir.root=./alf_data - -dir.contentstore=${dir.root}/contentstore -dir.contentstore.deleted=${dir.root}/contentstore.deleted - -# The location of cached content -dir.cachedcontent=${dir.root}/cachedcontent - -dir.auditcontentstore=${dir.root}/audit.contentstore - +# Repository configuration + +repository.name=Main Repository + +# Directory configuration + +dir.root=./alf_data + +dir.contentstore=${dir.root}/contentstore +dir.contentstore.deleted=${dir.root}/contentstore.deleted + +# The location of cached content +dir.cachedcontent=${dir.root}/cachedcontent + +dir.auditcontentstore=${dir.root}/audit.contentstore + # The value for the maximum permitted size in bytes of all content. # No value (or a negative long) will be taken to mean that no limit should be applied. # See content-services-context.xml @@ -22,621 +22,621 @@ system.content.maximumFileSizeLimit= -# The location for lucene index files -dir.indexes=${dir.root}/lucene-indexes - -# The location for index backups -dir.indexes.backup=${dir.root}/backup-lucene-indexes - -# The location for lucene index locks -dir.indexes.lock=${dir.indexes}/locks - -#Directory to find external license -dir.license.external=. -# Spring resource location of external license files -location.license.external=file://${dir.license.external}/*.lic -# Spring resource location of embedded license files -location.license.embedded=/WEB-INF/alfresco/license/*.lic -# Spring resource location of license files on shared classpath -location.license.shared=classpath*:/alfresco/extension/license/*.lic - -# WebDAV initialization properties -system.webdav.servlet.enabled=true -system.webdav.url.path.prefix= -system.webdav.storeName=${protocols.storeName} -system.webdav.rootPath=${protocols.rootPath} -system.webdav.activities.enabled=false - -# Is the JBPM Deploy Process Servlet enabled? 
-# Default is false. Should not be enabled in production environments as the -# servlet allows unauthenticated deployment of new workflows. -system.workflow.deployservlet.enabled=false - -# Sets the location for the JBPM Configuration File -system.workflow.jbpm.config.location=classpath:org/alfresco/repo/workflow/jbpm/jbpm.cfg.xml - -# Determines if JBPM workflow definitions are shown. -# Default is false. This controls the visibility of JBPM -# workflow definitions from the getDefinitions and -# getAllDefinitions WorkflowService API but still allows -# any in-flight JBPM workflows to be completed. -system.workflow.engine.jbpm.definitions.visible=false - -#Determines if Activiti definitions are visible -system.workflow.engine.activiti.definitions.visible=true - -# Determines if the JBPM engine is enabled -system.workflow.engine.jbpm.enabled=true - -# Determines if the Activiti engine is enabled -system.workflow.engine.activiti.enabled=true - -index.subsystem.name=lucene - -# ######################################### # -# Index Recovery and Tracking Configuration # -# ######################################### # -# -# Recovery types are: -# NONE: Ignore -# VALIDATE: Checks that the first and last transaction for each store is represented in the indexes -# AUTO: Validates and auto-recovers if validation fails -# FULL: Full index rebuild, processing all transactions in order. The server is temporarily suspended. -index.recovery.mode=VALIDATE -# FULL recovery continues when encountering errors -index.recovery.stopOnError=false -index.recovery.maximumPoolSize=5 -# Set the frequency with which the index tracking is triggered. -# For more information on index tracking in a cluster: -# http://wiki.alfresco.com/wiki/High_Availability_Configuration_V1.4_to_V2.1#Version_1.4.5.2C_2.1.1_and_later -# By default, this is effectively never, but can be modified as required. -# Examples: -# Never: * * * * * ? 2099 -# Once every five seconds: 0/5 * * * * ? -# Once every two seconds : 0/2 * * * * ? -# See http://www.quartz-scheduler.org/docs/tutorials/crontrigger.html -index.tracking.cronExpression=0/5 * * * * ? -index.tracking.adm.cronExpression=${index.tracking.cronExpression} -index.tracking.avm.cronExpression=${index.tracking.cronExpression} -# Other properties. -index.tracking.maxTxnDurationMinutes=10 -index.tracking.reindexLagMs=1000 -index.tracking.maxRecordSetSize=1000 -index.tracking.maxTransactionsPerLuceneCommit=100 -index.tracking.disableInTransactionIndexing=false -# Index tracking information of a certain age is cleaned out by a scheduled job. -# Any clustered system that has been offline for longer than this period will need to be seeded -# with a more recent backup of the Lucene indexes or the indexes will have to be fully rebuilt. -# Use -1 to disable purging. This can be switched on at any stage. -index.tracking.minRecordPurgeAgeDays=30 +# The location for lucene index files +dir.indexes=${dir.root}/lucene-indexes + +# The location for index backups +dir.indexes.backup=${dir.root}/backup-lucene-indexes + +# The location for lucene index locks +dir.indexes.lock=${dir.indexes}/locks + +#Directory to find external license +dir.license.external=. 
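Note on the node-common-SqlMap.xml hunk earlier in this diff: the delete statement's predicate is narrowed from matching either endpoint of a child association to matching the child side only, so a node's parent associations are no longer swept up by the same statement. A minimal SQL sketch of the before and after (a sketch only: ? stands in for the MyBatis #{...} bindings, and the type_qname_id list — built by mapper elements not visible in this hunk — is abbreviated):

    -- before: a row was deleted when the node appeared on either side of the association
    delete from alf_child_assoc
     where (parent_node_id = ? or child_node_id = ?)
       and type_qname_id in (...);

    -- after: a row is deleted only when the node is the child
    delete from alf_child_assoc
     where child_node_id = ?
       and type_qname_id in (...);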
+# Spring resource location of external license files +location.license.external=file://${dir.license.external}/*.lic +# Spring resource location of embedded license files +location.license.embedded=/WEB-INF/alfresco/license/*.lic +# Spring resource location of license files on shared classpath +location.license.shared=classpath*:/alfresco/extension/license/*.lic + +# WebDAV initialization properties +system.webdav.servlet.enabled=true +system.webdav.url.path.prefix= +system.webdav.storeName=${protocols.storeName} +system.webdav.rootPath=${protocols.rootPath} +system.webdav.activities.enabled=false + +# Is the JBPM Deploy Process Servlet enabled? +# Default is false. Should not be enabled in production environments as the +# servlet allows unauthenticated deployment of new workflows. +system.workflow.deployservlet.enabled=false + +# Sets the location for the JBPM Configuration File +system.workflow.jbpm.config.location=classpath:org/alfresco/repo/workflow/jbpm/jbpm.cfg.xml + +# Determines if JBPM workflow definitions are shown. +# Default is false. This controls the visibility of JBPM +# workflow definitions from the getDefinitions and +# getAllDefinitions WorkflowService API but still allows +# any in-flight JBPM workflows to be completed. +system.workflow.engine.jbpm.definitions.visible=false + +#Determines if Activiti definitions are visible +system.workflow.engine.activiti.definitions.visible=true + +# Determines if the JBPM engine is enabled +system.workflow.engine.jbpm.enabled=true + +# Determines if the Activiti engine is enabled +system.workflow.engine.activiti.enabled=true + +index.subsystem.name=lucene + +# ######################################### # +# Index Recovery and Tracking Configuration # +# ######################################### # +# +# Recovery types are: +# NONE: Ignore +# VALIDATE: Checks that the first and last transaction for each store is represented in the indexes +# AUTO: Validates and auto-recovers if validation fails +# FULL: Full index rebuild, processing all transactions in order. The server is temporarily suspended. +index.recovery.mode=VALIDATE +# FULL recovery continues when encountering errors +index.recovery.stopOnError=false +index.recovery.maximumPoolSize=5 +# Set the frequency with which the index tracking is triggered. +# For more information on index tracking in a cluster: +# http://wiki.alfresco.com/wiki/High_Availability_Configuration_V1.4_to_V2.1#Version_1.4.5.2C_2.1.1_and_later +# By default, this is effectively never, but can be modified as required. +# Examples: +# Never: * * * * * ? 2099 +# Once every five seconds: 0/5 * * * * ? +# Once every two seconds : 0/2 * * * * ? +# See http://www.quartz-scheduler.org/docs/tutorials/crontrigger.html +index.tracking.cronExpression=0/5 * * * * ? +index.tracking.adm.cronExpression=${index.tracking.cronExpression} +index.tracking.avm.cronExpression=${index.tracking.cronExpression} +# Other properties. +index.tracking.maxTxnDurationMinutes=10 +index.tracking.reindexLagMs=1000 +index.tracking.maxRecordSetSize=1000 +index.tracking.maxTransactionsPerLuceneCommit=100 +index.tracking.disableInTransactionIndexing=false +# Index tracking information of a certain age is cleaned out by a scheduled job. +# Any clustered system that has been offline for longer than this period will need to be seeded +# with a more recent backup of the Lucene indexes or the indexes will have to be fully rebuilt. +# Use -1 to disable purging. This can be switched on at any stage. 
+index.tracking.minRecordPurgeAgeDays=30 # Unused transactions will be purged in chunks determined by commit time boundaries. 'index.tracking.purgeSize' specifies the size # of the chunk (in ms). Default is a couple of hours. index.tracking.purgeSize=7200000 - -# Reindexing of missing content is by default 'never' carried out. -# The cron expression below can be changed to control the timing of this reindexing. -# Users of Enterprise Alfresco can configure this cron expression via JMX without a server restart. -# Note that if alfresco.cluster.name is not set, then reindexing will not occur. -index.reindexMissingContent.cronExpression=* * * * * ? 2099 - -# Change the failure behaviour of the configuration checker -system.bootstrap.config_check.strict=true - -# The name of the cluster -# Leave this empty to disable cluster entry -alfresco.cluster.name= - -# Time to wait for a cluster node ping before marking the node as not alive (ms) -alfresco.clusterCheck.timeout=4000 - -# Hazelcast clustering configuration -# Password to join the cluster -alfresco.hazelcast.password=alfrescocluster -# Protocol used for member discovery (tcp, ec2, udp) -alfresco.hazelcast.protocol=tcp -# Location of the Hazelcast configuration file -alfresco.hazelcast.configLocation=classpath:alfresco/hazelcast/hazelcast-${alfresco.hazelcast.protocol}.xml -# XML elements to incorporate into Hazelcast config, in particular -# hostnames/IP addresses to use for membership discovery -alfresco.hazelcast.tcp.config= -# Whether to bind to a specific host interface -alfresco.hazelcast.specify.interface=false -# The interface to bind to, if enabled above. -alfresco.hazelcast.bind.interface= - -# Amazon Web Services - EC2 discovery -alfresco.hazelcast.ec2.accesskey=my-access-key -alfresco.hazelcast.ec2.secretkey=my-secret-key -alfresco.hazelcast.ec2.region=us-east-1 -# Only instances belonging to this group will be discovered, default will try all running instances -alfresco.hazelcast.ec2.securitygroup= -alfresco.hazelcast.ec2.tagkey=type -alfresco.hazelcast.ec2.tagvalue=hz-nodes - -# The EHCache RMI peer URL addresses to set in the ehcache-custom.xml file -# Use this property to set the hostname of the current server. -# This is only necessary if the cache peer URLs are generated with an invalid IP address for the local server. -alfresco.ehcache.rmi.hostname= -# Use this property to set the cache peer URL port. -alfresco.ehcache.rmi.remoteObjectPort=0 -alfresco.ehcache.rmi.port=0 -alfresco.ehcache.rmi.socketTimeoutMillis=5000 - -# -# How long should shutdown wait to complete normally before -# taking stronger action and calling System.exit() -# in ms, 10,000 is 10 seconds -# -shutdown.backstop.timeout=10000 -shutdown.backstop.enabled=false - -# Server Single User Mode -# note: -# only allow named user (note: if blank or not set then will allow all users) -# assuming maxusers is not set to 0 -#server.singleuseronly.name=admin - -# Server Max Users - limit number of users with non-expired tickets -# note: -# -1 allows any number of users, assuming not in single-user mode -# 0 prevents further logins, including the ability to enter single-user mode -server.maxusers=-1 - -# The Cron expression controlling the frequency with which the OpenOffice connection is tested -openOffice.test.cronExpression=0 * * * * ? 
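The index.tracking.*.cronExpression properties above take Quartz cron expressions, and the comments give the two common patterns (effectively never vs. every few seconds). As a sketch, a deployment wanting less frequent tracking could override the base property and let the ADM/AVM trackers inherit it through substitution (assumed to live in an override file such as alfresco-global.properties; the ten-second value is illustrative):

    index.tracking.cronExpression=0/10 * * * * ?
    index.tracking.adm.cronExpression=${index.tracking.cronExpression}
    index.tracking.avm.cronExpression=${index.tracking.cronExpression}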
- -# -# Disable all shared caches (mutable and immutable) -# These properties are used for diagnostic purposes -system.cache.disableMutableSharedCaches=false -system.cache.disableImmutableSharedCaches=false - -# -# Properties to limit resources spent on individual searches -# -# The maximum time spent pruning results -system.acl.maxPermissionCheckTimeMillis=10000 -# The maximum number of search results to perform permission checks against -system.acl.maxPermissionChecks=1000 - -# The maximum number of filefolder list results -system.filefolderservice.defaultListMaxResults=5000 - - -# Properties to control read permission evaluation for acegi -system.readpermissions.optimise=true -system.readpermissions.bulkfetchsize=1000 - -# -# Manually control how the system handles maximum string lengths. -# Any zero or negative value is ignored. -# Only change this after consulting support or reading the appropriate Javadocs for -# org.alfresco.repo.domain.schema.SchemaBootstrap for V2.1.2 -system.maximumStringLength=-1 - -# -# Limit hibernate session size by trying to amalgamate events for the L2 session invalidation -# - hibernate works as is up to this size -# - after the limit is hit events that can be grouped invalidate the L2 cache by type and not instance -# events may not group if there are post action listener registered (this is not the case with the default distribution) -system.hibernateMaxExecutions=20000 - -# -# Determine if modification timestamp propagation from child to parent nodes is respected or not. -# Even if 'true', the functionality is only supported for child associations that declare the -# 'propagateTimestamps' element in the dictionary definition. -system.enableTimestampPropagation=true - -# -# Decide if content should be removed from the system immediately after being orphaned. -# Do not change this unless you have examined the impact it has on your backup procedures. -system.content.eagerOrphanCleanup=false -# The number of days to keep orphaned content in the content stores. -# This has no effect on the 'deleted' content stores, which are not automatically emptied. -system.content.orphanProtectDays=14 -# The action to take when a store or stores fails to delete orphaned content -# IGNORE: Just log a warning. The binary remains and the record is expunged -# KEEP_URL: Log a warning and create a URL entry with orphan time 0. It won't be processed or removed. -system.content.deletionFailureAction=IGNORE -# The CRON expression to trigger the deletion of resources associated with orphaned content. -system.content.orphanCleanup.cronExpression=0 0 4 * * ? -# The CRON expression to trigger content URL conversion. This process is not intesive and can -# be triggered on a live system. Similarly, it can be triggered using JMX on a dedicated machine. -system.content.contentUrlConverter.cronExpression=* * * * * ? 2099 -system.content.contentUrlConverter.threadCount=2 -system.content.contentUrlConverter.batchSize=500 -system.content.contentUrlConverter.runAsScheduledJob=false - -# #################### # -# Lucene configuration # -# #################### # -# -# Millisecond threshold for text transformations -# Slower transformers will force the text extraction to be asynchronous -# + +# Reindexing of missing content is by default 'never' carried out. +# The cron expression below can be changed to control the timing of this reindexing. +# Users of Enterprise Alfresco can configure this cron expression via JMX without a server restart. 
+# Note that if alfresco.cluster.name is not set, then reindexing will not occur. +index.reindexMissingContent.cronExpression=* * * * * ? 2099 + +# Change the failure behaviour of the configuration checker +system.bootstrap.config_check.strict=true + +# The name of the cluster +# Leave this empty to disable cluster entry +alfresco.cluster.name= + +# Time to wait for a cluster node ping before marking the node as not alive (ms) +alfresco.clusterCheck.timeout=4000 + +# Hazelcast clustering configuration +# Password to join the cluster +alfresco.hazelcast.password=alfrescocluster +# Protocol used for member discovery (tcp, ec2, udp) +alfresco.hazelcast.protocol=tcp +# Location of the Hazelcast configuration file +alfresco.hazelcast.configLocation=classpath:alfresco/hazelcast/hazelcast-${alfresco.hazelcast.protocol}.xml +# XML elements to incorporate into Hazelcast config, in particular +# hostnames/IP addresses to use for membership discovery +alfresco.hazelcast.tcp.config= +# Whether to bind to a specific host interface +alfresco.hazelcast.specify.interface=false +# The interface to bind to, if enabled above. +alfresco.hazelcast.bind.interface= + +# Amazon Web Services - EC2 discovery +alfresco.hazelcast.ec2.accesskey=my-access-key +alfresco.hazelcast.ec2.secretkey=my-secret-key +alfresco.hazelcast.ec2.region=us-east-1 +# Only instances belonging to this group will be discovered, default will try all running instances +alfresco.hazelcast.ec2.securitygroup= +alfresco.hazelcast.ec2.tagkey=type +alfresco.hazelcast.ec2.tagvalue=hz-nodes + +# The EHCache RMI peer URL addresses to set in the ehcache-custom.xml file +# Use this property to set the hostname of the current server. +# This is only necessary if the cache peer URLs are generated with an invalid IP address for the local server. +alfresco.ehcache.rmi.hostname= +# Use this property to set the cache peer URL port. +alfresco.ehcache.rmi.remoteObjectPort=0 +alfresco.ehcache.rmi.port=0 +alfresco.ehcache.rmi.socketTimeoutMillis=5000 + +# +# How long should shutdown wait to complete normally before +# taking stronger action and calling System.exit() +# in ms, 10,000 is 10 seconds +# +shutdown.backstop.timeout=10000 +shutdown.backstop.enabled=false + +# Server Single User Mode +# note: +# only allow named user (note: if blank or not set then will allow all users) +# assuming maxusers is not set to 0 +#server.singleuseronly.name=admin + +# Server Max Users - limit number of users with non-expired tickets +# note: +# -1 allows any number of users, assuming not in single-user mode +# 0 prevents further logins, including the ability to enter single-user mode +server.maxusers=-1 + +# The Cron expression controlling the frequency with which the OpenOffice connection is tested +openOffice.test.cronExpression=0 * * * * ? 
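Pulling the clustering properties above together: clustering stays off while alfresco.cluster.name is empty, so a minimal TCP-discovery setup needs only a cluster name, a non-default password, and member addresses for the Hazelcast TCP join. A sketch under those assumptions (host names are hypothetical, and the exact XML elements accepted by alfresco.hazelcast.tcp.config depend on the bundled Hazelcast version):

    alfresco.cluster.name=repo-cluster
    alfresco.hazelcast.password=not-the-default
    alfresco.hazelcast.protocol=tcp
    alfresco.hazelcast.tcp.config=<member>node1.example.com</member><member>node2.example.com</member>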
+ +# +# Disable all shared caches (mutable and immutable) +# These properties are used for diagnostic purposes +system.cache.disableMutableSharedCaches=false +system.cache.disableImmutableSharedCaches=false + +# +# Properties to limit resources spent on individual searches +# +# The maximum time spent pruning results +system.acl.maxPermissionCheckTimeMillis=10000 +# The maximum number of search results to perform permission checks against +system.acl.maxPermissionChecks=1000 + +# The maximum number of filefolder list results +system.filefolderservice.defaultListMaxResults=5000 + + +# Properties to control read permission evaluation for acegi +system.readpermissions.optimise=true +system.readpermissions.bulkfetchsize=1000 + +# +# Manually control how the system handles maximum string lengths. +# Any zero or negative value is ignored. +# Only change this after consulting support or reading the appropriate Javadocs for +# org.alfresco.repo.domain.schema.SchemaBootstrap for V2.1.2 +system.maximumStringLength=-1 + +# +# Limit hibernate session size by trying to amalgamate events for the L2 session invalidation +# - hibernate works as is up to this size +# - after the limit is hit, events that can be grouped invalidate the L2 cache by type and not instance +# events may not group if there are post action listeners registered (this is not the case with the default distribution) +system.hibernateMaxExecutions=20000 + +# +# Determine if modification timestamp propagation from child to parent nodes is respected or not. +# Even if 'true', the functionality is only supported for child associations that declare the +# 'propagateTimestamps' element in the dictionary definition. +system.enableTimestampPropagation=true + +# +# Decide if content should be removed from the system immediately after being orphaned. +# Do not change this unless you have examined the impact it has on your backup procedures. +system.content.eagerOrphanCleanup=false +# The number of days to keep orphaned content in the content stores. +# This has no effect on the 'deleted' content stores, which are not automatically emptied. +system.content.orphanProtectDays=14 +# The action to take when a store or stores fails to delete orphaned content +# IGNORE: Just log a warning. The binary remains and the record is expunged +# KEEP_URL: Log a warning and create a URL entry with orphan time 0. It won't be processed or removed. +system.content.deletionFailureAction=IGNORE +# The CRON expression to trigger the deletion of resources associated with orphaned content. +system.content.orphanCleanup.cronExpression=0 0 4 * * ? +# The CRON expression to trigger content URL conversion. This process is not intensive and can +# be triggered on a live system. Similarly, it can be triggered using JMX on a dedicated machine. +system.content.contentUrlConverter.cronExpression=* * * * * ? 
2099 +system.content.contentUrlConverter.threadCount=2 +system.content.contentUrlConverter.batchSize=500 +system.content.contentUrlConverter.runAsScheduledJob=false + +# #################### # +# Lucene configuration # +# #################### # +# +# Millisecond threshold for text transformations +# Slower transformers will force the text extraction to be asynchronous +# lucene.maxAtomicTransformationTime=100 -# -# The maximum number of clauses that are allowed in a lucene query -# -lucene.query.maxClauses=10000 -# -# The size of the queue of nodes waiting for index -# Events are generated as nodes are changed, this is the maximum size of the queue used to coalesce event -# When this size is reached the lists of nodes will be indexed -# -# http://issues.alfresco.com/browse/AR-1280: Setting this high is the workaround as of 1.4.3. -# -lucene.indexer.batchSize=1000000 -fts.indexer.batchSize=1000 -# -# Index cache sizes -# -lucene.indexer.cacheEnabled=true -lucene.indexer.maxDocIdCacheSize=100000 -lucene.indexer.maxDocumentCacheSize=100 -lucene.indexer.maxIsCategoryCacheSize=-1 -lucene.indexer.maxLinkAspectCacheSize=10000 -lucene.indexer.maxParentCacheSize=100000 -lucene.indexer.maxPathCacheSize=100000 -lucene.indexer.maxTypeCacheSize=10000 -# -# Properties for merge (not this does not affect the final index segment which will be optimised) -# Max merge docs only applies to the merge process not the resulting index which will be optimised. -# -lucene.indexer.mergerMaxMergeDocs=1000000 -lucene.indexer.mergerMergeFactor=5 -lucene.indexer.mergerMaxBufferedDocs=-1 -lucene.indexer.mergerRamBufferSizeMb=16 -# -# Properties for delta indexes (not this does not affect the final index segment which will be optimised) -# Max merge docs only applies to the index building process not the resulting index which will be optimised. -# -lucene.indexer.writerMaxMergeDocs=1000000 -lucene.indexer.writerMergeFactor=5 -lucene.indexer.writerMaxBufferedDocs=-1 -lucene.indexer.writerRamBufferSizeMb=16 -# -# Target number of indexes and deltas in the overall index and what index size to merge in memory -# -lucene.indexer.mergerTargetIndexCount=8 -lucene.indexer.mergerTargetOverlayCount=5 -lucene.indexer.mergerTargetOverlaysBlockingFactor=2 -lucene.indexer.maxDocsForInMemoryMerge=60000 -lucene.indexer.maxRamInMbForInMemoryMerge=16 -lucene.indexer.maxDocsForInMemoryIndex=60000 -lucene.indexer.maxRamInMbForInMemoryIndex=16 -# -# Other lucene properties -# -lucene.indexer.termIndexInterval=128 -lucene.indexer.useNioMemoryMapping=true -# over-ride to false for pre 3.0 behaviour -lucene.indexer.postSortDateTime=true -lucene.indexer.defaultMLIndexAnalysisMode=EXACT_LANGUAGE_AND_ALL -lucene.indexer.defaultMLSearchAnalysisMode=EXACT_LANGUAGE_AND_ALL -# -# The number of terms from a document that will be indexed -# -lucene.indexer.maxFieldLength=10000 - -# Should we use a 'fair' locking policy, giving queue-like access behaviour to -# the indexes and avoiding starvation of waiting writers? Set to false on old -# JVMs where this appears to cause deadlock -lucene.indexer.fairLocking=true - -# -# Index locks (mostly deprecated and will be tidied up with the next lucene upgrade) -# -lucene.write.lock.timeout=10000 -lucene.commit.lock.timeout=100000 -lucene.lock.poll.interval=100 - -lucene.indexer.useInMemorySort=true -lucene.indexer.maxRawResultSetSizeForInMemorySort=1000 -lucene.indexer.contentIndexingEnabled=true - -index.backup.cronExpression=0 0 3 * * ? 
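For context on the Lucene merge settings in this hunk (removed above with the old line endings and re-added below): Lucene merges segments geometrically, so with lucene.indexer.mergerMergeFactor=5, roughly five same-sized segments are combined into one larger segment at each level. Raising the factor and the RAM buffers trades more open files and memory for fewer, later merges; a hedged illustration (values are illustrative, not a recommendation):

    lucene.indexer.mergerMergeFactor=10
    lucene.indexer.mergerRamBufferSizeMb=32
    lucene.indexer.writerRamBufferSizeMb=32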
- -lucene.defaultAnalyserResourceBundleName=alfresco/model/dataTypeAnalyzers - - - -# When transforming archive files (.zip etc) into text representations (such as -# for full text indexing), should the files within the archive be processed too? -# If enabled, transformation takes longer, but searches of the files find more. -transformer.Archive.includeContents=false - -# Database configuration -db.schema.stopAfterSchemaBootstrap=false -db.schema.update=true -db.schema.update.lockRetryCount=24 -db.schema.update.lockRetryWaitSeconds=5 -db.driver=org.gjt.mm.mysql.Driver -db.name=alfresco -db.url=jdbc:mysql:///${db.name} -db.username=alfresco -db.password=alfresco -db.pool.initial=10 -db.pool.max=40 -db.txn.isolation=-1 -db.pool.statements.enable=true -db.pool.statements.max=40 -db.pool.min=0 -db.pool.idle=-1 -db.pool.wait.max=-1 -db.pool.validate.query= -db.pool.evict.interval=-1 -db.pool.evict.idle.min=1800000 -db.pool.validate.borrow=true -db.pool.validate.return=false -db.pool.evict.validate=false -# -db.pool.abandoned.detect=false -db.pool.abandoned.time=300 -# -# db.pool.abandoned.log=true (logAbandoned) adds overhead (http://commons.apache.org/dbcp/configuration.html) -# and also requires db.pool.abandoned.detect=true (removeAbandoned) -# -db.pool.abandoned.log=false - - -# Audit configuration -audit.enabled=true -audit.tagging.enabled=true -audit.alfresco-access.enabled=false +# +# The maximum number of clauses that are allowed in a lucene query +# +lucene.query.maxClauses=10000 +# +# The size of the queue of nodes waiting for index +# Events are generated as nodes are changed, this is the maximum size of the queue used to coalesce events +# When this size is reached, the lists of nodes will be indexed +# +# http://issues.alfresco.com/browse/AR-1280: Setting this high is the workaround as of 1.4.3. +# +lucene.indexer.batchSize=1000000 +fts.indexer.batchSize=1000 +# +# Index cache sizes +# +lucene.indexer.cacheEnabled=true +lucene.indexer.maxDocIdCacheSize=100000 +lucene.indexer.maxDocumentCacheSize=100 +lucene.indexer.maxIsCategoryCacheSize=-1 +lucene.indexer.maxLinkAspectCacheSize=10000 +lucene.indexer.maxParentCacheSize=100000 +lucene.indexer.maxPathCacheSize=100000 +lucene.indexer.maxTypeCacheSize=10000 +# +# Properties for merge (note this does not affect the final index segment which will be optimised) +# Max merge docs only applies to the merge process, not the resulting index which will be optimised. +# +lucene.indexer.mergerMaxMergeDocs=1000000 +lucene.indexer.mergerMergeFactor=5 +lucene.indexer.mergerMaxBufferedDocs=-1 +lucene.indexer.mergerRamBufferSizeMb=16 +# +# Properties for delta indexes (note this does not affect the final index segment which will be optimised) +# Max merge docs only applies to the index building process, not the resulting index which will be optimised. 
+# +lucene.indexer.writerMaxMergeDocs=1000000 +lucene.indexer.writerMergeFactor=5 +lucene.indexer.writerMaxBufferedDocs=-1 +lucene.indexer.writerRamBufferSizeMb=16 +# +# Target number of indexes and deltas in the overall index and what index size to merge in memory +# +lucene.indexer.mergerTargetIndexCount=8 +lucene.indexer.mergerTargetOverlayCount=5 +lucene.indexer.mergerTargetOverlaysBlockingFactor=2 +lucene.indexer.maxDocsForInMemoryMerge=60000 +lucene.indexer.maxRamInMbForInMemoryMerge=16 +lucene.indexer.maxDocsForInMemoryIndex=60000 +lucene.indexer.maxRamInMbForInMemoryIndex=16 +# +# Other lucene properties +# +lucene.indexer.termIndexInterval=128 +lucene.indexer.useNioMemoryMapping=true +# over-ride to false for pre 3.0 behaviour +lucene.indexer.postSortDateTime=true +lucene.indexer.defaultMLIndexAnalysisMode=EXACT_LANGUAGE_AND_ALL +lucene.indexer.defaultMLSearchAnalysisMode=EXACT_LANGUAGE_AND_ALL +# +# The number of terms from a document that will be indexed +# +lucene.indexer.maxFieldLength=10000 + +# Should we use a 'fair' locking policy, giving queue-like access behaviour to +# the indexes and avoiding starvation of waiting writers? Set to false on old +# JVMs where this appears to cause deadlock +lucene.indexer.fairLocking=true + +# +# Index locks (mostly deprecated and will be tidied up with the next lucene upgrade) +# +lucene.write.lock.timeout=10000 +lucene.commit.lock.timeout=100000 +lucene.lock.poll.interval=100 + +lucene.indexer.useInMemorySort=true +lucene.indexer.maxRawResultSetSizeForInMemorySort=1000 +lucene.indexer.contentIndexingEnabled=true + +index.backup.cronExpression=0 0 3 * * ? + +lucene.defaultAnalyserResourceBundleName=alfresco/model/dataTypeAnalyzers + + + +# When transforming archive files (.zip etc) into text representations (such as +# for full text indexing), should the files within the archive be processed too? +# If enabled, transformation takes longer, but searches of the files find more. 
+transformer.Archive.includeContents=false + +# Database configuration +db.schema.stopAfterSchemaBootstrap=false +db.schema.update=true +db.schema.update.lockRetryCount=24 +db.schema.update.lockRetryWaitSeconds=5 +db.driver=org.gjt.mm.mysql.Driver +db.name=alfresco +db.url=jdbc:mysql:///${db.name} +db.username=alfresco +db.password=alfresco +db.pool.initial=10 +db.pool.max=40 +db.txn.isolation=-1 +db.pool.statements.enable=true +db.pool.statements.max=40 +db.pool.min=0 +db.pool.idle=-1 +db.pool.wait.max=-1 +db.pool.validate.query= +db.pool.evict.interval=-1 +db.pool.evict.idle.min=1800000 +db.pool.validate.borrow=true +db.pool.validate.return=false +db.pool.evict.validate=false +# +db.pool.abandoned.detect=false +db.pool.abandoned.time=300 +# +# db.pool.abandoned.log=true (logAbandoned) adds overhead (http://commons.apache.org/dbcp/configuration.html) +# and also requires db.pool.abandoned.detect=true (removeAbandoned) +# +db.pool.abandoned.log=false + + +# Audit configuration +audit.enabled=true +audit.tagging.enabled=true +audit.alfresco-access.enabled=false audit.alfresco-access.sub-actions.enabled=false -audit.cmischangelog.enabled=false -audit.dod5015.enabled=false -# Setting this flag to true will force startup failure when invalid audit configurations are detected -audit.config.strict=false -# Audit map filter for AccessAuditor - restricts recorded events to user driven events -audit.filter.alfresco-access.default.enabled=true -audit.filter.alfresco-access.transaction.user=~System;~null;.* -audit.filter.alfresco-access.transaction.type=cm:folder;cm:content;st:site -audit.filter.alfresco-access.transaction.path=~/sys:archivedItem;~/ver:;.* - - -# System Configuration -system.store=system://system -system.descriptor.childname=sys:descriptor -system.descriptor.current.childname=sys:descriptor-current - -# User config -alfresco_user_store.store=user://alfrescoUserStore -alfresco_user_store.system_container.childname=sys:system -alfresco_user_store.user_container.childname=sys:people - -# note: default admin username - should not be changed after installation -alfresco_user_store.adminusername=admin - -# Initial password - editing this will not have any effect once the repository is installed -alfresco_user_store.adminpassword=209c6174da490caeb422f3fa5a7ae634 - -# note: default guest username - should not be changed after installation -alfresco_user_store.guestusername=guest - -# Used to move home folders to a new location -home_folder_provider_synchronizer.enabled=false -home_folder_provider_synchronizer.override_provider= -home_folder_provider_synchronizer.keep_empty_parents=false - -# Spaces Archive Configuration -spaces.archive.store=archive://SpacesStore - -# Spaces Configuration -spaces.store=workspace://SpacesStore -spaces.company_home.childname=app:company_home -spaces.guest_home.childname=app:guest_home -spaces.dictionary.childname=app:dictionary -spaces.templates.childname=app:space_templates -spaces.imapConfig.childname=app:imap_configs -spaces.imap_templates.childname=app:imap_templates -spaces.scheduled_actions.childname=cm:Scheduled Actions -spaces.emailActions.childname=app:email_actions -spaces.searchAction.childname=cm:search -spaces.templates.content.childname=app:content_templates -spaces.templates.email.childname=app:email_templates -spaces.templates.email.invite1.childname=app:invite_email_templates -spaces.templates.email.notify.childname=app:notify_email_templates -spaces.templates.email.following.childname=app:following 
-spaces.templates.rss.childname=app:rss_templates -spaces.savedsearches.childname=app:saved_searches -spaces.scripts.childname=app:scripts -spaces.wcm.childname=app:wcm -spaces.wcm_content_forms.childname=app:wcm_forms -spaces.content_forms.childname=app:forms -spaces.user_homes.childname=app:user_homes -spaces.user_homes.regex.key=userName -spaces.user_homes.regex.pattern= -spaces.user_homes.regex.group_order= -spaces.sites.childname=st:sites -spaces.templates.email.invite.childname=cm:invite -spaces.templates.email.activities.childname=cm:activities -spaces.rendition.rendering_actions.childname=app:rendering_actions -spaces.replication.replication_actions.childname=app:replication_actions -spaces.wcm_deployed.childname=cm:wcm_deployed -spaces.transfers.childname=app:transfers -spaces.transfer_groups.childname=app:transfer_groups -spaces.transfer_temp.childname=app:temp -spaces.inbound_transfer_records.childname=app:inbound_transfer_records -spaces.webscripts.childname=cm:webscripts -spaces.extension_webscripts.childname=cm:extensionwebscripts -spaces.models.childname=app:models -spaces.workflow.definitions.childname=app:workflow_defs -spaces.publishing.root.childname=app:publishing_root -spaces.templates.email.workflowemailnotification.childname=cm:workflownotification -spaces.nodetemplates.childname=app:node_templates - -# ADM VersionStore Configuration -version.store.enableAutoVersioning=true -version.store.deprecated.lightWeightVersionStore=workspace://lightWeightVersionStore -version.store.version2Store=workspace://version2Store - -version.store.migrateVersionStore.threadCount=3 -version.store.migrateVersionStore.batchSize=1 - -version.store.migrateCleanupJob.threadCount=3 -version.store.migrateCleanupJob.batchSize=1 - - -# WARNING: For non-production testing only !!! Do not change (to avoid version store issues, including possible mismatch). Should be false since lightWeightVersionStore is deprecated. -version.store.onlyUseDeprecatedV1=false - -# The CRON expression to trigger migration of the version store from V1 (2.x) to V2 (3.x) -# By default, this is effectively 'never' but can be modified as required. -# Examples: -# Never: * * * * * ? 2099 -# Once every thirty minutes: 0 0/30 * * * ? -# See http://www.quartz-scheduler.org/docs/tutorials/crontrigger.html -version.store.migrateVersionStore.cronExpression=* * * * * ? 2099 -# Limit number of version histories to migrate per job cycle, where -1 = unlimited. Note: if limit > 0 then need to schedule job to run regularly in order to complete the migration. 
-version.store.migrateVersionStore.limitPerJobCycle=-1 -version.store.migrateVersionStore.runAsScheduledJob=false - -# Folders for storing people -system.system_container.childname=sys:system -system.people_container.childname=sys:people -system.authorities_container.childname=sys:authorities -system.zones_container.childname=sys:zones - -# Folders for storing workflow related info -system.workflow_container.childname=sys:workflow - +audit.cmischangelog.enabled=false +audit.dod5015.enabled=false +# Setting this flag to true will force startup failure when invalid audit configurations are detected +audit.config.strict=false +# Audit map filter for AccessAuditor - restricts recorded events to user driven events +audit.filter.alfresco-access.default.enabled=true +audit.filter.alfresco-access.transaction.user=~System;~null;.* +audit.filter.alfresco-access.transaction.type=cm:folder;cm:content;st:site +audit.filter.alfresco-access.transaction.path=~/sys:archivedItem;~/ver:;.* + + +# System Configuration +system.store=system://system +system.descriptor.childname=sys:descriptor +system.descriptor.current.childname=sys:descriptor-current + +# User config +alfresco_user_store.store=user://alfrescoUserStore +alfresco_user_store.system_container.childname=sys:system +alfresco_user_store.user_container.childname=sys:people + +# note: default admin username - should not be changed after installation +alfresco_user_store.adminusername=admin + +# Initial password - editing this will not have any effect once the repository is installed +alfresco_user_store.adminpassword=209c6174da490caeb422f3fa5a7ae634 + +# note: default guest username - should not be changed after installation +alfresco_user_store.guestusername=guest + +# Used to move home folders to a new location +home_folder_provider_synchronizer.enabled=false +home_folder_provider_synchronizer.override_provider= +home_folder_provider_synchronizer.keep_empty_parents=false + +# Spaces Archive Configuration +spaces.archive.store=archive://SpacesStore + +# Spaces Configuration +spaces.store=workspace://SpacesStore +spaces.company_home.childname=app:company_home +spaces.guest_home.childname=app:guest_home +spaces.dictionary.childname=app:dictionary +spaces.templates.childname=app:space_templates +spaces.imapConfig.childname=app:imap_configs +spaces.imap_templates.childname=app:imap_templates +spaces.scheduled_actions.childname=cm:Scheduled Actions +spaces.emailActions.childname=app:email_actions +spaces.searchAction.childname=cm:search +spaces.templates.content.childname=app:content_templates +spaces.templates.email.childname=app:email_templates +spaces.templates.email.invite1.childname=app:invite_email_templates +spaces.templates.email.notify.childname=app:notify_email_templates +spaces.templates.email.following.childname=app:following +spaces.templates.rss.childname=app:rss_templates +spaces.savedsearches.childname=app:saved_searches +spaces.scripts.childname=app:scripts +spaces.wcm.childname=app:wcm +spaces.wcm_content_forms.childname=app:wcm_forms +spaces.content_forms.childname=app:forms +spaces.user_homes.childname=app:user_homes +spaces.user_homes.regex.key=userName +spaces.user_homes.regex.pattern= +spaces.user_homes.regex.group_order= +spaces.sites.childname=st:sites +spaces.templates.email.invite.childname=cm:invite +spaces.templates.email.activities.childname=cm:activities +spaces.rendition.rendering_actions.childname=app:rendering_actions +spaces.replication.replication_actions.childname=app:replication_actions 
+spaces.wcm_deployed.childname=cm:wcm_deployed +spaces.transfers.childname=app:transfers +spaces.transfer_groups.childname=app:transfer_groups +spaces.transfer_temp.childname=app:temp +spaces.inbound_transfer_records.childname=app:inbound_transfer_records +spaces.webscripts.childname=cm:webscripts +spaces.extension_webscripts.childname=cm:extensionwebscripts +spaces.models.childname=app:models +spaces.workflow.definitions.childname=app:workflow_defs +spaces.publishing.root.childname=app:publishing_root +spaces.templates.email.workflowemailnotification.childname=cm:workflownotification +spaces.nodetemplates.childname=app:node_templates + +# ADM VersionStore Configuration +version.store.enableAutoVersioning=true +version.store.deprecated.lightWeightVersionStore=workspace://lightWeightVersionStore +version.store.version2Store=workspace://version2Store + +version.store.migrateVersionStore.threadCount=3 +version.store.migrateVersionStore.batchSize=1 + +version.store.migrateCleanupJob.threadCount=3 +version.store.migrateCleanupJob.batchSize=1 + + +# WARNING: For non-production testing only !!! Do not change (to avoid version store issues, including possible mismatch). Should be false since lightWeightVersionStore is deprecated. +version.store.onlyUseDeprecatedV1=false + +# The CRON expression to trigger migration of the version store from V1 (2.x) to V2 (3.x) +# By default, this is effectively 'never' but can be modified as required. +# Examples: +# Never: * * * * * ? 2099 +# Once every thirty minutes: 0 0/30 * * * ? +# See http://www.quartz-scheduler.org/docs/tutorials/crontrigger.html +version.store.migrateVersionStore.cronExpression=* * * * * ? 2099 +# Limit number of version histories to migrate per job cycle, where -1 = unlimited. Note: if limit > 0 then need to schedule job to run regularly in order to complete the migration. +version.store.migrateVersionStore.limitPerJobCycle=-1 +version.store.migrateVersionStore.runAsScheduledJob=false + +# Folders for storing people +system.system_container.childname=sys:system +system.people_container.childname=sys:people +system.authorities_container.childname=sys:authorities +system.zones_container.childname=sys:zones + +# Folders for storing workflow related info +system.workflow_container.childname=sys:workflow + # Folder for storing shared remote credentials system.remote_credentials_container.childname=sys:remote_credentials # Folder for storing syncset definitions system.syncset_definition_container.childname=sys:syncset_definitions -# Are user names case sensitive? -user.name.caseSensitive=false -domain.name.caseSensitive=false -domain.separator= - -# AVM Specific properties. -avm.remote.idlestream.timeout=30000 - -#Format caption extracted from the XML Schema. -xforms.formatCaption=true - -# ECM content usages/quotas -system.usages.enabled=false -system.usages.clearBatchSize=50 -system.usages.updateBatchSize=50 - -# Repository endpoint - used by Activity Service -repo.remote.endpoint=/service - -# Create home folders as people are created (true) or create them lazily (false) -home.folder.creation.eager=true - -# Should we consider zero byte content to be the same as no content when firing -# content update policies? Prevents 'premature' firing of inbound content rules -# for some clients such as Mac OS X Finder +# Are user names case sensitive? +user.name.caseSensitive=false +domain.name.caseSensitive=false +domain.separator= + +# AVM Specific properties. +avm.remote.idlestream.timeout=30000 + +#Format caption extracted from the XML Schema. 
+xforms.formatCaption=true + +# ECM content usages/quotas +system.usages.enabled=false +system.usages.clearBatchSize=50 +system.usages.updateBatchSize=50 + +# Repository endpoint - used by Activity Service +repo.remote.endpoint=/service + +# Create home folders as people are created (true) or create them lazily (false) +home.folder.creation.eager=true + +# Should we consider zero byte content to be the same as no content when firing +# content update policies? Prevents 'premature' firing of inbound content rules +# for some clients such as Mac OS X Finder policy.content.update.ignoreEmpty=true + +# The well known RMI registry port and external host name published in the stubs +# is defined in the alfresco-shared.properties file +# +# alfresco.rmi.services.port=50500 + +# Default value of alfresco.rmi.services.host is 0.0.0.0 which means 'listen on all adapters'. +# This allows connections to JMX both remotely and locally. +# +alfresco.rmi.services.host=0.0.0.0 + +# If the RMI address is in use, how many retries should be done before aborting +# Default value of alfresco.rmi.services.retries is 0 which means 'Don't retry if the address is in use' +alfresco.rmi.services.retries=4 + +# RMI service ports for the individual services. +# These seven services are available remotely. +# +# Assign individual ports for each service for best performance +# or run several services on the same port, you can even run everything on 50500 if +# running through a firewall. +# +# Specify 0 to use a random unused port. +# +avm.rmi.service.port=50501 +avmsync.rmi.service.port=50502 +authentication.rmi.service.port=50504 +repo.rmi.service.port=50505 +action.rmi.service.port=50506 +deployment.rmi.service.port=50507 +monitor.rmi.service.port=50508 + + +# Should the MBean server bind to an existing server? Set to true for most application servers, +# false for WebSphere clusters. 
+mbean.server.locateExistingServerIfPossible=true + +# External executable locations +ooo.exe=soffice +ooo.user=${dir.root}/oouser +img.root=./ImageMagick +img.dyn=${img.root}/lib +img.exe=${img.root}/bin/convert +swf.exe=./bin/pdf2swf swf.languagedir=. - -# Thumbnail Service -system.thumbnail.generate=true - -# Generate doclib icons -# When creating a doclib icon, only use the first pageLimit pages (currently only understood by pdfbox -# TextToPdfContentTransformer) -system.thumbnail.definition.doclib.timeoutMs=-1 -system.thumbnail.definition.doclib.readLimitTimeMs=-1 -system.thumbnail.definition.doclib.maxSourceSizeKBytes=-1 -system.thumbnail.definition.doclib.readLimitKBytes=-1 -system.thumbnail.definition.doclib.pageLimit=1 -system.thumbnail.definition.doclib.maxPages=-1 - -# Max mimetype sizes to create thumbnail icons -system.thumbnail.mimetype.maxSourceSizeKBytes.pdf=-1 -system.thumbnail.mimetype.maxSourceSizeKBytes.txt=-1 -system.thumbnail.mimetype.maxSourceSizeKBytes.docx=-1 -system.thumbnail.mimetype.maxSourceSizeKBytes.xlsx=-1 -system.thumbnail.mimetype.maxSourceSizeKBytes.pptx=-1 -system.thumbnail.mimetype.maxSourceSizeKBytes.odt=-1 -system.thumbnail.mimetype.maxSourceSizeKBytes.ods=-1 -system.thumbnail.mimetype.maxSourceSizeKBytes.odp=-1 - -# Configuration for handling of failing thumbnails. -# See NodeEligibleForRethumbnailingEvaluator's javadoc for details. -# -# Retry periods limit the frequency with which the repository will attempt to create Share thumbnails -# for content nodes which have previously failed in their thumbnail attempts. -# These periods are in seconds. -# -# 604800s = 60s * 60m * 24h * 7d = 1 week -system.thumbnail.retryPeriod=60 -system.thumbnail.retryCount=2 -system.thumbnail.quietPeriod=604800 -system.thumbnail.quietPeriodRetriesEnabled=true - -# Content Transformers + +# Thumbnail Service +system.thumbnail.generate=true + +# Generate doclib icons +# When creating a doclib icon, only use the first pageLimit pages (currently only understood by pdfbox +# TextToPdfContentTransformer) +system.thumbnail.definition.doclib.timeoutMs=-1 +system.thumbnail.definition.doclib.readLimitTimeMs=-1 +system.thumbnail.definition.doclib.maxSourceSizeKBytes=-1 +system.thumbnail.definition.doclib.readLimitKBytes=-1 +system.thumbnail.definition.doclib.pageLimit=1 +system.thumbnail.definition.doclib.maxPages=-1 + +# Max mimetype sizes to create thumbnail icons +system.thumbnail.mimetype.maxSourceSizeKBytes.pdf=-1 +system.thumbnail.mimetype.maxSourceSizeKBytes.txt=-1 +system.thumbnail.mimetype.maxSourceSizeKBytes.docx=-1 +system.thumbnail.mimetype.maxSourceSizeKBytes.xlsx=-1 +system.thumbnail.mimetype.maxSourceSizeKBytes.pptx=-1 +system.thumbnail.mimetype.maxSourceSizeKBytes.odt=-1 +system.thumbnail.mimetype.maxSourceSizeKBytes.ods=-1 +system.thumbnail.mimetype.maxSourceSizeKBytes.odp=-1 + +# Configuration for handling of failing thumbnails. +# See NodeEligibleForRethumbnailingEvaluator's javadoc for details. +# +# Retry periods limit the frequency with which the repository will attempt to create Share thumbnails +# for content nodes which have previously failed in their thumbnail attempts. +# These periods are in seconds. 
+# +# 604800s = 60s * 60m * 24h * 7d = 1 week +system.thumbnail.retryPeriod=60 +system.thumbnail.retryCount=2 +system.thumbnail.quietPeriod=604800 +system.thumbnail.quietPeriodRetriesEnabled=true + +# Content Transformers content.transformer.failover=true - -# Base setting for all transformers (2 min timeout) -content.transformer.default.timeoutMs=120000 -content.transformer.default.readLimitTimeMs=-1 -content.transformer.default.maxSourceSizeKBytes=-1 -content.transformer.default.readLimitKBytes=-1 -content.transformer.default.pageLimit=-1 -content.transformer.default.maxPages=-1 - + +# Base setting for all transformers (2 min timeout) +content.transformer.default.timeoutMs=120000 +content.transformer.default.readLimitTimeMs=-1 +content.transformer.default.maxSourceSizeKBytes=-1 +content.transformer.default.readLimitKBytes=-1 +content.transformer.default.pageLimit=-1 +content.transformer.default.maxPages=-1 + # text -> pdf using PdfBox (text/csv, text/xml) 10M takes about 12 seconds -content.transformer.PdfBox.TextToPdf.maxSourceSizeKBytes=10240 - +content.transformer.PdfBox.TextToPdf.maxSourceSizeKBytes=10240 + # pdf -> swf using Pdf2swf 2M takes about 60 seconds. content.transformer.Pdf2swf.maxSourceSizeKBytes=2048 - + # txt -> pdf -> swf 5M (pdf is about the same size as the txt) -# Need this limit as transformer.PdfBox txt -> pdf is allowed up to 10M +# Need this limit as transformer.PdfBox txt -> pdf is allowed up to 10M content.transformer.complex.Text.Pdf2swf.maxSourceSizeKBytes=5120 - + # Transforms to PDF # ================= content.transformer.OpenOffice.mimeTypeLimits.txt.pdf.maxSourceSizeKBytes=5120 @@ -662,7 +662,7 @@ content.transformer.OpenOffice.mimeTypeLimits.xlsm.pdf.maxSourceSizeKBytes=1536 content.transformer.OpenOffice.mimeTypeLimits.xltm.pdf.maxSourceSizeKBytes=1536 content.transformer.OpenOffice.mimeTypeLimits.xlam.pdf.maxSourceSizeKBytes=1536 content.transformer.OpenOffice.mimeTypeLimits.xlsb.pdf.maxSourceSizeKBytes=1536 - + # Transforms to SWF # ================= content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.txt.swf.maxSourceSizeKBytes=5120 @@ -688,7 +688,7 @@ content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.xlsm.swf.maxSourceSizeKByt content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.xltm.swf.maxSourceSizeKBytes=1024 content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.xlam.swf.maxSourceSizeKBytes=1024 content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.xlsb.swf.maxSourceSizeKBytes=1024 - + # OpenOffice transforms to TXT (generally there are better options such as Tika) # ============================= @@ -696,71 +696,71 @@ content.transformer.OpenOffice.PdfBox.mimeTypeLimits.xlsb.txt.maxSourceSizeKByte content.transformer.OpenOffice.PdfBox.mimeTypeLimits.potm.txt.maxSourceSizeKBytes=1024 -# Property to enable upgrade from 2.1-A -V2.1-A.fixes.to.schema=0 -#V2.1-A.fixes.to.schema=82 - -# The default authentication chain -authentication.chain=alfrescoNtlm1:alfrescoNtlm - -# Do authentication tickets expire or live for ever? +# Property to enable upgrade from 2.1-A +V2.1-A.fixes.to.schema=0 +#V2.1-A.fixes.to.schema=82 + +# The default authentication chain +authentication.chain=alfrescoNtlm1:alfrescoNtlm + +# Do authentication tickets expire or live for ever? authentication.ticket.ticketsExpire=true - -# If ticketsEpire is true then how they should expire? -# Valid values are: AFTER_INACTIVITY, AFTER_FIXED_TIME, DO_NOT_EXPIRE -# The default is AFTER_FIXED_TIME + +# If ticketsExpire is true then how should they expire? 
+# Valid values are: AFTER_INACTIVITY, AFTER_FIXED_TIME, DO_NOT_EXPIRE +# The default is AFTER_FIXED_TIME authentication.ticket.expiryMode=AFTER_INACTIVITY - -# If authentication.ticket.ticketsExpire is true and -# authentication.ticket.expiryMode is AFTER_FIXED_TIME or AFTER_INACTIVITY, -# this controls the minimum period for which tickets are valid. -# The default is PT1H for one hour. -authentication.ticket.validDuration=PT1H - -# Default NFS user mappings (empty). Note these users will be able to -# authenticate through NFS without password so ensure NFS port is secure before -# enabling and adding mappings -nfs.user.mappings= -nfs.user.mappings.default.uid=0 -nfs.user.mappings.default.gid=0 - -#Example NFS user mappings -#nfs.user.mappings=admin,user1 -#nfs.user.mappings.value.admin.uid=0 -#nfs.user.mappings.value.admin.gid=0 -#nfs.user.mappings.value.user1.uid=500 -#nfs.user.mappings.value.user1.gid=500 - -# Default root path for protocols -protocols.storeName=${spaces.store} -protocols.rootPath=/${spaces.company_home.childname} - -# OpenCMIS -opencmis.connector.default.store=${spaces.store} -opencmis.connector.default.rootPath=/${spaces.company_home.childname} -opencmis.connector.default.typesDefaultMaxItems=500 -opencmis.connector.default.typesDefaultDepth=-1 -opencmis.connector.default.objectsDefaultMaxItems=10000 -opencmis.connector.default.objectsDefaultDepth=100 -opencmis.connector.default.openHttpSession=false - -# IMAP -imap.server.enabled=false -imap.server.port=143 -imap.server.attachments.extraction.enabled=true - -# Default IMAP mount points -imap.config.home.store=${spaces.store} -imap.config.home.rootPath=/${spaces.company_home.childname} -imap.config.home.folderPath=Imap Home -imap.config.server.mountPoints=AlfrescoIMAP -imap.config.server.mountPoints.default.mountPointName=IMAP -imap.config.server.mountPoints.default.modeName=ARCHIVE -imap.config.server.mountPoints.default.store=${spaces.store} -imap.config.server.mountPoints.default.rootPath=${protocols.rootPath} -imap.config.server.mountPoints.value.AlfrescoIMAP.mountPointName=Alfresco IMAP -imap.config.server.mountPoints.value.AlfrescoIMAP.modeName=MIXED - + +# If authentication.ticket.ticketsExpire is true and +# authentication.ticket.expiryMode is AFTER_FIXED_TIME or AFTER_INACTIVITY, +# this controls the minimum period for which tickets are valid. +# The default is PT1H for one hour. +authentication.ticket.validDuration=PT1H + +# Default NFS user mappings (empty). 
Note these users will be able to +# authenticate through NFS without password so ensure NFS port is secure before +# enabling and adding mappings +nfs.user.mappings= +nfs.user.mappings.default.uid=0 +nfs.user.mappings.default.gid=0 + +#Example NFS user mappings +#nfs.user.mappings=admin,user1 +#nfs.user.mappings.value.admin.uid=0 +#nfs.user.mappings.value.admin.gid=0 +#nfs.user.mappings.value.user1.uid=500 +#nfs.user.mappings.value.user1.gid=500 + +# Default root path for protocols +protocols.storeName=${spaces.store} +protocols.rootPath=/${spaces.company_home.childname} + +# OpenCMIS +opencmis.connector.default.store=${spaces.store} +opencmis.connector.default.rootPath=/${spaces.company_home.childname} +opencmis.connector.default.typesDefaultMaxItems=500 +opencmis.connector.default.typesDefaultDepth=-1 +opencmis.connector.default.objectsDefaultMaxItems=10000 +opencmis.connector.default.objectsDefaultDepth=100 +opencmis.connector.default.openHttpSession=false + +# IMAP +imap.server.enabled=false +imap.server.port=143 +imap.server.attachments.extraction.enabled=true + +# Default IMAP mount points +imap.config.home.store=${spaces.store} +imap.config.home.rootPath=/${spaces.company_home.childname} +imap.config.home.folderPath=Imap Home +imap.config.server.mountPoints=AlfrescoIMAP +imap.config.server.mountPoints.default.mountPointName=IMAP +imap.config.server.mountPoints.default.modeName=ARCHIVE +imap.config.server.mountPoints.default.store=${spaces.store} +imap.config.server.mountPoints.default.rootPath=${protocols.rootPath} +imap.config.server.mountPoints.value.AlfrescoIMAP.mountPointName=Alfresco IMAP +imap.config.server.mountPoints.value.AlfrescoIMAP.modeName=MIXED + #Imap extraction settings #imap.attachments.mode: # SEPARATE -- All attachments for each email will be extracted to separate folder. @@ -772,172 +772,172 @@ imap.attachments.folder.store=${spaces.store} imap.attachments.folder.rootPath=/${spaces.company_home.childname} imap.attachments.folder.folderPath=Imap Attachments -# Activities Feed - refer to subsystem - -# Feed max size (number of entries) -activities.feed.max.size=100 -# Feed max age (eg. 44640 mins => 31 days) -activities.feed.max.ageMins=44640 - -activities.feedNotifier.batchSize=200 -activities.feedNotifier.numThreads=2 - -# Subsystem unit test values. Will not have any effect on production servers -subsystems.test.beanProp.default.longProperty=123456789123456789 -subsystems.test.beanProp.default.anotherStringProperty=Global Default -subsystems.test.beanProp=inst1,inst2,inst3 -subsystems.test.beanProp.value.inst2.boolProperty=true -subsystems.test.beanProp.value.inst3.anotherStringProperty=Global Instance Default -subsystems.test.simpleProp2=true -subsystems.test.simpleProp3=Global Default3 - -# Default Async Action Thread Pool -default.async.action.threadPriority=1 -default.async.action.corePoolSize=8 -default.async.action.maximumPoolSize=20 - -# Deployment Service -deployment.service.numberOfSendingThreads=5 -deployment.service.corePoolSize=2 -deployment.service.maximumPoolSize=3 -deployment.service.threadPriority=5 -# How long to wait in mS before refreshing a target lock - detects shutdown servers -deployment.service.targetLockRefreshTime=60000 -# How long to wait in mS from the last communication before deciding that deployment has failed, possibly -# the destination is no longer available? -deployment.service.targetLockTimeout=3600000 - -#Invitation Service -# Should send emails as part of invitation process. 
-notification.email.siteinvite=true
-
-# Transfer Service
-transferservice.receiver.enabled=true
-transferservice.receiver.stagingDir=${java.io.tmpdir}/alfresco-transfer-staging
-#
-# How long to wait in mS before refreshing a transfer lock - detects shutdown servers
-# Default 1 minute.
-transferservice.receiver.lockRefreshTime=60000
-#
-# How many times to retry the transfer lock
-transferservice.receiver.lockRetryCount=3
-# How long to wait, in mS, before retrying the transfer lock
-transferservice.receiver.lockRetryWait=100
-#
-# How long to wait, in mS, since the last contact from the client before
-# timing out a transfer. Needs to be long enough to cope with network delays and "thinking
-# time" for both source and destination. Default 5 minutes.
-transferservice.receiver.lockTimeOut=300000
-
+# Activities Feed - refer to subsystem
+
+# Feed max size (number of entries)
+activities.feed.max.size=100
+# Feed max age (eg. 44640 mins => 31 days)
+activities.feed.max.ageMins=44640
+
+activities.feedNotifier.batchSize=200
+activities.feedNotifier.numThreads=2
+
+# Subsystem unit test values. Will not have any effect on production servers
+subsystems.test.beanProp.default.longProperty=123456789123456789
+subsystems.test.beanProp.default.anotherStringProperty=Global Default
+subsystems.test.beanProp=inst1,inst2,inst3
+subsystems.test.beanProp.value.inst2.boolProperty=true
+subsystems.test.beanProp.value.inst3.anotherStringProperty=Global Instance Default
+subsystems.test.simpleProp2=true
+subsystems.test.simpleProp3=Global Default3
+
+# Default Async Action Thread Pool
+default.async.action.threadPriority=1
+default.async.action.corePoolSize=8
+default.async.action.maximumPoolSize=20
+
+# Deployment Service
+deployment.service.numberOfSendingThreads=5
+deployment.service.corePoolSize=2
+deployment.service.maximumPoolSize=3
+deployment.service.threadPriority=5
+# How long to wait in mS before refreshing a target lock - detects shutdown servers
+deployment.service.targetLockRefreshTime=60000
+# How long to wait in mS from the last communication before deciding that deployment has failed, possibly
+# the destination is no longer available?
+deployment.service.targetLockTimeout=3600000
+
+#Invitation Service
+# Should send emails as part of invitation process.
+notification.email.siteinvite=true
+
+# Transfer Service
+transferservice.receiver.enabled=true
+transferservice.receiver.stagingDir=${java.io.tmpdir}/alfresco-transfer-staging
+#
+# How long to wait in mS before refreshing a transfer lock - detects shutdown servers
+# Default 1 minute.
+transferservice.receiver.lockRefreshTime=60000
+#
+# How many times to retry the transfer lock
+transferservice.receiver.lockRetryCount=3
+# How long to wait, in mS, before retrying the transfer lock
+transferservice.receiver.lockRetryWait=100
+#
+# How long to wait, in mS, since the last contact from the client before
+# timing out a transfer. Needs to be long enough to cope with network delays and "thinking
+# time" for both source and destination. Default 5 minutes.
+transferservice.receiver.lockTimeOut=300000
+
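The lock properties above describe a simple lease: the receiver refreshes its lock every lockRefreshTime milliseconds, and a lock that has not been refreshed within lockTimeOut milliseconds is presumed to belong to a dead server. A plain-Java sketch of that contract follows; the class and member names are invented for illustration and this is not the actual TransferReceiver code:

    import java.util.concurrent.atomic.AtomicLong;

    public class TransferLockLease
    {
        static final long REFRESH_MS = 60_000;   // transferservice.receiver.lockRefreshTime
        static final long TIMEOUT_MS = 300_000;  // transferservice.receiver.lockTimeOut

        private final AtomicLong lastContact = new AtomicLong(System.currentTimeMillis());

        // Called periodically by the live lock holder, roughly every REFRESH_MS
        void refresh()
        {
            lastContact.set(System.currentTimeMillis());
        }

        // A lock whose holder has stopped refreshing is presumed dead after TIMEOUT_MS
        boolean isExpired()
        {
            return System.currentTimeMillis() - lastContact.get() > TIMEOUT_MS;
        }
    }
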
# Max time allowed for WCM folder rename operation issued by external clients (CIFS, FTP)
wcm.rename.max.time.milliseconds=2000

-; DM Receiver Properties
-;
-; The name of the DM Receiver target - you deploy to this target name
-deployment.dmr.name=alfresco
-
-; consolidate staging, author and workflow sandboxes to one
-deployment.dmr.consolidate=true
-
-; The name of the Alfresco receiver target
-deployment.avm.name=avm
-
-;Where should the root of the web project be stored, by default /www/avm_webapps
-deployment.avm.rootPath=/www/avm_webapps
-
-; Pattern for live stores deployment by the alfresco receiver
-deployment.avm.storeNamePattern=%storeName%-live
-
-; Built-in deployment receiver properties for the default
-; filesystem receiver
-
-; filesystem receiver configuration
-deployment.filesystem.rootdir=./wcm
-deployment.filesystem.datadir=${deployment.filesystem.rootdir}/depdata
-deployment.filesystem.logdir=${deployment.filesystem.rootdir}/deplog
-deployment.filesystem.metadatadir=${deployment.filesystem.rootdir}/depmetadata
-
-deployment.filesystem.autofix=true
-deployment.filesystem.errorOnOverwrite=false
-
-; default filesystem target configuration
-deployment.filesystem.default.rootdir=./www
-deployment.filesystem.default.name=filesystem
-deployment.filesystem.default.metadatadir=${deployment.filesystem.metadatadir}/default
-
-# OrphanReaper
-orphanReaper.lockRefreshTime=60000
-orphanReaper.lockTimeOut=3600000
-
+; DM Receiver Properties
+;
+; The name of the DM Receiver target - you deploy to this target name
+deployment.dmr.name=alfresco
+
+; consolidate staging, author and workflow sandboxes to one
+deployment.dmr.consolidate=true
+
+; The name of the Alfresco receiver target
+deployment.avm.name=avm
+
+;Where should the root of the web project be stored, by default /www/avm_webapps
+deployment.avm.rootPath=/www/avm_webapps
+
+; Pattern for live stores deployment by the alfresco receiver
+deployment.avm.storeNamePattern=%storeName%-live
+
+; Built-in deployment receiver properties for the default
+; filesystem receiver
+
+; filesystem receiver configuration
+deployment.filesystem.rootdir=./wcm
+deployment.filesystem.datadir=${deployment.filesystem.rootdir}/depdata
+deployment.filesystem.logdir=${deployment.filesystem.rootdir}/deplog
+deployment.filesystem.metadatadir=${deployment.filesystem.rootdir}/depmetadata
+
+deployment.filesystem.autofix=true
+deployment.filesystem.errorOnOverwrite=false
+
+; default filesystem target configuration
+deployment.filesystem.default.rootdir=./www
+deployment.filesystem.default.name=filesystem
+deployment.filesystem.default.metadatadir=${deployment.filesystem.metadatadir}/default
+
+# OrphanReaper
+orphanReaper.lockRefreshTime=60000
+orphanReaper.lockTimeOut=3600000
+

# security
security.anyDenyDenies=true
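security.anyDenyDenies=true makes permission evaluation treat a single DENY entry as a veto over any number of ALLOW entries. A minimal sketch of that rule, assuming a flattened list of access control entries (an illustration of the flag's intent, not Alfresco's PermissionService implementation):

    import java.util.List;

    public class AnyDenyDeniesSketch
    {
        enum Verdict { ALLOW, DENY }

        // With anyDenyDenies=true, one DENY entry vetoes any number of ALLOW entries
        static boolean isAllowed(List<Verdict> entries, boolean anyDenyDenies)
        {
            if (anyDenyDenies && entries.contains(Verdict.DENY))
            {
                return false;
            }
            return entries.contains(Verdict.ALLOW);
        }

        public static void main(String[] args)
        {
            List<Verdict> mixed = List.of(Verdict.ALLOW, Verdict.DENY);
            System.out.println(isAllowed(mixed, true));  // false: the DENY wins
            System.out.println(isAllowed(mixed, false)); // true: an ALLOW is enough
        }
    }
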
-#
-# Encryption properties
-#
-# default keystores location
-dir.keystore=classpath:alfresco/keystore
-
-# general encryption parameters
-encryption.keySpec.class=org.alfresco.encryption.DESEDEKeyGenerator
-encryption.keyAlgorithm=DESede
-encryption.cipherAlgorithm=DESede/CBC/PKCS5Padding
-
-# secret key keystore configuration
-encryption.keystore.location=${dir.keystore}/keystore
-encryption.keystore.keyMetaData.location=${dir.keystore}/keystore-passwords.properties
-encryption.keystore.provider=
-encryption.keystore.type=JCEKS
-
-# backup secret key keystore configuration
-encryption.keystore.backup.location=${dir.keystore}/backup-keystore
-encryption.keystore.backup.keyMetaData.location=${dir.keystore}/backup-keystore-passwords.properties
-encryption.keystore.backup.provider=
-encryption.keystore.backup.type=JCEKS
-
-# Should encryptable properties be re-encrypted with new encryption keys on bootstrap?
-encryption.bootstrap.reencrypt=false
-
-# mac/md5 encryption
-encryption.mac.messageTimeout=30000
-encryption.mac.algorithm=HmacSHA1
-
-# ssl encryption
-encryption.ssl.keystore.location=${dir.keystore}/ssl.keystore
-encryption.ssl.keystore.provider=
-encryption.ssl.keystore.type=JCEKS
-encryption.ssl.keystore.keyMetaData.location=${dir.keystore}/ssl-keystore-passwords.properties
-encryption.ssl.truststore.location=${dir.keystore}/ssl.truststore
-encryption.ssl.truststore.provider=
-encryption.ssl.truststore.type=JCEKS
-encryption.ssl.truststore.keyMetaData.location=${dir.keystore}/ssl-truststore-passwords.properties
-
-# Re-encryptor properties
-encryption.reencryptor.chunkSize=100
-encryption.reencryptor.numThreads=2
-
-# SOLR connection details (e.g. for JMX)
-solr.host=localhost
-solr.port=8080
-solr.port.ssl=8443
-solr.solrUser=solr
-solr.solrPassword=solr
-# none, https
-solr.secureComms=https
-
-
-solr.max.total.connections=40
-solr.max.host.connections=40
-
-# Solr connection timeouts
-# solr connect timeout in ms
-solr.solrConnectTimeout=5000
-
-# cron expression defining how often the Solr Admin client (used by JMX) pings Solr if it goes away
-solr.solrPingCronExpression=0 0/5 * * * ? *
-
+#
+# Encryption properties
+#
+# default keystores location
+dir.keystore=classpath:alfresco/keystore
+
+# general encryption parameters
+encryption.keySpec.class=org.alfresco.encryption.DESEDEKeyGenerator
+encryption.keyAlgorithm=DESede
+encryption.cipherAlgorithm=DESede/CBC/PKCS5Padding
+
+# secret key keystore configuration
+encryption.keystore.location=${dir.keystore}/keystore
+encryption.keystore.keyMetaData.location=${dir.keystore}/keystore-passwords.properties
+encryption.keystore.provider=
+encryption.keystore.type=JCEKS
+
+# backup secret key keystore configuration
+encryption.keystore.backup.location=${dir.keystore}/backup-keystore
+encryption.keystore.backup.keyMetaData.location=${dir.keystore}/backup-keystore-passwords.properties
+encryption.keystore.backup.provider=
+encryption.keystore.backup.type=JCEKS
+
+# Should encryptable properties be re-encrypted with new encryption keys on bootstrap?
+encryption.bootstrap.reencrypt=false
+
+# mac/md5 encryption
+encryption.mac.messageTimeout=30000
+encryption.mac.algorithm=HmacSHA1
+
+# ssl encryption
+encryption.ssl.keystore.location=${dir.keystore}/ssl.keystore
+encryption.ssl.keystore.provider=
+encryption.ssl.keystore.type=JCEKS
+encryption.ssl.keystore.keyMetaData.location=${dir.keystore}/ssl-keystore-passwords.properties
+encryption.ssl.truststore.location=${dir.keystore}/ssl.truststore
+encryption.ssl.truststore.provider=
+encryption.ssl.truststore.type=JCEKS
+encryption.ssl.truststore.keyMetaData.location=${dir.keystore}/ssl-truststore-passwords.properties
+
+# Re-encryptor properties
+encryption.reencryptor.chunkSize=100
+encryption.reencryptor.numThreads=2
+
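The keystores configured above are JCEKS files. A minimal sketch of opening one with the standard java.security API, assuming a local file path, password, and alias (the real repository resolves these through the encryption.* properties and the keystore-passwords metadata files):

    import java.io.FileInputStream;
    import java.security.Key;
    import java.security.KeyStore;

    public class KeystorePeek
    {
        public static void main(String[] args) throws Exception
        {
            char[] password = "change_me".toCharArray();       // hypothetical password
            KeyStore keyStore = KeyStore.getInstance("JCEKS"); // matches encryption.keystore.type
            try (FileInputStream in = new FileInputStream("keystore"))
            {
                keyStore.load(in, password);
            }
            // A secret key entry can then be fetched by alias, e.g. for DESede encryption
            Key key = keyStore.getKey("metadata", password);   // "metadata" is an assumed alias
            System.out.println(key == null ? "no such alias" : key.getAlgorithm());
        }
    }
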
+# SOLR connection details (e.g. for JMX)
+solr.host=localhost
+solr.port=8080
+solr.port.ssl=8443
+solr.solrUser=solr
+solr.solrPassword=solr
+# none, https
+solr.secureComms=https
+
+
+solr.max.total.connections=40
+solr.max.host.connections=40
+
+# Solr connection timeouts
+# solr connect timeout in ms
+solr.solrConnectTimeout=5000
+
+# cron expression defining how often the Solr Admin client (used by JMX) pings Solr if it goes away
+solr.solrPingCronExpression=0 0/5 * * * ? *
+
#Default SOLR store mappings
solr.store.mappings=solrMappingAlfresco,solrMappingArchive
@@ -950,49 +950,49 @@ solr.store.mappings.value.solrMappingArchive.baseUrl=/solr/archive
 solr.store.mappings.value.solrMappingArchive.protocol=archive
 solr.store.mappings.value.solrMappingArchive.identifier=SpacesStore
-#
-# Web Publishing Properties
-#
-publishing.root.path=/app:company_home/app:dictionary
-publishing.root=${publishing.root.path}/${spaces.publishing.root.childname}
-
-#
-# URL Shortening Properties
-#
-urlshortening.bitly.username=brianalfresco
-urlshortening.bitly.api.key=R_ca15c6c89e9b25ccd170bafd209a0d4f
-urlshortening.bitly.url.length=20
-
-#
-# Bulk Filesystem Importer
-#
-
-# The number of threads to employ in a batch import
-bulkImport.batch.numThreads=4
-
-# The size of a batch in a batch import i.e. the number of files to import in a
-# transaction/thread
-bulkImport.batch.batchSize=20
-
-
-#
-# Caching Content Store
-#
-system.content.caching.cacheOnInbound=true
-system.content.caching.maxDeleteWatchCount=1
-# Clean up every day at 3 am
-system.content.caching.contentCleanup.cronExpression=0 0 3 * * ?
-system.content.caching.timeToLiveSeconds=0
-system.content.caching.timeToIdleSeconds=86400
-system.content.caching.maxElementsInMemory=5000
-system.content.caching.maxElementsOnDisk=10000
-system.content.caching.minFileAgeMillis=60000
-system.content.caching.maxUsageMB=4096
-# maxFileSizeMB - 0 means no max file size.
-system.content.caching.maxFileSizeMB=0
-
-mybatis.useLocalCaches=false
-
+#
+# Web Publishing Properties
+#
+publishing.root.path=/app:company_home/app:dictionary
+publishing.root=${publishing.root.path}/${spaces.publishing.root.childname}
+
+#
+# URL Shortening Properties
+#
+urlshortening.bitly.username=brianalfresco
+urlshortening.bitly.api.key=R_ca15c6c89e9b25ccd170bafd209a0d4f
+urlshortening.bitly.url.length=20
+
+#
+# Bulk Filesystem Importer
+#
+
+# The number of threads to employ in a batch import
+bulkImport.batch.numThreads=4
+
+# The size of a batch in a batch import i.e. the number of files to import in a
+# transaction/thread
+bulkImport.batch.batchSize=20
+
+
+#
+# Caching Content Store
+#
+system.content.caching.cacheOnInbound=true
+system.content.caching.maxDeleteWatchCount=1
+# Clean up every day at 3 am
+system.content.caching.contentCleanup.cronExpression=0 0 3 * * ?
+system.content.caching.timeToLiveSeconds=0
+system.content.caching.timeToIdleSeconds=86400
+system.content.caching.maxElementsInMemory=5000
+system.content.caching.maxElementsOnDisk=10000
+system.content.caching.minFileAgeMillis=60000
+system.content.caching.maxUsageMB=4096
+# maxFileSizeMB - 0 means no max file size.
+system.content.caching.maxFileSizeMB=0 + +mybatis.useLocalCaches=false + fileFolderService.checkHidden.enabled=true diff --git a/config/alfresco/subsystems/Authentication/alfrescoNtlm/alfresco-authentication-context.xml b/config/alfresco/subsystems/Authentication/alfrescoNtlm/alfresco-authentication-context.xml index e8f1b6cfb9..d82b03b79b 100644 --- a/config/alfresco/subsystems/Authentication/alfrescoNtlm/alfresco-authentication-context.xml +++ b/config/alfresco/subsystems/Authentication/alfrescoNtlm/alfresco-authentication-context.xml @@ -85,6 +85,9 @@ + + + diff --git a/config/alfresco/swf-transform-context.xml b/config/alfresco/swf-transform-context.xml index f777ee7f8f..7974682e35 100644 --- a/config/alfresco/swf-transform-context.xml +++ b/config/alfresco/swf-transform-context.xml @@ -175,4 +175,28 @@ + + + + + + + + + + application/pdf + + + + + + image/tiff + application/x-shockwave-flash + + + + + diff --git a/source/java/org/alfresco/filesys/FTPServerTest.java b/source/java/org/alfresco/filesys/FTPServerTest.java index 24aed7dc88..c0d4ad75de 100644 --- a/source/java/org/alfresco/filesys/FTPServerTest.java +++ b/source/java/org/alfresco/filesys/FTPServerTest.java @@ -657,6 +657,117 @@ public class FTPServerTest extends TestCase } + /** + * Test Setting the modification time FTP server + * + * @throws Exception + */ + public void testModificationTime() throws Exception + { + final String PATH1 = "FTPServerTest"; + final String PATH2 = "ModificationTime"; + + logger.debug("Start testModificationTime"); + + FTPClient ftp = connectClient(); + + try + { + int reply = ftp.getReplyCode(); + + if (!FTPReply.isPositiveCompletion(reply)) + { + fail("FTP server refused connection."); + } + + boolean login = ftp.login(USER_ADMIN, PASSWORD_ADMIN); + assertTrue("admin login successful", login); + + reply = ftp.cwd("/Alfresco/User Homes"); + assertTrue(FTPReply.isPositiveCompletion(reply)); + + // Delete the root directory in case it was left over from a previous test run + try + { + ftp.removeDirectory(PATH1); + } + catch (IOException e) + { + // ignore this error + } + + // make root directory + ftp.makeDirectory(PATH1); + ftp.cwd(PATH1); + + // make sub-directory in new directory + ftp.makeDirectory(PATH2); + ftp.cwd(PATH2); + + // List the files in the new directory + FTPFile[] files = ftp.listFiles(); + assertTrue("files not empty", files.length == 0); + + // Create a file + String FILE1_CONTENT_1="test file 1 content"; + String FILE1_NAME = "testFile1.txt"; + ftp.appendFile(FILE1_NAME , new ByteArrayInputStream(FILE1_CONTENT_1.getBytes("UTF-8"))); + + + String pathname = "/Alfresco/User Homes" + "/" + PATH1 + "/" + PATH2 + "/" + FILE1_NAME; + + logger.debug("set modification time"); + // YYYYMMDDhhmmss Time set to 2012 August 30 12:39:05 + String olympicTime = "20120830123905"; + ftp.setModificationTime(pathname, olympicTime); + + String extractedTime = ftp.getModificationTime(pathname); + // Feature of the commons ftp library ExtractedTime has a "status code" first and is followed by newline chars + + assertTrue("time not set correctly by explicit set time", extractedTime.contains(olympicTime)); + + // Get the new file + FTPFile[] files2 = ftp.listFiles(); + assertTrue("files not one", files2.length == 1); + + InputStream is = ftp.retrieveFileStream(FILE1_NAME); + + String content = inputStreamToString(is); + assertEquals("Content is not as expected", content, FILE1_CONTENT_1); + ftp.completePendingCommand(); + + // Update the file contents without setting time directly + String 
FILE1_CONTENT_2="That's how it is says Pooh!"; + ftp.appendFile(FILE1_NAME , new ByteArrayInputStream(FILE1_CONTENT_2.getBytes("UTF-8"))); + + InputStream is2 = ftp.retrieveFileStream(FILE1_NAME); + + String content2 = inputStreamToString(is2); + assertEquals("Content is not as expected", FILE1_CONTENT_2, content2); + ftp.completePendingCommand(); + + extractedTime = ftp.getModificationTime(pathname); + + assertFalse("time not moved on if time not explicitly set", extractedTime.contains(olympicTime)); + + // now delete the file we have been using. + assertTrue (ftp.deleteFile(FILE1_NAME)); + + // negative test - file should have gone now. + assertFalse (ftp.deleteFile(FILE1_NAME)); + + } + finally + { + // clean up tree if left over from previous run + + ftp.disconnect(); + } + } // test set time + + + + /** * Create a user with a small quota. * diff --git a/source/java/org/alfresco/filesys/repo/ContentDiskDriver.java b/source/java/org/alfresco/filesys/repo/ContentDiskDriver.java index e38279fc02..6d9f61b7d1 100644 --- a/source/java/org/alfresco/filesys/repo/ContentDiskDriver.java +++ b/source/java/org/alfresco/filesys/repo/ContentDiskDriver.java @@ -60,7 +60,6 @@ import org.alfresco.jlan.server.filesys.SearchContext; import org.alfresco.jlan.server.filesys.SrvDiskInfo; import org.alfresco.jlan.server.filesys.TreeConnection; import org.alfresco.jlan.server.filesys.cache.FileState; -import org.alfresco.jlan.server.filesys.cache.FileStateLockManager; import org.alfresco.jlan.server.filesys.pseudo.MemoryNetworkFile; import org.alfresco.jlan.server.filesys.pseudo.PseudoFile; import org.alfresco.jlan.server.filesys.pseudo.PseudoFileInterface; @@ -99,6 +98,7 @@ import org.alfresco.service.cmr.repository.AssociationRef; import org.alfresco.service.cmr.repository.ContentData; import org.alfresco.service.cmr.repository.ContentIOException; import org.alfresco.service.cmr.repository.ContentService; +import org.alfresco.service.cmr.repository.InvalidNodeRefException; import org.alfresco.service.cmr.repository.MimetypeService; import org.alfresco.service.cmr.repository.NodeRef; import org.alfresco.service.cmr.repository.NodeService; @@ -1075,7 +1075,7 @@ public class ContentDiskDriver extends AlfrescoTxDiskDriver implements DiskInter throw new AccessDeniedException("Get file information " + path); } - catch (AlfrescoRuntimeException ex) + catch (RuntimeException ex) { // Debug @@ -1380,7 +1380,7 @@ public class ContentDiskDriver extends AlfrescoTxDiskDriver implements DiskInter throw new FileNotFoundException("Start search " + searchPath); } - catch (AlfrescoRuntimeException ex) + catch (RuntimeException ex) { // Debug @@ -1891,7 +1891,12 @@ public class ContentDiskDriver extends AlfrescoTxDiskDriver implements DiskInter String srvName = null; SMBServer cifsServer = (SMBServer) sess.getServer().getConfiguration().findServer( "CIFS"); - if ( cifsServer != null) + if(sess instanceof SMBSrvSession) + { + SMBSrvSession smbSess = (SMBSrvSession)sess; + srvName = smbSess.getShareHostName(); + } + else if ( cifsServer != null) { // Use the CIFS server name in the URL @@ -2005,7 +2010,7 @@ public class ContentDiskDriver extends AlfrescoTxDiskDriver implements DiskInter throw new AccessDeniedException("Open file " + params.getFullPath()); } - catch (AlfrescoRuntimeException ex) + catch (RuntimeException ex) { // Debug @@ -2191,7 +2196,7 @@ public class ContentDiskDriver extends AlfrescoTxDiskDriver implements DiskInter throw new DiskFullException("Create file " + params.getFullPath()); } - catch 
(AlfrescoRuntimeException ex) + catch (RuntimeException ex) { // Debug @@ -2322,7 +2327,7 @@ public class ContentDiskDriver extends AlfrescoTxDiskDriver implements DiskInter throw new AccessDeniedException("Create directory " + params.getFullPath()); } - catch (AlfrescoRuntimeException ex) + catch (RuntimeException ex) { // Debug @@ -2423,7 +2428,7 @@ public class ContentDiskDriver extends AlfrescoTxDiskDriver implements DiskInter throw new AccessDeniedException("Delete directory " + dir); } - catch (AlfrescoRuntimeException ex) + catch (RuntimeException ex) { // Debug @@ -2551,7 +2556,7 @@ public class ContentDiskDriver extends AlfrescoTxDiskDriver implements DiskInter if (file instanceof NodeRefNetworkFile) { NodeRef nodeRef = ((NodeRefNetworkFile) file).getNodeRef(); - if (nodeService.hasAspect(nodeRef, ContentModel.ASPECT_NO_CONTENT)) + if (nodeService.exists(nodeRef) && nodeService.hasAspect(nodeRef, ContentModel.ASPECT_NO_CONTENT)) { logger.debug("No content - delete"); fileFolderService.delete(nodeRef); @@ -3444,7 +3449,18 @@ public class ContentDiskDriver extends AlfrescoTxDiskDriver implements DiskInter throw new AccessDeniedException("Node locked " + oldName); } - catch (AlfrescoRuntimeException ex) + catch (InvalidNodeRefException ex) + { + // Debug + + if (logger.isDebugEnabled() && ctx.hasDebug(AlfrescoContext.DBG_RENAME)) + logger.debug("Rename file - file doesn't exist, " + oldName, ex); + + // Convert to a filesystem access denied status + + throw new FileNotFoundException("File doesn't exist " + oldName); + } + catch (RuntimeException ex) { // Unexpected Exception being consumed here - hence the error logging. logger.error("Unable to rename file" + oldName, ex); @@ -3617,7 +3633,7 @@ public class ContentDiskDriver extends AlfrescoTxDiskDriver implements DiskInter throw new AccessDeniedException("Set file information " + name); } - catch (AlfrescoRuntimeException ex) + catch (RuntimeException ex) { // Debug diff --git a/source/java/org/alfresco/filesys/repo/ContentDiskDriver2.java b/source/java/org/alfresco/filesys/repo/ContentDiskDriver2.java index 8956e70e60..091d6e5364 100644 --- a/source/java/org/alfresco/filesys/repo/ContentDiskDriver2.java +++ b/source/java/org/alfresco/filesys/repo/ContentDiskDriver2.java @@ -1636,6 +1636,11 @@ public class ContentDiskDriver2 extends AlfrescoDiskDriver implements ExtendedD if(networkFile != null && !networkFile.isReadOnly()) { networkFile.setModifyDate(info.getModifyDateTime()); + if(networkFile instanceof TempNetworkFile) + { + TempNetworkFile tnf = (TempNetworkFile)networkFile; + tnf.setModificationDateSetDirectly(true); + } } if ( logger.isDebugEnabled()) @@ -2810,7 +2815,16 @@ public class ContentDiskDriver2 extends AlfrescoDiskDriver implements ExtendedD */ getPolicyFilter().disableBehaviour(target, ContentModel.ASPECT_AUDITABLE); nodeService.setProperty(target, ContentModel.PROP_MODIFIER, authService.getCurrentUserName()); - nodeService.setProperty(target, ContentModel.PROP_MODIFIED, new Date(tempFile.getModifyDate())); + if(tempFile.isModificationDateSetDirectly()) + { + logger.debug("modification date set directly"); + nodeService.setProperty(target, ContentModel.PROP_MODIFIED, new Date(tempFile.getModifyDate())); + } + else + { + logger.debug("modification date not set directly"); + nodeService.setProperty(target, ContentModel.PROP_MODIFIED, new Date()); + } // Take an initial guess at the mimetype (if it has not been set by something already) String mimetype = mimetypeService.guessMimetype(tempFile.getFullName(), new 
FileContentReader(tempFile.getFile())); diff --git a/source/java/org/alfresco/filesys/repo/TempNetworkFile.java b/source/java/org/alfresco/filesys/repo/TempNetworkFile.java index 500e57b89d..1350c59062 100644 --- a/source/java/org/alfresco/filesys/repo/TempNetworkFile.java +++ b/source/java/org/alfresco/filesys/repo/TempNetworkFile.java @@ -17,6 +17,7 @@ import org.alfresco.jlan.smb.server.disk.JavaNetworkFile; public class TempNetworkFile extends JavaNetworkFile implements NetworkFileStateInterface { private boolean changed = false; + boolean modificationDateSetDirectly = false; /** * Create a new temporary file with no existing content. @@ -157,6 +158,16 @@ public class TempNetworkFile extends JavaNetworkFile implements NetworkFileState { return changed; } + + public boolean isModificationDateSetDirectly() + { + return modificationDateSetDirectly; + } + + public void setModificationDateSetDirectly(boolean modificationDateSetDirectly) + { + this.modificationDateSetDirectly = modificationDateSetDirectly; + } private FileState fileState; diff --git a/source/java/org/alfresco/repo/activities/ActivityPostServiceImpl.java b/source/java/org/alfresco/repo/activities/ActivityPostServiceImpl.java index 8f58c381da..2fcb167c1f 100644 --- a/source/java/org/alfresco/repo/activities/ActivityPostServiceImpl.java +++ b/source/java/org/alfresco/repo/activities/ActivityPostServiceImpl.java @@ -77,7 +77,15 @@ public class ActivityPostServiceImpl implements ActivityPostService */ public void postActivity(String activityType, String siteId, String appTool, String activityData) { - postActivity(activityType, siteId, appTool, activityData, ActivityPostEntity.STATUS.PENDING); + postActivity(activityType, siteId, appTool, activityData, ActivityPostEntity.STATUS.PENDING, getCurrentUser()); + } + + /* (non-Javadoc) + * @see org.alfresco.service.cmr.activities.ActivityService#postActivity(java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String) + */ + public void postActivity(String activityType, String siteId, String appTool, String activityData, String userId) + { + postActivity(activityType, siteId, appTool, activityData, ActivityPostEntity.STATUS.PENDING, userId); } /* (non-Javadoc) @@ -90,7 +98,7 @@ public class ActivityPostServiceImpl implements ActivityPostService StringBuffer sb = new StringBuffer(); sb.append("{").append("\""+PostLookup.JSON_NODEREF_LOOKUP+"\":\"").append(nodeRef.toString()).append("\"").append("}"); - postActivity(activityType, siteId, appTool, sb.toString(), ActivityPostEntity.STATUS.PENDING); + postActivity(activityType, siteId, appTool, sb.toString(), ActivityPostEntity.STATUS.PENDING, getCurrentUser()); } /* (non-Javadoc) @@ -105,7 +113,7 @@ public class ActivityPostServiceImpl implements ActivityPostService .append("\"name\":\"").append(name).append("\"") .append("}"); - postActivity(activityType, siteId, appTool, sb.toString(), ActivityPostEntity.STATUS.PENDING); + postActivity(activityType, siteId, appTool, sb.toString(), ActivityPostEntity.STATUS.PENDING, getCurrentUser()); } /* (non-Javadoc) @@ -126,12 +134,11 @@ public class ActivityPostServiceImpl implements ActivityPostService .append("\""+PostLookup.JSON_NODEREF_PARENT+"\":\"").append(parentNodeRef.toString()).append("\"") .append("}"); - postActivity(activityType, siteId, appTool, sb.toString(), ActivityPostEntity.STATUS.PENDING); + postActivity(activityType, siteId, appTool, sb.toString(), ActivityPostEntity.STATUS.PENDING, getCurrentUser()); } - private void postActivity(String activityType, 
String siteId, String appTool, String activityData, ActivityPostEntity.STATUS status) + private void postActivity(String activityType, String siteId, String appTool, String activityData, ActivityPostEntity.STATUS status, String userId) { - String currentUser = getCurrentUser(); try { @@ -197,11 +204,11 @@ public class ActivityPostServiceImpl implements ActivityPostService } // required - ParameterCheck.mandatoryString("currentUser", currentUser); + ParameterCheck.mandatoryString("userId", userId); - if (currentUser.length() > ActivityPostDAO.MAX_LEN_USER_ID) + if (userId.length() > ActivityPostDAO.MAX_LEN_USER_ID) { - throw new IllegalArgumentException("Invalid user - exceeds " + ActivityPostDAO.MAX_LEN_USER_ID + " chars: " + currentUser); + throw new IllegalArgumentException("Invalid user - exceeds " + ActivityPostDAO.MAX_LEN_USER_ID + " chars: " + userId); } } catch (IllegalArgumentException e) @@ -215,7 +222,7 @@ public class ActivityPostServiceImpl implements ActivityPostService { Date postDate = new Date(); ActivityPostEntity activityPost = new ActivityPostEntity(); - activityPost.setUserId(currentUser); + activityPost.setUserId(userId); activityPost.setSiteNetwork(tenantService.getName(siteId)); @@ -228,7 +235,7 @@ public class ActivityPostServiceImpl implements ActivityPostService // hash the userid to generate a job task node int nodeCount = estGridSize; - int userHashCode = currentUser.hashCode(); + int userHashCode = userId.hashCode(); int nodeHash = (userHashCode % nodeCount) + 1; activityPost.setJobTaskNode(nodeHash); diff --git a/source/java/org/alfresco/repo/activities/ActivityServiceImpl.java b/source/java/org/alfresco/repo/activities/ActivityServiceImpl.java index 9499794379..d5841a1bf4 100644 --- a/source/java/org/alfresco/repo/activities/ActivityServiceImpl.java +++ b/source/java/org/alfresco/repo/activities/ActivityServiceImpl.java @@ -157,6 +157,15 @@ public class ActivityServiceImpl implements ActivityService, InitializingBean activityPostService.postActivity(activityType, siteId, appTool, activityData); } + /* (non-Javadoc) + * @see org.alfresco.service.cmr.activities.ActivityService#postActivity(java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String) + */ + public void postActivity(String activityType, String siteId, String appTool, String activityData, String userId) + { + // delegate + activityPostService.postActivity(activityType, siteId, appTool, activityData, userId); + } + /* (non-Javadoc) * @see org.alfresco.service.cmr.activities.ActivityService#postActivity(java.lang.String, java.lang.String, java.lang.String, org.alfresco.service.cmr.repository.NodeRef) */ diff --git a/source/java/org/alfresco/repo/activities/ActivityServiceImplTest.java b/source/java/org/alfresco/repo/activities/ActivityServiceImplTest.java index 3c04953f5d..f2e60ff008 100644 --- a/source/java/org/alfresco/repo/activities/ActivityServiceImplTest.java +++ b/source/java/org/alfresco/repo/activities/ActivityServiceImplTest.java @@ -25,6 +25,7 @@ import org.alfresco.repo.jscript.ClasspathScriptLocation; import org.alfresco.repo.security.authentication.AuthenticationUtil; import org.alfresco.service.cmr.activities.ActivityService; import org.alfresco.service.cmr.activities.FeedControl; +import org.alfresco.service.cmr.repository.NodeRef; import org.alfresco.service.cmr.repository.ScriptLocation; import org.alfresco.service.cmr.repository.ScriptService; import org.alfresco.service.cmr.security.MutableAuthenticationService; @@ -81,7 +82,7 @@ public class 
ActivityServiceImplTest extends BaseSpringTest { try { - this.activityService.postActivity("", "", "", null, ""); + this.activityService.postActivity("", "", "",(NodeRef) null, ""); fail("invalid post activity"); } catch (IllegalArgumentException iae) diff --git a/source/java/org/alfresco/repo/admin/patch/impl/FixBpmPackagesPatch.java b/source/java/org/alfresco/repo/admin/patch/impl/FixBpmPackagesPatch.java index e5c5efddc3..31cde20457 100644 --- a/source/java/org/alfresco/repo/admin/patch/impl/FixBpmPackagesPatch.java +++ b/source/java/org/alfresco/repo/admin/patch/impl/FixBpmPackagesPatch.java @@ -126,8 +126,12 @@ public class FixBpmPackagesPatch extends AbstractPatch String name = (String) nodeService.getProperty(packageRef, ContentModel.PROP_NAME); if (logger.isDebugEnabled()) logger.debug("Package " + name + " type " + typeQname); - // New type of the package is bpm:package - nodeService.setType(packageRef, WorkflowModel.TYPE_PACKAGE); + + if (!nodeService.getType(packageRef).equals(WorkflowModel.TYPE_PACKAGE)) + { + // New type of the package is bpm:package + nodeService.setType(packageRef, WorkflowModel.TYPE_PACKAGE); + } // Get all package items List packageItemsAssocs = nodeService.getChildAssocs(packageRef, ContentModel.ASSOC_CONTAINS, RegexQNamePattern.MATCH_ALL); @@ -143,6 +147,12 @@ public class FixBpmPackagesPatch extends AbstractPatch logger.error("Association between package: " + name + " and item: " + itemName + " is primary association, so removing this assiciation will result in child node deletion"); continue; } + + if (itemAssoc.getTypeQName().equals(WorkflowModel.ASSOC_PACKAGE_CONTAINS)) + { + continue; + } + boolean assocRemoved = nodeService.removeChildAssociation(itemAssoc); if (assocRemoved) { diff --git a/source/java/org/alfresco/repo/avm/AVMNodeService.java b/source/java/org/alfresco/repo/avm/AVMNodeService.java index 88db525e03..8073b92b78 100644 --- a/source/java/org/alfresco/repo/avm/AVMNodeService.java +++ b/source/java/org/alfresco/repo/avm/AVMNodeService.java @@ -43,6 +43,7 @@ import org.alfresco.service.cmr.avm.AVMNodeDescriptor; import org.alfresco.service.cmr.avm.AVMNotFoundException; import org.alfresco.service.cmr.avm.AVMService; import org.alfresco.service.cmr.avm.AVMStoreDescriptor; +import org.alfresco.service.cmr.avm.AVMWrongTypeException; import org.alfresco.service.cmr.dictionary.AspectDefinition; import org.alfresco.service.cmr.dictionary.ClassDefinition; import org.alfresco.service.cmr.dictionary.DataTypeDefinition; @@ -373,6 +374,7 @@ public class AVMNodeService extends AbstractNodeServiceImpl implements NodeServi * * @see org.alfresco.service.cmr.dictionary.DictionaryService */ + @SuppressWarnings("deprecation") public ChildAssociationRef createNode( NodeRef parentRef, QName assocTypeQName, @@ -1656,16 +1658,58 @@ public class AVMNodeService extends AbstractNodeServiceImpl implements NodeServi { return result; } - List all = getChildAssocs(nodeRef); - for (ChildAssociationRef child : all) + // First check if we are matching on all + if (qnamePattern == null || !(qnamePattern instanceof QName)) { - if (!qnamePattern.isMatch(child.getQName())) + // Either null (always match) or we have to match on each, individually + List all = getChildAssocs(nodeRef); + for (ChildAssociationRef child : all) { - continue; + if (qnamePattern == null || !qnamePattern.isMatch(child.getQName())) + { + continue; + } + result.add(child); } - result.add(child); + return result; + } + else + { + // We have a specific QName and therefore an exact path + QName qname = 
(QName) qnamePattern; + String name = qname.getLocalName(); + + // Resolve the container + Pair containerVersionPath = AVMNodeConverter.ToAVMVersionPath(nodeRef); + int containerVersion = containerVersionPath.getFirst(); + String containerPath = containerVersionPath.getSecond(); + try + { + // Get the descriptor for the container + AVMNodeDescriptor containerDescriptor = fAVMService.lookup(containerVersion, containerPath); + @SuppressWarnings("unused") // Might succeed or fail + AVMNodeDescriptor childDescriptor = fAVMService.lookup(containerDescriptor, name); + result.add( + new ChildAssociationRef( + ContentModel.ASSOC_CONTAINS, + nodeRef, + QName.createQName(NamespaceService.CONTENT_MODEL_1_0_URI, name), + AVMNodeConverter.ToNodeRef( + containerVersion, + AVMNodeConverter.ExtendAVMPath(containerPath, name)), + true, + -1)); + } + catch (AVMNotFoundException e) + { + return result; + } + catch (AVMWrongTypeException e) + { + return result; + } + return result; } - return result; } @Override diff --git a/source/java/org/alfresco/repo/avm/AVMServiceConcurrentTest.java b/source/java/org/alfresco/repo/avm/AVMServiceConcurrentTest.java index b891bde770..829e2bdbd7 100644 --- a/source/java/org/alfresco/repo/avm/AVMServiceConcurrentTest.java +++ b/source/java/org/alfresco/repo/avm/AVMServiceConcurrentTest.java @@ -27,10 +27,13 @@ import javax.transaction.UserTransaction; import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback; import org.alfresco.service.cmr.avm.AVMNodeDescriptor; import org.alfresco.service.cmr.avmsync.AVMDifference; +import org.alfresco.service.cmr.repository.NodeRef; +import org.alfresco.service.cmr.repository.NodeService; import org.alfresco.service.cmr.repository.StoreRef; import org.alfresco.service.cmr.search.ResultSet; import org.alfresco.service.cmr.search.ResultSetRow; import org.alfresco.service.cmr.search.SearchService; +import org.alfresco.service.namespace.NamespaceService; /** * AVM concurrency and search @@ -176,6 +179,37 @@ public class AVMServiceConcurrentTest extends AVMServiceTestBase testTX.commit(); } + public synchronized void test_ALF_14979() throws Exception + { + String name = "test" + System.currentTimeMillis(); + + fService.createDirectory("main:/", name); + + // Now create many, many orphans + int peerCount = 1000; + for (int i = 0; i < peerCount; i++) + { + fService.createDirectory("main:/", name + "-" + i); + } + + StoreRef storeRef = AVMNodeConverter.ToStoreRef("main"); + NodeService fNodeService = (NodeService) fContext.getBean("NodeService"); + NodeRef rootNodeRef = fNodeService.getRootNode(storeRef); + + SearchService fSearchService = (SearchService) fContext.getBean("SearchService"); + NamespaceService fNamespaceService = (NamespaceService) fContext.getBean("NamespaceService"); + + // Now look it up + long before = System.nanoTime(); + List nodeRefs = fSearchService.selectNodes( + rootNodeRef, + "/cm:" + name, + null, fNamespaceService, false); + assertEquals("Expected to find a result", 1, nodeRefs.size()); + long after = System.nanoTime(); + System.out.println("Took " + (after-before)/1E6 + "ms to find entry out of " + peerCount); + } + public void test_ALF_786() throws Exception { int threads= 4; diff --git a/source/java/org/alfresco/repo/config/xml/RepoXMLConfigService.java b/source/java/org/alfresco/repo/config/xml/RepoXMLConfigService.java index c279518b08..12c538811f 100644 --- a/source/java/org/alfresco/repo/config/xml/RepoXMLConfigService.java +++ 
b/source/java/org/alfresco/repo/config/xml/RepoXMLConfigService.java @@ -26,13 +26,6 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import javax.transaction.UserTransaction; -import org.springframework.extensions.config.ConfigDeployment; -import org.springframework.extensions.config.ConfigImpl; -import org.springframework.extensions.config.ConfigSection; -import org.springframework.extensions.config.ConfigSource; -import org.springframework.extensions.config.evaluator.Evaluator; -import org.springframework.extensions.config.xml.XMLConfigService; -import org.springframework.extensions.config.xml.elementreader.ConfigElementReader; import org.alfresco.error.AlfrescoRuntimeException; import org.alfresco.repo.cache.SimpleCache; import org.alfresco.repo.security.authentication.AuthenticationContext; @@ -40,10 +33,18 @@ import org.alfresco.repo.security.authentication.AuthenticationUtil; import org.alfresco.repo.security.authentication.AuthenticationUtil.RunAsWork; import org.alfresco.repo.tenant.TenantAdminService; import org.alfresco.repo.tenant.TenantDeployer; +import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback; import org.alfresco.service.transaction.TransactionService; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.springframework.context.ApplicationEvent; +import org.springframework.extensions.config.ConfigDeployment; +import org.springframework.extensions.config.ConfigImpl; +import org.springframework.extensions.config.ConfigSection; +import org.springframework.extensions.config.ConfigSource; +import org.springframework.extensions.config.evaluator.Evaluator; +import org.springframework.extensions.config.xml.XMLConfigService; +import org.springframework.extensions.config.xml.elementreader.ConfigElementReader; /** * XML-based configuration service which can optionally read config from the Repository @@ -108,41 +109,40 @@ public class RepoXMLConfigService extends XMLConfigService implements TenantDepl return resetRepoConfig().getConfigDeployments(); } - private ConfigData initRepoConfig(String tenantDomain) + private ConfigData initRepoConfig(final String tenantDomain) { - ConfigData configData = null; - - // can be null e.g. initial login, after fresh bootstrap + ConfigData configData; + + // can be null e.g. 
initial login, after fresh bootstrap String currentUser = authenticationContext.getCurrentUserName(); if (currentUser == null) { authenticationContext.setSystemUserAsCurrentUser(); } - - UserTransaction userTransaction = transactionService.getUserTransaction(); - + try { - userTransaction.begin(); - - // parse config - List configDeployments = super.initConfig(); - - configData = getConfigDataLocal(tenantDomain); - if (configData != null) - { - configData.setConfigDeployments(configDeployments); - } - - userTransaction.commit(); - + configData = transactionService.getRetryingTransactionHelper().doInTransaction( + new RetryingTransactionCallback() + { + + @Override + public ConfigData execute() throws Throwable + { + // parse config + List configDeployments = RepoXMLConfigService.super.initConfig(); + + ConfigData configData = getConfigDataLocal(tenantDomain); + if (configData != null) + { + configData.setConfigDeployments(configDeployments); + } + return configData; + } + }, transactionService.isReadOnly()); + logger.info("Config initialised"); } - catch(Throwable e) - { - try { userTransaction.rollback(); } catch (Exception ex) {} - throw new AlfrescoRuntimeException("Failed to initialise config service", e); - } finally { if (currentUser == null) @@ -150,7 +150,7 @@ public class RepoXMLConfigService extends XMLConfigService implements TenantDepl authenticationContext.clearCurrentSecurityContext(); } } - + return configData; } diff --git a/source/java/org/alfresco/repo/content/transform/magick/AbstractImageMagickContentTransformerWorker.java b/source/java/org/alfresco/repo/content/transform/magick/AbstractImageMagickContentTransformerWorker.java index 3bc81d228b..aebcee6ee6 100644 --- a/source/java/org/alfresco/repo/content/transform/magick/AbstractImageMagickContentTransformerWorker.java +++ b/source/java/org/alfresco/repo/content/transform/magick/AbstractImageMagickContentTransformerWorker.java @@ -187,7 +187,14 @@ public abstract class AbstractImageMagickContentTransformerWorker extends Conten MimetypeMap.MIMETYPE_APPLICATION_ILLUSTRATOR.equals(sourceMimetype)) && MimetypeMap.MIMETYPE_IMAGE_PNG.equals(targetMimetype)) { - return true; // ALF-14303 workaround + return true; + } + + // Add extra support for tiff to pdf to allow multiple page preview (ALF-7278) + if (MimetypeMap.MIMETYPE_IMAGE_TIFF.equals(sourceMimetype) && + MimetypeMap.MIMETYPE_PDF.equals(targetMimetype)) + { + return true; } if (!AbstractImageMagickContentTransformerWorker.isSupported(sourceMimetype) || diff --git a/source/java/org/alfresco/repo/content/transform/magick/ImageMagickContentTransformerWorker.java b/source/java/org/alfresco/repo/content/transform/magick/ImageMagickContentTransformerWorker.java index f277ed5b72..055c10e9a2 100644 --- a/source/java/org/alfresco/repo/content/transform/magick/ImageMagickContentTransformerWorker.java +++ b/source/java/org/alfresco/repo/content/transform/magick/ImageMagickContentTransformerWorker.java @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005-2010 Alfresco Software Limited. + * Copyright (C) 2005-2012 Alfresco Software Limited. * * This file is part of Alfresco * @@ -172,7 +172,7 @@ public class ImageMagickContentTransformerWorker extends AbstractImageMagickCont } properties.put(KEY_OPTIONS, commandOptions); } - properties.put(VAR_SOURCE, sourceFile.getAbsolutePath() + "[0]"); + properties.put(VAR_SOURCE, sourceFile.getAbsolutePath() + (options.getPageLimit() == 1 ? 
"[0]" : "")); properties.put(VAR_TARGET, targetFile.getAbsolutePath()); // execute the statement diff --git a/source/java/org/alfresco/repo/domain/node/AbstractNodeDAOImpl.java b/source/java/org/alfresco/repo/domain/node/AbstractNodeDAOImpl.java index b7dc502aed..dcd8568b5b 100644 --- a/source/java/org/alfresco/repo/domain/node/AbstractNodeDAOImpl.java +++ b/source/java/org/alfresco/repo/domain/node/AbstractNodeDAOImpl.java @@ -54,6 +54,7 @@ import org.alfresco.repo.domain.permissions.AccessControlListDAO; import org.alfresco.repo.domain.permissions.AclDAO; import org.alfresco.repo.domain.qname.QNameDAO; import org.alfresco.repo.domain.usage.UsageDAO; +import org.alfresco.repo.node.index.NodeIndexer; import org.alfresco.repo.policy.BehaviourFilter; import org.alfresco.repo.security.permissions.AccessControlListProperties; import org.alfresco.repo.transaction.AlfrescoTransactionSupport; @@ -61,6 +62,8 @@ import org.alfresco.repo.transaction.AlfrescoTransactionSupport.TxnReadState; import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback; import org.alfresco.repo.transaction.TransactionAwareSingleton; import org.alfresco.repo.transaction.TransactionListenerAdapter; +import org.alfresco.repo.transaction.TransactionalDao; +import org.alfresco.repo.transaction.TransactionalResourceHelper; import org.alfresco.service.cmr.dictionary.DataTypeDefinition; import org.alfresco.service.cmr.dictionary.DictionaryService; import org.alfresco.service.cmr.dictionary.InvalidTypeException; @@ -111,6 +114,9 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO private static final String CACHE_REGION_PROPERTIES = "N.P"; private static final String CACHE_REGION_PARENT_ASSOCS = "N.PA"; + private static final String KEY_LOST_NODE_PAIRS = AbstractNodeDAOImpl.class.getName() + ".lostNodePairs"; + private static final String KEY_DELETED_ASSOCS = AbstractNodeDAOImpl.class.getName() + ".deletedAssocs"; + protected Log logger = LogFactory.getLog(getClass()); private Log loggerPaths = LogFactory.getLog(getClass().getName() + ".paths"); @@ -130,6 +136,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO private ContentDataDAO contentDataDAO; private LocaleDAO localeDAO; private UsageDAO usageDAO; + private NodeIndexer nodeIndexer; /** * Cache for the Store root nodes by StoreRef:
@@ -283,6 +290,14 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO this.usageDAO = usageDAO; } + /** + * @param nodeIndexer used when making changes that affect indexes + */ + public void setNodeIndexer(NodeIndexer nodeIndexer) + { + this.nodeIndexer = nodeIndexer; + } + /** * Set the cache that maintains the Store root node data * @@ -386,6 +401,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO PropertyCheck.mandatory(this, "contentDataDAO", contentDataDAO); PropertyCheck.mandatory(this, "localeDAO", localeDAO); PropertyCheck.mandatory(this, "usageDAO", usageDAO); + PropertyCheck.mandatory(this, "nodeIndexer", nodeIndexer); this.nodePropertyHelper = new NodePropertyHelper(dictionaryService, qnameDAO, localeDAO, contentDataDAO); } @@ -580,8 +596,18 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO * @author Derek Hulley * @since 3.4 */ - private class UpdateTransactionListener extends TransactionListenerAdapter + private class UpdateTransactionListener implements TransactionalDao { + /** + * Checks for the presence of a written DB transaction entry + */ + @Override + public boolean isDirty() + { + Long txnId = AbstractNodeDAOImpl.this.getCurrentTransactionId(false); + return txnId != null; + } + @Override public void beforeCommit(boolean readOnly) { @@ -633,7 +659,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO AlfrescoTransactionSupport.bindResource(KEY_TRANSACTION, txn); // Listen for the end of the transaction - AlfrescoTransactionSupport.bindListener(updateTransactionListener); + AlfrescoTransactionSupport.bindDaoService(updateTransactionListener); // Done return txn; } @@ -921,7 +947,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO // be part of the current transaction return false; } - Node node = getNodeNotNull(nodeId); + Node node = getNodeNotNull(nodeId, true); Long nodeTxnId = node.getTransaction().getId(); return nodeTxnId.equals(currentTxnId); } @@ -971,6 +997,9 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO Long nodeId = dbNode.getId(); if (dbNode.getDeleted()) { + // Trigger a post transaction prune of any associations that point to this deleted one + pruneDanglingAssocs(nodeId); + // The node is actually deleted as the cache said. throw new InvalidNodeRefException(nodeRef); } @@ -989,6 +1018,68 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO return pair.getSecond().getNodePair(); } + /** + * Trigger a post transaction prune of any associations that point to this deleted one. 
+ * @param nodeId + */ + private void pruneDanglingAssocs(Long nodeId) + { + selectChildAssocs(nodeId, null, null, null, null, null, new ChildAssocRefQueryCallback() + { + @Override + public boolean preLoadNodes() + { + return false; + } + + @Override + public boolean orderResults() + { + return false; + } + + @Override + public boolean handle(Pair childAssocPair, Pair parentNodePair, + Pair childNodePair) + { + bindFixAssocAndCollectLostAndFound(childNodePair, "childNodeWithDeletedParent", childAssocPair.getFirst(), childAssocPair.getSecond().isPrimary()); + return true; + } + + @Override + public void done() + { + } + }); + selectParentAssocs(nodeId, null, null, null, new ChildAssocRefQueryCallback() + { + @Override + public boolean preLoadNodes() + { + return false; + } + + @Override + public boolean orderResults() + { + return false; + } + + @Override + public boolean handle(Pair childAssocPair, Pair parentNodePair, + Pair childNodePair) + { + bindFixAssocAndCollectLostAndFound(childNodePair, "deletedChildWithParents", childAssocPair.getFirst(), false); + return true; + } + + @Override + public void done() + { + } + }); + } + public Pair getNodePair(Long nodeId) { Pair pair = nodesCache.getByKey(nodeId); @@ -1004,33 +1095,52 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO */ private Node getNodeNotNull(Long nodeId) { - return getNodeNotNullImpl(nodeId, false); + return getNodeNotNull(nodeId, false); } - private Node getNodeNotNullImpl(Long nodeId, boolean deleted) + /** + * Find a node, validating its deleted status + * + * @param nodeId + * the node + * @param allowDeleted + * is it legal for the node to be deleted? + * @return Returns the fully populated node + * @throws ConcurrencyFailureException + * if the ID doesn't exist or the node is deleted but allowDeleted is false + */ + private Node getNodeNotNull(Long nodeId, boolean allowDeleted) { Pair pair = nodesCache.getByKey(nodeId); - if (pair == null || (pair.getSecond().getDeleted() && (!deleted))) + if (pair == null || pair.getSecond().getDeleted() && (!allowDeleted)) { // Force a removal from the cache nodesCache.removeByKey(nodeId); // Go back to the database and get what is there NodeEntity dbNode = selectNodeById(nodeId, null); - if (pair == null) + if (dbNode == null) { throw new ConcurrencyFailureException( "No node exists: \n" + " ID: " + nodeId + "\n" + " DB row: " + dbNode); } + if (dbNode.getDeleted()) + { + // Trigger a post transaction prune of any associations that point to this deleted one + pruneDanglingAssocs(nodeId); + } + if (allowDeleted || !dbNode.getDeleted()) + { + return dbNode; + } else { - logger.warn("No live node exists: \n" + + throw new ConcurrencyFailureException( + "No node exists: \n" + " ID: " + nodeId + "\n" + - " Cache row: " + pair.getSecond() + "\n" + " DB row: " + dbNode); - throw new NotLiveNodeException(pair); } } else @@ -1812,10 +1922,9 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO // Remove peer associations (no associated cache) deleteNodeAssocsToAndFrom(nodeId); - // Remove child associations (invalidate children) - invalidateNodeChildrenCaches(nodeId, true, true); - invalidateNodeChildrenCaches(nodeId, false, true); - deleteChildAssocsToAndFrom(nodeId); + // Only parent associations to deleted nodes are blindly removed automatically. 
Any active child associations + // are assumed to have been removed and will otherwise block this operation + deleteParentAssocsTo(nodeId); // Remove aspects deleteNodeAspects(nodeId, null); @@ -2845,7 +2954,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO // Get parent and child nodes. We need them later, so just get them now. final Node parentNode = getNodeNotNull(parentNodeId); - final Node childNode = getNodeNotNullImpl(childNodeId, allowDeletedChild); + final Node childNode = getNodeNotNull(childNodeId, allowDeletedChild); final ChildAssocEntity assoc = new ChildAssocEntity(); // Parent node @@ -3498,8 +3607,11 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO return paths; } - private void bindFixAssocAndCollectLostAndFound(final Pair lostNodePair, final String lostName, final ChildAssocEntity assoc) + private void bindFixAssocAndCollectLostAndFound(final Pair lostNodePair, final String lostName, final Long assocId, final boolean orphanChild) { + // Remember the items already deleted in inner transactions + final Set> lostNodePairs = TransactionalResourceHelper.getSet(KEY_LOST_NODE_PAIRS); + final Set deletedAssocs = TransactionalResourceHelper.getSet(KEY_DELETED_ASSOCS); AlfrescoTransactionSupport.bindListener(new TransactionListenerAdapter() { @Override @@ -3512,22 +3624,31 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO { public Void execute() throws Throwable { - if (assoc == null) + if (assocId == null) { // 'child' with missing parent assoc => collect lost+found orphan child - collectLostAndFoundNode(lostNodePair, lostName); - logger.error("ALF-13066: Orphan child node has been re-homed under lost_found: "+lostNodePair); + if (lostNodePairs.add(lostNodePair)) + { + collectLostAndFoundNode(lostNodePair, lostName); + logger.error("ALF-13066: Orphan child node has been re-homed under lost_found: " + + lostNodePair); + } } else { - // 'child' with deleted parent assoc => delete invalid parent assoc and if primary then collect lost+found orphan child - deleteChildAssoc(assoc.getId()); - logger.error("ALF-12358: Deleted parent - removed child assoc: "+assoc.getId()); + // 'child' with deleted parent assoc => delete invalid parent assoc and if primary then + // collect lost+found orphan child + if (deletedAssocs.add(assocId)) + { + deleteChildAssocById(assocId); // Can't use caching version or may hit infinite loop + logger.error("ALF-12358: Deleted node - removed child assoc: " + assocId); + } - if (assoc.isPrimary()) + if (orphanChild && lostNodePairs.add(lostNodePair)) { collectLostAndFoundNode(lostNodePair, lostName); - logger.error("ALF-12358: Orphan child node has been re-homed under lost_found: "+lostNodePair); + logger.error("ALF-12358: Orphan child node has been re-homed under lost_found: " + + lostNodePair); } } @@ -3566,6 +3687,10 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO ParentAssocsInfo parentAssocInfo = new ParentAssocsInfo(isRoot, isStoreRoot, assoc); setParentAssocsCached(childNodeId, parentAssocInfo); + // Account for index impact; remove the orphan committed to the index + nodeIndexer.indexUpdateChildAssociation(new ChildAssociationRef(null, null, null, lostNodeRef), + assoc.getRef(qnameDAO)); + /* // Update ACLs for moved tree - note: actually a NOOP if oldParentAclId is null Long newParentAclId = newParentNode.getAclId(); @@ -3722,7 +3847,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO 
completedPaths.add(pathToSave); } - if (!hasParents && !parentAssocInfo.isRoot()) + if (!hasParents && !parentAssocInfo.isStoreRoot()) { // We appear to have an orphaned node. But we may just have a temporarily out of sync clustered cache or a // transaction that started ages before the one that committed the cache content!. So double check the node @@ -3741,7 +3866,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO } // We have a corrupt repository - non-root node has a missing parent ?! - bindFixAssocAndCollectLostAndFound(currentNodePair, "nonRootNodeWithoutParents", null); + bindFixAssocAndCollectLostAndFound(currentNodePair, "nonRootNodeWithoutParents", null, false); // throw - error will be logged and then bound txn listener (afterRollback) will be called throw new NonRootNodeWithoutParentsException(currentNodePair); @@ -3795,20 +3920,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO // push the assoc stack, recurse and pop assocIdStack.push(assocId); - try - { - prependPaths(parentNodePair, currentRootNodePair, path, completedPaths, assocIdStack, primaryOnly); - } - catch (final NotLiveNodeException re) - { - if (re.getNodePair().equals(parentNodePair)) - { - // We have a corrupt repository - deleted parent pointing to live child ?! - bindFixAssocAndCollectLostAndFound(currentNodePair, "childNodeWithDeletedParent", assoc); - } - // rethrow - this will cause error/rollback - throw re; - } + prependPaths(parentNodePair, currentRootNodePair, path, completedPaths, assocIdStack, primaryOnly); assocIdStack.pop(); } @@ -4368,7 +4480,7 @@ public abstract class AbstractNodeDAOImpl implements NodeDAO, BatchingDAO QName assocQName, int index); protected abstract int updateChildAssocsUniqueName(Long childNodeId, String name); - protected abstract int deleteChildAssocsToAndFrom(Long nodeId); + protected abstract int deleteParentAssocsTo(Long nodeId); protected abstract ChildAssocEntity selectChildAssoc(Long assocId); protected abstract List selectChildNodeIds( Long nodeId, diff --git a/source/java/org/alfresco/repo/domain/node/NotLiveNodeException.java b/source/java/org/alfresco/repo/domain/node/NotLiveNodeException.java deleted file mode 100644 index a2825bb639..0000000000 --- a/source/java/org/alfresco/repo/domain/node/NotLiveNodeException.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (C) 2005-2012 Alfresco Software Limited. - * - * This file is part of Alfresco - * - * Alfresco is free software: you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * Alfresco is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with Alfresco. If not, see . 
- */ -package org.alfresco.repo.domain.node; - -import org.alfresco.service.cmr.repository.NodeRef; -import org.alfresco.util.Pair; -import org.springframework.dao.ConcurrencyFailureException; - -/** - * For internal use only: see ALF-13066 / ALF-12358 - */ -/* package */ class NotLiveNodeException extends ConcurrencyFailureException -{ - private static final long serialVersionUID = 5920138218201628243L; - - private final Pair nodePair; - - public NotLiveNodeException(Pair nodePair) - { - super("Unexpected deleted node"); - this.nodePair = nodePair; - } - - public Pair getNodePair() - { - return new Pair(nodePair.getFirst(), nodePair.getSecond().getNodeRef()); - } -} diff --git a/source/java/org/alfresco/repo/domain/node/ibatis/NodeDAOImpl.java b/source/java/org/alfresco/repo/domain/node/ibatis/NodeDAOImpl.java index 22835c2eef..130a02ee1d 100644 --- a/source/java/org/alfresco/repo/domain/node/ibatis/NodeDAOImpl.java +++ b/source/java/org/alfresco/repo/domain/node/ibatis/NodeDAOImpl.java @@ -116,7 +116,7 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl private static final String DELETE_CHILD_ASSOC_BY_ID = "alfresco.node.delete_ChildAssocById"; private static final String UPDATE_CHILD_ASSOCS_INDEX = "alfresco.node.update_ChildAssocsIndex"; private static final String UPDATE_CHILD_ASSOCS_UNIQUE_NAME = "alfresco.node.update_ChildAssocsUniqueName"; - private static final String DELETE_CHILD_ASSOCS_TO_AND_FROM = "alfresco.node.delete_ChildAssocsToAndFrom"; + private static final String DELETE_PARENT_ASSOCS_TO = "alfresco.node.delete_ParentAssocsTo"; private static final String SELECT_CHILD_ASSOC_BY_ID = "alfresco.node.select_ChildAssocById"; private static final String COUNT_CHILD_ASSOC_BY_PARENT_ID = "alfresco.node.count_ChildAssocByParentId"; private static final String SELECT_CHILD_ASSOCS_BY_PROPERTY_VALUE = "alfresco.node.select_ChildAssocsByPropertyValue"; @@ -889,19 +889,16 @@ public class NodeDAOImpl extends AbstractNodeDAOImpl } @Override - protected int deleteChildAssocsToAndFrom(Long nodeId) + protected int deleteParentAssocsTo(Long nodeId) { ChildAssocEntity assoc = new ChildAssocEntity(); - // Parent - NodeEntity parentNode = new NodeEntity(); - parentNode.setId(nodeId); - assoc.setParentNode(parentNode); + // Child NodeEntity childNode = new NodeEntity(); childNode.setId(nodeId); assoc.setChildNode(childNode); - return template.delete(DELETE_CHILD_ASSOCS_TO_AND_FROM, assoc); + return template.delete(DELETE_PARENT_ASSOCS_TO, assoc); } @Override diff --git a/source/java/org/alfresco/repo/node/db/DbNodeServiceImpl.java b/source/java/org/alfresco/repo/node/db/DbNodeServiceImpl.java index 0a5e430544..ab45e1cabd 100644 --- a/source/java/org/alfresco/repo/node/db/DbNodeServiceImpl.java +++ b/source/java/org/alfresco/repo/node/db/DbNodeServiceImpl.java @@ -1061,7 +1061,7 @@ public class DbNodeServiceImpl extends AbstractNodeServiceImpl invokeBeforeDeleteNode(nodeRef); // Cascade delete as required - deletePrimaryChildrenNotArchived(nodePair); + deleteChildrenNotArchived(nodePair); // perform a normal deletion nodeDAO.deleteNode(nodeId); @@ -1085,17 +1085,57 @@ public class DbNodeServiceImpl extends AbstractNodeServiceImpl } /** - * delete primary children - private method for deleteNode. + * delete children - private method for deleteNode. * - * recurses through children when deleting a node. Does not archive. + * recurses through primary children when deleting a node. Does not archive.
*/ - private void deletePrimaryChildrenNotArchived(Pair nodePair) + private void deleteChildrenNotArchived(Pair nodePair) { Long nodeId = nodePair.getFirst(); - // Get the node's primary children - final List> childNodePairs = new ArrayList>(5); + // Get the node's children + final List> primaryChildAssocs = new ArrayList>(5); + + // Get all the QNames to remove and prune affected secondary associations + removeSecondaryAssociationsCascade(nodeId, primaryChildAssocs); + + // Each primary child must be deleted + for (Pair childAssoc : primaryChildAssocs) + { + // Fire node policies. This ensures that each node in the hierarchy gets a notification fired. + Long childNodeId = childAssoc.getFirst(); + ChildAssociationRef childParentAssocRef = childAssoc.getSecond(); + NodeRef childNodeRef = childParentAssocRef.getChildRef(); + QName childNodeType = nodeDAO.getNodeType(childNodeId); + Set childNodeQNames = nodeDAO.getNodeAspects(childNodeId); + + // remove the deleted node from the list of new nodes + untrackNewNodeRef(childNodeRef); + + // track the deletion of this node - so we can prevent new associations to it. + trackDeletedNodeRef(childNodeRef); + + invokeBeforeDeleteNode(childNodeRef); + + // Delete the child and its parent associations + nodeDAO.deleteNode(childNodeId); + + // Propagate timestamps + propagateTimeStamps(childParentAssocRef); + invokeOnDeleteNode(childParentAssocRef, childNodeType, childNodeQNames, false); + + // Index + nodeIndexer.indexDeleteNode(childParentAssocRef); + + // lose interest in tracking this node ref + untrackNewNodeRef(childNodeRef); + } + } + + private void removeSecondaryAssociationsCascade(Long nodeId, List> primaryChildAssocs) + { + // Get the node's children + final List>> childAssocs = new ArrayList>>(5); - final Map childAssocRefsByChildId = new HashMap(5); NodeDAO.ChildAssocRefQueryCallback callback = new NodeDAO.ChildAssocRefQueryCallback() { public boolean preLoadNodes() @@ -1116,8 +1156,7 @@ public class DbNodeServiceImpl extends AbstractNodeServiceImpl ) { // Add it - childNodePairs.add(childNodePair); - childAssocRefsByChildId.put(childNodePair.getFirst(), childAssocPair.getSecond()); + childAssocs.add(new Pair>(childNodePair.getFirst(), childAssocPair)); // More results return true; } @@ -1128,41 +1167,32 @@ public class DbNodeServiceImpl extends AbstractNodeServiceImpl }; // Get all the QNames to remove - nodeDAO.getChildAssocs(nodeId, null, null, null, Boolean.TRUE, null, callback); - // Each child must be deleted - for (Pair childNodePair : childNodePairs) + nodeDAO.getChildAssocs(nodeId, null, null, null, null, null, callback); + // Each child association must be visited, recursively + for (Pair> childAssoc: childAssocs) { - // Fire node policies. This ensures that each node in the hierarchy gets a notification fired. - Long childNodeId = childNodePair.getFirst(); - NodeRef childNodeRef = childNodePair.getSecond(); - QName childNodeType = nodeDAO.getNodeType(childNodeId); - Set childNodeQNames = nodeDAO.getNodeAspects(childNodeId); - ChildAssociationRef childParentAssocRef = childAssocRefsByChildId.get(childNodeId); - - // remove the deleted node from the list of new nodes - untrackNewNodeRef(childNodeRef); - - // track the deletion of this node - so we can prevent new associations to it. - trackDeletedNodeRef(childNodeRef); - - invokeBeforeDeleteNode(childNodeRef); - - // Cascade first - // This ensures that the beforeDelete policy is fired for all nodes in the hierarchy before - // the actual delete starts. 
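The replacement below is two-phase: removeSecondaryAssociationsCascade walks the tree depth-first, severing secondary associations as it goes and queueing primary child associations in post-order, so that deleteChildrenNotArchived then deletes descendants before their parents. A self-contained toy illustration of the post-order queueing on a made-up tree (not Alfresco API):

    import java.util.*;

    class PostOrderSketch
    {
        static Map<Integer, List<Integer>> children = new HashMap<Integer, List<Integer>>();

        // Queue each subtree before the node itself; the root is handled by the
        // caller, mirroring how deleteNode removes the starting node after the cascade.
        static void collect(int node, List<Integer> out)
        {
            for (int child : children.get(node))
            {
                collect(child, out);
                out.add(child);
            }
        }

        public static void main(String[] args)
        {
            children.put(1, Arrays.asList(2, 3));
            children.put(2, Arrays.asList(4));
            children.put(3, Collections.<Integer>emptyList());
            children.put(4, Collections.<Integer>emptyList());
            List<Integer> order = new ArrayList<Integer>();
            collect(1, order);
            System.out.println(order); // [4, 2, 3] - leaves before their parents
        }
    }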
- deletePrimaryChildrenNotArchived(childNodePair); - // Delete the child - nodeDAO.deleteNode(childNodeId); - - // Propagate timestamps - propagateTimeStamps(childParentAssocRef); - invokeOnDeleteNode(childParentAssocRef, childNodeType, childNodeQNames, false); - - // Index - nodeIndexer.indexDeleteNode(childParentAssocRef); - - // lose interest in tracking this node ref - untrackNewNodeRef(childNodeRef); + Long childNodeId = childAssoc.getFirst(); + ChildAssociationRef childParentAssocRef = childAssoc.getSecond().getSecond(); + // Recurse on primary associations + if (childParentAssocRef.isPrimary()) + { + // Cascade first + // This ensures that the beforeDelete policy is fired for all nodes in the hierarchy before + // the actual delete starts. + removeSecondaryAssociationsCascade(childNodeId, primaryChildAssocs); + primaryChildAssocs.add(new Pair(childNodeId, childParentAssocRef)); + } + // Remove secondary associations + else + { + // Secondary association - we must fire the appropriate event to touch the node, update the caches and + // fire the index event + invokeBeforeDeleteChildAssociation(childParentAssocRef); + nodeDAO.deleteChildAssoc(childAssoc.getSecond().getFirst()); + invokeOnDeleteChildAssociation(childParentAssocRef); + // Index + nodeIndexer.indexDeleteChildAssociation(childParentAssocRef); + } } } diff --git a/source/java/org/alfresco/repo/node/db/DbNodeServiceImplTest.java b/source/java/org/alfresco/repo/node/db/DbNodeServiceImplTest.java index 6610fd5c10..039a9529d2 100644 --- a/source/java/org/alfresco/repo/node/db/DbNodeServiceImplTest.java +++ b/source/java/org/alfresco/repo/node/db/DbNodeServiceImplTest.java @@ -29,10 +29,12 @@ import javax.transaction.UserTransaction; import org.alfresco.model.ContentModel; import org.alfresco.repo.domain.node.NodeDAO; +import org.alfresco.repo.domain.node.Transaction; import org.alfresco.repo.node.BaseNodeServiceTest; import org.alfresco.repo.node.cleanup.NodeCleanupRegistry; import org.alfresco.repo.transaction.AlfrescoTransactionSupport; import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback; +import org.alfresco.repo.transaction.TransactionListenerAdapter; import org.alfresco.service.cmr.dictionary.DictionaryService; import org.alfresco.service.cmr.repository.ChildAssociationRef; import org.alfresco.service.cmr.repository.MLText; @@ -82,6 +84,92 @@ public class DbNodeServiceImplTest extends BaseNodeServiceTest NodeCleanupRegistry cleanupRegistry = (NodeCleanupRegistry) applicationContext.getBean("nodeCleanupRegistry"); cleanupRegistry.doClean(); } + + /** + * ALF-14929 + */ + public synchronized void testTxnCommitTime() throws Exception + { + /* + * This test is subject to intermittent - but correct - failures if bug ALF-14929 is present + */ + + String currentTxn = AlfrescoTransactionSupport.getTransactionId(); + assertNotNull("Must have a txn change UUID for all transactions.", currentTxn); + + long start = System.currentTimeMillis(); + this.wait(10L); + + // The listener + final TestTxnCommitTimeTxnListener listener = new TestTxnCommitTimeTxnListener(); + AlfrescoTransactionSupport.bindListener(listener); + + // First see what the latest transaction is + long currentTxnCommitTime = listener.getTxnCommitTime(currentTxn, start); + assertEquals("Should not have found a written txn", 0L, currentTxnCommitTime); + + // Now commit + setComplete(); + endTransaction(); + + // Now check again. The transaction time must be greater than the last time that + // the listener wrote through.
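The test that follows exercises the transaction listener API used throughout this change set: TransactionListenerAdapter supplies no-op hooks, and beforeCommit(boolean) runs while the transaction can still write, so a listener may perform last-minute changes and even re-bind itself for another callback in the same commit cycle. A minimal sketch (real classes, hypothetical body):

    AlfrescoTransactionSupport.bindListener(new TransactionListenerAdapter()
    {
        @Override
        public void beforeCommit(boolean readOnly)
        {
            // Last-chance writes go here; binding a further listener from this
            // hook schedules another callback before the commit completes.
        }
    });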
+ long recordedCommitTimeMs = listener.getTxnCommitTime(currentTxn, start); + assertTrue( + "DAO txn write time must be greater than last listener write time", + recordedCommitTimeMs > listener.lastWriteTime); + } + + /** + * @see DbNodeServiceImplTest#testTxnCommitTime() + */ + private class TestTxnCommitTimeTxnListener extends TransactionListenerAdapter + { + /* + * Note: equals hides this instance when listeners are processed + */ + private String txnIdStr; + private long lastWriteTime = 0L; + + @Override + public boolean equals(Object obj) + { + return false; + } + @Override + public synchronized void beforeCommit(boolean readOnly) + { + if (txnIdStr == null) + { + txnIdStr = AlfrescoTransactionSupport.getTransactionId(); + // Make a change + nodeService.setProperty(rootNodeRef, ContentModel.PROP_COUNTER, new Integer(5)); + // Reschedule for removal + AlfrescoTransactionSupport.bindListener(this); + } + else + { + nodeService.removeProperty(rootNodeRef, ContentModel.PROP_COUNTER); + } + lastWriteTime = System.currentTimeMillis(); + // We wait a bit so that the time differences are significant + try { this.wait(20L); } catch (InterruptedException e) {} + } + public long getTxnCommitTime(String txnId, long fromTime) + { + List startTxns = nodeDAO.getTxnsByCommitTimeAscending(fromTime, null, Integer.MAX_VALUE, null, false); + long time = 0L; + for (Transaction txn : startTxns) + { + if (txnId.equals(txn.getChangeTxnId())) + { + // Found our transaction + time = txn.getCommitTimeMs(); + } + } + return time; + } + } /** * Deletes a child node and then iterates over the children of the parent node, diff --git a/source/java/org/alfresco/repo/remotecredentials/RemoteCredentialsServicesTest.java b/source/java/org/alfresco/repo/remotecredentials/RemoteCredentialsServicesTest.java index 800185e796..3d6e69e536 100644 --- a/source/java/org/alfresco/repo/remotecredentials/RemoteCredentialsServicesTest.java +++ b/source/java/org/alfresco/repo/remotecredentials/RemoteCredentialsServicesTest.java @@ -44,6 +44,7 @@ import org.alfresco.service.cmr.repository.NodeService; import org.alfresco.service.cmr.security.MutableAuthenticationService; import org.alfresco.service.cmr.security.PermissionService; import org.alfresco.service.cmr.security.PersonService; +import org.alfresco.service.namespace.NamespaceService; import org.alfresco.service.namespace.QName; import org.alfresco.service.transaction.TransactionService; import org.alfresco.util.PropertyMap; diff --git a/source/java/org/alfresco/repo/rule/RuleServiceCoverageTest.java b/source/java/org/alfresco/repo/rule/RuleServiceCoverageTest.java index af63501a13..3461ab368e 100644 --- a/source/java/org/alfresco/repo/rule/RuleServiceCoverageTest.java +++ b/source/java/org/alfresco/repo/rule/RuleServiceCoverageTest.java @@ -930,12 +930,13 @@ public class RuleServiceCoverageTest extends TestCase mailService.setTestMode(true); mailService.clearLastTestMessage(); - this.nodeService.createNode( + NodeRef contentNodeRef = this.nodeService.createNode( this.nodeRef, ContentModel.ASSOC_CHILDREN, QName.createQName(TEST_NAMESPACE, "children"), ContentModel.TYPE_CONTENT, getContentProperties()).getChildRef(); + addContentToNode(contentNodeRef); // An email should appear in the recipients email // System.out.println(NodeStoreInspector.dumpNodeStore(this.nodeService, this.testStoreRef)); @@ -1663,7 +1664,7 @@ public class RuleServiceCoverageTest extends TestCase ContentModel.ASSOC_CHILDREN, QName.createQName(TEST_NAMESPACE, "children"), 
ContentModel.TYPE_CONTENT).getChildRef(); - assertTrue(this.nodeService.hasAspect(contentNodeRef, ContentModel.ASPECT_VERSIONABLE)); + assertFalse(this.nodeService.hasAspect(contentNodeRef, ContentModel.ASPECT_VERSIONABLE)); addContentToNode(contentNodeRef); assertTrue(this.nodeService.hasAspect(contentNodeRef, ContentModel.ASPECT_VERSIONABLE)); diff --git a/source/java/org/alfresco/repo/rule/ruletrigger/CreateNodeRuleTrigger.java b/source/java/org/alfresco/repo/rule/ruletrigger/CreateNodeRuleTrigger.java index 900138b622..7aa1b8a6c2 100644 --- a/source/java/org/alfresco/repo/rule/ruletrigger/CreateNodeRuleTrigger.java +++ b/source/java/org/alfresco/repo/rule/ruletrigger/CreateNodeRuleTrigger.java @@ -21,12 +21,15 @@ package org.alfresco.repo.rule.ruletrigger; import java.util.Set; import org.alfresco.repo.node.NodeServicePolicies; -import org.alfresco.repo.policy.JavaBehaviour; import org.alfresco.repo.policy.Behaviour.NotificationFrequency; +import org.alfresco.repo.policy.JavaBehaviour; import org.alfresco.repo.rule.RuntimeRuleService; import org.alfresco.repo.transaction.TransactionalResourceHelper; +import org.alfresco.service.cmr.dictionary.AspectDefinition; +import org.alfresco.service.cmr.dictionary.ClassDefinition; import org.alfresco.service.cmr.dictionary.DataTypeDefinition; import org.alfresco.service.cmr.dictionary.PropertyDefinition; +import org.alfresco.service.cmr.dictionary.TypeDefinition; import org.alfresco.service.cmr.repository.ChildAssociationRef; import org.alfresco.service.cmr.repository.NodeRef; import org.alfresco.service.namespace.NamespaceService; @@ -110,6 +113,26 @@ public class CreateNodeRuleTrigger extends RuleTriggerAbstractBase new JavaBehaviour(this, "onRemoveAspect", NotificationFrequency.EVERY_EVENT)); } + + /** + * Return true if the provided classDef declares a single-valued property of the given propertyType + */ + private boolean hasPropertyOfType(ClassDefinition classDef, QName propertyType) + { + if (classDef != null) + { + for (PropertyDefinition propertyDef : classDef.getProperties().values()) + { + if (propertyDef.getDataType().getName().equals(propertyType) && !propertyDef.isMultiValued()) + { + return true; + } + } + } + + return false; + } + /** * {@inheritDoc} */ @@ -121,22 +144,31 @@ public class CreateNodeRuleTrigger extends RuleTriggerAbstractBase return; } NodeRef nodeRef = childAssocRef.getChildRef(); + + // Keep track of new nodes to prevent firing of updates in the same transaction + Set newNodeRefSet = TransactionalResourceHelper.getSet(RULE_TRIGGER_NEW_NODES); + newNodeRefSet.add(nodeRef); + + // If the node's type or aspects have a single-valued content property, don't fire the trigger, as it would be + // handled by on-content-create-trigger, according to its settings for empty content - // If the node has a single-valued content property, don't fire the trigger, as it will be handled by - // on-content-create-trigger, according to its settings for empty content - for (QName propertyQName : nodeService.getProperties(nodeRef).keySet()) + // Check node type's properties + QName nodeType = nodeService.getType(nodeRef); + TypeDefinition typeDefinition = dictionaryService.getType(nodeType); + if (hasPropertyOfType(typeDefinition, DataTypeDefinition.CONTENT)) { - PropertyDefinition propertyDef = dictionaryService.getProperty(propertyQName); - if (propertyDef != null && propertyDef.getDataType().getName().equals(DataTypeDefinition.CONTENT) - && !propertyDef.isMultiValued()) + return; + } + + // Check node aspects' properties + for (QName aspectQName :
nodeService.getAspects(nodeRef)) + { + AspectDefinition aspectDefinition = dictionaryService.getAspect(aspectQName); + if (hasPropertyOfType(aspectDefinition, DataTypeDefinition.CONTENT)) { return; } } - - // Keep track of new nodes to prevent firing of updates in the same transaction - Set newNodeRefSet = TransactionalResourceHelper.getSet(RULE_TRIGGER_NEW_NODES); - newNodeRefSet.add(nodeRef); if (nodeRef != null && nodeService.exists(nodeRef) == true && diff --git a/source/java/org/alfresco/repo/search/impl/lucene/ADMLuceneIndexerImpl.java b/source/java/org/alfresco/repo/search/impl/lucene/ADMLuceneIndexerImpl.java index bca912e2b0..65a2789c8c 100644 --- a/source/java/org/alfresco/repo/search/impl/lucene/ADMLuceneIndexerImpl.java +++ b/source/java/org/alfresco/repo/search/impl/lucene/ADMLuceneIndexerImpl.java @@ -340,7 +340,8 @@ public class ADMLuceneIndexerImpl extends AbstractLuceneIndexerImpl imp if (s_logger.isDebugEnabled()) { NodeRef parentRef = relationshipRef.getParentRef(); - Path path = parentRef == null ? new Path() : nodeService.getPath(parentRef); + Path path = (parentRef == null || !nodeService.exists(parentRef)) ? new Path() : nodeService + .getPath(parentRef); path.append(new ChildAssocElement(relationshipRef)); s_logger.debug(event + " " + path + " " + relationshipRef.getChildRef()); } diff --git a/source/java/org/alfresco/repo/security/authentication/AbstractAuthenticationComponent.java b/source/java/org/alfresco/repo/security/authentication/AbstractAuthenticationComponent.java index 7a18ed22e3..0a522e5b9f 100644 --- a/source/java/org/alfresco/repo/security/authentication/AbstractAuthenticationComponent.java +++ b/source/java/org/alfresco/repo/security/authentication/AbstractAuthenticationComponent.java @@ -482,9 +482,9 @@ public abstract class AbstractAuthenticationComponent implements AuthenticationC { public Object doWork() throws Exception { - if (!personService.personExists(userName) - || !nodeService.getProperty(personService.getPerson(userName), - ContentModel.PROP_USERNAME).equals(userName)) + String identifier; + if ((identifier = personService.getUserIdentifier(userName)) == null + || !identifier.equals(userName)) { if (logger.isDebugEnabled()) { diff --git a/source/java/org/alfresco/repo/security/authentication/AuthenticationServiceImpl.java b/source/java/org/alfresco/repo/security/authentication/AuthenticationServiceImpl.java index b357ba2aa7..6246624c19 100644 --- a/source/java/org/alfresco/repo/security/authentication/AuthenticationServiceImpl.java +++ b/source/java/org/alfresco/repo/security/authentication/AuthenticationServiceImpl.java @@ -113,7 +113,7 @@ public class AuthenticationServiceImpl extends AbstractAuthenticationService imp // clear context - to avoid MT concurrency issue (causing domain mismatch) - see also 'authenticate' above clearCurrentSecurityContext(); currentUser = ticketComponent.validateTicket(ticket); - authenticationComponent.setCurrentUser(currentUser, UserNameValidationMode.CHECK); + authenticationComponent.setCurrentUser(currentUser, UserNameValidationMode.NONE); } catch (AuthenticationException ae) { diff --git a/source/java/org/alfresco/repo/security/authentication/AuthenticationTest.java b/source/java/org/alfresco/repo/security/authentication/AuthenticationTest.java index c022567bd6..7a955d70cf 100644 --- a/source/java/org/alfresco/repo/security/authentication/AuthenticationTest.java +++ b/source/java/org/alfresco/repo/security/authentication/AuthenticationTest.java @@ -48,6 +48,7 @@ import 
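The AbstractAuthenticationComponent change above collapses two repository round-trips (personExists plus a property read on the person node) into one: PersonService.getUserIdentifier returns the username exactly as stored, or null when no matching person exists. The old and new checks are therefore equivalent; a sketch, assuming an injected PersonService:

    // null => no such person; case difference => stored name differs from the request
    String identifier = personService.getUserIdentifier(userName);
    boolean missingOrMismatched = (identifier == null) || !identifier.equals(userName);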
org.alfresco.repo.policy.PolicyComponent; import org.alfresco.repo.security.authentication.AuthenticationUtil.RunAsWork; import org.alfresco.repo.security.authentication.InMemoryTicketComponentImpl.ExpiryMode; import org.alfresco.repo.security.authentication.InMemoryTicketComponentImpl.Ticket; +import org.alfresco.repo.security.authentication.RepositoryAuthenticationDao.CacheEntry; import org.alfresco.repo.tenant.TenantService; import org.alfresco.repo.transaction.AlfrescoTransactionSupport; import org.alfresco.repo.transaction.AlfrescoTransactionSupport.TxnReadState; @@ -101,7 +102,7 @@ public class AuthenticationTest extends TestCase private PolicyComponent policyComponent; - private SimpleCache<String, NodeRef> authenticationCache; + private SimpleCache<String, CacheEntry> authenticationCache; private SimpleCache immutableSingletonCache; public AuthenticationTest() { @@ -137,7 +138,7 @@ public class AuthenticationTest extends TestCase pubPersonService = (PersonService) ctx.getBean("PersonService"); personService = (PersonService) ctx.getBean("personService"); policyComponent = (PolicyComponent) ctx.getBean("policyComponent"); - authenticationCache = (SimpleCache<String, NodeRef>) ctx.getBean("authenticationCache"); + authenticationCache = (SimpleCache<String, CacheEntry>) ctx.getBean("authenticationCache"); immutableSingletonCache = (SimpleCache) ctx.getBean("immutableSingletonCache"); // permissionServiceSPI = // ctx.getBean("permissionService"); diff --git a/source/java/org/alfresco/repo/security/authentication/RepositoryAuthenticationDao.java b/source/java/org/alfresco/repo/security/authentication/RepositoryAuthenticationDao.java index 2d453489e0..327f7375e8 100644 --- a/source/java/org/alfresco/repo/security/authentication/RepositoryAuthenticationDao.java +++ b/source/java/org/alfresco/repo/security/authentication/RepositoryAuthenticationDao.java @@ -39,6 +39,8 @@ import org.alfresco.repo.node.NodeServicePolicies.OnUpdatePropertiesPolicy; import org.alfresco.repo.policy.JavaBehaviour; import org.alfresco.repo.policy.PolicyComponent; import org.alfresco.repo.tenant.TenantService; +import org.alfresco.repo.transaction.AlfrescoTransactionSupport; +import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback; import org.alfresco.service.cmr.repository.ChildAssociationRef; import org.alfresco.service.cmr.repository.NodeRef; import org.alfresco.service.cmr.repository.NodeService; @@ -48,7 +50,10 @@ import org.alfresco.service.cmr.security.AuthorityService; import org.alfresco.service.namespace.NamespacePrefixResolver; import org.alfresco.service.namespace.QName; import org.alfresco.service.namespace.RegexQNamePattern; +import org.alfresco.service.transaction.TransactionService; import org.alfresco.util.EqualsHelper; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.springframework.beans.factory.InitializingBean; import org.springframework.dao.DataAccessException; @@ -61,19 +66,24 @@ public class RepositoryAuthenticationDao implements MutableAuthenticationDao, In { private static final StoreRef STOREREF_USERS = new StoreRef("user", "alfrescoUserStore"); + private static final Log logger = LogFactory.getLog(RepositoryAuthenticationDao.class); + private AuthorityService authorityService; + private NodeService nodeService; private TenantService tenantService; private NamespacePrefixResolver namespacePrefixResolver; private PasswordEncoder passwordEncoder; private PolicyComponent policyComponent; + private TransactionService transactionService; + // note: caches are
tenant-aware (if using EhCacheAdapter shared cache) private SimpleCache singletonCache; // eg. for user folder nodeRef private final String KEY_USERFOLDER_NODEREF = "key.userfolder.noderef"; - private SimpleCache<String, NodeRef> authenticationCache; + private SimpleCache<String, CacheEntry> authenticationCache; public RepositoryAuthenticationDao() { @@ -115,11 +125,16 @@ public class RepositoryAuthenticationDao implements MutableAuthenticationDao, In this.policyComponent = policyComponent; } - public void setAuthenticationCache(SimpleCache<String, NodeRef> authenticationCache) + public void setAuthenticationCache(SimpleCache<String, CacheEntry> authenticationCache) { this.authenticationCache = authenticationCache; } + public void setTransactionService(TransactionService transactionService) + { + this.transactionService = transactionService; + } + public void afterPropertiesSet() throws Exception { this.policyComponent.bindClassBehaviour( @@ -130,52 +145,94 @@ public class RepositoryAuthenticationDao implements MutableAuthenticationDao, In BeforeDeleteNodePolicy.QNAME, ContentModel.TYPE_USER, new JavaBehaviour(this, "beforeDeleteNode")); + this.policyComponent.bindClassBehaviour( + OnUpdatePropertiesPolicy.QNAME, + ContentModel.TYPE_USER, + new JavaBehaviour(this, "onUpdateUserProperties")); } @Override public UserDetails loadUserByUsername(String incomingUserName) throws UsernameNotFoundException, DataAccessException { - NodeRef userRef = getUserOrNull(incomingUserName); - if (userRef == null) + CacheEntry userEntry = getUserEntryOrNull(incomingUserName); + if (userEntry == null) { throw new UsernameNotFoundException("Could not find user by userName: " + incomingUserName); } - - Map properties = nodeService.getProperties(userRef); - String password = DefaultTypeConverter.INSTANCE.convert(String.class, properties.get(ContentModel.PROP_PASSWORD)); - - // Report back the user name as stored on the user - String userName = DefaultTypeConverter.INSTANCE.convert(String.class, properties.get(ContentModel.PROP_USER_USERNAME)); - - GrantedAuthority[] gas = new GrantedAuthority[1]; - gas[0] = new GrantedAuthorityImpl("ROLE_AUTHENTICATED"); - - UserDetails ud = new User( - userName, - password, - getEnabled(userName, properties), - !getHasExpired(userName, properties), - !getCredentialsHaveExpired(userName, properties), - !getLocked(userName, properties), - gas); - return ud; + UserDetails userDetails = userEntry.userDetails; + if (userEntry.credentialExpiryDate == null || userEntry.credentialExpiryDate.compareTo(new Date()) >= 0) + { + return userDetails; + } + // If the credentials have expired, we must return a copy with the flag set + return new User(userDetails.getUsername(), userDetails.getPassword(), userDetails.isEnabled(), + userDetails.isAccountNonExpired(), false, + userDetails.isAccountNonLocked(), userDetails.getAuthorities()); } + public NodeRef getUserOrNull(String searchUserName) { + CacheEntry userEntry = getUserEntryOrNull(searchUserName); + return userEntry == null ?
null: userEntry.nodeRef; + } + + private CacheEntry getUserEntryOrNull(final String searchUserName) + { + class SearchUserNameCallback implements RetryingTransactionCallback + { + @Override + public CacheEntry execute() throws Throwable + { + List results = nodeService.getChildAssocs(getUserFolderLocation(searchUserName), + ContentModel.ASSOC_CHILDREN, QName.createQName(ContentModel.USER_MODEL_URI, searchUserName)); + if (!results.isEmpty()) + { + NodeRef userRef = tenantService.getName(results.get(0).getChildRef()); + Map properties = nodeService.getProperties(userRef); + String password = DefaultTypeConverter.INSTANCE.convert(String.class, + properties.get(ContentModel.PROP_PASSWORD)); + + // Report back the user name as stored on the user + String userName = DefaultTypeConverter.INSTANCE.convert(String.class, + properties.get(ContentModel.PROP_USER_USERNAME)); + + GrantedAuthority[] gas = new GrantedAuthority[1]; + gas[0] = new GrantedAuthorityImpl("ROLE_AUTHENTICATED"); + + return new CacheEntry(userRef, new User(userName, password, getEnabled(userName, properties), + !getHasExpired(userName, properties), true, !getLocked(userName, properties), gas), + getCredentialsExpiryDate(userName, properties)); + } + return null; + } + } + if (searchUserName == null || searchUserName.length() == 0) { return null; } - NodeRef result = authenticationCache.get(searchUserName); + CacheEntry result = authenticationCache.get(searchUserName); if (result == null) { - List results = nodeService.getChildAssocs(getUserFolderLocation(searchUserName), - ContentModel.ASSOC_CHILDREN, QName.createQName(ContentModel.USER_MODEL_URI, searchUserName)); - if (!results.isEmpty()) + if (AlfrescoTransactionSupport.getTransactionId() == null) + { + result = transactionService.getRetryingTransactionHelper().doInTransaction(new SearchUserNameCallback()); + } + else + { + try + { + result = new SearchUserNameCallback().execute(); + } + catch (Throwable e) + { + logger.error(e); + } + } + if (result != null) { - result = tenantService.getName(results.get(0).getChildRef()); authenticationCache.put(searchUserName, result); } } @@ -475,19 +532,19 @@ public class RepositoryAuthenticationDao implements MutableAuthenticationDao, In @Override public boolean getCredentialsHaveExpired(String userName) { - return getCredentialsHaveExpired(userName, null); + return !loadUserByUsername(userName).isCredentialsNonExpired(); } /** * @param userName the username (never null * @param properties the properties associated with the user or null to get them - * @return true if the user account has expired + * @return Date on which the credentials expire or null if they never expire */ - private boolean getCredentialsHaveExpired(String userName, Map properties) + private Date getCredentialsExpiryDate(String userName, Map properties) { if (authorityService.isAdminAuthority(userName)) { - return false; // Admin never expires + return null; // Admin never expires } if (properties == null) { @@ -495,23 +552,15 @@ public class RepositoryAuthenticationDao implements MutableAuthenticationDao, In } if (properties == null) { - return false; + return null; } if (DefaultTypeConverter.INSTANCE.booleanValue(properties.get(ContentModel.PROP_CREDENTIALS_EXPIRE))) { - Date date = DefaultTypeConverter.INSTANCE.convert(Date.class, properties.get(ContentModel.PROP_CREDENTIALS_EXPIRY_DATE)); - if (date == null) - { - return false; + return DefaultTypeConverter.INSTANCE.convert(Date.class, properties.get(ContentModel.PROP_CREDENTIALS_EXPIRY_DATE)); } else { - return 
(date.compareTo(new Date()) < 1); - } - } - else - { - return false; + return null; } } @@ -641,7 +690,7 @@ public class RepositoryAuthenticationDao implements MutableAuthenticationDao, In @Override public void onUpdateProperties(NodeRef nodeRef, Map before, Map after) { - String uidBefore = DefaultTypeConverter.INSTANCE.convert(String.class, before.get(ContentModel.PROP_USERNAME)); + String uidBefore = DefaultTypeConverter.INSTANCE.convert(String.class, before.get(ContentModel.PROP_USERNAME)); String uidAfter = DefaultTypeConverter.INSTANCE.convert(String.class, after.get(ContentModel.PROP_USERNAME)); if (uidBefore != null && !EqualsHelper.nullSafeEquals(uidBefore, uidAfter)) { @@ -654,6 +703,13 @@ public class RepositoryAuthenticationDao implements MutableAuthenticationDao, In authenticationCache.remove(uidBefore); } } + authenticationCache.remove(uidAfter); + } + + public void onUpdateUserProperties(NodeRef nodeRef, Map before, Map after) + { + String uidBefore = DefaultTypeConverter.INSTANCE.convert(String.class, before.get(ContentModel.PROP_USER_USERNAME)); + authenticationCache.remove(uidBefore); } @Override @@ -665,4 +721,18 @@ public class RepositoryAuthenticationDao implements MutableAuthenticationDao, In authenticationCache.remove(userName); } } + + static class CacheEntry + { + public NodeRef nodeRef; + public UserDetails userDetails; + public Date credentialExpiryDate; + + public CacheEntry(NodeRef nodeRef, UserDetails userDetails, Date credentialExpiryDate) + { + this.nodeRef = nodeRef; + this.userDetails = userDetails; + this.credentialExpiryDate = credentialExpiryDate; + } + } } diff --git a/source/java/org/alfresco/repo/site/SiteServiceImpl.java b/source/java/org/alfresco/repo/site/SiteServiceImpl.java index 239b9031ab..f20ddd9597 100644 --- a/source/java/org/alfresco/repo/site/SiteServiceImpl.java +++ b/source/java/org/alfresco/repo/site/SiteServiceImpl.java @@ -1969,7 +1969,7 @@ public class SiteServiceImpl extends AbstractLifecycleBean implements SiteServic { activityService.postActivity( ActivityType.SITE_USER_JOINED, shortName, - ACTIVITY_TOOL, getActivityUserData(authorityName, role)); + ACTIVITY_TOOL, getActivityUserData(authorityName, role), authorityName); } else if (authorityType == AuthorityType.GROUP) { diff --git a/source/java/org/alfresco/repo/transaction/AlfrescoTransactionSupport.java b/source/java/org/alfresco/repo/transaction/AlfrescoTransactionSupport.java index 818059f25b..b4e4f39051 100644 --- a/source/java/org/alfresco/repo/transaction/AlfrescoTransactionSupport.java +++ b/source/java/org/alfresco/repo/transaction/AlfrescoTransactionSupport.java @@ -695,7 +695,7 @@ public abstract class AlfrescoTransactionSupport // Flush the DAOs for (TransactionalDao dao : daoServices) { - dao.beforeCommit(); + dao.beforeCommit(readOnly); } // Flush the transactional caches diff --git a/source/java/org/alfresco/repo/transaction/TransactionalDao.java b/source/java/org/alfresco/repo/transaction/TransactionalDao.java index 021fc510bd..548a215e98 100644 --- a/source/java/org/alfresco/repo/transaction/TransactionalDao.java +++ b/source/java/org/alfresco/repo/transaction/TransactionalDao.java @@ -26,15 +26,6 @@ package org.alfresco.repo.transaction; */ public interface TransactionalDao { - /** - * Allows the dao to flush any consuming resources. This mechanism is - * used primarily during long-lived transactions to ensure that system resources - * are not used up. - *

- * This method must not be used for implementing business logic. - */ - void flush(); - /** * Are there any pending changes which must be synchronized with the store? * @@ -45,7 +36,9 @@ public interface TransactionalDao /** * This callback provides a chance for the DAO to do any pre-commit work. * + * @param readOnly true if the transaction was read-only + * * @since 1.4.5 */ - public void beforeCommit(); + public void beforeCommit(boolean readOnly); } diff --git a/source/java/org/alfresco/service/cmr/activities/ActivityPostService.java b/source/java/org/alfresco/service/cmr/activities/ActivityPostService.java index 9678bbd4ce..ad7da32e25 100644 --- a/source/java/org/alfresco/service/cmr/activities/ActivityPostService.java +++ b/source/java/org/alfresco/service/cmr/activities/ActivityPostService.java @@ -38,6 +38,17 @@ public interface ActivityPostService */ public void postActivity(String activityType, String siteId, String appTool, String jsonActivityData); + /** + * Post a custom activity type + * + * @param activityType - required + * @param siteId - optional, if null will be stored as empty string + * @param appTool - optional, if null will be stored as empty string + * @param jsonActivityData - required + * @param userId - required + */ + public void postActivity(String activityType, String siteId, String appTool, String jsonActivityData, String userId); + /** * Post a pre-defined activity type - certain activity data will be looked-up asynchronously, including: *
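The new five-argument overload lets a caller attribute the activity to a named user rather than the currently authenticated one, which is how the SiteServiceImpl hunk above posts site-join events on behalf of the joining authority. A hypothetical call (site and variable names assumed):

    activityPostService.postActivity(
            ActivityType.SITE_USER_JOINED,  // activityType - required
            "my-site",                      // siteId - optional
            "siteService",                  // appTool - optional
            jsonActivityData,               // jsonActivityData - required
            joiningUserName);               // userId - required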