diff --git a/config/alfresco/bootstrap-context.xml b/config/alfresco/bootstrap-context.xml
index e19bd078e8..d985d9d7b8 100644
--- a/config/alfresco/bootstrap-context.xml
+++ b/config/alfresco/bootstrap-context.xml
@@ -189,6 +189,14 @@
+
+
+
+
+
+
+
+
diff --git a/config/alfresco/repository.properties b/config/alfresco/repository.properties
index 9e4b297610..0a6e68bd90 100644
--- a/config/alfresco/repository.properties
+++ b/config/alfresco/repository.properties
@@ -1,903 +1,906 @@
-# Repository configuration
-
-repository.name=Main Repository
-
-# Directory configuration
-
-dir.root=./alf_data
-
-dir.contentstore=${dir.root}/contentstore
-dir.contentstore.deleted=${dir.root}/contentstore.deleted
-
-# The location of cached content
-dir.cachedcontent=${dir.root}/cachedcontent
-
-dir.auditcontentstore=${dir.root}/audit.contentstore
-
-# The location for lucene index files
-dir.indexes=${dir.root}/lucene-indexes
-
-# The location for index backups
-dir.indexes.backup=${dir.root}/backup-lucene-indexes
-
-# The location for lucene index locks
-dir.indexes.lock=${dir.indexes}/locks
-
-#Directory to find external license
-dir.license.external=.
-# Spring resource location of external license files
-location.license.external=file://${dir.license.external}/*.lic
-# Spring resource location of embedded license files
-location.license.embedded=/WEB-INF/alfresco/license/*.lic
-# Spring resource location of license files on shared classpath
-location.license.shared=classpath*:/alfresco/extension/license/*.lic
-
-# WebDAV initialization properties
-system.webdav.servlet.enabled=true
-system.webdav.storeName=${protocols.storeName}
-system.webdav.rootPath=${protocols.rootPath}
-
-# Is the JBPM Deploy Process Servlet enabled?
-# Default is false. Should not be enabled in production environments as the
-# servlet allows unauthenticated deployment of new workflows.
-system.workflow.deployservlet.enabled=false
-
-# Sets the location for the JBPM Configuration File
-system.workflow.jbpm.config.location=classpath:org/alfresco/repo/workflow/jbpm/jbpm.cfg.xml
-
-# Determines if JBPM workflow definitions are shown.
-# Default is false. This controls the visibility of JBPM
-# workflow definitions from the getDefinitions and
-# getAllDefinitions WorkflowService API but still allows
-# any in-flight JBPM workflows to be completed.
-system.workflow.engine.jbpm.definitions.visible=false
-
-#Determines if Activiti definitions are visible
-system.workflow.engine.activiti.definitions.visible=true
-
-# Determines if the JBPM engine is enabled
-system.workflow.engine.jbpm.enabled=true
-
-# Determines if the Activiti engine is enabled
-system.workflow.engine.activiti.enabled=true
-
-index.subsystem.name=lucene
-
-# ######################################### #
-# Index Recovery and Tracking Configuration #
-# ######################################### #
-#
-# Recovery types are:
-# NONE: Ignore
-# VALIDATE: Checks that the first and last transaction for each store is represented in the indexes
-# AUTO: Validates and auto-recovers if validation fails
-# FULL: Full index rebuild, processing all transactions in order. The server is temporarily suspended.
-index.recovery.mode=VALIDATE
-# FULL recovery continues when encountering errors
-index.recovery.stopOnError=false
-index.recovery.maximumPoolSize=5
-# Set the frequency with which the index tracking is triggered.
-# For more information on index tracking in a cluster:
-# http://wiki.alfresco.com/wiki/High_Availability_Configuration_V1.4_to_V2.1#Version_1.4.5.2C_2.1.1_and_later
-# By default, this is effectively never, but can be modified as required.
-# Examples:
-# Never: * * * * * ? 2099
-# Once every five seconds: 0/5 * * * * ?
-# Once every two seconds : 0/2 * * * * ?
-# See http://www.quartz-scheduler.org/docs/tutorials/crontrigger.html
-index.tracking.cronExpression=0/5 * * * * ?
-index.tracking.adm.cronExpression=${index.tracking.cronExpression}
-index.tracking.avm.cronExpression=${index.tracking.cronExpression}
-# Other properties.
-index.tracking.maxTxnDurationMinutes=10
-index.tracking.reindexLagMs=1000
-index.tracking.maxRecordSetSize=1000
-index.tracking.maxTransactionsPerLuceneCommit=100
-index.tracking.disableInTransactionIndexing=false
-# Index tracking information of a certain age is cleaned out by a scheduled job.
-# Any clustered system that has been offline for longer than this period will need to be seeded
-# with a more recent backup of the Lucene indexes or the indexes will have to be fully rebuilt.
-# Use -1 to disable purging. This can be switched on at any stage.
-index.tracking.minRecordPurgeAgeDays=30
-
-# Reindexing of missing content is by default 'never' carried out.
-# The cron expression below can be changed to control the timing of this reindexing.
-# Users of Enterprise Alfresco can configure this cron expression via JMX without a server restart.
-# Note that if alfresco.cluster.name is not set, then reindexing will not occur.
-index.reindexMissingContent.cronExpression=* * * * * ? 2099
-
-# Change the failure behaviour of the configuration checker
-system.bootstrap.config_check.strict=true
-
-# The name of the cluster
-# Leave this empty to disable cluster entry
-alfresco.cluster.name=
-
-# Hazelcast clustering configuration
-# Password to join the cluster
-alfresco.hazelcast.password=alfrescocluster
-# Protocol used for member discovery (tcp, ec2, udp)
-alfresco.hazelcast.protocol=tcp
-# Location of the Hazelcast configuration file
-alfresco.hazelcast.configLocation=classpath:alfresco/hazelcast/hazelcast-${alfresco.hazelcast.protocol}.xml
-# XML elements to incorporate into Hazelcast config, in particular
-# hostnames to use for membership discovery
-alfresco.hazelcast.tcp.config=localhost
-# Amazon Web Services - EC2 discovery
-alfresco.hazelcast.ec2.accesskey=my-access-key
-alfresco.hazelcast.ec2.secretkey=my-secret-key
-alfresco.hazelcast.ec2.region=us-east-1
-# Only instances belonging to this group will be discovered, default will try all running instances
-alfresco.hazelcast.ec2.securitygroup=
-alfresco.hazelcast.ec2.tagkey=type
-alfresco.hazelcast.ec2.tagvalue=hz-nodes
-
-# The EHCache RMI peer URL addresses to set in the ehcache-custom.xml file
-# Use this property to set the hostname of the current server.
-# This is only necessary if the cache peer URLs are generated with an invalid IP address for the local server.
-alfresco.ehcache.rmi.hostname=
-# Use this property to set the cache peer URL port.
-alfresco.ehcache.rmi.remoteObjectPort=0
-alfresco.ehcache.rmi.port=0
-alfresco.ehcache.rmi.socketTimeoutMillis=5000
-
-# The protocol stack to use from the JGroups configuration file
-# Use this property to select which communication method should be used.
-# The JGroups configuration file is build up using the protocol string
-alfresco.jgroups.defaultProtocol=UDP
-# The bind address and interface for JGroups to use; equivalent to -Djgroups.bind_addr and -Djgroups.bind_interface
-alfresco.jgroups.bind_address=
-alfresco.jgroups.bind_interface=
-# JGroups configuration (http://www.jgroups.org)
-# The location of the JGroups configuration file
-alfresco.jgroups.configLocation=classpath:alfresco/jgroups/alfresco-jgroups-${alfresco.jgroups.defaultProtocol}.xml
-
-#
-# How long should shutdown wait to complete normally before
-# taking stronger action and calling System.exit()
-# in ms, 10,000 is 10 seconds
-#
-shutdown.backstop.timeout=10000
-shutdown.backstop.enabled=false
-
-# Server Single User Mode
-# note:
-# only allow named user (note: if blank or not set then will allow all users)
-# assuming maxusers is not set to 0
-#server.singleuseronly.name=admin
-
-# Server Max Users - limit number of users with non-expired tickets
-# note:
-# -1 allows any number of users, assuming not in single-user mode
-# 0 prevents further logins, including the ability to enter single-user mode
-server.maxusers=-1
-
-# The Cron expression controlling the frequency with which the OpenOffice connection is tested
-openOffice.test.cronExpression=0 * * * * ?
-
-#
-# Disable all shared caches (mutable and immutable)
-# These properties are used for diagnostic purposes
-system.cache.disableMutableSharedCaches=false
-system.cache.disableImmutableSharedCaches=false
-
-#
-# Properties to limit resources spent on individual searches
-#
-# The maximum time spent pruning results
-system.acl.maxPermissionCheckTimeMillis=10000
-# The maximum number of search results to perform permission checks against
-system.acl.maxPermissionChecks=1000
-
-# The maximum number of filefolder list results
-system.filefolderservice.defaultListMaxResults=5000
-
-
-# Properties to control read permission evaluation for acegi
-system.readpermissions.optimise=true
-system.readpermissions.bulkfetchsize=1000
-
-#
-# Manually control how the system handles maximum string lengths.
-# Any zero or negative value is ignored.
-# Only change this after consulting support or reading the appropriate Javadocs for
-# org.alfresco.repo.domain.schema.SchemaBootstrap for V2.1.2
-system.maximumStringLength=-1
-
-#
-# Limit hibernate session size by trying to amalgamate events for the L2 session invalidation
-# - hibernate works as is up to this size
-# - after the limit is hit events that can be grouped invalidate the L2 cache by type and not instance
-# events may not group if there are post action listener registered (this is not the case with the default distribution)
-system.hibernateMaxExecutions=20000
-
-#
-# Determine if modification timestamp propagation from child to parent nodes is respected or not.
-# Even if 'true', the functionality is only supported for child associations that declare the
-# 'propagateTimestamps' element in the dictionary definition.
-system.enableTimestampPropagation=true
-
-#
-# Decide if content should be removed from the system immediately after being orphaned.
-# Do not change this unless you have examined the impact it has on your backup procedures.
-system.content.eagerOrphanCleanup=false
-# The number of days to keep orphaned content in the content stores.
-# This has no effect on the 'deleted' content stores, which are not automatically emptied.
-system.content.orphanProtectDays=14
-# The action to take when a store or stores fails to delete orphaned content
-# IGNORE: Just log a warning. The binary remains and the record is expunged
-# KEEP_URL: Log a warning and create a URL entry with orphan time 0. It won't be processed or removed.
-system.content.deletionFailureAction=IGNORE
-# The CRON expression to trigger the deletion of resources associated with orphaned content.
-system.content.orphanCleanup.cronExpression=0 0 4 * * ?
-# The CRON expression to trigger content URL conversion. This process is not intensive and can
-# be triggered on a live system. Similarly, it can be triggered using JMX on a dedicated machine.
-system.content.contentUrlConverter.cronExpression=* * * * * ? 2099
-system.content.contentUrlConverter.threadCount=2
-system.content.contentUrlConverter.batchSize=500
-system.content.contentUrlConverter.runAsScheduledJob=false
-
-# #################### #
-# Lucene configuration #
-# #################### #
-#
-# Millisecond threshold for text transformations
-# Slower transformers will force the text extraction to be asynchronous
-#
-lucene.maxAtomicTransformationTime=20
-#
-# The maximum number of clauses that are allowed in a lucene query
-#
-lucene.query.maxClauses=10000
-#
-# The size of the queue of nodes waiting for index
-# Events are generated as nodes are changed, this is the maximum size of the queue used to coalesce events
-# When this size is reached the lists of nodes will be indexed
-#
-# http://issues.alfresco.com/browse/AR-1280: Setting this high is the workaround as of 1.4.3.
-#
-lucene.indexer.batchSize=1000000
-fts.indexer.batchSize=1000
-#
-# Index cache sizes
-#
-lucene.indexer.cacheEnabled=true
-lucene.indexer.maxDocIdCacheSize=100000
-lucene.indexer.maxDocumentCacheSize=100
-lucene.indexer.maxIsCategoryCacheSize=-1
-lucene.indexer.maxLinkAspectCacheSize=10000
-lucene.indexer.maxParentCacheSize=100000
-lucene.indexer.maxPathCacheSize=100000
-lucene.indexer.maxTypeCacheSize=10000
-#
-# Properties for merge (note this does not affect the final index segment which will be optimised)
-# Max merge docs only applies to the merge process not the resulting index which will be optimised.
-#
-lucene.indexer.mergerMaxMergeDocs=1000000
-lucene.indexer.mergerMergeFactor=5
-lucene.indexer.mergerMaxBufferedDocs=-1
-lucene.indexer.mergerRamBufferSizeMb=16
-#
-# Properties for delta indexes (note this does not affect the final index segment which will be optimised)
-# Max merge docs only applies to the index building process not the resulting index which will be optimised.
-#
-lucene.indexer.writerMaxMergeDocs=1000000
-lucene.indexer.writerMergeFactor=5
-lucene.indexer.writerMaxBufferedDocs=-1
-lucene.indexer.writerRamBufferSizeMb=16
-#
-# Target number of indexes and deltas in the overall index and what index size to merge in memory
-#
-lucene.indexer.mergerTargetIndexCount=8
-lucene.indexer.mergerTargetOverlayCount=5
-lucene.indexer.mergerTargetOverlaysBlockingFactor=2
-lucene.indexer.maxDocsForInMemoryMerge=60000
-lucene.indexer.maxRamInMbForInMemoryMerge=16
-lucene.indexer.maxDocsForInMemoryIndex=60000
-lucene.indexer.maxRamInMbForInMemoryIndex=16
-#
-# Other lucene properties
-#
-lucene.indexer.termIndexInterval=128
-lucene.indexer.useNioMemoryMapping=true
-# over-ride to false for pre 3.0 behaviour
-lucene.indexer.postSortDateTime=true
-lucene.indexer.defaultMLIndexAnalysisMode=EXACT_LANGUAGE_AND_ALL
-lucene.indexer.defaultMLSearchAnalysisMode=EXACT_LANGUAGE_AND_ALL
-#
-# The number of terms from a document that will be indexed
-#
-lucene.indexer.maxFieldLength=10000
-
-# Should we use a 'fair' locking policy, giving queue-like access behaviour to
-# the indexes and avoiding starvation of waiting writers? Set to false on old
-# JVMs where this appears to cause deadlock
-lucene.indexer.fairLocking=true
-
-#
-# Index locks (mostly deprecated and will be tidied up with the next lucene upgrade)
-#
-lucene.write.lock.timeout=10000
-lucene.commit.lock.timeout=100000
-lucene.lock.poll.interval=100
-
-lucene.indexer.useInMemorySort=true
-lucene.indexer.maxRawResultSetSizeForInMemorySort=1000
-lucene.indexer.contentIndexingEnabled=true
-
-index.backup.cronExpression=0 0 3 * * ?
-
-lucene.defaultAnalyserResourceBundleName=alfresco/model/dataTypeAnalyzers
-
-
-
-# When transforming archive files (.zip etc) into text representations (such as
-# for full text indexing), should the files within the archive be processed too?
-# If enabled, transformation takes longer, but searches of the files find more.
-transformer.Archive.includeContents=false
-
-# Database configuration
-db.schema.stopAfterSchemaBootstrap=false
-db.schema.update=true
-db.schema.update.lockRetryCount=24
-db.schema.update.lockRetryWaitSeconds=5
-db.driver=org.gjt.mm.mysql.Driver
-db.name=alfresco
-db.url=jdbc:mysql:///${db.name}
-db.username=alfresco
-db.password=alfresco
-db.pool.initial=10
-db.pool.max=40
-db.txn.isolation=-1
-db.pool.statements.enable=true
-db.pool.statements.max=40
-db.pool.min=0
-db.pool.idle=-1
-db.pool.wait.max=-1
-db.pool.validate.query=
-db.pool.evict.interval=-1
-db.pool.evict.idle.min=1800000
-db.pool.validate.borrow=true
-db.pool.validate.return=false
-db.pool.evict.validate=false
-#
-db.pool.abandoned.detect=false
-db.pool.abandoned.time=300
-#
-# db.pool.abandoned.log=true (logAbandoned) adds overhead (http://commons.apache.org/dbcp/configuration.html)
-# and also requires db.pool.abandoned.detect=true (removeAbandoned)
-#
-db.pool.abandoned.log=false
-
-
-# Audit configuration
-audit.enabled=true
-audit.tagging.enabled=true
-audit.alfresco-access.enabled=false
-audit.alfresco-access.sub-events.enabled=false
-audit.cmischangelog.enabled=false
-audit.dod5015.enabled=false
-# Setting this flag to true will force startup failure when invalid audit configurations are detected
-audit.config.strict=false
-# Audit map filter for AccessAuditor - restricts recorded events to user driven events
-audit.filter.alfresco-access.default.enabled=true
-audit.filter.alfresco-access.transaction.user=~System;~null;.*
-audit.filter.alfresco-access.transaction.type=cm:folder;cm:content;st:site
-audit.filter.alfresco-access.transaction.path=~/sys:archivedItem;~/ver:;.*
-
-
-# System Configuration
-system.store=system://system
-system.descriptor.childname=sys:descriptor
-system.descriptor.current.childname=sys:descriptor-current
-
-# User config
-alfresco_user_store.store=user://alfrescoUserStore
-alfresco_user_store.system_container.childname=sys:system
-alfresco_user_store.user_container.childname=sys:people
-
-# note: default admin username - should not be changed after installation
-alfresco_user_store.adminusername=admin
-
-# Initial password - editing this will not have any effect once the repository is installed
-alfresco_user_store.adminpassword=209c6174da490caeb422f3fa5a7ae634
-
-# note: default guest username - should not be changed after installation
-alfresco_user_store.guestusername=guest
-
-# Used to move home folders to a new location
-home_folder_provider_synchronizer.enabled=false
-home_folder_provider_synchronizer.override_provider=
-home_folder_provider_synchronizer.keep_empty_parents=false
-
-# Spaces Archive Configuration
-spaces.archive.store=archive://SpacesStore
-
-# Spaces Configuration
-spaces.store=workspace://SpacesStore
-spaces.company_home.childname=app:company_home
-spaces.guest_home.childname=app:guest_home
-spaces.dictionary.childname=app:dictionary
-spaces.templates.childname=app:space_templates
-spaces.imapConfig.childname=app:imap_configs
-spaces.imap_templates.childname=app:imap_templates
-spaces.scheduled_actions.childname=cm:Scheduled Actions
-spaces.emailActions.childname=app:email_actions
-spaces.searchAction.childname=cm:search
-spaces.templates.content.childname=app:content_templates
-spaces.templates.email.childname=app:email_templates
-spaces.templates.email.invite1.childname=app:invite_email_templates
-spaces.templates.email.notify.childname=app:notify_email_templates
-spaces.templates.email.following.childname=app:following
-spaces.templates.rss.childname=app:rss_templates
-spaces.savedsearches.childname=app:saved_searches
-spaces.scripts.childname=app:scripts
-spaces.wcm.childname=app:wcm
-spaces.wcm_content_forms.childname=app:wcm_forms
-spaces.content_forms.childname=app:forms
-spaces.user_homes.childname=app:user_homes
-spaces.user_homes.regex.key=userName
-spaces.user_homes.regex.pattern=
-spaces.user_homes.regex.group_order=
-spaces.sites.childname=st:sites
-spaces.templates.email.invite.childname=cm:invite
-spaces.templates.email.activities.childname=cm:activities
-spaces.rendition.rendering_actions.childname=app:rendering_actions
-spaces.replication.replication_actions.childname=app:replication_actions
-spaces.wcm_deployed.childname=cm:wcm_deployed
-spaces.transfers.childname=app:transfers
-spaces.transfer_groups.childname=app:transfer_groups
-spaces.transfer_temp.childname=app:temp
-spaces.inbound_transfer_records.childname=app:inbound_transfer_records
-spaces.webscripts.childname=cm:webscripts
-spaces.extension_webscripts.childname=cm:extensionwebscripts
-spaces.models.childname=app:models
-spaces.workflow.definitions.childname=app:workflow_defs
-spaces.publishing.root.childname=app:publishing_root
-spaces.templates.email.workflowemailnotification.childname=cm:workflownotification
-spaces.nodetemplates.childname=app:node_templates
-
-# ADM VersionStore Configuration
-version.store.enableAutoVersioning=true
-version.store.deprecated.lightWeightVersionStore=workspace://lightWeightVersionStore
-version.store.version2Store=workspace://version2Store
-
-version.store.migrateVersionStore.threadCount=3
-version.store.migrateVersionStore.batchSize=1
-
-version.store.migrateCleanupJob.threadCount=3
-version.store.migrateCleanupJob.batchSize=1
-
-
-# WARNING: For non-production testing only !!! Do not change (to avoid version store issues, including possible mismatch). Should be false since lightWeightVersionStore is deprecated.
-version.store.onlyUseDeprecatedV1=false
-
-# The CRON expression to trigger migration of the version store from V1 (2.x) to V2 (3.x)
-# By default, this is effectively 'never' but can be modified as required.
-# Examples:
-# Never: * * * * * ? 2099
-# Once every thirty minutes: 0 0/30 * * * ?
-# See http://www.quartz-scheduler.org/docs/tutorials/crontrigger.html
-version.store.migrateVersionStore.cronExpression=* * * * * ? 2099
-# Limit number of version histories to migrate per job cycle, where -1 = unlimited. Note: if limit > 0 then need to schedule job to run regularly in order to complete the migration.
-version.store.migrateVersionStore.limitPerJobCycle=-1
-version.store.migrateVersionStore.runAsScheduledJob=false
-
-# Folders for storing people
-system.system_container.childname=sys:system
-system.people_container.childname=sys:people
-system.authorities_container.childname=sys:authorities
-system.zones_container.childname=sys:zones
-
-# Folders for storing workflow related info
-system.workflow_container.childname=sys:workflow
-
-# Are user names case sensitive?
-user.name.caseSensitive=false
-domain.name.caseSensitive=false
-domain.separator=
-
-# AVM Specific properties.
-avm.remote.idlestream.timeout=30000
-
-#Format caption extracted from the XML Schema.
-xforms.formatCaption=true
-
-# ECM content usages/quotas
-system.usages.enabled=false
-system.usages.clearBatchSize=50
-system.usages.updateBatchSize=50
-
-# Repository endpoint - used by Activity Service
-repo.remote.endpoint=/service
-
-# Create home folders as people are created (true) or create them lazily (false)
-home.folder.creation.eager=true
-
-# Should we consider zero byte content to be the same as no content when firing
-# content update policies? Prevents 'premature' firing of inbound content rules
-# for some clients such as Mac OS X Finder
-policy.content.update.ignoreEmpty=false
-
-# The well known RMI registry port and external host name published in the stubs
-# is defined in the alfresco-shared.properties file
-#
-# alfresco.rmi.services.port=50500
-
-# Default value of alfresco.rmi.services.host is 0.0.0.0 which means 'listen on all adapters'.
-# This allows connections to JMX both remotely and locally.
-#
-alfresco.rmi.services.host=0.0.0.0
-
-# If the RMI address is in-use, how many retries should be done before aborting
-# Default value of alfresco.rmi.services.retries is 0 which means 'Don't retry if the address is in-use'
-alfresco.rmi.services.retries=4
-
-# RMI service ports for the individual services.
-# These eight services are available remotely.
-#
-# Assign individual ports for each service for best performance
-# or run several services on the same port, you can even run everything on 50500 if
-# running through a firewall.
-#
-# Specify 0 to use a random unused port.
-#
-avm.rmi.service.port=50501
-avmsync.rmi.service.port=50502
-authentication.rmi.service.port=50504
-repo.rmi.service.port=50505
-action.rmi.service.port=50506
-deployment.rmi.service.port=50507
-monitor.rmi.service.port=50508
-
-
-# Should the Mbean server bind to an existing server. Set to true for most application servers.
-# false for WebSphere clusters.
-mbean.server.locateExistingServerIfPossible=true
-
-# External executable locations
-ooo.exe=soffice
-ooo.user=${dir.root}/oouser
-img.root=./ImageMagick
-img.dyn=${img.root}/lib
-img.exe=${img.root}/bin/convert
-swf.exe=./bin/pdf2swf
-
-# Thumbnail Service
-system.thumbnail.generate=true
-
-# Generate doclib icons
-# When creating a doclib icon, only use the first pageLimit pages (currently only understood by pdfbox
-# TextToPdfContentTransformer)
-system.thumbnail.definition.doclib.timeoutMs=-1
-system.thumbnail.definition.doclib.readLimitTimeMs=-1
-system.thumbnail.definition.doclib.maxSourceSizeKBytes=-1
-system.thumbnail.definition.doclib.readLimitKBytes=-1
-system.thumbnail.definition.doclib.pageLimit=1
-system.thumbnail.definition.doclib.maxPages=-1
-
-# Max mimetype sizes to create thumbnail icons
-system.thumbnail.mimetype.maxSourceSizeKBytes.pdf=-1
-system.thumbnail.mimetype.maxSourceSizeKBytes.txt=-1
-system.thumbnail.mimetype.maxSourceSizeKBytes.docx=-1
-system.thumbnail.mimetype.maxSourceSizeKBytes.xlsx=-1
-system.thumbnail.mimetype.maxSourceSizeKBytes.pptx=-1
-system.thumbnail.mimetype.maxSourceSizeKBytes.odt=-1
-system.thumbnail.mimetype.maxSourceSizeKBytes.ods=-1
-system.thumbnail.mimetype.maxSourceSizeKBytes.odp=-1
-
-# Configuration for handling of failing thumbnails.
-# See NodeEligibleForRethumbnailingEvaluator's javadoc for details.
-#
-# Retry periods limit the frequency with which the repository will attempt to create Share thumbnails
-# for content nodes which have previously failed in their thumbnail attempts.
-# These periods are in seconds.
-#
-# 604800s = 60s * 60m * 24h * 7d = 1 week
-system.thumbnail.retryPeriod=60
-system.thumbnail.retryCount=2
-system.thumbnail.quietPeriod=604800
-system.thumbnail.quietPeriodRetriesEnabled=true
-
-# Content Transformers
-
-# Base setting for all transformers (2 min timeout)
-content.transformer.default.timeoutMs=120000
-content.transformer.default.readLimitTimeMs=-1
-content.transformer.default.maxSourceSizeKBytes=-1
-content.transformer.default.readLimitKBytes=-1
-content.transformer.default.pageLimit=-1
-content.transformer.default.maxPages=-1
-
-# text -> pdf using PdfBox (txt=text/*) 10M takes about 12 seconds
-content.transformer.PdfBox.TextToPdf.maxSourceSizeKBytes=10240
-
-# pdf -> swf using Pdf2swf 1M takes about 30 seconds.
-# Using a value of 1.25M (a bit larger than the txt or xlsx) used to create
-# the pdf on the way to swf to avoid the second part of a transform failing
-content.transformer.Pdf2swf.maxSourceSizeKBytes=1152
-
-# txt -> pdf -> swf 1M (pdf is about the same size as the txt)
-# Need this limit as transformer.PdfBox txt -> pdf is allowed up to 10M
-# unlike transformer.OpenOffice and transformer.JodConverter which
-# also only allow 1M text
-content.transformer.complex.Text.Pdf2swf.maxSourceSizeKBytes=1024
-
-# Limits for specific mimetype transformations (txt=text/plain only) 1M
-# Has implication for Share preview which generally goes via pdf to get to swf
-content.transformer.OpenOffice.mimeTypeLimits.txt.pdf.maxSourceSizeKBytes=1024
-content.transformer.OpenOffice.mimeTypeLimits.xlsx.pdf.maxSourceSizeKBytes=1024
-content.transformer.OpenOffice.mimeTypeLimits.xls.pdf.maxSourceSizeKBytes=1024
-
-content.transformer.JodConverter.mimeTypeLimits.txt.pdf.maxSourceSizeKBytes=1024
-content.transformer.JodConverter.mimeTypeLimits.xlsx.pdf.maxSourceSizeKBytes=1024
-content.transformer.JodConverter.mimeTypeLimits.xls.pdf.maxSourceSizeKBytes=1024
-
-# Property to enable upgrade from 2.1-A
-V2.1-A.fixes.to.schema=0
-#V2.1-A.fixes.to.schema=82
-
-# The default authentication chain
-authentication.chain=alfrescoNtlm1:alfrescoNtlm
-
-# Do authentication tickets expire or live for ever?
-authentication.ticket.ticketsExpire=false
-
-# If ticketsExpire is true then how should they expire?
-# Valid values are: AFTER_INACTIVITY, AFTER_FIXED_TIME, DO_NOT_EXPIRE
-# The default is AFTER_FIXED_TIME
-authentication.ticket.expiryMode=AFTER_FIXED_TIME
-
-# If authentication.ticket.ticketsExpire is true and
-# authentication.ticket.expiryMode is AFTER_FIXED_TIME or AFTER_INACTIVITY,
-# this controls the minimum period for which tickets are valid.
-# The default is PT1H for one hour.
-authentication.ticket.validDuration=PT1H
-
-# Default NFS user mappings (empty). Note these users will be able to
-# authenticate through NFS without password so ensure NFS port is secure before
-# enabling and adding mappings
-nfs.user.mappings=
-nfs.user.mappings.default.uid=0
-nfs.user.mappings.default.gid=0
-
-#Example NFS user mappings
-#nfs.user.mappings=admin,user1
-#nfs.user.mappings.value.admin.uid=0
-#nfs.user.mappings.value.admin.gid=0
-#nfs.user.mappings.value.user1.uid=500
-#nfs.user.mappings.value.user1.gid=500
-
-# Default root path for protocols
-protocols.storeName=${spaces.store}
-protocols.rootPath=/${spaces.company_home.childname}
-
-# OpenCMIS
-opencmis.connector.default.store=${spaces.store}
-opencmis.connector.default.rootPath=/${spaces.company_home.childname}
-opencmis.connector.default.typesDefaultMaxItems=500
-opencmis.connector.default.typesDefaultDepth=-1
-opencmis.connector.default.objectsDefaultMaxItems=10000
-opencmis.connector.default.objectsDefaultDepth=100
-opencmis.connector.default.openHttpSession=false
-
-# IMAP
-imap.server.enabled=false
-imap.server.port=143
-imap.server.attachments.extraction.enabled=true
-
-# Default IMAP mount points
-imap.config.home.store=${spaces.store}
-imap.config.home.rootPath=/${spaces.company_home.childname}
-imap.config.home.folderPath=Imap Home
-imap.config.server.mountPoints=AlfrescoIMAP
-imap.config.server.mountPoints.default.mountPointName=IMAP
-imap.config.server.mountPoints.default.modeName=ARCHIVE
-imap.config.server.mountPoints.default.store=${spaces.store}
-imap.config.server.mountPoints.default.rootPath=${protocols.rootPath}
-imap.config.server.mountPoints.value.AlfrescoIMAP.mountPointName=Alfresco IMAP
-imap.config.server.mountPoints.value.AlfrescoIMAP.modeName=MIXED
-
-# Activities Feed - refer to subsystem
-
-# Feed max size (number of entries)
-activities.feed.max.size=100
-# Feed max age (eg. 44640 mins => 31 days)
-activities.feed.max.ageMins=44640
-
-activities.feedNotifier.batchSize=200
-activities.feedNotifier.numThreads=2
-
-# Subsystem unit test values. Will not have any effect on production servers
-subsystems.test.beanProp.default.longProperty=123456789123456789
-subsystems.test.beanProp.default.anotherStringProperty=Global Default
-subsystems.test.beanProp=inst1,inst2,inst3
-subsystems.test.beanProp.value.inst2.boolProperty=true
-subsystems.test.beanProp.value.inst3.anotherStringProperty=Global Instance Default
-subsystems.test.simpleProp2=true
-subsystems.test.simpleProp3=Global Default3
-
-# Default Async Action Thread Pool
-default.async.action.threadPriority=1
-default.async.action.corePoolSize=8
-default.async.action.maximumPoolSize=20
-
-# Deployment Service
-deployment.service.numberOfSendingThreads=5
-deployment.service.corePoolSize=2
-deployment.service.maximumPoolSize=3
-deployment.service.threadPriority=5
-# How long to wait in mS before refreshing a target lock - detects shutdown servers
-deployment.service.targetLockRefreshTime=60000
-# How long to wait in mS from the last communication before deciding that deployment has failed, possibly
-# the destination is no longer available?
-deployment.service.targetLockTimeout=3600000
-
-#Invitation Service
-# Should send emails as part of invitation process.
-notification.email.siteinvite=true
-
-# Transfer Service
-transferservice.receiver.enabled=true
-transferservice.receiver.stagingDir=${java.io.tmpdir}/alfresco-transfer-staging
-#
-# How long to wait in mS before refreshing a transfer lock - detects shutdown servers
-# Default 1 minute.
-transferservice.receiver.lockRefreshTime=60000
-#
-# How many times to attempt retry the transfer lock
-transferservice.receiver.lockRetryCount=3
-# How long to wait, in mS, before retrying the transfer lock
-transferservice.receiver.lockRetryWait=100
-#
-# How long to wait, in mS, since the last contact with from the client before
-# timing out a transfer. Needs to be long enough to cope with network delays and "thinking
-# time" for both source and destination. Default 5 minutes.
-transferservice.receiver.lockTimeOut=300000
-
-; DM Receiver Properties
-;
-; The name of the DM Receiver target - you deploy to this target name
-deployment.dmr.name=alfresco
-
-; consolidate staging, author and workflow sandboxes to one
-deployment.dmr.consolidate=true
-
-; The name of the Alfresco receiver target
-deployment.avm.name=avm
-
-;Where should the root of the web project be stored, by default /www/avm_webapps
-deployment.avm.rootPath=/www/avm_webapps
-
-; Pattern for live stores deployment by the alfresco receiver
-deployment.avm.storeNamePattern=%storeName%-live
-
-; Built in deployment receiver properties for the default
-; filesystem receiver
-
-; filesystem receiver configuration
-deployment.filesystem.rootdir=./wcm
-deployment.filesystem.datadir=${deployment.filesystem.rootdir}/depdata
-deployment.filesystem.logdir=${deployment.filesystem.rootdir}/deplog
-deployment.filesystem.metadatadir=${deployment.filesystem.rootdir}/depmetadata
-
-deployment.filesystem.autofix=true
-deployment.filesystem.errorOnOverwrite=false
-
-; default filesystem target configuration
-deployment.filesystem.default.rootdir=./www
-deployment.filesystem.default.name=filesystem
-deployment.filesystem.default.metadatadir=${deployment.filesystem.metadatadir}/default
-
-# OrphanReaper
-orphanReaper.lockRefreshTime=60000
-orphanReaper.lockTimeOut=3600000
-
-#
-# Encryption properties
-#
-# default keystores location
-dir.keystore=classpath:alfresco/keystore
-
-# general encryption parameters
-encryption.keySpec.class=org.alfresco.encryption.DESEDEKeyGenerator
-encryption.keyAlgorithm=DESede
-encryption.cipherAlgorithm=DESede/CBC/PKCS5Padding
-
-# secret key keystore configuration
-encryption.keystore.location=${dir.keystore}/keystore
-encryption.keystore.keyMetaData.location=${dir.keystore}/keystore-passwords.properties
-encryption.keystore.provider=
-encryption.keystore.type=JCEKS
-
-# backup secret key keystore configuration
-encryption.keystore.backup.location=${dir.keystore}/backup-keystore
-encryption.keystore.backup.keyMetaData.location=${dir.keystore}/backup-keystore-passwords.properties
-encryption.keystore.backup.provider=
-encryption.keystore.backup.type=JCEKS
-
-# Should encryptable properties be re-encrypted with new encryption keys on botstrap?
-encryption.bootstrap.reencrypt=false
-
-# mac/md5 encryption
-encryption.mac.messageTimeout=30000
-encryption.mac.algorithm=HmacSHA1
-
-# ssl encryption
-encryption.ssl.keystore.location=${dir.keystore}/ssl.keystore
-encryption.ssl.keystore.provider=
-encryption.ssl.keystore.type=JCEKS
-encryption.ssl.keystore.keyMetaData.location=${dir.keystore}/ssl-keystore-passwords.properties
-encryption.ssl.truststore.location=${dir.keystore}/ssl.truststore
-encryption.ssl.truststore.provider=
-encryption.ssl.truststore.type=JCEKS
-encryption.ssl.truststore.keyMetaData.location=${dir.keystore}/ssl-truststore-passwords.properties
-
-# Re-encryptor properties
-encryption.reencryptor.chunkSize=100
-encryption.reencryptor.numThreads=2
-
-# SOLR connection details (e.g. for JMX)
-solr.host=localhost
-solr.port=8080
-solr.port.ssl=8443
-solr.solrUser=solr
-solr.solrPassword=solr
-# none, https
-solr.secureComms=https
-
-
-solr.max.total.connections=40
-solr.max.host.connections=40
-
-# Solr connection timeouts
-# solr connect timeout in ms
-solr.solrConnectTimeout=5000
-
-# cron expression defining how often the Solr Admin client (used by JMX) pings Solr if it goes away
-solr.solrPingCronExpression=0 0/5 * * * ? *
-
-#
-# Web Publishing Properties
-#
-publishing.root.path=/app:company_home/app:dictionary
-publishing.root=${publishing.root.path}/${spaces.publishing.root.childname}
-
-#
-# URL Shortening Properties
-#
-urlshortening.bitly.username=brianalfresco
-urlshortening.bitly.api.key=R_ca15c6c89e9b25ccd170bafd209a0d4f
-urlshortening.bitly.url.length=20
-
-#
-# Bulk Filesystem Importer
-#
-
-# The number of threads to employ in a batch import
-bulkImport.batch.numThreads=4
-
-# The size of a batch in a batch import i.e. the number of files to import in a
-# transaction/thread
-bulkImport.batch.batchSize=20
-
-
-#
-# Caching Content Store
-#
-system.content.caching.cacheOnInbound=true
-system.content.caching.maxDeleteWatchCount=1
-# Clean up every day at 3 am
-system.content.caching.contentCleanup.cronExpression=0 0 3 * * ?
-system.content.caching.timeToLiveSeconds=0
-system.content.caching.timeToIdleSeconds=86400
-system.content.caching.maxElementsInMemory=5000
-system.content.caching.maxElementsOnDisk=10000
-system.content.caching.minFileAgeMillis=60000
-system.content.caching.maxUsageMB=4096
-# maxFileSizeMB - 0 means no max file size.
-system.content.caching.maxFileSizeMB=0
-
-mybatis.useLocalCaches=false
-
+# Repository configuration
+
+repository.name=Main Repository
+
+# Directory configuration
+
+dir.root=./alf_data
+
+dir.contentstore=${dir.root}/contentstore
+dir.contentstore.deleted=${dir.root}/contentstore.deleted
+
+# The location of cached content
+dir.cachedcontent=${dir.root}/cachedcontent
+
+dir.auditcontentstore=${dir.root}/audit.contentstore
+
+# The location for lucene index files
+dir.indexes=${dir.root}/lucene-indexes
+
+# The location for index backups
+dir.indexes.backup=${dir.root}/backup-lucene-indexes
+
+# The location for lucene index locks
+dir.indexes.lock=${dir.indexes}/locks
+
+#Directory to find external license
+dir.license.external=.
+# Spring resource location of external license files
+location.license.external=file://${dir.license.external}/*.lic
+# Spring resource location of embedded license files
+location.license.embedded=/WEB-INF/alfresco/license/*.lic
+# Spring resource location of license files on shared classpath
+location.license.shared=classpath*:/alfresco/extension/license/*.lic
+
+# WebDAV initialization properties
+system.webdav.servlet.enabled=true
+system.webdav.storeName=${protocols.storeName}
+system.webdav.rootPath=${protocols.rootPath}
+
+# Is the JBPM Deploy Process Servlet enabled?
+# Default is false. Should not be enabled in production environments as the
+# servlet allows unauthenticated deployment of new workflows.
+system.workflow.deployservlet.enabled=false
+
+# Sets the location for the JBPM Configuration File
+system.workflow.jbpm.config.location=classpath:org/alfresco/repo/workflow/jbpm/jbpm.cfg.xml
+
+# Determines if JBPM workflow definitions are shown.
+# Default is false. This controls the visibility of JBPM
+# workflow definitions from the getDefinitions and
+# getAllDefinitions WorkflowService API but still allows
+# any in-flight JBPM workflows to be completed.
+system.workflow.engine.jbpm.definitions.visible=false
+
+#Determines if Activiti definitions are visible
+system.workflow.engine.activiti.definitions.visible=true
+
+# Determines if the JBPM engine is enabled
+system.workflow.engine.jbpm.enabled=true
+
+# Determines if the Activiti engine is enabled
+system.workflow.engine.activiti.enabled=true
+
+index.subsystem.name=lucene
+
+# ######################################### #
+# Index Recovery and Tracking Configuration #
+# ######################################### #
+#
+# Recovery types are:
+# NONE: Ignore
+# VALIDATE: Checks that the first and last transaction for each store is represented in the indexes
+# AUTO: Validates and auto-recovers if validation fails
+# FULL: Full index rebuild, processing all transactions in order. The server is temporarily suspended.
+index.recovery.mode=VALIDATE
+# FULL recovery continues when encountering errors
+index.recovery.stopOnError=false
+index.recovery.maximumPoolSize=5
+# Set the frequency with which the index tracking is triggered.
+# For more information on index tracking in a cluster:
+# http://wiki.alfresco.com/wiki/High_Availability_Configuration_V1.4_to_V2.1#Version_1.4.5.2C_2.1.1_and_later
+# By default, this is effectively never, but can be modified as required.
+# Examples:
+# Never: * * * * * ? 2099
+# Once every five seconds: 0/5 * * * * ?
+# Once every two seconds : 0/2 * * * * ?
+# See http://www.quartz-scheduler.org/docs/tutorials/crontrigger.html
+index.tracking.cronExpression=0/5 * * * * ?
+index.tracking.adm.cronExpression=${index.tracking.cronExpression}
+index.tracking.avm.cronExpression=${index.tracking.cronExpression}
+# Other properties.
+index.tracking.maxTxnDurationMinutes=10
+index.tracking.reindexLagMs=1000
+index.tracking.maxRecordSetSize=1000
+index.tracking.maxTransactionsPerLuceneCommit=100
+index.tracking.disableInTransactionIndexing=false
+# Index tracking information of a certain age is cleaned out by a scheduled job.
+# Any clustered system that has been offline for longer than this period will need to be seeded
+# with a more recent backup of the Lucene indexes or the indexes will have to be fully rebuilt.
+# Use -1 to disable purging. This can be switched on at any stage.
+index.tracking.minRecordPurgeAgeDays=30
+
+# Reindexing of missing content is by default 'never' carried out.
+# The cron expression below can be changed to control the timing of this reindexing.
+# Users of Enterprise Alfresco can configure this cron expression via JMX without a server restart.
+# Note that if alfresco.cluster.name is not set, then reindexing will not occur.
+index.reindexMissingContent.cronExpression=* * * * * ? 2099
+
+# Change the failure behaviour of the configuration checker
+system.bootstrap.config_check.strict=true
+
+# The name of the cluster
+# Leave this empty to disable cluster entry
+alfresco.cluster.name=
+
+# Time to wait for a cluster node ping before marking the node as not alive (ms)
+alfresco.clusterCheck.timeout=4000
+
+# Hazelcast clustering configuration
+# Password to join the cluster
+alfresco.hazelcast.password=alfrescocluster
+# Protocol used for member discovery (tcp, ec2, udp)
+alfresco.hazelcast.protocol=tcp
+# Location of the Hazelcast configuration file
+alfresco.hazelcast.configLocation=classpath:alfresco/hazelcast/hazelcast-${alfresco.hazelcast.protocol}.xml
+# XML elements to incorporate into Hazelcast config, in particular
+# hostnames to use for membership discovery
+alfresco.hazelcast.tcp.config=localhost
+# Amazon Web Services - EC2 discovery
+alfresco.hazelcast.ec2.accesskey=my-access-key
+alfresco.hazelcast.ec2.secretkey=my-secret-key
+alfresco.hazelcast.ec2.region=us-east-1
+# Only instances belonging to this group will be discovered, default will try all running instances
+alfresco.hazelcast.ec2.securitygroup=
+alfresco.hazelcast.ec2.tagkey=type
+alfresco.hazelcast.ec2.tagvalue=hz-nodes
+
+# The EHCache RMI peer URL addresses to set in the ehcache-custom.xml file
+# Use this property to set the hostname of the current server.
+# This is only necessary if the cache peer URLs are generated with an invalid IP address for the local server.
+alfresco.ehcache.rmi.hostname=
+# Use this property to set the cache peer URL port.
+alfresco.ehcache.rmi.remoteObjectPort=0
+alfresco.ehcache.rmi.port=0
+alfresco.ehcache.rmi.socketTimeoutMillis=5000
+
+# The protocol stack to use from the JGroups configuration file
+# Use this property to select which communication method should be used.
+# The JGroups configuration file is build up using the protocol string
+alfresco.jgroups.defaultProtocol=UDP
+# The bind address and interface for JGroups to use; equivalent to -Djgroups.bind_addr and -Djgroups.bind_interface
+alfresco.jgroups.bind_address=
+alfresco.jgroups.bind_interface=
+# JGroups configuration (http://www.jgroups.org)
+# The location of the JGroups configuration file
+alfresco.jgroups.configLocation=classpath:alfresco/jgroups/alfresco-jgroups-${alfresco.jgroups.defaultProtocol}.xml
+
+#
+# How long should shutdown wait to complete normally before
+# taking stronger action and calling System.exit()
+# in ms, 10,000 is 10 seconds
+#
+shutdown.backstop.timeout=10000
+shutdown.backstop.enabled=false
+
+# Server Single User Mode
+# note:
+# only allow named user (note: if blank or not set then will allow all users)
+# assuming maxusers is not set to 0
+#server.singleuseronly.name=admin
+
+# Server Max Users - limit number of users with non-expired tickets
+# note:
+# -1 allows any number of users, assuming not in single-user mode
+# 0 prevents further logins, including the ability to enter single-user mode
+server.maxusers=-1
+
+# The Cron expression controlling the frequency with which the OpenOffice connection is tested
+openOffice.test.cronExpression=0 * * * * ?
+
+#
+# Disable all shared caches (mutable and immutable)
+# These properties are used for diagnostic purposes
+system.cache.disableMutableSharedCaches=false
+system.cache.disableImmutableSharedCaches=false
+
+#
+# Properties to limit resources spent on individual searches
+#
+# The maximum time spent pruning results
+system.acl.maxPermissionCheckTimeMillis=10000
+# The maximum number of search results to perform permission checks against
+system.acl.maxPermissionChecks=1000
+
+# The maximum number of filefolder list results
+system.filefolderservice.defaultListMaxResults=5000
+
+
+# Properties to control read permission evaluation for acegi
+system.readpermissions.optimise=true
+system.readpermissions.bulkfetchsize=1000
+
+#
+# Manually control how the system handles maximum string lengths.
+# Any zero or negative value is ignored.
+# Only change this after consulting support or reading the appropriate Javadocs for
+# org.alfresco.repo.domain.schema.SchemaBootstrap for V2.1.2
+system.maximumStringLength=-1
+
+#
+# Limit hibernate session size by trying to amalgamate events for the L2 session invalidation
+# - hibernate works as is up to this size
+# - after the limit is hit events that can be grouped invalidate the L2 cache by type and not instance
+# events may not group if there are post action listener registered (this is not the case with the default distribution)
+system.hibernateMaxExecutions=20000
+
+#
+# Determine if modification timestamp propagation from child to parent nodes is respected or not.
+# Even if 'true', the functionality is only supported for child associations that declare the
+# 'propagateTimestamps' element in the dictionary definition.
+system.enableTimestampPropagation=true
+
+#
+# Decide if content should be removed from the system immediately after being orphaned.
+# Do not change this unless you have examined the impact it has on your backup procedures.
+system.content.eagerOrphanCleanup=false
+# The number of days to keep orphaned content in the content stores.
+# This has no effect on the 'deleted' content stores, which are not automatically emptied.
+system.content.orphanProtectDays=14
+# The action to take when a store or stores fails to delete orphaned content
+# IGNORE: Just log a warning. The binary remains and the record is expunged
+# KEEP_URL: Log a warning and create a URL entry with orphan time 0. It won't be processed or removed.
+system.content.deletionFailureAction=IGNORE
+# The CRON expression to trigger the deletion of resources associated with orphaned content.
+system.content.orphanCleanup.cronExpression=0 0 4 * * ?
+# The CRON expression to trigger content URL conversion. This process is not intensive and can
+# be triggered on a live system. Similarly, it can be triggered using JMX on a dedicated machine.
+system.content.contentUrlConverter.cronExpression=* * * * * ? 2099
+system.content.contentUrlConverter.threadCount=2
+system.content.contentUrlConverter.batchSize=500
+system.content.contentUrlConverter.runAsScheduledJob=false
+
+# #################### #
+# Lucene configuration #
+# #################### #
+#
+# Millisecond threshold for text transformations
+# Slower transformers will force the text extraction to be asynchronous
+#
+lucene.maxAtomicTransformationTime=20
+#
+# The maximum number of clauses that are allowed in a lucene query
+#
+lucene.query.maxClauses=10000
+#
+# The size of the queue of nodes waiting for index
+# Events are generated as nodes are changed, this is the maximum size of the queue used to coalesce events
+# When this size is reached the lists of nodes will be indexed
+#
+# http://issues.alfresco.com/browse/AR-1280: Setting this high is the workaround as of 1.4.3.
+#
+lucene.indexer.batchSize=1000000
+fts.indexer.batchSize=1000
+#
+# Index cache sizes
+#
+lucene.indexer.cacheEnabled=true
+lucene.indexer.maxDocIdCacheSize=100000
+lucene.indexer.maxDocumentCacheSize=100
+lucene.indexer.maxIsCategoryCacheSize=-1
+lucene.indexer.maxLinkAspectCacheSize=10000
+lucene.indexer.maxParentCacheSize=100000
+lucene.indexer.maxPathCacheSize=100000
+lucene.indexer.maxTypeCacheSize=10000
+#
+# Properties for merge (note this does not affect the final index segment which will be optimised)
+# Max merge docs only applies to the merge process not the resulting index which will be optimised.
+#
+lucene.indexer.mergerMaxMergeDocs=1000000
+lucene.indexer.mergerMergeFactor=5
+lucene.indexer.mergerMaxBufferedDocs=-1
+lucene.indexer.mergerRamBufferSizeMb=16
+#
+# Properties for delta indexes (note this does not affect the final index segment which will be optimised)
+# Max merge docs only applies to the index building process not the resulting index which will be optimised.
+#
+lucene.indexer.writerMaxMergeDocs=1000000
+lucene.indexer.writerMergeFactor=5
+lucene.indexer.writerMaxBufferedDocs=-1
+lucene.indexer.writerRamBufferSizeMb=16
+#
+# Target number of indexes and deltas in the overall index and what index size to merge in memory
+#
+lucene.indexer.mergerTargetIndexCount=8
+lucene.indexer.mergerTargetOverlayCount=5
+lucene.indexer.mergerTargetOverlaysBlockingFactor=2
+lucene.indexer.maxDocsForInMemoryMerge=60000
+lucene.indexer.maxRamInMbForInMemoryMerge=16
+lucene.indexer.maxDocsForInMemoryIndex=60000
+lucene.indexer.maxRamInMbForInMemoryIndex=16
+#
+# Other lucene properties
+#
+lucene.indexer.termIndexInterval=128
+lucene.indexer.useNioMemoryMapping=true
+# over-ride to false for pre 3.0 behaviour
+lucene.indexer.postSortDateTime=true
+lucene.indexer.defaultMLIndexAnalysisMode=EXACT_LANGUAGE_AND_ALL
+lucene.indexer.defaultMLSearchAnalysisMode=EXACT_LANGUAGE_AND_ALL
+#
+# The number of terms from a document that will be indexed
+#
+lucene.indexer.maxFieldLength=10000
+
+# Should we use a 'fair' locking policy, giving queue-like access behaviour to
+# the indexes and avoiding starvation of waiting writers? Set to false on old
+# JVMs where this appears to cause deadlock
+lucene.indexer.fairLocking=true
+
+#
+# Index locks (mostly deprecated and will be tidied up with the next lucene upgrade)
+#
+lucene.write.lock.timeout=10000
+lucene.commit.lock.timeout=100000
+lucene.lock.poll.interval=100
+
+lucene.indexer.useInMemorySort=true
+lucene.indexer.maxRawResultSetSizeForInMemorySort=1000
+lucene.indexer.contentIndexingEnabled=true
+
+index.backup.cronExpression=0 0 3 * * ?
+
+lucene.defaultAnalyserResourceBundleName=alfresco/model/dataTypeAnalyzers
+
+
+
+# When transforming archive files (.zip etc) into text representations (such as
+# for full text indexing), should the files within the archive be processed too?
+# If enabled, transformation takes longer, but searches of the files find more.
+transformer.Archive.includeContents=false
+
+# Database configuration
+db.schema.stopAfterSchemaBootstrap=false
+db.schema.update=true
+db.schema.update.lockRetryCount=24
+db.schema.update.lockRetryWaitSeconds=5
+db.driver=org.gjt.mm.mysql.Driver
+db.name=alfresco
+db.url=jdbc:mysql:///${db.name}
+db.username=alfresco
+db.password=alfresco
+db.pool.initial=10
+db.pool.max=40
+db.txn.isolation=-1
+db.pool.statements.enable=true
+db.pool.statements.max=40
+db.pool.min=0
+db.pool.idle=-1
+db.pool.wait.max=-1
+db.pool.validate.query=
+db.pool.evict.interval=-1
+db.pool.evict.idle.min=1800000
+db.pool.validate.borrow=true
+db.pool.validate.return=false
+db.pool.evict.validate=false
+#
+db.pool.abandoned.detect=false
+db.pool.abandoned.time=300
+#
+# db.pool.abandoned.log=true (logAbandoned) adds overhead (http://commons.apache.org/dbcp/configuration.html)
+# and also requires db.pool.abandoned.detect=true (removeAbandoned)
+#
+db.pool.abandoned.log=false
+
+
+# Audit configuration
+audit.enabled=true
+audit.tagging.enabled=true
+audit.alfresco-access.enabled=false
+audit.alfresco-access.sub-events.enabled=false
+audit.cmischangelog.enabled=false
+audit.dod5015.enabled=false
+# Setting this flag to true will force startup failure when invalid audit configurations are detected
+audit.config.strict=false
+# Audit map filter for AccessAuditor - restricts recorded events to user driven events
+audit.filter.alfresco-access.default.enabled=true
+audit.filter.alfresco-access.transaction.user=~System;~null;.*
+audit.filter.alfresco-access.transaction.type=cm:folder;cm:content;st:site
+audit.filter.alfresco-access.transaction.path=~/sys:archivedItem;~/ver:;.*
+
+
+# System Configuration
+system.store=system://system
+system.descriptor.childname=sys:descriptor
+system.descriptor.current.childname=sys:descriptor-current
+
+# User config
+alfresco_user_store.store=user://alfrescoUserStore
+alfresco_user_store.system_container.childname=sys:system
+alfresco_user_store.user_container.childname=sys:people
+
+# note: default admin username - should not be changed after installation
+alfresco_user_store.adminusername=admin
+
+# Initial password - editing this will not have any effect once the repository is installed
+alfresco_user_store.adminpassword=209c6174da490caeb422f3fa5a7ae634
+
+# note: default guest username - should not be changed after installation
+alfresco_user_store.guestusername=guest
+
+# Used to move home folders to a new location
+home_folder_provider_synchronizer.enabled=false
+home_folder_provider_synchronizer.override_provider=
+home_folder_provider_synchronizer.keep_empty_parents=false
+
+# Spaces Archive Configuration
+spaces.archive.store=archive://SpacesStore
+
+# Spaces Configuration
+spaces.store=workspace://SpacesStore
+spaces.company_home.childname=app:company_home
+spaces.guest_home.childname=app:guest_home
+spaces.dictionary.childname=app:dictionary
+spaces.templates.childname=app:space_templates
+spaces.imapConfig.childname=app:imap_configs
+spaces.imap_templates.childname=app:imap_templates
+spaces.scheduled_actions.childname=cm:Scheduled Actions
+spaces.emailActions.childname=app:email_actions
+spaces.searchAction.childname=cm:search
+spaces.templates.content.childname=app:content_templates
+spaces.templates.email.childname=app:email_templates
+spaces.templates.email.invite1.childname=app:invite_email_templates
+spaces.templates.email.notify.childname=app:notify_email_templates
+spaces.templates.email.following.childname=app:following
+spaces.templates.rss.childname=app:rss_templates
+spaces.savedsearches.childname=app:saved_searches
+spaces.scripts.childname=app:scripts
+spaces.wcm.childname=app:wcm
+spaces.wcm_content_forms.childname=app:wcm_forms
+spaces.content_forms.childname=app:forms
+spaces.user_homes.childname=app:user_homes
+spaces.user_homes.regex.key=userName
+spaces.user_homes.regex.pattern=
+spaces.user_homes.regex.group_order=
+spaces.sites.childname=st:sites
+spaces.templates.email.invite.childname=cm:invite
+spaces.templates.email.activities.childname=cm:activities
+spaces.rendition.rendering_actions.childname=app:rendering_actions
+spaces.replication.replication_actions.childname=app:replication_actions
+spaces.wcm_deployed.childname=cm:wcm_deployed
+spaces.transfers.childname=app:transfers
+spaces.transfer_groups.childname=app:transfer_groups
+spaces.transfer_temp.childname=app:temp
+spaces.inbound_transfer_records.childname=app:inbound_transfer_records
+spaces.webscripts.childname=cm:webscripts
+spaces.extension_webscripts.childname=cm:extensionwebscripts
+spaces.models.childname=app:models
+spaces.workflow.definitions.childname=app:workflow_defs
+spaces.publishing.root.childname=app:publishing_root
+spaces.templates.email.workflowemailnotification.childname=cm:workflownotification
+spaces.nodetemplates.childname=app:node_templates
+
+# ADM VersionStore Configuration
+version.store.enableAutoVersioning=true
+version.store.deprecated.lightWeightVersionStore=workspace://lightWeightVersionStore
+version.store.version2Store=workspace://version2Store
+
+version.store.migrateVersionStore.threadCount=3
+version.store.migrateVersionStore.batchSize=1
+
+version.store.migrateCleanupJob.threadCount=3
+version.store.migrateCleanupJob.batchSize=1
+
+
+# WARNING: For non-production testing only !!! Do not change (to avoid version store issues, including possible mismatch). Should be false since lightWeightVersionStore is deprecated.
+version.store.onlyUseDeprecatedV1=false
+
+# The CRON expression to trigger migration of the version store from V1 (2.x) to V2 (3.x)
+# By default, this is effectively 'never' but can be modified as required.
+# Examples:
+# Never: * * * * * ? 2099
+# Once every thirty minutes: 0 0/30 * * * ?
+# See http://www.quartz-scheduler.org/docs/tutorials/crontrigger.html
+version.store.migrateVersionStore.cronExpression=* * * * * ? 2099
+# Limit number of version histories to migrate per job cycle, where -1 = unlimited. Note: if limit > 0 then need to schedule job to run regularly in order to complete the migration.
+version.store.migrateVersionStore.limitPerJobCycle=-1
+version.store.migrateVersionStore.runAsScheduledJob=false
+
+# Folders for storing people
+system.system_container.childname=sys:system
+system.people_container.childname=sys:people
+system.authorities_container.childname=sys:authorities
+system.zones_container.childname=sys:zones
+
+# Folders for storing workflow related info
+system.workflow_container.childname=sys:workflow
+
+# Are user names case sensitive?
+user.name.caseSensitive=false
+domain.name.caseSensitive=false
+domain.separator=
+
+# AVM Specific properties.
+avm.remote.idlestream.timeout=30000
+
+#Format caption extracted from the XML Schema.
+xforms.formatCaption=true
+
+# ECM content usages/quotas
+system.usages.enabled=false
+system.usages.clearBatchSize=50
+system.usages.updateBatchSize=50
+
+# Repository endpoint - used by Activity Service
+repo.remote.endpoint=/service
+
+# Create home folders as people are created (true) or create them lazily (false)
+home.folder.creation.eager=true
+
+# Should we consider zero byte content to be the same as no content when firing
+# content update policies? Prevents 'premature' firing of inbound content rules
+# for some clients such as Mac OS X Finder
+policy.content.update.ignoreEmpty=false
+
+# The well known RMI registry port and external host name published in the stubs
+# is defined in the alfresco-shared.properties file
+#
+# alfresco.rmi.services.port=50500
+
+# Default value of alfresco.rmi.services.host is 0.0.0.0 which means 'listen on all adapters'.
+# This allows connections to JMX both remotely and locally.
+#
+alfresco.rmi.services.host=0.0.0.0
+
+# If the RMI address is in-use, how many retries should be done before aborting
+# Default value of alfresco.rmi.services.retries is 0 which means 'Don't retry if the address is in-use'
+alfresco.rmi.services.retries=4
+
+# RMI service ports for the individual services.
+# These eight services are available remotely.
+#
+# Assign individual ports for each service for best performance
+# or run several services on the same port, you can even run everything on 50500 if
+# running through a firewall.
+#
+# Specify 0 to use a random unused port.
+#
+avm.rmi.service.port=50501
+avmsync.rmi.service.port=50502
+authentication.rmi.service.port=50504
+repo.rmi.service.port=50505
+action.rmi.service.port=50506
+deployment.rmi.service.port=50507
+monitor.rmi.service.port=50508
+
+
+# Should the Mbean server bind to an existing server. Set to true for most application servers.
+# false for WebSphere clusters.
+mbean.server.locateExistingServerIfPossible=true
+
+# External executable locations
+ooo.exe=soffice
+ooo.user=${dir.root}/oouser
+img.root=./ImageMagick
+img.dyn=${img.root}/lib
+img.exe=${img.root}/bin/convert
+swf.exe=./bin/pdf2swf
+
+# Thumbnail Service
+system.thumbnail.generate=true
+
+# Generate doclib icons
+# When creating a doclib icon, only use the first pageLimit pages (currently only understood by pdfbox
+# TextToPdfContentTransformer)
+system.thumbnail.definition.doclib.timeoutMs=-1
+system.thumbnail.definition.doclib.readLimitTimeMs=-1
+system.thumbnail.definition.doclib.maxSourceSizeKBytes=-1
+system.thumbnail.definition.doclib.readLimitKBytes=-1
+system.thumbnail.definition.doclib.pageLimit=1
+system.thumbnail.definition.doclib.maxPages=-1
+
+# Max mimetype sizes to create thumbnail icons
+system.thumbnail.mimetype.maxSourceSizeKBytes.pdf=-1
+system.thumbnail.mimetype.maxSourceSizeKBytes.txt=-1
+system.thumbnail.mimetype.maxSourceSizeKBytes.docx=-1
+system.thumbnail.mimetype.maxSourceSizeKBytes.xlsx=-1
+system.thumbnail.mimetype.maxSourceSizeKBytes.pptx=-1
+system.thumbnail.mimetype.maxSourceSizeKBytes.odt=-1
+system.thumbnail.mimetype.maxSourceSizeKBytes.ods=-1
+system.thumbnail.mimetype.maxSourceSizeKBytes.odp=-1
+
+# Configuration for handling of failing thumbnails.
+# See NodeEligibleForRethumbnailingEvaluator's javadoc for details.
+#
+# Retry periods limit the frequency with which the repository will attempt to create Share thumbnails
+# for content nodes which have previously failed in their thumbnail attempts.
+# These periods are in seconds.
+#
+# 604800s = 60s * 60m * 24h * 7d = 1 week
+system.thumbnail.retryPeriod=60
+system.thumbnail.retryCount=2
+system.thumbnail.quietPeriod=604800
+system.thumbnail.quietPeriodRetriesEnabled=true
+
+# Content Transformers
+
+# Base setting for all transformers (2 min timeout)
+content.transformer.default.timeoutMs=120000
+content.transformer.default.readLimitTimeMs=-1
+content.transformer.default.maxSourceSizeKBytes=-1
+content.transformer.default.readLimitKBytes=-1
+content.transformer.default.pageLimit=-1
+content.transformer.default.maxPages=-1
+
+# text -> pdf using PdfBox (txt=text/*) 10M takes about 12 seconds
+content.transformer.PdfBox.TextToPdf.maxSourceSizeKBytes=10240
+
+# pdf -> swf using Pdf2swf 1M takes about 30 seconds.
+# Using a value of 1.25M (a bit larger than the txt or xlsx) used to create
+# the pdf on the way to swf to avoid the second part of a transform failing
+content.transformer.Pdf2swf.maxSourceSizeKBytes=1152
+
+# txt -> pdf -> swf 1M (pdf is about the same size as the txt)
+# Need this limit as transformer.PdfBox txt -> pdf is allowed up to 10M
+# unlike transformer.OpenOffice and transformer.JodConverter which
+# also only allow 1M text
+content.transformer.complex.Text.Pdf2swf.maxSourceSizeKBytes=1024
+
+# Limits for specific mimetype transformations (txt=text/plain only) 1M
+# Has implication for Share preview which generally goes via pdf to get to swf
+content.transformer.OpenOffice.mimeTypeLimits.txt.pdf.maxSourceSizeKBytes=1024
+content.transformer.OpenOffice.mimeTypeLimits.xlsx.pdf.maxSourceSizeKBytes=1024
+content.transformer.OpenOffice.mimeTypeLimits.xls.pdf.maxSourceSizeKBytes=1024
+
+content.transformer.JodConverter.mimeTypeLimits.txt.pdf.maxSourceSizeKBytes=1024
+content.transformer.JodConverter.mimeTypeLimits.xlsx.pdf.maxSourceSizeKBytes=1024
+content.transformer.JodConverter.mimeTypeLimits.xls.pdf.maxSourceSizeKBytes=1024
+
+# Property to enable upgrade from 2.1-A
+V2.1-A.fixes.to.schema=0
+#V2.1-A.fixes.to.schema=82
+
+# The default authentication chain
+authentication.chain=alfrescoNtlm1:alfrescoNtlm
+
+# Do authentication tickets expire or live for ever?
+authentication.ticket.ticketsExpire=false
+
+# If ticketsExpire is true then how should they expire?
+# Valid values are: AFTER_INACTIVITY, AFTER_FIXED_TIME, DO_NOT_EXPIRE
+# The default is AFTER_FIXED_TIME
+authentication.ticket.expiryMode=AFTER_FIXED_TIME
+
+# If authentication.ticket.ticketsExpire is true and
+# authentication.ticket.expiryMode is AFTER_FIXED_TIME or AFTER_INACTIVITY,
+# this controls the minimum period for which tickets are valid.
+# The default is PT1H for one hour.
+authentication.ticket.validDuration=PT1H
+
+# Default NFS user mappings (empty). Note these users will be able to
+# authenticate through NFS without password so ensure NFS port is secure before
+# enabling and adding mappings
+nfs.user.mappings=
+nfs.user.mappings.default.uid=0
+nfs.user.mappings.default.gid=0
+
+#Example NFS user mappings
+#nfs.user.mappings=admin,user1
+#nfs.user.mappings.value.admin.uid=0
+#nfs.user.mappings.value.admin.gid=0
+#nfs.user.mappings.value.user1.uid=500
+#nfs.user.mappings.value.user1.gid=500
+
+# Default root path for protocols
+protocols.storeName=${spaces.store}
+protocols.rootPath=/${spaces.company_home.childname}
+
+# OpenCMIS
+opencmis.connector.default.store=${spaces.store}
+opencmis.connector.default.rootPath=/${spaces.company_home.childname}
+opencmis.connector.default.typesDefaultMaxItems=500
+opencmis.connector.default.typesDefaultDepth=-1
+opencmis.connector.default.objectsDefaultMaxItems=10000
+opencmis.connector.default.objectsDefaultDepth=100
+opencmis.connector.default.openHttpSession=false
+
+# IMAP
+imap.server.enabled=false
+imap.server.port=143
+imap.server.attachments.extraction.enabled=true
+
+# Default IMAP mount points
+imap.config.home.store=${spaces.store}
+imap.config.home.rootPath=/${spaces.company_home.childname}
+imap.config.home.folderPath=Imap Home
+imap.config.server.mountPoints=AlfrescoIMAP
+imap.config.server.mountPoints.default.mountPointName=IMAP
+imap.config.server.mountPoints.default.modeName=ARCHIVE
+imap.config.server.mountPoints.default.store=${spaces.store}
+imap.config.server.mountPoints.default.rootPath=${protocols.rootPath}
+imap.config.server.mountPoints.value.AlfrescoIMAP.mountPointName=Alfresco IMAP
+imap.config.server.mountPoints.value.AlfrescoIMAP.modeName=MIXED
+
+# Activities Feed - refer to subsystem
+
+# Feed max size (number of entries)
+activities.feed.max.size=100
+# Feed max age (eg. 44640 mins => 31 days)
+activities.feed.max.ageMins=44640
+
+activities.feedNotifier.batchSize=200
+activities.feedNotifier.numThreads=2
+
+# Subsystem unit test values. Will not have any effect on production servers
+subsystems.test.beanProp.default.longProperty=123456789123456789
+subsystems.test.beanProp.default.anotherStringProperty=Global Default
+subsystems.test.beanProp=inst1,inst2,inst3
+subsystems.test.beanProp.value.inst2.boolProperty=true
+subsystems.test.beanProp.value.inst3.anotherStringProperty=Global Instance Default
+subsystems.test.simpleProp2=true
+subsystems.test.simpleProp3=Global Default3
+
+# Default Async Action Thread Pool
+default.async.action.threadPriority=1
+default.async.action.corePoolSize=8
+default.async.action.maximumPoolSize=20
+
+# Deployment Service
+deployment.service.numberOfSendingThreads=5
+deployment.service.corePoolSize=2
+deployment.service.maximumPoolSize=3
+deployment.service.threadPriority=5
+# How long to wait in mS before refreshing a target lock - detects shutdown servers
+deployment.service.targetLockRefreshTime=60000
+# How long to wait in mS from the last communication before deciding that deployment has failed, possibly
+# the destination is no longer available?
+deployment.service.targetLockTimeout=3600000
+
+#Invitation Service
+# Should send emails as part of invitation process.
+notification.email.siteinvite=true
+
+# Transfer Service
+transferservice.receiver.enabled=true
+transferservice.receiver.stagingDir=${java.io.tmpdir}/alfresco-transfer-staging
+#
+# How long to wait in mS before refreshing a transfer lock - detects shutdown servers
+# Default 1 minute.
+transferservice.receiver.lockRefreshTime=60000
+#
+# How many times to attempt retry the transfer lock
+transferservice.receiver.lockRetryCount=3
+# How long to wait, in mS, before retrying the transfer lock
+transferservice.receiver.lockRetryWait=100
+#
+# How long to wait, in mS, since the last contact from the client before
+# timing out a transfer. Needs to be long enough to cope with network delays and "thinking
+# time" for both source and destination. Default 5 minutes.
+transferservice.receiver.lockTimeOut=300000
+
+; DM Receiver Properties
+;
+; The name of the DM Receiver target - you deploy to this target name
+deployment.dmr.name=alfresco
+
+; consolidate staging, author and workflow sandboxes to one
+deployment.dmr.consolidate=true
+
+; The name of the Alfresco receiver target
+deployment.avm.name=avm
+
+;Where should the root of the web project be stored, by default /www/avm_webapps
+deployment.avm.rootPath=/www/avm_webapps
+
+; Pattern for live stores deployment by the alfresco receiver
+deployment.avm.storeNamePattern=%storeName%-live
+
+; Built in deployment receiver properties for the default
+; filesystem receiver
+
+; filesystem receiver configuration
+deployment.filesystem.rootdir=./wcm
+deployment.filesystem.datadir=${deployment.filesystem.rootdir}/depdata
+deployment.filesystem.logdir=${deployment.filesystem.rootdir}/deplog
+deployment.filesystem.metadatadir=${deployment.filesystem.rootdir}/depmetadata
+
+deployment.filesystem.autofix=true
+deployment.filesystem.errorOnOverwrite=false
+
+; default filesystem target configuration
+deployment.filesystem.default.rootdir=./www
+deployment.filesystem.default.name=filesystem
+deployment.filesystem.default.metadatadir=${deployment.filesystem.metadatadir}/default
+
+# OrphanReaper
+orphanReaper.lockRefreshTime=60000
+orphanReaper.lockTimeOut=3600000
+
+#
+# Encryption properties
+#
+# default keystores location
+dir.keystore=classpath:alfresco/keystore
+
+# general encryption parameters
+encryption.keySpec.class=org.alfresco.encryption.DESEDEKeyGenerator
+encryption.keyAlgorithm=DESede
+encryption.cipherAlgorithm=DESede/CBC/PKCS5Padding
+
+# secret key keystore configuration
+encryption.keystore.location=${dir.keystore}/keystore
+encryption.keystore.keyMetaData.location=${dir.keystore}/keystore-passwords.properties
+encryption.keystore.provider=
+encryption.keystore.type=JCEKS
+
+# backup secret key keystore configuration
+encryption.keystore.backup.location=${dir.keystore}/backup-keystore
+encryption.keystore.backup.keyMetaData.location=${dir.keystore}/backup-keystore-passwords.properties
+encryption.keystore.backup.provider=
+encryption.keystore.backup.type=JCEKS
+
+# Should encryptable properties be re-encrypted with new encryption keys on bootstrap?
+encryption.bootstrap.reencrypt=false
+
+# mac/md5 encryption
+encryption.mac.messageTimeout=30000
+encryption.mac.algorithm=HmacSHA1
+
+# ssl encryption
+encryption.ssl.keystore.location=${dir.keystore}/ssl.keystore
+encryption.ssl.keystore.provider=
+encryption.ssl.keystore.type=JCEKS
+encryption.ssl.keystore.keyMetaData.location=${dir.keystore}/ssl-keystore-passwords.properties
+encryption.ssl.truststore.location=${dir.keystore}/ssl.truststore
+encryption.ssl.truststore.provider=
+encryption.ssl.truststore.type=JCEKS
+encryption.ssl.truststore.keyMetaData.location=${dir.keystore}/ssl-truststore-passwords.properties
+
+# Re-encryptor properties
+encryption.reencryptor.chunkSize=100
+encryption.reencryptor.numThreads=2
+
+# SOLR connection details (e.g. for JMX)
+solr.host=localhost
+solr.port=8080
+solr.port.ssl=8443
+solr.solrUser=solr
+solr.solrPassword=solr
+# none, https
+solr.secureComms=https
+
+
+solr.max.total.connections=40
+solr.max.host.connections=40
+
+# Solr connection timeouts
+# solr connect timeout in ms
+solr.solrConnectTimeout=5000
+
+# cron expression defining how often the Solr Admin client (used by JMX) pings Solr if it goes away
+solr.solrPingCronExpression=0 0/5 * * * ? *
+
+#
+# Web Publishing Properties
+#
+publishing.root.path=/app:company_home/app:dictionary
+publishing.root=${publishing.root.path}/${spaces.publishing.root.childname}
+
+#
+# URL Shortening Properties
+#
+urlshortening.bitly.username=brianalfresco
+urlshortening.bitly.api.key=R_ca15c6c89e9b25ccd170bafd209a0d4f
+urlshortening.bitly.url.length=20
+
+#
+# Bulk Filesystem Importer
+#
+
+# The number of threads to employ in a batch import
+bulkImport.batch.numThreads=4
+
+# The size of a batch in a batch import i.e. the number of files to import in a
+# transaction/thread
+bulkImport.batch.batchSize=20
+
+
+#
+# Caching Content Store
+#
+system.content.caching.cacheOnInbound=true
+system.content.caching.maxDeleteWatchCount=1
+# Clean up every day at 3 am
+system.content.caching.contentCleanup.cronExpression=0 0 3 * * ?
+system.content.caching.timeToLiveSeconds=0
+system.content.caching.timeToIdleSeconds=86400
+system.content.caching.maxElementsInMemory=5000
+system.content.caching.maxElementsOnDisk=10000
+system.content.caching.minFileAgeMillis=60000
+system.content.caching.maxUsageMB=4096
+# maxFileSizeMB - 0 means no max file size.
+system.content.caching.maxFileSizeMB=0
+
+mybatis.useLocalCaches=false
+
fileFolderService.checkHidden.enabled=true
\ No newline at end of file
diff --git a/source/java/org/alfresco/repo/cluster/ClusterCheckEvent.java b/source/java/org/alfresco/repo/cluster/ClusterCheckEvent.java
new file mode 100644
index 0000000000..7d75bc7c6a
--- /dev/null
+++ b/source/java/org/alfresco/repo/cluster/ClusterCheckEvent.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2005-2012 Alfresco Software Limited.
+ *
+ * This file is part of Alfresco
+ *
+ * Alfresco is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Alfresco is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with Alfresco. If not, see .
+ */
+package org.alfresco.repo.cluster;
+
+/**
+ *
+ * @since Odin
+ *
+ */
+public class ClusterCheckEvent extends ClusterMessageEvent
+{
+ private static final long serialVersionUID = -4633842466757526069L;
+
+ public ClusterCheckEvent(ClusterChecker clusterChecker, String sourceId, String targetId)
+ {
+ super(clusterChecker, sourceId, targetId);
+ }
+
+}
diff --git a/source/java/org/alfresco/repo/cluster/ClusterChecker.java b/source/java/org/alfresco/repo/cluster/ClusterChecker.java
new file mode 100644
index 0000000000..7bbe410402
--- /dev/null
+++ b/source/java/org/alfresco/repo/cluster/ClusterChecker.java
@@ -0,0 +1,759 @@
+/*
+ * Copyright (C) 2005-2012 Alfresco Software Limited.
+ *
+ * This file is part of Alfresco
+ *
+ * Alfresco is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Alfresco is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with Alfresco. If not, see .
+ */
+package org.alfresco.repo.cluster;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.Serializable;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.Timer;
+import java.util.TimerTask;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.alfresco.repo.lock.JobLockService;
+import org.alfresco.repo.lock.LockAcquisitionException;
+import org.alfresco.repo.security.authentication.AuthenticationException;
+import org.alfresco.repo.security.authentication.AuthenticationUtil;
+import org.alfresco.repo.transaction.RetryingTransactionHelper.RetryingTransactionCallback;
+import org.alfresco.service.cmr.security.AuthenticationService;
+import org.alfresco.service.namespace.NamespaceService;
+import org.alfresco.service.namespace.QName;
+import org.alfresco.service.transaction.TransactionService;
+import org.alfresco.util.EqualsHelper;
+import org.alfresco.util.GUID;
+import org.alfresco.util.TempFileProvider;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.springframework.beans.BeansException;
+import org.springframework.context.ApplicationContext;
+import org.springframework.context.ApplicationContextAware;
+import org.springframework.context.ApplicationEvent;
+
+/**
+ * Checks that the cluster is working.
+ *
+ * @since Odin
+ *
+ */
+public class ClusterChecker implements MessageReceiver, ApplicationContextAware
+{
+ private static final Log logger = LogFactory.getLog(ClusterChecker.class);
+ private static final String TmpFile = ".clusterChecker";
+ private static final QName LOCK = QName.createQName(NamespaceService.SYSTEM_MODEL_1_0_URI, "ClusterChecker");
+
+ /*
+ * WORKING: is synced with other nodes in the cluster
+ * NOTWORKING: is alive but not synced with other nodes in the cluster
+ * UNKNOWN: status is unknown (could be in the middle of checking)
+ * TIMEOUT: timed out waiting for a cluster check response
+ */
+ public static enum NodeStatus
+ {
+ WORKING, NOTWORKING, TIMEOUT, UNKNOWN;
+ };
+
+ // time to wait for a cluster node to respond
+ private int timeout = 4000; // ms
+
+ private ApplicationContext applicationContext;
+ private AuthenticationService authenticationService;
+ private TransactionService transactionService;
+ private MessengerFactory messengerFactory;
+ private JobLockService jobLockService;
+
+ private Messenger messenger;
+
+ private Timer timer = new Timer();
+
+ // cluster nodes that this node knows about
+ private Map nodeInfo = new ConcurrentHashMap();
+
+ // unique id for this cluster node
+ private String id = null;
+
+ public ClusterChecker() throws FileNotFoundException, IOException, ClassNotFoundException
+ {
+ this.id = buildId();
+ }
+
+ private String buildId() throws FileNotFoundException, IOException, ClassNotFoundException
+ {
+ // we need an immutable unique id for the cluster node
+ String guid = null;
+
+ File systemTmpDir = TempFileProvider.getSystemTempDir();
+ File tmpFile = new File(systemTmpDir, TmpFile);
+
+ // persist the id locally
+ if(!tmpFile.exists())
+ {
+ guid = GUID.generate();
+ tmpFile.createNewFile();
+ ObjectOutputStream out = new ObjectOutputStream(new FileOutputStream(tmpFile));
+ out.writeObject(guid);
+ out.close();
+ }
+ else
+ {
+ ObjectInputStream in = new ObjectInputStream(new FileInputStream(tmpFile));
+ guid = (String)in.readObject();
+ in.close();
+ }
+
+ return guid;
+ }
+
+ /**
+ * Attempts to get the lock. If the lock couldn't be taken, then null is returned.
+ *
+ * @return Returns the lock token or null
+ */
+ private String getLock(long time)
+ {
+ try
+ {
+ return jobLockService.getLock(LOCK, time);
+ }
+ catch (LockAcquisitionException e)
+ {
+ return null;
+ }
+ }
+
+ public void init()
+ {
+ this.messenger = messengerFactory.createMessenger(getClass().getName(), true);
+ messenger.setReceiver(this);
+ }
+
+ public void shutdown()
+ {
+ cancelTimer();
+ }
+
+ public void setTimeout(int timeout)
+ {
+ this.timeout = timeout;
+ }
+
+ public void setJobLockService(JobLockService jobLockService)
+ {
+ this.jobLockService = jobLockService;
+ }
+
+ public void setTransactionService(TransactionService transactionService)
+ {
+ this.transactionService = transactionService;
+ }
+
+ public void setAuthenticationService(AuthenticationService authenticationService)
+ {
+ this.authenticationService = authenticationService;
+ }
+
+ public void setMessengerFactory(MessengerFactory messengerFactory)
+ {
+ this.messengerFactory = messengerFactory;
+ }
+
+ private void cancelTimer()
+ {
+ timer.cancel();
+ }
+
+ private NodeInfo registerNode(String id)
+ {
+ NodeInfo info = new NodeInfo(id);
+ nodeInfo.put(id, info);
+ return info;
+ }
+
+ private void checkCluster()
+ {
+ // set the status of any currently tracked to 'checking'
+ for(NodeInfo info : nodeInfo.values())
+ {
+ info.setChecking(true);
+ }
+
+ // Authenticate and get a ticket. This will be used to validate that the other nodes in the cluster are
+ // 'working' i.e. their caches are updating in the cluster.
+ try
+ {
+ AuthenticationUtil.pushAuthentication();
+ AuthenticationUtil.setFullyAuthenticatedUser(AuthenticationUtil.getAdminUserName());
+ String ticket = authenticationService.getCurrentTicket();
+ messenger.send(new ClusterValidateEvent(this, ticket, id, null));
+ }
+ catch(AuthenticationException e)
+ {
+ logger.warn("Unable to check cluster, authentication failed", e);
+ return;
+ }
+ finally
+ {
+ AuthenticationUtil.popAuthentication();
+ }
+
+ // A timer to mark nodes still in the checking state as not alive after a timeout.
+ timer.schedule(new TimerTask()
+ {
+ @Override
+ public void run()
+ {
+ for(NodeInfo info : nodeInfo.values())
+ {
+ List timedOut = info.timeoutNodes();
+ for(String nodeId : timedOut)
+ {
+ nodePairStatusChange(info.getId(), nodeId, NodeStatus.TIMEOUT);
+ }
+ }
+ }
+ }, timeout);
+ }
+
+ private void nodePairStatusChange(String sourceNodeId, String targetNodeId, NodeStatus status)
+ {
+ publishEvent(new ClusterNodePairStatusEvent(this, sourceNodeId, targetNodeId, status));
+ }
+
+ private void nodeFound(String nodeId)
+ {
+ publishEvent(new ClusterNodeExistsEvent(this, nodeId));
+ }
+
+ private String getAddress()
+ {
+ try
+ {
+ return InetAddress.getLocalHost().getHostName();
+ }
+ catch(UnknownHostException e)
+ {
+ return "Unknown";
+ }
+ }
+
+ private void publishEvent(ApplicationEvent event)
+ {
+ applicationContext.publishEvent(event);
+ }
+
+ private void handleValidationEvent(ClusterValidateEvent validateEvent)
+ {
+ String sourceId = validateEvent.getSourceId();
+ String ticket = validateEvent.getTicket();
+
+ // try to validate the ticket generated by the source node
+ boolean ticketValid = true;
+ try
+ {
+ AuthenticationUtil.pushAuthentication();
+ authenticationService.validate(ticket);
+ if(!authenticationService.getCurrentUserName().equals(AuthenticationUtil.getAdminUserName()))
+ {
+ ticketValid = false;
+ }
+ }
+ catch(AuthenticationException e)
+ {
+ ticketValid = false;
+ }
+ finally
+ {
+ AuthenticationUtil.popAuthentication();
+ }
+
+ messenger.send(new ClusterValidateResponseEvent(this, getAddress(), sourceId, id, ticketValid));
+ }
+
+ private void handleValidationResponse(ClusterValidateResponseEvent validateResponseEvent)
+ {
+ String sourceId = validateResponseEvent.getSourceId();
+ String targetId = validateResponseEvent.getTargetId();
+ String address = validateResponseEvent.getAddress(); // target address
+
+ NodeInfo source = getNodeInfo(sourceId);
+ boolean newSourceNode = false;
+ if(source == null)
+ {
+ source = registerNode(sourceId);
+ newSourceNode = true;
+ }
+
+ // update the target's address, if it isn't known already
+ boolean newTargetNode = false;
+ NodeInfo remote = getNodeInfo(targetId);
+ if(remote == null)
+ {
+ remote = registerNode(targetId);
+ newTargetNode = true;
+ }
+ remote.setAddress(address);
+
+ // update source node's view of the target's status
+ boolean ticketValid = validateResponseEvent.isTicketValid();
+ NodeStatus newTargetStatus = ticketValid ? NodeStatus.WORKING : NodeStatus.NOTWORKING;
+ source.setStatus(targetId, newTargetStatus);
+
+ if(newSourceNode)
+ {
+ nodeFound(sourceId);
+ }
+
+ if(newTargetNode)
+ {
+ nodeFound(targetId);
+ }
+
+ if(!sourceId.equals(targetId) && newTargetStatus != NodeStatus.UNKNOWN)
+ {
+ nodePairStatusChange(sourceId, targetId, newTargetStatus);
+ }
+ }
+
+ public boolean isConnected()
+ {
+ return messenger.isConnected();
+ }
+
+ public boolean isClusterActive()
+ {
+ return messengerFactory.isClusterActive();
+ }
+
+ public Map getNodeInfo()
+ {
+ return Collections.unmodifiableMap(nodeInfo);
+ }
+
+ public NodeInfo getNodeInfo(String nodeId)
+ {
+ return nodeInfo.get(nodeId);
+ }
+
+ public String getId()
+ {
+ return id;
+ }
+
+ public void check()
+ {
+ // Take out a lock to prevent more than one check at a time
+ RetryingTransactionCallback txnWork = new RetryingTransactionCallback()
+ {
+ public String execute() throws Exception
+ {
+ String lockToken = getLock(timeout + 1000);
+ return lockToken;
+ }
+ };
+
+ final String lockToken = transactionService.getRetryingTransactionHelper().doInTransaction(txnWork, false, true);
+ if(lockToken == null)
+ {
+ logger.warn("Can't get lock. Assume multiple cluster checkers ...");
+ return;
+ }
+
+ // Kick off the check by broadcasting the initiating event to each node in the cluster
+ if (messenger.isConnected())
+ {
+ messenger.send(new ClusterCheckEvent(this, id, null));
+ }
+
+ // A timer to release the lock after a timeout
+ timer.schedule(new TimerTask()
+ {
+ @Override
+ public void run()
+ {
+ jobLockService.releaseLock(lockToken, LOCK);
+ }
+ }, timeout);
+ }
+
+ public List getPeers(String nodeId)
+ {
+ NodeInfo nodeInfo = getNodeInfo(nodeId);
+ Map peersInfo = nodeInfo.getPeersInfo();
+
+ List ret = new ArrayList();
+ for(String peerId : peersInfo.keySet())
+ {
+ if(peerId.equals(nodeId))
+ {
+ continue;
+ }
+ NodeInfo peerInfo = getNodeInfo(peerId);
+ NodeStatus peerStatus = peersInfo.get(peerId).getNodeStatus();
+ String peerAddress = peerInfo.getAddress();
+ ret.add(new PeerNodeInfo(peerId, peerAddress, peerStatus));
+ }
+
+ return ret;
+ }
+
+ public void stopChecking(String nodeId)
+ {
+ if(nodeInfo.containsKey(nodeId))
+ {
+ nodeInfo.remove(nodeId);
+ }
+ for(NodeInfo node : nodeInfo.values())
+ {
+ node.stopChecking(nodeId);
+ }
+ publishEvent(new ClusterNodeStopTrackingEvent(this, nodeId));
+ }
+
+ @Override
+ public void onReceive(ClusterMessageEvent event)
+ {
+ if (event == null)
+ {
+ return;
+ }
+
+ if(event instanceof ClusterCheckEvent)
+ {
+ checkCluster();
+ }
+ else if(event instanceof ClusterValidateEvent)
+ {
+ // handle validation request from another node
+ handleValidationEvent((ClusterValidateEvent)event);
+ }
+ else if(event instanceof ClusterValidateResponseEvent)
+ {
+ // handle response to a validation request
+ handleValidationResponse((ClusterValidateResponseEvent)event);
+ }
+ }
+
+ public Set> getNonWorkingNodePairs()
+ {
+ Set> nonWorkingPairs = new HashSet>();
+
+ for(NodeInfo node : nodeInfo.values())
+ {
+ // a cluster node is regarded as working only if every other node agrees
+ // note that for a 2 node cluster with one node down, the other node will still be regarded
+ // as not working because there are no other nodes to counter the non-working node.
+ nonWorkingPairs.addAll(node.getNonWorkingPeers());
+ }
+
+ return nonWorkingPairs;
+ }
+
+ @Override
+ public void setApplicationContext(ApplicationContext applicationContext) throws BeansException
+ {
+ this.applicationContext = applicationContext;
+ }
+
+ // Records information on a peer i.e. whether it is being checked and its status
+ private static class PeerStatus
+ {
+ private boolean checking;
+ private NodeStatus nodeStatus;
+
+ public PeerStatus()
+ {
+ this.checking = false;
+ this.nodeStatus = NodeStatus.UNKNOWN;
+ }
+
+ public boolean isChecking()
+ {
+ return checking;
+ }
+
+ void setChecking(boolean checking)
+ {
+ this.checking = checking;
+ }
+
+ public NodeStatus getNodeStatus()
+ {
+ return nodeStatus;
+ }
+
+ void setNodeStatus(NodeStatus nodeStatus)
+ {
+ this.nodeStatus = nodeStatus;
+ }
+ }
+
+ public static class PeerNodeInfo
+ {
+ private String peerId;
+ private String peerAddress;
+ private NodeStatus peerStatus;
+
+ public PeerNodeInfo(String peerId, String peerAddress, NodeStatus peerStatus) {
+ super();
+ this.peerId = peerId;
+ this.peerAddress = peerAddress;
+ this.peerStatus = peerStatus;
+ }
+
+ public String getPeerId()
+ {
+ return peerId;
+ }
+
+ public String getPeerAddress()
+ {
+ return peerAddress;
+ }
+
+ public NodeStatus getPeerStatus()
+ {
+ return peerStatus;
+ }
+ }
+
+ // Information pertaining to a cluster node and its peers
+ public static class NodeInfo
+ {
+ private String id;
+ private String address;
+ private Map nodeInfos = new ConcurrentHashMap(5);
+
+ public NodeInfo(String id)
+ {
+ super();
+ this.id = id;
+ }
+
+ public String getId()
+ {
+ return id;
+ }
+
+ public String getAddress()
+ {
+ return address;
+ }
+
+ void setAddress(String address)
+ {
+ this.address = address;
+ }
+
+ void setStatus(String targetId, NodeStatus status)
+ {
+ PeerStatus peerStatus = getStatus(targetId, true);
+ peerStatus.setChecking(false);
+ peerStatus.setNodeStatus(status);
+ }
+
+ void stopChecking(String nodeId)
+ {
+ nodeInfos.remove(nodeId);
+ }
+
+ public Map getPeersInfo()
+ {
+ return Collections.unmodifiableMap(nodeInfos);
+ }
+
+ public PeerStatus getStatus(String nodeId)
+ {
+ return getStatus(nodeId, false);
+ }
+
+ public PeerStatus getStatus(String nodeId, boolean create)
+ {
+ PeerStatus peerStatus = nodeInfos.get(nodeId);
+ if(peerStatus == null)
+ {
+ peerStatus = new PeerStatus();
+ nodeInfos.put(nodeId, peerStatus);
+ }
+ return peerStatus;
+ }
+
+ void setChecking(boolean checking)
+ {
+ for(String nodeId : nodeInfos.keySet())
+ {
+ setChecking(nodeId, checking);
+ }
+ }
+
+ void setChecking(String nodeId, boolean checking)
+ {
+ PeerStatus status = getStatus(nodeId, true);
+ status.setChecking(checking);
+ }
+
+ void setStatuses(NodeStatus status)
+ {
+ for(String nodeId : nodeInfos.keySet())
+ {
+ setStatus(nodeId, status);
+ }
+ }
+
+ List timeoutNodes()
+ {
+ List timedOut = new ArrayList();
+
+ for(String nodeId : nodeInfos.keySet())
+ {
+ if(getStatus(nodeId).isChecking())
+ {
+ setStatus(nodeId, NodeStatus.TIMEOUT);
+ timedOut.add(nodeId);
+ }
+ }
+
+ return timedOut;
+ }
+
+ public Set> getNonWorkingPeers()
+ {
+ Set> nonWorkingPeers = new HashSet>();
+ for(String nodeId : nodeInfos.keySet())
+ {
+ if(!getId().equals(nodeId) && getStatus(nodeId).getNodeStatus() != NodeStatus.WORKING)
+ {
+ nonWorkingPeers.add(new UnorderedPair(getId(), nodeId));
+ }
+ }
+
+ return nonWorkingPeers;
+ }
+
+ public boolean equals(Object other)
+ {
+ if (this == other)
+ {
+ return true;
+ }
+
+ if(!(other instanceof NodeInfo))
+ {
+ return false;
+ }
+
+ NodeInfo nodeInfo = (NodeInfo)other;
+ return EqualsHelper.nullSafeEquals(getId(), nodeInfo.getId());
+ }
+ }
+
+ public static class UnorderedPair implements Serializable
+ {
+ private static final long serialVersionUID = -8947346745086237616L;
+
+ @SuppressWarnings({ "unchecked", "rawtypes" })
+ public static final UnorderedPair NULL_PAIR = new UnorderedPair(null, null);
+
+ @SuppressWarnings("unchecked")
+ public static final UnorderedPair nullPair()
+ {
+ return NULL_PAIR;
+ }
+
+ /**
+ * The first member of the pair.
+ */
+ private T first;
+
+ /**
+ * The second member of the pair.
+ */
+ private T second;
+
+ /**
+ * Make a new one.
+ *
+ * @param first The first member.
+ * @param second The second member.
+ */
+ public UnorderedPair(T first, T second)
+ {
+ this.first = first;
+ this.second = second;
+ }
+
+ /**
+ * Get the first member of the tuple.
+ * @return The first member.
+ */
+ public final T getFirst()
+ {
+ return first;
+ }
+
+ /**
+ * Get the second member of the tuple.
+ * @return The second member.
+ */
+ public final T getSecond()
+ {
+ return second;
+ }
+
+ @Override
+ public boolean equals(Object other)
+ {
+ if (this == other)
+ {
+ return true;
+ }
+ if (other == null || !(other instanceof UnorderedPair>))
+ {
+ return false;
+ }
+ UnorderedPair> o = (UnorderedPair>)other;
+ return EqualsHelper.nullSafeEquals(this.first, o.first) &&
+ EqualsHelper.nullSafeEquals(this.second, o.second) ||
+ EqualsHelper.nullSafeEquals(this.first, o.second) &&
+ EqualsHelper.nullSafeEquals(this.second, o.first);
+ }
+
+ @Override
+ public int hashCode()
+ {
+ return (first == null ? 0 : first.hashCode()) + (second == null ? 0 : second.hashCode());
+ }
+
+ @Override
+ public String toString()
+ {
+ return "(" + first + ", " + second + ")";
+ }
+ }
+}
\ No newline at end of file
diff --git a/source/java/org/alfresco/repo/cluster/ClusterEvent.java b/source/java/org/alfresco/repo/cluster/ClusterEvent.java
new file mode 100644
index 0000000000..f33448228e
--- /dev/null
+++ b/source/java/org/alfresco/repo/cluster/ClusterEvent.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2005-2012 Alfresco Software Limited.
+ *
+ * This file is part of Alfresco
+ *
+ * Alfresco is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Alfresco is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with Alfresco. If not, see .
+ */
+package org.alfresco.repo.cluster;
+
+import org.springframework.context.ApplicationEvent;
+
+/**
+ *
+ * @since Odin
+ *
+ */
+public class ClusterEvent extends ApplicationEvent
+{
+ private static final long serialVersionUID = 7481373845772903712L;
+
+ public ClusterEvent(ClusterChecker clusterChecker)
+ {
+ super(clusterChecker);
+ }
+
+}
diff --git a/source/java/org/alfresco/repo/cluster/ClusterMessageEvent.java b/source/java/org/alfresco/repo/cluster/ClusterMessageEvent.java
new file mode 100644
index 0000000000..0c15bcf48d
--- /dev/null
+++ b/source/java/org/alfresco/repo/cluster/ClusterMessageEvent.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2005-2012 Alfresco Software Limited.
+ *
+ * This file is part of Alfresco
+ *
+ * Alfresco is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Alfresco is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with Alfresco. If not, see .
+ */
+package org.alfresco.repo.cluster;
+
+/**
+ *
+ * @since Odin
+ *
+ */
+public class ClusterMessageEvent extends ClusterEvent
+{
+ private static final long serialVersionUID = -8677530378696271077L;
+
+ private String sourceId;
+ private String targetId;
+
+ public ClusterMessageEvent(ClusterChecker clusterChecker, String sourceId, String targetId)
+ {
+ super(clusterChecker);
+ this.sourceId = sourceId;
+ this.targetId = targetId;
+ }
+
+ public String getSourceId()
+ {
+ return sourceId;
+ }
+
+ public String getTargetId()
+ {
+ return targetId;
+ }
+
+}
diff --git a/source/java/org/alfresco/repo/cluster/ClusterNodeExistsEvent.java b/source/java/org/alfresco/repo/cluster/ClusterNodeExistsEvent.java
new file mode 100644
index 0000000000..db2bd59850
--- /dev/null
+++ b/source/java/org/alfresco/repo/cluster/ClusterNodeExistsEvent.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2005-2012 Alfresco Software Limited.
+ *
+ * This file is part of Alfresco
+ *
+ * Alfresco is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Alfresco is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with Alfresco. If not, see .
+ */
+package org.alfresco.repo.cluster;
+
+/**
+ *
+ * @since Odin
+ *
+ */
+public class ClusterNodeExistsEvent extends ClusterEvent
+{
+ private static final long serialVersionUID = -9060051914186153663L;
+ public static final String NOTIFICATION_TYPE = "Cluster Node Found";
+
+ private String nodeId;
+
+ public ClusterNodeExistsEvent(ClusterChecker clusterChecker, String nodeId)
+ {
+ super(clusterChecker);
+ this.nodeId = nodeId;
+ }
+
+ public String getNodeId()
+ {
+ return nodeId;
+ }
+
+}
diff --git a/source/java/org/alfresco/repo/cluster/ClusterNodePairStatusEvent.java b/source/java/org/alfresco/repo/cluster/ClusterNodePairStatusEvent.java
new file mode 100644
index 0000000000..9541796ec5
--- /dev/null
+++ b/source/java/org/alfresco/repo/cluster/ClusterNodePairStatusEvent.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2005-2012 Alfresco Software Limited.
+ *
+ * This file is part of Alfresco
+ *
+ * Alfresco is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Alfresco is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
+ */
+package org.alfresco.repo.cluster;
+
+import org.alfresco.repo.cluster.ClusterChecker.NodeStatus;
+
+/**
+ *
+ * @since Odin
+ *
+ */
+public class ClusterNodePairStatusEvent extends ClusterEvent
+{
+ private static final long serialVersionUID = -4045195741687097066L;
+ public static final String NOTIFICATION_TYPE = "Cluster Node Pair Status";
+
+ private String sourceNodeId;
+ private String targetNodeId;
+ private NodeStatus status;
+
+ public ClusterNodePairStatusEvent(ClusterChecker clusterChecker, String sourceNodeId, String targetNodeId, NodeStatus status)
+ {
+ super(clusterChecker);
+ this.sourceNodeId = sourceNodeId;
+ this.targetNodeId = targetNodeId;
+ this.status = status;
+
+ }
+
+ public String getSourceNodeId()
+ {
+ return sourceNodeId;
+ }
+
+ public String getTargetNodeId()
+ {
+ return targetNodeId;
+ }
+
+ public NodeStatus getStatus()
+ {
+ return status;
+ }
+
+}
\ No newline at end of file
diff --git a/source/java/org/alfresco/repo/cluster/ClusterNodeStopTrackingEvent.java b/source/java/org/alfresco/repo/cluster/ClusterNodeStopTrackingEvent.java
new file mode 100644
index 0000000000..f6415e0452
--- /dev/null
+++ b/source/java/org/alfresco/repo/cluster/ClusterNodeStopTrackingEvent.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2005-2012 Alfresco Software Limited.
+ *
+ * This file is part of Alfresco
+ *
+ * Alfresco is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Alfresco is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
+ */
+package org.alfresco.repo.cluster;
+
+/**
+ *
+ * @since Odin
+ *
+ */
+public class ClusterNodeStopTrackingEvent extends ClusterEvent
+{
+ private static final long serialVersionUID = -116885933025872510L;
+
+ public static final String NOTIFICATION_TYPE = "Cluster Node Stop Tracking";
+
+ private String nodeId;
+
+ public ClusterNodeStopTrackingEvent(ClusterChecker clusterChecker, String nodeId)
+ {
+ super(clusterChecker);
+ this.nodeId = nodeId;
+
+ }
+
+ public String getNodeId()
+ {
+ return nodeId;
+ }
+
+}
diff --git a/source/java/org/alfresco/repo/cluster/ClusterValidateEvent.java b/source/java/org/alfresco/repo/cluster/ClusterValidateEvent.java
new file mode 100644
index 0000000000..7899bf4d4c
--- /dev/null
+++ b/source/java/org/alfresco/repo/cluster/ClusterValidateEvent.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2005-2012 Alfresco Software Limited.
+ *
+ * This file is part of Alfresco
+ *
+ * Alfresco is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Alfresco is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
+ */
+package org.alfresco.repo.cluster;
+
+/**
+ *
+ * @since Odin
+ *
+ */
+public class ClusterValidateEvent extends ClusterMessageEvent
+{
+ private static final long serialVersionUID = -8091460189522981871L;
+
+ private String ticket;
+
+ public ClusterValidateEvent(ClusterChecker clusterChecker, String ticket, String sourceId, String targetId)
+ {
+ super(clusterChecker, sourceId, targetId);
+ this.ticket = ticket;
+ }
+
+ public String getTicket()
+ {
+ return ticket;
+ }
+}
diff --git a/source/java/org/alfresco/repo/cluster/ClusterValidateResponseEvent.java b/source/java/org/alfresco/repo/cluster/ClusterValidateResponseEvent.java
new file mode 100644
index 0000000000..577032c648
--- /dev/null
+++ b/source/java/org/alfresco/repo/cluster/ClusterValidateResponseEvent.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2005-2012 Alfresco Software Limited.
+ *
+ * This file is part of Alfresco
+ *
+ * Alfresco is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Alfresco is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
+ */
+package org.alfresco.repo.cluster;
+
+/**
+ *
+ * @since Odin
+ *
+ */
+public class ClusterValidateResponseEvent extends ClusterMessageEvent
+{
+ private static final long serialVersionUID = -813956714769487998L;
+
+ private String address;
+ private boolean ticketValid;
+
+ public ClusterValidateResponseEvent(ClusterChecker clusterChecker, String address, String sourceId, String targetId, boolean ticketValid)
+ {
+ super(clusterChecker, sourceId, targetId);
+ this.address = address;
+ this.ticketValid = ticketValid;
+ }
+
+ public String getAddress()
+ {
+ return address;
+ }
+
+ public boolean isTicketValid()
+ {
+ return ticketValid;
+ }
+
+}
diff --git a/source/java/org/alfresco/repo/cluster/HazelcastMessengerFactory.java b/source/java/org/alfresco/repo/cluster/HazelcastMessengerFactory.java
index 91be5a9290..9c2fd0caf1 100644
--- a/source/java/org/alfresco/repo/cluster/HazelcastMessengerFactory.java
+++ b/source/java/org/alfresco/repo/cluster/HazelcastMessengerFactory.java
@@ -38,9 +38,15 @@ import com.hazelcast.core.ITopic;
public class HazelcastMessengerFactory implements MessengerFactory
{
private HazelcastInstance hazelcast;
-
+
@Override
public Messenger createMessenger(String appRegion)
+ {
+ return createMessenger(appRegion, false);
+ }
+
+ @Override
+ public Messenger createMessenger(String appRegion, boolean acceptLocalMessages)
{
ITopic topic = hazelcast.getTopic(appRegion);
String address = hazelcast.getCluster().getLocalMember().getInetSocketAddress().toString();
diff --git a/source/java/org/alfresco/repo/cluster/JGroupsMessengerFactory.java b/source/java/org/alfresco/repo/cluster/JGroupsMessengerFactory.java
index 3e8d49e86d..09133d85d6 100644
--- a/source/java/org/alfresco/repo/cluster/JGroupsMessengerFactory.java
+++ b/source/java/org/alfresco/repo/cluster/JGroupsMessengerFactory.java
@@ -34,9 +34,15 @@ public class JGroupsMessengerFactory implements MessengerFactory
{
@Override
public Messenger createMessenger(String appRegion)
+ {
+ return createMessenger(appRegion, false);
+ }
+
+ @Override
+ public Messenger createMessenger(String appRegion, boolean acceptLocalMessages)
{
ParameterCheck.mandatory("appRegion", appRegion);
- Channel channel = AlfrescoJGroupsChannelFactory.getChannel(appRegion);
+ Channel channel = AlfrescoJGroupsChannelFactory.getChannel(appRegion, acceptLocalMessages);
return new JGroupsMessenger(channel);
}
diff --git a/source/java/org/alfresco/repo/cluster/MessengerFactory.java b/source/java/org/alfresco/repo/cluster/MessengerFactory.java
index 0c6c75a530..e1bc891e8c 100644
--- a/source/java/org/alfresco/repo/cluster/MessengerFactory.java
+++ b/source/java/org/alfresco/repo/cluster/MessengerFactory.java
@@ -36,5 +36,7 @@ public interface MessengerFactory
Messenger createMessenger(String appRegion);
+ Messenger createMessenger(String appRegion, boolean acceptLocalMessages);
+
boolean isClusterActive();
}
diff --git a/source/java/org/alfresco/repo/jgroups/AlfrescoJGroupsChannelFactory.java b/source/java/org/alfresco/repo/jgroups/AlfrescoJGroupsChannelFactory.java
index 3ecc40b033..2636af2c7c 100644
--- a/source/java/org/alfresco/repo/jgroups/AlfrescoJGroupsChannelFactory.java
+++ b/source/java/org/alfresco/repo/jgroups/AlfrescoJGroupsChannelFactory.java
@@ -195,7 +195,7 @@ public class AlfrescoJGroupsChannelFactory extends AbstractLifecycleBean
* @param appRegion the application region identifier.
* @return Returns a channel
*/
- public static Channel getChannel(String appRegion)
+ public static Channel getChannel(String appRegion, boolean acceptLocalMessages)
{
readLock.lock();
try
@@ -223,7 +223,7 @@ public class AlfrescoJGroupsChannelFactory extends AbstractLifecycleBean
return channelProxy;
}
// Get the channel
- Channel channel = getChannelInternal(appRegion);
+ Channel channel = getChannelInternal(appRegion, acceptLocalMessages);
// Proxy the channel
channelProxy = new ChannelProxy(channel);
// Store the channel to the map
@@ -245,7 +245,7 @@ public class AlfrescoJGroupsChannelFactory extends AbstractLifecycleBean
* @return Returns a channel
*/
/* All calls to this are ultimately wrapped in the writeLock. */
- private static /*synchronized*/ Channel getChannelInternal(String appRegion)
+ private static /*synchronized*/ Channel getChannelInternal(String appRegion, boolean acceptLocalMessages)
{
Channel channel;
URL configUrl = null;
@@ -291,7 +291,15 @@ public class AlfrescoJGroupsChannelFactory extends AbstractLifecycleBean
{
String clusterName = clusterNamePrefix + ":" + appRegion;
// Don't accept messages from self
- channel.setOpt(Channel.LOCAL, Boolean.FALSE);
+ if(acceptLocalMessages)
+ {
+ channel.setOpt(Channel.LOCAL, Boolean.TRUE);
+ }
+ else
+ {
+ channel.setOpt(Channel.LOCAL, Boolean.FALSE);
+ }
+
// Connect
channel.connect(clusterName);
// Done
@@ -355,6 +363,9 @@ public class AlfrescoJGroupsChannelFactory extends AbstractLifecycleBean
// Get the old channel
Channel oldChannel = channelProxy.getDelegate();
+
+ Boolean acceptLocalMessages = (Boolean)oldChannel.getOpt(Channel.LOCAL);
+
// Close the old channel.
try
{
@@ -375,7 +386,7 @@ public class AlfrescoJGroupsChannelFactory extends AbstractLifecycleBean
}
// Create the new channel
- Channel newChannel = getChannelInternal(appRegion);
+ Channel newChannel = getChannelInternal(appRegion, acceptLocalMessages.booleanValue());
// Now do the hot-swap
channelProxy.swap(newChannel);
@@ -597,6 +608,7 @@ public class AlfrescoJGroupsChannelFactory extends AbstractLifecycleBean
// Assign the new delegate and carry the listeners over
delegate = channel;
delegate.setReceiver(delegateReceiver);
+ delegate.setOpt(Channel.LOCAL, oldDelegate.getOpt(Channel.LOCAL));
for (ChannelListener delegateChannelListener : delegateChannelListeners)
{
delegate.addChannelListener(delegateChannelListener);
diff --git a/source/java/org/alfresco/repo/jgroups/AlfrescoJGroupsChannelFactoryTest.java b/source/java/org/alfresco/repo/jgroups/AlfrescoJGroupsChannelFactoryTest.java
index 67f16ee5fc..8a5b8731ef 100644
--- a/source/java/org/alfresco/repo/jgroups/AlfrescoJGroupsChannelFactoryTest.java
+++ b/source/java/org/alfresco/repo/jgroups/AlfrescoJGroupsChannelFactoryTest.java
@@ -62,7 +62,7 @@ public class AlfrescoJGroupsChannelFactoryTest extends TestCase
public void testNoCluster() throws Exception
{
- Channel channel = AlfrescoJGroupsChannelFactory.getChannel(appRegion);
+ Channel channel = AlfrescoJGroupsChannelFactory.getChannel(appRegion, false);
stressChannel(channel);
}
@@ -70,7 +70,7 @@ public class AlfrescoJGroupsChannelFactoryTest extends TestCase
{
AlfrescoJGroupsChannelFactory.changeClusterNamePrefix("blah");
AlfrescoJGroupsChannelFactory.rebuildChannels();
- Channel channel = AlfrescoJGroupsChannelFactory.getChannel(appRegion);
+ Channel channel = AlfrescoJGroupsChannelFactory.getChannel(appRegion, false);
stressChannel(channel);
}
@@ -78,11 +78,11 @@ public class AlfrescoJGroupsChannelFactoryTest extends TestCase
{
AlfrescoJGroupsChannelFactory.changeClusterNamePrefix("ONE");
AlfrescoJGroupsChannelFactory.rebuildChannels();
- Channel channel1 = AlfrescoJGroupsChannelFactory.getChannel(appRegion);
+ Channel channel1 = AlfrescoJGroupsChannelFactory.getChannel(appRegion, false);
stressChannel(channel1);
AlfrescoJGroupsChannelFactory.changeClusterNamePrefix("TWO");
AlfrescoJGroupsChannelFactory.rebuildChannels();
- Channel channel2 = AlfrescoJGroupsChannelFactory.getChannel(appRegion);
+ Channel channel2 = AlfrescoJGroupsChannelFactory.getChannel(appRegion, false);
stressChannel(channel1);
assertTrue("Channel reference must be the same", channel1 == channel2);
}