# repository.properties - Alfresco Community repository default configuration
# Source: mirror of https://github.com/Alfresco/alfresco-community-repo.git
# NOTE: this is a Java properties file (not INI), read with '#' comments and
# ${...} placeholder substitution by the Alfresco configuration loader.
# Repository configuration
|
|
|
|
repository.name=Main Repository
|
|
|
|
# Directory configuration
|
|
|
|
dir.root=./alf_data
|
|
|
|
dir.contentstore=${dir.root}/contentstore
|
|
dir.contentstore.deleted=${dir.root}/contentstore.deleted
|
|
|
|
# The location of cached content
|
|
dir.cachedcontent=${dir.root}/cachedcontent
|
|
|
|
dir.auditcontentstore=${dir.root}/audit.contentstore
|
|
|
|
# The value for the maximum permitted size in bytes of all content.
|
|
# No value (or a negative long) will be taken to mean that no limit should be applied.
|
|
# See content-services-context.xml
|
|
system.content.maximumFileSizeLimit=
|
|
|
|
# The location for lucene index files
|
|
dir.indexes=${dir.root}/lucene-indexes
|
|
|
|
# The location for index backups
|
|
dir.indexes.backup=${dir.root}/backup-lucene-indexes
|
|
|
|
# The location for lucene index locks
|
|
dir.indexes.lock=${dir.indexes}/locks
|
|
|
|
#Directory to find external license
|
|
dir.license.external=.
|
|
# Spring resource location of external license files
|
|
location.license.external=file://${dir.license.external}/*.lic
|
|
# Spring resource location of embedded license files
|
|
location.license.embedded=/WEB-INF/alfresco/license/*.lic
|
|
# Spring resource location of license files on shared classpath
|
|
location.license.shared=classpath*:/alfresco/extension/license/*.lic
|
|
|
|
# WebDAV initialization properties
|
|
system.webdav.servlet.enabled=true
|
|
system.webdav.url.path.prefix=
|
|
system.webdav.storeName=${protocols.storeName}
|
|
system.webdav.rootPath=${protocols.rootPath}
|
|
system.webdav.activities.enabled=true
|
|
# File name patterns that trigger rename shuffle detection
|
|
# pattern is used by move - tested against full path after it has been lower cased.
|
|
system.webdav.renameShufflePattern=(.*/\\..*)|(.*[a-f0-9]{8}+$)|(.*\\.tmp$)|(.*\\.wbk$)|(.*\\.bak$)|(.*\\~$)|(.*backup.*\\.do[ct]{1}[x]?[m]?$)
|
|
|
|
|
|
# Is the JBPM Deploy Process Servlet enabled?
|
|
# Default is false. Should not be enabled in production environments as the
|
|
# servlet allows unauthenticated deployment of new workflows.
|
|
system.workflow.deployservlet.enabled=false
|
|
|
|
# Sets the location for the JBPM Configuration File
|
|
system.workflow.jbpm.config.location=classpath:org/alfresco/repo/workflow/jbpm/jbpm.cfg.xml
|
|
|
|
# Determines if JBPM workflow definitions are shown.
|
|
# Default is false. This controls the visibility of JBPM
|
|
# workflow definitions from the getDefinitions and
|
|
# getAllDefinitions WorkflowService API but still allows
|
|
# any in-flight JBPM workflows to be completed.
|
|
system.workflow.engine.jbpm.definitions.visible=false
|
|
|
|
#Determines if Activiti definitions are visible
|
|
system.workflow.engine.activiti.definitions.visible=true
|
|
|
|
# Determines if the JBPM engine is enabled
|
|
system.workflow.engine.jbpm.enabled=true
|
|
|
|
# Determines if the Activiti engine is enabled
|
|
system.workflow.engine.activiti.enabled=true
|
|
|
|
index.subsystem.name=lucene
|
|
|
|
# ######################################### #
|
|
# Index Recovery and Tracking Configuration #
|
|
# ######################################### #
|
|
#
|
|
# Recovery types are:
|
|
# NONE: Ignore
|
|
# VALIDATE: Checks that the first and last transaction for each store is represented in the indexes
|
|
# AUTO: Validates and auto-recovers if validation fails
|
|
# FULL: Full index rebuild, processing all transactions in order. The server is temporarily suspended.
|
|
index.recovery.mode=VALIDATE
|
|
# FULL recovery continues when encountering errors
|
|
index.recovery.stopOnError=false
|
|
index.recovery.maximumPoolSize=5
|
|
# Set the frequency with which the index tracking is triggered.
|
|
# For more information on index tracking in a cluster:
|
|
# http://wiki.alfresco.com/wiki/High_Availability_Configuration_V1.4_to_V2.1#Version_1.4.5.2C_2.1.1_and_later
|
|
# By default, this is effectively never, but can be modified as required.
|
|
# Examples:
|
|
# Never: * * * * * ? 2099
|
|
# Once every five seconds: 0/5 * * * * ?
|
|
# Once every two seconds : 0/2 * * * * ?
|
|
# See http://www.quartz-scheduler.org/docs/tutorials/crontrigger.html
|
|
index.tracking.cronExpression=0/5 * * * * ?
|
|
index.tracking.adm.cronExpression=${index.tracking.cronExpression}
|
|
index.tracking.avm.cronExpression=${index.tracking.cronExpression}
|
|
# Other properties.
|
|
index.tracking.maxTxnDurationMinutes=10
|
|
index.tracking.reindexLagMs=1000
|
|
index.tracking.maxRecordSetSize=1000
|
|
index.tracking.maxTransactionsPerLuceneCommit=100
|
|
index.tracking.disableInTransactionIndexing=false
|
|
# Index tracking information of a certain age is cleaned out by a scheduled job.
|
|
# Any clustered system that has been offline for longer than this period will need to be seeded
|
|
# with a more recent backup of the Lucene indexes or the indexes will have to be fully rebuilt.
|
|
# Use -1 to disable purging. This can be switched on at any stage.
|
|
index.tracking.minRecordPurgeAgeDays=30
|
|
# Unused transactions will be purged in chunks determined by commit time boundaries. 'index.tracking.purgeSize' specifies the size
|
|
# of the chunk (in ms). Default is a couple of hours.
|
|
index.tracking.purgeSize=7200000
|
|
|
|
# Reindexing of missing content is by default 'never' carried out.
|
|
# The cron expression below can be changed to control the timing of this reindexing.
|
|
# Users of Enterprise Alfresco can configure this cron expression via JMX without a server restart.
|
|
# Note that if alfresco.cluster.name is not set, then reindexing will not occur.
|
|
index.reindexMissingContent.cronExpression=* * * * * ? 2099
|
|
|
|
# Change the failure behaviour of the configuration checker
|
|
system.bootstrap.config_check.strict=true
|
|
|
|
#
|
|
# How long should shutdown wait to complete normally before
|
|
# taking stronger action and calling System.exit()
|
|
# in ms, 10,000 is 10 seconds
|
|
#
|
|
shutdown.backstop.timeout=10000
|
|
shutdown.backstop.enabled=false
|
|
|
|
# Server Single User Mode
|
|
# note:
|
|
# only allow named user (note: if blank or not set then will allow all users)
|
|
# assuming maxusers is not set to 0
|
|
#server.singleuseronly.name=admin
|
|
|
|
# Server Max Users - limit number of users with non-expired tickets
|
|
# note:
|
|
# -1 allows any number of users, assuming not in single-user mode
|
|
# 0 prevents further logins, including the ability to enter single-user mode
|
|
server.maxusers=-1
|
|
|
|
# The Cron expression controlling the frequency with which the OpenOffice connection is tested
|
|
openOffice.test.cronExpression=0 * * * * ?
|
|
|
|
#
|
|
# Disable all shared caches (mutable and immutable)
|
|
# These properties are used for diagnostic purposes
|
|
system.cache.disableMutableSharedCaches=false
|
|
system.cache.disableImmutableSharedCaches=false
|
|
|
|
# The maximum capacity of the parent assocs cache (the number of nodes whose parents can be cached)
|
|
system.cache.parentAssocs.maxSize=130000
|
|
|
|
# The average number of parents expected per cache entry. This parameter is multiplied by the above
|
|
# value to compute a limit on the total number of cached parents, which will be proportional to the
|
|
# cache's memory usage. The cache will be pruned when this limit is exceeded to avoid excessive
|
|
# memory usage.
|
|
system.cache.parentAssocs.limitFactor=8
|
|
|
|
#
|
|
# Properties to limit resources spent on individual searches
|
|
#
|
|
# The maximum time spent pruning results
|
|
system.acl.maxPermissionCheckTimeMillis=10000
|
|
# The maximum number of search results to perform permission checks against
|
|
system.acl.maxPermissionChecks=1000
|
|
|
|
# The maximum number of filefolder list results
|
|
system.filefolderservice.defaultListMaxResults=5000
|
|
|
|
# Properties to control read permission evaluation for acegi
|
|
system.readpermissions.optimise=true
|
|
system.readpermissions.bulkfetchsize=1000
|
|
|
|
#
|
|
# Manually control how the system handles maximum string lengths.
|
|
# Any zero or negative value is ignored.
|
|
# Only change this after consulting support or reading the appropriate Javadocs for
|
|
# org.alfresco.repo.domain.schema.SchemaBootstrap for V2.1.2
|
|
system.maximumStringLength=-1
|
|
|
|
#
|
|
# Limit hibernate session size by trying to amalgamate events for the L2 session invalidation
|
|
# - hibernate works as is up to this size
|
|
# - after the limit is hit events that can be grouped invalidate the L2 cache by type and not instance
|
|
# events may not group if there are post action listener registered (this is not the case with the default distribution)
|
|
system.hibernateMaxExecutions=20000
|
|
|
|
#
|
|
# Determine if modification timestamp propagation from child to parent nodes is respected or not.
|
|
# Even if 'true', the functionality is only supported for child associations that declare the
|
|
# 'propagateTimestamps' element in the dictionary definition.
|
|
system.enableTimestampPropagation=true
|
|
|
|
#
|
|
# Decide if content should be removed from the system immediately after being orphaned.
|
|
# Do not change this unless you have examined the impact it has on your backup procedures.
|
|
system.content.eagerOrphanCleanup=false
|
|
# The number of days to keep orphaned content in the content stores.
|
|
# This has no effect on the 'deleted' content stores, which are not automatically emptied.
|
|
system.content.orphanProtectDays=14
|
|
# The action to take when a store or stores fails to delete orphaned content
|
|
# IGNORE: Just log a warning. The binary remains and the record is expunged
|
|
# KEEP_URL: Log a warning and create a URL entry with orphan time 0. It won't be processed or removed.
|
|
system.content.deletionFailureAction=IGNORE
|
|
# The CRON expression to trigger the deletion of resources associated with orphaned content.
|
|
system.content.orphanCleanup.cronExpression=0 0 4 * * ?
|
|
# The CRON expression to trigger content URL conversion. This process is not intensive and can
|
|
# be triggered on a live system. Similarly, it can be triggered using JMX on a dedicated machine.
|
|
system.content.contentUrlConverter.cronExpression=* * * * * ? 2099
|
|
system.content.contentUrlConverter.threadCount=2
|
|
system.content.contentUrlConverter.batchSize=500
|
|
system.content.contentUrlConverter.runAsScheduledJob=false
|
|
|
|
# #################### #
|
|
# Lucene configuration #
|
|
# #################### #
|
|
#
|
|
# Millisecond threshold for text transformations
|
|
# Slower transformers will force the text extraction to be asynchronous
|
|
#
|
|
lucene.maxAtomicTransformationTime=100
|
|
#
|
|
# The maximum number of clauses that are allowed in a lucene query
|
|
#
|
|
lucene.query.maxClauses=10000
|
|
#
|
|
# The size of the queue of nodes waiting for index
|
|
# Events are generated as nodes are changed; this is the maximum size of the queue used to coalesce events
|
|
# When this size is reached the lists of nodes will be indexed
|
|
#
|
|
# http://issues.alfresco.com/browse/AR-1280: Setting this high is the workaround as of 1.4.3.
|
|
#
|
|
lucene.indexer.batchSize=1000000
|
|
fts.indexer.batchSize=1000
|
|
#
|
|
# Index cache sizes
|
|
#
|
|
lucene.indexer.cacheEnabled=true
|
|
lucene.indexer.maxDocIdCacheSize=100000
|
|
lucene.indexer.maxDocumentCacheSize=100
|
|
lucene.indexer.maxIsCategoryCacheSize=-1
|
|
lucene.indexer.maxLinkAspectCacheSize=10000
|
|
lucene.indexer.maxParentCacheSize=100000
|
|
lucene.indexer.maxPathCacheSize=100000
|
|
lucene.indexer.maxTypeCacheSize=10000
|
|
#
|
|
# Properties for merge (note this does not affect the final index segment which will be optimised)
|
|
# Max merge docs only applies to the merge process not the resulting index which will be optimised.
|
|
#
|
|
lucene.indexer.mergerMaxMergeDocs=1000000
|
|
lucene.indexer.mergerMergeFactor=5
|
|
lucene.indexer.mergerMaxBufferedDocs=-1
|
|
lucene.indexer.mergerRamBufferSizeMb=16
|
|
#
|
|
# Properties for delta indexes (note this does not affect the final index segment which will be optimised)
|
|
# Max merge docs only applies to the index building process not the resulting index which will be optimised.
|
|
#
|
|
lucene.indexer.writerMaxMergeDocs=1000000
|
|
lucene.indexer.writerMergeFactor=5
|
|
lucene.indexer.writerMaxBufferedDocs=-1
|
|
lucene.indexer.writerRamBufferSizeMb=16
|
|
#
|
|
# Target number of indexes and deltas in the overall index and what index size to merge in memory
|
|
#
|
|
lucene.indexer.mergerTargetIndexCount=8
|
|
lucene.indexer.mergerTargetOverlayCount=5
|
|
lucene.indexer.mergerTargetOverlaysBlockingFactor=2
|
|
lucene.indexer.maxDocsForInMemoryMerge=60000
|
|
lucene.indexer.maxRamInMbForInMemoryMerge=16
|
|
lucene.indexer.maxDocsForInMemoryIndex=60000
|
|
lucene.indexer.maxRamInMbForInMemoryIndex=16
|
|
#
|
|
# Other lucene properties
|
|
#
|
|
lucene.indexer.termIndexInterval=128
|
|
lucene.indexer.useNioMemoryMapping=true
|
|
# over-ride to false for pre 3.0 behaviour
|
|
lucene.indexer.postSortDateTime=true
|
|
lucene.indexer.defaultMLIndexAnalysisMode=EXACT_LANGUAGE_AND_ALL
|
|
lucene.indexer.defaultMLSearchAnalysisMode=EXACT_LANGUAGE_AND_ALL
|
|
#
|
|
# The number of terms from a document that will be indexed
|
|
#
|
|
lucene.indexer.maxFieldLength=10000
|
|
|
|
# Should we use a 'fair' locking policy, giving queue-like access behaviour to
|
|
# the indexes and avoiding starvation of waiting writers? Set to false on old
|
|
# JVMs where this appears to cause deadlock
|
|
lucene.indexer.fairLocking=true
|
|
|
|
#
|
|
# Index locks (mostly deprecated and will be tidied up with the next lucene upgrade)
|
|
#
|
|
lucene.write.lock.timeout=10000
|
|
lucene.commit.lock.timeout=100000
|
|
lucene.lock.poll.interval=100
|
|
|
|
lucene.indexer.useInMemorySort=true
|
|
lucene.indexer.maxRawResultSetSizeForInMemorySort=1000
|
|
lucene.indexer.contentIndexingEnabled=true
|
|
|
|
index.backup.cronExpression=0 0 3 * * ?
|
|
|
|
lucene.defaultAnalyserResourceBundleName=alfresco/model/dataTypeAnalyzers
|
|
|
|
|
|
# When transforming archive files (.zip etc) into text representations (such as
|
|
# for full text indexing), should the files within the archive be processed too?
|
|
# If enabled, transformation takes longer, but searches of the files find more.
|
|
transformer.Archive.includeContents=false
|
|
|
|
# Database configuration
|
|
db.schema.stopAfterSchemaBootstrap=false
|
|
db.schema.update=true
|
|
db.schema.update.lockRetryCount=24
|
|
db.schema.update.lockRetryWaitSeconds=5
|
|
db.driver=org.gjt.mm.mysql.Driver
|
|
db.name=alfresco
|
|
db.url=jdbc:mysql:///${db.name}
|
|
db.username=alfresco
|
|
db.password=alfresco
|
|
db.pool.initial=10
|
|
db.pool.max=40
|
|
db.txn.isolation=-1
|
|
db.pool.statements.enable=true
|
|
db.pool.statements.max=40
|
|
db.pool.min=0
|
|
db.pool.idle=-1
|
|
db.pool.wait.max=-1
|
|
db.pool.validate.query=
|
|
db.pool.evict.interval=-1
|
|
db.pool.evict.idle.min=1800000
|
|
db.pool.validate.borrow=true
|
|
db.pool.validate.return=false
|
|
db.pool.evict.validate=false
|
|
#
|
|
db.pool.abandoned.detect=false
|
|
db.pool.abandoned.time=300
|
|
#
|
|
# db.pool.abandoned.log=true (logAbandoned) adds overhead (http://commons.apache.org/dbcp/configuration.html)
|
|
# and also requires db.pool.abandoned.detect=true (removeAbandoned)
|
|
#
|
|
db.pool.abandoned.log=false
|
|
|
|
|
|
# Audit configuration
|
|
audit.enabled=true
|
|
audit.tagging.enabled=true
|
|
audit.alfresco-access.enabled=false
|
|
audit.alfresco-access.sub-actions.enabled=false
|
|
audit.cmischangelog.enabled=false
|
|
audit.dod5015.enabled=false
|
|
# Setting this flag to true will force startup failure when invalid audit configurations are detected
|
|
audit.config.strict=false
|
|
# Audit map filter for AccessAuditor - restricts recorded events to user driven events
|
|
audit.filter.alfresco-access.default.enabled=true
|
|
audit.filter.alfresco-access.transaction.user=~System;~null;.*
|
|
audit.filter.alfresco-access.transaction.type=cm:folder;cm:content;st:site
|
|
audit.filter.alfresco-access.transaction.path=~/sys:archivedItem;~/ver:;.*
|
|
|
|
|
|
# System Configuration
|
|
system.store=system://system
|
|
system.descriptor.childname=sys:descriptor
|
|
system.descriptor.current.childname=sys:descriptor-current
|
|
|
|
# User config
|
|
alfresco_user_store.store=user://alfrescoUserStore
|
|
alfresco_user_store.system_container.childname=sys:system
|
|
alfresco_user_store.user_container.childname=sys:people
|
|
|
|
# note: default admin username - should not be changed after installation
|
|
alfresco_user_store.adminusername=admin
|
|
|
|
# Initial password - editing this will not have any effect once the repository is installed
|
|
alfresco_user_store.adminpassword=209c6174da490caeb422f3fa5a7ae634
|
|
|
|
# note: default guest username - should not be changed after installation
|
|
alfresco_user_store.guestusername=guest
|
|
|
|
# Used to move home folders to a new location
|
|
home_folder_provider_synchronizer.enabled=false
|
|
home_folder_provider_synchronizer.override_provider=
|
|
home_folder_provider_synchronizer.keep_empty_parents=false
|
|
|
|
# Spaces Archive Configuration
|
|
spaces.archive.store=archive://SpacesStore
|
|
|
|
# Spaces Configuration
|
|
spaces.store=workspace://SpacesStore
|
|
spaces.company_home.childname=app:company_home
|
|
spaces.guest_home.childname=app:guest_home
|
|
spaces.dictionary.childname=app:dictionary
|
|
spaces.templates.childname=app:space_templates
|
|
spaces.imap_attachments.childname=cm:Imap Attachments
|
|
spaces.imapConfig.childname=app:imap_configs
|
|
spaces.imap_templates.childname=app:imap_templates
|
|
spaces.scheduled_actions.childname=cm:Scheduled Actions
|
|
spaces.emailActions.childname=app:email_actions
|
|
spaces.searchAction.childname=cm:search
|
|
spaces.templates.content.childname=app:content_templates
|
|
spaces.templates.email.childname=app:email_templates
|
|
spaces.templates.email.invite1.childname=app:invite_email_templates
|
|
spaces.templates.email.notify.childname=app:notify_email_templates
|
|
spaces.templates.email.following.childname=app:following
|
|
spaces.templates.rss.childname=app:rss_templates
|
|
spaces.savedsearches.childname=app:saved_searches
|
|
spaces.scripts.childname=app:scripts
|
|
spaces.wcm.childname=app:wcm
|
|
spaces.wcm_content_forms.childname=app:wcm_forms
|
|
spaces.content_forms.childname=app:forms
|
|
spaces.user_homes.childname=app:user_homes
|
|
spaces.user_homes.regex.key=userName
|
|
spaces.user_homes.regex.pattern=
|
|
spaces.user_homes.regex.group_order=
|
|
spaces.sites.childname=st:sites
|
|
spaces.templates.email.invite.childname=cm:invite
|
|
spaces.templates.email.activities.childname=cm:activities
|
|
spaces.rendition.rendering_actions.childname=app:rendering_actions
|
|
spaces.replication.replication_actions.childname=app:replication_actions
|
|
spaces.wcm_deployed.childname=cm:wcm_deployed
|
|
spaces.transfers.childname=app:transfers
|
|
spaces.transfer_groups.childname=app:transfer_groups
|
|
spaces.transfer_temp.childname=app:temp
|
|
spaces.inbound_transfer_records.childname=app:inbound_transfer_records
|
|
spaces.webscripts.childname=cm:webscripts
|
|
spaces.extension_webscripts.childname=cm:extensionwebscripts
|
|
spaces.models.childname=app:models
|
|
spaces.workflow.definitions.childname=app:workflow_defs
|
|
spaces.publishing.root.childname=app:publishing_root
|
|
spaces.templates.email.workflowemailnotification.childname=cm:workflownotification
|
|
spaces.nodetemplates.childname=app:node_templates
|
|
|
|
# ADM VersionStore Configuration
|
|
version.store.enableAutoVersioning=true
|
|
version.store.deprecated.lightWeightVersionStore=workspace://lightWeightVersionStore
|
|
version.store.version2Store=workspace://version2Store
|
|
|
|
version.store.migrateVersionStore.threadCount=3
|
|
version.store.migrateVersionStore.batchSize=1
|
|
|
|
version.store.migrateCleanupJob.threadCount=3
|
|
version.store.migrateCleanupJob.batchSize=1
|
|
|
|
|
|
# WARNING: For non-production testing only !!! Do not change (to avoid version store issues, including possible mismatch). Should be false since lightWeightVersionStore is deprecated.
|
|
version.store.onlyUseDeprecatedV1=false
|
|
|
|
# The CRON expression to trigger migration of the version store from V1 (2.x) to V2 (3.x)
|
|
# By default, this is effectively 'never' but can be modified as required.
|
|
# Examples:
|
|
# Never: * * * * * ? 2099
|
|
# Once every thirty minutes: 0 0/30 * * * ?
|
|
# See http://www.quartz-scheduler.org/docs/tutorials/crontrigger.html
|
|
version.store.migrateVersionStore.cronExpression=* * * * * ? 2099
|
|
# Limit number of version histories to migrate per job cycle, where -1 = unlimited. Note: if limit > 0 then need to schedule job to run regularly in order to complete the migration.
|
|
version.store.migrateVersionStore.limitPerJobCycle=-1
|
|
version.store.migrateVersionStore.runAsScheduledJob=false
|
|
|
|
# Optional Comparator<Version> class name to sort versions.
|
|
# Set to: org.alfresco.repo.version.common.VersionLabelComparator
|
|
# if upgrading from a version that used unordered sequences in a cluster.
|
|
version.store.versionComparatorClass=
|
|
|
|
# Folders for storing people
|
|
system.system_container.childname=sys:system
|
|
system.people_container.childname=sys:people
|
|
system.authorities_container.childname=sys:authorities
|
|
system.zones_container.childname=sys:zones
|
|
|
|
# Folders for storing workflow related info
|
|
system.workflow_container.childname=sys:workflow
|
|
|
|
# Folder for storing shared remote credentials
|
|
system.remote_credentials_container.childname=sys:remote_credentials
|
|
|
|
# Folder for storing syncset definitions
|
|
system.syncset_definition_container.childname=sys:syncset_definitions
|
|
|
|
# Folder for storing download archives
|
|
system.downloads_container.childname=sys:downloads
|
|
|
|
# Are user names case sensitive?
|
|
user.name.caseSensitive=false
|
|
domain.name.caseSensitive=false
|
|
domain.separator=
|
|
|
|
# AVM Specific properties.
|
|
avm.remote.idlestream.timeout=30000
|
|
|
|
#Format caption extracted from the XML Schema.
|
|
xforms.formatCaption=true
|
|
|
|
# ECM content usages/quotas
|
|
system.usages.enabled=false
|
|
system.usages.clearBatchSize=50
|
|
system.usages.updateBatchSize=50
|
|
|
|
# Repository endpoint - used by Activity Service
|
|
repo.remote.endpoint=/service
|
|
|
|
# Create home folders as people are created (true) or create them lazily (false)
|
|
home.folder.creation.eager=true
|
|
|
|
# Should we consider zero byte content to be the same as no content when firing
|
|
# content update policies? Prevents 'premature' firing of inbound content rules
|
|
# for some clients such as Mac OS X Finder
|
|
policy.content.update.ignoreEmpty=true
|
|
|
|
# The well known RMI registry port and external host name published in the stubs
|
|
# is defined in the alfresco-shared.properties file
|
|
#
|
|
# alfresco.rmi.services.port=50500
|
|
|
|
# Default value of alfresco.rmi.services.host is 0.0.0.0 which means 'listen on all adapters'.
|
|
# This allows connections to JMX both remotely and locally.
|
|
#
|
|
alfresco.rmi.services.host=0.0.0.0
|
|
|
|
# If the RMI address is in-use, how many retries should be done before aborting
|
|
# Default value of alfresco.rmi.services.retries is 0 which means 'Don't retry if the address is in-use'
|
|
alfresco.rmi.services.retries=4
|
|
|
|
# RMI service ports for the individual services.
|
|
# These eight services are available remotely.
|
|
#
|
|
# Assign individual ports for each service for best performance
|
|
# or run several services on the same port, you can even run everything on 50500 if
|
|
# running through a firewall.
|
|
#
|
|
# Specify 0 to use a random unused port.
|
|
#
|
|
avm.rmi.service.port=50501
|
|
avmsync.rmi.service.port=50502
|
|
authentication.rmi.service.port=50504
|
|
repo.rmi.service.port=50505
|
|
action.rmi.service.port=50506
|
|
deployment.rmi.service.port=50507
|
|
monitor.rmi.service.port=50508
|
|
|
|
#
|
|
# enable or disable individual RMI services
|
|
#
|
|
avm.rmi.service.enabled=true
|
|
avmsync.rmi.service.enabled=true
|
|
authentication.rmi.service.enabled=true
|
|
repo.rmi.service.enabled=true
|
|
action.rmi.service.enabled=true
|
|
deployment.rmi.service.enabled=true
|
|
monitor.rmi.service.enabled=true
|
|
|
|
|
|
# Should the Mbean server bind to an existing server. Set to true for most application servers.
|
|
# false for WebSphere clusters.
|
|
mbean.server.locateExistingServerIfPossible=true
|
|
|
|
# External executable locations
|
|
ooo.exe=soffice
|
|
ooo.user=${dir.root}/oouser
|
|
img.root=./ImageMagick
|
|
img.dyn=${img.root}/lib
|
|
img.exe=${img.root}/bin/convert
|
|
swf.exe=./bin/pdf2swf
|
|
swf.languagedir=.
|
|
|
|
# Thumbnail Service
|
|
system.thumbnail.generate=true
|
|
|
|
# Default thumbnail limits
|
|
# When creating thumbnails, only use the first pageLimit pages
|
|
system.thumbnail.definition.default.timeoutMs=-1
|
|
system.thumbnail.definition.default.readLimitTimeMs=-1
|
|
system.thumbnail.definition.default.maxSourceSizeKBytes=-1
|
|
system.thumbnail.definition.default.readLimitKBytes=-1
|
|
system.thumbnail.definition.default.pageLimit=1
|
|
system.thumbnail.definition.default.maxPages=-1
|
|
|
|
# Max mimetype sizes to create thumbnail icons
|
|
system.thumbnail.mimetype.maxSourceSizeKBytes.pdf=-1
|
|
system.thumbnail.mimetype.maxSourceSizeKBytes.txt=-1
|
|
system.thumbnail.mimetype.maxSourceSizeKBytes.docx=-1
|
|
system.thumbnail.mimetype.maxSourceSizeKBytes.xlsx=-1
|
|
system.thumbnail.mimetype.maxSourceSizeKBytes.pptx=-1
|
|
system.thumbnail.mimetype.maxSourceSizeKBytes.odt=-1
|
|
system.thumbnail.mimetype.maxSourceSizeKBytes.ods=-1
|
|
system.thumbnail.mimetype.maxSourceSizeKBytes.odp=-1
|
|
|
|
# Configuration for handling of failing thumbnails.
|
|
# See NodeEligibleForRethumbnailingEvaluator's javadoc for details.
|
|
#
|
|
# Retry periods limit the frequency with which the repository will attempt to create Share thumbnails
|
|
# for content nodes which have previously failed in their thumbnail attempts.
|
|
# These periods are in seconds.
|
|
#
|
|
# 604800s = 60s * 60m * 24h * 7d = 1 week
|
|
system.thumbnail.retryPeriod=60
|
|
system.thumbnail.retryCount=2
|
|
system.thumbnail.quietPeriod=604800
|
|
system.thumbnail.quietPeriodRetriesEnabled=true
|
|
|
|
# Content Transformers
|
|
content.transformer.failover=true
|
|
|
|
# Base setting for all transformers (2 min timeout)
|
|
content.transformer.default.timeoutMs=120000
|
|
content.transformer.default.readLimitTimeMs=-1
|
|
content.transformer.default.maxSourceSizeKBytes=-1
|
|
content.transformer.default.readLimitKBytes=-1
|
|
content.transformer.default.pageLimit=-1
|
|
content.transformer.default.maxPages=-1
|
|
|
|
# text -> pdf using PdfBox (text/csv, text/xml) 10M takes about 12 seconds
|
|
content.transformer.PdfBox.TextToPdf.maxSourceSizeKBytes=10240
|
|
|
|
# pdf -> swf using Pdf2swf 2M takes about 60 seconds.
|
|
content.transformer.Pdf2swf.maxSourceSizeKBytes=5120
|
|
|
|
# txt -> pdf -> swf 5M (pdf is about the same size as the txt)
|
|
# Need this limit as transformer.PdfBox txt -> pdf is allowed up to 10M
|
|
content.transformer.complex.Text.Pdf2swf.maxSourceSizeKBytes=5120
|
|
|
|
# Transforms to PDF
|
|
# =================
|
|
content.transformer.OpenOffice.mimeTypeLimits.txt.pdf.maxSourceSizeKBytes=5120
|
|
content.transformer.OpenOffice.mimeTypeLimits.doc.pdf.maxSourceSizeKBytes=10240
|
|
content.transformer.OpenOffice.mimeTypeLimits.docx.pdf.maxSourceSizeKBytes=768
|
|
content.transformer.OpenOffice.mimeTypeLimits.docm.pdf.maxSourceSizeKBytes=768
|
|
content.transformer.OpenOffice.mimeTypeLimits.dotx.pdf.maxSourceSizeKBytes=768
|
|
content.transformer.OpenOffice.mimeTypeLimits.dotm.pdf.maxSourceSizeKBytes=768
|
|
content.transformer.OpenOffice.mimeTypeLimits.ppt.pdf.maxSourceSizeKBytes=6144
|
|
content.transformer.OpenOffice.mimeTypeLimits.pptx.pdf.maxSourceSizeKBytes=4096
|
|
content.transformer.OpenOffice.mimeTypeLimits.pptm.pdf.maxSourceSizeKBytes=4096
|
|
content.transformer.OpenOffice.mimeTypeLimits.ppsx.pdf.maxSourceSizeKBytes=4096
|
|
content.transformer.OpenOffice.mimeTypeLimits.ppsm.pdf.maxSourceSizeKBytes=4096
|
|
content.transformer.OpenOffice.mimeTypeLimits.potx.pdf.maxSourceSizeKBytes=4096
|
|
content.transformer.OpenOffice.mimeTypeLimits.potm.pdf.maxSourceSizeKBytes=4096
|
|
content.transformer.OpenOffice.mimeTypeLimits.ppam.pdf.maxSourceSizeKBytes=4096
|
|
content.transformer.OpenOffice.mimeTypeLimits.sldx.pdf.maxSourceSizeKBytes=4096
|
|
content.transformer.OpenOffice.mimeTypeLimits.sldm.pdf.maxSourceSizeKBytes=4096
|
|
content.transformer.OpenOffice.mimeTypeLimits.vsd.pdf.maxSourceSizeKBytes=4096
|
|
content.transformer.OpenOffice.mimeTypeLimits.xls.pdf.maxSourceSizeKBytes=10240
|
|
content.transformer.OpenOffice.mimeTypeLimits.xlsx.pdf.maxSourceSizeKBytes=1536
|
|
content.transformer.OpenOffice.mimeTypeLimits.xltx.pdf.maxSourceSizeKBytes=1536
|
|
content.transformer.OpenOffice.mimeTypeLimits.xlsm.pdf.maxSourceSizeKBytes=1536
|
|
content.transformer.OpenOffice.mimeTypeLimits.xltm.pdf.maxSourceSizeKBytes=1536
|
|
content.transformer.OpenOffice.mimeTypeLimits.xlam.pdf.maxSourceSizeKBytes=1536
|
|
content.transformer.OpenOffice.mimeTypeLimits.xlsb.pdf.maxSourceSizeKBytes=1536
|
|
|
|
# Transforms to SWF
|
|
# =================
|
|
content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.txt.swf.maxSourceSizeKBytes=5120
|
|
content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.doc.swf.maxSourceSizeKBytes=1536
|
|
content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.docx.swf.maxSourceSizeKBytes=256
|
|
content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.docm.swf.maxSourceSizeKBytes=256
|
|
content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.dotx.swf.maxSourceSizeKBytes=256
|
|
content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.dotm.swf.maxSourceSizeKBytes=256
|
|
content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.ppt.swf.maxSourceSizeKBytes=6144
|
|
content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.pptx.swf.maxSourceSizeKBytes=4096
|
|
content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.pptm.swf.maxSourceSizeKBytes=4096
|
|
content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.ppsx.swf.maxSourceSizeKBytes=4096
|
|
content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.ppsm.swf.maxSourceSizeKBytes=4096
|
|
content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.potx.swf.maxSourceSizeKBytes=4096
|
|
content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.potm.swf.maxSourceSizeKBytes=4096
|
|
content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.ppam.swf.maxSourceSizeKBytes=4096
|
|
content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.sldx.swf.maxSourceSizeKBytes=4096
|
|
content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.sldm.swf.maxSourceSizeKBytes=4096
|
|
content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.vsd.swf.maxSourceSizeKBytes=4096
|
|
content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.xls.swf.maxSourceSizeKBytes=1024
|
|
content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.xlsx.swf.maxSourceSizeKBytes=1024
|
|
content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.xltx.swf.maxSourceSizeKBytes=1024
|
|
content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.xlsm.swf.maxSourceSizeKBytes=1024
|
|
content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.xltm.swf.maxSourceSizeKBytes=1024
|
|
content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.xlam.swf.maxSourceSizeKBytes=1024
|
|
content.transformer.OpenOffice.Pdf2swf.mimeTypeLimits.xlsb.swf.maxSourceSizeKBytes=1024
|
|
|
|
|
|
# OpenOffice transforms to TXT (generally there are better options such as Tika)
|
|
# =============================
|
|
content.transformer.OpenOffice.PdfBox.mimeTypeLimits.xlsb.txt.maxSourceSizeKBytes=1024
|
|
content.transformer.OpenOffice.PdfBox.mimeTypeLimits.potm.txt.maxSourceSizeKBytes=1024
|
|
|
|
|
|
# Property to enable upgrade from 2.1-A
|
|
V2.1-A.fixes.to.schema=0
|
|
#V2.1-A.fixes.to.schema=82
|
|
|
|
# The default authentication chain
|
|
authentication.chain=alfrescoNtlm1:alfrescoNtlm
|
|
|
|
# Do authentication tickets expire or live for ever?
|
|
authentication.ticket.ticketsExpire=true
|
|
|
|
# If ticketsExpire is true, how should they expire?
|
|
# Valid values are: AFTER_INACTIVITY, AFTER_FIXED_TIME, DO_NOT_EXPIRE
|
|
# The default is AFTER_FIXED_TIME
|
|
authentication.ticket.expiryMode=AFTER_INACTIVITY
|
|
|
|
# If authentication.ticket.ticketsExpire is true and
|
|
# authentication.ticket.expiryMode is AFTER_FIXED_TIME or AFTER_INACTIVITY,
|
|
# this controls the minimum period for which tickets are valid.
|
|
# The default is PT1H for one hour.
|
|
authentication.ticket.validDuration=PT1H
|
|
|
|
# Default NFS user mappings (empty). Note these users will be able to
|
|
# authenticate through NFS without password so ensure NFS port is secure before
|
|
# enabling and adding mappings
|
|
nfs.user.mappings=
|
|
nfs.user.mappings.default.uid=0
|
|
nfs.user.mappings.default.gid=0
|
|
|
|
#Example NFS user mappings
|
|
#nfs.user.mappings=admin,user1
|
|
#nfs.user.mappings.value.admin.uid=0
|
|
#nfs.user.mappings.value.admin.gid=0
|
|
#nfs.user.mappings.value.user1.uid=500
|
|
#nfs.user.mappings.value.user1.gid=500
|
|
|
|
# Default root path for protocols
|
|
protocols.storeName=${spaces.store}
|
|
protocols.rootPath=/${spaces.company_home.childname}
|
|
|
|
# OpenCMIS
|
|
opencmis.connector.default.store=${spaces.store}
|
|
opencmis.connector.default.rootPath=/${spaces.company_home.childname}
|
|
opencmis.connector.default.typesDefaultMaxItems=500
|
|
opencmis.connector.default.typesDefaultDepth=-1
|
|
opencmis.connector.default.objectsDefaultMaxItems=10000
|
|
opencmis.connector.default.objectsDefaultDepth=100
|
|
opencmis.connector.default.openHttpSession=false
|
|
opencmis.activities.enabled=true
|
|
|
|
# IMAP
|
|
imap.server.enabled=false
|
|
imap.server.port=143
|
|
imap.server.attachments.extraction.enabled=true
|
|
|
|
# Default IMAP mount points
|
|
imap.config.home.store=${spaces.store}
|
|
imap.config.home.rootPath=/${spaces.company_home.childname}
|
|
imap.config.home.folderPath=Imap Home
|
|
imap.config.server.mountPoints=AlfrescoIMAP
|
|
imap.config.server.mountPoints.default.mountPointName=IMAP
|
|
imap.config.server.mountPoints.default.modeName=ARCHIVE
|
|
imap.config.server.mountPoints.default.store=${spaces.store}
|
|
imap.config.server.mountPoints.default.rootPath=${protocols.rootPath}
|
|
imap.config.server.mountPoints.value.AlfrescoIMAP.mountPointName=Alfresco IMAP
|
|
imap.config.server.mountPoints.value.AlfrescoIMAP.modeName=MIXED
|
|
|
|
#Imap extraction settings
|
|
#imap.attachments.mode:
|
|
# SEPARATE -- All attachments for each email will be extracted to separate folder.
|
|
# COMMON -- All attachments for all emails will be extracted to one folder.
|
|
# SAME -- Attachments will be extracted to the same folder where email lies.
|
|
# NOTE: duplicate of imap.server.attachments.extraction.enabled defined earlier (same value); commented out to avoid a duplicate key
#imap.server.attachments.extraction.enabled=true
|
|
imap.attachments.mode=SEPARATE
|
|
imap.attachments.folder.store=${spaces.store}
|
|
imap.attachments.folder.rootPath=/${spaces.company_home.childname}
|
|
imap.attachments.folder.folderPath=${spaces.imap_attachments.childname}
|
|
|
|
# Activities Feed - refer to subsystem
|
|
|
|
# Feed max ID range to limit maximum number of entries
|
|
activities.feed.max.idRange=1000000
|
|
# Feed max size (number of entries)
|
|
activities.feed.max.size=100
|
|
# Feed max age (eg. 44640 mins => 31 days)
|
|
activities.feed.max.ageMins=44640
|
|
|
|
activities.feed.generator.jsonFormatOnly=true
|
|
|
|
activities.feedNotifier.batchSize=200
|
|
activities.feedNotifier.numThreads=2
|
|
|
|
# Subsystem unit test values. Will not have any effect on production servers
|
|
subsystems.test.beanProp.default.longProperty=123456789123456789
|
|
subsystems.test.beanProp.default.anotherStringProperty=Global Default
|
|
subsystems.test.beanProp=inst1,inst2,inst3
|
|
subsystems.test.beanProp.value.inst2.boolProperty=true
|
|
subsystems.test.beanProp.value.inst3.anotherStringProperty=Global Instance Default
|
|
subsystems.test.simpleProp2=true
|
|
subsystems.test.simpleProp3=Global Default3
|
|
|
|
# Default Async Action Thread Pool
|
|
default.async.action.threadPriority=1
|
|
default.async.action.corePoolSize=8
|
|
default.async.action.maximumPoolSize=20
|
|
|
|
# Deployment Service
|
|
deployment.service.numberOfSendingThreads=5
|
|
deployment.service.corePoolSize=2
|
|
deployment.service.maximumPoolSize=3
|
|
deployment.service.threadPriority=5
|
|
# How long to wait in mS before refreshing a target lock - detects shutdown servers
|
|
deployment.service.targetLockRefreshTime=60000
|
|
# How long to wait in mS from the last communication before deciding that deployment has failed, possibly
|
|
# the destination is no longer available?
|
|
deployment.service.targetLockTimeout=3600000
|
|
|
|
#Invitation Service
|
|
# Should send emails as part of invitation process.
|
|
notification.email.siteinvite=true
|
|
|
|
# Transfer Service
|
|
transferservice.receiver.enabled=true
|
|
transferservice.receiver.stagingDir=${java.io.tmpdir}/alfresco-transfer-staging
|
|
#
|
|
# How long to wait in mS before refreshing a transfer lock - detects shutdown servers
|
|
# Default 1 minute.
|
|
transferservice.receiver.lockRefreshTime=60000
|
|
#
|
|
# How many times to attempt retry the transfer lock
|
|
transferservice.receiver.lockRetryCount=3
|
|
# How long to wait, in mS, before retrying the transfer lock
|
|
transferservice.receiver.lockRetryWait=100
|
|
#
|
|
# How long to wait, in mS, since the last contact from the client before
|
|
# timing out a transfer. Needs to be long enough to cope with network delays and "thinking
|
|
# time" for both source and destination. Default 5 minutes.
|
|
transferservice.receiver.lockTimeOut=300000
|
|
|
|
# Max time allowed for WCM folder rename operation issued by external clients (CIFS, FTP)
|
|
wcm.rename.max.time.milliseconds=2000
|
|
|
|
# DM Receiver Properties
|
|
#
|
|
# The name of the DM Receiver target - you deploy to this target name
|
|
deployment.dmr.name=alfresco
|
|
|
|
# consolidate staging, author and workflow sandboxes to one
|
|
deployment.dmr.consolidate=true
|
|
|
|
# The name of the Alfresco receiver target
|
|
deployment.avm.name=avm
|
|
|
|
# Where should the root of the web project be stored, by default /www/avm_webapps
|
|
deployment.avm.rootPath=/www/avm_webapps
|
|
|
|
# Pattern for live stores deployment by the alfresco receiver
|
|
deployment.avm.storeNamePattern=%storeName%-live
|
|
|
|
# Built in deployment receiver properties for the default
|
|
# filesystem receiver
|
|
|
|
# filesystem receiver configuration
|
|
deployment.filesystem.rootdir=./wcm
|
|
deployment.filesystem.datadir=${deployment.filesystem.rootdir}/depdata
|
|
deployment.filesystem.logdir=${deployment.filesystem.rootdir}/deplog
|
|
deployment.filesystem.metadatadir=${deployment.filesystem.rootdir}/depmetadata
|
|
|
|
deployment.filesystem.autofix=true
|
|
deployment.filesystem.errorOnOverwrite=false
|
|
|
|
# default filesystem target configuration
|
|
deployment.filesystem.default.rootdir=./www
|
|
deployment.filesystem.default.name=filesystem
|
|
deployment.filesystem.default.metadatadir=${deployment.filesystem.metadatadir}/default
|
|
|
|
# OrphanReaper
|
|
orphanReaper.lockRefreshTime=60000
|
|
orphanReaper.lockTimeOut=3600000
|
|
|
|
|
|
# security
|
|
security.anyDenyDenies=true
|
|
|
|
#
|
|
# Encryption properties
|
|
#
|
|
# default keystores location
|
|
dir.keystore=classpath:alfresco/keystore
|
|
|
|
# general encryption parameters
|
|
encryption.keySpec.class=org.alfresco.encryption.DESEDEKeyGenerator
|
|
encryption.keyAlgorithm=DESede
|
|
encryption.cipherAlgorithm=DESede/CBC/PKCS5Padding
|
|
|
|
# secret key keystore configuration
|
|
encryption.keystore.location=${dir.keystore}/keystore
|
|
encryption.keystore.keyMetaData.location=${dir.keystore}/keystore-passwords.properties
|
|
encryption.keystore.provider=
|
|
encryption.keystore.type=JCEKS
|
|
|
|
# backup secret key keystore configuration
|
|
encryption.keystore.backup.location=${dir.keystore}/backup-keystore
|
|
encryption.keystore.backup.keyMetaData.location=${dir.keystore}/backup-keystore-passwords.properties
|
|
encryption.keystore.backup.provider=
|
|
encryption.keystore.backup.type=JCEKS
|
|
|
|
# Should encryptable properties be re-encrypted with new encryption keys on bootstrap?
|
|
encryption.bootstrap.reencrypt=false
|
|
|
|
# mac/md5 encryption
|
|
encryption.mac.messageTimeout=30000
|
|
encryption.mac.algorithm=HmacSHA1
|
|
|
|
# ssl encryption
|
|
encryption.ssl.keystore.location=${dir.keystore}/ssl.keystore
|
|
encryption.ssl.keystore.provider=
|
|
encryption.ssl.keystore.type=JCEKS
|
|
encryption.ssl.keystore.keyMetaData.location=${dir.keystore}/ssl-keystore-passwords.properties
|
|
encryption.ssl.truststore.location=${dir.keystore}/ssl.truststore
|
|
encryption.ssl.truststore.provider=
|
|
encryption.ssl.truststore.type=JCEKS
|
|
encryption.ssl.truststore.keyMetaData.location=${dir.keystore}/ssl-truststore-passwords.properties
|
|
|
|
# Re-encryptor properties
|
|
encryption.reencryptor.chunkSize=100
|
|
encryption.reencryptor.numThreads=2
|
|
|
|
# SOLR connection details (e.g. for JMX)
|
|
solr.host=localhost
|
|
solr.port=8080
|
|
solr.port.ssl=8443
|
|
solr.solrUser=solr
|
|
solr.solrPassword=solr
|
|
# none, https
|
|
solr.secureComms=https
|
|
|
|
|
|
solr.max.total.connections=40
|
|
solr.max.host.connections=40
|
|
|
|
# Solr connection timeouts
|
|
# solr connect timeout in ms
|
|
solr.solrConnectTimeout=5000
|
|
|
|
# cron expression defining how often the Solr Admin client (used by JMX) pings Solr if it goes away
|
|
solr.solrPingCronExpression=0 0/5 * * * ? *
|
|
|
|
|
|
# Default SOLR store mappings
|
|
solr.store.mappings=solrMappingAlfresco,solrMappingArchive
|
|
solr.store.mappings.value.solrMappingAlfresco.httpClientFactory=solrHttpClientFactory
|
|
solr.store.mappings.value.solrMappingAlfresco.baseUrl=/solr/alfresco
|
|
solr.store.mappings.value.solrMappingAlfresco.protocol=workspace
|
|
solr.store.mappings.value.solrMappingAlfresco.identifier=SpacesStore
|
|
solr.store.mappings.value.solrMappingArchive.httpClientFactory=solrHttpClientFactory
|
|
solr.store.mappings.value.solrMappingArchive.baseUrl=/solr/archive
|
|
solr.store.mappings.value.solrMappingArchive.protocol=archive
|
|
solr.store.mappings.value.solrMappingArchive.identifier=SpacesStore
|
|
|
|
#
|
|
# Web Publishing Properties
|
|
#
|
|
publishing.root.path=/app:company_home/app:dictionary
|
|
publishing.root=${publishing.root.path}/${spaces.publishing.root.childname}
|
|
|
|
#
|
|
# URL Shortening Properties
|
|
#
|
|
urlshortening.bitly.username=brianalfresco
|
|
urlshortening.bitly.api.key=R_ca15c6c89e9b25ccd170bafd209a0d4f
|
|
urlshortening.bitly.url.length=20
|
|
|
|
#
|
|
# Bulk Filesystem Importer
|
|
#
|
|
|
|
# The number of threads to employ in a batch import
|
|
bulkImport.batch.numThreads=4
|
|
|
|
# The size of a batch in a batch import i.e. the number of files to import in a
|
|
# transaction/thread
|
|
bulkImport.batch.batchSize=20
|
|
|
|
|
|
#
|
|
# Caching Content Store
|
|
#
|
|
system.content.caching.cacheOnInbound=true
|
|
system.content.caching.maxDeleteWatchCount=1
|
|
# Clean up every day at 3 am
|
|
system.content.caching.contentCleanup.cronExpression=0 0 3 * * ?
|
|
system.content.caching.minFileAgeMillis=60000
|
|
system.content.caching.maxUsageMB=4096
|
|
# maxFileSizeMB - 0 means no max file size.
|
|
system.content.caching.maxFileSizeMB=0
|
|
|
|
mybatis.useLocalCaches=false
|
|
|
|
fileFolderService.checkHidden.enabled=true
|
|
|
|
|
|
ticket.cleanup.cronExpression=0 0 * * * ?
|
|
|
|
#
|
|
# Disable load of sample site
|
|
#
|
|
sample.site.disabled=false
|
|
|
|
#
|
|
# Download Service Cleanup
|
|
#
|
|
download.cleaner.startDelayMins=60
|
|
download.cleaner.repeatIntervalMins=60
|
|
download.cleaner.maxAgeMins=60
|
|
|
|
# enable QuickShare - if false then the QuickShare-specific REST APIs will return 403 Forbidden
|
|
system.quickshare.enabled=true
|
|
|
|
#
|
|
# Cache configuration
|
|
#
|
|
cache.propertyValueCache.maxItems=10000
|
|
cache.contentDataSharedCache.maxItems=130000
|
|
cache.immutableEntitySharedCache.maxItems=50000
|
|
cache.node.rootNodesSharedCache.maxItems=1000
|
|
cache.node.allRootNodesSharedCache.maxItems=1000
|
|
cache.node.nodesSharedCache.maxItems=250000
|
|
cache.node.aspectsSharedCache.maxItems=130000
|
|
cache.node.propertiesSharedCache.maxItems=130000
|
|
cache.node.parentAssocsSharedCache.maxItems=130000
|
|
cache.node.childByNameSharedCache.maxItems=130000
|
|
cache.userToAuthoritySharedCache.maxItems=5000
|
|
cache.authenticationSharedCache.maxItems=5000
|
|
cache.authoritySharedCache.maxItems=10000
|
|
cache.authorityToChildAuthoritySharedCache.maxItems=40000
|
|
cache.zoneToAuthoritySharedCache.maxItems=500
|
|
cache.permissionsAccessSharedCache.maxItems=50000
|
|
cache.readersSharedCache.maxItems=10000
|
|
cache.readersDeniedSharedCache.maxItems=10000
|
|
cache.nodeOwnerSharedCache.maxItems=40000
|
|
cache.personSharedCache.maxItems=1000
|
|
cache.ticketsCache.maxItems=1000
|
|
cache.avmEntitySharedCache.maxItems=5000
|
|
cache.avmVersionRootEntitySharedCache.maxItems=1000
|
|
cache.avmNodeSharedCache.maxItems=5000
|
|
cache.avmNodeAspectsSharedCache.maxItems=5000
|
|
cache.webServicesQuerySessionSharedCache.maxItems=1000
|
|
cache.aclSharedCache.maxItems=50000
|
|
cache.aclEntitySharedCache.maxItems=50000
|
|
cache.resourceBundleBaseNamesSharedCache.maxItems=1000
|
|
cache.loadedResourceBundlesSharedCache.maxItems=1000
|
|
cache.messagesSharedCache.maxItems=1000
|
|
cache.compiledModelsSharedCache.maxItems=1000
|
|
cache.prefixesSharedCache.maxItems=1000
|
|
cache.webScriptsRegistrySharedCache.maxItems=1000
|
|
cache.routingContentStoreSharedCache.maxItems=10000
|
|
cache.executingActionsCache.maxItems=1000
|
|
cache.tagscopeSummarySharedCache.maxItems=1000
|
|
cache.imapMessageSharedCache.maxItems=2000
|
|
cache.tenantEntitySharedCache.maxItems=1000
|
|
cache.immutableSingletonSharedCache.maxItems=12000
|
|
cache.remoteAlfrescoTicketService.ticketsCache.maxItems=1000
|
|
cache.contentDiskDriver.fileInfoCache.maxItems=1000
|
|
cache.globalConfigSharedCache.maxItems=1000
|
|
cache.authorityBridgeTableByTenantSharedCache.maxItems=10
|
|
|
|
#
|
|
# Download Service Limits, in bytes
|
|
#
|
|
download.maxContentSize=2152852358
|
|
|
|
|
|
#
|
|
# Use bridge tables for caching authority evaluation.
|
|
#
|
|
authority.useBridgeTable=true
|
|
|