# Repository configuration
repository.name=Main Repository
# Directory configuration
dir.root=./alf_data
dir.contentstore=${dir.root}/contentstore
dir.contentstore.deleted=${dir.root}/contentstore.deleted
# The location of cached content
dir.cachedcontent=${dir.root}/cachedcontent
dir.auditcontentstore=${dir.root}/audit.contentstore
# The maximum permitted size, in bytes, of content files.
# No value (or a negative long) means that no limit is applied.
# See content-services-context.xml
system.content.maximumFileSizeLimit=
# The location for lucene index files
dir.indexes=${dir.root}/lucene-indexes
# The location for index backups
dir.indexes.backup=${dir.root}/backup-lucene-indexes
# The location for lucene index locks
dir.indexes.lock=${dir.indexes}/locks
#Directory to find external license
dir.license.external=.
# Spring resource location of external license files
location.license.external=file://${dir.license.external}/*.lic
# Spring resource location of embedded license files
location.license.embedded=/WEB-INF/alfresco/license/*.lic
# Spring resource location of license files on shared classpath
location.license.shared=classpath*:/alfresco/extension/license/*.lic
# WebDAV initialization properties
system.webdav.servlet.enabled=true
system.webdav.url.path.prefix=
system.webdav.storeName=${protocols.storeName}
system.webdav.rootPath=${protocols.rootPath}
# File name patterns that trigger rename shuffle detection.
# The pattern is used by move operations and is tested against the full path after it has been lower-cased.
system.webdav.renameShufflePattern=(.*/\\..*)|(.*[a-f0-9]{8}+$)|(.*\\.tmp$)|(.*\\.wbk$)|(.*\\.bak$)|(.*\\~$)|(.*backup.*\\.do[ct]{1}[x]?[m]?$)
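# Illustrative paths (assumed examples, not taken from product documentation) that the
# pattern above would match after lower-casing:
#   /sites/docs/.hiddenfile    (dot file)
#   /sites/docs/report.tmp     (temporary file)
#   /sites/docs/letter.doc~    (editor backup ending in '~')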
system.webdav.activities.enabled=false
# Is the JBPM Deploy Process Servlet enabled?
# Default is false. Should not be enabled in production environments as the
# servlet allows unauthenticated deployment of new workflows.
system.workflow.deployservlet.enabled=false
# Sets the location for the JBPM Configuration File
system.workflow.jbpm.config.location=classpath:org/alfresco/repo/workflow/jbpm/jbpm.cfg.xml
# Determines if JBPM workflow definitions are shown.
# Default is false. This controls the visibility of JBPM
# workflow definitions from the getDefinitions and
# getAllDefinitions WorkflowService API but still allows
# any in-flight JBPM workflows to be completed.
system.workflow.engine.jbpm.definitions.visible=false
#Determines if Activiti definitions are visible
system.workflow.engine.activiti.definitions.visible=true
# Determines if the JBPM engine is enabled
system.workflow.engine.jbpm.enabled=true
# Determines if the Activiti engine is enabled
system.workflow.engine.activiti.enabled=true
system.workflow.engine.activiti.idblocksize=100
# Determines whether workflows deployed to the Activiti engine are deployed in the
# tenant context of the current thread when the tenant service is enabled.
# If set to false, all deployed workflows are shared among tenants. The recommended
# setting is true unless there is a good reason to disallow deploying tenant-specific
# workflows in a multi-tenant environment.
system.workflow.deployWorkflowsInTenant=true
# The maximum number of groups to check for pooled tasks. For performance
# reasons, this is limited to 500 by default.
system.workflow.maxAuthoritiesForPooledTasks=500
# The maximum number of pooled tasks to return in a query. It may be necessary
# to limit this depending on UI limitations.
system.workflow.maxPooledTasks=-1
# The maximum number of reviewers for "Group Review and Approve" workflow.
# Use '0' for unlimited.
system.workflow.maxGroupReviewers=0
index.subsystem.name=lucene
# ######################################### #
# Index Recovery and Tracking Configuration #
# ######################################### #
#
# Recovery types are:
# NONE: Ignore
# VALIDATE: Checks that the first and last transaction for each store is represented in the indexes
# AUTO: Validates and auto-recovers if validation fails
# FULL: Full index rebuild, processing all transactions in order. The server is temporarily suspended.
index.recovery.mode=VALIDATE
# FULL recovery continues when encountering errors
index.recovery.stopOnError=false
index.recovery.maximumPoolSize=5
# Set the frequency with which the index tracking is triggered.
# For more information on index tracking in a cluster:
# http://wiki.alfresco.com/wiki/High_Availability_Configuration_V1.4_to_V2.1#Version_1.4.5.2C_2.1.1_and_later
# The default below triggers tracking once every five seconds, but this can be modified as required.
# Examples:
# Never: * * * * * ? 2099
# Once every five seconds: 0/5 * * * * ?
# Once every two seconds : 0/2 * * * * ?
# See http://www.quartz-scheduler.org/docs/tutorials/crontrigger.html
index.tracking.cronExpression=0/5 * * * * ?
index.tracking.adm.cronExpression=${index.tracking.cronExpression}
index.tracking.avm.cronExpression=${index.tracking.cronExpression}
# Other properties.
index.tracking.maxTxnDurationMinutes=10
index.tracking.reindexLagMs=1000
index.tracking.maxRecordSetSize=1000
index.tracking.maxTransactionsPerLuceneCommit=100
index.tracking.disableInTransactionIndexing=false
# Index tracking information of a certain age is cleaned out by a scheduled job.
# Any clustered system that has been offline for longer than this period will need to be seeded
# with a more recent backup of the Lucene indexes or the indexes will have to be fully rebuilt.
# Use -1 to disable purging. This can be switched on at any stage.
index.tracking.minRecordPurgeAgeDays=30
# Unused transactions will be purged in chunks determined by commit time boundaries.
# 'index.tracking.purgeSize' specifies the size of the chunk in ms. The default is a couple of hours.
index.tracking.purgeSize=7200000
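# For reference: 7200000 ms = 2 * 60 * 60 * 1000 ms = 2 hours.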
# By default, reindexing of missing content is never carried out.
# The cron expression below can be changed to control when this reindexing occurs.
# Users of Enterprise Alfresco can configure this cron expression via JMX without a server restart.
# Note that if alfresco.cluster.enabled is false in Enterprise, then reindexing will not occur.
index.reindexMissingContent.cronExpression=* * * * * ? 2099
# Change the failure behaviour of the configuration checker
system.bootstrap.config_check.strict=true
#
# How long should shutdown wait to complete normally before
# taking stronger action and calling System.exit()
# in ms, 10,000 is 10 seconds
#
shutdown.backstop.timeout=10000
shutdown.backstop.enabled=false
# Server Single User Mode
# note:
# only the named user is allowed to log in (if blank or not set then all users are allowed),
# assuming maxusers is not set to 0
#server.singleuseronly.name=admin
# Server Max Users - limits the number of users with non-expired tickets
# note:
# -1 allows any number of users, assuming the server is not in single-user mode
# 0 prevents further logins, including the ability to enter single-user mode
server.maxusers=-1
# The Cron expression controlling the frequency with which the OpenOffice connection is tested
openOffice.test.cronExpression=0 * * * * ?
#
# Disable all shared caches (mutable and immutable)
# These properties are used for diagnostic purposes
system.cache.disableMutableSharedCaches=false
system.cache.disableImmutableSharedCaches=false
# The maximum capacity of the parent assocs cache (the number of nodes whose parents can be cached)
system.cache.parentAssocs.maxSize=130000
# The average number of parents expected per cache entry. This parameter is multiplied by the above
# value to compute a limit on the total number of cached parents, which will be proportional to the
# cache's memory usage. The cache will be pruned when this limit is exceeded to avoid excessive
# memory usage.
system.cache.parentAssocs.limitFactor=8
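# As a worked example, with the defaults above the pruning limit is approximately
# 130000 * 8 = 1,040,000 cached parent associations.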
#
# Properties to limit resources spent on individual searches
#
# The maximum time spent pruning results
system.acl.maxPermissionCheckTimeMillis=10000
# The maximum number of search results to perform permission checks against
system.acl.maxPermissionChecks=1000
# The maximum number of filefolder list results
system.filefolderservice.defaultListMaxResults=5000
# Properties to control read permission evaluation for acegi
system.readpermissions.optimise=true
system.readpermissions.bulkfetchsize=1000
#
# Manually control how the system handles maximum string lengths.
# Any zero or negative value is ignored.
# Only change this after consulting support or reading the appropriate Javadocs for
# org.alfresco.repo.domain.schema.SchemaBootstrap for V2.1.2
system.maximumStringLength=-1
#
# Limit the Hibernate session size by trying to amalgamate events for L2 session invalidation:
# - Hibernate works as-is up to this size
# - after the limit is hit, events that can be grouped invalidate the L2 cache by type rather than by instance
# - events may not group if post-action listeners are registered (this is not the case with the default distribution)
system.hibernateMaxExecutions=20000
#
# Determine if modification timestamp propagation from child to parent nodes is respected or not.
# Even if 'true', the functionality is only supported for child associations that declare the
# 'propagateTimestamps' element in the dictionary definition.
system.enableTimestampPropagation=true
#
# Enable system model integrity checking.
# WARNING: Changing this is unsupported; bugs may corrupt data
system.integrity.enabled=true
# Do integrity violations fail transactions
# WARNING: Changing this is unsupported; bugs may corrupt data
system.integrity.failOnViolation=true
# The number of errors to report when violations are detected
system.integrity.maxErrorsPerTransaction=5
# Add call stacks to integrity events so that errors are logged with possible causes
# WARNING: This is expensive and should only be switched on for diagnostic purposes
system.integrity.trace=false
#
# Decide if content should be removed from the system immediately after being orphaned.
# Do not change this unless you have examined the impact it has on your backup procedures.
system.content.eagerOrphanCleanup=false
# The number of days to keep orphaned content in the content stores.
# This has no effect on the 'deleted' content stores, which are not automatically emptied.
system.content.orphanProtectDays=14
# The action to take when a store or stores fails to delete orphaned content
# IGNORE: Just log a warning. The binary remains and the record is expunged
# KEEP_URL: Log a warning and create a URL entry with orphan time 0. It won't be processed or removed.
system.content.deletionFailureAction=IGNORE
# The CRON expression to trigger the deletion of resources associated with orphaned content.
system.content.orphanCleanup.cronExpression=0 0 4 * * ?
# The CRON expression to trigger content URL conversion. This process is not intensive and can
# be triggered on a live system. Alternatively, it can be triggered using JMX on a dedicated machine.
system.content.contentUrlConverter.cronExpression=* * * * * ? 2099
system.content.contentUrlConverter.threadCount=2
system.content.contentUrlConverter.batchSize=500
system.content.contentUrlConverter.runAsScheduledJob=false
# #################### #
# Lucene configuration #
# #################### #
#
# Millisecond threshold for text transformations
# Slower transformers will force the text extraction to be asynchronous
#
lucene.maxAtomicTransformationTime=100
#
# The maximum number of clauses that are allowed in a lucene query
#
lucene.query.maxClauses=10000
#
# The size of the queue of nodes waiting to be indexed.
# Events are generated as nodes are changed; this is the maximum size of the queue used to coalesce events.
# When this size is reached, the list of nodes will be indexed.
#
# http://issues.alfresco.com/browse/AR-1280: Setting this high is the workaround as of 1.4.3.
#
lucene.indexer.batchSize=1000000
fts.indexer.batchSize=1000
#
# Index cache sizes
#
lucene.indexer.cacheEnabled=true
lucene.indexer.maxDocIdCacheSize=100000
lucene.indexer.maxDocumentCacheSize=100
lucene.indexer.maxIsCategoryCacheSize=-1
lucene.indexer.maxLinkAspectCacheSize=10000
lucene.indexer.maxParentCacheSize=100000
lucene.indexer.maxPathCacheSize=100000
lucene.indexer.maxTypeCacheSize=10000
#
# Properties for merge (note this does not affect the final index segment, which will be optimised)
# Max merge docs only applies to the merge process, not the resulting index, which will be optimised.
#
lucene.indexer.mergerMaxMergeDocs=1000000
lucene.indexer.mergerMergeFactor=5
lucene.indexer.mergerMaxBufferedDocs=-1
lucene.indexer.mergerRamBufferSizeMb=16
#
# Properties for delta indexes (note this does not affect the final index segment, which will be optimised)
# Max merge docs only applies to the index building process, not the resulting index, which will be optimised.
#
lucene.indexer.writerMaxMergeDocs=1000000
lucene.indexer.writerMergeFactor=5
lucene.indexer.writerMaxBufferedDocs=-1
lucene.indexer.writerRamBufferSizeMb=16
#
# Target number of indexes and deltas in the overall index and what index size to merge in memory
#
lucene.indexer.mergerTargetIndexCount=8
lucene.indexer.mergerTargetOverlayCount=5
lucene.indexer.mergerTargetOverlaysBlockingFactor=2
lucene.indexer.maxDocsForInMemoryMerge=60000
lucene.indexer.maxRamInMbForInMemoryMerge=16
lucene.indexer.maxDocsForInMemoryIndex=60000
lucene.indexer.maxRamInMbForInMemoryIndex=16
#
# Other lucene properties
#
lucene.indexer.termIndexInterval=128
lucene.indexer.useNioMemoryMapping=true
# override to false for pre-3.0 behaviour
lucene.indexer.postSortDateTime=true
lucene.indexer.defaultMLIndexAnalysisMode=EXACT_LANGUAGE_AND_ALL
lucene.indexer.defaultMLSearchAnalysisMode=EXACT_LANGUAGE_AND_ALL
#
# The number of terms from a document that will be indexed
#
lucene.indexer.maxFieldLength=10000
# Should we use a 'fair' locking policy, giving queue-like access behaviour to
# the indexes and avoiding starvation of waiting writers? Set to false on old
# JVMs where this appears to cause deadlock
lucene.indexer.fairLocking=true
#
# Index locks (mostly deprecated and will be tidied up with the next lucene upgrade)
#
lucene.write.lock.timeout=10000
lucene.commit.lock.timeout=100000
lucene.lock.poll.interval=100
lucene.indexer.useInMemorySort=true
lucene.indexer.maxRawResultSetSizeForInMemorySort=1000
lucene.indexer.contentIndexingEnabled=true
index.backup.cronExpression=0 0 3 * * ?
lucene.defaultAnalyserResourceBundleName=alfresco/model/dataTypeAnalyzers
# When transforming archive files (.zip etc) into text representations (such as
# for full text indexing), should the files within the archive be processed too?
# If enabled, transformation takes longer, but searches of the files find more.
transformer.Archive.includeContents=false
# Database configuration
db.schema.stopAfterSchemaBootstrap=false
db.schema.update=true
db.schema.update.lockRetryCount=24
db.schema.update.lockRetryWaitSeconds=5
db.driver=org.gjt.mm.mysql.Driver
db.name=alfresco
db.url=jdbc:mysql:///${db.name}
db.username=alfresco
db.password=alfresco
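#
# Illustrative override for a different database (example values only; assumes the
# PostgreSQL JDBC driver is on the classpath and that host, port and credentials are
# adjusted to your environment):
#db.driver=org.postgresql.Driver
#db.url=jdbc:postgresql://localhost:5432/${db.name}
#db.username=alfresco
#db.password=alfresco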
db.pool.initial=10
db.pool.max=40
db.txn.isolation=-1
db.pool.statements.enable=true
db.pool.statements.max=40
db.pool.min=0
db.pool.idle=-1
db.pool.wait.max=-1
db.pool.validate.query=
db.pool.evict.interval=-1
db.pool.evict.idle.min=1800000
#
# note: for 'db.pool.evict.num.tests' see http://commons.apache.org/dbcp/configuration.html (numTestsPerEvictionRun)
# and also following extract from "org.apache.commons.pool.impl.GenericKeyedObjectPool" (1.5.5)
#
# * The number of objects to examine during each run of the idle object evictor thread (if any).
# * When a negative value is supplied, ceil({@link #getNumIdle})/abs({@link #getNumTestsPerEvictionRun})
# * tests will be run. I.e., when the value is -n, roughly one nth of the
# * idle objects will be tested per run.
#
db.pool.evict.num.tests=-1
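# Example (illustrative): with db.pool.evict.num.tests=-3, roughly one third of the idle
# connections would be tested on each eviction run.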
db.pool.evict.validate=false
db.pool.validate.borrow=true
db.pool.validate.return=false
db.pool.abandoned.detect=false
db.pool.abandoned.time=300
#
# db.pool.abandoned.log=true (logAbandoned) adds overhead (http://commons.apache.org/dbcp/configuration.html)
# and also requires db.pool.abandoned.detect=true (removeAbandoned)
#
db.pool.abandoned.log=false
# Audit configuration
audit.enabled=true
audit.tagging.enabled=true
audit.alfresco-access.enabled=false
audit.alfresco-access.sub-actions.enabled=false
audit.cmischangelog.enabled=false
audit.dod5015.enabled=false
# Setting this flag to true will force startup failure when invalid audit configurations are detected
audit.config.strict=false
# Audit map filter for AccessAuditor - restricts recorded events to user driven events
audit.filter.alfresco-access.default.enabled=true
audit.filter.alfresco-access.transaction.user=~System;~null;.*
audit.filter.alfresco-access.transaction.type=cm:folder;cm:content;st:site
audit.filter.alfresco-access.transaction.path=~/sys:archivedItem;~/ver:;.*
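# Illustrative reading of the defaults above (a sketch, not a normative definition): each
# filter property is a semicolon-separated list of regular expressions evaluated in order,
# and a leading '~' rejects matching values. For example, '~System;~null;.*' rejects the
# 'System' user and null users and accepts all other users.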
# System Configuration
system.store=system://system
system.descriptor.childname=sys:descriptor
system.descriptor.current.childname=sys:descriptor-current
# User config
alfresco_user_store.store=user://alfrescoUserStore
alfresco_user_store.system_container.childname=sys:system
alfresco_user_store.user_container.childname=sys:people
# note: default admin username - should not be changed after installation
alfresco_user_store.adminusername=admin
# Initial password - editing this will not have any effect once the repository is installed
alfresco_user_store.adminpassword=209c6174da490caeb422f3fa5a7ae634
alfresco_user_store.adminsalt=ad3b938f-c1ad-4f2b-828b-6f3afd30ffdd
alfresco_user_store.adminpassword2=f378d5d7b947d5c26f478e21819e7ec3a6668c8149b050d086c64447bc40173b
# note: default guest username - should not be changed after installation
alfresco_user_store.guestusername=guest
# Used to move home folders to a new location
home_folder_provider_synchronizer.enabled=false
home_folder_provider_synchronizer.override_provider=
home_folder_provider_synchronizer.keep_empty_parents=false
# Spaces Archive Configuration
spaces.archive.store=archive://SpacesStore
# Spaces Configuration
spaces.store=workspace://SpacesStore
spaces.company_home.childname=app:company_home
spaces.guest_home.childname=app:guest_home
spaces.dictionary.childname=app:dictionary
spaces.templates.childname=app:space_templates
spaces.imap_attachments.childname=cm:Imap Attachments
spaces.imap_home.childname=cm:Imap Home
spaces.imapConfig.childname=app:imap_configs
spaces.imap_templates.childname=app:imap_templates
spaces.scheduled_actions.childname=cm:Scheduled Actions
spaces.emailActions.childname=app:email_actions
spaces.searchAction.childname=cm:search
spaces.templates.content.childname=app:content_templates
spaces.templates.email.childname=app:email_templates
spaces.templates.email.invite1.childname=app:invite_email_templates
spaces.templates.email.notify.childname=app:notify_email_templates
spaces.templates.email.following.childname=app:following
spaces.templates.rss.childname=app:rss_templates
spaces.savedsearches.childname=app:saved_searches
spaces.scripts.childname=app:scripts
spaces.wcm.childname=app:wcm
spaces.wcm_content_forms.childname=app:wcm_forms
spaces.content_forms.childname=app:forms
spaces.user_homes.childname=app:user_homes
spaces.user_homes.regex.key=userName
spaces.user_homes.regex.pattern=
spaces.user_homes.regex.group_order=
spaces.sites.childname=st:sites
spaces.templates.email.invite.childname=cm:invite
spaces.templates.email.activities.childname=cm:activities
spaces.rendition.rendering_actions.childname=app:rendering_actions
spaces.replication.replication_actions.childname=app:replication_actions
spaces.wcm_deployed.childname=cm:wcm_deployed
spaces.transfers.childname=app:transfers
spaces.transfer_groups.childname=app:transfer_groups
spaces.transfer_temp.childname=app:temp
spaces.inbound_transfer_records.childname=app:inbound_transfer_records
spaces.webscripts.childname=cm:webscripts
spaces.extension_webscripts.childname=cm:extensionwebscripts
spaces.models.childname=app:models
spaces.workflow.definitions.childname=app:workflow_defs
spaces.publishing.root.childname=app:publishing_root
spaces.templates.email.workflowemailnotification.childname=cm:workflownotification
spaces.nodetemplates.childname=app:node_templates
spaces.shared.childname=app:shared
# ADM VersionStore Configuration
version.store.enableAutoVersioning=true
version.store.deprecated.lightWeightVersionStore=workspace://lightWeightVersionStore
version.store.version2Store=workspace://version2Store
version.store.migrateVersionStore.threadCount=3
version.store.migrateVersionStore.batchSize=1
version.store.migrateCleanupJob.threadCount=3
version.store.migrateCleanupJob.batchSize=1
# WARNING: For non-production testing only! Do not change this value (to avoid version store issues, including possible mismatches). It should remain false, since the lightWeightVersionStore is deprecated.
version.store.onlyUseDeprecatedV1=false
# The CRON expression to trigger migration of the version store from V1 (2.x) to V2 (3.x)
# By default, this is effectively 'never' but can be modified as required.
# Examples:
# Never: * * * * * ? 2099
# Once every thirty minutes: 0 0/30 * * * ?
# See http://www.quartz-scheduler.org/docs/tutorials/crontrigger.html
version.store.migrateVersionStore.cronExpression=* * * * * ? 2099
# Limits the number of version histories to migrate per job cycle, where -1 = unlimited. Note: if the limit is > 0 then the job needs to be scheduled to run regularly in order to complete the migration.
version.store.migrateVersionStore.limitPerJobCycle=-1
version.store.migrateVersionStore.runAsScheduledJob=false
# Optional Comparator class name to sort versions.
# Set to: org.alfresco.repo.version.common.VersionLabelComparator
# if upgrading from a version that used unordered sequences in a cluster.
version.store.versionComparatorClass=
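# For example (commented out):
#version.store.versionComparatorClass=org.alfresco.repo.version.common.VersionLabelComparator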
# Folders for storing people
system.system_container.childname=sys:system
system.people_container.childname=sys:people
system.authorities_container.childname=sys:authorities
system.zones_container.childname=sys:zones
# Folders for storing workflow related info
system.workflow_container.childname=sys:workflow
# Folder for storing shared remote credentials
system.remote_credentials_container.childname=sys:remote_credentials
# Folder for storing syncset definitions
system.syncset_definition_container.childname=sys:syncset_definitions
# Folder for storing download archives
system.downloads_container.childname=sys:downloads
# Folder for storing IdP's certificate definitions
system.certificate_container.childname=sys:samlcertificate
# Are user names case sensitive?
user.name.caseSensitive=false
domain.name.caseSensitive=false
domain.separator=
# AVM Specific properties.
avm.remote.idlestream.timeout=30000
#Format caption extracted from the XML Schema.
xforms.formatCaption=true
# ECM content usages/quotas
system.usages.enabled=false
system.usages.clearBatchSize=0
system.usages.updateBatchSize=50
# Repository endpoint - used by Activity Service
repo.remote.endpoint=/service
# Some authentication mechanisms may need to create people
# in the repository on demand. This enables that feature.
# If disabled an error will be generated for missing
# people. If enabled then a person will be created and
# persisted.
create.missing.people=${server.transaction.allow-writes}
# Create home folders (unless disabled, see next property) as people are created (true) or create them lazily (false)
home.folder.creation.eager=true
# Disable home folder creation - if true then home folders are not created (neither eagerly nor lazily)
home.folder.creation.disabled=false
# Should we consider zero byte content to be the same as no content when firing
# content update policies? Prevents 'premature' firing of inbound content rules
# for some clients such as Mac OS X Finder
policy.content.update.ignoreEmpty=true
# The well known RMI registry port and external host name published in the stubs
# is defined in the alfresco-shared.properties file
#
# alfresco.rmi.services.port=50500
# Default value of alfresco.rmi.services.host is 0.0.0.0 which means 'listen on all adapters'.
# This allows connections to JMX both remotely and locally.
#
alfresco.rmi.services.host=0.0.0.0
# If the RMI address is in use, how many retries should be attempted before aborting.
# A value of 0 means 'Don't retry if the address is in use'.
alfresco.rmi.services.retries=4
# RMI service ports for the individual services.
# These eight services are available remotely.
#
# Assign an individual port to each service for best performance,
# or run several services on the same port; you can even run everything on 50500 if
# running through a firewall.
#
# Specify 0 to use a random unused port.
#
avm.rmi.service.port=50501
avmsync.rmi.service.port=50502
authentication.rmi.service.port=50504
repo.rmi.service.port=50505
action.rmi.service.port=50506
deployment.rmi.service.port=50507
monitor.rmi.service.port=50508
#
# enable or disable individual RMI services
#
avm.rmi.service.enabled=true
avmsync.rmi.service.enabled=true
authentication.rmi.service.enabled=true
repo.rmi.service.enabled=true
action.rmi.service.enabled=true
deployment.rmi.service.enabled=true
monitor.rmi.service.enabled=true
# Should the MBean server bind to an existing server? Set to true for most application servers,
# false for WebSphere clusters.
mbean.server.locateExistingServerIfPossible=true
# External executable locations
ooo.exe=soffice
ooo.user=${dir.root}/oouser
img.root=./ImageMagick
img.dyn=${img.root}/lib
img.exe=${img.root}/bin/convert
swf.exe=./bin/pdf2swf
swf.languagedir=.
# Thumbnail Service
system.thumbnail.generate=true
# Default thumbnail limits
# When creating thumbnails, only use the first pageLimit pages
system.thumbnail.definition.default.timeoutMs=-1
system.thumbnail.definition.default.readLimitTimeMs=-1
system.thumbnail.definition.default.maxSourceSizeKBytes=-1
system.thumbnail.definition.default.readLimitKBytes=-1
system.thumbnail.definition.default.pageLimit=1
system.thumbnail.definition.default.maxPages=-1
# Maximum source sizes (in KBytes), per mimetype, for which thumbnails will be created
system.thumbnail.mimetype.maxSourceSizeKBytes.pdf=-1
system.thumbnail.mimetype.maxSourceSizeKBytes.txt=-1
system.thumbnail.mimetype.maxSourceSizeKBytes.docx=-1
system.thumbnail.mimetype.maxSourceSizeKBytes.xlsx=-1
system.thumbnail.mimetype.maxSourceSizeKBytes.pptx=-1
system.thumbnail.mimetype.maxSourceSizeKBytes.odt=-1
system.thumbnail.mimetype.maxSourceSizeKBytes.ods=-1
system.thumbnail.mimetype.maxSourceSizeKBytes.odp=-1
# Configuration for handling of failing thumbnails.
# See NodeEligibleForRethumbnailingEvaluator's javadoc for details.
#
# Retry periods limit the frequency with which the repository will attempt to create Share thumbnails
# for content nodes which have previously failed in their thumbnail attempts.
# These periods are in seconds.
#
# 604800s = 60s * 60m * 24h * 7d = 1 week
system.thumbnail.retryPeriod=60
system.thumbnail.retryCount=2
system.thumbnail.quietPeriod=604800
system.thumbnail.quietPeriodRetriesEnabled=true
system.thumbnail.redeployStaticDefsOnStartup=true
# The default timeout for metadata mapping extracters
content.metadataExtracter.default.timeoutMs=20000
# Property to enable upgrade from 2.1-A
V2.1-A.fixes.to.schema=0
#V2.1-A.fixes.to.schema=82
# The default authentication chain
authentication.chain=alfrescoNtlm1:alfrescoNtlm
# Do authentication tickets expire or live forever?
authentication.ticket.ticketsExpire=true
# If ticketsExpire is true, how should they expire?
# Valid values are: AFTER_INACTIVITY, AFTER_FIXED_TIME, DO_NOT_EXPIRE
# The default is AFTER_FIXED_TIME
authentication.ticket.expiryMode=AFTER_INACTIVITY
# If authentication.ticket.ticketsExpire is true and
# authentication.ticket.expiryMode is AFTER_FIXED_TIME or AFTER_INACTIVITY,
# this controls the minimum period for which tickets are valid.
# The default is PT1H for one hour.
authentication.ticket.validDuration=PT1H
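# Other example durations in the same ISO-8601 notation (illustrative): PT30M for thirty
# minutes, PT12H for twelve hours.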
# Use one ticket for all user sessions
# For the pre 4.2 behaviour of one ticket per session set this to false.
authentication.ticket.useSingleTicketPerUser=true
# If kerberos.authentication.cifs.enableTicketCracking is false,
# the Kerberos ticket cracking code is switched off.
# This code was required to get mutual authentication with a Windows AD client working in earlier versions of Java (version 5 or earlier).
# The latest JVMs don't require this code as they have better support for SPNEGO/GSS authentication.
# Please, see MNT-7392 for details.
kerberos.authentication.cifs.enableTicketCracking=false
# Default NFS user mappings (empty). Note that these users will be able to
# authenticate through NFS without a password, so ensure the NFS port is secure before
# enabling and adding mappings.
nfs.user.mappings=
nfs.user.mappings.default.uid=0
nfs.user.mappings.default.gid=0
#Example NFS user mappings
#nfs.user.mappings=admin,user1
#nfs.user.mappings.value.admin.uid=0
#nfs.user.mappings.value.admin.gid=0
#nfs.user.mappings.value.user1.uid=500
#nfs.user.mappings.value.user1.gid=500
# Default root path for protocols
protocols.storeName=${spaces.store}
protocols.rootPath=/${spaces.company_home.childname}
# OpenCMIS
opencmis.connector.default.store=${spaces.store}
opencmis.connector.default.rootPath=/${spaces.company_home.childname}
opencmis.connector.default.typesDefaultMaxItems=500
opencmis.connector.default.typesDefaultDepth=-1
opencmis.connector.default.objectsDefaultMaxItems=10000
opencmis.connector.default.objectsDefaultDepth=100
opencmis.connector.default.openHttpSession=false
opencmis.activities.enabled=true
# IMAP
imap.server.enabled=false
imap.server.port=143
imap.server.attachments.extraction.enabled=true
# Default IMAP mount points
imap.config.home.store=${spaces.store}
imap.config.home.rootPath=/${spaces.company_home.childname}
imap.config.home.folderPath=${spaces.imap_home.childname}
imap.config.server.mountPoints=AlfrescoIMAP
imap.config.server.mountPoints.default.mountPointName=IMAP
imap.config.server.mountPoints.default.modeName=ARCHIVE
imap.config.server.mountPoints.default.store=${spaces.store}
imap.config.server.mountPoints.default.rootPath=${protocols.rootPath}
imap.config.server.mountPoints.value.AlfrescoIMAP.mountPointName=Alfresco IMAP
imap.config.server.mountPoints.value.AlfrescoIMAP.modeName=MIXED
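# Illustrative example (commented out; the mount point name is an assumption) of adding a
# second mount point alongside AlfrescoIMAP. Fields not specified fall back to the
# 'default' values above:
#imap.config.server.mountPoints=AlfrescoIMAP,ProjectsIMAP
#imap.config.server.mountPoints.value.ProjectsIMAP.mountPointName=Projects IMAP
#imap.config.server.mountPoints.value.ProjectsIMAP.modeName=VIRTUAL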
#Imap extraction settings
#imap.attachments.mode:
# SEPARATE -- All attachments for each email will be extracted to a separate folder.
# COMMON -- All attachments for all emails will be extracted to one folder.
# SAME -- Attachments will be extracted to the same folder in which the email lies.
imap.attachments.mode=SEPARATE
imap.attachments.folder.store=${spaces.store}
imap.attachments.folder.rootPath=/${spaces.company_home.childname}
imap.attachments.folder.folderPath=${spaces.imap_attachments.childname}
# Activities Feed - refer to subsystem
# Feed max ID range to limit maximum number of entries
activities.feed.max.idRange=1000000
# Feed max size (number of entries)
activities.feed.max.size=100
# Feed max age (e.g. 44640 mins => 31 days)
activities.feed.max.ageMins=44640
activities.feed.generator.jsonFormatOnly=true
activities.feed.fetchBatchSize=150
activities.feedNotifier.batchSize=200
activities.feedNotifier.numThreads=2
# Subsystem unit test values. Will not have any effect on production servers
subsystems.test.beanProp.default.longProperty=123456789123456789
subsystems.test.beanProp.default.anotherStringProperty=Global Default
subsystems.test.beanProp=inst1,inst2,inst3
subsystems.test.beanProp.value.inst2.boolProperty=true
subsystems.test.beanProp.value.inst3.anotherStringProperty=Global Instance Default
subsystems.test.simpleProp2=true
subsystems.test.simpleProp3=Global Default3
# Default Async Action Thread Pool
default.async.action.threadPriority=1
default.async.action.corePoolSize=8
default.async.action.maximumPoolSize=20
# Deployment Service
deployment.service.numberOfSendingThreads=5
deployment.service.corePoolSize=2
deployment.service.maximumPoolSize=3
deployment.service.threadPriority=5
# How long to wait, in ms, before refreshing a target lock - detects shut-down servers
deployment.service.targetLockRefreshTime=60000
# How long to wait, in ms, after the last communication before deciding that the deployment has
# failed, possibly because the destination is no longer available (the default of 3600000 ms is one hour).
deployment.service.targetLockTimeout=3600000
#Invitation Service
# Whether emails should be sent as part of the invitation process.
notification.email.siteinvite=true
# Transfer Service
transferservice.receiver.enabled=true
transferservice.receiver.stagingDir=${java.io.tmpdir}/alfresco-transfer-staging
#
# How long to wait, in ms, before refreshing a transfer lock - detects shut-down servers.
# Default 1 minute.
transferservice.receiver.lockRefreshTime=60000
#
# How many times to retry acquiring the transfer lock
transferservice.receiver.lockRetryCount=3
# How long to wait, in ms, before retrying the transfer lock
transferservice.receiver.lockRetryWait=100
#
# How long to wait, in ms, since the last contact from the client before
# timing out a transfer. Needs to be long enough to cope with network delays and "thinking
# time" for both source and destination. Default 5 minutes.
transferservice.receiver.lockTimeOut=300000
# Max time allowed for WCM folder rename operation issued by external clients (CIFS, FTP)
wcm.rename.max.time.milliseconds=2000
# DM Receiver Properties
#
# The name of the DM Receiver target - you deploy to this target name
deployment.dmr.name=alfresco
# consolidate staging, author and workflow sandboxes into one
deployment.dmr.consolidate=true
# The name of the Alfresco receiver target
deployment.avm.name=avm
# Where the root of the web project should be stored; by default /www/avm_webapps
deployment.avm.rootPath=/www/avm_webapps
# Pattern for live stores deployed by the alfresco receiver
deployment.avm.storeNamePattern=%storeName%-live
# Built-in deployment receiver properties for the default
# filesystem receiver
# filesystem receiver configuration
deployment.filesystem.rootdir=./wcm
deployment.filesystem.datadir=${deployment.filesystem.rootdir}/depdata
deployment.filesystem.logdir=${deployment.filesystem.rootdir}/deplog
deployment.filesystem.metadatadir=${deployment.filesystem.rootdir}/depmetadata
deployment.filesystem.autofix=true
deployment.filesystem.errorOnOverwrite=false
# default filesystem target configuration
deployment.filesystem.default.rootdir=./www
deployment.filesystem.default.name=filesystem
deployment.filesystem.default.metadatadir=${deployment.filesystem.metadatadir}/default
# OrphanReaper
orphanReaper.lockRefreshTime=60000
orphanReaper.lockTimeOut=3600000
# security
security.anyDenyDenies=true
#
# Encryption properties
#
# default keystores location
dir.keystore=classpath:alfresco/keystore
# general encryption parameters
encryption.keySpec.class=org.alfresco.encryption.DESEDEKeyGenerator
encryption.keyAlgorithm=DESede
encryption.cipherAlgorithm=DESede/CBC/PKCS5Padding
# secret key keystore configuration
encryption.keystore.location=${dir.keystore}/keystore
encryption.keystore.keyMetaData.location=${dir.keystore}/keystore-passwords.properties
encryption.keystore.provider=
encryption.keystore.type=JCEKS
# backup secret key keystore configuration
encryption.keystore.backup.location=${dir.keystore}/backup-keystore
encryption.keystore.backup.keyMetaData.location=${dir.keystore}/backup-keystore-passwords.properties
encryption.keystore.backup.provider=
encryption.keystore.backup.type=JCEKS
# Should encryptable properties be re-encrypted with new encryption keys on bootstrap?
encryption.bootstrap.reencrypt=false
# mac/md5 encryption
encryption.mac.messageTimeout=30000
encryption.mac.algorithm=HmacSHA1
# ssl encryption
encryption.ssl.keystore.location=${dir.keystore}/ssl.keystore
encryption.ssl.keystore.provider=
encryption.ssl.keystore.type=JCEKS
encryption.ssl.keystore.keyMetaData.location=${dir.keystore}/ssl-keystore-passwords.properties
encryption.ssl.truststore.location=${dir.keystore}/ssl.truststore
encryption.ssl.truststore.provider=
encryption.ssl.truststore.type=JCEKS
encryption.ssl.truststore.keyMetaData.location=${dir.keystore}/ssl-truststore-passwords.properties
# Re-encryptor properties
encryption.reencryptor.chunkSize=100
encryption.reencryptor.numThreads=2
# SOLR connection details (e.g. for JMX)
solr.host=localhost
solr.port=8080
solr.port.ssl=8443
solr.solrUser=solr
solr.solrPassword=solr
# none, https
solr.secureComms=https
solr.cmis.alternativeDictionary=DEFAULT_DICTIONARY
solr.max.total.connections=40
solr.max.host.connections=40
# Solr connection timeouts
# solr connect timeout in ms
solr.solrConnectTimeout=5000
# cron expression defining how often the Solr Admin client (used by JMX) pings Solr if it goes away
solr.solrPingCronExpression=0 0/5 * * * ? *
#Default SOLR store mappings
solr.store.mappings=solrMappingAlfresco,solrMappingArchive
solr.store.mappings.value.solrMappingAlfresco.httpClientFactory=solrHttpClientFactory
solr.store.mappings.value.solrMappingAlfresco.baseUrl=/solr/alfresco
solr.store.mappings.value.solrMappingAlfresco.protocol=workspace
solr.store.mappings.value.solrMappingAlfresco.identifier=SpacesStore
solr.store.mappings.value.solrMappingArchive.httpClientFactory=solrHttpClientFactory
solr.store.mappings.value.solrMappingArchive.baseUrl=/solr/archive
solr.store.mappings.value.solrMappingArchive.protocol=archive
solr.store.mappings.value.solrMappingArchive.identifier=SpacesStore
#
# Web Publishing Properties
#
publishing.root.path=/app:company_home/app:dictionary
publishing.root=${publishing.root.path}/${spaces.publishing.root.childname}
#
# URL Shortening Properties
#
urlshortening.bitly.username=brianalfresco
urlshortening.bitly.api.key=R_ca15c6c89e9b25ccd170bafd209a0d4f
urlshortening.bitly.url.length=20
#
# Bulk Filesystem Importer
#
# The number of threads to employ in a batch import
bulkImport.batch.numThreads=4
# The size of a batch in a batch import, i.e. the number of files to import in a
# single transaction/thread
bulkImport.batch.batchSize=20
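# For example, with the defaults above an import runs 4 concurrent threads, each committing
# batches of 20 files per transaction.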
#
# Caching Content Store
#
system.content.caching.cacheOnInbound=true
system.content.caching.maxDeleteWatchCount=1
# Clean up every day at 3 am
system.content.caching.contentCleanup.cronExpression=0 0 3 * * ?
system.content.caching.minFileAgeMillis=60000
system.content.caching.maxUsageMB=4096
# maxFileSizeMB - 0 means no max file size.
system.content.caching.maxFileSizeMB=0
mybatis.useLocalCaches=false
fileFolderService.checkHidden.enabled=true
ticket.cleanup.cronExpression=0 0 * * * ?
#
# Disable load of sample site
#
sample.site.disabled=false
#
# Download Service Cleanup
#
download.cleaner.startDelayMins=60
download.cleaner.repeatIntervalMins=60
download.cleaner.maxAgeMins=60
# enable QuickShare - if false then the QuickShare-specific REST APIs will return 403 Forbidden
system.quickshare.enabled=true
#
# Download Service Limits, in bytes
#
download.maxContentSize=2152852358
# Maximum number of items shown when viewing the trashcan
#
trashcan.MaxSize=1000
#
# Use bridge tables for caching authority evaluation.
#
authority.useBridgeTable=true
# Outbound Mail
mail.service.corePoolSize=8
mail.service.maximumPoolSize=20
# OpenCMIS
# URL generation overrides
# if true, the context path of OpenCMIS generated urls will be set to "opencmis.context.value", otherwise it will be taken from the request url
opencmis.context.override=false
opencmis.context.value=
# if true, the servlet path of OpenCMIS generated urls will be set to "opencmis.servletpath.value", otherwise it will be taken from the request url
opencmis.servletpath.override=false
opencmis.servletpath.value=
opencmis.server.override=false
opencmis.server.value=
nodes.bulkLoad.cachingThreshold=10
# Multi-Tenancy
# if "dir.contentstore.tenants" is set then
# tenants are not co-mingled and all content roots will appear below this container (in sub-folder)
# and when creating a tenant the "contentRootPath" (root content store directory for a given tenant) will be ignored
dir.contentstore.tenants=
# Gateway Authentication
# gateway authentication is disabled if empty host is specified
alfresco.authentication.gateway.host=
alfresco.authentication.gateway.protocol=https
alfresco.authentication.gateway.port=443
alfresco.authentication.gateway.outboundHeaders=Authorization,key
alfresco.authentication.gateway.inboundHeaders=X-Alfresco-Authenticator-Key,X-Alfresco-Remote-User
alfresco.authentication.gateway.prefixUrl=/publicapi
alfresco.authentication.gateway.bufferSize=2048
alfresco.authentication.gateway.connectTimeout=10000
alfresco.authentication.gateway.readTimeout=120000
alfresco.authentication.gateway.httpTcpNodelay=true
alfresco.authentication.gateway.httpConnectionStalecheck=true
# webscripts config
webscripts.encryptTempFiles=false
webscripts.tempDirectoryName=WebScripts
# 4 MB (4 * 1024 * 1024 bytes)
webscripts.memoryThreshold=4194304
# 4 GB (4 * 1024 * 1024 * 1024 bytes)
webscripts.setMaxContentSize=4294967296
# Property to enable index upgrade for metadata query (MDQ)
#
# The indexes are not added unless this value is changed
# Adding each of the supporting indexes may take several hours depending on the size of the database.
# The required indexes may be added in stages.
# See: classpath:alfresco/dbscripts/upgrade/4.2/${db.script.dialect}/metadata-query-indexes.sql
system.metadata-query-indexes.ignored=true
#
# Do we defer running the shared folder patch?
#
system.patch.sharedFolder.deferred=false
# The default value schedules the job to run in 2030, i.e. effectively never.
system.patch.sharedFolder.cronExpression=0 0 0 ? 1 1 2030
#
# Use a canned query for people searches when " [hint:useCQ]" is provided in the search term
#
people.search.honor.hint.useCQ=true
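# Illustrative example: a search term such as "jones [hint:useCQ]" (with the leading space
# before the hint, as quoted above) would be answered with the canned query.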