Compare commits

...

59 Commits

Author SHA1 Message Date
bamboo_auth
13b4d213dd [maven-release-plugin] prepare release 1.2.0 2018-08-14 23:02:09 +00:00
Michael
3752ea74c4 Search-1029, updated solrconfig to have the correct url format for the master slave setup 2018-08-14 14:32:43 +01:00
Paul Brodner
d273afb2ac Merge pull request #39 from Alfresco/fix/whitesource-cleanup
cleanup remote temporary scan folder
2018-08-14 11:58:56 +03:00
Paul Brodner
185b09c086 cleanup remote temporary scan folder 2018-08-14 11:57:57 +03:00
bamboo_auth
1a5b0a85d4 [maven-release-plugin] prepare for next development iteration 2018-08-09 13:14:54 +00:00
bamboo_auth
c8ead7da49 [maven-release-plugin] prepare release paul-master-1.2.0 2018-08-09 13:14:47 +00:00
Paul Brodner
4234969f75 Merge pull request #36 from Alfresco/fix/SEARCH-1017-merge-tas
SEARCH-1027 capability to run TAS tests
2018-08-09 14:24:53 +03:00
Paul Brodner
9186b8f9af enabling whiteSource scan only on master and release branches 2018-08-09 14:01:13 +03:00
Paul Brodner
9033106734 rename run.sh as start-alfresco.sh 2018-08-09 13:54:36 +03:00
Paul Brodner
8d2626aecc reset versions 2018-08-09 12:09:06 +03:00
bamboo_auth
48a1c8bc62 [maven-release-plugin] prepare for next development iteration 2018-08-09 08:55:27 +01:00
bamboo_auth
04c77dfa18 [maven-release-plugin] prepare release paulb-master1-1.1.0 2018-08-09 08:55:20 +01:00
Paul Brodner
6d14fffa65 update argument order 2018-08-08 15:46:56 +03:00
Paul Brodner
92b9183f9c remove unnecessary cd 2018-08-08 14:57:07 +03:00
Paul Brodner
96d078f0b5 add option to change docker-resource path 2018-08-08 14:11:55 +03:00
Paul Brodner
aa6f7d686a adding capability to override the alfresco endpoint 2018-08-08 13:12:05 +03:00
Paul Brodner
680b5cf562 using env bash for elastic agents 2018-08-08 13:03:06 +03:00
Paul Brodner
5c531eaf00 fix cleanup 2018-08-08 12:59:41 +03:00
Paul Brodner
bdaed92925 removing realpath on find 2018-08-08 12:48:48 +03:00
Paul Brodner
8c0e56dd63 adding info regarding arguments passed for debugging purposes 2018-08-08 12:46:20 +03:00
Paul Brodner
28cadb9cf5 adding cleanup 2018-08-08 12:41:49 +03:00
Paul Brodner
e8b8a090bf remove unnecessary spaces 2018-08-08 11:43:29 +03:00
Paul Brodner
9e9436f786 init run.sh script 2018-08-08 11:42:33 +03:00
Paul Brodner
53c101c197 Merge pull request #34 from Alfresco/fix/seach-1008-scan
whitesource-package-scan.sh gets the version automatically from pom.xml
2018-08-07 13:26:19 +03:00
Paul Brodner
d367fdae9b automatically get the version from pom.xml
delete temporary scan folders
2018-08-07 12:29:11 +03:00
Tuna Aksoy
76da52aa33 SEARCH-1003: Sonar OWASP issues - Possible null pointer dereference i… (#33)
* SEARCH-1003: Sonar OWASP issues - Possible null pointer dereference in method on exception path

* SEARCH-1003: Sonar OWASP issues - Method may fail to close stream

* SEARCH-1003: Sonar OWASP issues - Possible null pointer dereference due to return value of called method

* SEARCH-1003: Sonar OWASP issues - Value is null and guaranteed to be dereferenced on exception path
2018-08-03 09:22:08 +01:00
Joel
6190083cf4 SEARCH-1001: Add javadoc 2018-08-01 17:42:44 +01:00
Joel
86f60d9ea2 SEARCH-902: WIP passing unit tests 2018-08-01 17:07:11 +01:00
Joel
8064188e38 SEARCH-902: WIP Solr6 content tracker doesn't fetch new/updated content 2018-08-01 17:07:08 +01:00
Michael
23f62bd57b Revert "SEARCH-1001: Add javadoc"
This reverts commit dc94c779b0.
2018-08-01 17:05:01 +01:00
Michael
05185d826c Revert "Search-902, Cherry picked 1.1.1.1 fix into master"
This reverts commit a13bd1d29b.
2018-08-01 17:04:51 +01:00
Joel
ff14bad807 Search-902, Cherry picked 1.1.1.1 fix into master 2018-08-01 04:18:36 +01:00
Joel
f64ebdd30c SEARCH-1001: Add javadoc 2018-07-30 12:53:06 -04:00
Paul Brodner
8cc4b61474 Merge pull request #32 from Alfresco/fix/SEARCH-847-scan
fix SEARCH-847 add shell script for scanning released distribution
2018-07-20 15:39:19 +03:00
Paul Brodner
12ef1d95e8 whitesource scanning and cleaning up 2018-07-20 11:40:08 +03:00
Joel Bernstein
c191776593 Merge pull request #31 from Alfresco/fix/SEARCH-965
SEARCH-965: Guard against NPE in CacheDocTransformer
2018-07-18 11:10:55 -04:00
Joel
b802e2463e SEARCH-965: Guard against NPE in CacheDocTransformer 2018-07-18 10:55:49 -04:00
Michael
ee4a91dc97 Merge pull request #30 from Alfresco/fix/SEARCH-947
Fix/search 947
2018-07-12 11:57:34 +01:00
Michael
a498f3188c SEARCH-000, added comment to signal end of a file and placeholder for maven-replacer 2018-07-04 21:43:08 +01:00
Michael
fc3f9bddf2 SEARCH-947, fixed solr startup to work when tracking is disabled. 2018-07-03 13:47:27 +01:00
Michael
03056db4ca Merge branch 'master' of https://github.com/Alfresco/SearchServices 2018-07-03 13:39:24 +01:00
bamboo_auth
7886e32ee0 [maven-release-plugin] prepare for next development iteration 2018-06-26 14:09:34 +00:00
bamboo_auth
ed0b4d565b [maven-release-plugin] prepare release 1.2.0-RC1 2018-06-26 14:09:27 +00:00
Michael
80e3fb6aa2 Merge pull request #29 from Alfresco/feature/SEARCH-860_Search_Services_artifacts_are_not_deployed_to_correct_repositories
SEARCH-860 (Search Services artifacts are not deployed to our public …
2018-06-26 13:25:51 +01:00
Tuna Aksoy
1f68e78b11 SEARCH-860 (Search Services artifacts are not deployed to our public snapshots repository) 2018-06-26 13:18:04 +01:00
Michael
84afeaa287 Merge pull request #27 from Alfresco/fix/SEARCH-904
SEARCH-904: Add facet_module to /select handler
2018-06-14 07:36:29 +01:00
Joel
bbce452c95 SEARCH-904: Add facet_module to /select handler 2018-06-13 19:46:36 -04:00
Michael
54cde0505a Merge branch 'master' of https://github.com/Alfresco/SearchServices 2018-04-16 16:23:49 +01:00
Tuna Aksoy
4a56f5eee4 Updated docker image path for Alfresco 2018-04-16 15:27:06 +01:00
Tuna Aksoy
1cd934221d SEARCH-762: Fixed script (#25) 2018-04-06 12:03:59 +01:00
bamboo_auth
fd7018607e [maven-release-plugin] prepare for next development iteration 2018-04-05 10:38:35 +00:00
bamboo_auth
56e270b71f [maven-release-plugin] prepare release 1.2.0-dockerTest7 2018-04-05 10:38:29 +00:00
bamboo_auth
9e33a1e5b2 [maven-release-plugin] prepare for next development iteration 2018-04-04 18:58:41 +00:00
bamboo_auth
0583055259 [maven-release-plugin] prepare release 1.2.0-dockerTest6 2018-04-04 18:58:35 +00:00
bamboo_auth
1f8511a100 [maven-release-plugin] prepare for next development iteration 2018-04-04 17:03:29 +00:00
bamboo_auth
c2ec139aee [maven-release-plugin] prepare release 1.2.0-dockerTest4 2018-04-04 17:03:23 +00:00
bamboo_auth
348ea3ce84 [maven-release-plugin] prepare for next development iteration 2018-04-04 16:44:51 +00:00
bamboo_auth
2fbb8b91b9 [maven-release-plugin] prepare release 1.2.0-dockerTest3 2018-04-04 16:44:45 +00:00
bamboo_auth
3114cb2cf7 [maven-release-plugin] prepare for next development iteration 2018-04-04 16:20:50 +00:00
30 changed files with 644 additions and 184 deletions

View File

@@ -6,7 +6,7 @@
<parent>
<groupId>org.alfresco</groupId>
<artifactId>alfresco-search-parent</artifactId>
<version>1.2.0-dockerTest2</version>
<version>1.2.0</version>
<relativePath>../pom.xml</relativePath>
</parent>

View File

@@ -19,13 +19,49 @@
package org.alfresco.solr;
import static org.alfresco.solr.HandlerOfResources.extractCustomProperties;
import static org.alfresco.solr.HandlerOfResources.getSafeBoolean;
import static org.alfresco.solr.HandlerOfResources.getSafeLong;
import static org.alfresco.solr.HandlerOfResources.openResource;
import static org.alfresco.solr.HandlerOfResources.updatePropertiesFile;
import static org.alfresco.solr.HandlerOfResources.updateSharedProperties;
import static org.alfresco.solr.HandlerReportBuilder.addCoreSummary;
import static org.alfresco.solr.HandlerReportBuilder.buildAclReport;
import static org.alfresco.solr.HandlerReportBuilder.buildAclTxReport;
import static org.alfresco.solr.HandlerReportBuilder.buildNodeReport;
import static org.alfresco.solr.HandlerReportBuilder.buildTrackerReport;
import static org.alfresco.solr.HandlerReportBuilder.buildTxReport;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import org.alfresco.error.AlfrescoRuntimeException;
import org.alfresco.httpclient.AuthenticationException;
import org.alfresco.service.cmr.repository.StoreRef;
import org.alfresco.solr.adapters.IOpenBitSet;
import org.alfresco.solr.client.SOLRAPIClientFactory;
import org.alfresco.solr.config.ConfigUtil;
import org.alfresco.solr.tracker.*;
import org.alfresco.solr.tracker.AclTracker;
import org.alfresco.solr.tracker.DBIDRangeRouter;
import org.alfresco.solr.tracker.DocRouter;
import org.alfresco.solr.tracker.IndexHealthReport;
import org.alfresco.solr.tracker.MetadataTracker;
import org.alfresco.solr.tracker.SolrTrackerScheduler;
import org.alfresco.solr.tracker.Tracker;
import org.alfresco.solr.tracker.TrackerRegistry;
import org.alfresco.util.shard.ExplicitShardingPolicy;
import org.apache.commons.codec.EncoderException;
import org.apache.commons.httpclient.MultiThreadedHttpConnectionManager;
@@ -44,15 +80,6 @@ import org.json.JSONException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.lang.reflect.Method;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import static org.alfresco.solr.HandlerOfResources.*;
import static org.alfresco.solr.HandlerReportBuilder.*;
public class AlfrescoCoreAdminHandler extends CoreAdminHandler
{
protected static final Logger log = LoggerFactory.getLogger(AlfrescoCoreAdminHandler.class);
@@ -540,7 +567,10 @@ public class AlfrescoCoreAdminHandler extends CoreAdminHandler
properties.setProperty("alfresco.stores", storeRef.toString());
//Potentially override the defaults
properties.load(new FileInputStream(config));
try (FileInputStream fileInputStream = new FileInputStream(config))
{
properties.load(fileInputStream);
}
//Don't override these
properties.setProperty("alfresco.template", templateName);
@@ -559,8 +589,11 @@ public class AlfrescoCoreAdminHandler extends CoreAdminHandler
properties.putAll(extraProperties);
}
properties.store(new FileOutputStream(config), null);
try (FileOutputStream fileOutputStream = new FileOutputStream(config))
{
properties.store(fileOutputStream, null);
}
SolrCore core = coreContainer.create(coreName, newCore.toPath(), new HashMap<String, String>(), false);
rsp.add("core", core.getName());
}
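The two hunks above replace bare stream constructors with try-with-resources, the standard remedy for Sonar's "Method may fail to close stream" finding (commit 76da52aa33). A minimal standalone sketch of the pattern, independent of the Solr classes in the diff:

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Properties;

class PropertiesRoundTrip
{
    // Load and re-store a properties file. Each stream is scoped to its
    // own try block and is closed automatically when the block exits,
    // even if load() or store() throws.
    static void rewrite(File config) throws IOException
    {
        Properties properties = new Properties();
        try (FileInputStream in = new FileInputStream(config))
        {
            properties.load(in);
        }
        try (FileOutputStream out = new FileOutputStream(config))
        {
            properties.store(out, null);
        }
    }
}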

View File

@@ -21,14 +21,19 @@
*/
package org.alfresco.solr;
import org.apache.commons.io.FileUtils;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.core.SolrCore;
import java.io.*;
import java.util.*;
/**
* Methods taken from AlfrescoCoreAdminHandler that deal with I/O resources
@@ -126,7 +131,10 @@ public class HandlerOfResources {
properties.putAll(extraProperties);
}
properties.store(new FileOutputStream(config), "Generated from Solr");
try (FileOutputStream fileOutputStream = new FileOutputStream(config))
{
properties.store(fileOutputStream, "Generated from Solr");
}
} // FileInputStream is closed
catch (IOException e)
{

View File

@@ -18,6 +18,56 @@
*/
package org.alfresco.solr;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_ACLID;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_ACLTXCOMMITTIME;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_ACLTXID;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_ANAME;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_ANCESTOR;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_APATH;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_ASPECT;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_ASSOCTYPEQNAME;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_CASCADE_FLAG;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_DBID;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_DENIED;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_DOC_TYPE;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_EXCEPTION_MESSAGE;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_EXCEPTION_STACK;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_FIELDS;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_FTSSTATUS;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_GEO;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_INACLTXID;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_INTXID;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_ISNODE;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_LID;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_NPATH;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_NULLPROPERTIES;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_OWNER;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_PARENT;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_PARENT_ASSOC_CRC;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_PATH;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_PNAME;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_PRIMARYASSOCQNAME;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_PRIMARYASSOCTYPEQNAME;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_PRIMARYPARENT;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_PROPERTIES;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_QNAME;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_READER;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_SITE;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_SOLR4_ID;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_S_ACLTXCOMMITTIME;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_S_ACLTXID;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_S_INACLTXID;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_S_INTXID;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_S_TXCOMMITTIME;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_S_TXID;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_TAG;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_TAG_SUGGEST;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_TENANT;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_TXCOMMITTIME;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_TXID;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_TYPE;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_VERSION;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
@@ -47,6 +97,8 @@ import java.util.regex.Pattern;
import com.carrotsearch.hppc.IntArrayList;
import com.carrotsearch.hppc.LongHashSet;
import com.carrotsearch.hppc.cursors.LongCursor;
import org.alfresco.httpclient.AuthenticationException;
import org.alfresco.model.ContentModel;
import org.alfresco.opencmis.dictionary.CMISStrictDictionaryService;
@@ -149,8 +201,6 @@ import org.slf4j.LoggerFactory;
import org.springframework.extensions.surf.util.I18NUtil;
import org.springframework.util.FileCopyUtils;
import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.*;
/**
* This is the Solr4 implementation of the information server (index).
* @author Ahmed Owian
@@ -752,37 +802,37 @@ public class SolrInformationServer implements InformationServer
//System.out.println("################ Transaction floor:"+txnFloor);
//Find the next N transactions
collector = TopFieldCollector.create(new Sort(new SortField(FIELD_INTXID, SortField.Type.LONG)),
rows,
null,
false,
false,
false);
//Query for dirty or new nodes
termQuery1 = new TermQuery(new Term(FIELD_FTSSTATUS, FTSStatus.Dirty.toString()));
termQuery2 = new TermQuery(new Term(FIELD_FTSSTATUS, FTSStatus.New.toString()));
clause1 = new BooleanClause(termQuery1, BooleanClause.Occur.SHOULD);
clause2 = new BooleanClause(termQuery2, BooleanClause.Occur.SHOULD);
builder = new BooleanQuery.Builder();
builder.add(clause1);
builder.add(clause2);
orQuery = builder.build();
delegatingCollector = new TxnFloorFilter(txnFloor, cleanContentCache);
delegatingCollector.setLastDelegate(collector);
TermQuery txnQuery = new TermQuery(new Term(FIELD_DOC_TYPE, DOC_TYPE_TX));
searcher.search(txnQuery, delegatingCollector);
TopDocs docs = collector.topDocs();
//System.out.println("############### Next N transactions ################:" + docs.totalHits);
//The TxnCollector collects the transaction ids from the matching documents
//The txnIds are limited to a range >= the txnFloor and < an arbitrary transaction ceiling.
TxnCollector txnCollector = new TxnCollector(txnFloor);
searcher.search(orQuery, txnCollector);
LongHashSet txnSet = txnCollector.getTxnSet();
//System.out.println("############### Next N transactions ################:" + txnSet.size());
if (collector.getTotalHits() == 0)
if(txnSet.size() == 0)
{
//No new transactions to consider
//This should really never be the case, at a minimum the transaction floor should be collected.
return docIds;
}
leaves = searcher.getTopReaderContext().leaves();
FieldType fieldType = searcher.getSchema().getField(FIELD_INTXID).getType();
builder = new BooleanQuery.Builder();
for (ScoreDoc scoreDoc : docs.scoreDocs)
Iterator<LongCursor> it = txnSet.iterator();
while (it.hasNext())
{
index = ReaderUtil.subIndex(scoreDoc.doc, leaves);
context = leaves.get(index);
longs = context.reader().getNumericDocValues(FIELD_INTXID);
long txnID = longs.get(scoreDoc.doc - context.docBase);
LongCursor cursor = it.next();
long txnID = cursor.value;
//Build up the query for the filter of transactions we need to pull the dirty content for.
TermQuery txnIDQuery = new TermQuery(new Term(FIELD_INTXID, fieldType.readableToIndexed(Long.toString(txnID))));
builder.add(new BooleanClause(txnIDQuery, BooleanClause.Occur.SHOULD));
@@ -790,7 +840,6 @@ public class SolrInformationServer implements InformationServer
BooleanQuery txnFilterQuery = builder.build();
//Get the docs with dirty content for the transactions gathered above.
TermQuery statusQuery1 = new TermQuery(new Term(FIELD_FTSSTATUS, FTSStatus.Dirty.toString()));
@@ -851,7 +900,10 @@ public class SolrInformationServer implements InformationServer
}
finally
{
refCounted.decref();
if (refCounted != null)
{
refCounted.decref();
}
}
}
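The null check before decref() in the finally block recurs at every searcher acquisition below. A hedged sketch of the acquire/release idiom with Solr's RefCounted searcher handle; the maxDoc() call merely stands in for whatever work happens between acquire and release:

import org.apache.solr.core.SolrCore;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.util.RefCounted;

class SearcherAccess
{
    // The reference starts as null: if getSearcher() itself throws,
    // the finally block must not dereference it.
    static int maxDoc(SolrCore core)
    {
        RefCounted<SolrIndexSearcher> refCounted = null;
        try
        {
            refCounted = core.getSearcher();
            return refCounted.get().maxDoc();
        }
        finally
        {
            if (refCounted != null)
            {
                refCounted.decref();
            }
        }
    }
}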
@@ -972,8 +1024,14 @@ public class SolrInformationServer implements InformationServer
}
finally
{
active.decref();
newest.decref();
if (active != null)
{
active.decref();
}
if (newest != null)
{
newest.decref();
}
}
}
processor.processCommit(command);
@@ -1288,7 +1346,10 @@ public class SolrInformationServer implements InformationServer
}
finally
{
refCounted.decref();
if (refCounted != null)
{
refCounted.decref();
}
}
return errorDocIds;
}
@@ -1439,7 +1500,23 @@ public class SolrInformationServer implements InformationServer
/*
* Choose the max between the last commit time in the index and the last time the tracker started.
* Hole retention is applied to both. *
* Hole retention is applied to both.
*
* This logic is very tricky and very important to understand.
*
* state.getLastGoodTxCommitTimeInIndex() is used to determine where to start pulling transactions from the repo on the
* current tracker run.
*
* If we simply take the current value of state.getLastIndexedTxCommitTime() we have the following problem:
*
* If no data is added to the repo for a long period of time, state.getLastIndexedTxCommitTime() never moves forward. This causes the
* loop inside MetadataTracker.getSomeTransactions() to hammer the repo as the time between state.getLastIndexedTxCommitTime()
* and state.setTimeToStopIndexing increases.
*
* To resolve this we choose the max between the last commit time in the index and the last time the tracker started. In theory
* if we start looking for transactions after the last tracker was started (and apply hole retention), we should never miss a
* transaction, or at least ensure that the principle behind hole retention is respected. This theory should be closely looked at if
* the trackers ever lose data.
*/
timeBeforeWhichThereCanBeNoTxHolesInIndex = Math.max(timeBeforeWhichThereCanBeNoTxHolesInIndex, lastStartTimeWhichThereCanBeNoTxHolesInIndex);
@@ -2148,7 +2225,10 @@ public class SolrInformationServer implements InformationServer
}
finally
{
refCounted.decref();
if (refCounted != null)
{
refCounted.decref();
}
}
//System.out.println("################ CHILD IDs:"+childIds.size());
@@ -2293,7 +2373,10 @@ public class SolrInformationServer implements InformationServer
}
finally
{
refCounted.decref();
if (refCounted != null)
{
refCounted.decref();
}
}
List<NodeMetaData> allNodeMetaDatas = new ArrayList();
@@ -3785,7 +3868,10 @@ public class SolrInformationServer implements InformationServer
}
finally
{
refCounted.decref();
if (refCounted != null)
{
refCounted.decref();
}
}
}
@@ -3827,7 +3913,10 @@ public class SolrInformationServer implements InformationServer
}
finally
{
refCounted.decref();
if (refCounted != null)
{
refCounted.decref();
}
}
}
}
@@ -3864,7 +3953,10 @@ public class SolrInformationServer implements InformationServer
}
finally
{
refCounted.decref();
if (refCounted != null)
{
refCounted.decref();
}
}
}
}
@@ -4167,6 +4259,48 @@ public class SolrInformationServer implements InformationServer
}
}
class TxnCollector extends DelegatingCollector
{
private NumericDocValues currentLongs;
private long txnFloor;
private long txnCeil;
private LongHashSet txnSet = new LongHashSet(1000);
public TxnCollector(long txnFloor)
{
this.txnFloor = txnFloor;
this.txnCeil = txnFloor+500;
}
public void doSetNextReader(LeafReaderContext context) throws IOException
{
currentLongs = context.reader().getNumericDocValues(FIELD_INTXID);
}
public boolean needsScores() {
return false;
}
public void collect(int doc) throws IOException
{
long txnId = currentLongs.get(doc);
//System.out.println("########### Floor Filter #############:"+doc+":"+txnId);
if(txnId >= txnFloor && txnId < txnCeil)
{
txnSet.add(txnId);
}
}
public LongHashSet getTxnSet() {
return txnSet;
}
}
class LRU extends LinkedHashMap
{
private int maxSize;
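The LRU class above is cut off at the hunk boundary, with only the maxSize field visible. For reference, a size-bounded LRU in the standard LinkedHashMap style that such a class typically follows; everything beyond the maxSize field is an assumption rather than the committed code:

import java.util.LinkedHashMap;
import java.util.Map;

class Lru<K, V> extends LinkedHashMap<K, V>
{
    private final int maxSize; // the only field visible in the diff

    Lru(int maxSize)
    {
        super(16, 0.75f, true); // access-order, so gets refresh recency
        this.maxSize = maxSize;
    }

    // LinkedHashMap calls this after every insert; returning true
    // evicts the least recently used entry once the cap is exceeded.
    @Override
    protected boolean removeEldestEntry(Map.Entry<K, V> eldest)
    {
        return size() > maxSize;
    }
}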

View File

@@ -66,12 +66,23 @@ public class SolrCoreLoadRegistration {
{
TrackerRegistry trackerRegistry = adminHandler.getTrackerRegistry();
Properties props = new CoreDescriptorDecorator(core.getCoreDescriptor()).getProperties();
//Prepare cores
SolrResourceLoader loader = core.getLatestSchema().getResourceLoader();
SolrKeyResourceLoader keyResourceLoader = new SolrKeyResourceLoader(loader);
SOLRAPIClientFactory clientFactory = new SOLRAPIClientFactory();
SOLRAPIClient repositoryClient = clientFactory.getSOLRAPIClient(props, keyResourceLoader,
AlfrescoSolrDataModel.getInstance().getDictionaryService(CMISStrictDictionaryService.DEFAULT),
AlfrescoSolrDataModel.getInstance().getNamespaceDAO());
//Start content store
SolrContentStore contentStore = new SolrContentStore(coreContainer.getSolrHome());
SolrInformationServer srv = new SolrInformationServer(adminHandler, core, repositoryClient, contentStore);
props.putAll(srv.getProps());
adminHandler.getInformationServers().put(coreName, srv);
log.info("Starting to track " + coreName);
if (Boolean.parseBoolean(props.getProperty("enable.alfresco.tracking", "false")))
{
SolrTrackerScheduler scheduler = adminHandler.getScheduler();
SolrResourceLoader loader = core.getLatestSchema().getResourceLoader();
SolrKeyResourceLoader keyResourceLoader = new SolrKeyResourceLoader(loader);
if (trackerRegistry.hasTrackersForCore(coreName))
{
log.info("Trackers for " + coreName+ " is already registered, shutting them down.");
@@ -80,17 +91,7 @@ public class SolrCoreLoadRegistration {
adminHandler.getInformationServers().remove(coreName);
}
SOLRAPIClientFactory clientFactory = new SOLRAPIClientFactory();
SOLRAPIClient repositoryClient = clientFactory.getSOLRAPIClient(props, keyResourceLoader,
AlfrescoSolrDataModel.getInstance().getDictionaryService(CMISStrictDictionaryService.DEFAULT),
AlfrescoSolrDataModel.getInstance().getNamespaceDAO());
//Start content store
SolrContentStore contentStore = new SolrContentStore(coreContainer.getSolrHome());
SolrInformationServer srv = new SolrInformationServer(adminHandler, core, repositoryClient, contentStore);
props.putAll(srv.getProps());
adminHandler.getInformationServers().put(coreName, srv);
log.info("Starting to track " + coreName);
ModelTracker mTracker = null;
// Prevents other threads from registering the ModelTracker at the same time

View File

@@ -82,15 +82,29 @@ public abstract class AbstractAuthoritySetQuery extends Query
return authorities.hashCode();
}
/*
* This method collects the bitset of documents that match the authorities.
*/
protected HybridBitSet getACLSet(String[] auths, String field, SolrIndexSearcher searcher) throws IOException
{
/*
* Build a query that matches the authorities with a field in the ACL records in the index.
*/
BooleanQuery.Builder queryBuilder = new BooleanQuery.Builder();
for(String current : auths)
{
queryBuilder.add(new TermQuery(new Term(field, current)), BooleanClause.Occur.SHOULD);
}
//NOTE: this query will be in the filter cache. Ideally it would remain cached throughout the users session.
/*
* Collect a docset containing the ACL records that match the query.
* This query will be in the filter cache. Ideally it would remain cached throughout the user's session.
*/
DocSet docSet = searcher.getDocSet(queryBuilder.build());
DocIterator iterator = docSet.iterator();
@@ -102,6 +116,12 @@ public abstract class AbstractAuthoritySetQuery extends Query
//TODO : makes this configurable. For some systems this is huge and for others not big enough.
HybridBitSet hybridBitSet = new HybridBitSet(60000000);
/*
* Collect the ACLIDs from the matching ACL records.
* This is done in a separate step so the initial ACL query can be cached in the FilterCache.
* The initial ACL query may be expensive if the number of authorities is very large.
*/
List<LeafReaderContext> leaves = searcher.getTopReaderContext().leaves();
LeafReaderContext context = leaves.get(0);
NumericDocValues aclValues = DocValuesCache.getNumericDocValues(QueryConstants.FIELD_ACLID, context.reader());
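The two-phase approach these comments describe, a cacheable DocSet query followed by a DocValues pass, can be sketched against the Lucene 5.x-era API this codebase uses. A plain Set<Long> stands in for Alfresco's HybridBitSet, whose API is not shown in the diff:

import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.search.Query;
import org.apache.solr.search.DocIterator;
import org.apache.solr.search.DocSet;
import org.apache.solr.search.SolrIndexSearcher;

class AclIdCollector
{
    // Phase 1: run the authority query through getDocSet so the result
    // lands in, and can later be answered from, Solr's filter cache.
    // Phase 2: walk the matching docs and read their ACL id doc values.
    static Set<Long> collectAclIds(SolrIndexSearcher searcher, Query authorityQuery, String aclIdField) throws IOException
    {
        DocSet docSet = searcher.getDocSet(authorityQuery);
        Set<Long> aclIds = new HashSet<>();
        List<LeafReaderContext> leaves = searcher.getTopReaderContext().leaves();
        DocIterator it = docSet.iterator();
        while (it.hasNext())
        {
            int doc = it.nextDoc(); // global doc id
            LeafReaderContext leaf = leaves.get(ReaderUtil.subIndex(doc, leaves));
            NumericDocValues values = leaf.reader().getNumericDocValues(aclIdField);
            if (values != null)
            {
                aclIds.add(values.get(doc - leaf.docBase)); // segment-local lookup
            }
        }
        return aclIds;
    }
}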

View File

@@ -81,8 +81,9 @@ public class AlfrescoFTSQParserPlugin extends QParserPlugin
rerankPhase = RerankPhase.valueOf(arg.toString());
}
//First check the System property.
//Then check solrcore.properties, defaulting to the postFilter.
/*
* This turns on the postFilter
*/
postfilter = Boolean.parseBoolean(System.getProperty("alfresco.postfilter",
req.getCore().getCoreDescriptor().getCoreProperty("alfresco.postfilter",
@@ -109,7 +110,11 @@ public class AlfrescoFTSQParserPlugin extends QParserPlugin
if(authset && postfilter)
{
//Return the PostFilter
/*
* The cost of 200 turns on the postfilter inside Solr
* The postfilter query pulls out all the post filters in the
* query and applies them.
*/
return new PostFilterQuery(200, query);
}

View File

@@ -563,9 +563,17 @@ public class Solr4QueryParser extends QueryParser implements QueryConstants
return createReaderSetQuery(queryText);
} else if (field.equals(FIELD_AUTHORITY))
{
/*
* ACL DOCUMENTATION STARTS HERE
* This creates the query that applies the ACL filter
*/
return createAuthorityQuery(queryText);
} else if (field.equals(FIELD_AUTHORITYSET))
{
/*
* ACL DOCUMENTATION STARTS HERE
* This creates the query that applies the ACL filter
*/
return createAuthoritySetQuery(queryText);
} else if (field.equals(FIELD_DENIED))
{
@@ -837,7 +845,10 @@ public class Solr4QueryParser extends QueryParser implements QueryConstants
}catch(Exception e) {
logger.error(e);
} finally {
executorService.shutdown();
if (executorService != null)
{
executorService.shutdown();
}
}
return fingerPrint;
}
@@ -870,8 +881,14 @@ public class Solr4QueryParser extends QueryParser implements QueryConstants
NamedList fingerprint = (NamedList) dataResponse.get("fingerprint");
return (Collection)fingerprint.get("MINHASH");
} finally {
closeableHttpClient.close();
solrClient.close();
if (closeableHttpClient != null)
{
closeableHttpClient.close();
}
if (solrClient != null)
{
solrClient.close();
}
}
}

View File

@@ -108,6 +108,21 @@ public class SolrAuthoritySetQuery extends AbstractAuthoritySetQuery implements
}
}
/*
* ACL PostFilter
*
* The getFilterCollector function returns a DelegatingCollector
* which is used to filter the documents that match the query.
*
* A delegating collector wraps the TopDocs Collector which gathers the top documents that
* match a query. A delegating collector can filter the documents before "delegating" to the TopDocs
* collector. This filtering process is where the ACL logic is applied.
*
* The getFilterCollector method sets up the data structures needed to apply the ACL rules.
* These data structures are then passed to the access control collectors.
*
*/
public DelegatingCollector getFilterCollector(IndexSearcher searcher)
{
@@ -136,7 +151,19 @@ public class SolrAuthoritySetQuery extends AbstractAuthoritySetQuery implements
try
{
/*
* Collect the ACLIDs that match the authorities.
* This is done by querying the ACL records in the index. See the method for more
* documentation on this query.
*/
HybridBitSet aclSet = getACLSet(auths, QueryConstants.FIELD_READER, solrIndexSearcher);
/*
* Collect the documents that the user owns.
*/
BitsFilter ownerFilter = getOwnerFilter(auths, solrIndexSearcher);
if (globalReaders.contains(PermissionService.OWNER_AUTHORITY))
@@ -251,6 +278,11 @@ public class SolrAuthoritySetQuery extends AbstractAuthoritySetQuery implements
}
}
/*
* The AccessControlCollector applies the ACL logic given the aclIds and ownerFilter
*/
class AccessControlCollector extends DelegatingCollector
{
private HybridBitSet aclIds;
@@ -276,6 +308,12 @@ public class SolrAuthoritySetQuery extends AbstractAuthoritySetQuery implements
this.ownerDocs = ownerFilter.getBitSets().get(context.ord);
}
/*
* The collect method is applied to each document that matches the
* query. The document's aclId must be in the set of aclIds passed into the collector,
* or the document's id must be in the ownerDocs.
*/
public void collect(int doc) throws IOException
{
long aclId = this.fieldValues.get(doc);
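The collector is truncated by the hunk boundary. A hedged sketch of the DelegatingCollector filtering idiom described above, again with Set<Long> standing in for HybridBitSet and, for simplicity, a single-segment FixedBitSet for the owner docs (the real class resolves one bitset per segment via context.ord):

import java.io.IOException;
import java.util.Set;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.util.FixedBitSet;
import org.apache.solr.search.DelegatingCollector;

class AccessControlSketch extends DelegatingCollector
{
    private final Set<Long> aclIds;      // readable ACLs for the user
    private final FixedBitSet ownerDocs; // docs the user owns (single-segment sketch)
    private final String aclIdField;
    private NumericDocValues fieldValues;

    AccessControlSketch(Set<Long> aclIds, FixedBitSet ownerDocs, String aclIdField)
    {
        this.aclIds = aclIds;
        this.ownerDocs = ownerDocs;
        this.aclIdField = aclIdField;
    }

    @Override
    public void doSetNextReader(LeafReaderContext context) throws IOException
    {
        super.doSetNextReader(context); // keeps the wrapped collector in step
        fieldValues = context.reader().getNumericDocValues(aclIdField);
    }

    @Override
    public void collect(int doc) throws IOException
    {
        long aclId = fieldValues.get(doc);
        // Forward to the wrapped TopDocs collector only when readable.
        if (aclIds.contains(aclId) || (ownerDocs != null && ownerDocs.get(doc)))
        {
            super.collect(doc);
        }
    }
}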

View File

@@ -139,22 +139,25 @@ public class Solr4X509ServletFilter extends X509ServletFilterBase
private void findCores(File dir, List<File> cores)
{
File[] files = dir.listFiles();
for(File file : files)
if (files != null)
{
if(file.isDirectory())
for (File file : files)
{
findCores(file, cores);
}
else
{
if("core.properties".equals(file.getName()))
if (file.isDirectory())
{
if (logger.isDebugEnabled())
findCores(file, cores);
}
else
{
if ("core.properties".equals(file.getName()))
{
logger.debug("Found core:" + dir.getAbsolutePath());
if (logger.isDebugEnabled())
{
logger.debug("Found core:" + dir.getAbsolutePath());
}
cores.add(dir);
}
cores.add(dir);
}
}
}
@@ -167,47 +170,52 @@ public class Solr4X509ServletFilter extends X509ServletFilterBase
private void collectSecureComms(File base, Set<String> secureCommsSet) throws IOException
{
File[] files = base.listFiles();
for(File file : files)
if (files != null)
{
if(file.isDirectory())
for (File file : files)
{
collectSecureComms(file, secureCommsSet);
}
else
{
if (logger.isDebugEnabled())
if (file.isDirectory())
{
logger.debug("scanning file:" + file.getAbsolutePath());
collectSecureComms(file, secureCommsSet);
}
if ("solrcore.properties".equals(file.getName()))
else
{
FileReader propReader = null;
Properties props = new Properties();
try
if (logger.isDebugEnabled())
{
propReader = new FileReader(file);
props.load(propReader);
String prop = props.getProperty(SECURE_COMMS);
if (prop != null)
{
if (logger.isDebugEnabled())
{
logger.debug("Found alfresco.secureComms in:" + file.getAbsolutePath() + " : " + prop);
}
secureCommsSet.add(prop);
}
else
{
secureCommsSet.add("none");
}
logger.debug("scanning file:" + file.getAbsolutePath());
}
finally
if ("solrcore.properties".equals(file.getName()))
{
propReader.close();
FileReader propReader = null;
Properties props = new Properties();
try
{
propReader = new FileReader(file);
props.load(propReader);
String prop = props.getProperty(SECURE_COMMS);
if (prop != null)
{
if (logger.isDebugEnabled())
{
logger.debug("Found alfresco.secureComms in:" + file.getAbsolutePath() + " : " + prop);
}
secureCommsSet.add(prop);
}
else
{
secureCommsSet.add("none");
}
}
finally
{
if (propReader != null)
{
propReader.close();
}
}
}
}
}

View File

@@ -180,6 +180,11 @@ public abstract class AbstractTracker implements Tracker
try
{
/*
* The runLock ensures that for each tracker type (metadata, content, commit, cascade) only one tracker will
* be running at a time.
*/
runLock.acquire();
if(state==null && Boolean.parseBoolean(System.getProperty("alfresco.test", "false")))

View File

@@ -131,6 +131,18 @@ public class CommitTracker extends AbstractTracker
//See if we need a rollback
if(metadataTracker.getRollback() || aclTracker.getRollback()) {
/*
* The metadataTracker and aclTracker will return true if an unhandled exception has occurred during indexing.
*
* The doRollback method rolls the index back to the state that it was in at the last commit. This will undo
* all the work that has been done by other trackers after the last commit.
*
* The state of the other trackers is then set to null so the trackers will initialize their state from
* the index, rather than the in-memory state. This keeps the trackers in sync with the index if their work is
* rolled back.
*/
doRollback();
return;
}

View File

@@ -143,6 +143,15 @@ public class MetadataTracker extends AbstractTracker implements Tracker
if(!isMaster && isSlave)
{
// Dynamic registration
/*
* This section allows Solr's master/slave setup to be used with dynamic shard registration.
* In this scenario the slave is polling a "tracking" Solr node. The code below calls
* the repo to register the state of the node without pulling any real transactions from the repo.
*
* This allows the repo to register the replica so that it will be included in queries. But the slave Solr node
* will pull its data from a "tracking" Solr node using Solr's master/slave replication, rather than tracking the repository.
*
*/
ShardState shardstate = getShardState();
client.getTransactions(0L, null, 0L, null, 0, shardstate);
@@ -610,6 +619,11 @@ public class MetadataTracker extends AbstractTracker implements Tracker
{
try
{
/*
* This write lock is used to lock out the Commit Tracker. This ensures that the MetadataTracker will
* not be indexing content while commits or rollbacks are occurring.
*/
getWriteLock().acquire();
/*
@@ -621,6 +635,26 @@ public class MetadataTracker extends AbstractTracker implements Tracker
this.state = getTrackerState();
/*
* The fromCommitTime tells getSomeTransactions() where to start; this is actually fairly straightforward.
*
* What makes this code so tricky to understand is the state.getTimeToStopIndexing().
*
* There are two scenarios to keep in mind:
*
* 1) Full re-index: In this scenario the state.getTimeToStopIndexing() will never stop the indexing.
*
* 2) Up-to-date indexing: This is where state.getTimeToStopIndexing() gets interesting. In this scenario
* the Solr index is already up to date with the repo and it is tracking new transactions. The state.getTimeToStopIndexing()
* in this scenario causes the getSomeTransactions() call to stop returning results if it finds a transaction
* beyond a specific point in time. This will break out of this loop and end the tracker run.
*
* The next time the metadata tracker runs the "continueState()" method applies the "hole retention"
* to state.getLastGoodTxCommitTimeInIndex(). This causes the state.getLastGoodTxCommitTimeInIndex() to scan
* for prior transactions that might have been missed.
*
*/
Long fromCommitTime = getTxFromCommitTime(txnsFound, state.getLastGoodTxCommitTimeInIndex());
transactions = getSomeTransactions(txnsFound, fromCommitTime, TIME_STEP_1_HR_IN_MS, 2000,
state.getTimeToStopIndexing());
@@ -640,6 +674,22 @@ public class MetadataTracker extends AbstractTracker implements Tracker
ArrayList<Transaction> txBatch = new ArrayList<>();
for (Transaction info : transactions.getTransactions()) {
/*
* isInIndex is used to ensure transactions that are being re-pulled due to "hole retention" are not re-indexed if
* they have already been indexed.
*
* The logic in infoSrv.txnInIndex() first checks an in-memory LRU cache for the txnId. If it doesn't find it in the cache
* it checks the index. The LRU cache is only needed for txnIds that have been indexed but are not yet visible in the index for
* one of two reasons:
*
* 1) The commit tracker has not yet committed the transaction.
* 2) The txnId has been committed to the index but the new searcher has not yet been warmed.
*
* This means that to ensure txnIds are not needlessly reprocessed during hole retention, the LRU cache must be large
* enough to cover the time between when a txnId is indexed and when it becomes visible.
*/
boolean isInIndex = (infoSrv.txnInIndex(info.getId(), true) && info.getCommitTimeMs() <= state.getLastIndexedTxCommitTime());
if (isInIndex) {
txnsFound.add(info);

View File

@@ -142,30 +142,34 @@ public class ModelTracker extends AbstractTracker implements Tracker
};
// List XML files
for (File file : alfrescoModelDir.listFiles(filter))
File[] files = alfrescoModelDir.listFiles(filter);
if (files != null)
{
InputStream modelStream = null;
M2Model model;
try
for (File file : files)
{
modelStream = new FileInputStream(file);
model = M2Model.createModel(modelStream);
}
catch (IOException e)
{
throw new AlfrescoRuntimeException("File not found: " + file, e);
}
finally
{
if (modelStream != null)
InputStream modelStream = null;
M2Model model;
try
{
try { modelStream.close(); } catch (Exception e) {}
modelStream = new FileInputStream(file);
model = M2Model.createModel(modelStream);
}
catch (IOException e)
{
throw new AlfrescoRuntimeException("File not found: " + file, e);
}
finally
{
if (modelStream != null)
{
try { modelStream.close(); } catch (Exception e) {}
}
}
// Model successfully loaded
for (M2Namespace namespace : model.getNamespaces())
{
modelMap.put(namespace.getUri(), model);
}
}
// Model successfully loaded
for (M2Namespace namespace : model.getNamespaces())
{
modelMap.put(namespace.getUri(), model);
}
}
}

View File

@@ -61,7 +61,7 @@ public class TrackerRegistry
public <T extends Tracker> T getTrackerForCore(String coreName, Class<T> trackerClass)
{
Map<Class<? extends Tracker>, Tracker> coreTrackers = this.trackers.get(coreName);
return (T) coreTrackers.get(trackerClass);
return null == coreTrackers ? null : (T) coreTrackers.get(trackerClass);
}
public synchronized void register(String coreName, Tracker tracker)

View File

@@ -106,30 +106,27 @@ public class CachedDocTransformer extends DocTransformer
{
String alfrescoFieldName = AlfrescoSolrDataModel.getInstance().getAlfrescoPropertyFromSchemaField(fieldName);
Collection<Object> values = cachedDoc.getFieldValues(fieldName);
ArrayList<Object> newValues = new ArrayList<Object>(values.size());
for(Object value : values)
{
if(value instanceof String)
{
String stringValue = (String) value;
int start = stringValue.lastIndexOf('\u0000');
if(start == -1)
{
newValues.add(stringValue);
}
else
{
newValues.add(stringValue.substring(start+1));
//Guard against null pointer in case data model field name does not match up with cachedDoc field name.
if(values != null) {
ArrayList<Object> newValues = new ArrayList<Object>(values.size());
for (Object value : values) {
if (value instanceof String) {
String stringValue = (String) value;
int start = stringValue.lastIndexOf('\u0000');
if (start == -1) {
newValues.add(stringValue);
} else {
newValues.add(stringValue.substring(start + 1));
}
} else {
newValues.add(value);
}
}
else
{
newValues.add(value);
}
doc.removeFields(alfrescoFieldName);
doc.addField(alfrescoFieldName, newValues);
}
doc.removeFields(alfrescoFieldName);
doc.addField(alfrescoFieldName, newValues);
}
}
else

View File

@@ -1169,7 +1169,7 @@
-->
<!--
<lst name="slave">
<str name="masterUrl">http://your-master-hostname:8983/solr</str>
<str name="masterUrl">http://your-master-hostname:8983/solr/alfresco</str>
<str name="pollInterval">00:00:60</str>
</lst>
-->

View File

@@ -1169,7 +1169,7 @@
-->
<!--
<lst name="slave">
<str name="masterUrl">http://your-master-hostname:8983/solr</str>
<str name="masterUrl">http://your-master-hostname:8983/solr/alfresco</str>
<str name="pollInterval">00:00:60</str>
</lst>
-->

View File

@@ -1169,7 +1169,7 @@
-->
<!--
<lst name="slave">
<str name="masterUrl">http://your-master-hostname:8983/solr</str>
<str name="masterUrl">http://your-master-hostname:8983/solr/alfresco</str>
<str name="pollInterval">00:00:60</str>
</lst>
-->

View File

@@ -838,6 +838,7 @@
<str>consistencyComponent</str>
<str>query</str>
<str>facet</str>
<str>facet_module</str>
<str>mlt</str>
<str>highlight</str>
<str>stats</str>
@@ -1169,7 +1170,7 @@
-->
<!--
<lst name="slave">
<str name="masterUrl">http://your-master-hostname:8983/solr</str>
<str name="masterUrl">http://your-master-hostname:8983/solr/alfresco</str>
<str name="pollInterval">00:00:60</str>
</lst>
-->

View File

@@ -838,6 +838,7 @@
<str>consistencyComponent</str>
<str>query</str>
<str>facet</str>
<str>facet_module</str>
<str>mlt</str>
<str>highlight</str>
<str>stats</str>
@@ -1169,7 +1170,7 @@
-->
<!--
<lst name="slave">
<str name="masterUrl">http://your-master-hostname:8983/solr</str>
<str name="masterUrl">http://your-master-hostname:8983/solr/alfresco</str>
<str name="pollInterval">00:00:60</str>
</lst>
-->

View File

@@ -179,3 +179,4 @@ solr.suggester.minSecsBetweenBuilds=3600
alfresco.contentStreamLimit=10000000
#Shard setup
shard.method=DB_ID
#END of solrcore

View File

@@ -1138,7 +1138,7 @@
-->
<!--
<lst name="slave">
<str name="masterUrl">http://your-master-hostname:8983/solr</str>
<str name="masterUrl">http://your-master-hostname:8983/solr/alfresco</str>
<str name="pollInterval">00:00:60</str>
</lst>
-->

View File

@@ -1169,7 +1169,7 @@
-->
<!--
<lst name="slave">
<str name="masterUrl">http://your-master-hostname:8983/solr</str>
<str name="masterUrl">http://your-master-hostname:8983/solr/alfresco</str>
<str name="pollInterval">00:00:60</str>
</lst>
-->

View File

@@ -4,24 +4,20 @@ set -e
[ "$DEBUG" ] && set -x
nicebranch=`echo "$bamboo_planRepository_1_branch" | sed 's/\//_/'`
DOCKER_RESOURCES_PATH="${1:-packaging/target/docker-resources}"
if [ "${nicebranch}" = "master" ] || [ "${nicebranch#release}" != "${nicebranch}" ]
then
# set current working directory to the directory of the script
cd "$bamboo_working_directory"
docker_registry="quay.io/alfresco/search-services"
then
tag_version=`echo "$bamboo_maven_version"`
if [ "${bamboo_shortJobName}" = "Release" ]
then
tag_version=`echo "$bamboo_release_version"`
docker_registry="alfresco/alfresco-search-services"
fi
dockerImage="$docker_registry:$tag_version"
dockerImage="quay.io/alfresco/search-services:$tag_version"
echo "Building $dockerImage from $nicebranch using version $tag_version"
docker build -t $dockerImage packaging/target/docker-resources
docker build -t $dockerImage ${DOCKER_RESOURCES_PATH}
echo "Running tests"
docker run --rm "$dockerImage" [ -d /opt/alfresco-search-services/solr ] || (echo "solr dir does not exist" && exit 1)

View File

@@ -13,7 +13,7 @@
<parent>
<groupId>org.alfresco</groupId>
<artifactId>alfresco-search-parent</artifactId>
<version>1.2.0-dockerTest2</version>
<version>1.2.0</version>
<relativePath>../pom.xml</relativePath>
</parent>
<dependencies>

View File

@@ -1,7 +1,7 @@
version: '3'
services:
alfresco:
image: quay.io/alfresco/alfresco-content-services${ALFRESCO_EDITION}:${ALFRESCO_TAG}
image: quay.io/alfresco/alfresco-content-repository${ALFRESCO_EDITION}:${ALFRESCO_TAG}
environment:
JAVA_OPTS : "
-Ddb.driver=org.postgresql.Driver

View File

@@ -0,0 +1,88 @@
#!/usr/bin/env bash
# About:
# Run docker-compose using appropriate docker-resources generated in target
# The script is using the 'find' tool to search for a particular docker-compose.yml file
# you can use also a filter-tag (partial strings from absolute path or docker-compose.yml) that will
# uniquely identify your desired docker-compose.yml file
#
# Usage:
# $ run.sh <docker-resource-folder> <clean-or-not> <filter-flag> <debug-or-not> <alfresco-endpoint>
# * <docker-resource-folder>: defaults to 'target'
# * clean: will clean up all running docker containers on the machine; it will not start Alfresco.
# * <filter-flag>: can be 5.x or 6.x (defaults to 6.x) - it can be used to filter different compose files
# * <alfresco-endpoint>: the url of alfresco endpoint
#
# Examples:
# $ run.sh - it will use latest docker-compose from this branch
# $ run.sh target clean - it will clean the containers using the latest docker-compose from this branch
# $ run.sh target clean 5.x - it will clean the containers using the 5.x docker-compose file
# $ run.sh target up 5.x - will start alfresco using the 5.x docker-compose file
# $ run.sh target up docker-resources/docker-compose.yml debug
echo `basename $0` called on `date` with arguments: "$@"
DOCKER_RESOURCES_PATH="${1:-target}"
CLEANUP="${2:-no-clean}"
FILTER_FLAG="${3:-6.x}" #5.x, 6.x or even docker-resources/docker-compose.yml (for release branches)
DOCKER_COMPOSE_FILE=$(find ${DOCKER_RESOURCES_PATH} -name "docker-compose.yml" -type f -exec realpath {} \;| grep ${FILTER_FLAG})
DEBUG="${4:-no-debug}"
ALFRESCO_ENDPOINT="${5:-http://localhost:8081/alfresco}"
# exit if docker-compose not found
[ ! -n "${DOCKER_COMPOSE_FILE}" ] && echo "docker-compose.yml file NOT FOUND in folder: '${DOCKER_RESOURCES_PATH}' using this filter flag: '${FILTER_FLAG}'" && exit 1
DOCKER_RESOURCES_PATH=`dirname ${DOCKER_COMPOSE_FILE}`
function wait_for_alfresco_to_start {
WAIT_INTERVAL=1
COUNTER=0
TIMEOUT=2000
t0=`date +%s`
echo "Waiting for Alfresco to start in docker container: ${ALFRESCO_ENDPOINT}"
until $(curl --output /dev/null --silent --head --fail ${ALFRESCO_ENDPOINT}) || [ "$COUNTER" -eq "$TIMEOUT" ]; do
printf '.'
sleep $WAIT_INTERVAL
COUNTER=$(($COUNTER+$WAIT_INTERVAL))
done
if (("$COUNTER" < "$TIMEOUT")) ; then
t1=`date +%s`
delta=$((($t1 - $t0)/60))
echo "Alfresco Started in $delta minutes: ${ALFRESCO_ENDPOINT}"
else
echo "Waited $COUNTER seconds"
echo "Alfresco Could not start in time."
exit 1
fi
}
function cleanup_containers {
cd ${DOCKER_RESOURCES_PATH} && docker-compose kill
cd ${DOCKER_RESOURCES_PATH} && docker-compose rm -fv
}
function start_alfresco {
# update the basicAuthScheme https://issues.alfresco.com/jira/browse/REPO-2575
sed -ie "s/-Dindex.subsystem.name=solr6/-Dindex.subsystem.name=solr6 -Dalfresco.restApi.basicAuthScheme=true/g" ${DOCKER_COMPOSE_FILE}
# show the configuration of docker-compose.yml file that we will run
cd ${DOCKER_RESOURCES_PATH} && docker-compose config
if [ ${DEBUG} = "debug" ]; then
cd ${DOCKER_RESOURCES_PATH} && docker-compose up
else
cd ${DOCKER_RESOURCES_PATH} && docker-compose up -d
wait_for_alfresco_to_start
fi
}
set -ex
if [ ${CLEANUP} = "clean" ]; then
cleanup_containers
else
cleanup_containers
start_alfresco
fi

View File

@@ -0,0 +1,41 @@
#!/usr/bin/env sh
# Usage: whitesource-package-scan.sh <clean>
# - this will copy the distribution zip to release area
# - will unzip it and scan it using the WhiteSource File System Agent https://goo.gl/ohg4Rv
# - and will clean up the scan folder if the <clean> string is passed as a parameter
# Example:
# $ whitesource-package-scan.sh -> perform the scan
# $ whitesource-package-scan.sh clean -> will cleanup the scan folder
echo `basename $0` called on `date` with arguments: "$@"
set -exu
nicebranch=`echo "$bamboo_planRepository_1_branch" | sed 's/\//_/'`
if [ "${nicebranch}" = "master" ] || [ "${nicebranch#release}" != "${nicebranch}" ]
then
POM_VERSION=$(grep version pom.xml | grep -v -e '<?xml|~'| head -n 1 |awk -F '[><]' '{print $3}')
RELEASE_FOLDER=/data/releases/SearchServices/${POM_VERSION}
DISTRIBUTION_NAME=alfresco-search-services-${POM_VERSION}.zip
DISTRIBUTION_ZIP_PATH=${RELEASE_FOLDER}/${DISTRIBUTION_NAME}
DISTRIBUTION_ZIP_SCAN_PATH=${RELEASE_FOLDER}/scan
CLEANUP="${1:-do-not-clean}"
if [ ${CLEANUP} = "clean" ]; then
echo "Cleaning up scan folder..."
ssh -q tomcat@pbam01.alfresco.com [[ -d ${RELEASE_FOLDER} ]] && ssh tomcat@pbam01.alfresco.com rm -rf ${RELEASE_FOLDER} || echo "Nothing to cleanup"
else
echo "Copy distribution to release area..."
ssh tomcat@pbam01.alfresco.com mkdir -p ${RELEASE_FOLDER}
scp target/${DISTRIBUTION_NAME} tomcat@pbam01.alfresco.com:${RELEASE_FOLDER}
#unzip distribution
ssh tomcat@pbam01.alfresco.com unzip ${DISTRIBUTION_ZIP_PATH} -d ${DISTRIBUTION_ZIP_SCAN_PATH}
#whitesource scanning using file agent: https://goo.gl/ohg4Rv
ssh tomcat@pbam01.alfresco.com sh /etc/bamboo/whitesource-agent.sh -d ${DISTRIBUTION_ZIP_SCAN_PATH} -project distribution-zip -product SearchServices-${bamboo_release_version}
fi
else
echo "WhiteSource scann will be executed only from master or release branches. Skipping for '${nicebranch}'"
fi

View File

@@ -7,7 +7,7 @@
<version>8</version>
</parent>
<artifactId>alfresco-search-parent</artifactId>
<version>1.2.0-dockerTest2</version>
<version>1.2.0</version>
<packaging>pom</packaging>
<name>Alfresco Solr Search parent</name>
<properties>
@@ -16,19 +16,19 @@
</properties>
<distributionManagement>
<repository>
<id>alfresco-internal</id>
<url>https://artifacts.alfresco.com/nexus/content/repositories/internal-releases/</url>
<id>alfresco-releases</id>
<url>https://artifacts.alfresco.com/nexus/content/repositories/releases/</url>
</repository>
<snapshotRepository>
<id>alfresco-internal-snapshots</id>
<url>https://artifacts.alfresco.com/nexus/content/repositories/internal-snapshots/</url>
<id>alfresco-snapshots</id>
<url>https://artifacts.alfresco.com/nexus/content/repositories/snapshots/</url>
</snapshotRepository>
</distributionManagement>
<scm>
<connection>scm:git:git@github.com:Alfresco/SearchServices.git</connection>
<developerConnection>scm:git:git@github.com:Alfresco/SearchServices.git</developerConnection>
<url>https://github.com/Alfresco/SearchServices.git</url>
<tag>1.2.0-dockerTest2</tag>
<tag>1.2.0</tag>
</scm>
<modules>
<module>alfresco-search</module>