mirror of https://github.com/Alfresco/SearchServices.git
synced 2025-09-10 14:11:25 +00:00

Compare commits: 1.2.0-dockerTest3...1.2.0 (57 commits)
Commits (SHA1):
13b4d213dd, 3752ea74c4, d273afb2ac, 185b09c086, 1a5b0a85d4, c8ead7da49,
4234969f75, 9186b8f9af, 9033106734, 8d2626aecc, 48a1c8bc62, 04c77dfa18,
6d14fffa65, 92b9183f9c, 96d078f0b5, aa6f7d686a, 680b5cf562, 5c531eaf00,
bdaed92925, 8c0e56dd63, 28cadb9cf5, e8b8a090bf, 9e9436f786, 53c101c197,
d367fdae9b, 76da52aa33, 6190083cf4, 86f60d9ea2, 8064188e38, 23f62bd57b,
05185d826c, ff14bad807, f64ebdd30c, 8cc4b61474, 12ef1d95e8, c191776593,
b802e2463e, ee4a91dc97, a498f3188c, fc3f9bddf2, 03056db4ca, 7886e32ee0,
ed0b4d565b, 80e3fb6aa2, 1f68e78b11, 84afeaa287, bbce452c95, 54cde0505a,
4a56f5eee4, 1cd934221d, fd7018607e, 56e270b71f, 9e33a1e5b2, 0583055259,
1f8511a100, c2ec139aee, 348ea3ce84
@@ -6,7 +6,7 @@
   <parent>
     <groupId>org.alfresco</groupId>
     <artifactId>alfresco-search-parent</artifactId>
-    <version>1.2.0-dockerTest3</version>
+    <version>1.2.0</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
@@ -19,13 +19,49 @@
 package org.alfresco.solr;

+import static org.alfresco.solr.HandlerOfResources.extractCustomProperties;
+import static org.alfresco.solr.HandlerOfResources.getSafeBoolean;
+import static org.alfresco.solr.HandlerOfResources.getSafeLong;
+import static org.alfresco.solr.HandlerOfResources.openResource;
+import static org.alfresco.solr.HandlerOfResources.updatePropertiesFile;
+import static org.alfresco.solr.HandlerOfResources.updateSharedProperties;
+import static org.alfresco.solr.HandlerReportBuilder.addCoreSummary;
+import static org.alfresco.solr.HandlerReportBuilder.buildAclReport;
+import static org.alfresco.solr.HandlerReportBuilder.buildAclTxReport;
+import static org.alfresco.solr.HandlerReportBuilder.buildNodeReport;
+import static org.alfresco.solr.HandlerReportBuilder.buildTrackerReport;
+import static org.alfresco.solr.HandlerReportBuilder.buildTxReport;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+
 import org.alfresco.error.AlfrescoRuntimeException;
 import org.alfresco.httpclient.AuthenticationException;
 import org.alfresco.service.cmr.repository.StoreRef;
 import org.alfresco.solr.adapters.IOpenBitSet;
 import org.alfresco.solr.client.SOLRAPIClientFactory;
 import org.alfresco.solr.config.ConfigUtil;
-import org.alfresco.solr.tracker.*;
+import org.alfresco.solr.tracker.AclTracker;
+import org.alfresco.solr.tracker.DBIDRangeRouter;
+import org.alfresco.solr.tracker.DocRouter;
+import org.alfresco.solr.tracker.IndexHealthReport;
+import org.alfresco.solr.tracker.MetadataTracker;
+import org.alfresco.solr.tracker.SolrTrackerScheduler;
+import org.alfresco.solr.tracker.Tracker;
+import org.alfresco.solr.tracker.TrackerRegistry;
 import org.alfresco.util.shard.ExplicitShardingPolicy;
 import org.apache.commons.codec.EncoderException;
 import org.apache.commons.httpclient.MultiThreadedHttpConnectionManager;
@@ -44,15 +80,6 @@ import org.json.JSONException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

-import java.io.*;
-import java.lang.reflect.Method;
-import java.util.*;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.TimeUnit;
-
-import static org.alfresco.solr.HandlerOfResources.*;
-import static org.alfresco.solr.HandlerReportBuilder.*;
-
 public class AlfrescoCoreAdminHandler extends CoreAdminHandler
 {
     protected static final Logger log = LoggerFactory.getLogger(AlfrescoCoreAdminHandler.class);
@@ -540,7 +567,10 @@ public class AlfrescoCoreAdminHandler extends CoreAdminHandler
         properties.setProperty("alfresco.stores", storeRef.toString());

         //Potentially override the defaults
-        properties.load(new FileInputStream(config));
+        try (FileInputStream fileInputStream = new FileInputStream(config))
+        {
+            properties.load(fileInputStream);
+        }

         //Don't override these
         properties.setProperty("alfresco.template", templateName);
@@ -559,8 +589,11 @@ public class AlfrescoCoreAdminHandler extends CoreAdminHandler
             properties.putAll(extraProperties);
         }

-        properties.store(new FileOutputStream(config), null);
+        try (FileOutputStream fileOutputStream = new FileOutputStream(config))
+        {
+            properties.store(fileOutputStream, null);
+        }

         SolrCore core = coreContainer.create(coreName, newCore.toPath(), new HashMap<String, String>(), false);
         rsp.add("core", core.getName());
     }
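The recurring fix in the two hunks above is Java 7 try-with-resources: the stream is declared in the try header and closed automatically when the block exits, even on exception, which the old one-line new FileInputStream(...)/new FileOutputStream(...) calls never guaranteed. A minimal self-contained sketch of the pattern (the file name and property values are illustrative, not taken from the Alfresco code):

    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.util.Properties;

    public class PropertiesRoundTrip
    {
        public static void main(String[] args) throws IOException
        {
            Properties properties = new Properties();
            properties.setProperty("alfresco.stores", "workspace://SpacesStore");
            // The stream is closed automatically when the block exits, even if store() throws.
            try (FileOutputStream out = new FileOutputStream("example.properties"))
            {
                properties.store(out, null);
            }
            Properties reloaded = new Properties();
            try (FileInputStream in = new FileInputStream("example.properties"))
            {
                reloaded.load(in);
            }
            System.out.println(reloaded.getProperty("alfresco.stores"));
        }
    }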
@@ -21,14 +21,19 @@
  */
 package org.alfresco.solr;

 import org.apache.commons.io.FileUtils;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Properties;
+
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.core.CoreContainer;
 import org.apache.solr.core.SolrCore;

-import java.io.*;
-import java.util.*;

 /**
  * Methods taken from AlfrescoCoreAdminHandler that deal with I/O resources
@@ -126,7 +131,10 @@ public class HandlerOfResources {
             properties.putAll(extraProperties);
         }

-        properties.store(new FileOutputStream(config), "Generated from Solr");
+        try (FileOutputStream fileOutputStream = new FileOutputStream(config))
+        {
+            properties.store(fileOutputStream, "Generated from Solr");
+        }
     } // FileInputStream is closed
     catch (IOException e)
     {
@@ -18,6 +18,56 @@
  */
 package org.alfresco.solr;

+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_ACLID;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_ACLTXCOMMITTIME;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_ACLTXID;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_ANAME;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_ANCESTOR;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_APATH;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_ASPECT;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_ASSOCTYPEQNAME;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_CASCADE_FLAG;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_DBID;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_DENIED;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_DOC_TYPE;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_EXCEPTION_MESSAGE;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_EXCEPTION_STACK;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_FIELDS;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_FTSSTATUS;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_GEO;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_INACLTXID;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_INTXID;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_ISNODE;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_LID;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_NPATH;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_NULLPROPERTIES;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_OWNER;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_PARENT;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_PARENT_ASSOC_CRC;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_PATH;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_PNAME;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_PRIMARYASSOCQNAME;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_PRIMARYASSOCTYPEQNAME;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_PRIMARYPARENT;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_PROPERTIES;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_QNAME;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_READER;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_SITE;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_SOLR4_ID;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_S_ACLTXCOMMITTIME;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_S_ACLTXID;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_S_INACLTXID;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_S_INTXID;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_S_TXCOMMITTIME;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_S_TXID;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_TAG;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_TAG_SUGGEST;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_TENANT;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_TXCOMMITTIME;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_TXID;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_TYPE;
+import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.FIELD_VERSION;
+
 import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
@@ -47,6 +97,8 @@ import java.util.regex.Pattern;

 import com.carrotsearch.hppc.IntArrayList;
+import com.carrotsearch.hppc.LongHashSet;
+import com.carrotsearch.hppc.cursors.LongCursor;
 import org.alfresco.httpclient.AuthenticationException;
 import org.alfresco.model.ContentModel;
 import org.alfresco.opencmis.dictionary.CMISStrictDictionaryService;
@@ -149,8 +201,6 @@ import org.slf4j.LoggerFactory;
 import org.springframework.extensions.surf.util.I18NUtil;
 import org.springframework.util.FileCopyUtils;

-import static org.alfresco.repo.search.adaptor.lucene.QueryConstants.*;
-
 /**
  * This is the Solr4 implementation of the information server (index).
  * @author Ahmed Owian
@@ -752,37 +802,37 @@ public class SolrInformationServer implements InformationServer
             //System.out.println("################ Transaction floor:"+txnFloor);

-            //Find the next N transactions
-            collector = TopFieldCollector.create(new Sort(new SortField(FIELD_INTXID, SortField.Type.LONG)),
-                                                 rows, null, false, false, false);
             //Query for dirty or new nodes
             termQuery1 = new TermQuery(new Term(FIELD_FTSSTATUS, FTSStatus.Dirty.toString()));
             termQuery2 = new TermQuery(new Term(FIELD_FTSSTATUS, FTSStatus.New.toString()));
             clause1 = new BooleanClause(termQuery1, BooleanClause.Occur.SHOULD);
             clause2 = new BooleanClause(termQuery2, BooleanClause.Occur.SHOULD);
             builder = new BooleanQuery.Builder();
             builder.add(clause1);
             builder.add(clause2);
             orQuery = builder.build();

-            delegatingCollector = new TxnFloorFilter(txnFloor, cleanContentCache);
-            delegatingCollector.setLastDelegate(collector);
-            TermQuery txnQuery = new TermQuery(new Term(FIELD_DOC_TYPE, DOC_TYPE_TX));
-            searcher.search(txnQuery, delegatingCollector);
-            TopDocs docs = collector.topDocs();
-            //System.out.println("############### Next N transactions ################:" + docs.totalHits);
+            //The TxnCollector collects the transaction ids from the matching documents
+            //The txnIds are limited to a range >= the txnFloor and < an arbitrary transaction ceiling.
+            TxnCollector txnCollector = new TxnCollector(txnFloor);
+            searcher.search(orQuery, txnCollector);
+            LongHashSet txnSet = txnCollector.getTxnSet();
+            //System.out.println("############### Next N transactions ################:" + txnSet.size());

-            if (collector.getTotalHits() == 0)
+            if(txnSet.size() == 0)
             {
                 //No new transactions to consider
+                //This should really never be the case, at a minimum the transaction floor should be collected.
                 return docIds;
             }

             leaves = searcher.getTopReaderContext().leaves();
             FieldType fieldType = searcher.getSchema().getField(FIELD_INTXID).getType();
             builder = new BooleanQuery.Builder();

-            for (ScoreDoc scoreDoc : docs.scoreDocs)
+            Iterator<LongCursor> it = txnSet.iterator();
+            while (it.hasNext())
             {
-                index = ReaderUtil.subIndex(scoreDoc.doc, leaves);
-                context = leaves.get(index);
-                longs = context.reader().getNumericDocValues(FIELD_INTXID);
-                long txnID = longs.get(scoreDoc.doc - context.docBase);
-
+                LongCursor cursor = it.next();
+                long txnID = cursor.value;
                 //Build up the query for the filter of transactions we need to pull the dirty content for.
                 TermQuery txnIDQuery = new TermQuery(new Term(FIELD_INTXID, fieldType.readableToIndexed(Long.toString(txnID))));
                 builder.add(new BooleanClause(txnIDQuery, BooleanClause.Occur.SHOULD));
@@ -790,7 +840,6 @@ public class SolrInformationServer implements InformationServer

             BooleanQuery txnFilterQuery = builder.build();

-
             //Get the docs with dirty content for the transactions gathered above.

             TermQuery statusQuery1 = new TermQuery(new Term(FIELD_FTSSTATUS, FTSStatus.Dirty.toString()));
@@ -851,7 +900,10 @@ public class SolrInformationServer implements InformationServer
         }
         finally
         {
-            refCounted.decref();
+            if (refCounted != null)
+            {
+                refCounted.decref();
+            }
         }
     }
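The same null guard is repeated in every finally block that follows. A small sketch of the guard extracted into a helper (hypothetical: the changeset inlines the check rather than adding such a helper):

    import org.apache.solr.search.SolrIndexSearcher;
    import org.apache.solr.util.RefCounted;

    final class SearcherRefs
    {
        private SearcherRefs() {}

        // The guard matters because the reference is typically acquired inside the
        // try block; if an earlier statement throws, the finally block sees null.
        static void safeDecref(RefCounted<SolrIndexSearcher> refCounted)
        {
            if (refCounted != null)
            {
                refCounted.decref();
            }
        }
    }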
@@ -972,8 +1024,14 @@ public class SolrInformationServer implements InformationServer
         }
         finally
         {
-            active.decref();
-            newest.decref();
+            if (active != null)
+            {
+                active.decref();
+            }
+            if (newest != null)
+            {
+                newest.decref();
+            }
         }
     }
     processor.processCommit(command);
@@ -1288,7 +1346,10 @@ public class SolrInformationServer implements InformationServer
         }
         finally
         {
-            refCounted.decref();
+            if (refCounted != null)
+            {
+                refCounted.decref();
+            }
         }
         return errorDocIds;
     }
@@ -1439,7 +1500,23 @@ public class SolrInformationServer implements InformationServer

         /*
          * Choose the max between the last commit time in the index and the last time the tracker started.
-         * Hole retention is applied to both. *
+         * Hole retention is applied to both.
+         *
+         * This logic is very tricky and very important to understand.
+         *
+         * state.getLastGoodTxCommitTimeInIndex() is used to determine where to start pulling transactions from the repo on the
+         * current tracker run.
+         *
+         * If we simply take the current value of state.getLastIndexedTxCommitTime() we have the following problem:
+         * if no data is added to the repo for a long period of time, state.getLastIndexedTxCommitTime() never moves forward. This causes the
+         * loop inside MetadataTracker.getSomeTransactions() to hammer the repo as the time between state.getLastIndexedTxCommitTime()
+         * and state.setTimeToStopIndexing increases.
+         *
+         * To resolve this we choose the max between the last commit time in the index and the last time the tracker started. In theory,
+         * if we start looking for transactions after the last tracker start (and apply hole retention), we should never miss a
+         * transaction, or at least the principle behind hole retention is respected. This theory should be looked at closely if
+         * the trackers ever lose data.
          */

         timeBeforeWhichThereCanBeNoTxHolesInIndex = Math.max(timeBeforeWhichThereCanBeNoTxHolesInIndex, lastStartTimeWhichThereCanBeNoTxHolesInIndex);
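A toy illustration of the Math.max() guard the comment describes; all numbers are invented:

    public class HoleRetentionExample
    {
        public static void main(String[] args)
        {
            long lastCommitTimeInIndex = 1_000L;  // index idle for a long time, so this value stalls
            long lastTrackerStartTime = 50_000L;  // the tracker itself restarted much later
            // Starting the pull from the max keeps getSomeTransactions() from
            // re-scanning the entire idle window on every tracker run.
            long floor = Math.max(lastCommitTimeInIndex, lastTrackerStartTime);
            System.out.println("Pull transactions committed after: " + floor);
        }
    }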
@@ -2148,7 +2225,10 @@ public class SolrInformationServer implements InformationServer
         }
         finally
         {
-            refCounted.decref();
+            if (refCounted != null)
+            {
+                refCounted.decref();
+            }
         }

         //System.out.println("################ CHILD IDs:"+childIds.size());
@@ -2293,7 +2373,10 @@ public class SolrInformationServer implements InformationServer
         }
         finally
         {
-            refCounted.decref();
+            if (refCounted != null)
+            {
+                refCounted.decref();
+            }
         }

         List<NodeMetaData> allNodeMetaDatas = new ArrayList();
@@ -3785,7 +3868,10 @@ public class SolrInformationServer implements InformationServer
         }
         finally
         {
-            refCounted.decref();
+            if (refCounted != null)
+            {
+                refCounted.decref();
+            }
         }
     }
@@ -3827,7 +3913,10 @@ public class SolrInformationServer implements InformationServer
         }
         finally
         {
-            refCounted.decref();
+            if (refCounted != null)
+            {
+                refCounted.decref();
+            }
         }
     }
 }
@@ -3864,7 +3953,10 @@ public class SolrInformationServer implements InformationServer
         }
         finally
         {
-            refCounted.decref();
+            if (refCounted != null)
+            {
+                refCounted.decref();
+            }
         }
     }
 }
@@ -4167,6 +4259,48 @@ public class SolrInformationServer implements InformationServer
         }
     }

+    class TxnCollector extends DelegatingCollector
+    {
+        private NumericDocValues currentLongs;
+        private long txnFloor;
+        private long txnCeil;
+        private LongHashSet txnSet = new LongHashSet(1000);
+
+        public TxnCollector(long txnFloor)
+        {
+            this.txnFloor = txnFloor;
+            this.txnCeil = txnFloor + 500;
+        }
+
+        public void doSetNextReader(LeafReaderContext context) throws IOException
+        {
+            currentLongs = context.reader().getNumericDocValues(FIELD_INTXID);
+        }
+
+        public boolean needsScores()
+        {
+            return false;
+        }
+
+        public void collect(int doc) throws IOException
+        {
+            long txnId = currentLongs.get(doc);
+            //System.out.println("########### Floor Filter #############:"+doc+":"+txnId);
+            if(txnId >= txnFloor && txnId < txnCeil)
+            {
+                txnSet.add(txnId);
+            }
+        }
+
+        public LongHashSet getTxnSet()
+        {
+            return txnSet;
+        }
+    }
+
 class LRU extends LinkedHashMap
 {
     private int maxSize;
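The LRU class is cut off at the hunk boundary above. A LinkedHashMap-based LRU cache conventionally completes as below; this is a sketch of the standard idiom, not the project's actual code:

    import java.util.LinkedHashMap;
    import java.util.Map;

    class LruSketch<K, V> extends LinkedHashMap<K, V>
    {
        private final int maxSize;

        LruSketch(int maxSize)
        {
            super(16, 0.75f, true); // access-order, so gets refresh recency
            this.maxSize = maxSize;
        }

        @Override
        protected boolean removeEldestEntry(Map.Entry<K, V> eldest)
        {
            return size() > maxSize; // evict the least recently used entry
        }
    }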
@@ -66,12 +66,23 @@ public class SolrCoreLoadRegistration {
     {
         TrackerRegistry trackerRegistry = adminHandler.getTrackerRegistry();
         Properties props = new CoreDescriptorDecorator(core.getCoreDescriptor()).getProperties();
+        //Prepare cores
+        SolrResourceLoader loader = core.getLatestSchema().getResourceLoader();
+        SolrKeyResourceLoader keyResourceLoader = new SolrKeyResourceLoader(loader);
+        SOLRAPIClientFactory clientFactory = new SOLRAPIClientFactory();
+        SOLRAPIClient repositoryClient = clientFactory.getSOLRAPIClient(props, keyResourceLoader,
+                AlfrescoSolrDataModel.getInstance().getDictionaryService(CMISStrictDictionaryService.DEFAULT),
+                AlfrescoSolrDataModel.getInstance().getNamespaceDAO());
+        //Start content store
+        SolrContentStore contentStore = new SolrContentStore(coreContainer.getSolrHome());
+        SolrInformationServer srv = new SolrInformationServer(adminHandler, core, repositoryClient, contentStore);
+        props.putAll(srv.getProps());
+        adminHandler.getInformationServers().put(coreName, srv);
+
+        log.info("Starting to track " + coreName);
         if (Boolean.parseBoolean(props.getProperty("enable.alfresco.tracking", "false")))
         {
             SolrTrackerScheduler scheduler = adminHandler.getScheduler();
-            SolrResourceLoader loader = core.getLatestSchema().getResourceLoader();
-            SolrKeyResourceLoader keyResourceLoader = new SolrKeyResourceLoader(loader);
             if (trackerRegistry.hasTrackersForCore(coreName))
             {
                 log.info("Trackers for " + coreName + " is already registered, shutting them down.");
@@ -80,17 +91,7 @@ public class SolrCoreLoadRegistration {
                 adminHandler.getInformationServers().remove(coreName);
             }

-            SOLRAPIClientFactory clientFactory = new SOLRAPIClientFactory();
-            SOLRAPIClient repositoryClient = clientFactory.getSOLRAPIClient(props, keyResourceLoader,
-                    AlfrescoSolrDataModel.getInstance().getDictionaryService(CMISStrictDictionaryService.DEFAULT),
-                    AlfrescoSolrDataModel.getInstance().getNamespaceDAO());
-            //Start content store
-            SolrContentStore contentStore = new SolrContentStore(coreContainer.getSolrHome());
-            SolrInformationServer srv = new SolrInformationServer(adminHandler, core, repositoryClient, contentStore);
-            props.putAll(srv.getProps());
-            adminHandler.getInformationServers().put(coreName, srv);
-
-            log.info("Starting to track " + coreName);
-
             ModelTracker mTracker = null;
             // Prevents other threads from registering the ModelTracker at the same time
@@ -82,15 +82,29 @@ public abstract class AbstractAuthoritySetQuery extends Query
         return authorities.hashCode();
     }

+    /*
+     * This method collects the bitset of documents that match the authorities.
+     */
     protected HybridBitSet getACLSet(String[] auths, String field, SolrIndexSearcher searcher) throws IOException
     {
+        /*
+         * Build a query that matches the authorities with a field in the ACL records in the index.
+         */
         BooleanQuery.Builder queryBuilder = new BooleanQuery.Builder();
         for(String current : auths)
         {
             queryBuilder.add(new TermQuery(new Term(field, current)), BooleanClause.Occur.SHOULD);
         }

-        //NOTE: this query will be in the filter cache. Ideally it would remain cached throughout the users session.
+        /*
+         * Collect a docset containing the ACL records that match the query.
+         * This query will be in the filter cache. Ideally it would remain cached throughout the user's session.
+         */
         DocSet docSet = searcher.getDocSet(queryBuilder.build());

         DocIterator iterator = docSet.iterator();
@@ -102,6 +116,12 @@ public abstract class AbstractAuthoritySetQuery extends Query
         //TODO : makes this configurable. For some systems this is huge and for others not big enough.
         HybridBitSet hybridBitSet = new HybridBitSet(60000000);

+        /*
+         * Collect the ACLIDs from the matching acl records.
+         * This is done in a separate step so the initial ACL query can be cached in the FilterCache.
+         * The initial ACL query may be expensive if the number of authorities is very large.
+         */
         List<LeafReaderContext> leaves = searcher.getTopReaderContext().leaves();
         LeafReaderContext context = leaves.get(0);
         NumericDocValues aclValues = DocValuesCache.getNumericDocValues(QueryConstants.FIELD_ACLID, context.reader());
@@ -81,8 +81,9 @@ public class AlfrescoFTSQParserPlugin extends QParserPlugin
             rerankPhase = RerankPhase.valueOf(arg.toString());
         }

-        //First check the System property.
-        //Then check solrcore.properties, defaulting to the postFilter.
+        /*
+         * This turns on the postFilter
+         */
         postfilter = Boolean.parseBoolean(System.getProperty("alfresco.postfilter",
                 req.getCore().getCoreDescriptor().getCoreProperty("alfresco.postfilter",
@@ -109,7 +110,11 @@ public class AlfrescoFTSQParserPlugin extends QParserPlugin

         if(authset && postfilter)
         {
-            //Return the PostFilter
+            /*
+             * The cost of 200 turns on the postfilter inside Solr.
+             * The postfilter query pulls out all the post filters in the
+             * query and applies them.
+             */
             return new PostFilterQuery(200, query);
         }
@@ -563,9 +563,17 @@ public class Solr4QueryParser extends QueryParser implements QueryConstants
             return createReaderSetQuery(queryText);
         } else if (field.equals(FIELD_AUTHORITY))
         {
+            /*
+             * ACL DOCUMENTATION STARTS HERE
+             * This creates the query that applies the ACL filter
+             */
             return createAuthorityQuery(queryText);
         } else if (field.equals(FIELD_AUTHORITYSET))
         {
+            /*
+             * ACL DOCUMENTATION STARTS HERE
+             * This creates the query that applies the ACL filter
+             */
            return createAuthoritySetQuery(queryText);
         } else if (field.equals(FIELD_DENIED))
         {
@@ -837,7 +845,10 @@ public class Solr4QueryParser extends QueryParser implements QueryConstants
         }catch(Exception e) {
             logger.error(e);
         } finally {
-            executorService.shutdown();
+            if (executorService != null)
+            {
+                executorService.shutdown();
+            }
         }
         return fingerPrint;
     }
@@ -870,8 +881,14 @@ public class Solr4QueryParser extends QueryParser implements QueryConstants
             NamedList fingerprint = (NamedList) dataResponse.get("fingerprint");
             return (Collection)fingerprint.get("MINHASH");
         } finally {
-            closeableHttpClient.close();
-            solrClient.close();
+            if (closeableHttpClient != null)
+            {
+                closeableHttpClient.close();
+            }
+            if (solrClient != null)
+            {
+                solrClient.close();
+            }
         }
     }
@@ -108,6 +108,21 @@ public class SolrAuthoritySetQuery extends AbstractAuthoritySetQuery implements
         }
     }

+    /*
+     * ACL PostFilter
+     *
+     * The getFilterCollector function returns a DelegatingCollector
+     * which is used to filter the documents that match the query.
+     *
+     * A delegating collector wraps the TopDocs Collector which gathers the top documents that
+     * match a query. A delegating collector can filter the documents before "delegating" to the TopDocs
+     * collector. This filtering process is where the ACL logic is applied.
+     *
+     * The getFilterCollector method sets up the data structures needed to apply the acl rules.
+     * These data structures are then passed to the access control collectors.
+     */
     public DelegatingCollector getFilterCollector(IndexSearcher searcher)
     {
@@ -136,7 +151,19 @@ public class SolrAuthoritySetQuery extends AbstractAuthoritySetQuery implements

         try
         {
+            /*
+             * Collect the ACLIDs that match the authorities.
+             * This is done by querying the ACL records in the index. See the method for more
+             * documentation on this query.
+             */
             HybridBitSet aclSet = getACLSet(auths, QueryConstants.FIELD_READER, solrIndexSearcher);

+            /*
+             * Collect the documents that the user owns.
+             */
             BitsFilter ownerFilter = getOwnerFilter(auths, solrIndexSearcher);

             if (globalReaders.contains(PermissionService.OWNER_AUTHORITY))
@@ -251,6 +278,11 @@ public class SolrAuthoritySetQuery extends AbstractAuthoritySetQuery implements
         }
     }

+    /*
+     * The AccessControlCollector applies the ACL logic given aclIds and ownerFilter
+     */
     class AccessControlCollector extends DelegatingCollector
     {
         private HybridBitSet aclIds;
@@ -276,6 +308,12 @@ public class SolrAuthoritySetQuery extends AbstractAuthoritySetQuery implements
             this.ownerDocs = ownerFilter.getBitSets().get(context.ord);
         }

+        /*
+         * The collect method is applied to each document that matches the
+         * query. The document's aclId must be in the set of aclIds passed into the collector,
+         * or the document's id must be in the ownerDocs.
+         */
         public void collect(int doc) throws IOException
         {
             long aclId = this.fieldValues.get(doc);
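Background on the pieces the comments above name: in Solr, a query that implements PostFilter, is not cached, and reports a cost of 100 or more is executed after the main query as a chain of DelegatingCollectors, which is why PostFilterQuery is constructed with a cost of 200. A bare-bones sketch of the contract (illustrative only, not the Alfresco implementation):

    import java.io.IOException;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.solr.search.DelegatingCollector;
    import org.apache.solr.search.ExtendedQueryBase;
    import org.apache.solr.search.PostFilter;

    class ExamplePostFilter extends ExtendedQueryBase implements PostFilter
    {
        @Override
        public boolean getCache() { return false; } // post filters must not be cached

        @Override
        public int getCost() { return Math.max(super.getCost(), 100); } // >= 100 selects post filtering

        @Override
        public DelegatingCollector getFilterCollector(IndexSearcher searcher)
        {
            return new DelegatingCollector()
            {
                @Override
                public void collect(int doc) throws IOException
                {
                    // apply per-document access checks here, then delegate
                    // to the wrapped (e.g. TopDocs) collector
                    super.collect(doc);
                }
            };
        }

        @Override
        public String toString(String field) { return "ExamplePostFilter"; }

        @Override
        public boolean equals(Object other) { return this == other; }

        @Override
        public int hashCode() { return System.identityHashCode(this); }
    }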
@@ -139,22 +139,25 @@ public class Solr4X509ServletFilter extends X509ServletFilterBase
     private void findCores(File dir, List<File> cores)
     {
         File[] files = dir.listFiles();
-        for(File file : files)
-        {
-            if(file.isDirectory())
-            {
-                findCores(file, cores);
-            }
-            else
-            {
-                if("core.properties".equals(file.getName()))
-                {
-                    if (logger.isDebugEnabled())
-                    {
-                        logger.debug("Found core:" + dir.getAbsolutePath());
-                    }
-
-                    cores.add(dir);
-                }
-            }
-        }
+        if (files != null)
+        {
+            for (File file : files)
+            {
+                if (file.isDirectory())
+                {
+                    findCores(file, cores);
+                }
+                else
+                {
+                    if ("core.properties".equals(file.getName()))
+                    {
+                        if (logger.isDebugEnabled())
+                        {
+                            logger.debug("Found core:" + dir.getAbsolutePath());
+                        }
+
+                        cores.add(dir);
+                    }
+                }
+            }
+        }
     }
@@ -167,47 +170,52 @@ public class Solr4X509ServletFilter extends X509ServletFilterBase
     private void collectSecureComms(File base, Set<String> secureCommsSet) throws IOException
     {
         File[] files = base.listFiles();

-        for(File file : files)
-        {
-            if(file.isDirectory())
-            {
-                collectSecureComms(file, secureCommsSet);
-            }
-            else
-            {
-                if (logger.isDebugEnabled())
-                {
-                    logger.debug("scanning file:" + file.getAbsolutePath());
-                }
-
-                if ("solrcore.properties".equals(file.getName()))
-                {
-                    FileReader propReader = null;
-                    Properties props = new Properties();
-                    try
-                    {
-                        propReader = new FileReader(file);
-                        props.load(propReader);
-                        String prop = props.getProperty(SECURE_COMMS);
-
-                        if (prop != null)
-                        {
-                            if (logger.isDebugEnabled())
-                            {
-                                logger.debug("Found alfresco.secureComms in:" + file.getAbsolutePath() + " : " + prop);
-                            }
-                            secureCommsSet.add(prop);
-                        }
-                        else
-                        {
-                            secureCommsSet.add("none");
-                        }
-                    }
-                    finally
-                    {
-                        propReader.close();
-                    }
-                }
-            }
-        }
+        if (files != null)
+        {
+            for (File file : files)
+            {
+                if (file.isDirectory())
+                {
+                    collectSecureComms(file, secureCommsSet);
+                }
+                else
+                {
+                    if (logger.isDebugEnabled())
+                    {
+                        logger.debug("scanning file:" + file.getAbsolutePath());
+                    }
+
+                    if ("solrcore.properties".equals(file.getName()))
+                    {
+                        FileReader propReader = null;
+                        Properties props = new Properties();
+                        try
+                        {
+                            propReader = new FileReader(file);
+                            props.load(propReader);
+                            String prop = props.getProperty(SECURE_COMMS);
+
+                            if (prop != null)
+                            {
+                                if (logger.isDebugEnabled())
+                                {
+                                    logger.debug("Found alfresco.secureComms in:" + file.getAbsolutePath() + " : " + prop);
+                                }
+                                secureCommsSet.add(prop);
+                            }
+                            else
+                            {
+                                secureCommsSet.add("none");
+                            }
+                        }
+                        finally
+                        {
+                            if (propReader != null)
+                            {
+                                propReader.close();
+                            }
+                        }
+                    }
+                }
+            }
+        }
     }
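The motivation for the new guards in both methods: File.listFiles() returns null, not an empty array, when the path is not a directory or cannot be read. A small demonstration:

    import java.io.File;

    public class ListFilesNullDemo
    {
        public static void main(String[] args)
        {
            File notADirectory = new File("does-not-exist");
            File[] files = notADirectory.listFiles();
            // Prints "null, not an empty array" - iterating would throw a NullPointerException.
            System.out.println(files == null ? "null, not an empty array" : files.length + " entries");
        }
    }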
@@ -180,6 +180,11 @@ public abstract class AbstractTracker implements Tracker

         try
         {
+            /*
+             * The runLock ensures that for each tracker type (metadata, content, commit, cascade) only one tracker will
+             * be running at a time.
+             */
             runLock.acquire();

             if(state==null && Boolean.parseBoolean(System.getProperty("alfresco.test", "false")))
@@ -131,6 +131,18 @@ public class CommitTracker extends AbstractTracker

         //See if we need a rollback
         if(metadataTracker.getRollback() || aclTracker.getRollback()) {

+            /*
+             * The metadataTracker and aclTracker will return true if an unhandled exception has occurred during indexing.
+             *
+             * The doRollback method rolls the index back to the state that it was in at the last commit. This will undo
+             * all the work that has been done by other trackers after the last commit.
+             *
+             * The state of the other trackers is then set to null so the trackers will initialize their state from
+             * the index, rather than the in-memory state. This keeps the trackers in sync with the index if their work is
+             * rolled back.
+             */
             doRollback();
             return;
         }
@@ -143,6 +143,15 @@ public class MetadataTracker extends AbstractTracker implements Tracker
         if(!isMaster && isSlave)
         {
-            // Dynamic registration
+            /*
+             * This section allows Solr's master/slave setup to be used with dynamic shard registration.
+             * In this scenario the slave is polling a "tracking" Solr node. The code below calls
+             * the repo to register the state of the node without pulling any real transactions from the repo.
+             *
+             * This allows the repo to register the replica so that it will be included in queries. But the slave Solr node
+             * will pull its data from a "tracking" Solr node using Solr's master/slave replication, rather than tracking the repository.
+             */
             ShardState shardstate = getShardState();
             client.getTransactions(0L, null, 0L, null, 0, shardstate);
@@ -610,6 +619,11 @@ public class MetadataTracker extends AbstractTracker implements Tracker
     {
         try
         {
+            /*
+             * This write lock is used to lock out the Commit Tracker. This ensures that the MetadataTracker will
+             * not be indexing content while commits or rollbacks are occurring.
+             */
             getWriteLock().acquire();

             /*
@@ -621,6 +635,26 @@ public class MetadataTracker extends AbstractTracker implements Tracker
             this.state = getTrackerState();

+            /*
+             * The fromCommitTime tells getSomeTransactions() where to start; that part is actually fairly straightforward.
+             *
+             * What makes this code so tricky to understand is state.getTimeToStopIndexing().
+             *
+             * There are two scenarios to keep in mind:
+             *
+             * 1) Full re-index: in this scenario state.getTimeToStopIndexing() will never stop the indexing.
+             *
+             * 2) Up-to-date indexing: this is where state.getTimeToStopIndexing() gets interesting. In this scenario
+             * the Solr index is already up to date with the repo and it is tracking new transactions. The state.getTimeToStopIndexing()
+             * in this scenario causes the getSomeTransactions() call to stop returning results if it finds a transaction
+             * beyond a specific point in time. This will break out of this loop and end the tracker run.
+             *
+             * The next time the metadata tracker runs, the "continueState()" method applies "hole retention"
+             * to state.getLastGoodTxCommitTimeInIndex(). This causes the state.getLastGoodTxCommitTimeInIndex() to scan
+             * for prior transactions that might have been missed.
+             */
             Long fromCommitTime = getTxFromCommitTime(txnsFound, state.getLastGoodTxCommitTimeInIndex());
             transactions = getSomeTransactions(txnsFound, fromCommitTime, TIME_STEP_1_HR_IN_MS, 2000,
                     state.getTimeToStopIndexing());
@@ -640,6 +674,22 @@ public class MetadataTracker extends AbstractTracker implements Tracker

             ArrayList<Transaction> txBatch = new ArrayList<>();
             for (Transaction info : transactions.getTransactions()) {

+                /*
+                 * isInIndex is used to ensure transactions that are being re-pulled due to "hole retention" are not re-indexed if
+                 * they have already been indexed.
+                 *
+                 * The logic in infoSrv.txnInIndex() first checks an in-memory LRU cache for the txnId. If it doesn't find it in the cache
+                 * it checks the index. The LRU cache is only needed for txnIds that have been indexed but are not yet visible in the index, for
+                 * one of two reasons:
+                 *
+                 * 1) The commit tracker has not yet committed the transaction.
+                 * 2) The txnId has been committed to the index but the new searcher has not yet been warmed.
+                 *
+                 * This means that to ensure txnIds are not needlessly reprocessed during hole retention, the LRU cache must be large
+                 * enough to cover the time between when a txnId is indexed and when it becomes visible.
+                 */
                 boolean isInIndex = (infoSrv.txnInIndex(info.getId(), true) && info.getCommitTimeMs() <= state.getLastIndexedTxCommitTime());
                 if (isInIndex) {
                     txnsFound.add(info);
@@ -142,30 +142,34 @@ public class ModelTracker extends AbstractTracker implements Tracker

         };
         // List XML files
-        for (File file : alfrescoModelDir.listFiles(filter))
-        {
-            InputStream modelStream = null;
-            M2Model model;
-            try
-            {
-                modelStream = new FileInputStream(file);
-                model = M2Model.createModel(modelStream);
-            }
-            catch (IOException e)
-            {
-                throw new AlfrescoRuntimeException("File not found: " + file, e);
-            }
-            finally
-            {
-                if (modelStream != null)
-                {
-                    try { modelStream.close(); } catch (Exception e) {}
-                }
-            }
-            // Model successfully loaded
-            for (M2Namespace namespace : model.getNamespaces())
-            {
-                modelMap.put(namespace.getUri(), model);
-            }
-        }
+        File[] files = alfrescoModelDir.listFiles(filter);
+        if (files != null)
+        {
+            for (File file : files)
+            {
+                InputStream modelStream = null;
+                M2Model model;
+                try
+                {
+                    modelStream = new FileInputStream(file);
+                    model = M2Model.createModel(modelStream);
+                }
+                catch (IOException e)
+                {
+                    throw new AlfrescoRuntimeException("File not found: " + file, e);
+                }
+                finally
+                {
+                    if (modelStream != null)
+                    {
+                        try { modelStream.close(); } catch (Exception e) {}
+                    }
+                }
+                // Model successfully loaded
+                for (M2Namespace namespace : model.getNamespaces())
+                {
+                    modelMap.put(namespace.getUri(), model);
+                }
+            }
+        }
@@ -61,7 +61,7 @@ public class TrackerRegistry
     public <T extends Tracker> T getTrackerForCore(String coreName, Class<T> trackerClass)
     {
         Map<Class<? extends Tracker>, Tracker> coreTrackers = this.trackers.get(coreName);
-        return (T) coreTrackers.get(trackerClass);
+        return null == coreTrackers ? null : (T) coreTrackers.get(trackerClass);
     }

     public synchronized void register(String coreName, Tracker tracker)
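With this fix, looking up a tracker for a core that was never registered yields null instead of a NullPointerException. A hypothetical caller-side sketch:

    import org.alfresco.solr.tracker.MetadataTracker;
    import org.alfresco.solr.tracker.TrackerRegistry;

    final class TrackerLookupExample
    {
        // Safe to call for unregistered cores after the fix; callers must
        // still handle the null result explicitly.
        static boolean isTracked(TrackerRegistry registry, String coreName)
        {
            return registry.getTrackerForCore(coreName, MetadataTracker.class) != null;
        }
    }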
@@ -106,30 +106,27 @@ public class CachedDocTransformer extends DocTransformer
         {
             String alfrescoFieldName = AlfrescoSolrDataModel.getInstance().getAlfrescoPropertyFromSchemaField(fieldName);
             Collection<Object> values = cachedDoc.getFieldValues(fieldName);
-            ArrayList<Object> newValues = new ArrayList<Object>(values.size());
-            for(Object value : values)
-            {
-                if(value instanceof String)
-                {
-                    String stringValue = (String) value;
-                    int start = stringValue.lastIndexOf('\u0000');
-                    if(start == -1)
-                    {
-                        newValues.add(stringValue);
-                    }
-                    else
-                    {
-                        newValues.add(stringValue.substring(start+1));
-                    }
-                }
-                else
-                {
-                    newValues.add(value);
-                }
-            }
-            doc.removeFields(alfrescoFieldName);
-            doc.addField(alfrescoFieldName, newValues);
+
+            //Guard against null pointer in case data model field name does not match up with cachedDoc field name.
+            if(values != null) {
+                ArrayList<Object> newValues = new ArrayList<Object>(values.size());
+                for (Object value : values) {
+                    if (value instanceof String) {
+                        String stringValue = (String) value;
+                        int start = stringValue.lastIndexOf('\u0000');
+                        if (start == -1) {
+                            newValues.add(stringValue);
+                        } else {
+                            newValues.add(stringValue.substring(start + 1));
+                        }
+                    } else {
+                        newValues.add(value);
+                    }
+                }
+                doc.removeFields(alfrescoFieldName);
+                doc.addField(alfrescoFieldName, newValues);
+            }
         }
         else
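The transformer keeps only the text after the last '\u0000' marker in a stored value. A toy demonstration of that trimming (the prefixed-value layout is illustrative):

    public class NullSeparatorDemo
    {
        public static void main(String[] args)
        {
            String stored = "en\u0000Hello world"; // e.g. a locale prefix before the marker
            int start = stored.lastIndexOf('\u0000');
            String display = (start == -1) ? stored : stored.substring(start + 1);
            System.out.println(display); // prints: Hello world
        }
    }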
@@ -1169,7 +1169,7 @@
    -->
    <!--
    <lst name="slave">
-     <str name="masterUrl">http://your-master-hostname:8983/solr</str>
+     <str name="masterUrl">http://your-master-hostname:8983/solr/alfresco</str>
      <str name="pollInterval">00:00:60</str>
    </lst>
    -->
@@ -1169,7 +1169,7 @@
    -->
    <!--
    <lst name="slave">
-     <str name="masterUrl">http://your-master-hostname:8983/solr</str>
+     <str name="masterUrl">http://your-master-hostname:8983/solr/alfresco</str>
      <str name="pollInterval">00:00:60</str>
    </lst>
    -->
@@ -1169,7 +1169,7 @@
    -->
    <!--
    <lst name="slave">
-     <str name="masterUrl">http://your-master-hostname:8983/solr</str>
+     <str name="masterUrl">http://your-master-hostname:8983/solr/alfresco</str>
      <str name="pollInterval">00:00:60</str>
    </lst>
    -->
@@ -838,6 +838,7 @@
        <str>consistencyComponent</str>
        <str>query</str>
        <str>facet</str>
+       <str>facet_module</str>
        <str>mlt</str>
        <str>highlight</str>
        <str>stats</str>
@@ -1169,7 +1170,7 @@
    -->
    <!--
    <lst name="slave">
-     <str name="masterUrl">http://your-master-hostname:8983/solr</str>
+     <str name="masterUrl">http://your-master-hostname:8983/solr/alfresco</str>
      <str name="pollInterval">00:00:60</str>
    </lst>
    -->
@@ -838,6 +838,7 @@
        <str>consistencyComponent</str>
        <str>query</str>
        <str>facet</str>
+       <str>facet_module</str>
        <str>mlt</str>
        <str>highlight</str>
        <str>stats</str>
@@ -1169,7 +1170,7 @@
    -->
    <!--
    <lst name="slave">
-     <str name="masterUrl">http://your-master-hostname:8983/solr</str>
+     <str name="masterUrl">http://your-master-hostname:8983/solr/alfresco</str>
      <str name="pollInterval">00:00:60</str>
    </lst>
    -->
@@ -179,3 +179,4 @@ solr.suggester.minSecsBetweenBuilds=3600
 alfresco.contentStreamLimit=10000000
 #Shard setup
 shard.method=DB_ID
+#END of solrcore
@@ -1138,7 +1138,7 @@
    -->
    <!--
    <lst name="slave">
-     <str name="masterUrl">http://your-master-hostname:8983/solr</str>
+     <str name="masterUrl">http://your-master-hostname:8983/solr/alfresco</str>
      <str name="pollInterval">00:00:60</str>
    </lst>
    -->
@@ -1169,7 +1169,7 @@
    -->
    <!--
    <lst name="slave">
-     <str name="masterUrl">http://your-master-hostname:8983/solr</str>
+     <str name="masterUrl">http://your-master-hostname:8983/solr/alfresco</str>
      <str name="pollInterval">00:00:60</str>
    </lst>
    -->
@@ -4,24 +4,20 @@ set -e
 [ "$DEBUG" ] && set -x

 nicebranch=`echo "$bamboo_planRepository_1_branch" | sed 's/\//_/'`
+DOCKER_RESOURCES_PATH="${1:-packaging/target/docker-resources}"

 if [ "${nicebranch}" = "master" ] || [ "${nicebranch#release}" != "${nicebranch}" ]
 then
-    # set current working directory to the directory of the script
-    cd "$bamboo_working_directory"
-
-    docker_registry="quay.io/alfresco/search-services"
     tag_version=`echo "$bamboo_maven_version"`
     if [ "${bamboo_shortJobName}" = "Release" ]
     then
         tag_version=`echo "$bamboo_release_version"`
-        docker_registry="alfresco/alfresco-search-services"
     fi

-    dockerImage="$docker_registry:$tag_version"
+    dockerImage="quay.io/alfresco/search-services:$tag_version"
     echo "Building $dockerImage from $nicebranch using version $tag_version"

-    docker build -t $dockerImage packaging/target/docker-resources
+    docker build -t $dockerImage ${DOCKER_RESOURCES_PATH}

     echo "Running tests"
     docker run --rm "$dockerImage" [ -d /opt/alfresco-search-services/solr ] || (echo "solr dir does not exist" && exit 1)
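A hedged usage sketch for the build script above; its file name is not shown in this diff, so build-and-test.sh is a placeholder. The new optional first argument overrides where the docker resources were staged:

    # default: builds from packaging/target/docker-resources
    ./build-and-test.sh

    # override the staging path via the new DOCKER_RESOURCES_PATH argument
    ./build-and-test.sh some/module/target/docker-resources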
@@ -13,7 +13,7 @@
   <parent>
     <groupId>org.alfresco</groupId>
     <artifactId>alfresco-search-parent</artifactId>
-    <version>1.2.0-dockerTest3</version>
+    <version>1.2.0</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
   <dependencies>
@@ -1,7 +1,7 @@
 version: '3'
 services:
     alfresco:
-        image: quay.io/alfresco/alfresco-content-services${ALFRESCO_EDITION}:${ALFRESCO_TAG}
+        image: quay.io/alfresco/alfresco-content-repository${ALFRESCO_EDITION}:${ALFRESCO_TAG}
         environment:
             JAVA_OPTS : "
                 -Ddb.driver=org.postgresql.Driver
search-services/packaging/start-alfresco.sh (new executable file, 88 lines)
@@ -0,0 +1,88 @@
#!/usr/bin/env bash
# About:
# Run docker-compose using the appropriate docker-resources generated in target.
# The script uses the 'find' tool to search for a particular docker-compose.yml file;
# you can also pass a filter tag (partial strings from the absolute path or docker-compose.yml) that will
# uniquely identify your desired docker-compose.yml file
#
# Usage:
# $ run.sh <docker-resource-folder> <clean-or-not> <filter-flag> <debug-or-not> <alfresco-endpoint>
# * <docker-resource-folder>: defaults to 'target'
# * clean: will clean all running docker containers on the machine; it will not start alfresco.
# * <filter-flag>: can be 5.x or 6.x (defaults to 6.x) - it can be used to filter different compose files
# * <alfresco-endpoint>: the url of the alfresco endpoint
#
# Examples:
# $ run.sh                  - it will use the latest docker-compose from this branch
# $ run.sh target clean     - it will clean the containers using the latest docker-compose from this branch
# $ run.sh target clean 5.x - it will clean the containers using the 5.x docker-compose file
# $ run.sh target up 5.x    - will start alfresco using the 5.x docker-compose file
# $ run.sh target up docker-resources/docker-compose.yml debug

echo `basename $0` called on `date` with arguments: "$@"

DOCKER_RESOURCES_PATH="${1:-target}"
CLEANUP="${2:-no-clean}"
FILTER_FLAG="${3:-6.x}" #5.x, 6.x or even docker-resources/docker-compose.yml (for release branches)
DOCKER_COMPOSE_FILE=$(find ${DOCKER_RESOURCES_PATH} -name "docker-compose.yml" -type f -exec realpath {} \; | grep ${FILTER_FLAG})
DEBUG="${4:-no-debug}"
ALFRESCO_ENDPOINT="${5:-http://localhost:8081/alfresco}"

# exit if docker-compose not found
[ ! -n "${DOCKER_COMPOSE_FILE}" ] && echo "docker-compose.yml file NOT FOUND in folder: '${DOCKER_RESOURCES_PATH}' using this filter flag: '${FILTER_FLAG}'" && exit 1

DOCKER_RESOURCES_PATH=`dirname ${DOCKER_COMPOSE_FILE}`

function wait_for_alfresco_to_start {
    WAIT_INTERVAL=1
    COUNTER=0
    TIMEOUT=2000
    t0=`date +%s`

    echo "Waiting for Alfresco to start in docker container: ${ALFRESCO_ENDPOINT}"
    until $(curl --output /dev/null --silent --head --fail ${ALFRESCO_ENDPOINT}) || [ "$COUNTER" -eq "$TIMEOUT" ]; do
        printf '.'
        sleep $WAIT_INTERVAL
        COUNTER=$(($COUNTER+$WAIT_INTERVAL))
    done

    if (("$COUNTER" < "$TIMEOUT")) ; then
        t1=`date +%s`
        delta=$((($t1 - $t0)/60))
        echo "Alfresco Started in $delta minutes: ${ALFRESCO_ENDPOINT}"
    else
        echo "Waited $COUNTER seconds"
        echo "Alfresco could not start in time."
        exit 1
    fi
}

function cleanup_containers {
    cd ${DOCKER_RESOURCES_PATH} && docker-compose kill
    cd ${DOCKER_RESOURCES_PATH} && docker-compose rm -fv
}

function start_alfresco {
    # update the basicAuthScheme https://issues.alfresco.com/jira/browse/REPO-2575
    sed -ie "s/-Dindex.subsystem.name=solr6/-Dindex.subsystem.name=solr6 -Dalfresco.restApi.basicAuthScheme=true/g" ${DOCKER_COMPOSE_FILE}

    # show the configuration of the docker-compose.yml file that we will run
    cd ${DOCKER_RESOURCES_PATH} && docker-compose config

    if [ ${DEBUG} = "debug" ]; then
        cd ${DOCKER_RESOURCES_PATH} && docker-compose up
    else
        cd ${DOCKER_RESOURCES_PATH} && docker-compose up -d
        wait_for_alfresco_to_start
    fi
}

set -ex

if [ ${CLEANUP} = "clean" ]; then
    cleanup_containers
else
    cleanup_containers
    start_alfresco
fi
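Example invocations, adapted from the script's own header comments (the endpoint defaults to http://localhost:8081/alfresco):

    ./start-alfresco.sh target up 6.x        # clean up old containers, then start the 6.x compose stack detached
    ./start-alfresco.sh target clean         # kill and remove the containers without starting Alfresco
    ./start-alfresco.sh target up 5.x debug  # run the 5.x stack in the foreground with compose logs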
search-services/packaging/whitesource-package-scan.sh (new executable file, 41 lines)
@@ -0,0 +1,41 @@
#!/usr/bin/env sh
# Usage: whitesource-package-scan.sh <clean>
# - this will copy the distribution zip to the release area
# - will unzip it and scan it using the WhiteSource File System Agent https://goo.gl/ohg4Rv
# - and will clean up the scan folder if the <clean> string is passed as a parameter
# Example:
# $ whitesource-package-scan.sh       -> perform the scan
# $ whitesource-package-scan.sh clean -> will clean up the scan folder

echo `basename $0` called on `date` with arguments: "$@"
set -exu

nicebranch=`echo "$bamboo_planRepository_1_branch" | sed 's/\//_/'`

if [ "${nicebranch}" = "master" ] || [ "${nicebranch#release}" != "${nicebranch}" ]
then
    POM_VERSION=$(grep version pom.xml | grep -v -e '<?xml|~' | head -n 1 | awk -F '[><]' '{print $3}')
    RELEASE_FOLDER=/data/releases/SearchServices/${POM_VERSION}
    DISTRIBUTION_NAME=alfresco-search-services-${POM_VERSION}.zip
    DISTRIBUTION_ZIP_PATH=${RELEASE_FOLDER}/${DISTRIBUTION_NAME}
    DISTRIBUTION_ZIP_SCAN_PATH=${RELEASE_FOLDER}/scan
    CLEANUP="${1:-do-not-clean}"

    if [ ${CLEANUP} = "clean" ]; then
        echo "Cleaning up scan folder..."
        ssh -q tomcat@pbam01.alfresco.com [[ -d ${RELEASE_FOLDER} ]] && ssh tomcat@pbam01.alfresco.com rm -rf ${RELEASE_FOLDER} || echo "Nothing to cleanup"
    else
        echo "Copy distribution to release area..."
        ssh tomcat@pbam01.alfresco.com mkdir -p ${RELEASE_FOLDER}
        scp target/${DISTRIBUTION_NAME} tomcat@pbam01.alfresco.com:${RELEASE_FOLDER}

        #unzip distribution
        ssh tomcat@pbam01.alfresco.com unzip ${DISTRIBUTION_ZIP_PATH} -d ${DISTRIBUTION_ZIP_SCAN_PATH}

        #whitesource scanning using file agent: https://goo.gl/ohg4Rv
        ssh tomcat@pbam01.alfresco.com sh /etc/bamboo/whitesource-agent.sh -d ${DISTRIBUTION_ZIP_SCAN_PATH} -project distribution-zip -product SearchServices-${bamboo_release_version}
    fi
else
    echo "WhiteSource scan will be executed only from master or release branches. Skipping for '${nicebranch}'"
fi
@@ -7,7 +7,7 @@
     <version>8</version>
   </parent>
   <artifactId>alfresco-search-parent</artifactId>
-  <version>1.2.0-dockerTest3</version>
+  <version>1.2.0</version>
   <packaging>pom</packaging>
   <name>Alfresco Solr Search parent</name>
   <properties>
@@ -16,19 +16,19 @@
   </properties>
   <distributionManagement>
     <repository>
-      <id>alfresco-internal</id>
-      <url>https://artifacts.alfresco.com/nexus/content/repositories/internal-releases/</url>
+      <id>alfresco-releases</id>
+      <url>https://artifacts.alfresco.com/nexus/content/repositories/releases/</url>
     </repository>
     <snapshotRepository>
-      <id>alfresco-internal-snapshots</id>
-      <url>https://artifacts.alfresco.com/nexus/content/repositories/internal-snapshots/</url>
+      <id>alfresco-snapshots</id>
+      <url>https://artifacts.alfresco.com/nexus/content/repositories/snapshots/</url>
     </snapshotRepository>
   </distributionManagement>
   <scm>
     <connection>scm:git:git@github.com:Alfresco/SearchServices.git</connection>
     <developerConnection>scm:git:git@github.com:Alfresco/SearchServices.git</developerConnection>
     <url>https://github.com/Alfresco/SearchServices.git</url>
-    <tag>1.2.0-dockerTest3</tag>
+    <tag>1.2.0</tag>
   </scm>
   <modules>
     <module>alfresco-search</module>