[ SEARCH-1752 ] TrackerState got updated on slaves + remove unused configuration properties

agazzarini
2019-10-22 16:25:30 +01:00
parent 200e71ce73
commit 091afab426
11 changed files with 47 additions and 44 deletions

View File

@@ -54,7 +54,6 @@ ENV MASTER_HOST $MASTER_HOST
# Set Master / Slave configuration for this Node
RUN if [ "$ENABLE_MASTER" == "true" ] ; then \
sed -i '/^bash.*/i echo "\nenable.master=${ENABLE_MASTER}\nenable.slave=${ENABLE_SLAVE}" >> ${DIST_DIR}/solrhome/templates/rerank/conf/solrcore.properties\n' \
${DIST_DIR}/solr/bin/search_config_setup.sh; \
sed -i "/^bash.*/i sed -i '/^\\\\\s*<requestHandler name=\"\\\\/replication\".*/a \
<lst name=\"master\">\
@@ -64,7 +63,6 @@ RUN if [ "$ENABLE_MASTER" == "true" ] ; then \
</lst>' ${DIST_DIR}/solrhome/templates/rerank/conf/solrconfig.xml\n" ${DIST_DIR}/solr/bin/search_config_setup.sh; \
fi
RUN if [ "$ENABLE_SLAVE" == "true" ] ; then \
sed -i '/^bash.*/i echo "\nenable.master=${ENABLE_MASTER}\nenable.slave=${ENABLE_SLAVE}" >> ${DIST_DIR}/solrhome/templates/rerank/conf/solrcore.properties\n' \
${DIST_DIR}/solr/bin/search_config_setup.sh; \
sed -i "/^bash.*/i sed -i '/^\\\\\s*<requestHandler name=\"\\\\/replication\".*/a \
<lst name=\"slave\">\
@@ -131,4 +129,4 @@ fi
RUN mkdir ${DIST_DIR}/keystore \
&& chown -R solr:solr ${DIST_DIR}/keystore
VOLUME ["${DIST_DIR}/keystore"]
VOLUME ["${DIST_DIR}/keystore"]

View File

@@ -150,8 +150,6 @@ The following table illustrates the configuration properties used by the Tracker
|alfresco.stores|workspace://SpacesStore|The reference to a node store| | |Y|Y| | |
|batch.count|5000|UpSert batch size (e.g. metadata docs, acls)| | |Y|Y| | |
|alfresco.maxLiveSearchers|2|Max allowed number of active searchers|Y| |Y|Y| | |
|enable.slave|false|Indicates if the hosting instance is a slave| | |Y|Y| | |
|enable.master|true|Indicates if the hosting instance is a master| | |Y|Y| | |
|shard.count|1|The total number of shards that compose the Solr infrastructure|| |Y|Y| | |
|shard.instance|0|The unique shard identifier assigned to this instance|| |Y|Y| | |
|shard.method|"DB_ID"|Data (Documents, ACLs) Routing criteria among shards| | |Y|Y| | |

View File

@@ -159,7 +159,7 @@ public class SolrCoreLoadListener extends AbstractSolrEventListener
{
LOGGER.info("SearchServices Core Trackers have been disabled on core \"{}\" because it is a slave core.", core.getName());
SlaveNodeStateProvider stateProvider = new SlaveNodeStateProvider(coreProperties, repositoryClient, core.getName(), informationServer);
SlaveNodeStateProvider stateProvider = new SlaveNodeStateProvider(false, coreProperties, repositoryClient, core.getName(), informationServer);
trackerRegistry.register(core.getName(), stateProvider);
scheduler.schedule(stateProvider, core.getName(), coreProperties);
@@ -206,7 +206,7 @@ public class SolrCoreLoadListener extends AbstractSolrEventListener
trackerRegistry.register(coreName, contentTrkr);
scheduler.schedule(contentTrkr, coreName, props);
MetadataTracker metaTrkr = new MetadataTracker(props, repositoryClient, coreName, srv);
MetadataTracker metaTrkr = new MetadataTracker(true, props, repositoryClient, coreName, srv);
trackerRegistry.register(coreName, metaTrkr);
scheduler.schedule(metaTrkr, coreName, props);
@@ -289,7 +289,7 @@ public class SolrCoreLoadListener extends AbstractSolrEventListener
}
/**
* Checks if the content store belonging to the hosting Solr node must be set in read only mode.
* Checks if the configuration declares this node as a slave.
*
* @param core the hosting {@link SolrCore} instance.
* @return true if the content store must be set in read only mode, false otherwise.
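
Taken together, the SolrCoreLoadListener hunks above wire the trackers like this: a slave core registers only a SlaveNodeStateProvider (created with isMaster = false), while a master core keeps its MetadataTracker, now constructed with an explicit isMaster flag. The sketch below condenses that flow; the constructor and registration calls are the ones visible in the diff, but the surrounding method, the parameter names and the import packages are assumptions for illustration only.

// Illustrative only: imports/packages and the wrapping method are assumed, not taken from the commit.
import java.util.Properties;

import org.alfresco.solr.SolrInformationServer;
import org.alfresco.solr.client.SOLRAPIClient;
import org.alfresco.solr.tracker.MetadataTracker;
import org.alfresco.solr.tracker.SlaveNodeStateProvider;
import org.alfresco.solr.tracker.SolrTrackerScheduler;
import org.alfresco.solr.tracker.TrackerRegistry;
import org.apache.solr.core.SolrCore;

class TrackerRegistrationSketch
{
    void registerTrackers(
            boolean isSlave,
            SolrCore core,
            Properties coreProperties,
            SOLRAPIClient repositoryClient,
            SolrInformationServer informationServer,
            TrackerRegistry trackerRegistry,
            SolrTrackerScheduler scheduler)
    {
        String coreName = core.getName();
        if (isSlave)
        {
            // Slave cores: no indexing trackers, just a state provider that rebuilds
            // the TrackerState on demand (see SlaveNodeStateProvider below).
            SlaveNodeStateProvider stateProvider = new SlaveNodeStateProvider(
                    false, coreProperties, repositoryClient, coreName, informationServer);
            trackerRegistry.register(coreName, stateProvider);
            scheduler.schedule(stateProvider, coreName, coreProperties);
        }
        else
        {
            // Master cores: the MetadataTracker is now told explicitly that it is a master.
            MetadataTracker metadataTracker = new MetadataTracker(
                    true, coreProperties, repositoryClient, coreName, informationServer);
            trackerRegistry.register(coreName, metadataTracker);
            scheduler.schedule(metadataTracker, coreName, coreProperties);
        }
    }
}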

View File

@@ -31,7 +31,6 @@ import org.alfresco.solr.client.SOLRAPIClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Abstract base class that provides common {@link Tracker} behaviour.
*
@@ -39,20 +38,17 @@ import org.slf4j.LoggerFactory;
*/
public abstract class AbstractTracker implements Tracker
{
public static final long TIME_STEP_32_DAYS_IN_MS = 1000 * 60 * 60 * 24 * 32L;
public static final long TIME_STEP_1_HR_IN_MS = 60 * 60 * 1000L;
public static final String SHARD_METHOD_ACLID = "ACL_ID";
public static final String SHARD_METHOD_DBID = "DB_ID";
static final long TIME_STEP_32_DAYS_IN_MS = 1000 * 60 * 60 * 24 * 32L;
static final long TIME_STEP_1_HR_IN_MS = 60 * 60 * 1000L;
static final String SHARD_METHOD_DBID = "DB_ID";
protected final static Logger log = LoggerFactory.getLogger(AbstractTracker.class);
protected Properties props;
protected SOLRAPIClient client;
protected InformationServer infoSrv;
InformationServer infoSrv;
protected String coreName;
protected StoreRef storeRef;
protected long batchCount;
protected boolean isSlave = false;
protected boolean isMaster = true;
protected String alfrescoVersion;
protected TrackerStats trackerStats;
protected boolean runPostModelLoadInit = true;
@@ -71,14 +67,10 @@ public abstract class AbstractTracker implements Tracker
protected volatile boolean rollback;
protected final Type type;
/*
* A thread handler can be used by subclasses, but they have to intentionally instantiate it.
*/
protected ThreadHandler threadHandler;
ThreadHandler threadHandler;
/**
* Default constructor, strictly for testing.
@@ -98,8 +90,6 @@ public abstract class AbstractTracker implements Tracker
storeRef = new StoreRef(p.getProperty("alfresco.stores", "workspace://SpacesStore"));
batchCount = Integer.parseInt(p.getProperty("alfresco.batch.count", "5000"));
maxLiveSearchers = Integer.parseInt(p.getProperty("alfresco.maxLiveSearchers", "2"));
isSlave = Boolean.parseBoolean(p.getProperty("enable.slave", "false"));
isMaster = Boolean.parseBoolean(p.getProperty("enable.master", "true"));
shardCount = Integer.parseInt(p.getProperty("shard.count", "1"));
shardInstance = Integer.parseInt(p.getProperty("shard.instance", "0"));

View File

@@ -288,11 +288,6 @@ public class AclTracker extends AbstractTracker
protected void trackRepository() throws IOException, AuthenticationException, JSONException
{
checkShutdown();
if(!isMaster && isSlave)
{
return;
}
TrackerState state = super.getTrackerState();

View File

@@ -64,10 +64,10 @@ public class MetadataTracker extends NodeStateProvider implements Tracker
private ConcurrentLinkedQueue<Long> nodesToPurge = new ConcurrentLinkedQueue<>();
private ConcurrentLinkedQueue<String> queriesToReindex = new ConcurrentLinkedQueue<>();
public MetadataTracker(Properties p, SOLRAPIClient client, String coreName,
public MetadataTracker(final boolean isMaster, Properties p, SOLRAPIClient client, String coreName,
InformationServer informationServer)
{
super(p, client, coreName, informationServer, Tracker.Type.METADATA);
super(isMaster, p, client, coreName, informationServer, Tracker.Type.METADATA);
transactionDocsBatchSize = Integer.parseInt(p.getProperty("alfresco.transactionDocsBatchSize", "100"));
nodeBatchSize = Integer.parseInt(p.getProperty("alfresco.nodeBatchSize", "10"));
threadHandler = new ThreadHandler(p, coreName, "MetadataTracker");

View File

@@ -62,6 +62,7 @@ import java.util.Properties;
public abstract class NodeStateProvider extends AbstractTracker
{
DocRouter docRouter;
private final boolean isMaster;
/** The string representation of the shard key. */
private Optional<String> shardKey;
@@ -70,6 +71,7 @@ public abstract class NodeStateProvider extends AbstractTracker
protected Optional<QName> shardProperty = Optional.empty();
NodeStateProvider(
boolean isMaster,
Properties p,
SOLRAPIClient client,
String coreName,
@@ -77,15 +79,19 @@ public abstract class NodeStateProvider extends AbstractTracker
Type type)
{
super(p, client, coreName, informationServer, type);
this.isMaster = isMaster;
shardMethod = p.getProperty("shard.method", SHARD_METHOD_DBID);
shardKey = ofNullable(p.getProperty(SHARD_KEY_KEY));
firstUpdateShardProperty();
docRouter = DocRouterFactory.getRouter(p, ShardMethodEnum.getShardMethod(shardMethod));
}
NodeStateProvider(Type type)
{
super(type);
this.isMaster = false;
}
private void firstUpdateShardProperty()
@@ -168,7 +174,7 @@ public abstract class NodeStateProvider extends AbstractTracker
*/
ShardState getShardState()
{
TrackerState transactionsTrackerState = super.getTrackerState();
TrackerState transactionsTrackerState = getTrackerState();
TrackerState changeSetsTrackerState =
of(infoSrv.getAdminHandler())
.map(AlfrescoCoreAdminHandler::getTrackerRegistry)
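
The subtle change above, from super.getTrackerState() to getTrackerState(), is what makes the rest of the commit work: the call becomes virtual, so when the concrete tracker is a SlaveNodeStateProvider its override (next file) is used instead of the state cached by the base class. A small, self-contained sketch of that dispatch difference, using hypothetical classes purely for illustration:

// Hypothetical classes: they only illustrate an explicit super call vs. virtual dispatch.
class AbstractTrackerSketch
{
    Object getTrackerState()
    {
        return "cached state from the base class";
    }
}

class NodeStateProviderSketch extends AbstractTrackerSketch
{
    // Before the commit: always the base-class state, even for slave subclasses.
    Object shardStateBefore()
    {
        return super.getTrackerState();
    }

    // After the commit: virtual call, so a subclass override wins.
    Object shardStateAfter()
    {
        return getTrackerState();
    }
}

class SlaveProviderSketch extends NodeStateProviderSketch
{
    @Override
    Object getTrackerState()
    {
        return "fresh state rebuilt on every call";
    }
}

public class DispatchDemo
{
    public static void main(String[] args)
    {
        SlaveProviderSketch slave = new SlaveProviderSketch();
        System.out.println(slave.shardStateBefore()); // cached state from the base class
        System.out.println(slave.shardStateAfter());  // fresh state rebuilt on every call
    }
}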

View File

@@ -5,6 +5,7 @@ import static org.alfresco.solr.tracker.Tracker.Type.NODE_STATE_PUBLISHER;
import org.alfresco.httpclient.AuthenticationException;
import org.alfresco.repo.index.shard.ShardState;
import org.alfresco.solr.SolrInformationServer;
import org.alfresco.solr.TrackerState;
import org.alfresco.solr.client.SOLRAPIClient;
import org.apache.commons.codec.EncoderException;
@@ -28,9 +29,14 @@ import java.util.Properties;
*/
public class SlaveNodeStateProvider extends NodeStateProvider
{
public SlaveNodeStateProvider(Properties coreProperties, SOLRAPIClient repositoryClient, String name, SolrInformationServer informationServer)
public SlaveNodeStateProvider(
boolean isMaster,
Properties coreProperties,
SOLRAPIClient repositoryClient,
String name,
SolrInformationServer informationServer)
{
super(coreProperties, repositoryClient, name, informationServer, NODE_STATE_PUBLISHER);
super(isMaster, coreProperties, repositoryClient, name, informationServer, NODE_STATE_PUBLISHER);
}
@Override
@@ -72,4 +78,17 @@ public class SlaveNodeStateProvider extends NodeStateProvider
{
return false;
}
/**
When running in slave mode, we need to recreate the tracker state every time.
This is because in that context there is no tracker updating the state (e.g. lastIndexedChangeSetCommitTime,
lastIndexedChangeSetId)
*
@return a new, fresh, up-to-date instance of {@link TrackerState}.
*/
@Override
public TrackerState getTrackerState()
{
return infoSrv.getTrackerInitialState();
}
}
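
The override above is the heart of SEARCH-1752: a slave has no tracker thread refreshing a cached TrackerState, so the provider rebuilds the state from the information server on every call. A self-contained sketch of that pattern, with hypothetical simplified types that only mirror the shape of the real ones:

// Hypothetical types, for illustration only.
interface InformationServerSketch
{
    TrackerStateSketch getTrackerInitialState(); // builds a state snapshot from the index
}

class TrackerStateSketch
{
    long lastIndexedChangeSetId;
    long lastIndexedChangeSetCommitTime;
}

class SlaveStateSketch
{
    private final InformationServerSketch infoSrv;

    SlaveStateSketch(InformationServerSketch infoSrv)
    {
        this.infoSrv = infoSrv;
    }

    // Mirrors SlaveNodeStateProvider.getTrackerState(): no caching, because on a
    // slave nothing would keep a cached state up to date.
    TrackerStateSketch getTrackerState()
    {
        return infoSrv.getTrackerInitialState();
    }
}

public class SlaveStateDemo
{
    public static void main(String[] args)
    {
        InformationServerSketch infoSrv = TrackerStateSketch::new; // each call builds a new snapshot
        SlaveStateSketch provider = new SlaveStateSketch(infoSrv);

        // Two calls, two distinct snapshots: nothing is cached on the slave side.
        System.out.println(provider.getTrackerState() != provider.getTrackerState()); // true
    }
}

Any caller, such as getShardState() in NodeStateProvider now that it no longer goes through super, therefore always sees the latest lastIndexedChangeSetCommitTime / lastIndexedChangeSetId values.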

View File

@@ -47,7 +47,7 @@ import org.junit.runner.RunWith;
import org.mockito.InOrder;
import org.mockito.Mock;
import org.mockito.Spy;
import org.mockito.runners.MockitoJUnitRunner;
import org.mockito.junit.MockitoJUnitRunner;
@RunWith(MockitoJUnitRunner.class)
public class MetadataTrackerTest
@@ -58,20 +58,23 @@ public class MetadataTrackerTest
@Mock
private SOLRAPIClient repositoryClient;
private String coreName = "theCoreName";
@Mock
private InformationServer srv;
@Spy
private Properties props;
@Mock
private TrackerStats trackerStats;
@Before
public void setUp() throws Exception
public void setUp()
{
doReturn("workspace://SpacesStore").when(props).getProperty("alfresco.stores");
when(srv.getTrackerStats()).thenReturn(trackerStats);
this.metadataTracker = spy(new MetadataTracker(props, repositoryClient, coreName, srv));
String coreName = "theCoreName";
this.metadataTracker = spy(new MetadataTracker(true, props, repositoryClient, coreName, srv));
ModelTracker modelTracker = mock(ModelTracker.class);
when(modelTracker.hasModels()).thenReturn(true);
@@ -197,6 +200,4 @@ public class MetadataTrackerTest
assertSame(nodes4Tx, nodes);
}
}

View File

@@ -102,8 +102,6 @@ public class ModelTrackerTest
when(props.getProperty("alfresco.stores", "workspace://SpacesStore")).thenReturn("workspace://SpacesStore");
when(props.getProperty("alfresco.batch.count", "5000")).thenReturn("5000");
when(props.getProperty("alfresco.maxLiveSearchers", "2")).thenReturn("2");
when(props.getProperty("enable.slave", "false")).thenReturn("false");
when(props.getProperty("enable.master", "true")).thenReturn("true");
when(props.getProperty("shard.count", "1")).thenReturn("1");
when(props.getProperty("shard.instance", "0")).thenReturn("0");
when(this.srv.getTrackerStats()).thenReturn(trackerStats);

View File

@@ -73,8 +73,6 @@ public class SolrTrackerSchedulerTest
props.put("alfresco.stores", "workspace://SpacesStore");
props.put("alfresco.batch.count", "5000");
props.put("alfresco.maxLiveSearchers", "2");
props.put("enable.slave", "false");
props.put("enable.master", "true");
props.put("shard.count", "1");
props.put("shard.instance", "0");
props.put("shard.method", "SHARD_METHOD_DBID");
@@ -128,7 +126,7 @@ public class SolrTrackerSchedulerTest
{
String exp = "0/4 * * * * ? *";
props.put("alfresco.metadata.tracker.cron", exp);
MetadataTracker metadataTracker = new MetadataTracker(props, client, exp, informationServer);
MetadataTracker metadataTracker = new MetadataTracker(true, props, client, exp, informationServer);
this.trackerScheduler.schedule(metadataTracker, CORE_NAME, props);
verify(spiedQuartzScheduler).scheduleJob(any(JobDetail.class), any(Trigger.class));
checkCronExpression(exp);
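
For reference, "0/4 * * * * ? *" is a Quartz cron expression that fires on seconds 0, 4, 8, ... of every minute; Quartz is the scheduler behind the JobDetail/Trigger verification above. A small sketch, assuming the org.quartz library is on the classpath, that inspects the expression:

import java.text.ParseException;
import java.util.Date;

import org.quartz.CronExpression;

public class CronCheck
{
    public static void main(String[] args) throws ParseException
    {
        // The expression used by the test: every fourth second of every minute.
        CronExpression cron = new CronExpression("0/4 * * * * ? *");

        Date first = cron.getNextValidTimeAfter(new Date());
        Date second = cron.getNextValidTimeAfter(first);

        // Successive fire times are four seconds apart.
        System.out.println(first + " -> " + second);
    }
}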