Mirror of https://github.com/Alfresco/alfresco-community-repo.git, synced 2025-08-07 17:49:17 +00:00
Merged V3.1 to HEAD
13107: Remove accidentally added .project file.
13106: Correction to 13104 - remove accidentally included merge marks
13105: Correction to 13104 - remove accidentally included merge marks
13104: Update continuous build to include Hyperic plugin amongst distributables
13092: Fix ConnectionPoolMBean. Naming of boolean methods didn't match those on DBCP BasicDataSource (see the sketch below)
13088: Bring Hyperic plugin in sync with Derek's recent JMX changes
13076: Extended properties available on ConnectionPoolMBean
13045: Merged V3.0 to V3.1 (record only)
13044: Merged V2.2 to V3.0
13041: Merged V3.1 to V2.2
13037: PostgreSQL upgrade scripts from 2.1.6, 2.2.0, and 2.2.1

git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/alfresco/HEAD/root@13522 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
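The 13092 fix concerns JMX attribute naming: a standard MBean derives a boolean attribute from either an isX() or a getX() accessor, so an MBean interface that fronts a Commons DBCP pool has to declare exactly the accessor names that BasicDataSource provides. The sketch below is hypothetical (the real ConnectionPoolMBean interface is not part of this page); BasicDataSource's boolean properties do use the getX() form:

import org.apache.commons.dbcp.BasicDataSource;

// Hypothetical MBean interface: the attribute accessor names must line
// up with the DBCP accessors being exposed. BasicDataSource uses the
// getX() form for its boolean properties, so isX() declarations here
// would not match the methods being delegated to.
public interface ConnectionPoolExampleMBean
{
    boolean getDefaultAutoCommit();
    boolean getTestOnBorrow();
}

class ConnectionPoolExample implements ConnectionPoolExampleMBean
{
    private final BasicDataSource dataSource = new BasicDataSource();

    public boolean getDefaultAutoCommit()
    {
        return dataSource.getDefaultAutoCommit();   // exists on BasicDataSource
    }

    public boolean getTestOnBorrow()
    {
        return dataSource.getTestOnBorrow();        // exists on BasicDataSource
    }
}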
@@ -24,194 +24,207 @@
 */
 package org.alfresco.repo.cache.jgroups;
 
-import net.sf.ehcache.distribution.MulticastKeepaliveHeartbeatSender;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.StringTokenizer;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
 
+import org.alfresco.util.EqualsHelper;
+import org.alfresco.util.TraceableThreadFactory;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.jgroups.Address;
+import org.jgroups.Channel;
+import org.jgroups.Message;
+import org.jgroups.ReceiverAdapter;
+import org.jgroups.View;
 
 /**
- * Receives heartbeats from any {@link MulticastKeepaliveHeartbeatSender}s out there.
- * <p/>
- * Our own multicast heartbeats are ignored.
+ * Receives heartbeats from the {@link JGroupsKeepAliveHeartbeatSender JGroups heartbeat sender}.
  *
- * @author Greg Luck
- * @version $Id: MulticastKeepaliveHeartbeatReceiver.java 556 2007-10-29 02:06:30Z gregluck $
+ * @author Derek Hulley
+ * @since 2.1.3
  */
-public abstract class JGroupsKeepaliveHeartbeatReceiver
+public class JGroupsKeepAliveHeartbeatReceiver extends ReceiverAdapter
 {
-//    private static final Log LOG = LogFactory.getLog(MulticastKeepaliveHeartbeatReceiver.class.getName());
-//
-//    private ExecutorService processingThreadPool;
-//    private Set rmiUrlsProcessingQueue = Collections.synchronizedSet(new HashSet());
-//    private final InetAddress groupMulticastAddress;
-//    private final Integer groupMulticastPort;
-//    private MulticastReceiverThread receiverThread;
-//    private MulticastSocket socket;
-//    private boolean stopped;
-//    private final MulticastRMICacheManagerPeerProvider peerProvider;
-//
-//    /**
-//     * Constructor.
-//     *
-//     * @param peerProvider
-//     * @param multicastAddress
-//     * @param multicastPort
-//     */
-//    public MulticastKeepaliveHeartbeatReceiver(
-//            MulticastRMICacheManagerPeerProvider peerProvider, InetAddress multicastAddress, Integer multicastPort) {
-//        this.peerProvider = peerProvider;
-//        this.groupMulticastAddress = multicastAddress;
-//        this.groupMulticastPort = multicastPort;
-//    }
-//
-//    /**
-//     * Start.
-//     *
-//     * @throws IOException
-//     */
-//    final void init() throws IOException {
-//        socket = new MulticastSocket(groupMulticastPort.intValue());
-//        socket.joinGroup(groupMulticastAddress);
-//        receiverThread = new MulticastReceiverThread();
-//        receiverThread.start();
-//        processingThreadPool = Executors.newCachedThreadPool();
-//    }
-//
-//    /**
-//     * Shutdown the heartbeat.
-//     */
-//    public final void dispose() {
-//        LOG.debug("dispose called");
-//        processingThreadPool.shutdownNow();
-//        stopped = true;
-//        receiverThread.interrupt();
-//    }
-//
-//    /**
-//     * A multicast receiver which continously receives heartbeats.
-//     */
-//    private final class MulticastReceiverThread extends Thread {
-//
-//        /**
-//         * Constructor
-//         */
-//        public MulticastReceiverThread() {
-//            super("Multicast Heartbeat Receiver Thread");
-//            setDaemon(true);
-//        }
-//
-//        public final void run() {
-//            byte[] buf = new byte[PayloadUtil.MTU];
-//            try {
-//                while (!stopped) {
-//                    DatagramPacket packet = new DatagramPacket(buf, buf.length);
-//                    try {
-//                        socket.receive(packet);
-//                        byte[] payload = packet.getData();
-//                        processPayload(payload);
-//
-//
-//                    } catch (IOException e) {
-//                        if (!stopped) {
-//                            LOG.error("Error receiving heartbeat. " + e.getMessage() +
-//                                    ". Initial cause was " + e.getMessage(), e);
-//                        }
-//                    }
-//                }
-//            } catch (Throwable t) {
-//                LOG.error("Multicast receiver thread caught throwable. Cause was " + t.getMessage() + ". Continuing...");
-//            }
-//        }
-//
-//        private void processPayload(byte[] compressedPayload) {
-//            byte[] payload = PayloadUtil.ungzip(compressedPayload);
-//            String rmiUrls = new String(payload);
-//            if (self(rmiUrls)) {
-//                return;
-//            }
-//            rmiUrls = rmiUrls.trim();
-//            if (LOG.isTraceEnabled()) {
-//                LOG.trace("rmiUrls received " + rmiUrls);
-//            }
-//            processRmiUrls(rmiUrls);
-//        }
-//
-//        /**
-//         * This method forks a new executor to process the received heartbeat in a thread pool.
-//         * That way each remote cache manager cannot interfere with others.
-//         * <p/>
-//         * In the worst case, we have as many concurrent threads as remote cache managers.
-//         *
-//         * @param rmiUrls
-//         */
-//        private void processRmiUrls(final String rmiUrls) {
-//            if (rmiUrlsProcessingQueue.contains(rmiUrls)) {
-//                if (LOG.isDebugEnabled()) {
-//                    LOG.debug("We are already processing these rmiUrls. Another heartbeat came before we finished: " + rmiUrls);
-//                }
-//                return;
-//            }
-//
-//            if (processingThreadPool == null) {
-//                return;
-//            }
-//
-//            processingThreadPool.execute(new Runnable() {
-//                public void run() {
-//                    try {
-//                        // Add the rmiUrls we are processing.
-//                        rmiUrlsProcessingQueue.add(rmiUrls);
-//                        for (StringTokenizer stringTokenizer = new StringTokenizer(rmiUrls,
-//                                PayloadUtil.URL_DELIMITER); stringTokenizer.hasMoreTokens();) {
-//                            if (stopped) {
-//                                return;
-//                            }
-//                            String rmiUrl = stringTokenizer.nextToken();
-//                            registerNotification(rmiUrl);
-//                            if (!peerProvider.peerUrls.containsKey(rmiUrl)) {
-//                                if (LOG.isDebugEnabled()) {
-//                                    LOG.debug("Aborting processing of rmiUrls since failed to add rmiUrl: " + rmiUrl);
-//                                }
-//                                return;
-//                            }
-//                        }
-//                    } finally {
-//                        // Remove the rmiUrls we just processed
-//                        rmiUrlsProcessingQueue.remove(rmiUrls);
-//                    }
-//                }
-//            });
-//        }
-//
-//
-//        /**
-//         * @param rmiUrls
-//         * @return true if our own hostname and listener port are found in the list. This then means we have
-//         * caught our onw multicast, and should be ignored.
-//         */
-//        private boolean self(String rmiUrls) {
-//            CacheManager cacheManager = peerProvider.getCacheManager();
-//            CacheManagerPeerListener cacheManagerPeerListener = cacheManager.getCachePeerListener();
-//            if (cacheManagerPeerListener == null) {
-//                return false;
-//            }
-//            List boundCachePeers = cacheManagerPeerListener.getBoundCachePeers();
-//            if (boundCachePeers == null || boundCachePeers.size() == 0) {
-//                return false;
-//            }
-//            CachePeer peer = (CachePeer) boundCachePeers.get(0);
-//            String cacheManagerUrlBase = null;
-//            try {
-//                cacheManagerUrlBase = peer.getUrlBase();
-//            } catch (RemoteException e) {
-//                LOG.error("Error geting url base");
-//            }
-//            int baseUrlMatch = rmiUrls.indexOf(cacheManagerUrlBase);
-//            return baseUrlMatch != -1;
-//        }
-//
-//        private void registerNotification(String rmiUrl) {
-//            peerProvider.registerPeer(rmiUrl);
-//        }
-//
-//
-//        /**
-//         * {@inheritDoc}
-//         */
-//        public final void interrupt() {
-//            try {
-//                socket.leaveGroup(groupMulticastAddress);
-//            } catch (IOException e) {
-//                LOG.error("Error leaving group");
-//            }
-//            socket.close();
-//            super.interrupt();
-//        }
-//    }
-//
-//
+    private static final int MAX_THREADS = 5;
+
+    private static Log logger = LogFactory.getLog(JGroupsKeepAliveHeartbeatReceiver.class);
+
+    private final JGroupsRMICacheManagerPeerProvider peerProvider;
+    private final JGroupsKeepAliveHeartbeatSender heartbeatSender;
+    private final Channel channel;
+    private boolean stopped;
+    private View lastView;
+    private final ThreadPoolExecutor threadPool;
+    private final Set<String> rmiUrlsProcessingQueue;
+
+    public JGroupsKeepAliveHeartbeatReceiver(
+            JGroupsRMICacheManagerPeerProvider peerProvider,
+            JGroupsKeepAliveHeartbeatSender heartbeatSender,
+            Channel channel)
+    {
+        this.peerProvider = peerProvider;
+        this.heartbeatSender = heartbeatSender;
+        this.channel = channel;
+
+        this.rmiUrlsProcessingQueue = Collections.synchronizedSet(new HashSet<String>());
+
+        // Build the thread pool
+        TraceableThreadFactory threadFactory = new TraceableThreadFactory();
+        threadFactory.setThreadDaemon(true);
+        threadFactory.setThreadPriority(Thread.NORM_PRIORITY + 2);
+
+        this.threadPool = new ThreadPoolExecutor(
+                1,                                  // Maintain one thread
+                1,                                  // We'll increase it, if necessary
+                60,                                 // 1 minute until unused threads get trashed
+                TimeUnit.SECONDS,
+                new LinkedBlockingQueue<Runnable>(),
+                threadFactory);
+    }
+
+    /**
+     * Register to receive messages on the channel.
+     */
+    public void init()
+    {
+        channel.setReceiver(this);
+    }
+
+    /**
+     * Set the stop flag.
+     */
+    public void dispose()
+    {
+        stopped = true;
+    }
+
+    @Override
+    public byte[] getState()
+    {
+        return new byte[] {};
+    }
+
+    @Override
+    public void setState(byte[] state)
+    {
+        // Nothing to do
+    }
+
+    @Override
+    public void receive(Message message)
+    {
+        Address localAddress = heartbeatSender.getHeartbeatSourceAddress();
+        Address remoteAddress = message.getSrc();
+        // Ignore messages from ourselves
+        if (EqualsHelper.nullSafeEquals(localAddress, remoteAddress))
+        {
+            if (logger.isDebugEnabled())
+            {
+                logger.debug("\n" +
+                        "Ignoring cache peer URLs heartbeat from self: " + message);
+            }
+            return;
+        }
+
+        String rmiUrls = new String(message.getBuffer());
+        if (logger.isDebugEnabled())
+        {
+            logger.debug("\n" +
+                    "Received cache peer URLs heartbeat: \n" +
+                    "   Message: " + message + "\n" +
+                    "   Peers:   " + rmiUrls);
+        }
+        // Quickly split them up
+        StringTokenizer tokenizer = new StringTokenizer(rmiUrls, JGroupsKeepAliveHeartbeatSender.URL_DELIMITER, false);
+        while (!stopped && tokenizer.hasMoreTokens())
+        {
+            String rmiUrl = tokenizer.nextToken();
+            // Is it pending?
+            if (rmiUrlsProcessingQueue.add(rmiUrl))
+            {
+                // Not pending.  Schedule it.
+                ProcessingRunnable runnable = new ProcessingRunnable(rmiUrl);
+                threadPool.execute(runnable);
+            }
+            else
+            {
+                // It was already waiting to be processed.
+                // Increase the thread pool size.
+                int currentThreadPoolMaxSize = threadPool.getMaximumPoolSize();
+                if (currentThreadPoolMaxSize < MAX_THREADS)
+                {
+                    threadPool.setMaximumPoolSize(currentThreadPoolMaxSize + 1);
+                }
+            }
+        }
+    }
+
+    /**
+     * Worker class to go into the thread pool
+     *
+     * @author Derek Hulley
+     */
+    private class ProcessingRunnable implements Runnable
+    {
+        private String rmiUrl;
+        private ProcessingRunnable(String rmiUrl)
+        {
+            this.rmiUrl = rmiUrl;
+        }
+        public void run()
+        {
+            rmiUrlsProcessingQueue.remove(rmiUrl);
+            if (stopped)
+            {
+                return;
+            }
+            peerProvider.registerPeer(rmiUrl);
+        }
+    }
+
+    @Override
+    public void viewAccepted(View newView)
+    {
+        if (EqualsHelper.nullSafeEquals(lastView, newView))
+        {
+            // No change, so ignore
+            return;
+        }
+        int lastSize = (lastView == null) ? 0 : lastView.getMembers().size();
+        int newSize = newView.getMembers().size();
+        // Report
+        if (newSize < lastSize)
+        {
+            logger.warn("\n" +
+                    "New cluster view with fewer members: \n" +
+                    "   Last View: " + lastView + "\n" +
+                    "   New View:  " + newView);
+        }
+        else
+        {
+            logger.info("\n" +
+                    "New cluster view with additional members: \n" +
+                    "   Last View: " + lastView + "\n" +
+                    "   New View:  " + newView);
+        }
+        lastView = newView;
+    }
 }
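The new receiver is registered on a JGroups Channel by init() and drops its own heartbeats before tokenizing the peer-URL payload. Below is a minimal, self-contained sketch of the same wiring against the plain JGroups 2.x API; the cluster name, the "|" delimiter, and the payload are illustrative stand-ins, since the real values come from JGroupsKeepAliveHeartbeatSender and the Alfresco peer provider:

import java.util.StringTokenizer;

import org.jgroups.Address;
import org.jgroups.JChannel;
import org.jgroups.Message;
import org.jgroups.ReceiverAdapter;

public class HeartbeatWiringExample extends ReceiverAdapter
{
    private volatile Address localAddress;

    @Override
    public void receive(Message message)
    {
        // Ignore our own heartbeats, as the receiver above does via
        // heartbeatSender.getHeartbeatSourceAddress()
        if (localAddress != null && localAddress.equals(message.getSrc()))
        {
            return;
        }
        // The payload is a delimited list of cache peer RMI URLs.
        // "|" stands in for JGroupsKeepAliveHeartbeatSender.URL_DELIMITER.
        String rmiUrls = new String(message.getBuffer());
        StringTokenizer tokenizer = new StringTokenizer(rmiUrls, "|", false);
        while (tokenizer.hasMoreTokens())
        {
            System.out.println("Peer URL: " + tokenizer.nextToken());
        }
    }

    public static void main(String[] args) throws Exception
    {
        JChannel channel = new JChannel();                  // default UDP protocol stack
        HeartbeatWiringExample receiver = new HeartbeatWiringExample();
        channel.setReceiver(receiver);                      // mirrors init() above
        channel.connect("example-heartbeat-cluster");       // illustrative cluster name
        receiver.localAddress = channel.getLocalAddress();
        // One illustrative heartbeat; a real sender would broadcast periodically
        channel.send(new Message(null, null, "rmi://host1:40001/cache1".getBytes()));
        Thread.sleep(1000);
        channel.close();
    }
}

A JGroups channel delivers a node's own multicasts back to it by default, which is why both the sketch and the real receiver filter on the source address.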
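One design note on the receive() loop: when a URL is already pending, it raises the executor's maximum pool size, up to MAX_THREADS. A ThreadPoolExecutor only starts threads beyond its core size when the work queue turns an offer away, so with an unbounded LinkedBlockingQueue raising the maximum alone does not add workers. Below is a standalone sketch of the same growth pattern that bumps both limits; the sleeping task is a hypothetical stand-in for peerProvider.registerPeer(rmiUrl):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class GrowingPoolExample
{
    private static final int MAX_THREADS = 5;

    public static void main(String[] args) throws InterruptedException
    {
        // Same shape as the pool built in the constructor above:
        // one thread, one-minute idle timeout, unbounded queue.
        ThreadPoolExecutor threadPool = new ThreadPoolExecutor(
                1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>());

        for (int i = 0; i < 20; i++)
        {
            if (!threadPool.getQueue().isEmpty() && threadPool.getMaximumPoolSize() < MAX_THREADS)
            {
                // Work is backing up: allow one more worker thread.
                // Raise the maximum first so the core size never exceeds it;
                // raising the core size is what actually starts a new thread
                // while the queue is unbounded.
                threadPool.setMaximumPoolSize(threadPool.getMaximumPoolSize() + 1);
                threadPool.setCorePoolSize(threadPool.getCorePoolSize() + 1);
            }
            threadPool.execute(new Runnable()
            {
                public void run()
                {
                    try
                    {
                        Thread.sleep(100);  // simulate a slow peer registration
                    }
                    catch (InterruptedException e)
                    {
                        Thread.currentThread().interrupt();
                    }
                }
            });
        }
        threadPool.shutdown();
        threadPool.awaitTermination(30, TimeUnit.SECONDS);
    }
}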