List of usage examples for com.google.common.hash Hashing consistentHash
public static int consistentHash(long input, int buckets)
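Before the project examples below, a minimal standalone sketch of the method itself may help; the key value and bucket count are arbitrary placeholders:

import com.google.common.hash.Hashing;

public class ConsistentHashBasics {
    public static void main(String[] args) {
        // In practice the input is an already well-mixed hash value, not raw data.
        long key = 0x7f4a7c159e3779b9L; // arbitrary 64-bit key
        // Map the key to a bucket in [0, 10); when the bucket count later grows to 11,
        // only about 1/11 of all keys are expected to move to a new bucket.
        int bucket = Hashing.consistentHash(key, 10);
        System.out.println("bucket = " + bucket);
    }
}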
From source file:org.apache.hadoop.hive.ql.exec.tez.HostAffinitySplitLocationProvider.java
@VisibleForTesting
public static int determineLocation(List<String> locations, String path, long start, String desc) {
    byte[] bytes = getHashInputForSplit(path, start);
    long hash1 = hash1(bytes);
    int index = Hashing.consistentHash(hash1, locations.size());
    String location = locations.get(index);
    if (LOG.isDebugEnabled()) {
        LOG.debug(desc + " mapped to index=" + index + ", location=" + location);
    }
    int iter = 1;
    long hash2 = 0;
    // Since our probing method is totally bogus, give up after some time.
    while (location == null && iter < locations.size() * 2) {
        if (iter == 1) {
            hash2 = hash2(bytes);
        }
        // Note that this is not real double hashing since we have consistent hash on top.
        index = Hashing.consistentHash(hash1 + iter * hash2, locations.size());
        location = locations.get(index);
        if (LOG.isDebugEnabled()) {
            LOG.debug(desc + " remapped to index=" + index + ", location=" + location);
        }
        ++iter;
    }
    return index;
}
From source file:com.weibo.api.motan.cluster.loadbalance.SeqServiceLoadBalance.java
@Override
protected Referer<T> doSelect(Request request) {
    checkRequest(request);
    final Long userId = (Long) request.getArguments()[0];
    final long seqSessionId = SeqSessionCalc.calcSeqSession(userId);
    final int referListSize = getReferers().size();
    final int index = Hashing.consistentHash(seqSessionId, referListSize);
    return serverRefererList.get(index);
}
From source file:org.apache.usergrid.persistence.core.shard.ShardLocator.java
/**
 * Locate the bucket number given the value, the funnel and the total buckets.
 *
 * Assigns to {@code hashCode} a "bucket" in the range {@code [0, buckets)}, in a uniform manner that
 * minimizes the need for remapping as {@code buckets} grows. That is, {@code consistentHash(h, n)} equals:
 *
 * <ul>
 * <li>{@code n - 1}, with approximate probability {@code 1/n}
 * <li>{@code consistentHash(h, n - 1)}, otherwise (probability {@code 1 - 1/n})
 * </ul>
 *
 * <p>See the <a href="http://en.wikipedia.org/wiki/Consistent_hashing">wikipedia article on consistent
 * hashing</a> for more information.
 *
 * <p>See <a href="http://arxiv.org/pdf/1406.2294v1.pdf">this paper</a> for more details on the algorithm.</p>
 *
 * Note that after testing, increasing buckets does NOT yield the expected results. You will need an
 * algorithm that manually walks a tree.
 */
public int getBucket(T value) {
    final HashCode hashCode = HASHER.hashObject(value, funnel);
    int owningIndex = Hashing.consistentHash(hashCode, totalBuckets);
    return owningIndex;
}
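The minimal-remapping guarantee quoted in the javadoc above can be checked empirically; a small sketch, with an arbitrary range of pre-hashed keys:

import com.google.common.hash.Hashing;

public class RemapCheck {
    public static void main(String[] args) {
        int moved = 0;
        for (long key = 0; key < 10000; key++) {
            // Growing from 9 to 10 buckets should move roughly 1/10 of all keys.
            if (Hashing.consistentHash(key, 9) != Hashing.consistentHash(key, 10)) {
                moved++;
            }
        }
        System.out.println("moved " + moved + " of 10000 keys"); // expect ~1000
    }
}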
From source file:com.weibo.api.motan.cluster.loadbalance.SeqServiceLoadBalance.java
@Override
protected void doSelectToHolder(Request request, List<Referer<T>> refersHolder) {
    checkRequest(request);
    final Long userId = (Long) request.getArguments()[0];
    final long seqSessionId = SeqSessionCalc.calcSeqSession(userId);
    final int referListSize = getReferers().size();
    final int index = Hashing.consistentHash(seqSessionId, referListSize);
    final Referer<T> referer = getReferers().get(index);
    if (referer.isAvailable()) {
        refersHolder.add(referer);
    }
}
From source file:com.qubole.rubix.bookkeeper.BookKeeper.java
@Override
public List<com.qubole.rubix.bookkeeper.Location> getCacheStatus(String remotePath, long fileLength,
        long lastModified, long startBlock, long endBlock, int clusterType) throws TException {
    initializeClusterManager(clusterType);
    if (nodeName == null) {
        log.error("Node name is null for Cluster Type" + ClusterType.findByValue(clusterType));
        return null;
    }

    Set<Long> localSplits = new HashSet<>();
    long blockNumber = 0;

    for (long i = 0; i < fileLength; i = i + splitSize) {
        long end = i + splitSize;
        if (end > fileLength) {
            end = fileLength;
        }
        String key = remotePath + i + end;
        HashFunction hf = Hashing.md5();
        HashCode hc = hf.hashString(key, Charsets.UTF_8);
        int nodeIndex = Hashing.consistentHash(hc, nodeListSize);
        if (nodeIndex == currentNodeIndex) {
            localSplits.add(blockNumber);
        }
        blockNumber++;
    }

    FileMetadata md;
    try {
        md = fileMetadataCache.get(remotePath,
                new CreateFileMetadataCallable(remotePath, fileLength, lastModified, conf));
        if (md.getLastModified() != lastModified) {
            invalidate(remotePath);
            md = fileMetadataCache.get(remotePath,
                    new CreateFileMetadataCallable(remotePath, fileLength, lastModified, conf));
        }
    } catch (ExecutionException e) {
        log.error(String.format("Could not fetch Metadata for %s : %s", remotePath,
                Throwables.getStackTraceAsString(e)));
        throw new TException(e);
    }

    endBlock = setCorrectEndBlock(endBlock, fileLength, remotePath);
    List<Location> blocksInfo = new ArrayList<>((int) (endBlock - startBlock));
    int blockSize = CacheConfig.getBlockSize(conf);

    for (long blockNum = startBlock; blockNum < endBlock; blockNum++) {
        totalRequests++;
        long split = (blockNum * blockSize) / splitSize;
        if (md.isBlockCached(blockNum)) {
            blocksInfo.add(Location.CACHED);
            cachedRequests++;
        } else {
            if (localSplits.contains(split)) {
                blocksInfo.add(Location.LOCAL);
                remoteRequests++;
            } else {
                blocksInfo.add(Location.NON_LOCAL);
            }
        }
    }
    return blocksInfo;
}
From source file:org.apache.niolex.commons.hash.DoubleHash.java
/**
 * Get the pair of server nodes by these hash codes. We guarantee the first and second node are
 * not the same.<br>
 *
 * @param primaryHashCode the primary hash code
 * @param secondaryHashCode the secondary hash code
 * @return the pair of server nodes
 */
protected Pair<T, T> getPairNodes(HashCode primaryHashCode, HashCode secondaryHashCode) {
    final Object[] tmpArray = this.nodeArray;
    final int length = tmpArray.length;
    /**
     * The node array
     * ---------------------------------------
     *          ^
     *          The idx1 in any of the positions
     * ------------------- -------------------
     * The idx2 will be in the remains
     * So the length must - 1
     */
    final int idx1 = Hashing.consistentHash(primaryHashCode, length);
    int idx2 = Hashing.consistentHash(secondaryHashCode, length - 1);
    if (idx2 >= idx1) {
        ++idx2;
    }
    @SuppressWarnings("unchecked")
    Pair<T, T> r = Pair.<T, T>create((T) tmpArray[idx1], (T) tmpArray[idx2]);
    return r;
}
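The shift trick above is what guarantees distinct nodes: idx2 is drawn from the smaller range [0, length - 1) and then skips over idx1. A standalone sketch of the same selection logic (class and key names are illustrative):

import com.google.common.hash.HashCode;
import com.google.common.hash.Hashing;

public class DistinctPairCheck {
    // Pick two distinct indices in [0, length) from two independent hash codes; length must be >= 2.
    static int[] pickPair(HashCode primary, HashCode secondary, int length) {
        int idx1 = Hashing.consistentHash(primary, length);
        int idx2 = Hashing.consistentHash(secondary, length - 1); // in [0, length - 1)
        if (idx2 >= idx1) {
            ++idx2; // skip over idx1, so idx1 != idx2 always holds
        }
        return new int[] { idx1, idx2 };
    }

    public static void main(String[] args) {
        HashCode p = Hashing.murmur3_128().hashUnencodedChars("some-key");
        HashCode s = Hashing.murmur3_128(42).hashUnencodedChars("some-key");
        int[] pair = pickPair(p, s, 5);
        System.out.println(pair[0] + " != " + pair[1]);
    }
}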
From source file:com.qubole.rubix.core.CachingFileSystem.java
@Override
public BlockLocation[] getFileBlockLocations(FileStatus file, long start, long len) throws IOException {
    if (!clusterManager.isMaster() || cacheSkipped) {
        // If in worker node, blockLocation does not matter
        return fs.getFileBlockLocations(file, start, len);
    }
    List<String> nodes = clusterManager.getNodes();
    if (file == null) {
        return null;
    } else if (start >= 0L && len >= 0L) {
        if (file.getLen() < start) {
            return new BlockLocation[0];
        } else {
            // Using similar logic of returning all Blocks as FileSystem.getFileBlockLocations does
            // instead of only returning blocks from start till len
            BlockLocation[] blockLocations = new BlockLocation[(int) Math
                    .ceil((double) file.getLen() / clusterManager.getSplitSize())];
            int blockNumber = 0;
            for (long i = 0; i < file.getLen(); i = i + clusterManager.getSplitSize()) {
                long end = i + clusterManager.getSplitSize();
                if (end > file.getLen()) {
                    end = file.getLen();
                }
                String key = file.getPath().toString() + i + end;
                HashFunction hf = Hashing.md5();
                HashCode hc = hf.hashString(key, Charsets.UTF_8);
                int nodeIndex = Hashing.consistentHash(hc, nodes.size());
                String[] name = new String[] { nodes.get(nodeIndex) };
                String[] host = new String[] { nodes.get(nodeIndex) };
                blockLocations[blockNumber++] = new BlockLocation(name, host, i, end - i);
                log.info(String.format("BlockLocation %s %d %d %s totalHosts: %s",
                        file.getPath().toString(), i, end - i, host[0], nodes.size()));
            }
            return blockLocations;
        }
    } else {
        throw new IllegalArgumentException("Invalid start or len parameter");
    }
}
From source file:poke.resources.JobResource.java
@Override
public Request process(Request request) {
    // TODO Auto-generated method stub
    Request reply = null;
    String uuid = null;
    String message = "";
    boolean success = false;
    PhotoHeader imageHeader = request.getHeader().getPhotoHeader();
    PhotoPayload imagePayload = request.getBody().getPhotoPayload();
    int read = PhotoHeader.RequestType.read.getNumber();
    int write = PhotoHeader.RequestType.write.getNumber();
    int delete = PhotoHeader.RequestType.delete.getNumber();
    Integer leaderId = ElectionManager.getInstance().whoIsTheLeader();

    if (Server.getMyId() == leaderId) {
        // only if it is a response
        if (request.getHeader().getPhotoHeader().hasResponseFlag()) {
            logger.info("\n**********\nRECEIVED JOB STATUS" + "\n\n**********");
            List<JobDesc> list = request.getBody().getJobStatus().getDataList();
            String jobId = request.getBody().getJobStatus().getJobId();
            int succesResponse = PhotoHeader.ResponseFlag.success.getNumber();
            int responseFlag = imageHeader.getResponseFlag().getNumber();
            if (succesResponse == responseFlag) {
                logger.info("@MInu -> inside the final response from server.....");
                Request.Builder rep = Request.newBuilder(request);
                reply = rep.build();
                Channel ch = chMap.get(jobId);
                chMap.remove(jobId);
                ch.writeAndFlush(reply);
            }
        } else if (request.getBody().hasJobOp()) {
            // apply sharding only if its leader
            logger.info("\n**********\n RECEIVED NEW JOB REQUEST-Leader" + "\n\n**********");
            logger.info("\n**********\n RE-DIRECTING TO A NODE-SHARDING" + "\n\n**********");
            String jobId = request.getBody().getJobOp().getJobId();
            JobOperation jobOp = request.getBody().getJobOp();
            requestMap.put(jobId, request);
            if (imageHeader.hasRequestType()) {
                int requestType = imageHeader.getRequestType().getNumber();
                // check if we need to put jobAction.equals(JobAction.ADDJOB) condition??
                if (requestType == write || addimage.equals(jobOp.getData().getNameSpace())) {
                    logger.info("ADDJOB received");
                    UUID uniqueKey = UUID.randomUUID();
                    String key = "T10" + uniqueKey.toString().substring(uniqueKey.toString().length() - 4);
                    // route the request to server
                    shardedNodes = ShardingManager.getInstance().getShardedServers();
                    // @Test
                    List<Integer> serverIdTestNew = ShardingManager.getInstance().getServerIds();
                    for (int i = 0; i < serverIdTestNew.size(); i++) {
                        logger.info(i + " : Sharded node is : " + serverIdTestNew.get(i));
                    }
                    logger.info("The size of the shardedNodes is ----------> "
                            + ShardingManager.getInstance().getShardedServers().size());
                    // @Test
                    int bucket = Hashing.consistentHash(Hashing.md5().hashString(key),
                            ShardingManager.getInstance().getShardedServers().size());
                    int server = get(bucket);
                    logger.info("Server to which request to be routed - " + server);
                    // forward to server(node id)- forwardResource
                    Request fwd = ResourceUtil.buildForwardAddMessage(request, server, key);
                    for (NodeDesc nn : configFile.getAdjacent().getAdjacentNodes().values()) {
                        if (nn.getNodeId() == server) {
                            ChannelFuture fut = createChannelAndForward(nn.getHost(), nn.getPort(), fwd, key);
                            fut.awaitUninterruptibly();
                            boolean good = fut.isSuccess();
                            logger.info("waiting inside the leader for the response-----");
                            if (good)
                                logger.info("Successfully forwarded the request to the sharded node primary! -- "
                                        + nn.getNodeId());
                            break;
                        }
                    }
                }
                // read the image by leader and check the mongodb collection
                /*
                 * add the delete condition together with the get
                 */
                else if (getimage.equals(jobOp.getData().getNameSpace()) || requestType == read
                        || deleteimage.equals(jobOp.getData().getNameSpace()) || requestType == delete) {
                    String key = request.getBody().getPhotoPayload().getUuid();
                    // check mongo collection
                    ReplicaDomain replicaStorage = MongoStorage.getReplicaById(key);
                    // found uuid in my cluster then forward to slave to get response
                    if (replicaStorage != null) {
                        // check if it is a broadcasted message by checking the entryNode field
                        if (imageHeader.hasEntryNode()) {
                            // store the ip address in entry node with jobid
                            String sourceIP = imageHeader.getEntryNode();
                            // remove the established channel between the sourceIP and destinationIP(me) if it is broadcasted
                            Channel ch = chMap.get(jobId);
                            chMap.remove(jobId);
                            // establish a new channel with the sourceIP and put it in chmap to be taken whenever a response is ready
                            InetSocketAddress socket = new InetSocketAddress(sourceIP, 5570);
                            Channel returnChannel = connectToPublic(socket);
                            chMap.put(jobId, returnChannel);
                        }
                        int primary = replicaStorage.getPrimaryNode();
                        int secondary1 = replicaStorage.getSecondaryNode1();
                        int secondary2 = replicaStorage.getSecondaryNode2();
                        List<Integer> serverIdNew = ShardingManager.getInstance().getServerIds();
                        int server = 100000;
                        if (serverIdNew.contains(primary)) {
                            server = primary;
                        } else if (serverIdNew.contains(secondary1)) {
                            server = secondary1;
                        } else if (serverIdNew.contains(secondary2)) {
                            server = secondary2;
                        } else {
                            logger.info("ALl the server with this uuid is down: ");
                        }
                        // @Test
                        shardedNodes = ShardingManager.getInstance().getShardedServers();
                        List<Integer> serverIdTestNew = ShardingManager.getInstance().getServerIds();
                        for (int i = 0; i < serverIdTestNew.size(); i++) {
                            logger.info(i + " : Sharded node : is : " + serverIdTestNew.get(i));
                        }
                        // @Test
                        logger.info("server to which request to be routed - " + server);
                        // forward to server(node id)- forwardResource
                        Request fwd = ResourceUtil.buildForwardMessage(request, configFile);
                        for (NodeDesc nn : configFile.getAdjacent().getAdjacentNodes().values()) {
                            if (nn.getNodeId() == server) {
                                if (server != primary && requestType == delete) {
                                    // forward the request to both the secondary nodes
                                    ChannelFuture futSec = createChannelAndForward(nn.getHost(), nn.getPort(),
                                            fwd, key);
                                    futSec.awaitUninterruptibly();
                                    boolean goodSec = futSec.isSuccess();
                                    if (goodSec)
                                        logger.info("Forwarded the delete request to the secondary NOde " + server);
                                    if (server != secondary2 && serverIdNew.contains(secondary2))
                                        server = secondary2;
                                    if (server != secondary1 && serverIdNew.contains(secondary1))
                                        server = secondary1;
                                }
                                ChannelFuture fut = createChannelAndForward(nn.getHost(), nn.getPort(), fwd, key);
                                fut.awaitUninterruptibly();
                                boolean good = fut.isSuccess();
                                logger.info("waiting inside the leader after ");
                                if (good)
                                    logger.info("successfully forwarded the request to server : " + nn.getNodeId());
                                break;
                            }
                        }
                    }
                    // Requested uuid not in my cluster
                    /*
                     * broadcast if no entry point else discard
                     * populate the entryPoint in the request with my id-ip
                     */
                    else {
                        logger.info("Forward to a different cluster");
                        if (!imageHeader.hasEntryNode()) {
                            // broadcast to all after putting my id
                            String leaderHost = null;
                            for (NodeDesc nn : configFile.getAdjacent().getAdjacentNodes().values()) {
                                if (nn.getNodeId() == leaderId) {
                                    leaderHost = nn.getHost();
                                }
                            }
                            String destHost = null;
                            int destPort = 0;
                            Request request1 = ResourceUtil.buildBroadcastRequest(request, leaderHost);
                            List<String> leaderList = new ArrayList<String>();
                            leaderList.add(new String("192.168.0.7:5670"));
                            // leaderList.add(new String("192.168.0.60:5673"));
                            // leaderList.add(new String("192.168.0.230:5573"));
                            for (String destination : leaderList) {
                                String[] dest = destination.split(":");
                                destHost = dest[0];
                                destPort = Integer.parseInt(dest[1]);
                                ChannelFuture fut = createChannelAndForward(destHost, destPort, request1, null);
                                fut.awaitUninterruptibly();
                                boolean good = fut.isSuccess();
                                logger.info("waiting inside the leader (connected to client) for the response for broadcasted request-----");
                                if (good)
                                    logger.info("successfully broadcasted the request to servers : ");
                            }
                        }
                        // received request has entry point and uuid couldn't be found in this cluster: ignore
                        else {
                            // Discard the request
                            // return null;
                            Channel ch = chMap.get(jobId);
                            chMap.remove(jobId);
                        }
                    }
                }
            }
        }
    } else {
        // By Slaves only
        logger.info("\n**********\n RECEIVED NEW JOB REQUEST BY A SLAVE NODE" + "\n\n**********");
        JobOperation jobOp = request.getBody().getJobOp();
        // Response from Secondary nodes
        // only if it is a response
        if (request.getHeader().getPhotoHeader().hasResponseFlag()
                && (request.getHeader().getReplica().equalsIgnoreCase("broadcastReply"))) {
            logger.info("\n**********\nRECEIVED JOB STATUS" + "\n\n**********");
            String jobId = request.getBody().getJobStatus().getJobId();
            int succesResponse = PhotoHeader.ResponseFlag.success.getNumber();
            int failureResponse = PhotoHeader.ResponseFlag.failure.getNumber();
            int responseFlag = imageHeader.getResponseFlag().getNumber();
            // if(succesResponse == responseFlag)
            // {
            logger.info("@MInu -> inside the response from primary server.....");
            Request.Builder rep = Request.newBuilder(request);
            reply = rep.build();
            if (chMap.containsKey(jobId)) {
                Channel ch = chMap.get(jobId);
                // respond back if secondary is succes
                if (succesResponse == responseFlag) {
                    chMap.remove(jobId);
                    ch.writeAndFlush(reply);
                } else {
                    chMap.remove(jobId);
                    if (checkStoredInPrimary) {
                        checkStoredInPrimary = false;
                        logger.info("***************Stored to mongodb of Primary node**************");
                        // build response
                        Request primaryReply = null;
                        message = "Successfully store to Primary MongoDB";
                        Request.Builder rb = Request.newBuilder();
                        Payload.Builder pb = Payload.newBuilder();
                        JobStatus.Builder jb = JobStatus.newBuilder();
                        jb.setStatus(PokeStatus.SUCCESS);
                        jb.setJobId(jobOp.getJobId());
                        jb.setJobState(JobDesc.JobCode.JOBRECEIVED);
                        pb.setJobStatus(jb.build());
                        PhotoPayload.Builder pp = PhotoPayload.newBuilder();
                        // pp.setUuid(uuid);
                        PhotoPayload.Builder newPhotoBldr = PhotoPayload.newBuilder(pp.build());
                        pb.setPhotoPayload(newPhotoBldr);
                        rb.setBody(pb.build());
                        // check if we can re-use the same method in resourceutil
                        rb.setHeader(ResourceUtil.buildHeaderResponse(request.getHeader().getRoutingId(),
                                PokeStatus.SUCCESS, message, request.getHeader().getOriginator(),
                                request.getHeader().getTag(), ResponseFlag.success));
                        primaryReply = rb.build();
                        ch.writeAndFlush(primaryReply);
                    }
                }
            } else {
                logger.info("Ignoring the response from the Secondary 2!");
                return reply;
            }
        } else if (imageHeader.hasRequestType() && !(imageHeader.hasEntryNode())) {
            logger.info("ADDJOB received");
            logger.info(jobOp.getData().getNameSpace());
            int requestType = imageHeader.getRequestType().getNumber();
            String jobId = request.getBody().getJobOp().getJobId();
            // check if we need to put jobAction.equals(JobAction.ADDJOB) condition??
            if (requestType == write || addimage.equals(jobOp.getData().getNameSpace())) {
                String key = imagePayload.getUuid();
                byte[] image = imagePayload.getData().toByteArray();
                long creationDate = System.currentTimeMillis() % 1000;
                int contentLength = imageHeader.getContentLength();
                String title = imagePayload.getName();
                logger.info("@Minu--->unique key is-after setting: " + key);
                // TODO -check if there is uuid in the request otherwise forward to the leader
                // if the message is for me
                if (contentLength <= 56000) {
                    // if(request.getHeader().hasReplica()) {
                    if (request.getHeader().getReplica().equalsIgnoreCase("broadcast")) {
                        logger.info("store to secondary");
                        uuid = MongoStorage.addFile(key, title, image, creationDate, image.length);
                        if (uuid == null) {
                            logger.info("Request is not handled by secondary!");
                            message = "Request is not handled by secondary!";
                            Request.Builder rb = Request.newBuilder();
                            Payload.Builder pb = Payload.newBuilder();
                            JobStatus.Builder jb = JobStatus.newBuilder();
                            jb.setStatus(PokeStatus.FAILURE);
                            jb.setJobId(jobOp.getJobId());
                            jb.setJobState(JobDesc.JobCode.JOBRECEIVED);
                            pb.setJobStatus(jb.build());
                            rb.setBody(pb.build());
                            // check if we can re-use the same method in resourceutil
                            rb.setHeader(ResourceUtil.buildHeaderResponse(request.getHeader().getRoutingId(),
                                    PokeStatus.FAILURE, message, request.getHeader().getOriginator(),
                                    request.getHeader().getTag(), ResponseFlag.failure));
                            reply = rb.build();
                            return reply;
                        } else {
                            logger.info("***************Stored to mongodb of secondary node**************");
                            // build response
                            message = "Successfully stored to Secondary MongoDB";
                            Request.Builder rb = Request.newBuilder();
                            Payload.Builder pb = Payload.newBuilder();
                            JobStatus.Builder jb = JobStatus.newBuilder();
                            jb.setStatus(PokeStatus.SUCCESS);
                            jb.setJobId(jobOp.getJobId());
                            jb.setJobState(JobDesc.JobCode.JOBRECEIVED);
                            pb.setJobStatus(jb.build());
                            PhotoPayload.Builder pp = PhotoPayload.newBuilder();
                            pp.setUuid(uuid);
                            PhotoPayload.Builder newPhotoBldr = PhotoPayload.newBuilder(pp.build());
                            pb.setPhotoPayload(newPhotoBldr);
                            rb.setBody(pb.build());
                            // check if we can re-use the same method in resourceutil
                            rb.setHeader(ResourceUtil.buildHeaderResponse(request.getHeader().getRoutingId(),
                                    PokeStatus.SUCCESS, message, request.getHeader().getOriginator(),
                                    request.getHeader().getTag(), ResponseFlag.success));
                            reply = rb.build();
                            return reply;
                        }
                    } else if (!request.getHeader().getReplica().equalsIgnoreCase("broadcast")
                            || !request.getHeader().hasReplica()
                            || !request.getHeader().getReplica().equalsIgnoreCase("broadcastReply")) {
                        List<Integer> serverIds = ShardingManager.getInstance().getServerIds();
                        int myID = Server.getMyId();
                        int prev = 1000;
                        int next = 1000;
                        ChannelFuture secondary1 = null, secondary2 = null;
                        boolean sec1good = false, sec2good = false;
                        logger.info("my ID :" + myID);
                        logger.info("size : " + serverIds.size());
                        List<Integer> activeNodes = new ArrayList<Integer>();
                        for (int count = 0; count < serverIds.size(); count++) {
                            if (serverIds.get(count) != leaderId) {
                                logger.info("server added: " + serverIds.get(count));
                                activeNodes.add(serverIds.get(count));
                            }
                        }
                        logger.info("active node size:" + activeNodes.size());
                        for (int i = 0; i < activeNodes.size(); i++) {
                            logger.info("serverIds : " + activeNodes.get(i));
                            if (myID == activeNodes.get(i)) {
                                if (i == 0) {
                                    logger.info("i in if-prev:" + i);
                                    prev = activeNodes.get(activeNodes.size() - 1);
                                } else {
                                    logger.info("i in else-prev :" + i);
                                    prev = activeNodes.get(i - 1);
                                }
                                logger.info("prev :" + prev);
                                if (activeNodes.size() == i + 1) {
                                    logger.info("i in if-next:" + serverIds.size());
                                    next = activeNodes.get(0);
                                } else {
                                    logger.info("i in if-next:" + serverIds.size());
                                    next = activeNodes.get(i + 1);
                                }
                                logger.info("next :" + next);
                                if (prev != 1000 && next != 1000)
                                    break;
                            }
                        }
                        Request fwdPrev = ResourceUtil.buildForwardReplicaMessage(request, prev);
                        if (fwdPrev == null) {
                            logger.info("sec1 request is null");
                        }
                        Request fwdNext = ResourceUtil.buildForwardReplicaMessage(request, next);
                        if (fwdNext == null) {
                            logger.info("sec2 request is null");
                        }
                        for (NodeDesc nn : configFile.getAdjacent().getAdjacentNodes().values()) {
                            if (nn.getNodeId() == prev) {
                                secondary1 = createChannelAndForward(nn.getHost(), nn.getPort(), fwdPrev, key);
                                // add to primary channel map
                                secondary1.awaitUninterruptibly();
                                sec1good = secondary1.isSuccess();
                            }
                            logger.info("For lloooooopppp ******** checking to which node it should be replicated : "
                                    + nn.getNodeId());
                            if (nn.getNodeId() == next) {
                                logger.info("sending the request to replicaaaaa--------> " + next);
                                secondary2 = createChannelAndForward(nn.getHost(), nn.getPort(), fwdNext, key);
                                // add to primary channel map
                                secondary2.awaitUninterruptibly();
                                sec2good = secondary2.isSuccess();
                                logger.info("result of forwarding to replicaaaaaaa ------ > " + sec2good);
                            }
                            if (secondary1 != null && secondary2 != null)
                                break;
                        }
                        if (sec1good && sec2good) {
                            MongoStorage.addReplicas(key, myID, prev, next);
                            logger.info("added to mongo replicaaaaaaaa -------- ");
                            uuid = MongoStorage.addFile(key, title, image, creationDate, image.length);
                        } else {
                            logger.error("Replication Failed!!");
                        }
                        if (uuid == null) {
                            logger.error("Request is not handled!");
                        } else {
                            logger.info("***************Stored to mongodb of **************");
                            // check if it is stored in primary. if so set flag. once you get secondary
                            // responses, check flag and return reply accordingly
                            checkStoredInPrimary = true;
                            // =================== Response from Primary
                            logger.info("***************Stored to mongodb of secondary node**************");
                            // build response
                            message = "Successfully stored to Secondary MongoDB";
                            Request.Builder rb = Request.newBuilder();
                            Payload.Builder pb = Payload.newBuilder();
                            JobStatus.Builder jb = JobStatus.newBuilder();
                            jb.setStatus(PokeStatus.SUCCESS);
                            jb.setJobId(jobOp.getJobId());
                            jb.setJobState(JobDesc.JobCode.JOBRECEIVED);
                            pb.setJobStatus(jb.build());
                            PhotoPayload.Builder pp = PhotoPayload.newBuilder();
                            pp.setUuid(uuid);
                            PhotoPayload.Builder newPhotoBldr = PhotoPayload.newBuilder(pp.build());
                            pb.setPhotoPayload(newPhotoBldr);
                            rb.setBody(pb.build());
                            // check if we can re-use the same method in resourceutil
                            rb.setHeader(ResourceUtil.buildHeaderResponse(request.getHeader().getRoutingId(),
                                    PokeStatus.SUCCESS, message, request.getHeader().getOriginator(),
                                    request.getHeader().getTag(), ResponseFlag.success));
                            reply = rb.build();
                            return reply;
                        }
                    }
                }
            } else if (getimage.equals(jobOp.getData().getNameSpace()) || requestType == read) {
                String key = imagePayload.getUuid();
                logger.info("unique key: " + key);
                BasicDBObject getImage = MongoStorage.getFileByfId(key);
                if (getImage == null) {
                    logger.info("Image is not found!");
                } else {
                    message = "image details attached";
                    String imgName = getImage.getString("name");
                    String uniqueId = getImage.getString("_id");
                    byte[] imgFile = (byte[]) getImage.get("image");
                    Request.Builder rb = Request.newBuilder();
                    Payload.Builder pb = Payload.newBuilder();
                    JobStatus.Builder jb = JobStatus.newBuilder();
                    jb.setStatus(PokeStatus.SUCCESS);
                    jb.setJobId(jobOp.getJobId());
                    jb.setJobState(JobDesc.JobCode.JOBRECEIVED);
                    pb.setJobStatus(jb.build());
                    PhotoPayload.Builder ppb = PhotoPayload.newBuilder();
                    ByteString bs = com.google.protobuf.ByteString.copyFrom(imgFile);
                    logger.info("getting the data length as : " + bs.size());
                    ppb.setData(bs);
                    ppb.setName(imgName);
                    ppb.setUuid(uniqueId);
                    PhotoPayload.Builder newPhotoBldr = PhotoPayload.newBuilder(ppb.build());
                    pb.setPhotoPayload(newPhotoBldr);
                    rb.setBody(pb.build());
                    logger.info("message" + message);
                    rb.setHeader(ResourceUtil.buildHeaderResponse(request.getHeader().getRoutingId(),
                            PokeStatus.SUCCESS, message, request.getHeader().getOriginator(),
                            request.getHeader().getTag(), ResponseFlag.success));
                    reply = rb.build();
                    return reply;
                }
            } else if (deleteimage.equals(jobOp.getData().getNameSpace()) || requestType == delete) {
                String uniKey = imagePayload.getUuid();
                logger.info("unique key: " + uniKey);
                if (request.getHeader().getReplica().equalsIgnoreCase("broadcast")) {
                    logger.info("uuid: " + imagePayload.getUuid());
                    MongoStorage.deleteFile(uniKey);
                    logger.info("deleted from secondary replica");
                } else {
                    ReplicaDomain replicaData = MongoStorage.getReplicaById(uniKey);
                    int primary = replicaData.getPrimaryNode();
                    int secondary1 = replicaData.getSecondaryNode1();
                    int secondary2 = replicaData.getSecondaryNode2();
                    logger.info("primary -" + primary + ", secondary1- " + secondary1 + "secondary2 -" + secondary2);
                    if (Server.getMyId() == primary) {
                        logger.info("inside primary node : " + Server.getMyId());
                        Request fwdSec1 = ResourceUtil.buildForwardReplicaMessage(request, secondary1);
                        Request fwdSec2 = ResourceUtil.buildForwardReplicaMessage(request, secondary2);
                        ChannelFuture sec1 = null, sec2 = null;
                        Boolean secResult1 = false, secResult2 = false;
                        for (NodeDesc nn : configFile.getAdjacent().getAdjacentNodes().values()) {
                            if (nn.getNodeId() == secondary1) {
                                sec1 = createChannelAndForward(nn.getHost(), nn.getPort(), fwdSec1, uniKey);
                                sec1.awaitUninterruptibly();
                                secResult1 = sec1.isSuccess();
                            }
                            if (nn.getNodeId() == secondary2) {
                                sec2 = createChannelAndForward(nn.getHost(), nn.getPort(), fwdSec2, uniKey);
                                sec2.awaitUninterruptibly();
                                secResult2 = sec2.isSuccess();
                            }
                            if (sec1 != null && sec2 != null)
                                break;
                        }
                        if (secResult1 && secResult2) {
                            // if(secResult2){
                            MongoStorage.deleteReplica(uniKey);
                        }
                    } else if (Server.getMyId() == secondary1 || Server.getMyId() == secondary2) {
                        logger.info("inside secondary node : " + Server.getMyId());
                        logger.info("delete from secondary");
                    }
                    MongoStorage.deleteFile(uniKey);
                    Request.Builder rb = Request.newBuilder();
                    Payload.Builder pb = Payload.newBuilder();
                    JobStatus.Builder jb = JobStatus.newBuilder();
                    jb.setStatus(PokeStatus.SUCCESS);
                    jb.setJobId(jobOp.getJobId());
                    jb.setJobState(JobDesc.JobCode.JOBRECEIVED);
                    pb.setJobStatus(jb.build());
                    message = "image with " + uniKey + " is removed.";
                    rb.setBody(pb.build());
                    logger.info("message" + message);
                    rb.setHeader(ResourceUtil.buildHeaderResponse(request.getHeader().getRoutingId(),
                            PokeStatus.SUCCESS, message, request.getHeader().getOriginator(),
                            request.getHeader().getTag(), ResponseFlag.success));
                    reply = rb.build();
                    return reply;
                }
            }
        }
    }
    return reply;
}
From source file:org.hawkular.alerts.engine.impl.PartitionManagerImpl.java
/**
 * Distribute triggers on nodes using a consistent hashing strategy.
 * This strategy allows the cluster to scale while minimizing the changes and re-distribution
 * needed when membership changes.
 *
 * @param entries a list of entries to distribute
 * @param buckets a table of nodes
 * @return a map of entries distributed across nodes
 */
public Map<PartitionEntry, Integer> calculatePartition(List<PartitionEntry> entries,
        Map<Integer, Integer> buckets) {
    if (entries == null) {
        throw new IllegalArgumentException("entries must be not null");
    }
    if (isEmpty(buckets)) {
        throw new IllegalArgumentException("buckets must be not null");
    }
    HashFunction md5 = Hashing.md5();
    int numBuckets = buckets.size();
    Map<PartitionEntry, Integer> newPartition = new HashMap<>();
    for (PartitionEntry entry : entries) {
        newPartition.put(entry,
                buckets.get(Hashing.consistentHash(md5.hashInt(entry.hashCode()), numBuckets)));
    }
    return newPartition;
}
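The hashInt-plus-consistentHash pattern used above can be exercised in isolation; a minimal sketch where plain strings stand in for PartitionEntry objects and the node count is fixed:

import com.google.common.hash.HashFunction;
import com.google.common.hash.Hashing;
import java.util.Arrays;
import java.util.List;

public class PartitionSketch {
    public static void main(String[] args) {
        HashFunction md5 = Hashing.md5();
        List<String> entries = Arrays.asList("trigger-a", "trigger-b", "trigger-c", "trigger-d");
        for (String entry : entries) {
            // Same shape as calculatePartition: hash the entry's hashCode, then pick a bucket.
            int node = Hashing.consistentHash(md5.hashInt(entry.hashCode()), 3);
            System.out.println(entry + " -> node " + node);
        }
    }
}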
From source file:org.hawkular.alerts.engine.impl.PartitionManagerImpl.java
/**
 * Distribute a new entry across buckets using a consistent hashing strategy.
 *
 * @param newEntry the new entry to distribute
 * @param buckets a table of nodes
 * @return the code of the node on which the new entry is placed
 */
public Integer calculateNewEntry(PartitionEntry newEntry, Map<Integer, Integer> buckets) {
    if (newEntry == null) {
        throw new IllegalArgumentException("newEntry must be not null");
    }
    if (isEmpty(buckets)) {
        throw new IllegalArgumentException("buckets must be not null");
    }
    HashFunction md5 = Hashing.md5();
    int numBuckets = buckets.size();
    return buckets.get(Hashing.consistentHash(md5.hashInt(newEntry.hashCode()), numBuckets));
}