Example usage for com.google.common.hash Hashing md5

Introduction

On this page you can find usage examples for com.google.common.hash.Hashing.md5().

Prototype

public static HashFunction md5() 

Document

Returns a hash function implementing the MD5 hash algorithm (128 hash bits) by delegating to the MD5 MessageDigest.
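
A minimal, self-contained sketch of the returned HashFunction (the class name Md5Example and the sample strings are illustrative): hash a value in a single call, or feed data incrementally through a Hasher. Note that newer Guava releases deprecate md5() for security-sensitive uses, while keeping it available for interoperability with systems that require MD5.

import com.google.common.hash.HashCode;
import com.google.common.hash.Hashing;

import java.nio.charset.StandardCharsets;

public class Md5Example {
    public static void main(String[] args) {
        // Hash a complete value in one call; toString() yields the
        // hex-encoded 128-bit digest.
        HashCode direct = Hashing.md5().hashString("hello world", StandardCharsets.UTF_8);
        System.out.println(direct);

        // Feed the same data incrementally through a Hasher; the result
        // is the same digest because MD5 is a streaming hash.
        HashCode streamed = Hashing.md5().newHasher()
                .putString("hello ", StandardCharsets.UTF_8)
                .putString("world", StandardCharsets.UTF_8)
                .hash();
        System.out.println(direct.equals(streamed)); // true
    }
}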

Usage

From source file:org.eclipse.che.api.builder.internal.SourcesManagerImpl.java
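
Here Hashing.md5() is passed to com.google.common.io.Files.hash(...) to compute a checksum for every file already present locally; the checksums are then POSTed to the remote server so that only changed content needs to be downloaded.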

private void download(String downloadUrl, java.io.File downloadTo) throws IOException {
    HttpURLConnection conn = null;
    try {
        final LinkedList<java.io.File> q = new LinkedList<>();
        q.add(downloadTo);
        final long start = System.currentTimeMillis();
        final List<Pair<String, String>> md5sums = new LinkedList<>();
        while (!q.isEmpty()) {
            java.io.File current = q.pop();
            java.io.File[] list = current.listFiles();
            if (list != null) {
                for (java.io.File f : list) {
                    if (f.isDirectory()) {
                        q.push(f);
                    } else {
                        md5sums.add(Pair.of(com.google.common.io.Files.hash(f, Hashing.md5()).toString(),
                                downloadTo.toPath().relativize(f.toPath()).toString().replace("\\", "/"))); // replacing "\" with "/" is needed for Windows support
                    }
                }
            }
        }
        final long end = System.currentTimeMillis();
        if (md5sums.size() > 0) {
            LOG.debug("count md5sums of {} files, time: {}ms", md5sums.size(), (end - start));
        }
        conn = (HttpURLConnection) new URL(downloadUrl).openConnection();
        conn.setConnectTimeout(CONNECT_TIMEOUT);
        conn.setReadTimeout(READ_TIMEOUT);
        final EnvironmentContext context = EnvironmentContext.getCurrent();
        if (context.getUser() != null && context.getUser().getToken() != null) {
            conn.setRequestProperty(HttpHeaders.AUTHORIZATION, context.getUser().getToken());
        }
        if (!md5sums.isEmpty()) {
            conn.setRequestMethod(HttpMethod.POST);
            conn.setRequestProperty("Content-type", MediaType.TEXT_PLAIN);
            conn.setRequestProperty(HttpHeaders.ACCEPT, MediaType.MULTIPART_FORM_DATA);
            conn.setDoOutput(true);
            try (OutputStream output = conn.getOutputStream(); Writer writer = new OutputStreamWriter(output)) {
                for (Pair<String, String> pair : md5sums) {
                    writer.write(pair.first);
                    writer.write(' ');
                    writer.write(pair.second);
                    writer.write('\n');
                }
            }
        }
        final int responseCode = conn.getResponseCode();
        if (responseCode == HttpURLConnection.HTTP_OK) {
            final String contentType = conn.getHeaderField("content-type");
            if (contentType.startsWith(MediaType.MULTIPART_FORM_DATA)) {
                final HeaderParameterParser headerParameterParser = new HeaderParameterParser();
                final String boundary = headerParameterParser.parse(contentType).get("boundary");
                try (InputStream in = conn.getInputStream()) {
                    MultipartStream multipart = new MultipartStream(in, boundary.getBytes());
                    boolean hasMore = multipart.skipPreamble();
                    while (hasMore) {
                        final Map<String, List<String>> headers = parseChunkHeader(
                                CharStreams.readLines(new StringReader(multipart.readHeaders())));
                        final List<String> contentDisposition = headers.get("content-disposition");
                        final String name = headerParameterParser.parse(contentDisposition.get(0)).get("name");
                        if ("updates".equals(name)) {
                            int length = -1;
                            List<String> contentLengthHeader = headers.get("content-length");
                            if (contentLengthHeader != null && !contentLengthHeader.isEmpty()) {
                                length = Integer.parseInt(contentLengthHeader.get(0));
                            }
                            if (length < 0 || length > 204800) {
                                java.io.File tmp = java.io.File.createTempFile("tmp", ".zip", directory);
                                try {
                                    try (FileOutputStream fOut = new FileOutputStream(tmp)) {
                                        multipart.readBodyData(fOut);
                                    }
                                    ZipUtils.unzip(tmp, downloadTo);
                                } finally {
                                    if (tmp.exists()) {
                                        tmp.delete();
                                    }
                                }
                            } else {
                                final ByteArrayOutputStream bOut = new ByteArrayOutputStream(length);
                                multipart.readBodyData(bOut);
                                ZipUtils.unzip(new ByteArrayInputStream(bOut.toByteArray()), downloadTo);
                            }
                        } else if ("removed-paths".equals(name)) {
                            final ByteArrayOutputStream bOut = new ByteArrayOutputStream();
                            multipart.readBodyData(bOut);
                            final String[] removed = JsonHelper.fromJson(
                                    new ByteArrayInputStream(bOut.toByteArray()), String[].class, null);
                            for (String path : removed) {
                                java.io.File f = new java.io.File(downloadTo, path);
                                if (!f.delete()) {
                                    throw new IOException(String.format("Unable to delete %s", path));
                                }
                            }
                        } else {
                            // To /dev/null :)
                            multipart.readBodyData(DEV_NULL);
                        }
                        hasMore = multipart.readBoundary();
                    }
                }
            } else {
                try (InputStream in = conn.getInputStream()) {
                    ZipUtils.unzip(in, downloadTo);
                }
            }
        } else if (responseCode != HttpURLConnection.HTTP_NO_CONTENT) {
            throw new IOException(
                    String.format("Invalid response status %d from remote server. ", responseCode));
        }
    } catch (ParseException | JsonParseException e) {
        throw new IOException(e.getMessage(), e);
    } finally {
        if (conn != null) {
            conn.disconnect();
        }
    }
}

From source file:com.android.tools.idea.gradle.project.GradleProjectSyncData.java
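
This snippet computes a file checksum with Hashing.md5().hashBytes(...), preferring the content of the IDE's virtual file over raw disk I/O when the file is tracked by the IDE.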

@NotNull
private static byte[] createChecksum(@NotNull File file) throws IOException {
    // For files tracked by the IDE we get the content from the virtual files, otherwise we revert to io.
    VirtualFile vf = findFileByIoFile(file, true);
    byte[] data = new byte[] {};
    if (vf != null) {
        vf.refresh(false, false);
        if (vf.exists()) {
            data = vf.contentsToByteArray();
        }
    } else if (file.exists()) {
        data = toByteArray(file);
    }
    return Hashing.md5().hashBytes(data).asBytes();
}

From source file:poke.resources.JobResource.java
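
In this distributed photo-storage resource, the leader picks a shard for a new image by combining Hashing.md5().hashString(key) with Hashing.consistentHash(...), then forwards the request to the selected node.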

@Override
public Request process(Request request) {
    // TODO Auto-generated method stub
    Request reply = null;
    String uuid = null;
    String message = "";
    boolean success = false;
    PhotoHeader imageHeader = request.getHeader().getPhotoHeader();
    PhotoPayload imagePayload = request.getBody().getPhotoPayload();

    int read = PhotoHeader.RequestType.read.getNumber();
    int write = PhotoHeader.RequestType.write.getNumber();
    int delete = PhotoHeader.RequestType.delete.getNumber();

    Integer leaderId = ElectionManager.getInstance().whoIsTheLeader();

    if (Server.getMyId() == leaderId) {

        // only if it is a response   
        if (request.getHeader().getPhotoHeader().hasResponseFlag()) {
            logger.info("\n**********\nRECEIVED JOB STATUS" + "\n\n**********");
            List<JobDesc> list = request.getBody().getJobStatus().getDataList();
            String jobId = request.getBody().getJobStatus().getJobId();

            int succesResponse = PhotoHeader.ResponseFlag.success.getNumber();
            int responseFlag = imageHeader.getResponseFlag().getNumber();
            if (succesResponse == responseFlag) {
                logger.info("@MInu -> inside the final response from server.....");
                Request.Builder rep = Request.newBuilder(request);
                reply = rep.build();
                Channel ch = chMap.get(jobId);
                chMap.remove(jobId);
                ch.writeAndFlush(reply);
            }
        }

        else if (request.getBody().hasJobOp()) {
            // apply sharding only if its leader
            logger.info("\n**********\n RECEIVED NEW JOB REQUEST-Leader" + "\n\n**********");
            logger.info("\n**********\n RE-DIRECTING TO A NODE-SHARDING" + "\n\n**********");
            String jobId = request.getBody().getJobOp().getJobId();
            JobOperation jobOp = request.getBody().getJobOp();
            requestMap.put(jobId, request);

            if (imageHeader.hasRequestType()) {

                int requestType = imageHeader.getRequestType().getNumber();

                // check if we need to put jobAction.equals(JobAction.ADDJOB) condition?? 
                if (requestType == write || addimage.equals(jobOp.getData().getNameSpace())) {
                    logger.info("ADDJOB received");

                    UUID uniqueKey = UUID.randomUUID();
                    String key = "T10" + uniqueKey.toString().substring(uniqueKey.toString().length() - 4);

                    // route the request to server
                    shardedNodes = ShardingManager.getInstance().getShardedServers();

                    // @Test
                    List<Integer> serverIdTestNew = ShardingManager.getInstance().getServerIds();
                    for (int i = 0; i < serverIdTestNew.size(); i++) {
                        logger.info(i + " : Sharded node is : " + serverIdTestNew.get(i));
                    }
                    logger.info("The size of the shardedNodes is ----------> "
                            + ShardingManager.getInstance().getShardedServers().size());
                    //@Test 

                    int bucket = Hashing.consistentHash(Hashing.md5().hashString(key),
                            ShardingManager.getInstance().getShardedServers().size());
                    int server = get(bucket);

                    logger.info("Server to which request to be routed - " + server);

                    // forward to server(node id)- forwardResource
                    Request fwd = ResourceUtil.buildForwardAddMessage(request, server, key);

                    for (NodeDesc nn : configFile.getAdjacent().getAdjacentNodes().values()) {
                        if (nn.getNodeId() == server) {
                            ChannelFuture fut = createChannelAndForward(nn.getHost(), nn.getPort(), fwd, key);
                            fut.awaitUninterruptibly();
                            boolean good = fut.isSuccess();
                            logger.info("waiting inside the leader for the response-----");
                            if (good)
                                logger.info(
                                        "Successfully forwarded the request to the sharded node primary! -- "
                                                + nn.getNodeId());
                            break;
                        }
                    }
                }
                // read the image by leader and check the mongodb collection 
                /*
                 * add the delete condition together with the get
                 */
                else if (getimage.equals(jobOp.getData().getNameSpace()) || requestType == read
                        || deleteimage.equals(jobOp.getData().getNameSpace()) || requestType == delete) {

                    String key = request.getBody().getPhotoPayload().getUuid();
                    // check mongo collection
                    ReplicaDomain replicaStorage = MongoStorage.getReplicaById(key);
                    // found uuid in my cluster then forward to slave to get response
                    if (replicaStorage != null) {
                        // check if it is a broadcasted message by checking the entryNode field
                        if (imageHeader.hasEntryNode()) {
                            // store the ip address in entry node with jobid
                            String sourceIP = imageHeader.getEntryNode();
                            // remove the established channel between the sourceIP and destinationIP (me) if it is a broadcasted request
                            Channel ch = chMap.get(jobId);
                            chMap.remove(jobId);
                            //establish a new channel with the sourceIP and put it in chmap to be taken whenever a response is ready
                            InetSocketAddress socket = new InetSocketAddress(sourceIP, 5570);
                            Channel returnChannel = connectToPublic(socket);
                            chMap.put(jobId, returnChannel);
                        }
                        int primary = replicaStorage.getPrimaryNode();
                        int secondary1 = replicaStorage.getSecondaryNode1();
                        int secondary2 = replicaStorage.getSecondaryNode2();
                        List<Integer> serverIdNew = ShardingManager.getInstance().getServerIds();
                        int server = 100000;
                        if (serverIdNew.contains(primary)) {
                            server = primary;
                        } else if (serverIdNew.contains(secondary1)) {
                            server = secondary1;
                        } else if (serverIdNew.contains(secondary2)) {
                            server = secondary2;
                        } else {
                            logger.info("ALl the server with this uuid is down: ");
                        }

                        // @Test
                        shardedNodes = ShardingManager.getInstance().getShardedServers();
                        List<Integer> serverIdTestNew = ShardingManager.getInstance().getServerIds();
                        for (int i = 0; i < serverIdTestNew.size(); i++) {
                            logger.info(i + " : Sharded node : is : " + serverIdTestNew.get(i));
                        }
                        // @Test

                        logger.info("server to which request to be routed - " + server);
                        // forward to server(node id)- forwardResource
                        Request fwd = ResourceUtil.buildForwardMessage(request, configFile);

                        for (NodeDesc nn : configFile.getAdjacent().getAdjacentNodes().values()) {
                            if (nn.getNodeId() == server) {
                                if (server != primary && requestType == delete) {
                                    // forward the request to both the secondary nodes
                                    ChannelFuture futSec = createChannelAndForward(nn.getHost(), nn.getPort(),
                                            fwd, key);
                                    futSec.awaitUninterruptibly();
                                    boolean goodSec = futSec.isSuccess();
                                    if (goodSec)
                                        logger.info(
                                                "Forwarded the delete request to the secondary NOde " + server);
                                    if (server != secondary2 && serverIdNew.contains(secondary2))
                                        server = secondary2;
                                    if (server != secondary1 && serverIdNew.contains(secondary1))
                                        server = secondary1;
                                }

                                ChannelFuture fut = createChannelAndForward(nn.getHost(), nn.getPort(), fwd,
                                        key);
                                fut.awaitUninterruptibly();
                                boolean good = fut.isSuccess();
                                logger.info("waiting inside the leader after ");
                                if (good)
                                    logger.info(
                                            "successfully forwarded the request to server : " + nn.getNodeId());
                                break;
                            }
                        }
                    }
                    // Requested uuid not in my cluster
                    /* broadcast if there is no entry point, else discard;
                     * populate the entryPoint in the request with my id-ip
                     */
                    else {
                        logger.info("Forward to a different cluster");
                        if (!imageHeader.hasEntryNode()) {
                            // broadcast to all after putting my id
                            String leaderHost = null;
                            for (NodeDesc nn : configFile.getAdjacent().getAdjacentNodes().values()) {
                                if (nn.getNodeId() == leaderId) {
                                    leaderHost = nn.getHost();
                                }
                            }
                            String destHost = null;
                            int destPort = 0;
                            Request request1 = ResourceUtil.buildBroadcastRequest(request, leaderHost);

                            List<String> leaderList = new ArrayList<String>();

                            leaderList.add(new String("192.168.0.7:5670"));
                            // leaderList.add(new String("192.168.0.60:5673"));
                            // leaderList.add(new String("192.168.0.230:5573"));

                            for (String destination : leaderList) {
                                String[] dest = destination.split(":");
                                destHost = dest[0];
                                destPort = Integer.parseInt(dest[1]);

                                ChannelFuture fut = createChannelAndForward(destHost, destPort, request1, null);
                                fut.awaitUninterruptibly();
                                boolean good = fut.isSuccess();
                                logger.info(
                                        "waiting inside the leader (connected to client) for the response for broadcasted request-----");
                                if (good)
                                    logger.info("successfully broadcasted the request to servers : ");
                            }
                        }
                    // received request has an entry point and the uuid couldn't be found in this cluster; ignore it
                        else {
                            //Discard the request
                            //                        return null;
                            Channel ch = chMap.get(jobId);
                            chMap.remove(jobId);
                        }
                    }
                }
            }
        }
    } else {
        // By Slaves only
        logger.info("\n**********\n RECEIVED NEW JOB REQUEST BY A SLAVE NODE" + "\n\n**********");
        JobOperation jobOp = request.getBody().getJobOp();

        //Response from Secondary nodes

        // only if it is a response   
        if (request.getHeader().getPhotoHeader().hasResponseFlag()
                && (request.getHeader().getReplica().equalsIgnoreCase("broadcastReply"))) {

            logger.info("\n**********\nRECEIVED JOB STATUS" + "\n\n**********");
            String jobId = request.getBody().getJobStatus().getJobId();
            int succesResponse = PhotoHeader.ResponseFlag.success.getNumber();
            int failureResponse = PhotoHeader.ResponseFlag.failure.getNumber();

            int responseFlag = imageHeader.getResponseFlag().getNumber();
            //            if(succesResponse == responseFlag)
            //            {
            logger.info("@MInu -> inside the  response from primary server.....");
            Request.Builder rep = Request.newBuilder(request);
            reply = rep.build();
            if (chMap.containsKey(jobId)) {
                Channel ch = chMap.get(jobId);
                // respond back if the secondary reports success
                if (succesResponse == responseFlag) {
                    chMap.remove(jobId);
                    ch.writeAndFlush(reply);
                } else {
                    chMap.remove(jobId);
                    if (checkStoredInPrimary) {
                        checkStoredInPrimary = false;
                        logger.info("***************Stored to mongodb of Primary node**************");
                        // build response
                        Request primaryReply = null;
                        message = "Successfully store to Primary MongoDB";
                        Request.Builder rb = Request.newBuilder();
                        Payload.Builder pb = Payload.newBuilder();

                        JobStatus.Builder jb = JobStatus.newBuilder();
                        jb.setStatus(PokeStatus.SUCCESS);
                        jb.setJobId(jobOp.getJobId());
                        jb.setJobState(JobDesc.JobCode.JOBRECEIVED);

                        pb.setJobStatus(jb.build());

                        PhotoPayload.Builder pp = PhotoPayload.newBuilder();
                        //                        pp.setUuid(uuid);
                        PhotoPayload.Builder newPhotoBldr = PhotoPayload.newBuilder(pp.build());
                        pb.setPhotoPayload(newPhotoBldr);
                        rb.setBody(pb.build());
                        // check if we can re-use the same method in resourceutil
                        rb.setHeader(ResourceUtil.buildHeaderResponse(request.getHeader().getRoutingId(),
                                PokeStatus.SUCCESS, message, request.getHeader().getOriginator(),
                                request.getHeader().getTag(), ResponseFlag.success));

                        primaryReply = rb.build();
                        ch.writeAndFlush(primaryReply);
                    }
                }
            } else {
                logger.info("Ignoring the response from the Secondary 2!");
                return reply;
            }
        }

        else if (imageHeader.hasRequestType() && !(imageHeader.hasEntryNode())) {

            logger.info("ADDJOB received");
            logger.info(jobOp.getData().getNameSpace());
            int requestType = imageHeader.getRequestType().getNumber();
            String jobId = request.getBody().getJobOp().getJobId();

            // check if we need to put jobAction.equals(JobAction.ADDJOB) condition?? 
            if (requestType == write || addimage.equals(jobOp.getData().getNameSpace())) {
                String key = imagePayload.getUuid();
                byte[] image = imagePayload.getData().toByteArray();
                long creationDate = System.currentTimeMillis() % 1000;
                int contentLength = imageHeader.getContentLength();
                String title = imagePayload.getName();

                logger.info("@Minu--->unique key is-after setting: " + key);
                // TODO -check if there is uuid in the request otherwise forward to the leader
                // if the message is for me

                if (contentLength <= 56000) {
                    //                  if(request.getHeader().hasReplica()) {
                    if (request.getHeader().getReplica().equalsIgnoreCase("broadcast")) {

                        logger.info("store to secondary");
                        uuid = MongoStorage.addFile(key, title, image, creationDate, image.length);
                        if (uuid == null) {
                            logger.info("Request is not handled by secondary!");
                            message = "Request is not handled by secondary!";
                            Request.Builder rb = Request.newBuilder();
                            Payload.Builder pb = Payload.newBuilder();

                            JobStatus.Builder jb = JobStatus.newBuilder();
                            jb.setStatus(PokeStatus.FAILURE);
                            jb.setJobId(jobOp.getJobId());
                            jb.setJobState(JobDesc.JobCode.JOBRECEIVED);

                            pb.setJobStatus(jb.build());
                            rb.setBody(pb.build());
                            // check if we can re-use the same method in resourceutil
                            rb.setHeader(ResourceUtil.buildHeaderResponse(request.getHeader().getRoutingId(),
                                    PokeStatus.FAILURE, message, request.getHeader().getOriginator(),
                                    request.getHeader().getTag(), ResponseFlag.failure));

                            reply = rb.build();
                            return reply;
                        } else {
                            logger.info("***************Stored to mongodb of secondary node**************");
                            // build response
                            message = "Successfully stored to Secondary MongoDB";
                            Request.Builder rb = Request.newBuilder();
                            Payload.Builder pb = Payload.newBuilder();

                            JobStatus.Builder jb = JobStatus.newBuilder();
                            jb.setStatus(PokeStatus.SUCCESS);
                            jb.setJobId(jobOp.getJobId());
                            jb.setJobState(JobDesc.JobCode.JOBRECEIVED);

                            pb.setJobStatus(jb.build());

                            PhotoPayload.Builder pp = PhotoPayload.newBuilder();
                            pp.setUuid(uuid);
                            PhotoPayload.Builder newPhotoBldr = PhotoPayload.newBuilder(pp.build());
                            pb.setPhotoPayload(newPhotoBldr);
                            rb.setBody(pb.build());
                            // check if we can re-use the same method in resourceutil
                            rb.setHeader(ResourceUtil.buildHeaderResponse(request.getHeader().getRoutingId(),
                                    PokeStatus.SUCCESS, message, request.getHeader().getOriginator(),
                                    request.getHeader().getTag(), ResponseFlag.success));

                            reply = rb.build();
                            return reply;
                        }
                    } else if (!request.getHeader().getReplica().equalsIgnoreCase("broadcast")
                            || !request.getHeader().hasReplica()
                            || !request.getHeader().getReplica().equalsIgnoreCase("broadcastReply")) {
                        List<Integer> serverIds = ShardingManager.getInstance().getServerIds();
                        int myID = Server.getMyId();
                        int prev = 1000;
                        int next = 1000;
                        ChannelFuture secondary1 = null, secondary2 = null;
                        boolean sec1good = false, sec2good = false;
                        logger.info("my ID :" + myID);
                        logger.info("size : " + serverIds.size());

                        List<Integer> activeNodes = new ArrayList<Integer>();
                        for (int count = 0; count < serverIds.size(); count++) {
                            if (serverIds.get(count) != leaderId) {
                                logger.info("server added: " + serverIds.get(count));
                                activeNodes.add(serverIds.get(count));
                            }
                        }

                        logger.info("active node size:" + activeNodes.size());
                        for (int i = 0; i < activeNodes.size(); i++) {
                            logger.info("serverIds : " + activeNodes.get(i));
                            if (myID == activeNodes.get(i)) {

                                if (i == 0) {
                                    logger.info("i in if-prev:" + i);
                                    prev = activeNodes.get(activeNodes.size() - 1);

                                } else {
                                    logger.info("i in else-prev :" + i);
                                    prev = activeNodes.get(i - 1);

                                }
                                logger.info("prev :" + prev);
                                if (activeNodes.size() == i + 1) {
                                    logger.info("i in if-next:" + serverIds.size());
                                    next = activeNodes.get(0);

                                } else {
                                    logger.info("i in if-next:" + serverIds.size());
                                    next = activeNodes.get(i + 1);

                                }
                                logger.info("next :" + next);
                                if (prev != 1000 && next != 1000)
                                    break;
                            }
                        }

                        Request fwdPrev = ResourceUtil.buildForwardReplicaMessage(request, prev);
                        if (fwdPrev == null) {
                            logger.info("sec1 request is null");
                        }

                        Request fwdNext = ResourceUtil.buildForwardReplicaMessage(request, next);
                        if (fwdNext == null) {
                            logger.info("sec2 request is null");
                        }
                        for (NodeDesc nn : configFile.getAdjacent().getAdjacentNodes().values()) {
                            if (nn.getNodeId() == prev) {
                                secondary1 = createChannelAndForward(nn.getHost(), nn.getPort(), fwdPrev, key);
                                // add to primary channel map
                                secondary1.awaitUninterruptibly();
                                sec1good = secondary1.isSuccess();
                            }
                            logger.info(
                                    "For lloooooopppp ******** checking to which node it should be replicated : "
                                            + nn.getNodeId());

                            if (nn.getNodeId() == next) {
                                logger.info("sending the request to replicaaaaa--------> " + next);
                                secondary2 = createChannelAndForward(nn.getHost(), nn.getPort(), fwdNext, key);
                                // add to primary channel map
                                secondary2.awaitUninterruptibly();
                                sec2good = secondary2.isSuccess();

                                logger.info("result of forwarding to replicaaaaaaa ------ > " + sec2good);
                            }
                            if (secondary1 != null && secondary2 != null)
                                break;
                        }
                        if (sec1good && sec2good) {
                            MongoStorage.addReplicas(key, myID, prev, next);
                            logger.info("added to mongo replicaaaaaaaa -------- ");
                            uuid = MongoStorage.addFile(key, title, image, creationDate, image.length);
                        } else {
                            logger.error("Replication Failed!!");
                        }
                        if (uuid == null) {
                            logger.error("Request is not handled!");
                        } else {
                            logger.info("***************Stored to mongodb of **************");
                            // check if it is stored in primary; if so, set the flag. Once the secondary responses arrive, check the flag and return the reply accordingly
                            checkStoredInPrimary = true;

                            //=================== Response from Primary
                            logger.info("***************Stored to mongodb of secondary node**************");
                            // build response
                            message = "Successfully stored to Secondary MongoDB";
                            Request.Builder rb = Request.newBuilder();
                            Payload.Builder pb = Payload.newBuilder();

                            JobStatus.Builder jb = JobStatus.newBuilder();
                            jb.setStatus(PokeStatus.SUCCESS);
                            jb.setJobId(jobOp.getJobId());
                            jb.setJobState(JobDesc.JobCode.JOBRECEIVED);

                            pb.setJobStatus(jb.build());

                            PhotoPayload.Builder pp = PhotoPayload.newBuilder();
                            pp.setUuid(uuid);
                            PhotoPayload.Builder newPhotoBldr = PhotoPayload.newBuilder(pp.build());
                            pb.setPhotoPayload(newPhotoBldr);
                            rb.setBody(pb.build());
                            // check if we can re-use the same method in resourceutil
                            rb.setHeader(ResourceUtil.buildHeaderResponse(request.getHeader().getRoutingId(),
                                    PokeStatus.SUCCESS, message, request.getHeader().getOriginator(),
                                    request.getHeader().getTag(), ResponseFlag.success));

                            reply = rb.build();
                            return reply;

                        }
                    }
                }
            } else if (getimage.equals(jobOp.getData().getNameSpace()) || requestType == read) {
                String key = imagePayload.getUuid();
                logger.info("unique key: " + key);

                BasicDBObject getImage = MongoStorage.getFileByfId(key);
                if (getImage == null) {
                    logger.info("Image is not found! ");
                } else {
                    message = "image details attached";
                    String imgName = getImage.getString("name");
                    String uniqueId = getImage.getString("_id");
                    byte[] imgFile = (byte[]) getImage.get("image");

                    Request.Builder rb = Request.newBuilder();
                    Payload.Builder pb = Payload.newBuilder();

                    JobStatus.Builder jb = JobStatus.newBuilder();
                    jb.setStatus(PokeStatus.SUCCESS);
                    jb.setJobId(jobOp.getJobId());
                    jb.setJobState(JobDesc.JobCode.JOBRECEIVED);

                    pb.setJobStatus(jb.build());

                    PhotoPayload.Builder ppb = PhotoPayload.newBuilder();
                    ByteString bs = com.google.protobuf.ByteString.copyFrom(imgFile);
                    logger.info("getting the data length as : " + bs.size());
                    ppb.setData(bs);
                    ppb.setName(imgName);
                    ppb.setUuid(uniqueId);
                    PhotoPayload.Builder newPhotoBldr = PhotoPayload.newBuilder(ppb.build());
                    pb.setPhotoPayload(newPhotoBldr);

                    rb.setBody(pb.build());
                    logger.info("message" + message);
                    rb.setHeader(ResourceUtil.buildHeaderResponse(request.getHeader().getRoutingId(),
                            PokeStatus.SUCCESS, message, request.getHeader().getOriginator(),
                            request.getHeader().getTag(), ResponseFlag.success));
                    reply = rb.build();
                    return reply;
                }
            } else if (deleteimage.equals(jobOp.getData().getNameSpace()) || requestType == delete) {

                String uniKey = imagePayload.getUuid();
                logger.info("unique key: " + uniKey);

                if (request.getHeader().getReplica().equalsIgnoreCase("broadcast")) {
                    logger.info("uuid: " + imagePayload.getUuid());
                    MongoStorage.deleteFile(uniKey);
                    logger.info("deleted from secondary replica");
                } else {
                    ReplicaDomain replicaData = MongoStorage.getReplicaById(uniKey);
                    int primary = replicaData.getPrimaryNode();
                    int secondary1 = replicaData.getSecondaryNode1();
                    int secondary2 = replicaData.getSecondaryNode2();
                    logger.info("primary -" + primary + ", secondary1- " + secondary1 + "secondary2 -"
                            + secondary2);

                    if (Server.getMyId() == primary) {
                        logger.info("inside primary node : " + Server.getMyId());
                        Request fwdSec1 = ResourceUtil.buildForwardReplicaMessage(request, secondary1);
                        Request fwdSec2 = ResourceUtil.buildForwardReplicaMessage(request, secondary2);
                        ChannelFuture sec1 = null, sec2 = null;
                        Boolean secResult1 = false, secResult2 = false;
                        for (NodeDesc nn : configFile.getAdjacent().getAdjacentNodes().values()) {
                            if (nn.getNodeId() == secondary1) {
                                sec1 = createChannelAndForward(nn.getHost(), nn.getPort(), fwdSec1, uniKey);
                                sec1.awaitUninterruptibly();
                                secResult1 = sec1.isSuccess();

                            }

                            if (nn.getNodeId() == secondary2) {
                                sec2 = createChannelAndForward(nn.getHost(), nn.getPort(), fwdSec2, uniKey);
                                sec2.awaitUninterruptibly();
                                secResult2 = sec2.isSuccess();
                            }
                            if (sec1 != null && sec2 != null)
                                break;
                        }
                        if (secResult1 && secResult2) {
                            //                     if(secResult2){
                            MongoStorage.deleteReplica(uniKey);
                        }
                    } else if (Server.getMyId() == secondary1 || Server.getMyId() == secondary2) {
                        logger.info("inside secondary node : " + Server.getMyId());
                        logger.info("delete from secondary");
                    }
                    MongoStorage.deleteFile(uniKey);

                    Request.Builder rb = Request.newBuilder();
                    Payload.Builder pb = Payload.newBuilder();

                    JobStatus.Builder jb = JobStatus.newBuilder();
                    jb.setStatus(PokeStatus.SUCCESS);
                    jb.setJobId(jobOp.getJobId());
                    jb.setJobState(JobDesc.JobCode.JOBRECEIVED);

                    pb.setJobStatus(jb.build());

                    message = "image with " + uniKey + " is removed.";
                    rb.setBody(pb.build());
                    logger.info("message" + message);
                    rb.setHeader(ResourceUtil.buildHeaderResponse(request.getHeader().getRoutingId(),
                            PokeStatus.SUCCESS, message, request.getHeader().getOriginator(),
                            request.getHeader().getTag(), ResponseFlag.success));
                    reply = rb.build();

                    return reply;
                }
            }
        }
    }
    return reply;
}

From source file:com.google.devtools.build.lib.collect.nestedset.NestedSetStore.java
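
Here the MD5 digest of the serialized NestedSet contents, wrapped in a ByteString, becomes the fingerprint under which the contents are stored and cached.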

/**
 * Computes and returns the fingerprint for the given NestedSet contents using the given {@link
 * SerializationContext}, while also associating the contents with the computed fingerprint in the
 * store. Recursively does the same for all transitive members (i.e. Object[] members) of the
 * provided contents.
 */
@VisibleForTesting
public FingerprintComputationResult computeFingerprintAndStore(Object[] contents,
        SerializationContext serializationContext) throws SerializationException, IOException {
    FingerprintComputationResult priorFingerprint = nestedSetCache.fingerprintForContents(contents);
    if (priorFingerprint != null) {
        return priorFingerprint;
    }

    // For every fingerprint computation, we need to use a new memoization table.  This is required
    // to guarantee that the same child will always have the same fingerprint - otherwise,
    // differences in memoization context could cause part of a child to be memoized in one
    // fingerprinting but not in the other.  We expect this clearing of memoization state to be a
    // major source of extra work over the naive serialization approach.  The same value may have to
    // be serialized many times across separate fingerprintings.
    SerializationContext newSerializationContext = serializationContext.getNewMemoizingContext();
    ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
    CodedOutputStream codedOutputStream = CodedOutputStream.newInstance(byteArrayOutputStream);

    ImmutableList.Builder<ListenableFuture<Void>> futureBuilder = ImmutableList.builder();
    try {
        codedOutputStream.writeInt32NoTag(contents.length);
        for (Object child : contents) {
            if (child instanceof Object[]) {
                FingerprintComputationResult fingerprintComputationResult = computeFingerprintAndStore(
                        (Object[]) child, serializationContext);
                futureBuilder.add(fingerprintComputationResult.writeStatus());
                newSerializationContext.serialize(fingerprintComputationResult.fingerprint(),
                        codedOutputStream);
            } else {
                newSerializationContext.serialize(child, codedOutputStream);
            }
        }
        codedOutputStream.flush();
    } catch (IOException e) {
        throw new SerializationException("Could not serialize NestedSet contents", e);
    }

    byte[] serializedBytes = byteArrayOutputStream.toByteArray();
    ByteString fingerprint = ByteString.copyFrom(Hashing.md5().hashBytes(serializedBytes).asBytes());
    futureBuilder.add(nestedSetStorageEndpoint.put(fingerprint, serializedBytes));

    // If this is a NestedSet<NestedSet>, serialization of the contents will itself have writes.
    ListenableFuture<Void> innerWriteFutures = newSerializationContext.createFutureToBlockWritingOn();
    if (innerWriteFutures != null) {
        futureBuilder.add(innerWriteFutures);
    }

    ListenableFuture<Void> writeFuture = Futures.whenAllComplete(futureBuilder.build()).call(() -> null,
            MoreExecutors.directExecutor());
    FingerprintComputationResult fingerprintComputationResult = FingerprintComputationResult.create(fingerprint,
            writeFuture);

    nestedSetCache.put(fingerprintComputationResult, contents);

    return fingerprintComputationResult;
}

From source file:de.monticore.incremental.IncrementalChecker.java
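
This incremental-build check hashes each dependency's content with Hashing.md5().hashString(...) and compares the digest against the state recorded in the previous report.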

/**
 * Checks whether any of the dependencies of the main input changed, either
 * content-wise or in their actually resolved location (the latter would
 * indicate a change of a dependency version).
 *
 * @param stories
 * @param modelPath
 * @return whether any dependency of the main input changed
 */
protected static boolean dependenciesChanged(Map<String, InputStory> stories, ModelPath modelPath) {
    // here we analyze the dependencies of the file we want to check according
    // to the last report
    for (Entry<String, InputStory> story : stories.entrySet()) {
        // for each dependency we get the respective state (hash or "missing")
        // from the last report
        String input = story.getKey();
        InputStory inputStory = story.getValue();

        ModelCoordinate currentResolution = ModelCoordinates
                .createQualifiedCoordinate(Paths.get(inputStory.inputPath));
        currentResolution = modelPath.resolveModel(currentResolution);

        if (!currentResolution.hasLocation()) {
            Log.debug("The dependency " + inputStory.inputPath + " could not be resolved.",
                    IncrementalChecker.class.getName());
            Log.debug("  Previous location was " + input, IncrementalChecker.class.getName());
            return true;
        }

        // if it's a file within a jar file we read it and compare hashes for
        // changes
        if (input.startsWith("jar:file:")) {
            Log.debug("Examining " + input, IncrementalChecker.class.getName());
            try {
                URL url = new URL(input);

                if (!currentResolution.getLocation().sameFile(url)) {
                    // this will detect changes in jar versions etc.
                    Log.debug("The location of the dependency " + inputStory.inputPath + " changed.",
                            IncrementalChecker.class.getName());
                    Log.debug("  Previous location was " + input, IncrementalChecker.class.getName());
                    Log.debug("  Current location is " + currentResolution.getLocation().toString(),
                            IncrementalChecker.class.getName());
                    return true;
                }

                String inputModel = CharStreams.toString(new InputStreamReader(url.openStream()));
                // the digest is computed via Guava's Hashing rather than a raw MessageDigest
                String currentState = Hashing.md5().hashString(inputModel, Charset.forName("UTF-8")).toString();
                if (!currentState.equals(inputStory.state)) {
                    Log.debug("The dependency " + input + " has changed.", IncrementalChecker.class.getName());
                    Log.debug("  Previous state was " + inputStory.state, IncrementalChecker.class.getName());
                    Log.debug("  Current state is " + currentState, IncrementalChecker.class.getName());
                    return true;
                }
            } catch (IOException e) {
                Log.error("Error during analysis of dependencies for incremental check.", e);
                return true;
            }

        }

        // if it's a regular file we check whether its state changed (hash, and
        // missing vs. present)
        else {
            File file = new File(input);
            String currentState = file.exists() ? IncrementalChecker.getChecksum(input)
                    : InputOutputFilesReporter.MISSING;
            if (!currentState.equals(inputStory.state)) {
                Log.debug("The dependency file " + input + " has changed.", IncrementalChecker.class.getName());
                Log.debug("  Previous state was " + inputStory.state, IncrementalChecker.class.getName());
                Log.debug("  Current state is " + currentState, IncrementalChecker.class.getName());
                return true;
            }
        }
    }
    return false;
}

From source file:org.eclipse.che.api.editor.server.impl.EditorWorkingCopyManager.java
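
Unsaved editor changes are detected by comparing the MD5 digests of the working-copy content and the original file content.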

private boolean isWorkingCopyHasUnsavedData(String originalFilePath) {
    try {
        EditorWorkingCopy workingCopy = workingCopiesStorage.get(originalFilePath);
        if (workingCopy == null) {
            return false;
        }
        String workingCopyContent = workingCopy.getContentAsString();

        String originalFileContent;
        if (fsManager.existsAsFile(originalFilePath)) {
            InputStream inputStream = fsManager.read(originalFilePath);
            originalFileContent = IOUtils.toString(inputStream);
        } else {
            return false;
        }

        if (workingCopyContent == null || originalFileContent == null) {
            return false;
        }

        String workingCopyHash = Hashing.md5().hashString(workingCopyContent, defaultCharset()).toString();
        String originalFileHash = Hashing.md5().hashString(originalFileContent, defaultCharset()).toString();

        return !Objects.equals(workingCopyHash, originalFileHash);
    } catch (NotFoundException | IOException | ServerException | ConflictException e) {
        LOG.error(e.getLocalizedMessage());
    }

    return false;
}

From source file:com.google.api.control.aggregator.CheckRequestAggregator.java
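
This example streams the relevant fields of a CheckRequest through Hashing.md5().newHasher() and returns the resulting HashCode as the request's signature.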

/**
 * Obtains the {@code HashCode} for the contents of {@code value}.
 *
 * @param value a {@code CheckRequest} to be signed
 * @return the {@code HashCode} corresponding to {@code value}
 */
public static HashCode sign(CheckRequest value) {
    Hasher h = Hashing.md5().newHasher();
    Operation o = value.getOperation();
    if (o == null || Strings.isNullOrEmpty(o.getConsumerId()) || Strings.isNullOrEmpty(o.getOperationName())) {
        throw new IllegalArgumentException("CheckRequest should have a valid operation");
    }
    h.putString(o.getConsumerId(), StandardCharsets.UTF_8);
    h.putChar('\0');
    h.putString(o.getOperationName(), StandardCharsets.UTF_8);
    h.putChar('\0');
    Signing.putLabels(h, o.getLabels());
    for (MetricValueSet mvSet : o.getMetricValueSetsList()) {
        h.putString(mvSet.getMetricName(), StandardCharsets.UTF_8);
        h.putChar('\0');
        for (MetricValue metricValue : mvSet.getMetricValuesList()) {
            MetricValues.putMetricValue(h, metricValue);
        }
    }
    return h.hash();
}

From source file:co.cask.tigon.data.util.hbase.HBaseTableUtil.java
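
While packaging coprocessor classes into a jar, this utility feeds every class file through Hashing.md5().newHasher() and embeds the final digest in the jar's file name, so identical contents map to the same target path.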

public static Location createCoProcessorJar(String filePrefix, Location jarDir,
        Iterable<? extends Class<? extends Coprocessor>> classes) throws IOException {
    StringBuilder buf = new StringBuilder();
    for (Class<? extends Coprocessor> c : classes) {
        buf.append(c.getName()).append(", ");
    }
    if (buf.length() == 0) {
        return null;
    }

    LOG.debug("Creating jar file for coprocessor classes: " + buf.toString());
    final Hasher hasher = Hashing.md5().newHasher();
    final byte[] buffer = new byte[COPY_BUFFER_SIZE];

    final Map<String, URL> dependentClasses = new HashMap<String, URL>();
    for (Class<? extends Coprocessor> clz : classes) {
        Dependencies.findClassDependencies(clz.getClassLoader(), new Dependencies.ClassAcceptor() {
            @Override
            public boolean accept(String className, final URL classUrl, URL classPathUrl) {
                // Assuming the endpoint and protocol classes don't have dependencies
                // other than those that come with HBase and Java.
                if (className.startsWith("co.cask")) {
                    if (!dependentClasses.containsKey(className)) {
                        dependentClasses.put(className, classUrl);
                    }
                    return true;
                }
                return false;
            }
        }, clz.getName());
    }

    if (!dependentClasses.isEmpty()) {
        LOG.debug("Adding " + dependentClasses.size() + " classes to jar");
        File jarFile = File.createTempFile(filePrefix, ".jar");
        try {
            JarOutputStream jarOutput = null;
            try {
                jarOutput = new JarOutputStream(new FileOutputStream(jarFile));
                for (Map.Entry<String, URL> entry : dependentClasses.entrySet()) {
                    try {
                        jarOutput.putNextEntry(
                                new JarEntry(entry.getKey().replace('.', File.separatorChar) + ".class"));
                        InputStream inputStream = entry.getValue().openStream();

                        try {
                            int len = inputStream.read(buffer);
                            while (len >= 0) {
                                hasher.putBytes(buffer, 0, len);
                                jarOutput.write(buffer, 0, len);
                                len = inputStream.read(buffer);
                            }
                        } finally {
                            inputStream.close();
                        }
                    } catch (IOException e) {
                        LOG.info("Error writing to jar", e);
                        throw Throwables.propagate(e);
                    }
                }
            } finally {
                if (jarOutput != null) {
                    jarOutput.close();
                }
            }

            // Copy jar file into HDFS
            // Target path is the jarDir + jarMD5.jar
            final Location targetPath = jarDir.append("coprocessor" + hasher.hash().toString() + ".jar");

            // If the file exists and has the same size, assume the file hasn't changed
            if (targetPath.exists() && targetPath.length() == jarFile.length()) {
                return targetPath;
            }

            // Copy jar file into filesystem
            if (!jarDir.mkdirs() && !jarDir.exists()) {
                throw new IOException("Fails to create directory: " + jarDir.toURI());
            }
            Files.copy(jarFile, new OutputSupplier<OutputStream>() {
                @Override
                public OutputStream getOutput() throws IOException {
                    return targetPath.getOutputStream();
                }
            });
            return targetPath;
        } finally {
            jarFile.delete();
        }
    }
    // no dependent classes to add
    return null;
}

From source file:co.cask.cdap.data2.util.hbase.HBaseTableUtil.java
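
A newer variant of the previous example: the coprocessor jar is again named after the MD5 digest of its class files, here using try-with-resources for the jar and class streams.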

public static Location createCoProcessorJar(String filePrefix, Location jarDir,
        Iterable<? extends Class<? extends Coprocessor>> classes) throws IOException {
    StringBuilder buf = new StringBuilder();
    for (Class<? extends Coprocessor> c : classes) {
        buf.append(c.getName()).append(", ");
    }
    if (buf.length() == 0) {
        return null;
    }

    LOG.debug("Creating jar file for coprocessor classes: " + buf.toString());
    final Hasher hasher = Hashing.md5().newHasher();
    final byte[] buffer = new byte[COPY_BUFFER_SIZE];

    final Map<String, URL> dependentClasses = new HashMap<>();
    for (Class<? extends Coprocessor> clz : classes) {
        Dependencies.findClassDependencies(clz.getClassLoader(), new ClassAcceptor() {
            @Override
            public boolean accept(String className, final URL classUrl, URL classPathUrl) {
                // Assuming the endpoint and protocol classes don't have dependencies
                // other than those that come with HBase, Java, and fastutil.
                if (className.startsWith("co.cask") || className.startsWith("it.unimi.dsi.fastutil")) {
                    if (!dependentClasses.containsKey(className)) {
                        dependentClasses.put(className, classUrl);
                    }
                    return true;
                }
                return false;
            }
        }, clz.getName());
    }

    if (!dependentClasses.isEmpty()) {
        LOG.debug("Adding " + dependentClasses.size() + " classes to jar");
        File jarFile = File.createTempFile(filePrefix, ".jar");
        try {
            try (JarOutputStream jarOutput = new JarOutputStream(new FileOutputStream(jarFile))) {
                for (Map.Entry<String, URL> entry : dependentClasses.entrySet()) {
                    try {
                        jarOutput.putNextEntry(
                                new JarEntry(entry.getKey().replace('.', File.separatorChar) + ".class"));

                        try (InputStream inputStream = entry.getValue().openStream()) {
                            int len = inputStream.read(buffer);
                            while (len >= 0) {
                                hasher.putBytes(buffer, 0, len);
                                jarOutput.write(buffer, 0, len);
                                len = inputStream.read(buffer);
                            }
                        }
                    } catch (IOException e) {
                        LOG.info("Error writing to jar", e);
                        throw Throwables.propagate(e);
                    }
                }
            }

            // Copy jar file into HDFS
            // Target path is the jarDir + jarMD5.jar
            final Location targetPath = jarDir.append("coprocessor" + hasher.hash().toString() + ".jar");

            // If the file exists and has the same size, assume the file hasn't changed
            if (targetPath.exists() && targetPath.length() == jarFile.length()) {
                return targetPath;
            }

            // Copy jar file into filesystem
            if (!jarDir.mkdirs() && !jarDir.exists()) {
                throw new IOException("Fails to create directory: " + jarDir.toURI());
            }
            Files.copy(jarFile, new OutputSupplier<OutputStream>() {
                @Override
                public OutputStream getOutput() throws IOException {
                    return targetPath.getOutputStream();
                }
            });
            return targetPath;
        } finally {
            jarFile.delete();
        }
    }
    // no dependent classes to add
    return null;
}

From source file:de.monticore.generating.templateengine.reporting.reporter.InputOutputFilesReporter.java
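
This reporter writes the MD5 digest of each input model, including models read from inside jar files, so that later runs can detect changes.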

private void writeContent() {

    // the magic TODO AHo: document
    for (Path lateOne : filesThatMatterButAreNotThereInTime) {
        if (modelToArtifactMap.containsKey(lateOne)) {
            String toAdd = modelToArtifactMap.get(lateOne).toString() + PARENT_FILE_SEPARATOR
                    + lateOne.toString();
            if (!inputFiles.contains(toAdd)) {
                inputFiles.add(toAdd);
            }
        }
    }

    Collections.sort(inputFiles);
    Collections.sort(outputFiles);

    if (inputFile != null && !inputFile.isEmpty()) {
        String checkSum = IncrementalChecker.getChecksum(inputFile);
        writeLine(inputFile + INPUT_STATE_SEPARATOR + checkSum);

        for (String s : inputFiles) {
            if (s.contains(PARENT_FILE_SEPARATOR)) {
                String[] elements = s.split(PARENT_FILE_SEPARATOR);
                if (elements[0].endsWith(".jar")) {
                    String inputFile = elements[0].concat("!" + File.separator).concat(elements[1]);
                    try {
                        String url = "jar:file:" + inputFile;
                        url = url.replaceAll("\\" + File.separator, "/");
                        URL input = new URL(url);
                        String inputModel = CharStreams.toString(new InputStreamReader(input.openStream()));

                        // the digest is computed via Guava's Hashing rather than a raw MessageDigest
                        String digest = Hashing.md5().hashString(inputModel, Charset.forName("UTF-8"))
                                .toString();

                        writeLine(s + INPUT_STATE_SEPARATOR + digest);
                    } catch (IOException e) {
                        Log.warn("0xA0134 Cannot write to log file", e);
                    }

                } else {
                    File inputFile = new File(elements[0].concat(File.separator).concat(elements[1]));
                    if (inputFile.exists()) {
                        checkSum = IncrementalChecker.getChecksum(inputFile.toString());
                        writeLine(s + INPUT_STATE_SEPARATOR + checkSum);
                    } else {
                        writeLine(s + INPUT_STATE_SEPARATOR + MISSING);
                    }
                }
            } else {
                checkSum = IncrementalChecker.getChecksum(s);
                writeLine(s + INPUT_STATE_SEPARATOR + checkSum);
            }
        }

        writeHWCFileHeading();
        for (String hwc : hwcFiles) {
            writeLine(hwc);
        }

        writeOutputFileHeading();
        for (String s : outputFiles) {
            writeLine(s);
        }
    }
}