Example usage for org.apache.commons.codec.digest DigestUtils shaHex

Introduction

This page collects example usages of org.apache.commons.codec.digest.DigestUtils.shaHex(String) from open-source projects.

Prototype

@Deprecated
public static String shaHex(String data)


Usage
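
Before the project examples, a minimal sketch of calling the method directly. This assumes Commons Codec is on the classpath; shaHex(String) is deprecated (since Codec 1.11) in favor of sha1Hex(String), which returns the same 40-character SHA-1 hex digest.

import org.apache.commons.codec.digest.DigestUtils;

public class ShaHexExample {
    public static void main(String[] args) {
        String data = "hello world";

        // Deprecated form: SHA-1 over the UTF-8 bytes of the input,
        // returned as a 40-character lowercase hex string.
        String legacy = DigestUtils.shaHex(data);

        // Preferred replacement with identical output.
        String current = DigestUtils.sha1Hex(data);

        System.out.println(legacy);                  // 2aae6c35c94fcfb415dbe95f408b9ce91ee846ed
        System.out.println(legacy.equals(current));  // true
    }
}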

From source file:org.apache.directory.studio.ldapbrowser.core.jobs.ImportLdifRunnable.java

/**
 * {@inheritDoc}
 */
public Object[] getLockedObjects() {
    List<Object> l = new ArrayList<Object>();
    l.add(browserConnection.getUrl() + "_" + DigestUtils.shaHex(ldifFile.toString())); //$NON-NLS-1$
    return l.toArray();
}

From source file:org.apache.hadoop.hdfs.server.datanode.DiskBalancer.java

/**
 * Verifies that plan matches the SHA-1 provided by the client.
 *
 * @param planID - SHA-1 Hex Bytes
 * @param plan   - Plan String
 * @throws DiskBalancerException
 */
private NodePlan verifyPlanHash(String planID, String plan) throws DiskBalancerException {
    final long sha1Length = 40;
    if (plan == null || plan.length() == 0) {
        LOG.error("Disk Balancer -  Invalid plan.");
        throw new DiskBalancerException("Invalid plan.", DiskBalancerException.Result.INVALID_PLAN);
    }

    if ((planID == null) || (planID.length() != sha1Length)
            || !DigestUtils.shaHex(plan.getBytes(Charset.forName("UTF-8"))).equalsIgnoreCase(planID)) {
        LOG.error("Disk Balancer - Invalid plan hash.");
        throw new DiskBalancerException("Invalid or mis-matched hash.",
                DiskBalancerException.Result.INVALID_PLAN_HASH);
    }

    try {
        return NodePlan.parseJson(plan);
    } catch (IOException ex) {
        throw new DiskBalancerException("Parsing plan failed.", ex,
                DiskBalancerException.Result.MALFORMED_PLAN);
    }
}
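
The check above recomputes the digest from the plan's UTF-8 bytes, while the callers shown below hash the plan String directly; both produce the same value because shaHex(String) encodes its argument as UTF-8 internally. A small sketch of that equivalence, using a hypothetical plan payload rather than a real Hadoop plan:

import java.nio.charset.StandardCharsets;

import org.apache.commons.codec.digest.DigestUtils;

public class PlanHashSketch {
    public static void main(String[] args) {
        String planJson = "{\"nodeName\":\"dn1\",\"port\":9867}";  // hypothetical plan JSON

        // Client side: hash the String (UTF-8 encoding happens inside shaHex).
        String planID = DigestUtils.shaHex(planJson);

        // Server side: recompute from explicit UTF-8 bytes, as verifyPlanHash does.
        String recomputed = DigestUtils.shaHex(planJson.getBytes(StandardCharsets.UTF_8));

        System.out.println(planID.length());                      // 40, the SHA-1 hex length checked above
        System.out.println(planID.equalsIgnoreCase(recomputed));  // true
    }
}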

From source file:org.apache.hadoop.hdfs.server.diskbalancer.command.CancelCommand.java

/**
 * Cancels a running plan.
 *
 * @param planData - Plan data.
 * @throws IOException
 */
private void cancelPlan(String planData) throws IOException {
    Preconditions.checkNotNull(planData);
    NodePlan plan = NodePlan.parseJson(planData);
    String dataNodeAddress = plan.getNodeName() + ":" + plan.getPort();
    Preconditions.checkNotNull(dataNodeAddress);
    ClientDatanodeProtocol dataNode = getDataNodeProxy(dataNodeAddress);
    String planHash = DigestUtils.shaHex(planData);
    try {
        dataNode.cancelDiskBalancePlan(planHash);
    } catch (DiskBalancerException ex) {
        LOG.error("Cancelling plan on  {} failed. Result: {}, Message: {}", plan.getNodeName(),
                ex.getResult().toString(), ex.getMessage());
        throw ex;
    }
}

From source file:org.apache.hadoop.hdfs.server.diskbalancer.command.ExecuteCommand.java

/**
 * Submits plan to a given data node.
 *
 * @param planFile - Plan file name
 * @param planData - Plan data in json format
 * @throws IOException
 */
private void submitPlan(final String planFile, final String planData) throws IOException {
    Preconditions.checkNotNull(planData);
    NodePlan plan = NodePlan.parseJson(planData);
    String dataNodeAddress = plan.getNodeName() + ":" + plan.getPort();
    Preconditions.checkNotNull(dataNodeAddress);
    ClientDatanodeProtocol dataNode = getDataNodeProxy(dataNodeAddress);
    String planHash = DigestUtils.shaHex(planData);
    try {
        // TODO : Support skipping date check.
        dataNode.submitDiskBalancerPlan(planHash, DiskBalancer.PLAN_VERSION, planFile, planData, false);
    } catch (DiskBalancerException ex) {
        LOG.error("Submitting plan on  {} failed. Result: {}, Message: {}", plan.getNodeName(),
                ex.getResult().toString(), ex.getMessage());
        throw ex;
    }
}

From source file:org.apache.hadoop.hdfs.server.diskbalancer.TestDiskBalancer.java

/**
 * This test simulates a real Data node working with DiskBalancer.
 * <p>
 * Here is the overview of this test.
 * <p>
 * 1. Write a bunch of blocks and move them to one disk to create imbalance.
 * 2. Rewrite the capacity of the disks in the DiskBalancer model so that the
 *    planner will produce a move plan.
 * 3. Execute the move plan and wait until the plan is done.
 * 4. Verify the source disk has blocks now.
 *
 * @throws Exception
 */
@Test
public void testDiskBalancerEndToEnd() throws Exception {
    Configuration conf = new HdfsConfiguration();
    final int defaultBlockSize = 100;
    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, defaultBlockSize);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    final int numDatanodes = 1;
    final String fileName = "/tmp.txt";
    final Path filePath = new Path(fileName);
    final int blocks = 100;
    final int blocksSize = 1024;
    final int fileLen = blocks * blocksSize;

    // Write a file and restart the cluster
    long[] capacities = new long[] { defaultBlockSize * 2 * fileLen, defaultBlockSize * 2 * fileLen };
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
            .storageCapacities(capacities)
            .storageTypes(new StorageType[] { StorageType.DISK, StorageType.DISK }).storagesPerDatanode(2)
            .build();
    FsVolumeImpl source = null;
    FsVolumeImpl dest = null;
    try {
        cluster.waitActive();
        Random r = new Random();
        FileSystem fs = cluster.getFileSystem(0);
        TestBalancer.createFile(cluster, filePath, fileLen, (short) 1, numDatanodes - 1);

        DFSTestUtil.waitReplication(fs, filePath, (short) 1);
        cluster.restartDataNodes();
        cluster.waitActive();

        // Get the data node and move all data to one disk.
        DataNode dnNode = cluster.getDataNodes().get(numDatanodes - 1);
        try (FsDatasetSpi.FsVolumeReferences refs = dnNode.getFSDataset().getFsVolumeReferences()) {
            source = (FsVolumeImpl) refs.get(0);
            dest = (FsVolumeImpl) refs.get(1);
            assertTrue(DiskBalancerTestUtil.getBlockCount(source) > 0);
            DiskBalancerTestUtil.moveAllDataToDestVolume(dnNode.getFSDataset(), source, dest);
            assertTrue(DiskBalancerTestUtil.getBlockCount(source) == 0);
        }

        cluster.restartDataNodes();
        cluster.waitActive();

        // Start up a disk balancer and read the cluster info.
        final DataNode newDN = cluster.getDataNodes().get(numDatanodes - 1);
        ClusterConnector nameNodeConnector = ConnectorFactory.getCluster(cluster.getFileSystem(0).getUri(),
                conf);

        DiskBalancerCluster diskBalancerCluster = new DiskBalancerCluster(nameNodeConnector);
        diskBalancerCluster.readClusterInfo();
        List<DiskBalancerDataNode> nodesToProcess = new LinkedList<>();

        // Rewrite the capacity in the model to show that disks need
        // re-balancing.
        setVolumeCapacity(diskBalancerCluster, defaultBlockSize * 2 * fileLen, "DISK");
        // Pick a node to process.
        nodesToProcess.add(diskBalancerCluster.getNodeByUUID(dnNode.getDatanodeUuid()));
        diskBalancerCluster.setNodesToProcess(nodesToProcess);

        // Compute a plan.
        List<NodePlan> clusterplan = diskBalancerCluster.computePlan(0.0f);

        // Now we must have a plan, since the node is imbalanced and we
        // asked the disk balancer to create a plan.
        assertTrue(clusterplan.size() == 1);

        NodePlan plan = clusterplan.get(0);
        plan.setNodeUUID(dnNode.getDatanodeUuid());
        plan.setTimeStamp(Time.now());
        String planJson = plan.toJson();
        String planID = DigestUtils.shaHex(planJson);
        assertNotNull(plan.getVolumeSetPlans());
        assertTrue(plan.getVolumeSetPlans().size() > 0);
        plan.getVolumeSetPlans().get(0).setTolerancePercent(10);

        // Submit the plan and wait till the execution is done.
        newDN.submitDiskBalancerPlan(planID, 1, PLAN_FILE, planJson, false);
        String jmxString = newDN.getDiskBalancerStatus();
        assertNotNull(jmxString);
        DiskBalancerWorkStatus status = DiskBalancerWorkStatus.parseJson(jmxString);
        DiskBalancerWorkStatus realStatus = newDN.queryDiskBalancerPlan();
        assertEquals(realStatus.getPlanID(), status.getPlanID());

        GenericTestUtils.waitFor(new Supplier<Boolean>() {
            @Override
            public Boolean get() {
                try {
                    return newDN.queryDiskBalancerPlan().getResult() == DiskBalancerWorkStatus.Result.PLAN_DONE;
                } catch (IOException ex) {
                    return false;
                }
            }
        }, 1000, 100000);

        // Verify that it worked.
        dnNode = cluster.getDataNodes().get(numDatanodes - 1);
        assertEquals(dnNode.queryDiskBalancerPlan().getResult(), DiskBalancerWorkStatus.Result.PLAN_DONE);
        try (FsDatasetSpi.FsVolumeReferences refs = dnNode.getFSDataset().getFsVolumeReferences()) {
            source = (FsVolumeImpl) refs.get(0);
            assertTrue(DiskBalancerTestUtil.getBlockCount(source) > 0);
        }

        // Tolerance
        long delta = (plan.getVolumeSetPlans().get(0).getBytesToMove() * 10) / 100;
        assertTrue((DiskBalancerTestUtil.getBlockCount(source) * defaultBlockSize + delta) >= plan
                .getVolumeSetPlans().get(0).getBytesToMove());

    } finally {
        cluster.shutdown();
    }
}

From source file:org.apache.hadoop.hdfs.server.diskbalancer.TestDiskBalancer.java

@Test(timeout = 60000)
public void testBalanceDataBetweenMultiplePairsOfVolumes() throws Exception {
    Configuration conf = new HdfsConfiguration();
    final int DEFAULT_BLOCK_SIZE = 2048;
    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    final int NUM_DATANODES = 1;
    final long CAP = 512 * 1024;
    final Path testFile = new Path("/testfile");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES)
            .storageCapacities(new long[] { CAP, CAP, CAP, CAP }).storagesPerDatanode(4).build();
    try {
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        TestBalancer.createFile(cluster, testFile, CAP, (short) 1, 0);

        DFSTestUtil.waitReplication(fs, testFile, (short) 1);
        DataNode dnNode = cluster.getDataNodes().get(0);
        // Move data out of two volumes to make them empty.
        try (FsDatasetSpi.FsVolumeReferences refs = dnNode.getFSDataset().getFsVolumeReferences()) {
            assertEquals(4, refs.size());
            for (int i = 0; i < refs.size(); i += 2) {
                FsVolumeImpl source = (FsVolumeImpl) refs.get(i);
                FsVolumeImpl dest = (FsVolumeImpl) refs.get(i + 1);
                assertTrue(DiskBalancerTestUtil.getBlockCount(source) > 0);
                DiskBalancerTestUtil.moveAllDataToDestVolume(dnNode.getFSDataset(), source, dest);
                assertTrue(DiskBalancerTestUtil.getBlockCount(source) == 0);
            }
        }

        cluster.restartDataNodes();
        cluster.waitActive();

        // Start up a disk balancer and read the cluster info.
        final DataNode dataNode = cluster.getDataNodes().get(0);
        ClusterConnector nameNodeConnector = ConnectorFactory.getCluster(cluster.getFileSystem(0).getUri(),
                conf);

        DiskBalancerCluster diskBalancerCluster = new DiskBalancerCluster(nameNodeConnector);
        diskBalancerCluster.readClusterInfo();
        List<DiskBalancerDataNode> nodesToProcess = new LinkedList<>();
        // Rewrite the capacity in the model to show that disks need
        // re-balancing.
        setVolumeCapacity(diskBalancerCluster, CAP, "DISK");
        nodesToProcess.add(diskBalancerCluster.getNodeByUUID(dataNode.getDatanodeUuid()));
        diskBalancerCluster.setNodesToProcess(nodesToProcess);

        // Compute a plan.
        List<NodePlan> clusterPlan = diskBalancerCluster.computePlan(10.0f);

        NodePlan plan = clusterPlan.get(0);
        assertEquals(2, plan.getVolumeSetPlans().size());
        plan.setNodeUUID(dnNode.getDatanodeUuid());
        plan.setTimeStamp(Time.now());
        String planJson = plan.toJson();
        String planID = DigestUtils.shaHex(planJson);

        dataNode.submitDiskBalancerPlan(planID, 1, PLAN_FILE, planJson, false);

        GenericTestUtils.waitFor(new Supplier<Boolean>() {
            @Override
            public Boolean get() {
                try {
                    return dataNode.queryDiskBalancerPlan()
                            .getResult() == DiskBalancerWorkStatus.Result.PLAN_DONE;
                } catch (IOException ex) {
                    return false;
                }
            }
        }, 1000, 100000);
        assertEquals(dataNode.queryDiskBalancerPlan().getResult(), DiskBalancerWorkStatus.Result.PLAN_DONE);

        try (FsDatasetSpi.FsVolumeReferences refs = dataNode.getFSDataset().getFsVolumeReferences()) {
            for (FsVolumeSpi vol : refs) {
                assertTrue(DiskBalancerTestUtil.getBlockCount(vol) > 0);
            }
        }
    } finally {
        cluster.shutdown();
    }
}

From source file:org.apache.hadoop.hdfs.server.diskbalancer.TestDiskBalancerWithMockMover.java

private void executeSubmitPlan(NodePlan plan, DiskBalancer balancer, int version) throws IOException {
    String planJson = plan.toJson();
    String planID = DigestUtils.shaHex(planJson);
    balancer.submitPlan(planID, version, PLAN_FILE, planJson, false);
}
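
The tests below also call a two-argument executeSubmitPlan(plan, balancer). That overload is not shown on this page; presumably it delegates to the three-argument form with the default plan version, roughly along these lines (a sketch, not copied from the Hadoop sources):

private void executeSubmitPlan(NodePlan plan, DiskBalancer balancer) throws IOException {
    // Assumed convenience overload: submit with plan version 1, matching the
    // version used by the direct submitDiskBalancerPlan calls in these tests.
    executeSubmitPlan(plan, balancer, 1);
}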

From source file:org.apache.hadoop.hdfs.server.diskbalancer.TestDiskBalancerWithMockMover.java

@Test
public void testSubmitWithNullPlan() throws Exception {
    MockMoverHelper mockMoverHelper = new MockMoverHelper().invoke();
    NodePlan plan = mockMoverHelper.getPlan();
    DiskBalancer balancer = mockMoverHelper.getBalancer();
    String planJson = plan.toJson();
    String planID = DigestUtils.shaHex(planJson);

    thrown.expect(DiskBalancerException.class);
    thrown.expect(new DiskBalancerResultVerifier(DiskBalancerException.Result.INVALID_PLAN));

    balancer.submitPlan(planID, 1, "no-plan-file.json", null, false);
}

From source file:org.apache.hadoop.hdfs.server.diskbalancer.TestDiskBalancerWithMockMover.java

@Test
public void testSubmitWithInvalidHash() throws Exception {
    MockMoverHelper mockMoverHelper = new MockMoverHelper().invoke();
    NodePlan plan = mockMoverHelper.getPlan();
    DiskBalancer balancer = mockMoverHelper.getBalancer();

    String planJson = plan.toJson();
    String planID = DigestUtils.shaHex(planJson);
    char repChar = planID.charAt(0);
    repChar++;

    thrown.expect(DiskBalancerException.class);
    thrown.expect(new DiskBalancerResultVerifier(DiskBalancerException.Result.INVALID_PLAN_HASH));
    balancer.submitPlan(planID.replace(planID.charAt(0), repChar), 1, PLAN_FILE, planJson, false);

}

From source file:org.apache.hadoop.hdfs.server.diskbalancer.TestDiskBalancerWithMockMover.java

/**
 * Test Cancel Plan.
 *
 * @throws Exception
 */
@Test
public void testCancelDiskBalancerPlan() throws Exception {
    MockMoverHelper mockMoverHelper = new MockMoverHelper().invoke();
    NodePlan plan = mockMoverHelper.getPlan();
    DiskBalancer balancer = mockMoverHelper.getBalancer();

    // ask block mover to delay execution
    mockMoverHelper.getBlockMover().setSleep();
    executeSubmitPlan(plan, balancer);

    String planJson = plan.toJson();
    String planID = DigestUtils.shaHex(planJson);
    balancer.cancelPlan(planID);

    DiskBalancerWorkStatus status = balancer.queryWorkStatus();
    assertEquals(DiskBalancerWorkStatus.Result.PLAN_CANCELLED, status.getResult());

    executeSubmitPlan(plan, balancer);

    // Send a Wrong cancellation request.
    char first = planID.charAt(0);
    first++;
    thrown.expect(DiskBalancerException.class);
    thrown.expect(new DiskBalancerResultVerifier(DiskBalancerException.Result.NO_SUCH_PLAN));
    balancer.cancelPlan(planID.replace(planID.charAt(0), first));

    // Now cancel the real one
    balancer.cancelPlan(planID);
    mockMoverHelper.getBlockMover().clearSleep(); // unblock mover.

    status = balancer.queryWorkStatus();
    assertEquals(DiskBalancerWorkStatus.Result.PLAN_CANCELLED, status.getResult());

}