Usage examples for org.apache.commons.codec.digest.DigestUtils.md5
public static byte[] md5(byte[] data)
public static byte[] md5(String data)
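Both overloads return the raw 16-byte MD5 digest of the input; the String overload first converts the text to bytes (UTF-8 in Commons Codec 1.4 and later). A minimal, self-contained sketch of the calls (the class name and input string are arbitrary, chosen for illustration):

    import org.apache.commons.codec.binary.Hex;
    import org.apache.commons.codec.digest.DigestUtils;

    public class Md5Example {
        public static void main(String[] args) {
            // md5(String) converts the text to bytes, then hashes; md5(byte[]) hashes the bytes directly
            byte[] digest = DigestUtils.md5("hello world");
            // an MD5 digest is always 16 bytes; hex-encode it for display
            System.out.println(Hex.encodeHexString(digest)); // prints 5eb63bbbe01eeed093cb22bb8f5acdc3
            // md5Hex(...) combines both steps and returns the hex string directly
            System.out.println(DigestUtils.md5Hex("hello world"));
        }
    }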
From source file:org.stem.IntegrationTestBase.java
protected List<byte[]> generateRandomLoad(int blobsNum) {
    List<byte[]> generatedKeys = new ArrayList<byte[]>(blobsNum);
    for (int i = 0; i < blobsNum; i++) {
        byte[] data = TestUtils.generateRandomBlob(65536);
        byte[] key = DigestUtils.md5(data);
        client.put(key, data);
        generatedKeys.add(key);
        System.out.println(String.format("key 0x%s generated", Hex.encodeHexString(key)));
    }
    return generatedKeys;
}
From source file:org.stem.IntegrationTestBase.java
protected List<byte[]> generateStaticLoad(int blobsNum) {
    List<byte[]> generatedKeys = new ArrayList<byte[]>(blobsNum);
    for (int i = 0; i < blobsNum; i++) {
        byte[] data = TestUtils.generateZeroBlob(65536);
        data[i] = 1;
        byte[] key = DigestUtils.md5(data);
        client.put(key, data);
        generatedKeys.add(key);
        System.out.println(String.format("key 0x%s generated", Hex.encodeHexString(key)));
    }
    return generatedKeys;
}
From source file:org.stem.ProtocolTest.java
@Test
public void testDelete() throws Exception {
    clusterManagerClient.computeMapping();
    StemClient client = new StemClient();
    client.start();
    byte[] in = TestUtils.generateRandomBlob(65536);
    byte[] key = DigestUtils.md5(in);
    client.put(key, in);
    byte[] out = client.get(key);
    // STEM is storage, not a random generator: data read back must equal data written
    Assert.assertArrayEquals(in, out);
    client.delete(key);
    // attempt to read the deleted key
    client.get(key);
}
From source file:org.stem.ProtocolTest.java
@Test
@Ignore // TODO: ignored because it runs endlessly
public void testStorageNodeWritePerformance() throws Exception {
    StorageNodeClient client = new StorageNodeClient(host, port);
    client.start();
    byte[] blob = TestUtils.generateRandomBlob(65536);
    byte[] key = DigestUtils.md5(blob);
    UUID disk = Layout.getInstance().getMountPoints().keySet().iterator().next();
    WriteBlobMessage op = new WriteBlobMessage();
    op.disk = disk;
    op.key = key;
    op.blob = blob;
    long start, duration;
    long i = 0;
    start = System.nanoTime();
    while (true) {
        i++;
        Message.Response resp = client.execute(op);
        if (i % 1000 == 0) {
            duration = System.nanoTime() - start;
            long rps = i * 1000000000 / duration;
            System.out.println(String.format("%s req/s, %s/s", rps,
                    FileUtils.byteCountToDisplaySize(rps * blob.length)));
            start = System.nanoTime();
            i = 0;
        }
    }
}
From source file:org.stem.ProtocolTest.java
@Test
@Ignore // TODO: ignored because it runs endlessly
public void testClusterWritePerformance() throws Exception {
    Logger.getLogger("io.netty").setLevel(Level.OFF);
    clusterManagerClient.computeMapping();
    StemClient client = new StemClient();
    client.start();
    byte[] blob = TestUtils.generateRandomBlob(65536);
    byte[] key = DigestUtils.md5(blob);
    long start, duration;
    long i = 0;
    start = System.nanoTime();
    while (true) {
        i++;
        client.put(key, blob);
        if (i % 1000 == 0) {
            duration = System.nanoTime() - start;
            long rps = i * 1000000000 / duration;
            System.out.println(String.format("%s req/s, %s/s", rps,
                    FileUtils.byteCountToDisplaySize(rps * blob.length)));
            start = System.nanoTime();
            i = 0;
        }
    }
}
From source file:org.stem.ProtocolTest.java
@Test
public void testClusterWrite() throws Exception {
    clusterManagerClient.computeMapping();
    StemClient client = new StemClient();
    client.start();
    byte[] in = TestUtils.generateRandomBlob(65536);
    byte[] key = DigestUtils.md5(in);
    client.put(key, in);
    byte[] out = client.get(key);
    // STEM is storage, not a random generator: data read back must equal data written
    Assert.assertArrayEquals(in, out);
}
From source file:org.stem.ProtocolTest.java
@Test
@Ignore // TODO: ignored because it runs endlessly
public void testMultiSourcesWritePerformance() throws Exception {
    StorageNodeClient client = new StorageNodeClient(host, port);
    client.start();
    byte[] blob = TestUtils.generateRandomBlob(65536);
    byte[] key = DigestUtils.md5(blob);
    Set<UUID> disks = Layout.getInstance().getMountPoints().keySet();
    List<WriteBlobMessage> messages = new ArrayList<WriteBlobMessage>(disks.size());
    for (UUID disk : disks) {
        WriteBlobMessage op = new WriteBlobMessage();
        op.disk = disk;
        op.key = key;
        op.blob = blob;
        messages.add(op);
    }
    int threadsNum = messages.size();
    ExecutorService service = Executors.newFixedThreadPool(threadsNum);
    for (int j = 0; j < threadsNum; ++j) {
        ClientThread clientThread = new ClientThread(messages.get(j), j);
        service.submit(clientThread);
    }
    service.shutdown();
    service.awaitTermination(10, TimeUnit.MINUTES);
}
From source file:org.stem.StreamingTest.java
private void checkDataAvailability(List<byte[]> keysGenerated) {
    int i = 1;
    int broken = 0;
    List<ExtendedBlobDescriptor> brokenDescriptors = new ArrayList<ExtendedBlobDescriptor>();
    for (byte[] keyOrig : keysGenerated) {
        String endpoint = client.getFirstEndpointForKey(keyOrig);
        ExtendedBlobDescriptor descriptor = client.getFirstDescriptorForKey(keyOrig);
        byte[] data = client.get(keyOrig);
        // re-hash the returned blob; a healthy blob's digest equals its key
        byte[] keyActual = DigestUtils.md5(data);
        if (keyActual[0] != keyOrig[0]) {
            broken++;
            brokenDescriptors.add(descriptor);
        }
        Assert.assertArrayEquals(String.format("blob #%s[%s] (%s) located on %s is corrupted", i++,
                data.length, Hex.encodeHexString(keyOrig), endpoint), keyOrig, keyActual);
    }
    // for (ExtendedBlobDescriptor d : brokenDescriptors)
    // {
    //     System.out.println("0x" + Hex.encodeHexString(d.getKey()) + " is broken");
    // }
}
From source file:org.talend.spark.operation.HBaseStore.java
public static void run(String zookeeperHost, String zookeeperPort, String table, final String columns,
        Map<String, String> properties, TalendRDD<List<Object>> rdd, final List<Integer> keyList)
        throws IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", zookeeperHost);
    conf.set("hbase.zookeeper.property.clientPort", zookeeperPort);
    conf.set("hbase.mapred.tablecolumns", columns);
    for (Entry<String, String> e : properties.entrySet()) {
        conf.set(e.getKey(), e.getValue());
    }
    TalendPairRDD<ImmutableBytesWritable, Put> hbaseRdd = rdd
            .mapToPair(new PairFunction<List<Object>, ImmutableBytesWritable, Put>() {
                private static final long serialVersionUID = 1L;

                public Tuple2<ImmutableBytesWritable, Put> call(List<Object> t) throws Exception {
                    // concatenate the configured key columns; fall back to the whole row if none are set
                    String key = "";
                    for (int i : keyList) {
                        key = key + t.get(i);
                    }
                    // the HBase row key is the MD5 digest of the concatenated key
                    Put put = new Put(DigestUtils.md5("".equals(key) ? t.toString() : key));
                    String[] cols = columns.split(" ");
                    int i = 0;
                    for (Object o : t) {
                        if (cols.length > i) {
                            put.add(org.apache.hadoop.hbase.util.Bytes.toBytes(cols[i].split(":")[0]),
                                    org.apache.hadoop.hbase.util.Bytes.toBytes(cols[i].split(":")[1]),
                                    (o != null ? org.apache.hadoop.hbase.util.Bytes.toBytes(o.toString()) : null));
                        }
                        i++;
                    }
                    return new Tuple2<ImmutableBytesWritable, Put>(new ImmutableBytesWritable(), put);
                }
            });
    JobConf config = new JobConf(conf);
    config.set(TableOutputFormat.OUTPUT_TABLE, table);
    config.setOutputFormat(TableOutputFormat.class);
    hbaseRdd.saveAsHadoopDataset(config);
}
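Hashing the concatenated key columns into an MD5 digest is a common HBase row-key pattern: it yields fixed-length, uniformly distributed keys that spread writes across regions, at the cost of losing meaningful range-scan ordering.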
From source file:org.tinymediamanager.core.ImageCache.java
/**
 * Gets the file name (MD5 hash) of the cached file.
 *
 * @param path
 *          the path or URL of the original image
 * @return the cached file name
 */
public static String getMD5(String path) {
    try {
        if (path == null) {
            return null;
        }
        // now uses a simple md5 hash, which should have a fairly low collision
        // rate, especially for our limited use
        byte[] key = DigestUtils.md5(path);
        return new String(Hex.encodeHex(key));
    } catch (Exception e) {
        LOGGER.error("Failed to create cached filename for image: " + path, e);
        throw new RuntimeException(e);
    }
}
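Where only the hex string is needed, as here, Commons Codec also offers a one-call form; assuming the same path variable, the hash-then-encode pair above could be reduced to:

    return DigestUtils.md5Hex(path);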