List of usage examples for com.google.common.hash Hashing md5
public static HashFunction md5()
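Per the Guava Javadoc, md5() returns a hash function implementing the MD5 algorithm (128 hash bits); newer Guava releases deprecate it as neither fast nor cryptographically secure, keeping it only for interoperability with systems that require MD5. A minimal, self-contained sketch of the basic call patterns (the class name Md5Examples is made up for illustration):

import com.google.common.hash.HashCode;
import com.google.common.hash.HashFunction;
import com.google.common.hash.Hashing;
import java.nio.charset.StandardCharsets;

public class Md5Examples {
    public static void main(String[] args) {
        HashFunction md5 = Hashing.md5();

        // One-shot: encode the chars as UTF-8, then hash.
        HashCode fromString = md5.hashString("hello", StandardCharsets.UTF_8);

        // One-shot over raw bytes.
        HashCode fromBytes = md5.hashBytes(new byte[] {1, 2, 3});

        // Incremental, via a stateful Hasher.
        HashCode incremental = md5.newHasher()
                .putLong(42L)
                .putString("hello", StandardCharsets.UTF_8)
                .hash();

        System.out.println(fromString);  // prints the digest as lowercase hex
        System.out.println(fromBytes);
        System.out.println(incremental);
    }
}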
From source file:com.facebook.buck.zip.OverwritingZipOutputStreamImpl.java
@Override
public void actuallyPutNextEntry(ZipEntry entry) throws IOException {
    // We calculate the actual offset when closing the stream, so 0 is fine.
    currentEntry = new EntryAccounting(clock, entry, /* currentOffset */ 0);

    long md5 = Hashing.md5().hashUnencodedChars(entry.getName()).asLong();
    String name = String.valueOf(md5);

    File file = new File(scratchDir, name);
    entries.put(file, currentEntry);
    if (file.exists() && !file.delete()) {
        throw new ZipException("Unable to delete existing file: " + entry.getName());
    }
    currentOutput = new BufferedOutputStream(new FileOutputStream(file));
}
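This snippet uses hashUnencodedChars, which hashes the string's UTF-16 code units directly rather than encoding them through a charset first, so it generally yields a different digest than hashString for the same input. A quick sketch of the distinction (the entry name is illustrative):

// hashUnencodedChars digests each char as a little-endian UTF-16 code unit;
// hashString first encodes the chars with the given charset.
long fromChars = Hashing.md5().hashUnencodedChars("entry.txt").asLong();
long fromUtf8 = Hashing.md5().hashString("entry.txt", StandardCharsets.UTF_8).asLong();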
From source file:com.globo.galeb.consistenthash.HashAlgorithm.java
/**
 * Calculates the hash of the given key using the configured algorithm.
 *
 * @param key the key to hash
 * @return this HashAlgorithm, with its internal hash code updated
 */
public HashAlgorithm hash(Object key) {
    HashFunction hashAlgorithm;
    switch (hashType) {
    case MD5:
        hashAlgorithm = Hashing.md5();
        break;
    case MURMUR3_32:
        hashAlgorithm = Hashing.murmur3_32();
        break;
    case SHA256:
        hashAlgorithm = Hashing.sha256();
        break;
    case SIP24:
    default:
        hashAlgorithm = Hashing.sipHash24();
        break;
    }
    if (key instanceof String) {
        hashCode = hashAlgorithm.newHasher().putString((String) key, Charsets.UTF_8).hash();
    } else if (key instanceof Long) {
        hashCode = hashAlgorithm.newHasher().putLong((Long) key).hash();
    } else {
        hashCode = hashAlgorithm.newHasher().hash();
    }
    return this;
}
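Given that this class lives in a consistent-hash package, it may help to see Guava's companion bucket mapper, which consumes a HashCode like the one computed above. A hedged sketch (the key and the bucket count of 8 are made-up example values):

import com.google.common.hash.HashCode;
import com.google.common.hash.Hashing;
import java.nio.charset.StandardCharsets;

public class BucketExample {
    public static void main(String[] args) {
        HashCode hashCode = Hashing.sipHash24().hashString("backend-key", StandardCharsets.UTF_8);
        // Hashing.consistentHash minimizes key remapping when the bucket count grows.
        int bucket = Hashing.consistentHash(hashCode, 8);
        System.out.println(bucket);  // a value in [0, 8)
    }
}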
From source file:pt.ist.fenix.ui.spring.controller.ApiServiceAgreementController.java
@RequestMapping(method = RequestMethod.GET, value = "show")
public String showServiceAgreementHtml(Model model, HttpServletRequest request) {
    String serviceAgreementHtml = getServiceAgreementHtml();
    model.addAttribute("serviceAgreement", serviceAgreementHtml);
    model.addAttribute("serviceAgreementChecksum",
            Hashing.md5().newHasher().putString(serviceAgreementHtml, Charsets.UTF_8).hash().toString());
    PortalLayoutInjector.skipLayoutOn(request);
    return "fenix-ist/public/showServiceAgreement";
}
From source file:pt.ist.fenixedu.integration.ui.spring.controller.ApiServiceAgreementController.java
@RequestMapping(method = RequestMethod.GET, value = "show")
public String showServiceAgreementHtml(Model model, HttpServletRequest request) {
    String serviceAgreementHtml = getServiceAgreementHtml();
    model.addAttribute("serviceAgreement", serviceAgreementHtml);
    model.addAttribute("serviceAgreementChecksum",
            Hashing.md5().newHasher().putString(serviceAgreementHtml, Charsets.UTF_8).hash().toString());
    PortalLayoutInjector.skipLayoutOn(request);
    return "fenixedu-ist-integration/public/showServiceAgreement";
}
From source file:org.apache.accumulo.core.sample.AbstractHashSampler.java
/**
 * Subclasses with options should override this method and call {@code super.init(config)}.
 */
@Override
public void init(SamplerConfiguration config) {
    String hasherOpt = config.getOptions().get("hasher");
    String modulusOpt = config.getOptions().get("modulus");

    requireNonNull(hasherOpt, "Hasher not specified");
    requireNonNull(modulusOpt, "Modulus not specified");

    for (String option : config.getOptions().keySet()) {
        checkArgument(isValidOption(option), "Unknown option: %s", option);
    }

    switch (hasherOpt) {
    case "murmur3_32":
        hashFunction = Hashing.murmur3_32();
        break;
    case "md5":
        hashFunction = Hashing.md5();
        break;
    case "sha1":
        hashFunction = Hashing.sha1();
        break;
    default:
        throw new IllegalArgumentException("Unknown hasher " + hasherOpt);
    }

    modulus = Integer.parseInt(modulusOpt);
}
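Elsewhere in this sampler, the configured function and modulus decide which keys to keep: roughly one in every modulus items is accepted. A hedged sketch of that selection test (not Accumulo's exact code, just the idea):

// Accept roughly 1/modulus of all inputs: keep an item only when its
// hash falls into bucket zero.
boolean accept(byte[] data) {
    return Math.abs(hashFunction.hashBytes(data).asInt() % modulus) == 0;
}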
From source file:org.apache.beam.sdk.util.PackageUtil.java
/**
 * Compute and cache the attributes of a classpath element that we will need to stage it.
 *
 * @param classpathElement the file or directory to be staged.
 * @param stagingPath the base location for staged classpath elements.
 * @param overridePackageName if non-null, use the given value as the package name
 *     instead of generating one automatically.
 * @return a {@link PackageAttributes} containing metadata about the object to be staged.
 */
static PackageAttributes createPackageAttributes(File classpathElement, String stagingPath,
        String overridePackageName) {
    try {
        boolean directory = classpathElement.isDirectory();

        // Compute size and hash in one pass over file or directory.
        Hasher hasher = Hashing.md5().newHasher();
        OutputStream hashStream = Funnels.asOutputStream(hasher);
        CountingOutputStream countingOutputStream = new CountingOutputStream(hashStream);
        if (!directory) {
            // Files are staged as-is.
            Files.asByteSource(classpathElement).copyTo(countingOutputStream);
        } else {
            // Directories are recursively zipped.
            ZipFiles.zipDirectory(classpathElement, countingOutputStream);
        }
        long size = countingOutputStream.getCount();
        String hash = Base64Variants.MODIFIED_FOR_URL.encode(hasher.hash().asBytes());

        // Create the DataflowPackage with staging name and location.
        String uniqueName = getUniqueContentName(classpathElement, hash);
        String resourcePath = IOChannelUtils.resolve(stagingPath, uniqueName);
        DataflowPackage target = new DataflowPackage();
        target.setName(overridePackageName != null ? overridePackageName : uniqueName);
        target.setLocation(resourcePath);

        return new PackageAttributes(size, hash, directory, target);
    } catch (IOException e) {
        throw new RuntimeException("Package setup failure for " + classpathElement, e);
    }
}
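The size-and-hash-in-one-pass idiom above is useful on its own: a Hasher is wrapped as an OutputStream via Funnels.asOutputStream, and a CountingOutputStream tallies bytes as they stream through. A minimal standalone sketch assuming only Guava (the file path is illustrative):

import com.google.common.hash.Funnels;
import com.google.common.hash.Hasher;
import com.google.common.hash.Hashing;
import com.google.common.io.CountingOutputStream;
import com.google.common.io.Files;
import java.io.File;
import java.io.IOException;

public class SizeAndHash {
    public static void main(String[] args) throws IOException {
        File file = new File("/tmp/example.bin");  // illustrative path
        Hasher hasher = Hashing.md5().newHasher();
        try (CountingOutputStream out =
                new CountingOutputStream(Funnels.asOutputStream(hasher))) {
            // One pass over the file feeds both the byte counter and the digest.
            Files.asByteSource(file).copyTo(out);
            out.flush();
            System.out.println("size=" + out.getCount() + " md5=" + hasher.hash());
        }
    }
}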
From source file:org.dspace.ctask.replicate.CompareWithAIP.java
/**
 * Perform 'Compare with AIP' task.
 *
 * @param dso DSpace Object to perform on
 * @return integer which represents Curator return status
 * @throws IOException
 */
@Override
public int perform(DSpaceObject dso) throws AuthorizeException, IOException, SQLException {
    ReplicaManager repMan = ReplicaManager.instance();
    String id = dso.getHandle();
    status = Curator.CURATE_SUCCESS;
    result = "Checksums of local and remote agree";
    PackingSpec spec = repMan.packingSpec(dso);
    String objId = repMan.storageId(id, spec.getFormat());

    // First, make sure this object has an AIP in remote storage
    if (checkReplica(repMan, dso, spec)) {
        // generate an archive and calculate its checksum
        Path packDir = repMan.stage(repMan.storeGroupName(), id);
        Path archive = Packager.toPackage(dso, spec, packDir);
        // RLR recheck
        String chkSum = Files.hash(archive.toFile(), Hashing.md5()).toString();
        //String chkSum = HashCode.fromLong(Files.checksum(archive, "md5")).toString();
        //String chkSum = Utils.checksum(archive, "MD5");

        // compare with replica
        String repChkSum = repMan.objectAttribute(repMan.storeGroupName(), objId, "checksum");
        if (!chkSum.equals(repChkSum)) {
            report("Local and remote checksums differ for: " + id);
            report("Local: " + chkSum + " replica: " + repChkSum);
            result = "Checksums of local and remote differ for: " + id;
            status = Curator.CURATE_FAIL;
        } else {
            report("Local and remote checksums agree for: " + id);
        }

        // if a container, also perform an extent (count) audit - i.e.
        // does replica store have replicas for each object in container?
        if (Curator.isContainer(dso) || dso.getType() == Constants.SITE) {
            auditExtent(repMan, dso, spec);
        }
    }
    setResult(result);
    return status;
}
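Note that Files.hash(File, HashFunction), used for the checksum above, is deprecated in newer Guava releases; if the project upgrades, the equivalent call is a one-line swap:

// Modern Guava replacement for Files.hash(file, Hashing.md5())
String chkSum = com.google.common.io.Files.asByteSource(archive.toFile())
        .hash(Hashing.md5())
        .toString();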
From source file:org.apache.beam.runners.dataflow.util.PackageUtil.java
/**
 * Compute and cache the attributes of a classpath element that we will need to stage it.
 *
 * @param source the file or directory to be staged.
 * @param stagingPath the base location for staged classpath elements.
 * @param overridePackageName if non-null, use the given value as the package name
 *     instead of generating one automatically.
 * @return a {@link PackageAttributes} containing metadata about the object to be staged.
 */
static PackageAttributes createPackageAttributes(File source, String stagingPath,
        @Nullable String overridePackageName) {
    boolean directory = source.isDirectory();

    // Compute size and hash in one pass over file or directory.
    Hasher hasher = Hashing.md5().newHasher();
    OutputStream hashStream = Funnels.asOutputStream(hasher);
    try (CountingOutputStream countingOutputStream = new CountingOutputStream(hashStream)) {
        if (!directory) {
            // Files are staged as-is.
            Files.asByteSource(source).copyTo(countingOutputStream);
        } else {
            // Directories are recursively zipped.
            ZipFiles.zipDirectory(source, countingOutputStream);
        }
        countingOutputStream.flush();
        long size = countingOutputStream.getCount();
        String hash = Base64Variants.MODIFIED_FOR_URL.encode(hasher.hash().asBytes());

        // Create the DataflowPackage with staging name and location.
        String uniqueName = getUniqueContentName(source, hash);
        String resourcePath = FileSystems.matchNewResource(stagingPath, true)
                .resolve(uniqueName, StandardResolveOptions.RESOLVE_FILE).toString();
        DataflowPackage target = new DataflowPackage();
        target.setName(overridePackageName != null ? overridePackageName : uniqueName);
        target.setLocation(resourcePath);
        return new PackageAttributes(size, hash, directory, target, source.getPath());
    } catch (IOException e) {
        throw new RuntimeException("Package setup failure for " + source, e);
    }
}
From source file:sockslib.server.manager.HashPasswordProtector.java
private Hasher chooseHasher(HashAlgorithm algorithm) {
    Hasher hasher = null;
    switch (algorithm) {
    case MD5:
        hasher = Hashing.md5().newHasher();
        break;
    case SHA1:
        hasher = Hashing.sha1().newHasher();
        break;
    case SHA256:
        hasher = Hashing.sha256().newHasher();
        break;
    case SHA512:
        hasher = Hashing.sha512().newHasher();
        break;
    }
    return hasher;
}
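For context, a Hasher returned by chooseHasher would then be fed the password bytes and drained once with hash(). A hedged sketch of that consumption (the salt here is a made-up example, not sockslib's actual scheme):

Hasher hasher = chooseHasher(HashAlgorithm.MD5);
String digest = hasher
        .putString("s3cret", Charsets.UTF_8)
        .putBytes(new byte[] {0x01, 0x02})  // illustrative salt
        .hash()
        .toString();  // lowercase hex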
From source file:org.jclouds.azureblob.blobstore.strategy.AzureBlobBlockUploadStrategy.java
@Override
public String execute(String container, Blob blob) {
    String blobName = blob.getMetadata().getName();
    Payload payload = blob.getPayload();
    Long length = payload.getContentMetadata().getContentLength();
    checkNotNull(length,
            "please invoke payload.getContentMetadata().setContentLength(length) prior to azure block upload");
    checkArgument(length <= (MAX_NUMBER_OF_BLOCKS * MAX_BLOCK_SIZE));

    long offset = 0L;
    List<String> blockIds = Lists.newArrayList();
    int blockCount = 0;
    int totalBlocks = (int) Math.ceil(((double) length) / MAX_BLOCK_SIZE);
    long bytesWritten = 0;

    while (offset < length) {
        blockCount++;
        long chunkSize = MAX_BLOCK_SIZE;
        if (length - offset < MAX_BLOCK_SIZE) {
            chunkSize = length % MAX_BLOCK_SIZE;
        }
        bytesWritten += chunkSize;
        Payload block = slicer.slice(payload, offset, chunkSize);
        offset += MultipartUploadStrategy.MAX_BLOCK_SIZE;

        String blockName = blobName + "-" + offset + "-" + new SecureRandom().nextInt();
        byte[] blockIdBytes = Hashing.md5().hashBytes(blockName.getBytes()).asBytes();
        String blockId = BaseEncoding.base64().encode(blockIdBytes);
        blockIds.add(blockId);
        client.putBlock(container, blobName, blockId, block);
    }
    checkState(bytesWritten == length, "Wrote %s bytes, but we wanted to write %s bytes",
            bytesWritten, length);
    return client.putBlockList(container, blobName, blockIds);
}
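Deriving each block id from an MD5 digest has a practical upside: Azure requires every block id within a blob to decode to the same length, and a digest-based id is always exactly 16 bytes. A quick illustrative check of that invariant:

// MD5 digests are 128 bits, so every derived block id decodes to 16 bytes.
byte[] decoded = BaseEncoding.base64().decode(blockId);
assert decoded.length == 16;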