List of usage examples for com.google.common.hash.Hashing.sha1()
public static HashFunction sha1()
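Before the per-project examples, here is a minimal sketch of typical usage, assuming Guava is on the classpath; the class name and string values are illustrative only, not taken from the examples below.

import com.google.common.hash.HashCode;
import com.google.common.hash.HashFunction;
import com.google.common.hash.Hashing;
import java.nio.charset.StandardCharsets;

public class Sha1Example {
    public static void main(String[] args) {
        HashFunction sha1 = Hashing.sha1();

        // Hash a string; the charset must be supplied explicitly.
        HashCode fromString = sha1.hashString("hello", StandardCharsets.UTF_8);

        // Hash raw bytes.
        HashCode fromBytes = sha1.hashBytes(new byte[] {1, 2, 3});

        System.out.println(fromString.toString()); // 40-character hex digest
        System.out.println(fromBytes.toString());
    }
}

Note that recent Guava releases mark sha1() as deprecated in favor of stronger functions such as sha256(); it remains available for interoperating with systems that require SHA-1, which is how the examples below use it.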
From source file:org.apache.beam.sdk.io.gcp.testing.BigqueryMatcher.java
private String generateHash(@Nonnull List<TableRow> rows) {
    List<HashCode> rowHashes = Lists.newArrayList();
    for (TableRow row : rows) {
        List<String> cellsInOneRow = Lists.newArrayList();
        for (TableCell cell : row.getF()) {
            cellsInOneRow.add(Objects.toString(cell.getV()));
            Collections.sort(cellsInOneRow);
        }
        rowHashes.add(Hashing.sha1().hashString(cellsInOneRow.toString(), StandardCharsets.UTF_8));
    }
    return Hashing.combineUnordered(rowHashes).toString();
}
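The matcher above can hash each row independently because Hashing.combineUnordered produces the same combined code regardless of the order in which the per-row hashes arrive. A small standalone sketch of that behavior, with made-up row values:

import com.google.common.hash.HashCode;
import com.google.common.hash.Hashing;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class CombineUnorderedExample {
    public static void main(String[] args) {
        HashCode a = Hashing.sha1().hashString("row-a", StandardCharsets.UTF_8);
        HashCode b = Hashing.sha1().hashString("row-b", StandardCharsets.UTF_8);

        // The order of the inputs does not affect the combined result.
        HashCode combined1 = Hashing.combineUnordered(Arrays.asList(a, b));
        HashCode combined2 = Hashing.combineUnordered(Arrays.asList(b, a));

        System.out.println(combined1.equals(combined2)); // true
    }
}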
From source file:io.prestosql.operator.scalar.VarbinaryFunctions.java
@Description("compute sha1 hash")
@ScalarFunction
@SqlType(StandardTypes.VARBINARY)
public static Slice sha1(@SqlType(StandardTypes.VARBINARY) Slice slice) {
    return Slices.wrappedBuffer(Hashing.sha1().hashBytes(slice.getBytes()).asBytes());
}
From source file:com.android.utils.FileUtils.java
@NonNull
public static String sha1(@NonNull File file) throws IOException {
    return Hashing.sha1().hashBytes(Files.toByteArray(file)).toString();
}
From source file:org.apache.hadoop.hdfs.server.blockmanagement.HashBuckets.java
public static byte[] hash(long blockId, long generationStamp, long numBytes, int replicaState) {
    return Hashing.sha1().newHasher()
            .putLong(blockId)
            .putLong(generationStamp)
            .putLong(numBytes)
            .putInt(replicaState)
            .hash()
            .asBytes();
}
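As the HDFS example shows, a Hasher obtained from newHasher() accumulates several primitive fields into one digest. A minimal standalone sketch of that incremental pattern, with made-up field values:

import com.google.common.hash.HashCode;
import com.google.common.hash.Hashing;

public class IncrementalSha1Example {
    public static void main(String[] args) {
        HashCode code = Hashing.sha1().newHasher()
                .putLong(42L) // e.g. a block id
                .putLong(7L)  // e.g. a generation stamp
                .putInt(1)    // e.g. a state flag
                .hash();

        // SHA-1 digests are 160 bits, i.e. 20 bytes.
        System.out.println(code.bits());            // 160
        System.out.println(code.asBytes().length);  // 20
    }
}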
From source file:dodola.anole.lib.FileUtils.java
/**
 * Chooses a directory name, based on a JAR file name, considering exploded-aar and classes.jar.
 */
public static String getDirectoryNameForJar(File inputFile) {
    // add a hash of the original file path.
    HashFunction hashFunction = Hashing.sha1();
    HashCode hashCode = hashFunction.hashString(inputFile.getAbsolutePath(), Charsets.UTF_16LE);
    String name = Files.getNameWithoutExtension(inputFile.getName());
    if (name.equals("classes") && inputFile.getAbsolutePath().contains("exploded-aar")) {
        // This naming scheme is coming from DependencyManager#computeArtifactPath.
        File versionDir = inputFile.getParentFile().getParentFile();
        File artifactDir = versionDir.getParentFile();
        File groupDir = artifactDir.getParentFile();
        name = Joiner.on('-').join(groupDir.getName(), artifactDir.getName(), versionDir.getName());
    }
    name = name + "_" + hashCode.toString();
    return name;
}
From source file:com.facebook.buck.core.build.engine.buildinfo.DefaultOnDiskBuildInfo.java
@Override
public void writeOutputHashes(FileHashCache fileHashCache) throws IOException {
    ImmutableSortedSet<Path> pathsForArtifact = getPathsForArtifact();
    // Grab and record the output hashes in the build metadata so that cache hits avoid re-hashing
    // file contents. Since we use output hashes for input-based rule keys and for detecting
    // non-determinism, we would spend a lot of time re-hashing output paths -- potentially
    // serialized in a single step. So, do the hashing here to distribute the workload across
    // several threads and cache the results.
    ImmutableSortedMap.Builder<String, String> outputHashes = ImmutableSortedMap.naturalOrder();
    Hasher hasher = Hashing.sha1().newHasher();
    for (Path path : pathsForArtifact) {
        String pathString = path.toString();
        HashCode fileHash = fileHashCache.get(projectFilesystem, path);
        hasher.putBytes(pathString.getBytes(Charsets.UTF_8));
        hasher.putBytes(fileHash.asBytes());
        outputHashes.put(pathString, fileHash.toString());
    }
    projectFilesystem.writeContentsToPath(
            ObjectMappers.WRITER.writeValueAsString(outputHashes.build()),
            metadataDirectory.resolve(BuildInfo.MetadataKey.RECORDED_PATH_HASHES));
    projectFilesystem.writeContentsToPath(
            hasher.hash().toString(),
            metadataDirectory.resolve(BuildInfo.MetadataKey.OUTPUT_HASH));
}
From source file:org.macgyver.mercator.ucs.UCSScanner.java
String computeMercatorIdFromDn(String dn) {
    return Hashing.sha1().hashString(qualifyDn(dn), Charsets.UTF_8).toString();
}
From source file:com.facebook.buck.util.cache.impl.DefaultFileHashCache.java
private HashCodeAndFileType getDirHashCode(Path path) throws IOException {
    Hasher hasher = Hashing.sha1().newHasher();
    PathHashing.hashPath(hasher, this, projectFilesystem, path);
    return HashCodeAndFileType.ofDirectory(hasher.hash());
}
From source file:com.android.utils.FileUtils.java
/**
 * Chooses a directory name, based on a JAR file name, considering exploded-aar and classes.jar.
 */
@NonNull
public static String getDirectoryNameForJar(@NonNull File inputFile) {
    // add a hash of the original file path.
    HashFunction hashFunction = Hashing.sha1();
    HashCode hashCode = hashFunction.hashString(inputFile.getAbsolutePath(), Charsets.UTF_16LE);
    String name = Files.getNameWithoutExtension(inputFile.getName());
    if (name.equals("classes") && inputFile.getAbsolutePath().contains("exploded-aar")) {
        // This naming scheme is coming from DependencyManager#computeArtifactPath.
        File versionDir = inputFile.getParentFile().getParentFile();
        File artifactDir = versionDir.getParentFile();
        File groupDir = artifactDir.getParentFile();
        name = Joiner.on('-').join(groupDir.getName(), artifactDir.getName(), versionDir.getName());
    }
    name = name + "_" + hashCode.toString();
    return name;
}
From source file:org.apache.druid.indexing.common.task.MergeTaskBase.java
private static String computeProcessingID(final String dataSource, final List<DataSegment> segments) {
    final String segmentIDs = Joiner.on("_").join(
            Iterables.transform(
                    Ordering.natural().sortedCopy(segments),
                    new Function<DataSegment, String>() {
                        @Override
                        public String apply(DataSegment x) {
                            return StringUtils.format(
                                    "%s_%s_%s_%s",
                                    x.getInterval().getStart(),
                                    x.getInterval().getEnd(),
                                    x.getVersion(),
                                    x.getShardSpec().getPartitionNum());
                        }
                    }));
    return StringUtils.format(
            "%s_%s",
            dataSource,
            Hashing.sha1().hashString(segmentIDs, StandardCharsets.UTF_8).toString());
}