Example usage for com.google.common.hash Funnels asOutputStream

Introduction

On this page you can find example usages of com.google.common.hash.Funnels.asOutputStream.

Prototype

public static OutputStream asOutputStream(PrimitiveSink sink) 

Document

Wraps a PrimitiveSink as an OutputStream, so it is easy to funnel an object to a PrimitiveSink if there is already a way to write the contents of the object to an OutputStream.
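
A minimal sketch of the idea (the hash algorithm and literal input are illustrative): bytes written to the wrapping OutputStream are forwarded to the underlying PrimitiveSink.

import com.google.common.hash.Funnels;
import com.google.common.hash.Hasher;
import com.google.common.hash.Hashing;

import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

public class FunnelsAsOutputStreamExample {
    public static void main(String[] args) throws IOException {
        Hasher hasher = Hashing.sha256().newHasher(); // a Hasher is a PrimitiveSink
        OutputStream out = Funnels.asOutputStream(hasher);
        // Anything written to the stream feeds the hasher.
        out.write("hello world".getBytes(StandardCharsets.UTF_8));
        System.out.println(hasher.hash()); // SHA-256 of "hello world"
    }
}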

Usage

From source file:epoxide.lpa.impl.StringUtil.java

public static String hash(HashFunction hf, Kryo kryo, Object obj) {
    Hasher ho = hf.newHasher();
    Output out = new Output(Funnels.asOutputStream(ho));
    kryo.writeClassAndObject(out, obj);
    out.close();
    return hex(ho.hash().asBytes());
}
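
Wrapping the Hasher as an OutputStream lets Kryo serialize straight into the hash, so the serialized form is never buffered in memory. A hypothetical call site (the Kryo instance and the object are placeholders):

HashFunction hf = Hashing.sha256();
Kryo kryo = new Kryo();   // in practice, a Kryo registered with the project's classes
String digest = StringUtil.hash(hf, kryo, someObject);   // someObject: any Kryo-serializable object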

From source file:org.gradle.api.internal.hash.DefaultHasher.java

public HashCode hash(File file) {
    try {
        com.google.common.hash.Hasher hasher = Hashing.md5().newHasher();
        hasher.putBytes(SIGNATURE);
        Files.copy(file, Funnels.asOutputStream(hasher));
        return hasher.hash();
    } catch (IOException e) {
        throw new UncheckedIOException(String.format("Failed to create MD5 hash for file '%s'.", file), e);
    }
}

From source file:org.gradle.api.internal.hash.DefaultFileHasher.java

@Override
public HashCode hash(File file) {
    try {
        Hasher hasher = createFileHasher();
        Files.copy(file, Funnels.asOutputStream(hasher));
        return hasher.hash();
    } catch (IOException e) {
        throw new UncheckedIOException(String.format("Failed to create MD5 hash for file '%s'.", file), e);
    }
}

From source file:org.sonatype.nexus.common.hash.Hashes.java

/**
 * Computes the hash of the given stream using the given function.
 */
public static HashCode hash(final HashFunction function, final InputStream input) throws IOException {
    Hasher hasher = function.newHasher();
    OutputStream output = Funnels.asOutputStream(hasher);
    ByteStreams.copy(input, output);
    return hasher.hash();
}
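
A hypothetical call site (path and algorithm are illustrative):

try (InputStream in = java.nio.file.Files.newInputStream(java.nio.file.Paths.get("artifact.jar"))) {
    HashCode sha1 = Hashes.hash(Hashing.sha1(), in);
    System.out.println(sha1);
}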

From source file:org.apache.beam.runners.core.construction.PipelineResources.java

private static String calculateDirectoryContentHash(File directoryToStage) {
    Hasher hasher = Hashing.sha256().newHasher();
    try (OutputStream hashStream = Funnels.asOutputStream(hasher)) {
        ZipFiles.zipDirectory(directoryToStage, hashStream);
        return hasher.hash().toString();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}

From source file:org.apache.beam.sdk.util.PackageUtil.java

/**
 * Compute and cache the attributes of a classpath element that we will need to stage it.
 *
 * @param classpathElement the file or directory to be staged.
 * @param stagingPath The base location for staged classpath elements.
 * @param overridePackageName If non-null, use the given value as the package name
 *                            instead of generating one automatically.
 * @return a {@link PackageAttributes} containing metadata about the object to be staged.
 */
static PackageAttributes createPackageAttributes(File classpathElement, String stagingPath,
        String overridePackageName) {
    try {
        boolean directory = classpathElement.isDirectory();

        // Compute size and hash in one pass over file or directory.
        Hasher hasher = Hashing.md5().newHasher();
        OutputStream hashStream = Funnels.asOutputStream(hasher);
        CountingOutputStream countingOutputStream = new CountingOutputStream(hashStream);

        if (!directory) {
            // Files are staged as-is.
            Files.asByteSource(classpathElement).copyTo(countingOutputStream);
        } else {
            // Directories are recursively zipped.
            ZipFiles.zipDirectory(classpathElement, countingOutputStream);
        }

        long size = countingOutputStream.getCount();
        String hash = Base64Variants.MODIFIED_FOR_URL.encode(hasher.hash().asBytes());

        // Create the DataflowPackage with staging name and location.
        String uniqueName = getUniqueContentName(classpathElement, hash);
        String resourcePath = IOChannelUtils.resolve(stagingPath, uniqueName);
        DataflowPackage target = new DataflowPackage();
        target.setName(overridePackageName != null ? overridePackageName : uniqueName);
        target.setLocation(resourcePath);

        return new PackageAttributes(size, hash, directory, target);
    } catch (IOException e) {
        throw new RuntimeException("Package setup failure for " + classpathElement, e);
    }
}
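
The size-and-hash-in-one-pass pattern above can be isolated into a self-contained sketch (the MD5 choice mirrors the example; the class and path names are placeholders):

import com.google.common.hash.Funnels;
import com.google.common.hash.Hasher;
import com.google.common.hash.Hashing;
import com.google.common.io.CountingOutputStream;
import com.google.common.io.Files;

import java.io.File;
import java.io.IOException;

public class SizeAndHash {
    public static void main(String[] args) throws IOException {
        File file = new File("example.bin"); // placeholder path
        Hasher hasher = Hashing.md5().newHasher();
        // Count bytes while they are funnelled into the hasher: one pass yields both size and hash.
        try (CountingOutputStream out = new CountingOutputStream(Funnels.asOutputStream(hasher))) {
            Files.asByteSource(file).copyTo(out);
            out.flush();
            System.out.println("size = " + out.getCount());
            System.out.println("hash = " + hasher.hash());
        }
    }
}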

From source file:org.apache.beam.runners.dataflow.util.PackageUtil.java

/**
 * Compute and cache the attributes of a classpath element that we will need to stage it.
 *
 * @param source the file or directory to be staged.
 * @param stagingPath The base location for staged classpath elements.
 * @param overridePackageName If non-null, use the given value as the package name
 *                            instead of generating one automatically.
 * @return a {@link PackageAttributes} containing metadata about the object to be staged.
 */
static PackageAttributes createPackageAttributes(File source, String stagingPath,
        @Nullable String overridePackageName) {
    boolean directory = source.isDirectory();

    // Compute size and hash in one pass over file or directory.
    Hasher hasher = Hashing.md5().newHasher();
    OutputStream hashStream = Funnels.asOutputStream(hasher);
    try (CountingOutputStream countingOutputStream = new CountingOutputStream(hashStream)) {
        if (!directory) {
            // Files are staged as-is.
            Files.asByteSource(source).copyTo(countingOutputStream);
        } else {
            // Directories are recursively zipped.
            ZipFiles.zipDirectory(source, countingOutputStream);
        }
        countingOutputStream.flush();

        long size = countingOutputStream.getCount();
        String hash = Base64Variants.MODIFIED_FOR_URL.encode(hasher.hash().asBytes());

        // Create the DataflowPackage with staging name and location.
        String uniqueName = getUniqueContentName(source, hash);
        String resourcePath = FileSystems.matchNewResource(stagingPath, true)
                .resolve(uniqueName, StandardResolveOptions.RESOLVE_FILE).toString();
        DataflowPackage target = new DataflowPackage();
        target.setName(overridePackageName != null ? overridePackageName : uniqueName);
        target.setLocation(resourcePath);

        return new PackageAttributes(size, hash, directory, target, source.getPath());
    } catch (IOException e) {
        throw new RuntimeException("Package setup failure for " + source, e);
    }
}

From source file:io.bazel.rules.closure.webfiles.WebfilesWriter.java

/**
 * Adds {@code webfile} {@code data} to zip archive and returns proto index entry.
 *
 * <p>The returned value can be written to the manifest associated with a rule so that parent
 * rules can obtain the data written here.
 *
 * @param webfile original information about webfile
 * @return modified version of {@code webfile} that's suitable for writing to the final manifest
 */
public WebfileInfo writeWebfile(WebfileInfo webfile, @WillNotClose InputStream input) throws IOException {
    checkNotNull(input, "input");
    String name = WebfilesUtils.getZipEntryName(webfile);
    createEntriesForParentDirectories(name);
    ZipEntry entry = new ZipEntry(name);
    entry.setComment(webfile.getRunpath());
    // Build outputs need to be deterministic. Bazel also doesn't care about modified times because
    // it uses the file digest to determine if a file is invalidated. So even if we did copy the
    // time information from the original file, it still might not be a good idea.
    entry.setCreationTime(EPOCH);
    entry.setLastModifiedTime(EPOCH);
    entry.setLastAccessTime(EPOCH);
    if (isAlreadyCompressed(webfile.getWebpath())) {
        // When opting out of compression, ZipOutputStream expects us to do ALL THIS
        entry.setMethod(ZipEntry.STORED);
        if (input instanceof ByteArrayInputStream) {
            entry.setSize(input.available());
            Hasher hasher = Hashing.crc32().newHasher();
            input.mark(-1);
            ByteStreams.copy(input, Funnels.asOutputStream(hasher));
            input.reset();
            entry.setCrc(hasher.hash().padToLong());
        } else {
            byte[] data = ByteStreams.toByteArray(input);
            entry.setSize(data.length);
            entry.setCrc(Hashing.crc32().hashBytes(data).padToLong());
            input = new ByteArrayInputStream(data);
        }
    } else {
        entry.setMethod(ZipEntry.DEFLATED);
    }
    HasherInputStream source = new HasherInputStream(input, Hashing.sha256().newHasher());
    long offset = channel.position();
    zip.putNextEntry(entry);
    ByteStreams.copy(source, zip);
    zip.closeEntry();
    buffer.flush();
    WebfileInfo result = webfile.toBuilder().clearPath() // Now that it's in the zip, we don't need the ctx.action execroot path.
            .setInZip(true).setOffset(offset).setDigest(ByteString.copyFrom(source.hasher.hash().asBytes()))
            .build();
    webfiles.add(result);
    return result;
}

From source file:nextflow.util.CacheHelper.java

/**
 * Hashes the file by reading file content
 *
 * @param hasher The current {@code Hasher} object
 * @param path The {@code Path} object to hash
 * @return The updated {@code Hasher} object
 */

static private Hasher hashFileContent(Hasher hasher, Path path) {

    OutputStream output = Funnels.asOutputStream(hasher);
    try {
        Files.copy(path, output);
    } catch (IOException e) {
        throw new IllegalStateException("Unable to hash content: " + path, e);
    } finally {
        FilesEx.closeQuietly(output);
    }

    return hasher;
}
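
The same helper can also be written with try-with-resources. A sketch (not the Nextflow code), assuming the stream's close() is a no-op as in current Guava:

static Hasher hashFileContent(Hasher hasher, Path path) {
    try (OutputStream output = Funnels.asOutputStream(hasher)) {
        Files.copy(path, output);   // java.nio.file.Files
    } catch (IOException e) {
        throw new IllegalStateException("Unable to hash content: " + path, e);
    }
    return hasher;
}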