Example usage for com.google.common.io ByteSource wrap

List of usage examples for com.google.common.io ByteSource wrap

Introduction

In this page you can find the example usage for com.google.common.io ByteSource wrap.

Prototype

public static ByteSource wrap(byte[] b) 

Source Link

Document

Returns a view of the given byte array as a ByteSource.

Usage

From source file:com.eightkdata.mongowp.bson.utils.NonIoByteSource.java

/**
 * Custom deserialization hook (invoked by Java serialization via the standard
 * {@code private readObject} convention). The stream starts with a boolean flag:
 * {@code true} means the delegate was written as a serialized {@link ByteSource}
 * object; {@code false} means it was written as a length-prefixed raw byte array,
 * which is re-wrapped with {@link ByteSource#wrap(byte[])}.
 *
 * @throws IOException if the stream ends before {@code size} bytes are available
 * @throws ClassNotFoundException if the serialized delegate class cannot be resolved
 */
private void readObject(java.io.ObjectInputStream in) throws IOException, ClassNotFoundException {
    if (in.readBoolean()) {
        delegate = (ByteSource) in.readObject();
    } else {
        int size = in.readInt();
        byte[] bytes = new byte[size];
        int off = 0;
        int remaining = size;
        // read() may return fewer bytes than requested, so loop until the buffer is full.
        while (remaining > 0) {
            int read = in.read(bytes, off, remaining);
            if (read == -1) {
                // Fail before touching the counters: adding -1 to them would corrupt off/remaining.
                throw new IOException("The end of the stream was reached before it was expected");
            }
            off += read;
            remaining -= read;
            assert off + remaining == size;
        }
        delegate = ByteSource.wrap(bytes);
    }
}

From source file:org.gaul.s3proxy.NullBlobStore.java

@Override
public MultipartPart uploadMultipartPart(MultipartUpload mpu, int partNumber, Payload payload) {
    // Drain the payload into a null sink just to measure its length.
    long length;
    try (InputStream is = payload.openStream()) {
        length = ByteStreams.copy(is, ByteStreams.nullOutputStream());
    } catch (IOException ioe) {
        throw new RuntimeException(ioe);
    }

    // Replace the real content with just the 8-byte big-endian encoding of the length.
    byte[] lengthBytes = Longs.toByteArray(length);
    ByteSourcePayload replacement = new ByteSourcePayload(ByteSource.wrap(lengthBytes));
    replacement.setContentMetadata(payload.getContentMetadata());
    replacement.getContentMetadata().setContentLength((long) lengthBytes.length);
    replacement.getContentMetadata().setContentMD5((HashCode) null);

    // Upload the stub, but report the original length in the returned part metadata.
    MultipartPart uploaded = super.uploadMultipartPart(mpu, partNumber, replacement);
    return MultipartPart.create(uploaded.partNumber(), length, uploaded.partETag(), uploaded.lastModified());
}

From source file:com.facebook.buck.jvm.java.abi.ClassMirror.java

/**
 * Renders this mirror back into class-file bytes via ASM and returns them as a
 * {@code ByteSource}. Visitor calls are issued in the standard class-file order:
 * header, outer class, inner classes, annotations, fields, methods.
 */
public ByteSource getStubClassBytes() {
    ClassWriter classWriter = new ClassWriter(0);
    classWriter.visit(version, access, name, signature, superName, interfaces);

    if (outerClass != null) {
        classWriter.visitOuterClass(outerClass.owner, outerClass.name, outerClass.desc);
    }

    for (InnerClass innerClass : innerClasses) {
        classWriter.visitInnerClass(
                innerClass.name, innerClass.outerName, innerClass.innerName, innerClass.access);
    }

    for (AnnotationMirror annotationMirror : annotations) {
        annotationMirror.appendTo(classWriter);
    }

    for (FieldMirror fieldMirror : fields) {
        fieldMirror.accept(classWriter);
    }

    for (MethodMirror methodMirror : methods) {
        methodMirror.appendTo(classWriter);
    }

    classWriter.visitEnd();
    return ByteSource.wrap(classWriter.toByteArray());
}

From source file:org.apache.druid.storage.hdfs.HdfsDataSegmentPusher.java

/**
 * Zips the local segment directory and uploads it to HDFS together with a JSON
 * descriptor. Everything is first written under a UUID-named temporary directory
 * and then copied into its final location; the temporary directory is removed in
 * the finally block whether or not the push succeeds.
 *
 * @param inDir local directory containing the segment files to push
 * @param segment metadata describing the segment being pushed
 * @param useUniquePath if true, a unique token is embedded in the final file names
 * @return the input segment updated with its load spec, size, and binary version
 * @throws IOException if zipping or any HDFS operation fails
 */
@Override
public DataSegment push(final File inDir, final DataSegment segment, final boolean useUniquePath)
        throws IOException {
    // For HDFS, useUniquePath does not affect the directory tree but instead affects the filename, which is of the form
    // '{partitionNum}_index.zip' without unique paths and '{partitionNum}_{UUID}_index.zip' with unique paths.
    final String storageDir = this.getStorageDir(segment, false);

    log.info("Copying segment[%s] to HDFS at location[%s/%s]", segment.getIdentifier(),
            fullyQualifiedStorageDirectory.get(), storageDir);

    // Stage under a freshly generated UUID directory so concurrent pushes cannot collide.
    Path tmpIndexFile = new Path(StringUtils.format("%s/%s/%s/%s_index.zip",
            fullyQualifiedStorageDirectory.get(), segment.getDataSource(), UUIDUtils.generateUuid(),
            segment.getShardSpec().getPartitionNum()));
    FileSystem fs = tmpIndexFile.getFileSystem(hadoopConfig);

    fs.mkdirs(tmpIndexFile.getParent());
    log.info("Compressing files from[%s] to [%s]", inDir, tmpIndexFile);

    final long size;
    final DataSegment dataSegment;
    try {
        // Zip the segment directory directly into the temp HDFS file.
        try (FSDataOutputStream out = fs.create(tmpIndexFile)) {
            size = CompressionUtils.zip(inDir, out);
        }

        final String uniquePrefix = useUniquePath ? DataSegmentPusher.generateUniquePath() + "_" : "";
        final Path outIndexFile = new Path(
                StringUtils.format("%s/%s/%d_%sindex.zip", fullyQualifiedStorageDirectory.get(), storageDir,
                        segment.getShardSpec().getPartitionNum(), uniquePrefix));
        final Path outDescriptorFile = new Path(
                StringUtils.format("%s/%s/%d_%sdescriptor.json", fullyQualifiedStorageDirectory.get(),
                        storageDir, segment.getShardSpec().getPartitionNum(), uniquePrefix));

        // The returned segment carries the final index location, the zipped size,
        // and the binary version read from the local directory.
        dataSegment = segment.withLoadSpec(makeLoadSpec(outIndexFile.toUri())).withSize(size)
                .withBinaryVersion(SegmentUtils.getVersionFromDir(inDir));

        final Path tmpDescriptorFile = new Path(tmpIndexFile.getParent(),
                StringUtils.format("%s_descriptor.json", dataSegment.getShardSpec().getPartitionNum()));

        log.info("Creating descriptor file at[%s]", tmpDescriptorFile);
        // Serialize the segment metadata and stream it into the temp descriptor file.
        ByteSource.wrap(jsonMapper.writeValueAsBytes(dataSegment))
                .copyTo(new HdfsOutputStreamSupplier(fs, tmpDescriptorFile));

        // Create parent if it does not exist, recreation is not an error
        fs.mkdirs(outIndexFile.getParent());
        copyFilesWithChecks(fs, tmpDescriptorFile, outDescriptorFile);
        copyFilesWithChecks(fs, tmpIndexFile, outIndexFile);
    } finally {
        // Best-effort cleanup of the temp directory; failures are logged, not rethrown,
        // so they cannot mask an exception from the push itself.
        try {
            if (fs.exists(tmpIndexFile.getParent()) && !fs.delete(tmpIndexFile.getParent(), true)) {
                log.error("Failed to delete temp directory[%s]", tmpIndexFile.getParent());
            }
        } catch (IOException ex) {
            log.error(ex, "Failed to delete temp directory[%s]", tmpIndexFile.getParent());
        }
    }

    return dataSegment;
}

From source file:com.facebook.buck.apple.BuiltinApplePackage.java

private void appendAdditionalAppleWatchSteps(ImmutableList.Builder<Step> commands) {
    // For .ipas with WatchOS2 support, Apple apparently requires the following for App Store
    // submissions:
    // 1. Have a empty "Symbols" directory on the top level.
    // 2. Copy the unmodified WatchKit stub binary for WatchOS2 apps to WatchKitSupport2/WK
    // We can't use the copy of the binary in the bundle because that has already been re-signed
    // with our own identity.
    //
    // For WatchOS1 support: same as above, except:
    // 1. No "Symbols" directory needed.
    // 2. WatchKitSupport instead of WatchKitSupport2.
    for (BuildRule rule : bundle.getDeps()) {
        // Only AppleBundle deps that actually carry a binary are relevant.
        if (!(rule instanceof AppleBundle)) {
            continue;
        }
        AppleBundle appleBundle = (AppleBundle) rule;
        if (!appleBundle.getBinary().isPresent()) {
            continue;
        }
        BuildRule binary = appleBundle.getBinary().get();
        if (binary instanceof WriteFile && appleBundle.getPlatformName().startsWith("watch")) {
            // WatchOS2: empty Symbols dir plus the stub at WatchKitSupport2/WK.
            commands.add(new MkdirStep(getProjectFilesystem(), temp.resolve("Symbols")));
            Path watchKitSupportDir = temp.resolve("WatchKitSupport2");
            commands.add(new MkdirStep(getProjectFilesystem(), watchKitSupportDir));
            commands.add(new WriteFileStep(getProjectFilesystem(),
                    ByteSource.wrap(((WriteFile) binary).getFileContents()),
                    watchKitSupportDir.resolve("WK"), true /* executable */
            ));
        } else {
            // WatchOS1: only the legacy stub at WatchKitSupport/WK, if one exists.
            Optional<WriteFile> legacyWatchStub = getLegacyWatchStubFromDeps(appleBundle);
            if (legacyWatchStub.isPresent()) {
                Path watchKitSupportDir = temp.resolve("WatchKitSupport");
                commands.add(new MkdirStep(getProjectFilesystem(), watchKitSupportDir));
                commands.add(new WriteFileStep(getProjectFilesystem(),
                        ByteSource.wrap(legacyWatchStub.get().getFileContents()),
                        watchKitSupportDir.resolve("WK"), true /* executable */
                ));
            }
        }
    }
}

From source file:com.facebook.buck.java.abi.ClassMirror.java

/**
 * Serializes this class mirror into the given jar as a single entry. The entry
 * timestamp is pinned to 0 so output is byte-for-byte reproducible, and the ASM
 * visitor calls follow the standard class-file order: header, outer class,
 * inner classes, annotations, fields, methods.
 *
 * @param jar the open jar stream to append to; the entry is closed before returning
 * @throws IOException if writing to the jar fails
 */
public void writeTo(JarOutputStream jar) throws IOException {
    JarEntry jarEntry = new JarEntry(fileName);
    // Zero timestamp keeps the jar deterministic across builds.
    jarEntry.setTime(0);

    jar.putNextEntry(jarEntry);
    ClassWriter classWriter = new ClassWriter(0);
    classWriter.visit(version, access, name, signature, superName, interfaces);

    if (outerClass != null) {
        classWriter.visitOuterClass(outerClass.owner, outerClass.name, outerClass.desc);
    }

    for (InnerClass innerClass : innerClasses) {
        classWriter.visitInnerClass(
                innerClass.name, innerClass.outerName, innerClass.innerName, innerClass.access);
    }

    for (AnnotationMirror annotationMirror : annotations) {
        annotationMirror.appendTo(classWriter);
    }

    for (FieldMirror fieldMirror : fields) {
        fieldMirror.accept(classWriter);
    }

    for (MethodMirror methodMirror : methods) {
        methodMirror.appendTo(classWriter);
    }

    classWriter.visitEnd();
    ByteSource.wrap(classWriter.toByteArray()).copyTo(jar);
    jar.closeEntry();
}

From source file:org.jclouds.chef.filters.SignedHeaderAuth.java

/**
 * Returns the Base64-encoded SHA-1 digest of the canonical form of {@code path}.
 *
 * @param path the request path to hash
 * @throws HttpException if hashing fails and the underlying exception cannot be
 *         rethrown as-is by {@code propagateIfPossible}
 */
@VisibleForTesting
String hashPath(String path) {
    try {
        return base64().encode(ByteSource.wrap(canonicalPath(path).getBytes(UTF_8)).hash(sha1()).asBytes());
    } catch (Exception e) {
        // Rethrow unchecked exceptions unchanged; wrap anything else with context.
        Throwables.propagateIfPossible(e);
        throw new HttpException("error creating signature for path: " + path, e);
    }
}

From source file:io.blobkeeper.file.service.FileStorageImpl.java

/**
 * Writes a replicated file's bytes into the blob file for its partition, at the
 * offset and length recorded in the replication index entry.
 *
 * @param replicationFile the data and index entry to apply; must target an
 *        existing partition on a running storage
 * @throws IllegalArgumentException if the storage is not running, the blob file
 *         is missing, or the write fails
 * @throws IllegalStateException if fewer bytes than expected were transferred
 */
@Override
public void addFile(@NotNull ReplicationFile replicationFile) {
    log.info("Replicate file {}", replicationFile);

    checkArgument(running, "Storage is not running!");

    DiskIndexElt indexElt = replicationFile.getIndex();
    File file = diskService.getFile(indexElt.getPartition());

    checkNotNull(file, "Blob file is required!");

    // Expose the replicated bytes as a stream so they can be pumped through a channel.
    InputStream is;
    try {
        is = ByteSource.wrap(replicationFile.getData()).openStream();
    } catch (IOException e) {
        throw new IllegalArgumentException("Can't wrap the buffer", e);
    }

    ReadableByteChannel dataChannel = newChannel(is);

    try {
        // Write exactly at the offset/length the index entry dictates.
        long transferred = file.getFileChannel().transferFrom(dataChannel, indexElt.getOffset(),
                indexElt.getLength());

        if (transferred < indexElt.getLength()) {
            throw new IllegalStateException("Data writing error, transferred " + transferred);
        }
    } catch (IOException e) {
        log.error("Can't add file to the storage", e);

        // Record the failure against the disk so unhealthy disks can be detected.
        diskService.updateErrors(indexElt.getPartition().getDisk());

        // Preserve the cause so callers can see the underlying I/O failure.
        throw new IllegalArgumentException("Can't add file to the storage", e);
    } finally {
        // newChannel never returns null, so close unconditionally; a close failure
        // is deliberately swallowed so it cannot mask the primary outcome.
        try {
            dataChannel.close();
        } catch (IOException ignored) {
            // best effort
        }
    }
}

From source file:org.jclouds.chef.config.BaseChefHttpApiModule.java

@Provides
@Singleton
@Validator
public Optional<PrivateKey> provideValidatorCredential(Crypto crypto, Injector injector)
        throws InvalidKeySpecException, IOException {
    // The validator credential is a named property that cannot be injected as
    // optional here, so it is looked up through the injector instead.
    Key<String> credentialKey = Key.get(String.class, Names.named(CHEF_VALIDATOR_CREDENTIAL));
    try {
        String pem = injector.getInstance(credentialKey);
        PrivateKey validatorKey = crypto.rsaKeyFactory()
                .generatePrivate(Pems.privateKeySpec(ByteSource.wrap(pem.getBytes(Charsets.UTF_8))));
        return Optional.<PrivateKey>of(validatorKey);
    } catch (ConfigurationException ex) {
        // No credential bound: report absence rather than failing module creation.
        return Optional.<PrivateKey>absent();
    }
}

From source file:org.jclouds.chef.config.ChefHttpApiModule.java

@Provides
@Singleton
@Validator
public final Optional<PrivateKey> provideValidatorCredential(Crypto crypto, Injector injector)
        throws InvalidKeySpecException, IOException {
    // The validator credential is a named property that cannot be injected as
    // optional here, so it is looked up through the injector instead.
    Key<String> credentialKey = Key.get(String.class, Names.named(CHEF_VALIDATOR_CREDENTIAL));
    try {
        String pem = injector.getInstance(credentialKey);
        PrivateKey validatorKey = crypto.rsaKeyFactory()
                .generatePrivate(Pems.privateKeySpec(ByteSource.wrap(pem.getBytes(Charsets.UTF_8))));
        return Optional.<PrivateKey>of(validatorKey);
    } catch (ConfigurationException ex) {
        // No credential bound: report absence rather than failing module creation.
        return Optional.<PrivateKey>absent();
    }
}