Example usage for com.google.common.io Closer create

List of usage examples for com.google.common.io Closer create

Introduction

On this page you can find usage examples for com.google.common.io Closer.create().

Prototype

public static Closer create() 

Document

Creates a new Closer.
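The typical pattern is to create a Closer, register each resource as it is opened, rethrow any failure through the Closer, and close it in a finally block. A minimal sketch of that pattern, assuming a hypothetical openStream() helper that returns an InputStream:

void copyWithCloser() throws IOException {
    Closer closer = Closer.create();
    try {
        // Register resources as they are opened; openStream() is a hypothetical helper.
        InputStream in = closer.register(openStream());
        // ... work with the stream ...
    } catch (Throwable e) {
        // rethrow() preserves the original exception while still running close().
        throw closer.rethrow(e);
    } finally {
        // Closes every registered resource, suppressing secondary exceptions.
        closer.close();
    }
}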

Usage

From source file:tachyon.worker.hierarchy.StorageDir.java

/**
 * Copy block file from current StorageDir to another StorageDir
 * @param blockId Id of the block
 * @param dstDir destination StorageDir
 * @return true if success, false otherwise
 * @throws IOException
 */
boolean copyBlock(long blockId, StorageDir dstDir) throws IOException {
    long size = getBlockSize(blockId);
    if (size == -1) {
        LOG.error("Block file doesn't exist! blockId:" + blockId);
        return false;
    }
    boolean copySuccess = false;
    Closer closer = Closer.create();
    try {
        BlockHandler bhSrc = closer.register(getBlockHandler(blockId));
        BlockHandler bhDst = closer.register(dstDir.getBlockHandler(blockId));
        ByteBuffer srcBuf = bhSrc.read(0, (int) size);
        copySuccess = (bhDst.append(0, srcBuf) == size);
    } finally {
        closer.close();
    }
    if (copySuccess) {
        dstDir.addBlockId(blockId, size);
    }
    return copySuccess;
}

From source file:io.prestosql.orc.reader.StructStreamReader.java

@Override
public void close() {
    try (Closer closer = Closer.create()) {
        for (StreamReader structField : structFields.values()) {
            closer.register(() -> structField.close());
        }
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}

From source file:org.apache.gobblin.runtime.job_exec.JobLauncherExecutionDriver.java

protected JobLauncherExecutionDriver(JobSpec jobSpec, Logger log, DriverRunnable runnable) {
    super(runnable);
    _closer = Closer.create();
    _closer.register(runnable.getJobLauncher());
    _log = log;
    _jobSpec = jobSpec;
    _jobExec = runnable.getJobExec();
    _callbackDispatcher = _closer.register(runnable.getCallbackDispatcher());
    _jobState = runnable.getJobState();
    _executionList = new ExecutionList();
    _runnable = runnable;
}

From source file:org.jclouds.vsphere.compute.config.VSphereComputeServiceAdapter.java

@Override
public NodeAndInitialCredentials<VirtualMachine> createNodeWithGroupEncodedIntoName(String tag, String name,
        Template template) {
    try {
        Closer closer = Closer.create();
        VSphereServiceInstance instance = null;
        try {
            instance = this.serviceInstance.get();
            VSphereHost sphereHost = vSphereHost.get();
            closer.register(instance);
            closer.register(sphereHost);
            Folder rootFolder = instance.getInstance().getRootFolder();

            ComputerNameValidator.INSTANCE.validate(name);

            VirtualMachine master = getVMwareTemplate(template.getImage().getId(), rootFolder);
            ResourcePool resourcePool = checkNotNull(
                    tryFindResourcePool(rootFolder, sphereHost.getHost().getName()), "resourcePool");

            VirtualMachineCloneSpec cloneSpec = new MasterToVirtualMachineCloneSpec(resourcePool,
                    sphereHost.getDatastore(), VSphereApiMetadata.defaultProperties().getProperty(CLONING))
                            .apply(master);

            VSphereTemplateOptions vOptions = VSphereTemplateOptions.class.cast(template.getOptions());
            Set<String> networks = vOptions.getNetworks();

            VirtualMachineConfigSpec virtualMachineConfigSpec = new VirtualMachineConfigSpec();
            virtualMachineConfigSpec.setMemoryMB((long) template.getHardware().getRam());
            if (template.getHardware().getProcessors().size() > 0)
                virtualMachineConfigSpec
                        .setNumCPUs((int) template.getHardware().getProcessors().get(0).getCores());
            else
                virtualMachineConfigSpec.setNumCPUs(1);

            Set<NetworkConfig> networkConfigs = Sets.newHashSet();
            for (String network : networks) {
                NetworkConfig config = networkConfigurationForNetworkAndOptions.apply(network, vOptions);
                networkConfigs.add(config);
            }

            List<VirtualDeviceConfigSpec> updates = Lists.newArrayList();

            long currentDiskSize = 0;
            int numberOfHardDrives = 0;

            int diskKey = 0;

            for (VirtualDevice device : master.getConfig().getHardware().getDevice()) {
                if (device instanceof VirtualDisk) {
                    VirtualDisk vd = (VirtualDisk) device;
                    diskKey = vd.getKey();
                    currentDiskSize += vd.getCapacityInKB();
                    numberOfHardDrives++;
                }
            }

            for (VirtualDevice device : master.getConfig().getHardware().getDevice()) {
                if (device instanceof VirtualEthernetCard) {
                    VirtualDeviceConfigSpec nicSpec = new VirtualDeviceConfigSpec();
                    nicSpec.setOperation(VirtualDeviceConfigSpecOperation.remove);
                    nicSpec.setDevice(device);
                    updates.add(nicSpec);
                } else if (device instanceof VirtualCdrom) {
                    if (vOptions.isoFileName() != null) {
                        VirtualCdrom vCdrom = (VirtualCdrom) device;
                        VirtualDeviceConfigSpec cdSpec = new VirtualDeviceConfigSpec();
                        cdSpec.setOperation(VirtualDeviceConfigSpecOperation.edit);

                        VirtualCdromIsoBackingInfo iso = new VirtualCdromIsoBackingInfo();
                        Datastore datastore = vSphereHost.get().getDatastore();
                        VirtualDeviceConnectInfo cInfo = new VirtualDeviceConnectInfo();
                        cInfo.setStartConnected(true);
                        cInfo.setConnected(true);
                        iso.setDatastore(datastore.getMOR());
                        iso.setFileName("[" + datastore.getName() + "] " + vOptions.isoFileName());

                        vCdrom.setConnectable(cInfo);
                        vCdrom.setBacking(iso);
                        cdSpec.setDevice(vCdrom);
                        updates.add(cdSpec);
                    }
                } else if (device instanceof VirtualFloppy) {
                    if (vOptions.flpFileName() != null) {
                        VirtualFloppy vFloppy = (VirtualFloppy) device;
                        VirtualDeviceConfigSpec floppySpec = new VirtualDeviceConfigSpec();
                        floppySpec.setOperation(VirtualDeviceConfigSpecOperation.edit);

                        VirtualFloppyImageBackingInfo image = new VirtualFloppyImageBackingInfo();
                        Datastore datastore = vSphereHost.get().getDatastore();
                        VirtualDeviceConnectInfo cInfo = new VirtualDeviceConnectInfo();
                        cInfo.setStartConnected(true);
                        cInfo.setConnected(true);
                        image.setDatastore(datastore.getMOR());
                        image.setFileName("[" + datastore.getName() + "] " + vOptions.flpFileName());

                        vFloppy.setConnectable(cInfo);
                        vFloppy.setBacking(image);
                        floppySpec.setDevice(vFloppy);
                        updates.add(floppySpec);
                    }
                } else if (device instanceof VirtualLsiLogicController) {
                    //int unitNumber = master.getConfig().getHardware().getDevice().length;
                    int unitNumber = numberOfHardDrives;
                    List<? extends Volume> volumes = template.getHardware().getVolumes();
                    VirtualLsiLogicController lsiLogicController = (VirtualLsiLogicController) device;
                    String dsName = vSphereHost.get().getDatastore().getName();
                    for (Volume volume : volumes) {
                        long currentVolumeSize = 1024 * 1024 * volume.getSize().longValue();

                        VirtualDeviceConfigSpec diskSpec = new VirtualDeviceConfigSpec();

                        VirtualDisk disk = new VirtualDisk();
                        VirtualDiskFlatVer2BackingInfo diskFileBacking = new VirtualDiskFlatVer2BackingInfo();

                        int ckey = lsiLogicController.getKey();
                        unitNumber++;

                        String fileName = "[" + dsName + "] " + name + "/" + name + unitNumber + ".vmdk";

                        diskFileBacking.setFileName(fileName);
                        diskFileBacking.setDiskMode("persistent");

                        disk.setControllerKey(ckey);
                        disk.setUnitNumber(unitNumber);
                        disk.setBacking(diskFileBacking);
                        long size = currentVolumeSize;
                        disk.setCapacityInKB(size);
                        disk.setKey(-1);

                        diskSpec.setOperation(VirtualDeviceConfigSpecOperation.add);
                        diskSpec.setFileOperation(VirtualDeviceConfigSpecFileOperation.create);
                        diskSpec.setDevice(disk);
                        updates.add(diskSpec);
                    }

                }
            }
            updates.addAll(createNicSpec(networkConfigs));
            virtualMachineConfigSpec
                    .setDeviceChange(updates.toArray(new VirtualDeviceConfigSpec[updates.size()]));

            //                VirtualMachineBootOptions bootOptions = new VirtualMachineBootOptions();
            //                List<VirtualMachineBootOptionsBootableDevice> bootOrder = Lists.newArrayList();
            //
            //                VirtualMachineBootOptionsBootableDiskDevice diskBootDevice = new VirtualMachineBootOptionsBootableDiskDevice();
            //                diskBootDevice.setDeviceKey(diskKey);
            //                bootOrder.add(diskBootDevice);
            //                bootOrder.add(new VirtualMachineBootOptionsBootableCdromDevice());
            //                bootOptions.setBootOrder(bootOrder.toArray(new VirtualMachineBootOptionsBootableDevice[0]));
            //
            //                virtualMachineConfigSpec.setBootOptions(bootOptions);

            cloneSpec.setConfig(virtualMachineConfigSpec);

            vOptions.getPublicKey();

            VirtualMachine cloned = null;
            try {
                cloned = cloneMaster(master, tag, name, cloneSpec);
                Set<String> tagsFromOption = vOptions.getTags();
                if (tagsFromOption.size() > 0) {
                    StringBuilder tags = new StringBuilder();
                    for (String vmTag : vOptions.getTags()) {
                        tags.append(vmTag).append(",");
                    }
                    tags.deleteCharAt(tags.length() - 1);

                    cloned.getServerConnection().getServiceInstance().getCustomFieldsManager().setField(cloned,
                            customFields.get().get(VSphereConstants.JCLOUDS_TAGS).getKey(), tags.toString());
                    cloned.getServerConnection().getServiceInstance().getCustomFieldsManager().setField(cloned,
                            customFields.get().get(VSphereConstants.JCLOUDS_GROUP).getKey(), tag);
                    if (vOptions.postConfiguration())
                        postConfiguration(cloned, name, tag, networkConfigs);
                    else {
                        VSpherePredicate.WAIT_FOR_VMTOOLS(1000 * 60 * 60 * 2, TimeUnit.MILLISECONDS)
                                .apply(cloned);
                    }
                }
            } catch (Exception e) {
                logger.error("Can't clone vm " + master.getName(), e);
                propagate(e);
            }

            if (vOptions.waitOnPort() != null) {

            }

            NodeAndInitialCredentials<VirtualMachine> nodeAndInitialCredentials = new NodeAndInitialCredentials<VirtualMachine>(
                    cloned, cloned.getName(),
                    LoginCredentials.builder().user("root").password(vmInitPassword).build());
            return nodeAndInitialCredentials;
        } catch (Throwable e) { // must catch Throwable
            throw closer.rethrow(e);
        } finally {
            closer.close();
        }
    } catch (Throwable t) {
        Throwables.propagateIfPossible(t);
    }
    return null;
}

From source file:io.prestosql.orc.reader.ListStreamReader.java

@Override
public void close() {
    try (Closer closer = Closer.create()) {
        closer.register(() -> elementStreamReader.close());
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}

From source file:com.facebook.buck.android.exopackage.RealExopackageDevice.java

@Override
public void installFile(final Path targetDevicePath, final Path source) throws Exception {
    Preconditions.checkArgument(source.isAbsolute());
    Preconditions.checkArgument(targetDevicePath.isAbsolute());
    Closer closer = Closer.create();
    CollectingOutputReceiver receiver = new CollectingOutputReceiver() {

        private boolean startedPayload = false;
        private boolean wrotePayload = false;
        @Nullable
        private OutputStream outToDevice;

        @Override
        public void addOutput(byte[] data, int offset, int length) {
            super.addOutput(data, offset, length);
            try {
                if (!startedPayload && getOutput().length() >= AgentUtil.TEXT_SECRET_KEY_SIZE) {
                    LOG.verbose("Got key: %s", getOutput().split("[\\r\\n]", 1)[0]);
                    startedPayload = true;
                    Socket clientSocket = new Socket("localhost", agentPort);
                    closer.register(clientSocket);
                    LOG.verbose("Connected");
                    outToDevice = clientSocket.getOutputStream();
                    closer.register(outToDevice);
                    // Need to wait for client to acknowledge that we've connected.
                }
                if (outToDevice == null) {
                    throw new NullPointerException();
                }
                if (!wrotePayload && getOutput().contains("z1")) {
                    if (outToDevice == null) {
                        throw new NullPointerException("outToDevice was null when protocol says it cannot be");
                    }
                    LOG.verbose("Got z1");
                    wrotePayload = true;
                    outToDevice.write(getOutput().substring(0, AgentUtil.TEXT_SECRET_KEY_SIZE).getBytes());
                    LOG.verbose("Wrote key");
                    com.google.common.io.Files.asByteSource(source.toFile()).copyTo(outToDevice);
                    outToDevice.flush();
                    LOG.verbose("Wrote file");
                }
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    };

    String targetFileName = targetDevicePath.toString();
    String command = "umask 022 && " + agent.get().getAgentCommand() + "receive-file " + agentPort + " "
            + Files.size(source) + " " + targetFileName + " ; echo -n :$?";
    LOG.debug("Executing %s", command);

    // If we fail to execute the command, stash the exception.  My experience during development
    // has been that the exception from checkReceiverOutput is more actionable.
    Exception shellException = null;
    try {
        device.executeShellCommand(command, receiver);
    } catch (Exception e) {
        shellException = e;
    }

    // Close the client socket, if we opened it.
    closer.close();

    try {
        AdbHelper.checkReceiverOutput(command, receiver);
    } catch (Exception e) {
        if (shellException != null) {
            e.addSuppressed(shellException);
        }
        throw e;
    }

    if (shellException != null) {
        throw shellException;
    }

    // The standard Java libraries on Android always create new files un-readable by other users.
    // We use the shell user or root to create these files, so we need to explicitly set the mode
    // to allow the app to read them.  Ideally, the agent would do this automatically, but
    // there's no easy way to do this in Java.  We can drop this if we drop support for the
    // Java agent.
    AdbHelper.executeCommandWithErrorChecking(device, "chmod 644 " + targetFileName);
}

From source file:org.gbif.occurrence.download.file.OccurrenceFileWriterJob.java

/**
 * Executes the job.query and creates a data file that will contain the records from the job.from to job.to positions.
 */
@Override
public Result call() throws IOException {
    // Creates a closer
    Closer closer = Closer.create();

    // Calculates the amount of output records
    final int nrOfOutputRecords = fileJob.getTo() - fileJob.getFrom();
    Map<UUID, Long> datasetUsages = Maps.newHashMap();

    // Creates a search request instance using the search request that comes in the fileJob
    SolrQuery solrQuery = createSolrQuery(fileJob.getQuery());

    try {
        ICsvMapWriter intCsvWriter = closer.register(
                new CsvMapWriter(new FileWriterWithEncoding(fileJob.getInterpretedDataFile(), Charsets.UTF_8),
                        CsvPreference.TAB_PREFERENCE));
        ICsvMapWriter verbCsvWriter = closer.register(
                new CsvMapWriter(new FileWriterWithEncoding(fileJob.getVerbatimDataFile(), Charsets.UTF_8),
                        CsvPreference.TAB_PREFERENCE));
        ICsvBeanWriter multimediaCsvWriter = closer.register(
                new CsvBeanWriter(new FileWriterWithEncoding(fileJob.getMultimediaDataFile(), Charsets.UTF_8),
                        CsvPreference.TAB_PREFERENCE));
        int recordCount = 0;
        while (recordCount < nrOfOutputRecords) {
            solrQuery.setStart(fileJob.getFrom() + recordCount);
            // Limit can't be greater than the maximum number of records assigned to this job
            solrQuery
                    .setRows(recordCount + LIMIT > nrOfOutputRecords ? nrOfOutputRecords - recordCount : LIMIT);
            final QueryResponse response = solrServer.query(solrQuery);
            for (Iterator<SolrDocument> itResults = response.getResults().iterator(); itResults
                    .hasNext(); recordCount++) {
                final Integer occKey = (Integer) itResults.next()
                        .getFieldValue(OccurrenceSolrField.KEY.getFieldName());
                // Writes the occurrence record obtained from HBase as Map<String,Object>.
                org.apache.hadoop.hbase.client.Result result = occurrenceMapReader.get(occKey);
                Map<String, String> occurrenceRecordMap = OccurrenceMapReader.buildOccurrenceMap(result);
                Map<String, String> verbOccurrenceRecordMap = OccurrenceMapReader
                        .buildVerbatimOccurrenceMap(result);
                if (occurrenceRecordMap != null) {
                    incrementDatasetUsage(datasetUsages, occurrenceRecordMap);
                    intCsvWriter.write(occurrenceRecordMap, INT_COLUMNS);
                    verbCsvWriter.write(verbOccurrenceRecordMap, VERB_COLUMNS);
                    writeMediaObjects(multimediaCsvWriter, result, occKey);
                } else {
                    LOG.error(String.format("Occurrence id %s not found!", occKey));
                }
            }
        }
    } catch (Exception e) {
        Throwables.propagate(e);
    } finally {
        closer.close();
        // Unlock the assigned lock.
        lock.unlock();
        LOG.info("Lock released, job detail: {} ", fileJob.toString());
    }
    return new Result(fileJob, datasetUsages);
}

From source file:org.glowroot.agent.live.JvmTool.java

private static <T> T processAndClose(InputStream in, InputStreamProcessor<T> processor) throws IOException {
    Closer closer = Closer.create();
    try {
        closer.register(in);
        return processor.process(in);
    } catch (Throwable t) {
        throw closer.rethrow(t);
    } finally {
        closer.close();
    }
}

From source file:gobblin.metastore.FsStateStore.java

/**
 * See {@link StateStore#putAll(String, String, Collection)}.
 *
 * <p>
 *   This implementation does not support putting the state objects into an existing store as
 *   append is to be supported by the Hadoop SequenceFile (HADOOP-7139).
 * </p>
 */
@Override
public void putAll(String storeName, String tableName, Collection<T> states) throws IOException {
    String tmpTableName = this.useTmpFileForPut ? TMP_FILE_PREFIX + tableName : tableName;
    Path tmpTablePath = new Path(new Path(this.storeRootDir, storeName), tmpTableName);

    if (!this.fs.exists(tmpTablePath) && !create(storeName, tmpTableName)) {
        throw new IOException("Failed to create a state file for table " + tmpTableName);
    }

    Closer closer = Closer.create();
    try {
        @SuppressWarnings("deprecation")
        SequenceFile.Writer writer = closer.register(SequenceFile.createWriter(this.fs, this.conf, tmpTablePath,
                Text.class, this.stateClass, SequenceFile.CompressionType.BLOCK, new DefaultCodec()));
        for (T state : states) {
            writer.append(new Text(Strings.nullToEmpty(state.getId())), state);
        }
    } catch (Throwable t) {
        throw closer.rethrow(t);
    } finally {
        closer.close();
    }

    if (this.useTmpFileForPut) {
        Path tablePath = new Path(new Path(this.storeRootDir, storeName), tableName);
        HadoopUtils.renamePath(this.fs, tmpTablePath, tablePath);
    }
}

From source file:gobblin.runtime.Task.java

/**
 * Instantiate a new {@link Task}.
 *
 * @param context a {@link TaskContext} containing all necessary information to construct and run a {@link Task}
 * @param taskStateTracker a {@link TaskStateTracker} for tracking task state
 * @param taskExecutor a {@link TaskExecutor} for executing the {@link Task} and its {@link Fork}s
 * @param countDownLatch an optional {@link java.util.concurrent.CountDownLatch} used to signal the task completion
 */
public Task(TaskContext context, TaskStateTracker taskStateTracker, TaskExecutor taskExecutor,
        Optional<CountDownLatch> countDownLatch) {
    this.taskContext = context;
    this.taskState = context.getTaskState();
    this.jobId = this.taskState.getJobId();
    this.taskId = this.taskState.getTaskId();
    this.taskKey = this.taskState.getTaskKey();
    this.taskStateTracker = taskStateTracker;
    this.taskExecutor = taskExecutor;
    this.countDownLatch = countDownLatch;
    this.closer = Closer.create();
    this.closer.register(this.taskState.getTaskBrokerNullable());
    this.extractor = closer
            .register(new InstrumentedExtractorDecorator<>(this.taskState, this.taskContext.getExtractor()));

    this.converter = closer.register(new MultiConverter(this.taskContext.getConverters()));
    try {
        this.rowChecker = closer.register(this.taskContext.getRowLevelPolicyChecker());
    } catch (Exception e) {
        try {
            closer.close();
        } catch (Throwable t) {
            LOG.error("Failed to close all open resources", t);
        }
        throw new RuntimeException("Failed to instantiate row checker.", e);
    }

    this.taskMode = getExecutionModel(this.taskState);
    this.recordsPulled = new AtomicLong(0);
    this.lastRecordPulledTimestampMillis = 0;
    this.shutdownRequested = new AtomicBoolean(false);
    this.shutdownLatch = new CountDownLatch(1);

    // Setup Streaming constructs

    this.watermarkingStrategy = "FineGrain"; // TODO: Configure

    if (isStreamingTask()) {
        Extractor underlyingExtractor = this.taskContext.getRawSourceExtractor();
        if (!(underlyingExtractor instanceof StreamingExtractor)) {
            LOG.error(
                    "Extractor {}  is not an instance of StreamingExtractor but the task is configured to run in continuous mode",
                    underlyingExtractor.getClass().getName());
            throw new TaskInstantiationException("Extraction " + underlyingExtractor.getClass().getName()
                    + " is not an instance of StreamingExtractor but the task is configured to run in continuous mode");
        }

        this.watermarkStorage = Optional.of(taskContext.getWatermarkStorage());
        Config config;
        try {
            config = ConfigUtils.propertiesToConfig(taskState.getProperties());
        } catch (Exception e) {
            LOG.warn("Failed to deserialize taskState into Config.. continuing with an empty config", e);
            config = ConfigFactory.empty();
        }

        long commitIntervalMillis = ConfigUtils.getLong(config,
                TaskConfigurationKeys.STREAMING_WATERMARK_COMMIT_INTERVAL_MILLIS,
                TaskConfigurationKeys.DEFAULT_STREAMING_WATERMARK_COMMIT_INTERVAL_MILLIS);
        if (watermarkingStrategy.equals("FineGrain")) { // TODO: Configure
            this.watermarkTracker = Optional.of(this.closer.register(new FineGrainedWatermarkTracker(config)));
            this.watermarkManager = Optional.of((WatermarkManager) this.closer
                    .register(new TrackerBasedWatermarkManager(this.watermarkStorage.get(),
                            this.watermarkTracker.get(), commitIntervalMillis, Optional.of(this.LOG))));

        } else {
            // writer-based watermarking
            this.watermarkManager = Optional
                    .of((WatermarkManager) this.closer.register(new MultiWriterWatermarkManager(
                            this.watermarkStorage.get(), commitIntervalMillis, Optional.of(this.LOG))));
            this.watermarkTracker = Optional.absent();
        }
    } else {
        this.watermarkManager = Optional.absent();
        this.watermarkTracker = Optional.absent();
        this.watermarkStorage = Optional.absent();
    }
}