Example usage for com.google.common.io Closer register

Introduction

On this page you can find example usage for com.google.common.io Closer register.

Prototype


public <C extends Closeable> C register(@Nullable C closeable) 

Document

Registers the given Closeable to be closed when this Closer is closed.
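
The examples below all follow the pattern recommended by the Guava Javadoc: register each resource with the Closer as it is opened, funnel any exception through rethrow, and close the Closer in a finally block. A minimal, self-contained sketch of that pattern (the file names here are placeholders):

import com.google.common.io.Closer;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

public class CloserExample {
    public static void copy() throws IOException {
        Closer closer = Closer.create();
        try {
            // register() returns its argument, so opening and registering is one expression
            InputStream in = closer.register(new FileInputStream("in.txt"));
            OutputStream out = closer.register(new FileOutputStream("out.txt"));
            byte[] buf = new byte[8192];
            int n;
            while ((n = in.read(buf)) != -1) {
                out.write(buf, 0, n);
            }
        } catch (Throwable t) {
            // rethrow records t as the primary exception so that exceptions
            // thrown while closing do not mask it
            throw closer.rethrow(t);
        } finally {
            closer.close();
        }
    }
}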

Usage

From source file:org.jclouds.vsphere.compute.config.VSphereComputeServiceAdapter.java

@Override
public Iterable<VirtualMachine> listNodes() {
    Closer closer = Closer.create();
    VSphereServiceInstance instance = serviceInstance.get();
    closer.register(instance);

    try {
        try {
            return listNodes(instance);
        } catch (Throwable e) {
            logger.error("Can't find vm", e);
            throw closer.rethrow(e);
        } finally {
            closer.close();
        }
    } catch (Throwable t) {
        return ImmutableSet.of();
    }
}

From source file:org.jclouds.vsphere.compute.config.VSphereComputeServiceAdapter.java

@Override
public Image getImage(String imageName) {
    Closer closer = Closer.create();
    VSphereServiceInstance instance = serviceInstance.get();
    closer.register(instance);
    try {
        try {
            return virtualMachineToImage
                    .apply(getVMwareTemplate(imageName, instance.getInstance().getRootFolder()));
        } catch (Throwable t) {
            throw closer.rethrow(t);
        } finally {
            closer.close();
        }
    } catch (IOException e) {
        Throwables.propagateIfPossible(e);
    }
    return null;
}

From source file:org.gbif.ipt.task.GenerateDCAT.java

/**
 * This method loads the DCAT settings from dcatsettings.properties.
 */
private Map<String, String> loadDCATSettings() {
    Map<String, String> loadedSettings = Maps.newHashMap();
    Closer closer = Closer.create();
    try {
        InputStream configStream = closer.register(streamUtils.classpathStream(DCAT_SETTINGS));
        if (configStream == null) {
            LOG.error("Failed to load DCAT settings: " + DCAT_SETTINGS);
        } else {
            Properties properties = new Properties();
            properties.load(configStream);
            for (Map.Entry<Object, Object> entry : properties.entrySet()) {
                String key = StringUtils.trim((String) entry.getKey());
                String value = StringUtils.trim((String) entry.getValue());
                if (key != null && value != null) {
                    loadedSettings.put(key, value);
                } else {
                    throw new InvalidConfigException(InvalidConfigException.TYPE.INVALID_PROPERTIES_FILE,
                            "Invalid properties file: " + DCAT_SETTINGS);
                }
            }
            LOG.debug("Loaded static DCAT settings: " + loadedSettings.toString());
        }
    } catch (Exception e) {
        LOG.error("Failed to load DCAT settings from: " + DCAT_SETTINGS, e);
    } finally {
        try {
            closer.close();
        } catch (IOException e) {
            LOG.debug("Failed to close input stream on DCAT settings file: " + DCAT_SETTINGS, e);
        }
    }
    return loadedSettings;
}

From source file:org.jclouds.vsphere.compute.config.VSphereComputeServiceAdapter.java

@Override
public Iterable<Image> listImages() {
    Closer closer = Closer.create();
    VSphereServiceInstance instance = serviceInstance.get();
    closer.register(instance);
    try {
        try {
            Iterable<VirtualMachine> nodes = listNodes(instance);
            Iterable<VirtualMachine> templates = Iterables.filter(nodes, VSpherePredicate.isTemplatePredicate);
            Iterable<Image> images = Iterables.transform(templates, virtualMachineToImage);
            return FluentIterable.from(images).toList();

        } catch (Throwable t) {
            throw closer.rethrow(t);
        } finally {
            closer.close();
        }
    } catch (Throwable t) {
        return ImmutableSet.of();
    }
}

From source file:org.jclouds.vsphere.compute.config.VSphereComputeServiceAdapter.java

@Override
public void destroyNode(String vmName) {
    Closer closer = Closer.create();
    VSphereServiceInstance instance = serviceInstance.get();
    closer.register(instance);
    try {
        try {
            VirtualMachine virtualMachine = getVM(vmName, instance.getInstance().getRootFolder());
            Task powerOffTask = virtualMachine.powerOffVM_Task();
            if (powerOffTask.waitForTask().equals(Task.SUCCESS))
                logger.debug(String.format("VM %s powered off", vmName));
            else
                logger.debug(String.format("VM %s could not be powered off", vmName));

            Task destroyTask = virtualMachine.destroy_Task();
            if (destroyTask.waitForTask().equals(Task.SUCCESS))
                logger.debug(String.format("VM %s destroyed", vmName));
            else
                logger.debug(String.format("VM %s could not be destroyed", vmName));
        } catch (Exception e) {
            logger.error("Can't destroy vm " + vmName, e);
            throw closer.rethrow(e);
        } finally {
            closer.close();
        }
    } catch (IOException e) {
        logger.trace(e.getMessage(), e);
    }

}

From source file:tachyon.worker.tiered.StorageDir.java

/**
 * Copies a block file from this StorageDir to another StorageDir. The caller must ensure that
 * the block is locked during copying.
 *
 * @param blockId Id of the block
 * @param dstDir destination StorageDir
 * @return true if success, false otherwise
 * @throws IOException
 */
public boolean copyBlock(long blockId, StorageDir dstDir) throws IOException {
    long size = getBlockSize(blockId);
    if (size == -1) {
        LOG.error("Block file doesn't exist! blockId:{}", blockId);
        return false;
    }
    boolean copySuccess = false;
    Closer closer = Closer.create();
    ByteBuffer buffer = null;
    BlockHandler bhDst = null;
    try {
        BlockHandler bhSrc = closer.register(getBlockHandler(blockId));
        bhDst = closer.register(dstDir.getBlockHandler(blockId));
        buffer = bhSrc.read(0, (int) size);
        copySuccess = (bhDst.append(0, buffer) == size);
    } finally {
        closer.close();
        CommonUtils.cleanDirectBuffer(buffer);
    }
    if (copySuccess) {
        Long accessTimeMs = mLastBlockAccessTimeMs.get(blockId);
        if (accessTimeMs != null) {
            dstDir.addBlockId(blockId, size, accessTimeMs, true);
        } else {
            // The block had been freed during our copy. Because we lock the block before copy, the
            // actual block file is not deleted but the blockId is deleted from mLastBlockAccessTimeMs.
            // So we delete the copied block and return the space. We still think copyBlock is
            // successful and return true as nothing needed to be copied.
            bhDst.delete();
            dstDir.returnSpace(Users.MIGRATE_DATA_USER_ID, size);
        }
    }
    return copySuccess;
}

From source file:gobblin.runtime.local.LocalJobManager.java

/**
 * Start the job configuration file monitor.
 *
 * <p>
 *     The job configuration file monitor currently only supports monitoring
 *     newly added job configuration files.
 * </p>
 */
private void startJobConfigFileMonitor() throws Exception {
    File jobConfigFileDir = new File(this.properties.getProperty(ConfigurationKeys.JOB_CONFIG_FILE_DIR_KEY));
    FileAlterationObserver observer = new FileAlterationObserver(jobConfigFileDir);
    FileAlterationListener listener = new FileAlterationListenerAdaptor() {
        /**
         * Called when a new job configuration file is dropped in.
         */
        @Override
        public void onFileCreate(File file) {
            int pos = file.getName().lastIndexOf(".");
            String fileExtension = pos >= 0 ? file.getName().substring(pos + 1) : "";
            if (!jobConfigFileExtensions.contains(fileExtension)) {
                // Not a job configuration file, ignore.
                return;
            }

            LOG.info("Detected new job configuration file " + file.getAbsolutePath());
            Properties jobProps = new Properties();
            // First add framework configuration properties
            jobProps.putAll(properties);
            // Then load job configuration properties from the new job configuration file
            loadJobConfig(jobProps, file);

            // Schedule the new job
            try {
                boolean runOnce = Boolean
                        .valueOf(jobProps.getProperty(ConfigurationKeys.JOB_RUN_ONCE_KEY, "false"));
                scheduleJob(jobProps, runOnce ? new RunOnceJobListener() : new EmailNotificationJobListener());
            } catch (Throwable t) {
                LOG.error("Failed to schedule new job loaded from job configuration file "
                        + file.getAbsolutePath(), t);
            }
        }

        /**
         * Called when a job configuration file is changed.
         */
        @Override
        public void onFileChange(File file) {
            int pos = file.getName().lastIndexOf(".");
            String fileExtension = pos >= 0 ? file.getName().substring(pos + 1) : "";
            if (!jobConfigFileExtensions.contains(fileExtension)) {
                // Not a job configuration file, ignore.
                return;
            }

            LOG.info("Detected change to job configuration file " + file.getAbsolutePath());
            Properties jobProps = new Properties();
            // First add framework configuration properties
            jobProps.putAll(properties);
            // Then load the updated job configuration properties
            loadJobConfig(jobProps, file);

            String jobName = jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY);
            try {
                // First unschedule and delete the old job
                unscheduleJob(jobName);
                boolean runOnce = Boolean
                        .valueOf(jobProps.getProperty(ConfigurationKeys.JOB_RUN_ONCE_KEY, "false"));
                // Reschedule the job with the new job configuration
                scheduleJob(jobProps, runOnce ? new RunOnceJobListener() : new EmailNotificationJobListener());
            } catch (Throwable t) {
                LOG.error("Failed to update existing job " + jobName, t);
            }
        }

        private void loadJobConfig(Properties jobProps, File file) {
            Closer closer = Closer.create();
            try {
                Reader propsReader = closer.register(new InputStreamReader(new FileInputStream(file),
                        ConfigurationKeys.DEFAULT_CHARSET_ENCODING));
                jobProps.load(propsReader);
                jobProps.setProperty(ConfigurationKeys.JOB_CONFIG_FILE_PATH_KEY, file.getAbsolutePath());
            } catch (Exception e) {
                LOG.error("Failed to load job configuration from file " + file.getAbsolutePath(), e);
            } finally {
                try {
                    closer.close();
                } catch (IOException e) {
                    LOG.error("unable to close properties file:" + e, e);
                }
            }
        }
    };

    observer.addListener(listener);
    this.fileAlterationMonitor.addObserver(observer);
    this.fileAlterationMonitor.start();
}

From source file:com.spotify.apollo.core.ServiceImpl.java

ListeningScheduledExecutorService createScheduledExecutorService(Closer closer) {
    final ListeningScheduledExecutorService scheduledExecutorService = MoreExecutors.listeningDecorator(
            Executors.newScheduledThreadPool(Math.max(Runtime.getRuntime().availableProcessors(), 2),
                    new ThreadFactoryBuilder().setNameFormat(serviceName + "-scheduled-%d").build()));
    closer.register(asCloseable(scheduledExecutorService));
    return scheduledExecutorService;
}
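
The asCloseable helper above is project-local and its source is not shown here. As a rough sketch (the name and shutdown policy are assumptions, not Apollo's actual implementation), such an adapter could look like this:

import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.ExecutorService;

// Hypothetical adapter, assumed for illustration: lets a Closer shut an
// ExecutorService down when the Closer itself is closed.
final class ExecutorCloseables {
    static Closeable asCloseable(final ExecutorService executor) {
        return new Closeable() {
            @Override
            public void close() throws IOException {
                executor.shutdown(); // stop accepting new tasks; queued tasks still run
            }
        };
    }
}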

From source file:org.gbif.occurrence.download.oozie.ArchiveBuilder.java

/**
 * Creates a Map of dataset UUIDs to their record counts.
 */
private Map<UUID, Integer> readDatasetCounts(Path citationSrc) throws IOException {
    // the hive query result is a directory with one or more files - read them all into a uuid map
    Map<UUID, Integer> srcDatasets = Maps.newHashMap(); // map of uuids to occurrence counts
    FileStatus[] citFiles = hdfs.listStatus(citationSrc);
    int invalidUuids = 0;
    Closer closer = Closer.create();
    for (FileStatus fs : citFiles) {
        if (!fs.isDirectory()) {
            BufferedReader citationReader = new BufferedReader(
                    new InputStreamReader(hdfs.open(fs.getPath()), Charsets.UTF_8));
            closer.register(citationReader);
            try {
                String line = citationReader.readLine();
                while (line != null) {
                    if (!Strings.isNullOrEmpty(line)) {
                        // we also catch errors for every dataset so we don't break the loop
                        try {
                            Iterator<String> iter = TAB_SPLITTER.split(line).iterator();
                            // play it safe and make sure we got a UUID - even though our API doesn't require it
                            UUID key = UUID.fromString(iter.next());
                            Integer count = Integer.parseInt(iter.next());
                            srcDatasets.put(key, count);
                            // for small downloads, persist dataset usages while building the citations file
                            if (!isSmallDownload) {
                                persistDatasetUsage(count, downloadId, key);
                            }
                        } catch (IllegalArgumentException e) {
                            // ignore invalid UUIDs
                            LOG.info("Found invalid UUID as datasetId {}", line);
                            invalidUuids++;
                        }
                    }
                    line = citationReader.readLine();
                }
            } finally {
                closer.close();
            }
        }
    }
    if (invalidUuids > 0) {
        LOG.info("Found {} invalid dataset UUIDs", invalidUuids);
    } else {
        LOG.info("All {} dataset UUIDs are valid", srcDatasets.size());
    }
    return srcDatasets;
}

From source file:org.apache.jackrabbit.oak.run.RecoveryCommand.java

@Override
public void execute(String... args) throws Exception {
    MapFactory.setInstance(new MapDBMapFactory());
    Closer closer = Closer.create();
    String h = "recovery mongodb://host:port/database { dryRun }";
    try {
        NodeStore store = Utils.bootstrapNodeStore(args, closer, h);
        if (!(store instanceof DocumentNodeStore)) {
            System.err.println("Recovery only available for DocumentNodeStore");
            System.exit(1);
        }
        DocumentNodeStore dns = (DocumentNodeStore) store;
        if (!(dns.getDocumentStore() instanceof MongoDocumentStore)) {
            System.err.println("Recovery only available for MongoDocumentStore");
            System.exit(1);
        }
        MongoDocumentStore docStore = (MongoDocumentStore) dns.getDocumentStore();
        LastRevRecoveryAgent agent = new LastRevRecoveryAgent(dns);
        MongoMissingLastRevSeeker seeker = new MongoMissingLastRevSeeker(docStore, dns.getClock());
        CloseableIterable<NodeDocument> docs = seeker.getCandidates(0);
        closer.register(docs);
        boolean dryRun = Arrays.asList(args).contains("dryRun");
        agent.recover(docs.iterator(), dns.getClusterId(), dryRun);
    } catch (Throwable e) {
        throw closer.rethrow(e);
    } finally {
        closer.close();
    }
}