Example usage for com.google.common.io Closer register

Introduction

On this page you can find usage examples for com.google.common.io Closer register.

Prototype


public <C extends Closeable> C register(@Nullable C closeable) 

Document

Registers the given closeable to be closed when this Closer is closed.
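
Before the real-world examples, here is a minimal sketch of the register/rethrow/close pattern that the Guava documentation describes. The class name and file paths are illustrative assumptions, not drawn from the examples below.

import com.google.common.io.Closer;

import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

public class CloserRegisterExample {
    public static void copy() throws IOException {
        Closer closer = Closer.create();
        try {
            // register() returns its argument, so a stream can be registered at creation.
            InputStream in = closer.register(new FileInputStream("source.bin")); // hypothetical path
            OutputStream out = closer.register(new FileOutputStream("dest.bin")); // hypothetical path
            byte[] buf = new byte[8192];
            int n;
            while ((n = in.read(buf)) != -1) {
                out.write(buf, 0, n);
            }
        } catch (Throwable t) {
            // rethrow() records the primary exception so close() can suppress secondary ones.
            throw closer.rethrow(t);
        } finally {
            // Closes registered closeables in reverse order of registration.
            closer.close();
        }
    }
}

Because register returns the closeable it is given, most of the examples below register a resource at the point where it is created or obtained.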

Usage

From source file:tachyon.shell.TfsShell.java

/**
 * Loads a file or directory in Tachyon space, making it resident in memory.
 *
 * @param filePath The TachyonURI path to load into Tachyon memory
 * @throws IOException
 */
public void load(TachyonURI filePath) throws IOException {
    try {
        TachyonFile fd = mTfs.open(filePath);
        FileInfo fInfo = mTfs.getInfo(fd);
        if (fInfo.isFolder) {
            List<FileInfo> files = mTfs.listStatus(fd);
            Collections.sort(files);
            for (FileInfo file : files) {
                TachyonURI newPath = new TachyonURI(file.getPath());
                load(newPath);
            }
        } else {
            if (fInfo.getInMemoryPercentage() == 100) {
                // The file has already been fully loaded into Tachyon memory.
                return;
            }
            Closer closer = Closer.create();
            try {
                InStreamOptions op = new InStreamOptions.Builder(mTachyonConf)
                        .setTachyonStorageType(TachyonStorageType.STORE).build();
                FileInStream in = closer.register(mTfs.getInStream(fd, op));
                byte[] buf = new byte[8 * Constants.MB];
                // Reading the stream to the end is what pulls the file's blocks into Tachyon memory.
                while (in.read(buf) != -1) {
                    // Discard the data; the read itself caches the blocks.
                }
            } catch (Exception e) {
                throw closer.rethrow(e);
            } finally {
                closer.close();
            }
        }
        System.out.println(filePath + " loaded");
    } catch (TachyonException e) {
        throw new IOException(e.getMessage(), e);
    }
}

From source file:org.ow2.proactive.scheduler.rest.SchedulerClient.java

@Override
public void renewSession() throws NotConnectedException {
    Closer closer = Closer.create();
    try {
        LoginForm loginForm = new LoginForm();
        loginForm.setUsername(connectionInfo.getLogin());
        loginForm.setPassword(connectionInfo.getPassword());
        if (connectionInfo.getCredentialFile() != null) {
            FileInputStream inputStream = new FileInputStream(connectionInfo.getCredentialFile());
            closer.register(inputStream);
            loginForm.setCredential(inputStream);
        }
        sid = restApi().loginOrRenewSession(sid, loginForm);

    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        try {
            closer.close();
        } catch (IOException e) {
            // ignore
        }
    }
}

From source file:com.spotify.apollo.core.ServiceImpl.java

InstanceImpl initInstance(CoreModule coreModule, Set<ApolloModule> modules, Closer closer,
        ListeningExecutorService executorService, ListeningScheduledExecutorService scheduledExecutorService,
        CountDownLatch shutdownRequested, CountDownLatch stopped) {
    List<ApolloModule> modulesSortedOnPriority = FluentIterable.from(modules)
            .toSortedList(Ordering.natural().reverse().onResultOf(ModulePriorityOrdering.INSTANCE));

    Iterable<Module> allModules = concat(of(coreModule), modulesSortedOnPriority);
    Injector injector = Guice.createInjector(Stage.PRODUCTION, allModules);

    Set<Key<?>> keysToLoad = Sets.newLinkedHashSet();
    for (ApolloModule apolloModule : modulesSortedOnPriority) {
        LOG.info("Loaded module {}", apolloModule);
        keysToLoad.addAll(apolloModule.getLifecycleManaged());
    }

    for (Key<?> key : keysToLoad) {
        Object obj = injector.getInstance(key);
        if (Closeable.class.isAssignableFrom(obj.getClass())) {
            LOG.info("Managing lifecycle of {}", key.getTypeLiteral());
            closer.register(Closeable.class.cast(obj));
        }
    }

    return new InstanceImpl(injector, executorService, scheduledExecutorService, shutdownRequested, stopped);
}

From source file:com.spotify.styx.StyxScheduler.java

@Override
public void create(Environment environment) {
    final Config config = environment.config();
    final Closer closer = environment.closer();

    final Thread.UncaughtExceptionHandler uncaughtExceptionHandler = (thread, throwable) -> LOG
            .error("Thread {} threw {}", thread, throwable);
    final ThreadFactory schedulerTf = new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat("styx-scheduler-%d").setUncaughtExceptionHandler(uncaughtExceptionHandler).build();
    final ThreadFactory eventTf = new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat("styx-event-worker-%d").setUncaughtExceptionHandler(uncaughtExceptionHandler)
            .build();
    final ThreadFactory dockerRunnerTf = new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat("styx-docker-runner-%d").setUncaughtExceptionHandler(uncaughtExceptionHandler)
            .build();

    final ScheduledExecutorService executor = executorFactory.create(3, schedulerTf);
    final ExecutorService eventWorker = Executors.newFixedThreadPool(16, eventTf);
    final ExecutorService dockerRunnerExecutor = Executors.newSingleThreadExecutor(dockerRunnerTf);
    closer.register(executorCloser("scheduler", executor));
    closer.register(executorCloser("event-worker", eventWorker));
    closer.register(executorCloser("docker-runner", dockerRunnerExecutor));

    final Stats stats = statsFactory.apply(environment);
    final WorkflowCache workflowCache = new InMemWorkflowCache();
    final Storage storage = instrument(Storage.class, storageFactory.apply(environment), stats, time);

    warmUpCache(workflowCache, storage);

    final QueuedStateManager stateManager = closer.register(new QueuedStateManager(time, eventWorker, storage));

    final Config staleStateTtlConfig = config.getConfig(STYX_STALE_STATE_TTL_CONFIG);
    final TimeoutConfig timeoutConfig = TimeoutConfig.createFromConfig(staleStateTtlConfig);

    final Supplier<String> dockerId = new CachedSupplier<>(storage::globalDockerRunnerId, time);
    final DockerRunner routingDockerRunner = DockerRunner.routing(
            id -> dockerRunnerFactory.create(id, environment, stateManager, executor, stats), dockerId);
    final DockerRunner dockerRunner = instrument(DockerRunner.class, routingDockerRunner, stats, time);
    final Publisher publisher = publisherFactory.apply(environment);

    final RateLimiter submissionRateLimiter = RateLimiter.create(DEFAULT_SUBMISSION_RATE_PER_SEC);

    final OutputHandler[] outputHandlers = new OutputHandler[] { transitionLogger(""),
            new DockerRunnerHandler(dockerRunner, stateManager, storage, submissionRateLimiter,
                    dockerRunnerExecutor),
            new TerminationHandler(retryUtil, stateManager), new MonitoringHandler(time, stats),
            new PublisherHandler(publisher), new ExecutionDescriptionHandler(storage, stateManager) };
    final StateFactory stateFactory = (workflowInstance) -> RunState.fresh(workflowInstance, time,
            outputHandlers);

    final TriggerListener trigger = new StateInitializingTrigger(stateFactory, stateManager, storage);
    final TriggerManager triggerManager = new TriggerManager(trigger, time, storage, stats);

    final WorkflowInitializer workflowInitializer = new WorkflowInitializer(storage, time);
    final Consumer<Workflow> workflowRemoveListener = workflowRemoved(storage);
    final Consumer<Workflow> workflowChangeListener = workflowChanged(workflowCache, workflowInitializer, stats,
            stateManager);

    final Scheduler scheduler = new Scheduler(time, timeoutConfig, stateManager, workflowCache, storage,
            trigger);

    restoreState(storage, outputHandlers, stateManager);
    startTriggerManager(triggerManager, executor);
    startScheduleSources(environment, executor, workflowChangeListener, workflowRemoveListener);
    startScheduler(scheduler, executor);
    startRuntimeConfigUpdate(storage, executor, submissionRateLimiter);
    setupMetrics(stateManager, workflowCache, storage, submissionRateLimiter, stats);

    final SchedulerResource schedulerResource = new SchedulerResource(stateManager, trigger, storage, time);

    environment.routingEngine().registerAutoRoute(Route.sync("GET", "/ping", rc -> "pong"))
            .registerRoutes(schedulerResource.routes());

    this.stateManager = stateManager;
    this.scheduler = scheduler;
    this.triggerManager = triggerManager;
}

From source file:org.apache.jackrabbit.oak.run.DataStoreCheckCommand.java

@Override
public void execute(String... args) throws Exception {
    OptionParser parser = new OptionParser();
    parser.allowsUnrecognizedOptions();

    String helpStr = "datastorecheck [--id] [--ref] [--consistency] [--store <path>|<mongo_uri>] "
            + "[--s3ds <s3ds_config>|--fds <fds_config>] [--dump <path>]";

    Closer closer = Closer.create();
    try {
        // Options for operations requested
        OptionSpecBuilder idOp = parser.accepts("id", "Get ids");
        OptionSpecBuilder refOp = parser.accepts("ref", "Get references");
        OptionSpecBuilder consistencyOp = parser.accepts("consistency", "Check consistency");

        // Node Store - needed for --ref, --consistency
        ArgumentAcceptingOptionSpec<String> store = parser.accepts("store", "Node Store")
                .requiredIf(refOp, consistencyOp).withRequiredArg().ofType(String.class);
        // Optional argument to specify the dump path
        ArgumentAcceptingOptionSpec<String> dump = parser.accepts("dump", "Dump Path").withRequiredArg()
                .ofType(String.class);
        OptionSpec segmentTar = parser.accepts("segment-tar", "Use oak-segment-tar instead of oak-segment");

        OptionSpec<?> help = parser.acceptsAll(asList("h", "?", "help"), "show help").forHelp();

        // Required rules (any one of --id, --ref, --consistency)
        idOp.requiredUnless(refOp, consistencyOp);
        refOp.requiredUnless(idOp, consistencyOp);
        consistencyOp.requiredUnless(idOp, refOp);

        OptionSet options = null;
        try {
            options = parser.parse(args);
        } catch (Exception e) {
            System.err.println(e);
            parser.printHelpOn(System.err);
            return;
        }

        if (options.has(help)) {
            parser.printHelpOn(System.out);
            return;
        }

        String dumpPath = JAVA_IO_TMPDIR.value();
        if (options.has(dump)) {
            dumpPath = options.valueOf(dump);
        }

        GarbageCollectableBlobStore blobStore = null;
        BlobReferenceRetriever marker = null;
        if (options.has(store)) {
            String source = options.valueOf(store);
            if (source.startsWith(MongoURI.MONGODB_PREFIX)) {
                MongoClientURI uri = new MongoClientURI(source);
                MongoClient client = new MongoClient(uri);
                DocumentNodeStore nodeStore = new DocumentMK.Builder()
                        .setMongoDB(client.getDB(uri.getDatabase())).getNodeStore();
                closer.register(Utils.asCloseable(nodeStore));
                blobStore = (GarbageCollectableBlobStore) nodeStore.getBlobStore();
                marker = new DocumentBlobReferenceRetriever(nodeStore);
            } else if (options.has(segmentTar)) {
                marker = SegmentTarUtils.newBlobReferenceRetriever(source, closer);
            } else {
                FileStore fileStore = openFileStore(source);
                closer.register(Utils.asCloseable(fileStore));
                marker = new SegmentBlobReferenceRetriever(fileStore.getTracker());
            }
        }

        // Initialize S3/FileDataStore if configured
        GarbageCollectableBlobStore dataStore = Utils.bootstrapDataStore(args, closer);
        if (dataStore != null) {
            blobStore = dataStore;
        }

        // blob store still not initialized means configuration not supported
        if (blobStore == null) {
            System.err.println("Operation not defined for SegmentNodeStore without external datastore");
            parser.printHelpOn(System.err);
            return;
        }

        FileRegister register = new FileRegister(options);
        closer.register(register);

        if (options.has(idOp) || options.has(consistencyOp)) {
            retrieveBlobIds(blobStore, register.createFile(idOp, dumpPath));
        }

        if (options.has(refOp) || options.has(consistencyOp)) {
            retrieveBlobReferences(blobStore, marker, register.createFile(refOp, dumpPath));
        }

        if (options.has(consistencyOp)) {
            checkConsistency(register.get(idOp), register.get(refOp),
                    register.createFile(consistencyOp, dumpPath));
        }
    } catch (Throwable t) {
        t.printStackTrace();
    } finally {
        closer.close();
    }
}

From source file:org.apache.aurora.scheduler.discovery.CuratorSingletonService.java

@Override
public synchronized void lead(InetSocketAddress endpoint, Map<String, InetSocketAddress> additionalEndpoints,
        LeadershipListener listener) throws LeadException, InterruptedException {

    requireNonNull(endpoint);
    requireNonNull(additionalEndpoints);
    requireNonNull(listener);

    Closer closer = Closer.create();

    CountDownLatch giveUpLeadership = new CountDownLatch(1);

    // We do not use the suggested `LeaderSelectorListenerAdapter` or the LeaderLatch class
    // because we want to have precise control over state changes. By default the listener and the
    // latch class treat `SUSPENDED` (connection loss) as fatal and a reason to lose leadership.
    // To make the scheduler resilient to connection blips and long GC pauses, we only treat
    // `LOST` (session loss) as fatal.

    ExecutorService executor = AsyncUtil.singleThreadLoggingScheduledExecutor("LeaderSelector-%d", LOG);

    LeaderSelectorListener leaderSelectorListener = new LeaderSelectorListener() {
        @Override
        public void takeLeadership(CuratorFramework curatorFramework) throws Exception {
            listener.onLeading(new LeaderControl() {
                @Override
                public void advertise() throws AdvertiseException, InterruptedException {
                    advertiser.advertise(closer, endpoint, additionalEndpoints);
                }

                @Override
                public void leave() throws LeaveException {
                    try {
                        giveUpLeadership.countDown();
                        closer.close();
                    } catch (IOException e) {
                        throw new LeaveException("Failed to abdicate leadership of group at " + groupPath, e);
                    }
                }
            });

            // The contract is to block as long as we want leadership. The leader never gives up
            // leadership voluntarily, only when asked to shutdown so we block until our shutdown
            // callback has been executed or we have lost our ZK connection.
            giveUpLeadership.await();
        }

        @Override
        public void stateChanged(CuratorFramework curatorFramework, ConnectionState newState) {
            if (newState == ConnectionState.LOST) {
                giveUpLeadership.countDown();
                listener.onDefeated();
                throw new CancelLeadershipException();
            }
        }
    };

    LeaderSelector leaderSelector = new LeaderSelector(client, groupPath, executor, leaderSelectorListener);

    leaderSelector.setId(endpoint.getHostName());

    try {
        leaderSelector.start();
    } catch (Exception e) {
        // NB: We failed to lead; so we never could have advertised and there is no need to close the
        // closer.
        throw new LeadException("Failed to begin awaiting leadership of group " + groupPath, e);
    }
    closer.register(leaderSelector);
}

From source file:com.metamx.druid.indexing.coordinator.ForkingTaskRunner.java

@Override
public ListenableFuture<TaskStatus> run(final Task task) {
    synchronized (tasks) {
        if (!tasks.containsKey(task.getId())) {
            tasks.put(task.getId(), new ForkingTaskRunnerWorkItem(task, exec.submit(new Callable<TaskStatus>() {
                @Override
                public TaskStatus call() {
                    final String attemptUUID = UUID.randomUUID().toString();
                    final File taskDir = new File(config.getBaseTaskDir(), task.getId());
                    final File attemptDir = new File(taskDir, attemptUUID);

                    final ProcessHolder processHolder;

                    try {
                        final Closer closer = Closer.create();
                        try {
                            if (!attemptDir.mkdirs()) {
                                throw new IOException(
                                        String.format("Could not create directories: %s", attemptDir));
                            }

                            final File taskFile = new File(attemptDir, "task.json");
                            final File statusFile = new File(attemptDir, "status.json");
                            final File logFile = new File(attemptDir, "log");

                            // time to adjust process holders
                            synchronized (tasks) {
                                final ForkingTaskRunnerWorkItem taskWorkItem = tasks.get(task.getId());

                                // The null check must run before any dereference of taskWorkItem.
                                if (taskWorkItem == null) {
                                    throw new ISE("WTF?! TaskInfo disappeared for task: %s", task.getId());
                                }

                                if (taskWorkItem.shutdown) {
                                    throw new IllegalStateException("Task has been shut down!");
                                }

                                if (taskWorkItem.processHolder != null) {
                                    throw new ISE("WTF?! TaskInfo already has a process holder for task: %s",
                                            task.getId());
                                }

                                final List<String> command = Lists.newArrayList();
                                final int childPort = findUnusedPort();
                                final String childHost = String.format(config.getHostPattern(), childPort);

                                command.add(config.getJavaCommand());
                                command.add("-cp");
                                command.add(config.getJavaClasspath());

                                Iterables.addAll(command, Splitter.on(CharMatcher.WHITESPACE).omitEmptyStrings()
                                        .split(config.getJavaOptions()));

                                for (String propName : props.stringPropertyNames()) {
                                    for (String allowedPrefix : config.getAllowedPrefixes()) {
                                        if (propName.startsWith(allowedPrefix)) {
                                            command.add(String.format("-D%s=%s", propName,
                                                    props.getProperty(propName)));
                                        }
                                    }
                                }

                                // Override child JVM specific properties
                                for (String propName : props.stringPropertyNames()) {
                                    if (propName.startsWith(CHILD_PROPERTY_PREFIX)) {
                                        command.add(String.format("-D%s=%s",
                                                propName.substring(CHILD_PROPERTY_PREFIX.length()),
                                                props.getProperty(propName)));
                                    }
                                }

                                String nodeType = task.getNodeType();
                                if (nodeType != null) {
                                    command.add(String.format("-Ddruid.executor.nodeType=%s", nodeType));
                                }

                                command.add(String.format("-Ddruid.host=%s", childHost));
                                command.add(String.format("-Ddruid.port=%d", childPort));

                                command.add(config.getMainClass());
                                command.add(taskFile.toString());
                                command.add(statusFile.toString());

                                jsonMapper.writeValue(taskFile, task);

                                log.info("Running command: %s", Joiner.on(" ").join(command));
                                taskWorkItem.processHolder = new ProcessHolder(
                                        new ProcessBuilder(ImmutableList.copyOf(command))
                                                .redirectErrorStream(true).start(),
                                        logFile, childPort);

                                processHolder = taskWorkItem.processHolder;
                                processHolder.registerWithCloser(closer);
                            }

                            log.info("Logging task %s output to: %s", task.getId(), logFile);

                            final InputStream fromProc = processHolder.process.getInputStream();
                            final OutputStream toLogfile = closer
                                    .register(Files.newOutputStreamSupplier(logFile).getOutput());

                            boolean runFailed = true;

                            ByteStreams.copy(fromProc, toLogfile);
                            final int statusCode = processHolder.process.waitFor();
                            log.info("Process exited with status[%d] for task: %s", statusCode, task.getId());

                            if (statusCode == 0) {
                                runFailed = false;
                            }

                            // Upload task logs

                            // XXX: Consider uploading periodically for very long-lived tasks to prevent
                            // XXX: bottlenecks at the end or the possibility of losing a lot of logs all
                            // XXX: at once.

                            taskLogPusher.pushTaskLog(task.getId(), logFile);

                            if (!runFailed) {
                                // Process exited successfully
                                return jsonMapper.readValue(statusFile, TaskStatus.class);
                            } else {
                                // Process exited unsuccessfully
                                return TaskStatus.failure(task.getId());
                            }
                        } catch (Throwable t) {
                            throw closer.rethrow(t);
                        } finally {
                            closer.close();
                        }
                    } catch (Exception e) {
                        log.info(e, "Exception caught during execution");
                        throw Throwables.propagate(e);
                    } finally {
                        try {
                            synchronized (tasks) {
                                final ForkingTaskRunnerWorkItem taskWorkItem = tasks.remove(task.getId());
                                if (taskWorkItem != null && taskWorkItem.processHolder != null) {
                                    taskWorkItem.processHolder.process.destroy();
                                }
                            }

                            log.info("Removing temporary directory: %s", attemptDir);
                            FileUtils.deleteDirectory(attemptDir);
                        } catch (Exception e) {
                            log.error(e, "Suppressing exception caught while cleaning up task");
                        }
                    }
                }
            })));
        }

        return tasks.get(task.getId()).getResult();
    }
}