Example usage for com.google.common.util.concurrent Service state

Introduction

This page collects example usages of the com.google.common.util.concurrent Service#state() method from open-source projects.

Prototype

State state();

Document

Returns the lifecycle state of the service.
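
For orientation, here is a minimal, self-contained sketch of how state() reports the lifecycle as a service moves from NEW through RUNNING to TERMINATED. The SleepingService class is hypothetical; AbstractExecutionThreadService and the Service.State values are standard Guava.

import com.google.common.util.concurrent.AbstractExecutionThreadService;
import com.google.common.util.concurrent.Service;

public class StateDemo {
    /** A hypothetical service that sleeps in a loop until asked to stop. */
    static class SleepingService extends AbstractExecutionThreadService {
        @Override
        protected void run() throws Exception {
            while (isRunning()) {
                Thread.sleep(100);
            }
        }
    }

    public static void main(String[] args) {
        Service service = new SleepingService();
        System.out.println(service.state()); // NEW

        service.startAsync().awaitRunning();
        System.out.println(service.state()); // RUNNING

        service.stopAsync().awaitTerminated();
        System.out.println(service.state()); // TERMINATED
    }
}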

Usage

From source file:org.libdohj.cate.controller.MainController.java

/**
 * Connect to the specified network.
 *
 * @param params network parameters for the relay network to connect to.
 * @param dataDir directory to store data files in.
 * @return the service that has been started; can be used to verify that
 * startup completes successfully.
 */
public Service connectTo(final NetworkParameters params, final File dataDir) {
    final Context context = new Context(params);
    final NetworkThreadFactory threadFactory = new NetworkThreadFactory(context);
    final ExecutorService executor = Executors.newSingleThreadExecutor(threadFactory);
    final Network network = new Network(context, this, dataDir, executor, this::registerWallet);
    final StringProperty statusProperty = new SimpleStringProperty("Starting");

    threadFactory.setUncaughtExceptionHandler(buildUncaughtExceptionHandler(network));
    networks.add(network);

    // Add a listener to shut down the executor service once the network
    // service it manages terminates.
    network.addListener(new Service.Listener() {
        @Override
        public void starting() {
            statusProperty.setValue(resources.getString("walletList.networkStatus.starting"));
        }

        @Override
        public void running() {
            statusProperty.setValue(resources.getString("walletList.networkStatus.running"));
        }

        @Override
        public void stopping(Service.State from) {
            statusProperty.setValue(resources.getString("walletList.networkStatus.stopping"));
        }

        @Override
        public void terminated(Service.State from) {
            executor.shutdown();
            Platform.runLater(() -> {
                activeNetworks.remove(network);
            });
            statusProperty.setValue(resources.getString("walletList.networkStatus.terminated"));
        }

        @Override
        public void failed(Service.State from, Throwable failure) {
            statusProperty.setValue(resources.getString("walletList.networkStatus.failed"));
        }
    }, executor);

    final NetworkDetail detail = new NetworkDetail(executor, statusProperty);
    networkDetails.put(network, detail);

    final Service service = network.startAsync();
    return service;
}
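
The doc comment notes that the returned service can be used to verify that startup completes successfully. A minimal sketch of that caller-side check, using the standard Guava Service API (the mainController variable is assumed here, and java.util.concurrent.TimeUnit and TimeoutException imports are needed):

Service service = mainController.connectTo(params, dataDir);
try {
    // Block until the network service reaches RUNNING, or give up after 30 seconds.
    // awaitRunning throws IllegalStateException if the service reaches FAILED instead.
    service.awaitRunning(30, TimeUnit.SECONDS);
} catch (TimeoutException e) {
    System.err.println("Network did not start in time; state: " + service.state());
}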

From source file:co.cask.tigon.internal.app.runtime.distributed.AbstractDistributedProgramRunner.java

/**
 * Adds a listener to the given TwillController that deletes local temp files once the program has
 * started or terminated. The local copies can be removed as soon as the program is running, since
 * Twill keeps the files in HDFS and no longer needs them locally.
 *
 * @return The same TwillController instance.
 */
private TwillController addCleanupListener(TwillController controller, final File hConfFile,
        final File cConfFile, final Program program, final File programDir) {

    final AtomicBoolean deleted = new AtomicBoolean(false);
    controller.addListener(new ServiceListenerAdapter() {
        @Override
        public void running() {
            cleanup();
        }

        @Override
        public void terminated(Service.State from) {
            cleanup();
        }

        @Override
        public void failed(Service.State from, Throwable failure) {
            cleanup();
        }

        private void cleanup() {
            if (deleted.compareAndSet(false, true)) {
                LOG.debug("Cleanup tmp files for {}: {} {} {}", program.getName(), hConfFile, cConfFile,
                        program.getJarLocation().toURI());
                hConfFile.delete();
                cConfFile.delete();
                try {
                    program.getJarLocation().delete();
                } catch (IOException e) {
                    LOG.warn("Failed to delete program jar {}", program.getJarLocation().toURI(), e);
                }
                try {
                    FileUtils.deleteDirectory(programDir);
                } catch (IOException e) {
                    LOG.warn("Failed to delete program directory {}", programDir, e);
                }
            }
        }
    }, Threads.SAME_THREAD_EXECUTOR);
    return controller;
}
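
Two details of this listener are worth noting: the AtomicBoolean guard makes cleanup() idempotent, since running(), terminated(), and failed() can each fire for the same controller, and registering with Threads.SAME_THREAD_EXECUTOR runs the callbacks synchronously on the thread driving the state transition rather than handing them off to a pool.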

From source file:gobblin.runtime.spec_catalog.TopologyCatalog.java

@Override
public void put(Spec spec) {
    try {
        Preconditions.checkState(state() == Service.State.RUNNING,
                String.format("%s is not running.", this.getClass().getName()));
        Preconditions.checkNotNull(spec);

        log.info(String.format("Adding TopologySpec with URI: %s and Config: %s", spec.getUri(),
                ((TopologySpec) spec).getConfigAsProperties()));
        if (specStore.exists(spec.getUri())) {
            specStore.updateSpec(spec);
            this.listeners.onUpdateSpec(spec);
        } else {
            specStore.addSpec(spec);
            this.listeners.onAddSpec(spec);
        }

    } catch (IOException | SpecNotFoundException e) {
        throw new RuntimeException("Cannot add Spec to Spec store: " + spec, e);
    }
}
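
The Preconditions.checkState(state() == Service.State.RUNNING, ...) guard is the recurring pattern on this page: TopologyCatalog is itself a Guava Service, so checking its own state() rejects calls made before the catalog has started or after it has shut down.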

From source file:com.griddynamics.jagger.engine.e1.process.WorkloadProcess.java

private void removeThreads(int count) {
    Preconditions.checkState(!threads.isEmpty());
    Preconditions.checkState(threads.size() >= count);

    Collection<Future<Service.State>> futures = Lists.newLinkedList();

    Iterator<WorkloadService> iterator = threads.iterator();
    for (int i = 0; i < count; i++) {
        WorkloadService service = iterator.next();
        futures.add(service.stop());
    }

    for (Future<Service.State> future : futures) {
        Futures.get(future, timeoutsConfiguration.getWorkloadStopTimeout());
    }
}
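
Here stop() returns a Future<Service.State> that resolves to each thread's terminal state once shutdown completes. The Futures.get(...) call above appears to be a Jagger project helper; a rough equivalent using only JDK calls might look like the following, assuming a java.time.Duration stop timeout (hypothetical stopTimeout variable):

for (Future<Service.State> future : futures) {
    try {
        // TERMINATED signals a clean stop; FAILED means the workload thread errored out.
        Service.State terminal = future.get(stopTimeout.toMillis(), TimeUnit.MILLISECONDS);
        log.debug("Workload thread stopped in state {}", terminal);
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        throw new RuntimeException("Workload thread did not stop cleanly", e);
    }
}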

From source file:co.cask.cdap.internal.app.runtime.spark.SparkProgramRunner.java

/**
 * Creates a service listener to react to state changes on {@link SparkRuntimeService}.
 */
private Service.Listener createRuntimeServiceListener(final Id.Program programId, final RunId runId,
        Arguments arguments) {

    final String twillRunId = arguments.getOption(ProgramOptionConstants.TWILL_RUN_ID);
    final String workflowName = arguments.getOption(ProgramOptionConstants.WORKFLOW_NAME);
    final String workflowNodeId = arguments.getOption(ProgramOptionConstants.WORKFLOW_NODE_ID);
    final String workflowRunId = arguments.getOption(ProgramOptionConstants.WORKFLOW_RUN_ID);

    return new ServiceListenerAdapter() {
        @Override
        public void starting() {
            //Get start time from RunId
            long startTimeInSeconds = RunIds.getTime(runId, TimeUnit.SECONDS);
            if (startTimeInSeconds == -1) {
                // If RunId is not time-based, use current time as start time
                startTimeInSeconds = TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis());
            }

            if (workflowName == null) {
                store.setStart(programId, runId.getId(), startTimeInSeconds, null, twillRunId);
            } else {
                // Program started by Workflow
                store.setWorkflowProgramStart(programId, runId.getId(), workflowName, workflowRunId,
                        workflowNodeId, startTimeInSeconds, null, twillRunId);
            }
        }

        @Override
        public void terminated(Service.State from) {
            if (from == Service.State.STOPPING) {
                // Service was killed
                store.setStop(programId, runId.getId(),
                        TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis()),
                        ProgramController.State.KILLED.getRunStatus());
            } else {
                // Service completed by itself.
                store.setStop(programId, runId.getId(),
                        TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis()),
                        ProgramController.State.COMPLETED.getRunStatus());
            }
        }

        @Override
        public void failed(Service.State from, Throwable failure) {
            store.setStop(programId, runId.getId(), TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis()),
                    ProgramController.State.ERROR.getRunStatus());
        }
    };
}
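
The from argument is what makes the terminated() branch work: Guava passes the state the service held immediately before TERMINATED, so an externally requested shutdown arrives as STOPPING while a service whose work simply finished arrives as RUNNING, which this listener maps to the KILLED and COMPLETED run statuses respectively.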

From source file:org.quackbot.Controller.java

/**
 * Makes all bots quit their servers.
 */
public void shutdown() throws InterruptedException {
    botManager.stopAndWait();
    state = Service.State.STOPPING;
    if (!shutdownHook.isAlive())
        Runtime.getRuntime().removeShutdownHook(shutdownHook);
    bots.clear();
    log.info("Killed all bots");
    state = Service.State.TERMINATED;
}
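
Here the Controller tracks its lifecycle in its own state field, borrowing the Service.State enum and stepping it through STOPPING and TERMINATED manually rather than relying on a Guava AbstractService to manage the transitions.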

From source file:org.apache.gobblin.runtime.spec_catalog.TopologyCatalog.java

@Override
public void put(Spec spec) {
    try {
        Preconditions.checkState(state() == Service.State.RUNNING,
                String.format("%s is not running.", this.getClass().getName()));
        Preconditions.checkNotNull(spec);

        log.info(String.format("Adding TopologySpec with URI: %s and Config: %s", spec.getUri(),
                ((TopologySpec) spec).getConfigAsProperties()));
        specStore.addSpec(spec);
        this.listeners.onAddSpec(spec);
    } catch (IOException e) {
        throw new RuntimeException("Cannot add Spec to Spec store: " + spec, e);
    }
}

From source file:com.griddynamics.jagger.engine.e1.process.WorkloadWorker.java

@Override
public void configure() {
    onCommandReceived(StartWorkloadProcess.class).execute(new CommandExecutor<StartWorkloadProcess, String>() {
        public Qualifier<StartWorkloadProcess> getQualifier() {
            return Qualifier.of(StartWorkloadProcess.class);
        }

        public String execute(StartWorkloadProcess command, NodeContext nodeContext) {
            log.debug("Processing command {}", command);
            int poolSize = command.getPoolSize();

            if (poolSize < command.getThreads()) {
                throw new IllegalStateException("Error! Pool size is less than thread count");
            }

            WorkloadProcess process = new WorkloadProcess(command.getSessionId(), command, nodeContext,
                    Executors.newFixedThreadPool(poolSize,
                            new ThreadFactoryBuilder().setNameFormat("workload-thread %d")
                                    .setUncaughtExceptionHandler(ExceptionLogger.INSTANCE).build()),
                    timeoutsConfiguration);
            String processId = generateId();
            processes.put(processId, process);
            pools.put(processId, poolSize);
            process.start();
            return processId;
        }
    });

    onCommandReceived(ChangeWorkloadConfiguration.class)
            .execute(new CommandExecutor<ChangeWorkloadConfiguration, Boolean>() {
                public Qualifier<ChangeWorkloadConfiguration> getQualifier() {
                    return Qualifier.of(ChangeWorkloadConfiguration.class);
                }

                public Boolean execute(ChangeWorkloadConfiguration command, NodeContext nodeContext) {
                    Preconditions.checkArgument(command.getProcessId() != null, "Process id cannot be null");

                    Integer poolSize = pools.get(command.getProcessId());
                    if (poolSize < command.getConfiguration().getThreads()) {
                        throw new IllegalStateException("Error! Pool size is less than thread count");
                    }

                    WorkloadProcess process = getProcess(command.getProcessId());

                    process.changeConfiguration(command.getConfiguration());

                    return true;
                }
            });

    onCommandReceived(PollWorkloadProcessStatus.class)
            .execute(new CommandExecutor<PollWorkloadProcessStatus, WorkloadStatus>() {

                public Qualifier<PollWorkloadProcessStatus> getQualifier() {
                    return Qualifier.of(PollWorkloadProcessStatus.class);
                }

                public WorkloadStatus execute(PollWorkloadProcessStatus command, NodeContext nodeContext) {
                    Preconditions.checkArgument(command.getProcessId() != null, "Process id cannot be null");

                    WorkloadProcess process = getProcess(command.getProcessId());
                    return process.getStatus();
                }
            });

    onCommandReceived(StopWorkloadProcess.class)
            .execute(new CommandExecutor<StopWorkloadProcess, WorkloadStatus>() {
                public Qualifier<StopWorkloadProcess> getQualifier() {
                    return Qualifier.of(StopWorkloadProcess.class);
                }

                public WorkloadStatus execute(StopWorkloadProcess command, NodeContext nodeContext) {
                    log.debug("Going to stop process {} on kernel {}", command.getProcessId(),
                            nodeContext.getId().getIdentifier());

                    Preconditions.checkArgument(command.getProcessId() != null, "Process id cannot be null");

                    WorkloadProcess process = getProcess(command.getProcessId());
                    process.stop();
                    processes.remove(command.getProcessId());
                    logWriter.flush();
                    return process.getStatus();
                }
            });

    onCommandReceived(PerformCalibration.class).execute(new CommandExecutor<PerformCalibration, Boolean>() {
        @Override
        public Qualifier<PerformCalibration> getQualifier() {
            return Qualifier.of(PerformCalibration.class);
        }

        @Override
        public Boolean execute(PerformCalibration command, NodeContext nodeContext) {
            ScenarioFactory<Object, Object, Object> scenarioFactory = command.getScenarioFactory();

            Scenario<Object, Object, Object> scenario = scenarioFactory.get(nodeContext);
            int calibrationSamplesCount = scenarioFactory.getCalibrationSamplesCount();

            CalibrationInfoCollector calibrationInfoCollector = new CalibrationInfoCollector(
                    command.getSessionId(), command.getTaskId(), nodeContext);

            ExecutorService executor = Executors.newSingleThreadExecutor(
                    new ThreadFactoryBuilder().setNameFormat("workload-calibration-thread %d")
                            .setUncaughtExceptionHandler(ExceptionLogger.INSTANCE).build());
            WorkloadService calibrationThread = WorkloadService.builder(scenario)
                    .addCollector(calibrationInfoCollector).useExecutor(executor)
                    .buildServiceWithPredefinedSamples(calibrationSamplesCount);

            ListenableFuture<Service.State> start = calibrationThread.start();

            Futures.get(start, timeoutsConfiguration.getCalibrationStartTimeout());

            Services.awaitTermination(calibrationThread, timeoutsConfiguration.getCalibrationTimeout());

            final Map<Pair<Object, Object>, Throwable> errors = calibrationInfoCollector.getErrors();
            if (!errors.isEmpty()) {
                log.error("Calibration failed for {} samples", errors.size());
                return false;
            }

            executor.shutdown();
            logWriter.flush();
            return true;
        }
    });

}
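
The calibration path uses the older Guava idiom: start() returns a ListenableFuture<Service.State> that completes once startup finishes, the pre-Guava-15 equivalent of startAsync() followed by awaitRunning(). The timeout-bounded Futures.get and Services.awaitTermination calls appear to be Jagger project helpers rather than Guava APIs.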

From source file:gobblin.runtime.spec_catalog.TopologyCatalog.java

@Override
public void remove(URI uri) {
    try {
        Preconditions.checkState(state() == Service.State.RUNNING,
                String.format("%s is not running.", this.getClass().getName()));
        Preconditions.checkNotNull(uri);

        log.info(String.format("Removing TopologySpec with URI: %s", uri));
        Spec spec = specStore.getSpec(uri);
        this.listeners.onDeleteSpec(spec.getUri(), spec.getVersion());
        specStore.deleteSpec(uri);

    } catch (IOException | SpecNotFoundException e) {
        throw new RuntimeException("Cannot delete Spec from Spec store for URI: " + uri, e);
    }
}

From source file:co.cask.cdap.internal.app.runtime.batch.MapReduceProgramRunner.java

@Override
public ProgramController run(final Program program, ProgramOptions options) {
    // Extract and verify parameters
    ApplicationSpecification appSpec = program.getApplicationSpecification();
    Preconditions.checkNotNull(appSpec, "Missing application specification.");

    ProgramType processorType = program.getType();
    Preconditions.checkNotNull(processorType, "Missing processor type.");
    Preconditions.checkArgument(processorType == ProgramType.MAPREDUCE,
            "Only MAPREDUCE process type is supported.");

    MapReduceSpecification spec = appSpec.getMapReduce().get(program.getName());
    Preconditions.checkNotNull(spec, "Missing MapReduceSpecification for %s", program.getName());

    // Optionally get the runId. If the MapReduce was started by another program (e.g. a Workflow), it inherits that runId.
    Arguments arguments = options.getArguments();

    final RunId runId = RunIds.fromString(arguments.getOption(ProgramOptionConstants.RUN_ID));

    long logicalStartTime = arguments.hasOption(ProgramOptionConstants.LOGICAL_START_TIME)
            ? Long.parseLong(arguments.getOption(ProgramOptionConstants.LOGICAL_START_TIME))
            : System.currentTimeMillis();

    String programNameInWorkflow = arguments.getOption(ProgramOptionConstants.PROGRAM_NAME_IN_WORKFLOW);

    WorkflowToken workflowToken = null;
    if (arguments.hasOption(ProgramOptionConstants.WORKFLOW_TOKEN)) {
        workflowToken = GSON.fromJson(arguments.getOption(ProgramOptionConstants.WORKFLOW_TOKEN),
                BasicWorkflowToken.class);
    }

    final AdapterDefinition adapterSpec = getAdapterSpecification(arguments);

    MapReduce mapReduce;
    try {
        mapReduce = new InstantiatorFactory(false).get(TypeToken.of(program.<MapReduce>getMainClass()))
                .create();
    } catch (Exception e) {
        LOG.error("Failed to instantiate MapReduce class for {}", spec.getClassName(), e);
        throw Throwables.propagate(e);
    }

    final PluginInstantiator pluginInstantiator = createPluginInstantiator(adapterSpec,
            program.getClassLoader());
    try {
        final DynamicMapReduceContext context = new DynamicMapReduceContext(program, null, runId, null,
                options.getUserArguments(), spec, logicalStartTime, programNameInWorkflow, workflowToken,
                discoveryServiceClient, metricsCollectionService, txSystemClient, datasetFramework, adapterSpec,
                pluginInstantiator);

        Reflections.visit(mapReduce, TypeToken.of(mapReduce.getClass()),
                new PropertyFieldSetter(context.getSpecification().getProperties()),
                new MetricsFieldSetter(context.getMetrics()), new DataSetFieldSetter(context));

        // note: this sets logging context on the thread level
        LoggingContextAccessor.setLoggingContext(context.getLoggingContext());

        final Service mapReduceRuntimeService = new MapReduceRuntimeService(cConf, hConf, mapReduce, spec,
                context, program.getJarLocation(), locationFactory, streamAdmin, txSystemClient, usageRegistry);
        mapReduceRuntimeService.addListener(
                createRuntimeServiceListener(program, runId, adapterSpec, pluginInstantiator, arguments),
                Threads.SAME_THREAD_EXECUTOR);

        final ProgramController controller = new MapReduceProgramController(mapReduceRuntimeService, context);

        LOG.info("Starting MapReduce Job: {}", context.toString());
        // if security is not enabled, start the job as the user we're using to access hdfs with.
        // if this is not done, the mapred job will be launched as the user that runs the program
        // runner, which is probably the yarn user. This may cause permissions issues if the program
        // tries to access cdap data. For example, writing to a FileSet will fail, as the yarn user will
        // be running the job, but the data directory will be owned by cdap.
        if (!MapReduceContextProvider.isLocal(hConf) && !UserGroupInformation.isSecurityEnabled()) {
            String runAs = cConf.get(Constants.CFG_HDFS_USER);
            try {
                UserGroupInformation.createRemoteUser(runAs)
                        .doAs(new PrivilegedExceptionAction<ListenableFuture<Service.State>>() {
                            @Override
                            public ListenableFuture<Service.State> run() throws Exception {
                                return mapReduceRuntimeService.start();
                            }
                        });
            } catch (Exception e) {
                LOG.error("Exception running mapreduce job as user {}.", runAs, e);
                throw Throwables.propagate(e);
            }
        } else {
            mapReduceRuntimeService.start();
        }
        return controller;
    } catch (Exception e) {
        if (pluginInstantiator != null) {
            Closeables.closeQuietly(pluginInstantiator);
        }
        throw Throwables.propagate(e);
    }
}