Example usage for com.google.common.base Stopwatch createUnstarted

Introduction

This page collects usage examples for com.google.common.base.Stopwatch#createUnstarted.

Prototype

@CheckReturnValue
public static Stopwatch createUnstarted() 

Document

Creates (but does not start) a new stopwatch using System#nanoTime as its time source.
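
Before the real-world usages below, here is a minimal, self-contained sketch of the typical create/start/measure pattern; the sleep is only a stand-in for whatever work is being timed, and the class name is illustrative.

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchExample {
    public static void main(final String[] args) throws InterruptedException {
        // Created but not yet ticking.
        final Stopwatch timer = Stopwatch.createUnstarted();

        timer.start();                 // begin measuring
        Thread.sleep(250);             // stand-in for the work being timed
        timer.stop();                  // freeze the elapsed time

        System.out.println("Elapsed ms: " + timer.elapsed(TimeUnit.MILLISECONDS));

        timer.reset();                 // back to the unstarted state, ready for reuse
    }
}

Use Stopwatch.createStarted() instead when the stopwatch should begin timing immediately; createUnstarted() is useful when construction and the start of measurement happen at different points, as several of the examples below show.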

Usage

From source file:com.arpnetworking.metrics.mad.performance.FilePerfTestBase.java

/**
 * Runs a filter.
 *
 * @param pipelineConfigurationFile Pipeline configuration file.
 * @param duration Timeout period.
 * @param variables Substitution key-value pairs into pipeline configuration file.
 * @throws IOException if configuration cannot be loaded.
 */
protected void benchmark(final String pipelineConfigurationFile, final Duration duration,
        final ImmutableMap<String, String> variables) throws IOException {
    // Replace any variables in the configuration file
    String configuration = Resources.toString(Resources.getResource(pipelineConfigurationFile), Charsets.UTF_8);
    for (final Map.Entry<String, String> entry : variables.entrySet()) {
        configuration = configuration.replace(entry.getKey(), entry.getValue());
    }

    // Load the specified stock configuration
    final PipelineConfiguration stockPipelineConfiguration = new StaticConfiguration.Builder()
            .addSource(new JsonNodeLiteralSource.Builder().setSource(configuration).build())
            .setObjectMapper(PipelineConfiguration.createObjectMapper(_injector)).build()
            .getRequiredAs(PipelineConfiguration.class);

    // Canary tracking
    LOGGER.info(String.format("Expected canaries; periods=%s", stockPipelineConfiguration.getPeriods()));
    final CountDownLatch latch = new CountDownLatch(stockPipelineConfiguration.getPeriods().size());
    final Set<Period> periods = Sets.newConcurrentHashSet();

    // Create custom "canary" sink
    final ListeningSink sink = new ListeningSink((periodicData) -> {
        if (periodicData != null) {
            for (final String metricName : periodicData.getData().keys()) {
                if (TestFileGenerator.CANARY.equals(metricName)) {
                    if (periods.add(periodicData.getPeriod())) {
                        LOGGER.info(String.format("Canary flew; filter=%s, period=%s", this.getClass(),
                                periodicData.getPeriod()));
                        latch.countDown();
                    }
                }
            }
        }
        return null;
    });

    // Add the custom "canary" sink
    final List<Sink> benchmarkSinks = Lists.newArrayList(stockPipelineConfiguration.getSinks());
    benchmarkSinks.add(sink);

    // Create the custom configuration
    final PipelineConfiguration benchmarkPipelineConfiguration = OvalBuilder.<PipelineConfiguration, PipelineConfiguration.Builder>clone(
            stockPipelineConfiguration).setSinks(benchmarkSinks).build();

    // Instantiate the pipeline
    final Pipeline pipeline = new Pipeline(benchmarkPipelineConfiguration);

    // Execute the pipeline until the canary flies the coop
    try {
        LOGGER.debug(String.format("Launching pipeline; configuration=%s", pipelineConfigurationFile));
        final Stopwatch timer = Stopwatch.createUnstarted();
        timer.start();
        pipeline.launch();

        if (!latch.await(duration.getMillis(), TimeUnit.MILLISECONDS)) {
            LOGGER.error("Test timed out");
            throw new RuntimeException("Test timed out");
        }

        timer.stop();
        LOGGER.info(String.format("Performance filter result; filter=%s, seconds=%s", this.getClass(),
                timer.elapsed(TimeUnit.SECONDS)));

    } catch (final InterruptedException e) {
        Thread.currentThread().interrupt(); // restore the interrupt flag rather than clearing it
        throw new RuntimeException("Test interrupted");
    } finally {
        pipeline.shutdown();
    }
}

From source file:org.jclouds.virtualbox.functions.CreateAndInstallVm.java

@Override
public IMachine apply(MasterSpec masterSpec) {
    VmSpec vmSpec = masterSpec.getVmSpec();
    IsoSpec isoSpec = masterSpec.getIsoSpec();
    String masterName = vmSpec.getVmName();
    IMachine masterMachine = checkNotNull(createAndRegisterMachineFromIsoIfNotAlreadyExists.apply(masterSpec),
            "master machine");
    // Launch machine and wait for it to come online
    machineController.ensureMachineIsLaunched(masterName);
    String installationKeySequence = isoSpec.getInstallationKeySequence().replace("PRECONFIGURATION_URL",
            preconfigurationUrl);
    configureOsInstallationWithKeyboardSequence(masterName, installationKeySequence);

    masterMachine.setExtraData(GUEST_OS_USER, masterSpec.getLoginCredentials().getUser());
    masterMachine.setExtraData(GUEST_OS_PASSWORD, masterSpec.getLoginCredentials().getPassword());

    SshClient client = sshClientForIMachine.apply(masterMachine);
    logger.debug(">> awaiting installation to finish node(%s)", masterName);
    Stopwatch stopwatch = Stopwatch.createUnstarted();
    stopwatch.start();
    checkState(sshResponds.apply(client), "timed out waiting for guest %s to be accessible via ssh",
            masterName);
    stopwatch.stop();
    logger.debug(String.format("Elapsed time for the OS installation: %d minutes",
            TimeUnit.SECONDS.convert(stopwatch.elapsed(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS)));
    NodeMetadata nodeMetadata = imachineToNodeMetadata.apply(masterMachine);

    logger.debug(">> awaiting post-installation actions on vm: %s", masterName);
    ListenableFuture<ExecResponse> execCleanup = machineUtils.runScriptOnNode(nodeMetadata,
            call("cleanupUdevIfNeeded"), RunScriptOptions.NONE);
    ExecResponse cleanupResponse = Futures.getUnchecked(execCleanup);
    checkState(cleanupResponse.getExitStatus() == 0, "post-installation actions on vm(%s) failed", masterName);

    logger.debug(">> awaiting installation of guest additions on vm: %s", masterName);
    ListenableFuture<ExecResponse> execInstallGA = machineUtils.runScriptOnNode(nodeMetadata,
            new InstallGuestAdditions(vmSpec, version), RunScriptOptions.NONE);

    logger.debug(">> check installation of guest additions on vm: %s", masterName);
    ListenableFuture<ExecResponse> checkGAinstallation = machineUtils.runScriptOnNode(nodeMetadata,
            call("checkVBoxService"), RunScriptOptions.NONE);
    ExecResponse checkGAinstallationResponse = Futures.getUnchecked(checkGAinstallation);
    checkState(checkGAinstallationResponse.getExitStatus() == 0,
            "check installation of guest additions on vm(%s) " + "failed", masterName);

    machineController.ensureMachineIsShutdown(masterName);

    // detach DVD and ISOs, if needed
    Iterable<IMediumAttachment> mediumAttachments = Iterables.filter(
            masterMachine.getMediumAttachmentsOfController("IDE Controller"),
            new Predicate<IMediumAttachment>() {
                public boolean apply(IMediumAttachment in) {
                    return in.getMedium() != null && in.getMedium().getDeviceType().equals(DeviceType.DVD);
                }
            });
    for (IMediumAttachment iMediumAttachment : mediumAttachments) {
        logger.debug("<< iMedium(%s) detached from (%s)", iMediumAttachment.getMedium().getName(),
                masterMachine.getName());
        machineUtils.sharedLockMachineAndApply(masterMachine.getName(), new DetachDistroMediumFromMachine(
                iMediumAttachment.getController(), iMediumAttachment.getPort(), iMediumAttachment.getDevice()));
    }
    return masterMachine;
}

From source file:org.sonatype.sisu.bl.support.DefaultBundle.java

/**
 * Starts the application and waits for it to boot. If the application starts successfully, the state is set to running.
 * <p/>
 * {@inheritDoc}
 *
 * @throws Exception if a problem occurs during application startup or the wait period, or if it cannot be
 *                   determined within the specified timeout whether the application has started
 * @see Bundle#start()
 */
@Override
public void doStart() {
    bootingTime = Time.millis(0);
    final Stopwatch bootingWatch = Stopwatch.createUnstarted();
    try {
        startApplication();
        running = true;
        getRunningBundles().add(this);
        bootingWatch.start();
        waitForBoot();
    } catch (RuntimeException e) {
        doStop();
        throw e;
    } finally {
        if (bootingWatch.isRunning()) {
            bootingWatch.stop();
        }
        bootingTime = Time.millis(bootingWatch.elapsed(TimeUnit.MILLISECONDS));
    }
}

From source file:io.ecarf.core.cloud.task.processor.reason.phase2.DoReasonTask9.java

@Override
public void run() throws IOException {

    GoogleCloudService cloud = (GoogleCloudService) this.getCloudService();

    Stopwatch stopwatch1 = Stopwatch.createUnstarted();
    Stopwatch stopwatch2 = Stopwatch.createUnstarted();

    this.setup(cloud);

    String decoratedTable = table;
    int emptyRetries = 0;
    int totalInferredTriples = 0;

    int maxRetries;
    if (this.retries == null) {
        maxRetries = Config.getIntegerProperty(Constants.REASON_RETRY_KEY, 6);

    } else {
        maxRetries = this.retries;
    }

    int cycleSleep;
    if (this.sleep == null) {
        cycleSleep = Config.getIntegerProperty(Constants.REASON_SLEEP_KEY, 20);
    } else {

        cycleSleep = this.sleep;
    }

    this.ddLimit = Config.getIntegerProperty(Constants.REASON_DATA_DIRECT_DOWNLOAD_LIMIT, 1_200_000);
    int streamingThreshold = Config.getIntegerProperty("ecarf.io.reasoning.streaming.threshold", 100000);
    String instanceId = cloud.getInstanceId();

    int processors = Runtime.getRuntime().availableProcessors();

    if (processors > 1) {
        this.executor = Utils.createFixedThreadPool(processors);
    }

    int count = 0;

    QueryGenerator<Long> generator = new QueryGenerator<Long>(schemaTerms, null);

    // timestamp loop
    do {

        // First of all run all the queries asynchronously and remember the jobId and filename for each term
        generator.setDecoratedTable(decoratedTable);

        String query = generator.getQuery();
        log.debug("Generated Query: " + query);

        String queryResultFilePrefix = instanceId + "_QueryResults_" + count;

        String jobId = cloud.startBigDataQuery(query, new BigDataTable(this.table));
        //QueryResult   queryResult = QueryResult.create().setFilename(queryResultFilePrefix).setJobId(jobId);

        long start = System.currentTimeMillis();

        // block and wait for each job to complete then save results to a file
        QueryStats stats = cloud.saveBigQueryResultsToFile(jobId, queryResultFilePrefix, this.bucket,
                processors, this.ddLimit);

        BigInteger rows = stats.getTotalRows();

        this.totalBytes = this.totalBytes + stats.getTotalProcessedBytes();

        Set<Long> productiveTerms = new HashSet<>();
        Set<String> inferredTriplesFiles = new HashSet<>();
        int interimInferredTriples = 0;

        // only process if triples are found matching this term
        if ((rows != null) && !BigInteger.ZERO.equals(rows)) {

            stopwatch1.start();

            interimInferredTriples = this.inferAndSaveTriplesToFile(stats, productiveTerms, processors,
                    inferredTriplesFiles);

            this.totalRows = this.totalRows.add(rows);

            stopwatch1.stop();

        } else {
            log.info("Skipping query as no data is found");
        }

        totalInferredTriples += interimInferredTriples;

        if (interimInferredTriples > 0) {

            // stream smaller numbers of inferred triples
            // try uploading from cloud storage

            log.info("Inserting " + interimInferredTriples + ", inferred triples into Big Data table for "
                    + productiveTerms.size() + " productive terms. Filename: " + inferredTriplesFiles);

            if (interimInferredTriples <= streamingThreshold) {
                // stream the data

                Set<Triple> inferredTriples = new HashSet<>();
                for (String inferredTriplesFile : inferredTriplesFiles) {
                    TripleUtils.loadCompressedCSVTriples(inferredTriplesFile, true, inferredTriples);
                }

                log.info("Total triples to stream into Big Data: " + inferredTriples.size());
                cloud.streamObjectsIntoBigData(inferredTriples,
                        TableUtils.getBigQueryEncodedTripleTable(table));

                log.info("All inferred triples are streamed into Big Data table");

            } else {

                List<String> cloudStorageFiles = new ArrayList<>();
                // load the data through cloud storage
                // upload the file to cloud storage
                for (String inferredTriplesFile : inferredTriplesFiles) {
                    log.info("Uploading inferred triples file into cloud storage: " + inferredTriplesFile);
                    StorageObject file = cloud.uploadFileToCloudStorage(inferredTriplesFile, bucket);
                    log.info("File " + file + ", uploaded successfully. Now loading it into big data.");
                    cloudStorageFiles.add(file.getUri());
                }

                jobId = cloud.loadCloudStorageFilesIntoBigData(cloudStorageFiles,
                        TableUtils.getBigQueryEncodedTripleTable(table), false);

                log.info(
                        "All inferred triples are loaded into Big Data table through cloud storage, completed jobId: "
                                + jobId);

            }

            // reset empty retries
            emptyRetries = 0;

            stopwatch2.reset();

        } else {
            log.info("No new inferred triples");
            // increment empty retries
            emptyRetries++;

            if (!stopwatch2.isRunning()) {
                stopwatch2.start();
            }
        }

        log.info("Total inferred triples so far = " + totalInferredTriples + ", current retry count: "
                + emptyRetries);

        if (emptyRetries < maxRetries) {
            ApiUtils.block(cycleSleep);

            // FIXME move into the particular cloud implementation service
            long elapsed = System.currentTimeMillis() - start;
            decoratedTable = "[" + table + "@-" + elapsed + "-]";

            log.info("Using table decorator: " + decoratedTable + ". Empty retries count: " + emptyRetries);
        }

        count++;

    } while (emptyRetries < maxRetries); // end timestamp loop

    if (executor != null) {
        executor.shutdown();
    }
    log.info("Finished reasoning, total inferred triples = " + totalInferredTriples);
    //log.info("Number of avoided duplicate terms = " + this.duplicates);
    log.info("Total rows retrieved from big data = " + this.totalRows);
    log.info("Total processed GBytes = " + ((double) this.totalBytes / FileUtils.ONE_GB));
    log.info("Total process reasoning time (serialization in inf file) = " + stopwatch1);
    log.info("Total time spent in empty inference cycles = " + stopwatch2);
}

From source file:es.usc.citius.composit.core.composition.optimization.FunctionalDominanceOptimizer.java

public ServiceMatchNetwork<E, T> optimize(ServiceMatchNetwork<E, T> network) {
    // Analyze functional dominance between services. This optimization
    // identifies all dominant services using the semantic inputs and outputs
    // and the existing matches between the concepts in the graph.
    Stopwatch globalWatch = Stopwatch.createStarted();
    Stopwatch localWatch = Stopwatch.createUnstarted();
    List<Set<Operation<E>>> optimized = new ArrayList<Set<Operation<E>>>(network.numberOfLevels());
    log.debug("Starting functional dominance optimization...");
    for (int i = 0; i < network.numberOfLevels(); i++) {
        // Analyze input dominance
        log.debug(" > Analyzing functional dominance on {} (network level {})", network.getOperationsAtLevel(i),
                i);
        localWatch.start();
        Collection<Collection<Operation<E>>> groups = functionalInputEquivalence(network, i);
        localWatch.stop();
        log.debug("\t\tInput equivalence groups: {} (computed in {})", groups, localWatch.toString());
        localWatch.reset();
        // For each equivalent group in this level, check the output dominance
        Set<Operation<E>> nonDominatedServices = new HashSet<Operation<E>>();
        for (Collection<Operation<E>> group : groups) {
            log.debug("\t\tAnalyzing output dominance for group {}", group);
            localWatch.start();
            Collection<Collection<Operation<E>>> nonDominatedGroups = functionalOutputDominance(group, network,
                    i);
            localWatch.stop();
            log.debug("\t\t\t+ Non-dominated groups detected: {} (computed in {})", nonDominatedGroups,
                    localWatch.toString());
            log.debug("\t\t\t+ Size before / after output dominance {}/{}", group.size(),
                    nonDominatedGroups.size());
            // Pick one non-dominated service from each group arbitrarily (the first one found).
            for (Collection<Operation<E>> ndGroup : nonDominatedGroups) {
                Operation<E> representant = ndGroup.iterator().next();
                log.debug("\t\t\t\t- {} has been selected as the representative service of the group {}",
                        representant, ndGroup);
                nonDominatedServices.add(representant);
            }
        }
        optimized.add(nonDominatedServices);
    }
    localWatch.reset().start();
    DirectedAcyclicSMN<E, T> optimizedNetwork = new DirectedAcyclicSMN<E, T>(
            new HashLeveledServices<E>(optimized), network);
    localWatch.stop();
    log.debug(" > Functional optimized match network computed in {}", localWatch.toString());
    log.debug("Functional Dominance Optimization done in {}. Size before/after {}/{}.",
            globalWatch.stop().toString(), network.listOperations().size(),
            optimizedNetwork.listOperations().size());
    return optimizedNetwork;
}

From source file:org.opendaylight.protocol.bgp.rib.impl.stats.peer.BGPSessionStatsImpl.java

public BGPSessionStatsImpl(@Nonnull final BGPSessionImpl session, @Nonnull final Open remoteOpen,
        final int holdTimerValue, final int keepAlive, @Nonnull final Channel channel,
        @Nonnull final Optional<BGPSessionPreferences> localPreferences,
        @Nonnull final Collection<BgpTableType> tableTypes, @Nonnull final List<AddressFamilies> addPathTypes) {
    this.session = session;
    this.sessionStopwatch = Stopwatch.createUnstarted();
    this.stats = new BgpSessionState();
    this.stats.setHoldtimeCurrent(holdTimerValue);
    this.stats.setKeepaliveCurrent(keepAlive);
    this.stats.setLocalPeerPreferences(setLocalPeerPref(remoteOpen, channel, tableTypes, addPathTypes));
    this.stats.setRemotePeerPreferences(setRemotePeerPref(channel, localPreferences));
    this.errMsgs.setErrorReceivedTotal(errMsgsRecvTotal);
    this.errMsgs.setErrorSentTotal(errMsgsSentTotal);
    this.errMsgs.setErrorReceived(new ArrayList<>());
    this.errMsgs.setErrorSent(new ArrayList<>());
    initMsgs();
}

From source file:com.facebook.presto.operator.HttpPageBufferClient.java

public HttpPageBufferClient(HttpClient httpClient, DataSize maxResponseSize, Duration minErrorDuration,
        URI location, ClientCallback clientCallback, BlockEncodingSerde blockEncodingSerde,
        ScheduledExecutorService executor) {
    this(httpClient, maxResponseSize, minErrorDuration, location, clientCallback, blockEncodingSerde, executor,
            Stopwatch.createUnstarted());
}

From source file:org.opendaylight.controller.netconf.persist.impl.ConfigPusherImpl.java

/**
 * First calls {@link #getOperationServiceWithRetries(java.util.Set, String)} in order to wait until
 * expected capabilities are present, then tries to push the configuration. If {@link ConflictingVersionException}
 * is caught, the whole process is retried; a new service instance needs to be obtained from the factory. Closes
 * {@link NetconfOperationService} after each use.
 */
private synchronized EditAndCommitResponse pushConfigWithConflictingVersionRetries(
        ConfigSnapshotHolder configSnapshotHolder) throws NetconfDocumentedException {
    ConflictingVersionException lastException;
    Stopwatch stopwatch = Stopwatch.createUnstarted();
    do {
        String idForReporting = configSnapshotHolder.toString();
        SortedSet<String> expectedCapabilities = checkNotNull(configSnapshotHolder.getCapabilities(),
                "Expected capabilities must not be null - %s, check %s", idForReporting,
                configSnapshotHolder.getClass().getName());
        try (NetconfOperationService operationService = getOperationServiceWithRetries(expectedCapabilities,
                idForReporting)) {
            if (!stopwatch.isRunning()) {
                stopwatch.start();
            }
            return pushConfig(configSnapshotHolder, operationService);
        } catch (ConflictingVersionException e) {
            lastException = e;
            LOG.info("Conflicting version detected, will retry after timeout");
            sleep();
        }
    } while (stopwatch.elapsed(TimeUnit.MILLISECONDS) < conflictingVersionTimeoutMillis);
    throw new IllegalStateException("Max wait for conflicting version stabilization timeout after "
            + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms", lastException);
}

From source file:org.ow2.proactive.scheduler.task.TaskLauncher.java

public void doTask(ExecutableContainer executableContainer, TaskResult[] previousTasksResults,
        TaskTerminateNotification terminateNotification) {
    logger.info("Task started " + taskId.getJobId().getReadableName() + " : " + taskId.getReadableName());

    this.taskKiller = this.replaceTaskKillerWithDoubleTimeoutValueIfRunAsMe(executableContainer.isRunAsUser());

    WallTimer wallTimer = new WallTimer(initializer.getWalltime(), taskKiller);

    Stopwatch taskStopwatchForFailures = Stopwatch.createUnstarted();

    TaskResultImpl taskResult;

    TaskDataspaces dataspaces = null;

    try {
        addShutdownHook();
        // lock the cache space cleaning mechanism
        DataSpaceNodeConfigurationAgent.lockCacheSpaceCleaning();
        dataspaces = factory.createTaskDataspaces(taskId, initializer.getNamingService(),
                executableContainer.isRunAsUser());

        File taskLogFile = taskLogger.createFileAppender(dataspaces.getScratchFolder());

        progressFileReader.start(dataspaces.getScratchFolder(), taskId);

        TaskContext context = new TaskContext(executableContainer, initializer, previousTasksResults,
                new NodeDataSpacesURIs(dataspaces.getScratchURI(), dataspaces.getCacheURI(),
                        dataspaces.getInputURI(), dataspaces.getOutputURI(), dataspaces.getUserURI(),
                        dataspaces.getGlobalURI()),
                progressFileReader.getProgressFile().toString(), getHostname(), decrypter);

        File workingDir = getTaskWorkingDir(context, dataspaces);

        logger.info("Task working dir: " + workingDir);
        logger.info("Cache space: " + context.getNodeDataSpaceURIs().getCacheURI());
        logger.info("Input space: " + context.getNodeDataSpaceURIs().getInputURI());
        logger.info("Output space: " + context.getNodeDataSpaceURIs().getOutputURI());
        logger.info("User space: " + context.getNodeDataSpaceURIs().getUserURI());
        logger.info("Global space: " + context.getNodeDataSpaceURIs().getGlobalURI());
        logger.info("Scheduler rest url: " + context.getSchedulerRestUrl());

        wallTimer.start();

        dataspaces.copyInputDataToScratch(initializer.getFilteredInputFiles(fileSelectorsFilters(context))); // should handle interrupt

        if (decrypter != null) {
            decrypter.setCredentials(executableContainer.getCredentials());
        }

        TaskExecutor taskExecutor = factory.createTaskExecutor(workingDir);

        taskStopwatchForFailures.start();
        taskResult = taskExecutor.execute(context, taskLogger.getOutputSink(), taskLogger.getErrorSink());
        taskStopwatchForFailures.stop();

        switch (taskKiller.getStatus()) {
        case WALLTIME_REACHED:
            taskResult = getWalltimedTaskResult(taskStopwatchForFailures);
            sendResultToScheduler(terminateNotification, taskResult);
            return;
        case KILLED_MANUALLY:
            // killed by Scheduler, no need to send results back
            return;
        }

        dataspaces.copyScratchDataToOutput(
                initializer.getFilteredOutputFiles(fileSelectorsFilters(context, taskResult)));

        wallTimer.stop();

        copyTaskLogsToUserSpace(taskLogFile, dataspaces);
        taskResult.setLogs(taskLogger.getLogs());

        sendResultToScheduler(terminateNotification, taskResult);
    } catch (Throwable taskFailure) {
        wallTimer.stop();

        switch (taskKiller.getStatus()) {
        case WALLTIME_REACHED:
            taskResult = getWalltimedTaskResult(taskStopwatchForFailures);
            sendResultToScheduler(terminateNotification, taskResult);
            break;
        case KILLED_MANUALLY:
            // killed by Scheduler, no need to send results back
            return;
        default:
            logger.info("Failed to execute task", taskFailure);
            taskFailure.printStackTrace(taskLogger.getErrorSink());
            taskResult = new TaskResultImpl(taskId, taskFailure, taskLogger.getLogs(),
                    taskStopwatchForFailures.elapsed(TimeUnit.MILLISECONDS));
            sendResultToScheduler(terminateNotification, taskResult);
        }
    } finally {
        try {
            progressFileReader.stop();
            taskLogger.close();

            if (dataspaces != null) {
                dataspaces.close();
            }
            // unlocks the cache space cleaning thread
            DataSpaceNodeConfigurationAgent.unlockCacheSpaceCleaning();
            removeShutdownHook();
        } finally {
            terminate();
        }
    }
}

From source file:org.ow2.proactive.scheduler.task.executors.InProcessTaskExecutor.java

/**
 * Executes a task inside a task context.
 *
 * @param taskContext Task context to execute.
 * @param output      Standard output sink.
 * @param error       Error sink.
 * @return Returns the task result.
 */
@Override
public TaskResultImpl execute(TaskContext taskContext, PrintStream output, PrintStream error) {
    ScriptHandler scriptHandler = ScriptLoader.createLocalHandler();
    String nodesFile = null;
    SchedulerNodeClient schedulerNodeClient = null;
    RemoteSpace userSpaceClient = null;
    RemoteSpace globalSpaceClient = null;
    try {
        nodesFile = writeNodesFile(taskContext);
        VariablesMap variables = new VariablesMap();
        variables.setInheritedMap(taskContextVariableExtractor.extractVariables(taskContext, nodesFile, false));
        variables.setScopeMap(taskContextVariableExtractor.extractScopeVariables(taskContext));
        Map<String, String> resultMetadata = new HashMap<>();
        Map<String, String> thirdPartyCredentials = forkedTaskVariablesManager
                .extractThirdPartyCredentials(taskContext);
        schedulerNodeClient = forkedTaskVariablesManager.createSchedulerNodeClient(taskContext);
        userSpaceClient = forkedTaskVariablesManager.createDataSpaceNodeClient(taskContext, schedulerNodeClient,
                IDataSpaceClient.Dataspace.USER);
        globalSpaceClient = forkedTaskVariablesManager.createDataSpaceNodeClient(taskContext,
                schedulerNodeClient, IDataSpaceClient.Dataspace.GLOBAL);

        forkedTaskVariablesManager.addBindingsToScriptHandler(scriptHandler, taskContext, variables,
                thirdPartyCredentials, schedulerNodeClient, userSpaceClient, globalSpaceClient, resultMetadata);

        Stopwatch stopwatch = Stopwatch.createUnstarted();
        TaskResultImpl taskResult;
        try {
            stopwatch.start();
            Serializable result = execute(taskContext, output, error, scriptHandler, thirdPartyCredentials,
                    variables);
            stopwatch.stop();
            taskResult = new TaskResultImpl(taskContext.getTaskId(), result, null,
                    stopwatch.elapsed(TimeUnit.MILLISECONDS));
        } catch (Throwable e) {
            stopwatch.stop();
            e.printStackTrace(error);
            taskResult = new TaskResultImpl(taskContext.getTaskId(), e, null,
                    stopwatch.elapsed(TimeUnit.MILLISECONDS));
        }

        executeFlowScript(taskContext.getControlFlowScript(), scriptHandler, output, error, taskResult);

        taskResult.setPropagatedVariables(
                SerializationUtil.serializeVariableMap(variables.getPropagatedVariables()));
        taskResult.setMetadata(resultMetadata);

        return taskResult;
    } catch (Throwable e) {
        e.printStackTrace(error);
        return new TaskResultImpl(taskContext.getTaskId(), e);
    } finally {
        if (nodesFile != null && !nodesFile.isEmpty()) {
            FileUtils.deleteQuietly(new File(nodesFile));
        }
    }
}