Example usage for java.util.concurrent Future isDone

List of usage examples for java.util.concurrent Future isDone

Introduction

On this page you can find example usages of java.util.concurrent.Future.isDone().

Prototype

boolean isDone();

Document

Returns true if this task completed. Completion may be due to normal termination, an exception, or cancellation; in all of these cases, this method will return true.
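Before the real-world examples below, here is a minimal, self-contained sketch of the typical pattern (the class name and the submitted task are illustrative, not taken from any of the sources below): poll isDone() to avoid blocking, then call get() once the task has completed.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class IsDoneExample {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        // Submit a task that takes a moment to complete.
        Future<String> future = executor.submit(() -> {
            Thread.sleep(500);
            return "done";
        });
        // Poll isDone() instead of blocking immediately on get().
        while (!future.isDone()) {
            System.out.println("Task still running...");
            Thread.sleep(100);
        }
        // isDone() is true on normal completion, exception, or cancellation,
        // so get() returns (or throws) without blocking here.
        System.out.println("Result: " + future.get());
        executor.shutdown();
    }
}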

Usage

From source file:org.apache.asterix.external.feed.test.InputHandlerTest.java

@Test
public void testMemoryVarSizeFrameWithSpillWithDiscard() {
    try {
        int numberOfMemoryFrames = 50;
        int numberOfSpillFrames = 50;
        int notDiscarded = 0;
        int totalMinFrames = 0;
        IHyracksTaskContext ctx = TestUtils.create(DEFAULT_FRAME_SIZE);
        // Spill budget = Memory budget, No discard
        FeedPolicyAccessor fpa = createFeedPolicyAccessor(true, true, DEFAULT_FRAME_SIZE * numberOfSpillFrames,
                DISCARD_ALLOWANCE);
        // Non-Active Writer
        TestControlledFrameWriter writer = FrameWriterTestUtils.create(DEFAULT_FRAME_SIZE, false);
        writer.freeze();
        // FramePool
        ConcurrentFramePool framePool = new ConcurrentFramePool(NODE_ID,
                numberOfMemoryFrames * DEFAULT_FRAME_SIZE, DEFAULT_FRAME_SIZE);
        FeedRuntimeInputHandler handler = createInputHandler(ctx, writer, fpa, framePool);
        handler.open();
        ByteBuffer buffer1 = ByteBuffer.allocate(DEFAULT_FRAME_SIZE);
        ByteBuffer buffer2 = ByteBuffer.allocate(DEFAULT_FRAME_SIZE * 2);
        ByteBuffer buffer3 = ByteBuffer.allocate(DEFAULT_FRAME_SIZE * 3);
        ByteBuffer buffer4 = ByteBuffer.allocate(DEFAULT_FRAME_SIZE * 4);
        ByteBuffer buffer5 = ByteBuffer.allocate(DEFAULT_FRAME_SIZE * 5);
        while (true) {
            if (totalMinFrames + 1 < numberOfMemoryFrames) {
                handler.nextFrame(buffer1);
                notDiscarded++;
                totalMinFrames++;
            } else {
                break;
            }
            if (totalMinFrames + 2 < numberOfMemoryFrames) {
                notDiscarded++;
                totalMinFrames += 2;
                handler.nextFrame(buffer2);
            } else {
                break;
            }
            if (totalMinFrames + 3 < numberOfMemoryFrames) {
                notDiscarded++;
                totalMinFrames += 3;
                handler.nextFrame(buffer3);
            } else {
                break;
            }
        }
        // Now we need to verify that the frame pool memory has been consumed!
        Assert.assertTrue(framePool.remaining() < 3);
        Assert.assertEquals(0, handler.getNumSpilled());
        Assert.assertEquals(0, handler.getNumStalled());
        Assert.assertEquals(0, handler.getNumDiscarded());
        while (true) {
            if (handler.getNumSpilled() < numberOfSpillFrames) {
                notDiscarded++;
                handler.nextFrame(buffer3);
            } else {
                break;
            }
            if (handler.getNumSpilled() < numberOfSpillFrames) {
                notDiscarded++;
                handler.nextFrame(buffer4);
            } else {
                break;
            }
            if (handler.getNumSpilled() < numberOfSpillFrames) {
                notDiscarded++;
                handler.nextFrame(buffer5);
            } else {
                break;
            }
        }
        Assert.assertTrue(framePool.remaining() < 3);
        Assert.assertEquals(handler.framesOnDisk(), handler.getNumSpilled());
        Assert.assertEquals(handler.framesOnDisk(), numberOfSpillFrames);
        Assert.assertEquals(0, handler.getNumStalled());
        Assert.assertEquals(0, handler.getNumDiscarded());
        // We can only discard one frame
        double numDiscarded = 0;
        boolean nextShouldDiscard = ((numDiscarded + 1.0) / (handler.getTotal() + 1.0)) <= fpa
                .getMaxFractionDiscard();
        while (nextShouldDiscard) {
            handler.nextFrame(buffer5);
            numDiscarded++;
            nextShouldDiscard = ((numDiscarded + 1.0) / (handler.getTotal() + 1.0)) <= fpa
                    .getMaxFractionDiscard();
        }
        Assert.assertTrue(framePool.remaining() < 3);
        Assert.assertEquals(handler.framesOnDisk(), handler.getNumSpilled());
        Assert.assertEquals(0, handler.getNumStalled());
        Assert.assertEquals((int) numDiscarded, handler.getNumDiscarded());
        // Next Call should block since we're exceeding the discard allowance
        Future<?> result = EXECUTOR.submit(new Pusher(buffer5, handler));
        if (result.isDone()) {
            Assert.fail("The producer should switch to stall mode since it is exceeding the discard allowance");
        }
        // consume memory frames
        writer.unfreeze();
        result.get();
        handler.close();
        Assert.assertEquals(writer.nextFrameCount(), notDiscarded + 1);
    } catch (Throwable th) {
        th.printStackTrace();
        Assert.fail();
    }
    Assert.assertNull(cause);
}

From source file:org.apache.asterix.external.feed.test.InputHandlerTest.java

@Test
public void testMemoryFixedSizeFrameNoDiskNoDiscard() {
    try {
        IHyracksTaskContext ctx = TestUtils.create(DEFAULT_FRAME_SIZE);
        // No spill, No discard
        FeedPolicyAccessor fpa = createFeedPolicyAccessor(false, false, 0L, DISCARD_ALLOWANCE);
        // Non-Active Writer
        TestControlledFrameWriter writer = FrameWriterTestUtils.create(DEFAULT_FRAME_SIZE, false);
        writer.freeze();
        // FramePool
        ConcurrentFramePool framePool = new ConcurrentFramePool(NODE_ID, FEED_MEM_BUDGET, DEFAULT_FRAME_SIZE);

        FeedRuntimeInputHandler handler = createInputHandler(ctx, writer, fpa, framePool);
        handler.open();
        VSizeFrame frame = new VSizeFrame(ctx);
        // add NUM_FRAMES times
        for (int i = 0; i < NUM_FRAMES; i++) {
            handler.nextFrame(frame.getBuffer());
        }
        // The next call should block, so run it in a different thread
        Future<?> result = EXECUTOR.submit(new Pusher(frame.getBuffer(), handler));
        // Check that the nextFrame didn't return
        if (result.isDone()) {
            Assert.fail();
        } else {
            // Check that no records were discarded
            Assert.assertEquals(handler.getNumDiscarded(), 0);
            // Check that no records were spilled
            Assert.assertEquals(handler.getNumSpilled(), 0);
            // Check that the inputHandler subscribed to the framePool
            // Check that number of stalled is not greater than 1
            Assert.assertTrue(handler.getNumStalled() <= 1);
            writer.kick();
        }
        result.get();
        writer.unfreeze();
        handler.close();
    } catch (Throwable th) {
        th.printStackTrace();
        Assert.fail();
    }
    Assert.assertNull(cause);
}

From source file:net.sibcolombia.sibsp.service.portal.implementation.ResourceManagerImplementation.java

/**
 * Checks if a resource is locked due to some background processing.
 * While doing so it checks the known futures for completion.
 * If completed, the resource is updated with the status messages and the lock is removed.
 */
public boolean isLocked(String shortname) {
    if (processFutures.containsKey(shortname)) {
        // is listed as locked but task might be finished, check
        Future<Integer> f = processFutures.get(shortname);
        if (f.isDone()) {
            try {
                Integer coreRecords = f.get();
                Resource res = get(shortname);
                res.setRecordsPublished(coreRecords);
                save(res);
                return false;
            } catch (InterruptedException e) {
                log.info("Process interrupted for resource " + shortname);
            } catch (CancellationException e) {
                log.info("Process canceled for resource " + shortname);
            } catch (ExecutionException e) {
                log.error("Process for resource " + shortname + " aborted due to error: " + e.getMessage());
            } finally {
                processFutures.remove(shortname);
            }
        }
        return true;
    }
    return false;
}

From source file:org.wso2.siddhi.extension.input.transport.kafka.KafkaSourceTestCase.java

@Test
public void testAKafkaPauseAndResume() throws InterruptedException {
    try {
        log.info("Test to verify the pause and resume functionality of Kafka source");
        String topics[] = new String[] { "kafka_topic3" };
        createTopic(topics, 2);
        SiddhiManager siddhiManager = new SiddhiManager();
        siddhiManager.setExtension("source.mapper:text", TextSourceMapper.class);
        ExecutionPlanRuntime executionPlanRuntime = siddhiManager
                .createExecutionPlanRuntime("@Plan:name('TestExecutionPlan') "
                        + "define stream BarStream (symbol string, price float, volume long); "
                        + "@info(name = 'query1') "
                        + "@source(type='kafka', topic='kafka_topic3', group.id='test1', threading"
                        + ".option='partition.wise', "
                        + "bootstrap.servers='localhost:9092', partition.no.list='0,1', " + "@map(type='text'))"
                        + "Define stream FooStream (symbol string, price float, volume long);"
                        + "from FooStream select symbol, price, volume insert into BarStream;");
        executionPlanRuntime.addCallback("BarStream", new StreamCallback() {
            @Override
            public void receive(Event[] events) {
                for (Event event : events) {
                    System.out.println(event);
                    eventArrived = true;
                    count++;
                }

            }
        });
        executionPlanRuntime.start();
        Future eventSender = executorService.submit(new Runnable() {
            @Override
            public void run() {
                kafkaPublisher(topics, 2, 4);
            }
        });
        while (!eventSender.isDone()) {
            Thread.sleep(1000);
        }
        Thread.sleep(2000);
        assertEquals(4, count);
        assertTrue(eventArrived);

        Collection<List<Source>> sources = executionPlanRuntime.getSources();
        // pause the transports
        sources.forEach(e -> e.forEach(Source::pause));

        init2();
        eventSender = executorService.submit(new Runnable() {
            @Override
            public void run() {
                kafkaPublisher(topics, 2, 4);
            }
        });
        while (!eventSender.isDone()) {
            Thread.sleep(1000);
        }
        Thread.sleep(5000);
        assertFalse(eventArrived);

        // resume the transports
        sources.forEach(e -> e.forEach(Source::resume));
        Thread.sleep(2000);
        assertEquals(4, count);
        assertTrue(eventArrived);

        executionPlanRuntime.shutdown();
    } catch (ZkTimeoutException ex) {
        log.warn("No zookeeper may not be available.", ex);
    }
}

From source file:org.finra.dm.service.impl.JobServiceImpl.java

@Override
public Job createAndStartJob(JobCreateRequest request, boolean isAsync) throws Exception {
    // Perform the validation.
    validateJobCreateRequest(request);

    // Get the namespace and ensure it exists.
    NamespaceEntity namespaceEntity = dmDaoHelper.getNamespaceEntity(request.getNamespace());

    // Get the job definition and ensure it exists.
    JobDefinitionEntity jobDefinitionEntity = dmDao.getJobDefinitionByAltKey(request.getNamespace(),
            request.getJobName());
    if (jobDefinitionEntity == null) {
        throw new ObjectNotFoundException("Job definition with name \"" + request.getJobName()
                + "\" doesn't exist for namespace \"" + request.getNamespace() + "\".");
    }

    // Build the parameters map
    Map<String, Object> mergedParameters = getParameters(jobDefinitionEntity, request);

    // Create a process instance holder to check for a handle to the process instance once it is created.
    ProcessInstanceHolder processInstanceHolder = new ProcessInstanceHolder();
    ProcessInstance processInstance = null;

    if (isAsync) {
        // Create and start the job asynchronously.
        Future<Void> future = activitiProcessInstanceCreator.createAndStartProcessInstanceAsync(
                jobDefinitionEntity.getActivitiId(), mergedParameters, processInstanceHolder);

        // Keep looping until a process instance was created (although not necessarily started) or until the job has been created and started via
        // the async method.
        while ((!future.isDone()) && (processInstance == null)) {
            // Try to get the process instance from the holder. It should be available once it is created, but before it is started.
            processInstance = processInstanceHolder.getProcessInstance();

            // If we don't have a process instance yet, sleep for a short time to give the async method time to move forward.
            if (processInstance == null) {
                Thread.sleep(100);
            }
        }

        // Try to get the process instance from the holder one last time in case the job future is done and we didn't get it after the sleep above.
        processInstance = processInstanceHolder.getProcessInstance();

        // If an exception caused the future to complete while leaving the process instance null, surface it by calling get().
        try {
            future.get();
        } catch (ExecutionException e) {
            /*
             * Throwing an illegal argument exception here since we have no idea what the cause of this exception is.
             * We could try adding custom handling for each known type of exception, but that would quickly become unmaintainable.
             * For now we will assume the user can do something about it.
             */
            throw new IllegalArgumentException("Error executing job. See cause for details.", e);
        }

        // If we don't have a process instance, this should mean that we weren't able to create the job, and calling get should have thrown the
        // exception to the caller as to why we couldn't.
        if (processInstance == null) {
            // If we get here, that means the future completed, but nobody populated the process instance, which shouldn't happen. Just throw
            // an exception so we're aware of the problem.
            throw new IllegalStateException(
                    "Unable to create process instance for unknown reason for job definition \""
                            + jobDefinitionEntity.getName() + "\" and Activiti Id \""
                            + jobDefinitionEntity.getActivitiId() + "\".");
        }
    } else {
        activitiProcessInstanceCreator.createAndStartProcessInstanceSync(jobDefinitionEntity.getActivitiId(),
                mergedParameters, processInstanceHolder);
        processInstance = processInstanceHolder.getProcessInstance();
    }

    // If we get here, we have a newly created process instance. Log to know it was created successfully.
    LOGGER.info("Created process instance with Id: " + processInstance.getProcessInstanceId()
            + " for process definition Id: " + jobDefinitionEntity.getActivitiId() + " with merged parameters: "
            + mergedParameters);

    // Create and return the job object.
    return createJobFromRequest(namespaceEntity.getCode(), jobDefinitionEntity.getName(), mergedParameters,
            processInstance.getProcessInstanceId());
}

From source file:org.apache.hadoop.contrib.bkjournal.TestBookKeeperJournalManager.java

/**
 * Tests that concurrent calls to format will still allow one to succeed.
 */
@Test
public void testConcurrentFormat() throws Exception {
    final URI uri = BKJMUtil.createJournalURI("/hdfsjournal-concurrentformat");
    final NamespaceInfo nsi = newNSInfo();

    // populate with data first
    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri, nsi);
    bkjm.format(nsi);
    for (int i = 1; i < 100 * 2; i += 2) {
        bkjm.startLogSegment(i, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
        bkjm.finalizeLogSegment(i, i + 1);
    }
    bkjm.close();

    final int numThreads = 40;
    List<Callable<ThreadStatus>> threads = new ArrayList<Callable<ThreadStatus>>();
    final CyclicBarrier barrier = new CyclicBarrier(numThreads);

    for (int i = 0; i < numThreads; i++) {
        threads.add(new Callable<ThreadStatus>() {
            public ThreadStatus call() {
                BookKeeperJournalManager bkjm = null;
                try {
                    bkjm = new BookKeeperJournalManager(conf, uri, nsi);
                    barrier.await();
                    bkjm.format(nsi);
                    return ThreadStatus.COMPLETED;
                } catch (IOException ioe) {
                    LOG.info("Exception formatting ", ioe);
                    return ThreadStatus.GOODEXCEPTION;
                } catch (InterruptedException ie) {
                    LOG.error("Interrupted. Something is broken", ie);
                    Thread.currentThread().interrupt();
                    return ThreadStatus.BADEXCEPTION;
                } catch (Exception e) {
                    LOG.error("Some other bad exception", e);
                    return ThreadStatus.BADEXCEPTION;
                } finally {
                    if (bkjm != null) {
                        try {
                            bkjm.close();
                        } catch (IOException ioe) {
                            LOG.error("Error closing journal manager", ioe);
                        }
                    }
                }
            }
        });
    }
    ExecutorService service = Executors.newFixedThreadPool(numThreads);
    List<Future<ThreadStatus>> statuses = service.invokeAll(threads, 60, TimeUnit.SECONDS);
    int numCompleted = 0;
    for (Future<ThreadStatus> s : statuses) {
        assertTrue(s.isDone());
        assertTrue("Thread threw invalid exception",
                s.get() == ThreadStatus.COMPLETED || s.get() == ThreadStatus.GOODEXCEPTION);
        if (s.get() == ThreadStatus.COMPLETED) {
            numCompleted++;
        }
    }
    LOG.info("Completed " + numCompleted + " formats");
    assertTrue("No thread managed to complete formatting", numCompleted > 0);
}

From source file:org.wso2.siddhi.extension.input.transport.kafka.KafkaSourceTestCase.java

@Test
public void testRecoveryOnFailureOfMultipleNodeWithKafka() throws InterruptedException {
    try {
        log.info(
                "Test to verify recovering process of multiple Siddhi nodes on a failure when Kafka is the event"
                        + " source");
        String topics[] = new String[] { "kafka_topic5", "kafka_topic6" };
        createTopic(topics, 1);
        // 1st node
        PersistenceStore persistenceStore = new InMemoryPersistenceStore();
        SiddhiManager siddhiManager1 = new SiddhiManager();
        siddhiManager1.setPersistenceStore(persistenceStore);
        siddhiManager1.setExtension("inputmapper:text", TextSourceMapper.class);

        // 2nd node
        PersistenceStore persistenceStore1 = new InMemoryPersistenceStore();
        SiddhiManager siddhiManager2 = new SiddhiManager();
        siddhiManager2.setPersistenceStore(persistenceStore1);
        siddhiManager2.setExtension("inputmapper:text", TextSourceMapper.class);

        String query1 = "@Plan:name('TestExecutionPlan') "
                + "@sink(type='kafka', topic='kafka_topic6', bootstrap.servers='localhost:9092', partition"
                + ".no='0', " + "@map(type='text'))" + "define stream BarStream (count long); "
                + "@source(type='kafka', topic='kafka_topic5', group.id='test', "
                + "threading.option='topic.wise', bootstrap.servers='localhost:9092', partition.no.list='0', "
                + "@map(type='text'))" + "Define stream FooStream (symbol string, price float, volume long);"
                + "@info(name = 'query1') "
                + "from FooStream select count(symbol) as count insert into BarStream;";

        String query2 = "@Plan:name('TestExecutionPlan') " + "define stream BarStream (count long); "
                + "@source(type='kafka', topic='kafka_topic6', "
                + "threading.option='topic.wise', bootstrap.servers='localhost:9092', partition.no.list='0', "
                + "@map(type='text'))" + "Define stream FooStream (number long);" + "@info(name = 'query1') "
                + "from FooStream select count(number) as count insert into BarStream;";

        ExecutionPlanRuntime executionPlanRuntime1 = siddhiManager1.createExecutionPlanRuntime(query1);
        ExecutionPlanRuntime executionPlanRuntime2 = siddhiManager2.createExecutionPlanRuntime(query2);

        executionPlanRuntime2.addCallback("BarStream", new StreamCallback() {
            @Override
            public void receive(Event[] events) {
                for (Event event : events) {
                    eventArrived = true;
                    System.out.println(event);
                    count = Math.toIntExact((long) event.getData(0));
                }

            }
        });

        // start the execution plan
        executionPlanRuntime1.start();
        executionPlanRuntime2.start();
        // let it initialize
        Thread.sleep(2000);

        // start publishing events to Kafka
        Future eventSender = executorService.submit(new Runnable() {
            @Override
            public void run() {
                kafkaPublisher(new String[] { "kafka_topic5" }, 1, 50, 1000);
            }
        });

        // wait for some time
        Thread.sleep(28000);
        // initiate the checkpointing tasks
        Future persistor1 = executionPlanRuntime1.persist();
        Future persistor2 = executionPlanRuntime2.persist();
        // wait till both checkpointing tasks are done (the original && would stop as soon as either finished)
        while (!persistor1.isDone() || !persistor2.isDone()) {
            Thread.sleep(100);
        }
        // let a few more events be published
        Thread.sleep(5000);
        // shut down the execution plans - to demonstrate a node failure
        executionPlanRuntime1.shutdown();
        executionPlanRuntime2.shutdown();
        // let a few events be published while the execution plans are down
        Thread.sleep(5000);
        // recreate the execution plan
        executionPlanRuntime1 = siddhiManager1.createExecutionPlanRuntime(query1);
        executionPlanRuntime2 = siddhiManager2.createExecutionPlanRuntime(query2);
        executionPlanRuntime2.addCallback("BarStream", new StreamCallback() {
            @Override
            public void receive(Event[] events) {
                for (Event event : events) {
                    eventArrived = true;
                    System.out.println(event);
                    count = Math.toIntExact((long) event.getData(0));
                }

            }
        });
        // start the execution plan
        executionPlanRuntime1.start();
        executionPlanRuntime2.start();
        // immediately trigger a restore from last revision
        executionPlanRuntime1.restoreLastRevision();
        executionPlanRuntime2.restoreLastRevision();
        Thread.sleep(5000);

        // wait till all the events are published
        while (!eventSender.isDone()) {
            Thread.sleep(2000);
        }

        Thread.sleep(20000);
        assertTrue(eventArrived);
        // assert the count
        assertEquals(50, count);

        executionPlanRuntime1.shutdown();
        executionPlanRuntime2.shutdown();
    } catch (ZkTimeoutException ex) {
        log.warn("No zookeeper may not be available.", ex);
    }
}

From source file:org.ebayopensource.turmeric.runtime.tests.common.sif.tester.ServicePayloadExecutor.java

@SuppressWarnings({ "rawtypes", "unchecked" })
private void invokeAsyncPush(ExecutionScope scope, Service svc, List<Object> outParams) throws Exception {
    Dispatch dispatch = null;
    Future future = null;

    try {
        dispatch = svc.createDispatch(operationName);
        GenericAsyncHandler handler = new GenericAsyncHandler<MyMessage>();

        if (useInParams) {
            future = dispatch.invokeAsync(message, handler);
        } else {
            future = dispatch.invokeAsync(null, handler);
        }

        while (!future.isDone()) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                e.printStackTrace();
                break;
            }
        }

        if (handler.hasError()) {
            throw (ExecutionException) handler.getError();
        }

        outParams.add(handler.get());

        Response response = handler.getResponse();
        assertNotExpectingException();
        if (assertPayload != null) {
            byte payload[] = (byte[]) response.getContext().get("PAYLOAD");
            assertPayload.assertPayload(scope, svc, payload);
        }
        if (assertResponse != null) {
            assertResponse.assertResponse(scope, svc, new ResponseAssertableResponse(response));
        }
    } finally {

    }
}

From source file:com.baifendian.swordfish.execserver.runner.flow.FlowRunner.java

/**
 * Runs the flow DAG: nodes that already finished are removed, the start
 * vertices are submitted, and the loop then waits for node runners to
 * complete. A failed node is retried until it reaches maxTryTimes; on
 * success, successor nodes whose predecessors have all succeeded are
 * submitted. Returns the overall flow status.
 * (The original comments in this file were mis-encoded; they have been
 * reconstructed in English from the code.)
 */

private FlowStatus runFlow(Graph<String, FlowNode, FlowNodeRelation> dagGraph) {
    // semaphore used by node runners to signal completion
    Semaphore semaphore = new Semaphore(0);

    // remove nodes that already finished from the DAG
    try {
        for (String nodeName : dagGraph.topologicalSort()) {
            ExecutionNode executionNode = flowDao.queryExecutionNode(executionFlow.getId(), nodeName);

            // the node finished in a previous run; drop it from the graph
            if (executionNode != null && executionNode.getStatus().typeIsFinished()) {
                dagGraph.removeVertex(nodeName);
            }
        }
    } catch (Exception e) {
        logger.error("Get topological of graph failed.", e);
        return FlowStatus.FAILED;
    }

    // get the start vertices of the DAG
    Collection<String> startVertex = dagGraph.getStartVertex();

    // submit a runner for each start vertex that has not been submitted yet
    for (String nodeName : startVertex) {
        if (!executionNodeMap.containsKey(nodeName)) {
            // create the execution node record
            ExecutionNode executionNode = insertExecutionNode(executionFlow, nodeName);

            // keep track of the execution node
            executionNodeMap.put(nodeName, executionNode);

            // submit the node runner
            submitNodeRunner(dagGraph.getVertex(nodeName), executionNode, semaphore);
        }
    }

    // overall flow status
    FlowStatus status = FlowStatus.SUCCESS;

    // loop until there are no active node runners left
    while (!activeNodeRunners.isEmpty()) {
        boolean acquire = false;

        try {
            // wait for a runner to signal completion, bounded by the node timeout
            acquire = semaphore.tryAcquire(calcNodeTimeout(), TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            logger.error(e.getMessage(), e);
        } catch (ExecTimeoutException e) {
            logger.error(e.getMessage(), e);
        }

        // timed out waiting for any runner; clean up and fail the flow
        if (!acquire) {
            clean(true);
            return FlowStatus.FAILED;
        }

        // wait until one of the active runners has actually completed
        boolean done = false;

        while (!done) {
            // brief pause between scans
            try {
                Thread.sleep(50);
            } catch (InterruptedException e) {
                logger.error(e.getMessage(), e);
            }

            // scan the active runners for a completed future
            for (Map.Entry<NodeRunner, Future<Boolean>> entry : activeNodeRunners.entrySet()) {
                NodeRunner nodeRunner = entry.getKey();
                Future<Boolean> future = entry.getValue();

                // this runner's future has completed
                if (future.isDone()) {
                    // stop scanning once this runner is handled
                    done = true;

                    // remove it from the active set
                    activeNodeRunners.remove(nodeRunner);

                    Boolean value = false;

                    Date now = new Date();

                    try {
                        value = future.get();
                    } catch (CancellationException e) {
                        logger.error("task has been cancel");

                        // ?
                        clean(true);
                        return FlowStatus.KILL;
                    } catch (InterruptedException e) {
                        logger.error(e.getMessage(), e);
                    } catch (ExecutionException e) {
                        logger.error(e.getMessage(), e);
                    }

                    // the node failed
                    if (!value) {
                        // look up the execution record of the failed node
                        ExecutionNode executionNode = executionNodeMap.get(nodeRunner.getNodename());

                        // retry while the attempt count is below maxTryTimes
                        if (executionNode.getAttempt() < maxTryTimes) {
                            executionNode.incAttempt();

                            // persist the updated attempt count
                            flowDao.updateExecutionNode(executionNode);

                            // resubmit the node runner
                            submitNodeRunner(dagGraph.getVertex(nodeRunner.getNodename()), executionNode,
                                    semaphore);
                        } else {
                            // retries exhausted; mark the flow as failed
                            status = FlowStatus.FAILED;

                            executionNode.setEndTime(now);
                            executionNode.setStatus(status);

                            // persist the failed node state
                            flowDao.updateExecutionNode(executionNode);

                            if (failurePolicyType == FailurePolicyType.END) {
                                clean(true);
                                return status;
                            }
                        }
                    } else { // the node succeeded
                        // update the execution record as successful
                        ExecutionNode executionNode = executionNodeMap.get(nodeRunner.getNodename());

                        executionNode.setEndTime(now);
                        executionNode.setStatus(FlowStatus.SUCCESS);

                        flowDao.updateExecutionNode(executionNode);

                        // submit successors whose predecessors have all succeeded
                        for (String nodeName : dagGraph.getPostNode(nodeRunner.getNodename())) {
                            if (!executionNodeMap.containsKey(nodeName)
                                    && isPreNodesAllSuccess(dagGraph.getPreNode(nodeName))) {
                                // create the execution node record
                                ExecutionNode newExecutionNode = insertExecutionNode(executionFlow, nodeName);

                                // keep track of it
                                executionNodeMap.put(nodeName, newExecutionNode);

                                // submit the node runner
                                submitNodeRunner(dagGraph.getVertex(nodeName), newExecutionNode, semaphore);
                            }
                        }
                    }

                    break;
                }
            }
        }
    }

    return status;
}

From source file:org.asqatasun.webapp.orchestrator.AsqatasunOrchestratorImpl.java

/**
 * Submits the given audit thread and polls until it completes or its
 * duration exceeds the allowed delay, in which case the audit result is
 * handled asynchronously.
 * @param auditTimeoutThread
 * @param act
 * @return the audit
 */
private Audit submitAuditAndLaunch(AuditTimeoutThread auditTimeoutThread, Act act) {
    synchronized (auditTimeoutThread) {
        Future submitedThread = threadPoolTaskExecutor.submit(auditTimeoutThread);
        while (submitedThread != null && !submitedThread.isDone()) {
            try {
                Thread.sleep(500);
            } catch (InterruptedException ex) {
                LOGGER.error("", ex);
            }
            if (auditTimeoutThread.isDurationExceedsDelay()) {
                LOGGER.debug("Audit Duration ExceedsDelay. The audit result "
                        + "is now managed in an asynchronous way.");
                break;
            }
        }
        if (null != auditTimeoutThread.getException()) {
            LOGGER.error("new KrashAuditException()");
            throw new KrashAuditException(auditTimeoutThread.getException());
        }
        return auditTimeoutThread.getAudit();
    }
}