Example usage for java.util.concurrent.locks LockSupport parkNanos

Introduction

This page collects usage examples for java.util.concurrent.locks.LockSupport#parkNanos(long), drawn from several open-source projects.

Prototype

public static void parkNanos(long nanos) 

Document

Disables the current thread for thread scheduling purposes, for up to the specified waiting time, unless the permit is available.
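
As a minimal illustration of the permit semantics (the class name and timings below are made up for this sketch, not taken from any of the projects listed): parkNanos returns as soon as the permit becomes available via unpark, on a spurious wakeup, or when the timeout elapses, so callers typically re-check their condition in a loop.

import java.util.concurrent.locks.LockSupport;

public class ParkNanosDemo {
    public static void main(String[] args) throws Exception {
        Thread waiter = new Thread(() -> {
            long start = System.nanoTime();
            // Blocks for up to 2 seconds, but returns early once the permit
            // is made available by unpark (or on a spurious wakeup).
            LockSupport.parkNanos(2_000_000_000L);
            System.out.printf("woke after %d ms%n", (System.nanoTime() - start) / 1_000_000);
        });
        waiter.start();
        Thread.sleep(100);
        LockSupport.unpark(waiter); // the waiter wakes well before the 2 s timeout
        waiter.join();
    }
}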

Usage

From source file:metlos.executors.batch.BatchCpuThrottlingExecutor.java

@Override
protected void afterExecute(Runnable r, Throwable t) {
    ThreadUsageRecord rec = getThreadUsageRecord();

    //compute the CPU usage for the execution that just happened

    long startTime = rec.startTime;
    long now = System.nanoTime();
    long initialCpuTime = rec.initialCpuTime;
    long finalCpuTime = threadBean.getCurrentThreadCpuTime();

    long cpuTime = finalCpuTime - initialCpuTime;

    if (cpuTime == 0) {
        //collecting cpu time is not very accurate, so let's wait until
        //we have some data to go with
        return;
    }

    long duration = now - startTime;

    //now figure out how long to wait so that the overall CPU usage gets into
    //the limit

    //we know what is the allowed usage we must fit into
    float allowedUsage = getMaximumCpuUsage() / getPoolSize();

    //and we know an alternative expression for allowed usage:
    //allowedUsage = cpuTime / (duration + correction);
    //  ||
    //  \/
    //correction = (cpuTime - allowedUsage * duration) / allowedUsage

    long correction = (long) ((cpuTime - allowedUsage * duration) / allowedUsage);

    if (LOG.isTraceEnabled()) {
        //long durationMs = duration / 1000000;
        //long cpuTimeMs = cpuTime / 1000000;
        //long correctionMs = correction / 1000000;
        LOG.trace("Execution correction: tasks duration=" + duration + "ns, initialCpuTime=" + initialCpuTime
                + ", finalCpuTime=" + finalCpuTime + ", cpuTime=" + cpuTime + "ns, correction=" + correction
                + "ns, poolsize=" + getPoolSize() + ", allowedUsage=" + allowedUsage);
    }

    //reset the time collection
    rec.startTime = 0;

    currentlyExecutingTasks.decrementAndGet();

    if (correction > 0) {
        LockSupport.parkNanos(correction);
    }
}
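
To make the correction concrete with purely illustrative numbers: if getMaximumCpuUsage() returns 0.5, the pool size is 1, and the task consumed cpuTime = 10 ms of CPU over a wall-clock duration of 15 ms, then allowedUsage = 0.5 and correction = (10 - 0.5 * 15) / 0.5 = 5 ms. Parking for 5 ms stretches the task to 10 ms of CPU over 20 ms of wall-clock time, exactly the allowed 50% usage.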

From source file:oz.hadoop.yarn.api.core.DataProcessorImpl.java

/**
 * Scans the delegate pool until a free delegate matching the given IP filter is acquired.
 *
 * @param ipRegexFilter regular expression used to match a delegate's IP address
 * @return the index of the acquired delegate, or -1 if the processor became inactive first
 */
private int getIndexOfAvailableDelegate(String ipRegexFilter) {
    int index = -1;
    boolean found = false;
    while (!found && this.active) {
        for (int i = 0; i < this.busyDelegatesFlags.length && !found; i++) {
            found = this.busyDelegatesFlags[i].compareAndSet(false, true);
            if (found) {
                if (isMatch(i, ipRegexFilter)) {
                    index = i;
                } else {
                    // not a match: release the flag we just acquired
                    found = false;
                    if (!this.busyDelegatesFlags[i].compareAndSet(true, false)) {
                        logger.error(
                                "Failed to release 'busyDelegatesFlag'. Should never happen. Concurrency issue; if you see this message, REPORT!");
                        DataProcessorImpl.this.stop();
                        throw new IllegalStateException(
                                "Should never happen. Concurrency issue, if you see this message, REPORT!");
                    }
                }
            }
        }
        LockSupport.parkNanos(10000);
    }
    return index;
}

From source file:com.espertech.esper.filter.FilterServiceBase.java

private void retryableMatchEvent(EventBean theEvent, Collection<FilterHandle> matches) {
    // Install lock backoff exception handler that retries the evaluation.
    try {
        eventTypeIndex.matchEvent(theEvent, matches);
    } catch (FilterLockBackoffException ex) {
        // Retry on lock back-off.
        // Lock back-off may occur during stateful evaluations, such as boolean expressions that are subqueries.
        // Statements containing subqueries in a pattern filter expression can themselves modify filters, making a deadlock theoretically possible.
        long delayNs = 10;
        while (true) {
            try {
                // yield
                try {
                    Thread.sleep(0);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }

                // delay
                LockSupport.parkNanos(delayNs);
                if (delayNs < 1000000000) {
                    delayNs = delayNs * 2;
                }

                // evaluate
                matches.clear();
                eventTypeIndex.matchEvent(theEvent, matches);
                break;
            } catch (FilterLockBackoffException ex2) {
                // retried
            }
        }
    }
}
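
The loop above is a generic exponential backoff built on parkNanos: park, double the interval, cap at one second. A minimal standalone sketch of the same pattern (the Backoff class and the attempt supplier are illustrative, not part of Esper):

import java.util.concurrent.locks.LockSupport;
import java.util.function.BooleanSupplier;

public final class Backoff {
    /** Retries the action with an exponentially growing park interval, capped at 1 s. */
    public static void retryWithBackoff(BooleanSupplier attempt) {
        long delayNs = 10; // same starting delay as the Esper example
        while (!attempt.getAsBoolean()) {
            LockSupport.parkNanos(delayNs);
            if (delayNs < 1_000_000_000L) {
                delayNs *= 2; // double toward the 1-second ceiling
            }
        }
    }
}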

From source file:oz.hadoop.yarn.api.net.ApplicationContainerServerImpl.java

/**
 * Stops this server, closing all remaining connections to Application Container
 * clients, waiting if necessary.
 * Closing such connections forces Application Container clients to stop,
 * essentially stopping and exiting the Application Containers and thus the entire application.
 * 
 * @param force
 *       boolean flag indicating if this application should be terminated immediately or 
 *       should it shut down gracefully allowing currently running Application Container 
 *       processes to finish.
 */
@Override
void preStop(boolean force) {
    // Work on a copy so entries can be removed without affecting the global map, which is cleared at the end.
    Map<SelectionKey, ContainerDelegate> cDelegates = new HashMap<>(this.containerDelegates);
    boolean working = cDelegates.size() > 0;
    while (working) {
        Iterator<SelectionKey> containerSelectionKeys = cDelegates.keySet().iterator();
        while (containerSelectionKeys.hasNext()) {
            SelectionKey selectionKey = containerSelectionKeys.next();
            ContainerDelegate containerDelegate = cDelegates.get(selectionKey);
            containerDelegate.suspend();
            if (!force) {
                if (containerDelegate.available() || !selectionKey.channel().isOpen()) {
                    containerSelectionKeys.remove();
                }
            } else {
                containerSelectionKeys.remove();
            }
        }
        working = cDelegates.size() > 0;
        if (working) {
            if (logger.isTraceEnabled()) {
                logger.trace("Waiting for remaining " + cDelegates.size() + " containers to finish");
            }
            LockSupport.parkNanos(10000000);
        } else {
            this.containerDelegates.clear();
        }
    }
    for (int i = 0; i < this.expectedClientContainers; i++) {
        this.expectedClientContainersMonitor.countDown();
    }
}

From source file:oz.hadoop.yarn.api.core.ApplicationContainerLauncherEmulatorImpl.java

/**
 * Polls (parking 1 ms at a time) until the container's 'client' field is set or an error is flagged.
 */
private void awaitApplicationContainerStart(ApplicationContainer applicationContainer, AtomicBoolean errorFlag)
        throws Exception {
    Field clientField = ReflectionUtils.getFieldAndMakeAccessible(ApplicationContainer.class, "client");
    while (clientField.get(applicationContainer) == null && !errorFlag.get()) {
        LockSupport.parkNanos(1000000);
        if (Thread.currentThread().isInterrupted()) {
            logger.warn("Interrupted while waiting for Application Container to start");
            break;
        }
    }
}

From source file:oz.hadoop.yarn.api.core.LocalApplicationLaunchTests.java

@Test(timeout = 2000)
public void validateJavaContainerLaunchSelfShutdown() throws Exception {
    YarnApplication<Void> yarnApplication = YarnAssembly
            .forApplicationContainer(SimpleEchoContainer.class, ByteBuffer.wrap("Hello".getBytes()))
            .containerCount(2).memory(512).withApplicationMaster().maxAttempts(2).priority(2)
            .build("sample-yarn-application");
    assertFalse(yarnApplication.isRunning());
    yarnApplication.launch();
    while (yarnApplication.isRunning()) {
        LockSupport.parkNanos(1000000);
    }
    assertEquals(0, yarnApplication.liveContainers());
    assertFalse(yarnApplication.isRunning());
}

From source file:com.ottogroup.bi.spqr.pipeline.component.queue.chronicle.DefaultStreamingMessageQueueTest.java

/**
 * Inserts a configurable number of messages into a {@link Chronicle} and measures the
 * duration it takes to read the content from it using the {@link DefaultStreamingMessageQueue} implementation
 */
//   @Test
public void testNext_performanceTest() throws Exception {

    Properties props = new Properties();
    props.put(DefaultStreamingMessageQueue.CFG_CHRONICLE_QUEUE_DELETE_ON_EXIT, "true");
    props.put(DefaultStreamingMessageQueue.CFG_CHRONICLE_QUEUE_PATH, System.getProperty("java.io.tmpdir"));
    final DefaultStreamingMessageQueue inbox = new DefaultStreamingMessageQueue();
    inbox.setId("testNext_performanceTest");
    inbox.initialize(props);

    final StreamingMessageQueueProducer producer = inbox.getProducer();
    final StreamingMessageQueueConsumer consumer = inbox.getConsumer();

    final CountDownLatch latch = new CountDownLatch(numberOfMessagesPerfTest);

    ExecutorService svc = Executors.newCachedThreadPool();

    Future<Integer> producerDurationFuture = svc.submit(new Callable<Integer>() {

        public Integer call() {
            StreamingDataMessage object = new StreamingDataMessage(new byte[] { 01, 2, 3, 4, 5, 6, 7, 9 },
                    System.currentTimeMillis());
            long s1 = System.nanoTime();
            for (int i = 0; i < numberOfMessagesPerfTest; i++) {
                producer.insert(object);
            }
            long s2 = System.nanoTime();
            return (int) (s2 - s1);
        }
    });

    Future<Integer> durationFuture = svc.submit(new Callable<Integer>() {
        public Integer call() {
            StreamingDataMessage msg = null;
            long start = System.nanoTime();
            while (true) {
                msg = consumer.next();
                if (msg != null) {
                    latch.countDown();
                    if (latch.getCount() == 0)
                        break;
                } else {
                    LockSupport.parkNanos(1);
                }

            }
            long end = System.nanoTime();
            return (int) (end - start);
        }
    });

    try {
        Assert.assertTrue("Failed to receive expected number of messages", latch.await(10, TimeUnit.SECONDS));
    } catch (InterruptedException e) {
        Assert.fail("Failed to receive expected number of messages");
    }

    int producerDuration = producerDurationFuture.get();
    int duration = durationFuture.get();

    double messagesPerNano = ((double) numberOfMessagesPerfTest / (double) duration);
    double messagesPerNanoRounded = (double) Math.round(messagesPerNano * 10000) / 10000;

    double messagesPerMilli = messagesPerNano * 1000000;
    messagesPerMilli = (double) Math.round(messagesPerMilli * 100) / 100;

    long messagesPerSecond = Math.round(messagesPerNano * 1000000 * 1000);

    double nanosPerMessage = ((double) duration / (double) numberOfMessagesPerfTest);
    nanosPerMessage = (double) Math.round(nanosPerMessage * 100) / 100;

    logger.info("message count: " + numberOfMessagesPerfTest);
    logger.info(
            "message producing: " + producerDuration + "ns, " + TimeUnit.NANOSECONDS.toMillis(producerDuration)
                    + "ms, " + TimeUnit.NANOSECONDS.toSeconds(producerDuration) + "s");
    logger.info("message consumption: " + duration + "ns, " + TimeUnit.NANOSECONDS.toMillis(duration) + "ms, "
            + TimeUnit.NANOSECONDS.toSeconds(duration) + "s");
    logger.info("message throughput: " + messagesPerNanoRounded + " msgs/ns, " + messagesPerMilli + " msgs/ms, "
            + messagesPerSecond + " msgs/s");

    svc.shutdownNow();
}

From source file:oz.hadoop.yarn.api.core.LocalApplicationLaunchTests.java

@Test(timeout = 5000)
@Ignore // fix to adjust for API changes
public void validateJavaContainerLaunchAndVariableProcessTimeWithForcedShutdown() throws Exception {
    final YarnApplication<Void> yarnApplication = YarnAssembly
            .forApplicationContainer(VariableProcessingTime.class, ByteBuffer.wrap("Hello".getBytes()))
            .containerCount(6).memory(512).withApplicationMaster().maxAttempts(2).priority(2)
            .build("sample-yarn-application");
    ExecutorService executor = Executors.newCachedThreadPool();
    executor.execute(new Runnable() {
        @Override
        public void run() {
            yarnApplication.launch();
        }
    });
    // wait till all 6 are active
    while (yarnApplication.liveContainers() != 6) {
        LockSupport.parkNanos(10000);
    }
    System.out.println("Running: " + yarnApplication.isRunning());
    // wait till some begin to shutdown
    while (yarnApplication.liveContainers() == 6) {
        LockSupport.parkNanos(10000);
    }
    System.out.println("Running: " + yarnApplication.isRunning());
    assertTrue(yarnApplication.isRunning());
    yarnApplication.terminate();
    assertEquals(0, yarnApplication.liveContainers());
    assertFalse(yarnApplication.isRunning());
}

From source file:oz.hadoop.yarn.api.core.LocalApplicationLaunchTests.java

@Test(timeout = 2000)
public void validateContainerLaunchWithCommandSelfShutdown() throws Exception {
    YarnApplication<Void> yarnApplication = YarnAssembly.forApplicationContainer("date").containerCount(2)
            .memory(512).withApplicationMaster().maxAttempts(2).priority(2).build("sample-yarn-application");

    assertEquals(0, yarnApplication.liveContainers());
    yarnApplication.launch();
    while (yarnApplication.isRunning()) {
        LockSupport.parkNanos(1000000);
    }
    assertFalse(yarnApplication.isRunning());
}

From source file:oz.hadoop.yarn.api.core.LocalApplicationLaunchTests.java

@Test(timeout = 2000)
public void validateContainerLaunchWithInfiniteCommandForcedShutdown() throws Exception {
    ClassPathResource resource = new ClassPathResource("infinite", this.getClass());
    final YarnApplication<Void> yarnApplication = YarnAssembly
            .forApplicationContainer(resource.getFile().getAbsolutePath()).containerCount(3).memory(512)
            .withApplicationMaster().maxAttempts(2).priority(2).build("sample-yarn-application");

    assertEquals(0, yarnApplication.liveContainers());
    assertFalse(yarnApplication.isRunning());

    ExecutorService executor = Executors.newCachedThreadPool();
    executor.execute(new Runnable() {
        @Override
        public void run() {
            yarnApplication.launch();
        }
    });
    while (yarnApplication.liveContainers() != 3) {
        LockSupport.parkNanos(1000);
    }
    assertTrue(yarnApplication.isRunning());
    yarnApplication.terminate();
    assertEquals(0, yarnApplication.liveContainers());
}