Example usage for java.util.concurrent TimeUnit MILLISECONDS

Introduction

On this page you can find example usages of java.util.concurrent.TimeUnit.MILLISECONDS, collected from real-world open-source projects.

Prototype

TimeUnit MILLISECONDS

Document

Time unit representing one thousandth of a second.
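
To ground the project examples that follow, here is a minimal, self-contained sketch (plain JDK; the class name MillisecondsDemo is invented for illustration) of the three uses of MILLISECONDS that recur below: converting between units, sleeping, and bounding a blocking wait.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class MillisecondsDemo {
    public static void main(String[] args) throws InterruptedException {
        // Unit conversions: toSeconds truncates, so 1500 ms -> 1 s.
        long seconds = TimeUnit.MILLISECONDS.toSeconds(1500);
        long millis = TimeUnit.SECONDS.toMillis(2);
        System.out.println(seconds + " s, " + millis + " ms");

        // Sleep for 100 ms; equivalent to Thread.sleep(100) with the unit explicit.
        TimeUnit.MILLISECONDS.sleep(100);

        // Bound a blocking wait: await returns false when the latch is not
        // counted down within 200 ms.
        CountDownLatch latch = new CountDownLatch(1);
        boolean completed = latch.await(200, TimeUnit.MILLISECONDS);
        System.out.println("completed within timeout: " + completed);
    }
}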

Usage

From source file: com.topekalabs.synchronization.LockTest.java

@Test
public void testLockAndUnlock() {
    final long timeout = 500;

    ExecutorService es = Executors.newSingleThreadExecutor();
    Future<Integer> future = es.submit(new Callable<Integer>() {
        @Override
        public Integer call() throws Exception {
            Lock lock = lockClass.newInstance();
            lock.lock();
            lock.unlock();
            return 1;
        }
    });

    try {
        future.get(timeout, TimeUnit.MILLISECONDS);
    } catch (InterruptedException | ExecutionException | TimeoutException ex) {
        ErrorThrower.kill(ex);
    }

    es.shutdown();
}
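
Here MILLISECONDS bounds the blocking Future.get call: if the lock/unlock task does not complete within 500 ms, get throws a TimeoutException, which the test routes to ErrorThrower.kill along with any interruption or execution failure.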

From source file: info.losd.galen.repository.InfluxdbHealthcheckRepo.java

@Override
public void save(HealthcheckDetails s) {
    Point point = Point.measurement("statistic").time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
            .tag("healthcheck", s.getHealthcheck().getName()).field("response_time", s.getDuration())
            .field("status_code", s.getStatusCode()).build();

    influxDB.write(db.getName(), "default", point);
}
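
The point is timestamped with System.currentTimeMillis(), and TimeUnit.MILLISECONDS tells the InfluxDB client to interpret that raw epoch value as milliseconds.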

From source file: ru.asmsoft.p2p.storage.MemoryNodesRepository.java

@PostConstruct
public void init() {

    Cache<String, Node> cache = CacheBuilder.newBuilder()
            .expireAfterAccess(config.getHeartbeatPeriodExpired(), TimeUnit.MILLISECONDS).removalListener(this)
            .build();
    nodes = cache.asMap();

    // Initialization delay (Heartbeat period * 2)
    new Timer().schedule(new TimerTask() {
        @Override
        public void run() {
            logger.debug("NodesRepository is ready.");
            isInitialised = true;
        }
    }, config.getHeartbeatPeriod() * 2);

}
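
expireAfterAccess evicts any node entry that has not been read or written within the configured heartbeat-expiry period, given in milliseconds; the repository itself is registered as the removal listener, so it is notified when a node expires.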

From source file: com.ebay.pulsar.metriccalculator.processor.SOJPipelineStatisticsCollector.java

public SOJPipelineStatisticsCollector() {
    timer = MCScheduler.getMCScheduler();
    timer.scheduleWithFixedDelay(new MetricChecker(), ONE_MINUTE, ONE_MINUTE, TimeUnit.MILLISECONDS);
}
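
scheduleWithFixedDelay measures the delay from the end of one MetricChecker run to the start of the next, so slow checks cannot pile up. Both the initial delay and the period are ONE_MINUTE, presumably 60,000 given the millisecond unit, though its definition is not shown here.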

From source file: com.ksc.http.timers.request.HttpRequestTimer.java

/**
 * Start the timer with the specified timeout and return an object that can be used to track the
 * state of the timer and cancel it if need be.
 *
 * @param apacheRequest
 *            HTTP request this timer will abort if triggered.
 * @param requestTimeoutMillis
 *            A positive value enables the timer; a non-positive value disables it and
 *            returns a dummy tracker task.
 * @return Implementation of {@link HttpRequestAbortTaskTrackerImpl} to query the state of the
 *         task and cancel it if appropriate
 */
public HttpRequestAbortTaskTracker startTimer(final HttpRequestBase apacheRequest,
        final int requestTimeoutMillis) {
    if (isTimeoutDisabled(requestTimeoutMillis)) {
        return NoOpHttpRequestAbortTaskTracker.INSTANCE;
    } else if (executor == null) {
        initializeExecutor();
    }
    HttpRequestAbortTaskImpl timerTask = new HttpRequestAbortTaskImpl(apacheRequest);
    ScheduledFuture<?> timerTaskFuture = executor.schedule(timerTask, requestTimeoutMillis,
            TimeUnit.MILLISECONDS);
    return new HttpRequestAbortTaskTrackerImpl(timerTask, timerTaskFuture);
}
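
The abort task is scheduled requestTimeoutMillis milliseconds out on a shared executor (created lazily), and the returned tracker lets the caller cancel the pending abort once the request completes normally.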

From source file: com.netflix.curator.framework.imps.TestTempFramework.java

@Test
public void testInactivity() throws Exception {
    final CuratorTempFrameworkImpl client = (CuratorTempFrameworkImpl) CuratorFrameworkFactory.builder()
            .connectString(server.getConnectString()).retryPolicy(new RetryOneTime(1))
            .buildTemp(1, TimeUnit.SECONDS);
    try {
        ScheduledExecutorService service = Executors.newScheduledThreadPool(1);
        Runnable command = new Runnable() {
            @Override
            public void run() {
                client.updateLastAccess();
            }
        };
        service.scheduleAtFixedRate(command, 10, 10, TimeUnit.MILLISECONDS);
        client.inTransaction().create().forPath("/foo", "data".getBytes()).and().commit();
        service.shutdownNow();
        Thread.sleep(2000);

        Assert.assertNull(client.getCleanup());
        Assert.assertNull(client.getClient());
    } finally {
        IOUtils.closeQuietly(client);
    }
}
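
The test keeps the temp client alive by touching it every 10 ms via scheduleAtFixedRate, then stops the heartbeat and sleeps past the 1-second inactivity window to assert that the client and its cleanup task have been torn down.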

From source file: org.zalando.zmon.actuator.ZmonMetricsFilterTest.java

@Before
public void setUp() {
    ConsoleReporter reporter = ConsoleReporter.forRegistry(metricRegistry).convertRatesTo(TimeUnit.SECONDS)
            .convertDurationsTo(TimeUnit.MILLISECONDS).build();
    reporter.start(2, TimeUnit.SECONDS);

    restTemplate = new RestTemplate();
    restTemplate.setErrorHandler(new ResponseErrorHandler() {

        @Override
        public boolean hasError(final ClientHttpResponse response) throws IOException {

            // we want them all to pass
            return false;
        }

        @Override
        public void handleError(final ClientHttpResponse response) throws IOException {
        }
    });

}
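
The Dropwizard ConsoleReporter is configured to print rates per second and durations in milliseconds, reporting every 2 seconds while the test runs.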

From source file: com.netflix.spinnaker.kork.metrics.SpectatorMetricWriter.java

@Override
public void set(Metric<?> value) {
    if (value.getName().startsWith("histogram.")) {
        registry.distributionSummary(value.getName()).record(value.getValue().longValue());
    } else if (value.getName().startsWith("timer.")) {
        registry.timer(value.getName()).record(value.getValue().longValue(), TimeUnit.MILLISECONDS);
    } else {
        final Id id = registry.createId(value.getName());
        final AtomicDouble gauge = getGaugeStorage(id);
        gauge.set(value.getValue().doubleValue());

        registry.gauge(id, gauge);
    }

}
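
The writer dispatches on the metric-name prefix: histogram.* values go to a distribution summary, timer.* values are recorded as millisecond durations, and anything else is written into an AtomicDouble that backs a registered gauge.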

From source file: org.kaaproject.kaa.client.channel.failover.DefaultFailoverManagerTest.java

@Before
public void setUp() {
    channelManager = Mockito.mock(KaaChannelManager.class);
    context = Mockito.mock(ExecutorContext.class);
    Mockito.when(context.getScheduledExecutor()).thenReturn(Executors.newScheduledThreadPool(1));
    FailoverStrategy failoverStrategy = new DefaultFailoverStrategy(BOOTSTRAP_RETRY_PERIOD, 1, 1,
            TimeUnit.MILLISECONDS);
    failoverManager = new DefaultFailoverManager(channelManager, context, failoverStrategy,
            RESOLUTION_TIMEOUT_MS, TimeUnit.MILLISECONDS);
    resolutionProgressMap = Mockito
            .spy(new HashMap<ServerType, DefaultFailoverManager.AccessPointIdResolution>());
    ReflectionTestUtils.setField(failoverManager, "resolutionProgressMap", resolutionProgressMap);
}
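
Both the strategy's bootstrap retry period and the manager's resolution timeout are passed in milliseconds, so the test's timing constants stay small and the test stays fast.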

From source file: com.streamsets.datacollector.util.ClusterUtil.java

public static void setupCluster(String testName, String pipelineJson, YarnConfiguration yarnConfiguration)
        throws Exception {
    System.setProperty("sdc.testing-mode", "true");
    System.setProperty(MiniSDCTestingUtility.PRESERVE_TEST_DIR, "true");
    yarnConfiguration.set("yarn.nodemanager.delete.debug-delay-sec", "600");
    miniSDCTestingUtility = new MiniSDCTestingUtility();
    File dataTestDir = miniSDCTestingUtility.getDataTestDir();

    //copy spark files under the test data directory into a dir called "spark"
    File sparkHome = ClusterUtil.createSparkHome(dataTestDir);

    //start mini yarn cluster
    miniYarnCluster = miniSDCTestingUtility.startMiniYarnCluster(testName, 1, 1, 1, yarnConfiguration);
    Configuration config = miniYarnCluster.getConfig();

    long deadline = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(10);
    while ("0".equals(config.get(YarnConfiguration.RM_ADDRESS).split(":")[1])) {
        if (System.currentTimeMillis() > deadline) {
            throw new IllegalStateException("Timed out waiting for RM to come up.");
        }
        LOG.debug("RM address still not set in configuration, waiting...");
        TimeUnit.MILLISECONDS.sleep(100);
    }
    LOG.debug("RM at " + config.get(YarnConfiguration.RM_ADDRESS));

    Properties sparkHadoopProps = new Properties();
    for (Map.Entry<String, String> entry : config) {
        sparkHadoopProps.setProperty("spark.hadoop." + entry.getKey(), entry.getValue());
    }

    LOG.debug("Creating spark properties file at " + dataTestDir);
    File propertiesFile = new File(dataTestDir, "spark.properties");
    propertiesFile.createNewFile();
    FileOutputStream sdcOutStream = new FileOutputStream(propertiesFile);
    sparkHadoopProps.store(sdcOutStream, null);
    sdcOutStream.flush();
    sdcOutStream.close();
    // Need to pass this properties file to spark-submit so it picks up the YARN confs
    System.setProperty(SPARK_PROPERTY_FILE, propertiesFile.getAbsolutePath());

    File sparkBin = new File(sparkHome, "bin");
    for (File file : sparkBin.listFiles()) {
        MiniSDCTestingUtility.setExecutePermission(file.toPath());
    }

    miniSDC = miniSDCTestingUtility.createMiniSDC(MiniSDC.ExecutionMode.CLUSTER);
    miniSDC.startSDC();
    serverURI = miniSDC.getServerURI();
    miniSDC.createPipeline(pipelineJson);
    miniSDC.startPipeline();

    int attempt = 0;
    // Hard wait of up to 2 minutes (24 attempts x 5 s)
    while (miniSDC.getListOfSlaveSDCURI().size() == 0 && attempt < 24) {
        Thread.sleep(5000);
        attempt++;
        LOG.debug("Attempt no: " + attempt + " to retrieve list of slaves");
    }
    if (miniSDC.getListOfSlaveSDCURI().size() == 0) {
        throw new IllegalStateException("Timed out waiting for slaves to come up.");
    }
}
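
TimeUnit.MILLISECONDS.sleep(100) serves as the polling interval while waiting for the ResourceManager port to be assigned; it behaves like Thread.sleep(100) but keeps the unit explicit. The 10-second deadline turns a wedged ResourceManager into a clear failure instead of an infinite loop.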