Example usage for java.util.concurrent TimeUnit MINUTES

List of usage examples for java.util.concurrent TimeUnit MINUTES

Introduction

On this page you can find usage examples for java.util.concurrent TimeUnit MINUTES.

Prototype

TimeUnit MINUTES

To view the source code for java.util.concurrent TimeUnit MINUTES, click the Source Link.

Document

Time unit representing sixty seconds.
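
Before the examples, here is a minimal, self-contained sketch of the conversions and timeout arguments that TimeUnit.MINUTES supports. The class name TimeUnitMinutesDemo is made up for illustration; the TimeUnit methods shown are standard java.util.concurrent API.

import java.util.concurrent.TimeUnit;

// Hypothetical demo class, not taken from any of the example source files below.
public class TimeUnitMinutesDemo {
    public static void main(String[] args) throws InterruptedException {
        // Convert minutes into other units.
        System.out.println(TimeUnit.MINUTES.toMillis(5));    // 300000
        System.out.println(TimeUnit.MINUTES.toSeconds(5));   // 300
        System.out.println(TimeUnit.MINUTES.toHours(90));    // 1

        // Convert the other way: how many whole minutes are in 300000 ms?
        System.out.println(TimeUnit.MINUTES.convert(300000, TimeUnit.MILLISECONDS)); // 5

        // Pass the unit directly to APIs that take a (duration, unit) pair,
        // or use its own sleep helper.
        TimeUnit.MINUTES.sleep(0); // a zero-minute sleep returns immediately
    }
}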

Usage

From source file:cloudExplorer.Daemon.java

void start() {

    sync_config_file = (Home + File.separator + "s3config.sync");

    mainmenu();
    if (!gui) {
        messageParser("\n\nCloud Explorer will sync the directory listed in the config file:\n\n"
                + sync_config_file + " to S3 every 5 minutes.");
    }
    try {
        File s3config = new File(s3_config_file);
        if (!s3config.exists()) {
            messageParser("\nError: S3 config file not found.");
            if (!gui) {
                System.exit(-1);
            }
        }

        File syncconfig = new File(sync_config_file);
        if (!syncconfig.exists()) {
            messageParser("\nError: Sync config file not found.");
            if (!gui) {
                System.exit(-1);
            }
        }

        saved_s3_configs = loadConfig(this.s3_config_file).toString().split(" ");
        loadS3credentials();

        saved_directory_to_sync = loadConfig(sync_config_file).toString().split(" ");
        bucket = saved_directory_to_sync[1];

        dirToSync = new File(saved_directory_to_sync[0]);

        File syncDIR = new File(saved_directory_to_sync[0]);
        if (syncDIR.exists()) {

            messageParser("\n\nDirectory to sync: " + dirToSync.toString() + "  Bucket: " + bucket);

            new Thread(new Runnable() {
                public void run() {
                    try {
                        reloadObjects();
                        SyncToS3(dirToSync);
                        //syncFromS3(dirToSync.toString());
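                        // pause 5 minutes between sync passes before starting the next one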
                        Thread.sleep(TimeUnit.MINUTES.toMillis(5));
                        if (gui) {
                            mainFrame.jTextArea1.setText("");
                        }
                        run();

                    } catch (InterruptedException e) {
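                        // interrupted while sleeping; stop syncing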
                    }
                }
            }).start();

        } else {
            messageParser("\nError: " + syncDIR.toString() + " does not exist");
        }
    } catch (Exception Start) {
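        // any startup failure is silently ignored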
    }

}

From source file:org.thevortex.lighting.jinks.icons.WinkIconService.java

/**
 * Check to see if the cache needs refreshing.
 *
 * @throws IOException can't get it
 */
private synchronized void checkCache() throws IOException {
    // if the cache exists, we're done
    if (!iconCache.isEmpty())
        return;

    // get it and start the background updates
    updateCache();

    Runnable command = () -> {
        try {
            WinkIconService.this.updateCache();

            // get bytes
            logger.info("Fetching bytes for empty values.");
            for (WinkIcon icon : iconCache.values()) {
                if (!hasBytes(icon.getImageBytes())) {
                    String url = cacheType.getSelector(icon);
                    icon.setImageBytes(WinkIconService.this.getBytes(url));
                }
            }
        } catch (IOException e) {
            logger.error("Error updating icon cache", e);
        }
    };
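    // schedule the refresh: run after an initial 1-minute delay, then repeat every timeout.toMinutes() minutes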
    scheduler.scheduleAtFixedRate(command, 1, timeout.toMinutes(), TimeUnit.MINUTES);
}

From source file:com.pinterest.terrapin.client.TerrapinClient.java

private void init(FileSetViewManager fileSetViewManager, String clusterName, int targetPort,
        int connectTimeoutMs, int timeoutMs) throws Exception {
    this.statsPrefix = "terrapin-client-" + clusterName + "-";
    this.fileSetViewManager = fileSetViewManager;
    this.thriftClientCache = CacheBuilder.newBuilder().maximumSize(5000).expireAfterAccess(60, TimeUnit.MINUTES)
            .removalListener(
                    new RemovalListener<String, Pair<Service<ThriftClientRequest, byte[]>, TerrapinServerInternal.ServiceIface>>() {
                        @Override
                        public void onRemoval(
                                RemovalNotification<String, Pair<Service<ThriftClientRequest, byte[]>, TerrapinServerInternal.ServiceIface>> removalNotification) {
                            removalNotification.getValue().getLeft().release();
                            LOG.info("Closing client connections to " + removalNotification.getKey());
                        }
                    })
            .build();
    this.targetPort = targetPort;
    this.connectTimeoutMs = connectTimeoutMs;
    this.timeoutMs = timeoutMs;
    this.connectionfuturePool = new ExecutorServiceFuturePool(Executors.newFixedThreadPool(10));
}

From source file:nl.esciencecenter.osmium.job.XenonManager.java

/**
 * Terminates any running Xenon processes and stops the job poller.
 *
 * @throws InterruptedException If waiting for job to complete failed
 * @throws XenonException If Xenon is unable to stop
 */
public void stop() throws InterruptedException, XenonException {
    executor.shutdown();
    // JobsPoller can be in middle of fetching job statuses so give it 1 minute to finish before interrupting it
    executor.awaitTermination(1, TimeUnit.MINUTES);
    poller.stop();
    XenonFactory.endXenon(xenon);
}

From source file:org.opencastproject.userdirectory.jpa.JpaUserAndRoleProvider.java

/**
 * Callback for activation of this component.
 *
 * @param cc
 *          the component context
 */
public void activate(ComponentContext cc) {
    logger.debug("activate");

    // Setup the caches
    cache = new MapMaker().expireAfterWrite(1, TimeUnit.MINUTES)
            .makeComputingMap(new Function<String, Object>() {
                public Object apply(String id) {
                    String[] key = id.split(DELIMITER);
                    logger.trace("Loading user '{}':'{}' from database", key[0], key[1]);
                    User user = loadUser(key[0], key[1]);
                    return user == null ? nullToken : user;
                }
            });

    // Set up persistence
    emf = persistenceProvider.createEntityManagerFactory("org.opencastproject.userdirectory",
            persistenceProperties);
}

From source file:com.cloudant.tests.util.SimpleHttpServer.java

/**
 * Wait up to 2 minutes for the server; any longer than that and we give up on the test.
 *
 * @throws InterruptedException
 */
public void await() throws InterruptedException {
    semaphore.tryAcquire(2, TimeUnit.MINUTES);
}

From source file:io.kamax.mxisd.invitation.InvitationManager.java

@PostConstruct
private void postConstruct() {
    gson = new Gson();

    log.info("Loading saved invites");
    Collection<ThreePidInviteIO> ioList = storage.getInvites();
    ioList.forEach(io -> {
        log.info("Processing invite {}", gson.toJson(io));
        ThreePidInvite invite = new ThreePidInvite(new MatrixID(io.getSender()), io.getMedium(),
                io.getAddress(), io.getRoomId(), io.getProperties());

        ThreePidInviteReply reply = new ThreePidInviteReply(getId(invite), invite, io.getToken(), "");
        invitations.put(reply.getId(), reply);
    });

    // FIXME export such madness into matrix-java-sdk with a nice wrapper to talk to a homeserver
    try {
        SSLContext sslContext = SSLContextBuilder.create().loadTrustMaterial(new TrustSelfSignedStrategy())
                .build();
        HostnameVerifier hostnameVerifier = new NoopHostnameVerifier();
        SSLConnectionSocketFactory sslSocketFactory = new SSLConnectionSocketFactory(sslContext,
                hostnameVerifier);
        client = HttpClients.custom().setSSLSocketFactory(sslSocketFactory).build();
    } catch (Exception e) {
        // FIXME do better...
        throw new RuntimeException(e);
    }

    log.info("Setting up invitation mapping refresh timer");
    refreshTimer = new Timer();
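    // first run after 5 seconds, then repeat every cfg.getResolution().getTimer() minutes (converted to milliseconds for Timer)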
    refreshTimer.scheduleAtFixedRate(new TimerTask() {
        @Override
        public void run() {
            try {
                lookupMappingsForInvites();
            } catch (Throwable t) {
                log.error("Error when running background mapping refresh", t);
            }
        }
    }, 5000L, TimeUnit.MILLISECONDS.convert(cfg.getResolution().getTimer(), TimeUnit.MINUTES));
}

From source file:com.zaxxer.hikari.benchmark.BenchBase.java

protected void setupHikari() {
    HikariConfig config = new HikariConfig();
    config.setMinimumIdle(MIN_POOL_SIZE);
    config.setMaximumPoolSize(maxPoolSize);
    config.setConnectionTimeout(8000);
    config.setIdleTimeout(TimeUnit.MINUTES.toMillis(30));
    config.setJdbc4ConnectionTest(true);
    config.setAutoCommit(false);
    config.setTransactionIsolation("TRANSACTION_READ_COMMITTED");
    config.setDataSourceClassName("com.zaxxer.hikari.benchmark.stubs.StubDataSource");

    DS = new HikariDataSource(config);
}

From source file:edu.wisc.jmeter.dao.JdbcMonitorDao.java

public JdbcMonitorDao(JdbcTemplate jdbcTemplate, PlatformTransactionManager transactionManager,
        int purgeOldFailures, int purgeOldStatus) {
    this.jdbcTemplate = new NamedParameterJdbcTemplate(jdbcTemplate);
    this.transactionTemplate = new TransactionTemplate(transactionManager);

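    // the purge windows are supplied in minutes; store them as milliseconds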
    this.purgeOldFailure = TimeUnit.MILLISECONDS.convert(purgeOldFailures, TimeUnit.MINUTES);
    this.purgeOldStatus = TimeUnit.MILLISECONDS.convert(purgeOldStatus, TimeUnit.MINUTES);
}

From source file:io.druid.tests.indexer.ITKafkaTest.java

@Test
public void testKafka() {
    LOG.info("Starting test: ITKafkaTest");

    // create topic
    try {
        int sessionTimeoutMs = 10000;
        int connectionTimeoutMs = 10000;
        String zkHosts = config.getZookeeperHosts();
        zkClient = new ZkClient(zkHosts, sessionTimeoutMs, connectionTimeoutMs, ZKStringSerializer$.MODULE$);
        int numPartitions = 1;
        int replicationFactor = 1;
        Properties topicConfig = new Properties();
        AdminUtils.createTopic(zkClient, TOPIC_NAME, numPartitions, replicationFactor, topicConfig);
    } catch (TopicExistsException e) {
        // it's ok if the topic already exists
    } catch (Exception e) {
        throw new ISE(e, "could not create kafka topic");
    }

    String indexerSpec;

    // replace temp strings in indexer file
    try {
        LOG.info("indexerFile name: [%s]", INDEXER_FILE);
        indexerSpec = getTaskAsString(INDEXER_FILE).replaceAll("%%DATASOURCE%%", DATASOURCE)
                .replaceAll("%%TOPIC%%", TOPIC_NAME)
                .replaceAll("%%ZOOKEEPER_SERVER%%", config.getZookeeperHosts())
                .replaceAll("%%GROUP_ID%%", Long.toString(System.currentTimeMillis()))
                .replaceAll("%%SHUTOFFTIME%%",
                        new DateTime(
                                System.currentTimeMillis() + TimeUnit.MINUTES.toMillis(2 * MINUTES_TO_SEND))
                                        .toString());
        LOG.info("indexerFile: [%s]\n", indexerSpec);
    } catch (Exception e) {
        // log here so the message will appear in the console output
        LOG.error("could not read indexer file [%s]", INDEXER_FILE);
        throw new ISE(e, "could not read indexer file [%s]", INDEXER_FILE);
    }

    // start indexing task
    taskID = indexer.submitTask(indexerSpec);
    LOG.info("-------------SUBMITTED TASK");

    // set up kafka producer
    Properties properties = new Properties();
    properties.put("metadata.broker.list", config.getKafkaHost());
    LOG.info("kafka host: [%s]", config.getKafkaHost());
    properties.put("serializer.class", "kafka.serializer.StringEncoder");
    properties.put("request.required.acks", "1");
    properties.put("producer.type", "async");
    ProducerConfig producerConfig = new ProducerConfig(properties);
    Producer<String, String> producer = new Producer<String, String>(producerConfig);

    DateTimeZone zone = DateTimeZone.forID("UTC");
    // format for putting into events
    DateTimeFormatter event_fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'");

    DateTime dt = new DateTime(zone); // timestamp to put on events
    dtFirst = dt; // timestamp of 1st event
    dtLast = dt; // timestamp of last event
    // stop sending events when time passes this
    DateTime dtStop = dtFirst.plusMinutes(MINUTES_TO_SEND).plusSeconds(30);

    // these are used to compute the expected aggregations
    int added = 0;
    int num_events = 0;

    // send data to kafka
    while (dt.compareTo(dtStop) < 0) { // as long as we're within the time span
        num_events++;
        added += num_events;
        // construct the event to send
        String event = String.format(event_template, event_fmt.print(dt), num_events, 0, num_events);
        LOG.info("sending event: [%s]", event);
        try {
            // Send event to kafka
            KeyedMessage<String, String> message = new KeyedMessage<String, String>(TOPIC_NAME, event);
            producer.send(message);
        } catch (Exception ioe) {
            throw Throwables.propagate(ioe);
        }

        try {
            Thread.sleep(DELAY_BETWEEN_EVENTS_SECS * 1000);
        } catch (InterruptedException ex) {
            /* nothing */
        }
        dtLast = dt;
        dt = new DateTime(zone);
    }

    producer.close();

    // put the timestamps into the query structure
    String query_response_template = null;
    InputStream is = ITKafkaTest.class.getResourceAsStream(QUERIES_FILE);
    if (null == is) {
        throw new ISE("could not open query file: %s", QUERIES_FILE);
    }

    try {
        query_response_template = IOUtils.toString(is, "UTF-8");
    } catch (IOException e) {
        throw new ISE(e, "could not read query file: %s", QUERIES_FILE);
    }

    String queryStr = query_response_template.replaceAll("%%DATASOURCE%%", DATASOURCE)
            // time boundary
            .replace("%%TIMEBOUNDARY_RESPONSE_TIMESTAMP%%", TIMESTAMP_FMT.print(dtFirst))
            .replace("%%TIMEBOUNDARY_RESPONSE_MAXTIME%%", TIMESTAMP_FMT.print(dtLast))
            .replace("%%TIMEBOUNDARY_RESPONSE_MINTIME%%", TIMESTAMP_FMT.print(dtFirst))
            // time series
            .replace("%%TIMESERIES_QUERY_START%%", INTERVAL_FMT.print(dtFirst))
            .replace("%%TIMESERIES_QUERY_END%%", INTERVAL_FMT.print(dtFirst.plusMinutes(MINUTES_TO_SEND + 2)))
            .replace("%%TIMESERIES_RESPONSE_TIMESTAMP%%", TIMESTAMP_FMT.print(dtFirst))
            .replace("%%TIMESERIES_ADDED%%", Integer.toString(added))
            .replace("%%TIMESERIES_NUMEVENTS%%", Integer.toString(num_events));

    // this query will probably be answered from the realtime task
    try {
        this.queryHelper.testQueriesFromString(queryStr, 2);
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }

    // wait for segments to be handed off
    try {
        RetryUtil.retryUntil(new Callable<Boolean>() {
            @Override
            public Boolean call() throws Exception {
                return coordinator.areSegmentsLoaded(DATASOURCE);
            }
        }, true, 30000, 10, "Real-time generated segments loaded");
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
    LOG.info("segments are present");
    segmentsExist = true;

    // this query will be answered by historical
    try {
        this.queryHelper.testQueriesFromString(queryStr, 2);
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}