Example usage for java.util.concurrent TimeUnit MINUTES

List of usage examples for java.util.concurrent TimeUnit MINUTES

Introduction

In this page you can find the example usage for java.util.concurrent TimeUnit MINUTES.

Prototype

TimeUnit MINUTES

Use the link below to view the source code for java.util.concurrent TimeUnit.MINUTES.

Click Source Link

Document

Time unit representing sixty seconds.

Usage

From source file:br.unb.cic.bionimbuz.services.monitor.MonitoringService.java

@Override
public void start(List<Listeners> listeners) {
    // Run one immediate peer check; a failure here is logged but must not
    // prevent the periodic schedule below from being installed.
    try {
        this.checkPeers();
        // checkPendingSave();
    } catch (final Exception e) {
        // Pass the throwable itself so the stack trace is preserved (the original
        // logged only getMessage(), losing the trace, and lacked a separator).
        LOGGER.error("[MonitoringService] Exception checkPeers: " + e.getMessage(), e);
    }
    this.listeners = listeners;
    if (listeners != null) {
        // Register this service so it receives listener callbacks as well.
        listeners.add(this);
    }
    // Re-run this service every TIME_TO_RUN minutes, starting immediately.
    this.schedExecService.scheduleAtFixedRate(this, 0, TIME_TO_RUN, TimeUnit.MINUTES);
}

From source file:com.jivesoftware.sdk.service.filter.JiveSignedFetchValidator.java

/**
 * Validates a Jive signed-fetch Authorization header on the incoming request.
 * Throws BAD_REQUEST for malformed/partial headers and UNAUTHORIZED for stale
 * timestamps, unknown tenants, client-ID mismatches, or bad signatures. On
 * success the resolved JiveInstance is stored on the request.
 */
public void authenticate(ContainerRequestContext request) {
    String authorization = request.getHeaderString(HttpHeaders.AUTHORIZATION);

    // The header may be absent entirely; without the null check the startsWith()
    // call would throw a NullPointerException instead of a clean 400.
    if (authorization == null || !authorization.startsWith(JIVE_EXTN)
            || !authorization.contains(QUERY_PARAM_SIGNATURE)) {
        logger.log(Level.INFO, "Jive authorization isn't properly formatted: " + authorization);
        throw BAD_REQUEST;
    }

    // The signature is removed from the map so the remaining parameters can be
    // re-signed and compared against it below.
    Map<String, String> paramMap = getParamsFromAuthz(authorization);
    String signature = paramMap.remove(PARAM_SIGNATURE);
    String algorithm = paramMap.get(PARAM_ALGORITHM);
    String clientId = paramMap.get(PARAM_CLIENT_ID);
    String jiveUrl = paramMap.get(PARAM_JIVE_URL);
    String tenantId = paramMap.get(PARAM_TENANT_ID);
    String timeStampStr = paramMap.get(PARAM_TIMESTAMP);

    if (!JiveSDKUtils.isAllExist(algorithm, clientId, jiveUrl, tenantId, timeStampStr)) {
        //TODO: replace System.err with the class logger
        System.err.println("Jive authorization is partial: " + paramMap);
        throw BAD_REQUEST;
    }

    // Reject stale (or future-dated) requests to limit replay attacks.
    long timeStamp = Long.parseLong(timeStampStr);
    long millisPassed = System.currentTimeMillis() - timeStamp;
    if (millisPassed < 0 || millisPassed > TimeUnit.MINUTES.toMillis(5)) {
        System.err.println("Jive authorization is rejected since it's " + millisPassed
                + "ms old (max. allowed is 5 minutes): " + paramMap);
        throw UNAUTHORIZED;
    }

    JiveInstance jiveInstance = jiveAddOnApplication.getJiveInstanceProvider().getInstanceByTenantId(tenantId);

    if (jiveInstance == null) {
        System.err.println("Jive authorization failed due to invalid tenant ID: " + tenantId);
        throw UNAUTHORIZED;
    }

    String expectedClientId = jiveInstance.getClientId();
    if (!clientId.equals(expectedClientId)) {
        // Message fixed: the client ID is present but does not match the expected one.
        String msg = String.format(
                "Jive authorization failed due to mismatched Client ID: Actual [%s], Expected [%s]", clientId,
                expectedClientId);
        System.err.println(msg);
        throw UNAUTHORIZED;
    }

    String clientSecret = jiveInstance.getClientSecret();
    String paramStrWithoutSignature = authorization.substring(JIVE_EXTN.length(),
            authorization.indexOf(QUERY_PARAM_SIGNATURE));

    // Only the signing call stays inside the try: in the original, the
    // UNAUTHORIZED thrown on a signature mismatch was itself caught by the
    // broad catch (Exception) and rethrown, masking the real control flow.
    String expectedSignature;
    try {
        expectedSignature = sign(paramStrWithoutSignature, clientSecret, algorithm);
    } catch (Exception e) {
        System.err.println("Failed validating Jive auth. scheme" + e.getMessage());
        throw UNAUTHORIZED;
    }

    if (expectedSignature.equals(signature)) {
        // Save the resolved instance on the request for downstream filters/resources.
        request.setProperty(JIVE_INSTANCE, jiveInstance);
    } else {
        // NOTE(review): String.equals is not constant-time; consider
        // MessageDigest.isEqual for a timing-safe comparison.
        System.err.println(
                "Jive authorization failed due to tampered signature! Original authz: " + authorization);
        throw UNAUTHORIZED;
    }

}

From source file:se.vgregion.pubsub.push.impl.DefaultPushSubscriberManager.java

@Override
public URI pollForRetrieval() throws InterruptedException {
    // Block for at most five minutes waiting for the next URL to become available.
    final URI next = retrieverQueue.poll(5, TimeUnit.MINUTES);

    if (next == null) {
        // Timed out; log the queue depth for diagnostics and return null to the caller.
        LOG.info("DefaultPushSubscriberManager timed out waiting. Size of queue: {}", retrieverQueue.size());
    }
    return next;
}

From source file:com.epam.ta.reportportal.util.analyzer.IssuesAnalyzerService.java

public IssuesAnalyzerService() {
    // Loader returns an empty string so absent keys materialize as "" rather than null.
    final CacheLoader<String, String> emptyLoader = new CacheLoader<String, String>() {
        @Override
        public String load(String key) {
            return "";
        }
    };
    // Bounded cache of ids currently under analysis; entries expire
    // CACHE_ITEM_LIVE minutes after being written.
    processingIds = CacheBuilder.newBuilder()
            .maximumSize(MAXIMUM_SIZE)
            .expireAfterWrite(CACHE_ITEM_LIVE, TimeUnit.MINUTES)
            .build(emptyLoader);
}

From source file:de.thm.arsnova.FeedbackStorage.java

/**
 * Removes all feedback votes in the given session that are older than the
 * configured delay and returns the users whose votes were removed.
 *
 * @param session the session whose feedback should be cleaned
 * @param cleanupFeedbackDelayInMins age limit in minutes; older votes are purged
 * @return the users whose feedback was removed (empty if none)
 */
private List<User> cleanFeedbackVotesInSession(final Session session, final int cleanupFeedbackDelayInMins) {
    // Feedback with a timestamp before this cutoff is considered stale.
    final long timelimitInMillis = TimeUnit.MILLISECONDS.convert(cleanupFeedbackDelayInMins, TimeUnit.MINUTES);
    final Date maxAllowedTime = new Date(System.currentTimeMillis() - timelimitInMillis);

    final Map<User, FeedbackStorageObject> sessionFeedbacks = data.get(session);
    final List<User> affectedUsers = new ArrayList<User>();

    // Guard against a session with no feedback map (previously an NPE).
    if (sessionFeedbacks == null) {
        return affectedUsers;
    }

    // Collect stale users first, then remove them in one batch. The original
    // called sessionFeedbacks.remove(user) while iterating the entry set, which
    // throws ConcurrentModificationException on a plain HashMap (if the backing
    // map is a ConcurrentHashMap the original happened to work, but this form
    // is safe either way).
    for (final Map.Entry<User, FeedbackStorageObject> entry : sessionFeedbacks.entrySet()) {
        final FeedbackStorageObject feedback = entry.getValue();
        final boolean timeIsUp = feedback.getTimestamp().before(maxAllowedTime);
        if (timeIsUp) {
            affectedUsers.add(entry.getKey());
        }
    }
    sessionFeedbacks.keySet().removeAll(affectedUsers);
    return affectedUsers;
}

From source file:mServer.search.MserverSearch.java

/**
 * Runs one full crawler search pass and waits (bounded) for it to finish.
 *
 * @param aktSearchTask the task describing what to load and how long to wait
 * @return true if the run completed and produced a plausible film list,
 *         false if the crawler had to be killed or the resulting list is too small
 */
@SuppressWarnings("deprecation")
public boolean filmeSuchen(MserverSearchTask aktSearchTask) {
    boolean ret = true;
    try {
        // ===========================================
        // start the next search run
        MserverLog.systemMeldung("");
        MserverLog.systemMeldung("-----------------------------------");
        MserverLog.systemMeldung("Filmsuche starten");
        crawler = new Crawler();

        // what to load and how
        CrawlerConfig.senderLoadHow = aktSearchTask.loadHow();
        CrawlerConfig.updateFilmliste = aktSearchTask.updateFilmliste();
        CrawlerConfig.nurSenderLaden = arrLesen(aktSearchTask.arr[MserverSearchTask.SUCHEN_SENDER_NR].trim());
        CrawlerConfig.orgFilmlisteErstellen = aktSearchTask.orgListeAnlegen();
        CrawlerConfig.orgFilmliste = MserverDaten.system[MserverKonstanten.SYSTEM_FILMLISTE_ORG_NR];

        // live streams
        CrawlerConfig.importLive = MserverDaten.system[MserverKonstanten.SYSTEM_IMPORT_LIVE_NR];

        // and possibly a few imports of film lists from other servers
        CrawlerConfig.importUrl_1__anhaengen = MserverDaten.system[MserverKonstanten.SYSTEM_IMPORT_URL_1_NR];
        CrawlerConfig.importUrl_2__anhaengen = MserverDaten.system[MserverKonstanten.SYSTEM_IMPORT_URL_2_NR];

        // for the old film list
        CrawlerConfig.importOld = MserverDaten.system[MserverKonstanten.SYSTEM_IMPORT_OLD_NR];
        CrawlerConfig.importAkt = MserverDatumZeit
                .getNameAkt(MserverDaten.system[MserverKonstanten.SYSTEM_IMPORT_AKT_NR]);

        // remaining settings: user agent, proxy, debug flag, log file
        Config.setUserAgent(MserverDaten.getUserAgent());
        CrawlerConfig.proxyUrl = MserverDaten.system[MserverKonstanten.SYSTEM_PROXY_URL_NR];
        CrawlerConfig.proxyPort = MserverDaten.getProxyPort();
        Config.debug = MserverDaten.debug;

        Log.setLogfile(MserverDaten.getLogDatei(MserverKonstanten.LOG_FILE_NAME_MSEARCH));

        Thread t = new Thread(crawler);
        t.setName("Crawler");
        t.start();
        MserverLog.systemMeldung("Filme suchen gestartet");
        // ===========================================
        // wait for the crawler to finish
        //int warten = aktSearchTask.allesLaden() == true ? MvSKonstanten.WARTEZEIT_ALLES_LADEN : MvSKonstanten.WARTEZEIT_UPDATE_LADEN;
        int warten = aktSearchTask.getWaitTime()/* minutes */;
        MserverLog.systemMeldung("Max Laufzeit[Min]: " + warten);
        MserverLog.systemMeldung("-----------------------------------");

        // bounded join: give the crawler at most 'warten' minutes to run
        TimeUnit.MINUTES.timedJoin(t, warten);

        // ===========================================
        // first check whether the crawler is still running
        if (t != null) {
            if (t.isAlive()) {
                MserverLog.fehlerMeldung(915147623, MserverSearch.class.getName(),
                        "Der letzte Suchlauf luft noch");
                if (crawler != null) {
                    MserverLog.systemMeldung("");
                    MserverLog.systemMeldung("");
                    MserverLog.systemMeldung("================================");
                    MserverLog.systemMeldung("================================");
                    MserverLog.systemMeldung("und wird jetzt gestoppt");
                    MserverLog.systemMeldung(
                            "Zeit: " + FastDateFormat.getInstance("dd.MM.yyyy HH:mm:ss").format(new Date()));
                    MserverLog.systemMeldung("================================");
                    MserverLog.systemMeldung("================================");
                    MserverLog.systemMeldung("");
                    // ask the crawler to stop cooperatively
                    crawler.stop();
                }

                int w;
                if (loadLongMax())
                    w = 30; // 30 minutes for long runs
                else
                    w = 20;// 20 minutes; building/compressing the list takes a while
                TimeUnit.MINUTES.timedJoin(t, w);

                if (t.isAlive()) {
                    MserverLog.systemMeldung("");
                    MserverLog.systemMeldung("");
                    MserverLog.systemMeldung("================================");
                    MserverLog.systemMeldung("================================");
                    MserverLog.systemMeldung("und noch gekillt");
                    MserverLog.systemMeldung(
                            "Zeit: " + FastDateFormat.getInstance("dd.MM.yyyy HH:mm:ss").format(new Date()));
                    MserverLog.systemMeldung("================================");
                    MserverLog.systemMeldung("================================");
                    MserverLog.systemMeldung("");
                    ret = false;
                }
                // last resort: kill the crawler thread outright.
                // NOTE(review): Thread.stop() is deprecated and unsafe; this is why
                // the method carries @SuppressWarnings("deprecation").
                t.stop();
            }
        }
    } catch (Exception ex) {
        MserverLog.fehlerMeldung(636987308, MserverSearch.class.getName(), "filmeSuchen", ex);
    }
    // sanity check: a plausible film list should contain at least 10,000 entries
    int l = crawler.getListeFilme().size();
    MserverLog.systemMeldung("");
    MserverLog.systemMeldung("");
    MserverLog.systemMeldung("================================");
    MserverLog.systemMeldung("Filmliste Anzahl Filme: " + l);
    if (l < 10_000) {
        // something went wrong during the run
        MserverLog.systemMeldung("   Fehler!!");
        MserverLog.systemMeldung("================================");
        ret = false;
    } else {
        MserverLog.systemMeldung("   dann ist alles OK");
        MserverLog.systemMeldung("================================");

    }
    MserverLog.systemMeldung("filmeSuchen beendet");
    crawler = null;
    return ret;
}

From source file:com.datatorrent.contrib.dimensions.DimensionsQueryExecutorTest.java

/**
 * Feeds one aggregate per minute bucket into a fresh HDHT dimension store,
 * then issues a rolling-sum query over {@code rollingCount} buckets and
 * verifies the aggregated impression count.
 *
 * @param rollingCount number of minute buckets the sliding aggregate spans
 */
private void simpleQueryCountHelper(int rollingCount) {
    final String publisher = "google";
    final String advertiser = "safeway";

    final long impressions = 10L;
    final double cost = 1.0;

    String eventSchemaString = SchemaUtils.jarResourceFileToString("dimensionsTestSchema.json");

    // Back the store with a TFile implementation rooted in the test directory.
    String basePath = testMeta.getDir();
    TFileImpl hdsFile = new TFileImpl.DefaultTFileImpl();
    hdsFile.setBasePath(basePath);

    AppDataSingleSchemaDimensionStoreHDHT store = new AppDataSingleSchemaDimensionStoreHDHT();

    store.setCacheWindowDuration(2);
    store.setConfigurationSchemaJSON(eventSchemaString);
    store.setFileStore(hdsFile);
    store.setFlushIntervalCount(1);
    store.setFlushSize(0);

    store.setup(new OperatorContextTestHelper.TestIdOperatorContext(1, new DefaultAttributeMap()));

    DimensionalConfigurationSchema eventSchema = store.configurationSchema;
    DimensionsQueryExecutor dqe = new DimensionsQueryExecutor(store, store.schemaRegistry);

    store.beginWindow(0L);

    long currentTime = 0L;

    List<Map<String, HDSQuery>> hdsQueries = Lists.newArrayList();
    List<Map<String, EventKey>> eventKeys = Lists.newArrayList();

    // Emit one event per minute bucket, recording the matching HDS query and
    // event key for each, until rollingCount buckets have been produced.
    for (int rollingCounter = 0;; currentTime += TimeUnit.MINUTES.toMillis(1L)) {
        Aggregate aggregate = AppDataSingleSchemaDimensionStoreHDHTTest.createEvent(eventSchema, publisher,
                advertiser, currentTime, TimeBucket.MINUTE, impressions, cost);

        store.input.put(aggregate);

        issueHDSQuery(store, aggregate.getEventKey());

        Map<String, HDSQuery> aggregatorToQuery = Maps.newHashMap();
        aggregatorToQuery.put("SUM", store.getQueries().values().iterator().next());
        hdsQueries.add(aggregatorToQuery);

        Map<String, EventKey> aggregatorToEventKey = Maps.newHashMap();
        aggregatorToEventKey.put("SUM", aggregate.getEventKey());
        eventKeys.add(aggregatorToEventKey);

        rollingCounter++;

        if (rollingCounter == rollingCount) {
            break;
        }
    }

    QueryMeta queryMeta = new QueryMeta();
    queryMeta.setHdsQueries(hdsQueries);
    queryMeta.setEventKeys(eventKeys);

    // Build a dimensional query over the publisher/advertiser key asking for
    // the SUM of impressions and cost across the collected buckets.
    GPOMutable keys = AppDataSingleSchemaDimensionStoreHDHTTest.createQueryKey(eventSchema, publisher,
            advertiser);
    Map<String, Set<String>> fieldToAggregators = Maps.newHashMap();
    fieldToAggregators.put("impressions", Sets.newHashSet("SUM"));
    fieldToAggregators.put("cost", Sets.newHashSet("SUM"));

    FieldsAggregatable fieldsAggregatable = new FieldsAggregatable(fieldToAggregators);

    DataQueryDimensional query = new DataQueryDimensional("1", DataQueryDimensional.TYPE, currentTime,
            currentTime, TimeBucket.MINUTE, keys, fieldsAggregatable, true);
    query.setSlidingAggregateSize(rollingCount);

    DataResultDimensional drd = (DataResultDimensional) dqe.executeQuery(query, queryMeta, new MutableLong(1L));

    store.endWindow();

    // The rolling sum should cover every bucket: impressions * rollingCount.
    Assert.assertEquals(1, drd.getValues().size());
    Assert.assertEquals(impressions * rollingCount,
            drd.getValues().get(0).get("SUM").getFieldLong("impressions"));

    store.teardown();
}

From source file:ezbake.discovery.stethoscope.server.StethoscopeServiceHandler.java

@Override
public TProcessor getThriftProcessor() {
    this.configuration = new EzProperties(getConfigurationProperties(), true);
    this.serviceDiscoveryClient = new ServiceDiscoveryClient(configuration);

    // Minutes an entry may sit in the cache after its last write before expiring.
    final int writeExpireMinutes = configuration.getInteger(STETHOSCOPE_SERVICE_WRITE_EXPIRE_TIME, 15);
    final boolean shouldRemoveEntriesFromZookeeper = configuration
            .getBoolean(STETHOSCOPE_ACTUALLY_REMOVE_FROM_ZOOKEEPER, false);
    logger.info("Stethoscope will wait {} minutes before timing something out after write", writeExpireMinutes);

    final Multimap<String, String> servicesToIgnore = getServicesToIgnore(
            configuration.getProperty(STETHOSCOPE_SERVICES_TO_IGNORE, ""));

    // Announce every (application, service) pair that is exempt from removal.
    for (final Map.Entry<String, String> exempt : servicesToIgnore.entries()) {
        logger.info("Application: {}, Service: {} will NOT be removed from zookeeper", exempt.getKey(),
                exempt.getValue());
    }

    if (shouldRemoveEntriesFromZookeeper) {
        logger.info("Stethoscope will remove entries from zookeeper");
    } else {
        logger.info("Stethoscope will NOT remove entries from zookeeper");
    }

    // Expired entries are handed to the removal listener, which may delete the
    // corresponding zookeeper entry depending on the flags above.
    this.serviceCache = CacheBuilder.newBuilder()
            .expireAfterWrite(writeExpireMinutes, TimeUnit.MINUTES)
            .removalListener(new StethoscopeCacheRemovalListener(serviceDiscoveryClient,
                    shouldRemoveEntriesFromZookeeper, servicesToIgnore))
            .build();

    // Periodic maintenance drives the cache's eviction processing.
    this.scheduler = Executors.newScheduledThreadPool(1);
    final int cleanupIntervalMinutes = configuration.getInteger(STETHOSCOPE_SERVICE_CLEANUP_TIME, 10);
    logger.info("Stethoscope will wait {} minutes before running the clean up thread!", cleanupIntervalMinutes);
    scheduler.scheduleAtFixedRate(new CacheMaintenanceRunnable(), 0, cleanupIntervalMinutes, TimeUnit.MINUTES);
    populateCacheFromZookeeper();
    return new StethoscopeService.Processor(this);
}

From source file:com.pinterest.terrapin.controller.TerrapinControllerHandler.java

/**
 * Builds and starts the thrift server for the controller on the given port,
 * and starts the ostrich admin/metrics service.
 *
 * @param thriftPort port to bind the thrift service to
 * @throws UnknownHostException if the bind address cannot be resolved
 */
private void startThriftServer(int thriftPort) throws UnknownHostException {
    TerrapinController.ServiceIface serviceImpl = new TerrapinControllerServiceImpl(this.configuration,
            this.zkManager, this.hdfsClient, this.helixAdmin, this.clusterName);
    TerrapinController.Service service = new TerrapinController.Service(serviceImpl,
            new TBinaryProtocol.Factory());

    // Idle connections are closed after THRIFT_CONN_MAX_IDLE_TIME minutes (default 1);
    // concurrent requests are capped at THRIFT_MAX_CONCURRENT_REQUESTS (default 100).
    this.server = ServerBuilder.safeBuild(service,
            ServerBuilder.get().name("TerrapinController").codec(ThriftServerFramedCodec.get())
                    .hostConnectionMaxIdleTime(Duration.fromTimeUnit(
                            configuration.getInt(Constants.THRIFT_CONN_MAX_IDLE_TIME, 1), TimeUnit.MINUTES))
                    .maxConcurrentRequests(configuration.getInt(Constants.THRIFT_MAX_CONCURRENT_REQUESTS, 100))
                    .reportTo(new OstrichStatsReceiver(Stats.get("")))
                    .bindTo(new InetSocketAddress(thriftPort)));
    // Expose ostrich metrics on the configured admin port (default 9999).
    new OstrichAdminService(configuration.getInt(Constants.OSTRICH_METRICS_PORT, 9999)).start();
}

From source file:io.fabric8.itests.basic.cloud.FabricRackspaceContainerTest.java

/**
 * Starts an ensemble server on Rackspace, configures the cloud service and joins the ensemble.
 *
 * @throws InterruptedException
 * @throws java.io.IOException
 */
@Test
public void testRackspaceAgentCreation() throws InterruptedException, IOException {
    // Skip (rather than fail) when the Rackspace credentials are not configured.
    if (!isReady()) {
        System.out.println("Rackspace is not setup correctly. This test will not run.");
        System.out.println("To properly run this test, you need to setup with maven the following properties:");
        System.out.println("fabricitest.rackspace.identity \t The rackspace access id");
        System.out.println("fabricitest.rackspace.credential \t The rackspace access key");
        System.out.println("fabricitest.rackspace.image  \t The rackspace (java ready) image");
        System.out.println("fabricitest.rackspace.user  \t The user under which the agent will run");
        return;
    }

    System.out
            .println(executeCommand("features:install jclouds-cloudserver-us fabric-jclouds jclouds-commands"));

    executeCommand("fabric:cloud-service-add --provider cloudservers-us --identity " + identity
            + " --credential " + credential);

    // Wait up to 3 minutes for the ComputeService to be registered in OSGi.
    ComputeService computeService = ServiceLocator.awaitService(bundleContext, ComputeService.class, 3,
            TimeUnit.MINUTES);

    //The compute service needs some time to properly initialize.
    System.out.println(executeCommand(String.format(
            "fabric:container-create-cloud --provider cloudservers-us --group %s --ensemble-server ensemble1",
            group), 10 * 60000L, false));
    String publicIp = getNodePublicIp(computeService);
    assertNotNull(publicIp);
    System.out.println(executeCommand("fabric:join -n " + publicIp + ":2181", 10 * 60000L, false));
    Thread.sleep(DEFAULT_TIMEOUT);
    System.out.println(executeCommand("fabric:join " + publicIp + ":2181", 10 * 60000L, false));
    String agentList = executeCommand("fabric:container-list");
    System.out.println(agentList);
    assertTrue(agentList.contains("root") && agentList.contains("ensemble1"));

}