Example usage for java.util.concurrent TimeUnit MINUTES

Introduction

This page collects example usages of java.util.concurrent.TimeUnit.MINUTES.

Prototype

public static final TimeUnit MINUTES


Document

Time unit representing sixty seconds.
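
Like every TimeUnit constant, MINUTES converts durations to and from the other units and can parameterize blocking calls. A minimal, self-contained sketch:

import java.util.concurrent.TimeUnit;

public class MinutesBasics {
    public static void main(String[] args) throws InterruptedException {
        System.out.println(TimeUnit.MINUTES.toSeconds(1)); // 60
        System.out.println(TimeUnit.MINUTES.toMillis(2));  // 120000

        // Converting the other way truncates: 180 seconds is 3 minutes
        System.out.println(TimeUnit.MINUTES.convert(180, TimeUnit.SECONDS)); // 3

        // The unit can also drive blocking calls directly
        TimeUnit.MINUTES.sleep(0); // sleep(1) would block for a full minute
    }
}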

Usage

From source file:uk.ac.cam.cl.dtg.segue.api.managers.UserAccountManager.java

/**
 * Create an instance of the user manager class.
 * @param database
 *            - an IUserDataManager that will support persistence.
 * @param questionDb
 *            - allows this class to instruct the questionDB to merge an anonymous user with a registered user.  
 * @param properties
 *            - A property loader
 * @param providersToRegister
 *            - A map of known authentication providers.
 * @param dtoMapper
 *            - the preconfigured DO to DTO object mapper for user objects.
 * @param emailQueue
 *            - the preconfigured communicator manager for sending e-mails.
 * @param logManager
 *            - so that we can log events for users.
 * @param userAuthenticationManager
 *            - Class responsible for handling sessions, passwords and linked accounts.
 */
@Inject
public UserAccountManager(final IUserDataManager database, final QuestionManager questionDb,
        final PropertiesLoader properties,
        final Map<AuthenticationProvider, IAuthenticator> providersToRegister, final MapperFacade dtoMapper,
        final EmailManager emailQueue, final ILogManager logManager,
        final UserAuthenticationManager userAuthenticationManager) {
    this(database, questionDb, properties, providersToRegister, dtoMapper, emailQueue,
            CacheBuilder.newBuilder().expireAfterAccess(ANONYMOUS_SESSION_DURATION_IN_MINUTES, TimeUnit.MINUTES)
                    .<String, AnonymousUser>build(),
            logManager, userAuthenticationManager);
}
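
Isolated from the constructor wiring, this is Guava's expire-after-access idiom: cache entries are evicted a fixed number of minutes after their last read or write. A minimal sketch (the Session type and the 30-minute window are illustrative assumptions, not values from this project):

import java.util.concurrent.TimeUnit;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public class SessionCacheSketch {
    static class Session {} // hypothetical value type

    public static void main(String[] args) {
        Cache<String, Session> sessions = CacheBuilder.newBuilder()
                .expireAfterAccess(30, TimeUnit.MINUTES) // evict 30 minutes after last access
                .build();
        sessions.put("session-id", new Session());
    }
}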

From source file:terrastore.startup.Startup.java

public void start() throws Exception {
    try {
        // TODO: make connection timeout configurable.
        if (TCMaster.getInstance().connect(master, 3, TimeUnit.MINUTES)) {
            verifyNodeHost();
            verifyWorkerThreads();
            printInfo();
            setupSystemParams();
            //
            ApplicationContext context = startContext();
            startCoordinator(context);
            startJsonHttpServer(context);
        } else {
            throw new MasterConnectionException("Unable to connect to master: " + master);
        }
    } catch (Exception ex) {
        LOG.error(ex.getMessage(), ex);
        throw ex;
    }
}
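
connect(master, 3, TimeUnit.MINUTES) follows the common (long, TimeUnit) timeout signature. The same bounded-wait shape can be sketched with a standard-library Future (the submitted task is a placeholder, not this project's connection logic):

import java.util.concurrent.*;

public class BoundedConnect {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        Future<String> connection = pool.submit(() -> "connected"); // placeholder work
        try {
            System.out.println(connection.get(3, TimeUnit.MINUTES)); // give up after 3 minutes
        } catch (TimeoutException e) {
            System.err.println("unable to connect within 3 minutes");
        } finally {
            pool.shutdown();
        }
    }
}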

From source file:com.hpe.application.automation.tools.octane.buildLogs.LogDispatcher.java

@Override
protected void doExecute(TaskListener listener) {
    if (logsQueue.peekFirst() == null) {
        return;
    }

    MqmRestClient mqmRestClient = initMqmRestClient();
    if (mqmRestClient == null) {
        logger.warn(
                "there are pending build logs, but MQM server location is not specified, build logs can't be submitted");
        logsQueue.remove();
        return;
    }

    ResultQueue.QueueItem item;

    while ((item = logsQueue.peekFirst()) != null) {
        if (retryModel.isQuietPeriod()) {
            logger.info("there are pending logs, but we are in quiet period");
            return;
        }

        Run build = getBuildFromQueueItem(item);
        if (build == null) {
            logger.warn("build and/or project [" + item.getProjectName() + " #" + item.getBuildNumber()
                    + "] no longer exists, pending build logs won't be submitted");
            logsQueue.remove();
            continue;
        }

        String jobCiId = BuildHandlerUtils.getJobCiId(build);
        try {
            if (item.getWorkspace() == null) {
                //
                //  initial queue item flow - workspace not yet known: retrieve all relevant workspaces and loop over each of them
                //
                logger.info("retrieving all workspaces that logs of [" + jobCiId + "] are relevant to...");
                List<String> workspaces = mqmRestClient.getJobWorkspaceId(
                        ConfigurationService.getModel().getIdentity(), BuildHandlerUtils.getJobCiId(build));
                if (workspaces.isEmpty()) {
                    logger.info("[" + jobCiId
                            + "] is not part of any Octane pipeline in any workspace, log won't be sent");
                } else {
                    logger.info("logs of [" + jobCiId + "] found to be relevant to " + workspaces.size()
                            + " workspace/s");
                    CountDownLatch latch = new CountDownLatch(workspaces.size());
                    for (String workspaceId : workspaces) {
                        logDispatcherExecutors.execute(new SendLogsExecutor(mqmRestClient, build, item,
                                workspaceId, logsQueue, latch));
                    }

                    boolean completedResult = latch.await(TIMEOUT, TimeUnit.MINUTES);
                    if (!completedResult) {
                        logger.error("timed out sending logs to " + workspaces.size() + " workspace/s");
                    }
                }
                logsQueue.remove();
            } else {
                //
                //  secondary queue item flow - workspace is known, we are in retry flow
                //
                logger.info("");
                transferBuildLogs(build, mqmRestClient, item);
            }
        } catch (Exception e) {
            logger.error("fatally failed to fetch relevant workspaces OR to send log for build "
                    + item.getProjectName() + " #" + item.getBuildNumber() + " to workspace "
                    + item.getWorkspace() + ", will not retry this one", e);
        }
    }
}
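
The method above fans one log-sending task per workspace out to an executor and bounds the wait with latch.await(TIMEOUT, TimeUnit.MINUTES), which returns false on timeout instead of throwing. The fan-out/await skeleton in isolation (task count and timeout are illustrative):

import java.util.concurrent.*;

public class FanOutAwait {
    public static void main(String[] args) throws InterruptedException {
        int tasks = 3;
        ExecutorService executor = Executors.newFixedThreadPool(tasks);
        CountDownLatch latch = new CountDownLatch(tasks);
        for (int i = 0; i < tasks; i++) {
            executor.execute(() -> {
                try {
                    // ... send logs for one workspace ...
                } finally {
                    latch.countDown(); // always count down, even on failure
                }
            });
        }
        boolean completed = latch.await(2, TimeUnit.MINUTES); // false if time runs out
        if (!completed) {
            System.err.println("timed out waiting for workers");
        }
        executor.shutdown();
    }
}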

From source file:co.cask.hydrator.plugin.CopybookTest.java

@Test
public void testCopybookReaderWithRequiredFields() throws Exception {

    Schema schema = Schema.recordOf("record",
            Schema.Field.of("DTAR020-KEYCODE-NO", Schema.nullableOf(Schema.of(Schema.Type.STRING))),
            Schema.Field.of("DTAR020-DATE", Schema.nullableOf(Schema.of(Schema.Type.DOUBLE))),
            Schema.Field.of("DTAR020-DEPT-NO", Schema.nullableOf(Schema.of(Schema.Type.DOUBLE))),
            Schema.Field.of("DTAR020-QTY-SOLD", Schema.nullableOf(Schema.of(Schema.Type.DOUBLE))),
            Schema.Field.of("DTAR020-SALE-PRICE", Schema.nullableOf(Schema.of(Schema.Type.DOUBLE))));

    Map<String, String> sourceProperties = new ImmutableMap.Builder<String, String>()
            .put(Constants.Reference.REFERENCE_NAME, "TestCase")
            .put("binaryFilePath", "src/test/resources/DTAR020_FB.bin").put("copybookContents", cblContents)
            .put("drop", "DTAR020-STORE-NO").build();

    ETLStage source = new ETLStage("CopybookReader",
            new ETLPlugin("CopybookReader", BatchSource.PLUGIN_TYPE, sourceProperties, null));

    String outputDatasetName = "output-batchsourcetest";
    ETLStage sink = new ETLStage("sink", MockSink.getPlugin(outputDatasetName));

    ETLBatchConfig etlConfig = ETLBatchConfig.builder("* * * * *").addStage(source).addStage(sink)
            .addConnection(source.getName(), sink.getName()).build();

    AppRequest<ETLBatchConfig> appRequest = new AppRequest<>(ETLBATCH_ARTIFACT, etlConfig);
    Id.Application appId = Id.Application.from(Id.Namespace.DEFAULT, "CopybookReaderTest");
    ApplicationManager appManager = deployApplication(appId, appRequest);

    MapReduceManager mrManager = appManager.getMapReduceManager(ETLMapReduce.NAME);
    mrManager.start();
    mrManager.waitForFinish(5, TimeUnit.MINUTES);

    DataSetManager<Table> outputManager = getDataset(outputDatasetName);
    List<StructuredRecord> output = MockSink.readOutput(outputManager);

    Assert.assertEquals("Expected records", 2, output.size());

    Map<String, Double> result = new HashMap<>();
    result.put((String) output.get(0).get("DTAR020-KEYCODE-NO"),
            (Double) output.get(0).get("DTAR020-SALE-PRICE"));
    result.put((String) output.get(1).get("DTAR020-KEYCODE-NO"),
            (Double) output.get(1).get("DTAR020-SALE-PRICE"));

    Assert.assertEquals(4.87, result.get("63604808").doubleValue(), 0.1);
    Assert.assertEquals(5.01, result.get("69694158").doubleValue(), 0.1);
    Assert.assertEquals("Expected schema", output.get(0).getSchema(), schema);
}
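
waitForFinish(5, TimeUnit.MINUTES) is the CDAP test harness's bounded wait for a job. For plain executors, the standard-library counterpart is awaitTermination, sketched here under the same five-minute budget:

import java.util.concurrent.*;

public class AwaitJob {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService jobs = Executors.newSingleThreadExecutor();
        jobs.execute(() -> { /* run the batch job */ });
        jobs.shutdown(); // stop accepting new work
        if (!jobs.awaitTermination(5, TimeUnit.MINUTES)) {
            jobs.shutdownNow(); // force-stop stragglers after the deadline
        }
    }
}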

From source file:de.otto.mongodb.profiler.web.OpProfileController.java

@RequestMapping(value = "/{id:.+}/chart-data/moved-documents", method = RequestMethod.GET, produces = {
        JSON_TYPE_1, JSON_TYPE_2 })
public HttpEntity<String> getMovedDocumentChartData(@PathVariable("connectionId") final String connectionId,
        @PathVariable("databaseName") final String databaseName, @PathVariable("id") final String id,
        @RequestParam(value = "sampleRate", required = false) Long sampleRate)
        throws ResourceNotFoundException {

    final ProfiledDatabase database = requireDatabase(connectionId, databaseName);
    final OpProfile profile = requireProfile(database, id);

    if (!(profile instanceof UpdateProfile)) {
        throw new ResourceNotFoundException("No update profile found!");
    }

    if (sampleRate == null) {
        sampleRate = 15L;
    }

    final DocumentMoveMeasure measure = ((UpdateProfile) profile).getMoveMeasure();

    final ChronoSampler<DocumentMoveMeasure.Mark, MovedDocumentsSample> sampler = new ChronoSampler<>(
            sampleRate, TimeUnit.MINUTES, MOVED_DOCUMENTS_REDUCTION, lowerBoundary(-24));

    for (DocumentMoveMeasure.Mark mark : measure.getMarks()) {
        sampler.add(mark.time, mark);
    }

    final List<MovedDocumentsSample> samples = sampler.finish();

    final JsonArray ratioValues = new JsonArray();
    final JsonArray totalValues = new JsonArray();
    final JsonArray movedValues = new JsonArray();
    for (MovedDocumentsSample sample : samples) {
        final JsonArray ratioValue = new JsonArray();
        ratioValue.add(new JsonPrimitive(Long.valueOf(sample.time)));
        ratioValue.add(new JsonPrimitive(
                Integer.valueOf(sample.ratio.movePointRight(2).setScale(0, BigDecimal.ROUND_UP).intValue())));
        ratioValues.add(ratioValue);
        final JsonArray totalValue = new JsonArray();
        totalValue.add(new JsonPrimitive(Long.valueOf(sample.time)));
        totalValue.add(new JsonPrimitive(Long.valueOf(sample.normal + sample.moved)));
        totalValues.add(totalValue);
        final JsonArray movedValue = new JsonArray();
        movedValue.add(new JsonPrimitive(Long.valueOf(sample.time)));
        movedValue.add(new JsonPrimitive(Long.valueOf(sample.moved)));
        movedValues.add(movedValue);
    }

    final JsonObject ratioJson = new JsonObject();
    ratioJson.add("key", new JsonPrimitive("Ratio"));
    ratioJson.add("values", ratioValues);

    final JsonObject totalJson = new JsonObject();
    totalJson.add("key", new JsonPrimitive("Total"));
    totalJson.add("values", totalValues);

    final JsonObject movedJson = new JsonObject();
    movedJson.add("key", new JsonPrimitive("Moved"));
    movedJson.add("values", movedValues);

    final JsonObject json = new JsonObject();
    json.add("ratio", ratioJson);
    json.add("total", totalJson);
    json.add("moved", movedJson);

    return new HttpEntity<>(json.toString());
}
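
Passing a raw long alongside a TimeUnit, as the ChronoSampler constructor does with sampleRate and TimeUnit.MINUTES, is the classic pre-java.time idiom for unit-safe durations: the receiver normalizes once at the boundary. A minimal sketch of that API shape (Sampler is an illustrative name, not this project's class):

import java.util.concurrent.TimeUnit;

public class Sampler {
    private final long sampleRateMillis;

    public Sampler(long sampleRate, TimeUnit unit) {
        // normalize immediately; all internal arithmetic uses milliseconds
        this.sampleRateMillis = unit.toMillis(sampleRate);
    }

    public static void main(String[] args) {
        Sampler s = new Sampler(15, TimeUnit.MINUTES); // 900000 ms internally
        System.out.println(s.sampleRateMillis);
    }
}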

From source file:com.datatorrent.demos.dimensions.generic.DimensionStoreOperatorTest.java

@Test
public void testQueryFromHDSWithSubsetKeys() throws Exception {
    File file = new File(testInfo.getDir());
    FileUtils.deleteDirectory(file);

    DimensionStoreOperator hdsOut = new DimensionStoreOperator() {
        @Override
        public void setup(OperatorContext arg0) {
            super.setup(arg0);
            super.writeExecutor = super.queryExecutor = MoreExecutors.sameThreadExecutor(); // synchronous processing
        }
    };
    TFileImpl hdsFile = new TFileImpl.DefaultTFileImpl();
    hdsOut.setFileStore(hdsFile);
    hdsFile.setBasePath(testInfo.getDir());
    EventSchema eventSchema = GenericAggregateSerializerTest.getEventSchema();
    GenericAggregator aggregator = new GenericAggregator(eventSchema);
    aggregator.init("time=MINUTES:pubId:adId:adUnit");
    hdsOut.setEventSchemaJSON(GenericAggregateSerializerTest.TEST_SCHEMA_JSON);
    hdsOut.setAggregator(aggregator);
    hdsOut.setMaxCacheSize(1);
    hdsOut.setFlushIntervalCount(0);
    hdsOut.setup(null);

    CollectorTestSink<DimensionStoreOperator.HDSRangeQueryResult> queryResults = new CollectorTestSink<DimensionStoreOperator.HDSRangeQueryResult>();
    @SuppressWarnings({ "unchecked", "rawtypes" })
    CollectorTestSink<Object> tmp = (CollectorTestSink) queryResults;
    hdsOut.queryResult.setSink(tmp);

    hdsOut.beginWindow(1);

    long baseTime = System.currentTimeMillis();
    long baseMinute = TimeUnit.MILLISECONDS.convert(TimeUnit.MINUTES.convert(baseTime, TimeUnit.MILLISECONDS),
            TimeUnit.MINUTES);

    // Events ae1 and ae2 fall into the same aggregation because they have the same key
    Map<String, Object> eventMap = Maps.newHashMap();
    eventMap.put("timestamp", baseMinute);
    eventMap.put("pubId", 1);
    eventMap.put("adUnit", 3);
    eventMap.put("clicks", 10L);

    GenericAggregate ae1 = new GenericAggregate(eventSchema.convertMapToGenericEvent(eventMap));
    hdsOut.input.process(ae1);

    // Modify click count and create new event
    eventMap.put("clicks", 20L);
    GenericAggregate ae2 = new GenericAggregate(eventSchema.convertMapToGenericEvent(eventMap));
    hdsOut.input.process(ae2);

    // Modify clicks to 10 and time by 1 minute and create new event
    eventMap.put("timestamp", baseMinute + TimeUnit.MILLISECONDS.convert(1, TimeUnit.MINUTES));
    eventMap.put("clicks", 10L);
    GenericAggregate ae3 = new GenericAggregate(eventSchema.convertMapToGenericEvent(eventMap));
    hdsOut.input.process(ae3);

    hdsOut.endWindow();

    hdsOut.beginWindow(2);

    JSONObject keys = new JSONObject();
    keys.put("pubId", 1);
    keys.put("adUnit", 3);

    JSONObject query = new JSONObject();
    query.put("numResults", "20");
    query.put("keys", keys);
    query.put("id", "query1");
    query.put("startTime", baseMinute);
    query.put("endTime", baseMinute + TimeUnit.MILLISECONDS.convert(20, TimeUnit.MINUTES));

    hdsOut.query.process(query.toString());

    Assert.assertEquals("timeSeriesQueries " + hdsOut.rangeQueries, 1, hdsOut.rangeQueries.size());
    DimensionStoreOperator.HDSRangeQuery aq = hdsOut.rangeQueries.values().iterator().next();
    Assert.assertEquals("numTimeUnits " + hdsOut.rangeQueries, baseMinute, aq.startTime);

    hdsOut.endWindow();

    Assert.assertEquals("queryResults " + queryResults.collectedTuples, 1, queryResults.collectedTuples.size());
    HDSRangeQueryResult r = queryResults.collectedTuples.iterator().next();
    Assert.assertEquals("result points " + r, 2, r.data.size());

    // The ae1 object is stored by reference in the cache, so when a new tuple is aggregated
    // the new values are updated in ae1 itself, which would cause the following check to fail.
    //Assert.assertEquals("clicks", ae1.clicks + ae2.clicks, r.data.get(0).clicks);
    Assert.assertEquals("clicks", 30L, r.data.get(0).get("clicks"));
    Assert.assertEquals("clicks", eventSchema.getValue(ae3, "clicks"), r.data.get(1).get("clicks"));
    // when data is returned from HDHT, all keys are part of the response;
    // keys that were not present have 0 values.
    Assert.assertEquals("from HDHT", 0, r.data.get(0).get("adId"));
    // when data is returned from the cache, keys that were not specified
    // will not be present in the map.
    Assert.assertEquals("from cache", 0, r.data.get(1).get("adId"));
}
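
The baseMinute computation above round-trips a timestamp through TimeUnit.MINUTES.convert and back to milliseconds, which truncates it to its minute boundary; that is what lands ae1 and ae2 in the same time bucket. The idiom in isolation, alongside its shorter toMinutes/toMillis equivalent:

import java.util.concurrent.TimeUnit;

public class MinuteFloor {
    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        long viaConvert = TimeUnit.MILLISECONDS.convert(
                TimeUnit.MINUTES.convert(now, TimeUnit.MILLISECONDS), TimeUnit.MINUTES);
        long viaTo = TimeUnit.MINUTES.toMillis(TimeUnit.MILLISECONDS.toMinutes(now));
        System.out.println(viaConvert == viaTo); // true: both floor to the minute
    }
}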

From source file:co.marcin.novaguilds.util.StringUtils.java

public static String secondsToString(long seconds, TimeUnit unit) {
    if (seconds <= 0) {
        seconds = 0;
    }

    int minute = 60;
    int hour = 60 * minute;
    int day = hour * 24;
    int week = day * 7;
    int month = day * 31;
    int year = day * 365; // 31,536,000 seconds

    long years = seconds / year;
    seconds = seconds % year;

    long months = seconds / month;
    seconds = seconds % month;

    long weeks = seconds / week;
    seconds = seconds % week;

    long days = seconds / day;
    seconds = seconds % day;

    long hours = seconds / hour;
    seconds = seconds % hour;

    long minutes = seconds / minute;
    seconds = seconds % minute;

    String stringYears = "", stringMonths = "", stringWeeks = "", stringDays = "", stringHours = "",
            stringSeconds = "", stringMinutes = "";

    if (years > 0) {
        Message form = years > 1 ? Message.TIMEUNIT_YEAR_PLURAL : Message.TIMEUNIT_YEAR_SINGULAR;
        stringYears = years + " " + form.get() + " ";
    }

    if (months > 0) {
        Message form = months > 1 ? Message.TIMEUNIT_MONTH_PLURAL : Message.TIMEUNIT_MONTH_SINGULAR;
        stringMonths = months + " " + form.get() + " ";
    }

    if (weeks > 0) {
        Message form = weeks > 1 ? Message.TIMEUNIT_WEEK_PLURAL : Message.TIMEUNIT_WEEK_SINGULAR;
        stringWeeks = weeks + " " + form.get() + " ";
    }

    if (days > 0) {
        Message form = days > 1 ? Message.TIMEUNIT_DAY_PLURAL : Message.TIMEUNIT_DAY_SINGULAR;
        stringDays = days + " " + form.get() + " ";
    }

    if (hours > 0) {
        Message form = hours > 1 ? Message.TIMEUNIT_HOUR_PLURAL : Message.TIMEUNIT_HOUR_SINGULAR;
        stringHours = hours + " " + form.get() + " ";
    }

    if (minutes > 0) {
        Message form = minutes > 1 ? Message.TIMEUNIT_MINUTE_PLURAL : Message.TIMEUNIT_MINUTE_SINGULAR;
        stringMinutes = minutes + " " + form.get() + " ";
    }

    if (seconds > 0 || (seconds == 0 && minutes == 0 && hours == 0 && days == 0 && weeks == 0 && months == 0
            && years == 0)) {
        Message form = seconds == 1 ? Message.TIMEUNIT_SECOND_SINGULAR : Message.TIMEUNIT_SECOND_PLURAL;
        stringSeconds = seconds + " " + form.get() + " ";
    }

    if (unit == TimeUnit.DAYS && days > 0) {
        stringHours = "";
        stringMinutes = "";
        stringSeconds = "";
    } else if (unit == TimeUnit.HOURS && hours > 0) {
        stringMinutes = "";
        stringSeconds = "";
    } else if (unit == TimeUnit.MINUTES && minutes > 0) {
        stringSeconds = "";
    }

    String r = stringYears + stringMonths + stringWeeks + stringDays + stringHours + stringMinutes
            + stringSeconds;
    r = r.substring(0, r.length() - 1);
    return r;
}
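
The divisions above are hand-rolled because TimeUnit stops at DAYS; it has no constants for weeks, months, or years. For the units it does cover, the same decomposition can lean on TimeUnit directly, as in this sketch:

import java.util.concurrent.TimeUnit;

public class Decompose {
    public static void main(String[] args) {
        long seconds = 90_061; // 1 day, 1 hour, 1 minute, 1 second
        long days = TimeUnit.SECONDS.toDays(seconds);
        long hours = TimeUnit.SECONDS.toHours(seconds) - TimeUnit.DAYS.toHours(days);
        long minutes = TimeUnit.SECONDS.toMinutes(seconds)
                - TimeUnit.HOURS.toMinutes(TimeUnit.SECONDS.toHours(seconds));
        long secs = seconds - TimeUnit.MINUTES.toSeconds(TimeUnit.SECONDS.toMinutes(seconds));
        System.out.printf("%dd %dh %dm %ds%n", days, hours, minutes, secs); // 1d 1h 1m 1s
    }
}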

From source file:ddf.catalog.cache.solr.impl.SolrCache.java

private void configureCacheExpirationScheduler() {
    shutdownCacheExpirationScheduler();
    LOGGER.info("Configuring cache expiration scheduler with an expiration interval of {} minute(s).",
            expirationIntervalInMinutes);
    scheduler = Executors.newSingleThreadScheduledExecutor();
    scheduler.scheduleAtFixedRate(new ExpirationRunner(), 0, expirationIntervalInMinutes, TimeUnit.MINUTES);
}
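
scheduleAtFixedRate with an interval in TimeUnit.MINUTES is the standard wiring for periodic housekeeping like this expiration runner. A self-contained sketch of the same pattern (the runnable body and the 10-minute interval are placeholders):

import java.util.concurrent.*;

public class PeriodicExpiry {
    public static void main(String[] args) {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        scheduler.scheduleAtFixedRate(
                () -> { /* expire stale cache entries */ },
                0,  // initial delay: run immediately
                10, // then every 10 minutes
                TimeUnit.MINUTES);
        // ... later, on shutdown:
        scheduler.shutdown();
    }
}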

From source file:co.cask.hydrator.plugin.batch.spark.test.GDTreeTest.java

private void testSinglePhaseWithSparkSink() throws Exception {
    /*
     * source --> sparksink
     */
    String inputTable = "flight-data";
    Map<String, String> properties = new ImmutableMap.Builder<String, String>()
            .put("fileSetName", "gd-tree-model").put("path", "output")
            .put("featuresToInclude",
                    "dofM,dofW,carrier,originId,destId,scheduleDepTime,scheduledArrTime,elapsedTime")
            .put("labelField", "delayed").put("maxClass", "2").put("maxDepth", "10").put("maxIteration", "5")
            .build();

    ETLBatchConfig etlConfig = ETLBatchConfig.builder("* * * * *")
            .addStage(new ETLStage("source", MockSource.getPlugin(inputTable, getTrainerSchema(schema))))
            .addStage(new ETLStage("customsink",
                    new ETLPlugin(GDTreeTrainer.PLUGIN_NAME, SparkSink.PLUGIN_TYPE, properties, null)))
            .addConnection("source", "customsink").build();

    AppRequest<ETLBatchConfig> appRequest = new AppRequest<>(DATAPIPELINE_ARTIFACT, etlConfig);
    ApplicationId appId = NamespaceId.DEFAULT.app("SinglePhaseApp");
    ApplicationManager appManager = deployApplication(appId.toId(), appRequest);

    // send records from sample data to train the model
    List<StructuredRecord> messagesToWrite = new ArrayList<>();
    messagesToWrite.addAll(getInputData());

    // write records to source
    DataSetManager<Table> inputManager = getDataset(Id.Namespace.DEFAULT, inputTable);
    MockSource.writeInput(inputManager, messagesToWrite);

    // manually trigger the pipeline
    WorkflowManager workflowManager = appManager.getWorkflowManager(SmartWorkflow.NAME);
    workflowManager.start();
    workflowManager.waitForFinish(5, TimeUnit.MINUTES);
}

From source file:hudson.security.TokenBasedRememberMeServices2SEC868Test.java

@Test
@Issue("SECURITY-868")
public void rememberMeToken_skipExpirationCheck() throws Exception {
    boolean previousConfig = TokenBasedRememberMeServices2.SKIP_TOO_FAR_EXPIRATION_DATE_CHECK;
    try {
        TokenBasedRememberMeServices2.SKIP_TOO_FAR_EXPIRATION_DATE_CHECK = true;

        j.jenkins.setDisableRememberMe(false);

        HudsonPrivateSecurityRealm realm = new HudsonPrivateSecurityRealm(false, false, null);
        TokenBasedRememberMeServices2 tokenService = (TokenBasedRememberMeServices2) realm
                .getSecurityComponents().rememberMe;
        j.jenkins.setSecurityRealm(realm);

        String username = "alice";
        User alice = realm.createAccount(username, username);

        { // a malicious cookie with an expiration too far in the future; accepted here because the check is skipped
            JenkinsRule.WebClient wc = j.createWebClient();

            // by default we have 14 days of validity;
            // here we artificially extend the validity, which could otherwise be abused for permanent access
            long oneDay = TimeUnit.DAYS.toMillis(1);
            Cookie cookie = createRememberMeCookie(tokenService, oneDay, alice);
            wc.getCookieManager().addCookie(cookie);

            // with SKIP_TOO_FAR_EXPIRATION_DATE_CHECK enabled, the application accepts the cookie and connects the user
            assertUserConnected(wc, username);
        }

        { // a hand-crafted cookie with a regular expiration duration works
            JenkinsRule.WebClient wc = j.createWebClient();

            // by default we have 14 days of validity;
            // here we move the expiration back a bit to simulate an "old" cookie (regular usage)
            long minusFiveMinutes = TimeUnit.MINUTES.toMillis(-5);
            Cookie cookie = createRememberMeCookie(tokenService, minusFiveMinutes, alice);
            wc.getCookieManager().addCookie(cookie);

            // a cookie within the regular validity window connects the user as expected
            assertUserConnected(wc, username);
        }
    } finally {
        TokenBasedRememberMeServices2.SKIP_TOO_FAR_EXPIRATION_DATE_CHECK = previousConfig;
    }
}
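
Note that TimeUnit conversions accept negative arguments, which is what lets the test back-date the cookie: TimeUnit.MINUTES.toMillis(-5) is simply -300000. For example:

import java.util.concurrent.TimeUnit;

public class Backdate {
    public static void main(String[] args) {
        long offset = TimeUnit.MINUTES.toMillis(-5);          // -300000
        long backdated = System.currentTimeMillis() + offset; // five minutes in the past
        System.out.println(backdated);
    }
}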