Example usage for org.joda.time.format PeriodFormat getDefault

Introduction

This page shows example usages of org.joda.time.format.PeriodFormat.getDefault().

Prototype

public static PeriodFormatter getDefault() 

Document

Gets the default formatter that outputs words in English.
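
A minimal, self-contained sketch of the call (not taken from any of the source files below; the class name and the chosen period values are illustrative):

import org.joda.time.Period;
import org.joda.time.format.PeriodFormat;
import org.joda.time.format.PeriodFormatter;

public class PeriodFormatExample {
    public static void main(String[] args) {
        // A period of 1 hour, 30 minutes, 15 seconds and 0 millis.
        Period period = new Period(1, 30, 15, 0);

        // getDefault() returns a formatter that prints the period as English words.
        PeriodFormatter formatter = PeriodFormat.getDefault();
        System.out.println(formatter.print(period));
        // Expected output (approximately): 1 hour, 30 minutes and 15 seconds
    }
}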

Usage

From source file:azkaban.app.Scheduler.java

License:Apache License

/**
 * Schedule this job to run on a recurring basis beginning at the given
 * dateTime and repeating every period thereafter
 * @param jobId The id for the job to schedule
 * @param dateTime The date on which to first start the job
 * @param period The period on which the job repeats
 */
public ScheduledFuture<?> schedule(String jobId, DateTime dateTime, ReadablePeriod period, boolean ignoreDep) {
    logger.info("Scheduling job '" + jobId + "' for " + _dateFormat.print(dateTime) + " with a period of "
            + PeriodFormat.getDefault().print(period));
    return schedule(new ScheduledJob(jobId, dateTime, period, ignoreDep), true);
}

From source file:azkaban.app.Scheduler.java

License:Apache License

/**
 * Schedule the given job to run at the next occurrence of the partially
 * specified date, repeating on the given period. For example, if the
 * partial date is 12:00pm, then the job will kick off the next time it is
 * 12:00pm.
 * 
 * @param jobId An id for the job
 * @param partial A description of the date to run on
 * @param period The period on which the job should repeat
 */
public ScheduledFuture<?> schedule(String jobId, ReadablePartial partial, ReadablePeriod period,
        boolean ignoreDep) {
    // compute the next occurrence of this date
    DateTime now = new DateTime();
    DateTime date = now.withFields(partial);
    if (period != null) {
        date = updatedTime(date, period);
    } else if (now.isAfter(date)) {
        // Will try to schedule non recurring for tomorrow
        date = date.plusDays(1);
    }

    if (now.isAfter(date)) {
        // Schedule is non recurring.
        logger.info("Scheduled Job " + jobId + " was originally scheduled for " + _dateFormat.print(date));
        return null;
    }

    logger.info("Scheduling job '" + jobId + "' for " + _dateFormat.print(date)
            + (period != null ? " with a period of " + PeriodFormat.getDefault().print(period) : ""));
    return schedule(new ScheduledJob(jobId, date, period, ignoreDep), true);
}

From source file:azkaban.app.Scheduler.java

License:Apache License

private ScheduledFuture<?> schedule(final ScheduledJob schedJob, boolean saveResults) {
    // fail fast if there is a problem with this job
    _jobManager.validateJob(schedJob.getId());

    Duration wait = new Duration(new DateTime(), schedJob.getScheduledExecution());
    if (wait.getMillis() < -1000) {
        logger.warn("Job " + schedJob.getId() + " is scheduled for "
                + DateTimeFormat.shortDateTime().print(schedJob.getScheduledExecution()) + " which is "
                + (PeriodFormat.getDefault().print(wait.toPeriod()))
                + " in the past, adjusting scheduled date to now.");
        wait = new Duration(0);
    }

    // mark the job as scheduled
    _scheduled.put(schedJob.getId(), schedJob);

    if (saveResults) {
        try {
            saveSchedule();
        } catch (IOException e) {
            throw new RuntimeException("Error saving schedule after scheduling job " + schedJob.getId());
        }
    }

    ScheduledRunnable runnable = new ScheduledRunnable(schedJob);
    schedJob.setScheduledRunnable(runnable);
    return _executor.schedule(runnable, wait.getMillis(), TimeUnit.MILLISECONDS);
}

From source file:azkaban.app.Scheduler.java

License:Apache License

private void sendSuccessEmail(ScheduledJob job, Duration duration, String senderAddress,
        List<String> emailList) {
    if ((emailList == null || emailList.isEmpty()) && _jobSuccessEmail != null) {
        emailList = Arrays.asList(_jobSuccessEmail);
    }

    if (emailList != null && _mailman != null) {
        try {
            _mailman.sendEmailIfPossible(senderAddress, emailList,
                    "Job '" + job.getId() + "' has completed on " + InetAddress.getLocalHost().getHostName()
                            + "!",
                    "The job '" + job.getId() + "' completed in "
                            + PeriodFormat.getDefault().print(duration.toPeriod()) + ".");
        } catch (UnknownHostException uhe) {
            logger.error(uhe);
        }
    }
}

From source file:colossal.pipe.BaseOptions.java

License:Apache License

private Period parseDuration() {
    PeriodFormatter[] toTry = { PeriodFormat.getDefault(), ISOPeriodFormat.standard(),
            ISOPeriodFormat.alternate(), ISOPeriodFormat.alternateExtended(),
            ISOPeriodFormat.alternateExtendedWithWeeks(), ISOPeriodFormat.alternateWithWeeks() };
    for (PeriodFormatter f : toTry) {
        try {
            return f.parsePeriod(duration);
        } catch (IllegalArgumentException iae) {
            // skip to next
        }
    }
    throw new IllegalArgumentException("Can't parse: " + duration);
}

From source file:com.amazonaws.services.kinesis.log4j.FilePublisher.java

License:Open Source License

public static void main(String[] args) throws IOException {
    if (args.length != 1) {
        System.err.println("Usage: java " + FilePublisher.class.getName() + " <file_path>");
        System.err.println();
        System.err.println(
                "<file_path>\t-\tabsolute path for the input file, this file will be read line by line and ");
        System.err.println("\t\t\tpublished to Kinesis");
        System.exit(1);
    }
    String fileAbsolutePath = args[0];
    File logFile = new File(fileAbsolutePath);
    if (!logFile.exists() || !logFile.canRead()) {
        System.err.println("File " + args[0] + " doesn't exist or is not readable.");
        System.exit(2);
    }

    Logger kinesisLogger = Logger.getLogger("KinesisLogger");
    int i = 0;
    DateTime startTime = DateTime.now();
    BufferedReader reader = new BufferedReader(new FileReader(logFile));
    LOGGER.info("Started reading: " + fileAbsolutePath);
    String line = null;
    while ((line = reader.readLine()) != null) {
        kinesisLogger.info(line);
        i++;
        if (i % 100 == 0 && LOGGER.isDebugEnabled()) {
            LOGGER.debug("Total " + i + " records written to logger");
        }
    }
    reader.close();
    long bufferedRecordsCount = getBufferedRecordsCountFromKinesisAppenders();
    while (bufferedRecordsCount > 0) {
        LOGGER.info("Publisher threads within log4j appender are still working on sending "
                + bufferedRecordsCount + " buffered records to Kinesis");
        try {
            Thread.sleep(SLEEP_INTERVAL);
        } catch (InterruptedException e) {
            // do nothing
        }
        bufferedRecordsCount = getBufferedRecordsCountFromKinesisAppenders();
    }
    LOGGER.info("Published " + i + " records from " + fileAbsolutePath + " to the logger, took "
            + PeriodFormat.getDefault().print(new Period(startTime, DateTime.now())));
}

From source file:com.amazonaws.services.kinesis.log4j.helpers.AsyncBatchPutHandler.java

License:Open Source License

/**
 * This method is invoked when a log record is successfully sent to Kinesis.
 * Though this is not too useful for production use cases, it provides a good
 * debugging tool while tweaking parameters for the appender.
 */
@Override
public void onSuccess(PutRecordsRequest request, PutRecordsResult result) {
    int currentSuccessfulCount = amazonKinesisPutRecordsHelper
            .getSuccessCountAndaddFailedRecordsBackToQueue(request, result);
    successfulRequestCount = successfulRequestCount + currentSuccessfulCount;

    if ((logger.isInfoEnabled() && (successfulRequestCount + failedRequestCount)
            % amazonKinesisPutRecordsHelper.getBatchSize() == 0) || logger.isDebugEnabled()) {
        logger.info("Appender (" + appenderName + ") made " + successfulRequestCount
                + " successful put requests out of total " + (successfulRequestCount + failedRequestCount)
                + " in " + PeriodFormat.getDefault().print(new Period(startTime, DateTime.now()))
                + " since start");
    }
}

From source file:com.amazonaws.services.kinesis.log4j.helpers.AsyncPutCallStatsReporter.java

License:Open Source License

/**
 * This method is invoked when a log record is successfully sent to Kinesis.
 * Though this is not too useful for production use cases, it provides a good
 * debugging tool while tweaking parameters for the appender.
 */
@Override
public void onSuccess(PutRecordRequest request, PutRecordResult result) {
    successfulRequestCount++;
    if (logger.isDebugEnabled() && (successfulRequestCount + failedRequestCount) % 3000 == 0) {
        logger.debug("Appender (" + appenderName + ") made " + successfulRequestCount
                + " successful put requests out of total " + (successfulRequestCount + failedRequestCount)
                + " in " + PeriodFormat.getDefault().print(new Period(startTime, DateTime.now()))
                + " since start");
    }
}

From source file:com.brighttag.agathon.dao.zerg.ZergDaoModule.java

License:Apache License

@Provides
@Singleton
AsyncHttpClientConfig provideAsyncHttpClientConfig(
        @Named(ZERG_CONNECTION_TIMEOUT_PROPERTY) Duration connectionTimeout,
        @Named(ZERG_REQUEST_TIMEOUT_PROPERTY) Duration requestTimeout) {
    PeriodFormatter formatter = PeriodFormat.getDefault();
    log.info("Using connection timeout {} and request timeout {}",
            formatter.print(connectionTimeout.toPeriod()), formatter.print(requestTimeout.toPeriod()));
    return new AsyncHttpClientConfig.Builder().setAllowPoolingConnection(true)
            .setConnectionTimeoutInMs(Ints.saturatedCast(connectionTimeout.getMillis()))
            .setRequestTimeoutInMs(Ints.saturatedCast(requestTimeout.getMillis())).setFollowRedirects(true)
            .setMaximumNumberOfRedirects(3).setMaxRequestRetry(1).build();
}

From source file:com.cloudhopper.commons.util.demo.UptimeMain.java

License:Apache License

public static void main(String[] args) {

    //Period period = new Period(uptime, PeriodType.standard().withYearsRemoved().withWeeksRemoved().withMonthsRemoved().withMillisRemoved());
    //MutablePeriod period = new Duration(uptime).toPeriod().toMutablePeriod();

    long uptime = UPTIME_56_SECS;

    // ah, ha -- this is super important -- need to normalize the period!
    PeriodType periodType = PeriodType.standard().withYearsRemoved().withMonthsRemoved().withWeeksRemoved()
            .withMillisRemoved();
    Period period = new Period(uptime).normalizedStandard(periodType);

    System.out.println("Uptime: " + uptime + " ms");
    System.out.println("Weeks: " + period.getWeeks());
    System.out.println("Days: " + period.getDays());
    System.out.println("Millis: " + period.getMillis() + " ms");

    // print out the uptime
    String uptimeStyleString = PeriodFormatterUtil.getStandardUptimeStyle().print(period);
    String linuxStyleString = PeriodFormatterUtil.getLinuxUptimeStyle().print(period);

    System.out.println(uptimeStyleString);
    System.out.println(linuxStyleString);

    PeriodFormatter fmt = new PeriodFormatterBuilder().printZeroNever().appendDays()
            .appendSuffix(" day ", " days ").appendHours().appendSuffix(" hours ").appendMinutes()
            .appendSuffix(" mins ").printZeroAlways().appendSeconds().appendSuffix(" secs ").toFormatter();

    String str0 = fmt.print(period);
    System.out.println(str0);

    String str1 = PeriodFormat.getDefault().print(period);
    System.out.println(str1);
}