Example usage for java.time OffsetDateTime toLocalDate

List of usage examples for java.time OffsetDateTime toLocalDate

Introduction

This page lists usage examples for the java.time OffsetDateTime.toLocalDate() method.

Prototype

public LocalDate toLocalDate() 

Document

Gets the LocalDate part of this date-time.

Usage

From source file:Main.java

public static void main(String[] args) {
    OffsetDateTime offsetDateTime = OffsetDateTime.now();
    LocalDate localDate = offsetDateTime.toLocalDate();
    System.out.println(localDate);
}
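
The returned LocalDate is taken from the local date-time as-is; the offset is not applied first. A minimal sketch (values illustrative) showing that a moment which already falls on the next day in UTC still yields the local calendar date:

import java.time.LocalDate;
import java.time.OffsetDateTime;

public class ToLocalDateOffsetDemo {
    public static void main(String[] args) {
        // The date part is extracted directly; the offset is not applied beforehand.
        OffsetDateTime lateEvening = OffsetDateTime.parse("2016-12-31T23:30-05:00");
        LocalDate date = lateEvening.toLocalDate();
        System.out.println(date); // 2016-12-31, although the same instant is already 2017-01-01 in UTC
    }
}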

From source file:org.openmhealth.shim.ihealth.mapper.IHealthDataPointMapper.java

/**
 * @param dateTimeInUnixSecondsWithLocalTimeOffset A unix epoch timestamp in local time.
 * @param timeZoneString The time zone offset as a String (e.g., "+0200","-2").
 * @return The date time with the correct offset.
 */
protected static OffsetDateTime getDateTimeAtStartOfDayWithCorrectOffset(
        Long dateTimeInUnixSecondsWithLocalTimeOffset, String timeZoneString) {

    // Since the timestamps are in local time, we can use the local date time provided by rendering the timestamp
    // in UTC, then translating that local time to the appropriate offset.
    OffsetDateTime dateTimeFromOffsetInstant = ofInstant(
            ofEpochSecond(dateTimeInUnixSecondsWithLocalTimeOffset), ZoneId.of("Z"));

    return dateTimeFromOffsetInstant.toLocalDate().atStartOfDay().atOffset(ZoneOffset.of(timeZoneString));
}
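
A hedged illustration of how this helper behaves for one sample input; the values and calling context are assumptions rather than anything taken from the project's tests, and it presumes the protected method is reachable (e.g. from a test in the same package):

// 12:04:09 local time on 2015-09-17, rendered as epoch seconds against UTC (assumed input format)
long localSeconds = LocalDateTime.of(2015, 9, 17, 12, 4, 9).toEpochSecond(ZoneOffset.UTC);

OffsetDateTime startOfDay = getDateTimeAtStartOfDayWithCorrectOffset(localSeconds, "+0200");

System.out.println(startOfDay); // 2015-09-17T00:00+02:00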

From source file:com.epam.dlab.backendapi.service.impl.SchedulerJobServiceImpl.java

/**
 * Checks if scheduler's time data satisfies existing time parameters.
 *
 * @param dto           scheduler job data.
 * @param dateTime      existing time data.
 * @param desiredStatus target exploratory status, which determines what is checked ('running'
 *                      requires checking the start time, 'stopped' the end time, and 'terminated'
 *                      the 'terminateDateTime').
 * @return {@code true} if the scheduler job data satisfies the condition, {@code false} otherwise.
 */
private boolean isSchedulerJobDtoSatisfyCondition(SchedulerJobDTO dto, OffsetDateTime dateTime,
        UserInstanceStatus desiredStatus) {
    ZoneOffset zOffset = dto.getTimeZoneOffset();
    OffsetDateTime roundedDateTime = OffsetDateTime.of(dateTime.toLocalDate(),
            LocalTime.of(dateTime.toLocalTime().getHour(), dateTime.toLocalTime().getMinute()),
            dateTime.getOffset());

    LocalDateTime convertedDateTime = ZonedDateTime
            .ofInstant(roundedDateTime.toInstant(), ZoneId.ofOffset(TIMEZONE_PREFIX, zOffset))
            .toLocalDateTime();

    return desiredStatus == TERMINATED
            ? Objects.nonNull(dto.getTerminateDateTime())
                    && convertedDateTime.toLocalDate().equals(dto.getTerminateDateTime().toLocalDate())
                    && convertedDateTime.toLocalTime().equals(getDesiredTime(dto, desiredStatus))
            : !convertedDateTime.toLocalDate().isBefore(dto.getBeginDate())
                    && isFinishDateMatchesCondition(dto, convertedDateTime)
                    && getDaysRepeat(dto, desiredStatus)
                            .contains(convertedDateTime.toLocalDate().getDayOfWeek())
                    && convertedDateTime.toLocalTime().equals(getDesiredTime(dto, desiredStatus));
}
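
The OffsetDateTime.of(...) call above rebuilds the value from its date, an hour-and-minute LocalTime, and the original offset, which amounts to truncating to minute precision. A shorter equivalent (a sketch, not the project's code):

// Equivalent rounding: drop seconds and nanoseconds, keep date, hour, minute and offset.
// Requires java.time.temporal.ChronoUnit.
OffsetDateTime roundedDateTime = dateTime.truncatedTo(ChronoUnit.MINUTES);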

From source file:org.openmhealth.shim.runkeeper.RunkeeperShim.java

protected ResponseEntity<ShimDataResponse> getData(OAuth2RestOperations restTemplate,
        ShimDataRequest shimDataRequest) throws ShimException {

    String dataTypeKey = shimDataRequest.getDataTypeKey().trim().toUpperCase();

    RunkeeperDataType runkeeperDataType;
    try {
        runkeeperDataType = RunkeeperDataType.valueOf(dataTypeKey);
    } catch (NullPointerException | IllegalArgumentException e) {
        throw new ShimException("Null or Invalid data type parameter: " + dataTypeKey
                + " in shimDataRequest, cannot retrieve data.");
    }

    /*
     * Set up default date parameters.
     */
    OffsetDateTime now = OffsetDateTime.now();

    OffsetDateTime startDateTime = shimDataRequest.getStartDateTime() == null ? now.minusDays(1)
            : shimDataRequest.getStartDateTime();

    OffsetDateTime endDateTime = shimDataRequest.getEndDateTime() == null ? now.plusDays(1)
            : shimDataRequest.getEndDateTime();

    /*
    Runkeeper defaults to returning a maximum of 25 entries per request (pageSize = 25 by default), so
    we override the default by specifying an arbitrarily large number as the pageSize.
     */
    long numToReturn = 100_000;

    UriComponentsBuilder uriBuilder = UriComponentsBuilder.fromUriString(DATA_URL)
            .pathSegment(runkeeperDataType.getEndPointUrl())
            .queryParam("noEarlierThan", startDateTime.toLocalDate())
            .queryParam("noLaterThan", endDateTime.toLocalDate()).queryParam("pageSize", numToReturn)
            .queryParam("detail", true); // added to all endpoints to support summaries

    HttpHeaders headers = new HttpHeaders();
    headers.set("Accept", runkeeperDataType.getDataTypeHeader());

    ResponseEntity<JsonNode> responseEntity;
    try {
        responseEntity = restTemplate.exchange(uriBuilder.build().encode().toUri(), GET,
                new HttpEntity<JsonNode>(headers), JsonNode.class);
    } catch (HttpClientErrorException | HttpServerErrorException e) {
        // FIXME figure out how to handle this
        logger.error("A request for RunKeeper data failed.", e);
        throw e;
    }

    if (shimDataRequest.getNormalize()) {
        RunkeeperDataPointMapper<?> dataPointMapper;
        switch (runkeeperDataType) {
        case ACTIVITY:
            dataPointMapper = new RunkeeperPhysicalActivityDataPointMapper();
            break;
        case CALORIES:
            dataPointMapper = new RunkeeperCaloriesBurnedDataPointMapper();
            break;
        default:
            throw new UnsupportedOperationException();
        }

        return ok().body(ShimDataResponse.result(SHIM_KEY,
                dataPointMapper.asDataPoints(singletonList(responseEntity.getBody()))));
    } else {
        return ok().body(ShimDataResponse.result(SHIM_KEY, responseEntity.getBody()));
    }
}
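
In this request, toLocalDate() only formats the query window: UriComponentsBuilder stringifies the parameter value, and LocalDate.toString() produces the ISO-8601 yyyy-MM-dd form. A minimal sketch (dates illustrative):

OffsetDateTime start = OffsetDateTime.parse("2017-03-01T08:15:30+01:00");
System.out.println(start.toLocalDate()); // 2017-03-01 - the value sent as noEarlierThan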

From source file:com.epam.dlab.backendapi.service.impl.SchedulerJobServiceImpl.java

@Override
public void executeStopResourceJob(boolean isAppliedForClusters) {
    OffsetDateTime currentDateTime = OffsetDateTime.now();
    List<SchedulerJobData> jobsToStop = getSchedulerJobsForAction(STOPPED, currentDateTime,
            isAppliedForClusters);
    if (!jobsToStop.isEmpty()) {
        log.debug(isAppliedForClusters ? "Scheduler computational resource stop job is executing..."
                : "Scheduler exploratory stop job is executing...");
        log.info(CURRENT_DATETIME_INFO,
                LocalTime.of(currentDateTime.toLocalTime().getHour(),
                        currentDateTime.toLocalTime().getMinute()),
                currentDateTime.toLocalDate(), currentDateTime.getDayOfWeek());
        log.info(isAppliedForClusters ? "Quantity of clusters for stopping: {}"
                : "Quantity of exploratories for stopping: {}", jobsToStop.size());
        jobsToStop.forEach(job -> changeResourceStatusTo(STOPPED, job, isAppliedForClusters));
    }
}
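
The log statement above derives a minute-precision time, the date, and the day of week from the same OffsetDateTime. The same values can be read a little more directly (a sketch, not the project's code; ChronoUnit is java.time.temporal.ChronoUnit and DayOfWeek is java.time.DayOfWeek):

LocalTime timeForLog = currentDateTime.toLocalTime().truncatedTo(ChronoUnit.MINUTES);
LocalDate dateForLog = currentDateTime.toLocalDate();
DayOfWeek dayForLog = currentDateTime.getDayOfWeek(); // same as dateForLog.getDayOfWeek()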

From source file:com.epam.dlab.backendapi.service.impl.SchedulerJobServiceImpl.java

@Override
public void executeStartResourceJob(boolean isAppliedForClusters) {
    OffsetDateTime currentDateTime = OffsetDateTime.now();
    List<SchedulerJobData> jobsToStart = getSchedulerJobsForAction(UserInstanceStatus.RUNNING, currentDateTime,
            isAppliedForClusters);
    if (!jobsToStart.isEmpty()) {
        log.debug(isAppliedForClusters ? "Scheduler computational resource start job is executing..."
                : "Scheduler exploratory start job is executing...");
        log.info(CURRENT_DATETIME_INFO,
                LocalTime.of(currentDateTime.toLocalTime().getHour(),
                        currentDateTime.toLocalTime().getMinute()),
                currentDateTime.toLocalDate(), currentDateTime.getDayOfWeek());
        log.info(isAppliedForClusters ? "Quantity of clusters for starting: {}"
                : "Quantity of exploratories for starting: {}", jobsToStart.size());
        jobsToStart
                .forEach(job -> changeResourceStatusTo(UserInstanceStatus.RUNNING, job, isAppliedForClusters));
    }
}

From source file:org.openmhealth.shim.misfit.MisfitShim.java

@Override
protected ResponseEntity<ShimDataResponse> getData(OAuth2RestOperations restTemplate,
        ShimDataRequest shimDataRequest) throws ShimException {

    final MisfitDataTypes misfitDataType;
    try {
        misfitDataType = MisfitDataTypes.valueOf(shimDataRequest.getDataTypeKey().trim().toUpperCase());
    } catch (NullPointerException | IllegalArgumentException e) {
        throw new ShimException("Null or Invalid data type parameter: " + shimDataRequest.getDataTypeKey()
                + " in shimDataRequest, cannot retrieve data.");
    }

    // TODO don't truncate dates
    OffsetDateTime now = OffsetDateTime.now();

    OffsetDateTime startDateTime = shimDataRequest.getStartDateTime() == null ? now.minusDays(1)
            : shimDataRequest.getStartDateTime();

    OffsetDateTime endDateTime = shimDataRequest.getEndDateTime() == null ? now.plusDays(1)
            : shimDataRequest.getEndDateTime();

    if (Duration.between(startDateTime, endDateTime).toDays() > MAX_DURATION_IN_DAYS) {
        endDateTime = startDateTime.plusDays(MAX_DURATION_IN_DAYS - 1); // TODO when refactoring, break apart queries
    }

    UriComponentsBuilder uriBuilder = UriComponentsBuilder.fromUriString(DATA_URL);

    for (String pathSegment : Splitter.on("/").split(misfitDataType.getEndPoint())) {
        uriBuilder.pathSegment(pathSegment);
    }

    uriBuilder.queryParam("start_date", startDateTime.toLocalDate()) // TODO convert ODT to LocalDate properly
            .queryParam("end_date", endDateTime.toLocalDate()).queryParam("detail", true); // added to all endpoints to support summaries

    ResponseEntity<JsonNode> responseEntity;
    try {
        responseEntity = restTemplate.getForEntity(uriBuilder.build().encode().toUri(), JsonNode.class);
    } catch (HttpClientErrorException | HttpServerErrorException e) {
        // FIXME figure out how to handle this
        logger.error("A request for Misfit data failed.", e);
        throw e;
    }

    if (shimDataRequest.getNormalize()) {

        MisfitDataPointMapper<?> dataPointMapper;

        switch (misfitDataType) {
        case ACTIVITIES:
            dataPointMapper = physicalActivityMapper;
            break;
        case SLEEP:
            dataPointMapper = sleepDurationMapper;
            break;
        case STEPS:
            dataPointMapper = stepCountMapper;
            break;
        default:
            throw new UnsupportedOperationException();
        }

        return ok().body(ShimDataResponse.result(SHIM_KEY,
                dataPointMapper.asDataPoints(singletonList(responseEntity.getBody()))));
    } else {
        return ok().body(ShimDataResponse.result(SHIM_KEY, responseEntity.getBody()));
    }
}
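
The TODO comments flag that taking toLocalDate() of an OffsetDateTime drops the offset, so a window starting late in the evening at a negative offset can name a date one day earlier than the instant's UTC date. One possible refinement, sketched under the assumption (not confirmed here) that the Misfit API interprets the dates as UTC:

// Normalise to a single offset before extracting the date.
LocalDate startDateUtc = startDateTime.withOffsetSameInstant(ZoneOffset.UTC).toLocalDate();
LocalDate endDateUtc = endDateTime.withOffsetSameInstant(ZoneOffset.UTC).toLocalDate();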

From source file:com.epam.dlab.backendapi.service.impl.SchedulerJobServiceImpl.java

@Override
public void executeTerminateResourceJob(boolean isAppliedForClusters) {
    OffsetDateTime currentDateTime = OffsetDateTime.now();
    List<SchedulerJobData> jobsToTerminate = getSchedulerJobsForAction(UserInstanceStatus.TERMINATED,
            currentDateTime, isAppliedForClusters);
    if (!jobsToTerminate.isEmpty()) {
        log.debug(isAppliedForClusters ? "Scheduler computational resource terminate job is executing..."
                : "Scheduler exploratory terminate job is executing...");
        log.info(CURRENT_DATETIME_INFO,
                LocalTime.of(currentDateTime.toLocalTime().getHour(),
                        currentDateTime.toLocalTime().getMinute()),
                currentDateTime.toLocalDate(), currentDateTime.getDayOfWeek());
        log.info(isAppliedForClusters ? "Quantity of clusters for terminating: {}"
                : "Quantity of exploratories for terminating: {}", jobsToTerminate.size());
        jobsToTerminate.forEach(
                job -> changeResourceStatusTo(UserInstanceStatus.TERMINATED, job, isAppliedForClusters));
    }
}