Example usage for org.joda.time DateTimeZone forID

Introduction

This page collects usage examples for org.joda.time DateTimeZone.forID.

Prototype

@FromString
public static DateTimeZone forID(String id) 

Document

Gets a time zone instance for the specified time zone id.
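
A minimal, self-contained sketch of the method's basic behavior (the class name is illustrative): forID resolves a tz database ID such as "Asia/Tokyo" or the fixed "UTC", and throws IllegalArgumentException for an unrecognized ID.

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

public class ForIdDemo {
    public static void main(String[] args) {
        // Resolve a zone by its tz database ID.
        DateTimeZone tokyo = DateTimeZone.forID("Asia/Tokyo");
        System.out.println(new DateTime(tokyo)); // current time in Tokyo

        // Fixed UTC is also accepted; a null ID yields the default zone.
        System.out.println(DateTimeZone.forID("UTC"));

        // Unrecognized IDs fail fast.
        try {
            DateTimeZone.forID("Not/AZone");
        } catch (IllegalArgumentException e) {
            System.out.println("Unrecognized ID: " + e.getMessage());
        }
    }
}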

Usage

From source file: imas.planning.entity.FlightEntity.java

private String convertTimezone(Date date, String countryName) {

    DateTime original = new DateTime(date.getTime());

    // Pick the first available zone whose ID contains the country name,
    // falling back to the default zone when nothing matches.
    DateTimeZone dtz = DateTimeZone.getDefault();
    Set<String> tzIds = DateTimeZone.getAvailableIDs();
    for (String timeZoneId : tzIds) {
        if (timeZoneId.contains(countryName)) {
            dtz = DateTimeZone.forID(timeZoneId);
            break;
        }
    }

    // DateTime is immutable; toDateTime returns a new instance in the target zone.
    DateTime dt = original.toDateTime(dtz);
    DateTimeFormatter dtfOut = DateTimeFormat.forPattern("MMM dd yyyy HH:mm:ss zzz");
    return dtfOut.print(dt);
}
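
When the target zone ID is already known, the lookup loop is unnecessary; a minimal sketch of the direct conversion (the zone ID here is an illustrative assumption):

    // Format an epoch-millis Date directly in a known zone.
    DateTime dt = new DateTime(date.getTime(), DateTimeZone.forID("Europe/Paris"));
    String formatted = DateTimeFormat.forPattern("MMM dd yyyy HH:mm:ss zzz").print(dt);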

From source file: io.confluent.connect.hdfs.HdfsSinkTask.java

License: Apache License

@Override
public void start(Map<String, String> props) {
    Set<TopicPartition> assignment = context.assignment();
    try {
        HdfsSinkConnectorConfig connectorConfig = new HdfsSinkConnectorConfig(props);
        boolean hiveIntegration = connectorConfig.getBoolean(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG);
        if (hiveIntegration) {
            Compatibility compatibility = SchemaUtils.getCompatibility(
                    connectorConfig.getString(HdfsSinkConnectorConfig.SCHEMA_COMPATIBILITY_CONFIG));
            if (compatibility == Compatibility.NONE) {
                throw new ConfigException(
                        "Hive Integration requires schema compatibility to be BACKWARD, FORWARD or FULL");
            }
        }

        // Check that the timezone is set up correctly when scheduled rotation is enabled.
        if (connectorConfig.getLong(HdfsSinkConnectorConfig.ROTATE_SCHEDULE_INTERVAL_MS_CONFIG) > 0) {
            String timeZoneString = connectorConfig.getString(HdfsSinkConnectorConfig.TIMEZONE_CONFIG);
            if (timeZoneString.equals("")) {
                throw new ConfigException(HdfsSinkConnectorConfig.TIMEZONE_CONFIG, timeZoneString,
                        "Timezone cannot be empty when using scheduled file rotation.");
            }
            // forID throws IllegalArgumentException for an unknown ID, so this
            // call validates the configured timezone before any data is written.
            DateTimeZone.forID(timeZoneString);
        }

        int schemaCacheSize = connectorConfig.getInt(HdfsSinkConnectorConfig.SCHEMA_CACHE_SIZE_CONFIG);
        avroData = new AvroData(schemaCacheSize);
        hdfsWriter = new DataWriter(connectorConfig, context, avroData);
        recover(assignment);
        if (hiveIntegration) {
            syncWithHive();
        }
    } catch (ConfigException e) {
        throw new ConnectException("Couldn't start HdfsSinkConnector due to configuration error.", e);
    } catch (ConnectException e) {
        log.info("Couldn't start HdfsSinkConnector:", e);
        log.info("Shutting down HdfsSinkConnector.");
        if (hdfsWriter != null) {
            hdfsWriter.close(assignment);
            hdfsWriter.stop();
        }
    }
}
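
The bare DateTimeZone.forID(timeZoneString) call above discards its result on purpose: forID fails fast with IllegalArgumentException on an unrecognized ID, so the call doubles as validation of the configured timezone. A minimal sketch of the same idiom, with a hypothetical helper name:

import org.joda.time.DateTimeZone;

public class ZoneValidation {
    // Hypothetical helper illustrating the fail-fast validation idiom.
    static DateTimeZone requireValidZone(String id) {
        try {
            return DateTimeZone.forID(id); // throws on an unrecognized ID
        } catch (IllegalArgumentException e) {
            throw new IllegalArgumentException("Invalid timezone ID: " + id, e);
        }
    }

    public static void main(String[] args) {
        System.out.println(requireValidZone("Europe/Berlin")); // prints the zone
        requireValidZone("Mars/OlympusMons");                  // throws
    }
}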

From source file: io.confluent.connect.hdfs.partitioner.DailyPartitioner.java

License: Apache License

@Override
public void configure(Map<String, Object> config) {
    String localeString = (String) config.get(HdfsSinkConnectorConfig.LOCALE_CONFIG);
    if (localeString.equals("")) {
        throw new ConfigException(HdfsSinkConnectorConfig.LOCALE_CONFIG, localeString,
                "Locale cannot be empty.");
    }
    String timeZoneString = (String) config.get(HdfsSinkConnectorConfig.TIMEZONE_CONFIG);
    if (timeZoneString.equals("")) {
        throw new ConfigException(HdfsSinkConnectorConfig.TIMEZONE_CONFIG, timeZoneString,
                "Timezone cannot be empty.");
    }
    String hiveIntString = (String) config.get(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG);
    boolean hiveIntegration = hiveIntString != null && hiveIntString.toLowerCase().equals("true");
    Locale locale = new Locale(localeString);
    DateTimeZone timeZone = DateTimeZone.forID(timeZoneString);
    // partitionDurationMs and pathFormat are fields of this class
    // (a fixed 24-hour duration and a daily path format).
    init(partitionDurationMs, pathFormat, locale, timeZone, hiveIntegration);
}

From source file: io.confluent.connect.hdfs.partitioner.TimeBasedPartitioner.java

License: Apache License

@Override
public void configure(Map<String, Object> config) {
    long partitionDurationMs = (long) config.get(HdfsSinkConnectorConfig.PARTITION_DURATION_MS_CONFIG);
    if (partitionDurationMs < 0) {
        throw new ConfigException(HdfsSinkConnectorConfig.PARTITION_DURATION_MS_CONFIG, partitionDurationMs,
                "Partition duration needs to be positive.");
    }

    String pathFormat = (String) config.get(HdfsSinkConnectorConfig.PATH_FORMAT_CONFIG);
    if (pathFormat.equals("")) {
        throw new ConfigException(HdfsSinkConnectorConfig.PATH_FORMAT_CONFIG, pathFormat,
                "Path format cannot be empty.");
    }

    String localeString = (String) config.get(HdfsSinkConnectorConfig.LOCALE_CONFIG);
    if (localeString.equals("")) {
        throw new ConfigException(HdfsSinkConnectorConfig.LOCALE_CONFIG, localeString,
                "Locale cannot be empty.");
    }
    String timeZoneString = (String) config.get(HdfsSinkConnectorConfig.TIMEZONE_CONFIG);
    if (timeZoneString.equals("")) {
        throw new ConfigException(HdfsSinkConnectorConfig.TIMEZONE_CONFIG, timeZoneString,
                "Timezone cannot be empty.");
    }

    String hiveIntString = (String) config.get(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG);
    boolean hiveIntegration = hiveIntString != null && hiveIntString.toLowerCase().equals("true");

    Locale locale = new Locale(localeString);
    DateTimeZone timeZone = DateTimeZone.forID(timeZoneString);
    init(partitionDurationMs, pathFormat, locale, timeZone, hiveIntegration);
}
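
For reference, a sketch of configuring this partitioner directly, e.g. from a test; the config keys are the ones read above, while the literal values are illustrative assumptions:

    Map<String, Object> config = new HashMap<>();
    config.put(HdfsSinkConnectorConfig.PARTITION_DURATION_MS_CONFIG, 3600000L); // hourly partitions
    config.put(HdfsSinkConnectorConfig.PATH_FORMAT_CONFIG, "'year'=YYYY/'month'=MM/'day'=dd/'hour'=HH/");
    config.put(HdfsSinkConnectorConfig.LOCALE_CONFIG, "en");
    config.put(HdfsSinkConnectorConfig.TIMEZONE_CONFIG, "America/Los_Angeles");
    config.put(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG, "false");

    TimeBasedPartitioner partitioner = new TimeBasedPartitioner();
    partitioner.configure(config);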

From source file: io.confluent.connect.hdfs.partitioner.TimeUtils.java

License: Apache License

public static String encodeTimestamp(long partitionDurationMs, String pathFormat, String timeZoneString,
        long timestamp) {
    DateTimeZone timeZone = DateTimeZone.forID(timeZoneString);
    DateTimeFormatter formatter = DateTimeFormat.forPattern(pathFormat).withZone(timeZone);
    // getPartition (another helper in this class) floors the timestamp to the
    // start of its partition interval in the given zone.
    DateTime partition = new DateTime(getPartition(partitionDurationMs, timestamp, timeZone));
    return partition.toString(formatter);
}
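
A sketch of calling this helper; all argument values are illustrative:

    // One-hour partitions, a daily path format, and UTC.
    String path = TimeUtils.encodeTimestamp(
            3600000L,
            "'year'=YYYY/'month'=MM/'day'=dd/",
            "UTC",
            System.currentTimeMillis());
    System.out.println(path); // e.g. year=2024/month=05/day=17/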

From source file: io.confluent.connect.hdfs.TopicPartitionWriter.java

License: Apache License

public TopicPartitionWriter(TopicPartition tp, Storage storage, RecordWriterProvider writerProvider,
        Partitioner partitioner, HdfsSinkConnectorConfig connectorConfig, SinkTaskContext context,
        AvroData avroData, HiveMetaStore hiveMetaStore, HiveUtil hive, SchemaFileReader schemaFileReader,
        ExecutorService executorService, Queue<Future<Void>> hiveUpdateFutures) {
    this.tp = tp;
    this.connectorConfig = connectorConfig;
    this.context = context;
    this.avroData = avroData;
    this.storage = storage;
    this.writerProvider = writerProvider;
    this.partitioner = partitioner;
    this.url = storage.url();
    this.conf = storage.conf();
    this.schemaFileReader = schemaFileReader;

    topicsDir = connectorConfig.getString(HdfsSinkConnectorConfig.TOPICS_DIR_CONFIG);
    flushSize = connectorConfig.getInt(HdfsSinkConnectorConfig.FLUSH_SIZE_CONFIG);
    rotateIntervalMs = connectorConfig.getLong(HdfsSinkConnectorConfig.ROTATE_INTERVAL_MS_CONFIG);
    rotateScheduleIntervalMs = connectorConfig
            .getLong(HdfsSinkConnectorConfig.ROTATE_SCHEDULE_INTERVAL_MS_CONFIG);
    timeoutMs = connectorConfig.getLong(HdfsSinkConnectorConfig.RETRY_BACKOFF_CONFIG);
    compatibility = SchemaUtils
            .getCompatibility(connectorConfig.getString(HdfsSinkConnectorConfig.SCHEMA_COMPATIBILITY_CONFIG));

    String logsDir = connectorConfig.getString(HdfsSinkConnectorConfig.LOGS_DIR_CONFIG);
    wal = storage.wal(logsDir, tp);

    buffer = new LinkedList<>();
    writers = new HashMap<>();
    tempFiles = new HashMap<>();
    appended = new HashSet<>();
    startOffsets = new HashMap<>();
    offsets = new HashMap<>();
    state = State.RECOVERY_STARTED;
    failureTime = -1L;
    offset = -1L;
    sawInvalidOffset = false;
    extension = writerProvider.getExtension();
    zeroPadOffsetFormat = "%0"
            + connectorConfig.getInt(HdfsSinkConnectorConfig.FILENAME_OFFSET_ZERO_PAD_WIDTH_CONFIG) + "d";

    hiveIntegration = connectorConfig.getBoolean(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG);
    if (hiveIntegration) {
        hiveDatabase = connectorConfig.getString(HdfsSinkConnectorConfig.HIVE_DATABASE_CONFIG);
        this.hiveMetaStore = hiveMetaStore;
        this.hive = hive;
        this.executorService = executorService;
        this.hiveUpdateFutures = hiveUpdateFutures;
        hivePartitions = new HashSet<>();
    }

    if (rotateScheduleIntervalMs > 0) {
        // Scheduled rotation is based on wall-clock time, which requires a concrete timezone.
        timeZone = DateTimeZone.forID(connectorConfig.getString(HdfsSinkConnectorConfig.TIMEZONE_CONFIG));
    }

    // Initialize rotation timers
    updateRotationTimers();
}

From source file: io.confluent.connect.s3.TopicPartitionWriter.java

License: Open Source License

TopicPartitionWriter(TopicPartition tp, RecordWriterProvider<S3SinkConnectorConfig> writerProvider,
        Partitioner<FieldSchema> partitioner, S3SinkConnectorConfig connectorConfig, SinkTaskContext context,
        Time time) {
    this.connectorConfig = connectorConfig;
    this.time = time;
    this.tp = tp;
    this.context = context;
    this.writerProvider = writerProvider;
    this.partitioner = partitioner;
    this.timestampExtractor = partitioner instanceof TimeBasedPartitioner
            ? ((TimeBasedPartitioner) partitioner).getTimestampExtractor()
            : null;
    flushSize = connectorConfig.getInt(S3SinkConnectorConfig.FLUSH_SIZE_CONFIG);
    topicsDir = connectorConfig.getString(StorageCommonConfig.TOPICS_DIR_CONFIG);
    rotateIntervalMs = connectorConfig.getLong(S3SinkConnectorConfig.ROTATE_INTERVAL_MS_CONFIG);
    if (rotateIntervalMs > 0 && timestampExtractor == null) {
        log.warn(
                "Property '{}' is set to '{}ms' but partitioner is not an instance of '{}'. This property"
                        + " is ignored.",
                S3SinkConnectorConfig.ROTATE_INTERVAL_MS_CONFIG, rotateIntervalMs,
                TimeBasedPartitioner.class.getName());
    }
    rotateScheduleIntervalMs = connectorConfig
            .getLong(S3SinkConnectorConfig.ROTATE_SCHEDULE_INTERVAL_MS_CONFIG);
    if (rotateScheduleIntervalMs > 0) {
        timeZone = DateTimeZone.forID(connectorConfig.getString(PartitionerConfig.TIMEZONE_CONFIG));
    }
    timeoutMs = connectorConfig.getLong(S3SinkConnectorConfig.RETRY_BACKOFF_CONFIG);
    compatibility = StorageSchemaCompatibility
            .getCompatibility(connectorConfig.getString(HiveConfig.SCHEMA_COMPATIBILITY_CONFIG));

    buffer = new LinkedList<>();
    commitFiles = new HashMap<>();
    writers = new HashMap<>();
    currentSchemas = new HashMap<>();
    startOffsets = new HashMap<>();
    state = State.WRITE_STARTED;
    failureTime = -1L;
    currentOffset = -1L;
    dirDelim = connectorConfig.getString(StorageCommonConfig.DIRECTORY_DELIM_CONFIG);
    fileDelim = connectorConfig.getString(StorageCommonConfig.FILE_DELIM_CONFIG);
    extension = writerProvider.getExtension();
    zeroPadOffsetFormat = "%0"
            + connectorConfig.getInt(S3SinkConnectorConfig.FILENAME_OFFSET_ZERO_PAD_WIDTH_CONFIG) + "d";

    // Initialize scheduled rotation timer if applicable
    setNextScheduledRotation();
}

From source file: io.confluent.connect.storage.partitioner.DailyPartitioner.java

License: Apache License

@Override
public void configure(Map<String, Object> config) {
    long partitionDurationMs = TimeUnit.HOURS.toMillis(24);
    String delim = (String) config.get(StorageCommonConfig.DIRECTORY_DELIM_CONFIG);
    pathFormat = "'year'=YYYY" + delim + "'month'=MM" + delim + "'day'=dd" + delim;

    String localeString = (String) config.get(PartitionerConfig.LOCALE_CONFIG);
    if (localeString.equals("")) {
        throw new ConfigException(PartitionerConfig.LOCALE_CONFIG, localeString, "Locale cannot be empty.");
    }

    String timeZoneString = (String) config.get(PartitionerConfig.TIMEZONE_CONFIG);
    if (timeZoneString.equals("")) {
        throw new ConfigException(PartitionerConfig.TIMEZONE_CONFIG, timeZoneString,
                "Timezone cannot be empty.");
    }

    Locale locale = new Locale(localeString);
    DateTimeZone timeZone = DateTimeZone.forID(timeZoneString);
    init(partitionDurationMs, pathFormat, locale, timeZone, config);
}

From source file: io.confluent.connect.storage.partitioner.HourlyPartitioner.java

License: Apache License

@Override
public void configure(Map<String, Object> config) {
    String localeString = (String) config.get(PartitionerConfig.LOCALE_CONFIG);
    if (localeString.equals("")) {
        throw new ConfigException(PartitionerConfig.LOCALE_CONFIG, localeString, "Locale cannot be empty.");
    }
    String timeZoneString = (String) config.get(PartitionerConfig.TIMEZONE_CONFIG);
    if (timeZoneString.equals("")) {
        throw new ConfigException(PartitionerConfig.TIMEZONE_CONFIG, timeZoneString,
                "Timezone cannot be empty.");
    }
    Locale locale = new Locale(localeString);
    DateTimeZone timeZone = DateTimeZone.forID(timeZoneString);
    // PARTITION_DURATION_MS and PATH_FORMAT are constants of this class
    // (one hour and an hourly path format).
    init(PARTITION_DURATION_MS, PATH_FORMAT, locale, timeZone, config);
}

From source file: io.confluent.connect.storage.partitioner.TimeBasedPartitioner.java

License: Open Source License

@Override
public void configure(Map<String, Object> config) {
    super.configure(config);
    long partitionDurationMsProp = (long) config.get(PartitionerConfig.PARTITION_DURATION_MS_CONFIG);
    if (partitionDurationMsProp < 0) {
        throw new ConfigException(PartitionerConfig.PARTITION_DURATION_MS_CONFIG, partitionDurationMsProp,
                "Partition duration needs to be positive.");
    }

    String pathFormat = (String) config.get(PartitionerConfig.PATH_FORMAT_CONFIG);
    if (StringUtils.isBlank(pathFormat) || pathFormat.equals(delim)) {
        throw new ConfigException(PartitionerConfig.PATH_FORMAT_CONFIG, pathFormat,
                "Path format cannot be empty.");
    } else if (!StringUtils.isBlank(delim) && pathFormat.endsWith(delim)) {
        // Delimiter has been added by the user at the end of the path format string. Removing.
        pathFormat = pathFormat.substring(0, pathFormat.length() - delim.length());
    }

    String localeString = (String) config.get(PartitionerConfig.LOCALE_CONFIG);
    if (StringUtils.isBlank(localeString)) {
        throw new ConfigException(PartitionerConfig.LOCALE_CONFIG, localeString, "Locale cannot be empty.");
    }

    String timeZoneString = (String) config.get(PartitionerConfig.TIMEZONE_CONFIG);
    if (StringUtils.isBlank(timeZoneString)) {
        throw new ConfigException(PartitionerConfig.TIMEZONE_CONFIG, timeZoneString,
                "Timezone cannot be empty.");
    }

    Locale locale = new Locale(localeString);
    DateTimeZone timeZone = DateTimeZone.forID(timeZoneString);
    init(partitionDurationMsProp, pathFormat, locale, timeZone, config);
}