List of usage examples for org.joda.time DateTimeZone getDefault
public static DateTimeZone getDefault()
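Before the per-project examples, here is a minimal, self-contained sketch (not taken from any of the source files below) showing what getDefault() returns and how the default zone is used implicitly when no zone is given:

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

public class GetDefaultExample {
    public static void main(String[] args) {
        // the JVM's current default time zone (derived from user.timezone / the OS setting)
        DateTimeZone defaultZone = DateTimeZone.getDefault();
        System.out.println("Default zone: " + defaultZone.getID());

        // constructing a DateTime without an explicit zone uses that same default
        DateTime now = new DateTime();
        System.out.println("Now in default zone: " + now);
        System.out.println("Same instant in UTC: " + now.withZone(DateTimeZone.UTC));
    }
}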
From source file:graphene.util.time.JodaTimeUtil.java
License:Apache License
public static void test_localDate_shift_joda_tz() {
    System.out.println("Test LocalDate with shifted JodaTime timezone");
    final DateTimeZone originalTZ = DateTimeZone.getDefault();
    final DateTimeZone losAngelesTZ = DateTimeZone.forID("America/Los_Angeles");

    DateTimeZone.setDefault(losAngelesTZ);
    final LocalDate ld0 = new LocalDate(losAngelesTZ);
    System.out.println("ld0 LocalDate(losAngelesTZ) = " + ld0 + " when default TZ = " + DateTimeZone.getDefault());

    DateTimeZone.setDefault(losAngelesTZ);
    final LocalDate ld1 = new LocalDate();
    System.out.println("ld1 LocalDate() = " + ld1 + " when default TZ = " + DateTimeZone.getDefault());

    final java.sql.Date d0 = toSQLDate(ld0);
    System.out.println("d0 toSQLDate(ld0) = " + d0 + " when default TZ = " + DateTimeZone.getDefault());
    final java.sql.Date d1 = toSQLDate(ld1);
    System.out.println("d1 toSQLDate(ld1) = " + d1 + " when default TZ = " + DateTimeZone.getDefault());

    DateTimeZone.setDefault(originalTZ);
    System.out.println("d1 toSQLDate(ld1) = " + d1 + " when default TZ = " + DateTimeZone.getDefault());

    DateTimeZone.setDefault(originalTZ);
    final LocalDate ld2 = toLocalDate(d1);
    System.out.println("ld2 toLocalDate(d1) = " + ld2 + " when default TZ = " + DateTimeZone.getDefault());

    DateTimeZone.setDefault(originalTZ);
    if (!ld2.equals(ld1)) {
        throw new IllegalStateException();
    }
}
From source file:imas.planning.entity.FlightEntity.java
private String convertTimezone(Date date, String countryName) {
    DateTime original = new DateTime(date.getTime());
    // fall back to the JVM default zone if no zone ID matches the country name
    DateTimeZone dtz = DateTimeZone.getDefault();
    Set<String> tzIds = DateTimeZone.getAvailableIDs();
    for (String timeZoneId : tzIds) {
        if (timeZoneId.contains(countryName)) {
            dtz = DateTimeZone.forID(timeZoneId);
            break;
        }
    }
    DateTime dt = original.toDateTime(dtz);
    DateTimeFormatter dtfOut = DateTimeFormat.forPattern("MMM dd yyyy HH:mm:ss zzz");
    return dtfOut.print(dt);
}
From source file:io.coala.dsol.util.DsolUtil.java
License:Apache License
/**
 * @return current replication time as replication ISO date
 * @throws DSOLException
 */
public static DateTime getDateTime(final SimulatorInterface simulator) {
    return getDateTime(simulator, DateTimeZone.getDefault());
}
From source file:io.coala.dsol.util.DsolUtil.java
License:Apache License
/** @return specified simulation time and unit as replication ISO date */
public static DateTime toDateTime(final Number simTime, final Treatment treatment) {
    return toDateTime(simTime, treatment, DateTimeZone.getDefault());
}
From source file:io.coala.xml.XmlUtil.java
License:Apache License
/**
 * @param date a JAXP {@link XMLGregorianCalendar}
 * @return the corresponding Joda-Time {@link DateTime}
 */
public static DateTime toDateTime(final XMLGregorianCalendar date) {
    final DateTimeZone timeZone = date.getTimezone() == DatatypeConstants.FIELD_UNDEFINED
            ? DateTimeZone.getDefault()
            : DateTimeZone.forOffsetMillis(date.getTimezone() * 60 * 1000);
    return new DateTime(date.getYear(), date.getMonth(), date.getDay(), date.getHour(), date.getMinute(),
            date.getSecond(), date.getMillisecond(), timeZone);
}
From source file:io.coala.xml.XmlUtil.java
License:Apache License
/**
 * @param date a {@link Date}
 * @return a JAXP {@link XMLGregorianCalendar}
 */
public static XMLGregorianCalendar toDateTime(final Date date) {
    return toDateTime(new DateTime(date, DateTimeZone.getDefault()));
}
From source file:io.prestosql.plugin.hive.GenericHiveRecordCursor.java
License:Apache License
private static long getLongExpressedValue(Object value, DateTimeZone hiveTimeZone) {
    if (value instanceof Date) {
        long storageTime = ((Date) value).getTime();
        // convert date from VM current time zone to UTC
        long utcMillis = storageTime + DateTimeZone.getDefault().getOffset(storageTime);
        return TimeUnit.MILLISECONDS.toDays(utcMillis);
    }
    if (value instanceof Timestamp) {
        // The Hive SerDe parses timestamps using the default time zone of
        // this JVM, but the data might have been written using a different
        // time zone. We need to convert it to the configured time zone.

        // the timestamp that Hive parsed using the JVM time zone
        long parsedJvmMillis = ((Timestamp) value).getTime();

        // remove the JVM time zone correction from the timestamp
        DateTimeZone jvmTimeZone = DateTimeZone.getDefault();
        long hiveMillis = jvmTimeZone.convertUTCToLocal(parsedJvmMillis);

        // convert to UTC using the real time zone for the underlying data
        long utcMillis = hiveTimeZone.convertLocalToUTC(hiveMillis, false);

        return utcMillis;
    }
    if (value instanceof Float) {
        return floatToRawIntBits(((Float) value));
    }
    return ((Number) value).longValue();
}
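The timestamp branch above rebases a value from the JVM default zone to the storage zone using two Joda-Time calls. As a standalone sketch of just that rebasing step (not Presto code; the storage zone ID here is an arbitrary example):

import org.joda.time.DateTimeZone;

public class TimestampRebaseSketch {
    public static void main(String[] args) {
        DateTimeZone jvmZone = DateTimeZone.getDefault();
        // hypothetical zone the data was originally written in
        DateTimeZone storageZone = DateTimeZone.forID("America/Los_Angeles");

        long parsedJvmMillis = System.currentTimeMillis();
        // drop the JVM-zone offset to recover the wall-clock ("local") millis...
        long localMillis = jvmZone.convertUTCToLocal(parsedJvmMillis);
        // ...then reinterpret that wall-clock time in the storage zone
        long utcMillis = storageZone.convertLocalToUTC(localMillis, false);

        System.out.println("parsed in JVM zone:      " + parsedJvmMillis);
        System.out.println("rebased to storage zone: " + utcMillis);
    }
}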
From source file:io.prestosql.plugin.hive.HiveMetadataFactory.java
License:Apache License
public HiveMetadataFactory(
        ExtendedHiveMetastore metastore,
        HdfsEnvironment hdfsEnvironment,
        HivePartitionManager partitionManager,
        DateTimeZone timeZone,
        int maxConcurrentFileRenames,
        boolean allowCorruptWritesForTesting,
        boolean skipDeletionForAlter,
        boolean skipTargetCleanupOnRollback,
        boolean writesToNonManagedTablesEnabled,
        boolean createsOfNonManagedTablesEnabled,
        long perTransactionCacheMaximumSize,
        int maxPartitions,
        TypeManager typeManager,
        LocationService locationService,
        TableParameterCodec tableParameterCodec,
        JsonCodec<PartitionUpdate> partitionUpdateCodec,
        ExecutorService executorService,
        TypeTranslator typeTranslator,
        String prestoVersion) {
    this.allowCorruptWritesForTesting = allowCorruptWritesForTesting;
    this.skipDeletionForAlter = skipDeletionForAlter;
    this.skipTargetCleanupOnRollback = skipTargetCleanupOnRollback;
    this.writesToNonManagedTablesEnabled = writesToNonManagedTablesEnabled;
    this.createsOfNonManagedTablesEnabled = createsOfNonManagedTablesEnabled;
    this.perTransactionCacheMaximumSize = perTransactionCacheMaximumSize;

    this.metastore = requireNonNull(metastore, "metastore is null");
    this.hdfsEnvironment = requireNonNull(hdfsEnvironment, "hdfsEnvironment is null");
    this.partitionManager = requireNonNull(partitionManager, "partitionManager is null");
    this.timeZone = requireNonNull(timeZone, "timeZone is null");
    this.typeManager = requireNonNull(typeManager, "typeManager is null");
    this.locationService = requireNonNull(locationService, "locationService is null");
    this.tableParameterCodec = requireNonNull(tableParameterCodec, "tableParameterCodec is null");
    this.partitionUpdateCodec = requireNonNull(partitionUpdateCodec, "partitionUpdateCodec is null");
    this.typeTranslator = requireNonNull(typeTranslator, "typeTranslator is null");
    this.prestoVersion = requireNonNull(prestoVersion, "prestoVersion is null");
    this.maxPartitions = maxPartitions;

    if (!allowCorruptWritesForTesting && !timeZone.equals(DateTimeZone.getDefault())) {
        log.warn("Hive writes are disabled. "
                + "To write data to Hive, your JVM timezone must match the Hive storage timezone. "
                + "Add -Duser.timezone=%s to your JVM arguments", timeZone.getID());
    }

    renameExecution = new BoundedExecutor(executorService, maxConcurrentFileRenames);
}
From source file:io.prestosql.plugin.hive.HiveQueryRunner.java
License:Apache License
public static DistributedQueryRunner createQueryRunner(
        Iterable<TpchTable<?>> tables,
        Map<String, String> extraProperties,
        String security,
        Map<String, String> extraHiveProperties)
        throws Exception {
    assertEquals(DateTimeZone.getDefault(), TIME_ZONE,
            "Timezone not configured correctly. Add -Duser.timezone=America/Bahia_Banderas to your JVM arguments");
    setupLogging();

    DistributedQueryRunner queryRunner = new DistributedQueryRunner(
            createSession(Optional.of(new SelectedRole(ROLE, Optional.of("admin")))), 4, extraProperties);
    try {
        queryRunner.installPlugin(new TpchPlugin());
        queryRunner.createCatalog("tpch", "tpch");

        File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("hive_data").toFile();

        HiveClientConfig hiveClientConfig = new HiveClientConfig();
        HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(
                new HdfsConfigurationInitializer(hiveClientConfig), ImmutableSet.of());
        HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hiveClientConfig,
                new NoHdfsAuthentication());

        FileHiveMetastore metastore = new FileHiveMetastore(hdfsEnvironment, baseDir.toURI().toString(), "test");
        metastore.createDatabase(createDatabaseMetastoreObject(TPCH_SCHEMA));
        metastore.createDatabase(createDatabaseMetastoreObject(TPCH_BUCKETED_SCHEMA));

        queryRunner.installPlugin(new HivePlugin(HIVE_CATALOG, Optional.of(metastore)));

        Map<String, String> hiveProperties = ImmutableMap.<String, String>builder()
                .putAll(extraHiveProperties)
                .put("hive.time-zone", TIME_ZONE.getID())
                .put("hive.security", security)
                .put("hive.max-partitions-per-scan", "1000")
                .put("hive.assume-canonical-partition-keys", "true")
                .put("hive.collect-column-statistics-on-write", "true")
                .build();
        Map<String, String> hiveBucketedProperties = ImmutableMap.<String, String>builder()
                .putAll(hiveProperties)
                .put("hive.max-initial-split-size", "10kB") // so that each bucket has multiple splits
                .put("hive.max-split-size", "10kB") // so that each bucket has multiple splits
                .put("hive.storage-format", "TEXTFILE") // so that there's no minimum split size for the file
                .put("hive.compression-codec", "NONE") // so that the file is splittable
                .build();
        queryRunner.createCatalog(HIVE_CATALOG, HIVE_CATALOG, hiveProperties);
        queryRunner.createCatalog(HIVE_BUCKETED_CATALOG, HIVE_CATALOG, hiveBucketedProperties);

        copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, createSession(Optional.empty()), tables);
        copyTpchTablesBucketed(queryRunner, "tpch", TINY_SCHEMA_NAME, createBucketedSession(Optional.empty()), tables);

        return queryRunner;
    }
    catch (Exception e) {
        queryRunner.close();
        throw e;
    }
}
From source file:io.prestosql.plugin.hive.parquet.AbstractTestParquetReader.java
License:Apache License
@BeforeClass
public void setUp() {
    assertEquals(DateTimeZone.getDefault(), HIVE_STORAGE_TIME_ZONE);

    // Parquet has excessive logging at INFO level
    parquetLogger = Logger.getLogger("org.apache.parquet.hadoop");
    parquetLogger.setLevel(Level.WARNING);
}