List of usage examples for org.joda.time.DateTimeZone.forOffsetMillis
public static DateTimeZone forOffsetMillis(int millisOffset)
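forOffsetMillis returns a fixed-offset time zone for the given offset from UTC, expressed in milliseconds; the resulting zone has no region name and no daylight-saving rules. A minimal, self-contained sketch of a call (the offset value and class name are illustrative only):

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

public class ForOffsetMillisDemo {
    public static void main(String[] args) {
        // +05:30 expressed in milliseconds: (5 h * 60 + 30 min) * 60 s * 1000 ms
        int offsetMillis = ((5 * 60) + 30) * 60 * 1000;
        DateTimeZone zone = DateTimeZone.forOffsetMillis(offsetMillis);

        // The returned zone is fixed: it never observes daylight saving time
        System.out.println(new DateTime(0L, zone)); // 1970-01-01T05:30:00.000+05:30
    }
}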
From source file: org.apache.marmotta.kiwi.io.KiWiIO.java
License: Apache License
/**
 * Read a KiWiDateLiteral serialized with writeDateLiteral from a DataInput source
 *
 * @param input the source
 * @return the de-serialized KiWiDateLiteral
 * @throws IOException
 */
public static KiWiDateLiteral readDateLiteral(DataInput input) throws IOException {
    long id = input.readLong();

    if (id == -1) {
        return null;
    } else {
        DateTime content = new DateTime(input.readLong(), DateTimeZone.forOffsetMillis(input.readInt()));
        KiWiUriResource dtype = readURI(input);
        Date created = new Date(input.readLong());

        KiWiDateLiteral r = new KiWiDateLiteral(content, dtype, created);
        r.setId(id);

        return r;
    }
}
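The reader above expects the writer to have emitted, in order: id (long), instant millis (long), zone offset in milliseconds (int), the datatype URI, and the creation timestamp (long). For orientation, here is a hedged sketch of what a compatible writeDateLiteral counterpart could look like; the accessor names are inferred from the setters and the readURI call above, not taken from the real KiWiIO source:

// Hypothetical counterpart sketch; the field order must mirror readDateLiteral
public static void writeDateLiteral(DataOutput output, KiWiDateLiteral literal) throws IOException {
    if (literal == null) {
        output.writeLong(-1L);                                // sentinel id for null
    } else {
        DateTime content = literal.getDateContent();          // assumed getter
        output.writeLong(literal.getId());
        output.writeLong(content.getMillis());
        output.writeInt(content.getZone().getOffset(content.getMillis()));
        writeURI(output, literal.getType());                  // assumed symmetric to readURI
        output.writeLong(literal.getCreated().getTime());     // assumed getter
    }
}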
From source file: org.apache.marmotta.kiwi.persistence.KiWiConnection.java
License: Apache License
/**
 * Construct an appropriate KiWiNode from the result of an SQL query. The method will not change the
 * ResultSet iterator, only read its values, so it needs to be executed for each row separately.
 *
 * @param row
 * @return
 */
protected KiWiNode constructNodeFromDatabase(ResultSet row) throws SQLException {
    // column order: id, ntype, svalue, ivalue, dvalue, tvalue, tzoffset, bvalue, lang, ltype, createdAt
    //               1   2      3       4       5       6       7         8       9     10     11

    long id = row.getLong(1);

    // lookup element in cache first, so we can avoid reconstructing it if it is already there
    KiWiNode cached = nodeCache.get(id);
    if (cached != null) {
        return cached;
    }

    String ntype = row.getString(2);
    if ("uri".equals(ntype)) {
        KiWiUriResource result = new KiWiUriResource(row.getString(3),
                new Date(row.getTimestamp(11, calendarUTC).getTime()));
        result.setId(id);

        cacheNode(result);
        return result;
    } else if ("bnode".equals(ntype)) {
        KiWiAnonResource result = new KiWiAnonResource(row.getString(3),
                new Date(row.getTimestamp(11, calendarUTC).getTime()));
        result.setId(id);

        cacheNode(result);
        return result;
    } else if ("string".equals(ntype)) {
        final KiWiStringLiteral result = new KiWiStringLiteral(row.getString(3),
                new Date(row.getTimestamp(11, calendarUTC).getTime()));
        result.setId(id);

        if (row.getString(9) != null) {
            result.setLocale(getLocale(row.getString(9)));
        }
        if (row.getLong(10) != 0) {
            result.setType((KiWiUriResource) loadNodeById(row.getLong(10)));
        }

        cacheNode(result);
        return result;
    } else if ("int".equals(ntype)) {
        KiWiIntLiteral result = new KiWiIntLiteral(row.getLong(4), null,
                new Date(row.getTimestamp(11, calendarUTC).getTime()));
        result.setId(id);

        if (row.getLong(10) != 0) {
            result.setType((KiWiUriResource) loadNodeById(row.getLong(10)));
        }

        cacheNode(result);
        return result;
    } else if ("double".equals(ntype)) {
        KiWiDoubleLiteral result = new KiWiDoubleLiteral(row.getDouble(5), null,
                new Date(row.getTimestamp(11, calendarUTC).getTime()));
        result.setId(id);

        if (row.getLong(10) != 0) {
            result.setType((KiWiUriResource) loadNodeById(row.getLong(10)));
        }

        cacheNode(result);
        return result;
    } else if ("boolean".equals(ntype)) {
        KiWiBooleanLiteral result = new KiWiBooleanLiteral(row.getBoolean(8), null,
                new Date(row.getTimestamp(11, calendarUTC).getTime()));
        result.setId(id);

        if (row.getLong(10) != 0) {
            result.setType((KiWiUriResource) loadNodeById(row.getLong(10)));
        }

        cacheNode(result);
        return result;
    } else if ("date".equals(ntype)) {
        KiWiDateLiteral result = new KiWiDateLiteral();
        result.setCreated(new Date(row.getTimestamp(11, calendarUTC).getTime()));

        // tzoffset is evidently stored in seconds, while forOffsetMillis expects milliseconds
        DateTime dvalue = new DateTime(row.getTimestamp(6, calendarUTC).getTime(),
                DateTimeZone.forOffsetMillis(row.getInt(7) * 1000));

        if (row.getLong(10) != 0) {
            result.setType((KiWiUriResource) loadNodeById(row.getLong(10)));
        }

        result.setId(id);
        result.setDateContent(dvalue);

        cacheNode(result);
        return result;
    } else {
        throw new IllegalArgumentException(
                "unknown node type in database result for node id " + id + ": " + ntype);
    }
}
From source file: org.apache.pig.backend.hadoop.DateTimeWritable.java
License: Apache License
public void readFields(DataInput in) throws IOException {
    value = new DateTime(in.readLong(), DateTimeZone.forOffsetMillis(in.readShort() * ONE_MINUTE));
}
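DateTimeWritable encodes the zone offset as a count of whole minutes so it fits in a short (ONE_MINUTE is evidently 60000, given the multiplication back to milliseconds here). A hedged sketch of the matching write half, assuming value is the wrapped DateTime:

public void write(DataOutput out) throws IOException {
    out.writeLong(value.getMillis());
    // store the offset in whole minutes; real-world offsets fit easily in a short
    out.writeShort(value.getZone().getOffset(value.getMillis()) / ONE_MINUTE);
}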
From source file: org.apache.pig.backend.hadoop.executionengine.fetch.FetchLauncher.java
License: Apache License
private void init(PhysicalPlan pp, POStore poStore) throws IOException {
    poStore.setStoreImpl(new FetchPOStoreImpl(pigContext));
    poStore.setUp();

    TaskAttemptID taskAttemptID = HadoopShims.getNewTaskAttemptID();
    HadoopShims.setTaskAttemptId(conf, taskAttemptID);

    if (!PlanHelper.getPhysicalOperators(pp, POStream.class).isEmpty()) {
        MapRedUtil.setupStreamingDirsConfSingle(poStore, pigContext, conf);
    }

    String currentTime = Long.toString(System.currentTimeMillis());
    conf.set("pig.script.submitted.timestamp", currentTime);
    conf.set("pig.job.submitted.timestamp", currentTime);

    PhysicalOperator.setReporter(new FetchProgressableReporter());
    SchemaTupleBackend.initialize(conf, pigContext);

    UDFContext udfContext = UDFContext.getUDFContext();
    udfContext.addJobConf(conf);
    udfContext.setClientSystemProps(pigContext.getProperties());
    udfContext.serialize(conf);

    PigMapReduce.sJobConfInternal.set(conf);
    String dtzStr = conf.get("pig.datetime.default.tz");
    if (dtzStr != null && dtzStr.length() > 0) {
        // ensure that the internal timezone is uniformly in UTC offset style
        DateTimeZone.setDefault(DateTimeZone.forOffsetMillis(DateTimeZone.forID(dtzStr).getOffset(null)));
    }

    boolean aggregateWarning = "true".equalsIgnoreCase(conf.get("aggregate.warning"));
    PigStatusReporter pigStatusReporter = PigStatusReporter.getInstance();
    pigStatusReporter.setContext(new FetchTaskContext(new FetchContext()));
    PigHadoopLogger pigHadoopLogger = PigHadoopLogger.getInstance();
    pigHadoopLogger.setReporter(pigStatusReporter);
    pigHadoopLogger.setAggregate(aggregateWarning);
    PhysicalOperator.setPigLogger(pigHadoopLogger);
}
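The pig.datetime.default.tz block above uses an idiom that recurs in the Pig examples below: a named zone id is collapsed to the fixed UTC offset it has at the current instant (passing null to getOffset means "offset now"), deliberately discarding DST rules so that every task resolves times identically. The same conversion in isolation, as a standalone sketch:

import org.joda.time.DateTimeZone;

public class TzNormalize {
    public static void main(String[] args) {
        DateTimeZone named = DateTimeZone.forID("America/Los_Angeles");

        // getOffset(null) evaluates the offset at the current instant,
        // so the result is -08:00 or -07:00 depending on when this runs
        DateTimeZone fixed = DateTimeZone.forOffsetMillis(named.getOffset(null));
        System.out.println(fixed.getID());
    }
}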
From source file: org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigGenericMapBase.java
License: Apache License
/**
 * Configures the mapper with the map plan and the
 * reporter thread
 */
@SuppressWarnings("unchecked")
@Override
public void setup(Context context) throws IOException, InterruptedException {
    super.setup(context);

    Configuration job = context.getConfiguration();
    SpillableMemoryManager.configure(ConfigurationUtil.toProperties(job));
    PigMapReduce.sJobContext = context;
    PigMapReduce.sJobConfInternal.set(context.getConfiguration());
    PigMapReduce.sJobConf = context.getConfiguration();
    inIllustrator = inIllustrator(context);

    PigContext.setPackageImportList((ArrayList<String>) ObjectSerializer.deserialize(job.get("udf.import.list")));
    pigContext = (PigContext) ObjectSerializer.deserialize(job.get("pig.pigContext"));

    // This attempts to fetch all of the generated code from the distributed cache, and resolve it
    SchemaTupleBackend.initialize(job, pigContext);

    if (pigContext.getLog4jProperties() != null)
        PropertyConfigurator.configure(pigContext.getLog4jProperties());

    if (mp == null)
        mp = (PhysicalPlan) ObjectSerializer.deserialize(job.get("pig.mapPlan"));
    stores = PlanHelper.getPhysicalOperators(mp, POStore.class);

    // To be removed
    if (mp.isEmpty())
        log.debug("Map Plan empty!");
    else {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        mp.explain(baos);
        log.debug(baos.toString());
    }
    keyType = ((byte[]) ObjectSerializer.deserialize(job.get("pig.map.keytype")))[0];
    // till here

    pigReporter = new ProgressableReporter();
    // Get the UDF specific context
    MapRedUtil.setupUDFContext(job);

    if (!(mp.isEmpty())) {
        PigSplit split = (PigSplit) context.getInputSplit();
        List<OperatorKey> targetOpKeys = split.getTargetOps();

        ArrayList<PhysicalOperator> targetOpsAsList = new ArrayList<PhysicalOperator>();
        for (OperatorKey targetKey : targetOpKeys) {
            targetOpsAsList.add(mp.getOperator(targetKey));
        }
        roots = targetOpsAsList.toArray(new PhysicalOperator[1]);
        leaf = mp.getLeaves().get(0);
    }

    PigStatusReporter pigStatusReporter = PigStatusReporter.getInstance();
    pigStatusReporter.setContext(new MRTaskContext(context));

    log.info("Aliases being processed per job phase (AliasName[line,offset]): " + job.get("pig.alias.location"));

    String dtzStr = PigMapReduce.sJobConfInternal.get().get("pig.datetime.default.tz");
    if (dtzStr != null && dtzStr.length() > 0) {
        // ensure that the internal timezone is uniformly in UTC offset style
        DateTimeZone.setDefault(DateTimeZone.forOffsetMillis(DateTimeZone.forID(dtzStr).getOffset(null)));
    }
}
From source file: org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigGenericMapBaseRollupSample.java
License: Apache License
/**
 * Configures the mapper with the map plan and the
 * reporter thread
 */
@SuppressWarnings("unchecked")
@Override
public void setup(Context context) throws IOException, InterruptedException {
    super.setup(context);

    Configuration job = context.getConfiguration();
    SpillableMemoryManager.configure(ConfigurationUtil.toProperties(job));
    PigMapReduce.sJobContext = context;
    PigMapReduce.sJobConfInternal.set(context.getConfiguration());
    PigMapReduce.sJobConf = context.getConfiguration();
    inIllustrator = inIllustrator(context);

    PigContext.setPackageImportList((ArrayList<String>) ObjectSerializer.deserialize(job.get("udf.import.list")));
    pigContext = (PigContext) ObjectSerializer.deserialize(job.get("pig.pigContext"));

    // This attempts to fetch all of the generated code from the distributed cache, and resolve it
    SchemaTupleBackend.initialize(job, pigContext);

    if (pigContext.getLog4jProperties() != null)
        PropertyConfigurator.configure(pigContext.getLog4jProperties());

    if (mp == null)
        mp = (PhysicalPlan) ObjectSerializer.deserialize(job.get("pig.mapPlan"));
    stores = PlanHelper.getPhysicalOperators(mp, POStore.class);

    // To be removed
    if (mp.isEmpty())
        log.debug("Map Plan empty!");
    else {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        mp.explain(baos);
        log.debug(baos.toString());
    }
    keyType = ((byte[]) ObjectSerializer.deserialize(job.get("pig.map.keytype")))[0];
    // till here

    pigReporter = new ProgressableReporter();
    // Get the UDF specific context
    MapRedUtil.setupUDFContext(job);

    if (!(mp.isEmpty())) {
        PigSplit split = (PigSplit) context.getInputSplit();
        List<OperatorKey> targetOpKeys = split.getTargetOps();

        ArrayList<PhysicalOperator> targetOpsAsList = new ArrayList<PhysicalOperator>();
        for (OperatorKey targetKey : targetOpKeys) {
            targetOpsAsList.add(mp.getOperator(targetKey));
        }
        roots = targetOpsAsList.toArray(new PhysicalOperator[1]);
        leaf = mp.getLeaves().get(0);
    }

    PigStatusReporter.setContext(context);

    log.info("Aliases being processed per job phase (AliasName[line,offset]): " + job.get("pig.alias.location"));

    String dtzStr = PigMapReduce.sJobConfInternal.get().get("pig.datetime.default.tz");
    if (dtzStr != null && dtzStr.length() > 0) {
        // ensure that the internal timezone is uniformly in UTC offset style
        DateTimeZone.setDefault(DateTimeZone.forOffsetMillis(DateTimeZone.forID(dtzStr).getOffset(null)));
    }
}
From source file: org.apache.pig.builtin.ToDate3ARGS.java
License: Apache License
public DateTime exec(Tuple input) throws IOException {
    if (input == null || input.size() < 1 || input.get(0) == null) {
        return null;
    }

    DateTimeFormatter dtf = DateTimeFormat.forPattern(DataType.toString(input.get(1)));
    DateTimeZone dtz = DateTimeZone
            .forOffsetMillis(DateTimeZone.forID(DataType.toString(input.get(2))).getOffset(null));
    return dtf.withZone(dtz).parseDateTime(DataType.toString(input.get(0)));
}
From source file: org.apache.pig.data.BinInterSedes.java
License: Apache License
/**
 * Expects binInterSedes data types (NOT DataType types!)
 * <p>
 *
 * @see org.apache.pig.data.InterSedes#readDatum(java.io.DataInput, byte)
 */
@Override
public Object readDatum(DataInput in, byte type) throws IOException, ExecException {
    switch (type) {
    case TUPLE_0:
    case TUPLE_1:
    case TUPLE_2:
    case TUPLE_3:
    case TUPLE_4:
    case TUPLE_5:
    case TUPLE_6:
    case TUPLE_7:
    case TUPLE_8:
    case TUPLE_9:
    case TUPLE:
    case TINYTUPLE:
    case SMALLTUPLE:
        return SedesHelper.readGenericTuple(in, type);
    case BAG:
    case TINYBAG:
    case SMALLBAG:
        return readBag(in, type);
    case MAP:
    case TINYMAP:
    case SMALLMAP:
        return readMap(in, type);
    case INTERNALMAP:
        return readInternalMap(in);
    case INTEGER_0:
        return Integer.valueOf(0);
    case INTEGER_1:
        return Integer.valueOf(1);
    case INTEGER_INBYTE:
        return Integer.valueOf(in.readByte());
    case INTEGER_INSHORT:
        return Integer.valueOf(in.readShort());
    case INTEGER:
        return Integer.valueOf(in.readInt());
    case LONG_0:
        return Long.valueOf(0);
    case LONG_1:
        return Long.valueOf(1);
    case LONG_INBYTE:
        return Long.valueOf(in.readByte());
    case LONG_INSHORT:
        return Long.valueOf(in.readShort());
    case LONG_ININT:
        return Long.valueOf(in.readInt());
    case LONG:
        return Long.valueOf(in.readLong());
    case DATETIME:
        return new DateTime(in.readLong(), DateTimeZone.forOffsetMillis(in.readShort() * ONE_MINUTE));
    case FLOAT:
        return Float.valueOf(in.readFloat());
    case DOUBLE:
        return Double.valueOf(in.readDouble());
    case BIGINTEGER:
        return readBigInteger(in);
    case BIGDECIMAL:
        return readBigDecimal(in);
    case BOOLEAN_TRUE:
        return Boolean.valueOf(true);
    case BOOLEAN_FALSE:
        return Boolean.valueOf(false);
    case BYTE:
        return Byte.valueOf(in.readByte());
    case TINYBYTEARRAY:
    case SMALLBYTEARRAY:
    case BYTEARRAY:
        return new DataByteArray(SedesHelper.readBytes(in, type));
    case CHARARRAY:
    case SMALLCHARARRAY:
        return SedesHelper.readChararray(in, type);
    case GENERIC_WRITABLECOMPARABLE:
        return readWritable(in);
    case SCHEMA_TUPLE_BYTE_INDEX:
    case SCHEMA_TUPLE_SHORT_INDEX:
    case SCHEMA_TUPLE:
        return readSchemaTuple(in, type);
    case NULL:
        return null;
    default:
        throw new RuntimeException("Unexpected data type " + type + " found in stream.");
    }
}
From source file: org.apache.pig.data.DataReaderWriter.java
License: Apache License
public static Object readDatum(DataInput in, byte type) throws IOException, ExecException {
    switch (type) {
    case DataType.TUPLE:
        return bytesToTuple(in);
    case DataType.BAG:
        return bytesToBag(in);
    case DataType.MAP:
        return bytesToMap(in);
    case DataType.INTERNALMAP:
        return bytesToInternalMap(in);
    case DataType.INTEGER:
        return Integer.valueOf(in.readInt());
    case DataType.LONG:
        return Long.valueOf(in.readLong());
    case DataType.FLOAT:
        return Float.valueOf(in.readFloat());
    case DataType.DOUBLE:
        return Double.valueOf(in.readDouble());
    case DataType.BIGINTEGER:
        return new BigInteger(((DataByteArray) readDatum(in, in.readByte())).get());
    case DataType.BIGDECIMAL:
        return new BigDecimal((String) readDatum(in, in.readByte()));
    case DataType.BOOLEAN:
        return Boolean.valueOf(in.readBoolean());
    case DataType.BYTE:
        return Byte.valueOf(in.readByte());
    case DataType.DATETIME:
        return new DateTime(in.readLong(), DateTimeZone.forOffsetMillis(in.readShort() * ONE_MINUTE));
    case DataType.BYTEARRAY: {
        int size = in.readInt();
        byte[] ba = new byte[size];
        in.readFully(ba);
        return new DataByteArray(ba);
    }
    case DataType.BIGCHARARRAY:
        return bytesToBigCharArray(in);
    case DataType.CHARARRAY:
        return bytesToCharArray(in);
    case DataType.GENERIC_WRITABLECOMPARABLE:
        return bytesToWritable(in);
    case DataType.NULL:
        return null;
    default:
        throw new RuntimeException("Unexpected data type " + type + " found in stream.");
    }
}
From source file: org.apache.pig.data.SchemaTuple.java
License: Apache License
protected static DateTime read(DataInput in, DateTime v) throws IOException {
    return new DateTime(in.readLong(), DateTimeZone.forOffsetMillis(in.readShort() * ONE_MINUTE));
}