List of usage examples for the org.apache.hadoop.io.DataInputBuffer constructor:
public DataInputBuffer()
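Before the collected examples, here is a minimal round-trip sketch of the pattern they all share: serialize a Writable into a DataOutputBuffer, then point a DataInputBuffer at the resulting bytes with reset(). The Text value and class name are illustrative, not taken from any of the projects below.

import java.io.IOException;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;

public class DataInputBufferRoundTrip {
    public static void main(String[] args) throws IOException {
        Text original = new Text("hello");

        // Serialize into an in-memory, growable byte buffer.
        DataOutputBuffer out = new DataOutputBuffer();
        original.write(out);

        // Point a DataInputBuffer at the serialized bytes; reset() wraps
        // the existing array rather than copying it.
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), 0, out.getLength());

        Text copy = new Text();
        copy.readFields(in);
        System.out.println(copy); // prints "hello"
    }
}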
From source file:org.apache.accumulo.server.tabletserver.log.MultiReader.java
License:Apache License
private static void copy(Writable src, Writable dest) throws IOException {
    // Not exactly efficient: serialize src into an in-memory buffer...
    DataOutputBuffer output = new DataOutputBuffer();
    src.write(output);
    // ...then read the same bytes back into dest.
    DataInputBuffer input = new DataInputBuffer();
    input.reset(output.getData(), output.getLength());
    dest.readFields(input);
}
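A hypothetical call site for the helper above; the Text values are illustrative and do not appear in the Accumulo source:

Text src = new Text("tablet-log-entry");
Text dest = new Text();
copy(src, dest); // dest now holds an independent deep copy of src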
From source file:org.apache.blur.lucene.serializer.ProtoSerializer.java
License:Apache License
public static void main(String[] args) throws ParseException, IOException {
    QueryParser parser = new QueryParser(Version.LUCENE_40, "", new StandardAnalyzer(Version.LUCENE_40));
    Query query = parser.parse("a:v1 b:v2 c:v3~ c:asda*asda");
    SuperQuery superQuery = new SuperQuery(query, ScoreType.SUPER, new Term("_primedoc_"));

    // Serialize the query.
    QueryWritable queryWritable = new QueryWritable(superQuery);
    DataOutputBuffer buffer = new DataOutputBuffer();
    queryWritable.write(buffer);
    buffer.close();
    System.out.println(new String(buffer.getData(), 0, buffer.getLength()));

    // Deserialize it again from the same bytes.
    QueryWritable qw = new QueryWritable();
    DataInputBuffer in = new DataInputBuffer();
    in.reset(buffer.getData(), 0, buffer.getLength());
    qw.readFields(in);

    System.out.println("------------");
    System.out.println(qw.getQuery());
    System.out.println("------------");

    // Benchmark loop; runs until the process is killed.
    while (true) {
        run(superQuery);
    }
}
From source file:org.apache.blur.lucene.serializer.ProtoSerializer.java
License:Apache License
private static void run(Query query) throws IOException {
    DataOutputBuffer buffer = new DataOutputBuffer();
    DataInputBuffer in = new DataInputBuffer();
    QueryWritable outQw = new QueryWritable();
    QueryWritable inQw = new QueryWritable();
    long s = System.nanoTime();
    int count = 100000;
    for (int i = 0; i < count; i++) {
        outQw.setQuery(query);
        outQw.write(buffer);
        in.reset(buffer.getData(), 0, buffer.getLength());
        inQw.readFields(in);
        buffer.reset();
    }
    long e = System.nanoTime();
    // Average serialize/deserialize round-trip time in milliseconds per query.
    System.out.println((e - s) / 1000000.0 / (double) count);
    // System.out.println((e - s) / (double) count);
}
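Note the buffer.reset() at the end of each iteration: DataOutputBuffer.reset() rewinds the write position to zero so the same backing array is reused on every pass. Without it the output buffer would keep growing, and the loop would measure allocation as much as serialization.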
From source file:org.apache.blur.lucene.serializer.QueryWritableTest.java
License:Apache License
@Test
public void testTermQuery() throws IOException {
    TermQuery query = new TermQuery(new Term("field", "value"));
    QueryWritable queryWritable = new QueryWritable();
    queryWritable.setQuery(query);
    DataOutputBuffer out = new DataOutputBuffer();
    queryWritable.write(out);
    byte[] data = out.getData();
    int length = out.getLength();
    DataInputBuffer in = new DataInputBuffer();
    in.reset(data, length);
    QueryWritable newQueryWritable = new QueryWritable();
    newQueryWritable.readFields(in);
    Query termQuery = newQueryWritable.getQuery();
    assertEquals(query, termQuery);
}
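DataInputBuffer.reset() has both a two-argument form, reset(byte[] input, int length), used in this test, and a three-argument form, reset(byte[] input, int start, int length), used in the ProtoSerializer examples above. The two-argument form simply starts reading at offset 0.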
From source file:org.apache.gobblin.compaction.mapreduce.orc.OrcKeyComparator.java
License:Apache License
@Override
public void setConf(Configuration conf) {
    super.setConf(conf);
    if (null != conf) {
        // The MapReduce framework will be using this comparator to sort OrcKey objects
        // output from the map phase, so use the schema defined for the map output key
        // and the data model non-raw compare() implementation.
        schema = TypeDescription.fromString(conf.get(OrcConf.MAPRED_SHUFFLE_KEY_SCHEMA.getAttribute()));
        OrcStruct orcRecordModel1 = (OrcStruct) OrcStruct.createValue(schema);
        OrcStruct orcRecordModel2 = (OrcStruct) OrcStruct.createValue(schema);
        if (key1 == null) {
            key1 = new OrcKey();
        }
        if (key2 == null) {
            key2 = new OrcKey();
        }
        if (buffer == null) {
            buffer = new DataInputBuffer();
        }
        key1.key = orcRecordModel1;
        key2.key = orcRecordModel2;
    }
}
From source file:org.apache.gora.util.IOUtils.java
License:Apache License
/**
 * Deserializes the object in the given data input using
 * available Hadoop serializations.
 * @throws IOException
 * @throws ClassNotFoundException
 */
public static <T> T deserialize(Configuration conf, byte[] in, T obj) throws IOException, ClassNotFoundException {
    // Wrap the raw bytes in a DataInputBuffer and delegate to the stream-based overload.
    DataInputBuffer buffer = new DataInputBuffer();
    buffer.reset(in, in.length);
    return deserialize(conf, buffer, obj);
}
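A hedged usage sketch for the helper above. It assumes a matching write-side IOUtils.serialize(conf, obj) counterpart in the same Gora class, which is not shown in this listing:

Configuration conf = new Configuration();
Text original = new Text("payload");
// serialize(...) is assumed as the counterpart of the deserialize(...) above.
byte[] bytes = IOUtils.serialize(conf, original);
Text restored = IOUtils.deserialize(conf, bytes, new Text());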
From source file:org.apache.gora.util.TestIOUtils.java
License:Apache License
private void testNullFieldsWith(Object... values) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    DataInputBuffer in = new DataInputBuffer();
    IOUtils.writeNullFieldsInfo(out, values);
    in.reset(out.getData(), out.getLength());
    boolean[] ret = IOUtils.readNullFieldsInfo(in);
    // assert
    assertEquals(values.length, ret.length);
    for (int i = 0; i < values.length; i++) {
        assertEquals(values[i] == null, ret[i]);
    }
}
From source file:org.apache.hama.bsp.BSPPeerImpl.java
License:Apache License
@SuppressWarnings("unchecked")
public final void initInput() throws IOException {
    InputSplit inputSplit = null;
    // reinstantiate the split
    try {
        if (splitClass != null) {
            inputSplit = (InputSplit) ReflectionUtils.newInstance(conf.getClassByName(splitClass), conf);
        }
    } catch (ClassNotFoundException exp) {
        IOException wrap = new IOException("Split class " + splitClass + " not found");
        wrap.initCause(exp);
        throw wrap;
    }

    if (inputSplit != null) {
        DataInputBuffer splitBuffer = new DataInputBuffer();
        splitBuffer.reset(split.getBytes(), 0, split.getLength());
        inputSplit.readFields(splitBuffer);
        if (in != null) {
            in.close();
        }
        in = new TrackedRecordReader<K1, V1>(bspJob.getInputFormat().getRecordReader(inputSplit, bspJob),
                getCounter(BSPPeerImpl.PeerCounter.TASK_INPUT_RECORDS),
                getCounter(BSPPeerImpl.PeerCounter.IO_BYTES_READ));
        this.splitSize = inputSplit.getLength();
    }
}
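The split-rehydration pattern here, reset() over the raw split bytes followed by readFields() on a freshly reflected instance, is the standard Hadoop idiom for rebuilding a Writable whose concrete class is only known at runtime.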
From source file:org.apache.hama.bsp.TestClusterStatus.java
License:Apache License
public final void testWriteAndReadFields() throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    DataInputBuffer in = new DataInputBuffer();
    ClusterStatus status1;
    Map<String, GroomServerStatus> grooms = new HashMap<String, GroomServerStatus>();
    for (int i = 0; i < 10; i++) {
        int num = rnd.nextInt();
        String groomName = "groom_" + num;
        String peerName = "peerhost:" + num;
        grooms.put(groomName, new GroomServerStatus(peerName, new ArrayList<TaskStatus>(0), 25, 2));
    }
    int tasks = rnd.nextInt(100);
    int maxTasks = rnd.nextInt(100);
    BSPMaster.State state = BSPMaster.State.RUNNING;
    status1 = new ClusterStatus(grooms, tasks, maxTasks, state);
    status1.write(out);
    in.reset(out.getData(), out.getLength());
    ClusterStatus status2 = new ClusterStatus();
    status2.readFields(in);
    for (Entry<String, GroomServerStatus> entry : status2.getActiveGroomServerStatus().entrySet()) {
        assertEquals(entry.getValue().getMaxTasks(), 2);
        assertEquals(entry.getValue().getFailures(), 25);
    }
    Map<String, String> grooms_s = new HashMap<String, String>(status1.getActiveGroomNames());
    Map<String, String> grooms_o = new HashMap<String, String>(status2.getActiveGroomNames());
    assertEquals(status1.getGroomServers(), status2.getGroomServers());
    assertTrue(grooms_s.entrySet().containsAll(grooms_o.entrySet()));
    assertTrue(grooms_o.entrySet().containsAll(grooms_s.entrySet()));
    assertEquals(status1.getTasks(), status2.getTasks());
    assertEquals(status1.getMaxTasks(), status2.getMaxTasks());
}
From source file:org.apache.hyracks.dataflow.hadoop.mapreduce.KVIterator.java
License:Apache License
public KVIterator(HadoopHelper helper, RecordDescriptor recordDescriptor) {
    this.helper = helper;
    accessor = new FrameTupleAccessor(recordDescriptor);
    kBuffer = new DataInputBuffer();
    vBuffer = new DataInputBuffer();
}
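Only the constructor appears in this listing; presumably the iterator's advance logic resets kBuffer and vBuffer over the key and value byte ranges of each tuple before handing them to the deserializers, but that code is not shown here.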