List of usage examples for org.apache.hadoop.io.LongWritable.set
public void set(long value)
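Before the examples, a minimal sketch of the method's contract: set(long) overwrites the primitive wrapped by an existing LongWritable in place, which is why the snippets below reuse a single instance across loop iterations instead of allocating a new object per record. LongWritable, set(), and get() are the real Hadoop API; the class and variable names in this sketch are illustrative only.

import org.apache.hadoop.io.LongWritable;

public class LongWritableSetDemo {
    public static void main(String[] args) {
        // One mutable wrapper, reused for every value.
        LongWritable counter = new LongWritable(); // wraps 0 by default
        for (long i = 0; i < 3; i++) {
            counter.set(i);                        // overwrite the wrapped long in place
            System.out.println(counter.get());     // prints 0, 1, 2
        }
    }
}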
From source file: org.apache.giraph.types.LongToLongWritableWrapper.java
License: Apache License

@Override
public void wrap(Long javaValue, LongWritable writableValue) {
    writableValue.set(javaValue);
}
From source file: org.apache.giraph.types.ops.collections.array.WLongArrayList.java
License: Apache License

@Override
public void getIntoW(int index, LongWritable to) {
    to.set(getLong(index));
}
From source file: org.apache.giraph.types.ops.collections.array.WLongArrayList.java
License: Apache License

@Override
public void popIntoW(LongWritable to) {
    to.set(popLong());
}
From source file: org.apache.giraph.types.ops.LongTypeOps.java
License: Apache License

@Override
public void set(LongWritable to, LongWritable from) {
    to.set(from.get());
}
From source file: org.apache.giraph.types.ShortToLongWritableWrapper.java
License: Apache License

@Override
public void wrap(Short javaValue, LongWritable writableValue) {
    writableValue.set(javaValue);
}
From source file: org.apache.giraph.writable.kryo.KryoWritableTest.java
License: Apache License

@Test
public void testLongWritable() throws Exception {
    LongWritable from = new LongWritable(0);
    LongWritable to = new LongWritable(0);
    for (int i = 0; i < longTestTimes; i++) {
        from.set(i);
        WritableUtils.copyInto(from, to, true);
        assertEquals(i, to.get());
    }
}
From source file: org.apache.giraph.writable.kryo.KryoWritableTest.java
License: Apache License

@Test
public void testLongListWritable() throws Exception {
    WLongArrayList from = new WLongArrayList(longListTestSize);
    LongWritable value = new LongWritable();
    for (int i = 0; i < longListTestSize; i++) {
        value.set(i);
        from.addW(value);
    }
    WLongArrayList to = new WLongArrayList(longListTestSize);
    value.set(0);
    for (int i = 0; i < longListTestTimes; i++) {
        from.setW((2 * i) % longListTestSize, value);
        WritableUtils.copyInto(from, to, true);
    }
}
From source file: org.apache.hama.bsp.LineRecordReader.java
License: Apache License

/** Read a line. */
@Override
public synchronized boolean next(LongWritable key, Text value) throws IOException {
    while (pos < end) {
        key.set(pos);
        int newSize = in.readLine(value, maxLineLength,
                Math.max((int) Math.min(Integer.MAX_VALUE, end - pos), maxLineLength));
        if (newSize == 0) {
            return false;
        }
        pos += newSize;
        if (newSize < maxLineLength) {
            return true;
        }
        // line too long. try again
        LOG.info("Skipped line of size " + newSize + " at pos " + (pos - newSize));
    }
    return false;
}
From source file: org.apache.hama.computemodel.mapreduce.Mapper.java
License: Apache License

@Override
protected void compute(
        BSPPeer<K1, V1, K2, V2, WritableKeyValues<? extends WritableComparable<?>, ? extends Writable>> peer)
        throws IOException {
    this.memoryQueue = new PriorityQueue<WritableKeyValues<K2, V2>>();
    this.globalKeyDistribution = new long[peer.getNumPeers()][peer.getNumPeers()];

    int myId = peer.getPeerId();
    OutputCollector<K2, V2> collector = new BSPMapperOutputCollector<K1, V1, K2, V2>(
            peer, memoryQueue, globalKeyDistribution[myId]);

    // Run the user map() over every record in this peer's split.
    KeyValuePair<K1, V1> record = null;
    while ((record = peer.readNext()) != null) {
        map(record.getKey(), record.getValue(), collector);
    }

    Comparator<V2> valComparator = null;
    Configuration conf = peer.getConfiguration();
    Class<?> comparatorClass = conf.getClass(VALUE_COMPARATOR_CLASS, null);
    if (comparatorClass != null) {
        valComparator = (Comparator<V2>) ReflectionUtils.newInstance(comparatorClass, conf);
    }

    Reducer<K2, V2, K2, V2> combiner = null;
    Class<?> combinerClass = conf.getClass(COMBINER_CLASS, null);
    if (combinerClass != null) {
        combiner = (Reducer<K2, V2, K2, V2>) ReflectionUtils.newInstance(combinerClass, conf);
    }

    // Combine and sort the in-memory outputs on a background thread.
    ExecutorService service = Executors.newFixedThreadPool(1);
    Future<Integer> future = service.submit(new CombineAndSortThread<K2, V2>(
            peer.getConfiguration(), this.memoryQueue, valComparator, combiner));

    // Broadcast this peer's per-key counts to every peer; one mutable
    // LongWritable is reused via set() across all iterations.
    String[] peers = peer.getAllPeerNames();
    IntWritable keyPartition = new IntWritable();
    LongWritable value = new LongWritable();
    WritableKeyValues<IntWritable, IntWritable> myIdTuple = new WritableKeyValues<IntWritable, IntWritable>(
            new IntWritable(peer.getPeerId()), new IntWritable(-1));

    int peerId = peer.getPeerId();
    for (int keyNumber = 0; keyNumber < globalKeyDistribution[0].length; ++keyNumber) {
        keyPartition.set(keyNumber);
        value.set(globalKeyDistribution[peerId][keyNumber]);
        myIdTuple.setValue(keyPartition);
        for (String peerName : peers) {
            peer.send(peerName,
                    new WritableKeyValues<WritableKeyValues<IntWritable, IntWritable>, LongWritable>(
                            myIdTuple, value));
        }
    }

    peer.save(KEY_DIST, this.globalKeyDistribution);
    peer.save(COMBINER_FUTURE, future);
    peer.save(MESSAGE_QUEUE, this.memoryQueue);
}
From source file: org.apache.hawq.pxf.plugins.hdfs.ChunkRecordReader.java
License: Apache License

/**
 * Fetches the next data chunk from the file split. The size of the chunk is
 * a class hardcoded parameter - CHUNK_SIZE. This behaviour sets this reader
 * apart from the other readers which will fetch one record and stop when
 * reaching a record delimiter.
 *
 * @param key - output parameter. When method returns will contain the key -
 *            the number of the start byte of the chunk
 * @param value - output parameter. When method returns will contain the
 *            value - the chunk, a byte array inside the ChunkWritable
 *            instance
 * @return false - when end of split was reached
 * @throws IOException if an I/O error occurred while reading the next chunk
 *             or line
 */
@Override
public synchronized boolean next(LongWritable key, ChunkWritable value) throws IOException {
    /*
     * Usually a record is spread between the end of current split and the
     * beginning of next split. So when reading the last record in the split
     * we usually need to cross over to the next split. This tricky logic is
     * implemented in ChunkReader.readLine(). In order not to rewrite this
     * logic we will read the last chunk in the split with readLine(). For a
     * split of 120M, reading the last 1M line by line doesn't have a huge
     * impact. Applying a factor to the last chunk to make sure we start
     * before the last record.
     */
    float factor = 1.5f;
    int limit = (int) (factor * CHUNK_SIZE);
    long curPos = getFilePosition();
    int newSize = 0;

    while (curPos <= end) {
        key.set(pos);
        if ((end - curPos) > limit) {
            newSize = in.readChunk(value, CHUNK_SIZE);
        } else {
            newSize = in.readLine(value, Math.max(maxBytesToConsume(pos), maxLineLength));
        }
        if (newSize == 0) {
            break;
        }
        pos += newSize;
        if (pos == fileLength) {
            /* in case text file last character is not a linefeed */
            if (value.box[value.box.length - 1] != '\n') {
                int newLen = value.box.length + 1;
                byte[] tmp = new byte[newLen];
                System.arraycopy(value.box, 0, tmp, 0, newLen - 1);
                tmp[newLen - 1] = '\n';
                value.box = tmp;
            }
        }
        return true;
    }
    /*
     * if we got here, either newSize was 0 or curPos is bigger than end
     */
    return false;
}