List of usage examples for org.apache.hadoop.io.BytesWritable.getBytes()
@Override public byte[] getBytes()
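A recurring pitfall across the examples below: getBytes() returns the entire backing buffer, whose length can exceed the number of valid bytes, so callers must bound reads with getLength() (or use copyBytes(), which returns an exactly-sized copy). A minimal stand-alone sketch of the contract:

    import java.util.Arrays;
    import org.apache.hadoop.io.BytesWritable;

    public class GetBytesContract {
        public static void main(String[] args) {
            BytesWritable w = new BytesWritable(new byte[] { 3, 5 });
            w.setCapacity(11); // grows the backing buffer; the data is unchanged

            byte[] buffer = w.getBytes(); // length 11: valid bytes plus padding
            int valid = w.getLength();    // 2: only this prefix is meaningful

            byte[] exact = Arrays.copyOf(buffer, valid); // bound by getLength()
            byte[] same = w.copyBytes();                 // equivalent convenience copy
            System.out.println(Arrays.equals(exact, same)); // true
        }
    }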
From source file:crunch.MaxTemperature.java
License:Apache License
@Test
public void test() throws IOException {
    // vv BytesWritableTest
    BytesWritable b = new BytesWritable(new byte[] { 3, 5 });
    byte[] bytes = serialize(b);
    assertThat(StringUtils.byteToHexString(bytes), is("000000020305"));
    // ^^ BytesWritableTest

    // vv BytesWritableTest-Capacity
    // getBytes() returns the whole backing buffer (the capacity),
    // not just the valid bytes reported by getLength().
    b.setCapacity(11);
    assertThat(b.getLength(), is(2));
    assertThat(b.getBytes().length, is(11));
    // ^^ BytesWritableTest-Capacity
}
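The two capacity assertions pin down the behavior that matters for every example on this page: after setCapacity(11) the valid length is still 2, while getBytes() hands back the full 11-byte buffer.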
From source file:edu.arizona.cs.hadoop.fs.irods.output.HirodsSequenceFileAsBinaryOutputFormat.java
License:Apache License
@Override
public RecordWriter<BytesWritable, BytesWritable> getRecordWriter(TaskAttemptContext context)
        throws IOException {
    final SequenceFile.Writer out = getSequenceWriter(context, getSequenceFileOutputKeyClass(context),
            getSequenceFileOutputValueClass(context));

    return new RecordWriter<BytesWritable, BytesWritable>() {
        private WritableValueBytes wvaluebytes = new WritableValueBytes();

        @Override
        public void write(BytesWritable bkey, BytesWritable bvalue) throws IOException {
            wvaluebytes.reset(bvalue);
            // Append the raw key bytes, bounded by getLength() so that only
            // the valid portion of the backing buffer is written.
            out.appendRaw(bkey.getBytes(), 0, bkey.getLength(), wvaluebytes);
            wvaluebytes.reset(null);
        }

        @Override
        public void close(TaskAttemptContext context) throws IOException {
            out.close();
        }
    };
}
From source file:edu.stolaf.cs.wmrserver.streaming.PipeMapRed.java
License:Apache License
/**
 * Write a value to the output stream using UTF-8 encoding.
 * @param value output value
 * @throws IOException
 */
void write(Object value) throws IOException {
    byte[] bval;
    int valSize;
    if (value instanceof BytesWritable) {
        BytesWritable val = (BytesWritable) value;
        bval = val.getBytes();
        valSize = val.getLength();
    } else if (value instanceof Text) {
        Text val = (Text) value;
        bval = val.getBytes();
        valSize = val.getLength();
    } else {
        String sval = value.toString();
        bval = sval.getBytes("UTF-8");
        valSize = bval.length;
    }
    clientOut_.write(bval, 0, valSize);
}
From source file:edu.uci.ics.hyracks.hdfs.scheduler.IPProximityNcCollectionBuilder.java
License:Apache License
@Override
public INcCollection build(Map<String, NodeControllerInfo> ncNameToNcInfos,
        final Map<String, List<String>> ipToNcMapping, final Map<String, Integer> ncNameToIndex, String[] NCs,
        final int[] workloads, final int slotLimit) {
    final TreeMap<BytesWritable, IntWritable> availableIpsToSlots = new TreeMap<BytesWritable, IntWritable>();
    for (int i = 0; i < workloads.length; i++) {
        if (workloads[i] < slotLimit) {
            byte[] rawip;
            try {
                rawip = ncNameToNcInfos.get(NCs[i]).getNetworkAddress().lookupIpAddress();
            } catch (UnknownHostException e) {
                // QQQ Should probably have a neater solution than this
                throw new RuntimeException(e);
            }
            BytesWritable ip = new BytesWritable(rawip);
            IntWritable availableSlot = availableIpsToSlots.get(ip);
            if (availableSlot == null) {
                availableSlot = new IntWritable(slotLimit - workloads[i]);
                availableIpsToSlots.put(ip, availableSlot);
            } else {
                availableSlot.set(slotLimit - workloads[i] + availableSlot.get());
            }
        }
    }

    return new INcCollection() {

        @Override
        public String findNearestAvailableSlot(InputSplit split) {
            try {
                String[] locs = split.getLocations();
                int minDistance = Integer.MAX_VALUE;
                BytesWritable currentCandidateIp = null;
                // Fixed condition: the original (locs == null || locs.length > 0)
                // would NPE on a null locations array.
                if (locs != null && locs.length > 0) {
                    for (int j = 0; j < locs.length; j++) {
                        // Get all the IP addresses for the location name.
                        InetAddress[] allIps = InetAddress.getAllByName(locs[j]);
                        for (InetAddress ip : allIps) {
                            BytesWritable splitIp = new BytesWritable(ip.getAddress());
                            // Find the nearest registered node controller IP.
                            BytesWritable candidateNcIp = availableIpsToSlots.floorKey(splitIp);
                            if (candidateNcIp == null) {
                                candidateNcIp = availableIpsToSlots.ceilingKey(splitIp);
                            }
                            if (candidateNcIp != null) {
                                if (availableIpsToSlots.get(candidateNcIp).get() > 0) {
                                    // Pack the four IPv4 octets into an int and use the
                                    // absolute difference as the proximity metric.
                                    byte[] candidateIP = candidateNcIp.getBytes();
                                    byte[] splitIP = splitIp.getBytes();
                                    int candidateInt = candidateIP[0] << 24 | (candidateIP[1] & 0xFF) << 16
                                            | (candidateIP[2] & 0xFF) << 8 | (candidateIP[3] & 0xFF);
                                    int splitInt = splitIP[0] << 24 | (splitIP[1] & 0xFF) << 16
                                            | (splitIP[2] & 0xFF) << 8 | (splitIP[3] & 0xFF);
                                    int distance = Math.abs(candidateInt - splitInt);
                                    if (minDistance > distance) {
                                        minDistance = distance;
                                        currentCandidateIp = candidateNcIp;
                                    }
                                }
                            }
                        }
                    }
                } else {
                    // No location hints: fall back to the first IP with free slots.
                    for (Entry<BytesWritable, IntWritable> entry : availableIpsToSlots.entrySet()) {
                        if (entry.getValue().get() > 0) {
                            currentCandidateIp = entry.getKey();
                            break;
                        }
                    }
                }

                if (currentCandidateIp != null) {
                    // Update the entry of the selected IP.
                    IntWritable availableSlot = availableIpsToSlots.get(currentCandidateIp);
                    availableSlot.set(availableSlot.get() - 1);
                    if (availableSlot.get() == 0) {
                        availableIpsToSlots.remove(currentCandidateIp);
                    }

                    // Update the entry of the selected NC.
                    List<String> dataLocations = ipToNcMapping
                            .get(InetAddress.getByAddress(currentCandidateIp.getBytes()).getHostAddress());
                    for (String nc : dataLocations) {
                        int ncIndex = ncNameToIndex.get(nc);
                        if (workloads[ncIndex] < slotLimit) {
                            return nc;
                        }
                    }
                }
                // Not scheduled.
                return null;
            } catch (Exception e) {
                throw new IllegalStateException(e);
            }
        }

        @Override
        public int numAvailableSlots() {
            return availableIpsToSlots.size();
        }
    };
}
From source file:edu.umd.JBizz.BooleanRetrievalCompressed.java
License:Apache License
private ArrayListWritable<PairOfInts> fetchPostings(String term) throws IOException {
    Text key = new Text();
    ArrayListWritable<PairOfInts> poi = new ArrayListWritable<PairOfInts>();
    BytesWritable value = new BytesWritable();
    PairOfInts pair = new PairOfInts();

    key.set(term);
    index.get(key, value);
    // getBytes() may return a buffer longer than getLength(); the loop below
    // relies on a zero VInt (read from the zero-filled padding) as a sentinel.
    byte[] vals = value.getBytes();
    DataInputStream dis = new DataInputStream(new ByteArrayInputStream(vals));
    int j = 0;
    int i = 0;
    int sentinel = 0;
    while (sentinel == 0) {
        j = WritableUtils.readVInt(dis);
        i = WritableUtils.readVInt(dis);
        if (i == 0 || j == 0) {
            sentinel = 1;
        } else {
            pair.set(j, i);
            poi.add(new PairOfInts(j, i));
        }
    }
    return poi;
}
From source file:edu.umd.shrawanraina.BooleanRetrievalCompressed.java
License:Apache License
private PairOfWritables<IntWritable, ArrayListWritable<PairOfInts>> readPost(BytesWritable value)
        throws IOException {
    byte[] val = value.getBytes();
    ByteArrayInputStream postings = new ByteArrayInputStream(val);
    DataInputStream stream = new DataInputStream(postings);
    ArrayListWritable<PairOfInts> pInt = new ArrayListWritable<PairOfInts>();

    int currentDocNo = 0;
    int dgap = 0;
    int tf = -1;
    // The postings are gap-encoded: each entry stores the gap from the previous
    // document number plus a term frequency, both as VInts.
    int postingList = WritableUtils.readVInt(stream);
    for (int i = 0; i < postingList; i++) {
        dgap = WritableUtils.readVInt(stream);
        tf = WritableUtils.readVInt(stream);
        currentDocNo = currentDocNo + dgap;
        pInt.add(new PairOfInts(currentDocNo, tf));
    }
    return new PairOfWritables<IntWritable, ArrayListWritable<PairOfInts>>(new IntWritable(postingList), pInt);
}
From source file:edu.usc.pgroup.louvain.hadoop.MapCommunity.java
License:Apache License
@Override
protected void map(Text key, BytesWritable value, Context context) throws IOException, InterruptedException {
    /**
     * File format: Metis format.
     * Example file name: 4elt_0.part; the partition rank is parsed from the name.
     */
    String fileName = key.toString();
    String _parts[] = fileName.split("_");
    String dotParts[] = _parts[1].split("\\.");

    // Note: value.getBytes() can include padding past value.getLength(); a bounded
    // stream (new ByteArrayInputStream(value.getBytes(), 0, value.getLength())) is safer.
    InputStream inputStream = new ByteArrayInputStream(value.getBytes());
    int rank = Integer.parseInt(dotParts[0]);

    if (verbose) {
        System.out.println("Begin");
    }

    try {
        Community c = new Community(inputStream, -1, nb_pass, precision);
        Graph g = null;
        boolean improvement = true;
        double mod = c.modularity(), new_mod;
        int level = 0;

        if (verbose) {
            System.out.print("" + rank + ":" + "level " + level);
            System.out.print(" start computation");
            System.out.println(" network size: " + c.getG().getNb_nodes() + " nodes, "
                    + c.getG().getNb_links() + " links, " + c.getG().getTotal_weight() + " weight.");
        }

        improvement = c.one_level();
        new_mod = c.modularity();
        if (++level == display_level)
            g.display();
        if (display_level == -1) {
            String filepath = outpath + File.separator + "out_" + level + "_" + rank + ".txt";
            c.display_partition(filepath);
        }
        g = c.partition2graph_binary();

        if (verbose) {
            System.out.println(" network size: " + c.getG().getNb_nodes() + " nodes, "
                    + c.getG().getNb_links() + " links, " + c.getG().getTotal_weight() + " weight.");
        }

        GraphMessage msg = createGraphMessage(g, c, rank);

        // Serialize the graph message and send it to the reducer.
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        ObjectOutputStream oo = new ObjectOutputStream(bos);
        oo.writeObject(msg);
        context.write(new Text("one"), new BytesWritable(bos.toByteArray()));
    } catch (Exception e) {
        e.printStackTrace();
        throw new InterruptedException(e.toString());
    }
}
From source file:edu.usc.pgroup.louvain.hadoop.ReduceCommunity.java
License:Apache License
private Graph reconstructGraph(Iterable<BytesWritable> values) throws Exception {
    Iterator<BytesWritable> it = values.iterator();
    SortedMap<Integer, GraphMessage> map = new TreeMap<Integer, GraphMessage>();

    // Load data: deserialize one GraphMessage per partition. Any padding past
    // getLength() is harmless here, since ObjectInputStream stops at the end
    // of the serialized object.
    while (it.hasNext()) {
        BytesWritable bytesWritable = it.next();
        ByteArrayInputStream inputStream = new ByteArrayInputStream(bytesWritable.getBytes());
        try {
            ObjectInputStream objectInputStream = new ObjectInputStream(inputStream);
            GraphMessage msg = (GraphMessage) objectInputStream.readObject();
            map.put(msg.getCurrentPartition(), msg);
        } catch (IOException e) {
            e.printStackTrace();
            throw new Exception(e);
        }
    }

    // Renumber: shift node ids and degree offsets of each partition so they
    // occupy disjoint ranges in the merged graph.
    int gap = 0;
    int degreeGap = 0;

    Path pt = new Path(outpath + File.separator + "Map-Partition-Sizes");
    FileSystem fs = FileSystem.get(new Configuration());
    if (fs.exists(pt)) {
        fs.delete(pt, true);
    }
    BufferedWriter br = new BufferedWriter(new OutputStreamWriter(fs.create(pt, true)));
    PrintWriter out = new PrintWriter(br);

    for (int i = 0; i < map.keySet().size(); i++) {
        GraphMessage msg = map.get(i);
        long currentDegreelen = msg.getDegrees()[msg.getDegrees().length - 1];
        if (i != 0) {
            for (int j = 0; j < msg.getLinks().length; j++) {
                msg.getLinks()[j] += gap;
            }
            for (int j = 0; j < msg.getRemoteMap().length; j++) {
                msg.getRemoteMap()[j].source += gap;
            }
            for (int j = 0; j < msg.getN2c().length; j++) {
                msg.getN2c()[j] += gap;
            }
            for (int j = 0; j < msg.getDegrees().length; j++) {
                msg.getDegrees()[j] += degreeGap;
            }
        }
        out.println("" + i + "," + msg.getNb_nodes());
        gap += msg.getNb_nodes();
        degreeGap += currentDegreelen;
    }
    out.flush();
    out.close();

    // Integrate: concatenate the renumbered partitions into one graph.
    Graph graph = new Graph();
    for (int i = 0; i < map.keySet().size(); i++) {
        GraphMessage msg = map.get(i);
        Collections.addAll(graph.getDegrees().getList(), msg.getDegrees());
        Collections.addAll(graph.getLinks().getList(), msg.getLinks());
        Collections.addAll(graph.getWeights().getList(), msg.getWeights());
        graph.setNb_links(graph.getNb_links() + msg.getNb_links());
        graph.setNb_nodes((int) (graph.getNb_nodes() + msg.getNb_nodes()));
        graph.setTotal_weight(graph.getTotal_weight() + msg.getTotal_weight());
    }
    // Merge local done.

    // Merge remote edges: aggregate cross-partition links and their weights.
    Map<Integer, Vector<Integer>> remoteEdges = new HashMap<Integer, Vector<Integer>>();
    Map<Integer, Vector<Float>> remoteWeighs = new HashMap<Integer, Vector<Float>>();

    for (int i = 0; i < map.keySet().size(); i++) {
        Map<HashMap.SimpleEntry<Integer, Integer>, Float> m = new HashMap<AbstractMap.SimpleEntry<Integer, Integer>, Float>();
        GraphMessage msg = map.get(i);
        for (int j = 0; j < msg.getRemoteMap().length; j++) {
            RemoteMap remoteMap = msg.getRemoteMap()[j];
            int sink = remoteMap.sink;
            int sinkPart = remoteMap.sinkPart;
            int target = map.get(sinkPart).getN2c()[sink];
            HashMap.SimpleEntry<Integer, Integer> key = new HashMap.SimpleEntry<Integer, Integer>(
                    remoteMap.source, target);
            if (m.containsKey(key)) {
                m.put(key, m.get(key) + 1.0f);
            } else {
                m.put(key, 1.0f);
            }
        }

        graph.setNb_links(graph.getNb_links() + m.size());

        Iterator<HashMap.SimpleEntry<Integer, Integer>> itr = m.keySet().iterator();
        while (itr.hasNext()) {
            HashMap.SimpleEntry<Integer, Integer> key = itr.next();
            float w = m.get(key);
            if (remoteEdges.containsKey(key.getKey())) {
                remoteEdges.get(key.getKey()).getList().add(key.getValue());
                if (remoteWeighs.containsKey(key.getKey())) {
                    remoteWeighs.get(key.getKey()).getList().add(w);
                }
            } else {
                Vector<Integer> list = new Vector<Integer>();
                list.getList().add(key.getValue());
                remoteEdges.put(key.getKey(), list);
                Vector<Float> wList = new Vector<Float>();
                wList.getList().add(w);
                remoteWeighs.put(key.getKey(), wList);
            }
        }
    }

    graph.addRemoteEdges(remoteEdges, remoteWeighs);
    // Merge remote done.
    return graph;
}
From source file:edu.yale.cs.hadoopdb.connector.DBInputSplit.java
License:Apache License
/**
 * Deserializes a DBChunk from the given DataInput.
 */
private DBChunk deserializeChunk(DataInput in) throws IOException {
    BytesWritable br = new BytesWritable();
    br.readFields(in);
    // getBytes() may return a buffer with padding past getLength(); that is
    // harmless here because ObjectInputStream stops reading at the end of the
    // serialized object.
    byte[] buf = br.getBytes();
    ObjectInputStream byte_stream = new ObjectInputStream(new ByteArrayInputStream(buf));
    DBChunk chunk = null;
    try {
        chunk = (DBChunk) byte_stream.readObject();
    } catch (ClassNotFoundException e) {
        throw new IOException(e);
    }
    return chunk;
}
From source file:gaffer.accumulostore.key.core.AbstractCoreKeyAccumuloElementConverter.java
License:Apache License
@Override
public Properties getPropertiesFromValue(final String group, final Value value)
        throws AccumuloElementConversionException {
    final Properties properties = new Properties();
    if (value == null || value.getSize() == 0) {
        return properties;
    }
    final MapWritable map = new MapWritable();
    try (final InputStream inStream = new ByteArrayInputStream(value.get());
            final DataInputStream dataStream = new DataInputStream(inStream)) {
        map.readFields(dataStream);
    } catch (final IOException e) {
        throw new AccumuloElementConversionException("Failed to read map writable from value", e);
    }
    final StoreElementDefinition elementDefinition = storeSchema.getElement(group);
    if (null == elementDefinition) {
        throw new AccumuloElementConversionException("No StoreElementDefinition found for group " + group
                + "; is this group in your Store Schema, or do your table iterators need updating?");
    }
    for (final Writable writeableKey : map.keySet()) {
        final String propertyName = writeableKey.toString();
        final BytesWritable propertyValueBytes = (BytesWritable) map.get(writeableKey);
        try {
            // Note: getBytes() can include padding beyond getLength(), which the
            // serialiser must tolerate when deserialising.
            properties.put(propertyName, elementDefinition.getProperty(propertyName).getSerialiser()
                    .deserialise(propertyValueBytes.getBytes()));
        } catch (final SerialisationException e) {
            throw new AccumuloElementConversionException("Failed to deserialise property " + propertyName, e);
        }
    }
    return properties;
}