Example usage for org.apache.hadoop.io DataInputBuffer reset

Introduction

On this page you can find example usages of org.apache.hadoop.io.DataInputBuffer#reset, collected from open source projects.

Prototype

public void reset(byte[] input, int length) 

Document

Resets the data that the buffer reads.
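
Most of the usage examples below follow the same round trip: serialize a Writable into a DataOutputBuffer, then call reset so that a DataInputBuffer reads back from the resulting byte array. Below is a minimal, self-contained sketch of that pattern; the class name DataInputBufferResetExample and the IntWritable payload are only illustrative and are not taken from the examples on this page.

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IntWritable;

public class DataInputBufferResetExample {
    public static void main(String[] args) throws Exception {
        // Serialize a Writable into an in-memory output buffer.
        IntWritable written = new IntWritable(42);
        DataOutputBuffer out = new DataOutputBuffer();
        written.write(out);

        // Point a DataInputBuffer at the serialized bytes. out.getData() returns
        // the backing array, which may be larger than the valid data, so
        // out.getLength() must be passed as the length argument.
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());

        // Deserialize back into a fresh Writable.
        IntWritable read = new IntWritable();
        read.readFields(in);
        System.out.println(read.get()); // prints 42
    }
}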

Usage

From source file:com.scaleoutsoftware.soss.hserver.ObjectPrefetcher.java

License:Apache License

@SuppressWarnings("unchecked")
void startPrefetching() {
    new Thread(new Runnable() {

        public void run() {
            try {
                ObjectReader reader = BucketStore.getObjectReader(initialSize);
                DataInputBuffer buffer = new DataInputBuffer();
                for (StateServerKey id : keys) {
                    reader.read(id);

                    ObjectDescriptor<T> objectDescriptor = unused.take();
                    objectDescriptor.key = new CachedObjectId<T>(id);

                    if (isWritable) {
                        buffer.reset(reader.getBuffer(), reader.getLength());
                        if (objectDescriptor.object == null) {
                            objectDescriptor.object = ReflectionUtils.newInstance(objectClass, configuration);
                        }
                        ((Writable) objectDescriptor.object).readFields(buffer);
                    } else {
                        objectDescriptor.object = (T) ObjectArray.deserialize(reader.getBuffer(), 0,
                                reader.getLength(), serializer);
                    }
                    readyToBeServed.put(objectDescriptor);
                }
                readyToBeServed.put(endToken);
            } catch (Exception e) {
                //Save the exception to be later rethrown by next()
                exception = e;
            }
        }
    }).start();
}

From source file:com.scaleunlimited.classify.datum.ModelDatum.java

License:Apache License

public BaseModel getModel() throws Exception {
    String className = _tupleEntry.getString(MODEL_FN);
    BytesWritable modelData = (BytesWritable) (_tupleEntry.getObject(MODEL_DATA_FN));
    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(modelData.getBytes(), modelData.getLength());
    BaseModel model = (BaseModel) Class.forName(className).newInstance();
    model.readFields(dib);
    return model;
}

From source file:com.sequoiadb.hadoop.io.BSONWritable.java

License:Apache License

protected synchronized void copy(Writable other) {
    if (other != null) {
        try {
            DataOutputBuffer out = new DataOutputBuffer();
            other.write(out);
            DataInputBuffer in = new DataInputBuffer();
            in.reset(out.getData(), out.getLength());
            readFields(in);

        } catch (IOException e) {
            throw new IllegalArgumentException("map cannot be copied: " + e.getMessage());
        }

    } else {
        throw new IllegalArgumentException("source map cannot be null");
    }
}

From source file:com.vertica.hadoop.VerticaConfiguration.java

License:Apache License

/**
 * Return static input parameters if set
 *
 * @return Collection of list of objects representing input parameters
 * @throws IOException
 */
public Collection<List<Object>> getInputParameters() throws IOException {
    Collection<List<Object>> values = null;
    String[] query_params = conf.getStrings(QUERY_PARAMS_PROP);
    if (query_params != null) {
        values = new ArrayList<List<Object>>();
        for (String str_params : query_params) {
            DataInputBuffer in = new DataInputBuffer();
            // the buffer length must be the decoded byte count, not the hex string length
            byte[] bytes = StringUtils.hexStringToByte(str_params);
            in.reset(bytes, bytes.length);
            int sz = in.readInt();
            ArrayList<Object> params = new ArrayList<Object>();
            for (int count = 0; count < sz; count++) {
                int type = in.readInt();
                params.add(VerticaRecord.readField(type, in));
            }
            values.add(params);
        }
    }
    return values;
}

From source file:cosmos.impl.KeyValueToMultimapQueryResult.java

License:Apache License

public MultimapRecord apply(Entry<Key, Value> input) {
    DataInputBuffer buf = new DataInputBuffer();
    buf.reset(input.getValue().get(), input.getValue().getSize());

    try {
        return MultimapRecord.recreate(buf);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}

From source file:cosmos.results.MultimapQueryResultTest.java

License:Apache License

@Test
public void identityWritableEquality() throws Exception {
    Multimap<Column, RecordValue<?>> data = HashMultimap.create();

    data.put(Column.create("TEXT"), RecordValue.create("foo", VIZ));
    data.put(Column.create("TEXT"), RecordValue.create("bar", VIZ));

    MultimapRecord mqr = new MultimapRecord(data, "1", VIZ);

    DataOutputBuffer out = new DataOutputBuffer();
    mqr.write(out);

    DataInputBuffer in = new DataInputBuffer();

    byte[] bytes = out.getData();
    in.reset(bytes, out.getLength());

    MultimapRecord mqr2 = MultimapRecord.recreate(in);

    Assert.assertEquals(mqr, mqr2);
}

From source file:io.hops.metadata.adaptor.INodeDALAdaptor.java

License:Apache License

@Override
public org.apache.hadoop.hdfs.server.namenode.INode convertDALtoHDFS(INode hopINode) throws StorageException {
    org.apache.hadoop.hdfs.server.namenode.INode inode = null;
    if (hopINode != null) {
        DataInputBuffer buffer = new DataInputBuffer();
        buffer.reset(hopINode.getPermission(), hopINode.getPermission().length);
        PermissionStatus ps = null;
        try {
            ps = PermissionStatus.read(buffer);
        } catch (IOException e) {
            throw new StorageException(e);
        }

        if (hopINode.isDir()) {
            if (hopINode.isDirWithQuota()) {
                inode = new INodeDirectoryWithQuota(hopINode.getName(), ps);
            } else {
                String iname = (hopINode.getName().length() == 0) ? INodeDirectory.ROOT_NAME
                        : hopINode.getName();
                inode = new INodeDirectory(iname, ps);
            }

            inode.setAccessTimeNoPersistance(hopINode.getAccessTime());
            inode.setModificationTimeNoPersistance(hopINode.getModificationTime());
        } else if (hopINode.getSymlink() != null) {
            inode = new INodeSymlink(hopINode.getSymlink(), hopINode.getModificationTime(),
                    hopINode.getAccessTime(), ps);
        } else {
            if (hopINode.isUnderConstruction()) {
                DatanodeID dnID = (hopINode.getClientNode() == null || hopINode.getClientNode().isEmpty())
                        ? null
                        : new DatanodeID(hopINode.getClientNode());

                inode = new INodeFileUnderConstruction(ps, INodeFile.getBlockReplication(hopINode.getHeader()),
                        INodeFile.getPreferredBlockSize(hopINode.getHeader()), hopINode.getModificationTime(),
                        hopINode.getClientName(), hopINode.getClientMachine(), dnID);

                inode.setAccessTimeNoPersistance(hopINode.getAccessTime());
            } else {
                inode = new INodeFile(ps, hopINode.getHeader(), hopINode.getModificationTime(),
                        hopINode.getAccessTime());
            }
            ((INodeFile) inode).setGenerationStampNoPersistence(hopINode.getGenerationStamp());
        }
        inode.setIdNoPersistance(hopINode.getId());
        inode.setLocalNameNoPersistance(hopINode.getName());
        inode.setParentIdNoPersistance(hopINode.getParentId());
        inode.setSubtreeLocked(hopINode.isSubtreeLocked());
        inode.setSubtreeLockOwner(hopINode.getSubtreeLockOwner());
    }
    return inode;
}

From source file:net.sf.katta.zk.ZKClient.java

License:Apache License

private Writable readWritable(final Writable writable, byte[] data) throws KattaException {
    final DataInputBuffer buffer = new DataInputBuffer();
    buffer.reset(data, data.length);
    try {
        writable.readFields(buffer);
    } catch (final IOException e) {
        throw new KattaException("unable to read data into Writable", e);
    } finally {
        try {
            buffer.close();
        } catch (IOException e) {
            LOG.warn("could not close data buffer", e);
        }
    }
    return writable;
}

From source file:org.apache.accumulo.core.replication.ReplicationTarget.java

License:Apache License

/**
 * Deserialize a ReplicationTarget
 *
 * @param t
 *          Serialized copy
 * @return the deserialized version
 */
public static ReplicationTarget from(Text t) {
    ReplicationTarget target = new ReplicationTarget();
    DataInputBuffer buffer = new DataInputBuffer();
    buffer.reset(t.getBytes(), t.getLength());

    try {
        target.readFields(buffer);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }

    return target;
}

From source file:org.apache.accumulo.core.replication.ReplicationTarget.java

License:Apache License

/**
 * Deserialize a ReplicationTarget
 *
 * @param s
 *          Serialized copy
 * @return the deserialized version
 */
public static ReplicationTarget from(String s) {
    ReplicationTarget target = new ReplicationTarget();
    DataInputBuffer buffer = new DataInputBuffer();
    // use the UTF-8 byte count rather than the character count as the buffer length
    byte[] bytes = s.getBytes(UTF_8);
    buffer.reset(bytes, bytes.length);

    try {
        target.readFields(buffer);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }

    return target;
}