Example usage for org.apache.hadoop.io DataInputBuffer reset

Introduction

On this page you can find example usage for org.apache.hadoop.io DataInputBuffer reset.

Prototype

public void reset(byte[] input, int length) 

Document

Resets the data that the buffer reads.
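
Before the examples, here is a minimal sketch of the usual pattern: write a Writable into a DataOutputBuffer, then call reset with the buffer's backing array and getLength() so the same bytes can be read back. The class name ResetSketch is illustrative only and does not come from the examples below.

import java.io.IOException;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IntWritable;

public class ResetSketch {
    public static void main(String[] args) throws IOException {
        // Serialize a Writable into an in-memory output buffer.
        DataOutputBuffer out = new DataOutputBuffer();
        new IntWritable(42).write(out);

        // Point a DataInputBuffer at the written bytes; passing getLength() matters
        // because getData() returns the backing array, which may be larger than
        // the number of bytes actually written.
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());

        // Read the value back from the buffer.
        IntWritable value = new IntWritable();
        value.readFields(in);
        System.out.println(value.get()); // prints 42
    }
}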

Usage

From source file: org.apache.accumulo.core.replication.ReplicationTargetTest.java

License: Apache License

@Test
public void writableOut() throws Exception {
    ReplicationTarget expected = new ReplicationTarget("foo", "bar", "1");
    DataOutputBuffer buffer = new DataOutputBuffer();
    expected.write(buffer);

    DataInputBuffer input = new DataInputBuffer();
    input.reset(buffer.getData(), buffer.getLength());
    ReplicationTarget actual = new ReplicationTarget();
    actual.readFields(input);
}

From source file: org.apache.accumulo.core.replication.ReplicationTargetTest.java

License: Apache License

@Test
public void writableOutWithNulls() throws Exception {
    ReplicationTarget expected = new ReplicationTarget(null, null, null);
    DataOutputBuffer buffer = new DataOutputBuffer();
    expected.write(buffer);

    DataInputBuffer input = new DataInputBuffer();
    input.reset(buffer.getData(), buffer.getLength());
    ReplicationTarget actual = new ReplicationTarget();
    actual.readFields(input);
}

From source file: org.apache.accumulo.core.tabletserver.log.LogEntry.java

License: Apache License

static public LogEntry fromBytes(byte bytes[]) throws IOException {
    DataInputBuffer inp = new DataInputBuffer();
    inp.reset(bytes, bytes.length);
    KeyExtent extent = new KeyExtent();
    extent.readFields(inp);
    long timestamp = inp.readLong();
    String server = inp.readUTF();
    String filename = inp.readUTF();
    return new LogEntry(extent, timestamp, server, filename);
}

From source file: org.apache.accumulo.master.Master.java

License: Apache License

public MergeInfo getMergeInfo(String tableId) {
    synchronized (mergeLock) {
        try {
            String path = ZooUtil.getRoot(getInstance().getInstanceID()) + Constants.ZTABLES + "/" + tableId
                    + "/merge";
            if (!ZooReaderWriter.getInstance().exists(path))
                return new MergeInfo();
            byte[] data = ZooReaderWriter.getInstance().getData(path, new Stat());
            DataInputBuffer in = new DataInputBuffer();
            in.reset(data, data.length);
            MergeInfo info = new MergeInfo();
            info.readFields(in);
            return info;
        } catch (KeeperException.NoNodeException ex) {
            log.info("Error reading merge state, it probably just finished");
            return new MergeInfo();
        } catch (Exception ex) {
            log.warn("Unexpected error reading merge state", ex);
            return new MergeInfo();
        }
    }
}

From source file: org.apache.accumulo.master.state.MergeStats.java

License: Apache License

public static void main(String[] args) throws Exception {
    ClientOpts opts = new ClientOpts();
    opts.parseArgs(MergeStats.class.getName(), args);

    Connector conn = opts.getConnector();
    Map<String, String> tableIdMap = conn.tableOperations().tableIdMap();
    for (Entry<String, String> entry : tableIdMap.entrySet()) {
        final String table = entry.getKey(), tableId = entry.getValue();
        String path = ZooUtil.getRoot(conn.getInstance().getInstanceID()) + Constants.ZTABLES + "/" + tableId
                + "/merge";
        MergeInfo info = new MergeInfo();
        if (ZooReaderWriter.getInstance().exists(path)) {
            byte[] data = ZooReaderWriter.getInstance().getData(path, new Stat());
            DataInputBuffer in = new DataInputBuffer();
            in.reset(data, data.length);
            info.readFields(in);
        }
        System.out.println(String.format("%25s  %10s %10s %s", table, info.getState(), info.getOperation(),
                info.getExtent()));
    }
}

From source file: org.apache.accumulo.server.logger.LogFileTest.java

License: Apache License

static private void readWrite(LogEvents event, long seq, int tid, String filename, KeyExtent tablet,
        Mutation[] mutations, LogFileKey keyResult, LogFileValue valueResult) throws IOException {
    LogFileKey key = new LogFileKey();
    key.event = event;
    key.seq = seq;
    key.tid = tid;
    key.filename = filename;
    key.tablet = tablet;
    key.tserverSession = keyResult.tserverSession;
    LogFileValue value = new LogFileValue();
    value.mutations = Arrays.asList(mutations != null ? mutations : new Mutation[0]);
    DataOutputBuffer out = new DataOutputBuffer();
    key.write(out);
    value.write(out);
    out.flush();
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.size());
    keyResult.readFields(in);
    valueResult.readFields(in);
    assertTrue(key.compareTo(keyResult) == 0);
    assertEquals(value.mutations, valueResult.mutations);
    assertTrue(in.read() == -1);
}

From source file: org.apache.accumulo.server.master.Master.java

License: Apache License

public MergeInfo getMergeInfo(Text tableId) {
    synchronized (mergeLock) {
        try {
            String path = ZooUtil.getRoot(instance.getInstanceID()) + Constants.ZTABLES + "/"
                    + tableId.toString() + "/merge";
            if (!ZooReaderWriter.getInstance().exists(path))
                return new MergeInfo();
            byte[] data = ZooReaderWriter.getInstance().getData(path, new Stat());
            DataInputBuffer in = new DataInputBuffer();
            in.reset(data, data.length);
            MergeInfo info = new MergeInfo();
            info.readFields(in);
            return info;
        } catch (KeeperException.NoNodeException ex) {
            log.info("Error reading merge state, it probably just finished");
            return new MergeInfo();
        } catch (Exception ex) {
            log.warn("Unexpected error reading merge state", ex);
            return new MergeInfo();
        }
    }
}

From source file: org.apache.accumulo.server.master.state.MergeStats.java

License: Apache License

public static void main(String[] args) throws Exception {
    ClientOpts opts = new ClientOpts();
    opts.parseArgs(MergeStats.class.getName(), args);

    Connector conn = opts.getConnector();
    Map<String, String> tableIdMap = conn.tableOperations().tableIdMap();
    for (String table : tableIdMap.keySet()) {
        String tableId = tableIdMap.get(table);
        String path = ZooUtil.getRoot(conn.getInstance().getInstanceID()) + Constants.ZTABLES + "/"
                + tableId.toString() + "/merge";
        MergeInfo info = new MergeInfo();
        if (ZooReaderWriter.getInstance().exists(path)) {
            byte[] data = ZooReaderWriter.getInstance().getData(path, new Stat());
            DataInputBuffer in = new DataInputBuffer();
            in.reset(data, data.length);
            info.readFields(in);
        }
        System.out.println(String.format("%25s  %10s %10s %s", table, info.state, info.operation, info.extent));
    }
}

From source file: org.apache.accumulo.server.master.state.TabletStateChangeIterator.java

License: Apache License

private Set<KeyExtent> parseMigrations(String migrations) {
    if (migrations == null)
        return Collections.emptySet();
    try {
        Set<KeyExtent> result = new HashSet<>();
        DataInputBuffer buffer = new DataInputBuffer();
        byte[] data = Base64.getDecoder().decode(migrations);
        buffer.reset(data, data.length);
        while (buffer.available() > 0) {
            KeyExtent extent = new KeyExtent();
            extent.readFields(buffer);
            result.add(extent);
        }
        return result;
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
}

From source file: org.apache.accumulo.server.master.state.TabletStateChangeIterator.java

License: Apache License

private Map<String, MergeInfo> parseMerges(String merges) {
    if (merges == null)
        return null;
    try {
        Map<String, MergeInfo> result = new HashMap<>();
        DataInputBuffer buffer = new DataInputBuffer();
        byte[] data = Base64.getDecoder().decode(merges);
        buffer.reset(data, data.length);
        while (buffer.available() > 0) {
            MergeInfo mergeInfo = new MergeInfo();
            mergeInfo.readFields(buffer);
            result.put(mergeInfo.extent.getTableId(), mergeInfo);
        }
        return result;
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
}