Example usage for org.apache.hadoop.io.DataOutputBuffer DataOutputBuffer()

Introduction

On this page you can find usage examples for the constructor org.apache.hadoop.io.DataOutputBuffer#DataOutputBuffer(), taken from the source files listed below.

Prototype

public DataOutputBuffer() 

Document

Constructs a new empty buffer.

Usage

From source file: org.apache.accumulo.core.replication.ReplicationTargetTest.java

License: Apache License

@Test
public void writableOutWithNulls() throws Exception {
    ReplicationTarget expected = new ReplicationTarget(null, null, null);
    DataOutputBuffer buffer = new DataOutputBuffer();
    expected.write(buffer);

    DataInputBuffer input = new DataInputBuffer();
    input.reset(buffer.getData(), buffer.getLength());
    ReplicationTarget actual = new ReplicationTarget();
    actual.readFields(input);
}

From source file: org.apache.accumulo.core.replication.ReplicationTargetTest.java

License: Apache License

@Test
public void staticFromTextHelper() throws Exception {
    ReplicationTarget expected = new ReplicationTarget("foo", "bar", "1");
    DataOutputBuffer buffer = new DataOutputBuffer();
    expected.write(buffer);
    Text t = new Text();
    t.set(buffer.getData(), 0, buffer.getLength());

    Assert.assertEquals(expected, ReplicationTarget.from(t));
}

From source file: org.apache.accumulo.core.replication.ReplicationTargetTest.java

License: Apache License

@Test
public void staticToTextHelper() throws Exception {
    ReplicationTarget expected = new ReplicationTarget("foo", "bar", "1");
    DataOutputBuffer buffer = new DataOutputBuffer();
    expected.write(buffer);
    Text t = new Text();
    t.set(buffer.getData(), 0, buffer.getLength());

    Assert.assertEquals(t, expected.toText());
}

From source file: org.apache.accumulo.core.replication.ReplicationTargetTest.java

License: Apache License

@Test
public void staticFromStringHelper() throws Exception {
    ReplicationTarget expected = new ReplicationTarget("foo", "bar", "1");
    DataOutputBuffer buffer = new DataOutputBuffer();
    expected.write(buffer);
    Text t = new Text();
    t.set(buffer.getData(), 0, buffer.getLength());

    Assert.assertEquals(expected, ReplicationTarget.from(t.toString()));
}

From source file: org.apache.accumulo.core.tabletserver.log.LogEntry.java

License: Apache License

public byte[] toBytes() throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    extent.write(out);
    out.writeLong(timestamp);
    out.writeUTF(server);
    out.writeUTF(filename);
    return Arrays.copyOf(out.getData(), out.getLength());
}

From source file: org.apache.accumulo.master.Master.java

License: Apache License

public void setMergeState(MergeInfo info, MergeState state)
        throws IOException, KeeperException, InterruptedException {
    synchronized (mergeLock) {
        String path = ZooUtil.getRoot(getInstance().getInstanceID()) + Constants.ZTABLES + "/"
                + info.getExtent().getTableId() + "/merge";
        info.setState(state);
        if (state.equals(MergeState.NONE)) {
            ZooReaderWriter.getInstance().recursiveDelete(path, NodeMissingPolicy.SKIP);
        } else {
            DataOutputBuffer out = new DataOutputBuffer();
            try {
                info.write(out);
            } catch (IOException ex) {
                throw new RuntimeException("Unlikely", ex);
            }
            ZooReaderWriter.getInstance().putPersistentData(path, out.getData(),
                    state.equals(MergeState.STARTED) ? ZooUtil.NodeExistsPolicy.FAIL
                            : ZooUtil.NodeExistsPolicy.OVERWRITE);
        }
        mergeLock.notifyAll();
    }
    nextEvent.event("Merge state of %s set to %s", info.getExtent(), state);
}

From source file: org.apache.accumulo.master.replication.WorkMaker.java

License: Apache License

protected void addWorkRecord(Text file, Value v, Map<String, String> targets, String sourceTableId) {
    log.info("Adding work records for " + file + " to targets " + targets);
    try {
        Mutation m = new Mutation(file);

        ReplicationTarget target = new ReplicationTarget();
        DataOutputBuffer buffer = new DataOutputBuffer();
        Text t = new Text();
        for (Entry<String, String> entry : targets.entrySet()) {
            buffer.reset();

            // Set up the writable
            target.setPeerName(entry.getKey());
            target.setRemoteIdentifier(entry.getValue());
            target.setSourceTableId(sourceTableId);
            target.write(buffer);

            // Throw it in a text for the mutation
            t.set(buffer.getData(), 0, buffer.getLength());

            // Add it to the work section
            WorkSection.add(m, t, v);
        }
        try {
            writer.addMutation(m);
        } catch (MutationsRejectedException e) {
            log.warn("Failed to write work mutations for replication, will retry", e);
        }
    } catch (IOException e) {
        log.warn("Failed to serialize data to Text, will retry", e);
    } finally {
        try {
            writer.flush();
        } catch (MutationsRejectedException e) {
            log.warn("Failed to write work mutations for replication, will retry", e);
        }
    }
}

From source file: org.apache.accumulo.master.state.MergeInfoTest.java

License: Apache License

MergeInfo readWrite(MergeInfo info) throws Exception {
    DataOutputBuffer buffer = new DataOutputBuffer();
    info.write(buffer);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(buffer.getData(), 0, buffer.getLength());
    MergeInfo info2 = new MergeInfo();
    info2.readFields(in);
    Assert.assertEquals(info.getExtent(), info2.getExtent());
    Assert.assertEquals(info.getState(), info2.getState());
    Assert.assertEquals(info.getOperation(), info2.getOperation());
    return info2;
}

From source file: org.apache.accumulo.server.logger.LogFileTest.java

License: Apache License

static private void readWrite(LogEvents event, long seq, int tid, String filename, KeyExtent tablet,
        Mutation[] mutations, LogFileKey keyResult, LogFileValue valueResult) throws IOException {
    LogFileKey key = new LogFileKey();
    key.event = event;
    key.seq = seq;
    key.tid = tid;
    key.filename = filename;
    key.tablet = tablet;
    key.tserverSession = keyResult.tserverSession;
    LogFileValue value = new LogFileValue();
    value.mutations = Arrays.asList(mutations != null ? mutations : new Mutation[0]);
    DataOutputBuffer out = new DataOutputBuffer();
    key.write(out);
    value.write(out);
    out.flush();
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.size());
    keyResult.readFields(in);
    valueResult.readFields(in);
    assertTrue(key.compareTo(keyResult) == 0);
    assertEquals(value.mutations, valueResult.mutations);
    assertTrue(in.read() == -1);
}

From source file: org.apache.accumulo.server.master.Master.java

License: Apache License

public void setMergeState(MergeInfo info, MergeState state)
        throws IOException, KeeperException, InterruptedException {
    synchronized (mergeLock) {
        String path = ZooUtil.getRoot(instance.getInstanceID()) + Constants.ZTABLES + "/"
                + info.getExtent().getTableId().toString() + "/merge";
        info.setState(state);
        if (state.equals(MergeState.NONE)) {
            ZooReaderWriter.getInstance().recursiveDelete(path, NodeMissingPolicy.SKIP);
        } else {
            DataOutputBuffer out = new DataOutputBuffer();
            try {
                info.write(out);
            } catch (IOException ex) {
                throw new RuntimeException("Unlikely", ex);
            }
            ZooReaderWriter.getInstance().putPersistentData(path, out.getData(),
                    state.equals(MergeState.STARTED) ? ZooUtil.NodeExistsPolicy.FAIL
                            : ZooUtil.NodeExistsPolicy.OVERWRITE);
        }
        mergeLock.notifyAll();
    }
    nextEvent.event("Merge state of %s set to %s", info.getExtent(), state);
}