Example usage for java.io DataOutputStream size

Introduction

This page lists example usages of the java.io.DataOutputStream.size() method.

Prototype

public final int size() 

Document

Returns the current value of the counter written, the number of bytes written to this data output stream so far.
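
A minimal, self-contained sketch (not taken from any of the projects below; the class name is illustrative) of what size() reports, namely the number of bytes written through the stream so far:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class DataOutputStreamSizeDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(baos);

        dos.writeInt(42);    // 4 bytes
        dos.writeLong(7L);   // 8 bytes
        dos.writeUTF("abc"); // 2-byte length prefix + 3 bytes

        // size() returns the current value of the byte counter: 4 + 8 + 5 = 17.
        System.out.println(dos.size());                // 17
        System.out.println(baos.toByteArray().length); // 17 as well
    }
}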

Usage

From source file:org.apache.hadoop.hbase.io.hfile.TestHFileBlockIndex.java

@Test
public void testSecondaryIndexBinarySearch() throws IOException {
    int numTotalKeys = 99;
    assertTrue(numTotalKeys % 2 == 1); // Ensure no one made this even.

    // We only add odd-index keys into the array that we will binary-search.
    int numSearchedKeys = (numTotalKeys - 1) / 2;

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(baos);

    dos.writeInt(numSearchedKeys);
    int curAllEntriesSize = 0;
    int numEntriesAdded = 0;

    // Only odd-index elements of this array are used to keep the secondary
    // index entries of the corresponding keys.
    int secondaryIndexEntries[] = new int[numTotalKeys];

    for (int i = 0; i < numTotalKeys; ++i) {
        byte[] k = TestHFileWriterV2.randomOrderedKey(rand, i * 2);
        KeyValue cell = new KeyValue(k, Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("val"));
        //KeyValue cell = new KeyValue.KeyOnlyKeyValue(k, 0, k.length);
        keys.add(cell.getKey());
        String msgPrefix = "Key #" + i + " (" + Bytes.toStringBinary(k) + "): ";
        StringBuilder padding = new StringBuilder();
        while (msgPrefix.length() + padding.length() < 70)
            padding.append(' ');
        msgPrefix += padding;
        if (i % 2 == 1) {
            dos.writeInt(curAllEntriesSize);
            secondaryIndexEntries[i] = curAllEntriesSize;
            LOG.info(msgPrefix + "secondary index entry #" + ((i - 1) / 2) + ", offset " + curAllEntriesSize);
            curAllEntriesSize += cell.getKey().length + HFileBlockIndex.SECONDARY_INDEX_ENTRY_OVERHEAD;
            ++numEntriesAdded;
        } else {
            secondaryIndexEntries[i] = -1;
            LOG.info(msgPrefix + "not in the searched array");
        }
    }

    // Make sure the keys are increasing.
    for (int i = 0; i < keys.size() - 1; ++i)
        assertTrue(KeyValue.COMPARATOR.compare(new KeyValue.KeyOnlyKeyValue(keys.get(i), 0, keys.get(i).length),
                new KeyValue.KeyOnlyKeyValue(keys.get(i + 1), 0, keys.get(i + 1).length)) < 0);

    dos.writeInt(curAllEntriesSize);
    assertEquals(numSearchedKeys, numEntriesAdded);
    int secondaryIndexOffset = dos.size();
    assertEquals(Bytes.SIZEOF_INT * (numSearchedKeys + 2), secondaryIndexOffset);

    for (int i = 1; i <= numTotalKeys - 1; i += 2) {
        assertEquals(dos.size(), secondaryIndexOffset + secondaryIndexEntries[i]);
        long dummyFileOffset = getDummyFileOffset(i);
        int dummyOnDiskSize = getDummyOnDiskSize(i);
        LOG.debug("Storing file offset=" + dummyFileOffset + " and onDiskSize=" + dummyOnDiskSize
                + " at offset " + dos.size());
        dos.writeLong(dummyFileOffset);
        dos.writeInt(dummyOnDiskSize);
        LOG.debug("Stored key " + ((i - 1) / 2) + " at offset " + dos.size());
        dos.write(keys.get(i));
    }

    dos.writeInt(curAllEntriesSize);

    ByteBuffer nonRootIndex = ByteBuffer.wrap(baos.toByteArray());
    for (int i = 0; i < numTotalKeys; ++i) {
        byte[] searchKey = keys.get(i);
        byte[] arrayHoldingKey = new byte[searchKey.length + searchKey.length / 2];

        // To make things a bit more interesting, store the key we are looking
        // for at a non-zero offset in a new array.
        System.arraycopy(searchKey, 0, arrayHoldingKey, searchKey.length / 2, searchKey.length);

        KeyValue.KeyOnlyKeyValue cell = new KeyValue.KeyOnlyKeyValue(arrayHoldingKey, searchKey.length / 2,
                searchKey.length);
        int searchResult = BlockIndexReader.binarySearchNonRootIndex(cell, nonRootIndex, KeyValue.COMPARATOR);
        String lookupFailureMsg = "Failed to look up key #" + i + " (" + Bytes.toStringBinary(searchKey) + ")";

        int expectedResult;
        int referenceItem;

        if (i % 2 == 1) {
            // This key is in the array we search as the element (i - 1) / 2. Make
            // sure we find it.
            expectedResult = (i - 1) / 2;
            referenceItem = i;
        } else {
            // This key is not in the array but between two elements on the array,
            // in the beginning, or in the end. The result should be the previous
            // key in the searched array, or -1 for i = 0.
            expectedResult = i / 2 - 1;
            referenceItem = i - 1;
        }

        assertEquals(lookupFailureMsg, expectedResult, searchResult);

        // Now test we can get the offset and the on-disk-size using a
        // higher-level API function.
        boolean locateBlockResult = (BlockIndexReader.locateNonRootIndexEntry(nonRootIndex, cell,
                KeyValue.COMPARATOR) != -1);

        if (i == 0) {
            assertFalse(locateBlockResult);
        } else {
            assertTrue(locateBlockResult);
            String errorMsg = "i=" + i + ", position=" + nonRootIndex.position();
            assertEquals(errorMsg, getDummyFileOffset(referenceItem), nonRootIndex.getLong());
            assertEquals(errorMsg, getDummyOnDiskSize(referenceItem), nonRootIndex.getInt());
        }
    }

}

From source file:org.apache.hadoop.hbase.io.hfile.TestHFileBlockIndex.java

@Test
public void testBlockIndexChunk() throws IOException {
    BlockIndexChunk c = new BlockIndexChunk();
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    int N = 1000;
    int[] numSubEntriesAt = new int[N];
    int numSubEntries = 0;
    for (int i = 0; i < N; ++i) {
        baos.reset();
        DataOutputStream dos = new DataOutputStream(baos);
        c.writeNonRoot(dos);
        assertEquals(c.getNonRootSize(), dos.size());

        baos.reset();
        dos = new DataOutputStream(baos);
        c.writeRoot(dos);
        assertEquals(c.getRootSize(), dos.size());

        byte[] k = TestHFileWriterV2.randomOrderedKey(rand, i);
        numSubEntries += rand.nextInt(5) + 1;
        numSubEntriesAt[i] = numSubEntries; // record the cumulative sub-entry count; the verification loop below relies on it
        keys.add(k);
        c.add(k, getDummyFileOffset(i), getDummyOnDiskSize(i), numSubEntries);
    }

    // Test the ability to look up the entry that contains a particular
    // deeper-level index block's entry ("sub-entry"), assuming a global
    // 0-based ordering of sub-entries. This is needed for mid-key calculation.
    for (int i = 0; i < N; ++i) {
        for (int j = i == 0 ? 0 : numSubEntriesAt[i - 1]; j < numSubEntriesAt[i]; ++j) {
            assertEquals(i, c.getEntryBySubEntry(j));
        }
    }
}

From source file:org.apache.hadoop.mapreduce.task.reduce.TestFetcher.java

@Test
public void testCorruptedIFile() throws Exception {
    final int fetcher = 7;
    Path onDiskMapOutputPath = new Path(name.getMethodName() + "/foo");
    Path shuffledToDisk = OnDiskMapOutput.getTempPath(onDiskMapOutputPath, fetcher);
    fs = FileSystem.getLocal(job).getRaw();
    IFileWrappedMapOutput<Text, Text> odmo = new OnDiskMapOutput<Text, Text>(map1ID, mm, 100L, job, fetcher,
            true, fs, onDiskMapOutputPath);

    String mapData = "MAPDATA12345678901234567890";

    ShuffleHeader header = new ShuffleHeader(map1ID.toString(), 14, 10, 1);
    ByteArrayOutputStream bout = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(bout);
    IFileOutputStream ios = new IFileOutputStream(dos);
    header.write(dos);

    int headerSize = dos.size();
    try {
        ios.write(mapData.getBytes());
    } finally {
        ios.close();
    }

    int dataSize = bout.size() - headerSize;

    // Ensure that the OnDiskMapOutput shuffler can successfully read the data.
    MapHost host = new MapHost("TestHost", "http://test/url");
    ByteArrayInputStream bin = new ByteArrayInputStream(bout.toByteArray());
    try {
        // Read past the shuffle header.
        bin.read(new byte[headerSize], 0, headerSize);
        odmo.shuffle(host, bin, dataSize, dataSize, metrics, Reporter.NULL);
    } finally {
        bin.close();
    }

    // Now corrupt the IFile data.
    byte[] corrupted = bout.toByteArray();
    corrupted[headerSize + (dataSize / 2)] = 0x0;

    try {
        bin = new ByteArrayInputStream(corrupted);
        // Read past the shuffle header.
        bin.read(new byte[headerSize], 0, headerSize);
        odmo.shuffle(host, bin, dataSize, dataSize, metrics, Reporter.NULL);
        fail("OnDiskMapOutput.shuffle didn't detect the corrupted map partition file");
    } catch (ChecksumException e) {
        LOG.info("The expected checksum exception was thrown.", e);
    } finally {
        bin.close();
    }

    // Ensure that the shuffled file can be read.
    IFileInputStream iFin = new IFileInputStream(fs.open(shuffledToDisk), dataSize, job);
    try {
        iFin.read(new byte[dataSize], 0, dataSize);
    } finally {
        iFin.close();
    }
}

From source file:org.apache.jackrabbit.core.persistence.bundle.util.BundleBinding.java

/**
 * Serializes a <code>NodePropBundle</code> to a data output stream
 *
 * @param out the output stream
 * @param bundle the bundle to serialize
 * @throws IOException if an I/O error occurs.
 */
public void writeBundle(DataOutputStream out, NodePropBundle bundle) throws IOException {
    long size = out.size();

    // primaryType and version
    out.writeInt((VERSION_CURRENT << 24) | nsIndex.stringToIndex(bundle.getNodeTypeName().getNamespaceURI()));
    out.writeInt(nameIndex.stringToIndex(bundle.getNodeTypeName().getLocalName()));

    // parentUUID
    writeID(out, bundle.getParentId());

    // definitionId
    out.writeUTF(bundle.getNodeDefId().toString());

    // mixin types
    for (Name name : bundle.getMixinTypeNames()) {
        writeIndexedQName(out, name);
    }
    writeIndexedQName(out, null);

    // properties
    for (Name pName : bundle.getPropertyNames()) {
        // skip redundant primaryType, mixinTypes and uuid properties
        if (pName.equals(NameConstants.JCR_PRIMARYTYPE) || pName.equals(NameConstants.JCR_MIXINTYPES)
                || pName.equals(NameConstants.JCR_UUID)) {
            continue;
        }
        NodePropBundle.PropertyEntry pState = bundle.getPropertyEntry(pName);
        if (pState == null) {
            log.error("PropertyState missing in bundle: " + pName);
        } else {
            writeIndexedQName(out, pName);
            writeState(out, pState);
        }
    }
    writeIndexedQName(out, null);

    // write uuid flag
    out.writeBoolean(bundle.isReferenceable());

    // child nodes (list of uuid/name pairs)
    for (NodePropBundle.ChildNodeEntry entry : bundle.getChildNodeEntries()) {
        writeID(out, entry.getId()); // uuid
        writeQName(out, entry.getName()); // name
    }
    writeID(out, null);

    // write mod count
    writeModCount(out, bundle.getModCount());

    // write shared set
    for (NodeId nodeId : bundle.getSharedSet()) {
        writeID(out, nodeId);
    }
    writeID(out, null);

    // set size of bundle
    bundle.setSize(out.size() - size);
}

From source file:org.apache.jackrabbit.core.persistence.bundle.util.BundleBinding.java

/**
 * Serializes a <code>NodePropBundle</code> to a data output stream
 *
 * @param out the output stream
 * @param bundle the bundle to serialize
 * @throws IOException if an I/O error occurs.
 */
public void writeBundle(DataOutputStream out, NodePropBundle bundle) throws IOException {
    long size = out.size();

    // primaryType and version
    out.writeInt((VERSION_CURRENT << 24) | nsIndex.stringToIndex(bundle.getNodeTypeName().getNamespaceURI()));
    out.writeInt(nameIndex.stringToIndex(bundle.getNodeTypeName().getLocalName()));

    // parentUUID
    writeID(out, bundle.getParentId());

    // definitionId
    out.writeUTF("");

    // mixin types
    Iterator iter = bundle.getMixinTypeNames().iterator();
    while (iter.hasNext()) {
        writeIndexedQName(out, (Name) iter.next());
    }
    writeIndexedQName(out, null);

    // properties
    iter = bundle.getPropertyNames().iterator();
    while (iter.hasNext()) {
        Name pName = (Name) iter.next();
        // skip redundant primaryType, mixinTypes and uuid properties
        if (pName.equals(NameConstants.JCR_PRIMARYTYPE) || pName.equals(NameConstants.JCR_MIXINTYPES)
                || pName.equals(NameConstants.JCR_UUID)) {
            continue;
        }
        NodePropBundle.PropertyEntry pState = bundle.getPropertyEntry(pName);
        if (pState == null) {
            log.error("PropertyState missing in bundle: " + pName);
        } else {
            writeIndexedQName(out, pName);
            writeState(out, pState);
        }
    }
    writeIndexedQName(out, null);

    // write uuid flag
    out.writeBoolean(bundle.isReferenceable());

    // child nodes (list of uuid/name pairs)
    iter = bundle.getChildNodeEntries().iterator();
    while (iter.hasNext()) {
        NodePropBundle.ChildNodeEntry entry = (NodePropBundle.ChildNodeEntry) iter.next();
        writeID(out, entry.getId()); // uuid
        writeQName(out, entry.getName()); // name
    }
    writeID(out, null);

    // write mod count
    writeModCount(out, bundle.getModCount());

    // write shared set
    iter = bundle.getSharedSet().iterator();
    while (iter.hasNext()) {
        writeID(out, (NodeId) iter.next());
    }
    writeID(out, null);

    // set size of bundle
    bundle.setSize(out.size() - size);
}

From source file:org.globus.gsi.gssapi.test.GlobusGSSContextTest.java

private void runWrapTests(boolean privacy, boolean reqConf, int qop) throws Exception {

    assertTrue("client ctx not established.", clientContext.isEstablished());
    assertTrue("server ctx not established.", serverContext.isEstablished());

    int[] msgSize = { 10, 100, 1000, 10000, 16384, 100000 };

    for (int i = 0; i < msgSize.length; i++) {

        ByteArrayOutputStream out = new ByteArrayOutputStream();
        DataOutputStream dout = new DataOutputStream(out);

        while (dout.size() < msgSize[i]) {
            dout.writeLong(System.currentTimeMillis());
        }

        byte[] msg = out.toByteArray();

        MessageProp wProp = new MessageProp(qop, reqConf);

        byte[] wToken = clientContext.wrap(msg, 0, msg.length, wProp);

        assertEquals(privacy, wProp.getPrivacy());
        assertEquals(qop, wProp.getQOP());

        MessageProp uwProp = new MessageProp(reqConf);

        logger.debug("UNWRAPING HALF (" + (wToken.length / 2) + " BYTES) OF TOKEN OF LENGTH: " + wToken.length);
        byte[] uwToken1 = serverContext.unwrap(wToken, 0, wToken.length / 2, uwProp);

        byte[] uwToken2 = serverContext.unwrap(wToken, wToken.length / 2, wToken.length - (wToken.length / 2),
                uwProp);
        if (uwToken2 == null) {
            fail("unwrap of token unsuccessful; length: " + wToken.length);
        }

        assertEquals(privacy, uwProp.getPrivacy());
        assertEquals(qop, uwProp.getQOP());

        assertEquals(msg.length, ((uwToken1 != null) ? uwToken1.length : 0) + uwToken2.length);

        if (uwToken1 != null) {
            for (int j = 0; j < uwToken1.length; j++) {
                assertEquals(msg[j], uwToken1[j]);
            }
        }
        for (int j = 0; j < uwToken2.length; j++) {
            assertEquals(msg[((uwToken1 != null) ? uwToken1.length : 0) + j], uwToken2[j]);
        }

    }
}

From source file:org.lightjason.agentspeak.consistency.metric.CNCD.java

/**
 * compression algorithm
 *
 * @param p_input input string
 * @return number of compression bytes
 * @warning counting stream returns the correct number of bytes after flushing
 */
private double compress(final String p_input) {
    final DataOutputStream l_counting = new DataOutputStream(new NullOutputStream());

    try (final InputStream l_input = new ByteArrayInputStream(p_input.getBytes(StandardCharsets.UTF_8));
            final OutputStream l_compress = m_compression.get(l_counting)) {
        IOUtils.copy(l_input, l_compress);
    } catch (final IOException l_exception) {
        return 0;
    }

    return l_counting.size();
}

From source file:org.lightjason.agentspeak.language.CCommon.java

/**
 * compression algorithm
 *
 * @param p_compression compression algorithm
 * @param p_input input string
 * @return number of compression bytes
 * @warning counting stream returns the correct number of bytes after flushing
 */
private static double compress(final ECompression p_compression, final String p_input) {
    final DataOutputStream l_counting = new DataOutputStream(new NullOutputStream());

    try (final InputStream l_input = new ByteArrayInputStream(p_input.getBytes(StandardCharsets.UTF_8));
            final OutputStream l_compress = p_compression.get(l_counting)) {
        IOUtils.copy(l_input, l_compress);
    } catch (final IOException l_exception) {
        return 0;
    }

    return l_counting.size();
}

From source file:org.mule.transport.tcp.protocols.LengthProtocol.java

@Override
protected void writeByteArray(OutputStream os, byte[] data) throws IOException {
    // Write the length and then the data.
    DataOutputStream dos = new DataOutputStream(os);
    dos.writeInt(data.length);
    dos.write(data);
    // DataOutputStream size is SIZE_INT + the byte length, due to the writeInt call
    // this should fix EE-1494
    if (dos.size() != data.length + SIZE_INT) {
        // only flush if the sizes don't match up
        dos.flush();
    }
}