Example usage for com.google.common.io ByteStreams newDataOutput

Introduction

This page collects usage examples for com.google.common.io ByteStreams.newDataOutput.

Prototype

public static ByteArrayDataOutput newDataOutput(ByteArrayOutputStream byteArrayOutputStream)

Document

Returns a new ByteArrayDataOutput instance which writes to the given ByteArrayOutputStream.
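
A minimal, self-contained sketch of this overload (the class and variable names are illustrative, not part of the library): bytes written through the returned ByteArrayDataOutput land in the supplied ByteArrayOutputStream, and toByteArray() on either side yields the same contents.

import com.google.common.io.ByteArrayDataOutput;
import com.google.common.io.ByteStreams;

import java.io.ByteArrayOutputStream;

public class NewDataOutputDemo {
    public static void main(String[] args) {
        // The backing stream receives everything written through the DataOutput view.
        ByteArrayOutputStream backing = new ByteArrayOutputStream();
        ByteArrayDataOutput out = ByteStreams.newDataOutput(backing);

        out.writeInt(42);      // 4 bytes, big-endian, as with java.io.DataOutputStream
        out.writeUTF("hello"); // modified UTF-8 with a 2-byte length prefix

        // Both views expose the same bytes: 4 + (2 + 5) = 11.
        System.out.println(out.toByteArray().length);     // 11
        System.out.println(backing.toByteArray().length); // 11
    }
}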

Usage

From source file:co.cask.cdap.data2.transaction.stream.StreamConsumerStateStore.java

/**
 * Encodes list of {@link StreamFileOffset} into bytes.
 */
private byte[] encodeOffsets(Iterable<StreamFileOffset> offsets) throws IOException {
    // Assumption: each offset encodes to ~40 bytes and there are 8 offsets (the number of live files)
    ByteArrayDataOutput output = ByteStreams.newDataOutput(320);
    encodeOffsets(offsets, output);
    return output.toByteArray();
}
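
Note: the int overload used here takes only an initial buffer size, not a cap; Guava grows the underlying buffer on demand, so an undersized ~320-byte estimate cannot truncate the output, it merely costs a reallocation.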

From source file:org.locationtech.geogig.storage.postgresql.v9.PGIndexDatabase.java

@Override
public IndexInfo createIndexInfo(String treeName, String attributeName, IndexType strategy,
        @Nullable Map<String, Object> metadata) {
    IndexInfo index = new IndexInfo(treeName, attributeName, strategy, metadata);
    final String sql = format(
            "INSERT INTO %s (repository, treeName, attributeName, strategy, metadata) VALUES(?, ?, ?, ?, ?)",
            config.getTables().index());

    try (Connection cx = PGStorage.newConnection(dataSource)) {
        cx.setAutoCommit(false);
        try (PreparedStatement ps = cx
                .prepareStatement(log(sql, LOG, repositoryId, treeName, attributeName, strategy, metadata));
                ByteArrayOutputStream outStream = new ByteArrayOutputStream()) {
            ps.setInt(1, repositoryId);
            ps.setString(2, treeName);
            ps.setString(3, attributeName);
            ps.setString(4, strategy.toString());
            final Map<String, Object> indexMetadata = index.getMetadata();
            if (indexMetadata.isEmpty()) {
                ps.setNull(5, java.sql.Types.OTHER, "bytea");
            } else {
                DataOutput out = ByteStreams.newDataOutput(outStream);
                valueEncoder.writeMap(indexMetadata, out);
                ps.setBytes(5, outStream.toByteArray());
            }
            ps.executeUpdate();
            cx.commit();
        } catch (SQLException | IOException e) {
            rollbackAndRethrow(cx, e);
        } finally {
            cx.setAutoCommit(true);
        }
    } catch (SQLException e) {
        throw new RuntimeException(e);
    }
    return index;
}
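
This example shows why the stream-accepting overload exists: the try-with-resources block owns the ByteArrayOutputStream, the serializer (valueEncoder.writeMap) only sees a plain DataOutput, and the raw bytes for the bytea column are then read back from the stream itself.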

From source file:net.minecraftforge.gradle.patcher.TaskGenBinPatches.java

private void createBinPatches(HashMap<String, byte[]> patches, String root, File base, File target)
        throws Exception {
    JarFile cleanJ = new JarFile(base);
    JarFile dirtyJ = new JarFile(target);

    for (Map.Entry<String, String> entry : obfMapping.entrySet()) {
        String obf = entry.getKey();
        String srg = entry.getValue();

        if (!patchedFiles.contains(obf)) // Not in the list of patch files... we didn't edit it.
        {
            continue;
        }

        JarEntry cleanE = cleanJ.getJarEntry(obf + ".class");
        JarEntry dirtyE = dirtyJ.getJarEntry(obf + ".class");

        if (dirtyE == null) // Something odd happened... a base MC class wasn't in the obfuscated jar?
        {
            continue;
        }

        byte[] clean = (cleanE != null ? ByteStreams.toByteArray(cleanJ.getInputStream(cleanE)) : new byte[0]);
        byte[] dirty = ByteStreams.toByteArray(dirtyJ.getInputStream(dirtyE));

        byte[] diff = delta.compute(clean, dirty);

        ByteArrayDataOutput out = ByteStreams.newDataOutput(diff.length + 50);
        out.writeUTF(obf); // Clean name
        out.writeUTF(obf.replace('/', '.')); // Source Notch name
        out.writeUTF(srg.replace('/', '.')); // Source SRG Name
        out.writeBoolean(cleanE != null); // Exists in Clean
        if (cleanE != null) {
            out.writeInt(adlerHash(clean)); // Hash of Clean file
        }
        out.writeInt(diff.length); // Patch length
        out.write(diff); // Patch

        patches.put(root + srg.replace('/', '.') + ".binpatch", out.toByteArray());
    }

    cleanJ.close();
    dirtyJ.close();
}
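
The record written above can be decoded with the matching ByteStreams.newDataInput. A minimal sketch (the class and method names are ours; the field order simply mirrors the writes):

import com.google.common.io.ByteArrayDataInput;
import com.google.common.io.ByteStreams;

public class BinPatchReader {
    static void dumpBinPatch(byte[] record) {
        ByteArrayDataInput in = ByteStreams.newDataInput(record);
        String cleanName = in.readUTF();          // Clean name
        String notchName = in.readUTF();          // Source Notch name
        String srgName = in.readUTF();            // Source SRG name
        boolean existsInClean = in.readBoolean(); // Exists in Clean
        int cleanHash = existsInClean ? in.readInt() : 0; // Adler hash, present only if it exists
        byte[] diff = new byte[in.readInt()];     // Patch length
        in.readFully(diff);                       // Patch bytes
        System.out.printf("%s -> %s (%d-byte patch)%n", cleanName, srgName, diff.length);
    }
}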

From source file:org.locationtech.geogig.storage.postgresql.PGIndexDatabase.java

@Override
public IndexInfo updateIndexInfo(String treeName, String attributeName, IndexType strategy,
        Map<String, Object> metadata) {
    IndexInfo index = new IndexInfo(treeName, attributeName, strategy, metadata);
    final String deleteSql = format(
            "DELETE FROM %s WHERE repository = ? AND treeName = ? AND attributeName = ?",
            config.getTables().index());
    final String insertSql = format(
            "INSERT INTO %s (repository, treeName, attributeName, strategy, metadata) VALUES(?, ?, ?, ?, ?)",
            config.getTables().index());

    try (Connection cx = PGStorage.newConnection(dataSource)) {
        cx.setAutoCommit(false);
        try {
            try (PreparedStatement ps = cx
                    .prepareStatement(log(deleteSql, LOG, repositoryId, treeName, attributeName))) {
                ps.setInt(1, repositoryId);
                ps.setString(2, treeName);
                ps.setString(3, attributeName);
                ps.executeUpdate();
            }
            try (PreparedStatement ps = cx.prepareStatement(
                    log(insertSql, LOG, repositoryId, treeName, attributeName, strategy, metadata));
                    ByteArrayOutputStream outStream = new ByteArrayOutputStream()) {
                ps.setInt(1, repositoryId);
                ps.setString(2, treeName);
                ps.setString(3, attributeName);
                ps.setString(4, strategy.toString());
                if (index.getMetadata() != null) {
                    DataOutput out = ByteStreams.newDataOutput(outStream);
                    DataStreamValueSerializerV2.write(index.getMetadata(), out);
                    ps.setBytes(5, outStream.toByteArray());
                } else {
                    ps.setNull(5, java.sql.Types.OTHER, "bytea");
                }
                ps.executeUpdate();
            } catch (IOException e) {
                rollbackAndRethrow(cx, e);
            }
            cx.commit();
        } catch (SQLException e) {
            cx.rollback();
        } finally {
            cx.setAutoCommit(true);
        }
    } catch (SQLException e) {
        throw propagate(e);
    }
    return index;
}

From source file:de.nx42.maps4cim.header.CustomHeader.java

@Override
public byte[] generateHeader() throws IOException {

    // first part
    ByteArrayDataOutput outP1 = ByteStreams.newDataOutput(4096);

    // static intro
    outP1.write(intro);
    outP1.write(formatHeaderString(staticString01));
    // gap of 4 bytes
    outP1.write(new byte[4]);

    // dates and timestamps
    outP1.writeLong(DateUtils.dateToTicks(unusedDate1));
    outP1.writeLong(DateUtils.dateToTicks(unusedDate2));
    outP1.writeLong(DateUtils.dateToTicks(lastSaved));
    outP1.writeLong(DateUtils.dateToTicks(mapCreated));
    outP1.writeLong(workTime1);
    outP1.writeLong(workTime2);

    // static data
    outP1.write(staticBinary01);
    outP1.write(formatHeaderString(staticString02));

    // map name
    outP1.write(formatHeaderString(mapName));
    if (buildingSet == BuildingSet.EUROPEAN) {
        outP1.write(formatHeaderString(staticString02eur01));
    }

    // map overview image
    outP1.write(pngLength);
    outP1.write(png);

    // static data
    outP1.write(staticBinary02);
    if (buildingSet == BuildingSet.EUROPEAN) {
        outP1.write(formatHeaderString(staticString02eur02));
        outP1.write(staticBinary02eur);
    }
    outP1.write(formatHeaderString(staticString03));
    outP1.write(new byte[34]);
    outP1.write(staticBinary03);
    outP1.write(formatHeaderString(staticString04));
    outP1.write(formatHeaderString(staticString05));

    // second part
    ByteArrayDataOutput outP2 = ByteStreams.newDataOutput(256);

    // static data
    outP2.write(intro);
    outP2.write(formatHeaderString(staticString06));
    outP2.write(staticBinary04);
    for (String s : staticStrings07) {
        outP2.write(formatHeaderString(s));
    }
    outP2.write(staticBinary05);

    // combine the parts
    ByteArrayDataOutput out = ByteStreams.newDataOutput(4352);

    byte[] p1 = outP1.toByteArray();
    out.write(p1);
    // fill with 0s until the next free index % 4096 == 0
    out.write(new byte[((p1.length / 4096) + 1) * 4096 - p1.length]);

    byte[] p2 = outP2.toByteArray();
    out.write(p2);
    // fill with 0s until 256 bytes are filled after the beginning of p2
    out.write(new byte[256 - p2.length]);

    // return combined result
    return out.toByteArray();
}
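
A quick check of the padding arithmetic: for p1.length = 4100, ((4100 / 4096) + 1) * 4096 - 4100 = 8192 - 4100 = 4092 zero bytes, so the second part starts at offset 8192. Note that when p1.length is already an exact multiple of 4096 the expression still emits a full 4096-byte block, i.e. it pads to the next strictly larger multiple.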

From source file:org.locationtech.geogig.storage.postgresql.v9.PGIndexDatabase.java

@Override
public IndexInfo updateIndexInfo(String treeName, String attributeName, IndexType strategy,
        Map<String, Object> metadata) {
    IndexInfo index = new IndexInfo(treeName, attributeName, strategy, metadata);
    final String deleteSql = format(
            "DELETE FROM %s WHERE repository = ? AND treeName = ? AND attributeName = ?",
            config.getTables().index());
    final String insertSql = format(
            "INSERT INTO %s (repository, treeName, attributeName, strategy, metadata) VALUES(?, ?, ?, ?, ?)",
            config.getTables().index());

    try (Connection cx = PGStorage.newConnection(dataSource)) {
        cx.setAutoCommit(false);
        try {
            try (PreparedStatement ps = cx
                    .prepareStatement(log(deleteSql, LOG, repositoryId, treeName, attributeName))) {
                ps.setInt(1, repositoryId);
                ps.setString(2, treeName);
                ps.setString(3, attributeName);
                ps.executeUpdate();
            }
            try (PreparedStatement ps = cx.prepareStatement(
                    log(insertSql, LOG, repositoryId, treeName, attributeName, strategy, metadata));
                    ByteArrayOutputStream outStream = new ByteArrayOutputStream()) {
                ps.setInt(1, repositoryId);
                ps.setString(2, treeName);
                ps.setString(3, attributeName);
                ps.setString(4, strategy.toString());
                final Map<String, Object> indexMetadata = index.getMetadata();
                if (indexMetadata.isEmpty()) {
                    ps.setNull(5, java.sql.Types.OTHER, "bytea");
                } else {
                    DataOutput out = ByteStreams.newDataOutput(outStream);
                    valueEncoder.writeMap(indexMetadata, out);
                    ps.setBytes(5, outStream.toByteArray());
                }
                ps.executeUpdate();
            } catch (IOException e) {
                rollbackAndRethrow(cx, e);
            }
            cx.commit();
        } catch (SQLException e) {
            cx.rollback();
        } finally {
            cx.setAutoCommit(true);
        }
    } catch (SQLException e) {
        throw new RuntimeException(e);
    }
    return index;
}

From source file:org.caffinitas.ohc.linked.CheckOHCacheImpl.java

private byte[] value(V value) {
    try {
        ByteArrayDataOutput output = ByteStreams.newDataOutput(valueSerializer.serializedSize(value));
        valueSerializer.serialize(value, output);
        return output.toByteArray();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
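
Presizing with valueSerializer.serializedSize(value) means the buffer normally never has to grow; as with the other int-overload calls on this page, the argument is only a starting capacity, so a serializer that writes more than it promised still succeeds, at the cost of a copy.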

From source file:co.cask.cdap.data2.transaction.stream.AbstractStreamFileConsumer.java

/**
 * Try to claim a stream event offset.
 *
 * @return The row key for writing to the state table if successfully claimed or {@code null} if not claimed.
 */
private byte[] claimEntry(StreamFileOffset offset, byte[] claimedStateContent) throws IOException {
    ByteArrayDataOutput out = ByteStreams.newDataOutput(50);
    out.writeLong(consumerConfig.getGroupId());
    StreamUtils.encodeOffset(out, offset);
    byte[] row = out.toByteArray();

    SortedMap<byte[], byte[]> rowStates = getInitRowStates(row);

    // See if the entry should be ignored. If it is in the rowStates with null value, then it should be ignored.
    byte[] rowState = rowStates.get(row);
    if (rowStates.containsKey(row) && rowState == null) {
        return null;
    }

    // Only need to claim entry if FIFO and group size > 1
    if (consumerConfig.getDequeueStrategy() == DequeueStrategy.FIFO && consumerConfig.getGroupSize() > 1) {
        return claimFifoEntry(row, claimedStateContent, rowState) ? row : null;
    }

    // For Hash, RR and FIFO with group size == 1, no need to claim and check,
    // as it's already handled by the readFilter
    return row;
}
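
The resulting row key is the consumer group id as 8 big-endian bytes (the DataOutput contract for writeLong) followed by the encoded offset, so all state rows for one group sort together under a lexicographic byte[] comparator.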