Example usage for org.apache.hadoop.io WritableUtils writeVLong

List of usage examples for org.apache.hadoop.io WritableUtils writeVLong

Introduction

On this page you can find example usages of org.apache.hadoop.io WritableUtils writeVLong.

Prototype

public static void writeVLong(DataOutput stream, long i) throws IOException 

Document

Serializes a long to a binary stream with zero-compressed encoding.
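
The encoding is symmetric: a value written with writeVLong is read back with WritableUtils#readVLong. The following round-trip sketch (not taken from the sources below) illustrates this; values near zero occupy a single byte, while values far from zero take up to nine bytes.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.WritableUtils;

public class VLongRoundTrip {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bos);

        WritableUtils.writeVLong(out, 42L);            // encoded in 1 byte
        WritableUtils.writeVLong(out, Long.MAX_VALUE); // encoded in 9 bytes
        out.close();

        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
        System.out.println(WritableUtils.readVLong(in)); // 42
        System.out.println(WritableUtils.readVLong(in)); // 9223372036854775807
    }
}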

Usage

From source file:org.apache.phoenix.expression.function.RoundDateExpression.java

License:Apache License

@Override
public void write(DataOutput output) throws IOException {
    super.write(output);
    WritableUtils.writeVLong(output, divBy);
}
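
Each writeVLong in a Writable's write method pairs with a readVLong in its readFields. A hypothetical read-side counterpart, mirroring the write order above (a sketch, not quoted from the Phoenix source):

@Override
public void readFields(DataInput input) throws IOException {
    super.readFields(input);
    divBy = WritableUtils.readVLong(input);
}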

From source file:org.apache.phoenix.filter.EncodedQualifiersColumnProjectionFilter.java

License:Apache License

@Override
public void write(DataOutput output) throws IOException {
    WritableUtils.writeCompressedByteArray(output, this.emptyCFName);
    long[] longArrayOfBitSet = trackedColumns.toLongArray();
    WritableUtils.writeVInt(output, longArrayOfBitSet.length);
    for (Long l : longArrayOfBitSet) {
        WritableUtils.writeVLong(output, l);
    }
    WritableUtils.writeVInt(output, encodingScheme.ordinal());
    WritableUtils.writeVInt(output, this.conditionOnlyCfs.size());
    for (byte[] f : this.conditionOnlyCfs) {
        WritableUtils.writeCompressedByteArray(output, f);
    }
}
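
Here the BitSet is flattened to a long[] and each word is written as a VLong. A hypothetical read-side sketch that restores it with BitSet.valueOf, mirroring the field order above (not quoted from the Phoenix source):

@Override
public void readFields(DataInput input) throws IOException {
    this.emptyCFName = WritableUtils.readCompressedByteArray(input);
    int numWords = WritableUtils.readVInt(input);
    long[] words = new long[numWords];
    for (int i = 0; i < numWords; i++) {
        words[i] = WritableUtils.readVLong(input);
    }
    this.trackedColumns = BitSet.valueOf(words);
    // encodingScheme and conditionOnlyCfs would be restored analogously
    // from the remaining VInts and compressed byte arrays.
}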

From source file:org.apache.phoenix.hive.mapreduce.PhoenixInputSplit.java

License:Apache License

@Override
public void write(DataOutput out) throws IOException {
    super.write(out);

    Preconditions.checkNotNull(scans);
    WritableUtils.writeVInt(out, scans.size());
    for (Scan scan : scans) {
        ClientProtos.Scan protoScan = ProtobufUtil.toScan(scan);
        byte[] protoScanBytes = protoScan.toByteArray();
        WritableUtils.writeVInt(out, protoScanBytes.length);
        out.write(protoScanBytes);
    }

    WritableUtils.writeString(out, query);
    WritableUtils.writeVLong(out, regionSize);
}
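
Each Scan is serialized through its protobuf form with an explicit length prefix, so the reader can allocate an exact buffer before parsing. A hypothetical readFields sketch mirroring the write order above (not quoted from the Phoenix source):

@Override
public void readFields(DataInput in) throws IOException {
    super.readFields(in);

    int count = WritableUtils.readVInt(in);
    scans = new ArrayList<Scan>(count);
    for (int i = 0; i < count; i++) {
        byte[] protoScanBytes = new byte[WritableUtils.readVInt(in)];
        in.readFully(protoScanBytes);
        scans.add(ProtobufUtil.toScan(ClientProtos.Scan.parseFrom(protoScanBytes)));
    }

    query = WritableUtils.readString(in);
    regionSize = WritableUtils.readVLong(in);
}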

From source file:org.apache.phoenix.mapreduce.FormatToBytesWritableMapper.java

License:Apache License

/**
 * Collects all column values for the same row. The RowKey may differ when indexes are involved,
 * so a separate record is written for each unique RowKey.
 *
 * @param context    Current mapper context
 * @param tableName Table index in tableNames list
 * @param lkv        List of KV values that will be combined in a single ImmutableBytesWritable
 * @throws IOException
 * @throws InterruptedException
 */

private void writeAggregatedRow(Context context, String tableName, List<KeyValue> lkv)
        throws IOException, InterruptedException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
    DataOutputStream outputStream = new DataOutputStream(bos);
    ImmutableBytesWritable outputKey = null;
    if (!lkv.isEmpty()) {
        for (KeyValue cell : lkv) {
            if (outputKey == null || Bytes.compareTo(outputKey.get(), outputKey.getOffset(),
                    outputKey.getLength(), cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) != 0) {
                // This is the first RowKey, or it differs from the previous one
                if (outputKey != null) { //It's a different RowKey, so we need to write it
                    ImmutableBytesWritable aggregatedArray = new ImmutableBytesWritable(bos.toByteArray());
                    outputStream.close();
                    context.write(new TableRowkeyPair(tableName, outputKey), aggregatedArray);
                }
                outputKey = new ImmutableBytesWritable(cell.getRowArray(), cell.getRowOffset(),
                        cell.getRowLength());
                bos = new ByteArrayOutputStream(1024);
                outputStream = new DataOutputStream(bos);
            }
            /*
            The order of aggregation: type, timestamp, index of column, length of value, value itself
             */
            int i = findIndex(cell);
            if (i == -1) {
                //This may happen when we load only local indexes. Since KV pairs for both
                // the table and the local index go to the same physical table at that point,
                // we skip KVs that do not belong to the local index
                continue;
            }
            outputStream.writeByte(cell.getTypeByte());
            WritableUtils.writeVLong(outputStream, cell.getTimestamp());
            WritableUtils.writeVInt(outputStream, i);
            WritableUtils.writeVInt(outputStream, cell.getValueLength());
            outputStream.write(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());

        }
        ImmutableBytesWritable aggregatedArray = new ImmutableBytesWritable(bos.toByteArray());
        outputStream.close();
        context.write(new TableRowkeyPair(tableName, outputKey), aggregatedArray);
    }
}
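
The per-cell record layout written above is: type (byte), timestamp (VLong), column index (VInt), value length (VInt), value bytes. A hypothetical decoder sketch for one aggregated buffer (assumed names, not quoted from the Phoenix reducer that consumes these records):

private void decodeAggregatedRow(DataInputStream input) throws IOException {
    while (input.available() > 0) {
        byte type = input.readByte();
        long timestamp = WritableUtils.readVLong(input);
        int columnIndex = WritableUtils.readVInt(input);
        int valueLength = WritableUtils.readVInt(input);
        byte[] value = new byte[valueLength];
        input.readFully(value);
        // ... reassemble a KeyValue from type, timestamp, columnIndex and value
    }
}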

From source file:org.apache.phoenix.mapreduce.PhoenixInputSplit.java

License:Apache License

@Override
public void write(DataOutput output) throws IOException {
    WritableUtils.writeString(output, regionLocation);
    WritableUtils.writeVLong(output, regionSize);

    Preconditions.checkNotNull(scans);
    WritableUtils.writeVInt(output, scans.size());
    for (Scan scan : scans) {
        ClientProtos.Scan protoScan = ProtobufUtil.toScan(scan);
        byte[] protoScanBytes = protoScan.toByteArray();
        WritableUtils.writeVInt(output, protoScanBytes.length);
        output.write(protoScanBytes);
    }
}

From source file:org.apache.tez.common.counters.FileSystemCounterGroup.java

License:Apache License

/**
 * FileSystemGroup ::= #scheme (scheme #counter (key value)*)*
 */
@Override
public void write(DataOutput out) throws IOException {
    WritableUtils.writeVInt(out, map.size()); // #scheme
    for (Map.Entry<String, Object[]> entry : map.entrySet()) {
        WritableUtils.writeString(out, entry.getKey()); // scheme
        // #counter for the above scheme
        WritableUtils.writeVInt(out, numSetCounters(entry.getValue()));
        for (Object counter : entry.getValue()) {
            if (counter == null)
                continue;
            FSCounter c = (FSCounter) ((TezCounter) counter).getUnderlyingCounter();
            WritableUtils.writeVInt(out, c.key.ordinal()); // key
            WritableUtils.writeVLong(out, c.getValue()); // value
        }
    }
}
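
A hypothetical read-side sketch that decodes the same grammar (assumed names; the real Tez readFields resolves each counter through the group):

// FileSystemGroup ::= #scheme (scheme #counter (key value)*)*
public void readFields(DataInput in) throws IOException {
    int numSchemes = WritableUtils.readVInt(in);          // #scheme
    for (int i = 0; i < numSchemes; i++) {
        String scheme = WritableUtils.readString(in);     // scheme
        int numCounters = WritableUtils.readVInt(in);     // #counter
        for (int j = 0; j < numCounters; j++) {
            int key = WritableUtils.readVInt(in);         // key (counter ordinal)
            long value = WritableUtils.readVLong(in);     // value
            // ... locate the counter for (scheme, key) and set its value
        }
    }
}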

From source file:org.apache.tez.common.counters.FrameworkCounterGroup.java

License:Apache License

/**
 * FrameworkGroup ::= #counter (key value)*
 */
@Override
@SuppressWarnings("unchecked")
public void write(DataOutput out) throws IOException {
    WritableUtils.writeVInt(out, size());
    for (int i = 0; i < counters.length; ++i) {
        TezCounter counter = (C) counters[i];
        if (counter != null) {
            WritableUtils.writeVInt(out, i);
            WritableUtils.writeVLong(out, counter.getValue());
        }
    }
}
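
The matching read side only needs the count followed by (index, value) pairs. A hypothetical sketch (assumed names, not quoted from Tez):

// FrameworkGroup ::= #counter (key value)*
public void readFields(DataInput in) throws IOException {
    int numCounters = WritableUtils.readVInt(in);
    for (int j = 0; j < numCounters; j++) {
        int index = WritableUtils.readVInt(in);   // key: position in counters[]
        long value = WritableUtils.readVLong(in); // value
        // ... look up counters[index] and set its value
    }
}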

From source file:org.apache.tez.engine.common.shuffle.impl.ShuffleHeader.java

License:Apache License

public void write(DataOutput out) throws IOException {
    Text.writeString(out, mapId);
    WritableUtils.writeVLong(out, compressedLength);
    WritableUtils.writeVLong(out, uncompressedLength);
    WritableUtils.writeVInt(out, forReduce);
}
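
The header is decoded in the same order it was written. A hypothetical readFields counterpart (a sketch; the real implementation may additionally bound the string length):

public void readFields(DataInput in) throws IOException {
    mapId = Text.readString(in);
    compressedLength = WritableUtils.readVLong(in);
    uncompressedLength = WritableUtils.readVLong(in);
    forReduce = WritableUtils.readVInt(in);
}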

From source file:org.commoncrawl.rpc.base.shared.BinaryProtocol.java

License:Open Source License

public void writeVLong(DataOutput out, long l) throws IOException {
    WritableUtils.writeVLong(out, l);
}
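
This wrapper simply delegates to Hadoop's variable-length encoding; the matching read-side wrapper would delegate to readVLong the same way (a sketch, not quoted from the CommonCrawl source):

public long readVLong(DataInput in) throws IOException {
    return WritableUtils.readVLong(in);
}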

From source file:org.commoncrawl.service.crawler.SegmentLoader.java

License:Open Source License

@SuppressWarnings("unchecked")
public static CrawlSegmentFPMap loadCrawlSegmentFPInfo(int listId, int segmentId, String crawlerName,
        CancelOperationCallback cancelCallback) throws IOException {

    CrawlSegmentFPMap fpMap = new CrawlSegmentFPMap();

    WritableName.setName(CrawlSegmentHost.class, "org.crawlcommons.protocol.CrawlSegmentHost");

    // construct hdfs path to segment ... 
    Path hdfsPath;
    if (segmentId != -1)
        hdfsPath = new Path(
                CrawlEnvironment.getCrawlSegmentDataDirectory() + "/" + listId + "/" + segmentId + "/");
    else
        hdfsPath = new Path(CrawlEnvironment.getCrawlSegmentDataDirectory() + "/");

    Path workUnitDetailPath = new Path(hdfsPath, crawlerName);

    SequenceFile.Reader reader = null;

    try {
        FileSystem hdfs = CrawlEnvironment.getDefaultFileSystem();
        reader = new SequenceFile.Reader(hdfs, workUnitDetailPath, CrawlEnvironment.getHadoopConfig());

        LongWritable hostFP = new LongWritable();
        CrawlSegmentHost segmentHost = new CrawlSegmentHost();

        DataOutputBuffer outputBuffer = new DataOutputBuffer();

        int segmentUrlCount = 0;
        while (reader.next(hostFP, segmentHost) && !cancelCallback.cancelOperation()) {
            // and update url count ... 
            segmentUrlCount += segmentHost.getUrlTargets().size();

            // write a host/url fingerprint pair for each url target ... 
            for (CrawlSegmentURL url : segmentHost.getUrlTargets()) {

                WritableUtils.writeVLong(outputBuffer, segmentHost.getHostFP());
                WritableUtils.writeVLong(outputBuffer, url.getUrlFP());
            }
        }
        outputBuffer.flush();
        // ok set the urlfp stream 
        fpMap.setURLFPBuffer(segmentUrlCount, outputBuffer.getData(), outputBuffer.getLength());
        // now initialize the 

        if (cancelCallback.cancelOperation()) {
            return null;
        } else {
            return fpMap;
        }
    } finally {
        if (reader != null)
            reader.close();
    }
}
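
Each URL target contributes a pair of VLongs (host fingerprint, then URL fingerprint) to the buffer. A hypothetical sketch of reading the pairs back with a DataInputBuffer over the same bytes (assumed names, not quoted from the CommonCrawl source):

DataInputBuffer inputBuffer = new DataInputBuffer();
inputBuffer.reset(outputBuffer.getData(), 0, outputBuffer.getLength());
for (int i = 0; i < segmentUrlCount; i++) {
    long hostFingerprint = WritableUtils.readVLong(inputBuffer);
    long urlFingerprint = WritableUtils.readVLong(inputBuffer);
    // ... process each (host, url) fingerprint pair
}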