Example usage for org.apache.hadoop.io WritableUtils writeString

List of usage examples for org.apache.hadoop.io WritableUtils writeString

Introduction

On this page you can find example usages of org.apache.hadoop.io.WritableUtils.writeString.

Prototype

public static void writeString(DataOutput out, String s) throws IOException 
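
writeString writes the string's UTF-8 bytes prefixed with their length (a null string is encoded as length -1), and it pairs with WritableUtils.readString on the read side. A minimal round-trip sketch using plain java.io streams:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.WritableUtils;

public class WriteStringRoundTrip {
    public static void main(String[] args) throws IOException {
        // Serialize a string with writeString.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        WritableUtils.writeString(out, "hello writable");
        out.flush();

        // Deserialize with the matching readString call.
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        String restored = WritableUtils.readString(in);
        System.out.println(restored); // prints "hello writable"
    }
}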

Source Link

Usage

From source file:org.apache.phoenix.mapreduce.PhoenixInputSplit.java

License:Apache License

@Override
public void write(DataOutput output) throws IOException {
    WritableUtils.writeString(output, regionLocation);
    WritableUtils.writeVLong(output, regionSize);

    Preconditions.checkNotNull(scans);
    WritableUtils.writeVInt(output, scans.size());
    for (Scan scan : scans) {
        ClientProtos.Scan protoScan = ProtobufUtil.toScan(scan);
        byte[] protoScanBytes = protoScan.toByteArray();
        WritableUtils.writeVInt(output, protoScanBytes.length);
        output.write(protoScanBytes);
    }
}
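
The read side (not shown in the original file) mirrors each write call above with the matching read call. The readFields below is a sketch inferred from write() alone, not necessarily Phoenix's actual implementation:

@Override
public void readFields(DataInput input) throws IOException {
    regionLocation = WritableUtils.readString(input); // pairs with writeString
    regionSize = WritableUtils.readVLong(input);      // pairs with writeVLong

    int count = WritableUtils.readVInt(input);
    scans = new ArrayList<Scan>(count);
    for (int i = 0; i < count; i++) {
        byte[] protoScanBytes = new byte[WritableUtils.readVInt(input)];
        input.readFully(protoScanBytes);
        scans.add(ProtobufUtil.toScan(ClientProtos.Scan.parseFrom(protoScanBytes)));
    }
}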

From source file:org.apache.tez.common.counters.FileSystemCounterGroup.java

License:Apache License

/**
 * FileSystemGroup ::= #scheme (scheme #counter (key value)*)*
 */
@Override
public void write(DataOutput out) throws IOException {
    WritableUtils.writeVInt(out, map.size()); // #scheme
    for (Map.Entry<String, Object[]> entry : map.entrySet()) {
        WritableUtils.writeString(out, entry.getKey()); // scheme
        // #counter for the above scheme
        WritableUtils.writeVInt(out, numSetCounters(entry.getValue()));
        for (Object counter : entry.getValue()) {
            if (counter == null)
                continue;
            FSCounter c = (FSCounter) ((TezCounter) counter).getUnderlyingCounter();
            WritableUtils.writeVInt(out, c.key.ordinal()); // key
            WritableUtils.writeVLong(out, c.getValue()); // value
        }
    }
}

From source file:org.apache.tez.engine.records.TezDependentTaskCompletionEvent.java

License:Apache License

@Override
public void write(DataOutput out) throws IOException {
    taskAttemptId.write(out);
    //    out.writeBoolean(isMap);
    WritableUtils.writeEnum(out, status);
    WritableUtils.writeString(out, taskTrackerHttp);
    WritableUtils.writeVInt(out, taskRunTime);
    WritableUtils.writeVInt(out, eventId);
}
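
writeEnum stores the enum constant by name, so the mirrored read needs the enum class. A sketch of the read side, assuming the status field's type is a nested Status enum (the actual type name in Tez may differ):

@Override
public void readFields(DataInput in) throws IOException {
    taskAttemptId.readFields(in);                      // assumes the field is pre-instantiated
    status = WritableUtils.readEnum(in, Status.class); // pairs with writeEnum
    taskTrackerHttp = WritableUtils.readString(in);    // pairs with writeString
    taskRunTime = WritableUtils.readVInt(in);
    eventId = WritableUtils.readVInt(in);
}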

From source file:org.apache.tez.mapreduce.hadoop.MRTaskStatus.java

License:Apache License

@Override
public void write(DataOutput out) throws IOException {
    taskAttemptId.write(out);
    WritableUtils.writeEnum(out, state);
    out.writeFloat(progress);
    WritableUtils.writeString(out, diagnostics);
    WritableUtils.writeString(out, userStatusInfo);
    WritableUtils.writeEnum(out, phase);

    counters.write(out);

    out.writeLong(localOutputSize);
    out.writeLong(startTime);
    out.writeLong(finishTime);
    out.writeLong(sortFinishTime);
    out.writeLong(mapFinishTime);
    out.writeLong(shuffleFinishTime);

    out.writeInt(failedTaskDependencies.size());
    for (TezTaskAttemptID taskAttemptId : failedTaskDependencies) {
        taskAttemptId.write(out);
    }
}

From source file:org.cloudata.core.parallel.hadoop.InputTableInfo.java

License:Apache License

@Override
public void write(DataOutput out) throws IOException {
    WritableUtils.writeString(out, tableName);
    rowFilter.write(out);

    WritableUtils.writeString(out, joinTableName);
    joinRowFilter.write(out);

    if (rowScan) {
        WritableUtils.writeString(out, "Y");
    } else {
        WritableUtils.writeString(out, "N");
    }

    WritableUtils.writeString(out, mergeEvaluatorClass);
}
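
Note that the boolean rowScan flag is serialized as the string "Y" or "N" rather than with out.writeBoolean. A sketch of how the read side would decode these fields, assuming rowFilter and joinRowFilter are pre-constructed Writables:

@Override
public void readFields(DataInput in) throws IOException {
    tableName = WritableUtils.readString(in);
    rowFilter.readFields(in);

    joinTableName = WritableUtils.readString(in);
    joinRowFilter.readFields(in);

    rowScan = "Y".equals(WritableUtils.readString(in)); // boolean stored as "Y"/"N"

    mergeEvaluatorClass = WritableUtils.readString(in);
}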

From source file:org.example.processOutliers.writables.VarStringWritable.java

License:Apache License

@Override
public void write(DataOutput out) throws IOException {
    WritableUtils.writeString(out, this.value);
}
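
The original file only shows write(). A complete single-field Writable also needs a public no-argument constructor and a readFields() that mirrors write(); the sketch below fills those in (the value field and accessor are assumptions):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;

public class VarStringWritable implements Writable {
    private String value = "";

    public VarStringWritable() {
        // no-arg constructor required so the framework can instantiate it
    }

    public VarStringWritable(String value) {
        this.value = value;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        WritableUtils.writeString(out, value); // length-prefixed UTF-8
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        value = WritableUtils.readString(in); // mirrors write()
    }

    public String get() {
        return value;
    }
}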

From source file:org.kiji.hive.io.KijiRowDataWritable.java

License:Apache License

/**
 * Helper method for the {@link org.apache.hadoop.io.Writable} interface used for writing
 * KijiRowDataWritable objects.  If passed a KijiColumnName, it will replace the data for the
 * specified column (relevant for paging through results).
 *
 * @param out DataOutput for the Hadoop Writable to write to.
 * @param pageData map of columns to paged data to be substituted (or an empty map if there are
 *                 no pages to substitute).
 * @throws IOException if there was an issue.
 */
protected void writeWithPages(DataOutput out,
        Map<KijiColumnName, NavigableMap<Long, KijiCellWritable>> pageData) throws IOException {

    // Write the EntityId
    mEntityId.write(out);

    // Count the total number of columns to write.
    Set<KijiColumnName> columnNames = Sets.newHashSet();
    for (KijiColumnName columnName : mWritableData.keySet()) {
        if (!mKijiQualifierPagers.containsKey(columnName.getFamily())) {
            columnNames.add(columnName);
        }
    }
    columnNames.addAll(pageData.keySet());
    WritableUtils.writeVInt(out, columnNames.size());

    // Write the unpaged data.
    for (Entry<KijiColumnName, NavigableMap<Long, KijiCellWritable>> entry : mWritableData.entrySet()) {
        KijiColumnName kijiColumnName = entry.getKey();
        if (!pageData.containsKey(kijiColumnName)
                && !mKijiQualifierPagers.containsKey(kijiColumnName.getFamily())) {
            // Only write if it's not part of the paged data.
            writeColumn(out, kijiColumnName, entry.getValue());
        }
    }

    // Write paged data if any.
    for (Entry<KijiColumnName, NavigableMap<Long, KijiCellWritable>> entry : pageData.entrySet()) {
        writeColumn(out, entry.getKey(), entry.getValue());
    }

    WritableUtils.writeVInt(out, mSchemas.size());
    for (Map.Entry<KijiColumnName, Schema> entry : mSchemas.entrySet()) {
        WritableUtils.writeString(out, entry.getKey().getName());
        WritableUtils.writeString(out, entry.getValue().toString());
    }
}

From source file:org.kiji.hive.io.KijiRowDataWritable.java

License:Apache License

/**
 * Helper function to write a column and its associated data.
 *
 * @param out DataOutput for the Hadoop Writable to write to.
 * @param kijiColumnName to write
 * @param data to write
 * @throws IOException if there was an issue.
 */
private void writeColumn(DataOutput out, KijiColumnName kijiColumnName,
        NavigableMap<Long, KijiCellWritable> data) throws IOException {
    WritableUtils.writeString(out, kijiColumnName.getName());
    WritableUtils.writeVInt(out, data.size()); // number in the timeseries
    for (Map.Entry<Long, KijiCellWritable> cellEntry : data.entrySet()) {
        WritableUtils.writeVLong(out, cellEntry.getKey());
        cellEntry.getValue().write(out);
    }
}

From source file:org.opencb.hpg.bigdata.tools.utils.ChunkKey.java

License:Apache License

@Override
public void write(DataOutput out) throws IOException {
    WritableUtils.writeString(out, name);
    out.writeLong(chunk);
}

From source file:PrescribedDrugData.DrugsWithCost.java

@Override
public void write(DataOutput d) throws IOException {
    WritableUtils.writeString(d, TotalDrugSupply);
    WritableUtils.writeString(d, TotalDrugCost);

}
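
readFields() must consume fields in exactly the order write() produced them. A sketch of the matching read side for this writable, assuming the two String fields shown above:

@Override
public void readFields(DataInput d) throws IOException {
    TotalDrugSupply = WritableUtils.readString(d); // same order as written
    TotalDrugCost = WritableUtils.readString(d);
}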