Example usage for org.apache.hadoop.io WritableUtils toByteArray

Introduction

On this page you can find example usages of org.apache.hadoop.io WritableUtils toByteArray.

Prototype

public static byte[] toByteArray(Writable... writables) 

Document

Convert writables to a byte array
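
For orientation, here is a minimal, self-contained sketch (the class name ToByteArrayDemo is just for illustration): toByteArray concatenates the write() output of each Writable, so the bytes can be read back with readFields() in the same order.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableUtils;

public class ToByteArrayDemo {
    public static void main(String[] args) throws IOException {
        // Serialize two Writables into a single byte array.
        byte[] bytes = WritableUtils.toByteArray(new Text("hello"), new IntWritable(42));

        // Read the values back in the same order they were written.
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes))) {
            Text text = new Text();
            IntWritable number = new IntWritable();
            text.readFields(in);
            number.readFields(in);
            System.out.println(text + " " + number); // prints: hello 42
        }
    }
}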

Usage

From source file:brickhouse.hbase.ArrayValuePutUDF.java

License:Apache License

public Object evaluate(DeferredObject[] arg0) throws HiveException {
    try {
        if (table == null)
            table = HTableFactory.getHTable(configMap);

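        // arg0[1] is the row key; arg0[2] is the array of column values.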
        String key = keyInspector.getPrimitiveJavaObject(arg0[1].get());
        Object listValObj = arg0[2].get();
        int listValLen = listValInspector.getListLength(listValObj);

        List<String> columnValues = new ArrayList<String>();
        for (int i = 0; i < listValLen; ++i) {
            Object uninspValue = listValInspector.getListElement(listValObj, i);
            columnValues.add(valueInspector.getPrimitiveJavaObject(uninspValue));
        }

        // Make sure the key is present, but allow an empty array to be inserted into the HBase column family
        if (key != null) {
            Put thePut = new Put(key.getBytes());

            //Serialize values into byte array
            thePut.add(configMap.get(HTableFactory.FAMILY_TAG).getBytes(),
                    configMap.get(HTableFactory.QUALIFIER_TAG).getBytes(),
                    WritableUtils.toByteArray(ValueSerde.serialize(columnValues)));

            table.put(thePut);
            table.flushCommits();

            getReporter().incrCounter(ArrayValuePutUDFCounter.NUMBER_OF_SUCCESSFUL_PUTS, 1);
        } else {
            getReporter().incrCounter(ArrayValuePutUDFCounter.NULL_KEY_INSERT_FAILURE, 1);
        }

        return "Put " + listValLen + " values for key: " + key + " into " + table.getName() + " Family "
                + configMap.get(HTableFactory.FAMILY_TAG) + " ; Qualifier = "
                + configMap.get(HTableFactory.QUALIFIER_TAG);
    } catch (Exception exc) {
        LOG.error(" Error while trying HBase PUT ", exc);
        throw new RuntimeException(exc);
    }
}

From source file:com.cloudera.branchreduce.impl.thrift.LordProxy.java

License:Open Source License

@Override
public synchronized void updateGlobalState(G globalState) {
    UpdateGlobalStateRequest req = new UpdateGlobalStateRequest();
    req.setGlobalState(WritableUtils.toByteArray(globalState));
    try {
        client.updateGlobalState(req);
    } catch (TException e) {
        LOG.error("Transport exception updating global state", e);
    }
}

From source file:com.cloudera.branchreduce.impl.thrift.VassalProxy.java

License:Open Source License

@Override
public synchronized void startTasks(List<T> tasks, G globalState) {
    StartTasksRequest req = new StartTasksRequest();
    req.setGlobalState(WritableUtils.toByteArray(globalState));
    for (T task : tasks) {
        req.addToTasks(ByteBuffer.wrap(WritableUtils.toByteArray(task)));
    }
    try {
        client.startTasks(req);
    } catch (TException e) {
        LOG.error("Transport exception starting tasks", e);
    }
}

From source file:com.cloudera.branchreduce.impl.thrift.Writables.java

License:Open Source License

public static ByteBuffer toByteBuffer(Writable writable) {
    return ByteBuffer.wrap(WritableUtils.toByteArray(writable));
}

From source file:gaffer.accumulo.ConversionUtils.java

License:Apache License

/**
 * Gets a {@link Value} from a {@link SetOfStatistics} by using its
 * {@link SetOfStatistics#write} method.
 *
 * @param statistics the statistics to serialise
 * @return a {@link Value} containing the serialised statistics
 */
public static Value getValueFromSetOfStatistics(SetOfStatistics statistics) {
    return new Value(WritableUtils.toByteArray(statistics));
}

From source file:gaffer.accumulostore.key.core.AbstractCoreKeyAccumuloElementConverter.java

License:Apache License

@Override
public Value getValueFromProperties(final Properties properties, final String group)
        throws AccumuloElementConversionException {
    final MapWritable map = new MapWritable();
    for (final Map.Entry<String, Object> entry : properties.entrySet()) {
        final String propertyName = entry.getKey();
        final StorePropertyDefinition propertyDefinition = storeSchema.getElement(group)
                .getProperty(propertyName);
        if (propertyDefinition != null) {
            if (StorePositions.VALUE.isEqual(propertyDefinition.getPosition())) {
                try {
                    map.put(new Text(propertyName),
                            new BytesWritable(propertyDefinition.getSerialiser().serialise(entry.getValue())));
                } catch (final SerialisationException e) {
                    throw new AccumuloElementConversionException("Failed to serialise property " + propertyName,
                            e);
                }
            }
        }
    }
    if (map.isEmpty()) {
        return new Value();
    }
    return new Value(WritableUtils.toByteArray(map));
}

From source file:org.apache.accumulo.master.tableOps.compact.CompactRange.java

License:Apache License

public CompactRange(NamespaceId namespaceId, TableId tableId, byte[] startRow, byte[] endRow,
        List<IteratorSetting> iterators, CompactionStrategyConfig compactionStrategy)
        throws AcceptableThriftTableOperationException {

    requireNonNull(namespaceId, "Invalid argument: null namespaceId");
    requireNonNull(tableId, "Invalid argument: null tableId");
    requireNonNull(iterators, "Invalid argument: null iterator list");
    requireNonNull(compactionStrategy, "Invalid argument: null compactionStrategy");

    this.tableId = tableId;
    this.namespaceId = namespaceId;
    this.startRow = startRow.length == 0 ? null : startRow;
    this.endRow = endRow.length == 0 ? null : endRow;

    if (iterators.size() > 0 || !compactionStrategy.equals(CompactionStrategyConfigUtil.DEFAULT_STRATEGY)) {
        this.config = WritableUtils.toByteArray(
                new UserCompactionConfig(this.startRow, this.endRow, iterators, compactionStrategy));
    } else {
        log.info("No iterators or compaction strategy");
    }

    if (this.startRow != null && this.endRow != null && new Text(startRow).compareTo(new Text(endRow)) >= 0)
        throw new AcceptableThriftTableOperationException(tableId.canonical(), null, TableOperation.COMPACT,
                TableOperationExceptionType.BAD_RANGE, "start row must be less than end row");
}

From source file:org.apache.accumulo.master.tableOps.CompactRange.java

License:Apache License

public CompactRange(String tableId, byte[] startRow, byte[] endRow, List<IteratorSetting> iterators,
        CompactionStrategyConfig compactionStrategy) throws AcceptableThriftTableOperationException {

    requireNonNull(tableId, "Invalid argument: null tableId");
    requireNonNull(iterators, "Invalid argument: null iterator list");
    requireNonNull(compactionStrategy, "Invalid argument: null compactionStrategy");

    this.tableId = tableId;
    this.startRow = startRow.length == 0 ? null : startRow;
    this.endRow = endRow.length == 0 ? null : endRow;

    if (iterators.size() > 0 || !compactionStrategy.equals(CompactionStrategyConfigUtil.DEFAULT_STRATEGY)) {
        this.config = WritableUtils.toByteArray(
                new UserCompactionConfig(this.startRow, this.endRow, iterators, compactionStrategy));
    } else {
        log.info("No iterators or compaction strategy");
    }

    if (this.startRow != null && this.endRow != null && new Text(startRow).compareTo(new Text(endRow)) >= 0)
        throw new AcceptableThriftTableOperationException(tableId, null, TableOperation.COMPACT,
                TableOperationExceptionType.BAD_RANGE, "start row must be less than end row");
}

From source file:org.apache.accumulo.server.master.tableOps.CompactionDriver.java

License:Apache License

public CompactRange(String tableId, byte[] startRow, byte[] endRow, List<IteratorSetting> iterators)
        throws ThriftTableOperationException {
    this.tableId = tableId;
    this.startRow = startRow.length == 0 ? null : startRow;
    this.endRow = endRow.length == 0 ? null : endRow;

    if (iterators.size() > 0) {
        this.iterators = WritableUtils
                .toByteArray(new CompactionIterators(this.startRow, this.endRow, iterators));
    } else {
        this.iterators = null;
    }

    if (this.startRow != null && this.endRow != null && new Text(startRow).compareTo(new Text(endRow)) >= 0)
        throw new ThriftTableOperationException(tableId, null, TableOperation.COMPACT,
                TableOperationExceptionType.BAD_RANGE, "start row must be less than end row");
}

From source file:org.apache.crunch.io.hbase.HFileOutputFormatForCrunch.java

License:Apache License

@Override
public RecordWriter<Object, KeyValue> getRecordWriter(final TaskAttemptContext context)
        throws IOException, InterruptedException {
    Path outputPath = getDefaultWorkFile(context, "");
    Configuration conf = context.getConfiguration();
    FileSystem fs = outputPath.getFileSystem(conf);

    final boolean compactionExclude = conf.getBoolean(COMPACTION_EXCLUDE_CONF_KEY, false);

    String hcolStr = conf.get(HCOLUMN_DESCRIPTOR_KEY);
    if (hcolStr == null) {
        throw new AssertionError(HCOLUMN_DESCRIPTOR_KEY + " is not set in conf");
    }
    byte[] hcolBytes;
    try {
        hcolBytes = Hex.decodeHex(hcolStr.toCharArray());
    } catch (DecoderException e) {
        throw new AssertionError("Bad hex string: " + hcolStr);
    }
    HColumnDescriptor hcol = new HColumnDescriptor();
    hcol.readFields(new DataInputStream(new ByteArrayInputStream(hcolBytes)));
    LOG.info("Output path: " + outputPath);
    LOG.info("HColumnDescriptor: " + hcol.toString());
    final HFile.Writer writer = HFile.getWriterFactoryNoCache(conf).withPath(fs, outputPath)
            .withBlockSize(hcol.getBlocksize()).withCompression(hcol.getCompression())
            .withComparator(KeyValue.KEY_COMPARATOR)
            .withDataBlockEncoder(new HFileDataBlockEncoderImpl(hcol.getDataBlockEncoding()))
            .withChecksumType(Store.getChecksumType(conf)).withBytesPerChecksum(Store.getBytesPerChecksum(conf))
            .create();

    return new RecordWriter<Object, KeyValue>() {
        @Override
        public void write(Object row, KeyValue kv) throws IOException {
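            // 'now' and 'trt' are instance fields of the enclosing output
            // format: a current-time byte array and a TimeRangeTracker.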
            if (kv.getTimestamp() == HConstants.LATEST_TIMESTAMP) {
                kv.updateLatestStamp(now);
            }
            writer.append(kv);
            trt.includeTimestamp(kv);
        }

        @Override
        public void close(TaskAttemptContext c) throws IOException, InterruptedException {
            writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
            writer.appendFileInfo(StoreFile.BULKLOAD_TASK_KEY,
                    Bytes.toBytes(context.getTaskAttemptID().toString()));
            writer.appendFileInfo(StoreFile.MAJOR_COMPACTION_KEY, Bytes.toBytes(true));
            writer.appendFileInfo(StoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY,
                    Bytes.toBytes(compactionExclude));
            writer.appendFileInfo(StoreFile.TIMERANGE_KEY, WritableUtils.toByteArray(trt));
            writer.close();
        }
    };
}