Example usage for org.apache.thrift TSerializer TSerializer

List of usage examples for org.apache.thrift TSerializer TSerializer

Introduction

On this page you can find example usages of the org.apache.thrift TSerializer(TProtocolFactory) constructor.

Prototype

public TSerializer(TProtocolFactory protocolFactory) 

Source Link

Document

Create a new TSerializer.

Usage

From source file: adept.mappers.thrift.MappingTests.java

License: Apache License

/**
 * Round-trips {@code hltAdept} through two serialization paths — Thrift binary
 * (bytes written to and read back from a temp file) and XML (string written to
 * and read back from a temp file) — then asserts that the original container,
 * the mapped Thrift container, and both round-tripped copies all agree under
 * {@code areTheSameByLimitedThriftScope}.
 */
private void SerializationTests(final adept.common.HltContentContainer hltAdept)
        throws TException, IOException {
    final ThriftAdeptMapper mapper = ThriftAdeptMapper.getInstance();
    final TSerializer binarySerializer = new TSerializer(new TBinaryProtocol.Factory());
    final TDeserializer binaryDeserializer = new TDeserializer(new TBinaryProtocol.Factory());
    final XMLSerializer xmlSerializer = new XMLSerializer(SerializationType.XML);

    // Thrift round trip: map to thrift, persist the binary bytes, read them back.
    final thrift.adept.common.HltContentContainer hltThrift = mapper.convert(hltAdept);
    final byte[] thriftBytes = binarySerializer.serialize(hltThrift);
    final File thriftFile = File.createTempFile("thriftBytes", "thrift");
    thriftFile.deleteOnExit();
    Files.asByteSink(thriftFile).write(thriftBytes);
    final thrift.adept.common.HltContentContainer serializationTrippedThrift = new thrift.adept.common.HltContentContainer();
    binaryDeserializer.deserialize(serializationTrippedThrift, Files.asByteSource(thriftFile).read());

    // XML round trip: serialize to a string, persist it, read it back and parse.
    final String adeptString = xmlSerializer.serializeAsString(hltAdept);
    final File adeptFile = File.createTempFile("adeptString", "xml");
    adeptFile.deleteOnExit();
    Files.asCharSink(adeptFile, Charsets.UTF_8).write(adeptString);
    final adept.common.HltContentContainer serializedTrippedAdept = (adept.common.HltContentContainer) xmlSerializer
            .deserializeString(Files.asCharSource(adeptFile, Charsets.UTF_8).read(),
                    adept.common.HltContentContainer.class);

    // All four representations must agree pairwise within the thrift-visible scope.
    assertTrue(areTheSameByLimitedThriftScope(hltAdept, hltThrift));
    assertTrue(areTheSameByLimitedThriftScope(serializedTrippedAdept, hltThrift));
    assertTrue(areTheSameByLimitedThriftScope(hltAdept, serializationTrippedThrift));
    assertTrue(areTheSameByLimitedThriftScope(serializedTrippedAdept, serializationTrippedThrift));
}

From source file: andromache.config.CassandraConfigHelper.java

License: Apache License

/**
 * Renders a Thrift object as the hex encoding of its binary-protocol bytes.
 * Useful for stuffing structured values into string-only config/debug sinks.
 *
 * @param object a non-null Thrift struct
 * @return hex string of the TBinaryProtocol serialization of {@code object}
 * @throws RuntimeException wrapping any TException from serialization
 */
private static String thriftToString(TBase object) {
    assert object != null;
    // this is so awful it's kind of cool!
    TSerializer serializer = new TSerializer(new TBinaryProtocol.Factory());
    try {
        byte[] encoded = serializer.serialize(object);
        return Hex.bytesToHex(encoded);
    } catch (TException e) {
        throw new RuntimeException(e);
    }
}

From source file: ch.usi.da.dlog.message.Command.java

License: Open Source License

/**
 * Serializes a Command to its Thrift binary wire format.
 *
 * @param c the command to encode
 * @return the TBinaryProtocol bytes, or an empty array if serialization
 *         fails (the TException is deliberately swallowed, best-effort)
 */
public static byte[] toByteArray(Command c) {
    final Cmd cmd = toCmd(c);
    final TSerializer serializer = new TSerializer(new TBinaryProtocol.Factory());
    try {
        return serializer.serialize(cmd);
    } catch (TException e) {
        return new byte[0];
    }
}

From source file: ch.usi.da.dlog.message.Message.java

License: Open Source License

/**
 * Converts a Message to its generated Thrift counterpart and serializes it
 * with the binary protocol.
 *
 * @param m the message to encode
 * @return the TBinaryProtocol bytes, or an empty array if serialization
 *         fails (the TException is deliberately swallowed, best-effort)
 */
public static byte[] toByteArray(Message m) {
    // Populate the generated Thrift message from the domain message.
    final ch.usi.da.dlog.thrift.gen.Message msg = new ch.usi.da.dlog.thrift.gen.Message();
    msg.setId(m.getID());
    msg.setFrom(m.getFrom());
    msg.setTo(m.getTo());
    final List<Cmd> cmds = new ArrayList<Cmd>();
    for (Command command : m.getCommands()) {
        cmds.add(Command.toCmd(command));
    }
    msg.setCommands(cmds);
    final TSerializer serializer = new TSerializer(new TBinaryProtocol.Factory());
    try {
        return serializer.serialize(msg);
    } catch (TException e) {
        return new byte[0];
    }
}

From source file: ch.usi.da.smr.message.Message.java

License: Open Source License

/**
 * Converts a Message to its generated Thrift counterpart and serializes it
 * with the binary protocol.
 *
 * @param m the message to encode
 * @return the TBinaryProtocol bytes, or an empty array if serialization
 *         fails (the TException is deliberately swallowed, best-effort)
 */
public static byte[] toByteArray(Message m) {
    // Populate the generated Thrift message from the domain message.
    final ch.usi.da.smr.thrift.gen.Message msg = new ch.usi.da.smr.thrift.gen.Message();
    msg.setId(m.getID());
    msg.setFrom(m.getFrom());
    msg.setTo(m.getTo());
    final List<Cmd> cmds = new ArrayList<Cmd>();
    for (Command command : m.getCommands()) {
        cmds.add(Command.toCmd(command));
    }
    msg.setCommands(cmds);
    final TSerializer serializer = new TSerializer(new TBinaryProtocol.Factory());
    try {
        return serializer.serialize(msg);
    } catch (TException e) {
        return new byte[0];
    }
}

From source file: com.bigdata.dastor.db.RangeSliceCommand.java

License: Apache License

/**
 * Writes a RangeSliceCommand to the stream in its hand-rolled wire format.
 * The field order below IS the protocol — the matching deserializer must
 * read fields in exactly this order, so do not reorder these writes.
 *
 * @param sliceCommand the command to encode
 * @param dos destination stream
 * @throws IOException on any write failure
 */
public void serialize(RangeSliceCommand sliceCommand, DataOutputStream dos) throws IOException {
    dos.writeUTF(sliceCommand.keyspace);
    dos.writeUTF(sliceCommand.column_family);
    // Length-prefixed optional super_column: 0 length means absent.
    dos.writeInt(sliceCommand.super_column == null ? 0 : sliceCommand.super_column.length);
    if (sliceCommand.super_column != null)
        dos.write(sliceCommand.super_column);

    // The predicate is a Thrift struct, embedded via binary-protocol serialization.
    TSerializer ser = new TSerializer(new TBinaryProtocol.Factory());
    FBUtilities.serialize(ser, sliceCommand.predicate, dos);
    AbstractBounds.serializer().serialize(sliceCommand.range, dos);
    dos.writeInt(sliceCommand.max_keys);
}

From source file: com.cloudera.impala.catalog.Db.java

License: Apache License

/**
 * Adds the user defined function fn to metastore DB params. fn is
 * serialized to thrift using TCompactProtocol and then base64-encoded
 * to be compatible with the HMS' representation of params.
 *
 * @param fn a non-builtin, non-Java function to persist
 * @return true on success; false if serialization or the HMS update failed
 *         (the error is logged, not rethrown)
 */
private boolean addFunctionToDbParams(Function fn) {
    // Builtins and Java functions are persisted elsewhere; never via DB params.
    Preconditions.checkState(fn.getBinaryType() != TFunctionBinaryType.BUILTIN
            && fn.getBinaryType() != TFunctionBinaryType.JAVA);
    try {
        final TSerializer serializer = new TSerializer(new TCompactProtocol.Factory());
        final String base64Fn = Base64.encodeBase64String(serializer.serialize(fn.toThrift()));
        final String fnKey = FUNCTION_INDEX_PREFIX + fn.signatureString();
        // HMS rejects parameter values over its size limit; fail fast with a clear error.
        if (base64Fn.length() > HIVE_METASTORE_DB_PARAM_LIMIT_BYTES) {
            throw new ImpalaRuntimeException("Serialized function size exceeded HMS 4K byte limit");
        }
        putToHmsParameters(fnKey, base64Fn);
        return true;
    } catch (ImpalaException | TException e) {
        LOG.error("Error adding function " + fn.getName() + " to DB params", e);
        return false;
    }
}

From source file: com.cloudera.impala.catalog.PartitionStatsUtil.java

License: Apache License

/**
 * Serialises a TPartitionStats object to a MetaStore partition object, for later
 * persistence to the HMS itself./*  ww w  .  j  a  va 2  s  .  c om*/
 */
public static void partStatsToParameters(TPartitionStats partStats,
        org.apache.hadoop.hive.metastore.api.Partition msPartition) {
    // null stats means logically delete the stats from this partition
    if (partStats == null) {
        deletePartStats(msPartition);
        return;
    }

    // The HMS has a 4k (as of CDH5.2) limit on the length of any parameter string. The
    // serialised version of the partition stats is often larger than this. Therefore, we
    // naively 'chunk' the byte string into 4k pieces, and store the number of pieces in a
    // separate parameter field.
    //
    // The object itself is first serialised by Thrift, and then base-64 encoded to be a
    // valid string. This inflates its length somewhat; we may want to consider a
    // different scheme or at least understand why this scheme doesn't seem much more
    // effective than an ASCII representation.
    try {
        TCompactProtocol.Factory protocolFactory = new TCompactProtocol.Factory();
        TSerializer serializer = new TSerializer(protocolFactory);
        byte[] serialized = serializer.serialize(partStats);
        String base64 = new String(Base64.encodeBase64(serialized));
        List<String> chunks = chunkStringForHms(base64, HMS_MAX_CHUNKLEN);
        msPartition.putToParameters(INTERMEDIATE_STATS_NUM_CHUNKS, Integer.toString(chunks.size()));
        for (int i = 0; i < chunks.size(); ++i) {
            msPartition.putToParameters(INTERMEDIATE_STATS_CHUNK_PREFIX + i, chunks.get(i));
        }
    } catch (TException e) {
        LOG.info("Error saving partition stats: ", e);
        // TODO: What to throw here?
    }
}

From source file: com.cloudera.impala.common.JniUtil.java

License: Apache License

/**
 * Collect the JVM's memory statistics into a thrift structure for translation into
 * Impala metrics by the backend. A synthetic 'total' memory pool is included with
 * aggregate statistics for all real pools.
 *
 * @param argument thrift-serialized TGetJvmMetricsRequest; its memory_pool field
 *        selects one pool by name, the special name "total" selects the synthetic
 *        aggregate, and get_all selects everything
 * @return thrift-serialized TGetJvmMetricsResponse
 * @throws ImpalaException if the request cannot be deserialized or the response
 *         cannot be serialized
 */
    public static byte[] getJvmMetrics(byte[] argument) throws ImpalaException {
        TGetJvmMetricsRequest request = new TGetJvmMetricsRequest();
        JniUtil.deserializeThrift(protocolFactory_, request, argument);

        TGetJvmMetricsResponse jvmMetrics = new TGetJvmMetricsResponse();
        jvmMetrics.setMemory_pools(new ArrayList<TJvmMemoryPool>());
        // Synthetic pool that accumulates usage across all real pools below.
        TJvmMemoryPool totalUsage = new TJvmMemoryPool();
        boolean is_total = request.getMemory_pool() != null && request.getMemory_pool().equals("total");

        // The 'total' pool is added to the response up front; its counters are
        // filled in as the real pools are visited.
        if (request.get_all || is_total) {
            totalUsage.setName("total");
            jvmMetrics.getMemory_pools().add(totalUsage);
        }
        for (MemoryPoolMXBean memBean : ManagementFactory.getMemoryPoolMXBeans()) {
            // Visit a pool if everything was requested, the total is being built,
            // or this is the single pool the caller named.
            if (request.get_all || is_total || memBean.getName().equals(request.getMemory_pool())) {
                TJvmMemoryPool usage = new TJvmMemoryPool();
                MemoryUsage beanUsage = memBean.getUsage();
                usage.setCommitted(beanUsage.getCommitted());
                usage.setInit(beanUsage.getInit());
                usage.setMax(beanUsage.getMax());
                usage.setUsed(beanUsage.getUsed());
                usage.setName(memBean.getName());

                // Aggregate current usage into the synthetic total.
                totalUsage.committed += beanUsage.getCommitted();
                totalUsage.init += beanUsage.getInit();
                totalUsage.max += beanUsage.getMax();
                totalUsage.used += beanUsage.getUsed();

                MemoryUsage peakUsage = memBean.getPeakUsage();
                usage.setPeak_committed(peakUsage.getCommitted());
                usage.setPeak_init(peakUsage.getInit());
                usage.setPeak_max(peakUsage.getMax());
                usage.setPeak_used(peakUsage.getUsed());

                // Aggregate peak usage into the synthetic total.
                totalUsage.peak_committed += peakUsage.getCommitted();
                totalUsage.peak_init += peakUsage.getInit();
                totalUsage.peak_max += peakUsage.getMax();
                totalUsage.peak_used += peakUsage.getUsed();

                // When only the total was requested, individual pools are not added.
                // When a single named pool was requested, stop after the first match.
                if (!is_total) {
                    jvmMetrics.getMemory_pools().add(usage);
                    if (!request.get_all)
                        break;
                }
            }
        }
        TSerializer serializer = new TSerializer(protocolFactory_);
        try {
            return serializer.serialize(jvmMetrics);
        } catch (TException e) {
            throw new InternalException(e.getMessage());
        }
    }

From source file: com.cloudera.impala.extdatasource.ExternalDataSourceExecutor.java

License: Apache License

/**
 * JNI entry point: deserializes a TPrepareParams request, delegates to
 * {@code prepare(TPrepareParams)}, and returns the thrift-serialized
 * TPrepareResult.
 *
 * @param thriftParams serialized TPrepareParams
 * @return serialized TPrepareResult
 * @throws ImpalaException if deserialization, preparation, or serialization fails
 */
public byte[] prepare(byte[] thriftParams) throws ImpalaException {
    final TPrepareParams params = new TPrepareParams();
    JniUtil.deserializeThrift(protocolFactory_, params, thriftParams);
    final TPrepareResult result = prepare(params);
    final TSerializer serializer = new TSerializer(protocolFactory_);
    try {
        return serializer.serialize(result);
    } catch (TException e) {
        throw new InternalException(e.getMessage(), e);
    }
}