Example usage for org.apache.thrift.protocol TBinaryProtocol TBinaryProtocol

List of usage examples for org.apache.thrift.protocol TBinaryProtocol TBinaryProtocol

Introduction

In this page you can find the example usage for org.apache.thrift.protocol TBinaryProtocol TBinaryProtocol.

Prototype

public TBinaryProtocol(TTransport trans, boolean strictRead, boolean strictWrite) 

Source Link

Usage

From source file:com.adintellig.hbase.thrift.DemoClient.java

License:Apache License

/**
 * End-to-end demo of the HBase Thrift API: drops and recreates a demo table,
 * writes rows (including invalid UTF-8 row keys, which HBase accepts as raw
 * binary), mutates/deletes cells, reads back cell versions, and range-scans.
 *
 * Fixes relative to the original demo:
 *  - the "entry:sqr" delete Mutation was built but never added to the batch,
 *    so mutateRowTs silently skipped it;
 *  - scanners are now closed via scannerClose (they leaked server-side);
 *  - the loop-invariant NumberFormat is created once.
 *
 * @throws IOError          server-side I/O failure
 * @throws TException       Thrift transport/protocol failure
 * @throws IllegalArgument  request rejected by the server
 * @throws AlreadyExists    declared for createTable (also handled locally)
 */
private void run() throws IOError, TException, IllegalArgument, AlreadyExists {

    TTransport transport = new TSocket(host, port);
    TProtocol protocol = new TBinaryProtocol(transport, true, true);
    Hbase.Client client = new Hbase.Client(protocol);

    transport.open();

    byte[] t = bytes("demo_table");

    //
    // Scan all tables, look for the demo table and delete it.
    //
    System.out.println("scanning tables...");
    for (ByteBuffer name : client.getTableNames()) {
        System.out.println("  found: " + utf8(name.array()));
        if (utf8(name.array()).equals(utf8(t))) {
            // A table must be disabled before it can be deleted.
            if (client.isTableEnabled(name)) {
                System.out.println("    disabling table: " + utf8(name.array()));
                client.disableTable(name);
            }
            System.out.println("    deleting table: " + utf8(name.array()));
            client.deleteTable(name);
        }
    }

    //
    // Create the demo table with two column families, entry: and unused:
    //
    ArrayList<ColumnDescriptor> columns = new ArrayList<ColumnDescriptor>();
    ColumnDescriptor col = null;
    col = new ColumnDescriptor();
    col.name = ByteBuffer.wrap(bytes("entry:"));
    col.maxVersions = 10;
    columns.add(col);
    col = new ColumnDescriptor();
    col.name = ByteBuffer.wrap(bytes("unused:"));
    columns.add(col);

    System.out.println("creating table: " + utf8(t));
    try {
        client.createTable(ByteBuffer.wrap(t), columns);
    } catch (AlreadyExists ae) {
        System.out.println("WARN: " + ae.message);
    }

    System.out.println("column families in " + utf8(t) + ": ");
    Map<ByteBuffer, ColumnDescriptor> columnMap = client.getColumnDescriptors(ByteBuffer.wrap(t));
    for (ColumnDescriptor col2 : columnMap.values()) {
        System.out.println(
                "  column: " + utf8(col2.name.array()) + ", maxVer: " + Integer.toString(col2.maxVersions));
    }

    //
    // Test UTF-8 handling
    //
    byte[] invalid = { (byte) 'f', (byte) 'o', (byte) 'o', (byte) '-', (byte) 0xfc, (byte) 0xa1, (byte) 0xa1,
            (byte) 0xa1, (byte) 0xa1 };
    byte[] valid = { (byte) 'f', (byte) 'o', (byte) 'o', (byte) '-', (byte) 0xE7, (byte) 0x94, (byte) 0x9F,
            (byte) 0xE3, (byte) 0x83, (byte) 0x93, (byte) 0xE3, (byte) 0x83, (byte) 0xBC, (byte) 0xE3,
            (byte) 0x83, (byte) 0xAB };

    ArrayList<Mutation> mutations;
    // non-utf8 is fine for data
    mutations = new ArrayList<Mutation>();
    mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(invalid), false));
    client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(bytes("foo")), mutations, null);

    // try empty strings
    mutations = new ArrayList<Mutation>();
    mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:")), ByteBuffer.wrap(bytes("")), false));
    client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(bytes("")), mutations, null);

    // this row name is valid utf8
    mutations = new ArrayList<Mutation>();
    mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(valid), false));
    client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(valid), mutations, null);

    // non-utf8 is now allowed in row names because HBase stores values as binary
    mutations = new ArrayList<Mutation>();
    mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(invalid), false));
    client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(invalid), mutations, null);

    // Run a scanner on the rows we just created
    ArrayList<ByteBuffer> columnNames = new ArrayList<ByteBuffer>();
    columnNames.add(ByteBuffer.wrap(bytes("entry:")));

    System.out.println("Starting scanner...");
    int scanner = client.scannerOpen(ByteBuffer.wrap(t), ByteBuffer.wrap(bytes("")), columnNames, null);

    while (true) {
        List<TRowResult> entry = client.scannerGet(scanner);
        if (entry.isEmpty()) {
            break;
        }
        printRow(entry);
    }
    // Release server-side scanner resources (was previously leaked).
    client.scannerClose(scanner);

    //
    // Run some operations on a bunch of rows
    //
    // format row keys as "00000" to "00100" (loop-invariant, build once)
    NumberFormat nf = NumberFormat.getInstance();
    nf.setMinimumIntegerDigits(5);
    nf.setGroupingUsed(false);
    for (int i = 100; i >= 0; --i) {
        byte[] row = bytes(nf.format(i));

        mutations = new ArrayList<Mutation>();
        mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("unused:")),
                ByteBuffer.wrap(bytes("DELETE_ME")), false));
        client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, null);
        printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), null));
        client.deleteAllRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), null);

        mutations = new ArrayList<Mutation>();
        mutations.add(
                new Mutation(false, ByteBuffer.wrap(bytes("entry:num")), ByteBuffer.wrap(bytes("0")), false));
        mutations.add(
                new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(bytes("FOO")), false));
        client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, null);
        printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), null));

        Mutation m = null;
        mutations = new ArrayList<Mutation>();
        m = new Mutation();
        m.column = ByteBuffer.wrap(bytes("entry:foo"));
        m.isDelete = true;
        mutations.add(m);
        m = new Mutation();
        m.column = ByteBuffer.wrap(bytes("entry:num"));
        m.value = ByteBuffer.wrap(bytes("-1"));
        mutations.add(m);
        client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, null);
        printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), null));

        mutations = new ArrayList<Mutation>();
        mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:num")),
                ByteBuffer.wrap(bytes(Integer.toString(i))), false));
        mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:sqr")),
                ByteBuffer.wrap(bytes(Integer.toString(i * i))), false));
        client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, null);
        printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), null));

        // sleep to force later timestamp
        try {
            Thread.sleep(50);
        } catch (InterruptedException e) {
            // no-op
        }

        mutations.clear();
        m = new Mutation();
        m.column = ByteBuffer.wrap(bytes("entry:num"));
        m.value = ByteBuffer.wrap(bytes("-999"));
        mutations.add(m);
        m = new Mutation();
        m.column = ByteBuffer.wrap(bytes("entry:sqr"));
        m.isDelete = true;
        mutations.add(m); // BUGFIX: this delete mutation was built but never added
        client.mutateRowTs(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, 1, null); // shouldn't override latest
        printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), null));

        List<TCell> versions = client.getVer(ByteBuffer.wrap(t), ByteBuffer.wrap(row),
                ByteBuffer.wrap(bytes("entry:num")), 10, null);
        printVersions(ByteBuffer.wrap(row), versions);
        if (versions.isEmpty()) {
            System.out.println("FATAL: wrong # of versions");
            System.exit(-1);
        }

        List<TCell> result = client.get(ByteBuffer.wrap(t), ByteBuffer.wrap(row),
                ByteBuffer.wrap(bytes("entry:foo")), null);
        if (result.isEmpty() == false) {
            System.out.println("FATAL: shouldn't get here");
            System.exit(-1);
        }

        System.out.println("");
    }

    // scan all rows/columnNames

    columnNames.clear();
    for (ColumnDescriptor col2 : client.getColumnDescriptors(ByteBuffer.wrap(t)).values()) {
        System.out.println("column with name: " + new String(col2.name.array()));
        System.out.println(col2.toString());

        columnNames.add(col2.name);
    }

    System.out.println("Starting scanner...");
    scanner = client.scannerOpenWithStop(ByteBuffer.wrap(t), ByteBuffer.wrap(bytes("00020")),
            ByteBuffer.wrap(bytes("00040")), columnNames, null);

    while (true) {
        List<TRowResult> entry = client.scannerGet(scanner);
        if (entry.isEmpty()) {
            System.out.println("Scanner finished");
            break;
        }
        printRow(entry);
    }
    // Release server-side scanner resources (was previously leaked).
    client.scannerClose(scanner);

    transport.close();
}

From source file:com.bigdata.dastor.client.RingCache.java

License:Apache License

/**
 * Refreshes the cached token-to-endpoint ring map by querying seed nodes.
 * Seeds are tried in order; the first one that answers wins and the loop
 * exits. Thrift failures are logged at debug and the next seed is tried.
 *
 * Fix: the TSocket was never closed, leaking one socket per invocation;
 * it is now released in a finally block on every path.
 */
public void refreshEndPointMap() {
    for (String seed : seeds_) {
        TSocket socket = null;
        try {
            socket = new TSocket(seed, port_);
            TBinaryProtocol binaryProtocol = new TBinaryProtocol(socket, false, false);
            Dastor.Client client = new Dastor.Client(binaryProtocol);
            socket.open();

            // Server returns the token map as a JSON-encoded string-to-string map.
            Map<String, String> tokenToHostMap = (Map<String, String>) JSONValue
                    .parse(client.get_string_property(DastorThriftServer.TOKEN_MAP));

            BiMap<Token, InetAddress> tokenEndpointMap = HashBiMap.create();
            for (Map.Entry<String, String> entry : tokenToHostMap.entrySet()) {
                Token token = StorageService.getPartitioner().getTokenFactory().fromString(entry.getKey());
                String host = entry.getValue();
                try {
                    tokenEndpointMap.put(token, InetAddress.getByName(host));
                } catch (UnknownHostException e) {
                    throw new AssertionError(e); // host strings are IPs
                }
            }

            tokenMetadata = new TokenMetadata(tokenEndpointMap);

            break; // one healthy seed is enough
        } catch (TException e) {
            /* let the Exception go and try another seed. log this though */
            logger_.debug("Error contacting seed " + seed + " " + e.getMessage());
        } finally {
            // Always release the socket; previously it leaked on every call.
            if (socket != null && socket.isOpen()) {
                socket.close();
            }
        }
    }
}

From source file:com.bustleandflurry.camel.component.scribe.ScribeProducer.java

License:Apache License

/**
 * Creates a producer bound to the Scribe endpoint's host and port, using a
 * framed socket transport and the non-strict binary protocol that Scribe
 * servers expect. The transport is constructed here but not opened.
 *
 * @param endpoint the Camel endpoint carrying the Scribe server address/port
 */
public ScribeProducer(ScribeEndpoint endpoint) {
    super(endpoint);
    this.endpoint = endpoint;

    if (LOG.isDebugEnabled()) {
        LOG.debug("Binding to server address: " + this.endpoint.getAddress() + " using port: "
                + String.valueOf(this.endpoint.getPort()));
    }

    TSocket socket = new TSocket(this.endpoint.getAddress().getHostAddress(), this.endpoint.getPort());
    transport = new TFramedTransport(socket);
    TBinaryProtocol wireProtocol = new TBinaryProtocol(transport, false, false);
    client = new Scribe.Client(wireProtocol);
}

From source file:com.cloudera.flume.handlers.scribe.TestScribeSource.java

License:Apache License

/**
 * Test that events can be sent and received, and that the correct metadata is
 * extracted./*from   www. ja  v  a 2  s  . c o  m*/
 */
@Test
public void testScribeEventSourceAPI() throws IOException, TException, InterruptedException {
    ScribeEventSource src = new ScribeEventSource();
    src.open();

    // Open the client connection
    TTransport transport = new TSocket("localhost", FlumeConfiguration.get().getScribeSourcePort());
    // scribe clients used framed transports
    transport = new TFramedTransport(transport);
    // scribe clients do not use strict write
    TProtocol protocol = new TBinaryProtocol(transport, false, false);
    transport.open();
    scribe.Client client = new scribe.Client(protocol);

    // Note - there is a tiny possibility of a race here, which is why we retry
    for (int i = 0; i < 3; ++i) {
        if (client.getStatus() != fb_status.ALIVE) {
            Thread.sleep(500);
        } else {
            break;
        }
    }
    assertEquals("ScribeEventSource did not come up in time!", fb_status.ALIVE, client.getStatus());

    LogEntry l1 = new LogEntry("mycategory", "mymessage");
    List<LogEntry> logs = new ArrayList<LogEntry>();
    logs.add(l1);
    client.Log(logs);

    Event e = src.next();

    src.close();

    assertEquals("mymessage", new String(e.getBody()), "mymessage");
    assertEquals("mycategory", new String(e.getAttrs().get(ScribeEventSource.SCRIBE_CATEGORY)));
}

From source file:com.cloudera.flume.handlers.scribe.TestScribeSource.java

License:Apache License

/**
 * Verifies the source survives repeated open/close cycles and still accepts
 * a scribe client connection afterwards.
 *
 * Fix: the client-side Thrift transport was never closed at test end.
 */
@Test
public void testOpenClose() throws IOException, TException, InterruptedException {
    EventSource src = ScribeEventSource.builder().build("45872");
    for (int i = 0; i < 10; ++i) {
        src.open();
        src.close();
    }
    src.open();

    // Open the client connection
    TTransport transport = new TSocket("localhost", 45872);
    transport = new TFramedTransport(transport);
    // scribe clients do not use strict write
    TProtocol protocol = new TBinaryProtocol(transport, false, false);
    transport.open();
    scribe.Client client = new scribe.Client(protocol);

    // Note - there is a tiny possibility of a race here, which is why we retry
    for (int i = 0; i < 3; ++i) {
        if (client.getStatus() != fb_status.ALIVE) {
            Thread.sleep(500);
        } else {
            break;
        }
    }
    assertEquals("ScribeEventSource did not come up in time!", fb_status.ALIVE, client.getStatus());
    // Release the client-side transport (previously leaked).
    transport.close();
    src.close();
}

From source file:com.facebook.infrastructure.importer.DataImporter.java

License:Apache License

/**
 * Opens a new Thrift connection and returns a Cassandra client bound to it.
 * Any previously held transport is closed before the new one is installed
 * in {@code transport_}.
 *
 * NOTE(review): the {@code hosts} array and the {@code roundRobin_} increment
 * are effectively dead — the socket is hard-coded to "hadoop071..." and the
 * round-robin index is advanced but never used to pick a host. This looks
 * like leftover debugging state; confirm before relying on load balancing.
 *
 * NOTE(review): if {@code transport_.open()} fails, the exception is printed
 * and a client over an unopened transport is still returned — callers will
 * fail on first use. Presumably intentional best-effort; verify.
 *
 * @return a Cassandra.Client over the (possibly unopened) transport.
 * @throws SocketException declared, though not thrown in the visible body.
 */
public Cassandra.Client connect() throws SocketException {
    //      String host = "hadoop034.sf2p.facebook.com";
    String[] hosts = new String[] { "hadoop038.sf2p.facebook.com", "hadoop039.sf2p.facebook.com",
            "hadoop040.sf2p.facebook.com", "hadoop041.sf2p.facebook.com" };
    int port = 9160;

    //TNonBlockingSocket socket = new TNonBlockingSocket(hosts[roundRobin_], port);
    TSocket socket = new TSocket("hadoop071.sf2p.facebook.com", port);
    roundRobin_ = (roundRobin_ + 1) % 4;
    if (transport_ != null)
        transport_.close(); // drop any previous connection before replacing it
    transport_ = socket;

    TBinaryProtocol binaryProtocol = new TBinaryProtocol(transport_, false, false);
    Cassandra.Client peerstorageClient = new Cassandra.Client(binaryProtocol);
    try {
        transport_.open();
    } catch (Exception e) {
        e.printStackTrace();
    }
    return peerstorageClient;
}

From source file:com.facebook.infrastructure.importer.StressTest.java

License:Apache License

/**
 * Opens a fresh Thrift socket to {@code server_} on port 9160 and returns a
 * Cassandra client bound to it. Any previously held transport is closed
 * before being replaced. If opening fails, the stack trace is printed and a
 * client over the unopened transport is still returned.
 *
 * @return a Cassandra.Client over the (possibly unopened) transport.
 * @throws SocketException declared, though not thrown in the visible body.
 */
public Cassandra.Client connect() throws SocketException {
    final int port = 9160;
    TSocket freshSocket = new TSocket(server_, port);
    if (transport_ != null) {
        transport_.close();
    }
    transport_ = freshSocket;

    TBinaryProtocol wireProtocol = new TBinaryProtocol(transport_, false, false);
    Cassandra.Client storageClient = new Cassandra.Client(wireProtocol);
    try {
        transport_.open();
    } catch (Exception e) {
        e.printStackTrace();
    }
    return storageClient;
}

From source file:com.facebook.presto.cassandra.CassandraThriftConnectionFactory.java

License:Apache License

/**
 * Opens a Thrift transport through the configured transport factory and
 * wraps it in a strict binary-protocol Cassandra client.
 *
 * @param host             server hostname
 * @param port             server port
 * @param factoryClassName class name of the client transport factory
 * @return a connected Cassandra.Client
 * @throws IOException if the transport cannot be opened (cause preserved)
 */
public Cassandra.Client createConnection(String host, Integer port, String factoryClassName)
        throws IOException {
    try {
        TTransport wire = getClientTransportFactory(factoryClassName).openTransport(host, port);
        TBinaryProtocol wireProtocol = new TBinaryProtocol(wire, true, true);
        return new Cassandra.Client(wireProtocol);
    } catch (Exception e) {
        throw new IOException("Unable to connect to server " + host + ":" + port, e);
    }
}

From source file:com.github.odiszapc.casskit.repair.CassandraClientFactory.java

License:Apache License

@Override
public ICassandraClient newClient(String host, int port, String ks, int frameSize) throws Exception {

    TTransport transport = new TFramedTransport(new TSocket(host, port), frameSize);
    TBinaryProtocol protocol = new TBinaryProtocol(transport, true, true);
    Cassandra.Client client = new Cassandra.Client(protocol);
    transport.open();/*ww w .  j a  v a2s .c om*/
    client.set_keyspace(ks);

    return new CassandraClient(client, ks);
}

From source file:com.impetus.client.cassandra.schemamanager.CassandraSchemaManager.java

License:Apache License

/**
 * Initiates the Cassandra Thrift client, trying each configured host in
 * turn and returning as soon as one socket opens (authenticating first when
 * a userName is configured). On success the open connection is left bound
 * to {@code cassandra_client}.
 *
 * Fix: on transport failure the half-open socket was leaked before moving
 * to the next node; it is now closed on both failure paths.
 *
 * @return true once a client connection is established.
 * @throws IllegalArgumentException  if a host is null or the port is not numeric.
 * @throws SchemaGenerationException if every host fails (last transport error
 *                                   as cause) or a non-transport error occurs.
 */
protected boolean initiateClient() {
    Throwable message = null;

    for (String host : hosts) {
        // port.isEmpty() is checked separately because isNumeric("") is true
        // in older commons-lang versions.
        if (host == null || !StringUtils.isNumeric(port) || port.isEmpty()) {
            log.error("Host or port should not be null, Port should be numeric.");
            throw new IllegalArgumentException("Host or port should not be null, Port should be numeric.");
        }
        // Prefer an explicitly configured Thrift port over the default one.
        int thriftPort = externalProperties.get(CassandraConstants.THRIFT_PORT) != null
                ? Integer.parseInt((String) externalProperties.get(CassandraConstants.THRIFT_PORT))
                : Integer.parseInt(port);
        TSocket socket = new TSocket(host, thriftPort);
        TTransport transport = new TFramedTransport(socket);
        TProtocol protocol = new TBinaryProtocol(transport, true, true);
        cassandra_client = new Cassandra.Client(protocol);
        try {
            if (!socket.isOpen()) {
                socket.open();
                if (userName != null) {
                    Map<String, String> credentials = new HashMap<String, String>();
                    credentials.put("username", userName);
                    credentials.put("password", password);
                    AuthenticationRequest auth_request = new AuthenticationRequest(credentials);
                    cassandra_client.login(auth_request);
                }
            }
            return true;
        } catch (TTransportException e) {
            message = e;
            // Release the half-open socket before trying the next node
            // (previously leaked).
            if (socket.isOpen()) {
                socket.close();
            }
            log.warn("Error while opening socket for host {}, skipping for next available node ", host);
        } catch (Exception e) {
            if (socket.isOpen()) {
                socket.close();
            }
            log.error("Error during creating schema in cassandra, Caused by: .", e);
            throw new SchemaGenerationException(e, "Cassandra");
        }
    }
    throw new SchemaGenerationException("Error while opening socket, Caused by: .", message, "Cassandra");
}