Example usage for java.nio ByteBuffer wrap

Introduction

On this page you can find usage examples for java.nio.ByteBuffer.wrap.

Prototype

public static ByteBuffer wrap(byte[] array) 

Document

Creates a new byte buffer by wrapping the given byte array.
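
The wrapped buffer is backed by the array itself, so no copy is made: the buffer's capacity and limit are the array's length, its position is zero, and changes to the array are visible through the buffer (and vice versa). A minimal, self-contained sketch illustrating this (the class name WrapDemo is only for illustration):

import java.nio.ByteBuffer;

public class WrapDemo {
    public static void main(String[] args) {
        byte[] data = { 1, 2, 3, 4 };

        // Wrapping does not copy the array; the buffer is a view over it.
        ByteBuffer buffer = ByteBuffer.wrap(data);

        System.out.println(buffer.position()); // 0
        System.out.println(buffer.limit());    // 4 (the array length)

        // Changes to the backing array show up in the buffer, and vice versa.
        data[0] = 42;
        System.out.println(buffer.get(0));     // 42
    }
}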

Usage

From source file:KinesisStreamDataProducer.java

public static void main(String[] args) throws Exception {
    init();

    final String myStreamName = "sparkStream";

    logger.info("Putting records in stream : " + myStreamName);
    // Write records to the stream
    for (int j = 0; j < 100; j++) {
        PutRecordRequest putRecordRequest = new PutRecordRequest();
        putRecordRequest.setStreamName(myStreamName);
        putRecordRequest.setData(
                ByteBuffer.wrap(String.format("testData-%d testData-%d testData-%d", j, j, j).getBytes()));
        putRecordRequest.setPartitionKey(String.format("partitionKey-%d", j));
        PutRecordResult putRecordResult = kinesisClient.putRecord(putRecordRequest);
        System.out.println("Successfully putrecord, partition key : " + putRecordRequest.getPartitionKey()
                + ", ShardID : " + putRecordResult.getShardId());
    }
}

From source file:com.pinterest.terrapin.client.ClientTool.java

public static void main(String[] args) throws Exception {
    PropertiesConfiguration config = new PropertiesConfiguration(System.getProperty("terrapin.config"));
    TerrapinClient client = new TerrapinClient(config, 9090, 1000, 5000);
    String key = args[1];
    TerrapinSingleResponse response = client.getOne(args[0], // fileset
            ByteBuffer.wrap(key.getBytes())).get();
    if (response.isSetErrorCode()) {
        System.out.println("Got error " + response.getErrorCode().toString());
    } else if (response.isSetValue()) {
        System.out.println("Got value.");
        System.out.println(new String(response.getValue()));
    } else {
        System.out.println("Key " + key + " not found.");
    }
    System.exit(0);
}

From source file:com.twitter.distributedlog.basic.ConsoleProxyMultiWriter.java

public static void main(String[] args) throws Exception {
    if (2 != args.length) {
        System.out.println(HELP);
        return;
    }

    String finagleNameStr = args[0];
    final String streamList = args[1];

    DistributedLogClient client = DistributedLogClientBuilder.newBuilder()
            .clientId(ClientId.apply("console-proxy-writer")).name("console-proxy-writer").thriftmux(true)
            .finagleNameStr(finagleNameStr).build();
    String[] streamNameList = StringUtils.split(streamList, ',');
    DistributedLogMultiStreamWriter multiStreamWriter = DistributedLogMultiStreamWriter.newBuilder()
            .streams(Lists.newArrayList(streamNameList)).bufferSize(0).client(client).flushIntervalMs(0)
            .firstSpeculativeTimeoutMs(10000).maxSpeculativeTimeoutMs(20000).requestTimeoutMs(50000).build();

    // Setup Terminal
    Terminal terminal = Terminal.setupTerminal();
    ConsoleReader reader = new ConsoleReader();
    String line;
    while ((line = reader.readLine(PROMPT_MESSAGE)) != null) {
        multiStreamWriter.write(ByteBuffer.wrap(line.getBytes(UTF_8)))
                .addEventListener(new FutureEventListener<DLSN>() {
                    @Override
                    public void onFailure(Throwable cause) {
                        System.out.println("Encountered error on writing data");
                        cause.printStackTrace(System.err);
                        Runtime.getRuntime().exit(0);
                    }

                    @Override
                    public void onSuccess(DLSN value) {
                        // done
                    }
                });
    }

    multiStreamWriter.close();
    client.close();
}

From source file:com.knewton.mapreduce.cassandra.WriteSampleSSTable.java

/**
 * Writes a sample SSTable that can be used for running the example job {@link SSTableMRExample}
 *
 * @param args
 *            Args to be parsed
 * @throws Exception
 */
public static void main(String[] args) throws Exception {
    buildParametersFromArgs(args);

    IPartitioner partitioner = StorageService.getPartitioner();
    String schema = String.format(
            "CREATE TABLE %s.%s (studentid 'LongType', " + "eventid 'LongType'," + "data 'BytesType', "
                    + "PRIMARY KEY (studentid, eventid))" + " WITH COMPACT STORAGE",
            KEYSPACE_NAME, COLUMN_FAMILY_NAME);

    String insertStatement = String.format("INSERT INTO %s.%s (studentid, eventid, data) " + "VALUES (?, ?, ?)",
            KEYSPACE_NAME, COLUMN_FAMILY_NAME);

    CQLSSTableWriter tableWriter = CQLSSTableWriter.builder().inDirectory(tableDirectory)
            .withPartitioner(partitioner).forTable(schema).using(insertStatement).build();

    for (int i = 0; i < numberOfStudents; i++) {
        for (int j = 0; j < eventsPerStudent; j++) {
            StudentEvent studentEvent = RandomStudentEventGenerator.getRandomStudentEvent();

            ByteBuffer columnValue = ByteBuffer
                    .wrap(RandomStudentEventGenerator.serializeStudentEventData(studentEvent.getData()));

            tableWriter.addRow(RandomStudentEventGenerator.getRandomId(), studentEvent.getId(), columnValue);
        }
    }

    tableWriter.close();
}

From source file:com.boundary.aws.kinesis.Sample.java

public static void main(String[] args) throws Exception {
    init();

    final String myStreamName = "boundary-test-stream";
    final Integer myStreamSize = 1;

    // Create a stream. The number of shards determines the provisioned
    // throughput.

    CreateStreamRequest createStreamRequest = new CreateStreamRequest();
    createStreamRequest.setStreamName(myStreamName);
    createStreamRequest.setShardCount(myStreamSize);

    kinesisClient.createStream(createStreamRequest);
    // The stream is now being created.
    LOG.info("Creating Stream : " + myStreamName);
    waitForStreamToBecomeAvailable(myStreamName);

    // list all of my streams
    ListStreamsRequest listStreamsRequest = new ListStreamsRequest();
    listStreamsRequest.setLimit(10);
    ListStreamsResult listStreamsResult = kinesisClient.listStreams(listStreamsRequest);
    List<String> streamNames = listStreamsResult.getStreamNames();
    while (listStreamsResult.isHasMoreStreams()) {
        if (streamNames.size() > 0) {
            listStreamsRequest.setExclusiveStartStreamName(streamNames.get(streamNames.size() - 1));
        }

        listStreamsResult = kinesisClient.listStreams(listStreamsRequest);
        streamNames.addAll(listStreamsResult.getStreamNames());

    }
    LOG.info("Printing my list of streams : ");

    // print all of my streams.
    if (!streamNames.isEmpty()) {
        System.out.println("List of my streams: ");
    }
    for (int i = 0; i < streamNames.size(); i++) {
        System.out.println(streamNames.get(i));
    }

    LOG.info("Putting records in stream : " + myStreamName);
    // Write 100 records to the stream
    for (int j = 0; j < 100; j++) {
        PutRecordRequest putRecordRequest = new PutRecordRequest();
        putRecordRequest.setStreamName(myStreamName);
        putRecordRequest.setData(ByteBuffer.wrap(String.format("testData-%d", j).getBytes()));
        putRecordRequest.setPartitionKey(String.format("partitionKey-%d", j));
        PutRecordResult putRecordResult = kinesisClient.putRecord(putRecordRequest);
        System.out.println("Successfully putrecord, partition key : " + putRecordRequest.getPartitionKey()
                + ", ShardID : " + putRecordResult.getShardId());
    }

    // Delete the stream.
    LOG.info("Deleting stream : " + myStreamName);
    DeleteStreamRequest deleteStreamRequest = new DeleteStreamRequest();
    deleteStreamRequest.setStreamName(myStreamName);

    kinesisClient.deleteStream(deleteStreamRequest);
    // The stream is now being deleted.
    LOG.info("Stream is now being deleted : " + myStreamName);
}

From source file:AmazonKinesisSample.java

public static void main(String[] args) throws Exception {
    init();

    final String myStreamName = "myFirstStream";
    final Integer myStreamSize = 1;

    // Create a stream. The number of shards determines the provisioned throughput.

    CreateStreamRequest createStreamRequest = new CreateStreamRequest();
    createStreamRequest.setStreamName(myStreamName);
    createStreamRequest.setShardCount(myStreamSize);

    kinesisClient.createStream(createStreamRequest);
    // The stream is now being created.
    LOG.info("Creating Stream : " + myStreamName);
    waitForStreamToBecomeAvailable(myStreamName);

    // list all of my streams
    ListStreamsRequest listStreamsRequest = new ListStreamsRequest();
    listStreamsRequest.setLimit(10);
    ListStreamsResult listStreamsResult = kinesisClient.listStreams(listStreamsRequest);
    List<String> streamNames = listStreamsResult.getStreamNames();
    while (listStreamsResult.isHasMoreStreams()) {
        if (streamNames.size() > 0) {
            listStreamsRequest.setExclusiveStartStreamName(streamNames.get(streamNames.size() - 1));
        }

        listStreamsResult = kinesisClient.listStreams(listStreamsRequest);
        streamNames.addAll(listStreamsResult.getStreamNames());

    }
    LOG.info("Printing my list of streams : ");

    // print all of my streams.
    if (!streamNames.isEmpty()) {
        System.out.println("List of my streams: ");
    }
    for (int i = 0; i < streamNames.size(); i++) {
        System.out.println(streamNames.get(i));
    }

    LOG.info("Putting records in stream : " + myStreamName);
    // Write 10 records to the stream
    for (int j = 0; j < 10; j++) {
        PutRecordRequest putRecordRequest = new PutRecordRequest();
        putRecordRequest.setStreamName(myStreamName);
        putRecordRequest.setData(ByteBuffer.wrap(String.format("testData-%d", j).getBytes()));
        putRecordRequest.setPartitionKey(String.format("partitionKey-%d", j));
        PutRecordResult putRecordResult = kinesisClient.putRecord(putRecordRequest);
        System.out.println("Successfully putrecord, partition key : " + putRecordRequest.getPartitionKey()
                + ", ShardID : " + putRecordResult.getShardId());
    }

    // Delete the stream.
    LOG.info("Deleting stream : " + myStreamName);
    DeleteStreamRequest deleteStreamRequest = new DeleteStreamRequest();
    deleteStreamRequest.setStreamName(myStreamName);

    kinesisClient.deleteStream(deleteStreamRequest);
    // The stream is now being deleted.
    LOG.info("Stream is now being deleted : " + myStreamName);
}

From source file:AmazonKinesisCreate.java

public static void main(String[] args) throws Exception {
    init();

    final String myStreamName = "philsteststream";
    final Integer myStreamSize = 1;

    // Create a stream. The number of shards determines the provisioned throughput.

    CreateStreamRequest createStreamRequest = new CreateStreamRequest();
    createStreamRequest.setStreamName(myStreamName);
    createStreamRequest.setShardCount(myStreamSize);

    // pt
    kinesisClient.createStream(createStreamRequest);

    // The stream is now being created.
    LOG.info("Creating Stream : " + myStreamName);
    waitForStreamToBecomeAvailable(myStreamName);

    // list all of my streams
    ListStreamsRequest listStreamsRequest = new ListStreamsRequest();
    listStreamsRequest.setLimit(10);
    ListStreamsResult listStreamsResult = kinesisClient.listStreams(listStreamsRequest);
    List<String> streamNames = listStreamsResult.getStreamNames();
    while (listStreamsResult.isHasMoreStreams()) {
        if (streamNames.size() > 0) {
            listStreamsRequest.setExclusiveStartStreamName(streamNames.get(streamNames.size() - 1));
        }

        listStreamsResult = kinesisClient.listStreams(listStreamsRequest);
        streamNames.addAll(listStreamsResult.getStreamNames());

    }
    LOG.info("Printing my list of streams : ");

    // print all of my streams.
    if (!streamNames.isEmpty()) {
        System.out.println("List of my streams: ");
    }
    for (int i = 0; i < streamNames.size(); i++) {
        System.out.println(streamNames.get(i));
    }

    LOG.info("Putting records in stream : " + myStreamName);
    // Write 10 records to the stream
    for (int j = 0; j < 10; j++) {

        try {
            PutRecordRequest putRecordRequest = new PutRecordRequest();
            putRecordRequest.setStreamName(myStreamName);
            putRecordRequest.setData(ByteBuffer.wrap(String.format("testData-%d", j).getBytes()));
            putRecordRequest.setPartitionKey(String.format("partitionKey-%d", j));
            PutRecordResult putRecordResult = kinesisClient.putRecord(putRecordRequest);
            System.out.println("Successfully putrecord, partition key : " + putRecordRequest.getPartitionKey()
                    + ", ShardID : " + putRecordResult.getShardId());
            Thread.sleep(1000);

        } catch (Exception e) {
            e.printStackTrace();
        }

    }

    // Delete the stream.

    /*
    LOG.info("Deleting stream : " + myStreamName);
    DeleteStreamRequest deleteStreamRequest = new DeleteStreamRequest();
    deleteStreamRequest.setStreamName(myStreamName);
            
    kinesisClient.deleteStream(deleteStreamRequest);
    // The stream is now being deleted.
    LOG.info("Stream is now being deleted : " + myStreamName);
            
    LOG.info("Streaming completed" + myStreamName);
    */

}

From source file:com.openteach.diamond.network.waverider.command.Command.java

public static void main(String[] args) {

    ByteArrayOutputStream bout = null;
    ObjectOutputStream objOutputStream = null;

    try {
        bout = new ByteArrayOutputStream();
        objOutputStream = new ObjectOutputStream(bout);
        SlaveState slaveState = new SlaveState();
        slaveState.setId(1L);
        slaveState.setIsMasterCandidate(false);
        objOutputStream.writeObject(slaveState);
        objOutputStream.flush();
        Command command = CommandFactory.createHeartbeatCommand(ByteBuffer.wrap(bout.toByteArray()));

        ByteBuffer buffer = command.marshall();
        Command cmd = Command.unmarshall(buffer);
        SlaveState ss = SlaveState.fromByteBuffer(cmd.getPayLoad());
        System.out.println(cmd.toString());
    } catch (IOException e) {
        throw new RuntimeException(e);
    } finally {
        try {
            if (objOutputStream != null) {
                objOutputStream.close();
            }
            if (bout != null) {
                bout.close();
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}

From source file:com.splout.db.dnode.TCPStreamer.java

/**
 * This main method can be used for testing the TCP interface directly to a
 * local DNode. Will ask for protocol input from Stdin and print output to
 * Stdout.
 */
public static void main(String[] args) throws UnknownHostException, IOException, SerializationException {
    SploutConfiguration config = SploutConfiguration.get();
    Socket clientSocket = new Socket("localhost", config.getInt(DNodeProperties.STREAMING_PORT));

    DataInputStream inFromServer = new DataInputStream(new BufferedInputStream(clientSocket.getInputStream()));
    DataOutputStream outToServer = new DataOutputStream(
            new BufferedOutputStream(clientSocket.getOutputStream()));

    BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
    System.out.println("Enter tablespace: ");
    String tablespace = reader.readLine();

    System.out.println("Enter version number: ");
    long versionNumber = Long.parseLong(reader.readLine());

    System.out.println("Enter partition: ");
    int partition = Integer.parseInt(reader.readLine());

    System.out.println("Enter query: ");
    String query = reader.readLine();

    outToServer.writeUTF(tablespace);
    outToServer.writeLong(versionNumber);
    outToServer.writeInt(partition);
    outToServer.writeUTF(query);

    outToServer.flush();

    byte[] buffer = new byte[0];
    boolean read;
    do {
        read = false;
        int nBytes = inFromServer.readInt();
        if (nBytes > 0) {
            buffer = new byte[nBytes];
            int inRead = inFromServer.read(buffer);
            if (inRead > 0) {
                Object[] res = ResultSerializer.deserialize(ByteBuffer.wrap(buffer), Object[].class);
                read = true;
                System.out.println(Arrays.toString(res));
            }
        }
    } while (read);

    clientSocket.close();
}

From source file:Main.java

public static int bytesToInt(byte[] in) {
    return ByteBuffer.wrap(in).getInt(); //note: big-endian by default
}
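
As the inline note says, a buffer created by wrap uses big-endian byte order by default. If the input bytes were written little-endian, the order can be switched before reading. The helper below is a hypothetical companion to bytesToInt, not part of the original source:

public static int bytesToIntLE(byte[] in) {
    // Assumes 'in' holds at least four bytes in little-endian order.
    return ByteBuffer.wrap(in).order(java.nio.ByteOrder.LITTLE_ENDIAN).getInt();
}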