Example usage for org.apache.hadoop.io BytesWritable setSize

Introduction

This page collects example usages of org.apache.hadoop.io.BytesWritable#setSize(int), drawn from open-source projects.

Prototype

public void setSize(int size) 

Document

Change the size of the buffer. The values in the old range are preserved, and any new values are undefined. The capacity is changed if necessary.
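
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) illustrating the setSize semantics described above:

import org.apache.hadoop.io.BytesWritable;

public class SetSizeDemo {
    public static void main(String[] args) {
        BytesWritable b = new BytesWritable(new byte[] { 1, 2, 3 });
        System.out.println(b.getLength()); // 3

        // Shrinking only moves the logical end of the data; nothing is erased.
        b.setSize(2);
        System.out.println(b.getLength()); // 2

        // Growing enlarges the capacity if needed; the old bytes are
        // preserved and the bytes between the old and new size are undefined.
        b.setSize(5);
        System.out.println(b.getLength());   // 5
        System.out.println(b.getBytes()[0]); // 1 (old data kept)

        // getBytes() may return a backing array longer than getLength(),
        // so always pair it with getLength() when consuming the data.
    }
}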

Usage

From source file: com.facebook.hive.orc.lazy.LazyBinaryTreeReader.java

License: Open Source License

@Override
public Object next(Object previous) throws IOException {
    BytesWritable result = null;
    if (valuePresent) {
        if (previous == null) {
            result = new BytesWritable();
        } else {
            result = (BytesWritable) previous;
        }
        int len = (int) lengths.next();
        result.setSize(len);
        int offset = 0;
        while (len > 0) {
            int written = stream.read(result.getBytes(), offset, len);
            if (written < 0) {
                throw new EOFException("Can't finish byte read from " + stream);
            }
            len -= written;
            offset += written;
        }
    }
    return result;
}
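
In this reader, setSize(len) pre-sizes the buffer so the bytes can be read directly into getBytes(). The loop is necessary because InputStream.read may return fewer bytes than requested; reading continues until the full length has been filled, and a premature end of stream is reported as an EOFException.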

From source file: eu.scape_project.tb.lsdr.seqfileutility.SequenceFileWriter.java

License: Apache License

private void writeFileContent(File file) throws IOException {
    long fileLength = file.length();
    if (fileLength <= Integer.MAX_VALUE) {
        Text key = new Text();
        String filePath = file.getAbsolutePath();
        String keyPath = FilenameUtils.separatorsToUnix(filePath);
        key.set(keyPath);

        FileInputStream fis = new FileInputStream(file);
        byte[] byteArray = new byte[(int) fileLength];
        byte[] buf = new byte[BUFFER_SIZE];
        int bytesRead = fis.read(buf);

        int offset = 0;
        int chunk_count = 0;
        while (bytesRead != -1) {
            System.arraycopy(buf, 0, byteArray, offset, bytesRead);
            offset += bytesRead;
            bytesRead = fis.read(buf);
            chunk_count++;
        }

        BytesWritable value = new BytesWritable(byteArray);
        int len = (int) fileLength;
        value.setSize(len);
        filecount++;
        logger.info(this.getId() + ": " + filecount + ":" + key);
        writer.append(key, value);
        fis.close();
    } else {
        logger.warn("File " + file.getAbsolutePath() + " is too large to be "
                + "added to a sequence file (skipped).");
    }
}
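
Note that the BytesWritable(byte[]) constructor wraps the given array without copying and already sets the size to its full length, so the setSize(len) call here is effectively redundant (len equals byteArray.length); it does, however, make the intended record length explicit.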

From source file: org.apache.orc.TestColumnStatistics.java

License: Apache License

private static BytesWritable bytes(int... items) {
    BytesWritable result = new BytesWritable();
    result.setSize(items.length);
    for (int i = 0; i < items.length; ++i) {
        result.getBytes()[i] = (byte) items[i];
    }
    return result;
}
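
Because setSize grows the capacity when needed while preserving existing content, calling it up front guarantees that getBytes() returns an array with at least items.length writable slots for the loop that follows.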

From source file: org.commoncrawl.service.stats.StatsServiceServer.java

License: Open Source License

@Override
public void writeStatsRecord(final AsyncContext<WriteStatsRecordRequest, WriteStatsRecordResponse> rpcContext)
        throws RPCException {
    LOG.info("Received WriteRequest from:" + rpcContext.getClientChannel().toString() + " Group:"
            + rpcContext.getInput().getRecordGroup());

    final TimeSeriesDataFile<BytesWritable> file = getFileGivenGroupName(
            rpcContext.getInput().getRecordGroup());

    getDefaultThreadPool().execute(new ConcurrentTask<Long>(getEventLoop(),

            new Callable<Long>() {

                @Override
                public Long call() throws Exception {

                    BytesWritable data = new BytesWritable(
                            rpcContext.getInput().getRecord().getData().getReadOnlyBytes());
                    data.setSize(rpcContext.getInput().getRecord().getData().getCount());
                    long recordPositionOut = file
                            .appendRecordToLogFile(rpcContext.getInput().getRecord().getTimestamp(), data);
                    LOG.info("Wrote Record with Timestamp:" + rpcContext.getInput().getRecord().getTimestamp()
                            + " to Group:" + rpcContext.getInput().getRecordGroup());

                    return recordPositionOut;
                }
            },

            new CompletionCallback<Long>() {

                @Override
                public void taskComplete(Long loadResult) {
                    try {
                        rpcContext.getOutput().setRecordPositon(loadResult);
                        rpcContext.completeRequest();
                    } catch (RPCException e) {
                        LOG.error(CCStringUtils.stringifyException(e));
                    }
                }

                @Override
                public void taskFailed(Exception e) {
                    LOG.error(CCStringUtils.stringifyException(e));
                    rpcContext.setStatus(Status.Error_RequestFailed);
                    rpcContext.setErrorDesc(CCStringUtils.stringifyException(e));
                    try {
                        rpcContext.completeRequest();
                    } catch (RPCException e1) {
                        LOG.error(CCStringUtils.stringifyException(e1));
                    }
                }

            }));
}
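
The backing buffer returned by getReadOnlyBytes() evidently can be larger than the number of valid bytes, which is why the writable is first constructed around the raw buffer and then trimmed with setSize(getCount()) to the logical record length before being appended to the log file.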

From source file: voldemort.store.readonly.mr.utils.HadoopUtils.java

License: Apache License

/**
 * Tag the BytesWritable with an integer at the END
 */
public static void appendTag(BytesWritable writable, int tag) {
    int size = writable.getLength();

    if (writable.getCapacity() < size + 4) {
        // BytesWritable preserves old values
        writable.setCapacity(size + 4);
    }

    ByteUtils.writeInt(writable.getBytes(), tag, size);
    writable.setSize(size + 4);
}
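
This works because setCapacity preserves the existing bytes (as the inline comment notes), and setSize then extends the logical length so the four tag bytes written at the old end become part of the writable's value. For illustration, a hypothetical companion readTag (not part of HadoopUtils as shown here) could recover the tag, assuming ByteUtils.writeInt stores the value big-endian, which matches ByteBuffer's default byte order:

/**
 * Hypothetical companion to appendTag: reads the integer tag back from the
 * last four bytes of the writable. Assumes big-endian encoding.
 */
public static int readTag(BytesWritable writable) {
    int size = writable.getLength();
    // ByteBuffer defaults to big-endian, matching the assumed writeInt layout.
    return java.nio.ByteBuffer.wrap(writable.getBytes(), size - 4, 4).getInt();
}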