Example usage for java.nio ByteBuffer get

List of usage examples for java.nio ByteBuffer get

Introduction

On this page you can find example usage for java.nio.ByteBuffer.get.

Prototype

public ByteBuffer get(byte[] dst, int offset, int length) 

Document

Reads length bytes from the buffer's current position into the specified byte array, starting at the specified array offset, and advances the buffer's position by the number of bytes read.
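
A minimal, self-contained sketch of the call before the examples (the buffer size and string are illustrative, not taken from any example below): write into a buffer, flip it to reading mode, then bulk-copy the readable bytes into an array.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class ByteBufferGetSketch {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(64);
        buf.put("hello".getBytes(StandardCharsets.UTF_8));
        buf.flip();                       // limit = position, position = 0
        byte[] dst = new byte[buf.remaining()];
        buf.get(dst, 0, dst.length);      // bulk read; position advances by dst.length
        System.out.println(new String(dst, StandardCharsets.UTF_8)); // hello
    }
}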

Usage

From source file:net.cellcloud.talk.stuff.PrimitiveSerializer.java

/** Deserializes a primitive from the given input stream. */
public static void read(Primitive primitive, InputStream stream) {
    /*
    Wire format:
    [version]{stuff}...{stuff}[dialect@tracker]

    Example: [01.00]{sub=cloud:string}{pre=add:string}[Action@Ambrose]
    */

    try {
        byte phase = PARSE_PHASE_UNKNOWN;
        int read = 0;

        ByteBuffer buf = ByteBuffer.allocate(BLOCK);
        byte[] type = new byte[3];
        byte[] value = null;
        byte[] literal = null;
        int length = 0;

        while ((read = stream.read()) >= 0) {

            // dispatch on the current parse phase
            switch (phase) {

            case PARSE_PHASE_VALUE:
                // handle escape sequences
                if (read == '\\') {
                    // peek at the escaped byte
                    int next = stream.read();
                    if (next == TOKEN_OPEN_BRACE || next == TOKEN_CLOSE_BRACE || next == TOKEN_OPERATE_ASSIGN
                            || next == TOKEN_OPERATE_DECLARE) {
                        buf.put((byte) next);
                        ++length;
                    } else {
                        buf.put((byte) read);
                        buf.put((byte) next);
                        length += 2;
                    }

                    // escape handled; continue with the next byte
                    continue;
                }

                if (read == TOKEN_OPERATE_DECLARE) {
                    // end of value: extract it from the buffer
                    buf.flip();
                    value = new byte[length];
                    buf.get(value, 0, length);
                    buf.clear();

                    phase = PARSE_PHASE_LITERAL;
                    length = 0;
                    continue;
                }

                buf.put((byte) read);
                ++length;
                break;

            case PARSE_PHASE_TYPE:
                if (read == TOKEN_OPERATE_ASSIGN) {
                    // end of type: extract the 3-byte type tag
                    buf.flip();
                    buf.get(type);
                    buf.clear();

                    phase = PARSE_PHASE_VALUE;
                    length = 0;
                    continue;
                }
                // accumulate type bytes
                buf.put((byte) read);
                break;

            case PARSE_PHASE_LITERAL:
                if (read == TOKEN_CLOSE_BRACE) {
                    // end of literal: extract it from the buffer
                    buf.flip();
                    literal = new byte[length];
                    buf.get(literal, 0, length);
                    buf.clear();

                    // assemble the parsed stuff into the primitive
                    injectStuff(primitive, type, value, literal);

                    phase = PARSE_PHASE_DIALECT;
                    length = 0;
                    continue;
                }
                buf.put((byte) read);
                ++length;
                break;

            case PARSE_PHASE_STUFF:
                if (read == TOKEN_OPEN_BRACE) {
                    // open brace starts the next stuff's type
                    phase = PARSE_PHASE_TYPE;
                    buf.clear();
                }
                break;

            case PARSE_PHASE_VERSION:
                if (read == TOKEN_CLOSE_BRACKET) {
                    // end of version tag; move to stuff parsing
                    phase = PARSE_PHASE_STUFF;
                    continue;
                }
                buf.put((byte) read);
                break;

            case PARSE_PHASE_DIALECT:
                if (read == TOKEN_OPEN_BRACE) {
                    phase = PARSE_PHASE_TYPE;
                    buf.clear();
                } else if (read == TOKEN_OPEN_BRACKET) {
                    // start of the dialect tag
                    buf.clear();
                } else if (read == TOKEN_CLOSE_BRACKET) {
                    // end of dialect: deserialize it
                    deserializeDialect(primitive, new String(buf.array(), 0, length, Charset.forName("UTF-8")));
                } else {
                    // accumulate dialect bytes
                    buf.put((byte) read);
                    ++length;
                }
                break;

            default:
                if (read == TOKEN_OPEN_BRACE) {
                    phase = PARSE_PHASE_TYPE;
                    buf.clear();
                } else if (read == TOKEN_OPEN_BRACKET) {
                    phase = PARSE_PHASE_VERSION;
                    buf.clear();
                }
                break;
            }
        }

        buf.clear();

    } catch (IOException e) {
        Logger.log(PrimitiveSerializer.class, e, LogLevel.ERROR);
    }
}
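
The parser above repeats one idiom for every token it extracts: flip the accumulation buffer, copy exactly length bytes out with get(byte[], int, int), then clear the buffer for the next token. A sketch of that idiom as a standalone helper (drainToken is hypothetical, not part of PrimitiveSerializer):

    private static byte[] drainToken(ByteBuffer buf, int length) {
        buf.flip();                    // switch from writing to reading
        byte[] token = new byte[length];
        buf.get(token, 0, length);     // bulk copy; position advances by length
        buf.clear();                   // reset for the next token
        return token;
    }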

From source file:com.servoy.j2db.util.Utils.java

public static byte[] readFile(File f, long size) {
    if (f != null && f.exists()) {

        FileInputStream fis = null;
        try {
            int length = (int) f.length();
            fis = new FileInputStream(f);
            FileChannel fc = fis.getChannel();
            if (size > length || size < 0)
                size = length;
            ByteBuffer bb = ByteBuffer.allocate((int) size);
            fc.read(bb);
            bb.rewind();
            byte[] bytes = null;
            if (bb.hasArray()) {
                bytes = bb.array();
            } else {
                bytes = new byte[(int) size];
                bb.get(bytes, 0, (int) size);
            }
            return bytes;
        } catch (Exception e) {
            Debug.error("Error reading file: " + f, e); //$NON-NLS-1$
        } finally {
            try {
                if (fis != null)
                    fis.close();
            } catch (Exception ex) {
            }
        }

    }
    return null;
}
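
Note the branch on hasArray(): when the buffer is array-backed, the code returns the backing array directly (zero-copy), and only falls back to a bulk get(bytes, 0, size) for direct buffers, which expose no backing array. A sketch of that decision in isolation (toBytes is hypothetical; it also guards against a backing array larger than the data, which the original avoids by allocating exactly size bytes):

    static byte[] toBytes(ByteBuffer bb, int size) {
        bb.rewind();
        if (bb.hasArray() && bb.arrayOffset() == 0 && bb.array().length == size) {
            return bb.array();        // zero-copy: reuse the backing array
        }
        byte[] bytes = new byte[size];
        bb.get(bytes, 0, size);       // works for direct buffers too
        return bytes;
    }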

From source file:org.openpilot_nonag.uavtalk.UAVTalk.java

/**
 * Send an object through the telemetry link.
 * @throws IOException
 * @param[in] obj Object handle to send
 * @param[in] type Transaction type
 * @return Success (true), Failure (false)
 */
private boolean transmitSingleObject(int type, long objId, long instId, UAVObject obj) throws IOException {
    int length = 0;

    assert (objMngr != null && outStream != null);

    // IMPORTANT : obj can be null (when type is NACK for example)

    // Determine data length
    if (type == TYPE_OBJ_REQ || type == TYPE_ACK || type == TYPE_NACK) {
        length = 0;
    } else {
        length = obj.getNumBytes();
    }

    ByteBuffer bbuf = ByteBuffer.allocate(MAX_PACKET_LENGTH);
    bbuf.order(ByteOrder.LITTLE_ENDIAN);

    // Setup type and object id fields
    bbuf.put((byte) (SYNC_VAL & 0xff));
    bbuf.put((byte) (type & 0xff));
    bbuf.putShort((short) (length + HEADER_LENGTH));
    bbuf.putInt((int) objId);
    bbuf.putShort((short) (instId & 0xffff));

    // Check length
    if (length >= MAX_PAYLOAD_LENGTH) {
        ++stats.txErrors;
        return false;
    }

    // Copy data (if any)
    if (length > 0)
        try {
            if (obj.pack(bbuf) == 0) {
                ++stats.txErrors;
                return false;
            }
        } catch (Exception e) {
            ++stats.txErrors;
            // TODO Auto-generated catch block
            e.printStackTrace();
            return false;
        }

    // Calculate checksum
    bbuf.put((byte) (updateCRC(0, bbuf.array(), bbuf.position()) & 0xff));

    int packlen = bbuf.position();
    bbuf.position(0);
    byte[] dst = new byte[packlen];
    bbuf.get(dst, 0, packlen);

    outStream.write(dst);

    // Update stats
    ++stats.txObjects;
    stats.txBytes += bbuf.position();
    stats.txObjectBytes += length;

    // Done
    return true;
}
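
The drain step at the end is worth noting: after the relative puts, position() equals the packet length, so the code rewinds with position(0) and bulk-copies exactly packlen bytes. The get() moves the position back to packlen, which is why stats.txBytes += bbuf.position() afterwards still sees the full length. A sketch of just that step (drainPacket is hypothetical):

    static byte[] drainPacket(ByteBuffer bbuf) {
        int packlen = bbuf.position();
        bbuf.position(0);              // rewind without touching the limit
        byte[] dst = new byte[packlen];
        bbuf.get(dst, 0, packlen);     // position returns to packlen
        return dst;
    }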

From source file:org.alfresco.contentstore.patch.PatchServiceImpl.java

@Override
public void updatePatchDocument(PatchDocument patchDocument, NodeChecksums checksums, ByteBuffer data) {
    int blockSize = checksums.getBlockSize();

    patchDocument.setBlockSize(blockSize);

    int i = 0;

    Adler32 adlerInfo = new Adler32(hasher);
    int lastMatchIndex = 0;
    ByteBuffer currentPatch = ByteBuffer.allocate(600000); // TODO

    int currentPatchSize = 0;

    for (;;) {
        int chunkSize = 0;
        // determine the size of the next data chunk to evaluate. Default to
        // blockSize, but clamp to end of data
        if ((i + blockSize) > data.limit()) {
            chunkSize = data.limit() - i;
            adlerInfo.reset(); // need to reset this because the rolling
                               // checksum doesn't work correctly on a final
                               // non-aligned block
        } else {
            chunkSize = blockSize;
        }

        int matchedBlock = adlerInfo.checkMatch(lastMatchIndex, checksums, data, i, i + chunkSize - 1);
        if (matchedBlock != -1) {
            // if we have a match, do the following:
            // 1) add the matched block index to our tracking buffer
            // 2) check to see if there's a current patch. If so, add it to
            // the patch document.
            // 3) jump forward blockSize bytes and continue
            patchDocument.addMatchedBlock(matchedBlock);

            if (currentPatchSize > 0) {
                // there are outstanding patches, add them to the list
                // create the patch and append it to the patches buffer
                currentPatch.flip();
                int size = currentPatch.limit();
                byte[] dst = new byte[size];
                currentPatch.get(dst, 0, size);
                Patch patch = new Patch(lastMatchIndex, size, dst);
                patchDocument.addPatch(patch);
                currentPatch.clear();
            }

            lastMatchIndex = matchedBlock;

            i += chunkSize;

            adlerInfo.reset();

            continue;
        } else {
            // while we don't have a block match, append bytes to the
            // current patch
            logger.debug("limit = " + currentPatch.limit() + ", position = " + currentPatch.position());
            currentPatch.put(data.get(i));
            currentPatchSize++;
        }
        if (i >= data.limit() - 1) {
            break;
        }
        i++;
    } // end for each byte in the data

    if (currentPatchSize > 0) {
        currentPatch.flip();
        int size = currentPatch.limit();
        byte[] dst = new byte[size];
        currentPatch.get(dst, 0, size);
        Patch patch = new Patch(lastMatchIndex, size, dst);
        patchDocument.addPatch(patch);
    }
}
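
Two get overloads meet in this method: data.get(i) is the absolute single-byte form and leaves data's position untouched, while currentPatch.get(dst, 0, size) is the relative bulk form and advances currentPatch's position. A minimal sketch of the contrast (the values are illustrative):

    ByteBuffer data = ByteBuffer.wrap(new byte[] { 1, 2, 3, 4 });
    byte b = data.get(2);          // absolute read: position is still 0
    byte[] dst = new byte[2];
    data.get(dst, 0, 2);           // relative read: position is now 2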

From source file:edu.harvard.iq.dvn.ingest.dsb.impl.DvnNewJavaFieldCutter.java

public void cutColumns(InputStream in, int noCardsPerCase, int caseLength, String delimitor, String tabFileName)
        throws IOException {

    if (delimitor == null) {
        delimitor = defaultDelimitor;
    }

    OUT_LEN = colwidth; // calculated by parseList
    dbgLog.fine("out_len=" + OUT_LEN);

    String firstline = null;

    if (caseLength == 0) {

        int cread;
        int ccounter = 0;

        firstline = "";

        while (caseLength == 0 && (cread = in.read()) != -1) {
            ccounter++;
            if (cread == '\n') {
                caseLength = ccounter;
            }
            char c = (char) cread;
            firstline = firstline + c;
        }

    }

    if (caseLength == 0) {
        throw new IOException("Subsetting failed: could not read incoming byte stream. "
                + "(Requested file may be unavailable or missing)");

    }

    REC_LEN = caseLength;
    dbgLog.fine("REC_LEN=" + REC_LEN);

    for (int i = 0; i < cargSet.get(Long.valueOf(noCardsPerCase)).size(); i++) {
        int varEndOffset = cargSet.get(Long.valueOf(noCardsPerCase)).get(i).get(1);

        if (REC_LEN <= varEndOffset + 1) {
            throw new IOException("Failed to subset incoming byte stream. Invalid input. "
                    + "(Detected the first record of " + REC_LEN + " bytes; "
                    + "one of the columns requested ends at " + varEndOffset + " bytes).");
        }
    }

    Boolean dottednotation = false;
    Boolean foundData = false;

    // cutting a data file

    ReadableByteChannel rbc = Channels.newChannel(in);
    // input byte-buffer size = row length + 1 (for the newline char)
    ByteBuffer inbuffer = ByteBuffer.allocate(REC_LEN);

    OutputStream outs = new FileOutputStream(tabFileName);
    WritableByteChannel outc = Channels.newChannel(outs);
    ByteBuffer outbuffer = null;

    int pos = 0;
    int offset = 0;
    int outoffset = 0;

    int begin = 0;
    int end = 0;
    int blankoffset = 0;

    int blanktail = 0;
    int k;

    try {
        // lc: line counter
        int lc = 0;
        while (firstline != null || rbc.read(inbuffer) != -1) {

            if (firstline != null) {
                // we have the first line saved as a String:
                inbuffer.put(firstline.getBytes());
                firstline = null;
            }

            // calculate i-th card number
            lc++;
            k = lc % noCardsPerCase;
            if (k == 0) {
                k = noCardsPerCase;
            }
            //out.println("***** " +lc+ "-th line, recod k=" + k + " *****");
            byte[] line_read = new byte[OUT_LEN];
            byte[] junk = new byte[REC_LEN];
            byte[] line_final = new byte[OUT_LEN];

            //out.println("READ: " + offset);
            inbuffer.rewind();

            offset = 0;
            outoffset = 0;

            // how many variables are cut from this k-th card
            int noColumns = cargSet.get(Long.valueOf(k)).size();

            //out.println("noColumns=" + noColumns);
            //out.println("cargSet k =" + cargSet.get(Long.valueOf(k)));

            for (int i = 0; i < noColumns; i++) {
                //out.println("**** " + i +"-th col ****");
                begin = cargSet.get(Long.valueOf(k)).get(i).get(0); // bounds[2 * i];
                end = cargSet.get(Long.valueOf(k)).get(i).get(1); // bounds[2 * i + 1];

                //out.println("i: begin: " + begin + "\ti: end:" + end);

                try {
                    // throw away offset bytes before the requested column
                    if (begin - offset - 1 > 0) {
                        inbuffer.get(junk, 0, (begin - offset - 1));
                    }
                    // get requested bytes
                    inbuffer.get(line_read, outoffset, (end - begin + 1));
                    // set outbound data
                    outbounds[2 * i] = outoffset;
                    outbounds[2 * i + 1] = outoffset + (end - begin);
                    // current position moved to outoffset
                    pos = outoffset;

                    dottednotation = false;
                    foundData = false;

                    blankoffset = 0;
                    blanktail = 0;

                    // as position increases
                    while (pos <= (outoffset + (end - begin))) {

                        //out.println("pos=" + pos + "\tline_read[pos]=" +
                        //    new String(line_read).replace("\000", "\052"));

                        // decimal / octal values:
                        // '0' = 48 (060), '.' = 46 (056), space = 32 (040)

                        // dot:
                        if (line_read[pos] == '\056') {
                            dottednotation = true;
                        }

                        // space:
                        if (line_read[pos] == '\040') {
                            if (foundData) {
                                blanktail = blanktail > 0 ? blanktail : pos - 1;
                            } else {
                                blankoffset = pos + 1;
                            }
                        } else {
                            foundData = true;
                            blanktail = 0;
                        }

                        pos++;
                    }
                    // increase the outoffset by width
                    outoffset += (end - begin + 1);
                    // dot false
                    if (!dottednotation) {
                        if (blankoffset > 0) {
                            // set outbound value to blankoffset
                            outbounds[2 * i] = blankoffset;
                        }
                        if (blanktail > 0) {
                            outbounds[2 * i + 1] = blanktail;
                        }
                    }

                } catch (BufferUnderflowException bufe) {
                    //bufe.printStackTrace();
                    throw new IOException(bufe.getMessage());
                }
                // set offset to the value of end-position
                offset = end;
            }

            outoffset = 0;
            // for each var
            for (int i = 0; i < noColumns; i++) {
                begin = outbounds[2 * i];
                end = outbounds[2 * i + 1];
                //out.println("begin=" + begin + "\t end=" + end);
                for (int j = begin; j <= end; j++) {
                    line_final[outoffset++] = line_read[j];
                }

                if (i < (noColumns - 1)) {
                    line_final[outoffset++] = '\011'; // tab x09
                } else {
                    if (k == cargSet.size()) {
                        line_final[outoffset++] = '\012'; // LF x0A
                    } else {
                        line_final[outoffset++] = '\011'; // tab x09
                    }
                }
            }
            //out.println("line_final=" +
            //    new String(line_final).replace("\000", "\052"));
            outbuffer = ByteBuffer.wrap(line_final, 0, outoffset);
            outc.write(outbuffer);
            inbuffer.clear();

        } // while loop
    } catch (IOException ex) {
        //ex.printStackTrace();
        throw new IOException("Failed to subset incoming fixed-field stream: " + ex.getMessage());
    }

}
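
The column cutter relies on the offset parameter: successive inbuffer.get(line_read, outoffset, len) calls pack adjacent fixed-width fields into a single destination array, with a get into a junk array to skip unwanted bytes in between. A compact sketch of that pattern (the record content is illustrative):

    ByteBuffer record = ByteBuffer.wrap(new byte[] { 'A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C' });
    byte[] junk = new byte[3];
    byte[] lineRead = new byte[6];
    record.get(junk, 0, 3);        // discard the first field
    record.get(lineRead, 0, 3);    // second field -> lineRead[0..2]
    record.get(lineRead, 3, 3);    // third field  -> lineRead[3..5]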

From source file:org.alfresco.contentstore.patch.PatchServiceImpl.java

private void updatePatchDocument(PatchDocument patchDocument, NodeChecksums checksums, Reader reader)
        throws IOException {
    int blockSize = checksums.getBlockSize();
    ByteBuffer data = ByteBuffer.allocate(blockSize * 20);

    int i = 0;

    Adler32 adlerInfo = new Adler32(hasher);
    int lastMatchIndex = 1; // starts at 1
    ByteBuffer currentPatch = ByteBuffer.allocate(5000000); // TODO

    int x = 0;

    for (;;) {
        if (x == 0 || i >= data.limit()) {
            data.clear();
            i = 0;
            int numRead = reader.read(data);
            if (numRead <= 0) {
                break;
            }
            data.flip();
            x += numRead;
        }

        int chunkSize = 0;
        // determine the size of the next data chunk to evaluate. Default to
        // blockSize, but clamp to end of data
        if ((i + blockSize) > data.limit()) {
            chunkSize = data.limit() - i;
            adlerInfo.reset(); // need to reset this because the rolling
                               // checksum doesn't work correctly on a final
                               // non-aligned block
        } else {
            chunkSize = blockSize;
        }

        int end = i + chunkSize - 1;

        int matchedBlockIndex = adlerInfo.checkMatch(lastMatchIndex, checksums, data, i, end);
        if (matchedBlockIndex != -1) {
            // if we have a match, do the following:
            // 1) add the matched block index to our tracking buffer
            // 2) check to see if there's a current patch. If so, add it to
            // the patch document.
            // 3) jump forward blockSize bytes and continue
            patchDocument.addMatchedBlock(matchedBlockIndex);

            if (currentPatch.position() > 0) {
                // there are outstanding patches, add them to the list
                // create the patch and append it to the patches buffer
                currentPatch.flip();
                int size = currentPatch.limit();
                byte[] dst = new byte[size];
                currentPatch.get(dst, 0, size);
                Patch patch = new Patch(lastMatchIndex, size, dst);
                patchDocument.addPatch(patch);
                currentPatch.clear();
            }

            lastMatchIndex = matchedBlockIndex;

            i += chunkSize;

            adlerInfo.reset();
        } else {
            // while we don't have a block match, append bytes to the
            // current patch
            currentPatch.put(data.get(i));
            i++;
        }
    } // end for each byte in the data

    if (currentPatch.position() > 0) {
        currentPatch.flip();
        int size = currentPatch.limit();
        byte[] dst = new byte[size];
        currentPatch.get(dst, 0, size);
        Patch patch = new Patch(lastMatchIndex, size, dst);
        patchDocument.addPatch(patch);
    }
}

From source file:edu.brown.hstore.PartitionExecutor.java

private Map<Integer, List<VoltTable>> getFragmentInputs(AbstractTransaction ts, WorkFragment fragment,
        Map<Integer, List<VoltTable>> inputs) {
    Map<Integer, List<VoltTable>> attachedInputs = ts.getAttachedInputDependencies();
    assert (attachedInputs != null);
    boolean is_local = (ts instanceof LocalTransaction);

    if (d)
        LOG.debug(String.format("%s - Attempting to retrieve input dependencies for WorkFragment [isLocal=%s]",
                ts, is_local));
    for (int i = 0, cnt = fragment.getFragmentIdCount(); i < cnt; i++) {
        WorkFragment.InputDependency input_dep_ids = fragment.getInputDepId(i);
        for (int input_dep_id : input_dep_ids.getIdsList()) {
            if (input_dep_id == HStoreConstants.NULL_DEPENDENCY_ID)
                continue;

            // If the Transaction is on the same HStoreSite, then all the
            // input dependencies will be internal and can be retrieved
            // locally
            if (is_local) {
                List<VoltTable> deps = ((LocalTransaction) ts).getInternalDependency(input_dep_id);
                assert (deps != null);
                assert (inputs.containsKey(input_dep_id) == false);
                inputs.put(input_dep_id, deps);
                if (d)
                    LOG.debug(String.format("%s - Retrieved %d INTERNAL VoltTables for DependencyId #%d\n%s",
                            ts, deps.size(), input_dep_id, deps));
            }
            // Otherwise they will be "attached" inputs to the
            // RemoteTransaction handle
            // We should really try to merge these two concepts into a
            // single function call
            else if (attachedInputs.containsKey(input_dep_id)) {
                List<VoltTable> deps = attachedInputs.get(input_dep_id);
                List<VoltTable> pDeps = null;
                // XXX: Do we actually need to copy these???
                // XXX: I think we only need to copy if we're debugging the
                // tables!
                if (d) { // this.firstPartition == false) {
                    pDeps = new ArrayList<VoltTable>();
                    for (VoltTable vt : deps) {
                        // TODO: Move into VoltTableUtil
                        ByteBuffer buffer = vt.getTableDataReference();
                        byte arr[] = new byte[vt.getUnderlyingBufferSize()]; // FIXME
                        buffer.get(arr, 0, arr.length);
                        pDeps.add(new VoltTable(ByteBuffer.wrap(arr), true));
                    }
                } else {
                    pDeps = deps;
                }
                inputs.put(input_dep_id, pDeps);
                if (d)
                    LOG.debug(String.format("%s - Retrieved %d ATTACHED VoltTables for DependencyId #%d in %s",
                            ts, deps.size(), input_dep_id));
            }

        } // FOR (inputs)
    } // FOR (fragments)
    if (d) {
        if (inputs.isEmpty() == false) {

            LOG.debug(String.format("%s - Retrieved %d InputDependencies for %s on partition %d", ts,
                    inputs.size(), fragment.getFragmentIdList(), fragment.getPartitionId()));
        } else if (fragment.getNeedsInput()) {
            LOG.warn(String.format("%s - No InputDependencies retrieved for %s on partition %d", ts,
                    fragment.getFragmentIdList(), fragment.getPartitionId()));
        }
    }
    return (inputs);
}
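
The copy inside the debug branch is a defensive one: draining the table's buffer into a fresh array and wrapping that array yields a VoltTable whose bytes cannot be mutated through the original buffer (the FIXME suggests the getUnderlyingBufferSize() sizing may be questionable). A sketch of the copy itself, sized by remaining() instead (the allocation stands in for vt.getTableDataReference()):

    ByteBuffer buffer = ByteBuffer.allocate(128);  // stand-in for the table's buffer
    byte[] arr = new byte[buffer.remaining()];
    buffer.get(arr, 0, arr.length);                // drains buffer into arr
    ByteBuffer copy = ByteBuffer.wrap(arr);        // independent backing array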

From source file:com.eucalyptus.walrus.WalrusFSManager.java

private String getMultipartData(ObjectInfo objectInfo, GetObjectType request, GetObjectResponseType response)
        throws WalrusException {
    //get all parts
    PartInfo searchPart = new PartInfo(request.getBucket(), request.getKey());
    searchPart.setCleanup(false);
    searchPart.setUploadId(objectInfo.getUploadId());
    List<PartInfo> parts;
    EntityTransaction db = Entities.get(PartInfo.class);
    try {
        Criteria partCriteria = Entities.createCriteria(PartInfo.class);
        partCriteria.setReadOnly(true);
        partCriteria.add(Example.create(searchPart));
        partCriteria.add(Restrictions.isNotNull("partNumber"));
        partCriteria.addOrder(Order.asc("partNumber"));

        parts = partCriteria.list();
        if (parts.size() == 0) {
            throw new InternalErrorException(
                    "No parts found corresponding to uploadId: " + objectInfo.getUploadId());
        }
    } finally {
        db.rollback();
    }

    if (request.getInlineData()) {
        if ((objectInfo.getSize() * 4) > WalrusProperties.MAX_INLINE_DATA_SIZE) {
            throw new InlineDataTooLargeException(request.getBucket() + "/" + request.getKey());
        }
        String base64Data = "";
        for (PartInfo part : parts) {
            byte[] bytes = new byte[102400];
            int bytesRead = 0, offset = 0;
            try {
                FileIO fileIO = storageManager.prepareForRead(part.getBucketName(), part.getObjectName());
                while ((bytesRead = fileIO.read(offset)) > 0) {
                    ByteBuffer buffer = fileIO.getBuffer();
                    if (buffer != null) {
                        buffer.get(bytes, 0, bytesRead);
                        base64Data += new String(bytes, 0, bytesRead);
                        offset += bytesRead;
                    }
                }
                fileIO.finish();
            } catch (Exception e) {
                LOG.error(e, e);
                throw new InternalErrorException(e);
            }
        }
        return Hashes.base64encode(base64Data);
    } else {
        response.setHasStreamingData(true);
        // support for large objects
        storageManager.getMultipartObject(response, parts, request.getIsCompressed());
        return null;
    }
}
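
The read loop above only ever copies the bytesRead bytes the last fileIO.read(offset) produced, never the full 102400-byte scratch array; buffer.get(bytes, 0, bytesRead) is what keeps stale tail bytes out of the output. A reduced, self-contained sketch of the same idiom (FileIO is a Eucalyptus type, so an in-memory buffer stands in for it; a StringBuilder also avoids the quadratic cost of the original's repeated string concatenation):

    ByteBuffer buffer = ByteBuffer.wrap("chunk-1chunk-2".getBytes(StandardCharsets.UTF_8));
    byte[] bytes = new byte[7];            // reusable scratch array
    StringBuilder assembled = new StringBuilder();
    while (buffer.remaining() > 0) {
        int bytesRead = Math.min(bytes.length, buffer.remaining());
        buffer.get(bytes, 0, bytesRead);   // copy only what was actually read
        assembled.append(new String(bytes, 0, bytesRead, StandardCharsets.UTF_8));
    }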

From source file:com.eucalyptus.objectstorage.WalrusManager.java

public GetObjectResponseType getObject(GetObjectType request) throws EucalyptusCloudException {
    GetObjectResponseType reply = (GetObjectResponseType) request.getReply();
    // Must explicitly set to true for streaming large objects.
    reply.setHasStreamingData(false);
    String bucketName = request.getBucket();
    String objectKey = request.getKey();
    Context ctx = Contexts.lookup();
    Account account = ctx.getAccount();
    Boolean deleteAfterGet = request.getDeleteAfterGet();
    if (deleteAfterGet == null) {
        deleteAfterGet = false;
    }

    Boolean getTorrent = request.getGetTorrent();
    if (getTorrent == null) {
        getTorrent = false;
    }

    Boolean getMetaData = request.getGetMetaData();
    if (getMetaData == null) {
        getMetaData = false;
    }

    Boolean getData = request.getGetData();
    if (getData == null) {
        getData = false;
    }

    EntityWrapper<BucketInfo> db = EntityWrapper.get(BucketInfo.class);
    BucketInfo bucketInfo = new BucketInfo(bucketName);
    List<BucketInfo> bucketList = db.queryEscape(bucketInfo);

    if (bucketList.size() > 0) {
        BucketInfo bucket = bucketList.get(0);
        BucketLogData logData = bucket.getLoggingEnabled() ? request.getLogData() : null;
        boolean versioning = false;
        if (bucket.isVersioningEnabled()) {
            versioning = true;
        }
        EntityWrapper<ObjectInfo> dbObject = db.recast(ObjectInfo.class);
        ObjectInfo searchObjectInfo = new ObjectInfo(bucketName, objectKey);
        searchObjectInfo.setVersionId(request.getVersionId());
        searchObjectInfo.setDeleted(false);
        if (request.getVersionId() == null) {
            searchObjectInfo.setLast(true);
        }
        List<ObjectInfo> objectInfos = dbObject.queryEscape(searchObjectInfo);
        if (objectInfos.size() > 0) {
            ObjectInfo objectInfo = objectInfos.get(0);
            if (ctx.hasAdministrativePrivileges() || (objectInfo.canRead(account.getAccountNumber())
                    && (objectInfo.isGlobalRead() || Lookups.checkPrivilege(PolicySpec.S3_GETOBJECT,
                            PolicySpec.VENDOR_S3, PolicySpec.S3_RESOURCE_OBJECT,
                            PolicySpec.objectFullName(bucketName, objectKey), null)))) {
                String objectName = objectInfo.getObjectName();
                DefaultHttpResponse httpResponse = new DefaultHttpResponse(HttpVersion.HTTP_1_1,
                        HttpResponseStatus.OK);
                if (getMetaData) {
                    List<MetaDataInfo> metaDataInfos = objectInfo.getMetaData();
                    for (MetaDataInfo metaDataInfo : metaDataInfos) {
                        httpResponse.addHeader(WalrusProperties.AMZ_META_HEADER_PREFIX + metaDataInfo.getName(),
                                metaDataInfo.getValue());
                    }
                }
                if (getTorrent) {
                    if (objectInfo.isGlobalRead()) {
                        if (!WalrusProperties.enableTorrents) {
                            LOG.warn("Bittorrent support has been disabled. Please check pre-requisites");
                            throw new EucalyptusCloudException("Torrents disabled");
                        }
                        EntityWrapper<TorrentInfo> dbTorrent = EntityWrapper.get(TorrentInfo.class);
                        TorrentInfo torrentInfo = new TorrentInfo(bucketName, objectKey);
                        TorrentInfo foundTorrentInfo;
                        String absoluteObjectPath = storageManager.getObjectPath(bucketName, objectName);
                        try {
                            foundTorrentInfo = dbTorrent.getUniqueEscape(torrentInfo);
                        } catch (EucalyptusCloudException ex) {
                            String torrentFile = objectName + ".torrent";
                            String torrentFilePath = storageManager.getObjectPath(bucketName, torrentFile);
                            TorrentCreator torrentCreator = new TorrentCreator(absoluteObjectPath, objectKey,
                                    objectName, torrentFilePath, WalrusProperties.getTrackerUrl());
                            try {
                                torrentCreator.create();
                            } catch (Exception e) {
                                LOG.error(e);
                                throw new EucalyptusCloudException(
                                        "could not create torrent file " + torrentFile);
                            }
                            torrentInfo.setTorrentFile(torrentFile);
                            dbTorrent.add(torrentInfo);
                            foundTorrentInfo = torrentInfo;
                        }
                        dbTorrent.commit();
                        String torrentFile = foundTorrentInfo.getTorrentFile();
                        String torrentFilePath = storageManager.getObjectPath(bucketName, torrentFile);
                        TorrentClient torrentClient = new TorrentClient(torrentFilePath, absoluteObjectPath);
                        Torrents.addClient(bucketName + objectKey, torrentClient);
                        torrentClient.start();
                        // send torrent
                        String key = bucketName + "." + objectKey;
                        String randomKey = key + "." + Hashes.getRandom(10);
                        request.setRandomKey(randomKey);

                        File torrent = new File(torrentFilePath);
                        if (torrent.exists()) {
                            Date lastModified = objectInfo.getLastModified();
                            db.commit();
                            long torrentLength = torrent.length();
                            if (logData != null) {
                                updateLogData(bucket, logData);
                                logData.setObjectSize(torrentLength);
                            }
                            storageManager.sendObject(request, httpResponse, bucketName, torrentFile,
                                    torrentLength, null,
                                    DateUtils.format(lastModified.getTime(), DateUtils.RFC822_DATETIME_PATTERN),
                                    "application/x-bittorrent",
                                    "attachment; filename=" + objectKey + ".torrent;",
                                    request.getIsCompressed(), null, logData);

                            return null;
                        } else {
                            // No torrent exists
                            db.rollback();
                            String errorString = "Could not get torrent file " + torrentFilePath;
                            LOG.error(errorString);
                            throw new EucalyptusCloudException(errorString);
                        }
                    } else {
                        // No global object read permission
                        db.rollback();
                        throw new AccessDeniedException("Key", objectKey, logData);
                    }
                }
                Date lastModified = objectInfo.getLastModified();
                Long size = objectInfo.getSize();
                String etag = objectInfo.getEtag();
                String contentType = objectInfo.getContentType();
                String contentDisposition = objectInfo.getContentDisposition();
                db.commit();
                if (logData != null) {
                    updateLogData(bucket, logData);
                    logData.setObjectSize(size);
                }
                String versionId = null;
                if (versioning) {
                    versionId = objectInfo.getVersionId();
                }
                if (request.getGetData()) {
                    if (request.getInlineData()) {
                        if ((size * 4) > WalrusProperties.MAX_INLINE_DATA_SIZE) {
                            throw new InlineDataTooLargeException(bucketName + "/" + objectKey);
                        }
                        byte[] bytes = new byte[102400];
                        int bytesRead = 0, offset = 0;
                        String base64Data = "";
                        try {
                            FileIO fileIO = storageManager.prepareForRead(bucketName, objectName);
                            while ((bytesRead = fileIO.read(offset)) > 0) {
                                ByteBuffer buffer = fileIO.getBuffer();
                                if (buffer != null) {
                                    buffer.get(bytes, 0, bytesRead);
                                    base64Data += new String(bytes, 0, bytesRead);
                                    offset += bytesRead;
                                }
                            }
                            fileIO.finish();
                        } catch (Exception e) {
                            LOG.error(e, e);
                            throw new EucalyptusCloudException(e);
                        }
                        reply.setBase64Data(Hashes.base64encode(base64Data));

                        // fireUsageEvent For Get Object
                    } else {
                        reply.setHasStreamingData(true);
                        // support for large objects
                        storageManager.sendObject(request, httpResponse, bucketName, objectName, size, etag,
                                DateUtils.format(lastModified.getTime(), DateUtils.RFC822_DATETIME_PATTERN),
                                contentType, contentDisposition, request.getIsCompressed(), versionId, logData);

                        // fireUsageEvent For Get Object
                        return null;
                    }
                } else {
                    // Request is for headers/metadata only
                    storageManager.sendHeaders(request, httpResponse, size, etag,
                            DateUtils.format(lastModified.getTime(), DateUtils.RFC822_DATETIME_PATTERN),
                            contentType, contentDisposition, versionId, logData);
                    return null;

                }
                reply.setEtag(etag);
                reply.setLastModified(DateUtils.format(lastModified, DateUtils.RFC822_DATETIME_PATTERN));
                reply.setSize(size);
                reply.setContentType(contentType);
                reply.setContentDisposition(contentDisposition);
                Status status = new Status();
                status.setCode(200);
                status.setDescription("OK");
                reply.setStatus(status);
                return reply;
            } else {
                // Permissions not sufficient
                // Fix for EUCA-2782. Different exceptions are thrown based
                // on the request type so that the downstream logic can
                // differentiate
                db.rollback();
                if (getData) {
                    throw new AccessDeniedException("Key", objectKey, logData);
                } else {
                    throw new HeadAccessDeniedException("Key", objectKey, logData);
                }
            }
        } else {
            // Key not found
            // Fix for EUCA-2782. Different exceptions are thrown based on
            // the request type so that the downstream logic can
            // differentiate
            db.rollback();
            if (getData) {
                throw new NoSuchEntityException(objectKey);
            } else {
                throw new HeadNoSuchEntityException(objectKey);
            }
        }
    } else {
        // Bucket doesn't exist
        // Fix for EUCA-2782. Different exceptions are thrown based on the
        // request type so that the downstream logic can differentiate
        db.rollback();
        if (getData) {
            throw new NoSuchBucketException(bucketName);
        } else {
            throw new HeadNoSuchBucketException(bucketName);
        }
    }
}

From source file:com.eucalyptus.walrus.WalrusFSManager.java

@Override
public GetObjectResponseType getObject(GetObjectType request) throws WalrusException {
    GetObjectResponseType reply = (GetObjectResponseType) request.getReply();
    // Must explicitly set to true for streaming large objects.
    reply.setHasStreamingData(false);
    String bucketName = request.getBucket();
    String objectKey = request.getKey();
    Context ctx = Contexts.lookup();
    Account account = ctx.getAccount();
    Boolean deleteAfterGet = request.getDeleteAfterGet();
    if (deleteAfterGet == null) {
        deleteAfterGet = false;
    }

    Boolean getMetaData = request.getGetMetaData();
    if (getMetaData == null) {
        getMetaData = false;
    }

    Boolean getData = request.getGetData();
    if (getData == null) {
        getData = false;
    }

    EntityWrapper<BucketInfo> db = EntityWrapper.get(BucketInfo.class);
    BucketInfo bucketInfo = new BucketInfo(bucketName);
    List<BucketInfo> bucketList = db.queryEscape(bucketInfo);

    if (bucketList.size() > 0) {
        BucketInfo bucket = bucketList.get(0);
        BucketLogData logData = bucket.getLoggingEnabled() ? request.getLogData() : null;
        boolean versioning = false;
        if (bucket.isVersioningEnabled()) {
            versioning = true;
        }
        EntityWrapper<ObjectInfo> dbObject = db.recast(ObjectInfo.class);
        ObjectInfo searchObjectInfo = new ObjectInfo(bucketName, objectKey);
        searchObjectInfo.setVersionId(request.getVersionId());
        searchObjectInfo.setDeleted(false);
        if (request.getVersionId() == null) {
            searchObjectInfo.setLast(true);
        }
        List<ObjectInfo> objectInfos = dbObject.queryEscape(searchObjectInfo);
        if (objectInfos.size() > 0) {
            ObjectInfo objectInfo = objectInfos.get(0);
            String objectName = objectInfo.getObjectName();
            if (getMetaData) {
                ArrayList<MetaDataEntry> metaData = new ArrayList<MetaDataEntry>();
                List<MetaDataInfo> metaDataInfos = objectInfo.getMetaData();
                for (MetaDataInfo metaDataInfo : metaDataInfos) {
                    metaData.add(new MetaDataEntry(metaDataInfo.getName(), metaDataInfo.getValue()));
                }
                reply.setMetaData(metaData);
            }

            Date lastModified = objectInfo.getLastModified();
            Long size = objectInfo.getSize();
            String etag = objectInfo.getEtag();
            String contentType = objectInfo.getContentType();
            String contentDisposition = objectInfo.getContentDisposition();
            db.commit();

            if (logData != null) {
                updateLogData(bucket, logData);
                logData.setObjectSize(size);
            }
            String versionId = null;
            if (versioning) {
                versionId = objectInfo.getVersionId();
            }
            if (request.getGetData()) {
                //check if this is a multipart object
                if (objectInfo.isMultipart()) {
                    String inlineData = getMultipartData(objectInfo, request, reply);
                    if (inlineData != null) {
                        reply.setBase64Data(inlineData);
                    }
                } else {
                    if (request.getInlineData()) {
                        if ((size * 4) > WalrusProperties.MAX_INLINE_DATA_SIZE) {
                            throw new InlineDataTooLargeException(bucketName + "/" + objectKey);
                        }
                        byte[] bytes = new byte[102400];
                        int bytesRead = 0, offset = 0;
                        String base64Data = "";
                        try {
                            FileIO fileIO = storageManager.prepareForRead(bucketName, objectName);
                            while ((bytesRead = fileIO.read(offset)) > 0) {
                                ByteBuffer buffer = fileIO.getBuffer();
                                if (buffer != null) {
                                    buffer.get(bytes, 0, bytesRead);
                                    base64Data += new String(bytes, 0, bytesRead);
                                    offset += bytesRead;
                                }
                            }
                            fileIO.finish();
                        } catch (Exception e) {
                            LOG.error(e, e);
                            throw new InternalErrorException(e);
                        }
                        reply.setBase64Data(Hashes.base64encode(base64Data));

                    } else {
                        reply.setHasStreamingData(true);
                        // support for large objects
                        //fill in reply with useful things
                        storageManager.getObject(bucketName, objectName, reply, size,
                                request.getIsCompressed());
                    }
                }
            }
            reply.setEtag(etag);
            reply.setLastModified(lastModified);
            reply.setVersionId(versionId);
            reply.setSize(size);
            reply.setContentType(contentType);
            reply.setContentDisposition(contentDisposition);
            Status status = new Status();
            status.setCode(200);
            status.setDescription("OK");
            reply.setStatus(status);
            return reply;
        } else {
            // Key not found
            // Fix for EUCA-2782. Different exceptions are thrown based on
            // the request type so that the downstream logic can
            // differentiate
            db.rollback();
            if (getData) {
                throw new NoSuchEntityException(objectKey);
            } else {
                throw new HeadNoSuchEntityException(objectKey);
            }
        }
    } else {
        // Bucket doesn't exist
        // Fix for EUCA-2782. Different exceptions are thrown based on the
        // request type so that the downstream logic can differentiate
        db.rollback();
        if (getData) {
            throw new NoSuchBucketException(bucketName);
        } else {
            throw new HeadNoSuchBucketException(bucketName);
        }
    }
}