Example usage for org.apache.cassandra.utils ByteBufferUtil readWithShortLength

List of usage examples for org.apache.cassandra.utils ByteBufferUtil readWithShortLength

Introduction

On this page you can find example usages of org.apache.cassandra.utils ByteBufferUtil readWithShortLength.

Prototype

public static ByteBuffer readWithShortLength(DataInput in) throws IOException 

Source Link

Usage

From source file:com.cloudian.support.RowFinder.java

License:Apache License

/**
 * Scans every SSTable of the configured keyspace/column family for the target
 * row key (this.rowKey) and prints, for each SSTable that contains the key,
 * per-row statistics: live column count, tombstoned column count, range
 * tombstone count, and any contiguous runs of tombstoned columns whose length
 * reaches this.threshold.
 *
 * @throws IOException if reading an SSTable data file fails
 */
private void find() throws IOException {

    // get ColumnFamilyStore instance
    System.out.println("Opening keyspace " + ksName + " ...");
    Keyspace keyspace = Keyspace.open(ksName);
    System.out.println("Opened keyspace " + ksName);

    System.out.println("Getting column family " + cfName + " ...");
    ColumnFamilyStore cfStore = keyspace.getColumnFamilyStore(cfName);
    Collection<SSTableReader> ssTables = cfStore.getSSTables();
    System.out.println("Got column family " + cfName);

    ByteBuffer buff = ByteBufferUtil.bytes(this.rowKey);
    IPartitioner<?> partitioner = cfStore.partitioner;

    System.out.println(this.rowKey + " is included in the following files");
    System.out.println("==============================================================================");
    // NOTE(review): header says "FINE_NAME" — presumably a typo for "FILE_NAME"; confirm before fixing the output format.
    System.out.println("FINE_NAME, COLUMN_INFO, CONTIGUOUS_TOMBSTONED_COLUMNS(over " + threshold + ")");
    System.out.println("==============================================================================");
    for (SSTableReader reader : ssTables) {

        // Bloom filter gives a cheap "definitely absent" check per SSTable.
        if (reader.getBloomFilter().isPresent(buff)) {

            // seek to row key
            // NOTE(review): dfile is never closed — looks like a file-handle leak; confirm and close after use.
            RandomAccessReader dfile = reader.openDataReader();
            RowIndexEntry entry = reader.getPosition(partitioner.decorateKey(buff), SSTableReader.Operator.EQ);
            // Bloom filter false positive: key not actually in this SSTable.
            if (entry == null)
                continue;
            // Jump to the start of the row in the data file.
            dfile.seek(entry.position);

            // Consume the row header: key, optional row size, deletion info,
            // optional column count (layout depends on the SSTable version).
            ByteBufferUtil.readWithShortLength(dfile);
            if (reader.descriptor.version.hasRowSizeAndColumnCount)
                dfile.readLong();
            DeletionInfo deletionInfo = new DeletionInfo(DeletionTime.serializer.deserialize(dfile));
            int columnCount = reader.descriptor.version.hasRowSizeAndColumnCount ? dfile.readInt()
                    : Integer.MAX_VALUE;

            // get iterator
            Iterator<OnDiskAtom> atomIterator = reader.metadata.getOnDiskIterator(dfile, columnCount,
                    reader.descriptor.version);

            // iterate over the row's atoms, tracking contiguous tombstone runs
            System.out.print(new File(reader.getFilename()).getName());
            boolean isContiguous = false;
            int contiguousTombstonedColumns = 0;
            String contiguousTombstonedColumnsStart = null;
            int live = 0;
            int deleted = 0;
            int rangeTombstone = 0;
            StringBuffer sb = new StringBuffer();
            while (atomIterator.hasNext()) {

                OnDiskAtom atom = atomIterator.next();

                if (atom instanceof Column) {

                    if (atom instanceof DeletedColumn) {

                        deleted++;

                        // Start of a new contiguous run of tombstoned columns.
                        if (!isContiguous) {

                            isContiguous = true;
                            contiguousTombstonedColumnsStart = ByteBufferUtil.string(atom.name());

                        }

                        contiguousTombstonedColumns++;

                    } else {

                        live++;

                        // A live column ends any current tombstone run.
                        if (isContiguous) {

                            // print the run if it met the threshold
                            if (contiguousTombstonedColumns >= this.threshold) {

                                sb.append(", [" + contiguousTombstonedColumnsStart + "|"
                                        + contiguousTombstonedColumns + "]");

                            }

                            // reset
                            contiguousTombstonedColumns = 0;
                            contiguousTombstonedColumnsStart = null;
                            isContiguous = false;

                        }

                    }

                } else if (atom instanceof RangeTombstone) {

                    rangeTombstone++;

                    int localDeletionTime = atom.getLocalDeletionTime();
                    ByteBuffer min = ((RangeTombstone) atom).min;
                    ByteBuffer max = ((RangeTombstone) atom).max;
                    String minString = ByteBufferUtil.string(min);
                    String maxString = ByteBufferUtil.string(max);

                    sb.append(", [" + minString + ", " + maxString + "(" + localDeletionTime + ")]");

                }

                // if it ends with finished columns
                // NOTE(review): this check runs on EVERY atom once the run length
                // reaches the threshold, so a long run is appended once per
                // subsequent atom until a live column resets it. Presumably this
                // was intended to run once AFTER the loop to catch a run at the
                // end of the row — confirm intent before relying on the output.
                if (contiguousTombstonedColumns >= this.threshold) {

                    sb.append(
                            ", [" + contiguousTombstonedColumnsStart + "|" + contiguousTombstonedColumns + "]");

                }

            }

            System.out.print(", (live, deleted, range tombstone)=(" + live + ", " + deleted + ", "
                    + rangeTombstone + ")");
            System.out.println(sb.toString());

        }

    }

}

From source file:com.datastax.brisk.BriskServer.java

License:Apache License

/**
 * Retrieves a local subBlock/* w  ww.j  av a2 s .c  o  m*/
 * 
 * @param blockId row key
 * @param sblockId SubBlock column name
 * @param offset inside the sblock
 * @return a local sublock
 * @throws TException
 */
/**
 * Retrieves a local subBlock by scanning each SSTable of the sub-block
 * column family for the given row key and column name.
 *
 * @param subBlockCFName name of the column family holding sub-blocks
 * @param blockId row key
 * @param sblockId SubBlock column name
 * @param offset offset inside the sblock
 * @return a local sublock, or null if no SSTable contains it
 * @throws TException wrapping any underlying IOException
 */
private LocalBlock getLocalSubBlock(String subBlockCFName, ByteBuffer blockId, ByteBuffer sblockId, int offset)
        throws TException {
    DecoratedKey<Token<?>> decoratedKey = new DecoratedKey<Token<?>>(
            StorageService.getPartitioner().getToken(blockId), blockId);

    Table table = Table.open(cfsKeyspace);
    ColumnFamilyStore sblockStore = table.getColumnFamilyStore(subBlockCFName);

    Collection<SSTableReader> sstables = sblockStore.getSSTables();

    for (SSTableReader sstable : sstables) {

        long position = sstable.getPosition(decoratedKey, Operator.EQ);

        // -1 means the key is not present in this SSTable.
        if (position == -1)
            continue;

        String filename = sstable.descriptor.filenameFor(Component.DATA);
        RandomAccessFile raf = null;
        int mappedLength = -1;
        MappedByteBuffer mappedData = null;
        MappedFileDataInput file = null;
        try {
            raf = new RandomAccessFile(filename, "r");
            assert position < raf.length();

            // FileChannel.map cannot map more than Integer.MAX_VALUE bytes,
            // so cap the mapping at 2GB from the row position.
            mappedLength = (raf.length() - position) < Integer.MAX_VALUE ? (int) (raf.length() - position)
                    : Integer.MAX_VALUE;

            mappedData = raf.getChannel().map(FileChannel.MapMode.READ_ONLY, position, mappedLength);

            file = new MappedFileDataInput(mappedData, filename, 0);

            // Verify the key was found in the data file.
            DecoratedKey keyInDisk = SSTableReader.decodeKey(sstable.partitioner, sstable.descriptor,
                    ByteBufferUtil.readWithShortLength(file));
            assert keyInDisk.equals(decoratedKey) : String.format("%s != %s in %s", keyInDisk, decoratedKey,
                    file.getPath());

            long rowSize = SSTableReader.readRowSize(file, sstable.descriptor);

            assert rowSize > 0;
            assert rowSize < mappedLength;

            Filter bf = IndexHelper.defreezeBloomFilter(file, sstable.descriptor.usesOldBloomFilter);

            // Verify this column is in this version of the row.
            if (!bf.isPresent(sblockId))
                continue;

            List<IndexHelper.IndexInfo> indexList = IndexHelper.deserializeIndex(file);

            // We can stop early if the bloom filter says none of the columns
            // actually exist -- but we can't stop before deserializing the cf
            // header, in case there's a relevant tombstone.
            ColumnFamilySerializer serializer = ColumnFamily.serializer();
            try {
                ColumnFamily cf = serializer
                        .deserializeFromSSTableNoColumns(ColumnFamily.create(sstable.metadata), file);

                // Whole row is tombstoned in this SSTable; try the next one.
                if (cf.isMarkedForDelete())
                    continue;

            } catch (Exception e) {
                // Wrap with context; the cause is preserved and the outer
                // catch converts this into a TException.
                throw new IOException(serializer + " failed to deserialize " + sstable.getColumnFamilyName()
                        + " with " + sstable.metadata + " from " + file, e);
            }

            // Locate the sub-block column, with or without a column index.
            Integer sblockLength = (indexList == null)
                    ? seekToSubColumn(sstable.metadata, file, sblockId)
                    : seekToSubColumn(sstable.metadata, file, sblockId, indexList);

            if (sblockLength == null || sblockLength < 0)
                continue;

            // How far into the mapped region the column value starts.
            int bytesReadFromStart = mappedLength - (int) file.bytesRemaining();

            if (logger.isDebugEnabled())
                logger.debug("BlockLength = " + sblockLength + " Availible " + file.bytesRemaining());

            assert offset <= sblockLength : String.format("%d > %d", offset, sblockLength);

            long dataOffset = position + bytesReadFromStart;

            if (file.bytesRemaining() == 0 || sblockLength == 0)
                continue;

            return new LocalBlock(file.getPath(), dataOffset + offset, sblockLength - offset);

        } catch (IOException e) {
            throw new TException(e);
        } finally {
            // Always release the file handle; the mapped buffer is released
            // when garbage collected.
            FileUtils.closeQuietly(raf);
        }
    }

    return null;
}

From source file:com.datastax.brisk.BriskServer.java

License:Apache License

/**
 * Checks if the current column is the one we are looking for
 * @param metadata/*  w  ww  . ja v  a  2s .c o m*/
 * @param file
 * @param sblockId
 * @return if > 0 the length to read from current file offset. if -1 not relevent. if null out of bounds
 */
/**
 * Checks whether the column at the current file position is the sub-block
 * we are looking for.
 *
 * @param metadata column family metadata (supplies the column name comparator)
 * @param file data input positioned at the start of a serialized column
 * @param sblockId the sub-block column name being sought
 * @return the value length to read from the current file offset if found;
 *         -1 if this column is not the one sought (its value is skipped);
 *         null if we have scanned past where the column could be
 * @throws IOException if reading from the file fails
 */
private Integer isSubBlockFound(CFMetaData metadata, FileDataInput file, ByteBuffer sblockId)
        throws IOException {
    ByteBuffer name = ByteBufferUtil.readWithShortLength(file);

    // Columns are stored in comparator order; stop if we've gone too far (return null).
    if (metadata.comparator.compare(name, sblockId) > 0)
        return null;

    // Column serialization flags (carry the deletion/expiration bits).
    int b = file.readUnsignedByte();

    // Skip the timestamp (block ids are unique, so it is not needed);
    // the read still has to happen to advance the stream.
    file.readLong();
    int sblockLength = file.readInt();

    // Reject name mismatches and tombstoned/expiring columns, skipping the value.
    if (!name.equals(sblockId) || (b & ColumnSerializer.DELETION_MASK) != 0
            || (b & ColumnSerializer.EXPIRATION_MASK) != 0) {
        FileUtils.skipBytesFully(file, sblockLength);
        return -1;
    }

    return sblockLength;
}

From source file:com.fullcontact.sstable.hadoop.IndexOffsetScanner.java

License:Apache License

/**
 * Get the next offset from the SSTable index.
 * @return SSTable offset.//from w w w.j a  v a2  s. co m
 */
/**
 * Reads the next entry from the SSTable index and returns its data-file offset.
 *
 * @return offset of the next row in the SSTable data file
 */
public long next() {
    try {
        // The row key is not needed here; consume it to advance past the
        // short-length-prefixed key bytes.
        ByteBufferUtil.readWithShortLength(input);

        final long dataFileOffset = input.readLong();

        // NOTE: index versions "ic" and later ("> ia") carry a promoted index
        // after the offset — it must be skipped. See the Cassandra Descriptor.
        skipPromotedIndex(input);

        return dataFileOffset;
    } catch (IOException e) {
        throw new IOError(e);
    }
}

From source file:com.fullcontact.sstable.hadoop.mapreduce.SSTableRowRecordReader.java

License:Apache License

@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
    // Nothing left in this split.
    if (!hasMore()) {
        return false;
    }

    // Row key: a short (2-byte) length prefix followed by the key bytes.
    final ByteBuffer rowKey = ByteBufferUtil.readWithShortLength(getReader());
    setCurrentKey(rowKey);

    // Row data size.
    // NOTE(review): older SSTable versions may store this as an int rather
    // than a long — confirm against the Cassandra version in use.
    final long rowSize = getReader().readLong();

    // Build the row iterator over the data that follows and expose it as the value.
    setCurrentValue(getIdentityIterator(rowKey, rowSize));

    return true;
}

From source file:com.netflix.aegisthus.io.commitlog.CommitLogScanner.java

License:Apache License

/**
 * Returns the next row mutation from the commit log as a JSON-ish string,
 * optionally restricted to a single column family id.
 *
 * @param filter column family id to include, or a negative value for all
 * @return the next serialized row, or null when the log segment is exhausted
 */
@SuppressWarnings("unused")
public String next(int filter) {
    int serializedSize;
    if (cache.size() > 0) {
        // if we are here we are reading rows that we already reported the
        // size of.
        datasize = 0;
        return cache.remove(0);
    }
    try {
        outer: while (true) {
            // A serialized size of 0 marks the end of the log segment.
            serializedSize = input.readInt();
            if (serializedSize == 0) {
                return null;
            }
            // NOTE(review): both checksums are read to advance the stream but
            // never verified against the data — confirm this is intentional.
            long claimedSizeChecksum = input.readLong();
            // NOTE(review): buffer is over-allocated by 20%; only
            // serializedSize bytes are ever read into or out of it.
            byte[] buffer = new byte[(int) (serializedSize * 1.2)];
            input.readFully(buffer, 0, serializedSize);
            long claimedChecksum = input.readLong();

            // two checksums plus the int for the size
            datasize = serializedSize + 8 + 8 + 4;

            // Deserialize the RowMutation: table name, row key, then a map of
            // per-column-family modifications.
            FastByteArrayInputStream bufIn = new FastByteArrayInputStream(buffer, 0, serializedSize);
            DataInput ris = new DataInputStream(bufIn);
            String table = ris.readUTF();
            ByteBuffer key = ByteBufferUtil.readWithShortLength(ris);
            Map<Integer, ColumnFamily> modifications = new HashMap<Integer, ColumnFamily>();
            int size = ris.readInt();
            for (int i = 0; i < size; ++i) {
                Integer cfid = Integer.valueOf(ris.readInt());
                // Skip the entire mutation if it is not for the requested cf.
                if (filter >= 0 && cfid != filter) {
                    continue outer;
                }
                // Boolean marker: false means this column family entry is null.
                if (!ris.readBoolean()) {
                    continue;
                }
                // NOTE(review): cfid is read a second time here — presumably the
                // serialized ColumnFamily repeats its id; confirm against the
                // ColumnFamily serialization format for this Cassandra version.
                cfid = Integer.valueOf(ris.readInt());
                int localDeletionTime = ris.readInt();
                long markedForDeleteAt = ris.readLong();
                // Render the row as {"<key>": {"deletedAt": ..., "columns": [...]}}.
                sb = new StringBuilder();
                sb.append("{");
                insertKey(sb, BytesType.instance.getString(key));
                sb.append("{");
                insertKey(sb, "deletedAt");
                sb.append(markedForDeleteAt);
                sb.append(", ");
                insertKey(sb, "columns");
                sb.append("[");
                int columns = ris.readInt();
                serializeColumns(sb, columns, ris);
                sb.append("]");
                sb.append("}}");
                cache.add(sb.toString());
            }
            if (cache.size() > 0) {
                return cache.remove(0);
            } else {
                return null;
            }
        }
    } catch (Exception e) {
        throw new IOError(e);
    }
}

From source file:com.netflix.aegisthus.io.sstable.IndexScanner.java

License:Apache License

/**
 * Reads the next index entry and returns its (row key, data-file offset) pair.
 *
 * @return pair of the row key rendered as a string and its data-file offset
 */
@Override
public Pair<String, Long> next() {
    try {
        final ByteBuffer rawKey = ByteBufferUtil.readWithShortLength(input);
        final String key = BytesType.instance.getString(rawKey);
        final long offset = input.readLong();

        // Newer index versions append a promoted index after the offset;
        // it must be consumed before the next entry can be read.
        if (version.hasPromotedIndexes) {
            skipPromotedIndexes();
        }

        return Pair.create(key, offset);
    } catch (IOException e) {
        throw new IOError(e);
    }
}