Example usage for java.nio ByteBuffer equals

Introduction

This page collects example usages of java.nio.ByteBuffer.equals, drawn from open-source projects.

Prototype

public boolean equals(Object other) 

Document

Checks whether this byte buffer is equal to another object.
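
Per the JDK contract, two byte buffers are equal if, and only if, they have the same number of remaining elements and the two sequences of remaining elements, considered independently of their starting positions, are pointwise equal. Position, mark, and capacity are ignored, and a byte buffer is never equal to an object of any other type. A minimal, self-contained sketch of that contract (the class name is ours):

import java.nio.ByteBuffer;

public class EqualsContractDemo {
    public static void main(String[] args) {
        ByteBuffer a = ByteBuffer.wrap(new byte[] { 1, 2, 3 });
        ByteBuffer b = ByteBuffer.allocate(10); // different capacity
        b.put(new byte[] { 1, 2, 3 });
        b.flip(); // the remaining region is now the three bytes just written

        System.out.println(a.equals(b)); // true: only the remaining bytes are compared

        b.get(); // advance b's position past the first byte
        System.out.println(a.equals(b)); // false: the remaining sequences differ
    }
}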

Usage

From source file:Main.java

public static void main(String[] argv) throws Exception {
    ByteBuffer bbuf = ByteBuffer.allocate(10);
    int capacity = bbuf.capacity(); // 10
    System.out.println(capacity);
    bbuf.putShort(2, (short) 123);

    ByteBuffer bb = bbuf.duplicate(); // shares content, same initial position and limit

    System.out.println(bbuf.equals(bb)); // true: identical remaining bytes
}

From source file:io.mycat.util.ByteBufferUtil.java

public static boolean isPrefix(ByteBuffer prefix, ByteBuffer value) {
    if (prefix.remaining() > value.remaining()) {
        return false;
    }

    int diff = value.remaining() - prefix.remaining();
    return prefix.equals(value.duplicate().limit(value.remaining() - diff));
}
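
The trick here is that the duplicate's limit is cut back so both buffers expose the same number of remaining bytes before equals performs its pointwise comparison (the arithmetic appears to assume value's position is 0). A hypothetical call, assuming java.nio.charset.StandardCharsets is imported:

ByteBuffer prefix = ByteBuffer.wrap("abc".getBytes(StandardCharsets.UTF_8));
ByteBuffer value = ByteBuffer.wrap("abcdef".getBytes(StandardCharsets.UTF_8));
System.out.println(isPrefix(prefix, value)); // true: the first three bytes of value match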

From source file:org.alfresco.contentstore.ContentStoreTest.java

private void assertFileEquals(InputStream expected, InputStream actual, State state) throws IOException {
    ByteBuffer bb1 = ByteBuffer.allocate(1024);
    ByteBuffer bb2 = ByteBuffer.allocate(1024);
    int count1 = 0;
    int count2 = 0;

    try (ReadableByteChannel channel = Channels.newChannel(expected);
            ReadableByteChannel channel1 = Channels.newChannel(actual)) {
        int i1 = channel.read(bb1);
        bb1.flip();

        int i2 = channel1.read(bb2);
        bb2.flip();

        if (i1 == i2) {
            count1 += i1;
            count2 += i2;
            assertTrue("Not equal at " + state, bb1.equals(bb2));
        } else {
            fail("Not equal at " + state);
        }
    }
}
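
Note how flip() is called after each read: it sets the limit to the number of bytes just read, so equals compares exactly the regions that were filled. The read counts are checked first because two buffers with different remaining lengths are never equal.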

From source file:org.apache.cassandra.db.ColumnFamilyStore.java

public List<Row> scan(IndexClause clause, AbstractBounds range, IFilter dataFilter) {
    // Start with the most-restrictive indexed clause, then apply remaining clauses
    // to each row matching that clause.
    // TODO: allow merge join instead of just one index + loop
    IndexExpression primary = highestSelectivityPredicate(clause);
    ColumnFamilyStore indexCFS = getIndexedColumnFamilyStore(primary.column_name);
    if (logger.isDebugEnabled())
        logger.debug("Primary scan clause is " + getComparator().getString(primary.column_name));
    assert indexCFS != null;
    DecoratedKey indexKey = indexCFS.partitioner.decorateKey(primary.value);

    // if the slice predicate doesn't contain all the columns for which we have expressions to evaluate,
    // it needs to be expanded to include those too
    IFilter firstFilter = dataFilter;
    NamesQueryFilter extraFilter = null;
    if (clause.expressions.size() > 1) {
        if (dataFilter instanceof SliceQueryFilter) {
            // if we have a high chance of getting all the columns in a single index slice, do that.
            // otherwise, create an extraFilter to fetch by name the columns referenced by the additional expressions.
            if (getMaxRowSize() < DatabaseDescriptor.getColumnIndexSize()) {
                logger.debug("Expanding slice filter to entire row to cover additional expressions");
                firstFilter = new SliceQueryFilter(ByteBufferUtil.EMPTY_BYTE_BUFFER,
                        ByteBufferUtil.EMPTY_BYTE_BUFFER, ((SliceQueryFilter) dataFilter).reversed,
                        Integer.MAX_VALUE);
            } else {
                logger.debug("adding extraFilter to cover additional expressions");
                SortedSet<ByteBuffer> columns = new TreeSet<ByteBuffer>(getComparator());
                for (IndexExpression expr : clause.expressions) {
                    if (expr == primary)
                        continue;
                    columns.add(expr.column_name);
                }
                extraFilter = new NamesQueryFilter(columns);
            }
        } else {
            logger.debug("adding columns to firstFilter to cover additional expressions");
            // just add in columns that are not part of the resultset
            assert dataFilter instanceof NamesQueryFilter;
            SortedSet<ByteBuffer> columns = new TreeSet<ByteBuffer>(getComparator());
            for (IndexExpression expr : clause.expressions) {
                if (expr == primary || ((NamesQueryFilter) dataFilter).columns.contains(expr.column_name))
                    continue;
                columns.add(expr.column_name);
            }
            if (columns.size() > 0) {
                columns.addAll(((NamesQueryFilter) dataFilter).columns);
                firstFilter = new NamesQueryFilter(columns);
            }
        }
    }

    List<Row> rows = new ArrayList<Row>();
    ByteBuffer startKey = clause.start_key;
    QueryPath path = new QueryPath(columnFamily);

    // we need to store the last data key accessed to avoid duplicate results,
    // because a new iteration of the while loop can revisit the same column if start_key was not set
    ByteBuffer lastDataKey = null;

    // fetch row keys matching the primary expression, fetch the slice predicate for each
    // and filter by remaining expressions.  repeat until finished w/ assigned range or index row is exhausted.
    outer: while (true) {
        /* we don't have a way to get the key back from the DK -- we just have a token --
         * so, we need to loop after starting with start_key, until we get to keys in the given `range`.
         * But, if the calling StorageProxy is doing a good job estimating data from each range, the range
         * should be pretty close to `start_key`. */
        if (logger.isDebugEnabled())
            logger.debug(String.format("Scanning index %s starting with %s", expressionString(primary),
                    indexCFS.getComparator().getString(startKey)));

        // We shouldn't fetch only 1 row as this provides buggy paging in case the first row doesn't satisfy all clauses
        int count = Math.max(clause.count, 2);
        QueryFilter indexFilter = QueryFilter.getSliceFilter(indexKey,
                new QueryPath(indexCFS.getColumnFamilyName()), startKey, ByteBufferUtil.EMPTY_BYTE_BUFFER,
                false, count);
        ColumnFamily indexRow = indexCFS.getColumnFamily(indexFilter);
        logger.debug("fetched {}", indexRow);
        if (indexRow == null)
            break;

        ByteBuffer dataKey = null;
        int n = 0;
        for (IColumn column : indexRow.getSortedColumns()) {
            if (column.isMarkedForDelete())
                continue;
            dataKey = column.name();
            n++;

            DecoratedKey dk = partitioner.decorateKey(dataKey);
            if (!range.right.equals(partitioner.getMinimumToken()) && range.right.compareTo(dk.token) < 0)
                break outer;
            if (!range.contains(dk.token) || dataKey.equals(lastDataKey))
                continue;

            // get the row columns requested, and additional columns for the expressions if necessary
            ColumnFamily data = getColumnFamily(new QueryFilter(dk, path, firstFilter));
            assert data != null : String.format(
                    "No data found for %s in %s:%s (original filter %s) from expression %s", firstFilter, dk,
                    path, dataFilter, expressionString(primary));
            logger.debug("fetched data row {}", data);
            if (extraFilter != null) {
                // we might have gotten the expression columns in with the main data slice, but
                // we can't know for sure until that slice is done.  So, we'll do the extra query
                // if we go through and any expression columns are not present.
                for (IndexExpression expr : clause.expressions) {
                    if (expr != primary && data.getColumn(expr.column_name) == null) {
                        data.addAll(getColumnFamily(new QueryFilter(dk, path, extraFilter)));
                        break;
                    }
                }
            }

            if (satisfies(data, clause, primary)) {
                logger.debug("row {} satisfies all clauses", data);
                // cut the resultset back to what was requested, if necessary
                if (firstFilter != dataFilter) {
                    ColumnFamily expandedData = data;
                    data = expandedData.cloneMeShallow();
                    IColumnIterator iter = dataFilter.getMemtableColumnIterator(expandedData, dk,
                            getComparator());
                    new QueryFilter(dk, path, dataFilter).collectCollatedColumns(data, iter, gcBefore());
                }

                rows.add(new Row(dk, data));
            }

            if (rows.size() == clause.count)
                break outer;
        }
        if (n < clause.count || startKey.equals(dataKey))
            break;

        lastDataKey = startKey = dataKey;
    }

    return rows;
}
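
In this scan, ByteBuffer.equals drives the paging logic: dataKey.equals(lastDataKey) skips a row already returned by the previous iteration, and startKey.equals(dataKey) detects that the index slice made no further progress, ending the loop.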

From source file:org.apache.hadoop.hbase.filter.ParseFilter.java

/**
 * Parses the filter string and constructs a filter from it
 * <p>
 * @param filterStringAsByteArray the filter string given by the user
 * @return the constructed filter object
 */
public Filter parseFilterString(byte[] filterStringAsByteArray) throws CharacterCodingException {
    // stack for the operators and parenthesis
    Stack<ByteBuffer> operatorStack = new Stack<ByteBuffer>();
    // stack for the filter objects
    Stack<Filter> filterStack = new Stack<Filter>();

    Filter filter = null;
    for (int i = 0; i < filterStringAsByteArray.length; i++) {
        if (filterStringAsByteArray[i] == ParseConstants.LPAREN) {
            // LPAREN found
            operatorStack.push(ParseConstants.LPAREN_BUFFER);
        } else if (filterStringAsByteArray[i] == ParseConstants.WHITESPACE
                || filterStringAsByteArray[i] == ParseConstants.TAB) {
            // WHITESPACE or TAB found
            continue;
        } else if (checkForOr(filterStringAsByteArray, i)) {
            // OR found
            i += ParseConstants.OR_ARRAY.length - 1;
            reduce(operatorStack, filterStack, ParseConstants.OR_BUFFER);
            operatorStack.push(ParseConstants.OR_BUFFER);
        } else if (checkForAnd(filterStringAsByteArray, i)) {
            // AND found
            i += ParseConstants.AND_ARRAY.length - 1;
            reduce(operatorStack, filterStack, ParseConstants.AND_BUFFER);
            operatorStack.push(ParseConstants.AND_BUFFER);
        } else if (checkForSkip(filterStringAsByteArray, i)) {
            // SKIP found
            i += ParseConstants.SKIP_ARRAY.length - 1;
            reduce(operatorStack, filterStack, ParseConstants.SKIP_BUFFER);
            operatorStack.push(ParseConstants.SKIP_BUFFER);
        } else if (checkForWhile(filterStringAsByteArray, i)) {
            // WHILE found
            i += ParseConstants.WHILE_ARRAY.length - 1;
            reduce(operatorStack, filterStack, ParseConstants.WHILE_BUFFER);
            operatorStack.push(ParseConstants.WHILE_BUFFER);
        } else if (filterStringAsByteArray[i] == ParseConstants.RPAREN) {
            // RPAREN found
            if (operatorStack.empty()) {
                throw new IllegalArgumentException("Mismatched parenthesis");
            }
            ByteBuffer argumentOnTopOfStack = operatorStack.peek();
            while (!(argumentOnTopOfStack.equals(ParseConstants.LPAREN_BUFFER))) {
                filterStack.push(popArguments(operatorStack, filterStack));
                if (operatorStack.empty()) {
                    throw new IllegalArgumentException("Mismatched parenthesis");
                }
                argumentOnTopOfStack = operatorStack.pop();
            }
        } else {
            // SimpleFilterExpression found
            byte[] filterSimpleExpression = extractFilterSimpleExpression(filterStringAsByteArray, i);
            i += (filterSimpleExpression.length - 1);
            filter = parseSimpleFilterExpression(filterSimpleExpression);
            filterStack.push(filter);
        }
    }

    // Finished parsing filterString
    while (!operatorStack.empty()) {
        filterStack.push(popArguments(operatorStack, filterStack));
    }
    filter = filterStack.pop();
    if (!filterStack.empty()) {
        throw new IllegalArgumentException("Incorrect Filter String");
    }
    return filter;
}
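
Because the operator tokens are ByteBuffer constants, equals compares the content of the token on top of the stack against LPAREN_BUFFER; the comparison is value-based, so reference identity is not required.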

From source file:org.apache.hadoop.hbase.filter.ParseFilter.java

/**
 * Pops an operator from the operator stack, pops the number of filter arguments
 * that operator requires from the filter stack, and evaluates them
 * <p>
 * @param operatorStack the stack containing the operators
 * @param filterStack the stack containing the filters
 * @return the evaluated filter
 */
public static Filter popArguments(Stack<ByteBuffer> operatorStack, Stack<Filter> filterStack) {
    ByteBuffer argumentOnTopOfStack = operatorStack.peek();

    if (argumentOnTopOfStack.equals(ParseConstants.OR_BUFFER)) {
        // The top of the stack is an OR
        try {
            ArrayList<Filter> listOfFilters = new ArrayList<Filter>();
            while (!operatorStack.empty() && operatorStack.peek().equals(ParseConstants.OR_BUFFER)) {
                Filter filter = filterStack.pop();
                listOfFilters.add(0, filter);
                operatorStack.pop();
            }
            Filter filter = filterStack.pop();
            listOfFilters.add(0, filter);
            Filter orFilter = new FilterList(FilterList.Operator.MUST_PASS_ONE, listOfFilters);
            return orFilter;
        } catch (EmptyStackException e) {
            throw new IllegalArgumentException("Incorrect input string - an OR needs two filters");
        }

    } else if (argumentOnTopOfStack.equals(ParseConstants.AND_BUFFER)) {
        // The top of the stack is an AND
        try {
            ArrayList<Filter> listOfFilters = new ArrayList<Filter>();
            while (!operatorStack.empty() && operatorStack.peek().equals(ParseConstants.AND_BUFFER)) {
                Filter filter = filterStack.pop();
                listOfFilters.add(0, filter);
                operatorStack.pop();
            }
            Filter filter = filterStack.pop();
            listOfFilters.add(0, filter);
            Filter andFilter = new FilterList(FilterList.Operator.MUST_PASS_ALL, listOfFilters);
            return andFilter;
        } catch (EmptyStackException e) {
            throw new IllegalArgumentException("Incorrect input string - an AND needs two filters");
        }

    } else if (argumentOnTopOfStack.equals(ParseConstants.SKIP_BUFFER)) {
        // The top of the stack is a SKIP
        try {
            Filter wrappedFilter = filterStack.pop();
            Filter skipFilter = new SkipFilter(wrappedFilter);
            operatorStack.pop();
            return skipFilter;
        } catch (EmptyStackException e) {
            throw new IllegalArgumentException("Incorrect input string - a SKIP wraps a filter");
        }

    } else if (argumentOnTopOfStack.equals(ParseConstants.WHILE_BUFFER)) {
        // The top of the stack is a WHILE
        try {
            Filter wrappedFilter = filterStack.pop();
            Filter whileMatchFilter = new WhileMatchFilter(wrappedFilter);
            operatorStack.pop();
            return whileMatchFilter;
        } catch (EmptyStackException e) {
            throw new IllegalArgumentException("Incorrect input string - a WHILE wraps a filter");
        }

    } else if (argumentOnTopOfStack.equals(ParseConstants.LPAREN_BUFFER)) {
        // The top of the stack is a LPAREN
        try {
            Filter filter = filterStack.pop();
            operatorStack.pop();
            return filter;
        } catch (EmptyStackException e) {
            throw new IllegalArgumentException("Incorrect Filter String");
        }

    } else {
        throw new IllegalArgumentException("Incorrect arguments on operatorStack");
    }
}

From source file:org.apache.hadoop.hbase.filter.ParseFilter.java

/**
 * Takes a compare operator symbol as a byte array and returns the corresponding CompareOp
 * <p>
 * @param compareOpAsByteArray the compare operator symbol as a byte array
 * @return the corresponding CompareOp
 */
public static CompareFilter.CompareOp createCompareOp(byte[] compareOpAsByteArray) {
    ByteBuffer compareOp = ByteBuffer.wrap(compareOpAsByteArray);
    if (compareOp.equals(ParseConstants.LESS_THAN_BUFFER))
        return CompareOp.LESS;
    else if (compareOp.equals(ParseConstants.LESS_THAN_OR_EQUAL_TO_BUFFER))
        return CompareOp.LESS_OR_EQUAL;
    else if (compareOp.equals(ParseConstants.GREATER_THAN_BUFFER))
        return CompareOp.GREATER;
    else if (compareOp.equals(ParseConstants.GREATER_THAN_OR_EQUAL_TO_BUFFER))
        return CompareOp.GREATER_OR_EQUAL;
    else if (compareOp.equals(ParseConstants.NOT_EQUAL_TO_BUFFER))
        return CompareOp.NOT_EQUAL;
    else if (compareOp.equals(ParseConstants.EQUAL_TO_BUFFER))
        return CompareOp.EQUAL;
    else
        throw new IllegalArgumentException("Invalid compare operator");
}
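
Wrapping the raw bytes in a ByteBuffer lets the method dispatch on content, since a plain byte[] has no value-based equals. A minimal sketch of the same pattern, using a hypothetical stand-in for ParseConstants.LESS_THAN_BUFFER:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class CompareOpDemo {
    // hypothetical stand-in for ParseConstants.LESS_THAN_BUFFER
    static final ByteBuffer LESS_THAN_BUFFER = ByteBuffer.wrap("<".getBytes(StandardCharsets.UTF_8));

    public static void main(String[] args) {
        ByteBuffer compareOp = ByteBuffer.wrap("<".getBytes(StandardCharsets.UTF_8));
        System.out.println(compareOp.equals(LESS_THAN_BUFFER)); // true: same remaining bytes
    }
}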

From source file:org.apache.hadoop.hbase.io.hfile.TestHFileBlock.java

static void assertBuffersEqual(ByteBuffer expectedBuffer, ByteBuffer actualBuffer,
        Compression.Algorithm compression, DataBlockEncoding encoding, boolean pread) {
    if (!actualBuffer.equals(expectedBuffer)) {
        int prefix = 0;
        int minLimit = Math.min(expectedBuffer.limit(), actualBuffer.limit());
        while (prefix < minLimit && expectedBuffer.get(prefix) == actualBuffer.get(prefix)) {
            prefix++;
        }

        fail(String.format(
                "Content mismath for compression %s, encoding %s, "
                        + "pread %s, commonPrefix %d, expected %s, got %s",
                compression, encoding, pread, prefix, nextBytesToStr(expectedBuffer, prefix),
                nextBytesToStr(actualBuffer, prefix)));
    }
}
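
The absolute get(int) reads used to locate the common prefix do not advance either buffer's position, so the diagnostics inspect the same regions that the failed equals call compared.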

From source file:org.apache.hadoop.hdfs.tools.offlineEditsViewer.TestOfflineEditsViewer.java

/**
 * Compare two files, ignoring trailing zeros at the end; for an edits log the
 * trailing zeros do not make any difference. Returns false if the files are
 * not the same.
 *
 * @param filenameSmall first file to compare (doesn't have to be smaller)
 * @param filenameLarge second file to compare (doesn't have to be larger)
 */
private boolean filesEqualIgnoreTrailingZeros(String filenameSmall, String filenameLarge) throws IOException {

    ByteBuffer small = ByteBuffer.wrap(DFSTestUtil.loadFile(filenameSmall));
    ByteBuffer large = ByteBuffer.wrap(DFSTestUtil.loadFile(filenameLarge));
    // OEV outputs with the latest layout version, so tweak the old file's
    // contents to have latest version so checkedin binary files don't
    // require frequent updates
    small.put(3, (byte) NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);

    // now correct if it's otherwise
    if (small.capacity() > large.capacity()) {
        ByteBuffer tmpByteBuffer = small;
        small = large;
        large = tmpByteBuffer;
        String tmpFilename = filenameSmall;
        filenameSmall = filenameLarge;
        filenameLarge = tmpFilename;
    }

    // compare from 0 to capacity of small
    // the rest of the large should be all zeros
    small.position(0);
    small.limit(small.capacity());
    large.position(0);
    large.limit(small.capacity());

    // compares position to limit
    if (!small.equals(large)) {
        return false;
    }

    // everything after limit should be 0xFF
    int i = large.limit();
    large.clear();
    for (; i < large.capacity(); i++) {
        if (large.get(i) != FSEditLogOpCodes.OP_INVALID.getOpCode()) {
            return false;
        }
    }

    return true;
}
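
Since equals only compares the bytes between position and limit, the code first pins both buffers to the shared region with position(0) and limit(small.capacity()); clear() then resets large's limit to its capacity so the trailing bytes can be checked one by one with absolute get(int).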