Example usage for org.apache.commons.lang3.tuple MutablePair getLeft

List of usage examples for org.apache.commons.lang3.tuple MutablePair getLeft

Introduction

On this page you can find example usage for org.apache.commons.lang3.tuple MutablePair getLeft.

Prototype

@Override
public L getLeft() 
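
Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the projects listed; the demo class name is made up) showing what getLeft() returns on a MutablePair and how it reflects later calls to setLeft():

import org.apache.commons.lang3.tuple.MutablePair;

public class MutablePairGetLeftDemo {
    public static void main(String[] args) {
        // Create a pair whose components can be changed in place.
        MutablePair<String, Integer> pair = MutablePair.of("requests", 0);

        // getLeft() returns the current left element.
        System.out.println(pair.getLeft()); // requests

        // Unlike ImmutablePair, a MutablePair can be updated in place;
        // getLeft() then reflects the new value.
        pair.setLeft("permits");
        pair.setRight(42);
        System.out.println(pair.getLeft() + "=" + pair.getRight()); // permits=42
    }
}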

Usage

From source file:org.apache.giraph.block_app.reducers.array.ArrayReduce.java

/**
 * Registers one new reducer that will reduce an array of objects
 * by reducing individual elements using {@code elementReduceOp}.
 *
 * This function returns a ReducerArrayHandle through which
 * individual elements can be manipulated separately.
 *
 * @param fixedSize Number of elements
 * @param elementReduceOp ReduceOperation for individual elements
 * @param createFunction Function for creating a reducer
 * @return Created ReducerArrayHandle
 */
public static <S, T extends Writable> ReducerArrayHandle<S, T> createArrayHandles(final int fixedSize,
        ReduceOperation<S, T> elementReduceOp, CreateReducerFunctionApi createFunction) {
    final ReducerHandle<Pair<IntRef, S>, ArrayWritable<T>> reduceHandle = createFunction
            .createReducer(new ArrayReduce<>(fixedSize, elementReduceOp));

    final IntRef curIndex = new IntRef(0);
    final MutablePair<IntRef, S> reusablePair = MutablePair.of(new IntRef(0), null);
    final ReducerHandle<S, T> elementReduceHandle = new ReducerHandle<S, T>() {
        @Override
        public T getReducedValue(MasterGlobalCommUsage master) {
            ArrayWritable<T> result = reduceHandle.getReducedValue(master);
            return result.get()[curIndex.value];
        }

        @Override
        public void reduce(S valueToReduce) {
            reusablePair.getLeft().value = curIndex.value;
            reusablePair.setRight(valueToReduce);
            reduceHandle.reduce(reusablePair);
        }

        @Override
        public BroadcastHandle<T> broadcastValue(BlockMasterApi master) {
            throw new UnsupportedOperationException();
        }
    };

    return new ReducerArrayHandle<S, T>() {
        @Override
        public ReducerHandle<S, T> get(int index) {
            curIndex.value = index;
            return elementReduceHandle;
        }

        @Override
        public int getStaticSize() {
            return fixedSize;
        }

        @Override
        public int getReducedSize(BlockMasterApi master) {
            return getStaticSize();
        }

        @Override
        public BroadcastArrayHandle<T> broadcastValue(BlockMasterApi master) {
            final BroadcastHandle<ArrayWritable<T>> broadcastHandle = reduceHandle.broadcastValue(master);
            final IntRef curIndex = new IntRef(0);
            final BroadcastHandle<T> elementBroadcastHandle = new BroadcastHandle<T>() {
                @Override
                public T getBroadcast(WorkerBroadcastUsage worker) {
                    ArrayWritable<T> result = broadcastHandle.getBroadcast(worker);
                    return result.get()[curIndex.value];
                }
            };
            return new BroadcastArrayHandle<T>() {
                @Override
                public BroadcastHandle<T> get(int index) {
                    curIndex.value = index;
                    return elementBroadcastHandle;
                }

                @Override
                public int getStaticSize() {
                    return fixedSize;
                }

                @Override
                public int getBroadcastedSize(WorkerBroadcastUsage worker) {
                    return getStaticSize();
                }
            };
        }
    };
}

From source file:org.apache.giraph.block_app.reducers.array.BasicArrayReduce.java

/**
 * Registers one new reducer that will reduce a BasicArray of a
 * predefined size by reducing individual elements using
 * {@code elementReduceOp}.
 *
 * This function returns a ReducerArrayHandle through which
 * individual elements can be manipulated separately.
 *
 * @param fixedSize Number of elements
 * @param typeOps TypeOps of individual elements
 * @param elementReduceOp ReduceOperation for individual elements
 * @param createFunction Function for creating a reducer
 * @return Created ReducerArrayHandle
 */
public static <S, R extends Writable> ReducerArrayHandle<S, R> createArrayHandles(final int fixedSize,
        final PrimitiveTypeOps<R> typeOps, ReduceOperation<S, R> elementReduceOp,
        CreateReducerFunctionApi createFunction) {
    final ReducerHandle<Pair<IntRef, S>, WArrayList<R>> reduceHandle = createFunction
            .createReducer(new BasicArrayReduce<>(fixedSize, typeOps, elementReduceOp));
    final IntRef curIndex = new IntRef(0);
    final R reusableValue = typeOps.create();
    final R initialValue = elementReduceOp.createInitialValue();
    final MutablePair<IntRef, S> reusablePair = MutablePair.of(new IntRef(0), null);
    final ReducerHandle<S, R> elementReduceHandle = new ReducerHandle<S, R>() {
        @Override
        public R getReducedValue(MasterGlobalCommUsage master) {
            WArrayList<R> result = reduceHandle.getReducedValue(master);
            if (fixedSize == -1 && curIndex.value >= result.size()) {
                typeOps.set(reusableValue, initialValue);
            } else {
                result.getIntoW(curIndex.value, reusableValue);
            }
            return reusableValue;
        }

        @Override
        public void reduce(S valueToReduce) {
            reusablePair.getLeft().value = curIndex.value;
            reusablePair.setRight(valueToReduce);
            reduceHandle.reduce(reusablePair);
        }

        @Override
        public BroadcastHandle<R> broadcastValue(BlockMasterApi master) {
            throw new UnsupportedOperationException();
        }
    };

    return new ReducerArrayHandle<S, R>() {
        @Override
        public ReducerHandle<S, R> get(int index) {
            curIndex.value = index;
            return elementReduceHandle;
        }

        @Override
        public int getStaticSize() {
            if (fixedSize == -1) {
                throw new UnsupportedOperationException("Cannot call size, when one is not specified upfront");
            }
            return fixedSize;
        }

        @Override
        public int getReducedSize(BlockMasterApi master) {
            return reduceHandle.getReducedValue(master).size();
        }

        @Override
        public BroadcastArrayHandle<R> broadcastValue(BlockMasterApi master) {
            final BroadcastHandle<WArrayList<R>> broadcastHandle = reduceHandle.broadcastValue(master);
            final IntRef curIndex = new IntRef(0);
            final R reusableValue = typeOps.create();
            final BroadcastHandle<R> elementBroadcastHandle = new BroadcastHandle<R>() {
                @Override
                public R getBroadcast(WorkerBroadcastUsage worker) {
                    WArrayList<R> result = broadcastHandle.getBroadcast(worker);
                    if (fixedSize == -1 && curIndex.value >= result.size()) {
                        typeOps.set(reusableValue, initialValue);
                    } else {
                        result.getIntoW(curIndex.value, reusableValue);
                    }
                    return reusableValue;
                }
            };
            return new BroadcastArrayHandle<R>() {
                @Override
                public BroadcastHandle<R> get(int index) {
                    curIndex.value = index;
                    return elementBroadcastHandle;
                }

                @Override
                public int getStaticSize() {
                    if (fixedSize == -1) {
                        throw new UnsupportedOperationException(
                                "Cannot call size, when one is not specified upfront");
                    }
                    return fixedSize;
                }

                @Override
                public int getBroadcastedSize(WorkerBroadcastUsage worker) {
                    return broadcastHandle.getBroadcast(worker).size();
                }
            };
        }
    };
}

From source file:org.apache.giraph.comm.flow_control.CreditBasedFlowControl.java

@Override
public void messageAckReceived(int taskId, long requestId, int response) {
    boolean ignoreCredit = shouldIgnoreCredit(response);
    short credit = getCredit(response);
    int timestamp = getTimestamp(response);
    MutablePair<AdjustableSemaphore, Integer> pair = (MutablePair<AdjustableSemaphore, Integer>) perWorkerOpenRequestMap
            .get(taskId);
    AdjustableSemaphore openRequestPermit = pair.getLeft();
    // Release a permit on open requests if we received an ACK for a request
    // other than a Resume request (resume requests are always sent regardless
    // of the number of open requests)
    if (!resumeRequestsId.get(taskId).remove(requestId)) {
        openRequestPermit.release();
    } else if (LOG.isDebugEnabled()) {
        LOG.debug("messageAckReceived: ACK of resume received from " + taskId + " timestamp=" + timestamp);
    }
    if (!ignoreCredit) {
        synchronized (pair) {
            if (compareTimestamps(timestamp, pair.getRight()) > 0) {
                pair.setRight(timestamp);
                openRequestPermit.setMaxPermits(credit);
            } else if (LOG.isDebugEnabled()) {
                LOG.debug("messageAckReceived: received out-of-order messages." + "Received timestamp="
                        + timestamp + " and current timestamp=" + pair.getRight());
            }
        }
    }
    // Since we received a response and we changed the credit of the sender
    // client, we may be able to send some more requests to the sender
    // client. So, we try to send as many requests as we can to the sender
    // client.
    trySendCachedRequests(taskId);
}

From source file:org.apache.giraph.comm.flow_control.CreditBasedFlowControl.java

/**
 * Process a resume signal that came from a given worker
 *
 * @param clientId id of the worker that sent the signal
 * @param credit the credit value sent along with the resume signal
 * @param requestId timestamp (request id) of the resume signal
 */
public void processResumeSignal(int clientId, short credit, long requestId) {
    int timestamp = (int) (requestId & 0xFFFF);
    if (LOG.isDebugEnabled()) {
        LOG.debug("processResumeSignal: resume signal from " + clientId + " with timestamp=" + timestamp);
    }
    MutablePair<AdjustableSemaphore, Integer> pair = (MutablePair<AdjustableSemaphore, Integer>) perWorkerOpenRequestMap
            .get(clientId);
    synchronized (pair) {
        if (compareTimestamps(timestamp, pair.getRight()) > 0) {
            pair.setRight(timestamp);
            pair.getLeft().setMaxPermits(credit);
        } else if (LOG.isDebugEnabled()) {
            LOG.debug("processResumeSignal: received out-of-order messages. " + "Received timestamp="
                    + timestamp + " and current timestamp=" + pair.getRight());
        }
    }
    trySendCachedRequests(clientId);
}

From source file:org.apache.giraph.ooc.DiskBackedPartitionStore.java

@Override
public <M extends Writable> void addPartitionIncomingMessages(int partitionId, VertexIdMessages<I, M> messages)
        throws IOException {
    if (conf.getIncomingMessageClasses().useMessageCombiner()) {
        ((MessageStore<I, M>) incomingMessageStore).addPartitionMessages(partitionId, messages);
    } else {
        MetaPartition meta = partitions.get(partitionId);
        checkNotNull(meta, "addPartitionIncomingMessages: trying to add " + "messages to partition "
                + partitionId + " which does not exist " + "in the partition set of this worker!");

        synchronized (meta) {
            switch (meta.getState()) {
            case INACTIVE:
            case ACTIVE:
                // A partition might be in memory, but its message store might still
                // be on disk. This happens because while we are loading the partition
                // to memory, we only load its current messages, not the incoming
                // messages. If a new superstep has been started, while the partition
                // is still in memory, the incoming message store in the previous
                // superstep (which is the current messages in the current superstep)
                // is on disk.
                // This may also happen when a partition is offloaded to disk while
                // it was unprocessed, and then again loaded in the same superstep for
                // processing.
                Boolean isMsgOnDisk = incomingMessagesOnDisk.get(partitionId);
                if (isMsgOnDisk == null || !isMsgOnDisk) {
                    ((MessageStore<I, M>) incomingMessageStore).addPartitionMessages(partitionId, messages);
                    break;
                }
                // Continue to IN_TRANSIT and ON_DISK cases as the partition is in
                // memory, but its messages are not yet loaded
                // CHECKSTYLE: stop FallThrough
            case IN_TRANSIT:
            case ON_DISK:
                // CHECKSTYLE: resume FallThrough
                List<VertexIdMessages<I, Writable>> newMessages = new ArrayList<VertexIdMessages<I, Writable>>();
                newMessages.add((VertexIdMessages<I, Writable>) messages);
                int length = messages.getSerializedSize();
                Pair<Integer, List<VertexIdMessages<I, Writable>>> newPair = new MutablePair<>(length,
                        newMessages);
                messageBufferRWLock.readLock().lock();
                Pair<Integer, List<VertexIdMessages<I, Writable>>> oldPair = pendingIncomingMessages
                        .putIfAbsent(partitionId, newPair);
                if (oldPair != null) {
                    synchronized (oldPair) {
                        MutablePair<Integer, List<VertexIdMessages<I, Writable>>> pair = (MutablePair<Integer, List<VertexIdMessages<I, Writable>>>) oldPair;
                        pair.setLeft(pair.getLeft() + length);
                        pair.getRight().add((VertexIdMessages<I, Writable>) messages);
                    }
                }
                messageBufferRWLock.readLock().unlock();
                // In the case that the number of partitions is asked to be fixed by
                // the user, we should offload the message buffers as necessary.
                if (isNumPartitionsFixed && pendingIncomingMessages.get(partitionId).getLeft() > minBuffSize) {
                    try {
                        spillPartitionMessages(partitionId);
                    } catch (IOException e) {
                        throw new IllegalStateException("addPartitionIncomingMessages: "
                                + "spilling message buffers for partition " + partitionId + " failed!");
                    }
                }
                break;
            default:
                throw new IllegalStateException("addPartitionIncomingMessages: " + "illegal state "
                        + meta.getState() + " for partition " + meta.getId());
            }
        }
    }
}

From source file:org.apache.giraph.ooc.DiskBackedPartitionStore.java

@Override
@edu.umd.cs.findbugs.annotations.SuppressWarnings("SF_SWITCH_FALLTHROUGH")
public void addPartitionEdges(Integer partitionId, VertexIdEdges<I, E> edges) {
    if (!isInitialized.get()) {
        initialize();
    }

    MetaPartition meta = new MetaPartition(partitionId);
    MetaPartition temp = partitions.putIfAbsent(partitionId, meta);
    if (temp != null) {
        meta = temp;
    }

    boolean createPartition = false;
    synchronized (meta) {
        switch (meta.getState()) {
        case INIT:
            Partition<I, V, E> partition = conf.createPartition(partitionId, context);
            meta.setPartition(partition);
            // This is set to processed so that in the very next iteration cycle,
            // when startIteration is called, all partitions seem to be processed
            // and ready for the next iteration cycle. Otherwise, startIteration
            // fails in its sanity check due to finding an unprocessed partition.
            meta.setProcessed(true);
            numPartitionsInMem.getAndIncrement();
            meta.setState(State.INACTIVE);
            synchronized (processedPartitions) {
                processedPartitions.get(State.INACTIVE).add(partitionId);
                processedPartitions.notifyAll();
            }
            createPartition = true;
            // Continue to INACTIVE case to add the edges to the partition
            // CHECKSTYLE: stop FallThrough
        case INACTIVE:
            // CHECKSTYLE: resume FallThrough
            edgeStore.addPartitionEdges(partitionId, edges);
            break;
        case IN_TRANSIT:
        case ON_DISK:
            // Adding edges to in-memory buffer of the partition
            List<VertexIdEdges<I, E>> newEdges = new ArrayList<VertexIdEdges<I, E>>();
            newEdges.add(edges);
            int length = edges.getSerializedSize();
            Pair<Integer, List<VertexIdEdges<I, E>>> newPair = new MutablePair<>(length, newEdges);
            edgeBufferRWLock.readLock().lock();
            Pair<Integer, List<VertexIdEdges<I, E>>> oldPair = pendingInputEdges.putIfAbsent(partitionId,
                    newPair);
            if (oldPair != null) {
                synchronized (oldPair) {
                    MutablePair<Integer, List<VertexIdEdges<I, E>>> pair = (MutablePair<Integer, List<VertexIdEdges<I, E>>>) oldPair;
                    pair.setLeft(pair.getLeft() + length);
                    pair.getRight().add(edges);
                }
            }
            edgeBufferRWLock.readLock().unlock();
            // In the case that the number of partitions is asked to be fixed by the
            // user, we should offload the edge store as necessary.
            if (isNumPartitionsFixed && pendingInputEdges.get(partitionId).getLeft() > minBuffSize) {
                try {
                    spillPartitionInputEdgeStore(partitionId);
                } catch (IOException e) {
                    throw new IllegalStateException("addPartitionEdges: spilling " + "edge store for partition "
                            + partitionId + " failed!");
                }
            }
            break;
        default:
            throw new IllegalStateException(
                    "illegal state " + meta.getState() + " for partition " + meta.getId());
        }
    }
    // If creation of a new partition is violating the policy of maximum number
    // of partitions in memory, we should spill a partition to disk.
    if (createPartition && numPartitionsInMem.get() > maxPartitionsInMem.get()) {
        swapOnePartitionToDisk();
    }
}

From source file:org.apache.giraph.ooc.DiskBackedPartitionStore.java

@Override
@edu.umd.cs.findbugs.annotations.SuppressWarnings("SF_SWITCH_FALLTHROUGH")
public void addPartitionVertices(Integer partitionId, ExtendedDataOutput extendedDataOutput) {
    if (!isInitialized.get()) {
        initialize();
    }

    MetaPartition meta = new MetaPartition(partitionId);
    MetaPartition temp = partitions.putIfAbsent(partitionId, meta);
    if (temp != null) {
        meta = temp;
    }

    boolean createPartition = false;
    synchronized (meta) {
        switch (meta.getState()) {
        case INIT:
            Partition<I, V, E> partition = conf.createPartition(partitionId, context);
            meta.setPartition(partition);
            // Look at the comments in 'addPartitionEdges' for why we set this
            // to true.
            meta.setProcessed(true);
            numPartitionsInMem.getAndIncrement();
            meta.setState(State.INACTIVE);
            synchronized (processedPartitions) {
                processedPartitions.get(State.INACTIVE).add(partitionId);
                processedPartitions.notifyAll();
            }
            createPartition = true;
            // Continue to INACTIVE case to add the vertices to the partition
            // CHECKSTYLE: stop FallThrough
        case INACTIVE:
            // CHECKSTYLE: resume FallThrough
            meta.getPartition().addPartitionVertices(new VertexIterator<I, V, E>(extendedDataOutput, conf));
            break;
        case IN_TRANSIT:
        case ON_DISK:
            // Adding vertices to in-memory buffer of the partition
            List<ExtendedDataOutput> vertices = new ArrayList<ExtendedDataOutput>();
            vertices.add(extendedDataOutput);
            int length = extendedDataOutput.getPos();
            Pair<Integer, List<ExtendedDataOutput>> newPair = new MutablePair<>(length, vertices);
            vertexBufferRWLock.readLock().lock();
            Pair<Integer, List<ExtendedDataOutput>> oldPair = pendingInputVertices.putIfAbsent(partitionId,
                    newPair);
            if (oldPair != null) {
                synchronized (oldPair) {
                    MutablePair<Integer, List<ExtendedDataOutput>> pair = (MutablePair<Integer, List<ExtendedDataOutput>>) oldPair;
                    pair.setLeft(pair.getLeft() + length);
                    pair.getRight().add(extendedDataOutput);
                }
            }
            vertexBufferRWLock.readLock().unlock();
            // In the case that the number of partitions is asked to be fixed by the
            // user, we should offload the vertex buffer as necessary.
            if (isNumPartitionsFixed && pendingInputVertices.get(partitionId).getLeft() > minBuffSize) {
                try {
                    spillPartitionInputVertexBuffer(partitionId);
                } catch (IOException e) {
                    throw new IllegalStateException("addPartitionVertices: spilling "
                            + "vertex buffer for partition " + partitionId + " failed!");
                }
            }
            break;
        default:
            throw new IllegalStateException(
                    "illegal state " + meta.getState() + " for partition " + meta.getId());
        }
    }
    // If creation of a new partition is violating the policy of maximum number
    // of partitions in memory, we should spill a partition to disk.
    if (createPartition && numPartitionsInMem.get() > maxPartitionsInMem.get()) {
        swapOnePartitionToDisk();
    }
}

From source file:org.apache.hyracks.storage.am.common.TreeIndexTestUtils.java

protected void addFilterField(IIndexTestContext ctx, MutablePair<ITupleReference, ITupleReference> minMax)
        throws HyracksDataException {
    // Duplicate the PK field as a filter field at the end of the tuple to be inserted.
    int filterField = ctx.getFieldCount();
    ITupleReference currTuple = ctx.getTuple();
    ArrayTupleBuilder filterBuilder = new ArrayTupleBuilder(1);
    filterBuilder.addField(currTuple.getFieldData(filterField), currTuple.getFieldStart(filterField),
            currTuple.getFieldLength(filterField));
    IBinaryComparator comparator = ctx.getComparatorFactories()[0].createBinaryComparator();
    ArrayTupleReference filterOnlyTuple = new ArrayTupleReference();
    filterOnlyTuple.reset(filterBuilder.getFieldEndOffsets(), filterBuilder.getByteArray());
    if (minMax == null) {
        minMax = MutablePair.of(filterOnlyTuple, filterOnlyTuple);
    } else if (compareFilterTuples(minMax.getLeft(), filterOnlyTuple, comparator) > 0) {
        minMax.setLeft(filterOnlyTuple);
    } else if (compareFilterTuples(minMax.getRight(), filterOnlyTuple, comparator) < 0) {
        minMax.setRight(filterOnlyTuple);
    }
}

From source file:org.apache.hyracks.storage.am.lsm.btree.LSMBTreeFilterMergeTestDriver.java

@Override
protected void runTest(ISerializerDeserializer[] fieldSerdes, int numKeys, BTreeLeafFrameType leafType,
        ITupleReference lowKey, ITupleReference highKey, ITupleReference prefixLowKey,
        ITupleReference prefixHighKey) throws Exception {
    OrderedIndexTestContext ctx = createTestContext(fieldSerdes, numKeys, leafType, true);
    ctx.getIndex().create();
    ctx.getIndex().activate();
    // Start off with one tree bulk loaded.
    // We assume all fieldSerdes are of the same type. Check the first one
    // to determine which field types to generate.
    if (fieldSerdes[0] instanceof IntegerSerializerDeserializer) {
        orderedIndexTestUtils.bulkLoadIntTuples(ctx, numTuplesToInsert, true, getRandom());
    } else if (fieldSerdes[0] instanceof UTF8StringSerializerDeserializer) {
        orderedIndexTestUtils.bulkLoadStringTuples(ctx, numTuplesToInsert, true, getRandom());
    }

    int maxTreesToMerge = AccessMethodTestsConfig.LSM_BTREE_MAX_TREES_TO_MERGE;
    ILSMIndexAccessor accessor = (ILSMIndexAccessor) ctx.getIndexAccessor();
    IBinaryComparator comp = ctx.getComparatorFactories()[0].createBinaryComparator();
    for (int i = 0; i < maxTreesToMerge; i++) {
        int flushed = 0;
        for (; flushed < i; flushed++) {
            Pair<ITupleReference, ITupleReference> minMax = null;
            if (fieldSerdes[0] instanceof IntegerSerializerDeserializer) {
                minMax = orderedIndexTestUtils.insertIntTuples(ctx, numTuplesToInsert, true, getRandom());
            } else {
                minMax = orderedIndexTestUtils.insertStringTuples(ctx, numTuplesToInsert, true, getRandom());
            }
            if (minMax != null) {
                ILSMComponentFilter f = ((LSMBTree) ctx.getIndex()).getCurrentMemoryComponent()
                        .getLSMComponentFilter();
                Pair<ITupleReference, ITupleReference> obsMinMax = filterToMinMax(f);
                Assert.assertEquals(0,
                        TreeIndexTestUtils.compareFilterTuples(obsMinMax.getLeft(), minMax.getLeft(), comp));
                Assert.assertEquals(0,
                        TreeIndexTestUtils.compareFilterTuples(obsMinMax.getRight(), minMax.getRight(), comp));
            }

            StubIOOperationCallback stub = new StubIOOperationCallback();
            BlockingIOOperationCallbackWrapper waiter = new BlockingIOOperationCallbackWrapper(stub);
            accessor.scheduleFlush(waiter);
            waiter.waitForIO();
            if (minMax != null) {
                Pair<ITupleReference, ITupleReference> obsMinMax = filterToMinMax(
                        stub.getLastNewComponent().getLSMComponentFilter());
                Assert.assertEquals(0,
                        TreeIndexTestUtils.compareFilterTuples(obsMinMax.getLeft(), minMax.getLeft(), comp));
                Assert.assertEquals(0,
                        TreeIndexTestUtils.compareFilterTuples(obsMinMax.getRight(), minMax.getRight(), comp));
            }
        }

        List<ILSMDiskComponent> flushedComponents = ((LSMBTree) ctx.getIndex()).getImmutableComponents();
        MutablePair<ITupleReference, ITupleReference> expectedMergeMinMax = null;
        for (ILSMDiskComponent f : flushedComponents) {
            Pair<ITupleReference, ITupleReference> componentMinMax = filterToMinMax(f.getLSMComponentFilter());
            if (expectedMergeMinMax == null) {
                expectedMergeMinMax = MutablePair.of(componentMinMax.getLeft(), componentMinMax.getRight());
            }
            if (TreeIndexTestUtils.compareFilterTuples(expectedMergeMinMax.getLeft(), componentMinMax.getLeft(),
                    comp) > 0) {
                expectedMergeMinMax.setLeft(componentMinMax.getLeft());
            }
            if (TreeIndexTestUtils.compareFilterTuples(expectedMergeMinMax.getRight(),
                    componentMinMax.getRight(), comp) < 0) {
                expectedMergeMinMax.setRight(componentMinMax.getRight());
            }
        }
        accessor.scheduleMerge(NoOpIOOperationCallback.INSTANCE,
                ((LSMBTree) ctx.getIndex()).getImmutableComponents());

        flushedComponents = ((LSMBTree) ctx.getIndex()).getImmutableComponents();
        Pair<ITupleReference, ITupleReference> mergedMinMax = filterToMinMax(
                flushedComponents.get(0).getLSMComponentFilter());
        Assert.assertEquals(0, TreeIndexTestUtils.compareFilterTuples(expectedMergeMinMax.getLeft(),
                mergedMinMax.getLeft(), comp));
        Assert.assertEquals(0, TreeIndexTestUtils.compareFilterTuples(expectedMergeMinMax.getRight(),
                mergedMinMax.getRight(), comp));

        orderedIndexTestUtils.checkPointSearches(ctx);
        orderedIndexTestUtils.checkScan(ctx);
        orderedIndexTestUtils.checkDiskOrderScan(ctx);
        orderedIndexTestUtils.checkRangeSearch(ctx, lowKey, highKey, true, true);
        if (prefixLowKey != null && prefixHighKey != null) {
            orderedIndexTestUtils.checkRangeSearch(ctx, prefixLowKey, prefixHighKey, true, true);
        }
    }
    ctx.getIndex().deactivate();
    ctx.getIndex().destroy();
}

From source file:org.diorite.config.serialization.comments.CommentsNodeImpl.java

public Map<String, MutablePair<String, CommentsNodeImpl>> copyMap(CommentsNodeImpl parent) {
    Map<String, MutablePair<String, CommentsNodeImpl>> result = new HashMap<>(this.dataMap.size());
    for (Entry<String, MutablePair<String, CommentsNodeImpl>> entry : this.dataMap.entrySet()) {
        String keyToCpy = entry.getKey();
        MutablePair<String, CommentsNodeImpl> valueToCpy = entry.getValue();
        CommentsNodeImpl nodeToCpy = valueToCpy.getRight();
        CommentsNodeImpl copiedNode;
        if (nodeToCpy == null) {
            copiedNode = null;
        } else {
            copiedNode = new CommentsNodeImpl(parent);
            copiedNode.dataMap.putAll(nodeToCpy.copyMap(parent));
        }
        MutablePair<String, CommentsNodeImpl> copied = new MutablePair<>(valueToCpy.getLeft(), copiedNode);
        result.put(keyToCpy, copied);
    }
    return result;
}