Example usage for org.apache.commons.lang3.tuple MutablePair setLeft

List of usage examples for org.apache.commons.lang3.tuple MutablePair setLeft

Introduction

On this page you can find example usage for org.apache.commons.lang3.tuple MutablePair setLeft.

Prototype

public void setLeft(final L left) 

Document

Sets the left element of the pair.
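
Before the project usages below, here is a minimal, self-contained sketch (not taken from any of the projects listed) showing setLeft in isolation:

import org.apache.commons.lang3.tuple.MutablePair;

public class MutablePairSetLeftDemo {
    public static void main(String[] args) {
        // Start with a (count, label) pair and update the left element in place.
        MutablePair<Integer, String> pair = MutablePair.of(0, "bytes written");
        pair.setLeft(pair.getLeft() + 128);
        System.out.println(pair); // prints "(128,bytes written)"
    }
}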

Usage

From source file:org.apache.giraph.ooc.DiskBackedPartitionStore.java

@Override
public <M extends Writable> void addPartitionIncomingMessages(int partitionId, VertexIdMessages<I, M> messages)
        throws IOException {
    if (conf.getIncomingMessageClasses().useMessageCombiner()) {
        ((MessageStore<I, M>) incomingMessageStore).addPartitionMessages(partitionId, messages);
    } else {
        MetaPartition meta = partitions.get(partitionId);
        checkNotNull(meta, "addPartitionIncomingMessages: trying to add " + "messages to partition "
                + partitionId + " which does not exist " + "in the partition set of this worker!");

        synchronized (meta) {
            switch (meta.getState()) {
            case INACTIVE:
            case ACTIVE:
                // A partition might be in memory, but its message store might still
                // be on disk. This happens because while we are loading the partition
                // to memory, we only load its current messages, not the incoming
                // messages. If a new superstep has been started, while the partition
                // is still in memory, the incoming message store in the previous
                // superstep (which is the current messages in the current superstep)
                // is on disk.
                // This may also happen when a partition is offloaded to disk while
                // it was unprocessed, and then again loaded in the same superstep for
                // processing.
                Boolean isMsgOnDisk = incomingMessagesOnDisk.get(partitionId);
                if (isMsgOnDisk == null || !isMsgOnDisk) {
                    ((MessageStore<I, M>) incomingMessageStore).addPartitionMessages(partitionId, messages);
                    break;
                }
                // Continue to IN_TRANSIT and ON_DISK cases as the partition is in
                // memory, but its messages are not yet loaded
                // CHECKSTYLE: stop FallThrough
            case IN_TRANSIT:
            case ON_DISK:
                // CHECKSTYLE: resume FallThrough
                List<VertexIdMessages<I, Writable>> newMessages = new ArrayList<VertexIdMessages<I, Writable>>();
                newMessages.add((VertexIdMessages<I, Writable>) messages);
                int length = messages.getSerializedSize();
                Pair<Integer, List<VertexIdMessages<I, Writable>>> newPair = new MutablePair<>(length,
                        newMessages);
                messageBufferRWLock.readLock().lock();
                Pair<Integer, List<VertexIdMessages<I, Writable>>> oldPair = pendingIncomingMessages
                        .putIfAbsent(partitionId, newPair);
                if (oldPair != null) {
                    synchronized (oldPair) {
                        MutablePair<Integer, List<VertexIdMessages<I, Writable>>> pair = (MutablePair<Integer, List<VertexIdMessages<I, Writable>>>) oldPair;
                        pair.setLeft(pair.getLeft() + length);
                        pair.getRight().add((VertexIdMessages<I, Writable>) messages);
                    }
                }
                messageBufferRWLock.readLock().unlock();
                // In the case that the number of partitions is asked to be fixed by
                // the user, we should offload the message buffers as necessary.
                if (isNumPartitionsFixed && pendingIncomingMessages.get(partitionId).getLeft() > minBuffSize) {
                    try {
                        spillPartitionMessages(partitionId);
                    } catch (IOException e) {
                        throw new IllegalStateException("addPartitionIncomingMessages: "
                                + "spilling message buffers for partition " + partitionId + " failed!");
                    }
                }
                break;
            default:
                throw new IllegalStateException("addPartitionIncomingMessages: " + "illegal state "
                        + meta.getState() + " for partition " + meta.getId());
            }
        }
    }
}
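
The example above keeps a running byte count in the pair's left element and the buffered data in its right list. A distilled sketch of that pattern follows; the byte[] chunks and the BufferAccumulator class are hypothetical stand-ins for Giraph's VertexIdMessages and partition store:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

import org.apache.commons.lang3.tuple.MutablePair;
import org.apache.commons.lang3.tuple.Pair;

public class BufferAccumulator {
    // Per-partition buffers: left = accumulated size in bytes, right = the buffered chunks.
    private final ConcurrentMap<Integer, Pair<Integer, List<byte[]>>> pending = new ConcurrentHashMap<>();

    @SuppressWarnings("unchecked")
    public void add(int partitionId, byte[] chunk) {
        List<byte[]> chunks = new ArrayList<>();
        chunks.add(chunk);
        Pair<Integer, List<byte[]>> newPair = new MutablePair<>(chunk.length, chunks);
        Pair<Integer, List<byte[]>> oldPair = pending.putIfAbsent(partitionId, newPair);
        if (oldPair != null) {
            // Another thread registered the partition first; grow its size and append the chunk.
            synchronized (oldPair) {
                MutablePair<Integer, List<byte[]>> pair = (MutablePair<Integer, List<byte[]>>) oldPair;
                pair.setLeft(pair.getLeft() + chunk.length);
                pair.getRight().add(chunk);
            }
        }
    }
}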

From source file:org.apache.giraph.ooc.DiskBackedPartitionStore.java

@Override
@edu.umd.cs.findbugs.annotations.SuppressWarnings("SF_SWITCH_FALLTHROUGH")
public void addPartitionEdges(Integer partitionId, VertexIdEdges<I, E> edges) {
    if (!isInitialized.get()) {
        initialize();
    }

    MetaPartition meta = new MetaPartition(partitionId);
    MetaPartition temp = partitions.putIfAbsent(partitionId, meta);
    if (temp != null) {
        meta = temp;
    }

    boolean createPartition = false;
    synchronized (meta) {
        switch (meta.getState()) {
        case INIT:
            Partition<I, V, E> partition = conf.createPartition(partitionId, context);
            meta.setPartition(partition);
            // This is set to processed so that in the very next iteration cycle,
            // when startIteration is called, all partitions seem to be processed
            // and ready for the next iteration cycle. Otherwise, startIteration
            // fails in its sanity check due to finding an unprocessed partition.
            meta.setProcessed(true);
            numPartitionsInMem.getAndIncrement();
            meta.setState(State.INACTIVE);
            synchronized (processedPartitions) {
                processedPartitions.get(State.INACTIVE).add(partitionId);
                processedPartitions.notifyAll();
            }
            createPartition = true;
            // Continue to INACTIVE case to add the edges to the partition
            // CHECKSTYLE: stop FallThrough
        case INACTIVE:
            // CHECKSTYLE: resume FallThrough
            edgeStore.addPartitionEdges(partitionId, edges);
            break;
        case IN_TRANSIT:
        case ON_DISK:
            // Adding edges to in-memory buffer of the partition
            List<VertexIdEdges<I, E>> newEdges = new ArrayList<VertexIdEdges<I, E>>();
            newEdges.add(edges);
            int length = edges.getSerializedSize();
            Pair<Integer, List<VertexIdEdges<I, E>>> newPair = new MutablePair<>(length, newEdges);
            edgeBufferRWLock.readLock().lock();
            Pair<Integer, List<VertexIdEdges<I, E>>> oldPair = pendingInputEdges.putIfAbsent(partitionId,
                    newPair);
            if (oldPair != null) {
                synchronized (oldPair) {
                    MutablePair<Integer, List<VertexIdEdges<I, E>>> pair = (MutablePair<Integer, List<VertexIdEdges<I, E>>>) oldPair;
                    pair.setLeft(pair.getLeft() + length);
                    pair.getRight().add(edges);
                }
            }
            edgeBufferRWLock.readLock().unlock();
            // In the case that the number of partitions is asked to be fixed by the
            // user, we should offload the edge store as necessary.
            if (isNumPartitionsFixed && pendingInputEdges.get(partitionId).getLeft() > minBuffSize) {
                try {
                    spillPartitionInputEdgeStore(partitionId);
                } catch (IOException e) {
                    throw new IllegalStateException("addPartitionEdges: spilling " + "edge store for partition "
                            + partitionId + " failed!");
                }
            }
            break;
        default:
            throw new IllegalStateException(
                    "illegal state " + meta.getState() + " for partition " + meta.getId());
        }
    }
    // If creation of a new partition is violating the policy of maximum number
    // of partitions in memory, we should spill a partition to disk.
    if (createPartition && numPartitionsInMem.get() > maxPartitionsInMem.get()) {
        swapOnePartitionToDisk();
    }
}

From source file:org.apache.giraph.ooc.DiskBackedPartitionStore.java

@Override
@edu.umd.cs.findbugs.annotations.SuppressWarnings("SF_SWITCH_FALLTHROUGH")
public void addPartitionVertices(Integer partitionId, ExtendedDataOutput extendedDataOutput) {
    if (!isInitialized.get()) {
        initialize();
    }

    MetaPartition meta = new MetaPartition(partitionId);
    MetaPartition temp = partitions.putIfAbsent(partitionId, meta);
    if (temp != null) {
        meta = temp;
    }

    boolean createPartition = false;
    synchronized (meta) {
        switch (meta.getState()) {
        case INIT:
            Partition<I, V, E> partition = conf.createPartition(partitionId, context);
            meta.setPartition(partition);
            // Look at the comments in 'addPartitionEdges' for why we set this
            // to true.
            meta.setProcessed(true);
            numPartitionsInMem.getAndIncrement();
            meta.setState(State.INACTIVE);
            synchronized (processedPartitions) {
                processedPartitions.get(State.INACTIVE).add(partitionId);
                processedPartitions.notifyAll();
            }
            createPartition = true;
            // Continue to INACTIVE case to add the vertices to the partition
            // CHECKSTYLE: stop FallThrough
        case INACTIVE:
            // CHECKSTYLE: resume FallThrough
            meta.getPartition().addPartitionVertices(new VertexIterator<I, V, E>(extendedDataOutput, conf));
            break;
        case IN_TRANSIT:
        case ON_DISK:
            // Adding vertices to in-memory buffer of the partition
            List<ExtendedDataOutput> vertices = new ArrayList<ExtendedDataOutput>();
            vertices.add(extendedDataOutput);
            int length = extendedDataOutput.getPos();
            Pair<Integer, List<ExtendedDataOutput>> newPair = new MutablePair<>(length, vertices);
            vertexBufferRWLock.readLock().lock();
            Pair<Integer, List<ExtendedDataOutput>> oldPair = pendingInputVertices.putIfAbsent(partitionId,
                    newPair);
            if (oldPair != null) {
                synchronized (oldPair) {
                    MutablePair<Integer, List<ExtendedDataOutput>> pair = (MutablePair<Integer, List<ExtendedDataOutput>>) oldPair;
                    pair.setLeft(pair.getLeft() + length);
                    pair.getRight().add(extendedDataOutput);
                }
            }
            vertexBufferRWLock.readLock().unlock();
            // In the case that the number of partitions is asked to be fixed by the
            // user, we should offload the vertex buffer as necessary.
            if (isNumPartitionsFixed && pendingInputVertices.get(partitionId).getLeft() > minBuffSize) {
                try {
                    spillPartitionInputVertexBuffer(partitionId);
                } catch (IOException e) {
                    throw new IllegalStateException("addPartitionVertices: spilling "
                            + "vertex buffer for partition " + partitionId + " failed!");
                }
            }
            break;
        default:
            throw new IllegalStateException(
                    "illegal state " + meta.getState() + " for partition " + meta.getId());
        }
    }
    // If creation of a new partition is violating the policy of maximum number
    // of partitions in memory, we should spill a partition to disk.
    if (createPartition && numPartitionsInMem.get() > maxPartitionsInMem.get()) {
        swapOnePartitionToDisk();
    }
}

From source file:org.apache.hyracks.storage.am.common.TreeIndexTestUtils.java

protected void addFilterField(IIndexTestContext ctx, MutablePair<ITupleReference, ITupleReference> minMax)
        throws HyracksDataException {
    //Duplicate the PK field as a filter field at the end of the tuple to be inserted.
    int filterField = ctx.getFieldCount();
    ITupleReference currTuple = ctx.getTuple();
    ArrayTupleBuilder filterBuilder = new ArrayTupleBuilder(1);
    filterBuilder.addField(currTuple.getFieldData(filterField), currTuple.getFieldStart(filterField),
            currTuple.getFieldLength(filterField));
    IBinaryComparator comparator = ctx.getComparatorFactories()[0].createBinaryComparator();
    ArrayTupleReference filterOnlyTuple = new ArrayTupleReference();
    filterOnlyTuple.reset(filterBuilder.getFieldEndOffsets(), filterBuilder.getByteArray());
    if (minMax == null) {
        minMax = MutablePair.of(filterOnlyTuple, filterOnlyTuple);
    } else if (compareFilterTuples(minMax.getLeft(), filterOnlyTuple, comparator) > 0) {
        minMax.setLeft(filterOnlyTuple);
    } else if (compareFilterTuples(minMax.getRight(), filterOnlyTuple, comparator) < 0) {
        minMax.setRight(filterOnlyTuple);
    }
}
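
A simplified version of this min/max bookkeeping, using plain integers instead of Hyracks filter tuples (the MinMaxTracker class and int values are assumptions for illustration only):

import org.apache.commons.lang3.tuple.MutablePair;

public class MinMaxTracker {
    // left = smallest value seen so far, right = largest value seen so far
    public static MutablePair<Integer, Integer> observe(MutablePair<Integer, Integer> minMax, int value) {
        if (minMax == null) {
            return MutablePair.of(value, value);
        }
        if (value < minMax.getLeft()) {
            minMax.setLeft(value);
        } else if (value > minMax.getRight()) {
            minMax.setRight(value);
        }
        return minMax;
    }
}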

From source file:org.apache.hyracks.storage.am.lsm.btree.LSMBTreeFilterMergeTestDriver.java

@Override
protected void runTest(ISerializerDeserializer[] fieldSerdes, int numKeys, BTreeLeafFrameType leafType,
        ITupleReference lowKey, ITupleReference highKey, ITupleReference prefixLowKey,
        ITupleReference prefixHighKey) throws Exception {
    OrderedIndexTestContext ctx = createTestContext(fieldSerdes, numKeys, leafType, true);
    ctx.getIndex().create();
    ctx.getIndex().activate();
    // Start off with one tree bulk loaded.
    // We assume all fieldSerdes are of the same type. Check the first one
    // to determine which field types to generate.
    if (fieldSerdes[0] instanceof IntegerSerializerDeserializer) {
        orderedIndexTestUtils.bulkLoadIntTuples(ctx, numTuplesToInsert, true, getRandom());
    } else if (fieldSerdes[0] instanceof UTF8StringSerializerDeserializer) {
        orderedIndexTestUtils.bulkLoadStringTuples(ctx, numTuplesToInsert, true, getRandom());
    }

    int maxTreesToMerge = AccessMethodTestsConfig.LSM_BTREE_MAX_TREES_TO_MERGE;
    ILSMIndexAccessor accessor = (ILSMIndexAccessor) ctx.getIndexAccessor();
    IBinaryComparator comp = ctx.getComparatorFactories()[0].createBinaryComparator();
    for (int i = 0; i < maxTreesToMerge; i++) {
        int flushed = 0;
        for (; flushed < i; flushed++) {
            Pair<ITupleReference, ITupleReference> minMax = null;
            if (fieldSerdes[0] instanceof IntegerSerializerDeserializer) {
                minMax = orderedIndexTestUtils.insertIntTuples(ctx, numTuplesToInsert, true, getRandom());
            } else {
                minMax = orderedIndexTestUtils.insertStringTuples(ctx, numTuplesToInsert, true, getRandom());
            }
            if (minMax != null) {
                ILSMComponentFilter f = ((LSMBTree) ctx.getIndex()).getCurrentMemoryComponent()
                        .getLSMComponentFilter();
                Pair<ITupleReference, ITupleReference> obsMinMax = filterToMinMax(f);
                Assert.assertEquals(0,
                        TreeIndexTestUtils.compareFilterTuples(obsMinMax.getLeft(), minMax.getLeft(), comp));
                Assert.assertEquals(0,
                        TreeIndexTestUtils.compareFilterTuples(obsMinMax.getRight(), minMax.getRight(), comp));
            }

            StubIOOperationCallback stub = new StubIOOperationCallback();
            BlockingIOOperationCallbackWrapper waiter = new BlockingIOOperationCallbackWrapper(stub);
            accessor.scheduleFlush(waiter);
            waiter.waitForIO();
            if (minMax != null) {
                Pair<ITupleReference, ITupleReference> obsMinMax = filterToMinMax(
                        stub.getLastNewComponent().getLSMComponentFilter());
                Assert.assertEquals(0,
                        TreeIndexTestUtils.compareFilterTuples(obsMinMax.getLeft(), minMax.getLeft(), comp));
                Assert.assertEquals(0,
                        TreeIndexTestUtils.compareFilterTuples(obsMinMax.getRight(), minMax.getRight(), comp));
            }
        }

        List<ILSMDiskComponent> flushedComponents = ((LSMBTree) ctx.getIndex()).getImmutableComponents();
        MutablePair<ITupleReference, ITupleReference> expectedMergeMinMax = null;
        for (ILSMDiskComponent f : flushedComponents) {
            Pair<ITupleReference, ITupleReference> componentMinMax = filterToMinMax(f.getLSMComponentFilter());
            if (expectedMergeMinMax == null) {
                expectedMergeMinMax = MutablePair.of(componentMinMax.getLeft(), componentMinMax.getRight());
            }
            if (TreeIndexTestUtils.compareFilterTuples(expectedMergeMinMax.getLeft(), componentMinMax.getLeft(),
                    comp) > 0) {
                expectedMergeMinMax.setLeft(componentMinMax.getLeft());
            }
            if (TreeIndexTestUtils.compareFilterTuples(expectedMergeMinMax.getRight(),
                    componentMinMax.getRight(), comp) < 0) {
                expectedMergeMinMax.setRight(componentMinMax.getRight());
            }
        }
        accessor.scheduleMerge(NoOpIOOperationCallback.INSTANCE,
                ((LSMBTree) ctx.getIndex()).getImmutableComponents());

        flushedComponents = ((LSMBTree) ctx.getIndex()).getImmutableComponents();
        Pair<ITupleReference, ITupleReference> mergedMinMax = filterToMinMax(
                flushedComponents.get(0).getLSMComponentFilter());
        Assert.assertEquals(0, TreeIndexTestUtils.compareFilterTuples(expectedMergeMinMax.getLeft(),
                mergedMinMax.getLeft(), comp));
        Assert.assertEquals(0, TreeIndexTestUtils.compareFilterTuples(expectedMergeMinMax.getRight(),
                mergedMinMax.getRight(), comp));

        orderedIndexTestUtils.checkPointSearches(ctx);
        orderedIndexTestUtils.checkScan(ctx);
        orderedIndexTestUtils.checkDiskOrderScan(ctx);
        orderedIndexTestUtils.checkRangeSearch(ctx, lowKey, highKey, true, true);
        if (prefixLowKey != null && prefixHighKey != null) {
            orderedIndexTestUtils.checkRangeSearch(ctx, prefixLowKey, prefixHighKey, true, true);
        }
    }
    ctx.getIndex().deactivate();
    ctx.getIndex().destroy();
}

From source file:org.apache.pulsar.broker.service.Consumer.java

/**
 * Dispatch a list of entries to the consumer. <br/>
 * <b>It is also responsible for releasing the entries' data and recycling the entry objects.</b>
 *
 * @return a promise that can be used to track when all the data has been written into the socket
 */
public Pair<ChannelPromise, Integer> sendMessages(final List<Entry> entries) {
    final ChannelHandlerContext ctx = cnx.ctx();
    final MutablePair<ChannelPromise, Integer> sentMessages = new MutablePair<ChannelPromise, Integer>();
    final ChannelPromise writePromise = ctx.newPromise();
    sentMessages.setLeft(writePromise);
    if (entries.isEmpty()) {
        if (log.isDebugEnabled()) {
            log.debug("[{}] List of messages is empty, triggering write future immediately for consumerId {}",
                    subscription, consumerId);
        }
        writePromise.setSuccess();
        sentMessages.setRight(0);
        return sentMessages;
    }

    try {
        sentMessages.setRight(updatePermitsAndPendingAcks(entries));
    } catch (PulsarServerException pe) {
        log.warn("[{}] [{}] consumer doesn't support batch-message {}", subscription, consumerId,
                cnx.getRemoteEndpointProtocolVersion());

        subscription.markTopicWithBatchMessagePublished();
        sentMessages.setRight(0);
        // Disconnect the consumer: this updates the dispatcher's availablePermits and resends this
        // consumer's pending-ack messages to other consumers
        disconnect();
        return sentMessages;
    }

    ctx.channel().eventLoop().execute(() -> {
        for (int i = 0; i < entries.size(); i++) {
            Entry entry = entries.get(i);
            PositionImpl pos = (PositionImpl) entry.getPosition();
            MessageIdData.Builder messageIdBuilder = MessageIdData.newBuilder();
            MessageIdData messageId = messageIdBuilder.setLedgerId(pos.getLedgerId())
                    .setEntryId(pos.getEntryId()).build();

            ByteBuf metadataAndPayload = entry.getDataBuffer();
            // Increment the ref-count of the data now and release it at the end of processing, so we get a chance to call entry.release
            metadataAndPayload.retain();
            // skip checksum by incrementing reader-index if consumer-client doesn't support checksum verification
            if (cnx.getRemoteEndpointProtocolVersion() < ProtocolVersion.v6.getNumber()) {
                readChecksum(metadataAndPayload);
            }

            if (log.isDebugEnabled()) {
                log.debug("[{}] Sending message to consumerId {}, entry id {}", subscription, consumerId,
                        pos.getEntryId());
            }

            // We only want to pass the "real" promise on the last entry written
            ChannelPromise promise = ctx.voidPromise();
            if (i == (entries.size() - 1)) {
                promise = writePromise;
            }
            ctx.write(Commands.newMessage(consumerId, messageId, metadataAndPayload), promise);
            messageId.recycle();
            messageIdBuilder.recycle();
            entry.release();
        }

        ctx.flush();
    });

    return sentMessages;
}

From source file:org.diorite.config.serialization.comments.CommentsNodeImpl.java

@Override
public void setComment(String path, @Nullable String comment) {
    MutablePair<String, CommentsNodeImpl> nodePair = this.dataMap.computeIfAbsent(path,
            k -> new MutablePair<>(null, null));
    if (comment == null) {
        nodePair.setLeft(null);
        return;
    }
    nodePair.setLeft(comment);
}

From source file:org.jsweet.input.typescriptdef.ast.Scanner.java

protected Pair<TypeDeclaration, FunctionDeclaration> findSuperMethod(TypeDeclaration declaringType,
        FunctionDeclaration method) {
    MutablePair<TypeDeclaration, FunctionDeclaration> superMethodInfos = new MutablePair<>();
    applyToSuperMethod(declaringType, method, (superType, superMethod) -> {
        superMethodInfos.setLeft(superType);
        superMethodInfos.setRight(superMethod);
    });

    return superMethodInfos.getRight() == null ? null : superMethodInfos;
}
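
The example above fills a MutablePair from inside a lambda, since local variables captured by a lambda must be effectively final. A generic sketch of that idiom follows; findFirstMatch and forEachIndexed are hypothetical helpers, not part of the jsweet code:

import java.util.function.BiConsumer;

import org.apache.commons.lang3.tuple.MutablePair;
import org.apache.commons.lang3.tuple.Pair;

public class CallbackResultCapture {
    // Capture two results from a callback through a MutablePair acting as an 'out' parameter.
    public static Pair<String, Integer> findFirstMatch(Iterable<String> items, String prefix) {
        MutablePair<String, Integer> result = new MutablePair<>();
        forEachIndexed(items, (item, index) -> {
            if (result.getLeft() == null && item.startsWith(prefix)) {
                result.setLeft(item);
                result.setRight(index);
            }
        });
        return result.getLeft() == null ? null : result;
    }

    private static void forEachIndexed(Iterable<String> items, BiConsumer<String, Integer> action) {
        int index = 0;
        for (String item : items) {
            action.accept(item, index++);
        }
    }
}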

From source file:org.opendaylight.openflowplugin.impl.util.BarrierUtil.java

/**
 * Chain a barrier message regardless of the previous result, and use the given {@link Function} to combine
 * the original result with the barrier result.
 *
 * @param <T>                type of input future
 * @param input              future to chain barrier to
 * @param nodeRef            target device
 * @param transactionService barrier service
 * @param compositeTransform function combining the original result with the barrier result
 * @return future holding both results (the input's and the barrier's)
 */
public static <T> ListenableFuture<RpcResult<T>> chainBarrier(final ListenableFuture<RpcResult<T>> input,
        final NodeRef nodeRef, final FlowCapableTransactionService transactionService,
        final Function<Pair<RpcResult<T>, RpcResult<Void>>, RpcResult<T>> compositeTransform) {
    final MutablePair<RpcResult<T>, RpcResult<Void>> resultPair = new MutablePair<>();

    // store input result and append barrier
    final ListenableFuture<RpcResult<Void>> barrierResult = Futures.transform(input,
            new AsyncFunction<RpcResult<T>, RpcResult<Void>>() {
                @Override
                public ListenableFuture<RpcResult<Void>> apply(@Nullable final RpcResult<T> interInput)
                        throws Exception {
                    resultPair.setLeft(interInput);
                    final SendBarrierInput barrierInput = createSendBarrierInput(nodeRef);
                    return JdkFutureAdapters.listenInPoolThread(transactionService.sendBarrier(barrierInput));
                }
            });
    // store barrier result and return initiated pair
    final ListenableFuture<Pair<RpcResult<T>, RpcResult<Void>>> compositeResult = Futures
            .transform(barrierResult, new Function<RpcResult<Void>, Pair<RpcResult<T>, RpcResult<Void>>>() {
                @Nullable
                @Override
                public Pair<RpcResult<T>, RpcResult<Void>> apply(@Nullable final RpcResult<Void> input) {
                    resultPair.setRight(input);
                    return resultPair;
                }
            });
    // append assembling transform to barrier result
    return Futures.transform(compositeResult, compositeTransform);
}

From source file:org.openlmis.fulfillment.Resource2Db.java

Pair<List<String>, List<Object[]>> resourceCsvToBatchedPair(final Resource resource) throws IOException {
    XLOGGER.entry(resource.getDescription());

    // parse CSV
    try (InputStreamReader isReader = new InputStreamReader(
            new BOMInputStream(resource.getInputStream(), ByteOrderMark.UTF_8))) {
        CSVParser parser = CSVFormat.DEFAULT.withHeader().withNullString("").parse(isReader);

        // read header row
        MutablePair<List<String>, List<Object[]>> readData = new MutablePair<>();
        readData.setLeft(new ArrayList<>(parser.getHeaderMap().keySet()));
        XLOGGER.info("Read header: " + readData.getLeft());

        // read data rows
        List<Object[]> rows = new ArrayList<>();
        for (CSVRecord record : parser.getRecords()) {
            if (!record.isConsistent()) {
                throw new IllegalArgumentException("CSV record inconsistent: " + record);
            }

            List theRow = IteratorUtils.toList(record.iterator());
            rows.add(theRow.toArray());
        }
        readData.setRight(rows);

        XLOGGER.exit("Records read: " + readData.getRight().size());
        return readData;
    }
}