Example usage for org.apache.commons.lang3.tuple MutablePair MutablePair

List of usage examples for org.apache.commons.lang3.tuple MutablePair MutablePair

Introduction

On this page you can find example usage for the org.apache.commons.lang3.tuple.MutablePair constructor, MutablePair(L left, R right).

Prototype

public MutablePair(final L left, final R right) 

Document

Create a new pair instance.
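
A minimal, self-contained sketch of the constructor and the mutators that distinguish MutablePair from ImmutablePair (the class name MutablePairDemo and the values used are illustrative only):

import org.apache.commons.lang3.tuple.MutablePair;
import org.apache.commons.lang3.tuple.Pair;

public class MutablePairDemo {
    public static void main(String[] args) {
        // Create a new pair instance with both elements supplied up front.
        MutablePair<String, Integer> pair = new MutablePair<>("answer", 42);

        // Unlike ImmutablePair, both sides can be replaced after construction.
        pair.setLeft("question");
        pair.setRight(pair.getRight() + 1);

        // MutablePair is still a Pair, so it works with Pair-typed APIs;
        // getValue() is an alias for getRight() via the Map.Entry contract.
        Pair<String, Integer> view = pair;
        System.out.println(view.getLeft() + " -> " + view.getValue());
    }
}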

Usage

From source file:org.amanzi.awe.catalog.neo.listeners.NeoCatalogListener.java

/**
 * Returns a Pair that contains the given model and its matching layer.
 *
 * @param map map to search for a matching layer
 * @param gis model to match against the map's layers
 * @return a pair of the model and the matching layer; the right element stays null if no layer matches
 */
private Pair<IGISModel, ILayer> getLayerModelPair(final IMap map, final IGISModel gis) {
    final Pair<IGISModel, ILayer> resultPair = new MutablePair<IGISModel, ILayer>(gis, null);
    try {
        for (final ILayer layer : map.getMapLayers()) {
            final IGeoResource resource = layer.findGeoResource(IGISModel.class);
            if (resource == null) {
                continue;
            }
            final IGISModel resolvedElement = resource.resolve(IGISModel.class, null);
            if (resolvedElement.getName().equals(gis.getName())) {
                // clear previous selected elements
                resultPair.setValue(layer);
                return resultPair;
            }
        }
        return resultPair;
    } catch (final IOException e) {
        LOGGER.error("Error on computing Model->Layer pair", e);
        return resultPair;
    }
}
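
In this example the pair acts as a result holder: the left element (the model) is fixed at construction and the right element starts out null, to be filled in later. Note that setValue on a MutablePair updates the right element (Pair implements Map.Entry, so the "value" is the right-hand side), which is why resultPair.setValue(layer) has the same effect as resultPair.setRight(layer).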

From source file:org.apache.apex.malhar.contrib.kinesis.AbstractKinesisInputOperator.java

/**
 * Implements the InputOperator interface.
 */
@Override
public void emitTuples() {
    if (currentWindowId <= windowDataManager.getLargestCompletedWindow()) {
        return;
    }
    int count = consumer.getQueueSize();
    if (maxTuplesPerWindow > 0) {
        count = Math.min(count, maxTuplesPerWindow - emitCount);
    }
    for (int i = 0; i < count; i++) {
        Pair<String, Record> data = consumer.pollRecord();
        String shardId = data.getFirst();
        String recordId = data.getSecond().getSequenceNumber();
        emitTuple(data);
        MutablePair<String, Integer> shardOffsetAndCount = currentWindowRecoveryState.get(shardId);
        if (shardOffsetAndCount == null) {
            currentWindowRecoveryState.put(shardId, new MutablePair<String, Integer>(recordId, 1));
        } else {
            shardOffsetAndCount.setRight(shardOffsetAndCount.right + 1);
        }
        shardPosition.put(shardId, recordId);
    }
    emitCount += count;
}
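
The recovery-state bookkeeping above is a small get-or-create counter idiom: the left element remembers the first sequence number seen for a shard in the window, and the right element counts the records that followed. A reduced sketch of just that idiom, with hypothetical names (recoveryState, shardId and recordId are placeholders, not the operator's actual fields):

Map<String, MutablePair<String, Integer>> recoveryState = new HashMap<>();

void track(String shardId, String recordId) {
    MutablePair<String, Integer> offsetAndCount = recoveryState.get(shardId);
    if (offsetAndCount == null) {
        // First record for this shard in the current window: remember where it started.
        recoveryState.put(shardId, new MutablePair<>(recordId, 1));
    } else {
        // Same shard, same window: only the count changes; the start offset stays put.
        offsetAndCount.setRight(offsetAndCount.getRight() + 1);
    }
}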

From source file:org.apache.apex.malhar.kafka.AbstractKafkaInputOperator.java

@Override
public void endWindow() {
    // copy current offset track to history memory
    Map<AbstractKafkaPartitioner.PartitionMeta, Long> offsetsWithWindow = new HashMap<>(offsetTrack);
    offsetHistory.add(Pair.of(currentWindowId, offsetsWithWindow));

    //update metrics
    metrics.updateMetrics(clusters, consumerWrapper.getAllConsumerMetrics());

    //update the windowDataManager
    if (isIdempotent()) {
        try {
            Map<AbstractKafkaPartitioner.PartitionMeta, Pair<Long, Long>> windowData = new HashMap<>();
            for (Map.Entry<AbstractKafkaPartitioner.PartitionMeta, Long> e : windowStartOffset.entrySet()) {
                windowData.put(e.getKey(),
                        new MutablePair<>(e.getValue(), offsetTrack.get(e.getKey()) - e.getValue()));
            }
            windowDataManager.save(windowData, currentWindowId);
        } catch (IOException e) {
            DTThrowable.rethrow(e);
        }
    }
}
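
Each saved MutablePair here packs two longs: the offset at which the window started and the number of records consumed in that window, so an idempotent replay can re-read exactly the same slice. A simplified sketch of the arithmetic for a single partition (the offset values are made up for illustration):

// left = offset at which the window started, right = how many records belong to the window
long windowStartOffset = 1_000L;
long offsetAfterLastRead = 1_250L;
MutablePair<Long, Long> windowSlice = new MutablePair<>(windowStartOffset, offsetAfterLastRead - windowStartOffset);
// windowSlice is (1000, 250): replay should start at offset 1000 and stop after 250 records.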

From source file:org.apache.apex.malhar.lib.db.jdbc.AbstractJdbcPollInputOperator.java

/**
 * Execute the query and transfer results to the emit queue.
 * @param preparedStatement PreparedStatement to execute the query and fetch results.
 */
protected int insertDbDataInQueue(PreparedStatement preparedStatement)
        throws SQLException, InterruptedException {
    int resultCount = 0;
    preparedStatement.setFetchSize(getFetchSize());
    ResultSet result = preparedStatement.executeQuery();
    while (execute && result.next()) {
        T obj = getTuple(result);
        if (obj == null) {
            continue;
        }
        while (execute && !emitQueue.offer(obj)) {
            Thread.sleep(DEFAULT_SLEEP_TIME);
        }
        if (isPollerPartition && rebaseOffset) {
            if (prevKey == null) {
                prevKey = extractKey(result);
            } else if (this.fetchedKeyAndOffset.get() == null) {
                // track key change
                Object nextKey = extractKey(result);
                if (!nextKey.equals(prevKey)) {
                    // new key, ready for rebase (WHERE key > ?)
                    fetchedKeyAndOffset.set(new MutablePair<>(prevKey, lastOffset + resultCount));
                }
            }
        }
        resultCount++;
    }
    result.close();
    preparedStatement.close();
    return resultCount;
}

From source file:org.apache.apex.malhar.lib.window.accumulation.Average.java

@Override
public MutablePair<Double, Long> defaultAccumulatedValue() {
    return new MutablePair<>(0.0, 0L);
}
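
defaultAccumulatedValue only supplies the identity element; the rest of the Average accumulation keeps a running sum in the left slot and a count in the right slot. A hedged sketch of what the remaining accumulation methods typically look like (a simplified reconstruction following Apache Apex's Accumulation interface, not the library source verbatim):

MutablePair<Double, Long> accumulate(MutablePair<Double, Long> accumulated, Double input) {
    // Running sum on the left, element count on the right.
    accumulated.setLeft(accumulated.getLeft() + input);
    accumulated.setRight(accumulated.getRight() + 1);
    return accumulated;
}

Double getOutput(MutablePair<Double, Long> accumulated) {
    // Guard against dividing by the freshly created default value (0.0, 0).
    return accumulated.getRight() == 0 ? 0.0 : accumulated.getLeft() / accumulated.getRight();
}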

From source file:org.apache.giraph.comm.flow_control.CreditBasedFlowControl.java

@Override
public void sendRequest(int destTaskId, WritableRequest request) {
    Pair<AdjustableSemaphore, Integer> pair = perWorkerOpenRequestMap.get(destTaskId);
    // Check if this is the first time sending a request to a worker. If so,
    // we should add the worker id to the necessary bookkeeping data structures.
    if (pair == null) {
        pair = new MutablePair<>(new AdjustableSemaphore(maxOpenRequestsPerWorker), -1);
        Pair<AdjustableSemaphore, Integer> temp = perWorkerOpenRequestMap.putIfAbsent(destTaskId, pair);
        perWorkerUnsentRequestMap.putIfAbsent(destTaskId, new ArrayDeque<WritableRequest>());
        resumeRequestsId.putIfAbsent(destTaskId, Sets.<Long>newConcurrentHashSet());
        if (temp != null) {
            pair = temp;
        }
    }
    AdjustableSemaphore openRequestPermit = pair.getLeft();
    // Try to reserve a spot for the request amongst the open requests of
    // the destination worker.
    boolean shouldSend = openRequestPermit.tryAcquire();
    boolean shouldCache = false;
    while (!shouldSend) {
        // We should not send the request, and should cache the request instead.
        // It may be possible that the unsent message cache is also full, so we
        // should try to acquire a space on the cache, and if there is no extra
        // space in unsent request cache, we should wait until some space
        // become available. However, it is possible that during the time we are
        // waiting on the unsent messages cache, actual buffer for open requests
        // frees up space.
        try {
            shouldCache = unsentRequestPermit.tryAcquire(unsentWaitMsecs, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            throw new IllegalStateException(
                    "shouldSend: failed " + "while waiting on the unsent request cache to have some more "
                            + "room for extra unsent requests!");
        }
        if (shouldCache) {
            break;
        }
        // We may have an open spot in the meantime that we were waiting on the
        // unsent requests.
        shouldSend = openRequestPermit.tryAcquire();
        if (shouldSend) {
            break;
        }
        // The current thread will be at this point only if it could not make
        // space amongst open requests for the destination worker and has been
        // timed-out in trying to acquire a space amongst unsent messages. So,
        // we should report logs, report progress, and check for request
        // failures.
        nettyClient.logAndSanityCheck();
    }
    // Either shouldSend == true or shouldCache == true
    if (shouldCache) {
        Deque<WritableRequest> unsentRequests = perWorkerUnsentRequestMap.get(destTaskId);
        // This synchronize block is necessary for the following reason:
        // Once we are at this point, it means there was no room for this
        // request to become an open request, hence we have to put it into
        // unsent cache. Consider the case that since last time we checked if
        // there is any room for an additional open request so far, all open
        // requests are delivered and their acknowledgements are also processed.
        // Now, if we put this request in the unsent cache, it is not being
        // considered to become an open request, as the only one who checks
        // on this matter would be the one who receives an acknowledgment for an
        // open request for the destination worker. So, a lock is necessary
        // to forcefully serialize the execution if this scenario is about to
        // happen.
        synchronized (unsentRequests) {
            shouldSend = openRequestPermit.tryAcquire();
            if (!shouldSend) {
                aggregateUnsentRequests.getAndIncrement();
                unsentRequests.add(request);
                return;
            }
        }
        // We found a spot amongst open requests to send this request. So, this
        // request won't be cached anymore.
        unsentRequestPermit.release();
    }
    nettyClient.doSend(destTaskId, request);
}

From source file:org.apache.giraph.ooc.data.DiskBackedDataStore.java

/**
 * Adds a data entry for a given partition to the current data store. If data
 * of a given partition in data store is already offloaded to disk, adds the
 * data entry to appropriate raw data buffer list.
 *
 * @param partitionId id of the partition to add the data entry to
 * @param entry data entry to add
 */
protected void addEntry(int partitionId, T entry) {
    // Addition of data entries to a data store is much more common than
    // out-of-core operations. Besides, in-memory data store implementations
    // existing in the code base already account for parallel addition to data
    // stores. Therefore, using read lock would optimize for parallel addition
    // to data stores, specially for cases where the addition should happen for
    // partitions that are entirely in memory.
    ReadWriteLock rwLock = getPartitionLock(partitionId);
    rwLock.readLock().lock();
    if (hasPartitionDataOnDisk.contains(partitionId)) {
        List<T> entryList = new ArrayList<>();
        entryList.add(entry);
        int entrySize = entrySerializedSize(entry);
        MutablePair<Integer, List<T>> newPair = new MutablePair<>(entrySize, entryList);
        Pair<Integer, List<T>> oldPair = dataBuffers.putIfAbsent(partitionId, newPair);
        if (oldPair != null) {
            synchronized (oldPair) {
                newPair = (MutablePair<Integer, List<T>>) oldPair;
                newPair.setLeft(oldPair.getLeft() + entrySize);
                newPair.getRight().add(entry);
            }
        }
    } else {
        addEntryToInMemoryPartitionData(partitionId, entry);
    }
    rwLock.readLock().unlock();
}
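
The putIfAbsent-then-synchronize pattern above (which also appears in the OutOfCoreDataManager and DiskBackedPartitionStore examples below) is worth calling out: a thread optimistically creates a fresh MutablePair holding the size and a one-element list, and if another thread installed a buffer first, it merges into the existing pair under that pair's monitor. A stripped-down sketch of just that idiom (buffers, sizeOf and the String element type are placeholders for this illustration):

ConcurrentMap<Integer, MutablePair<Integer, List<String>>> buffers = new ConcurrentHashMap<>();

void buffer(int partitionId, String entry, int sizeOf) {
    List<String> fresh = new ArrayList<>();
    fresh.add(entry);
    MutablePair<Integer, List<String>> candidate = new MutablePair<>(sizeOf, fresh);
    MutablePair<Integer, List<String>> existing = buffers.putIfAbsent(partitionId, candidate);
    if (existing != null) {
        // Another thread won the race; merge into its pair under that pair's own lock.
        synchronized (existing) {
            existing.setLeft(existing.getLeft() + sizeOf);
            existing.getRight().add(entry);
        }
    }
}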

From source file:org.apache.giraph.ooc.data.OutOfCoreDataManager.java

/**
 * Adds a data entry for a given partition to the current data store. If data
 * of a given partition in data store is already offloaded to disk, adds the
 * data entry to appropriate raw data buffer list.
 *
 * @param partitionId id of the partition to add the data entry to
 * @param entry data entry to add
 */
protected void addEntry(int partitionId, T entry) {
    // Addition of data entries to a data store is much more common than
    // out-of-core operations. Besides, in-memory data store implementations
    // existing in the code base already account for parallel addition to data
    // stores. Therefore, using read lock would optimize for parallel addition
    // to data stores, specially for cases where the addition should happen for
    // partitions that are entirely in memory.
    ReadWriteLock rwLock = getPartitionLock(partitionId);
    rwLock.readLock().lock();
    if (hasPartitionDataOnDisk.contains(partitionId)) {
        List<T> entryList = new ArrayList<>();
        entryList.add(entry);
        int entrySize = entrySerializedSize(entry);
        MutablePair<Integer, List<T>> newPair = new MutablePair<>(entrySize, entryList);
        Pair<Integer, List<T>> oldPair = dataBuffers.putIfAbsent(partitionId, newPair);
        if (oldPair != null) {
            synchronized (oldPair) {
                newPair = (MutablePair<Integer, List<T>>) oldPair;
                newPair.setLeft(oldPair.getLeft() + entrySize);
                newPair.getRight().add(entry);
            }
        }
    } else {
        addEntryToImMemoryPartitionData(partitionId, entry);
    }
    rwLock.readLock().unlock();
}

From source file:org.apache.giraph.ooc.DiskBackedPartitionStore.java

@Override
public <M extends Writable> void addPartitionIncomingMessages(int partitionId, VertexIdMessages<I, M> messages)
        throws IOException {
    if (conf.getIncomingMessageClasses().useMessageCombiner()) {
        ((MessageStore<I, M>) incomingMessageStore).addPartitionMessages(partitionId, messages);
    } else {
        MetaPartition meta = partitions.get(partitionId);
        checkNotNull(meta, "addPartitionIncomingMessages: trying to add " + "messages to partition "
                + partitionId + " which does not exist " + "in the partition set of this worker!");

        synchronized (meta) {
            switch (meta.getState()) {
            case INACTIVE:
            case ACTIVE:
                // A partition might be in memory, but its message store might still
                // be on disk. This happens because while we are loading the partition
                // to memory, we only load its current messages, not the incoming
                // messages. If a new superstep has been started, while the partition
                // is still in memory, the incoming message store in the previous
                // superstep (which is the current messages in the current superstep)
                // is on disk.
                // This may also happen when a partition is offloaded to disk while
                // it was unprocessed, and then again loaded in the same superstep for
                // processing.
                Boolean isMsgOnDisk = incomingMessagesOnDisk.get(partitionId);
                if (isMsgOnDisk == null || !isMsgOnDisk) {
                    ((MessageStore<I, M>) incomingMessageStore).addPartitionMessages(partitionId, messages);
                    break;
                }
                // Continue to IN_TRANSIT and ON_DISK cases as the partition is in
                // memory, but its messages are not yet loaded
                // CHECKSTYLE: stop FallThrough
            case IN_TRANSIT:
            case ON_DISK:
                // CHECKSTYLE: resume FallThrough
                List<VertexIdMessages<I, Writable>> newMessages = new ArrayList<VertexIdMessages<I, Writable>>();
                newMessages.add((VertexIdMessages<I, Writable>) messages);
                int length = messages.getSerializedSize();
                Pair<Integer, List<VertexIdMessages<I, Writable>>> newPair = new MutablePair<>(length,
                        newMessages);
                messageBufferRWLock.readLock().lock();
                Pair<Integer, List<VertexIdMessages<I, Writable>>> oldPair = pendingIncomingMessages
                        .putIfAbsent(partitionId, newPair);
                if (oldPair != null) {
                    synchronized (oldPair) {
                        MutablePair<Integer, List<VertexIdMessages<I, Writable>>> pair = (MutablePair<Integer, List<VertexIdMessages<I, Writable>>>) oldPair;
                        pair.setLeft(pair.getLeft() + length);
                        pair.getRight().add((VertexIdMessages<I, Writable>) messages);
                    }
                }
                messageBufferRWLock.readLock().unlock();
                // In the case that the number of partitions is asked to be fixed by
                // the user, we should offload the message buffers as necessary.
                if (isNumPartitionsFixed && pendingIncomingMessages.get(partitionId).getLeft() > minBuffSize) {
                    try {
                        spillPartitionMessages(partitionId);
                    } catch (IOException e) {
                        throw new IllegalStateException("addPartitionIncomingMessages: "
                                + "spilling message buffers for partition " + partitionId + " failed!");
                    }
                }
                break;
            default:
                throw new IllegalStateException("addPartitionIncomingMessages: " + "illegal state "
                        + meta.getState() + " for partition " + meta.getId());
            }
        }
    }
}

From source file:org.apache.giraph.ooc.DiskBackedPartitionStore.java

@Override
@edu.umd.cs.findbugs.annotations.SuppressWarnings("SF_SWITCH_FALLTHROUGH")
public void addPartitionEdges(Integer partitionId, VertexIdEdges<I, E> edges) {
    if (!isInitialized.get()) {
        initialize();
    }

    MetaPartition meta = new MetaPartition(partitionId);
    MetaPartition temp = partitions.putIfAbsent(partitionId, meta);
    if (temp != null) {
        meta = temp;
    }

    boolean createPartition = false;
    synchronized (meta) {
        switch (meta.getState()) {
        case INIT:
            Partition<I, V, E> partition = conf.createPartition(partitionId, context);
            meta.setPartition(partition);
            // This is set to processed so that in the very next iteration cycle,
            // when startIteration is called, all partitions seem to be processed
            // and ready for the next iteration cycle. Otherwise, startIteration
            // fails in its sanity check due to finding an unprocessed partition.
            meta.setProcessed(true);
            numPartitionsInMem.getAndIncrement();
            meta.setState(State.INACTIVE);
            synchronized (processedPartitions) {
                processedPartitions.get(State.INACTIVE).add(partitionId);
                processedPartitions.notifyAll();
            }
            createPartition = true;
            // Continue to INACTIVE case to add the edges to the partition
            // CHECKSTYLE: stop FallThrough
        case INACTIVE:
            // CHECKSTYLE: resume FallThrough
            edgeStore.addPartitionEdges(partitionId, edges);
            break;
        case IN_TRANSIT:
        case ON_DISK:
            // Adding edges to in-memory buffer of the partition
            List<VertexIdEdges<I, E>> newEdges = new ArrayList<VertexIdEdges<I, E>>();
            newEdges.add(edges);
            int length = edges.getSerializedSize();
            Pair<Integer, List<VertexIdEdges<I, E>>> newPair = new MutablePair<>(length, newEdges);
            edgeBufferRWLock.readLock().lock();
            Pair<Integer, List<VertexIdEdges<I, E>>> oldPair = pendingInputEdges.putIfAbsent(partitionId,
                    newPair);
            if (oldPair != null) {
                synchronized (oldPair) {
                    MutablePair<Integer, List<VertexIdEdges<I, E>>> pair = (MutablePair<Integer, List<VertexIdEdges<I, E>>>) oldPair;
                    pair.setLeft(pair.getLeft() + length);
                    pair.getRight().add(edges);
                }
            }
            edgeBufferRWLock.readLock().unlock();
            // In the case that the number of partitions is asked to be fixed by the
            // user, we should offload the edge store as necessary.
            if (isNumPartitionsFixed && pendingInputEdges.get(partitionId).getLeft() > minBuffSize) {
                try {
                    spillPartitionInputEdgeStore(partitionId);
                } catch (IOException e) {
                    throw new IllegalStateException("addPartitionEdges: spilling " + "edge store for partition "
                            + partitionId + " failed!");
                }
            }
            break;
        default:
            throw new IllegalStateException(
                    "illegal state " + meta.getState() + " for partition " + meta.getId());
        }
    }
    // If creation of a new partition is violating the policy of maximum number
    // of partitions in memory, we should spill a partition to disk.
    if (createPartition && numPartitionsInMem.get() > maxPartitionsInMem.get()) {
        swapOnePartitionToDisk();
    }
}