Example usage for the org.apache.commons.lang3.tuple.ImmutablePair constructor

Introduction

On this page you can find example usages of the org.apache.commons.lang3.tuple.ImmutablePair constructor.

Prototype

public ImmutablePair(final L left, final R right) 

Document

Create a new pair instance.
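
A minimal sketch of this constructor in use (the values below are placeholders, not taken from the projects listed further down). The static factory Pair.of is equivalent and lets the compiler infer the type arguments:

import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;

public class ImmutablePairExample {
    public static void main(String[] args) {
        // Explicit constructor call; the diamond operator infers <String, Integer>.
        Pair<String, Integer> viaConstructor = new ImmutablePair<>("answer", 42);

        // Equivalent static factory.
        Pair<String, Integer> viaFactory = Pair.of("answer", 42);

        // The elements are read back with getLeft() and getRight().
        System.out.println(viaConstructor.getLeft() + " = " + viaFactory.getRight());
    }
}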

Usage

From source file:io.pravega.segmentstore.server.host.stat.AutoScaleProcessor.java

void notifyCreated(String segmentStreamName, byte type, long targetRate) {
    if (type != WireCommands.CreateSegment.NO_SCALE) {
        cache.put(segmentStreamName,
                new ImmutablePair<>(System.currentTimeMillis(), System.currentTimeMillis()));
    }
}
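
The cached value above pairs two timestamps under the segment name. A hedged sketch of reading it back, assuming cache is a Guava-style Cache<String, Pair<Long, Long>> (the actual cache type and surrounding fields are not shown in the excerpt):

// Hypothetical reader; everything outside the excerpt above is an assumption.
Pair<Long, Long> timestamps = cache.getIfPresent(segmentStreamName);
if (timestamps != null) {
    long left = timestamps.getLeft();
    long right = timestamps.getRight();
    System.out.println("cached timestamps: " + left + " / " + right);
}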

From source file:it.polimi.diceH2020.SPACE4CloudWS.solvers.solversImpl.SPNSolver.SPNSolver.java

private Pair<List<File>, List<File>> generateSPNModel(@NotNull SolutionPerJob solPerJob) throws IOException {
    int nContainers = solPerJob.getNumberContainers();
    ClassParameters jobClass = solPerJob.getJob();
    JobProfile prof = solPerJob.getProfile();
    double mAvg = prof.get("mavg");
    double rAvg = prof.get("ravg");
    double shTypAvg = prof.get("shtypavg");
    double think = jobClass.getThink();

    int nUsers = solPerJob.getNumberUsers();
    int NM = (int) prof.get("nm");
    int NR = (int) prof.get("nr");

    String prefix = filePrefix(solPerJob);

    final Technology technology = dataService.getScenario().getTechnology();
    String netFileContent = new PNNetFileBuilder().setTechnology(technology).setCores(nContainers)
            .setMapRate(1 / mAvg).setReduceRate(1 / (rAvg + shTypAvg)).setThinkRate(1 / think).build();
    File netFile = fileUtility.provideTemporaryFile(prefix, ".net");
    fileUtility.writeContentToFile(netFileContent, netFile);

    String defFileContent = new PNDefFileBuilder().setTechnology(technology).setConcurrency(nUsers)
            .setNumberOfMapTasks(NM).setNumberOfReduceTasks(NR).build();
    File defFile = fileUtility.provideTemporaryFile(prefix, ".def");
    fileUtility.writeContentToFile(defFileContent, defFile);

    label = dataService.getScenario().getTechnology() != Technology.STORM ? "end" : "nCores_2";
    File statFile = writeStatFile(solPerJob, label);

    List<File> lst = new ArrayList<>(3);
    lst.add(netFile);
    lst.add(defFile);
    lst.add(statFile);
    return new ImmutablePair<>(lst, new ArrayList<>());
}

From source file:com.github.aptd.simulation.datamodel.CXMLReader.java

/**
 * create the platforms of all stations
 *
 * @param p_network network component
 * @param p_agents map with agent asl scripts
 * @param p_factory factory
 * @param p_time time reference
 * @return unmodifiable map with platforms
 */
private static Map<String, IPlatform<?>> platform(final Network p_network, final Map<String, String> p_agents,
        final IFactory p_factory, final ITime p_time) {
    final Map<String, IElement.IGenerator<IPlatform<?>>> l_generators = new ConcurrentHashMap<>();
    final Set<IAction> l_actions = CCommon.actionsFromPackage().collect(Collectors.toSet());
    return Collections.<String, IPlatform<?>>unmodifiableMap(p_network.getInfrastructure()
            .getOperationControlPoints().getOcp().parallelStream()
            .flatMap(ocp -> ocp.getAny().stream().filter(a -> a instanceof StationLayout).findAny()
                    .map(a -> ((StationLayout) a).getPlatform().stream()
                            .map(p -> new ImmutablePair<EOcp, PlatformType>(ocp, p)))
                    .orElse(Stream.of()))
            .filter(i -> i.getRight().getAgentRef() != null)
            .map(i -> l_generators
                    .computeIfAbsent(i.getRight().getAgentRef().getAgent(),
                            a -> platformgenerator(p_factory,
                                    p_agents.get(i.getRight().getAgentRef().getAgent()), l_actions, p_time))
                    .generatesingle(i.getLeft().getId() + "-track-" + i.getRight().getNumber(),
                            i.getLeft().getId()))
            .collect(Collectors.toMap(IElement::id, i -> i)));
}
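
The pair here only exists to carry the OCP alongside each platform through the flatMap. A stripped-down sketch of the same idiom with placeholder types (none of these values come from the project above):

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.commons.lang3.tuple.ImmutablePair;

// Pair each word with each of its characters so both values survive the flatMap.
List<ImmutablePair<String, Character>> expanded = Stream.of("ab", "cd")
        .flatMap(word -> word.chars()
                .mapToObj(c -> new ImmutablePair<String, Character>(word, (char) c)))
        .collect(Collectors.toList());
// expanded: [(ab,a), (ab,b), (cd,c), (cd,d)]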

From source file:com.linkedin.pinot.routing.builder.KafkaLowLevelConsumerRoutingTableBuilder.java

@Override
public List<ServerToSegmentSetMap> computeRoutingTableFromExternalView(String tableName,
        ExternalView externalView, List<InstanceConfig> instanceConfigList) {
    // We build the routing table based on the external view here. What we want to do is to make sure that we uphold
    // the guarantees clients expect (no duplicate records, eventual consistency) and spread the load as equally as
    // possible between the servers.
    //
    // Each Kafka partition contains a fraction of the data, so we need to make sure that we query all partitions.
    // Because in certain unlikely degenerate scenarios, we can consume overlapping data until segments are flushed (at
    // which point the overlapping data is discarded during the reconciliation process with the controller), we need to
    // ensure that the query that is sent has only one partition in CONSUMING state in order to avoid duplicate records.
    //
    // Because we also want to spread the load as equally as possible between servers, we use a weighted random
    // replica selection that favors picking replicas with fewer segments assigned to them, thus having an approximately
    // equal distribution of load between servers.
    //
    // For example, given three replicas with 1, 2 and 3 segments assigned to each, the replica with one segment should
    // have a weight of 2, which is the maximum segment count minus the segment count for that replica. Thus, each
    // replica other than the replica(s) with the maximum segment count should have a chance of getting a segment
    // assigned to it. This corresponds to alternative three below:
    //
    // Alternative 1 (weight is sum of segment counts - segment count in that replica):
    // (6 - 1) = 5 -> P(0.4166)
    // (6 - 2) = 4 -> P(0.3333)
    // (6 - 3) = 3 -> P(0.2500)
    //
    // Alternative 2 (weight is max of segment counts - segment count in that replica + 1):
    // (3 - 1) + 1 = 3 -> P(0.5000)
    // (3 - 2) + 1 = 2 -> P(0.3333)
    // (3 - 3) + 1 = 1 -> P(0.1666)
    //
    // Alternative 3 (weight is max of segment counts - segment count in that replica):
    // (3 - 1) = 2 -> P(0.6666)
    // (3 - 2) = 1 -> P(0.3333)
    // (3 - 3) = 0 -> P(0.0000)
    //
    // Of those three weighting alternatives, the third one has the smallest standard deviation of the number of
    // segments assigned per replica, so it corresponds to the weighting strategy used for segment assignment. Empirical
    // testing shows that for 20 segments and three replicas, the standard deviation of each alternative is respectively
    // 2.112, 1.496 and 0.853.
    //
    // This algorithm works as follows:
    // 1. Gather all segments and group them by Kafka partition, sorted by sequence number
    // 2. Ensure that for each Kafka partition, we have at most one segment in CONSUMING state
    // 3. Sort all the segments to be used during assignment in ascending order of replicas
    // 4. For each segment to be used during assignment, pick a random replica, weighted by the number of segments
    //    assigned to each replica.

    // 1. Gather all segments and group them by Kafka partition, sorted by sequence number
    Map<String, SortedSet<SegmentName>> sortedSegmentsByKafkaPartition = new HashMap<String, SortedSet<SegmentName>>();
    for (String helixPartitionName : externalView.getPartitionSet()) {
        // Ignore segments that are not low level consumer segments
        if (!SegmentNameBuilder.Realtime.isRealtimeV2Name(helixPartitionName)) {
            continue;
        }

        final LLCSegmentName segmentName = new LLCSegmentName(helixPartitionName);
        String kafkaPartitionName = segmentName.getPartitionRange();
        SortedSet<SegmentName> segmentsForPartition = sortedSegmentsByKafkaPartition.get(kafkaPartitionName);

        // Create sorted set if necessary
        if (segmentsForPartition == null) {
            segmentsForPartition = new TreeSet<SegmentName>();

            sortedSegmentsByKafkaPartition.put(kafkaPartitionName, segmentsForPartition);
        }

        segmentsForPartition.add(segmentName);
    }

    // 2. Ensure that for each Kafka partition, we have at most one Helix partition (Pinot segment) in consuming state
    Map<String, SegmentName> allowedSegmentInConsumingStateByKafkaPartition = new HashMap<String, SegmentName>();
    for (String kafkaPartition : sortedSegmentsByKafkaPartition.keySet()) {
        SortedSet<SegmentName> sortedSegmentsForKafkaPartition = sortedSegmentsByKafkaPartition
                .get(kafkaPartition);
        SegmentName lastAllowedSegmentInConsumingState = null;

        for (SegmentName segmentName : sortedSegmentsForKafkaPartition) {
            Map<String, String> helixPartitionState = externalView.getStateMap(segmentName.getSegmentName());
            boolean allInConsumingState = true;
            int replicasInConsumingState = 0;

            // Only keep the segment if all replicas have it in CONSUMING state
            for (String externalViewState : helixPartitionState.values()) {
                // Ignore ERROR state
                if (externalViewState.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.ERROR)) {
                    continue;
                }

                // Not all replicas have this segment in CONSUMING state, therefore don't consider this segment
                // assignable to CONSUMING replicas
                if (externalViewState.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.ONLINE)) {
                    allInConsumingState = false;
                    break;
                }

                // Otherwise count the replica as being in CONSUMING state
                if (externalViewState.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.CONSUMING)) {
                    replicasInConsumingState++;
                }
            }

            // If all replicas have this segment in consuming state (and not all of them are in ERROR state), then pick this
            // segment to be the last allowed segment to be in CONSUMING state
            if (allInConsumingState && 0 < replicasInConsumingState) {
                lastAllowedSegmentInConsumingState = segmentName;
                break;
            }
        }

        if (lastAllowedSegmentInConsumingState != null) {
            allowedSegmentInConsumingStateByKafkaPartition.put(kafkaPartition,
                    lastAllowedSegmentInConsumingState);
        }
    }

    // 3. Sort all the segments to be used during assignment in ascending order of replicas

    // PriorityQueue throws IllegalArgumentException when given a size of zero
    int segmentCount = Math.max(externalView.getPartitionSet().size(), 1);
    PriorityQueue<Pair<String, Set<String>>> segmentToReplicaSetQueue = new PriorityQueue<Pair<String, Set<String>>>(
            segmentCount, new Comparator<Pair<String, Set<String>>>() {
                @Override
                public int compare(Pair<String, Set<String>> firstPair, Pair<String, Set<String>> secondPair) {
                    return Integer.compare(firstPair.getRight().size(), secondPair.getRight().size());
                }
            });
    RoutingTableInstancePruner instancePruner = new RoutingTableInstancePruner(instanceConfigList);

    for (Map.Entry<String, SortedSet<SegmentName>> entry : sortedSegmentsByKafkaPartition.entrySet()) {
        String kafkaPartition = entry.getKey();
        SortedSet<SegmentName> segmentNames = entry.getValue();

        // The only segment name allowed to be in CONSUMING state, or null if there is none
        SegmentName validConsumingSegment = allowedSegmentInConsumingStateByKafkaPartition.get(kafkaPartition);

        for (SegmentName segmentName : segmentNames) {
            Set<String> validReplicas = new HashSet<String>();
            Map<String, String> externalViewState = externalView.getStateMap(segmentName.getSegmentName());

            for (Map.Entry<String, String> instanceAndStateEntry : externalViewState.entrySet()) {
                String instance = instanceAndStateEntry.getKey();
                String state = instanceAndStateEntry.getValue();

                // Skip pruned replicas (shutting down or otherwise disabled)
                if (instancePruner.isInactive(instance)) {
                    continue;
                }

                // Replicas in ONLINE state are always allowed
                if (state.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.ONLINE)) {
                    validReplicas.add(instance);
                    continue;
                }

                // Replicas in CONSUMING state are only allowed on the last segment
                if (state.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.CONSUMING)
                        && segmentName.equals(validConsumingSegment)) {
                    validReplicas.add(instance);
                }
            }

            segmentToReplicaSetQueue
                    .add(new ImmutablePair<String, Set<String>>(segmentName.getSegmentName(), validReplicas));

            // If this segment is the segment allowed in CONSUMING state, don't process segments after it in that Kafka
            // partition
            if (segmentName.equals(validConsumingSegment)) {
                break;
            }
        }
    }

    // 4. For each segment to be used during assignment, pick a random replica, weighted by the number of segments
    //    assigned to each replica.
    List<ServerToSegmentSetMap> routingTables = new ArrayList<ServerToSegmentSetMap>(routingTableCount);
    for (int i = 0; i < routingTableCount; ++i) {
        Map<String, Set<String>> instanceToSegmentSetMap = new HashMap<String, Set<String>>();

        PriorityQueue<Pair<String, Set<String>>> segmentToReplicaSetQueueCopy = new PriorityQueue<Pair<String, Set<String>>>(
                segmentToReplicaSetQueue);

        while (!segmentToReplicaSetQueueCopy.isEmpty()) {
            Pair<String, Set<String>> segmentAndValidReplicaSet = segmentToReplicaSetQueueCopy.poll();
            String segment = segmentAndValidReplicaSet.getKey();
            Set<String> validReplicaSet = segmentAndValidReplicaSet.getValue();

            String replica = pickWeightedRandomReplica(validReplicaSet, instanceToSegmentSetMap);
            if (replica != null) {
                Set<String> segmentsForInstance = instanceToSegmentSetMap.get(replica);

                if (segmentsForInstance == null) {
                    segmentsForInstance = new HashSet<String>();
                    instanceToSegmentSetMap.put(replica, segmentsForInstance);
                }

                segmentsForInstance.add(segment);
            }
        }

        routingTables.add(new ServerToSegmentSetMap(instanceToSegmentSetMap));
    }

    return routingTables;
}
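
The comment block above settles on the third weighting alternative: each replica is weighted by the maximum segment count minus its own segment count. The helper pickWeightedRandomReplica is not shown in this excerpt, so the following is only an illustrative sketch of that weighting, not the project's implementation:

import java.util.List;
import java.util.Random;

// "Alternative 3": weight(i) = maxCount - count(i). Replicas already holding the
// maximum number of segments get weight 0 and are only picked when every replica
// is equally loaded.
static int pickWeightedIndex(List<Integer> segmentCounts, Random random) {
    int maxCount = segmentCounts.stream().mapToInt(Integer::intValue).max().orElse(0);
    int totalWeight = segmentCounts.stream().mapToInt(count -> maxCount - count).sum();
    if (totalWeight == 0) {
        return random.nextInt(segmentCounts.size()); // all replicas equally loaded
    }
    int pick = random.nextInt(totalWeight);
    for (int i = 0; i < segmentCounts.size(); i++) {
        pick -= maxCount - segmentCounts.get(i);
        if (pick < 0) {
            return i;
        }
    }
    return segmentCounts.size() - 1; // defensive fallback, not reached in practice
}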

From source file:com.quancheng.saluki.boot.runner.GrpcReferenceRunner.java

private Pair<String, String> findGroupAndVersionByServiceName(String serviceName) {
    for (Map<String, String> referenceDefintion : servcieReferenceDefintions) {
        String servcieDefineName = referenceDefintion.get("service");
        if (servcieDefineName.equals(serviceName)) {
            String group = referenceDefintion.get("group");
            String version = referenceDefintion.get("version");
            return new ImmutablePair<String, String>(group, version);
        }
    }
    return new ImmutablePair<String, String>(null, null);
}
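
Since the miss case only needs an immutable (null, null) result, a variant of this lookup could return the shared instance from ImmutablePair.nullPair() (available since commons-lang3 3.6) instead of allocating a new pair on every call; the equals() call is also flipped so a definition without a "service" entry cannot throw a NullPointerException. A sketch under those assumptions:

private Pair<String, String> findGroupAndVersionByServiceName(String serviceName) {
    for (Map<String, String> referenceDefintion : servcieReferenceDefintions) {
        if (serviceName.equals(referenceDefintion.get("service"))) {
            return Pair.of(referenceDefintion.get("group"), referenceDefintion.get("version"));
        }
    }
    // Shared immutable (null, null) pair instead of a fresh allocation per miss.
    return ImmutablePair.nullPair();
}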

From source file:io.pravega.controller.server.ControllerService.java

@SuppressWarnings("ReturnCount")
public CompletableFuture<Pair<UUID, List<SegmentRange>>> createTransaction(final String scope,
        final String stream, final long lease, final long maxExecutionPeriod, final long scaleGracePeriod) {
    Exceptions.checkNotNullOrEmpty(scope, "scope");
    Exceptions.checkNotNullOrEmpty(stream, "stream");

    return streamTransactionMetadataTasks
            .createTxn(scope, stream, lease, maxExecutionPeriod, scaleGracePeriod, null).thenApply(pair -> {
                VersionedTransactionData data = pair.getKey();
                List<Segment> segments = pair.getValue();
                return new ImmutablePair<>(data.getId(), getSegmentRanges(segments, scope, stream));
            });
}

From source file:com.romeikat.datamessie.core.base.util.sparsetable.SparseSingleTable.java

@Override
public synchronized Z mergeAllValues(final Function<Pair<Z, Z>, Z> mergeFunction) {
    final List<Z> allValues = getAllValues();

    if (allValues.isEmpty()) {
        return null;
    }

    Z mergedValue = allValues.get(0);
    for (int i = 1; i < allValues.size(); i++) {
        final Z previousValue = mergedValue;
        final Z currentValue = allValues.get(i);
        final Pair<Z, Z> previousAndCurrentValue = new ImmutablePair<Z, Z>(previousValue, currentValue);
        mergedValue = mergeFunction.apply(previousAndCurrentValue);
    }

    return mergedValue;
}
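
The merge function receives the running result on the left and the next value on the right. A usage sketch, assuming a table whose value type Z is Integer (the table instance itself is hypothetical):

// Folds all table values into one: the pair carries (mergedSoFar, currentValue).
Integer total = table.mergeAllValues(pair -> pair.getLeft() + pair.getRight());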

From source file:com.twitter.distributedlog.TestTruncate.java

private Pair<DistributedLogManager, AsyncLogWriter> populateData(Map<Long, DLSN> txid2DLSN,
        DistributedLogConfiguration confLocal, String name, int numLogSegments, int numEntriesPerLogSegment,
        boolean createInprogressLogSegment) throws Exception {
    long txid = 1;
    for (long i = 1; i <= numLogSegments; i++) {
        LOG.info("Writing Log Segment {}.", i);
        DistributedLogManager dlm = createNewDLM(confLocal, name);
        AsyncLogWriter writer = dlm.startAsyncLogSegmentNonPartitioned();
        for (int j = 1; j <= numEntriesPerLogSegment; j++) {
            long curTxId = txid++;
            DLSN dlsn = Await.result(writer.write(DLMTestUtil.getLogRecordInstance(curTxId)));
            txid2DLSN.put(curTxId, dlsn);
        }
        Utils.close(writer);
        dlm.close();
    }

    if (createInprogressLogSegment) {
        DistributedLogManager dlm = createNewDLM(confLocal, name);
        AsyncLogWriter writer = dlm.startAsyncLogSegmentNonPartitioned();
        for (int j = 1; j <= 10; j++) {
            long curTxId = txid++;
            DLSN dlsn = Await.result(writer.write(DLMTestUtil.getLogRecordInstance(curTxId)));
            txid2DLSN.put(curTxId, dlsn);
        }
        return new ImmutablePair<DistributedLogManager, AsyncLogWriter>(dlm, writer);
    } else {
        return null;
    }
}

From source file:com.offbynull.peernetic.debug.visualizer.JGraphXVisualizer.java

private void addConnection(final AddEdgeCommand<A> command) {
    Validate.notNull(command);

    SwingUtilities.invokeLater(() -> {
        Object parent = graph.getDefaultParent();

        Object fromVertex = nodeLookupMap.get(command.getFrom());
        Object toVertex = nodeLookupMap.get(command.getTo());
        ImmutablePair<A, A> conn = new ImmutablePair<>(command.getFrom(), command.getTo());
        Validate.isTrue(nodeLookupMap.containsKey(command.getFrom()), "Connection %s source doesn't exist",
                conn);
        Validate.isTrue(nodeLookupMap.containsKey(command.getTo()), "Connection %s destination doesn't exist",
                conn);
        Validate.isTrue(!connToEdgeLookupMap.containsKey(conn), "Connection %s already exists", conn);

        if (!connToEdgeLookupMap.containsKey(conn)) {
            Object edge = graph.insertEdge(parent, null, null, fromVertex, toVertex);
            connToEdgeLookupMap.put(conn, edge);
            edgeToConnLookupMap.put(edge, conn);
        }

        zoomFit();
    });
}
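
Using the pair as a key in connToEdgeLookupMap works because ImmutablePair inherits element-based equals() and hashCode() from Pair, so two pairs built from equal endpoints locate the same entry. A quick sketch of that property with placeholder values:

Map<Pair<String, String>, String> edges = new HashMap<>();
edges.put(new ImmutablePair<>("a", "b"), "edge-1");

// A freshly constructed pair with equal elements finds the same entry.
String edge = edges.get(new ImmutablePair<>("a", "b")); // "edge-1"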

From source file:hu.ppke.itk.nlpg.purepos.decoder.AbstractDecoder.java

private Map<NGram<Integer>, Map<Integer, Pair<Double, Double>>> getNextForSingleTaggedToken(
        final Set<NGram<Integer>> prevTagsSet, Collection<Integer> anals) {
    Map<NGram<Integer>, Map<Integer, Pair<Double, Double>>> ret = new HashMap<NGram<Integer>, Map<Integer, Pair<Double, Double>>>();
    for (NGram<Integer> prevTags : prevTagsSet) {
        Map<Integer, Pair<Double, Double>> tagProbs = new HashMap<Integer, Pair<Double, Double>>();
        Integer tag = anals.iterator().next();
        Double tagProb = model.getTagTransitionModel().getLogProb(prevTags.toList(), tag);
        tagProb = tagProb == Float.NEGATIVE_INFINITY ? 0 : tagProb;
        tagProbs.put(tag, new ImmutablePair<Double, Double>(tagProb, 0.0));
        ret.put(prevTags, tagProbs);
    }
    return ret;
}
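
Each inner map entry above stores a pair of two log probabilities, with the second component fixed at 0.0 for this single-analysis case. A hedged sketch of how a caller might read one back, assuming the two components are simply summed downstream (the surrounding decoder code is not part of this excerpt):

// Hypothetical consumer of the (tag transition log-prob, 0.0) pair stored above.
Pair<Double, Double> probs = ret.get(prevTags).get(tag);
double totalLogProb = probs.getLeft() + probs.getRight();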