Example usage for com.google.common.collect ImmutableSortedMap keySet

List of usage examples for com.google.common.collect ImmutableSortedMap keySet

Introduction

This page lists example usages of com.google.common.collect ImmutableSortedMap keySet.

Prototype

public ImmutableSortedSet<K> keySet()

Click the Source link to view the source code for com.google.common.collect ImmutableSortedMap keySet.
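
A minimal, self-contained sketch (not taken from the examples below) of what the prototype returns: keySet() yields an ImmutableSortedSet view whose iteration order follows the map's comparator, with no copying required.

import com.google.common.collect.ImmutableSortedMap;
import com.google.common.collect.ImmutableSortedSet;

public class KeySetSketch {
    public static void main(String[] args) {
        ImmutableSortedMap<String, Integer> map = ImmutableSortedMap.of("b", 2, "a", 1, "c", 3);
        ImmutableSortedSet<String> keys = map.keySet(); // sorted view of the keys
        System.out.println(keys); // [a, b, c]
    }
}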

Usage

From source file: com.yahoo.spaclu.data.index.IndexFeatureValueSpark.java

public static void main(String[] args) throws IOException {
    IndexFeatureValueOptions optionsFormatRawToDatabase = new IndexFeatureValueOptions(args);

    String inputPathString = optionsFormatRawToDatabase.getInputPath();
    String outputPathString = optionsFormatRawToDatabase.getOutputPath();
    String indexPathString = optionsFormatRawToDatabase.getIndexPath();
    int numberOfPartitions = optionsFormatRawToDatabase.getNumberOfPartitions();
    int maxCutoffThreshold = optionsFormatRawToDatabase.getMaximumCutoffThreshold();
    int minCutoffThreshold = optionsFormatRawToDatabase.getMinimumCutoffThreshold();

    /*
     * Set<String> excludingFeatureNames = new HashSet<String>();
     * excludingFeatureNames.add("login");
     * excludingFeatureNames.add("time"); excludingFeatureNames.add("day");
     * excludingFeatureNames.add("hms"); excludingFeatureNames.add("fail");
     */

    sLogger.info("Tool: " + IndexFeatureValueSpark.class.getSimpleName());
    sLogger.info(" - input path: " + inputPathString);
    sLogger.info(" - output path: " + outputPathString);
    sLogger.info(" - index path: " + indexPathString);
    sLogger.info(" - number of partitions: " + numberOfPartitions);
    sLogger.info(" - maximum cutoff: " + maxCutoffThreshold);
    sLogger.info(" - minimum cutoff: " + minCutoffThreshold);

    // Create a default Hadoop configuration
    Configuration conf = new Configuration();

    // Obtain the HDFS FileSystem backed by this configuration
    FileSystem fs = FileSystem.get(conf);

    Path outputPath = new Path(outputPathString);
    if (fs.exists(outputPath)) {
        fs.delete(outputPath, true);
    }

    SparkConf sparkConf = new SparkConf().setAppName(optionsFormatRawToDatabase.toString());

    JavaSparkContext sc = new JavaSparkContext(sparkConf);

    Map<Integer, String> featureIndices = getFeatureIndices(sc.textFile(indexPathString));

    List<Integer> listOfAllFeatureIndices = new LinkedList<Integer>();
    List<String> listOfAllFeatureInfo = new LinkedList<String>();
    Iterator<Integer> indexIter = featureIndices.keySet().iterator();
    while (indexIter.hasNext()) {
        Integer tempKey = indexIter.next();
        listOfAllFeatureIndices.add(tempKey);
        listOfAllFeatureInfo.add(featureIndices.get(tempKey));
    }

    JavaRDD<String> rawLines = sc.textFile(inputPathString).repartition(numberOfPartitions);

    JavaRDD<String[]> tokenizedLines = rawLines.map(new LineFilter(listOfAllFeatureIndices));
    JavaPairRDD<Entry<Integer, String>, Long> featureValuesCounts = tokenizedLines
            .flatMapToPair(new FeatureValueMapper()).reduceByKey(new FeatureValueReducer());

    Map<Integer, Builder<String, Long>> featureValueMapping = new Hashtable<Integer, Builder<String, Long>>();
    Iterator<Tuple2<Entry<Integer, String>, Long>> iter = featureValuesCounts.collect().iterator();
    while (iter.hasNext()) {
        Tuple2<Entry<Integer, String>, Long> temp = iter.next();
        Entry<Integer, String> featureValueEntry = temp._1;
        int featureIndex = featureValueEntry.getKey();
        String featureValue = featureValueEntry.getValue();
        long featureValueCount = temp._2;

        if (!featureValueMapping.containsKey(featureIndex)) {
            Builder<String, Long> mapBuilder = new Builder<String, Long>(Ordering.natural());

            featureValueMapping.put(featureIndex, mapBuilder);
        }

        featureValueMapping.get(featureIndex).put(featureValue, featureValueCount);
    }

    Preconditions.checkArgument(featureValueMapping.size() == listOfAllFeatureIndices.size());

    String outputFeaturePathString = outputPathString + "feature" + Settings.SEPERATOR;
    fs.mkdirs(new Path(outputFeaturePathString));

    String outputFeatureNamePathString = outputPathString + "feature.dat";
    Path outputFeatureNamePath = new Path(outputFeatureNamePathString);
    PrintWriter featureNamePrinterWriter = new PrintWriter(fs.create(outputFeatureNamePath), true);

    List<Integer> listOfFeatureIndicesToKeep = new LinkedList<Integer>();

    Map<Integer, Map<String, Integer>> featureValueIndex = new Hashtable<Integer, Map<String, Integer>>();
    for (int d = 0; d < featureValueMapping.size(); d++) {
        Map<String, Integer> valueToIndex = new Hashtable<String, Integer>();
        Map<Integer, String> indexToValue = new Hashtable<Integer, String>();

        ImmutableSortedMap<String, Long> immutableSortedMap = featureValueMapping.get(d).build();
        for (String keyString : immutableSortedMap.keySet()) {
            valueToIndex.put(keyString, valueToIndex.size());
            indexToValue.put(indexToValue.size(), keyString);
        }

        if (valueToIndex.size() <= minCutoffThreshold || valueToIndex.size() > maxCutoffThreshold) {
            sLogger.info("Feature (" + listOfAllFeatureInfo.get(d) + ") contains " + valueToIndex.size()
                    + " values, skip...");

            continue;
        } else {
            sLogger.info("Feature (" + listOfAllFeatureInfo.get(d) + ") contains " + valueToIndex.size()
                    + " values.");

            listOfFeatureIndicesToKeep.add(listOfAllFeatureIndices.get(d));
            featureNamePrinterWriter.println(listOfAllFeatureInfo.get(d));
        }

        String outputFeatureIndexPathString = outputFeaturePathString + "index" + Settings.UNDER_SCORE
                + featureValueIndex.size() + ".dat";
        Path outputIndexPath = new Path(outputFeatureIndexPathString);

        featureValueIndex.put(featureValueIndex.size(), valueToIndex);

        PrintWriter featureValueIndexPrinterWriter = new PrintWriter(fs.create(outputIndexPath), true);
        for (int i = 0; i < indexToValue.size(); i++) {
            featureValueIndexPrinterWriter.println("" + i + Settings.TAB + indexToValue.get(i) + Settings.TAB
                    + immutableSortedMap.get(indexToValue.get(i)));
        }
        featureValueIndexPrinterWriter.close();
    }

    featureNamePrinterWriter.close();

    JavaRDD<String[]> filteredLines = rawLines.map(new LineFilter(listOfFeatureIndicesToKeep));
    JavaRDD<FeatureIntegerVector> indexedData = filteredLines.map(new FeatureValueIndexer(featureValueIndex));

    String outputDataPathString = outputPathString + "data";
    Path outputDataPath = new Path(outputDataPathString);
    if (fs.exists(outputDataPath)) {
        fs.delete(outputDataPath, true);
    }
    indexedData.saveAsTextFile(outputDataPathString);

    sc.stop();
}
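
The core ImmutableSortedMap idiom in the example above is building per-feature value counts with a Builder and then iterating keySet() in sorted order to assign dense indices. A minimal sketch of just that idiom (values invented):

import java.util.HashMap;
import java.util.Map;
import com.google.common.collect.ImmutableSortedMap;
import com.google.common.collect.Ordering;

public class DenseIndexSketch {
    public static void main(String[] args) {
        ImmutableSortedMap.Builder<String, Long> builder =
                new ImmutableSortedMap.Builder<String, Long>(Ordering.natural());
        builder.put("red", 3L).put("blue", 7L);
        ImmutableSortedMap<String, Long> counts = builder.build();

        Map<String, Integer> valueToIndex = new HashMap<String, Integer>();
        for (String value : counts.keySet()) { // iterates in sorted key order
            valueToIndex.put(value, valueToIndex.size()); // dense ids: blue=0, red=1
        }
        System.out.println(valueToIndex);
    }
}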

From source file: gr.demokritos.iit.cru.creativity.reasoning.semantic.CompetitiveThinkingSpaces.java

public static ArrayList<String> CompetitiveThinkingSpaces(String story, int noOfClusters, String language)
        throws ClassNotFoundException, SQLException, InstantiationException, IllegalAccessException,
        IOException, Exception {

    ArrayList<String> tokensList = new ArrayList<String>();
    Connect c = new Connect(language);
    InfoSummarization inf = new InfoSummarization(c);
    LinkedHashMap<ArrayList<String>, Double> temp = inf.TopTerms(story, true);
    for (ArrayList<String> stems : temp.keySet()) {
        for (int j = 0; j < stems.size(); j++) {
            // for every stem, add each of its corresponding terms to tokensList with the stem's tf
            tokensList.add(stems.get(j).split("\\{")[0] + ";" + temp.get(stems));
        }
    }
    c.CloseConnection();

    //String clusters = properties.getProperty("clusters");
    ArrayList<String> clustersList = new ArrayList<String>();
    KeyphraseClustering kc = new KeyphraseClustering(tokensList, noOfClusters, language);
    Connect m = new Connect(language);
    clustersList = kc.getClusters();
    int dom = 0;
    for (int i = 0; i < clustersList.size(); i++) {
        if (clustersList.get(i).split(";").length > tokensList.size() / 2) {
            dom = i;
        }
    }
    String[] words = clustersList.get(dom)
            .subSequence(clustersList.get(dom).indexOf(";") + 1, clustersList.get(dom).length()).toString()
            .split(";");//keep the rest of the words in the big cluster
    clustersList.set(dom, clustersList.get(dom).split(";")[0] + ";");//first cluster is its first word
    int noWords = (words.length + clustersList.size()) / clustersList.size();
    HashMap<String, Double> tags = new HashMap<String, Double>();
    for (String s : words) {//for all the candidate words
        for (int i = 0; i < clustersList.size(); i++) {
            tags.put(s + "-" + i, m.getDistance(s, clustersList.get(i).split(";")[0]));//semlev(words, clustersList));//store their difference between the words and each of the clusters

        }
    }
    Comparator<String> valueComparator = Ordering.natural().onResultOf(Functions.forMap(tags))
            .compound(Ordering.natural());
    ImmutableSortedMap<String, Double> es = ImmutableSortedMap.copyOf(tags, valueComparator);

    ArrayList<String> examined = new ArrayList<String>();
    // tags in es are sorted ascending by value via the comparator above
    for (String o : es.keySet()) {
        String[] g = o.split("-");
        int pointer = Integer.parseInt(g[1]);
        if (clustersList.get(pointer).split(";").length > noWords || examined.contains(g[0])) {
            ///if the cluster has already as much words as it should have, 
            //or if the word has already been stored into a cluster
            continue; //continue with the next minimum difference in the list
        }
        examined.add(g[0]);
        clustersList.set(pointer, clustersList.get(pointer) + g[0] + ";");
    }

    if (examined.size() < words.length) {// if some words have not been set to clusters
        //put them in the first cluster
        for (String h : words) {
            if (!examined.contains(h)) {
                clustersList.set(0, clustersList.get(0) + h + ";");
            }
        }
    }
    m.CloseConnection();
    return clustersList;
}
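
The example above uses ImmutableSortedMap.copyOf with a custom comparator as a sort-map-by-value idiom; iterating es.keySet() then visits keys in ascending value order. A minimal sketch of that idiom alone (tag names and scores invented):

import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import com.google.common.base.Functions;
import com.google.common.collect.ImmutableSortedMap;
import com.google.common.collect.Ordering;

public class SortByValueSketch {
    public static void main(String[] args) {
        Map<String, Double> tags = new HashMap<String, Double>();
        tags.put("alpha", 0.9);
        tags.put("beta", 0.1);
        Comparator<String> byValue = Ordering.natural()
                .onResultOf(Functions.forMap(tags))
                .compound(Ordering.natural()); // break ties by key so no two keys compare equal
        ImmutableSortedMap<String, Double> sorted = ImmutableSortedMap.copyOf(tags, byValue);
        System.out.println(sorted.keySet()); // [beta, alpha], ascending by value
    }
}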

From source file: edu.mit.streamjit.util.bytecode.methodhandles.Combinators.java

/**
 * Returns a method handle with a leading int argument that selects one of
 * the method handles in the given map, which is invoked with the remaining
 * arguments.  If the leading int argument is not present in the map, an
 * AssertionError will be thrown.
 * @param cases the switch cases
 * @return a method handle approximating the switch statement
 */
public static MethodHandle lookupswitch(Map<Integer, MethodHandle> cases) {
    ImmutableSortedMap<Integer, MethodHandle> sortedCases = ImmutableSortedMap.copyOf(cases);
    String validCases = sortedCases.keySet().toString();
    IntConsumer defaultCaseAction = (idx) -> {
        throw new AssertionError(String.format("lookupswitch index %d not in cases %s", idx, validCases));
    };
    MethodHandle defaultCase = INTCONSUMER_ACCEPT.bindTo(defaultCaseAction);
    if (!sortedCases.values().isEmpty()) {
        //just pick an arbitrary element -- we'll catch type mismatches later
        MethodType t = sortedCases.values().iterator().next().type();
        defaultCase = MethodHandles.dropArguments(defaultCase, 1, t.parameterArray());
        defaultCase = defaultCase.asType(defaultCase.type().changeReturnType(t.returnType()));
    }
    return lookupswitch(sortedCases, defaultCase);
}
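
A hedged usage sketch of the combinator above; the demo class and target methods (LookupswitchSketch, caseOne, caseTwo) are invented for illustration, while Combinators.lookupswitch is the method defined above:

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.util.Map;
import com.google.common.collect.ImmutableMap;

public class LookupswitchSketch {
    static String caseOne(String s) { return "one:" + s; }
    static String caseTwo(String s) { return "two:" + s; }

    public static void main(String[] args) throws Throwable {
        MethodHandles.Lookup lookup = MethodHandles.lookup();
        MethodType type = MethodType.methodType(String.class, String.class);
        Map<Integer, MethodHandle> cases = ImmutableMap.of(
                1, lookup.findStatic(LookupswitchSketch.class, "caseOne", type),
                2, lookup.findStatic(LookupswitchSketch.class, "caseTwo", type));
        // the combinator prepends an int selector argument: (int, String) -> String
        MethodHandle sw = Combinators.lookupswitch(cases);
        System.out.println((String) sw.invokeExact(1, "x")); // one:x
        System.out.println((String) sw.invokeExact(2, "y")); // two:y
        // sw.invokeExact(3, "z") would throw AssertionError, per the javadoc
    }
}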

From source file: org.apache.kylin.query.util.ConvertToComputedColumn.java

static String replaceComputedColumn(String inputSql, ImmutableSortedMap<String, String> computedColumn) {
    if (inputSql == null) {
        return "";
    }

    if (computedColumn == null || computedColumn.isEmpty()) {
        return inputSql;
    }
    String result = inputSql;
    String[] lines = inputSql.split("\n");
    List<Pair<String, String>> toBeReplacedExp = new ArrayList<>(); //{"alias":"expression"}, like {"t1":"t1.a+t1.b+t1.c"}

    for (String ccExp : computedColumn.keySet()) {
        List<SqlNode> matchedNodes = new ArrayList<>();
        try {
            matchedNodes = getMatchedNodes(inputSql, computedColumn.get(ccExp));
        } catch (SqlParseException e) {
            logger.error("Convert to computedColumn Fail,parse sql fail ", e.getMessage());
        }
        for (SqlNode node : matchedNodes) {
            Pair<Integer, Integer> startEndPos = CalciteParser.getReplacePos(node, lines);
            int start = startEndPos.getLeft();
            int end = startEndPos.getRight();
            //add table alias like t1.column,if exists alias
            String alias = getTableAlias(node);
            toBeReplacedExp.add(Pair.of(alias, inputSql.substring(start, end)));
        }
        logger.debug("Computed column: " + ccExp + "'s matched list:" + toBeReplacedExp);
        //replace user's input sql
        for (Pair<String, String> toBeReplaced : toBeReplacedExp) {
            result = result.replace(toBeReplaced.getRight(), toBeReplaced.getLeft() + ccExp);
        }
    }
    return result;
}
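
A hypothetical illustration of the inputs (the column name, expression, and SQL below are invented): the map's keys are computed-column names and its values are the expressions they stand for, and iterating computedColumn.keySet() drives the rewrite.

ImmutableSortedMap<String, String> cc = ImmutableSortedMap.of(
        "DEAL_AMOUNT", "T1.PRICE * T1.NUMBER");
String rewritten = replaceComputedColumn("select T1.PRICE * T1.NUMBER from T1", cc);
// with a suitable alias prefix from getTableAlias, the matched expression is
// replaced by the computed column, e.g. "select T1.DEAL_AMOUNT from T1"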

From source file: edu.mit.streamjit.util.bytecode.methodhandles.Combinators.java

private static MethodHandle lookupswitch0(ImmutableSortedMap<Integer, MethodHandle> cases,
        MethodHandle defaultCase) {
    if (cases.isEmpty())
        return defaultCase;
    if (cases.size() == 1) {
        Map.Entry<Integer, MethodHandle> next = cases.entrySet().iterator().next();
        return MethodHandles.guardWithTest(eq(next.getKey()),
                MethodHandles.dropArguments(next.getValue(), 0, int.class), //discard the case index
                defaultCase);
    }
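    // Binary search: test against the median key and recurse on each half, so
    // dispatch costs O(log n) guardWithTest checks instead of a linear chain.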
    int median = median(cases.keySet().asList());
    return MethodHandles.guardWithTest(le(median), lookupswitch0(cases.headMap(median, true), defaultCase),
            lookupswitch0(cases.tailMap(median, false), defaultCase));
}

From source file: org.gradle.api.internal.changedetection.rules.InputPropertiesTaskStateChanges.java

public InputPropertiesTaskStateChanges(@Nullable TaskExecution previousExecution,
        TaskExecution currentExecution, TaskInternal task, ValueSnapshotter valueSnapshotter) {
    ImmutableSortedMap<String, ValueSnapshot> previousInputProperties = previousExecution == null
            ? ImmutableSortedMap.<String, ValueSnapshot>of()
            : previousExecution.getInputProperties();
    ImmutableSortedMap.Builder<String, ValueSnapshot> builder = ImmutableSortedMap.naturalOrder();
    removed = new HashSet<String>(previousInputProperties.keySet());
    changed = new HashSet<String>();
    added = new HashSet<String>();
    for (Map.Entry<String, Object> entry : task.getInputs().getProperties().entrySet()) {
        String propertyName = entry.getKey();
        Object value = entry.getValue();
        try {/*from  w w  w  .  j ava  2 s . c  o  m*/
            removed.remove(propertyName);
            ValueSnapshot previousSnapshot = previousInputProperties.get(propertyName);
            if (previousSnapshot == null) {
                added.add(propertyName);
                builder.put(propertyName, valueSnapshotter.snapshot(value));
            } else {
                ValueSnapshot newSnapshot = valueSnapshotter.snapshot(value, previousSnapshot);
                if (newSnapshot == previousSnapshot) {
                    builder.put(propertyName, previousSnapshot);
                } else {
                    changed.add(propertyName);
                    builder.put(propertyName, valueSnapshotter.snapshot(value));
                }
            }
        } catch (Exception e) {
            throw new GradleException(String.format(
                    "Unable to store input properties for %s. Property '%s' with value '%s' cannot be serialized.",
                    task, propertyName, value), e);
        }
    }

    currentExecution.setInputProperties(builder.build());
    this.task = task;
}
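
The pattern above, seeding removed with the previous keySet() and then classifying each current property as added, changed, or unchanged while pruning removed, is a general three-way diff idiom. A minimal sketch with invented property names and values:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import com.google.common.collect.ImmutableSortedMap;

public class PropertyDiffSketch {
    public static void main(String[] args) {
        ImmutableSortedMap<String, Integer> previous = ImmutableSortedMap.of("a", 1, "b", 2);
        Map<String, Integer> current = new HashMap<String, Integer>();
        current.put("b", 3);
        current.put("c", 4);

        Set<String> removed = new HashSet<String>(previous.keySet()); // assume all removed...
        Set<String> added = new HashSet<String>();
        Set<String> changed = new HashSet<String>();
        for (Map.Entry<String, Integer> e : current.entrySet()) {
            removed.remove(e.getKey()); // ...until seen in the current set
            Integer prev = previous.get(e.getKey());
            if (prev == null) {
                added.add(e.getKey());
            } else if (!prev.equals(e.getValue())) {
                changed.add(e.getKey());
            }
        }
        System.out.println("added=" + added + " changed=" + changed + " removed=" + removed);
        // added=[c] changed=[b] removed=[a]
    }
}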

From source file: org.geogit.api.plumbing.diff.DiffCounter.java

/**
 * Counts the number of differences between two trees that contain {@link RevTree#buckets()
 * buckets} instead of direct {@link RevTree#children() children}
 */
private DiffObjectCount countBucketDiffs(ImmutableSortedMap<Integer, Bucket> leftBuckets,
        ImmutableSortedMap<Integer, Bucket> rightBuckets) {

    DiffObjectCount count = new DiffObjectCount();
    final Set<Integer> bucketIds = Sets.union(leftBuckets.keySet(), rightBuckets.keySet());

    ObjectId leftTreeId;
    ObjectId rightTreeId;

    for (Integer bucketId : bucketIds) {
        @Nullable
        Bucket leftBucket = leftBuckets.get(bucketId);
        @Nullable
        Bucket rightBucket = rightBuckets.get(bucketId);

        leftTreeId = leftBucket == null ? null : leftBucket.id();
        rightTreeId = rightBucket == null ? null : rightBucket.id();

        if (leftTreeId == null || rightTreeId == null) {
            count.add(sizeOfTree(leftTreeId == null ? rightTreeId : leftTreeId));
        } else {
            count.add(countDiffs(leftTreeId, rightTreeId));
        }
    }
    return count;
}
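
The union of the two keySet() views visits every bucket index that appears on either side. A minimal sketch of that idiom (bucket values invented); note that Sets.union iterates the first set and then the second set's extras, so the result is not globally sorted, which is why traverseBucketBucket further below copies the union into a TreeSet:

import com.google.common.collect.ImmutableSortedMap;
import com.google.common.collect.Sets;

public class KeyUnionSketch {
    public static void main(String[] args) {
        ImmutableSortedMap<Integer, String> left = ImmutableSortedMap.of(0, "a", 2, "b");
        ImmutableSortedMap<Integer, String> right = ImmutableSortedMap.of(2, "b", 5, "c");
        for (Integer id : Sets.union(left.keySet(), right.keySet())) {
            System.out.println(id + ": left=" + left.get(id) + ", right=" + right.get(id));
        }
        // visits ids 0, 2, 5 exactly once each
    }
}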

From source file: com.google.devtools.build.lib.rules.cpp.CcIncLibrary.java

@Override
public ConfiguredTarget create(final RuleContext ruleContext) throws RuleErrorException, InterruptedException {
    FeatureConfiguration featureConfiguration = CcCommon.configureFeatures(ruleContext);
    PathFragment packageFragment = ruleContext.getPackageDirectory();

    // The rule needs a unique location for the include directory, which doesn't conflict with any
    // other rule. For that reason, the include directory is at:
    // configuration/package_name/_/target_name
    // And then the symlink is placed at:
    // configuration/package_name/_/target_name/package_name
    // So that these inclusions can be resolved correctly:
    // #include "package_name/a.h"
    //
    // The target of the symlink is:
    // package_name/targetPrefix/
    // All declared header files must be below that directory.
    String expandedIncSymlinkAttr = ruleContext.attributes().get("prefix", Type.STRING);

    // We use an additional "_" directory here to avoid conflicts between this and previous Blaze
    // versions. Previous Blaze versions created a directory symlink; the new version does not
    // detect that the output directory isn't a directory, and tries to put the symlinks into what
    // is actually a symlink into the source tree.
    PathFragment includeDirectory = new PathFragment("_").getRelative(ruleContext.getTarget().getName());
    Root configIncludeDirectory = ruleContext.getConfiguration()
            .getIncludeDirectory(ruleContext.getRule().getRepository());
    PathFragment includePath = configIncludeDirectory.getExecPath().getRelative(packageFragment)
            .getRelative(includeDirectory);
    Path includeRoot = configIncludeDirectory.getPath().getRelative(packageFragment)
            .getRelative(includeDirectory);

    // For every source artifact, we compute a virtual artifact that is below the include directory.
    // These are used for include checking.
    PathFragment prefixFragment = packageFragment.getRelative(expandedIncSymlinkAttr);
    if (!prefixFragment.isNormalized()) {
        ruleContext.attributeWarning("prefix", "should not contain '.' or '..' elements");
    }
    ImmutableSortedMap.Builder<Artifact, Artifact> virtualArtifactMapBuilder = ImmutableSortedMap
            .orderedBy(Artifact.EXEC_PATH_COMPARATOR);
    prefixFragment = prefixFragment.normalize();
    ImmutableList<Artifact> hdrs = ruleContext.getPrerequisiteArtifacts("hdrs", Mode.TARGET).list();
    for (Artifact src : hdrs) {
        // All declared header files must start with package/targetPrefix.
        if (!src.getRootRelativePath().startsWith(prefixFragment)) {
            ruleContext.attributeError("hdrs",
                    src + " does not start with '" + prefixFragment.getPathString() + "'");
            return null;
        }

        // Remove the targetPrefix from within the exec path of the source file, and prepend the
        // unique directory prefix, e.g.:
        // third_party/foo/1.2/bar/a.h -> third_party/foo/name/third_party/foo/bar/a.h
        PathFragment suffix = src.getRootRelativePath().relativeTo(prefixFragment);
        PathFragment virtualPath = includeDirectory.getRelative(packageFragment).getRelative(suffix);

        // These virtual artifacts have the symlink action as generating action.
        Artifact virtualArtifact = ruleContext.getPackageRelativeArtifact(virtualPath, configIncludeDirectory);
        virtualArtifactMapBuilder.put(virtualArtifact, src);
    }
    ImmutableSortedMap<Artifact, Artifact> virtualArtifactMap = virtualArtifactMapBuilder.build();
    ruleContext.registerAction(
            new CreateIncSymlinkAction(ruleContext.getActionOwner(), virtualArtifactMap, includeRoot));

    CcLibraryHelper.Info info = new CcLibraryHelper(ruleContext, semantics, featureConfiguration)
            .addIncludeDirs(Arrays.asList(includePath)).addPublicHeaders(virtualArtifactMap.keySet())
            .addDeps(ruleContext.getPrerequisites("deps", Mode.TARGET)).build();

    // cc_inc_library doesn't compile any file - no compilation outputs available.
    InstrumentedFilesProvider instrumentedFilesProvider = new CcCommon(ruleContext)
            .getInstrumentedFilesProvider(new ArrayList<Artifact>(), /*withBaselineCoverage=*/true);

    return new RuleConfiguredTargetBuilder(ruleContext).addProviders(info.getProviders())
            .addSkylarkTransitiveInfo(CcSkylarkApiProvider.NAME, new CcSkylarkApiProvider())
            .addOutputGroups(info.getOutputGroups())
            .add(InstrumentedFilesProvider.class, instrumentedFilesProvider)
            .add(RunfilesProvider.class, RunfilesProvider.simple(Runfiles.EMPTY)).build();
}
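
The ImmutableSortedMap-specific idiom above is orderedBy(Artifact.EXEC_PATH_COMPARATOR): build with an explicit comparator, then hand keySet() to the next stage (here addPublicHeaders). A minimal, self-contained sketch of the same idiom with a simpler comparator (all names invented):

import java.util.Comparator;
import com.google.common.collect.ImmutableSortedMap;

public class OrderedBySketch {
    public static void main(String[] args) {
        Comparator<String> byLengthThenNatural =
                Comparator.comparingInt(String::length).thenComparing(Comparator.<String>naturalOrder());
        ImmutableSortedMap<String, Integer> map = ImmutableSortedMap
                .<String, Integer>orderedBy(byLengthThenNatural)
                .put("bb", 2).put("a", 1).put("ccc", 3)
                .build();
        System.out.println(map.keySet()); // [a, bb, ccc], ordered by the comparator
    }
}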

From source file: org.geogit.api.plumbing.diff.DiffTreeVisitor.java

/**
 * Traverse two bucket trees and notify their differences to the {@code consumer}.
 * <p>//from   ww  w . ja v a2 s .c o m
 * If this method is called than its guaranteed that the two bucket trees are note equal (one of
 * them may be empty though), and that {@link Consumer#bucket} returned {@code true}
 * <p>
 * For each bucket index present in the joint set of the two trees buckets,
 * {@link #traverseTree(Consumer, RevTree, RevTree, int)} will be called for the bucket trees
 * that are not equal with {@code bucketDepth} incremented by one.
 * 
 * @param consumer the callback object to receive diff events from the comparison of the two
 *        trees
 * @param left the bucket tree at the left side of the comparison
 * @param right the bucket tree at the right side of the comparison
 * @param bucketDepth the current depth at which the comparison is evaluating these two bucket
 *        trees
 * @see #traverseTree(Consumer, RevTree, RevTree, int)
 * @precondition {@code !left.equals(right)}
 * @precondition {@code left.isEmpty() || left.buckets().isPresent()}
 * @precondition {@code right.isEmpty() || right.buckets().isPresent()}
 */
private void traverseBucketBucket(Consumer consumer, final RevTree left, final RevTree right,
        final int bucketDepth) {
    checkState(left.isEmpty() || left.buckets().isPresent());
    checkState(right.isEmpty() || right.buckets().isPresent());

    ImmutableSortedMap<Integer, Bucket> lb = left.buckets().get();
    ImmutableSortedMap<Integer, Bucket> rb = right.buckets().get();
    TreeSet<Integer> availableIndexes = newTreeSet(union(lb.keySet(), rb.keySet()));

    @Nullable
    Bucket lbucket;
    @Nullable
    Bucket rbucket;
    for (Integer index : availableIndexes) {
        lbucket = lb.get(index);
        rbucket = rb.get(index);
        if (Objects.equal(lbucket, rbucket)) {
            continue;
        }
        if (consumer.bucket(index.intValue(), bucketDepth, lbucket, rbucket)) {
            RevTree ltree = lbucket == null ? RevTree.EMPTY : leftSource.getTree(lbucket.id());
            RevTree rtree = rbucket == null ? RevTree.EMPTY : rightSource.getTree(rbucket.id());
            traverseTree(consumer, ltree, rtree, bucketDepth + 1);
        }
    }
}

From source file: se.sics.caracaldb.global.DefaultPolicy.java

private void replaceFailedNodes(LUTWorkingBuffer lut, ImmutableSortedMap<Integer, Address> failIds,
        ImmutableSortedMap<Integer, Address> joinIds, ExtremeKMap<Double, Address> xKMemory,
        ExtremeKMap<Double, Address> xKCpu, ImmutableMap<Address, Stats.Report> stats) {

    TreeSet<Integer> idsToDealWith = new TreeSet<Integer>(failIds.keySet());
    TreeMultimap<Long, Integer> candidates = TreeMultimap.create();
    // If a node fails and rejoins immediately, assign the same id and don't touch 
    // its replicationSets, since it may still have data from before the failure
    for (Entry<Integer, Address> e : joinIds.entrySet()) {
        Address curNode = lut.lut.getHost(e.getKey());
        if (curNode.equals(e.getValue())) {
            idsToDealWith.remove(e.getKey());
            candidates.put(0L, e.getKey());
        }
    }
    // Add nodes with lowest resource usage to candidates
    ImmutableSet.Builder<Address> candidateAddrs = ImmutableSet.builder(); // still need to look up their ids
    candidateAddrs.addAll(xKMemory.bottom().values()).addAll(xKCpu.bottom().values()).build();
    Map<Address, Integer> candidateIds = lut.lut.getIdsForAddresses(candidateAddrs.build());
    for (Entry<Address, Integer> e : candidateIds.entrySet()) {
        Address addr = e.getKey();
        Integer id = e.getValue();
        Stats.Report rep = stats.get(addr);
        long curSize = rep.averageSize * rep.numberOfVNodes;
        candidates.put(curSize, id);
    }
    // Replace nodes in affected sets
    int index = 0;
    for (Integer[] member : lut.replicationSets()) {
        Integer[] newRepSet = Arrays.copyOf(member, member.length);
        for (int pos = 0; pos < member.length; pos++) {
            if (idsToDealWith.contains(member[pos])) {
                long lowestSize = candidates.keySet().first();
                if (lowestSize > averageHostSize) {
                    addMoreCandidates(lut, candidates, stats);
                }
                // pick the first (lowestSize) host not in the replicationSet
                long curSize = -1;
                long addedSize = -1;
                for (Entry<Long, Integer> e : candidates.entries()) {
                    if (LookupTable.positionInSet(newRepSet, e.getValue()) < 0) {
                        newRepSet[pos] = e.getValue();
                        curSize = e.getKey();
                        addedSize = guessAddedSize(lut, member, stats);
                        break;
                    }
                }
                if ((curSize < 0) || (addedSize < 0)) {
                    LOG.error("Could not find any candidate for replacing {} in replicationSet {}!",
                            member[pos], index);
                    continue;
                }
                // Update candidates
                candidates.remove(curSize, newRepSet[pos]);
                candidates.put(curSize + addedSize, newRepSet[pos]);
            }
        }
        if (!Arrays.equals(member, newRepSet)) {
            lut.putRepSet(index, newRepSet);
        }
        index++;
    }
}
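
A minimal sketch of the candidate bookkeeping used above, where a TreeMultimap keyed by estimated host size makes the least-loaded candidate available via keySet().first(); the sizes and node ids are invented:

import com.google.common.collect.TreeMultimap;

public class CandidateSketch {
    public static void main(String[] args) {
        TreeMultimap<Long, Integer> candidates = TreeMultimap.create();
        candidates.put(100L, 7); // node 7 stores roughly 100 units
        candidates.put(40L, 3);  // node 3 is the least loaded

        long lowestSize = candidates.keySet().first();   // 40
        int picked = candidates.get(lowestSize).first(); // node 3

        // after assigning more data to the picked node, reinsert it with its new estimate
        candidates.remove(lowestSize, picked);
        candidates.put(lowestSize + 25L, picked);
        System.out.println(candidates); // {65=[3], 100=[7]}
    }
}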