Example usage for com.google.common.collect Ordering natural

Introduction

This page collects usage examples for com.google.common.collect.Ordering.natural().

Prototype

@GwtCompatible(serializable = true)
@SuppressWarnings("unchecked") 
public static <C extends Comparable> Ordering<C> natural() 

Document

Returns a serializable ordering that uses the natural order of the values.
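
As a quick, minimal sketch (not taken from the examples below; the data is hypothetical), the returned ordering can be used directly to sort or compare values:

import com.google.common.collect.Ordering;
import java.util.Arrays;
import java.util.List;

// Sort a list by natural order and pick the maximum element.
List<Integer> numbers = Arrays.asList(3, 1, 2);
List<Integer> sorted = Ordering.natural().sortedCopy(numbers); // [1, 2, 3]
Integer max = Ordering.natural().max(numbers);                 // 3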

Usage

From source file:eu.interedition.collatex.util.VariantGraphRanking.java

public RowSortedTable<Integer, Witness, Set<Token>> asTable() {
    final TreeBasedTable<Integer, Witness, Set<Token>> table = TreeBasedTable.create(Ordering.natural(),
            Witness.SIGIL_COMPARATOR);
    for (Map.Entry<VariantGraph.Vertex, Integer> rank : byVertex.entrySet()) {
        final int row = rank.getValue();
        for (Token token : rank.getKey().tokens(witnesses)) {
            final Witness column = token.getWitness();

            Set<Token> cell = table.get(row, column);
            if (cell == null) {
                table.put(row, column, cell = Sets.newHashSet());
            }
            cell.add(token);
        }
    }
    return table;
}
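
Here Ordering.natural() supplies the row comparator of the TreeBasedTable. Because Ordering implements java.util.Comparator, the same call can be passed to any API that expects a Comparator; a minimal sketch with hypothetical data:

import com.google.common.collect.Ordering;
import com.google.common.collect.TreeBasedTable;

// Rows ordered by natural integer order, columns by natural string order.
TreeBasedTable<Integer, String, String> table =
        TreeBasedTable.create(Ordering.natural(), Ordering.natural());
table.put(2, "b", "second");
table.put(1, "a", "first");
// table.rowKeySet() now iterates as [1, 2]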

From source file:com.facebook.buck.core.rules.impl.SymlinkTree.java

/**
 * Creates an instance of {@link SymlinkTree}
 *
 * @param category A name used in the symlink steps
 * @param target The target for this rule
 * @param filesystem The filesystem that the tree lives on
 * @param root The directory to create symlinks in
 * @param links A map of path within the link tree to the target of the symlink
 * @param directoriesToMerge A map of relative paths within the link tree into which files from
 *     the value will be recursively linked. e.g. if a file at /tmp/foo/bar should be linked as
 *     /tmp/symlink-root/subdir/bar, the map should contain {Paths.get("subdir"),
 *     SourcePath(Paths.get("tmp", "foo")) }
 * @param ruleFinder Used to iterate over {@code directoriesToMerge} in order to get the build-time deps
 */
public SymlinkTree(String category, BuildTarget target, ProjectFilesystem filesystem, Path root,
        ImmutableMap<Path, SourcePath> links, ImmutableMultimap<Path, SourcePath> directoriesToMerge,
        SourcePathRuleFinder ruleFinder) {
    super(target, filesystem);
    this.category = category;
    this.directoriesToMerge = directoriesToMerge;

    this.buildDeps = directoriesToMerge.values().stream().map(ruleFinder::getRule).filter(Optional::isPresent)
            .map(Optional::get).collect(ImmutableSortedSet.toImmutableSortedSet(Ordering.natural()));

    Preconditions.checkState(!root.isAbsolute(), "Expected symlink tree root to be relative: %s", root);

    this.root = root;
    this.links = ImmutableSortedMap.copyOf(links);

    this.type = category + "_symlink_tree";
}
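
The stream collector above builds an ImmutableSortedSet of build rules in natural order. The same collector pattern, shown on its own with hypothetical string data:

import com.google.common.collect.ImmutableSortedSet;
import com.google.common.collect.Ordering;
import java.util.stream.Stream;

// Collect a stream into an immutable set sorted by natural order.
ImmutableSortedSet<String> names = Stream.of("charlie", "alpha", "bravo")
        .collect(ImmutableSortedSet.toImmutableSortedSet(Ordering.natural()));
// names iterates as [alpha, bravo, charlie]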

From source file:com.dropbox.presto.kafka.KafkaSplitManager.java

@Override
public SplitSource getPartitionSplits(TableHandle tableHandle, List<Partition> partitions) {
    checkNotNull(partitions, "partitions is null");

    Partition partition = Iterables.getFirst(partitions, null);
    if (partition != null) {
        checkArgument(partition instanceof HivePartition, "Partition must be a hive partition");
    }

    SchemaTableName tableName = getTableName(tableHandle);
    Table table = null;
    Iterable<org.apache.hadoop.hive.metastore.api.Partition> hivePartitions = null;
    try {
        table = metastore().getTable(tableName.getSchemaName(), tableName.getTableName());
        if (table.getPartitionKeys() != null && table.getPartitionKeys().size() > 0) {
            List<String> partitionNames = new ArrayList<String>(
                    Lists.transform(partitions, partitionIdGetter()));
            Collections.sort(partitionNames, Ordering.natural().reverse());
            hivePartitions = getPartitions(table, tableName, partitionNames);
        }
    } catch (NoSuchObjectException e) {
        throw new RuntimeException(tableName + " not found.");
    } catch (MetaException e) {
        throw new RuntimeException(tableName + " not found.");
    }

    return new KafkaSplitSourceProvider(connectorId, table, hivePartitions, kafkaConfig).get();
}
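
Ordering.natural().reverse() gives the descending counterpart of the natural order, used above to sort the partition names newest-first. A minimal sketch with hypothetical data:

import com.google.common.collect.Ordering;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Sort strings in descending natural order.
List<String> partitionNames = new ArrayList<>(Arrays.asList("ds=2014-01-01", "ds=2014-01-03", "ds=2014-01-02"));
Collections.sort(partitionNames, Ordering.natural().reverse());
// partitionNames is now [ds=2014-01-03, ds=2014-01-02, ds=2014-01-01]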

From source file:com.arcbees.chosen.client.gwt.BaseChosenValueListBox.java

/**
 * Removes values from the acceptable values list. This method updates the component automatically.
 * <p/>
 * Calling this method will not reset the currently selected value(s) unless they are no longer in the
 * accepted values list.
 */
public void removeValues(List<T> valuesToRemove) {
    // We have to remove values in decreasing order of their index; otherwise we would have to update
    // the index map after every removal and this method would run in O(n^2).
    TreeSet<Integer> indexToRemove = new TreeSet<Integer>(Ordering.natural().reverse());

    for (T value : valuesToRemove) {
        Object key = keyProvider.getKey(value);
        if (valueKeyToIndex.containsKey(key)) {
            indexToRemove.add(valueKeyToIndex.get(key));
        }
    }

    if (!indexToRemove.isEmpty()) {
        for (int index : indexToRemove) {
            removeItem(index);
        }

        updateAfterRemoval();
    }
}

From source file:org.sonar.server.ws.WebServicesWs.java

void handleList(List<Controller> controllers, Request request, Response response) {
    boolean includeInternals = request.mandatoryParamAsBoolean("include_internals");
    JsonWriter writer = response.newJsonWriter();
    writer.beginObject();
    writer.name("webServices").beginArray();

    // sort controllers by path
    Ordering<Controller> ordering = Ordering.natural().onResultOf(Controller::path);
    for (Controller controller : ordering.sortedCopy(controllers)) {
        writeController(writer, controller, includeInternals);
    }
    writer.endArray();
    writer.endObject();
    writer.close();
}
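
onResultOf derives the ordering from a key extracted by a function, here the controller path. The same pattern applied to plain strings, as a minimal sketch:

import com.google.common.collect.Ordering;
import java.util.Arrays;
import java.util.List;

// Order strings by their length, using the natural ordering of the extracted integers.
Ordering<String> byLength = Ordering.natural().onResultOf(String::length);
List<String> sorted = byLength.sortedCopy(Arrays.asList("ccc", "a", "bb"));
// sorted is [a, bb, ccc]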

From source file:com.yahoo.druid.hadoop.HiveDatasourceInputFormat.java

@Override
public InputSplit[] getSplits(JobConf jobConf, int numSplits) throws IOException {
    logger.info("checkPost #5");

    String overlordUrl = jobConf.get(CONF_DRUID_OVERLORD_HOSTPORT);
    Preconditions.checkArgument(overlordUrl != null && !overlordUrl.isEmpty(),
            CONF_DRUID_OVERLORD_HOSTPORT + " not defined");

    logger.info("druid overlord url = " + overlordUrl);

    String schemaStr = jobConf.get(CONF_DRUID_SCHEMA);

    Preconditions.checkArgument(schemaStr != null && !schemaStr.isEmpty(),
            "schema undefined,  provide " + CONF_DRUID_SCHEMA);
    logger.info("schema = " + schemaStr);

    DatasourceIngestionSpec ingestionSpec = HadoopDruidIndexerConfig.JSON_MAPPER.readValue(schemaStr,
            DatasourceIngestionSpec.class);
    String segmentsStr = getSegmentsToLoad(ingestionSpec.getDataSource(), ingestionSpec.getIntervals(),
            overlordUrl);
    logger.info("segments list received from overlord = " + segmentsStr);

    List<DataSegment> segmentsList = HadoopDruidIndexerConfig.JSON_MAPPER.readValue(segmentsStr,
            new TypeReference<List<DataSegment>>() {
            });
    VersionedIntervalTimeline<String, DataSegment> timeline = new VersionedIntervalTimeline<>(
            Ordering.natural());
    for (DataSegment segment : segmentsList) {
        timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(segment));
    }
    final List<TimelineObjectHolder<String, DataSegment>> timeLineSegments = timeline
            .lookup(ingestionSpec.getIntervals().get(0));
    final List<WindowedDataSegment> windowedSegments = new ArrayList<>();
    for (TimelineObjectHolder<String, DataSegment> holder : timeLineSegments) {
        for (PartitionChunk<DataSegment> chunk : holder.getObject()) {
            windowedSegments.add(new WindowedDataSegment(chunk.getObject(), holder.getInterval()));
        }
    }

    jobConf.set(CONF_INPUT_SEGMENTS, HadoopDruidIndexerConfig.JSON_MAPPER.writeValueAsString(windowedSegments));

    segmentsStr = Preconditions.checkNotNull(jobConf.get(CONF_INPUT_SEGMENTS), "No segments found to read");
    List<WindowedDataSegment> segments = HadoopDruidIndexerConfig.JSON_MAPPER.readValue(segmentsStr,
            new TypeReference<List<WindowedDataSegment>>() {
            });
    if (segments == null || segments.size() == 0) {
        throw new ISE("No segments found to read");
    }

    logger.info("segments to read " + segmentsStr);

    long maxSize = numSplits;

    if (maxSize > 0) {
        // Combining is to happen, so sort the segments list by size so that
        // they are combined appropriately.
        Collections.sort(segments, new Comparator<WindowedDataSegment>() {
            @Override
            public int compare(WindowedDataSegment s1, WindowedDataSegment s2) {
                return Long.compare(s1.getSegment().getSize(), s2.getSegment().getSize());
            }
        });
    }

    List<InputSplit> splits = Lists.newArrayList();

    List<WindowedDataSegment> list = new ArrayList<>();
    long size = 0;

    // JobConf dummyConf = new JobConf();
    Job job = new Job(jobConf);
    JobContext jobContext = ShimLoader.getHadoopShims().newJobContext(job);
    Path[] paths = org.apache.hadoop.mapreduce.lib.input.FileInputFormat.getInputPaths(jobContext);
    logger.info("dummyPath : " + paths);

    jobConf.set("druid.hive.dummyfilename", paths[0].toString());

    InputFormat fio = supplier.get();
    for (WindowedDataSegment segment : segments) {
        if (size + segment.getSegment().getSize() > maxSize && size > 0) {
            splits.add(toDataSourceSplit(list, fio, jobConf, paths[0]));
            list = Lists.newArrayList();
            size = 0;
        }

        list.add(segment);
        size += segment.getSegment().getSize();
    }

    if (list.size() > 0) {
        splits.add(toDataSourceSplit(list, fio, jobConf, paths[0]));
    }

    logger.info("Number of splits: " + splits.size());
    for (InputSplit split : splits) {
        logger.info(split.getClass().getName());
        for (String location : split.getLocations())
            logger.info(location);
    }
    return Iterables.toArray(splits, InputSplit.class);
}

From source file:de.iteratec.iteraplan.businesslogic.exchange.common.contextoverview.RelationTypeConnect.java

@Override
public List<NameUriPair> getBuildingBlocks(int insId) {
    BigInteger mainId = BigInteger.valueOf(insId);
    ObjectExpression mainIns = model.findById(insType, mainId);
    RRelationshipEndExpression relEnd1stHop = insType.findRelationshipEndByPersistentName(relEndPoint1stHop);
    RStructuredTypeExpression relationshipType = relEnd1stHop.getType();
    Collection<ObjectExpression> relators = relEnd1stHop.apply(mainIns).getMany();
    RRelationshipEndExpression relEnd2ndHop = relationshipType
            .findRelationshipEndByPersistentName(relEndPoint2ndHop);
    RStructuredTypeExpression connectedType = relEnd2ndHop.getType();

    Set<ObjectExpression> connectedOEs = Sets.newHashSet();
    for (ObjectExpression relatorOE : relators) {
        connectedOEs.addAll(relEnd2ndHop.apply(relatorOE).getMany());
    }

    final RPropertyExpression connectedTypeNameProp = connectedType
            .findPropertyByPersistentName(ElasticMiConstants.PERSISTENT_NAME_NAME);

    Function<ObjectExpression, String> nameExtract = new Function<ObjectExpression, String>() {
        @Override
        public String apply(ObjectExpression input) {
            return connectedTypeNameProp.apply(input).getOne().asString();
        }
    };

    List<ObjectExpression> sortedOes = Ordering.natural().onResultOf(nameExtract).sortedCopy(connectedOEs);
    for (int i = 0; i < sortedOes.size(); i++) {
        ObjectExpression oe = sortedOes.get(i);
        String name = oe.getValues(connectedTypeNameProp).getOne().asString();
        pairs.add(extractPair(name, oe, connectedType, serverURL));
    }

    return pairs;
}

From source file:org.sonar.server.ws.ListingWs.java

void handleList(List<Controller> controllers, Request request, Response response) {
    boolean includeInternals = request.mandatoryParamAsBoolean("include_internals");
    JsonWriter writer = response.newJsonWriter();
    writer.beginObject();
    writer.name("webServices").beginArray();

    // sort controllers by path
    Ordering<Controller> ordering = Ordering.natural().onResultOf(new Function<Controller, String>() {
        @Override
        public String apply(Controller controller) {
            return controller.path();
        }
    });
    for (Controller controller : ordering.sortedCopy(controllers)) {
        writeController(writer, controller, includeInternals);
    }
    writer.endArray();
    writer.endObject();
    writer.close();
}

From source file:de.faustedition.reasoning.InscriptionPrecedenceResource.java

@Override
protected void doInit() throws ResourceException {
    super.doInit();

    final VerseInterval verseInterval = VerseManager.fromRequestAttibutes(getRequestAttributes());

    final Multimap<String, GraphVerseInterval> intervalIndex = Multimaps
            .index(verseManager.forInterval(verseInterval), new Function<GraphVerseInterval, String>() {
                @Override
                public String apply(@Nullable GraphVerseInterval input) {

                    //final String sigil = transcriptManager.materialUnitForTranscript(input.getTranscript(textRepo)).toString();
                    final String sigil = ((Document) (transcriptManager
                            .materialUnitForTranscript(input.getTranscript(textRepo)))).getSource().toString();
                    return sigil;
                }

            });

    inscriptions = Sets.newHashSet();
    for (String sigil : Ordering.natural().immutableSortedCopy(intervalIndex.keySet())) {
        final Inscription inscription = new Inscription(sigil);
        for (VerseInterval interval : intervalIndex.get(sigil)) {
            inscription.addInterval(interval.getStart(), interval.getEnd());
        }
        Preconditions.checkState(!inscription.isEmpty());
        inscriptions.add(inscription);
        //long materialUnitId = intervalIndex.get(sigil).iterator().next().getTranscript(textRepo).getMaterialUnitId();
        //Node node = graphDb.getNodeById(materialUnitId);
        Node node = transcriptManager.materialUnitForTranscript(
                intervalIndex.get(sigil).iterator().next().getTranscript(textRepo)).node;
        nodeMap.put(inscription, node);

    }
    for (Inscription subject : inscriptions) {
        for (Inscription object : inscriptions) {
            if (InscriptionRelations.syntagmaticallyPrecedesByFirstLine(subject, object)) {
                //            if (InscriptionRelations.syntagmaticallyPrecedesByAverage(subject, object)) {

                syntagmaticPrecedence.relate(subject, object);
            }
            if (InscriptionRelations.exclusivelyContains(subject, object)) {
                exclusiveContainment.relate(subject, object);
            }
            if (InscriptionRelations.paradigmaticallyContains(subject, object)) {
                paradigmaticContainment.relate(subject, object);
            }

        }
    }
    try {
        explicitPrecedence = new GraphBasedRelation<Inscription>(nodeMap,
                new FaustURI(new URI("faust://secondary/gruss2011")));
    } catch (URISyntaxException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

    List<Premise<Inscription>> premises = new ArrayList<Premise<Inscription>>();
    //      premises.addAll(premisesFromGeneticSources());
    premises.addAll(premisesFromInference());

    precedence = new PremiseBasedRelation<Inscription>(premises);
    //      precedence = new LastPremiseRelation<Inscription> (premises);

    Relation<Inscription> test = Util
            .wrapTransitive(new PremiseBasedRelation<Inscription>(premisesFromInference()), inscriptions);

    Relation<Inscription> check = Util
            .wrapTransitive(new PremiseBasedRelation<Inscription>(premisesFromGeneticSources()), inscriptions);

    logger.info("Genetic graph statistics: ");
    logger.info("  Coverage: " + Statistics.completeness(precedence, check, inscriptions) * 100 + ", Recall: "
            + Statistics.recall(precedence, check, inscriptions) * 100 + ", Accuracy : "
            + Statistics.correctness(precedence, check, inscriptions) * 100

    );

}
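
immutableSortedCopy, used above to iterate the sigils in natural order, returns an ImmutableList sorted by the ordering. A minimal sketch with hypothetical data:

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Ordering;
import com.google.common.collect.Sets;
import java.util.Set;

// Copy an unordered set into an immutable list sorted by natural order.
Set<String> sigils = Sets.newHashSet("C", "A", "B");
ImmutableList<String> sorted = Ordering.natural().immutableSortedCopy(sigils);
// sorted is [A, B, C]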