Example usage for com.google.common.collect Table column

Introduction

On this page you can find example usage for the column method of com.google.common.collect Table.

Prototype

Map<R, V> column(C columnKey);

Document

Returns a view of all mappings that have the given column key.
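
As a minimal sketch (assuming Guava's HashBasedTable; the class name TableColumnExample is illustrative), the following shows that column(columnKey) returns a live view: later changes to the table are visible through the returned map, and writes through the map update the table.

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;

import java.util.Map;

public class TableColumnExample {
    public static void main(String[] args) {
        Table<String, String, Integer> table = HashBasedTable.create();
        table.put("row1", "col1", 1);
        table.put("row1", "col2", 2);

        // View of all mappings whose column key is "col1": {row1=1}
        Map<String, Integer> col = table.column("col1");
        System.out.println(col);

        // The view is live: a later put into the table shows up in it.
        table.put("row2", "col1", 3);
        System.out.println(col); // {row1=1, row2=3}

        // Writes through the view update the table as well.
        col.put("row3", 4);
        System.out.println(table.get("row3", "col1")); // 4
    }
}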

Usage

From source file:org.clueminer.eval.external.AdjustedRandCorrected.java
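
Here, column(c.getName()) extracts the per-cluster slice of a class-by-cluster contingency table, which is copied into a confusion matrix with marginal sums.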

/**
 * Counts the number of classes in each cluster when the number of
 * classes is not known in advance.
 *
 * @param clust the clustering to evaluate
 * @return a confusion matrix of class counts per cluster, with an extra
 *         row and column holding the marginal sums
 */
public int[][] countMutual(Clustering<E, C> clust) {
    Table<String, String, Integer> table = contingencyTable(clust);
    Set<String> rows = table.rowKeySet();
    String[] rowLabels = rows.toArray(new String[rows.size()]);
    int[][] conf = new int[rowLabels.length + 1][clust.size() + 1];

    int k = 0;
    for (Cluster c : clust) {
        Map<String, Integer> col = table.column(c.getName());
        for (int i = 0; i < rowLabels.length; i++) {
            if (col.containsKey(rowLabels[i])) {
                conf[i][k] = col.get(rowLabels[i]);
                conf[i][clust.size()] += conf[i][k];
            }
            conf[rows.size()][k] += conf[i][k];
        }
        k++;
    }
    return conf;
}

From source file:no.ssb.vtl.script.operations.join.OuterJoinOperation.java
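
This outer-join merger uses column(rightDataset) to look up, for the right-hand dataset, the mapping from result components to source components, then copies the matching values into the merged data point.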

@Override
protected BiFunction<DataPoint, DataPoint, DataPoint> getMerger(final Dataset leftDataset,
        final Dataset rightDataset) {

    final Table<Component, Dataset, Component> componentMapping = getComponentMapping();
    final DataStructure structure = getDataStructure();
    final DataStructure rightStructure = rightDataset.getDataStructure();

    return (left, right) -> {

        /*
         * We overwrite the ids if right != null for simplicity.
         */
        DataPoint result;
        if (left != null) {
            result = DataPoint.create(left);
        } else {
            result = DataPoint.create(structure.size());
        }

        if (right != null) {
            Map<Component, VTLObject> leftMap = structure.asMap(result);
            Map<Component, VTLObject> rightMap = rightStructure.asMap(right);
            for (Map.Entry<Component, Component> mapping : componentMapping.column(rightDataset).entrySet()) {
                Component to = mapping.getKey();
                Component from = mapping.getValue();
                leftMap.put(to, rightMap.get(from));
            }
        }

        return DataPoint.create(result);
    };
}

From source file:es.upm.dit.xsdinferencer.generation.generatorimpl.statisticsgeneration.StatisticResultsDocGeneratorImpl.java
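
While generating the statistics document, column(node) fetches all recorded values (and their statistics) for one schema node out of a value-by-node table.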

/**
 * This method generates an element with info about the nodes under a complex type (either elements of the complex type or attributes of elements of a concrete complex type).
 * For each node, a child element is generated with the info of its occurrences, a child with the info of its values (if there is any), which contains one child per value
 * with its info, and a child with the numeric values info, if any.
 * @param elementName The name of the generated element.
 * @param subElementName The name of each element which contains the info of the node at a path.
 * @param sourceMap The map that contains the info of the nodes of the complex type
 * @param valuesElementName The name of the child element with the values info
 * @param valuesSubElementName The name of each child of the valuesElementName child, with the info of a concrete value
 * @param valuesTable The table that contains the info of each value of each node
 * @param numericValuesInfo The info of the numeric values
 * @param numericValuesStatisticsElementName The name of the child element which will contain the numeric statistics info
 * @return An element with all the information described
 */
protected <T extends SchemaNode> Element generateNodesOfComplexTypesInfoElements(String elementName,
        String subElementName, Map<T, BasicStatisticsEntry> sourceMap, String valuesElementName,
        String valuesSubElementName, Table<String, SchemaNode, BasicStatisticsEntry> valuesTable,
        Map<SchemaNode, BasicStatisticsEntry> numericValuesInfo, String numericValuesStatisticsElementName) {
    Element element = new Element(elementName, STATISTICS_NAMESPACE);
    for (T node : sourceMap.keySet()) {
        String nodeName = node.getName();
        String nodeNamespace = node.getNamespace();
        Map<String, BasicStatisticsEntry> valuesOfNode = valuesTable.column(node);
        Element currentElement = generateBasicStatisticsEntryBasedElement(subElementName, null, nodeName,
                nodeNamespace, null, sourceMap.get(node), true, true);
        element.addContent(currentElement);
        if (!valuesOfNode.isEmpty()) {
            Element valuesOfNodeElement = new Element("values", STATISTICS_NAMESPACE);
            for (String value : valuesOfNode.keySet()) {
                Element currentValueElement = generateBasicStatisticsEntryBasedElement("value", null, nodeName,
                        nodeNamespace, value, valuesOfNode.get(value), true, true);
                valuesOfNodeElement.addContent(currentValueElement);
            }
            currentElement.addContent(valuesOfNodeElement);
        }
        BasicStatisticsEntry numericValuesInfoOfNode = numericValuesInfo.get(node);
        if (numericValuesInfoOfNode != null) {
            Element numericValuesInfoOfNodeElement = generateBasicStatisticsEntryBasedElement(
                    "numericValuesStatistics", numericValuesInfoOfNode, false, false);
            currentElement.addContent(numericValuesInfoOfNodeElement);
        }
    }
    element.sortChildren(BASIC_ENTRY_ELEMENT_COMPARATOR);
    return element;
}

From source file:com.intuit.wasabi.assignment.impl.AssignmentsImpl.java
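
A Table keyed by experiment ID and label is queried with column(experimentLabel) to find any experiment carrying the given label.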

protected Experiment getExperimentFromTable(Table<Experiment.ID, Experiment.Label, Experiment> allExperiments,
        Experiment.Label experimentLabel) {

    Collection<Experiment> experiments = allExperiments.column(experimentLabel).values();
    return experiments.isEmpty() ? null : experiments.iterator().next();
}

From source file:com.griddynamics.jagger.engine.e1.scenario.DefaultWorkloadSuggestionMaker.java
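
Thread/delay statistics live in a Table keyed by thread count and delay; column(0) selects the measurements taken with zero delay, from which the thread count closest to the desired TPS is chosen.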

@Override
public WorkloadConfiguration suggest(BigDecimal desiredTps, NodeTpsStatistics statistics, int maxThreads) {
    log.debug("Going to suggest workload configuration. desired tps {}. statistics {}", desiredTps, statistics);

    Table<Integer, Integer, Pair<Long, BigDecimal>> threadDelayStats = statistics.getThreadDelayStats();

    if (areEqual(desiredTps, BigDecimal.ZERO)) {
        return WorkloadConfiguration.with(0, 0);
    }

    if (threadDelayStats.isEmpty()) {
        throw new IllegalArgumentException("Cannot suggest workload configuration");
    }

    if (!threadDelayStats.contains(CALIBRATION_CONFIGURATION.getThreads(),
            CALIBRATION_CONFIGURATION.getDelay())) {
        log.debug("Statistics is empty. Going to return calibration info.");
        return CALIBRATION_CONFIGURATION;
    }
    if (threadDelayStats.size() == 2 && areEqual(threadDelayStats.get(1, 0).getSecond(), BigDecimal.ZERO)) {
        log.warn("No calibration info. Going to retry.");
        return CALIBRATION_CONFIGURATION;
    }

    Map<Integer, Pair<Long, BigDecimal>> noDelays = threadDelayStats.column(0);

    Integer threadCount = findClosestPoint(desiredTps, noDelays);

    if (threadCount == 0) {
        threadCount = 1;
    }

    if (threadCount > maxThreads) {
        log.warn("{} calculated max {} allowed", threadCount, maxThreads);
        threadCount = maxThreads;
    }

    int currentThreads = statistics.getCurrentWorkloadConfiguration().getThreads();
    int diff = threadCount - currentThreads;
    if (diff > maxDiff) {
        log.debug("Increasing to {} is required current thread count is {} max allowed diff is {}",
                new Object[] { threadCount, currentThreads, maxDiff });
        return WorkloadConfiguration.with(currentThreads + maxDiff, 0);
    }

    if (noDelays.containsKey(threadCount) && noDelays.get(threadCount).getSecond().compareTo(desiredTps) < 0) {
        if (log.isDebugEnabled()) {
            log.debug("Statistics for current point has been already calculated and it is less then desired one"
                    + "\nLook like we have achieved maximum for this node."
                    + "\nGoing to help max tps detector.");
        }
        int threads = currentThreads;
        if (threads < maxThreads) {
            threads++;
        }
        return WorkloadConfiguration.with(threads, 0);
    }

    if (!threadDelayStats.contains(threadCount, 0)) {
        return WorkloadConfiguration.with(threadCount, 0);
    }

    Map<Integer, Pair<Long, BigDecimal>> delays = threadDelayStats.row(threadCount);

    if (delays.size() == 1) {
        int delay = suggestDelay(delays.get(0).getSecond(), threadCount, desiredTps);

        return WorkloadConfiguration.with(threadCount, delay);
    }

    Integer delay = findClosestPoint(desiredTps, threadDelayStats.row(threadCount));

    return WorkloadConfiguration.with(threadCount, delay);

}

From source file:eu.lp0.cursus.scoring.scores.impl.AveragingRacePointsData.java
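
After the averaged scores are filled in row by row, column(race) returns each pilot's points for the requested race as a map view.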

@Override
protected Map<Pilot, Integer> calculateRacePoints(Race race) {
    Table<Pilot, Race, Integer> racePoints = ArrayTable.create(scoresBeforeAveraging.getRacePoints());

    if (method != AveragingMethod.SET_NULL) {
        for (Pilot pilot : scores.getPilots()) {
            // Calculate an average score using the other races
            if (racePoints.row(pilot).get(race) == null) {
                Set<Race> otherRaces = getOtherRacesForPilot(pilot, race, true);

                // Add the scores from the other races
                int points = 0;
                for (Race otherRace : otherRaces) {
                    points += racePoints.row(pilot).get(otherRace);
                }

                // Calculate and apply the average
                points = BigDecimal.valueOf(points).divide(BigDecimal.valueOf(otherRaces.size()), rounding)
                        .intValue();
                racePoints.row(pilot).put(race, points);
            }
        }
    } else {
        // Do nothing, the scores for those pilots will be null
    }

    return racePoints.column(race);
}

From source file:co.cask.tigon.internal.app.runtime.flow.FlowletProgramRunner.java
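
column(flowletName) selects, for each producer node, the queue specifications consumed by the given flowlet; these drive the creation of the queue readers.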

private ProcessSpecificationFactory processSpecificationFactory(final BasicFlowletContext flowletContext,
        final DataFabricFacade dataFabricFacade, final QueueReaderFactory queueReaderFactory,
        final String flowletName, final Table<Node, String, Set<QueueSpecification>> queueSpecs,
        final ImmutableList.Builder<ConsumerSupplier<?>> queueConsumerSupplierBuilder,
        final SchemaCache schemaCache) {

    return new ProcessSpecificationFactory() {
        @Override
        public <T> ProcessSpecification create(Set<String> inputNames, Schema schema, TypeToken<T> dataType,
                ProcessMethod<T> method, ConsumerConfig consumerConfig, int batchSize, Tick tickAnnotation) {
            List<QueueReader<T>> queueReaders = Lists.newLinkedList();

            for (Map.Entry<Node, Set<QueueSpecification>> entry : queueSpecs.column(flowletName).entrySet()) {
                for (QueueSpecification queueSpec : entry.getValue()) {
                    final QueueName queueName = queueSpec.getQueueName();

                    if (queueSpec.getInputSchema().equals(schema)
                            && (inputNames.contains(queueName.getSimpleName())
                                    || inputNames.contains(FlowletDefinition.ANY_INPUT))) {

                        int numGroups = getNumGroups(Iterables.concat(queueSpecs.row(entry.getKey()).values()),
                                queueName);
                        Function<ByteBuffer, T> decoder = wrapInputDecoder(flowletContext, queueName,
                                createInputDatumDecoder(dataType, schema, schemaCache));

                        ConsumerSupplier<QueueConsumer> consumerSupplier = ConsumerSupplier
                                .create(dataFabricFacade, queueName, consumerConfig, numGroups);
                        queueConsumerSupplierBuilder.add(consumerSupplier);
                        queueReaders.add(
                                queueReaderFactory.createQueueReader(consumerSupplier, batchSize, decoder));

                    }
                }
            }

            // If inputs are needed but there is no available input queue, return null
            if (!inputNames.isEmpty() && queueReaders.isEmpty()) {
                return null;
            }
            return new ProcessSpecification<T>(new RoundRobinQueueReader<T>(queueReaders), method,
                    tickAnnotation);
        }
    };
}

From source file:co.cask.cdap.internal.app.runtime.distributed.DistributedProgramRuntimeService.java
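
For each flowlet, column(flowletId) yields the queue specifications the flowlet consumes, which are flattened into a flowlet-to-queue multimap.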

private Multimap<String, QueueName> getFlowletQueues(Program program, FlowSpecification flowSpec) {
    // Generate all queues specifications
    Id.Application appId = Id.Application.from(program.getNamespaceId(), program.getApplicationId());
    Table<QueueSpecificationGenerator.Node, String, Set<QueueSpecification>> queueSpecs = new SimpleQueueSpecificationGenerator(
            appId).create(flowSpec);

    // For storing result from flowletId to queue.
    ImmutableSetMultimap.Builder<String, QueueName> resultBuilder = ImmutableSetMultimap.builder();

    // Loop through each flowlet
    for (Map.Entry<String, FlowletDefinition> entry : flowSpec.getFlowlets().entrySet()) {
        String flowletId = entry.getKey();
        long groupId = FlowUtils.generateConsumerGroupId(program, flowletId);
        int instances = entry.getValue().getInstances();

        // For each queue that the flowlet is a consumer, store the number of instances for this flowlet
        for (QueueSpecification queueSpec : Iterables.concat(queueSpecs.column(flowletId).values())) {
            resultBuilder.put(flowletId, queueSpec.getQueueName());
        }
    }
    return resultBuilder.build();
}

From source file:no.ssb.vtl.script.operations.join.InnerJoinOperation.java
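
While chaining inner joins, column(dataset) supplies the per-dataset component mapping that each JoinKeyExtractor uses to translate components.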

@Override
public Optional<Stream<DataPoint>> getData(Order requestedOrder, Dataset.Filtering filtering,
        Set<String> components) {

    // Try to create a compatible order.
    // If not, the caller will have to sort the result manually.
    Optional<Order> compatibleOrder = createCompatibleOrder(getDataStructure(), getCommonIdentifiers(),
            requestedOrder);
    if (!compatibleOrder.isPresent()) {
        return Optional.empty();
    }

    Order requiredOrder = compatibleOrder.get();

    // Compute the predicate
    Order predicate = computePredicate(requiredOrder);

    Iterator<Dataset> iterator = datasets.values().iterator();
    Dataset left = iterator.next();
    Dataset right = left;

    // Close all children
    Closer closer = Closer.create();
    try {

        Table<Component, Dataset, Component> componentMapping = getComponentMapping();
        Stream<DataPoint> result = getOrSortData(left,
                adjustOrderForStructure(requiredOrder, left.getDataStructure()), filtering, components)
                        .peek(new DataPointCapacityExpander(getDataStructure().size()));
        closer.register(result);

        boolean first = true;
        while (iterator.hasNext()) {
            left = right;
            right = iterator.next();

            Stream<DataPoint> rightStream = getOrSortData(right,
                    adjustOrderForStructure(requiredOrder, right.getDataStructure()), filtering, components);
            closer.register(rightStream);

            // The first left stream uses its own structure. After that, the left data structure
            // will always be the resulting structure. We use a flag (first) to handle the first case,
            // since the hotfix needed to be released quickly; this code should be refactored.

            result = StreamSupport.stream(new InnerJoinSpliterator<>(
                    new JoinKeyExtractor(first ? left.getDataStructure() : getDataStructure(), predicate,
                            first ? componentMapping.column(left)::get : c -> c),
                    new JoinKeyExtractor(right.getDataStructure(), predicate, componentMapping.column(right)),
                    predicate, new InnerJoinMerger(getDataStructure(), right.getDataStructure()),
                    result.spliterator(), rightStream.spliterator()), false);

            first = false;
        }

        // Close all the underlying streams.
        return Optional.of(result.onClose(() -> {
            try {
                closer.close();
            } catch (IOException e) {
                // ignore (cannot happen).
            }
        }));

    } catch (Exception ex) {
        try {
            closer.close();
        } catch (IOException ioe) {
            ex.addSuppressed(ioe);
        }
        throw ex;
    }
}

From source file:co.cask.cdap.internal.app.runtime.flow.FlowletProgramRunner.java
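
As in the Tigon variant above, column(flowletName) selects the queue specifications consumed by the flowlet, here with separate handling for stream and queue inputs.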

private ProcessSpecificationFactory processSpecificationFactory(final BasicFlowletContext flowletContext,
        final DataFabricFacade dataFabricFacade, final QueueReaderFactory queueReaderFactory,
        final String flowletName, final Table<Node, String, Set<QueueSpecification>> queueSpecs,
        final ImmutableList.Builder<ConsumerSupplier<?>> queueConsumerSupplierBuilder,
        final SchemaCache schemaCache) {

    final Id.Program program = Id.Flow.from(flowletContext.getNamespaceId(), flowletContext.getApplicationId(),
            ProgramType.FLOW, flowletContext.getFlowId());
    return new ProcessSpecificationFactory() {
        @Override
        public <T> ProcessSpecification create(Set<String> inputNames, Schema schema, TypeToken<T> dataType,
                ProcessMethod<T> method, ConsumerConfig consumerConfig, int batchSize, Tick tickAnnotation) {
            List<QueueReader<T>> queueReaders = Lists.newLinkedList();

            for (Map.Entry<Node, Set<QueueSpecification>> entry : queueSpecs.column(flowletName).entrySet()) {
                for (QueueSpecification queueSpec : entry.getValue()) {
                    final QueueName queueName = queueSpec.getQueueName();

                    if (queueSpec.getInputSchema().equals(schema)
                            && (inputNames.contains(queueName.getSimpleName())
                                    || inputNames.contains(FlowletDefinition.ANY_INPUT))) {

                        if (entry.getKey().getType() == FlowletConnection.Type.STREAM) {
                            ConsumerSupplier<StreamConsumer> consumerSupplier = ConsumerSupplier.create(
                                    program.getNamespace(), flowletContext.getOwners(), usageRegistry,
                                    dataFabricFacade, queueName, consumerConfig);
                            queueConsumerSupplierBuilder.add(consumerSupplier);
                            // No decoding is needed, as a process method can only have StreamEvent as type for consuming stream
                            Function<StreamEvent, T> decoder = wrapInputDecoder(flowletContext, null, queueName,
                                    new Function<StreamEvent, T>() {
                                        @Override
                                        @SuppressWarnings("unchecked")
                                        public T apply(StreamEvent input) {
                                            return (T) input;
                                        }
                                    });

                            queueReaders.add(queueReaderFactory.createStreamReader(consumerSupplier, batchSize,
                                    decoder));

                        } else {
                            int numGroups = getNumGroups(
                                    Iterables.concat(queueSpecs.row(entry.getKey()).values()), queueName);
                            Function<ByteBuffer, T> decoder = wrapInputDecoder(flowletContext,
                                    entry.getKey().getName(), // the producer flowlet,
                                    queueName, createInputDatumDecoder(dataType, schema, schemaCache));

                            ConsumerSupplier<QueueConsumer> consumerSupplier = ConsumerSupplier.create(
                                    program.getNamespace(), flowletContext.getOwners(), usageRegistry,
                                    dataFabricFacade, queueName, consumerConfig, numGroups);
                            queueConsumerSupplierBuilder.add(consumerSupplier);
                            queueReaders.add(
                                    queueReaderFactory.createQueueReader(consumerSupplier, batchSize, decoder));
                        }
                    }
                }
            }

            // If inputs are needed but there is no available input queue, return null
            if (!inputNames.isEmpty() && queueReaders.isEmpty()) {
                return null;
            }
            return new ProcessSpecification<>(new RoundRobinQueueReader<>(queueReaders), method,
                    tickAnnotation);
        }
    };
}