Example usage for com.google.common.collect Table column

Introduction

On this page you can find example usages of the com.google.common.collect Table#column method.

Prototype

Map<R, V> column(C columnKey);

Document

Returns a view of all mappings that have the given column key.
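
The column method returns a live view: changes to the table show up in the map, and writes through the map update the table. Below is a minimal sketch of this behavior (the table contents and names are illustrative):

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;

import java.util.Map;

public class ColumnViewDemo {
    public static void main(String[] args) {
        Table<String, String, Integer> scores = HashBasedTable.create();
        scores.put("alice", "math", 90);
        scores.put("bob", "math", 75);
        scores.put("alice", "physics", 85);

        // All rowKey -> value mappings for the "math" column.
        Map<String, Integer> math = scores.column("math");
        System.out.println(math); // {alice=90, bob=75} (iteration order may vary)

        // The view is live: updating the table updates the map...
        scores.put("carol", "math", 60);
        System.out.println(math.containsKey("carol")); // true

        // ...and writing through the map updates the table.
        math.put("dave", 55);
        System.out.println(scores.get("dave", "math")); // 55
    }
}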

Usage

From source file:co.cask.cdap.internal.app.runtime.flow.FlowUtils.java

/**
 * Gets all consumer group configurations for the given queue.
 */
private static Set<ConsumerGroupConfig> getAllConsumerGroups(Program program, FlowSpecification flowSpec,
        QueueName queueName,
        Table<QueueSpecificationGenerator.Node, String, Set<QueueSpecification>> queueSpecs) {

    Set<ConsumerGroupConfig> groupConfigs = Sets.newHashSet();
    SchemaGenerator schemaGenerator = new ReflectionSchemaGenerator();

    // Get all the consumers of this queue.
    for (Map.Entry<String, FlowletDefinition> entry : flowSpec.getFlowlets().entrySet()) {
        String flowletId = entry.getKey();
        for (QueueSpecification queueSpec : Iterables.concat(queueSpecs.column(flowletId).values())) {
            if (!queueSpec.getQueueName().equals(queueName)) {
                continue;
            }

            try {
                // Inspect the flowlet consumer
                FlowletDefinition flowletDefinition = entry.getValue();
                Class<?> flowletClass = program.getClassLoader()
                        .loadClass(flowletDefinition.getFlowletSpec().getClassName());
                long groupId = generateConsumerGroupId(program, flowletId);

                addConsumerGroup(queueSpec, TypeToken.of(flowletClass), groupId,
                        flowletDefinition.getInstances(), schemaGenerator, groupConfigs);
            } catch (ClassNotFoundException e) {
                // Loading a Flowlet class should never fail here, since it is verified during deployment.
                throw Throwables.propagate(e);
            }
        }
    }

    return groupConfigs;
}
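
The cell values in queueSpecs are Sets, so queueSpecs.column(flowletId).values() yields a collection of sets, which Iterables.concat flattens into a single iterable. A minimal sketch of the same idiom, with illustrative names and data:

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Table;

import java.util.Set;

public class ColumnFlattenDemo {
    public static void main(String[] args) {
        Table<String, String, Set<String>> specs = HashBasedTable.create();
        specs.put("nodeA", "flowlet1", ImmutableSet.of("queueX", "queueY"));
        specs.put("nodeB", "flowlet1", ImmutableSet.of("queueZ"));
        specs.put("nodeA", "flowlet2", ImmutableSet.of("queueW"));

        // Flatten every set in the "flowlet1" column into one iterable.
        for (String queue : Iterables.concat(specs.column("flowlet1").values())) {
            System.out.println(queue); // queueX, queueY, queueZ (order may vary)
        }
    }
}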

From source file:co.cask.tigon.internal.app.runtime.flow.FlowUtils.java

/**
 * Configures all queues being used in a flow.
 *
 * @return A Multimap from flowletId to the QueueNames that the flowlet consumes from.
 */
public static Multimap<String, QueueName> configureQueue(Program program, FlowSpecification flowSpec,
        QueueAdmin queueAdmin) {
    // Generate all queues specifications
    Table<QueueSpecificationGenerator.Node, String, Set<QueueSpecification>> queueSpecs = new SimpleQueueSpecificationGenerator()
            .create(flowSpec);

    // For each queue in the flow, gather a map of consumer groupId to number of instances
    Table<QueueName, Long, Integer> queueConfigs = HashBasedTable.create();

    // For storing the resulting mapping from flowletId to queue.
    ImmutableSetMultimap.Builder<String, QueueName> resultBuilder = ImmutableSetMultimap.builder();

    // Loop through each flowlet
    for (Map.Entry<String, FlowletDefinition> entry : flowSpec.getFlowlets().entrySet()) {
        String flowletId = entry.getKey();
        long groupId = FlowUtils.generateConsumerGroupId(program, flowletId);
        int instances = entry.getValue().getInstances();

        // For each queue that the flowlet consumes from, store the number of instances of this flowlet
        for (QueueSpecification queueSpec : Iterables.concat(queueSpecs.column(flowletId).values())) {
            queueConfigs.put(queueSpec.getQueueName(), groupId, instances);
            resultBuilder.put(flowletId, queueSpec.getQueueName());
        }
    }

    try {
        // For each queue in the flow, configure it through QueueAdmin
        for (Map.Entry<QueueName, Map<Long, Integer>> row : queueConfigs.rowMap().entrySet()) {
            LOG.info("Queue config for {} : {}", row.getKey(), row.getValue());
            queueAdmin.configureGroups(row.getKey(), row.getValue());
        }
        return resultBuilder.build();
    } catch (Exception e) {
        LOG.error("Failed to configure queues", e);
        throw Throwables.propagate(e);
    }
}
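
This example pairs column() for the per-flowlet lookups with rowMap() for iterating queueConfigs row by row. A short sketch of the row-oriented counterpart, with illustrative data:

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;

import java.util.Map;

public class RowMapDemo {
    public static void main(String[] args) {
        // Row = queue name, column = consumer group id, value = instance count.
        Table<String, Long, Integer> configs = HashBasedTable.create();
        configs.put("queueA", 1L, 2);
        configs.put("queueA", 2L, 3);
        configs.put("queueB", 1L, 1);

        // Each entry maps a row key to that row's columnKey -> value map.
        for (Map.Entry<String, Map<Long, Integer>> row : configs.rowMap().entrySet()) {
            System.out.println(row.getKey() + " : " + row.getValue());
        }
    }
}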

From source file:com.przemo.etl.transformations.MovingAverageTransformation.java

/**
 * Calculates moving average on the values of the given column in the table. The table has to be sorted to give correct results.
 * The values in the column must be of a numeric type.
 * @param column the column whose values are averaged
 * @param period the moving-average period
 */
public MovingAverageTransformation(final String column, final int period) {
    this.calculatedColumn = column;
    this.transformation = new Function() {

        @Override
        public Object apply(Object f) {
            if (f instanceof Table && ((Table) f).containsColumn(column)) {
                Table t = (Table) f;
                if (t.column(column).size() >= period) {
                    Table tr = calculateMovingAverage(t, period, column);
                    if (tr != null) {
                        t.putAll(tr);
                    }
                }
            }
            return f;
        }
    };
}
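
Here t.column(column).size() counts how many rows actually hold a value for the column, gating whether a moving average over the requested period is possible. A minimal sketch of that size check, with illustrative data:

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;

public class ColumnSizeDemo {
    public static void main(String[] args) {
        Table<Integer, String, Double> t = HashBasedTable.create();
        t.put(1, "price", 10.0);
        t.put(2, "price", 11.0);
        t.put(3, "volume", 500.0);

        int period = 3;
        // Only rows 1 and 2 have a "price" value, so size() is 2.
        if (t.column("price").size() >= period) {
            System.out.println("enough data points");
        } else {
            System.out.println("only " + t.column("price").size() + " data points");
        }
    }
}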

From source file:com.przemo.conjunctions.Joiner.java

public Table join(Table[] tables, String columnJoinName, String[] resultingColumns) {
    if (tables != null && columnJoinName != null && tables.length > 0) {
        //tables[0] is the main table against which we're going to check the joining values
        Table main = tables[0];
        Map cl = main.column(columnJoinName);
        Set<Object> frows = cl.keySet();
        if (tables.length > 1 && main.containsColumn(columnJoinName)) {
            if (resultingColumns == null || resultingColumns.length == 0) {
                resultingColumns = concatAllColumns(tables);
            }
            filterJoinedRows(tables, columnJoinName, frows, cl);
            return buildJoinedTable(frows, columnJoinName, cl, tables, resultingColumns);
        } else {
            return main;
        }
    } else {
        return null;
    }
}
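
The join keys come from main.column(columnJoinName).keySet(): the row keys of every row that has a value in the join column. A minimal sketch, with illustrative names and data:

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;

import java.util.Map;
import java.util.Set;

public class JoinKeysDemo {
    public static void main(String[] args) {
        Table<Integer, String, Object> main = HashBasedTable.create();
        main.put(1, "id", "a");
        main.put(2, "id", "b");
        main.put(3, "name", "no id in this row");

        Map<Integer, Object> joinColumn = main.column("id");
        Set<Integer> joinRows = joinColumn.keySet();
        System.out.println(joinRows); // [1, 2] - row 3 has no "id" value
    }
}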

From source file:co.cask.cdap.internal.app.runtime.flow.FlowUtils.java

/**
 * Configures all queues being used in a flow.
 *
 * @return A Multimap from flowletId to the QueueNames that the flowlet consumes from.
 */
public static Multimap<String, QueueName> configureQueue(Program program, FlowSpecification flowSpec,
        StreamAdmin streamAdmin, QueueAdmin queueAdmin, TransactionExecutorFactory txExecutorFactory) {
    // Generate all queues specifications
    Id.Application appId = Id.Application.from(program.getNamespaceId(), program.getApplicationId());
    Table<QueueSpecificationGenerator.Node, String, Set<QueueSpecification>> queueSpecs = new SimpleQueueSpecificationGenerator(
            appId).create(flowSpec);

    // For each queue in the flow, gather all consumer groups information
    Multimap<QueueName, ConsumerGroupConfig> queueConfigs = HashMultimap.create();

    // Loop through each flowlet and generate the map from consumer flowlet id to queue
    ImmutableSetMultimap.Builder<String, QueueName> resultBuilder = ImmutableSetMultimap.builder();
    for (Map.Entry<String, FlowletDefinition> entry : flowSpec.getFlowlets().entrySet()) {
        String flowletId = entry.getKey();

        for (QueueSpecification queueSpec : Iterables.concat(queueSpecs.column(flowletId).values())) {
            resultBuilder.put(flowletId, queueSpec.getQueueName());
        }
    }

    // For each queue, gather all consumer groups.
    for (QueueSpecification queueSpec : Iterables.concat(queueSpecs.values())) {
        QueueName queueName = queueSpec.getQueueName();
        queueConfigs.putAll(queueName, getAllConsumerGroups(program, flowSpec, queueName, queueSpecs));
    }

    try {
        // Configure each stream consumer in the Flow. Also collects all queue configurers.
        final List<ConsumerGroupConfigurer> groupConfigurers = Lists.newArrayList();

        for (Map.Entry<QueueName, Collection<ConsumerGroupConfig>> entry : queueConfigs.asMap().entrySet()) {
            LOG.info("Queue config for {} : {}", entry.getKey(), entry.getValue());
            if (entry.getKey().isStream()) {
                Map<Long, Integer> configs = Maps.newHashMap();
                for (ConsumerGroupConfig config : entry.getValue()) {
                    configs.put(config.getGroupId(), config.getGroupSize());
                }
                streamAdmin.configureGroups(entry.getKey().toStreamId(), configs);
            } else {
                groupConfigurers.add(new ConsumerGroupConfigurer(queueAdmin.getQueueConfigurer(entry.getKey()),
                        entry.getValue()));
            }
        }

        // Configure queue transactionally
        try {
            Transactions.createTransactionExecutor(txExecutorFactory, groupConfigurers)
                    .execute(new TransactionExecutor.Subroutine() {
                        @Override
                        public void apply() throws Exception {
                            for (ConsumerGroupConfigurer configurer : groupConfigurers) {
                                configurer.configure();
                            }
                        }
                    });
        } finally {
            for (ConsumerGroupConfigurer configurer : groupConfigurers) {
                Closeables.closeQuietly(configurer);
            }
        }

        return resultBuilder.build();
    } catch (Exception e) {
        LOG.error("Failed to configure queues", e);
        throw Throwables.propagate(e);
    }
}
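
Besides the per-flowlet column(flowletId) lookups, this variant also flattens the entire table with Iterables.concat(queueSpecs.values()). A small sketch contrasting the two, with illustrative data:

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Table;

import java.util.Set;

public class ColumnVersusValuesDemo {
    public static void main(String[] args) {
        Table<String, String, Set<String>> specs = HashBasedTable.create();
        specs.put("nodeA", "flowlet1", ImmutableSet.of("queueX"));
        specs.put("nodeA", "flowlet2", ImmutableSet.of("queueY"));

        // One column only: the sets registered under "flowlet1".
        System.out.println(Iterables.concat(specs.column("flowlet1").values())); // [queueX]

        // The whole table: every set in every cell.
        System.out.println(Iterables.concat(specs.values())); // [queueX, queueY]
    }
}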

From source file:com.griddynamics.jagger.engine.e1.scenario.DefaultMaxTpsCalculator.java

@Override
public BigDecimal getMaxTps(NodeTpsStatistics stats) {
    Table<Integer, Integer, Pair<Long, BigDecimal>> threadDelayStats = stats.getThreadDelayStats();

    Map<Integer, Pair<Long, BigDecimal>> threadsTps = threadDelayStats.column(0);
    log.debug("Going to calculate max tps for {}", threadsTps);
    List<Integer> threads = Lists.newLinkedList(threadsTps.keySet());

    if (threads.size() < SAMPLE_SIZE) {
        log.debug("Not enough samples to guess a tps max on node");
        return null;
    }

    Collections.sort(threads);

    BigDecimal max = null;
    boolean isNonIncreasing = true;

    int start = threads.size() - SAMPLE_SIZE;
    int end = threads.size();

    BigDecimal previous = null;
    for (int i = start; i < end; i++) {
        Integer threadCount = threads.get(i);

        BigDecimal tps = threadsTps.get(threadCount).getSecond();

        if (previous == null) {
            max = tps;
            previous = tps;
        }

        if (tps.compareTo(previous) > 0) {
            isNonIncreasing = false;
            break;
        }

        if (previous.compareTo(max) > 0) {
            max = previous;
        }
    }

    if (!isNonIncreasing) {
        log.debug("Cannot guess max tps. According to stats tps is increasing.");
        return null;
    }

    return max;
}
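
threadDelayStats.column(0) selects, for every thread count (the row key), the statistics recorded at delay 0; column keys are not restricted to strings. A minimal sketch with an Integer column key and illustrative data:

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;

import java.util.Map;

public class IntegerColumnDemo {
    public static void main(String[] args) {
        // Row = thread count, column = delay (ms), value = measured TPS.
        Table<Integer, Integer, Double> stats = HashBasedTable.create();
        stats.put(1, 0, 100.0);
        stats.put(2, 0, 180.0);
        stats.put(2, 50, 120.0);

        // TPS per thread count at zero delay.
        Map<Integer, Double> zeroDelay = stats.column(0);
        System.out.println(zeroDelay); // {1=100.0, 2=180.0} (order may vary)
    }
}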

From source file:com.yammer.collections.azure.ColumnMapView.java

ColumnMapView(final Table<R, C, V> backingTable) {
    this.backingTable = backingTable;
    valueCreator = new Function<C, Map<R, V>>() {
        @Override
        public Map<R, V> apply(C key) {
            return backingTable.column(key);
        }
    };
    entryConstructor = new Function<C, Entry<C, Map<R, V>>>() {
        @Override
        public Entry<C, Map<R, V>> apply(C input) {
            return new ColumnMapViewEntry<>(backingTable, ColumnMapView.this, input);
        }
    };
}
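
This class lazily materializes a columnKey -> map view by calling backingTable.column(key) on demand; note that Guava's Table#columnMap() offers a comparable built-in view. A minimal sketch of columnMap(), with illustrative data:

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;

import java.util.Map;

public class ColumnMapDemo {
    public static void main(String[] args) {
        Table<String, String, Integer> t = HashBasedTable.create();
        t.put("r1", "c1", 1);
        t.put("r2", "c1", 2);
        t.put("r1", "c2", 3);

        // columnKey -> (rowKey -> value), equivalent to calling column() per key.
        Map<String, Map<String, Integer>> byColumn = t.columnMap();
        System.out.println(byColumn.get("c1")); // {r1=1, r2=2} (order may vary)
    }
}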

From source file:com.assylias.jbloomberg.HistoricalData.java

/**
 * Method to build queries in order to retrieve a specific table, column, row or cell.
 *
 * @param field the field for which data is required
 *
 * @return a query builder to build the query.
 */
public synchronized ResultForField forField(String field) {
    Table<LocalDate, String, TypedObject> forField = TreeBasedTable.create();
    for (Map.Entry<String, Table<LocalDate, String, TypedObject>> e : data.entrySet()) {
        String ticker = e.getKey();
        Map<LocalDate, TypedObject> fieldData = e.getValue().column(field);
        forField.column(ticker).putAll(fieldData);
    }
    return new ResultForField(forField);
}
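
forField.column(ticker).putAll(fieldData) writes through the column view: each (date, value) pair lands in the underlying table as a (date, ticker, value) cell. A minimal sketch of writing through a column view, with illustrative names and data:

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;

import java.util.HashMap;
import java.util.Map;

public class ColumnPutAllDemo {
    public static void main(String[] args) {
        Table<String, String, Double> prices = HashBasedTable.create();

        Map<String, Double> ibm = new HashMap<>();
        ibm.put("2024-01-02", 160.0);
        ibm.put("2024-01-03", 161.5);

        // putAll on the view inserts (rowKey, "IBM", value) cells into the table.
        prices.column("IBM").putAll(ibm);
        System.out.println(prices.get("2024-01-02", "IBM")); // 160.0
    }
}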

From source file:mtsar.processors.answer.KOSAggregator.java

private Table<Integer, Integer, Double> workersUpdate(Table<Integer, Integer, Short> graph,
        Table<Integer, Integer, Double> xs) {
    final Table<Integer, Integer, Double> ys = HashBasedTable.create(graph.rowKeySet().size(),
            graph.columnKeySet().size());

    for (final Table.Cell<Integer, Integer, Short> cell : graph.cellSet()) {
        double sumProduct = 0.0;

        final int taskId = cell.getRowKey(), workerId = cell.getColumnKey();
        final Map<Integer, Short> tasks = graph.column(workerId);

        for (final Map.Entry<Integer, Short> task : tasks.entrySet()) {
            if (task.getKey() == taskId)
                continue;
            sumProduct += task.getValue() * xs.get(task.getKey(), workerId);
        }

        ys.put(taskId, workerId, sumProduct);
    }

    return ys;
}
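
graph.column(workerId) collects every task the worker answered; the loop then sums over the worker's other answers, skipping the cell being updated. A small sketch of iterating a column's entries this way, with illustrative data:

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;

import java.util.Map;

public class ColumnEntriesDemo {
    public static void main(String[] args) {
        // Row = task id, column = worker id, value = answer (+1 or -1).
        Table<Integer, Integer, Short> graph = HashBasedTable.create();
        graph.put(1, 7, (short) 1);
        graph.put(2, 7, (short) -1);
        graph.put(3, 7, (short) 1);

        int excludedTask = 1;
        int sum = 0;
        for (Map.Entry<Integer, Short> e : graph.column(7).entrySet()) {
            if (e.getKey() == excludedTask) {
                continue; // skip the task whose estimate is being updated
            }
            sum += e.getValue();
        }
        System.out.println(sum); // 0 (tasks 2 and 3 contribute -1 and +1)
    }
}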

From source file:org.terasology.asset.AssetManagerImpl.java

@Override
public <D extends AssetData> void removeAssetSource(AssetSource source) {
    assetSources.remove(source.getSourceId());
    for (AssetUri override : source.listOverrides()) {
        if (overrides.get(override).equals(source)) {
            overrides.remove(override);
            Asset<D> asset = (Asset<D>) assetCache.get(override);
            if (asset != null) {
                if (TerasologyConstants.ENGINE_MODULE.equals(override.getModuleName())) {
                    AssetData data = loadAssetData(override, true);
                    asset.reload((D) data);
                } else {
                    dispose(asset);
                }
            }
        }
    }
    for (Table<Name, Name, AssetUri> table : uriLookup.values()) {
        Map<Name, AssetUri> columnMap = table.column(source.getSourceId());
        for (AssetUri value : columnMap.values()) {
            Asset<?> asset = assetCache.remove(value);
            if (asset != null) {
                asset.dispose();
            }
        }
        columnMap.clear();
    }
}
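
Because table.column(...) is a live view, the final columnMap.clear() removes those cells from the backing uriLookup table itself. A minimal sketch of clearing through the view, with illustrative data:

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;

public class ColumnClearDemo {
    public static void main(String[] args) {
        Table<String, String, String> t = HashBasedTable.create();
        t.put("r1", "source1", "uriA");
        t.put("r2", "source1", "uriB");
        t.put("r1", "source2", "uriC");

        // Clearing the view deletes the column's cells from the table.
        t.column("source1").clear();
        System.out.println(t.size()); // 1 - only the "source2" cell remains
    }
}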