Example usage for org.apache.commons.collections CollectionUtils subtract

Introduction

This page collects example usages of CollectionUtils.subtract from org.apache.commons.collections, drawn from open-source projects.

Prototype

public static Collection subtract(final Collection a, final Collection b) 

Document

Returns a new Collection containing a - b. The cardinality of each element e in the returned collection is the cardinality of e in a minus the cardinality of e in b, or zero, whichever is greater.
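
The following is a minimal, self-contained sketch of this behavior (the class name SubtractExample is purely illustrative), assuming commons-collections 3.x on the classpath; subtract there returns a raw Collection, hence the unchecked-conversion suppression.

import java.util.Arrays;
import java.util.Collection;
import java.util.List;

import org.apache.commons.collections.CollectionUtils;

public class SubtractExample {
    public static void main(String[] args) {
        List<String> a = Arrays.asList("x", "x", "y", "z");
        List<String> b = Arrays.asList("x", "z", "w");

        // "x" appears twice in a and once in b, so one "x" remains;
        // "z" cancels out; "w" appears only in b and is ignored.
        @SuppressWarnings("unchecked")
        Collection<String> result = CollectionUtils.subtract(a, b);

        System.out.println(result); // prints [x, y]
    }
}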

Usage

From source file: org.apache.eagle.alert.engine.spout.CorrelationSpout.java

@SuppressWarnings("unchecked")
public void onReload(final SpoutSpec newMeta, Map<String, StreamDefinition> sds) throws Exception {
    // calculate topic create/remove/update
    List<String> topics = getTopics(newMeta);
    List<String> cachedTopics = getTopics(cachedSpoutSpec);
    Collection<String> newTopics = CollectionUtils.subtract(topics, cachedTopics);
    Collection<String> removeTopics = CollectionUtils.subtract(cachedTopics, topics);
    Collection<String> updateTopics = CollectionUtils.intersection(topics, cachedTopics);

    LOG.info("Topics were added={}, removed={}, modified={}", newTopics, removeTopics, updateTopics);

    // build lookup table for scheme
    Map<String, String> newSchemaName = new HashMap<String, String>();
    Map<String, Map<String, String>> dataSourceProperties = new HashMap<>();
    for (Kafka2TupleMetadata ds : newMeta.getKafka2TupleMetadataMap().values()) {
        newSchemaName.put(ds.getTopic(), ds.getSchemeCls());
        dataSourceProperties.put(ds.getTopic(), ds.getProperties());
    }

    // copy and swap
    Map<String, KafkaSpoutWrapper> newKafkaSpoutList = new HashMap<>(this.kafkaSpoutList);
    // iterate new topics and then create KafkaSpout
    for (String topic : newTopics) {
        KafkaSpoutWrapper wrapper = newKafkaSpoutList.get(topic);
        if (wrapper != null) {
            LOG.warn(MessageFormat.format(
                    "try to create new topic {0}, but found in the active spout list, this may indicate some inconsistency",
                    topic));
            continue;
        }
        KafkaSpoutWrapper newWrapper = createKafkaSpout(
                ConfigFactory.parseMap(dataSourceProperties.get(topic)).withFallback(this.config), conf,
                context, collector, topic, newSchemaName.get(topic), newMeta, sds);
        newKafkaSpoutList.put(topic, newWrapper);
    }
    // iterate remove topics and then close KafkaSpout
    for (String topic : removeTopics) {
        KafkaSpoutWrapper wrapper = newKafkaSpoutList.get(topic);
        if (wrapper == null) {
            LOG.warn(MessageFormat.format(
                    "try to remove topic {0}, but not found in the active spout list, this may indicate some inconsistency",
                    topic));
            continue;
        }
        removeKafkaSpout(wrapper);
        newKafkaSpoutList.remove(topic);
    }

    // iterate update topic and then update metadata
    for (String topic : updateTopics) {
        KafkaSpoutWrapper spoutWrapper = newKafkaSpoutList.get(topic);
        if (spoutWrapper == null) {
            LOG.warn(MessageFormat.format(
                    "try to update topic {0}, but not found in the active spout list, this may indicate some inconsistency",
                    topic));
            continue;
        }
        spoutWrapper.update(newMeta, sds);
    }

    // swap
    this.cachedSpoutSpec = newMeta;
    this.kafkaSpoutList = newKafkaSpoutList;
    this.sds = sds;
}

From source file: org.apache.eagle.jpm.mr.running.storm.MRRunningJobFetchSpout.java

private Set<String> getFinishedAppIds(Set<String> runningAppIdsAtThisTime,
        Set<String> runningAppIdsAtPreviousTime) {
    Set<String> finishedAppIds = new HashSet<>(
            CollectionUtils.subtract(runningAppIdsAtPreviousTime, runningAppIdsAtThisTime));
    return finishedAppIds;
}

From source file: org.apache.eagle.notification.plugin.NotificationPluginManagerImpl.java

@Override
public void updateNotificationPlugins(AlertDefinitionAPIEntity alertDef, boolean isDelete) {
    try {
        // Update Notification Plugin about the change in AlertDefinition
        String policyId = alertDef.getTags().get(Constants.POLICY_ID);
        if (isDelete) {
            // iterate all plugins and delete this policy
            for (NotificationPlugin plugin : policyNotificationMapping.get(policyId)) {
                plugin.update(policyId, null, true);
            }
            policyNotificationMapping.remove(policyId);
            LOG.info("Deleted notifications for policy " + policyId);
            return;
        }

        Map<String, NotificationPlugin> plugins = pluginsForPolicy(alertDef);
        // calculate difference between current plugins and previous plugin
        Collection<NotificationPlugin> previousPlugins = policyNotificationMapping.get(policyId);
        if (previousPlugins != null) {
            Collection<NotificationPlugin> deletedPlugins = CollectionUtils.subtract(previousPlugins,
                    plugins.values());
            LOG.info("Going to delete plugins " + deletedPlugins + ", for policy " + policyId);
            for (NotificationPlugin plugin : deletedPlugins) {
                plugin.update(policyId, null, true);
            }
        }

        // iterate current notifications and update it individually
        List<Map<String, String>> notificationConfigCollection = NotificationPluginUtils
                .deserializeNotificationConfig(alertDef.getNotificationDef());
        for (NotificationPlugin plugin : plugins.values()) {
            plugin.update(policyId, notificationConfigCollection, false);
        }

        policyNotificationMapping.put(policyId, plugins.values());// update policy - notification types map
        LOG.info("Successfully broadcast policy updates to all Notification Plugins ...");
    } catch (Exception e) {
        LOG.error("Error broadcasting policy notification changes ", e);
    }
}

From source file: org.apache.falcon.catalog.CatalogPartitionHandler.java

private void registerPartitions(Configuration conf, CatalogStorage storage, Path staticPath,
        List<String> staticPartition) throws FalconException {
    try {
        FileSystem fs = HadoopClientFactory.get().createProxiedFileSystem(conf);
        if (!fs.exists(staticPath)) {
            //Do nothing if the output path doesn't exist
            return;
        }

        List<String> partitionColumns = getPartitionColumns(conf, storage);
        int dynamicPartCols = partitionColumns.size() - staticPartition.size();
        Path searchPath = staticPath;
        if (dynamicPartCols > 0) {
            searchPath = new Path(staticPath, StringUtils.repeat("*", "/", dynamicPartCols));
        }

        //Figure out the dynamic partitions from the directories on hdfs
        FileStatus[] files = fs.globStatus(searchPath, PATH_FILTER);
        Map<List<String>, String> partitions = new HashMap<List<String>, String>();
        for (FileStatus file : files) {
            List<String> dynamicParts = getDynamicPartitions(file.getPath(), staticPath);
            List<String> partitionValues = new ArrayList<String>(staticPartition);
            partitionValues.addAll(dynamicParts);
            LOG.debug("Final partition - " + partitionValues);
            partitions.put(partitionValues, file.getPath().toString());
        }

        List<List<String>> existPartitions = listPartitions(conf, storage, staticPartition);
        Collection<List<String>> targetPartitions = partitions.keySet();

        Collection<List<String>> partitionsForDrop = CollectionUtils.subtract(existPartitions,
                targetPartitions);
        Collection<List<String>> partitionsForAdd = CollectionUtils.subtract(targetPartitions, existPartitions);
        Collection<List<String>> partitionsForUpdate = CollectionUtils.intersection(existPartitions,
                targetPartitions);

        for (List<String> partition : partitionsForDrop) {
            dropPartitions(conf, storage, partition);
        }

        for (List<String> partition : partitionsForAdd) {
            addPartition(conf, storage, partition, partitions.get(partition));
        }

        for (List<String> partition : partitionsForUpdate) {
            updatePartition(conf, storage, partition, partitions.get(partition));
        }
    } catch (IOException e) {
        throw new FalconException(e);
    }
}

From source file: org.apache.myfaces.custom.picklist.HtmlPicklistRenderer.java

private List selectItemsForAvailableList(FacesContext facesContext, UIComponent uiComponent,
        List selectItemList, List selectItemsForSelectedList, Converter converter) {

    return new ArrayList(CollectionUtils.subtract(selectItemList, selectItemsForSelectedList));
}

From source file: org.apache.oozie.command.SchemaCheckXCommand.java

private boolean checkTables(DatabaseMetaData metaData, String catalog,
        final Collection<String> expectedTablesRaw) throws SQLException {
    boolean problem = false;
    Set<String> expectedTables = new HashSet<String>(expectedTablesRaw);
    expectedTables.add(caseTableName("oozie_sys"));
    expectedTables.add(caseTableName("openjpa_sequence_table"));
    expectedTables.add(caseTableName("validate_conn"));
    // Oracle returns > 1000 tables if we don't have the schema "OOZIE"; MySQL and Postgres don't want this
    String schema = null;
    if (dbType.equals("oracle")) {
        schema = "OOZIE";
    }
    ResultSet rs = metaData.getTables(catalog, schema, null, new String[] { "TABLE" });
    Set<String> foundTables = new HashSet<String>();
    while (rs.next()) {
        String tabName = rs.getString("TABLE_NAME");
        if (tabName != null) {
            foundTables.add(tabName);
        }
    }
    Collection missingTables = CollectionUtils.subtract(expectedTables, foundTables);
    if (!missingTables.isEmpty()) {
        LOG.error("Found [{0}] missing tables: {1}", missingTables.size(),
                Arrays.toString(missingTables.toArray()));
        problem = true;
    } else if (LOG.isDebugEnabled()) {
        LOG.debug("No missing tables found: {0}", Arrays.toString(expectedTables.toArray()));
    }
    if (!ignoreExtras) {
        Collection extraTables = CollectionUtils.subtract(foundTables, expectedTables);
        if (!extraTables.isEmpty()) {
            LOG.error("Found [{0}] extra tables: {1}", extraTables.size(),
                    Arrays.toString(extraTables.toArray()));
            problem = true;
        } else {
            LOG.debug("No extra tables found");
        }
    }
    return problem;
}

From source file: org.apache.oozie.command.SchemaCheckXCommand.java

private boolean checkColumns(DatabaseMetaData metaData, String catalog, String table,
        Map<String, Integer> expectedColumnTypes) throws SQLException {
    boolean problem = false;
    Map<String, Pair<Integer, String>> foundColumns = new HashMap<String, Pair<Integer, String>>();
    ResultSet rs = metaData.getColumns(catalog, null, table, null);
    while (rs.next()) {
        String colName = rs.getString("COLUMN_NAME");
        Integer dataType = rs.getInt("DATA_TYPE");
        String colDef = rs.getString("COLUMN_DEF");
        if (colName != null) {
            foundColumns.put(colName, new Pair<Integer, String>(dataType, colDef));
        }
    }
    Collection missingColumns = CollectionUtils.subtract(expectedColumnTypes.keySet(), foundColumns.keySet());
    if (!missingColumns.isEmpty()) {
        LOG.error("Found [{0}] missing columns in table [{1}]: {2}", missingColumns.size(), table,
                Arrays.toString(missingColumns.toArray()));
        problem = true;
    } else {
        for (Map.Entry<String, Integer> ent : expectedColumnTypes.entrySet()) {
            if (!foundColumns.get(ent.getKey()).getFirst().equals(ent.getValue())) {
                LOG.error("Expected column [{0}] in table [{1}] to have type [{2}], but found type [{3}]",
                        ent.getKey(), table, getSQLTypeFromInt(ent.getValue()),
                        getSQLTypeFromInt(foundColumns.get(ent.getKey()).getFirst()));
                problem = true;
            } else if (foundColumns.get(ent.getKey()).getSecond() != null) {
                LOG.error(
                        "Expected column [{0}] in table [{1}] to have default value [NULL], but found default vale [{2}]",
                        ent.getKey(), table, foundColumns.get(ent.getKey()).getSecond());
                problem = true;
            } else {
                LOG.debug("Found column [{0}] in table [{1}] with type [{2}] and default value [NULL]",
                        ent.getKey(), table, getSQLTypeFromInt(ent.getValue()));
            }
        }
    }
    if (!ignoreExtras) {
        Collection extraColumns = CollectionUtils.subtract(foundColumns.keySet(), expectedColumnTypes.keySet());
        if (!extraColumns.isEmpty()) {
            LOG.error("Found [{0}] extra columns in table [{1}]: {2}", extraColumns.size(), table,
                    Arrays.toString(extraColumns.toArray()));
            problem = true;
        } else {
            LOG.debug("No extra columns found in table [{0}]", table);
        }
    }
    return problem;
}

From source file: org.apache.oozie.command.SchemaCheckXCommand.java

private boolean checkIndexes(DatabaseMetaData metaData, String catalog, String table,
        Set<String> expectedIndexedColumns) throws SQLException {
    boolean problem = false;
    Set<String> foundIndexedColumns = new HashSet<String>();
    ResultSet rs = metaData.getIndexInfo(catalog, null, table, false, true);
    while (rs.next()) {
        String colName = rs.getString("COLUMN_NAME");
        if (colName != null) {
            foundIndexedColumns.add(colName);
        }
    }
    Collection missingIndexColumns = CollectionUtils.subtract(expectedIndexedColumns, foundIndexedColumns);
    if (!missingIndexColumns.isEmpty()) {
        LOG.error("Found [{0}] missing indexes for columns in table [{1}]: {2}", missingIndexColumns.size(),
                table, Arrays.toString(missingIndexColumns.toArray()));
        problem = true;
    } else {
        if (LOG.isDebugEnabled()) {
            LOG.debug("No missing indexes found in table [{0}]: {1}", table,
                    Arrays.toString(expectedIndexedColumns.toArray()));
        }
    }
    if (!ignoreExtras) {
        Collection extraIndexColumns = CollectionUtils.subtract(foundIndexedColumns, expectedIndexedColumns);
        if (!extraIndexColumns.isEmpty()) {
            LOG.error("Found [{0}] extra indexes for columns in table [{1}]: {2}", extraIndexColumns.size(),
                    table, Arrays.toString(extraIndexColumns.toArray()));
            problem = true;
        } else {
            LOG.debug("No extra indexes found in table [{0}]", table);
        }
    }
    return problem;
}

From source file: org.apache.sysml.hops.ipa.InterProceduralAnalysis.java

/**
 * Public interface to perform IPA over a given DML program.
 *
 * @param dmlp the dml program
 * @throws HopsException if HopsException occurs
 * @throws ParseException if ParseException occurs
 * @throws LanguageException if LanguageException occurs
 */
@SuppressWarnings("unchecked")
public void analyzeProgram(DMLProgram dmlp) throws HopsException, ParseException, LanguageException {
    //step 1: get candidates for statistics propagation into functions (if required)
    Map<String, Integer> fcandCounts = new HashMap<String, Integer>();
    Map<String, FunctionOp> fcandHops = new HashMap<String, FunctionOp>();
    Map<String, Set<Long>> fcandSafeNNZ = new HashMap<String, Set<Long>>();
    Set<String> allFCandKeys = new HashSet<String>();
    if (!dmlp.getFunctionStatementBlocks().isEmpty()) {
        for (StatementBlock sb : dmlp.getStatementBlocks()) //get candidates (over entire program)
            getFunctionCandidatesForStatisticPropagation(sb, fcandCounts, fcandHops);
        allFCandKeys.addAll(fcandCounts.keySet()); //cp before pruning
        pruneFunctionCandidatesForStatisticPropagation(fcandCounts, fcandHops);
        determineFunctionCandidatesNNZPropagation(fcandHops, fcandSafeNNZ);
        DMLTranslator.resetHopsDAGVisitStatus(dmlp);
    }

    //step 2: get unary dimension-preserving non-candidate functions
    Collection<String> unaryFcandTmp = CollectionUtils.subtract(allFCandKeys, fcandCounts.keySet());
    HashSet<String> unaryFcands = new HashSet<String>();
    if (!unaryFcandTmp.isEmpty() && UNARY_DIMS_PRESERVING_FUNS) {
        for (String tmp : unaryFcandTmp)
            if (isUnarySizePreservingFunction(dmlp.getFunctionStatementBlock(tmp)))
                unaryFcands.add(tmp);
    }

    //step 3: propagate statistics and scalars into functions and across DAGs
    if (!fcandCounts.isEmpty() || INTRA_PROCEDURAL_ANALYSIS) {
        //(callVars used to chain outputs/inputs of multiple functions calls) 
        LocalVariableMap callVars = new LocalVariableMap();
        for (StatementBlock sb : dmlp.getStatementBlocks()) //propagate stats into candidates
            propagateStatisticsAcrossBlock(sb, fcandCounts, callVars, fcandSafeNNZ, unaryFcands,
                    new HashSet<String>());
    }

    //step 4: remove unused functions (e.g., inlined or never called)
    if (REMOVE_UNUSED_FUNCTIONS) {
        removeUnusedFunctions(dmlp, allFCandKeys);
    }

    //step 5: flag functions with loops for 'recompile-on-entry'
    if (FLAG_FUNCTION_RECOMPILE_ONCE) {
        flagFunctionsForRecompileOnce(dmlp);
    }

    //step 6: set global data flow properties
    if (REMOVE_UNNECESSARY_CHECKPOINTS && OptimizerUtils.isSparkExecutionMode()) {
        //remove unnecessary checkpoint before update 
        removeCheckpointBeforeUpdate(dmlp);

        //move necessary checkpoint after update
        moveCheckpointAfterUpdate(dmlp);

        //remove unnecessary checkpoint read-{write|uagg}
        removeCheckpointReadWrite(dmlp);
    }

    //step 7: remove constant binary ops
    if (REMOVE_CONSTANT_BINARY_OPS) {
        removeConstantBinaryOps(dmlp);
    }

    //TODO evaluate potential of SECOND_CHANCE
    //(consistent call stats after first IPA pass and hence additional potential)
}

From source file: org.apache.sysml.runtime.transform.decode.DecoderFactory.java

@SuppressWarnings("unchecked")
public static Decoder createDecoder(String spec, String[] colnames, ValueType[] schema, FrameBlock meta)
        throws DMLRuntimeException {
    Decoder decoder = null;

    try {
        //parse transform specification
        JSONObject jSpec = new JSONObject(spec);
        List<Decoder> ldecoders = new ArrayList<Decoder>();

        //create decoders 'recode', 'dummy' and 'pass-through'
        List<Integer> rcIDs = Arrays.asList(
                ArrayUtils.toObject(TfMetaUtils.parseJsonIDList(jSpec, colnames, TfUtils.TXMETHOD_RECODE)));
        List<Integer> dcIDs = Arrays.asList(
                ArrayUtils.toObject(TfMetaUtils.parseJsonIDList(jSpec, colnames, TfUtils.TXMETHOD_DUMMYCODE)));
        rcIDs = new ArrayList<Integer>(CollectionUtils.union(rcIDs, dcIDs));
        List<Integer> ptIDs = new ArrayList<Integer>(
                CollectionUtils.subtract(UtilFunctions.getSequenceList(1, meta.getNumColumns(), 1), rcIDs));

        //create default schema if unspecified (with double columns for pass-through)
        if (schema == null) {
            schema = UtilFunctions.nCopies(meta.getNumColumns(), ValueType.STRING);
            for (Integer col : ptIDs)
                schema[col - 1] = ValueType.DOUBLE;
        }

        if (!dcIDs.isEmpty()) {
            ldecoders.add(new DecoderDummycode(schema, ArrayUtils.toPrimitive(dcIDs.toArray(new Integer[0]))));
        }
        if (!rcIDs.isEmpty()) {
            ldecoders.add(new DecoderRecode(schema, !dcIDs.isEmpty(),
                    ArrayUtils.toPrimitive(rcIDs.toArray(new Integer[0]))));
        }
        if (!ptIDs.isEmpty()) {
            ldecoders.add(new DecoderPassThrough(schema, ArrayUtils.toPrimitive(ptIDs.toArray(new Integer[0])),
                    ArrayUtils.toPrimitive(dcIDs.toArray(new Integer[0]))));
        }

        //create composite decoder of all created decoders
        //and initialize with given meta data (recode, dummy, bin)
        decoder = new DecoderComposite(schema, ldecoders);
        if (meta != null)
            decoder.initMetaData(meta);
    } catch (Exception ex) {
        throw new DMLRuntimeException(ex);
    }

    return decoder;
}