Example usage for org.apache.commons.lang BooleanUtils isTrue

List of usage examples for org.apache.commons.lang BooleanUtils isTrue

Introduction

On this page you can find example usage for org.apache.commons.lang BooleanUtils.isTrue.

Prototype

public static boolean isTrue(Boolean bool) 

Document

Checks if a Boolean value is true, handling null by returning false.

 BooleanUtils.isTrue(Boolean.TRUE)  = true
 BooleanUtils.isTrue(Boolean.FALSE) = false
 BooleanUtils.isTrue(null)          = false
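
Before the full usage listings below, here is a minimal, self-contained sketch of the null-safe behaviour. It assumes only that commons-lang 2.x is on the classpath; the maybeFlag variable is purely illustrative.

import org.apache.commons.lang.BooleanUtils;

public class BooleanUtilsIsTrueDemo {

    public static void main(String[] args) {
        // A nullable Boolean, e.g. an optional configuration flag.
        Boolean maybeFlag = null;

        // isTrue() never throws a NullPointerException:
        // only Boolean.TRUE yields true; Boolean.FALSE and null both yield false.
        System.out.println(BooleanUtils.isTrue(Boolean.TRUE));  // true
        System.out.println(BooleanUtils.isTrue(Boolean.FALSE)); // false
        System.out.println(BooleanUtils.isTrue(maybeFlag));     // false

        // Typical guard pattern, as in the usage examples below:
        if (BooleanUtils.isTrue(maybeFlag)) {
            System.out.println("flag enabled");
        } else {
            System.out.println("flag disabled or not set");
        }
    }
}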

Usage

From source file:org.apache.sqoop.connector.jdbc.oracle.OracleJdbcToInitializer.java

private void createAnyRequiredOracleObjects(MutableContext context, ToJobConfig jobConfig,
        ConnectionConfig connectionConfig) throws SQLException {

    // The SYSDATE on the Oracle database will be used as the partition value
    // for this export job...
    Object sysDateTime = OracleQueries.getSysDate(connection);
    String sysDateStr = OracleQueries.oraDATEToString(sysDateTime, "yyyy-mm-dd hh24:mi:ss");
    context.setString(OracleJdbcConnectorConstants.SQOOP_ORACLE_JOB_SYSDATE, sysDateStr);

    checkForOldOraOopTemporaryOracleTables(connection, sysDateTime, OracleQueries.getCurrentSchema(connection));

    // Store the actual partition value, so the N mappers know what value to
    // insert...
    String partitionValue = OracleQueries.oraDATEToString(sysDateTime,
            OracleJdbcConnectorConstants.ORAOOP_EXPORT_PARTITION_DATE_FORMAT);
    context.setString(OracleJdbcConnectorConstants.ORAOOP_EXPORT_PARTITION_DATE_VALUE, partitionValue);

    // Generate the (22 character) partition name...
    String partitionName = OracleUtilities.createExportTablePartitionNameFromOracleTimestamp(sysDateTime);

    //TODO: Number of mappers needs to be fixed
    int numMappers = 8;

    String exportTableTemplate = jobConfig.templateTable;

    if (exportTableTemplate == null) {
        exportTableTemplate = "";
    }

    String user = connectionConfig.username;
    //TODO: This is from the other Oracle Manager
    //if (user == null) {
    //  user = OracleManager.getSessionUser(connection);
    //}

    OracleTable templateTableContext = OracleUtilities.decodeOracleTableName(user, exportTableTemplate);

    boolean noLoggingOnNewTable = BooleanUtils.isTrue(jobConfig.nologging);

    List<String> updateKeyCol = jobConfig.updateKey;

    /* =========================== */
    /* VALIDATION OF INPUTS */
    /* =========================== */

    if (updateKeyCol == null || updateKeyCol.isEmpty()) {
        // We're performing an "insert" export, not an "update" export.

        // Check that the "oraoop.export.merge" property has not been
        // specified, as this would be an invalid scenario...
        if (OracleUtilities.getExportUpdateMode(jobConfig) == UpdateMode.Merge) {
            throw new RuntimeException("The merge option can only be used if " + "an update key is specified.");
        }
    }

    if (OracleUtilities.userWantsToCreatePartitionedExportTableFromTemplate(jobConfig)
            || OracleUtilities.userWantsToCreateNonPartitionedExportTableFromTemplate(jobConfig)) {

        // OraOop will create the export table.

        if (table.getName().length() > OracleJdbcConnectorConstants.Oracle.MAX_IDENTIFIER_LENGTH) {
            String msg = String.format(
                    "The Oracle table name \"%s\" is longer than %d characters.\n"
                            + "Oracle will not allow a table with this name to be created.",
                    table.getName(), OracleJdbcConnectorConstants.Oracle.MAX_IDENTIFIER_LENGTH);
            throw new RuntimeException(msg);
        }

        if (updateKeyCol != null && !updateKeyCol.isEmpty()) {

            // We're performing an "update" export, not an "insert" export.

            // Check whether the user is attempting an "update" (i.e. a
            // non-merge). If so, they're asking to only UPDATE rows in an
            // (about to be created) empty table that contains no rows.
            // This will be a waste of time, as we'd be attempting to perform
            // UPDATE operations against a table with no rows in it...
            UpdateMode updateMode = OracleUtilities.getExportUpdateMode(jobConfig);
            if (updateMode == UpdateMode.Update) {
                throw new RuntimeException(
                        String.format("\n\nCombining the template table option with the merge "
                                + "option is nonsensical, as this would create an "
                                + "empty table and then perform "
                                + "a lot of work that results in a table containing no rows.\n"));
            }
        }

        // Check that the specified template table actually exists and is a
        // table...
        String templateTableObjectType = OracleQueries.getOracleObjectType(connection, templateTableContext);
        if (templateTableObjectType == null) {
            throw new RuntimeException(
                    String.format("The specified Oracle template table \"%s\" does not exist.",
                            templateTableContext.toString()));
        }

        if (!templateTableObjectType.equalsIgnoreCase(OracleJdbcConnectorConstants.Oracle.OBJECT_TYPE_TABLE)) {
            throw new RuntimeException(String.format(
                    "The specified Oracle template table \"%s\" is not an " + "Oracle table, it's a %s.",
                    templateTableContext.toString(), templateTableObjectType));
        }

        if (BooleanUtils.isTrue(jobConfig.dropTableIfExists)) {
            OracleQueries.dropTable(connection, table);
        }

        // Check that there is no existing database object with the same name of
        // the table to be created...
        String newTableObjectType = OracleQueries.getOracleObjectType(connection, table);
        if (newTableObjectType != null) {
            throw new RuntimeException(String.format(
                    "%s cannot create a new Oracle table named %s as a \"%s\" "
                            + "with this name already exists.",
                    OracleJdbcConnectorConstants.CONNECTOR_NAME, table.toString(), newTableObjectType));
        }
    } else {
        // The export table already exists.

        if (updateKeyCol != null && !updateKeyCol.isEmpty()) {

            // We're performing an "update" export, not an "insert" export.

            // Check that there exists an index on the export table on the
            // update-key column(s).
            // Without such an index, this export may perform like a real dog...
            String[] updateKeyColumns = OracleUtilities.getExportUpdateKeyColumnNames(jobConfig);
            if (!OracleQueries.doesIndexOnColumnsExist(connection, table, updateKeyColumns)) {
                String msg = String.format(
                        "\n**************************************************************"
                                + "***************************************************************"
                                + "\n\tThe table %1$s does not have a valid index on " + "the column(s) %2$s.\n"
                                + "\tAs a consequence, this export may take a long time to " + "complete.\n"
                                + "\tIf performance is unacceptable, consider reattempting this "
                                + "job after creating an index " + "on this table via the SQL...\n"
                                + "\t\tcreate index <index_name> on %1$s(%2$s);\n"
                                + "****************************************************************"
                                + "*************************************************************",
                        table.toString(), OracleUtilities.stringArrayToCSV(updateKeyColumns));
                LOG.warn(msg);
            }
        }
    }

    boolean createMapperTables = false;

    if (updateKeyCol != null && !updateKeyCol.isEmpty()) {
        createMapperTables = true;
    }

    if (OracleUtilities.userWantsToCreatePartitionedExportTableFromTemplate(jobConfig)) {
        /* ================================= */
        /* CREATE A PARTITIONED TABLE */
        /* ================================= */

        // Create a new Oracle table using the specified template...

        String[] subPartitionNames = OracleUtilities.generateExportTableSubPartitionNames(numMappers,
                sysDateTime);
        // Create the export table from a template table...
        String tableStorageClause = OracleUtilities.getExportTableStorageClause(jobConfig);

        OracleQueries.createExportTableFromTemplateWithPartitioning(connection, table, tableStorageClause,
                templateTableContext, noLoggingOnNewTable, partitionName, sysDateTime, numMappers,
                subPartitionNames);

        createMapperTables = true;
    } else if (OracleUtilities.userWantsToCreateNonPartitionedExportTableFromTemplate(jobConfig)) {
        /* ===================================== */
        /* CREATE A NON-PARTITIONED TABLE */
        /* ===================================== */
        String tableStorageClause = OracleUtilities.getExportTableStorageClause(jobConfig);

        OracleQueries.createExportTableFromTemplate(connection, table, tableStorageClause, templateTableContext,
                noLoggingOnNewTable);
    } else {
        /* ===================================================== */
        /* ADD ADDITIONAL PARTITIONS TO AN EXISTING TABLE */
        /* ===================================================== */

        // If the export table is partitioned, and the partitions were created
        // by OraOop, then we need to create additional partitions...

        OracleTablePartitions tablePartitions = OracleQueries.getPartitions(connection, table);
        // Find any partition name starting with "ORAOOP_"...
        OracleTablePartition oraOopPartition = tablePartitions
                .findPartitionByRegEx("^" + OracleJdbcConnectorConstants.EXPORT_TABLE_PARTITION_NAME_PREFIX);

        if (tablePartitions.size() > 0 && oraOopPartition == null) {

            for (int idx = 0; idx < tablePartitions.size(); idx++) {
                LOG.info(String.format("The Oracle table %s has a partition named \"%s\".", table.toString(),
                        tablePartitions.get(idx).getName()));
            }

            LOG.warn(String.format(
                    "The Oracle table %s is partitioned.\n" + "These partitions were not created by %s.",
                    table.toString(), OracleJdbcConnectorConstants.CONNECTOR_NAME));
        }

        if (oraOopPartition != null) {

            // Indicate in the configuration what's happening...
            context.setBoolean(OracleJdbcConnectorConstants.EXPORT_TABLE_HAS_SQOOP_PARTITIONS, true);

            LOG.info(String.format(
                    "The Oracle table %s is partitioned.\n" + "These partitions were created by %s, so "
                            + "additional partitions will now be created.\n"
                            + "The name of the new partition will be \"%s\".",
                    table.toString(), OracleJdbcConnectorConstants.CONNECTOR_NAME, partitionName));

            String[] subPartitionNames = OracleUtilities.generateExportTableSubPartitionNames(numMappers,
                    sysDateTime);

            // Add another partition (and N subpartitions) to this existing,
            // partitioned export table...
            OracleQueries.createMoreExportTablePartitions(connection, table, partitionName, sysDateTime,
                    subPartitionNames);

            createMapperTables = true;
        }
    }

    if (createMapperTables) {
        createUniqueMapperTable(sysDateTime, numMappers, jobConfig);
    }
}

From source file:org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities.java

public static boolean oracleJdbcUrlGenerationDisabled(ConnectionConfig config) {
    return BooleanUtils.isTrue(config.jdbcUrlVerbatim);
}

From source file:org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities.java

public static boolean omitLobAndLongColumnsDuringImport(FromJobConfig jobConfig) {
    return BooleanUtils.isTrue(jobConfig.omitLobColumns);
}

From source file:org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities.java

public static boolean enableOracleParallelProcessingDuringExport(ToJobConfig jobConfig) {
    return BooleanUtils.isTrue(jobConfig.parallel);
}

From source file:org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities.java

public static boolean userWantsToCreatePartitionedExportTableFromTemplate(ToJobConfig jobConfig) {
    return userWantsToCreateExportTableFromTemplate(jobConfig) && BooleanUtils.isTrue(jobConfig.partitioned);
}

From source file:org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities.java

public static boolean userWantsToCreateNonPartitionedExportTableFromTemplate(ToJobConfig jobConfig) {
    return userWantsToCreateExportTableFromTemplate(jobConfig) && !BooleanUtils.isTrue(jobConfig.partitioned);
}

From source file:org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities.java

public static UpdateMode getExportUpdateMode(ToJobConfig jobConfig) {
    UpdateMode updateMode = UpdateMode.Update;

    if (BooleanUtils.isTrue(jobConfig.updateMerge)) {
        updateMode = UpdateMode.Merge;
    }

    return updateMode;
}

From source file:org.apache.storm.daemon.logviewer.handler.LogviewerLogSearchHandler.java

/**
 * Deep search across worker log files in a topology.
 *
 * @param topologyId topology ID
 * @param user username
 * @param search search string
 * @param numMatchesStr the maximum number of matches to return
 * @param portStr worker port, null or '*' if the request wants to search from all worker logs
 * @param fileOffsetStr index (offset) of the log files
 * @param offsetStr start offset for log file
 * @param searchArchived true if the request wants to search also archived files, false if not
 * @param callback callback for JSONP
 * @param origin origin
 * @return Response containing JSON content representing search result
 */
public Response deepSearchLogsForTopology(String topologyId, String user, String search, String numMatchesStr,
        String portStr, String fileOffsetStr, String offsetStr, Boolean searchArchived, String callback,
        String origin) {
    String rootDir = logRoot;
    Object returnValue;
    File topologyDir = new File(rootDir, topologyId);
    if (StringUtils.isEmpty(search) || !topologyDir.exists()) {
        returnValue = new ArrayList<>();
    } else {
        int fileOffset = ObjectReader.getInt(fileOffsetStr, 0);
        int offset = ObjectReader.getInt(offsetStr, 0);
        int numMatches = ObjectReader.getInt(numMatchesStr, 1);

        File[] portDirsArray = topologyDir.listFiles();
        List<File> portDirs;
        if (portDirsArray != null) {
            portDirs = Arrays.asList(portDirsArray);
        } else {
            portDirs = new ArrayList<>();
        }

        if (StringUtils.isEmpty(portStr) || portStr.equals("*")) {
            // check for all ports
            List<List<File>> filteredLogs = portDirs.stream().map(portDir -> logsForPort(user, portDir))
                    .filter(logs -> logs != null && !logs.isEmpty()).collect(toList());

            if (BooleanUtils.isTrue(searchArchived)) {
                returnValue = filteredLogs.stream().map(fl -> findNMatches(fl, numMatches, 0, 0, search))
                        .collect(toList());
            } else {
                returnValue = filteredLogs.stream().map(fl -> Collections.singletonList(first(fl)))
                        .map(fl -> findNMatches(fl, numMatches, 0, 0, search)).collect(toList());
            }
        } else {
            int port = Integer.parseInt(portStr);
            // check just the one port
            List<Integer> slotsPorts = (List<Integer>) stormConf
                    .getOrDefault(DaemonConfig.SUPERVISOR_SLOTS_PORTS, new ArrayList<>());
            boolean containsPort = slotsPorts.stream()
                    .anyMatch(slotPort -> slotPort != null && (slotPort == port));
            if (!containsPort) {
                returnValue = new ArrayList<>();
            } else {
                File portDir = new File(rootDir + File.separator + topologyId + File.separator + port);

                if (!portDir.exists() || logsForPort(user, portDir).isEmpty()) {
                    returnValue = new ArrayList<>();
                } else {
                    List<File> filteredLogs = logsForPort(user, portDir);
                    if (BooleanUtils.isTrue(searchArchived)) {
                        returnValue = findNMatches(filteredLogs, numMatches, fileOffset, offset, search);
                    } else {
                        returnValue = findNMatches(Collections.singletonList(first(filteredLogs)), numMatches,
                                0, offset, search);
                    }
                }
            }
        }
    }

    return LogviewerResponseBuilder.buildSuccessJsonResponse(returnValue, callback, origin);
}

From source file:org.betaconceptframework.astroboa.console.jsf.edit.CmsPropertyWrapper.java

public String getPathRelativeToCmsPropertyParent() {

    getPath();

    String relativePath = PropertyPath.getLastDescendant(path);

    // Special case: if the property is COMPLEX and MULTIPLE
    // and the path does not contain an index,
    // an index of 0 is appended to the relative path.
    if (ValueType.Complex == getValueType() && BooleanUtils.isTrue(isMultiple())
            && PropertyPath.extractIndexFromPath(relativePath) == PropertyPath.NO_INDEX) {
        relativePath += CmsConstants.LEFT_BRACKET + "0" + CmsConstants.RIGHT_BRACKET;
    }

    return relativePath;
}

From source file:org.betaconceptframework.astroboa.console.jsf.richfaces.LazyLoadingCmsDefinitionTreeNodeRichFaces.java

public boolean hasParentMultiple() {
    if (getParent() != null) {
        if (getParent() instanceof LazyLoadingCmsDefinitionTreeNodeRichFaces) {
            LazyLoadingCmsDefinitionTreeNodeRichFaces parentDefinitionTreeNode = (LazyLoadingCmsDefinitionTreeNodeRichFaces) getParent();
            Boolean isParentMultiple = parentDefinitionTreeNode.getDefinitionMultiple();
            if (BooleanUtils.isTrue(isParentMultiple))
                return true;

            return parentDefinitionTreeNode.hasParentMultiple();
        }

        return false;
    }

    return false;
}