Example usage for java.lang System lineSeparator

List of usage examples for java.lang System lineSeparator

Introduction

On this page you can find example usages of java.lang.System.lineSeparator().

Prototype

public static String lineSeparator()

Click Source Link to view the source code for java.lang.System.lineSeparator().
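
The method returns the platform's line separator, equivalent to System.getProperty("line.separator") ("\n" on Linux and macOS, "\r\n" on Windows), and is available since Java 7. A minimal, self-contained sketch of typical usage (the class name LineSeparatorDemo is illustrative only):

public class LineSeparatorDemo {
    public static void main(String[] args) {
        // System.lineSeparator() yields the platform's line terminator,
        // so the assembled text uses the correct line endings everywhere.
        StringBuilder sb = new StringBuilder();
        sb.append("first line").append(System.lineSeparator());
        sb.append("second line").append(System.lineSeparator());
        System.out.print(sb);
    }
}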

Usage

From source file:com.adobe.acs.commons.hc.impl.HealthCheckStatusEmailer.java

/**
 * Generates the plain-text email sections for sets of Health Check Execution Results.
 *
 * @param title The section title
 * @param results the Health Check Execution Results to render as plain text
 * @return the String for this section to be embedded in the e-mail
 */
protected String resultToPlainText(final String title, final List<HealthCheckExecutionResult> results) {
    final StringBuilder sb = new StringBuilder();

    sb.append(title);
    sb.append(System.lineSeparator());

    if (results.isEmpty()) {
        sb.append("No " + StringUtils.lowerCase(title) + " could be found!");
        sb.append(System.lineSeparator());
    } else {
        sb.append(StringUtils.repeat("-", NUM_DASHES));
        sb.append(System.lineSeparator());

        for (final HealthCheckExecutionResult result : results) {
            sb.append(StringUtils.rightPad("[ " + result.getHealthCheckResult().getStatus().name() + " ]",
                    HEALTH_CHECK_STATUS_PADDING));
            sb.append("  ");
            sb.append(result.getHealthCheckMetadata().getTitle());
            sb.append(System.lineSeparator());
        }
    }

    return sb.toString();
}

From source file:com.adaptris.ftp.ApacheFtpClientImpl.java

private void logReply(String[] replyText) {
    if (replyText == null || replyText.length == 0) {
        return;
    }
    if (isAdditionaDebug()) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < replyText.length; i++) {
            sb.append(replyText[i]);
            if (i + 1 < replyText.length) {
                sb.append(System.lineSeparator());
            }
        }
        logR.trace(sb.toString());
    }
}

From source file:ddf.catalog.impl.operations.UpdateOperations.java

private String buildUpdateLog(UpdateRequest createReq) {
    StringBuilder strBuilder = new StringBuilder();
    List<Metacard> metacards = createReq.getUpdates().stream().map(Map.Entry::getValue)
            .collect(Collectors.toList());
    String metacardTitleLabel = "Metacard Title: ";
    String metacardIdLabel = "Metacard ID: ";

    for (int i = 0; i < metacards.size(); i++) {
        Metacard card = metacards.get(i);
        strBuilder.append(System.lineSeparator()).append("Batch #: ").append(i + 1).append(" | ");
        if (card != null) {
            if (card.getTitle() != null) {
                strBuilder.append(metacardTitleLabel).append(card.getTitle()).append(" | ");
            }
            if (card.getId() != null) {
                strBuilder.append(metacardIdLabel).append(card.getId()).append(" | ");
            }
        } else {
            strBuilder.append("Null Metacard");
        }
    }
    return strBuilder.toString();
}

From source file:kmi.taa.core.PredicateObjectRetriever.java

public String JsonTransform(String origin) {
    if (origin.equals(""))
        return "";
    StringBuilder builder = new StringBuilder();

    Gson gson = new Gson();
    Response response = gson.fromJson(origin, Response.class);

    ArrayList<ResponseBindings> bindings = response.getResults().getBindings();
    for (ResponseBindings rb : bindings) {
        String pred = rb.getP().get("value");
        String obj = rb.getO().get("value");
        if (pred.contains("/prop/P")) {
            continue;
        }
        builder.append(pred + "\t" + obj);
        builder.append(System.lineSeparator());
    }

    return builder.toString();
}

From source file:com.pearson.eidetic.driver.threads.subthreads.SnapshotChecker.java

public boolean snapshotDecision(AmazonEC2Client ec2Client, Volume vol, String period) {
    if ((ec2Client == null) || (vol == null) || (period == null)) {
        return false;
    }
    try {

        List<Snapshot> int_snapshots = getAllSnapshotsOfVolume(ec2Client, vol, numRetries_,
                maxApiRequestsPerSecond_, uniqueAwsAccountIdentifier_);

        List<Snapshot> comparelist = new ArrayList<>();

        for (Snapshot snapshot : int_snapshots) {
            String sndesc = snapshot.getDescription();
            if ("week".equalsIgnoreCase(period) && sndesc.startsWith("week_snapshot")) {
                comparelist.add(snapshot);
            } else if ("day".equalsIgnoreCase(period) && sndesc.startsWith("day_snapshot")) {
                comparelist.add(snapshot);
            } else if ("hour".equalsIgnoreCase(period) && sndesc.startsWith("hour_snapshot")) {
                comparelist.add(snapshot);
            } else if ("month".equalsIgnoreCase(period) && sndesc.startsWith("month_snapshot")) {
                comparelist.add(snapshot);
            }
        }

        List<Snapshot> sortedCompareList = new ArrayList<>(comparelist);
        sortSnapshotsByDate(sortedCompareList);

        int hours = getHoursBetweenNowAndNewestSnapshot(sortedCompareList);
        int days = getDaysBetweenNowAndNewestSnapshot(sortedCompareList);

        if (("week".equalsIgnoreCase(period) && days < 0) || ("week".equalsIgnoreCase(period) && days >= 14)) {
        } else if (("hour".equalsIgnoreCase(period) && hours < 0)
                || ("hour".equalsIgnoreCase(period) && hours >= 2)) {
        } else if (("day".equalsIgnoreCase(period) && days < 0)
                || ("day".equalsIgnoreCase(period) && hours >= 25)) {
        } else if (("month".equalsIgnoreCase(period) && days < 0)
                || ("month".equalsIgnoreCase(period) && days >= 60)) {
        } else {
            return false;
        }

    } catch (Exception e) {
        logger.info("awsAccountNickname=\"" + uniqueAwsAccountIdentifier_
                + "\",Event=\"Error\", Error=\"error in snapshotDecision\", stacktrace=\"" + e.toString()
                + System.lineSeparator() + StackTrace.getStringFromStackTrace(e) + "\"");
        return false;
    }

    return true;
}

From source file:com.hybridbpm.core.util.HybridbpmCoreUtil.java

public static String updateFormCodeWithFiles(FormModel formModel, String code) {
    try {
        StringBuilder designBuilder = new StringBuilder();
        for (FileModel file : formModel.getFiles()) {
            String line = FileModelUtil.getFileComponent(file);
            if (!line.isEmpty()) {
                designBuilder.append(line).append(System.lineSeparator());
            }
        }
        return replaceGeneratedCode(code, designBuilder.toString(), SyntaxConstant.FORM_FILES_START,
                SyntaxConstant.FORM_FILES_END);
    } catch (Exception ex) {
        logger.log(Level.SEVERE, ex.getMessage(), ex);
        return code;
    }
}

From source file:io.hops.hopsworks.common.dao.jupyter.config.JupyterConfigFilesGenerator.java

private boolean createConfigFiles(String confDirPath, String hdfsUser, String realName, Project project,
        String nameNodeEndpoint, Integer port, JupyterSettings js) throws IOException, ServiceException {
    File jupyter_config_file = new File(confDirPath + JUPYTER_NOTEBOOK_CONFIG);
    File jupyter_kernel_file = new File(confDirPath + JUPYTER_CUSTOM_KERNEL);
    File sparkmagic_config_file = new File(confDirPath + SPARKMAGIC_CONFIG);
    File custom_js = new File(confDirPath + JUPYTER_CUSTOM_JS);
    boolean createdJupyter = false;
    boolean createdSparkmagic = false;
    boolean createdCustomJs = false;

    if (!jupyter_config_file.exists()) {

        String[] nn = nameNodeEndpoint.split(":");
        String nameNodeIp = nn[0];
        String nameNodePort = nn[1];

        String pythonKernel = "";

        if (settings.isPythonKernelEnabled() && !project.getPythonVersion().contains("X")) {
            pythonKernel = ", 'python-" + hdfsUser + "'";
            StringBuilder jupyter_kernel_config = ConfigFileGenerator.instantiateFromTemplate(
                    ConfigFileGenerator.JUPYTER_CUSTOM_KERNEL, "hdfs_user", hdfsUser, "hadoop_home",
                    settings.getHadoopSymbolicLinkDir(), "hadoop_version", settings.getHadoopVersion(),
                    "anaconda_home", settings.getAnacondaProjectDir(project.getName()), "secret_dir",
                    settings.getStagingDir() + Settings.PRIVATE_DIRS + js.getSecret());
            ConfigFileGenerator.createConfigFile(jupyter_kernel_file, jupyter_kernel_config.toString());
        }

        StringBuilder jupyter_notebook_config = ConfigFileGenerator.instantiateFromTemplate(
                ConfigFileGenerator.JUPYTER_NOTEBOOK_CONFIG_TEMPLATE, "project", project.getName(),
                "namenode_ip", nameNodeIp, "namenode_port", nameNodePort, "hopsworks_ip",
                settings.getHopsworksIp(), "base_dir", js.getBaseDir(), "hdfs_user", hdfsUser, "port",
                port.toString(), "python-kernel", pythonKernel, "umask", js.getUmask(), "hadoop_home",
                this.settings.getHadoopSymbolicLinkDir(), "hdfs_home", this.settings.getHadoopSymbolicLinkDir(),
                "secret_dir", this.settings.getStagingDir() + Settings.PRIVATE_DIRS + js.getSecret());
        createdJupyter = ConfigFileGenerator.createConfigFile(jupyter_config_file,
                jupyter_notebook_config.toString());
    }
    if (!sparkmagic_config_file.exists()) {
        StringBuilder sparkFiles = new StringBuilder();
        sparkFiles
                //Log4j.properties
                .append(settings.getSparkLog4JPath()).append(",")
                // Glassfish domain truststore
                .append(settings.getGlassfishTrustStoreHdfs()).append("#").append(Settings.DOMAIN_CA_TRUSTSTORE)
                .append(",")
                // Add HopsUtil
                .append(settings.getHopsUtilHdfsPath());

        if (!js.getFiles().equals("")) {
            //Split the comma-separated string and append it to sparkFiles
            for (String file : js.getFiles().split(",")) {
                sparkFiles.append(",").append(file);
            }
        }

        String extraClassPath = settings.getHopsLeaderElectionJarPath() + File.pathSeparator
                + settings.getHopsUtilFilename();

        if (!js.getJars().equals("")) {
            //Split the comma-separated string and append the names to the driver and executor classpath
            for (String jar : js.getJars().split(",")) {
                sparkFiles.append(",").append(jar);
                //Get jar name
                String name = jar.substring(jar.lastIndexOf("/") + 1);
                extraClassPath += File.pathSeparator + name;
            }
        }

        // If Hops RPC TLS is enabled, password file would be injected by the
        // NodeManagers. We don't need to add it as LocalResource
        if (!settings.getHopsRpcTls()) {
            sparkFiles
                    // Keystore
                    .append(",hdfs://").append(settings.getHdfsTmpCertDir()).append(File.separator)
                    .append(hdfsUser).append(File.separator).append(hdfsUser).append("__kstore.jks#")
                    .append(Settings.K_CERTIFICATE).append(",")
                    // TrustStore
                    .append("hdfs://").append(settings.getHdfsTmpCertDir()).append(File.separator)
                    .append(hdfsUser).append(File.separator).append(hdfsUser).append("__tstore.jks#")
                    .append(Settings.T_CERTIFICATE).append(",")
                    // File with crypto material password
                    .append("hdfs://").append(settings.getHdfsTmpCertDir()).append(File.separator)
                    .append(hdfsUser).append(File.separator).append(hdfsUser).append("__cert.key#")
                    .append(Settings.CRYPTO_MATERIAL_PASSWORD);
        }

        //Prepare pyfiles
        StringBuilder pyFilesBuilder = new StringBuilder();
        if (!Strings.isNullOrEmpty(js.getPyFiles())) {
            pyFilesBuilder = new StringBuilder();
            for (String file : js.getPyFiles().split(",")) {
                file += "#" + file.substring(file.lastIndexOf("/") + 1);
                pyFilesBuilder.append(file).append(",");
            }
            //Remove last comma character
            pyFilesBuilder.deleteCharAt(pyFilesBuilder.length() - 1);
        }

        String sparkProps = js.getSparkParams();

        // Spark properties user has defined in the jupyter dashboard
        Map<String, String> userSparkProperties = HopsUtils.validateUserProperties(sparkProps,
                settings.getSparkDir());

        LOGGER.info("SparkProps are: " + System.lineSeparator() + sparkProps);

        boolean isExperiment = js.getMode().compareToIgnoreCase("experiment") == 0;
        boolean isParallelExperiment = js.getMode().compareToIgnoreCase("parallelexperiments") == 0;
        boolean isDistributedTraining = js.getMode().compareToIgnoreCase("distributedtraining") == 0;
        boolean isMirroredStrategy = js.getDistributionStrategy().compareToIgnoreCase("mirroredstrategy") == 0
                && isDistributedTraining;
        boolean isParameterServerStrategy = js.getDistributionStrategy()
                .compareToIgnoreCase("parameterserverstrategy") == 0 && isDistributedTraining;
        boolean isCollectiveAllReduceStrategy = js.getDistributionStrategy()
                .compareToIgnoreCase("collectiveallreducestrategy") == 0 && isDistributedTraining;
        boolean isSparkDynamic = js.getMode().compareToIgnoreCase("sparkdynamic") == 0;
        String extraJavaOptions = "-D" + Settings.LOGSTASH_JOB_INFO + "=" + project.getName().toLowerCase()
                + ",jupyter,notebook,?" + " -D" + Settings.HOPSWORKS_JOBTYPE_PROPERTY + "=" + JobType.SPARK
                + " -D" + Settings.KAFKA_BROKERADDR_PROPERTY + "=" + settings.getKafkaBrokersStr() + " -D"
                + Settings.HOPSWORKS_REST_ENDPOINT_PROPERTY + "=" + settings.getRestEndpoint() + " -D"
                + Settings.HOPSWORKS_ELASTIC_ENDPOINT_PROPERTY + "=" + settings.getElasticRESTEndpoint() + " -D"
                + Settings.HOPSWORKS_PROJECTID_PROPERTY + "=" + project.getId() + " -D"
                + Settings.HOPSWORKS_PROJECTNAME_PROPERTY + "=" + project.getName()
                + " -Dlog4j.configuration=./log4j.properties";

        // Get information about which version of TensorFlow the user is running
        TfLibMapping tfLibMapping = tfLibMappingFacade.findTfMappingForProject(project);
        if (tfLibMapping == null) {
            // We are not supporting this version.
            throw new ServiceException(RESTCodes.ServiceErrorCode.TENSORFLOW_VERSION_NOT_SUPPORTED, Level.INFO);
        }
        String tfLdLibraryPath = tfLibMappingUtil.buildTfLdLibraryPath(tfLibMapping);

        // Map of default/system Spark(Magic) properties <Property_Name, ConfigProperty>
        // Property_Name should be either the SparkMagic property name or Spark property name
        // The replacement pattern is defined in ConfigProperty
        Map<String, ConfigProperty> sparkMagicParams = new HashMap<>();
        sparkMagicParams.put("livy_ip", new ConfigProperty("livy_ip", HopsUtils.IGNORE, settings.getLivyIp()));
        sparkMagicParams.put("jupyter_home", new ConfigProperty("jupyter_home", HopsUtils.IGNORE, confDirPath));
        sparkMagicParams.put("driverCores",
                new ConfigProperty("driver_cores", HopsUtils.IGNORE,
                        (isExperiment || isDistributedTraining || isParallelExperiment) ? "1"
                                : Integer.toString(js.getAppmasterCores())));
        sparkMagicParams.put("driverMemory", new ConfigProperty("driver_memory", HopsUtils.IGNORE,
                Integer.toString(js.getAppmasterMemory()) + "m"));
        sparkMagicParams.put("numExecutors",
                new ConfigProperty("num_executors", HopsUtils.IGNORE, (isExperiment || isMirroredStrategy) ? "1"
                        : (isParameterServerStrategy) ? Integer.toString(js.getNumExecutors() + js.getNumTfPs())
                                : (isSparkDynamic) ? Integer.toString(js.getDynamicMinExecutors())
                                        : Integer.toString(js.getNumExecutors())));
        sparkMagicParams.put("executorCores",
                new ConfigProperty("executor_cores", HopsUtils.IGNORE,
                        (isExperiment || isDistributedTraining || isParallelExperiment) ? "1"
                                : Integer.toString(js.getNumExecutorCores())));
        sparkMagicParams.put("executorMemory", new ConfigProperty("executor_memory", HopsUtils.IGNORE,
                Integer.toString(js.getExecutorMemory()) + "m"));
        sparkMagicParams.put("proxyUser", new ConfigProperty("hdfs_user", HopsUtils.IGNORE, hdfsUser));
        sparkMagicParams.put("name", new ConfigProperty("spark_magic_name", HopsUtils.IGNORE,
                "remotesparkmagics-jupyter-" + js.getMode()));
        sparkMagicParams.put("queue", new ConfigProperty("yarn_queue", HopsUtils.IGNORE, "default"));

        // Export versions of software

        sparkMagicParams.put("spark.yarn.appMasterEnv.LIVY_VERSION",
                new ConfigProperty("livy_version", HopsUtils.IGNORE, this.settings.getLivyVersion()));

        sparkMagicParams.put("spark.yarn.appMasterEnv.SPARK_VERSION",
                new ConfigProperty("spark_version", HopsUtils.IGNORE, this.settings.getSparkVersion()));

        sparkMagicParams.put("spark.yarn.appMasterEnv.KAFKA_VERSION",
                new ConfigProperty("kafka_version", HopsUtils.IGNORE, this.settings.getKafkaVersion()));

        sparkMagicParams.put("spark.yarn.appMasterEnv.TENSORFLOW_VERSION",
                new ConfigProperty("tensorflow_version", HopsUtils.IGNORE, tfLibMapping.getTfVersion()));

        sparkMagicParams.put("spark.yarn.appMasterEnv.CUDA_VERSION",
                new ConfigProperty("cuda_version", HopsUtils.IGNORE, tfLibMapping.getCudaVersion()));

        sparkMagicParams.put("spark.yarn.appMasterEnv.HOPSWORKS_VERSION",
                new ConfigProperty("hopsworks_version", HopsUtils.IGNORE, this.settings.getHopsworksVersion()));

        sparkMagicParams.put("spark.yarn.appMasterEnv.HADOOP_VERSION",
                new ConfigProperty("hadoop_version", HopsUtils.IGNORE, this.settings.getHadoopVersion()));

        sparkMagicParams.put("spark.yarn.appMasterEnv.KAFKA_BROKERS",
                new ConfigProperty("kafka_brokers", HopsUtils.IGNORE, this.settings.getKafkaBrokersStr()));

        sparkMagicParams.put("spark.yarn.appMasterEnv.ELASTIC_ENDPOINT", new ConfigProperty("elastic_endpoint",
                HopsUtils.IGNORE, this.settings.getElasticRESTEndpoint()));

        sparkMagicParams.put("spark.yarn.appMasterEnv.HOPSWORKS_USER",
                new ConfigProperty("hopsworks_user", HopsUtils.IGNORE, realName));

        // Spark properties
        sparkMagicParams.put(Settings.SPARK_EXECUTORENV_PATH,
                new ConfigProperty("spark_executorEnv_PATH", HopsUtils.APPEND_PATH,
                        this.settings.getAnacondaProjectDir(project.getName())
                                + "/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"));

        sparkMagicParams.put("spark.yarn.appMasterEnv.PYSPARK_PYTHON", new ConfigProperty("pyspark_bin",
                HopsUtils.IGNORE, this.settings.getAnacondaProjectDir(project.getName()) + "/bin/python"));

        sparkMagicParams.put("spark.yarn.appMasterEnv.PYSPARK_DRIVER_PYTHON", new ConfigProperty("pyspark_bin",
                HopsUtils.IGNORE, this.settings.getAnacondaProjectDir(project.getName()) + "/bin/python"));

        sparkMagicParams.put("spark.yarn.appMasterEnv.PYSPARK3_PYTHON", new ConfigProperty("pyspark_bin",
                HopsUtils.IGNORE, this.settings.getAnacondaProjectDir(project.getName()) + "/bin/python"));

        sparkMagicParams.put(Settings.SPARK_YARN_APPMASTERENV_LD_LIBRARY_PATH,
                new ConfigProperty("spark_yarn_appMaster_LD_LIBRARY_PATH", HopsUtils.APPEND_PATH,
                        this.settings.getJavaHome() + "/jre/lib/amd64/server:" + tfLdLibraryPath
                                + this.settings.getHadoopSymbolicLinkDir() + "/lib/native"));

        sparkMagicParams.put("spark.yarn.appMasterEnv.HADOOP_HOME",
                new ConfigProperty("hadoop_home", HopsUtils.IGNORE, this.settings.getHadoopSymbolicLinkDir()));

        sparkMagicParams.put(Settings.SPARK_YARN_APPMASTERENV_LIBHDFS_OPTS,
                new ConfigProperty("spark_yarn_appMasterEnv_LIBHDFS_OPTS", HopsUtils.APPEND_SPACE,
                        "-Xmx96m -Dlog4j.configuration=" + this.settings.getHadoopSymbolicLinkDir()
                                + "/etc/hadoop/log4j.properties -Dhadoop.root.logger=ERROR,RFA"));

        sparkMagicParams.put("spark.yarn.appMasterEnv.HADOOP_HDFS_HOME",
                new ConfigProperty("hadoop_home", HopsUtils.IGNORE, this.settings.getHadoopSymbolicLinkDir()));

        sparkMagicParams.put("spark.yarn.appMasterEnv.HADOOP_USER_NAME",
                new ConfigProperty("hdfs_user", HopsUtils.IGNORE, hdfsUser));

        sparkMagicParams.put("spark.yarn.appMasterEnv.REST_ENDPOINT",
                new ConfigProperty("rest_endpoint", HopsUtils.IGNORE, settings.getRestEndpoint()));

        sparkMagicParams.put("spark.yarn.appMasterEnv.HDFS_BASE_DIR",
                new ConfigProperty("spark_yarn_appMasterEnv_HDFS_BASE_DIR", HopsUtils.IGNORE,
                        "hdfs://Projects/" + project.getName() + js.getBaseDir()));

        sparkMagicParams.put(Settings.SPARK_DRIVER_STAGINGDIR_ENV, new ConfigProperty("spark_yarn_stagingDir",
                HopsUtils.IGNORE, "hdfs:///Projects/" + project.getName() + "/Resources"));

        sparkMagicParams.put("spark.yarn.dist.files",
                new ConfigProperty("spark_yarn_dist_files", HopsUtils.IGNORE, sparkFiles.toString()));

        sparkMagicParams.put("spark.yarn.dist.archives",
                new ConfigProperty("spark_yarn_dist_archives", HopsUtils.IGNORE, js.getArchives()));

        sparkMagicParams.put("spark.yarn.dist.pyFiles",
                new ConfigProperty("spark_yarn_dist_pyFiles", HopsUtils.IGNORE, pyFilesBuilder.toString()));

        sparkMagicParams.put(Settings.SPARK_DRIVER_EXTRALIBRARYPATH,
                new ConfigProperty("spark_driver_extraLibraryPath", HopsUtils.APPEND_PATH, tfLdLibraryPath));

        sparkMagicParams.put(Settings.SPARK_DRIVER_EXTRAJAVAOPTIONS,
                new ConfigProperty("spark_driver_extraJavaOptions", HopsUtils.APPEND_SPACE, extraJavaOptions));

        sparkMagicParams.put(Settings.SPARK_DRIVER_EXTRACLASSPATH,
                new ConfigProperty("spark_driver_extraClassPath", HopsUtils.APPEND_PATH, extraClassPath));

        sparkMagicParams.put(Settings.SPARK_EXECUTOR_EXTRACLASSPATH,
                new ConfigProperty("spark_executor_extraClassPath", HopsUtils.APPEND_PATH, extraClassPath));

        sparkMagicParams.put("spark.executorEnv.REST_ENDPOINT",
                new ConfigProperty("rest_endpoint", HopsUtils.IGNORE, settings.getRestEndpoint()));

        sparkMagicParams.put(Settings.SPARK_EXECUTORENV_HADOOP_USER_NAME,
                new ConfigProperty("hdfs_user", HopsUtils.IGNORE, hdfsUser));

        sparkMagicParams.put("spark.executorEnv.HADOOP_HOME",
                new ConfigProperty("hadoop_home", HopsUtils.IGNORE, this.settings.getHadoopSymbolicLinkDir()));

        sparkMagicParams.put(Settings.SPARK_EXECUTORENV_LIBHDFS_OPTS,
                new ConfigProperty("spark_executorEnv_LIBHDFS_OPTS", HopsUtils.APPEND_SPACE,
                        "-Xmx96m -Dlog4j.configuration=" + this.settings.getHadoopSymbolicLinkDir()
                                + "/etc/hadoop/log4j.properties -Dhadoop.root.logger=ERROR,RFA"));

        sparkMagicParams.put("spark.executorEnv.PYSPARK_PYTHON", new ConfigProperty("pyspark_bin",
                HopsUtils.IGNORE, this.settings.getAnacondaProjectDir(project.getName()) + "/bin/python"));

        sparkMagicParams.put("spark.executorEnv.PYSPARK3_PYTHON", new ConfigProperty("pyspark_bin",
                HopsUtils.IGNORE, this.settings.getAnacondaProjectDir(project.getName()) + "/bin/python"));

        sparkMagicParams.put(Settings.SPARK_EXECUTORENV_LD_LIBRARY_PATH,
                new ConfigProperty("spark_executorEnv_LD_LIBRARY_PATH", HopsUtils.APPEND_PATH,
                        this.settings.getJavaHome() + "/jre/lib/amd64/server:" + tfLdLibraryPath
                                + this.settings.getHadoopSymbolicLinkDir() + "/lib/native"));

        sparkMagicParams.put("spark.executorEnv.HADOOP_HDFS_HOME",
                new ConfigProperty("hadoop_home", HopsUtils.IGNORE, this.settings.getHadoopSymbolicLinkDir()));

        // Export versions of software

        sparkMagicParams.put("spark.executorEnv.LIVY_VERSION",
                new ConfigProperty("livy_version", HopsUtils.IGNORE, this.settings.getLivyVersion()));

        sparkMagicParams.put("spark.executorEnv.SPARK_VERSION",
                new ConfigProperty("spark_version", HopsUtils.IGNORE, this.settings.getSparkVersion()));

        sparkMagicParams.put("spark.executorEnv.KAFKA_VERSION",
                new ConfigProperty("kafka_version", HopsUtils.IGNORE, this.settings.getKafkaVersion()));

        sparkMagicParams.put("spark.executorEnv.TENSORFLOW_VERSION",
                new ConfigProperty("tensorflow_version", HopsUtils.IGNORE, tfLibMapping.getTfVersion()));

        sparkMagicParams.put("spark.executorEnv.CUDA_VERSION",
                new ConfigProperty("cuda_version", HopsUtils.IGNORE, tfLibMapping.getCudaVersion()));

        sparkMagicParams.put("spark.executorEnv.HOPSWORKS_VERSION",
                new ConfigProperty("hopsworks_version", HopsUtils.IGNORE, this.settings.getHopsworksVersion()));

        sparkMagicParams.put("spark.executorEnv.HADOOP_VERSION",
                new ConfigProperty("hadoop_version", HopsUtils.IGNORE, this.settings.getHadoopVersion()));

        sparkMagicParams.put("spark.executorEnv.KAFKA_BROKERS",
                new ConfigProperty("kafka_brokers", HopsUtils.IGNORE, this.settings.getKafkaBrokersStr()));

        sparkMagicParams.put("spark.executorEnv.ELASTIC_ENDPOINT", new ConfigProperty("elastic_endpoint",
                HopsUtils.IGNORE, this.settings.getElasticRESTEndpoint()));

        sparkMagicParams.put("spark.executorEnv.HOPSWORKS_USER",
                new ConfigProperty("hopsworks_user", HopsUtils.IGNORE, realName));

        sparkMagicParams.put(Settings.SPARK_EXECUTOR_EXTRA_JAVA_OPTS, new ConfigProperty(
                "spark_executor_extraJavaOptions", HopsUtils.APPEND_SPACE, extraJavaOptions));

        sparkMagicParams.put("spark.executorEnv.HDFS_BASE_DIR",
                new ConfigProperty("spark_executorEnv_HDFS_BASE_DIR", HopsUtils.IGNORE,
                        "hdfs://Projects/" + project.getName() + js.getBaseDir()));

        sparkMagicParams.put("spark.pyspark.python", new ConfigProperty("pyspark_bin", HopsUtils.IGNORE,
                this.settings.getAnacondaProjectDir(project.getName()) + "/bin/python"));

        sparkMagicParams.put("spark.shuffle.service.enabled", new ConfigProperty("", HopsUtils.IGNORE, "true"));

        sparkMagicParams.put("spark.submit.deployMode", new ConfigProperty("", HopsUtils.IGNORE, "cluster"));

        sparkMagicParams.put("spark.tensorflow.application",
                new ConfigProperty("spark_tensorflow_application", HopsUtils.IGNORE,
                        Boolean.toString(isExperiment || isParallelExperiment || isDistributedTraining)));

        sparkMagicParams.put("spark.tensorflow.num.ps", new ConfigProperty("spark_tensorflow_num_ps",
                HopsUtils.IGNORE, (isParameterServerStrategy) ? Integer.toString(js.getNumTfPs()) : "0"));

        sparkMagicParams.put("spark.executor.gpus",
                new ConfigProperty("spark_executor_gpus", HopsUtils.IGNORE,
                        (isDistributedTraining || isParallelExperiment || isExperiment)
                                ? Integer.toString(js.getNumExecutorGpus())
                                : "0"));

        sparkMagicParams.put("spark.dynamicAllocation.enabled",
                new ConfigProperty("spark_dynamicAllocation_enabled", HopsUtils.OVERWRITE, Boolean.toString(
                        isSparkDynamic || isExperiment || isParallelExperiment || isDistributedTraining)));

        sparkMagicParams.put("spark.dynamicAllocation.initialExecutors", new ConfigProperty(
                "spark_dynamicAllocation_initialExecutors", HopsUtils.OVERWRITE,
                (isExperiment || isParallelExperiment || isMirroredStrategy) ? "0"
                        : (isParameterServerStrategy) ? Integer.toString(js.getNumExecutors() + js.getNumTfPs())
                                : (isCollectiveAllReduceStrategy) ? Integer.toString(js.getNumExecutors())
                                        : Integer.toString(js.getDynamicMinExecutors())));

        sparkMagicParams.put("spark.dynamicAllocation.minExecutors",
                new ConfigProperty("spark_dynamicAllocation_minExecutors", HopsUtils.OVERWRITE,
                        (isExperiment || isParallelExperiment || isDistributedTraining) ? "0"
                                : Integer.toString(js.getDynamicMinExecutors())));

        sparkMagicParams.put("spark.dynamicAllocation.maxExecutors",
                new ConfigProperty("spark_dynamicAllocation_maxExecutors", HopsUtils.OVERWRITE,
                        (isExperiment || isMirroredStrategy) ? "1"
                                : (isParallelExperiment) ? Integer.toString(js.getNumExecutors())
                                        : (isParameterServerStrategy)
                                                ? Integer.toString(js.getNumExecutors() + js.getNumTfPs())
                                                : (isCollectiveAllReduceStrategy)
                                                        ? Integer.toString(js.getNumExecutors())
                                                        : Integer.toString(js.getDynamicMaxExecutors())));

        sparkMagicParams.put("spark.dynamicAllocation.executorIdleTimeout",
                new ConfigProperty("spark_dynamicAllocation_executorIdleTimeout", HopsUtils.OVERWRITE,
                        (isParameterServerStrategy)
                                ? Integer.toString(((js.getNumExecutors() + js.getNumTfPs()) * 15) + 60) + "s"
                                : "60s"));

        // Blacklisting behaviour for TensorFlow on Spark (e.g. Hyperparameter search) to make it robust
        // Allow many failures on a particular node before blacklisting the node
        // Blacklist executor instantly

        sparkMagicParams.put("spark.blacklist.enabled",
                new ConfigProperty("spark_blacklist_enabled", HopsUtils.OVERWRITE,
                        ((isExperiment || isParallelExperiment) && js.getFaultTolerant()) ? "true" : "false"));

        // If any task fails on an executor - kill it instantly (need fresh working directory for each task)
        sparkMagicParams.put("spark.blacklist.task.maxTaskAttemptsPerExecutor",
                new ConfigProperty("spark_max_task_attempts_per_executor", HopsUtils.OVERWRITE, "1"));

        // Blacklist node after 2 tasks fails on it
        sparkMagicParams.put("spark.blacklist.task.maxTaskAttemptsPerNode",
                new ConfigProperty("spark_max_task_attempts_per_node", HopsUtils.OVERWRITE, "2"));

        // If any task fails on an executor within a stage - blacklist it
        sparkMagicParams.put("spark.blacklist.stage.maxFailedTasksPerExecutor",
                new ConfigProperty("spark_stage_max_failed_tasks_per_executor", HopsUtils.OVERWRITE, "1"));

        // Blacklist node after 2 tasks within a stage fails on it
        sparkMagicParams.put("spark.blacklist.stage.maxFailedExecutorsPerNode",
                new ConfigProperty("spark_stage_max_failed_executors_per_node", HopsUtils.OVERWRITE, "2"));

        // If any task fails on an executor within an application - blacklist it
        sparkMagicParams.put("spark.blacklist.application.maxFailedTasksPerExecutor", new ConfigProperty(
                "spark_application_max_failed_tasks_per_executor", HopsUtils.OVERWRITE, "1"));

        // If 2 task fails on a node within an application - blacklist it
        sparkMagicParams.put("spark.blacklist.application.maxFailedExecutorsPerNode", new ConfigProperty(
                "spark_application_max_failed_executors_per_node", HopsUtils.OVERWRITE, "2"));

        sparkMagicParams.put("spark.task.maxFailures",
                new ConfigProperty("spark_task_max_failures", HopsUtils.OVERWRITE,
                        (isParallelExperiment || isExperiment) && js.getFaultTolerant() ? "3"
                                : (isParallelExperiment || isExperiment || isDistributedTraining) ? "1" : "4"));

        // Always kill the blacklisted executors (further failures could be results of local files from the failed task)
        sparkMagicParams.put("spark.blacklist.killBlacklistedExecutors",
                new ConfigProperty("spark_kill_blacklisted_executors", HopsUtils.OVERWRITE,
                        (isExperiment || isParallelExperiment) ? "true" : "false"));

        // Merge system and user defined properties
        Map<String, String> sparkParamsAfterMerge = HopsUtils.mergeHopsworksAndUserParams(sparkMagicParams,
                userSparkProperties, false);

        StringBuilder sparkmagic_sb = ConfigFileGenerator
                .instantiateFromTemplate(ConfigFileGenerator.SPARKMAGIC_CONFIG_TEMPLATE, sparkParamsAfterMerge);
        createdSparkmagic = ConfigFileGenerator.createConfigFile(sparkmagic_config_file,
                sparkmagic_sb.toString());
    }
    if (!custom_js.exists()) {

        StringBuilder custom_js_sb = ConfigFileGenerator.instantiateFromTemplate(
                ConfigFileGenerator.JUPYTER_CUSTOM_TEMPLATE, "hadoop_home",
                this.settings.getHadoopSymbolicLinkDir());
        createdCustomJs = ConfigFileGenerator.createConfigFile(custom_js, custom_js_sb.toString());
    }

    // Add this local file to 'spark: file' to copy it to hdfs and localize it.
    return createdJupyter || createdSparkmagic || createdCustomJs;
}

From source file:kmi.taa.core.PredicateObjectRetriever.java

public String combineComment(String response) {
    String[] str = response.split(System.lineSeparator());
    StringBuilder builder = new StringBuilder();
    for (int i = 0; i < str.length; i++) {
        String[] line = str[i].split("\t");
        if (line[0].equalsIgnoreCase("") && line[line.length - 1].endsWith("details.")) {
            builder.append(line[line.length - 1].trim());
            builder.append(System.lineSeparator());
        } else if (line[0].equalsIgnoreCase("") && !line[line.length - 1].endsWith("details.")) {
            builder.append(line[line.length - 1].trim() + " ");
        } else if (line[0].contains("#comment")) {
            builder.append(str[i]);
        } else {
            builder.append(str[i]);
            builder.append(System.lineSeparator());
        }

    }
    return builder.toString();
}

From source file:ca.wumbo.doommanager.client.controller.ConsoleController.java

/**
 * Adds text to the textbox.
 * 
 * @param text
 *       The text to add. This should not be null (if so, a warning will be
 *       emitted) and will be discarded if it is.
 */
public void addText(String text) {
    if (text == null) {
        log.warn("Passed a null text log message to the console.");
        return;
    }
    textArea.appendText(text + System.lineSeparator());
}

From source file:ddf.catalog.impl.operations.OperationsCrudSupport.java

private Metacard generateMetacard(String mimeTypeRaw, String id, String fileName, Subject subject,
        Path tmpContentPath) throws MetacardCreationException, MimeTypeParseException {

    Metacard generatedMetacard = null;
    InputTransformer transformer = null;
    StringBuilder causeMessage = new StringBuilder("Could not create metacard with mimeType ");
    try {
        MimeType mimeType = new MimeType(mimeTypeRaw);

        List<InputTransformer> listOfCandidates = frameworkProperties.getMimeTypeToTransformerMapper()
                .findMatches(InputTransformer.class, mimeType);

        LOGGER.debug("List of matches for mimeType [{}]: {}", mimeType, listOfCandidates);

        for (InputTransformer candidate : listOfCandidates) {
            transformer = candidate;

            try (InputStream transformerStream = com.google.common.io.Files
                    .asByteSource(tmpContentPath.toFile()).openStream()) {
                generatedMetacard = transformer.transform(transformerStream);
            }
            if (generatedMetacard != null) {
                break;
            }
        }
    } catch (CatalogTransformerException | IOException e) {
        causeMessage.append(mimeTypeRaw).append(". Reason: ").append(System.lineSeparator())
                .append(e.getMessage());

        // The caught exception more than likely does not have the root cause message
        // that is needed to inform the caller as to why things have failed.  Therefore
        // we need to iterate through the chain of cause exceptions and gather up
        // all of their message details.
        Throwable cause = e.getCause();
        while (cause != null && cause != cause.getCause()) {
            causeMessage.append(System.lineSeparator()).append(cause.getMessage());
            cause = cause.getCause();
        }
        LOGGER.debug("Transformer [{}] could not create metacard.", transformer, e);
    }

    if (generatedMetacard == null) {
        throw new MetacardCreationException(causeMessage.toString());
    }

    if (id != null) {
        generatedMetacard.setAttribute(new AttributeImpl(Metacard.ID, id));
    } else {
        generatedMetacard
                .setAttribute(new AttributeImpl(Metacard.ID, UUID.randomUUID().toString().replaceAll("-", "")));
    }

    if (StringUtils.isBlank(generatedMetacard.getTitle())) {
        generatedMetacard.setAttribute(new AttributeImpl(Metacard.TITLE, fileName));
    }

    String name = Optional.ofNullable(SubjectUtils.getName(subject)).orElse("");

    generatedMetacard.setAttribute(new AttributeImpl(Metacard.POINT_OF_CONTACT, name));

    return generatedMetacard;
}