Example usage for java.io File pathSeparator

Introduction

This page collects example usages of java.io.File.pathSeparator drawn from open-source projects.

Prototype

public static final String pathSeparator

Document

The system-dependent path-separator character, represented as a string for convenience.
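
A minimal sketch (not taken from the projects below) of how pathSeparator differs from File.separator: pathSeparator delimits entries in a path list such as PATH or java.class.path (":" on Unix-like systems, ";" on Windows), while separator delimits components within a single path.

import java.io.File;

public class PathSeparatorDemo {
    public static void main(String[] args) {
        System.out.println("pathSeparator = " + File.pathSeparator); // ":" or ";"
        System.out.println("separator     = " + File.separator); // "/" or "\"

        // Typical use: splitting a path list, e.g. the JVM classpath
        String classpath = System.getProperty("java.class.path");
        for (String entry : classpath.split(File.pathSeparator)) {
            System.out.println("classpath entry: " + entry);
        }
    }
}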

Usage

From source file:net.sourceforge.pmd.util.fxdesigner.SourceEditorController.java

public void setAuxclasspathFiles(String files) {
    List<File> newVal = Arrays.stream(files.split(File.pathSeparator)).map(File::new)
            .collect(Collectors.toList());
    auxclasspathFiles.setValue(newVal);
}
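
The string passed to setAuxclasspathFiles is expected to contain entries joined with File.pathSeparator. A hedged sketch of a caller (the controller variable and jar paths are illustrative, not part of the PMD source):

String files = String.join(File.pathSeparator,
        "/opt/libs/commons-lang3.jar",
        "/opt/libs/guava.jar");
sourceEditorController.setAuxclasspathFiles(files);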

From source file:org.apache.hive.hcatalog.templeton.AppConfig.java

private static void dumpConfig(Configuration conf, StringBuilder sb) {
    Iterator<Map.Entry<String, String>> configIter = conf.iterator();
    List<Map.Entry<String, String>> configVals = new ArrayList<>();
    while (configIter.hasNext()) {
        configVals.add(configIter.next());
    }
    Collections.sort(configVals, new Comparator<Map.Entry<String, String>>() {
        @Override
        public int compare(Map.Entry<String, String> ent, Map.Entry<String, String> ent2) {
            return ent.getKey().compareTo(ent2.getKey());
        }
    });
    for (Map.Entry<String, String> entry : configVals) {
        //use get() to make sure variable substitution works
        if (entry.getKey().toLowerCase().contains("path")) {
            StringTokenizer st = new StringTokenizer(conf.get(entry.getKey()), File.pathSeparator);
            sb.append(entry.getKey()).append("=\n");
            while (st.hasMoreTokens()) {
                sb.append("    ").append(st.nextToken()).append(File.pathSeparator).append('\n');
            }
        } else {
            sb.append(entry.getKey()).append('=').append(conf.get(entry.getKey())).append('\n');
        }
    }
}
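
For a property whose key contains "path", the method prints the key on its own line, then one entry per line, each followed by the separator. A hypothetical rendering (property name and values are illustrative), given templeton.libjars=/usr/lib/a.jar:/usr/lib/b.jar on a Unix-like system:

templeton.libjars=
    /usr/lib/a.jar:
    /usr/lib/b.jar: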

From source file:org.jiemamy.eclipse.core.ui.composer.DbImporterWizardPage.java

private void connectionSucceeded() {
    btnTest.setText(Messages.DbImportWizardPage_btn_reconfigure);

    cmbDialect.setEnabled(false);
    lstDriverJars.setEnabled(false);
    btnAddJar.setEnabled(false);
    btnRemoveJar.setEnabled(false);
    cmbDriverClass.setEnabled(false);
    txtUri.setEnabled(false);
    txtUsername.setEnabled(false);
    txtPassword.setEnabled(false);
    txtSchema.setEnabled(false);
    btnImportDataSet.setEnabled(false);

    settings.put("cmbDialect", cmbDialect.getText());
    settings.put("lstDriverJars", StringUtils.join(lstDriverJars.getItems(), File.pathSeparator));
    settings.put("cmbDriverClass", cmbDriverClass.getText());
    settings.put("txtUri", txtUri.getText());
    settings.put("txtUsername", txtUsername.getText());
    settings.put("txtPassword", txtPassword.getText());
    settings.put("txtSchema", txtSchema.getText());
    settings.put("btnImportDataSet", btnImportDataSet.getSelection());

    setPageComplete(true);
}
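
Since the driver jar list is persisted as a single File.pathSeparator-joined string, restoring it would split on the same separator. A minimal sketch, assuming the dialog-settings key used above (the restore logic itself is illustrative, not part of the Jiemamy source):

String joined = settings.get("lstDriverJars");
if (joined != null && !joined.isEmpty()) {
    for (String jarPath : joined.split(File.pathSeparator)) {
        lstDriverJars.add(jarPath);
    }
}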

From source file:com.wintindustries.pfserver.PFDefaultLibrary.FileServices.PFFileServiceLocal.PFFileServiceLocal.java

@Override
public synchronized void write(PFFile file, PFFileAcsessOptions options) {
    // check for any unrecognized file access option parameters and log any unsupported operations
    checkForUnsupportedFileAcsessOptions(options);

    PFLocation location = file.getLocation();
    String locationPath = convertPFLocationToPath(location);
    File fileToWrite = new File(locationPath);

    // first figure out if file already exists 
    if (fileToWrite.exists()) {
        // here we process any options that have to do with writing similarly named files
        for (PFFileAcsessOptions.option opt : options.getOptions()) {
            // prepend an incrementing number to the filename until the resulting name does not exist, then use it as the new name for the document

            if (opt == PFFileAcsessOptions.option.kFileNameIncrementNumber) {
                int incrementNumber = 0; // the current increment prepended to the filename
                String finalFilename = "(" + incrementNumber + ")" + file.getName(); // the final filename that will be used

                // note: File.pathSeparator is the path-list delimiter; File.separator was likely intended (see the note after this example)
                while (new File(convertPFLocationToPath(file.getParent().getLocation()) + File.pathSeparator
                        + finalFilename).exists()) {
                    // increment first, then rebuild the candidate name, so the loop actually advances
                    incrementNumber++;
                    finalFilename = "(" + incrementNumber + ")" + file.getName();
                }

                // set the resulting name as the name of the file
                file.setName(finalFilename);
                continue;
            }
        }

    } else {
        try {
            // if the file does not already exist, just write the file 
            FileOutputStream out;
            location = file.getLocation();
            locationPath = convertPFLocationToPath(location);
            fileToWrite = new File(locationPath);
            out = new FileOutputStream(fileToWrite);

            file.getData().reset();

            int len = -1;
            byte[] bytes = new byte[32 * 1024];

            len = file.getData().read(bytes);
            while (len != -1) {
                out.write(bytes, 0, len);
                len = file.getData().read(bytes);
            }

            out.close();

        } catch (Exception ex) {
            throw new RuntimeException(ex);
        }
    }

}
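
Note that this example joins a directory and a file name with File.pathSeparator, which is the path-list delimiter (":" or ";") rather than a name separator, so the resulting path will not resolve as intended on most platforms. The conventional idiom is File.separator or the two-argument File constructor, as in this minimal sketch (paths are illustrative):

File parentDir = new File("/data/documents");
// Preferred: let File join the components portably
File child = new File(parentDir, "report.txt");
// Equivalent string concatenation uses File.separator, not File.pathSeparator
String childPath = parentDir.getPath() + File.separator + "report.txt";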

From source file:org.codehaus.mojo.aspectj.AbstractAjcCompiler.java

/**
 * Assembles a complete ajc compiler arguments list.
 *
 * @throws MojoExecutionException error in configuration
 */
protected void assembleArguments() throws MojoExecutionException {
    // Add classpath
    ajcOptions.add("-classpath");
    ajcOptions.add(AjcHelper.createClassPath(project, null, getOutputDirectories()));

    // Add boot classpath
    if (null != bootclasspath) {
        ajcOptions.add("-bootclasspath");
        ajcOptions.add(bootclasspath);
    }

    // Add warn option
    if (null != warn) {
        ajcOptions.add("-warn:" + warn);
    }

    // Add artifacts or directories to weave
    String joinedWeaveDirectories = null;
    if (weaveDirectories != null) {
        joinedWeaveDirectories = StringUtils.join(weaveDirectories, File.pathSeparator);
    }
    addModulesArgument("-inpath", ajcOptions, weaveDependencies, joinedWeaveDirectories,
            "dependencies and/or directories to weave");

    // Add library artifacts 
    addModulesArgument("-aspectpath", ajcOptions, aspectLibraries, getAdditionalAspectPaths(),
            "an aspect library");

    //add target dir argument
    ajcOptions.add("-d");
    ajcOptions.add(getOutputDirectories().get(0));

    // Add all the files to be included in the build,
    if (null != ajdtBuildDefFile) {
        resolvedIncludes = AjcHelper.getBuildFilesForAjdtFile(ajdtBuildDefFile, basedir);
    } else {
        resolvedIncludes = AjcHelper.getBuildFilesForSourceDirs(getSourceDirectories(), this.includes,
                this.excludes);
    }
    ajcOptions.addAll(resolvedIncludes);
}

From source file:org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.BlockPoolSlice.java

private void addReplicaToReplicasMap(Block block, ReplicaMap volumeMap, boolean isFinalized)
        throws IOException {
    ReplicaInfo newReplica = null;
    long blockId = block.getBlockId();
    long genStamp = block.getGenerationStamp();
    if (isFinalized) {
        newReplica = new FinalizedReplica(blockId, block.getNumBytes(), genStamp, volume,
                DatanodeUtil.idToBlockDir(finalizedDir, blockId));
    } else {
        File file = new File(rbwDir, block.getBlockName());
        boolean loadRwr = true;
        // note: File.pathSeparator is the path-list delimiter (":" or ";"); File.separator is likely what was intended here
        File restartMeta = new File(file.getParent() + File.pathSeparator + "." + file.getName() + ".restart");
        Scanner sc = null;
        try {
            sc = new Scanner(restartMeta, "UTF-8");
            // The restart meta file exists
            if (sc.hasNextLong() && (sc.nextLong() > Time.now())) {
                // It didn't expire. Load the replica as a RBW.
                // We don't know the expected block length, so just use 0
                // and don't reserve any more space for writes.
                newReplica = new ReplicaBeingWritten(blockId, validateIntegrityAndSetLength(file, genStamp),
                        genStamp, volume, file.getParentFile(), null, 0);
                loadRwr = false;
            }
            sc.close();
            if (!restartMeta.delete()) {
                FsDatasetImpl.LOG.warn("Failed to delete restart meta file: " + restartMeta.getPath());
            }
        } catch (FileNotFoundException fnfe) {
            // nothing to do here
        } finally {
            if (sc != null) {
                sc.close();
            }
        }
        // Restart meta doesn't exist or expired.
        if (loadRwr) {
            newReplica = new ReplicaWaitingToBeRecovered(blockId, validateIntegrityAndSetLength(file, genStamp),
                    genStamp, volume, file.getParentFile());
        }
    }

    ReplicaInfo oldReplica = volumeMap.get(bpid, newReplica.getBlockId());
    if (oldReplica == null) {
        volumeMap.add(bpid, newReplica);
    } else {
        FsDatasetImpl.LOG.warn("Two block files with the same block id exist " + "on disk: "
                + oldReplica.getBlockFile() + " and " + newReplica.getBlockFile());
    }
}

From source file:org.apache.druid.indexing.overlord.ForkingTaskRunner.java

@Override
public ListenableFuture<TaskStatus> run(final Task task) {
    synchronized (tasks) {
        if (!tasks.containsKey(task.getId())) {
            tasks.put(task.getId(), new ForkingTaskRunnerWorkItem(task, exec.submit(new Callable<TaskStatus>() {
                @Override
                public TaskStatus call() {
                    final String attemptUUID = UUID.randomUUID().toString();
                    final File taskDir = taskConfig.getTaskDir(task.getId());
                    final File attemptDir = new File(taskDir, attemptUUID);

                    final ProcessHolder processHolder;
                    final String childHost = node.getHost();
                    int childPort = -1;
                    int tlsChildPort = -1;

                    if (node.isEnablePlaintextPort()) {
                        childPort = portFinder.findUnusedPort();
                    }

                    if (node.isEnableTlsPort()) {
                        tlsChildPort = portFinder.findUnusedPort();
                    }

                    final TaskLocation taskLocation = TaskLocation.create(childHost, childPort, tlsChildPort);

                    try {
                        final Closer closer = Closer.create();
                        try {
                            if (!attemptDir.mkdirs()) {
                                throw new IOE("Could not create directories: %s", attemptDir);
                            }

                            final File taskFile = new File(taskDir, "task.json");
                            final File statusFile = new File(attemptDir, "status.json");
                            final File logFile = new File(taskDir, "log");
                            final File reportsFile = new File(attemptDir, "report.json");

                            // time to adjust process holders
                            synchronized (tasks) {
                                final ForkingTaskRunnerWorkItem taskWorkItem = tasks.get(task.getId());

                                // check for null before dereferencing taskWorkItem
                                if (taskWorkItem == null) {
                                    log.makeAlert("WTF?! TaskInfo disappeared!").addData("task", task.getId())
                                            .emit();
                                    throw new ISE("TaskInfo disappeared for task[%s]!", task.getId());
                                }

                                if (taskWorkItem.shutdown) {
                                    throw new IllegalStateException("Task has been shut down!");
                                }

                                if (taskWorkItem.processHolder != null) {
                                    log.makeAlert("WTF?! TaskInfo already has a processHolder")
                                            .addData("task", task.getId()).emit();
                                    throw new ISE("TaskInfo already has processHolder for task[%s]!",
                                            task.getId());
                                }

                                final List<String> command = Lists.newArrayList();
                                final String taskClasspath;
                                if (task.getClasspathPrefix() != null && !task.getClasspathPrefix().isEmpty()) {
                                    taskClasspath = Joiner.on(File.pathSeparator)
                                            .join(task.getClasspathPrefix(), config.getClasspath());
                                } else {
                                    taskClasspath = config.getClasspath();
                                }

                                command.add(config.getJavaCommand());
                                command.add("-cp");
                                command.add(taskClasspath);

                                Iterables.addAll(command, new QuotableWhiteSpaceSplitter(config.getJavaOpts()));
                                Iterables.addAll(command, config.getJavaOptsArray());

                                // Override task specific javaOpts
                                Object taskJavaOpts = task
                                        .getContextValue(ForkingTaskRunnerConfig.JAVA_OPTS_PROPERTY);
                                if (taskJavaOpts != null) {
                                    Iterables.addAll(command,
                                            new QuotableWhiteSpaceSplitter((String) taskJavaOpts));
                                }

                                for (String propName : props.stringPropertyNames()) {
                                    for (String allowedPrefix : config.getAllowedPrefixes()) {
                                        // See https://github.com/apache/incubator-druid/issues/1841
                                        if (propName.startsWith(allowedPrefix)
                                                && !ForkingTaskRunnerConfig.JAVA_OPTS_PROPERTY.equals(propName)
                                                && !ForkingTaskRunnerConfig.JAVA_OPTS_ARRAY_PROPERTY
                                                        .equals(propName)) {
                                            command.add(StringUtils.format("-D%s=%s", propName,
                                                    props.getProperty(propName)));
                                        }
                                    }
                                }

                                // Override child JVM specific properties
                                for (String propName : props.stringPropertyNames()) {
                                    if (propName.startsWith(CHILD_PROPERTY_PREFIX)) {
                                        command.add(StringUtils.format("-D%s=%s",
                                                propName.substring(CHILD_PROPERTY_PREFIX.length()),
                                                props.getProperty(propName)));
                                    }
                                }

                                // Override task specific properties
                                final Map<String, Object> context = task.getContext();
                                if (context != null) {
                                    for (String propName : context.keySet()) {
                                        if (propName.startsWith(CHILD_PROPERTY_PREFIX)) {
                                            command.add(StringUtils.format("-D%s=%s",
                                                    propName.substring(CHILD_PROPERTY_PREFIX.length()),
                                                    task.getContextValue(propName)));
                                        }
                                    }
                                }

                                // Add dataSource, taskId and taskType for metrics or logging
                                command.add(
                                        StringUtils.format("-D%s%s=%s", MonitorsConfig.METRIC_DIMENSION_PREFIX,
                                                DruidMetrics.DATASOURCE, task.getDataSource()));
                                command.add(
                                        StringUtils.format("-D%s%s=%s", MonitorsConfig.METRIC_DIMENSION_PREFIX,
                                                DruidMetrics.TASK_ID, task.getId()));
                                command.add(
                                        StringUtils.format("-D%s%s=%s", MonitorsConfig.METRIC_DIMENSION_PREFIX,
                                                DruidMetrics.TASK_TYPE, task.getType()));

                                command.add(StringUtils.format("-Ddruid.host=%s", childHost));
                                command.add(StringUtils.format("-Ddruid.plaintextPort=%d", childPort));
                                command.add(StringUtils.format("-Ddruid.tlsPort=%d", tlsChildPort));
                                /**
                                 * These are not enabled by default, to allow the user to either set or not set them.
                                 * Users are strongly encouraged to set them in druid.indexer.runner.javaOpts.
                                 * See org.apache.druid.concurrent.TaskThreadPriority#getThreadPriorityFromTaskPriority(int)
                                 * for more information
                                 command.add("-XX:+UseThreadPriorities");
                                 command.add("-XX:ThreadPriorityPolicy=42");
                                 */

                                command.add("org.apache.druid.cli.Main");
                                command.add("internal");
                                command.add("peon");
                                command.add(taskFile.toString());
                                command.add(statusFile.toString());
                                command.add(reportsFile.toString());
                                String nodeType = task.getNodeType();
                                if (nodeType != null) {
                                    command.add("--nodeType");
                                    command.add(nodeType);
                                }

                                if (!taskFile.exists()) {
                                    jsonMapper.writeValue(taskFile, task);
                                }

                                log.info("Running command: %s", Joiner.on(" ").join(command));
                                taskWorkItem.processHolder = new ProcessHolder(
                                        new ProcessBuilder(ImmutableList.copyOf(command))
                                                .redirectErrorStream(true).start(),
                                        logFile, taskLocation.getHost(), taskLocation.getPort(),
                                        taskLocation.getTlsPort());

                                processHolder = taskWorkItem.processHolder;
                                processHolder.registerWithCloser(closer);
                            }

                            TaskRunnerUtils.notifyLocationChanged(listeners, task.getId(), taskLocation);
                            TaskRunnerUtils.notifyStatusChanged(listeners, task.getId(),
                                    TaskStatus.running(task.getId()));

                            log.info("Logging task %s output to: %s", task.getId(), logFile);
                            boolean runFailed = true;

                            final ByteSink logSink = Files.asByteSink(logFile, FileWriteMode.APPEND);

                            // This will block for a while. So we append the thread information with more details
                            final String priorThreadName = Thread.currentThread().getName();
                            Thread.currentThread()
                                    .setName(StringUtils.format("%s-[%s]", priorThreadName, task.getId()));

                            try (final OutputStream toLogfile = logSink.openStream()) {
                                ByteStreams.copy(processHolder.process.getInputStream(), toLogfile);
                                final int statusCode = processHolder.process.waitFor();
                                log.info("Process exited with status[%d] for task: %s", statusCode,
                                        task.getId());
                                if (statusCode == 0) {
                                    runFailed = false;
                                }
                            } finally {
                                Thread.currentThread().setName(priorThreadName);
                                // Upload task logs
                                taskLogPusher.pushTaskLog(task.getId(), logFile);
                                if (reportsFile.exists()) {
                                    taskLogPusher.pushTaskReports(task.getId(), reportsFile);
                                }
                            }

                            TaskStatus status;
                            if (!runFailed) {
                                // Process exited successfully
                                status = jsonMapper.readValue(statusFile, TaskStatus.class);
                            } else {
                                // Process exited unsuccessfully
                                status = TaskStatus.failure(task.getId());
                            }

                            TaskRunnerUtils.notifyStatusChanged(listeners, task.getId(), status);
                            return status;
                        } catch (Throwable t) {
                            throw closer.rethrow(t);
                        } finally {
                            closer.close();
                        }
                    } catch (Throwable t) {
                        log.info(t, "Exception caught during execution");
                        throw Throwables.propagate(t);
                    } finally {
                        try {
                            synchronized (tasks) {
                                final ForkingTaskRunnerWorkItem taskWorkItem = tasks.remove(task.getId());
                                if (taskWorkItem != null && taskWorkItem.processHolder != null) {
                                    taskWorkItem.processHolder.process.destroy();
                                }
                                if (!stopping) {
                                    saveRunningTasks();
                                }
                            }

                            if (node.isEnablePlaintextPort()) {
                                portFinder.markPortUnused(childPort);
                            }
                            if (node.isEnableTlsPort()) {
                                portFinder.markPortUnused(tlsChildPort);
                            }

                            try {
                                if (!stopping && taskDir.exists()) {
                                    log.info("Removing task directory: %s", taskDir);
                                    FileUtils.deleteDirectory(taskDir);
                                }
                            } catch (Exception e) {
                                log.makeAlert(e, "Failed to delete task directory")
                                        .addData("taskDir", taskDir.toString()).addData("task", task.getId())
                                        .emit();
                            }
                        } catch (Exception e) {
                            log.error(e, "Suppressing exception caught while cleaning up task");
                        }
                    }
                }
            })));
        }
        saveRunningTasks();
        return tasks.get(task.getId()).getResult();
    }
}

From source file:org.apache.druid.initialization.Initialization.java

public static List<URL> getURLsForClasspath(String cp) {
    try {
        String[] paths = cp.split(File.pathSeparator);

        List<URL> urls = new ArrayList<>();
        for (String path : paths) {
            File f = new File(path);
            if ("*".equals(f.getName())) {
                File parentDir = f.getParentFile();
                if (parentDir.isDirectory()) {
                    File[] jars = parentDir.listFiles(new FilenameFilter() {
                        @Override
                        public boolean accept(File dir, String name) {
                            return name != null && (name.endsWith(".jar") || name.endsWith(".JAR"));
                        }
                    });
                    for (File jar : jars) {
                        urls.add(jar.toURI().toURL());
                    }
                }
            } else {
                urls.add(new File(path).toURI().toURL());
            }
        }
        return urls;
    } catch (IOException ex) {
        throw Throwables.propagate(ex);
    }
}
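
A typical call site hands this method the JVM's own classpath, which is a File.pathSeparator-delimited list (the caller shown here is illustrative, not part of the Druid source):

List<URL> urls = Initialization.getURLsForClasspath(System.getProperty("java.class.path"));
for (URL url : urls) {
    System.out.println(url);
}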

From source file:org.apache.hive.spark.client.AbstractSparkClient.java

private Future<Void> startDriver(final RpcServer rpcServer, final String clientId, final String secret)
        throws IOException {
    final String serverAddress = rpcServer.getAddress();
    final String serverPort = String.valueOf(rpcServer.getPort());

    String sparkHome = getSparkHome();

    String sparkLogDir = conf.get("hive.spark.log.dir");
    if (sparkLogDir == null) {
        if (sparkHome == null) {
            sparkLogDir = "./target/";
        } else {
            sparkLogDir = sparkHome + "/logs/";
        }
    }

    String osxTestOpts = "";
    if (Strings.nullToEmpty(System.getProperty("os.name")).toLowerCase().contains("mac")) {
        osxTestOpts = Strings.nullToEmpty(System.getenv(OSX_TEST_OPTS));
    }

    String driverJavaOpts = Joiner.on(" ").skipNulls().join("-Dhive.spark.log.dir=" + sparkLogDir, osxTestOpts,
            conf.get(DRIVER_OPTS_KEY));
    String executorJavaOpts = Joiner.on(" ").skipNulls().join("-Dhive.spark.log.dir=" + sparkLogDir,
            osxTestOpts, conf.get(EXECUTOR_OPTS_KEY));

    // Create a file with all the job properties to be read by spark-submit. Change the
    // file's permissions so that only the owner can read it. This avoid having the
    // connection secret show up in the child process's command line.
    File properties = File.createTempFile("spark-submit.", ".properties");
    if (!properties.setReadable(false) || !properties.setReadable(true, true)) {
        throw new IOException("Cannot change permissions of job properties file.");
    }
    properties.deleteOnExit();

    Properties allProps = new Properties();
    // first load the defaults from spark-defaults.conf if available
    try {
        URL sparkDefaultsUrl = Thread.currentThread().getContextClassLoader()
                .getResource("spark-defaults.conf");
        if (sparkDefaultsUrl != null) {
            LOG.info("Loading spark defaults configs from: " + sparkDefaultsUrl);
            allProps.load(new ByteArrayInputStream(Resources.toByteArray(sparkDefaultsUrl)));
        }
    } catch (Exception e) {
        String msg = "Exception trying to load spark-defaults.conf: " + e;
        throw new IOException(msg, e);
    }
    // then load the SparkClientImpl config
    for (Map.Entry<String, String> e : conf.entrySet()) {
        allProps.put(e.getKey(), conf.get(e.getKey()));
    }
    allProps.put(SparkClientFactory.CONF_CLIENT_ID, clientId);
    allProps.put(SparkClientFactory.CONF_KEY_SECRET, secret);
    allProps.put(DRIVER_OPTS_KEY, driverJavaOpts);
    allProps.put(EXECUTOR_OPTS_KEY, executorJavaOpts);

    String isTesting = conf.get("spark.testing");
    if (isTesting != null && isTesting.equalsIgnoreCase("true")) {
        String hiveHadoopTestClasspath = Strings.nullToEmpty(System.getenv("HIVE_HADOOP_TEST_CLASSPATH"));
        if (!hiveHadoopTestClasspath.isEmpty()) {
            String extraDriverClasspath = Strings.nullToEmpty((String) allProps.get(DRIVER_EXTRA_CLASSPATH));
            if (extraDriverClasspath.isEmpty()) {
                allProps.put(DRIVER_EXTRA_CLASSPATH, hiveHadoopTestClasspath);
            } else {
                extraDriverClasspath = extraDriverClasspath.endsWith(File.pathSeparator) ? extraDriverClasspath
                        : extraDriverClasspath + File.pathSeparator;
                allProps.put(DRIVER_EXTRA_CLASSPATH, extraDriverClasspath + hiveHadoopTestClasspath);
            }

            String extraExecutorClasspath = Strings
                    .nullToEmpty((String) allProps.get(EXECUTOR_EXTRA_CLASSPATH));
            if (extraExecutorClasspath.isEmpty()) {
                allProps.put(EXECUTOR_EXTRA_CLASSPATH, hiveHadoopTestClasspath);
            } else {
                extraExecutorClasspath = extraExecutorClasspath.endsWith(File.pathSeparator)
                        ? extraExecutorClasspath
                        : extraExecutorClasspath + File.pathSeparator;
                allProps.put(EXECUTOR_EXTRA_CLASSPATH, extraExecutorClasspath + hiveHadoopTestClasspath);
            }
        }
    }

    Writer writer = new OutputStreamWriter(new FileOutputStream(properties), Charsets.UTF_8);
    try {
        allProps.store(writer, "Spark Context configuration");
    } finally {
        writer.close();
    }

    // Define how to pass options to the child process. If launching in client (or local)
    // mode, the driver options need to be passed directly on the command line. Otherwise,
    // SparkSubmit will take care of that for us.
    String master = conf.get("spark.master");
    Preconditions.checkArgument(master != null, "spark.master is not defined.");
    String deployMode = conf.get(SPARK_DEPLOY_MODE);

    if (SparkClientUtilities.isYarnClusterMode(master, deployMode)) {
        String executorCores = conf.get("spark.executor.cores");
        if (executorCores != null) {
            addExecutorCores(executorCores);
        }

        String executorMemory = conf.get("spark.executor.memory");
        if (executorMemory != null) {
            addExecutorMemory(executorMemory);
        }

        String numOfExecutors = conf.get("spark.executor.instances");
        if (numOfExecutors != null) {
            addNumExecutors(numOfExecutors);
        }
    }
    // The options --principal/--keytab do not work with --proxy-user in spark-submit.sh
    // (see HIVE-15485, SPARK-5493, SPARK-19143), so Hive can only support doAs or
    // delegation token renewal, but not both. Since doAs is the more common case, if both
    // are needed we favor doAs. So when doAs is enabled we use the kinit command;
    // otherwise we pass the principal/keytab to Spark to support token renewal for
    // long-running applications.
    if ("kerberos".equals(hiveConf.get(HADOOP_SECURITY_AUTHENTICATION))) {
        String principal = SecurityUtil
                .getServerPrincipal(hiveConf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL), "0.0.0.0");
        String keyTabFile = hiveConf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB);
        boolean isDoAsEnabled = hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS);
        if (StringUtils.isNotBlank(principal) && StringUtils.isNotBlank(keyTabFile)) {
            addKeytabAndPrincipal(isDoAsEnabled, keyTabFile, principal);
        }
    }
    if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS)) {
        try {
            String currentUser = Utils.getUGI().getShortUserName();
            // do not do impersonation in CLI mode
            if (!currentUser.equals(System.getProperty("user.name"))) {
                LOG.info("Attempting impersonation of " + currentUser);
                addProxyUser(currentUser);
            }
        } catch (Exception e) {
            String msg = "Cannot obtain username: " + e;
            throw new IllegalStateException(msg, e);
        }
    }

    String regStr = conf.get("spark.kryo.registrator");
    if (HIVE_KRYO_REG_NAME.equals(regStr)) {
        addJars(SparkClientUtilities.findKryoRegistratorJar(hiveConf));
    }

    addPropertiesFile(properties.getAbsolutePath());
    addClass(RemoteDriver.class.getName());

    String jar = "spark-internal";
    if (SparkContext.jarOfClass(this.getClass()).isDefined()) {
        jar = SparkContext.jarOfClass(this.getClass()).get();
    }
    addExecutableJar(jar);

    addAppArg(RemoteDriver.REMOTE_DRIVER_HOST_CONF);
    addAppArg(serverAddress);
    addAppArg(RemoteDriver.REMOTE_DRIVER_PORT_CONF);
    addAppArg(serverPort);

    //hive.spark.* keys are passed down to the RemoteDriver via REMOTE_DRIVER_CONF
    // so that they are not used in sparkContext but only in remote driver,
    //as --properties-file contains the spark.* keys that are meant for SparkConf object.
    for (String hiveSparkConfKey : RpcConfiguration.HIVE_SPARK_RSC_CONFIGS) {
        String value = RpcConfiguration.getValue(hiveConf, hiveSparkConfKey);
        addAppArg(RemoteDriver.REMOTE_DRIVER_CONF);
        addAppArg(String.format("%s=%s", hiveSparkConfKey, value));
    }

    return launchDriver(isTesting, rpcServer, clientId);
}

From source file:org.apache.flink.runtime.taskmanager.TaskManager.java

public TaskManager(ExecutionMode executionMode, JobManagerProtocol jobManager,
        InputSplitProviderProtocol splitProvider, ChannelLookupProtocol channelLookup,
        AccumulatorProtocol accumulators, InetSocketAddress jobManagerAddress,
        InetAddress taskManagerBindAddress) throws Exception {
    if (executionMode == null || jobManager == null || splitProvider == null || channelLookup == null
            || accumulators == null) {
        throw new NullPointerException();
    }

    LOG.info("TaskManager execution mode: " + executionMode);

    this.executionMode = executionMode;
    this.jobManager = jobManager;
    this.lookupService = channelLookup;
    this.globalInputSplitProvider = splitProvider;
    this.accumulatorProtocolProxy = accumulators;

    // initialize the number of slots
    {
        int slots = GlobalConfiguration.getInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, -1);
        if (slots == -1) {
            slots = 1;
            LOG.info("Number of task slots not configured. Creating one task slot.");
        } else if (slots <= 0) {
            throw new Exception("Illegal value for the number of task slots: " + slots);
        } else {
            LOG.info("Creating " + slots + " task slot(s).");
        }
        this.numberOfSlots = slots;
    }

    int ipcPort = GlobalConfiguration.getInteger(ConfigConstants.TASK_MANAGER_IPC_PORT_KEY, -1);
    int dataPort = GlobalConfiguration.getInteger(ConfigConstants.TASK_MANAGER_DATA_PORT_KEY, -1);
    if (ipcPort == -1) {
        ipcPort = getAvailablePort();
    }
    if (dataPort == -1) {
        dataPort = getAvailablePort();
    }

    this.localInstanceConnectionInfo = new InstanceConnectionInfo(taskManagerBindAddress, ipcPort, dataPort);
    LOG.info("TaskManager connection information:" + this.localInstanceConnectionInfo);

    // Start local RPC server, give it the number of threads as we have slots
    try {
        // some magic number for the handler threads
        final int numHandlers = Math.min(numberOfSlots, 2 * Hardware.getNumberCPUCores());

        this.taskManagerServer = RPC.getServer(this, taskManagerBindAddress.getHostAddress(), ipcPort,
                numHandlers);
        this.taskManagerServer.start();
    } catch (IOException e) {
        LOG.error("Failed to start TaskManager server. " + e.getMessage(), e);
        throw new Exception("Failed to start taskmanager server. " + e.getMessage(), e);
    }

    // Load profiler if it should be used
    if (GlobalConfiguration.getBoolean(ProfilingUtils.ENABLE_PROFILING_KEY, false)) {

        final String profilerClassName = GlobalConfiguration.getString(ProfilingUtils.TASKMANAGER_CLASSNAME_KEY,
                "org.apache.flink.runtime.profiling.impl.TaskManagerProfilerImpl");

        this.profiler = ProfilingUtils.loadTaskManagerProfiler(profilerClassName,
                jobManagerAddress.getAddress(), this.localInstanceConnectionInfo);

        if (this.profiler == null) {
            LOG.error("Cannot find class name for the profiler.");
        } else {
            LOG.info("Profiling of jobs is enabled.");
        }
    } else {
        this.profiler = null;
        LOG.info("Profiling of jobs is disabled.");
    }

    // Get the directory for storing temporary files
    final String[] tmpDirPaths = GlobalConfiguration
            .getString(ConfigConstants.TASK_MANAGER_TMP_DIR_KEY, ConfigConstants.DEFAULT_TASK_MANAGER_TMP_PATH)
            .split(",|" + File.pathSeparator);

    checkTempDirs(tmpDirPaths);

    int numBuffers = GlobalConfiguration.getInteger(ConfigConstants.TASK_MANAGER_NETWORK_NUM_BUFFERS_KEY,
            ConfigConstants.DEFAULT_TASK_MANAGER_NETWORK_NUM_BUFFERS);

    int bufferSize = GlobalConfiguration.getInteger(ConfigConstants.TASK_MANAGER_NETWORK_BUFFER_SIZE_KEY,
            ConfigConstants.DEFAULT_TASK_MANAGER_NETWORK_BUFFER_SIZE);

    // Initialize the channel manager
    try {
        NetworkConnectionManager networkConnectionManager = null;

        switch (executionMode) {
        case LOCAL:
            networkConnectionManager = new LocalConnectionManager();
            break;
        case CLUSTER:
            int numInThreads = GlobalConfiguration.getInteger(
                    ConfigConstants.TASK_MANAGER_NET_NUM_IN_THREADS_KEY,
                    ConfigConstants.DEFAULT_TASK_MANAGER_NET_NUM_IN_THREADS);

            int numOutThreads = GlobalConfiguration.getInteger(
                    ConfigConstants.TASK_MANAGER_NET_NUM_OUT_THREADS_KEY,
                    ConfigConstants.DEFAULT_TASK_MANAGER_NET_NUM_OUT_THREADS);

            int lowWaterMark = GlobalConfiguration.getInteger(
                    ConfigConstants.TASK_MANAGER_NET_NETTY_LOW_WATER_MARK,
                    ConfigConstants.DEFAULT_TASK_MANAGER_NET_NETTY_LOW_WATER_MARK);

            int highWaterMark = GlobalConfiguration.getInteger(
                    ConfigConstants.TASK_MANAGER_NET_NETTY_HIGH_WATER_MARK,
                    ConfigConstants.DEFAULT_TASK_MANAGER_NET_NETTY_HIGH_WATER_MARK);

            networkConnectionManager = new NettyConnectionManager(localInstanceConnectionInfo.address(),
                    localInstanceConnectionInfo.dataPort(), bufferSize, numInThreads, numOutThreads,
                    lowWaterMark, highWaterMark);
            break;
        }

        channelManager = new ChannelManager(lookupService, localInstanceConnectionInfo, numBuffers, bufferSize,
                networkConnectionManager);
    } catch (IOException ioe) {
        LOG.error(StringUtils.stringifyException(ioe));
        throw new Exception("Failed to instantiate ChannelManager.", ioe);
    }

    // initialize the memory manager
    {
        // Check whether the memory size has been explicitly configured.
        final long configuredMemorySize = GlobalConfiguration
                .getInteger(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY, -1);
        final long memorySize;

        if (configuredMemorySize == -1) {
            // no manually configured memory. take a relative fraction of the free heap space
            float fraction = GlobalConfiguration.getFloat(ConfigConstants.TASK_MANAGER_MEMORY_FRACTION_KEY,
                    ConfigConstants.DEFAULT_MEMORY_MANAGER_MEMORY_FRACTION);
            memorySize = (long) (EnvironmentInformation.getSizeOfFreeHeapMemoryWithDefrag() * fraction);
            LOG.info("Using " + fraction + " of the free heap space for managed memory.");
        } else if (configuredMemorySize <= 0) {
            throw new Exception("Invalid value for Memory Manager memory size: " + configuredMemorySize);
        } else {
            memorySize = configuredMemorySize << 20;
        }

        final int pageSize = GlobalConfiguration.getInteger(
                ConfigConstants.TASK_MANAGER_NETWORK_BUFFER_SIZE_KEY,
                ConfigConstants.DEFAULT_TASK_MANAGER_NETWORK_BUFFER_SIZE);

        // Initialize the memory manager
        LOG.info("Initializing memory manager with " + (memorySize >>> 20) + " megabytes of memory. "
                + "Page size is " + pageSize + " bytes.");

        try {
            @SuppressWarnings("unused")
            final boolean lazyAllocation = GlobalConfiguration.getBoolean(
                    ConfigConstants.TASK_MANAGER_MEMORY_LAZY_ALLOCATION_KEY,
                    ConfigConstants.DEFAULT_TASK_MANAGER_MEMORY_LAZY_ALLOCATION);

            this.memoryManager = new DefaultMemoryManager(memorySize, this.numberOfSlots, pageSize);
        } catch (Throwable t) {
            LOG.error(
                    "Unable to initialize memory manager with " + (memorySize >>> 20) + " megabytes of memory.",
                    t);
            throw new Exception("Unable to initialize memory manager.", t);
        }
    }

    this.hardwareDescription = HardwareDescription.extractFromSystem(this.memoryManager.getMemorySize());

    // Determine the port of the BLOB server and register it with the library cache manager
    {
        final int blobPort = this.jobManager.getBlobServerPort();

        if (blobPort == -1) {
            LOG.warn("Unable to determine BLOB server address: User library download will not be available");
            this.libraryCacheManager = new FallbackLibraryCacheManager();
        } else {
            final InetSocketAddress blobServerAddress = new InetSocketAddress(jobManagerAddress.getAddress(),
                    blobPort);
            LOG.info("Determined BLOB server address to be " + blobServerAddress);

            this.libraryCacheManager = new BlobLibraryCacheManager(new BlobCache(blobServerAddress),
                    GlobalConfiguration.getConfiguration());
        }
    }
    this.ioManager = new IOManagerAsync(tmpDirPaths);

    // start the heart beats
    {
        final long interval = GlobalConfiguration.getInteger(
                ConfigConstants.TASK_MANAGER_HEARTBEAT_INTERVAL_KEY,
                ConfigConstants.DEFAULT_TASK_MANAGER_HEARTBEAT_INTERVAL);

        this.heartbeatThread = new Thread() {
            @Override
            public void run() {
                registerAndRunHeartbeatLoop(interval, MAX_LOST_HEART_BEATS);
            }
        };
        this.heartbeatThread.setName("Heartbeat Thread");
        this.heartbeatThread.start();
    }

    // --------------------------------------------------------------------
    // Memory Usage
    // --------------------------------------------------------------------

    final MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();
    final List<GarbageCollectorMXBean> gcMXBeans = ManagementFactory.getGarbageCollectorMXBeans();

    LOG.info(getMemoryUsageStatsAsString(memoryMXBean));

    boolean startMemoryUsageLogThread = GlobalConfiguration.getBoolean(
            ConfigConstants.TASK_MANAGER_DEBUG_MEMORY_USAGE_START_LOG_THREAD,
            ConfigConstants.DEFAULT_TASK_MANAGER_DEBUG_MEMORY_USAGE_START_LOG_THREAD);

    if (startMemoryUsageLogThread) {
        final int logIntervalMs = GlobalConfiguration.getInteger(
                ConfigConstants.TASK_MANAGER_DEBUG_MEMORY_USAGE_LOG_INTERVAL_MS,
                ConfigConstants.DEFAULT_TASK_MANAGER_DEBUG_MEMORY_USAGE_LOG_INTERVAL_MS);

        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    while (!isShutDown()) {
                        Thread.sleep(logIntervalMs);

                        LOG.info(getMemoryUsageStatsAsString(memoryMXBean));
                        LOG.info(getGarbageCollectorStatsAsString(gcMXBeans));
                    }
                } catch (InterruptedException e) {
                    LOG.warn("Unexpected interruption of memory usage logger thread.");
                }
            }
        }).start();
    }
}