Example usage for org.apache.commons.io FilenameUtils EXTENSION_SEPARATOR

List of usage examples for org.apache.commons.io FilenameUtils EXTENSION_SEPARATOR

Introduction

On this page you can find example usage for org.apache.commons.io FilenameUtils EXTENSION_SEPARATOR.

Prototype

char EXTENSION_SEPARATOR

To view the source code for org.apache.commons.io FilenameUtils EXTENSION_SEPARATOR, click the Source Link below.

Click Source Link

Document

The extension separator character.

Usage

From source file:com.googlecode.bpmn_simulator.gui.dialogs.ImageExportChooser.java

@Override
public File getSelectedFile() {
    final File chosen = super.getSelectedFile();
    if (chosen == null) {
        return null;
    }
    final String format = getSelectedImageFormat();
    // Append the selected image format as the file extension unless the chosen
    // name already carries it. NOTE(review): FilenameUtils.isExtension() compares
    // case-sensitively, so "IMG.PNG" with format "png" still gets ".png"
    // appended — confirm this is the intended behavior.
    if (FilenameUtils.isExtension(chosen.getName(), format)) {
        return chosen;
    }
    return new File(chosen.getPath() + FilenameUtils.EXTENSION_SEPARATOR + format);
}

From source file:edu.udo.scaffoldhunter.model.clustering.evaluation.FileSaverMetaModule.java

/**
 * Save the each result to a single File
 * /*from w w  w.  j  av a 2  s.  c o m*/
 * @param results
 */
private void saveSingleResults(Collection<EvaluationResult> results, boolean csv) {
    for (EvaluationResult result : results) {
        String uniqueFile = getUniqueBaseName(result);
        saveToFile(uniqueFile + FilenameUtils.EXTENSION_SEPARATOR + FilenameUtils.getExtension(path),
                result.toString());
        if (csv) {
            saveToFile(uniqueFile + FilenameUtils.EXTENSION_SEPARATOR + "csv", result.getCSVString());
        }
    }
}

From source file:com.github.maven_nar.cpptasks.compiler.CommandLineCompiler.java

@Override
public String[] getOutputFileNames(final String inputFile, final VersionInfo versionInfo) {
    // Only recognized input files (bid > 1) produce an output name.
    if (bid(inputFile) <= 1) {
        return new String[0];
    }
    final String baseName = getBaseOutputName(inputFile);
    try {
        // Disambiguate identically named sources from different directories by
        // inserting a hash of the canonical path before the output suffix.
        final String pathHash = Integer.toHexString(new File(inputFile).getCanonicalPath().hashCode());
        return new String[] { baseName + FilenameUtils.EXTENSION_SEPARATOR + pathHash + getOutputSuffix() };
    } catch (final IOException e) {
        throw new BuildException("Source file not found", e);
    }
}

From source file:edu.udo.scaffoldhunter.model.clustering.evaluation.FileSaverMetaModule.java

/**
 * Constructs a CSV overview of all results, one header row plus one row per
 * result, and saves it next to the configured output path with a ".csv"
 * extension.
 *
 * @param results the evaluation results to aggregate
 */
private void saveCSVOrderedBySubsetSize(Collection<EvaluationResult> results) {
    // Union of all result keys, sorted, so every row uses the same column order.
    TreeSet<String> keys = new TreeSet<>();
    for (EvaluationResult result : results) {
        keys.addAll(result.getResults().keySet());
    }

    StringBuilder csvString = new StringBuilder();

    // Header row. Prefixing each key with a comma avoids the original
    // append-then-deleteCharAt trailing-comma dance.
    csvString.append("subset size");
    for (String key : keys) {
        csvString.append(",").append(key);
    }

    // Data rows: subset size first, then one (possibly empty) cell per key.
    for (EvaluationResult result : results) {
        csvString.append(System.lineSeparator());
        csvString.append(result.getSubsetSize());
        for (String key : keys) {
            String value = result.getResults().get(key);
            csvString.append(",");
            if (value != null) {
                csvString.append(value);
            }
        }
    }

    saveToFile(
            FilenameUtils.concat(FilenameUtils.getFullPathNoEndSeparator(path), FilenameUtils.getBaseName(path))
                    + FilenameUtils.EXTENSION_SEPARATOR + "csv",
            csvString.toString());
}

From source file:com.ibm.watson.developer_cloud.professor_languo.pipeline.QuestionSetManager.java

/**
 * This function is responsible for parsing a duplicate Stack Exchange thread TSV file produced by
 * {@link StackExchangeThreadSerializer}, and partitioning each such thread into the training set,
 * test set, or validation set. In addition, the corresponding row of the TSV file will be written
 * out to a training-, test-, or validation-set-specific TSV file in the same directory as the
 * input TSV file.
 *
 * @param dupQuestionFile - A TSV file containing duplicate {@link StackExchangeThread} records
 * @param trainTestValidateCumulativeProbs - A CDF of the desired proportion of training, test,
 *        and validation set records (cumulative, ascending; the last entry is assumed to be 1.0
 *        — see the assert in the validation branch)
 * @throws PipelineException if a record cannot be deserialized, partitioned, or written out
 */
private void parseTsvAndPartitionRecords(File dupQuestionFile, double[] trainTestValidateCumulativeProbs)
        throws PipelineException {
    // Open the TSV file for parsing, and CSVPrinters for outputting train,
    // test, and validation set TSV files. The output files share the input's
    // base name and extension, with a set-specific suffix inserted in between.
    // try-with-resources guarantees the reader and all three printers are
    // closed even when an exception escapes the loop below.
    String baseName = FilenameUtils.removeExtension(dupQuestionFile.getAbsolutePath());
    String extension = FilenameUtils.getExtension(dupQuestionFile.getAbsolutePath());
    try (FileReader reader = new FileReader(dupQuestionFile);
            CSVPrinter trainSetPrinter = new CSVPrinter(
                    new FileWriter(baseName + StackExchangeConstants.DUP_THREAD_TSV_TRAIN_FILE_SUFFIX
                            + FilenameUtils.EXTENSION_SEPARATOR + extension),
                    CSVFormat.TDF.withHeader(CorpusBuilder.getTsvColumnHeaders()));
            CSVPrinter testSetPrinter = new CSVPrinter(
                    new FileWriter(baseName + StackExchangeConstants.DUP_THREAD_TSV_TEST_FILE_SUFFIX
                            + FilenameUtils.EXTENSION_SEPARATOR + extension),
                    CSVFormat.TDF.withHeader(CorpusBuilder.getTsvColumnHeaders()));
            CSVPrinter validationSetPrinter = new CSVPrinter(
                    new FileWriter(baseName + StackExchangeConstants.DUP_THREAD_TSV_VALIDATE_FILE_SUFFIX
                            + FilenameUtils.EXTENSION_SEPARATOR + extension),
                    CSVFormat.TDF.withHeader(CorpusBuilder.getTsvColumnHeaders()))) {

        // Parse the duplicate thread TSV file
        CSVParser parser = CSVFormat.TDF.withHeader().parse(reader);

        // Iterate over each CSV record, and place into a desired partition
        // (train, test, or validation)
        Iterator<CSVRecord> recordIterator = parser.iterator();
        while (recordIterator.hasNext()) {
            CSVRecord record = recordIterator.next();

            // Get the StackExchangeThread associated with this record, and
            // create a question from it
            StackExchangeThread duplicateThread = StackExchangeThreadSerializer.deserializeThreadFromBinFile(
                    record.get(CorpusBuilder.TSV_COL_HEADER_SERIALIZED_FILE_PATH));
            StackExchangeQuestion duplicateQuestion = new StackExchangeQuestion(duplicateThread);
            String parentId = record.get(CorpusBuilder.TSV_COL_HEADER_PARENT_ID);

            // Now drop this question into a partition, and write it to a
            // corresponding TSV file. A single uniform random draw against the
            // CDF decides the partition for this record.
            double p = rng.nextDouble();
            if (p <= trainTestValidateCumulativeProbs[0]) {
                // This record goes in the training set
                if (!addQuestionToSet(duplicateQuestion, parentId, this.trainingSet)) {
                    throw new PipelineException(
                            MessageFormat.format(Messages.getString("RetrieveAndRank.TRAINING_SET_FAILED_Q"), //$NON-NLS-1$
                                    duplicateThread.getId()));
                }
                trainSetPrinter.printRecord((Object[]) convertRecordToArray(record));
            } else if (p <= trainTestValidateCumulativeProbs[1]) {
                // This record goes in the test set
                if (!addQuestionToSet(duplicateQuestion, parentId, this.testSet)) {
                    throw new PipelineException(
                            MessageFormat.format(Messages.getString("RetrieveAndRank.TEST_SET_FAILED_Q"), //$NON-NLS-1$
                                    duplicateThread.getId()));
                }
                testSetPrinter.printRecord((Object[]) convertRecordToArray(record));
            } else {
                // This record goes in the validation set
                assert (p <= trainTestValidateCumulativeProbs[2]);
                if (!addQuestionToSet(duplicateQuestion, parentId, this.validationSet)) {
                    throw new PipelineException(
                            MessageFormat.format(Messages.getString("RetrieveAndRank.VALIDATION_SET_FAILED_Q"), //$NON-NLS-1$
                                    duplicateThread.getId()));
                }
                validationSetPrinter.printRecord((Object[]) convertRecordToArray(record));
            }
        }

        // Flush all the printers prior to closing
        trainSetPrinter.flush();
        testSetPrinter.flush();
        validationSetPrinter.flush();
    } catch (IOException | IngestionException e) {
        throw new PipelineException(e);
    }
}

From source file:edu.oregonstate.eecs.mcplan.abstraction.Experiments.java

/**
 * Derives the dataset name for a given iteration by inserting {@code _<iter>}
 * before the file extension; {@code iter == -1} returns the base name unchanged.
 * NOTE(review): FilenameUtils.getBaseName() drops any directory component, so
 * "dir/data.csv" yields "data_1.csv" — confirm callers expect the path to be
 * stripped.
 *
 * @param base the original dataset name (possibly with path and extension)
 * @param iter the iteration index, or -1 for no iteration suffix
 * @return the derived dataset name
 */
private static String deriveDatasetName(final String base, final int iter) {
    if (iter == -1) {
        return base;
    }
    final String name = FilenameUtils.getBaseName(base);
    final String extension = FilenameUtils.getExtension(base);
    final String suffix = extension.isEmpty() ? "" : FilenameUtils.EXTENSION_SEPARATOR + extension;
    return name + "_" + iter + suffix;
}

From source file:org.apache.hadoop.hive.ql.exec.tez.TezSessionState.java

/**
 * Returns a local resource representing a jar.
 * This resource will be used to execute the plan on the cluster.
 * @param localJarPath Local path to the jar to be localized.
 * @return LocalResource corresponding to the localized hive exec resource.
 * @throws IOException when any file system related call fails.
 * @throws LoginException when we are unable to determine the user.
 * @throws URISyntaxException when current jar location cannot be determined.
 */
private LocalResource createJarLocalResource(String localJarPath)
        throws IOException, LoginException, IllegalArgumentException, FileNotFoundException {
    // TODO Reduce the number of lookups that happen here. This shouldn't go to HDFS for each call.
    // The hiveJarDir can be determined once per client.
    FileStatus destDirStatus = utils.getHiveJarDirectory(conf);
    assert destDirStatus != null;
    Path destDirPath = destDirStatus.getPath();

    Path localFile = new Path(localJarPath);
    String sha = getSha(localFile);

    String destFileName = localFile.getName();

    // Now, try to find the file based on SHA and name. Currently we require exact name match.
    // We could also allow cutting off versions and other stuff provided that SHA matches...
    // Insert "-<sha>" before the extension. Only re-append the separator when an
    // extension actually exists; the previous unconditional append produced a
    // dangling trailing dot (e.g. "tool-abc.") for extensionless file names.
    String extension = FilenameUtils.getExtension(destFileName);
    destFileName = FilenameUtils.removeExtension(destFileName) + "-" + sha
            + (extension.isEmpty() ? "" : FilenameUtils.EXTENSION_SEPARATOR + extension);

    if (LOG.isDebugEnabled()) {
        LOG.debug("The destination file name for [" + localJarPath + "] is " + destFileName);
    }

    // TODO: if this method is ever called on more than one jar, getting the dir and the
    //       list need to be refactored out to be done only once.
    Path destFile = new Path(destDirPath.toString() + "/" + destFileName);
    return utils.localizeResource(localFile, destFile, LocalResourceType.FILE, conf);
}

From source file:org.apache.hadoop.hive.ql.udf.generic.GenericUDTFGetSplits.java

/**
 * Returns a local resource representing a jar. This resource will be used to
 * execute the plan on the cluster.
 * 
 * @param localJarPath
 *          Local path to the jar to be localized.
 * @return LocalResource corresponding to the localized hive exec resource.
 * @throws IOException
 *           when any file system related call fails.
 * @throws LoginException
 *           when we are unable to determine the user.
 * @throws URISyntaxException
 *           when current jar location cannot be determined.
 */
private LocalResource createJarLocalResource(String localJarPath, DagUtils utils, Configuration conf)
        throws IOException, LoginException, IllegalArgumentException, FileNotFoundException {
    FileStatus destDirStatus = utils.getHiveJarDirectory(conf);
    assert destDirStatus != null;
    Path destDirPath = destDirStatus.getPath();

    Path localFile = new Path(localJarPath);
    String sha = getSha(localFile, conf);

    String destFileName = localFile.getName();

    // Now, try to find the file based on SHA and name. Currently we require
    // exact name match.
    // We could also allow cutting off versions and other stuff provided that
    // SHA matches...
    // Insert "-<sha>" before the extension. Only re-append the separator when an
    // extension actually exists; the previous unconditional append produced a
    // dangling trailing dot (e.g. "tool-abc.") for extensionless file names.
    String extension = FilenameUtils.getExtension(destFileName);
    destFileName = FilenameUtils.removeExtension(destFileName) + "-" + sha
            + (extension.isEmpty() ? "" : FilenameUtils.EXTENSION_SEPARATOR + extension);

    // TODO: if this method is ever called on more than one jar, getting the dir
    // and the
    // list need to be refactored out to be done only once.
    Path destFile = new Path(destDirPath.toString() + "/" + destFileName);
    return utils.localizeResource(localFile, destFile, LocalResourceType.FILE, conf);
}

From source file:org.codice.ddf.configuration.admin.ExportMigrationConfigurationAdminContext.java

/**
 * Builds an export entry for the given configuration, resolving the persistence
 * strategy from the file extension and falling back to the default extension
 * when no strategy is registered for it.
 *
 * @param configuration the configuration to build an entry for
 * @return the export entry, or {@code null} when not even the default
 *         persistence strategy is defined
 */
@Nullable
private ExportMigrationConfigurationAdminEntry getEntry(Configuration configuration) {
    Path path = getPathFromConfiguration(configuration);
    final String pathString = path.toString();
    final String extension = FilenameUtils.getExtension(pathString);
    PersistenceStrategy strategy = admin.getPersister(extension);

    if (strategy == null) {
        // Unknown extension: warn once per extension, then fall back to the default.
        if (warnedExtensions.add(extension)) {
            context.getReport()
                    .record(new MigrationWarning(
                            String.format("Persistence strategy [%s] is not defined; defaulting to [%s]",
                                    extension, defaultFileExtension)));
        }
        strategy = admin.getPersister(defaultFileExtension);
        if (strategy == null) {
            // Even the default strategy is missing: warn once globally and give up.
            if (!warnedDefaultExtension) {
                this.warnedDefaultExtension = true;
                context.getReport().record(new MigrationWarning(String
                        .format("Default persistence strategy [%s] is not defined", defaultFileExtension)));
            }
            return null;
        }
        path = Paths.get(pathString + FilenameUtils.EXTENSION_SEPARATOR + defaultFileExtension);
    }
    return new ExportMigrationConfigurationAdminEntry(context.getEntry(path), configuration, strategy);
}

From source file:org.codice.ddf.configuration.admin.ExportMigrationConfigurationAdminContext.java

/**
 * Constructs the export file path for the given configuration: the PID (or, for
 * managed service factories, factoryPid + '-' + random alias) followed by the
 * default file extension.
 *
 * @param configuration the configuration to derive a file name for
 * @return the constructed relative path
 */
private Path constructPathForBasename(Configuration configuration) {
    final String factoryPid = configuration.getFactoryPid();
    final String basename;

    if (factoryPid == null) {
        // Plain managed service: the PID alone identifies the file.
        basename = configuration.getPid();
    } else {
        // Managed service factory. Felix Fileinstall uses the hyphen as separator
        // between factoryPid and alias, so for safety all hyphens are removed
        // from the generated UUID.
        final String alias = UUID.randomUUID().toString().replaceAll("-", "");
        basename = factoryPid + '-' + alias;
    }
    return Paths.get(basename + FilenameUtils.EXTENSION_SEPARATOR + defaultFileExtension);
}