Example usage for org.apache.commons.io FilenameUtils normalize

Introduction

On this page you can find example usages of org.apache.commons.io.FilenameUtils.normalize(String filename, boolean unixSeparator).

Prototype

public static String normalize(String filename, boolean unixSeparator) 

Document

Normalizes a path, removing double and single dot path steps; the unixSeparator flag controls whether the result uses the Unix forward slash (true) or the Windows backslash (false).
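
For reference, a minimal sketch of this two-argument overload (the class name and sample paths are illustrative), showing how dot segments are resolved and how unixSeparator selects the output separator:

import org.apache.commons.io.FilenameUtils;

public class NormalizeDemo {
    public static void main(String[] args) {
        // Dot segments are resolved; true forces forward slashes in the result.
        System.out.println(FilenameUtils.normalize("/foo/bar/../baz", true));   // /foo/baz
        // false requests Windows-style separators instead.
        System.out.println(FilenameUtils.normalize("C:\\foo\\.\\bar", false));  // C:\foo\bar
        // Paths that would climb above the root cannot be normalized and yield null.
        System.out.println(FilenameUtils.normalize("foo/../../bar", true));     // null
    }
}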

Usage

From source file:org.craftercms.studio.impl.v1.service.content.ObjectMetadataManagerImpl.java

@Override
@ValidateParams
public void updateObjectPath(@ValidateStringParam(name = "site") String site,
        @ValidateSecurePathParam(name = "oldPath") String oldPath,
        @ValidateSecurePathParam(name = "newPath") String newPath) {
    newPath = FilenameUtils.normalize(newPath, true);
    oldPath = FilenameUtils.normalize(oldPath, true);
    Map<String, Object> params = new HashMap<>();
    params.put("site", site);
    params.put("oldPath", oldPath);
    params.put("newPath", newPath);
    itemMetadataMapper.updateObjectPath(params);
}

From source file:org.craftercms.studio.impl.v1.service.content.ObjectMetadataManagerImpl.java

@Override
@ValidateParams
public void clearRenamed(@ValidateStringParam(name = "site") String site,
        @ValidateSecurePathParam(name = "path") String path) {
    path = FilenameUtils.normalize(path, true);
    Map<String, Object> params = new HashMap<>();
    params.put("renamed", false);
    params.put(ItemMetadata.PROP_OLD_URL, "");
    setObjectMetadata(site, path, params);
}

From source file:org.craftercms.studio.impl.v1.service.content.ObjectMetadataManagerImpl.java

@Override
@ValidateParams
public void updateCommitId(@ValidateStringParam(name = "site") String site,
        @ValidateSecurePathParam(name = "path") String path,
        @ValidateStringParam(name = "commitId") String commitId) {
    path = FilenameUtils.normalize(path, true);
    Map<String, Object> params = new HashMap<>();
    params.put("site", site);
    params.put("path", path);
    params.put("commitId", commitId);
    itemMetadataMapper.updateCommitId(params);
}

From source file:org.datacleaner.util.FileResolver.java

public String toPath(File file) {
    if (file == null) {
        return null;
    }

    String path = file.getPath();

    // Make relative if possible
    final String basePath = FilenameUtils.normalize(_baseDir.getAbsolutePath(), true);
    final String filePath = FilenameUtils.normalize(file.getAbsolutePath(), true);

    final boolean absolute;
    if (filePath.startsWith(basePath)) {
        path = filePath.substring(basePath.length());
        absolute = false;
    } else {
        absolute = file.isAbsolute();
    }

    path = StringUtils.replaceAll(path, "\\", "/");
    if (!absolute) {
        // some normalization (because filenames are often used to compare
        // datastores)
        if (path.startsWith("/")) {
            path = path.substring(1);
        }
        if (path.startsWith("./")) {
            path = path.substring(2);
        }
    }
    return path;
}
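
The same relativization idea can be shown in isolation; the helper below is a hypothetical sketch (not part of the DataCleaner API) that mirrors the base-path comparison above:

import java.io.File;
import org.apache.commons.io.FilenameUtils;

public class RelativizeSketch {
    // Hypothetical helper mirroring the logic above: normalize both paths to
    // forward slashes so the prefix comparison works the same on any platform.
    static String relativize(File baseDir, File file) {
        final String basePath = FilenameUtils.normalize(baseDir.getAbsolutePath(), true);
        final String filePath = FilenameUtils.normalize(file.getAbsolutePath(), true);
        if (filePath.startsWith(basePath)) {
            String relative = filePath.substring(basePath.length());
            return relative.startsWith("/") ? relative.substring(1) : relative;
        }
        return filePath; // not under the base directory; keep the absolute form
    }

    public static void main(String[] args) {
        // Illustrative paths only.
        System.out.println(relativize(new File("/data/project"), new File("/data/project/in/orders.csv")));
        // -> in/orders.csv
    }
}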

From source file:org.datavec.spark.storage.SparkStorageUtils.java

/**
 * Save a {@code JavaRDD<List<Writable>>} to a Hadoop {@link org.apache.hadoop.io.SequenceFile}. Each record is given
 * a unique (but noncontiguous) {@link LongWritable} key, and values are stored as {@link RecordWritable} instances.
 * <p>
 * Use {@link #restoreSequenceFile(String, JavaSparkContext)} to restore values saved with this method.
 *
 * @param path           Path to save the sequence file
 * @param rdd            RDD to save
 * @param maxOutputFiles Nullable. If non-null: first coalesce the RDD to the specified size (number of partitions)
 *                       to limit the maximum number of output sequence files
 * @see #saveSequenceFileSequences(String, JavaRDD)
 * @see #saveMapFile(String, JavaRDD)
 */
public static void saveSequenceFile(String path, JavaRDD<List<Writable>> rdd, Integer maxOutputFiles) {
    path = FilenameUtils.normalize(path, true);
    if (maxOutputFiles != null) {
        rdd = rdd.coalesce(maxOutputFiles);
    }
    JavaPairRDD<List<Writable>, Long> dataIndexPairs = rdd.zipWithUniqueId(); //Note: Long values are unique + NOT contiguous; more efficient than zipWithIndex
    JavaPairRDD<LongWritable, RecordWritable> keyedByIndex = dataIndexPairs
            .mapToPair(new RecordSavePrepPairFunction());

    keyedByIndex.saveAsNewAPIHadoopFile(path, LongWritable.class, RecordWritable.class,
            SequenceFileOutputFormat.class);
}
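
A hedged round-trip sketch for the method above (the local master URL and /tmp output path are placeholders; restoreSequenceFile is the counterpart named in the javadoc):

import java.util.Arrays;
import java.util.List;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.datavec.api.writable.IntWritable;
import org.datavec.api.writable.Text;
import org.datavec.api.writable.Writable;
import org.datavec.spark.storage.SparkStorageUtils;

public class SequenceFileRoundTrip {
    public static void main(String[] args) {
        JavaSparkContext sc = new JavaSparkContext("local[*]", "seqfile-demo");
        JavaRDD<List<Writable>> records = sc.parallelize(Arrays.asList(
                Arrays.<Writable>asList(new Text("first"), new IntWritable(1)),
                Arrays.<Writable>asList(new Text("second"), new IntWritable(2))));

        // Save; passing null for maxOutputFiles skips the coalesce step.
        SparkStorageUtils.saveSequenceFile("/tmp/records-seq", records, null);

        // Restore; the keys are the unique (non-contiguous) indices assigned on save.
        JavaRDD<List<Writable>> restored =
                SparkStorageUtils.restoreSequenceFile("/tmp/records-seq", sc).values();
        System.out.println(restored.count());
        sc.stop();
    }
}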

From source file:org.datavec.spark.storage.SparkStorageUtils.java

/**
 * Save a {@code JavaRDD<List<List<Writable>>>} to a Hadoop {@link org.apache.hadoop.io.SequenceFile}. Each record
 * is given a unique (but noncontiguous) {@link LongWritable} key, and values are stored as {@link SequenceRecordWritable} instances.
 * <p>
 * Use {@link #restoreSequenceFileSequences(String, JavaSparkContext)} to restore values saved with this method.
 *
 * @param path           Path to save the sequence file
 * @param rdd            RDD to save
 * @param maxOutputFiles Nullable. If non-null: first coalesce the RDD to the specified size (number of partitions)
 *                       to limit the maximum number of output sequence files
 * @see #saveSequenceFile(String, JavaRDD)
 * @see #saveMapFileSequences(String, JavaRDD)
 */
public static void saveSequenceFileSequences(String path, JavaRDD<List<List<Writable>>> rdd,
        Integer maxOutputFiles) {
    path = FilenameUtils.normalize(path, true);
    if (maxOutputFiles != null) {
        rdd = rdd.coalesce(maxOutputFiles);
    }
    JavaPairRDD<List<List<Writable>>, Long> dataIndexPairs = rdd.zipWithUniqueId(); //Note: Long values are unique + NOT contiguous; more efficient than zipWithIndex
    JavaPairRDD<LongWritable, SequenceRecordWritable> keyedByIndex = dataIndexPairs
            .mapToPair(new SequenceRecordSavePrepPairFunction());

    keyedByIndex.saveAsNewAPIHadoopFile(path, LongWritable.class, SequenceRecordWritable.class,
            SequenceFileOutputFormat.class);
}

From source file:org.datavec.spark.storage.SparkStorageUtils.java

/**
 * Save a {@code JavaRDD<List<Writable>>} to a Hadoop {@link org.apache.hadoop.io.MapFile}. Each record is
 * given a <i>unique and contiguous</i> {@link LongWritable} key, and values are stored as
 * {@link RecordWritable} instances.<br>
 * <b>Note</b>: If contiguous keys are not required, using a sequence file instead is preferable from a performance
 * point of view. Contiguous keys are often only required for non-Spark use cases, such as with
 * {@link org.datavec.hadoop.records.reader.mapfile.MapFileRecordReader}
 * <p>
 * Use {@link #restoreMapFile(String, JavaSparkContext)} to restore values saved with this method.
 *
 * @param path           Path to save the MapFile
 * @param rdd            RDD to save
 * @param c              Configuration object, used to customise options for the map file
 * @param maxOutputFiles Nullable. If non-null: first coalesce the RDD to the specified size (number of partitions)
 *                       to limit the maximum number of output map files
 * @see #saveMapFileSequences(String, JavaRDD)
 * @see #saveSequenceFile(String, JavaRDD)
 */
public static void saveMapFile(String path, JavaRDD<List<Writable>> rdd, Configuration c,
        Integer maxOutputFiles) {
    path = FilenameUtils.normalize(path, true);
    if (maxOutputFiles != null) {
        rdd = rdd.coalesce(maxOutputFiles);
    }
    JavaPairRDD<List<Writable>, Long> dataIndexPairs = rdd.zipWithIndex(); //Note: Long values are unique + contiguous, but requires a count
    JavaPairRDD<LongWritable, RecordWritable> keyedByIndex = dataIndexPairs
            .mapToPair(new RecordSavePrepPairFunction());

    keyedByIndex.saveAsNewAPIHadoopFile(path, LongWritable.class, RecordWritable.class,
            MapFileOutputFormat.class, c);
}

From source file:org.datavec.spark.storage.SparkStorageUtils.java

/**
 * Restore a {@code JavaPairRDD<Long,List<Writable>>} previously saved with {@link #saveMapFile(String, JavaRDD)}<br>
 * Note that if the keys are not required, simply use {@code restoreMapFile(...).values()}
 *
 * @param path Path of the MapFile
 * @param sc   Spark context
 * @return The restored RDD, with their unique indices as the key
 */
public static JavaPairRDD<Long, List<Writable>> restoreMapFile(String path, JavaSparkContext sc) {
    Configuration c = new Configuration();
    c.set(FileInputFormat.INPUT_DIR, FilenameUtils.normalize(path, true));
    JavaPairRDD<LongWritable, RecordWritable> pairRDD = sc.newAPIHadoopRDD(c, SequenceFileInputFormat.class,
            LongWritable.class, RecordWritable.class);

    return pairRDD.mapToPair(new RecordLoadPairFunction());
}
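
For completeness, a hedged sketch of the save/restore pair for map files (the path is a placeholder, and records/sc are assumed to exist as in the sequence-file sketch above); unlike the sequence-file variant, the restored keys are contiguous:

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.datavec.api.writable.Writable;
import org.datavec.spark.storage.SparkStorageUtils;

public class MapFileRoundTrip {
    static JavaRDD<List<Writable>> roundTrip(JavaSparkContext sc, JavaRDD<List<Writable>> records) {
        // Default Hadoop configuration; customise MapFile options here if needed.
        Configuration conf = new Configuration();

        // Save with contiguous LongWritable keys (zipWithIndex); null = no coalesce.
        SparkStorageUtils.saveMapFile("/tmp/records-map", records, conf, null);

        // Restore as (index, record) pairs; drop the keys if they are not needed.
        JavaPairRDD<Long, List<Writable>> byIndex =
                SparkStorageUtils.restoreMapFile("/tmp/records-map", sc);
        return byIndex.values();
    }
}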

From source file:org.datavec.spark.storage.SparkStorageUtils.java

/**
 * Save a {@code JavaRDD<List<List<Writable>>>} to a Hadoop {@link org.apache.hadoop.io.MapFile}. Each record is
 * given a <i>unique and contiguous</i> {@link LongWritable} key, and values are stored as
 * {@link SequenceRecordWritable} instances.<br>
 * <b>Note</b>: If contiguous keys are not required, using a sequence file instead is preferable from a performance
 * point of view. Contiguous keys are often only required for non-Spark use cases, such as with
 * {@link org.datavec.hadoop.records.reader.mapfile.MapFileSequenceRecordReader}<br>
 * <p>
 * Use {@link #restoreMapFileSequences(String, JavaSparkContext)} to restore values saved with this method.
 *
 * @param path           Path to save the MapFile
 * @param rdd            RDD to save
 * @param c              Configuration object, used to customise options for the map file
 * @param maxOutputFiles Nullable. If non-null: first coalesce the RDD to the specified size (number of partitions)
 *                       to limit the maximum number of output map files
 * @see #saveMapFile(String, JavaRDD)
 * @see #saveSequenceFile(String, JavaRDD)
 */
public static void saveMapFileSequences(String path, JavaRDD<List<List<Writable>>> rdd, Configuration c,
        Integer maxOutputFiles) {
    path = FilenameUtils.normalize(path, true);
    if (maxOutputFiles != null) {
        rdd = rdd.coalesce(maxOutputFiles);
    }
    JavaPairRDD<List<List<Writable>>, Long> dataIndexPairs = rdd.zipWithIndex();
    JavaPairRDD<LongWritable, SequenceRecordWritable> keyedByIndex = dataIndexPairs
            .mapToPair(new SequenceRecordSavePrepPairFunction());

    keyedByIndex.saveAsNewAPIHadoopFile(path, LongWritable.class, SequenceRecordWritable.class,
            MapFileOutputFormat.class, c);
}

From source file:org.datavec.spark.storage.SparkStorageUtils.java

/**
 * Restore a {@code JavaPairRDD<Long,List<List<Writable>>>} previously saved with {@link #saveMapFileSequences(String, JavaRDD)}<br>
 * Note that if the keys are not required, simply use {@code restoreMapFileSequences(...).values()}
 *
 * @param path Path of the MapFile
 * @param sc   Spark context
 * @return The restored RDD, with their unique indices as the key
 */
public static JavaPairRDD<Long, List<List<Writable>>> restoreMapFileSequences(String path,
        JavaSparkContext sc) {
    Configuration c = new Configuration();
    c.set(FileInputFormat.INPUT_DIR, FilenameUtils.normalize(path, true));
    JavaPairRDD<LongWritable, SequenceRecordWritable> pairRDD = sc.newAPIHadoopRDD(c,
            SequenceFileInputFormat.class, LongWritable.class, SequenceRecordWritable.class);

    return pairRDD.mapToPair(new SequenceRecordLoadPairFunction());
}