Example usage for org.apache.hadoop.fs Path isAbsolute

List of usage examples for org.apache.hadoop.fs Path isAbsolute

Introduction

On this page you can find example usages of org.apache.hadoop.fs Path isAbsolute, collected from open-source projects.

Prototype

public boolean isAbsolute() 

Document

Returns true if the path component (i.e. directory) of this URI is absolute.
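
A minimal sketch of the behaviour described above (the path strings are made up for illustration; only the path component of the URI is considered):

// assumes: import org.apache.hadoop.fs.Path;
new Path("data/file.txt").isAbsolute();                            // false - no leading "/" in the path component
new Path("/user/data/file.txt").isAbsolute();                      // true  - path component starts with "/"
new Path("hdfs://namenode:8020/user/data/file.txt").isAbsolute();  // true  - scheme and authority do not change the result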

Usage

From source file:hydrograph.engine.cascading.scheme.hive.parquet.HiveParquetTableDescriptor.java

License:Apache License

public HiveParquetTableDescriptor(String databaseName, String tableName, String[] columnNames,
        String[] columnTypes, String[] partitionKeys, String delimiter, String serializationLib, Path location,
        boolean transactional, int buckets) {

    super(databaseName, tableName, columnNames, columnTypes, partitionKeys, delimiter, serializationLib,
            location, transactional, buckets);

    if (tableName == null || tableName.isEmpty())
        throw new IllegalArgumentException("tableName cannot be null or empty");

    if (databaseName == null || databaseName.isEmpty())
        this.databaseName = HIVE_DEFAULT_DATABASE_NAME;
    else
        this.databaseName = databaseName.toLowerCase();

    this.tableName = tableName.toLowerCase();

    // SonarQube: constructors and methods receiving arrays should clone them and
    // store the copy, so that later changes made by the caller do not affect the
    // internal state.
    this.columnNames = columnNames == null ? null : columnNames.clone();
    this.columnTypes = columnTypes == null ? null : columnTypes.clone();
    this.partitionKeys = partitionKeys == null ? null : partitionKeys.clone();
    this.serializationLib = serializationLib;
    this.transactional = transactional;
    // Only set the delimiter if the serialization lib is Delimited.
    if (delimiter == null && this.serializationLib.equals(HIVE_DEFAULT_SERIALIZATION_LIB_NAME))
        this.delimiter = HIVE_DEFAULT_DELIMITER;
    else
        this.delimiter = delimiter;

    if (isPartitioned())
        verifyPartitionKeys();

    if (columnNames == null || columnTypes == null || columnNames.length == 0 || columnTypes.length == 0
            || columnNames.length != columnTypes.length)
        throw new IllegalArgumentException(
                "columnNames and columnTypes cannot be empty and must have the same size");

    if (location != null) {
        if (!location.isAbsolute())
            throw new IllegalArgumentException("location must be a fully qualified absolute path");

        // Store as string since path is not serialisable
        this.location = location.toString();
    }
    this.buckets = buckets;
}

From source file:hydrograph.engine.cascading.scheme.hive.text.HiveTextTableDescriptor.java

License:Apache License

public HiveTextTableDescriptor(String databaseName, String tableName, String[] columnNames,
        String[] columnTypes, String[] partitionKeys, String delimiter, String serializationLib, Path location,
        boolean transactional, int buckets) {

    super(databaseName, tableName, columnNames, columnTypes, partitionKeys, delimiter, serializationLib,
            location, transactional, buckets);

    if (tableName == null || tableName.isEmpty())
        throw new IllegalArgumentException("tableName cannot be null or empty");

    if (databaseName == null || databaseName.isEmpty())
        this.databaseName = HIVE_DEFAULT_DATABASE_NAME;
    else
        this.databaseName = databaseName.toLowerCase();

    this.tableName = tableName.toLowerCase();

    // SonarQube: constructors and methods receiving arrays should clone them and
    // store the copy, so that later changes made by the caller do not affect the
    // internal state.
    this.columnNames = columnNames == null ? null : columnNames.clone();
    this.columnTypes = columnTypes == null ? null : columnTypes.clone();
    this.partitionKeys = partitionKeys == null ? null : partitionKeys.clone();
    this.serializationLib = serializationLib;
    this.transactional = transactional;
    // Only set the delimiter if the serialization lib is Delimited.
    if (delimiter == null && this.serializationLib.equals(HIVE_DEFAULT_SERIALIZATION_LIB_NAME))
        this.delimiter = HIVE_DEFAULT_DELIMITER;
    else
        this.delimiter = delimiter;

    if (isPartitioned())
        verifyPartitionKeys();

    if (columnNames == null || columnTypes == null || columnNames.length == 0 || columnTypes.length == 0
            || columnNames.length != columnTypes.length)
        throw new IllegalArgumentException(
                "columnNames and columnTypes cannot be empty and must have the same size");

    if (location != null) {
        if (!location.isAbsolute())
            throw new IllegalArgumentException("location must be a fully qualified absolute path");

        // Store as string since path is not serialisable
        this.location = location.toString();
    }
    this.buckets = buckets;
}
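
In both descriptors above, a non-null location must pass the isAbsolute() check, so the constructor fails fast on relative warehouse paths. A small sketch (the paths below are invented):

// accepted: the URI path component is absolute
Path ok = new Path("hdfs://nn:8020/apps/hive/warehouse/orders");
// rejected: a relative location makes the constructor throw
// IllegalArgumentException("location must be a fully qualified absolute path")
Path bad = new Path("apps/hive/warehouse/orders");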

From source file:io.hops.erasure_coding.BaseEncodingManager.java

License:Apache License

/**
 * Make an absolute path relative by stripping the leading /
 */
static Path makeRelative(Path path) {
    if (!path.isAbsolute()) {
        return path;
    }
    String p = path.toUri().getPath();
    String relative = p.substring(1, p.length());
    return new Path(relative);
}
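
A quick sketch of the transformation (the path is invented; the method is package-private, so it is shown as plain input/output):

// makeRelative(new Path("/user/hops/data/part-00000")) -> new Path("user/hops/data/part-00000")
// makeRelative(new Path("data/part-00000"))            -> returned unchanged, already relative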

From source file:io.prestosql.plugin.hive.s3.PrestoS3FileSystem.java

License:Apache License

public static String keyFromPath(Path path) {
    checkArgument(path.isAbsolute(), "Path is not absolute: %s", path);
    String key = nullToEmpty(path.toUri().getPath());
    if (key.startsWith(PATH_SEPARATOR)) {
        key = key.substring(PATH_SEPARATOR.length());
    }
    if (key.endsWith(PATH_SEPARATOR)) {
        key = key.substring(0, key.length() - PATH_SEPARATOR.length());
    }
    return key;
}
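
A hedged usage sketch (the bucket and object names are made up; PATH_SEPARATOR is assumed to be "/"):

String key = PrestoS3FileSystem.keyFromPath(new Path("s3://my-bucket/logs/2020/app.log"));
// toUri().getPath() yields "/logs/2020/app.log"; the leading separator is stripped,
// so key is "logs/2020/app.log". A relative path fails the checkArgument precondition.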

From source file:org.apache.distributedlog.fs.DLFileSystem.java

License:Apache License

private Path makeAbsolute(Path f) {
    if (f.isAbsolute()) {
        return f;
    } else {
        return new Path(workingDir, f);
    }
}
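
A sketch of the resolution behaviour, assuming workingDir is /users/alice (both paths are invented; the method is private, so it is shown as plain input/output):

// makeAbsolute(new Path("streams/log-000001"))  -> new Path("/users/alice/streams/log-000001")
// makeAbsolute(new Path("/streams/log-000001")) -> returned unchanged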

From source file:org.apache.drill.exec.store.parquet.metadata.MetadataPathUtils.java

License:Apache License

/**
 * Helper method that converts a list of relative paths to absolute ones
 *
 * @param paths list of relative paths
 * @param baseDir base parent directory
 * @return list of absolute paths
 */
public static List<Path> convertToAbsolutePaths(List<Path> paths, String baseDir) {
    if (!paths.isEmpty()) {
        List<Path> absolutePaths = Lists.newArrayList();
        for (Path relativePath : paths) {
            Path absolutePath = (relativePath.isAbsolute()) ? relativePath : new Path(baseDir, relativePath);
            absolutePaths.add(absolutePath);
        }
        return absolutePaths;
    }
    return paths;
}
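
For instance (the base directory and file name are invented):

// convertToAbsolutePaths(singletonList(new Path("2020/0_0_0.parquet")), "/drill/metadata_cache")
// -> [new Path("/drill/metadata_cache/2020/0_0_0.parquet")]; already absolute entries are kept as-is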

From source file:org.apache.drill.exec.store.parquet.metadata.MetadataPathUtils.java

License:Apache License

/**
 * Convert a list of files with relative paths to files with absolute ones
 *
 * @param files list of files with relative paths
 * @param baseDir base parent directory
 * @return list of files with absolute paths
 */
public static List<ParquetFileMetadata_v3> convertToFilesWithAbsolutePaths(List<ParquetFileMetadata_v3> files,
        String baseDir) {
    if (!files.isEmpty()) {
        List<ParquetFileMetadata_v3> filesWithAbsolutePaths = Lists.newArrayList();
        for (ParquetFileMetadata_v3 file : files) {
            Path relativePath = file.getPath();
            // create a new file if old one contains a relative path, otherwise use an old file
            ParquetFileMetadata_v3 fileWithAbsolutePath = (relativePath.isAbsolute()) ? file
                    : new ParquetFileMetadata_v3(new Path(baseDir, relativePath), file.length, file.rowGroups);
            filesWithAbsolutePaths.add(fileWithAbsolutePath);
        }
        return filesWithAbsolutePaths;
    }
    return files;
}

From source file:org.apache.drill.exec.store.parquet.metadata.MetadataPathUtils.java

License:Apache License

/**
 * Constructs a relative path from the child's full path and the base path, or returns the child path if it is already relative
 *
 * @param childPath full absolute path
 * @param baseDir base path (the part of the Path, which should be cut off from child path)
 * @return relative path
 */
public static Path relativize(Path baseDir, Path childPath) {
    Path fullPathWithoutSchemeAndAuthority = Path.getPathWithoutSchemeAndAuthority(childPath);
    Path basePathWithoutSchemeAndAuthority = Path.getPathWithoutSchemeAndAuthority(baseDir);

    // Since hadoop Path has no relativize(), uri.relativize() is used to get the relative path
    Path relativeFilePath = new Path(
            basePathWithoutSchemeAndAuthority.toUri().relativize(fullPathWithoutSchemeAndAuthority.toUri()));
    if (relativeFilePath.isAbsolute()) {
        throw new IllegalStateException(String.format("Path %s is not a subpath of %s.",
                basePathWithoutSchemeAndAuthority.toUri().getPath(),
                fullPathWithoutSchemeAndAuthority.toUri().getPath()));
    }
    return relativeFilePath;
}
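
A hedged example of the expected input and output (the paths are invented for illustration):

Path base  = new Path("hdfs://nn:8020/drill/metadata_cache");
Path child = new Path("hdfs://nn:8020/drill/metadata_cache/2020/0_0_0.parquet");
Path rel   = MetadataPathUtils.relativize(base, child);
// rel.toString() is "2020/0_0_0.parquet"
// If child is not under base, uri.relativize() returns the child's URI unchanged,
// the isAbsolute() check fires, and an IllegalStateException is thrown.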

From source file:org.apache.falcon.hadoop.JailedFileSystem.java

License:Apache License

private Path toLocalPath(Path f) {
    if (!f.isAbsolute()) {
        f = new Path(getWorkingDirectory(), f);
    }
    return new Path(basePath + f.toUri().getPath());
}

From source file:org.apache.falcon.oozie.process.SparkProcessWorkflowBuilder.java

License:Apache License

private void validateSparkJarFilePath(Path sparkJarFilePath) throws FalconException {
    if (!sparkJarFilePath.isAbsolute()) {
        throw new FalconException("Spark jar file path must be absolute:" + sparkJarFilePath);
    }
}