Example usage for org.apache.hadoop.fs Path getParent

Introduction

On this page you can find usage examples for the org.apache.hadoop.fs.Path method getParent().

Prototype

public Path getParent() 

Document

Returns the parent of a path or null if at root.
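A minimal, self-contained sketch of this behavior is shown below; the path strings are made up purely for illustration. getParent() drops the last path component, and at the root there is no parent, so the call returns null.

import org.apache.hadoop.fs.Path;

public class PathGetParentExample {
    public static void main(String[] args) {
        // Hypothetical path used only for illustration.
        Path file = new Path("/user/data/file.txt");

        System.out.println(file.getParent());             // /user/data
        System.out.println(file.getParent().getParent()); // /user

        // The root path has no parent, so getParent() returns null.
        Path root = new Path("/");
        System.out.println(root.getParent());             // null
    }
}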

Usage

From source file: org.apache.hive.hcatalog.streaming.TestStreaming.java

License: Apache License

private void corruptSideFile(final String file, final HiveConf conf, final Map<String, List<Long>> offsetMap,
        final String key, final int numEntries) throws IOException {
    Path dataPath = new Path(file);
    Path sideFilePath = OrcAcidUtils.getSideFile(dataPath);
    Path cPath = new Path(sideFilePath.getParent(), sideFilePath.getName() + ".corrupt");
    FileSystem fs = sideFilePath.getFileSystem(conf);
    List<Long> offsets = offsetMap.get(key);
    long lastOffset = offsets.get(offsets.size() - 1);
    FSDataOutputStream fdos = fs.create(cPath, true);
    // corrupt last entry
    if (numEntries < 0) {
        byte[] lastOffsetBytes = longToBytes(lastOffset);
        for (int i = 0; i < offsets.size() - 1; i++) {
            fdos.writeLong(offsets.get(i));
        }

        fdos.write(lastOffsetBytes, 0, 3);
    } else if (numEntries > 0) {
        int firstRun = Math.min(offsets.size(), numEntries);
        // add original entries
        for (int i = 0; i < firstRun; i++) {
            fdos.writeLong(offsets.get(i));
        }

        // add fake entries
        int remaining = numEntries - firstRun;
        for (int i = 0; i < remaining; i++) {
            fdos.writeLong(lastOffset + ((i + 1) * 100));
        }
    }

    fdos.close();
    fs.delete(sideFilePath, false);
    fs.rename(cPath, sideFilePath);
}

From source file: org.apache.hoya.avro.RoleHistoryWriter.java

License: Apache License

/**
 * Delete all old history entries older than the one we want to keep. This
 * uses the filename ordering to determine age, not timestamps
 * @param fileSystem filesystem
 * @param keep path to keep - used in thresholding the files
 * @return the number of files deleted
 * @throws FileNotFoundException if the path to keep is not present (safety
 * check to stop the entire dir being purged)
 * @throws IOException IO problems
 */
public int purgeOlderHistoryEntries(FileSystem fileSystem, Path keep) throws IOException {
    assert fileSystem != null : "null filesystem";
    if (!fileSystem.exists(keep)) {
        throw new FileNotFoundException(keep.toString());
    }
    Path dir = keep.getParent();
    log.debug("Purging entries in {} up to {}", dir, keep);
    List<Path> paths = findAllHistoryEntries(fileSystem, dir, true);
    Collections.sort(paths, new OlderFilesFirst());
    int deleteCount = 0;
    for (Path path : paths) {
        if (path.equals(keep)) {
            break;
        } else {
            log.debug("Deleting {}", path);
            deleteCount++;
            fileSystem.delete(path, false);
        }
    }
    return deleteCount;
}

From source file: org.apache.ignite.igfs.HadoopIgfs20FileSystemAbstractSelfTest.java

License: Apache License

/** @throws Exception If failed. */
public void testCreateCheckOverwrite() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
    final Path file = new Path(dir, "someFile");

    FSDataOutputStream out = fs.create(file, EnumSet.noneOf(CreateFlag.class),
            Options.CreateOpts.perms(FsPermission.getDefault()));

    out.close();

    // Check intermediate directory permissions.
    assertEquals(FsPermission.getDefault(), fs.getFileStatus(dir).getPermission());
    assertEquals(FsPermission.getDefault(), fs.getFileStatus(dir.getParent()).getPermission());
    assertEquals(FsPermission.getDefault(), fs.getFileStatus(dir.getParent().getParent()).getPermission());

    GridTestUtils.assertThrows(log, new Callable<Object>() {
        @Override
        public Object call() throws Exception {
            return fs.create(file, EnumSet.noneOf(CreateFlag.class),
                    Options.CreateOpts.perms(FsPermission.getDefault()));
        }
    }, PathExistsException.class, null);

    // Overwrite should be successful.
    FSDataOutputStream out1 = fs.create(file, EnumSet.of(CreateFlag.OVERWRITE),
            Options.CreateOpts.perms(FsPermission.getDefault()));

    out1.close();
}

From source file: org.apache.ignite.igfs.HadoopIgfs20FileSystemAbstractSelfTest.java

License: Apache License

/** @throws Exception If failed. */
public void testSetPermissionCheckDefaultPermission() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path file = new Path(fsHome, "/tmp/my");

    FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
            Options.CreateOpts.perms(FsPermission.getDefault()));

    os.close();

    fs.setPermission(file, null);

    assertEquals(FsPermission.getDefault(), fs.getFileStatus(file).getPermission());
    assertEquals(FsPermission.getDefault(), fs.getFileStatus(file.getParent()).getPermission());
}

From source file: org.apache.ignite.igfs.IgfsHadoopFileSystemAbstractSelfTest.java

License: Apache License

/** @throws Exception If failed. */
@SuppressWarnings("deprecation")
public void testCreateCheckOverwrite() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
    final Path file = new Path(dir, "someFile");

    FSDataOutputStream out = fs.create(file, FsPermission.getDefault(), false, 64 * 1024,
            fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);

    out.close();

    // Check intermediate directory permissions.
    assertEquals(FsPermission.getDefault(), fs.getFileStatus(dir).getPermission());
    assertEquals(FsPermission.getDefault(), fs.getFileStatus(dir.getParent()).getPermission());
    assertEquals(FsPermission.getDefault(), fs.getFileStatus(dir.getParent().getParent()).getPermission());

    GridTestUtils.assertThrows(log, new Callable<Object>() {
        @Override
        public Object call() throws Exception {
            return fs.create(file, FsPermission.getDefault(), false, 1024, (short) 1, 2048, null);
        }
    }, PathExistsException.class, null);

    // Overwrite should be successful.
    FSDataOutputStream out1 = fs.create(file, true);

    out1.close();
}

From source file: org.apache.ignite.igfs.IgfsHadoopFileSystemAbstractSelfTest.java

License: Apache License

/** @throws Exception If failed. */
@SuppressWarnings("deprecation")
public void testSetPermissionCheckDefaultPermission() throws Exception {
    Path fsHome = new Path(primaryFsUri);
    Path file = new Path(fsHome, "/tmp/my");

    FSDataOutputStream os = fs.create(file, FsPermission.getDefault(), false, 64 * 1024,
            fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);

    os.close();

    fs.setPermission(file, null);

    assertEquals(FsPermission.getDefault(), fs.getFileStatus(file).getPermission());
    assertEquals(FsPermission.getDefault(), fs.getFileStatus(file.getParent()).getPermission());
}

From source file: org.apache.ignite.internal.processors.hadoop.fs.GridHadoopRawLocalFileSystem.java

License: Apache License

/** {@inheritDoc} */
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
    if (f == null)
        throw new IllegalArgumentException("mkdirs path arg is null");

    Path parent = f.getParent();

    File p2f = convert(f);

    if (parent != null) {
        File parent2f = convert(parent);

        if (parent2f != null && parent2f.exists() && !parent2f.isDirectory())
            throw new FileAlreadyExistsException("Parent path is not a directory: " + parent);

    }

    return (parent == null || mkdirs(parent)) && (p2f.mkdir() || p2f.isDirectory());
}

From source file: org.apache.impala.analysis.LoadDataStmt.java

License: Apache License

/**
 * Check to see if Impala has the necessary permissions to access the source and dest
 * paths for this LOAD statement (which maps onto a sequence of file move operations,
 * with the requisite permission requirements), and check to see if all files to be
 * moved are in format that Impala understands. Errors are raised as AnalysisExceptions.
 */
private void analyzePaths(Analyzer analyzer, HdfsTable hdfsTable) throws AnalysisException {
    // The user must have permission to access the source location. Since the files will
    // be moved from this location, the user needs to have all permission.
    sourceDataPath_.analyze(analyzer, Privilege.ALL);

    // Catch all exceptions thrown by accessing files, and rethrow as AnalysisExceptions.
    try {
        Path source = sourceDataPath_.getPath();
        FileSystem fs = source.getFileSystem(FileSystemUtil.getConfiguration());
        if (!(fs instanceof DistributedFileSystem) && !(fs instanceof S3AFileSystem)) {
            throw new AnalysisException(String.format(
                    "INPATH location '%s' " + "must point to an HDFS or S3A filesystem.", sourceDataPath_));
        }
        if (!fs.exists(source)) {
            throw new AnalysisException(String.format("INPATH location '%s' does not exist.", sourceDataPath_));
        }

        // If the source file is a directory, we must be able to read from and write to
        // it. If the source file is a file, we must be able to read from it, and write to
        // its parent directory (in order to delete the file as part of the move operation).
        FsPermissionChecker checker = FsPermissionChecker.getInstance();

        if (fs.isDirectory(source)) {
            if (FileSystemUtil.getTotalNumVisibleFiles(source) == 0) {
                throw new AnalysisException(
                        String.format("INPATH location '%s' contains no visible files.", sourceDataPath_));
            }
            if (FileSystemUtil.containsVisibleSubdirectory(source)) {
                throw new AnalysisException(String.format(
                        "INPATH location '%s' cannot contain non-hidden subdirectories.", sourceDataPath_));
            }
            if (!checker.getPermissions(fs, source).checkPermissions(FsAction.READ_WRITE)) {
                throw new AnalysisException(String.format(
                        "Unable to LOAD DATA from %s "
                                + "because Impala does not have READ and WRITE permissions on this directory",
                        source));
            }
        } else {
            // INPATH names a file.
            if (FileSystemUtil.isHiddenFile(source.getName())) {
                throw new AnalysisException(
                        String.format("INPATH location '%s' points to a hidden file.", source));
            }

            if (!checker.getPermissions(fs, source.getParent()).checkPermissions(FsAction.WRITE)) {
                throw new AnalysisException(String.format("Unable to LOAD DATA from %s "
                        + "because Impala does not have WRITE permissions on its parent " + "directory %s",
                        source, source.getParent()));
            }

            if (!checker.getPermissions(fs, source).checkPermissions(FsAction.READ)) {
                throw new AnalysisException(String.format("Unable to LOAD DATA from %s "
                        + "because Impala does not have READ permissions on this file", source));
            }
        }

        String noWriteAccessErrorMsg = String.format(
                "Unable to LOAD DATA into "
                        + "target table (%s) because Impala does not have WRITE access to HDFS " + "location: ",
                hdfsTable.getFullName());

        HdfsPartition partition;
        String location;
        if (partitionSpec_ != null) {
            partition = hdfsTable.getPartition(partitionSpec_.getPartitionSpecKeyValues());
            location = partition.getLocation();
            if (!TAccessLevelUtil.impliesWriteAccess(partition.getAccessLevel())) {
                throw new AnalysisException(noWriteAccessErrorMsg + location);
            }
        } else {
            // "default" partition
            partition = hdfsTable.getPartitionMap().get(ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID);
            location = hdfsTable.getLocation();
            if (!hdfsTable.hasWriteAccess()) {
                throw new AnalysisException(noWriteAccessErrorMsg + hdfsTable.getLocation());
            }
        }
        Preconditions.checkNotNull(partition);

        // Verify the files being loaded are supported.
        for (FileStatus fStatus : fs.listStatus(source)) {
            if (fs.isDirectory(fStatus.getPath()))
                continue;
            StringBuilder errorMsg = new StringBuilder();
            HdfsFileFormat fileFormat = partition.getInputFormatDescriptor().getFileFormat();
            if (!fileFormat.isFileCompressionTypeSupported(fStatus.getPath().toString(), errorMsg)) {
                throw new AnalysisException(errorMsg.toString());
            }
        }
    } catch (FileNotFoundException e) {
        throw new AnalysisException("File not found: " + e.getMessage(), e);
    } catch (IOException e) {
        throw new AnalysisException("Error accessing filesystem: " + e.getMessage(), e);
    }
}

From source file: org.apache.impala.catalog.HdfsTable.java

License: Apache License

/**
 * Gets the AccessLevel that is available for Impala for this table based on the
 * permissions Impala has on the given path. If the path does not exist, recurses up
 * the path until a existing parent directory is found, and inherit access permissions
 * from that.
 * Always returns READ_WRITE for S3 files.
 */
private TAccessLevel getAvailableAccessLevel(FileSystem fs, Path location) throws IOException {

    // Avoid calling getPermissions() on file path for S3 files, as that makes a round
    // trip to S3. Also, the S3A connector is currently unable to manage S3 permissions,
    // so for now it is safe to assume that all files(objects) have READ_WRITE
    // permissions, as that's what the S3A connector will always return too.
    // TODO: Revisit if the S3A connector is updated to be able to manage S3 object
    // permissions. (see HADOOP-13892)
    if (FileSystemUtil.isS3AFileSystem(fs))
        return TAccessLevel.READ_WRITE;

    FsPermissionChecker permissionChecker = FsPermissionChecker.getInstance();
    while (location != null) {
        if (fs.exists(location)) {
            FsPermissionChecker.Permissions perms = permissionChecker.getPermissions(fs, location);
            if (perms.canReadAndWrite()) {
                return TAccessLevel.READ_WRITE;
            } else if (perms.canRead()) {
                return TAccessLevel.READ_ONLY;
            } else if (perms.canWrite()) {
                return TAccessLevel.WRITE_ONLY;
            }
            return TAccessLevel.NONE;
        }
        location = location.getParent();
    }
    // Should never get here.
    Preconditions.checkNotNull(location, "Error: no path ancestor exists");
    return TAccessLevel.NONE;
}

From source file: org.apache.impala.common.FileSystemUtil.java

License: Apache License

/**
 * Relocates the given file to a new location (either another directory or a
 * file in the same or different filesystem). The file is generally moved (renamed) to
 * the new location. However, the file is copied if the source and destination are in
 * different encryption zones so that the file can be decrypted and/or encrypted, or if
 * the source and destination are in different filesystems. If renameIfAlreadyExists is
 * true, no error will be thrown if a file with the same name already exists in the
 * destination location. Instead, a UUID will be appended to the base file name,
 * preserving the existing file extension. If renameIfAlreadyExists is false, an
 * IOException will be thrown if there is a file name conflict.
 */
public static void relocateFile(Path sourceFile, Path dest, boolean renameIfAlreadyExists) throws IOException {
    FileSystem destFs = dest.getFileSystem(CONF);
    FileSystem sourceFs = sourceFile.getFileSystem(CONF);

    Path destFile = destFs.isDirectory(dest) ? new Path(dest, sourceFile.getName()) : dest;
    // If a file with the same name does not already exist in the destination location
    // then use the same file name. Otherwise, generate a unique file name.
    if (renameIfAlreadyExists && destFs.exists(destFile)) {
        Path destDir = destFs.isDirectory(dest) ? dest : dest.getParent();
        destFile = new Path(destDir, appendToBaseFileName(destFile.getName(), UUID.randomUUID().toString()));
    }
    boolean sameFileSystem = isPathOnFileSystem(sourceFile, destFs);
    boolean destIsDfs = isDistributedFileSystem(destFs);

    // If the source and the destination are on different file systems, or in different
    // encryption zones, files can't be moved from one location to the other and must be
    // copied instead.
    boolean sameEncryptionZone = arePathsInSameHdfsEncryptionZone(destFs, sourceFile, destFile);
    // We can do a rename if the src and dst are in the same encryption zone in the same
    // distributed filesystem.
    boolean doRename = destIsDfs && sameFileSystem && sameEncryptionZone;
    // Alternatively, we can do a rename if the src and dst are on the same
    // non-distributed filesystem.
    if (!doRename)
        doRename = !destIsDfs && sameFileSystem;
    if (doRename) {
        if (LOG.isTraceEnabled()) {
            LOG.trace(String.format("Moving '%s' to '%s'", sourceFile.toString(), destFile.toString()));
        }
        // Move (rename) the file.
        destFs.rename(sourceFile, destFile);
        return;
    }
    if (destIsDfs && sameFileSystem) {
        Preconditions.checkState(!doRename);
        // We must copy rather than move if the source and dest are in different
        // encryption zones. A move would return an error from the NN because a move is a
        // metadata-only operation and the files would not be encrypted/decrypted properly
        // on the DNs.
        if (LOG.isTraceEnabled()) {
            LOG.trace(String.format("Copying source '%s' to '%s' because HDFS encryption zones are different.",
                    sourceFile, destFile));
        }
    } else {
        Preconditions.checkState(!sameFileSystem);
        if (LOG.isTraceEnabled()) {
            LOG.trace(String.format("Copying '%s' to '%s' between filesystems.", sourceFile, destFile));
        }
    }
    FileUtil.copy(sourceFs, sourceFile, destFs, destFile, true, true, CONF);
}