Example usage for org.apache.hadoop.fs Path getParent

List of usage examples for org.apache.hadoop.fs Path getParent

Introduction

On this page you can find usage examples for org.apache.hadoop.fs.Path#getParent.

Prototype

public Path getParent() 

Document

Returns the parent of a path or null if at root.
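Before the project-level examples below, here is a minimal, self-contained sketch (not taken from any of the listed sources) illustrating the behavior described above: getParent() walks one level up the path hierarchy and returns null once the root is reached. The paths used are placeholders.

import org.apache.hadoop.fs.Path;

public class GetParentDemo {
    public static void main(String[] args) {
        // A nested path: its parent is the containing directory.
        Path file = new Path("/tmp/tmpdir/data-file");
        System.out.println(file.getParent());                // /tmp/tmpdir
        System.out.println(file.getParent().getParent());    // /tmp

        // The root has no parent, so getParent() returns null.
        System.out.println(new Path("/").getParent());       // null
    }
}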

Usage

From source file:org.apache.carbondata.core.metadata.SegmentFileStore.java

License:Apache License

/**
 * If partition specs are available, then check the location map for any index file path which is
 * not present in the partitionSpecs. If found then delete that index file.
 * If the partition directory is empty, then delete the directory also.
 * If partition specs are null, then directly delete parent directory in locationMap.
 */
private static void deletePhysicalPartition(List<PartitionSpec> partitionSpecs,
        Map<String, List<String>> locationMap, List<String> indexOrMergeFiles, String tablePath)
        throws IOException {
    for (String indexOrMergFile : indexOrMergeFiles) {
        if (null != partitionSpecs) {
            Path location = new Path(indexOrMergFile);
            boolean exists = pathExistsInPartitionSpec(partitionSpecs, location);
            if (!exists) {
                FileFactory.deleteAllCarbonFilesOfDir(FileFactory.getCarbonFile(location.toString()));
            }
        } else {
            Path location = new Path(indexOrMergFile);
            FileFactory.deleteAllCarbonFilesOfDir(FileFactory.getCarbonFile(location.toString()));
        }
    }
    for (Map.Entry<String, List<String>> entry : locationMap.entrySet()) {
        if (partitionSpecs != null) {
            Path location = new Path(entry.getKey());
            boolean exists = pathExistsInPartitionSpec(partitionSpecs, location);
            if (!exists) {
                FileFactory.deleteAllCarbonFilesOfDir(FileFactory.getCarbonFile(location.toString()));
                for (String carbonDataFile : entry.getValue()) {
                    FileFactory.deleteAllCarbonFilesOfDir(FileFactory.getCarbonFile(carbonDataFile));
                }
            }
            CarbonFile path = FileFactory.getCarbonFile(location.getParent().toString());
            if (path.listFiles().length == 0) {
                FileFactory
                        .deleteAllCarbonFilesOfDir(FileFactory.getCarbonFile(location.getParent().toString()));
            }
        } else {
            Path location = new Path(entry.getKey()).getParent();
            // delete the segment folder
            CarbonFile segmentPath = FileFactory.getCarbonFile(location.toString());
            if (null != segmentPath && segmentPath.exists()
                    && !new Path(tablePath).equals(new Path(segmentPath.getAbsolutePath()))) {
                FileFactory.deleteAllCarbonFilesOfDir(segmentPath);
            }
        }
    }
}
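The example above relies on CarbonData's FileFactory wrappers; the same "remove the parent directory once it is empty" step can be sketched with only the stock Hadoop FileSystem API. The following is a simplified, hypothetical helper, not CarbonData code: the method name and arguments are illustrative.

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

private static void deleteIndexFileAndEmptyParent(FileSystem fs, Path indexFile) throws IOException {
    // Remove the index file itself.
    fs.delete(indexFile, false);

    // getParent() is null only at the root, which should never be deleted.
    Path parent = indexFile.getParent();
    if (parent != null && fs.exists(parent) && fs.listStatus(parent).length == 0) {
        // The partition directory is now empty, so drop it as well.
        fs.delete(parent, false);
    }
}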

From source file:org.apache.carbondata.core.util.BlockletDataMapUtil.java

License:Apache License

public static Set<TableBlockIndexUniqueIdentifier> getTableBlockUniqueIdentifiers(Segment segment)
        throws IOException {
    Set<TableBlockIndexUniqueIdentifier> tableBlockIndexUniqueIdentifiers = new HashSet<>();
    Map<String, String> indexFiles = segment.getCommittedIndexFile();
    for (Map.Entry<String, String> indexFileEntry : indexFiles.entrySet()) {
        Path indexFile = new Path(indexFileEntry.getKey());
        tableBlockIndexUniqueIdentifiers
                .add(new TableBlockIndexUniqueIdentifier(indexFile.getParent().toString(), indexFile.getName(),
                        indexFileEntry.getValue(), segment.getSegmentNo()));
    }
    return tableBlockIndexUniqueIdentifiers;
}

From source file:org.apache.carbondata.core.writer.CarbonIndexFileMergeWriter.java

License:Apache License

private String writeMergeIndexFileBasedOnSegmentFile(String segmentId, List<String> indexFileNamesTobeAdded,
        SegmentFileStore segmentFileStore, CarbonFile[] indexFiles, String uuid) throws IOException {
    SegmentIndexFileStore fileStore = new SegmentIndexFileStore();
    fileStore.readAllIIndexOfSegment(segmentFileStore.getSegmentFile(), segmentFileStore.getTablePath(),
            SegmentStatus.SUCCESS, true);
    Map<String, byte[]> indexMap = fileStore.getCarbonIndexMapWithFullPath();
    Map<String, Map<String, byte[]>> indexLocationMap = new HashMap<>();
    for (Map.Entry<String, byte[]> entry : indexMap.entrySet()) {
        Path path = new Path(entry.getKey());
        Map<String, byte[]> map = indexLocationMap.get(path.getParent().toString());
        if (map == null) {
            map = new HashMap<>();
            indexLocationMap.put(path.getParent().toString(), map);
        }
        map.put(path.getName(), entry.getValue());
    }
    for (Map.Entry<String, Map<String, byte[]>> entry : indexLocationMap.entrySet()) {
        String mergeIndexFile = writeMergeIndexFile(indexFileNamesTobeAdded, entry.getKey(), entry.getValue(),
                segmentId);
        for (Map.Entry<String, SegmentFileStore.FolderDetails> segentry : segmentFileStore.getLocationMap()
                .entrySet()) {
            String location = segentry.getKey();
            if (segentry.getValue().isRelative()) {
                location = segmentFileStore.getTablePath() + CarbonCommonConstants.FILE_SEPARATOR + location;
            }
            if (new Path(entry.getKey()).equals(new Path(location))) {
                segentry.getValue().setMergeFileName(mergeIndexFile);
                segentry.getValue().setFiles(new HashSet<String>());
                break;
            }
        }
    }
    String newSegmentFileName = SegmentFileStore.genSegmentFileName(segmentId, uuid)
            + CarbonTablePath.SEGMENT_EXT;
    String path = CarbonTablePath.getSegmentFilesLocation(table.getTablePath())
            + CarbonCommonConstants.FILE_SEPARATOR + newSegmentFileName;
    SegmentFileStore.writeSegmentFile(segmentFileStore.getSegmentFile(), path);
    SegmentFileStore.updateSegmentFile(table, segmentId, newSegmentFileName,
            table.getCarbonTableIdentifier().getTableId(), segmentFileStore);

    for (CarbonFile file : indexFiles) {
        file.delete();
    }

    return uuid;
}
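The core getParent() use in this example is building a map keyed by the parent directory so that all index files from the same folder are merged together. A stripped-down, hypothetical version of that grouping step could look like this (the method name and input paths are illustrative):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.fs.Path;

private static Map<String, List<String>> groupByParent(List<Path> indexFiles) {
    Map<String, List<String>> byParent = new HashMap<>();
    for (Path file : indexFiles) {
        // Key on the parent directory; collect the file names under it.
        String parent = file.getParent().toString();
        byParent.computeIfAbsent(parent, k -> new ArrayList<>()).add(file.getName());
    }
    return byParent;
}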

From source file:org.apache.cassandra.hadoop.fs.CassandraFileSystem.java

License:Apache License

@Override
public boolean mkdirs(Path path, FsPermission permission) throws IOException {
    Path absolutePath = makeAbsolute(path);
    List<Path> paths = new ArrayList<Path>();
    do {
        paths.add(0, absolutePath);
        absolutePath = absolutePath.getParent();
    } while (absolutePath != null);

    boolean result = true;
    for (Path p : paths) {
        result &= mkdir(p, permission);
    }
    return result;
}

From source file:org.apache.cassandra.hadoop.fs.CassandraFileSystem.java

License:Apache License

/**
 * @param permission
 *            Currently ignored.
 */
@Override
public FSDataOutputStream create(Path file, FsPermission permission, boolean overwrite, int bufferSize,
        short replication, long blockSize, Progressable progress) throws IOException {

    INode inode = store.retrieveINode(makeAbsolute(file));
    if (inode != null) {
        if (overwrite) {
            delete(file);
        } else {
            throw new IOException("File already exists: " + file);
        }
    } else {
        Path parent = file.getParent();
        if (parent != null) {
            if (!mkdirs(parent)) {
                throw new IOException("Mkdirs failed to create " + parent.toString());
            }
        }
    }
    return new FSDataOutputStream(new CassandraOutputStream(getConf(), store, makeAbsolute(file), permission,
            blockSize, subBlockSize, progress, bufferSize), statistics);
}
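A common reason to call getParent() before creating a file, as in the else branch above, is to make sure the containing directory exists first. A minimal, hypothetical variant of that check using only the FileSystem API (names are illustrative, not part of CassandraFileSystem):

import java.io.IOException;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

private static FSDataOutputStream createWithParents(FileSystem fs, Path file) throws IOException {
    Path parent = file.getParent();
    // parent is null only when the file itself is the root.
    if (parent != null && !fs.mkdirs(parent)) {
        throw new IOException("Mkdirs failed to create " + parent);
    }
    return fs.create(file, false); // do not overwrite an existing file
}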

From source file:org.apache.cassandra.hadoop.fs.CassandraFileSystem.java

License:Apache License

@Override
public boolean rename(Path src, Path dst) throws IOException {
    if (logger.isDebugEnabled())
        logger.debug("Renaming " + src + " to " + dst);

    Path absoluteSrc = makeAbsolute(src);
    INode srcINode = store.retrieveINode(absoluteSrc);
    if (srcINode == null) {
        // src path doesn't exist
        return false;
    }
    Path absoluteDst = makeAbsolute(dst);
    INode dstINode = store.retrieveINode(absoluteDst);
    if (dstINode != null && dstINode.isDirectory()) {
        absoluteDst = new Path(absoluteDst, absoluteSrc.getName());
        dstINode = store.retrieveINode(absoluteDst);
    }
    if (dstINode != null) {
        // dst path already exists - can't overwrite
        return false;
    }
    Path dstParent = absoluteDst.getParent();
    if (dstParent != null) {
        INode dstParentINode = store.retrieveINode(dstParent);
        if (dstParentINode == null || dstParentINode.isFile()) {
            // dst parent doesn't exist or is a file
            return false;
        }
    }
    return renameRecursive(absoluteSrc, absoluteDst);
}

From source file:org.apache.cassandra.hadoop.fs.CassandraFileSystemThriftStore.java

License:Apache License

/**
 * @param path a Path
 * @return the parent to the <code>path</code> or null if the <code>path</code> represents the root.
 */
private String getParentForIndex(Path path) {
    Path parent = path.getParent();

    if (parent == null) {
        return "null";
    }

    return parent.toUri().getPath();
}

From source file:org.apache.coheigea.bigdata.hdfs.HDFSTest.java

License:Apache License

@org.junit.Test
public void testDirectoryPermissions() throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file
    final Path file = new Path("/tmp/tmpdir/data-file4");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Try to read the directory as "bob" - this should be allowed
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("bob");
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            RemoteIterator<LocatedFileStatus> iter = fs.listFiles(file.getParent(), false);
            Assert.assertTrue(iter.hasNext());

            fs.close();
            return null;
        }
    });

    // Change permissions so that the directory can't be read by "other"
    fileSystem.setPermission(file.getParent(), new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE));

    // Try to read the base directory as the file owner
    RemoteIterator<LocatedFileStatus> iter = fileSystem.listFiles(file.getParent(), false);
    Assert.assertTrue(iter.hasNext());

    // Now try to read the directory as "bob" again - this should fail
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            try {
                RemoteIterator<LocatedFileStatus> iter = fs.listFiles(file.getParent(), false);
                Assert.assertTrue(iter.hasNext());
                Assert.fail("Failure expected on an incorrect permission");
            } catch (AccessControlException ex) {
                // expected
            }

            fs.close();
            return null;
        }
    });
}

From source file:org.apache.coheigea.bigdata.hdfs.ranger.HDFSRangerTest.java

License:Apache License

@org.junit.Test
public void executeTest() throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file - the AccessControlEnforcer won't be invoked as we are the "superuser"
    final Path file = new Path("/tmp/tmpdir3/data-file2");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Change permissions to read-only
    fileSystem.setPermission(file, new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE));

    // Change the parent directory permissions to be execute only for the owner
    Path parentDir = new Path("/tmp/tmpdir3");
    fileSystem.setPermission(parentDir, new FsPermission(FsAction.EXECUTE, FsAction.NONE, FsAction.NONE));

    // Try to read the directory as "bob" - this should be allowed (by the policy - user)
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting("bob", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            RemoteIterator<LocatedFileStatus> iter = fs.listFiles(file.getParent(), false);
            Assert.assertTrue(iter.hasNext());

            fs.close();
            return null;
        }
    });

    // Try to read the directory as "alice" - this should be allowed (by the policy - group)
    ugi = UserGroupInformation.createUserForTesting("alice", new String[] { "IT" });
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            RemoteIterator<LocatedFileStatus> iter = fs.listFiles(file.getParent(), false);
            Assert.assertTrue(iter.hasNext());

            fs.close();
            return null;
        }
    });

    // Now try to read the directory as unknown user "eve" - this should not be allowed
    ugi = UserGroupInformation.createUserForTesting("eve", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Try to list the parent directory
            try {
                RemoteIterator<LocatedFileStatus> iter = fs.listFiles(file.getParent(), false);
                Assert.assertTrue(iter.hasNext());
                Assert.fail("Failure expected on an incorrect permission");
            } catch (RemoteException ex) {
                // expected
                Assert.assertTrue(RangerAccessControlException.class.getName().equals(ex.getClassName()));
            }

            fs.close();
            return null;
        }
    });

    // Now try to read the directory as known user "dave" - this should not be allowed, as he doesn't have the correct permissions
    ugi = UserGroupInformation.createUserForTesting("dave", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Try to list the parent directory
            try {
                RemoteIterator<LocatedFileStatus> iter = fs.listFiles(file.getParent(), false);
                Assert.assertTrue(iter.hasNext());
                Assert.fail("Failure expected on an incorrect permission");
            } catch (RemoteException ex) {
                // expected
                Assert.assertTrue(RangerAccessControlException.class.getName().equals(ex.getClassName()));
            }

            fs.close();
            return null;
        }
    });
}

From source file:org.apache.crunch.impl.spark.SparkRuntime.java

License:Apache License

private void distributeFiles() {
    try {
        URI[] uris = DistributedCache.getCacheFiles(conf);
        if (uris != null) {
            URI[] outURIs = new URI[uris.length];
            for (int i = 0; i < uris.length; i++) {
                Path path = new Path(uris[i]);
                FileSystem fs = path.getFileSystem(conf);
                if (fs.isFile(path)) {
                    outURIs[i] = uris[i];
                } else {
                    Path mergePath = new Path(path.getParent(), "sparkreadable-" + path.getName());
                    FileUtil.copyMerge(fs, path, fs, mergePath, false, conf, "");
                    outURIs[i] = mergePath.toUri();
                }
                sparkContext.addFile(outURIs[i].toString());
            }
            DistributedCache.setCacheFiles(outURIs, conf);
        }
    } catch (IOException e) {
        throw new RuntimeException("Error retrieving cache files", e);
    }
}
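The getParent() call here derives a sibling path next to the original one: the merged copy is written into the same directory under a new name. In isolation, and with purely illustrative paths, that pattern is:

import org.apache.hadoop.fs.Path;

Path path = new Path("/cache/job/output");
Path mergePath = new Path(path.getParent(), "sparkreadable-" + path.getName());
// mergePath -> /cache/job/sparkreadable-output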