Example usage for org.apache.hadoop.fs.Path getParent

Introduction

This page lists example usages of org.apache.hadoop.fs.Path.getParent().

Prototype

public Path getParent() 

Document

Returns the parent of a path or null if at root.
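
For orientation, a minimal sketch of the method's contract (the paths below are hypothetical):

import org.apache.hadoop.fs.Path;

public class GetParentDemo {
    public static void main(String[] args) {
        Path file = new Path("/user/alice/data/part-00000");
        System.out.println(file.getParent());             // /user/alice/data
        System.out.println(file.getParent().getParent()); // /user/alice
        System.out.println(new Path("/").getParent());    // null: already at root

        // A common pattern in the examples below: resolve a sibling file
        // in the same directory via the parent path.
        Path sibling = new Path(file.getParent(), "_SUCCESS");
        System.out.println(sibling); // /user/alice/data/_SUCCESS
    }
}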

Usage

From source file:com.cloudera.hadoop.hdfs.nfs.nfs4.BaseClient.java

License:Apache License

public InputStream forRead(final Path path, final int readSize) throws Exception {
    setClientIDIfUnset();

    final FileHandle parentFileHandle = checkNotNull(lookup(path.getParent()));
    final StateID stateID = checkNotNull(
            doOpen(parentFileHandle, path.getName(), NFS4_OPEN4_SHARE_ACCESS_READ, NFS4_OPEN4_NOCREATE));
    final FileHandle fileHandle = checkNotNull(mPathFileHandleMap.get(path));

    /*
     * The code below reads 1 byte per RPC. It is intended for testing only
     * and should never be copied and used elsewhere.
     */
    return new InputStream() {

        protected long fileOffset = 0L;
        protected byte[] buffer = new byte[readSize];
        protected int bufferOffset;
        protected int bufferLength;

        @Override
        public int read() throws IOException {

            if (bufferOffset < bufferLength) {
                fileOffset++;
                return buffer[bufferOffset++] & 0xFF; // mask so read() returns 0-255 per contract
            }

            CompoundRequest compoundRequest = newRequest();
            List<OperationRequest> operations = Lists.newArrayList();
            PUTFHRequest putFhRequest = new PUTFHRequest();
            putFhRequest.setFileHandle(fileHandle);
            operations.add(putFhRequest);

            READRequest readRequest = new READRequest();
            readRequest.setOffset(fileOffset);
            readRequest.setCount(buffer.length);
            readRequest.setStateID(stateID);
            operations.add(readRequest);

            compoundRequest.setOperations(operations);
            List<OperationResponse> operationResponses;
            try {
                operationResponses = getResult(compoundRequest);
            } catch (NFS4Exception e) {
                throw new RuntimeException(e);
            }
            getResponse(operationResponses.remove(0), PUTFHResponse.class);

            READResponse readResponse = getResponse(operationResponses.remove(0), READResponse.class);
            if (readResponse.isEOF()) {
                return -1;
            }
            bufferOffset = 0;
            bufferLength = readResponse.getLength();
            byte[] data = readResponse.getData();
            assertNotNull(data);
            System.arraycopy(data, readResponse.getStart(), buffer, bufferOffset, bufferLength);
            return read();
        }

        @Override
        public void close() throws IOException {

            CompoundRequest compoundRequest = newRequest();
            List<OperationRequest> operations = Lists.newArrayList();
            PUTFHRequest putFhRequest = new PUTFHRequest();
            putFhRequest.setFileHandle(fileHandle);
            operations.add(putFhRequest);

            CLOSERequest closeRequest = new CLOSERequest();
            closeRequest.setSeqID(stateID.getSeqID() + 1);
            closeRequest.setStateID(stateID);
            operations.add(closeRequest);

            compoundRequest.setOperations(operations);
            List<OperationResponse> operationResponses;
            try {
                operationResponses = getResult(compoundRequest);
            } catch (NFS4Exception e) {
                throw new RuntimeException(e);
            }
            getResponse(operationResponses.remove(0), PUTFHResponse.class);

            CLOSEResponse closeResponse = getResponse(operationResponses.remove(0), CLOSEResponse.class);

            mFileHandleStateID.put(fileHandle, closeResponse.getStateID());
        }
    };
}

From source file:com.cloudera.hadoop.hdfs.nfs.nfs4.NFS4Handler.java

License:Apache License

/**
 * Returns an open output stream for the file identified by the given file
 * handle, creating the file if necessary.
 *
 * @param stateID
 * @param fs
 * @param fileHandle
 * @param overwrite
 * @return the open FSDataOutputStream for the file
 * @throws NFS4Exception
 * @throws IOException
 */
public synchronized FSDataOutputStream forWrite(StateID stateID, FileSystem fs, FileHandle fileHandle,
        boolean overwrite) throws NFS4Exception, IOException {
    FileHolder fileHolder = mFileHandleMap.get(fileHandle);
    if (fileHolder != null) {
        OpenFile<FSDataOutputStream> file = fileHolder.getFSDataOutputStream();
        if (file != null) {
            if (file.isOwnedBy(stateID)) {
                return file.get();
            }
            throw new NFS4Exception(NFS4ERR_FILE_OPEN);
        }
        Path path = new Path(fileHolder.getPath());
        boolean exists = fs.exists(path);
        // If overwrite == false, fs.create throws an IOException, which is not
        // useful here, and always mapping an IOException to EXIST does not seem
        // to make sense either. As such, we mitigate the issue this way, even
        // though there is a known race between the exists check and the create.
        if (!overwrite && exists) {
            // Append to a file. We used to return NFS4ERR_EXIST here, but the
            // Linux client behaved rather oddly: it would open the file with
            // overwrite=true and then send the data which was to be appended
            // at offset 0.
            throw new NFS4Exception(NFS4ERR_PERM, "File Exists and overwrite = false", true);
        }
        if (path.getParent() != null) {
            // TODO: bad perms will fail with an IOException; perhaps we should
            // check that the file can be created before trying to, so we can
            // return the correct error, perm denied
        }
        if (exists && fs.getFileStatus(path).isDir()) {
            throw new NFS4Exception(NFS4ERR_ISDIR);
        }
        FSDataOutputStream out = fs.create(path, overwrite);
        this.incrementMetric("FILES_OPENED_WRITE", 1);
        fileHolder.setFSDataOutputStream(stateID, out);
        return out;
    }
    throw new NFS4Exception(NFS4ERR_STALE);
}

From source file:com.cloudera.hadoop.hdfs.nfs.nfs4.state.HDFSState.java

License:Apache License

/**
 * Open a file handle for write.
 *
 * @param fs
 * @param stateID
 * @param fileHandle
 * @param overwrite
 * @return the open HDFSOutputStream for the file
 * @throws NFS4Exception
 * @throws IOException
 */
public synchronized HDFSOutputStream openForWrite(FileSystem fs, StateID stateID, FileHandle fileHandle,
        boolean overwrite) throws NFS4Exception, IOException {
    HDFSFile hdfsFile = mOpenFilesMap.get(fileHandle);
    if (hdfsFile != null) {
        OpenResource<HDFSOutputStream> file = hdfsFile.getHDFSOutputStreamForWrite();
        if (file != null) {
            if (file.isOwnedBy(stateID)) {
                return file.get();
            }
            throw new NFS4Exception(NFS4ERR_FILE_OPEN);
        }
    }
    INode inode = mFileHandleINodeMap.getINodeByFileHandle(fileHandle);
    if (inode == null) {
        throw new NFS4Exception(NFS4ERR_STALE);
    }
    Path path = new Path(inode.getPath());
    boolean exists = fs.exists(path);
    // If overwrite == false, fs.create throws an IOException, which is not
    // useful here, and always mapping an IOException to EXIST does not seem
    // to make sense either. As such, we mitigate the issue this way, even
    // though there is a known race between the exists check and the create.
    if (!overwrite && exists) {
        // Append to a file. We used to return NFS4ERR_EXIST here, but the
        // Linux client behaved rather oddly: it would open the file with
        // overwrite=true and then send the data which was to be appended
        // at offset 0.
        throw new NFS4Exception(NFS4ERR_PERM, "File Exists and overwrite = false");
    }
    if (path.getParent() != null) {
        // TODO: bad perms will fail with an IOException; perhaps we should
        // check that the file can be created before trying to, so we can
        // return the correct error, perm denied
        // check(user, groups, status, access);
    }
    if (exists && fs.getFileStatus(path).isDir()) {
        throw new NFS4Exception(NFS4ERR_ISDIR);
    }
    HDFSOutputStream out = new HDFSOutputStream(fs.create(path, overwrite), path.toString(), fileHandle);
    mMetrics.incrementMetric(FILES_OPENED_WRITE, 1);
    if (hdfsFile == null) {
        hdfsFile = new HDFSFile(fileHandle, inode.getPath(), inode.getNumber());
        mOpenFilesMap.put(fileHandle, hdfsFile);
    }
    hdfsFile.setHDFSOutputStream(stateID, out);
    return out;
}

From source file:com.cloudera.hive.scd.SQLUpdater.java

License:Open Source License

private List<String> loadUpdateStatements(InputSplit split, JobConf jc) throws IOException {
    long currentSCDTime = asSCDTime(jc.get("scd.time", ""), System.currentTimeMillis());
    List<String> stmts = Lists.newArrayList();
    if (split instanceof FileSplit) {
        Path base = ((FileSplit) split).getPath();
        FileSystem fs = base.getFileSystem(jc);
        Path updates = new Path(base.getParent(), ".updates");
        if (fs.exists(updates)) {
            stmts.addAll(readLines(fs, updates, currentSCDTime));
        }
    }
    return stmts;
}

From source file:com.cloudera.hoop.client.fs.TestHoopFileSystem.java

License:Open Source License

private void testRename() throws Exception {
    FileSystem fs = FileSystem.get(getHadoopConf());
    Path path = new Path(getHadoopTestDir(), "foo");
    fs.mkdirs(path);
    fs.close();
    Configuration conf = new Configuration();
    conf.set("fs.http.impl", HoopFileSystem.class.getName());
    fs = FileSystem.get(getJettyURL().toURI(), conf);
    Path oldPath = new Path(path.toUri().getPath());
    Path newPath = new Path(path.getParent(), "bar");
    fs.rename(oldPath, newPath);
    fs.close();
    fs = FileSystem.get(getHadoopConf());
    Assert.assertFalse(fs.exists(oldPath));
    Assert.assertTrue(fs.exists(newPath));
    fs.close();
}

From source file:com.cloudera.hoop.client.fs.TestHoopFileSystem.java

License:Open Source License

private void testListStatus() throws Exception {
    FileSystem fs = FileSystem.get(getHadoopConf());
    Path path = new Path(getHadoopTestDir(), "foo.txt");
    OutputStream os = fs.create(path);
    os.write(1);
    os.close();
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();

    Configuration conf = new Configuration();
    conf.set("fs.http.impl", HoopFileSystem.class.getName());
    fs = FileSystem.get(getJettyURL().toURI(), conf);
    FileStatus status2 = fs.getFileStatus(new Path(path.toUri().getPath()));
    fs.close();

    Assert.assertEquals(status2.getPermission(), status1.getPermission());
    Assert.assertEquals(status2.getPath().toUri().getPath(), status1.getPath().toUri().getPath());
    Assert.assertEquals(status2.getReplication(), status1.getReplication());
    Assert.assertEquals(status2.getBlockSize(), status1.getBlockSize());
    Assert.assertEquals(status2.getAccessTime(), status1.getAccessTime());
    Assert.assertEquals(status2.getModificationTime(), status1.getModificationTime());
    Assert.assertEquals(status2.getOwner(), status1.getOwner());
    Assert.assertEquals(status2.getGroup(), status1.getGroup());
    Assert.assertEquals(status2.getLen(), status1.getLen());

    FileStatus[] stati = fs.listStatus(path.getParent());
    Assert.assertEquals(stati.length, 1);
    Assert.assertEquals(stati[0].getPath().getName(), path.getName());
}

From source file:com.cloudera.impala.catalog.HdfsTable.java

License:Apache License

/**
 * Gets the AccessLevel that is available for Impala for this table based on the
 * permissions Impala has on the given path. If the path does not exist, recurses
 * up the path until an existing parent directory is found, and inherits access
 * permissions from that directory.
 */
private TAccessLevel getAvailableAccessLevel(FileSystem fs, Path location) throws IOException {
    FsPermissionChecker permissionChecker = FsPermissionChecker.getInstance();
    while (location != null) {
        if (fs.exists(location)) {
            FsPermissionChecker.Permissions perms = permissionChecker.getPermissions(fs, location);
            if (perms.canReadAndWrite()) {
                return TAccessLevel.READ_WRITE;
            } else if (perms.canRead()) {
                LOG.debug(String.format("Impala does not have WRITE access to '%s' in table: %s", location,
                        getFullName()));
                return TAccessLevel.READ_ONLY;
            } else if (perms.canWrite()) {
                LOG.debug(String.format("Impala does not have READ access to '%s' in table: %s", location,
                        getFullName()));
                return TAccessLevel.WRITE_ONLY;
            }
            LOG.debug(String.format("Impala does not have READ or WRITE access to " + "'%s' in table: %s",
                    location, getFullName()));
            return TAccessLevel.NONE;
        }
        location = location.getParent();
    }
    // Should never get here: at minimum the filesystem root should exist, so
    // fail loudly if no ancestor was found.
    Preconditions.checkNotNull(location, "Error: no path ancestor exists");
    return TAccessLevel.NONE;
}

From source file:com.cloudera.impala.common.FileSystemUtil.java

License:Apache License

/**
 * Moves (renames) the given file to a new location (either another directory or
 * a file). If renameIfAlreadyExists is true, no error will be thrown if a file
 * with the same name already exists in the destination location. Instead, a UUID
 * will be appended to the base file name, preserving the existing file extension.
 * If renameIfAlreadyExists is false, an IOException will be thrown if there is a
 * file name conflict.
 */
public static void moveFile(Path sourceFile, Path dest, boolean renameIfAlreadyExists) throws IOException {
    FileSystem fs = dest.getFileSystem(CONF);

    Path destFile = fs.isDirectory(dest) ? new Path(dest, sourceFile.getName()) : dest;
    // If a file with the same name does not already exist in the destination location
    // then use the same file name. Otherwise, generate a unique file name.
    if (renameIfAlreadyExists && fs.exists(destFile)) {
        Path destDir = fs.isDirectory(dest) ? dest : dest.getParent();
        destFile = new Path(destDir, appendToBaseFileName(destFile.getName(), UUID.randomUUID().toString()));
    }
    LOG.debug(String.format("Moving '%s' to '%s'", sourceFile.toString(), destFile.toString()));
    // Move (rename) the file.
    fs.rename(sourceFile, destFile);
}
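
For context, a hedged usage sketch of the method above (the paths and destination layout are hypothetical):

// Move a staging file into a table directory. With renameIfAlreadyExists=true,
// a name conflict appends a UUID to the base file name instead of failing.
FileSystemUtil.moveFile(new Path("/tmp/staging/part-0"), new Path("/warehouse/t"), true);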

From source file:com.cloudera.impala.planner.PlannerTestBase.java

License:Apache License

/**
 * Normalizes components of the given file path, removing any environment- or
 * test-run-dependent components. For example, substitutes the unique id portion
 * of Impala-generated file names with a fixed literal. Subclasses should override
 * to do filesystem-specific cleansing.
 */
protected Path cleanseFilePath(Path path) {
    String fileName = path.getName();
    Pattern pattern = Pattern.compile("\\w{16}-\\w{16}_\\d+_data");
    Matcher matcher = pattern.matcher(fileName);
    fileName = matcher.replaceFirst("<UID>_data");
    return new Path(path.getParent(), fileName);
}
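
For illustration, a hedged sketch of the cleansing this performs (the file name below is invented to match the regex in the code):

// Hypothetical input:  /warehouse/t/544f18bb25f74cd5-8f31a3f748bb25f7_1_data
// Cleansed output:     /warehouse/t/<UID>_data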

From source file:com.cloudera.oryx.common.servcomp.Store.java

License:Open Source License

private void makeParentDirs(Path path) throws IOException {
    Preconditions.checkNotNull(path);
    Path parent = path.getParent();
    if (fs.exists(parent)) {
        // Can't be a file
        return;
    }
    boolean success;
    try {
        success = fs.mkdirs(parent);
    } catch (AccessControlException ace) {
        log.error("Permissions problem; is {} writable in HDFS?", parent);
        throw ace;
    }
    if (!success && !fs.exists(parent)) {
        throw new IOException("Can't make " + parent);
    }
}