Example usage for org.apache.hadoop.fs Path getName

Introduction

This page collects usage examples for org.apache.hadoop.fs.Path#getName.

Prototype

public String getName() 

Document

Returns the final component of this path.
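
For orientation, here is a minimal sketch of what getName() returns for a few representative paths (standard Hadoop Path semantics):

import org.apache.hadoop.fs.Path;

public class GetNameDemo {
    public static void main(String[] args) {
        // getName() returns everything after the last '/' in the path.
        System.out.println(new Path("/user/data/part-00000").getName());      // part-00000
        System.out.println(new Path("hdfs://nn:8020/tmp/foo.txt").getName()); // foo.txt
        // For the root path, the final component is the empty string.
        System.out.println(new Path("/").getName().isEmpty());                // true
    }
}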

Usage

From source file: com.cloudera.hadoop.hdfs.nfs.nfs4.BaseClient.java

License: Apache License

protected FileHandle lookup(Path path) throws NFS4Exception {
    Path parent;
    LOGGER.info("Lookup on " + path);
    if (path.equals(ROOT)) {
        parent = path;
    } else {
        parent = path.getParent();
    }
    FileHandle parentFileHandle = mPathFileHandleMap.get(parent);
    if (parentFileHandle == null) {
        parentFileHandle = lookup(parent);
    }

    if (parent.equals(path)) {
        return parentFileHandle;
    }

    CompoundRequest compoundRequest = newRequest();
    List<OperationRequest> operations = Lists.newArrayList();
    PUTFHRequest putFhRequest = new PUTFHRequest();
    putFhRequest.setFileHandle(parentFileHandle);
    operations.add(putFhRequest);
    LOOKUPRequest lookupRequest = new LOOKUPRequest();
    lookupRequest.setName(path.getName());
    operations.add(lookupRequest);

    operations.add(new GETFHRequest());
    operations.add(newGETATTRRequest());

    compoundRequest.setOperations(operations);

    List<OperationResponse> operationResponses = getResult(compoundRequest);

    getResponse(operationResponses.remove(0), PUTFHResponse.class);
    getResponse(operationResponses.remove(0), LOOKUPResponse.class);
    GETFHResponse getFHResponse = getResponse(operationResponses.remove(0), GETFHResponse.class);
    FileHandle fileHandle = getFHResponse.getFileHandle();
    mPathFileHandleMap.put(path, fileHandle);
    mFileHandlePathMap.put(fileHandle, path);
    GETATTRResponse getAttrResponse = getResponse(operationResponses.remove(0), GETATTRResponse.class);
    mFileHandleAttributeMap.put(fileHandle, getAttrResponse.getAttrValues());
    return fileHandle;
}
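
Note the division of labor in lookup(): getParent() drives the recursive resolution of every ancestor, while getName() supplies exactly the final path component that the NFSv4 LOOKUP operation expects.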

From source file: com.cloudera.hadoop.hdfs.nfs.nfs4.BaseClient.java

License: Apache License

public OutputStream forWrite(final Path path) throws Exception {
    setClientIDIfUnset();

    final FileHandle parentFileHandle = checkNotNull(lookup(path.getParent()));
    final StateID stateID = checkNotNull(
            doOpen(parentFileHandle, path.getName(), NFS4_OPEN4_SHARE_ACCESS_WRITE, NFS4_OPEN4_CREATE));
    final FileHandle fileHandle = checkNotNull(mPathFileHandleMap.get(path));

    return new OutputStream() {

        protected long fileOffset = 0L;

        @Override
        public void write(int b) throws IOException {
            CompoundRequest compoundRequest = newRequest();
            List<OperationRequest> operations = Lists.newArrayList();
            PUTFHRequest putFhRequest = new PUTFHRequest();
            putFhRequest.setFileHandle(fileHandle);
            operations.add(putFhRequest);

            WRITERequest writeRequest = new WRITERequest();
            byte[] data = new byte[1];
            data[0] = (byte) b;
            writeRequest.setData(data, 0, data.length);
            writeRequest.setOffset(fileOffset);
            writeRequest.setStable(NFS4_COMMIT_UNSTABLE4);
            writeRequest.setStateID(stateID);

            operations.add(writeRequest);

            compoundRequest.setOperations(operations);
            List<OperationResponse> operationResponses;
            try {
                operationResponses = getResult(compoundRequest);
            } catch (NFS4Exception e) {
                throw new RuntimeException(e);
            }
            getResponse(operationResponses.remove(0), PUTFHResponse.class);

            WRITEResponse writeResponse = getResponse(operationResponses.remove(0), WRITEResponse.class);
            if (writeResponse.getCount() != data.length) {
                throw new IOException("Write failed: " + writeResponse.getCount());
            }
            fileOffset++;
        }

        @Override
        public void close() throws IOException {

            CompoundRequest compoundRequest = newRequest();
            List<OperationRequest> operations = Lists.newArrayList();
            PUTFHRequest putFhRequest = new PUTFHRequest();
            putFhRequest.setFileHandle(fileHandle);
            operations.add(putFhRequest);

            COMMITRequest commitRequest = new COMMITRequest();
            commitRequest.setCount(0);
            commitRequest.setOffset(0);
            operations.add(commitRequest);

            CLOSERequest closeRequest = new CLOSERequest();
            closeRequest.setSeqID(stateID.getSeqID() + 1);
            closeRequest.setStateID(stateID);
            operations.add(closeRequest);

            compoundRequest.setOperations(operations);
            List<OperationResponse> operationResponses;
            try {
                operationResponses = getResult(compoundRequest);
            } catch (NFS4Exception e) {
                throw new RuntimeException(e);
            }
            getResponse(operationResponses.remove(0), PUTFHResponse.class);

            getResponse(operationResponses.remove(0), COMMITResponse.class);

            CLOSEResponse closeResponse = getResponse(operationResponses.remove(0), CLOSEResponse.class);

            mFileHandleStateID.put(fileHandle, closeResponse.getStateID());
        }
    };
}
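
A hedged usage sketch for forWrite (assumes an already-connected BaseClient named client and an illustrative path; forWrite is declared to throw Exception, so the caller must handle or propagate it):

// Each write(int) issues one WRITE compound request, so this is only
// sensible for small test payloads.
Path target = new Path("/export/tmp/example.txt");
try (OutputStream out = client.forWrite(target)) {
    out.write("hello".getBytes(java.nio.charset.StandardCharsets.UTF_8));
} // close() sends the COMMIT and CLOSE operations shown above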

From source file: com.cloudera.hadoop.hdfs.nfs.nfs4.BaseClient.java

License: Apache License

public InputStream forRead(final Path path, final int readSize) throws Exception {
    setClientIDIfUnset();

    final FileHandle parentFileHandle = checkNotNull(lookup(path.getParent()));
    final StateID stateID = checkNotNull(
            doOpen(parentFileHandle, path.getName(), NFS4_OPEN4_SHARE_ACCESS_READ, NFS4_OPEN4_NOCREATE));
    final FileHandle fileHandle = checkNotNull(mPathFileHandleMap.get(path));

    /*
     * The code below returns one byte per read() call and issues one READ
     * RPC per buffer refill. It is intended for testing only and should
     * never EVER be copied and used in real code.
     */
    return new InputStream() {

        protected long fileOffset = 0L;
        protected byte[] buffer = new byte[readSize];
        protected int bufferOffset;
        protected int bufferLength;

        @Override
        public int read() throws IOException {

            if (bufferOffset < bufferLength) {
                fileOffset++;
                return buffer[bufferOffset++];
            }

            CompoundRequest compoundRequest = newRequest();
            List<OperationRequest> operations = Lists.newArrayList();
            PUTFHRequest putFhRequest = new PUTFHRequest();
            putFhRequest.setFileHandle(fileHandle);
            operations.add(putFhRequest);

            READRequest readRequest = new READRequest();
            readRequest.setOffset(fileOffset);
            readRequest.setCount(buffer.length);
            readRequest.setStateID(stateID);
            operations.add(readRequest);

            compoundRequest.setOperations(operations);
            List<OperationResponse> operationResponses;
            try {
                operationResponses = getResult(compoundRequest);
            } catch (NFS4Exception e) {
                throw new RuntimeException(e);
            }
            getResponse(operationResponses.remove(0), PUTFHResponse.class);

            READResponse readResponse = getResponse(operationResponses.remove(0), READResponse.class);
            if (readResponse.isEOF()) {
                return -1;
            }
            bufferOffset = 0;
            bufferLength = readResponse.getLength();
            byte[] data = readResponse.getData();
            assertNotNull(data);
            System.arraycopy(data, readResponse.getStart(), buffer, bufferOffset, bufferLength);
            return read();
        }

        @Override
        public void close() throws IOException {

            CompoundRequest compoundRequest = newRequest();
            List<OperationRequest> operations = Lists.newArrayList();
            PUTFHRequest putFhRequest = new PUTFHRequest();
            putFhRequest.setFileHandle(fileHandle);
            operations.add(putFhRequest);

            CLOSERequest closeRequest = new CLOSERequest();
            closeRequest.setSeqID(stateID.getSeqID() + 1);
            closeRequest.setStateID(stateID);
            operations.add(closeRequest);

            compoundRequest.setOperations(operations);
            List<OperationResponse> operationResponses;
            try {
                operationResponses = getResult(compoundRequest);
            } catch (NFS4Exception e) {
                throw new RuntimeException(e);
            }
            getResponse(operationResponses.remove(0), PUTFHResponse.class);

            CLOSEResponse closeResponse = getResponse(operationResponses.remove(0), CLOSEResponse.class);

            mFileHandleStateID.put(fileHandle, closeResponse.getStateID());
        }
    };
}
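
A matching read-side sketch (same assumptions: a connected BaseClient named client and an illustrative path; forRead throws Exception):

Path source = new Path("/export/tmp/example.txt");
try (InputStream in = client.forRead(source, 4096)) {
    int b;
    while ((b = in.read()) != -1) {
        // One byte per call; the stream refills its 4096-byte buffer with a
        // single READ compound request whenever it runs dry.
    }
} // close() sends the CLOSE operation and records the returned state ID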

From source file: com.cloudera.hadoop.hdfs.nfs.nfs4.TestWithClient.java

License: Apache License

public void doReadDir(BaseClient client) throws IOException, InterruptedException, NFS4Exception {
    /*
     * Traverse a directory that does not change often and ensure it checks
     * out the same as through the native API.
     */
    Path rootPath = new Path("/");
    Path etcPath = new Path(TestUtils.tmpDirPathForTest);
    compareFileStatusFile(client.getFileStatus(rootPath));
    ImmutableList<Path> paths = client.listPath(new Path(rootPath, etcPath));
    File etcFile = new File(rootPath.toString(), etcPath.toString());
    assertEquals(etcFile.list().length, paths.size());
    for (Path path : paths) {
        LOGGER.debug("checking file => " + path.getName());
        compareFileStatusFile(client.getFileStatus(path));
    }

    client.shutdown();
}

From source file: com.cloudera.hoop.client.fs.TestHoopFileSystem.java

License: Open Source License

private void testListStatus() throws Exception {
    FileSystem fs = FileSystem.get(getHadoopConf());
    Path path = new Path(getHadoopTestDir(), "foo.txt");
    OutputStream os = fs.create(path);
    os.write(1);
    os.close();
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();

    Configuration conf = new Configuration();
    conf.set("fs.http.impl", HoopFileSystem.class.getName());
    fs = FileSystem.get(getJettyURL().toURI(), conf);
    FileStatus status2 = fs.getFileStatus(new Path(path.toUri().getPath()));
    fs.close();

    Assert.assertEquals(status2.getPermission(), status1.getPermission());
    Assert.assertEquals(status2.getPath().toUri().getPath(), status1.getPath().toUri().getPath());
    Assert.assertEquals(status2.getReplication(), status1.getReplication());
    Assert.assertEquals(status2.getBlockSize(), status1.getBlockSize());
    Assert.assertEquals(status2.getAccessTime(), status1.getAccessTime());
    Assert.assertEquals(status2.getModificationTime(), status1.getModificationTime());
    Assert.assertEquals(status2.getOwner(), status1.getOwner());
    Assert.assertEquals(status2.getGroup(), status1.getGroup());
    Assert.assertEquals(status2.getLen(), status1.getLen());

    FileStatus[] stati = fs.listStatus(path.getParent());
    Assert.assertEquals(stati.length, 1);
    Assert.assertEquals(stati[0].getPath().getName(), path.getName());
}

From source file: com.cloudera.impala.analysis.LoadDataStmt.java

License: Apache License

private void analyzePaths(Analyzer analyzer, HdfsTable hdfsTable) throws AnalysisException {
    // The user must have permission to access the source location. Since the files will
    // be moved from this location, the user needs ALL permissions on it.
    sourceDataPath_.analyze(analyzer, Privilege.ALL);

    try {
        Path source = sourceDataPath_.getPath();
        FileSystem fs = source.getFileSystem(FileSystemUtil.getConfiguration());
        // sourceDataPath_.analyze() ensured that path is on an HDFS filesystem.
        Preconditions.checkState(fs instanceof DistributedFileSystem);
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        if (!dfs.exists(source)) {
            throw new AnalysisException(String.format("INPATH location '%s' does not exist.", sourceDataPath_));
        }

        if (dfs.isDirectory(source)) {
            if (FileSystemUtil.getTotalNumVisibleFiles(source) == 0) {
                throw new AnalysisException(
                        String.format("INPATH location '%s' contains no visible files.", sourceDataPath_));
            }
            if (FileSystemUtil.containsSubdirectory(source)) {
                throw new AnalysisException(
                        String.format("INPATH location '%s' cannot contain subdirectories.", sourceDataPath_));
            }
        } else { // INPATH points to a file.
            if (FileSystemUtil.isHiddenFile(source.getName())) {
                throw new AnalysisException(
                        String.format("INPATH location '%s' points to a hidden file.", source));
            }
        }

        String noWriteAccessErrorMsg = String.format(
                "Unable to LOAD DATA into "
                        + "target table (%s) because Impala does not have WRITE access to HDFS " + "location: ",
                hdfsTable.getFullName());

        HdfsPartition partition;
        String location;
        if (partitionSpec_ != null) {
            partition = hdfsTable.getPartition(partitionSpec_.getPartitionSpecKeyValues());
            location = partition.getLocation();
            if (!TAccessLevelUtil.impliesWriteAccess(partition.getAccessLevel())) {
                throw new AnalysisException(noWriteAccessErrorMsg + partition.getLocation());
            }
        } else {
            // "default" partition
            partition = hdfsTable.getPartitions().get(0);
            location = hdfsTable.getLocation();
            if (!hdfsTable.hasWriteAccess()) {
                throw new AnalysisException(noWriteAccessErrorMsg + hdfsTable.getLocation());
            }
        }
        Preconditions.checkNotNull(partition);

        // Until Frontend.loadTableData() can handle cross-filesystem moves and
        // filesystems other than HDFS, require that source and dest are on the same HDFS.
        if (!FileSystemUtil.isPathOnFileSystem(new Path(location), fs)) {
            throw new AnalysisException(String.format(
                    "Unable to LOAD DATA into target table (%s) because source path (%s) and "
                            + "destination %s (%s) are on different file-systems.",
                    hdfsTable.getFullName(), source, partitionSpec_ == null ? "table" : "partition",
                    partition.getLocation()));
        }
        // Verify the files being loaded are supported.
        for (FileStatus fStatus : fs.listStatus(source)) {
            if (fs.isDirectory(fStatus.getPath()))
                continue;
            StringBuilder errorMsg = new StringBuilder();
            HdfsFileFormat fileFormat = partition.getInputFormatDescriptor().getFileFormat();
            if (!fileFormat.isFileCompressionTypeSupported(fStatus.getPath().toString(), errorMsg)) {
                throw new AnalysisException(errorMsg.toString());
            }
        }
    } catch (FileNotFoundException e) {
        throw new AnalysisException("File not found: " + e.getMessage(), e);
    } catch (IOException e) {
        throw new AnalysisException("Error accessing file system: " + e.getMessage(), e);
    }
}
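
The hidden-file check above keys off source.getName() alone. FileSystemUtil.isHiddenFile is not shown in this snippet; a plausible sketch, following the common Hadoop/Hive convention that staging and temporary files start with '.' or '_', might look like this (the actual Impala implementation may differ):

// Hypothetical name-based predicate; not the verified Impala source.
public static boolean isHiddenFile(String fileName) {
    // Hadoop and Hive prefix staging/temporary output with '.' or '_'.
    return fileName.startsWith(".") || fileName.startsWith("_");
}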

From source file: com.cloudera.impala.common.FileSystemUtil.java

License: Apache License

/**
 * Moves all visible (non-hidden) files from a source directory to a destination
 * directory. Any sub-directories within the source directory are skipped.
 * Returns the number of files moved as part of this operation.
 */
public static int moveAllVisibleFiles(Path sourceDir, Path destDir) throws IOException {
    FileSystem fs = destDir.getFileSystem(CONF);
    Preconditions.checkState(fs.isDirectory(destDir));
    Preconditions.checkState(fs.isDirectory(sourceDir));

    // Use the same UUID to resolve all file name conflicts. This helps mitigate problems
    // that might happen if there is a conflict moving a set of files that have
    // dependent file names. For example, foo.lzo and foo.lzo_index.
    UUID uuid = UUID.randomUUID();

    // Enumerate all the files in the source
    int numFilesMoved = 0;
    for (FileStatus fStatus : fs.listStatus(sourceDir)) {
        if (fStatus.isDirectory()) {
            LOG.debug("Skipping copy of directory: " + fStatus.getPath());
            continue;
        } else if (isHiddenFile(fStatus.getPath().getName())) {
            continue;
        }

        Path destFile = new Path(destDir, fStatus.getPath().getName());
        if (fs.exists(destFile)) {
            destFile = new Path(destDir, appendToBaseFileName(destFile.getName(), uuid.toString()));
        }
        FileSystemUtil.moveFile(fStatus.getPath(), destFile, false);
        ++numFilesMoved;
    }
    return numFilesMoved;
}
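
appendToBaseFileName is not included in this snippet. Going by the javadoc of moveFile below ("a UUID will be appended to the base file name, preserving the existing file extension"), a sketch consistent with that contract might be (hypothetical; the real helper may differ):

// Inserts a suffix before the file extension, e.g. foo.lzo -> foo_<uuid>.lzo.
private static String appendToBaseFileName(String baseFileName, String suffix) {
    int extensionIdx = baseFileName.lastIndexOf('.');
    if (extensionIdx == -1) {
        return baseFileName + "_" + suffix;
    }
    return baseFileName.substring(0, extensionIdx) + "_" + suffix
            + baseFileName.substring(extensionIdx);
}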

From source file: com.cloudera.impala.common.FileSystemUtil.java

License: Apache License

/**
 * Moves (renames) the given file to a new location (either another directory or a
 * file). If renameIfAlreadyExists is true, no error will be thrown if a file with the
 * same name already exists in the destination location. Instead, a UUID will be
 * appended to the base file name, preserving the existing file extension.
 * If renameIfAlreadyExists is false, an IOException will be thrown if there is a
 * file name conflict.
 */
public static void moveFile(Path sourceFile, Path dest, boolean renameIfAlreadyExists) throws IOException {
    FileSystem fs = dest.getFileSystem(CONF);

    Path destFile = fs.isDirectory(dest) ? new Path(dest, sourceFile.getName()) : dest;
    // If a file with the same name does not already exist in the destination location
    // then use the same file name. Otherwise, generate a unique file name.
    if (renameIfAlreadyExists && fs.exists(destFile)) {
        Path destDir = fs.isDirectory(dest) ? dest : dest.getParent();
        destFile = new Path(destDir, appendToBaseFileName(destFile.getName(), UUID.randomUUID().toString()));
    }
    LOG.debug(String.format("Moving '%s' to '%s'", sourceFile.toString(), destFile.toString()));
    // Move (rename) the file.
    fs.rename(sourceFile, destFile);
}
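
Note the role of sourceFile.getName() here: when the destination is a directory, the source's final path component is reused as the destination file name, so a move into a directory preserves the original name unless a conflict forces the UUID rename.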

From source file: com.cloudera.impala.planner.PlannerTestBase.java

License: Apache License

/**
 * Normalizes components of the given file path, removing any environment- or test-run
 * dependent components.  For example, substitutes the unique id portion of Impala-
 * generated file names with a fixed literal.  Subclasses should override to do
 * filesystem-specific cleansing.
 */
protected Path cleanseFilePath(Path path) {
    String fileName = path.getName();
    Pattern pattern = Pattern.compile("\\w{16}-\\w{16}_\\d+_data");
    Matcher matcher = pattern.matcher(fileName);
    fileName = matcher.replaceFirst("<UID>_data");
    return new Path(path.getParent(), fileName);
}
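
A worked example with an illustrative (made-up) file name matching the \w{16}-\w{16}_\d+_data pattern:

Path in = new Path("/test-warehouse/t/f44d91f1927baa8e-93ee20a52fd0a259_1_data");
Path out = cleanseFilePath(in);
// out is /test-warehouse/t/<UID>_data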

From source file: com.cloudera.impala.util.TestLoadMetadataUtil.java

License: Apache License

/**
 * Tests that an empty list is returned when the file path is a directory.
 */
private void testDirectory(MethodName methodName) throws IOException {
    Map<FsKey, FileBlocksInfo> perFsFileBlocks = Maps.newHashMap();
    Map<String, List<FileDescriptor>> fileDescMap = Maps.newHashMap();
    Path dirPath = createDirInHdfs("dir");
    List<FileDescriptor> fileDesclist = null;
    switch (methodName) {
    case LOAD_FILE_DESCRIPTORS:
        fileDesclist = LoadMetadataUtil.loadFileDescriptors(fs_, dirPath, null, HdfsFileFormat.TEXT,
                perFsFileBlocks, false, dirPath.getName(), null, fileDescMap);
        break;
    case LOAD_VIA_LOCATED_FILE_STATUS:
        fileDesclist = LoadMetadataUtil.loadViaListLocatedStatus(fs_, dirPath, null, HdfsFileFormat.TEXT,
                perFsFileBlocks, false, dirPath.getName(), null, fileDescMap);
        break;
    case LOAD_VIA_LIST_STATUS_ITERATOR:
        fileDesclist = LoadMetadataUtil.loadViaListStatusIterator(fs_, dirPath, null, HdfsFileFormat.TEXT,
                perFsFileBlocks, false, dirPath.getName(), null, fileDescMap);
        break;
    default:
        LOG.error("Unsupported enum method name");
        Preconditions.checkState(false);
    }
    for (FsKey key : perFsFileBlocks.keySet()) {
        assertEquals(HDFS_BASE_PATH, key.toString());
    }
    assertEquals(0, fileDesclist.size());
}