Example usage for org.apache.hadoop.fs Path Path

Introduction

This page lists example usages of the org.apache.hadoop.fs.Path constructor Path(URI).

Prototype

public Path(URI aUri) 

Document

Construct a path from a URI
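
A minimal sketch of the constructor in use (the URI value is illustrative):

import java.net.URI;

import org.apache.hadoop.fs.Path;

public class PathFromUriExample {
    public static void main(String[] args) {
        // Path(URI) wraps the given URI directly; Path(String) would parse the string form instead.
        Path path = new Path(URI.create("hdfs://localhost:8020/user/data/input.txt"));
        System.out.println(path.toUri().getScheme()); // hdfs
        System.out.println(path.toUri().getPath());   // /user/data/input.txt
    }
}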

Usage

From source file: alluxio.hadoop.HadoopUtilsTest.java

License: Apache License

/**
 * Test for the {@link HadoopUtils#getPathWithoutScheme(Path)} method from an HDFS URI.
 */
@Test
public void testGetPathWithoutSchemeFromHDFSURI() {
    final Path path = new Path(URI.create("hdfs://localhost:1234/foo/bar/baz?please=dont&show=up"));

    final String output = HadoopUtils.getPathWithoutScheme(path);
    Assert.assertEquals("/foo/bar/baz", output);
}
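
The assertion relies on the helper dropping the scheme, authority, and query. A sketch of how such a helper could be written with Path#toUri, as an assumption for illustration rather than HadoopUtils' actual implementation:

static String pathWithoutScheme(Path path) {
    // URI#getPath carries neither scheme, authority, nor query string.
    return path.toUri().getPath();
}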

From source file: alluxio.hadoop.HdfsFileInputStream.java

License: Apache License

/**
 * Constructs a new stream for reading a file from HDFS.
 *
 * @param uri the Alluxio file URI
 * @param conf Hadoop configuration
 * @param bufferSize the buffer size
 * @param stats filesystem statistics
 * @throws IOException if the underlying file does not exist or its stream cannot be created
 */
public HdfsFileInputStream(AlluxioURI uri, org.apache.hadoop.conf.Configuration conf, int bufferSize,
        org.apache.hadoop.fs.FileSystem.Statistics stats) throws IOException {
    LOG.debug("HdfsFileInputStream({}, {}, {}, {}, {})", uri, conf, bufferSize, stats);
    long bufferBytes = Configuration.getBytes(PropertyKey.USER_FILE_BUFFER_BYTES);
    // Multiply before the checked cast so the overflow check covers the full buffer size.
    mBuffer = new byte[Ints.checkedCast(bufferBytes * 4)];
    mCurrentPosition = 0;
    FileSystem fs = FileSystem.Factory.get();
    mHadoopConf = conf;
    mHadoopBufferSize = bufferSize;
    mStatistics = stats;
    try {
        mFileInfo = fs.getStatus(uri);
        mHdfsPath = new Path(mFileInfo.getUfsPath());
        mAlluxioFileInputStream = fs.openFile(uri, OpenFileOptions.defaults());
    } catch (FileDoesNotExistException e) {
        throw new FileNotFoundException(ExceptionMessage.HDFS_FILE_NOT_FOUND.getMessage(mHdfsPath, uri));
    } catch (AlluxioException e) {
        throw new IOException(e);
    }
}

From source file: alluxio.hadoop.mapreduce.KeyValueOutputCommitter.java

License: Apache License

/**
 * @param taskContext MapReduce task configuration
 * @return true if the task output directory exists, otherwise false
 * @throws IOException if fails to determine whether the output directory exists
 */
@Override
public boolean needsTaskCommit(TaskAttemptContext taskContext) throws IOException {
    Path taskOutputPath = new Path(KeyValueOutputFormat.getTaskOutputURI(taskContext).toString());
    FileSystem fs = taskOutputPath.getFileSystem(taskContext.getConfiguration());
    return fs.exists(taskOutputPath);
}
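
This example round-trips the task output URI through its string form, so the Path(String) constructor re-parses it; the resulting Path can then locate the FileSystem that owns it via its scheme. A short sketch with a hypothetical URI, inside a method that may throw IOException:

Path out = new Path("hdfs://namenode:8020/tmp/task-output");
// getFileSystem(Configuration) selects the FileSystem implementation from the path's scheme.
FileSystem fs = out.getFileSystem(new org.apache.hadoop.conf.Configuration());
boolean needsCommit = fs.exists(out);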

From source file: alluxio.hadoop.mapreduce.KeyValueOutputFormat.java

License: Apache License

/**
 * @param taskContext MapReduce task configuration
 * @return a {@link KeyValueOutputCommitter}
 * @throws IOException when committer fails to be created
 */
@Override
public OutputCommitter getOutputCommitter(TaskAttemptContext taskContext) throws IOException {
    if (mCommitter == null) {
        mCommitter = new KeyValueOutputCommitter(
                new Path(KeyValueOutputFormat.getJobOutputURI(taskContext).toString()), taskContext);
    }
    return mCommitter;
}

From source file: alluxio.underfs.cfs.CfsUnderFileSystemFactory.java

License: Apache License

public UnderFileSystem create(String path, Configuration alluxioConf, Object conf) {
    Preconditions.checkArgument(path != null, "path may not be null");

    Path rootPath = getRoot(new Path(path));
    synchronized (cfsUfsCache) {
        if (!cfsUfsCache.containsKey(rootPath)) {
            cfsUfsCache.put(rootPath, new CfsUnderFileSystem(path, alluxioConf, conf));
        }
        return cfsUfsCache.get(rootPath);
    }
}

From source file: alluxio.underfs.hdfs.acl.SupportedHdfsAclProvider.java

License: Apache License

@Override
public Pair<AccessControlList, DefaultAccessControlList> getAcl(FileSystem hdfs, String path)
        throws IOException {
    AclStatus hdfsAcl;
    Path filePath = new Path(path);
    boolean isDir = hdfs.isDirectory(filePath);
    try {
        hdfsAcl = hdfs.getAclStatus(filePath);
    } catch (AclException e) {
        // When dfs.namenode.acls.enabled is false, getAclStatus throws AclException.
        return new Pair<>(null, null);
    }
    AccessControlList acl = new AccessControlList();
    DefaultAccessControlList defaultAcl = new DefaultAccessControlList();

    acl.setOwningUser(hdfsAcl.getOwner());
    acl.setOwningGroup(hdfsAcl.getGroup());
    defaultAcl.setOwningUser(hdfsAcl.getOwner());
    defaultAcl.setOwningGroup(hdfsAcl.getGroup());
    for (AclEntry entry : hdfsAcl.getEntries()) {
        alluxio.security.authorization.AclEntry.Builder builder = new alluxio.security.authorization.AclEntry.Builder();
        builder.setType(getAclEntryType(entry));
        builder.setSubject(entry.getName() == null ? "" : entry.getName());
        FsAction permission = entry.getPermission();
        // A single permission (e.g. rwx) can imply several actions, so check each one independently.
        if (permission.implies(FsAction.READ)) {
            builder.addAction(AclAction.READ);
        }
        if (permission.implies(FsAction.WRITE)) {
            builder.addAction(AclAction.WRITE);
        }
        if (permission.implies(FsAction.EXECUTE)) {
            builder.addAction(AclAction.EXECUTE);
        }
        if (entry.getScope().equals(AclEntryScope.ACCESS)) {
            acl.setEntry(builder.build());
        } else {
            // default ACL, must be a directory
            defaultAcl.setEntry(builder.build());
        }
    }
    if (isDir) {
        return new Pair<>(acl, defaultAcl);
    } else {
        // a null defaultACL indicates this is a file
        return new Pair<>(acl, null);
    }
}

From source file: alluxio.underfs.hdfs.acl.SupportedHdfsAclProvider.java

License: Apache License

@Override
public void setAclEntries(FileSystem hdfs, String path,
        List<alluxio.security.authorization.AclEntry> aclEntries) throws IOException {
    // convert AccessControlList into hdfsAcl
    List<AclEntry> aclSpecs = new ArrayList<>();

    for (alluxio.security.authorization.AclEntry entry : aclEntries) {
        AclEntry hdfsAclEntry = getHdfsAclEntry(entry);
        aclSpecs.add(hdfsAclEntry);
    }
    // set the converted ACL entries on HDFS
    try {
        hdfs.setAcl(new Path(path), aclSpecs);
    } catch (UnsupportedOperationException e) {
        // noop if hdfs does not support acl
    }
}

From source file: alluxio.underfs.hdfs.HdfsUnderFileSystem.java

License: Apache License

/**
 * Constructs a new HDFS {@link UnderFileSystem}.
 *
 * @param uri the {@link AlluxioURI} for this UFS
 * @param conf the configuration for Hadoop
 */
public HdfsUnderFileSystem(AlluxioURI uri, Object conf) {
    super(uri);
    final String ufsPrefix = uri.toString();
    final org.apache.hadoop.conf.Configuration hadoopConf;
    if (conf instanceof org.apache.hadoop.conf.Configuration) {
        hadoopConf = (org.apache.hadoop.conf.Configuration) conf;
    } else {
        hadoopConf = new org.apache.hadoop.conf.Configuration();
    }
    prepareConfiguration(ufsPrefix, hadoopConf);
    hadoopConf.addResource(new Path(hadoopConf.get(PropertyKey.UNDERFS_HDFS_CONFIGURATION.toString())));
    HdfsUnderFileSystemUtils.addS3Credentials(hadoopConf);

    Path path = new Path(ufsPrefix);
    try {
        mFileSystem = path.getFileSystem(hadoopConf);
    } catch (IOException e) {
        LOG.error("Exception thrown when trying to get FileSystem for {}", ufsPrefix, e);
        throw Throwables.propagate(e);
    }
}
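
Path appears twice in this constructor: Configuration#addResource(Path) pulls in an extra XML configuration file, and Path#getFileSystem resolves the UFS client. A minimal sketch of the addResource call, with a hypothetical file location:

org.apache.hadoop.conf.Configuration hadoopConf = new org.apache.hadoop.conf.Configuration();
// addResource(Path) reads the XML file and overlays its properties onto the configuration.
hadoopConf.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"));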

From source file: alluxio.underfs.hdfs.HdfsUnderFileSystem.java

License: Apache License

@Override
public FSDataOutputStream create(String path, CreateOptions options) throws IOException {
    IOException te = null;
    RetryPolicy retryPolicy = new CountingRetry(MAX_TRY);
    Permission perm = options.getPermission();
    while (retryPolicy.attemptRetry()) {
        try {
            LOG.debug("Creating HDFS file at {} with perm {}", path, perm.toString());
            // TODO(chaomin): support creating HDFS files with specified block size and replication.
            return FileSystem.create(mFileSystem, new Path(path), new FsPermission(perm.getMode().toShort()));
        } catch (IOException e) {
            LOG.error("Retry count {} : {} ", retryPolicy.getRetryCount(), e.getMessage(), e);
            te = e;
        }
    }
    throw te;
}

From source file: alluxio.underfs.hdfs.HdfsUnderFileSystem.java

License: Apache License

@Override
public boolean delete(String path, boolean recursive) throws IOException {
    LOG.debug("deleting {} {}", path, recursive);
    IOException te = null;
    RetryPolicy retryPolicy = new CountingRetry(MAX_TRY);
    while (retryPolicy.attemptRetry()) {
        try {
            return mFileSystem.delete(new Path(path), recursive);
        } catch (IOException e) {
            LOG.error("Retry count {} : {}", retryPolicy.getRetryCount(), e.getMessage(), e);
            te = e;
        }
    }
    throw te;
}
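
Both create and delete above share the same bounded-retry idiom: attempt the HDFS call up to MAX_TRY times, keep the most recent IOException, and rethrow it once the retries are exhausted.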