Example usage for org.apache.hadoop.fs FileStatus FileStatus

List of usage examples for org.apache.hadoop.fs FileStatus FileStatus

Introduction

On this page you can find example usage for org.apache.hadoop.fs FileStatus FileStatus.

Prototype

public FileStatus(long length, boolean isdir, int block_replication, long blocksize, long modification_time,
            Path path) 

Source Link

Usage

From source file:cascading.tap.hadoop.HttpFileSystem.java

License:Open Source License

/**
 * Resolves the status of a remote file by issuing an HTTP {@code HEAD} request.
 *
 * @param path the file-system path to probe; translated to a URL via {@code makeUrl}
 * @return a non-directory {@link FileStatus} with the reported Content-Length and
 *         Last-Modified values (falling back to 0 and the current time, respectively)
 * @throws FileNotFoundException if the server does not answer with HTTP 200
 * @throws IOException           on any connection failure
 */
@Override
public FileStatus getFileStatus(Path path) throws IOException {
    URL url = makeUrl(path);

    HttpURLConnection connection = (HttpURLConnection) url.openConnection();
    connection.setRequestMethod("HEAD");
    connection.connect();

    debugConnection(connection);

    if (connection.getResponseCode() != 200)
        throw new FileNotFoundException("could not find file: " + path);

    // Parse Content-Length as a long so objects larger than 2 GB are not truncated
    // (getHeaderFieldInt would overflow the int before widening). Missing or
    // malformed headers are treated as unknown length (0).
    long length = 0;
    String contentLength = connection.getHeaderField("Content-Length");
    if (contentLength != null) {
        try {
            length = Long.parseLong(contentLength.trim());
        } catch (NumberFormatException ignored) {
            // unparseable header -> leave length at 0 (unknown)
        }
    }

    length = length < 0 ? 0 : length; // some servers report -1 for unknown length

    long modified = connection.getHeaderFieldDate("Last-Modified", System.currentTimeMillis());

    return new FileStatus(length, false, 1, getDefaultBlockSize(), modified, path);
}

From source file:com.aliyun.fs.oss.nat.NativeOssFileSystem.java

License:Apache License

/**
 * Builds a plain-file {@link FileStatus} from OSS object metadata.
 * Replication is fixed at 1 and the block size at {@code MAX_OSS_FILE_SIZE}.
 */
private FileStatus newFile(FileMetadata meta, Path path) {
    Path qualified = path.makeQualified(this);
    long length = meta.getLength();
    return new FileStatus(length, false, 1, MAX_OSS_FILE_SIZE, meta.getLastModified(), qualified);
}

From source file:com.aliyun.fs.oss.nat.NativeOssFileSystem.java

License:Apache License

/**
 * Builds a directory {@link FileStatus} for the given path.
 * Directories carry no data: zero length and zero modification time.
 */
private FileStatus newDirectory(Path path) {
    Path qualified = path.makeQualified(this);
    return new FileStatus(0, true, 1, MAX_OSS_FILE_SIZE, 0, qualified);
}

From source file:com.asakusafw.runtime.compatibility.hadoop1.SequenceFileCompatibilityHadoop1.java

License:Apache License

/**
 * Opens a {@link SequenceFile.Reader} over an arbitrary input stream by wrapping the
 * stream in a synthetic single-file file system addressed through a dummy path.
 *
 * @param in     the stream to read; must not be {@code null}
 * @param length the logical length of the stream's contents
 * @param conf   the Hadoop configuration; must not be {@code null}
 * @throws IOException if the reader cannot be created
 */
@Override
public SequenceFile.Reader openReader(InputStream in, long length, Configuration conf) throws IOException {
    if (in == null) {
        throw new IllegalArgumentException("in must not be null"); //$NON-NLS-1$
    }
    if (conf == null) {
        throw new IllegalArgumentException("conf must not be null"); //$NON-NLS-1$
    }
    // Single block (replication 0, block size == length), zero mtime, placeholder path.
    FileStatus status = new FileStatus(length, false, 0, length, 0, new Path("dummy:///")); //$NON-NLS-1$
    return new SequenceFile.Reader(new InputStreamFileSystem(status, in), status.getPath(), conf);
}

From source file:com.cloudera.cdk.morphline.hadoop.rcfile.SingleStreamFileSystem.java

License:Apache License

/**
 * Creates a file system that exposes exactly one stream under the given path.
 * The stream's true length is unknown, so the status advertises the maximum
 * possible size ({@link Long#MAX_VALUE}).
 */
public SingleStreamFileSystem(InputStream inputStream, Path path) throws IOException {
    this.path = path;
    this.inputStream = new FSDataInputStream(inputStream);
    this.fileStatus = new FileStatus(Long.MAX_VALUE, false, 0, 0, 0, path);
}

From source file:com.conductor.s3.S3InputFormatUtils.java

License:Apache License

/**
 * Efficiently gets the Hadoop {@link org.apache.hadoop.fs.FileStatus} for all S3 files under the provided
 * {@code dirs}.
 *
 * @param s3Client
 *            s3 client
 * @param blockSize
 *            the block size
 * @param dirs
 *            the dirs to search through
 * @return the {@link org.apache.hadoop.fs.FileStatus} version of all S3 files under {@code dirs}
 */
static List<FileStatus> getFileStatuses(final AmazonS3 s3Client, final long blockSize, final Path... dirs) {
    final List<FileStatus> statuses = Lists.newArrayList();
    for (final Path dir : dirs) {
        // derive the bucket and key prefix from the path
        final String bucket = S3HadoopUtils.getBucketFromPath(dir.toString());
        final String prefix = S3HadoopUtils.getKeyFromPath(dir.toString());
        final ListObjectsRequest request = new ListObjectsRequest().withMaxKeys(Integer.MAX_VALUE)
                .withBucketName(bucket).withPrefix(prefix);
        // page through every object listing under the prefix
        ObjectListing listing = s3Client.listObjects(request);
        while (!listing.getObjectSummaries().isEmpty()) {
            for (final S3ObjectSummary summary : listing.getObjectSummaries()) {
                final Path path = new Path(
                        String.format("s3n://%s/%s", summary.getBucketName(), summary.getKey()));
                if (S3_PATH_FILTER.accept(path)) {
                    statuses.add(new FileStatus(summary.getSize(), false, 1, blockSize,
                            summary.getLastModified().getTime(), path));
                }
            }
            // a non-truncated listing is the last page; otherwise fetch the next batch
            if (!listing.isTruncated()) {
                break;
            }
            listing = s3Client.listNextBatchOfObjects(listing);
        }
    }
    return statuses;
}

From source file:com.facebook.presto.hive.PrestoS3FileSystem.java

License:Apache License

/**
 * Looks up the status of an S3 path: the bucket root and key prefixes are reported
 * as directories, object keys as regular files.
 *
 * @throws FileNotFoundException if neither an object nor any keys under the prefix exist
 */
@Override
public FileStatus getFileStatus(Path path) throws IOException {
    // the bucket root has an empty name and requires special handling
    if (path.getName().isEmpty()) {
        if (getS3ObjectMetadata(path) == null) {
            throw new FileNotFoundException("File does not exist: " + path);
        }
        return new FileStatus(0, true, 1, 0, 0, qualifiedPath(path));
    }

    ObjectMetadata metadata = getS3ObjectMetadata(path);
    if (metadata != null) {
        return new FileStatus(metadata.getContentLength(), false, 1, BLOCK_SIZE.toBytes(),
                lastModifiedTime(metadata), qualifiedPath(path));
    }

    // no object at this exact key: any keys listed under the prefix mean a directory
    Iterator<LocatedFileStatus> children = listPrefix(path);
    if (children != null && children.hasNext()) {
        return new FileStatus(0, true, 1, 0, 0, qualifiedPath(path));
    }
    throw new FileNotFoundException("File does not exist: " + path);
}

From source file:com.facebook.presto.hive.PrestoS3FileSystem.java

License:Apache License

/**
 * Converts S3 common prefixes into located directory statuses.
 * Each prefix is a pseudo-directory: zero length, directory flag set.
 */
private Iterator<LocatedFileStatus> statusFromPrefixes(List<String> prefixes) {
    List<LocatedFileStatus> directories = new ArrayList<>();
    for (String prefix : prefixes) {
        Path directoryPath = qualifiedPath(new Path("/" + prefix));
        directories.add(createLocatedFileStatus(new FileStatus(0, true, 1, 0, 0, directoryPath)));
    }
    return directories.iterator();
}

From source file:com.facebook.presto.hive.PrestoS3FileSystem.java

License:Apache License

/**
 * Converts S3 object summaries into located file statuses, skipping keys that end
 * in "/" — those are directory placeholders, not real files.
 */
private Iterator<LocatedFileStatus> statusFromObjects(List<S3ObjectSummary> objects) {
    List<LocatedFileStatus> files = new ArrayList<>();
    for (S3ObjectSummary object : objects) {
        if (object.getKey().endsWith("/")) {
            continue;
        }
        Path objectPath = qualifiedPath(new Path("/" + object.getKey()));
        FileStatus status = new FileStatus(object.getSize(), false, 1, BLOCK_SIZE.toBytes(),
                object.getLastModified().getTime(), objectPath);
        files.add(createLocatedFileStatus(status));
    }
    return files.iterator();
}

From source file:com.facebook.presto.hive.s3.PrestoS3FileSystem.java

License:Apache License

/**
 * Looks up the status of an S3 path: the bucket root and key prefixes are reported
 * as directories, object keys as regular files.
 *
 * @throws FileNotFoundException if neither an object nor any keys under the prefix exist
 */
@Override
public FileStatus getFileStatus(Path path) throws IOException {
    // the bucket root has an empty name and requires special handling
    if (path.getName().isEmpty()) {
        if (getS3ObjectMetadata(path) == null) {
            throw new FileNotFoundException("File does not exist: " + path);
        }
        return new FileStatus(0, true, 1, 0, 0, qualifiedPath(path));
    }

    ObjectMetadata metadata = getS3ObjectMetadata(path);
    if (metadata != null) {
        return new FileStatus(getObjectSize(path, metadata), false, 1, BLOCK_SIZE.toBytes(),
                lastModifiedTime(metadata), qualifiedPath(path));
    }

    // no object at this exact key: a non-empty listing under the prefix means a directory
    if (listPrefix(path).hasNext()) {
        return new FileStatus(0, true, 1, 0, 0, qualifiedPath(path));
    }
    throw new FileNotFoundException("File does not exist: " + path);
}