Example usage for org.apache.hadoop.security UserGroupInformation doAs

Introduction

On this page you can find example usages of org.apache.hadoop.security.UserGroupInformation.doAs.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public <T> T doAs(PrivilegedExceptionAction<T> action) throws IOException, InterruptedException 

Document

Run the given action as the user, potentially throwing an exception.
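
Before the per-project examples below, here is a minimal, self-contained sketch of the call pattern. It is illustrative only: the user name "alice" is an assumption, and createRemoteUser builds a UGI without Kerberos credentials (a kerberized cluster would typically obtain its UGI via UserGroupInformation.loginUserFromKeytabAndReturnUGI instead).

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class DoAsSketch {
    public static void main(String[] args) throws IOException, InterruptedException {
        // Build a UGI for a named user; "alice" is illustrative.
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser("alice");

        // doAs runs the action on the calling thread with the UGI's JAAS subject
        // installed and returns the action's result.
        String who = ugi.doAs(new PrivilegedExceptionAction<String>() {
            @Override
            public String run() throws Exception {
                // Inside run(), the current user is the one we ran as.
                return UserGroupInformation.getCurrentUser().getUserName();
            }
        });
        System.out.println(who); // prints "alice"
    }
}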

Usage

From source file:se.sics.nstream.hops.hdfs.HDFSHelper.java

License:Open Source License

public static Result<Long> length(UserGroupInformation ugi, final HDFSEndpoint hdfsEndpoint,
        HDFSResource resource) {
    final String filePath = resource.dirPath + Path.SEPARATOR + resource.fileName;
    LOG.debug("{}getting length of file:{}", new Object[] { logPrefix, filePath });

    try {
        Result<Long> result = ugi.doAs(new PrivilegedExceptionAction<Result<Long>>() {
            @Override
            public Result<Long> run() {
                try (FileSystem fs = FileSystem.get(hdfsEndpoint.hdfsConfig)) {
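                    // note: FileSystem#isFile and #getLength are deprecated in recent Hadoop;
                    // fs.getFileStatus(path).getLen() is the non-deprecated way to get a file's size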
                    long length = -1;
                    if (fs.isFile(new Path(filePath))) {
                        length = fs.getLength(new Path(filePath));
                    }
                    return Result.success(length);
                } catch (IOException ex) {
                    LOG.warn("{}could not get size of file:{}", logPrefix, ex.getMessage());
                    return Result.externalSafeFailure(new HDFSException("hdfs file length", ex));
                }
            }
        });
        LOG.trace("{}op completed", new Object[] { logPrefix });
        return result;
    } catch (IOException | InterruptedException ex) {
        LOG.error("{}unexpected exception:{}", logPrefix, ex);
        return Result.externalSafeFailure(new HDFSException("hdfs file length", ex));
    }
}
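
Note: Result, HDFSEndpoint, HDFSResource, HDFSException and ManifestHelper in the se.sics.nstream examples are helper types from that project, not Hadoop APIs; only FileSystem, Path and the FSData* stream classes come from Hadoop itself.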

From source file:se.sics.nstream.hops.hdfs.HDFSHelper.java

License:Open Source License

public static Result<Boolean> delete(UserGroupInformation ugi, final HDFSEndpoint hdfsEndpoint,
        HDFSResource resource) {
    final String filePath = resource.dirPath + Path.SEPARATOR + resource.fileName;
    LOG.info("{}deleting file:{}", new Object[] { logPrefix, filePath });
    try {
        Result<Boolean> result = ugi.doAs(new PrivilegedExceptionAction<Result<Boolean>>() {
            @Override
            public Result<Boolean> run() {
                try (FileSystem fs = FileSystem.get(hdfsEndpoint.hdfsConfig)) {
                    fs.delete(new Path(filePath), false);
                    return Result.success(true);
                } catch (IOException ex) {
                    LOG.warn("{}could not delete file:{}", logPrefix, ex.getMessage());
                    return Result.externalUnsafeFailure(new HDFSException("hdfs file delete", ex));
                }
            }
        });
        LOG.trace("{}op completed", new Object[] { logPrefix });
        return result;
    } catch (IOException | InterruptedException ex) {
        LOG.error("{}unexpected exception:{}", logPrefix, ex);
        return Result.externalUnsafeFailure(new HDFSException("hdfs file delete", ex));
    }
}

From source file:se.sics.nstream.hops.hdfs.HDFSHelper.java

License:Open Source License

public static Result<Boolean> simpleCreate(UserGroupInformation ugi, final HDFSEndpoint hdfsEndpoint,
        final HDFSResource hdfsResource) {
    final String filePath = hdfsResource.dirPath + Path.SEPARATOR + hdfsResource.fileName;
    LOG.info("{}creating file:{}", new Object[] { logPrefix, filePath });
    try {
        Result<Boolean> result = ugi.doAs(new PrivilegedExceptionAction<Result<Boolean>>() {
            @Override
            public Result<Boolean> run() {
                try (FileSystem fs = FileSystem.get(hdfsEndpoint.hdfsConfig)) {
                    if (!fs.isDirectory(new Path(hdfsResource.dirPath))) {
                        fs.mkdirs(new Path(hdfsResource.dirPath));
                    }
                    if (fs.isFile(new Path(filePath))) {
                        return Result.success(false);
                    }
                    try (FSDataOutputStream out = fs.create(new Path(filePath), (short) 1)) {
                        return Result.success(true);
                    }
                } catch (IOException ex) {
                    LOG.warn("{}could not write file:{}", logPrefix, ex.getMessage());
                    return Result.externalUnsafeFailure(new HDFSException("hdfs file simpleCreate", ex));
                }
            }
        });
        LOG.trace("{}op completed", new Object[] { logPrefix });
        return result;
    } catch (IOException | InterruptedException ex) {
        LOG.error("{}unexpected exception:{}", logPrefix, ex);
        return Result.externalUnsafeFailure(new HDFSException("hdfs file simpleCreate", ex));
    }
}

From source file:se.sics.nstream.hops.hdfs.HDFSHelper.java

License:Open Source License

public static Result<Boolean> createWithLength(UserGroupInformation ugi, final HDFSEndpoint hdfsEndpoint,
        final HDFSResource hdfsResource, final long fileSize) {
    final String filePath = hdfsResource.dirPath + Path.SEPARATOR + hdfsResource.fileName;
    LOG.debug("{}creating file:{}", new Object[] { logPrefix, filePath });
    try {
        Result<Boolean> result = ugi.doAs(new PrivilegedExceptionAction<Result<Boolean>>() {
            @Override
            public Result<Boolean> run() {
                try (FileSystem fs = FileSystem.get(hdfsEndpoint.hdfsConfig)) {
                    if (!fs.isDirectory(new Path(hdfsResource.dirPath))) {
                        fs.mkdirs(new Path(hdfsResource.dirPath));
                    }
                    if (fs.isFile(new Path(filePath))) {
                        return Result.success(false);
                    }
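                    // seeded Random makes the generated content reproducible; data is written in 1 KiB chunks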
                    Random rand = new Random(1234);
                    try (FSDataOutputStream out = fs.create(new Path(filePath))) {
                        for (int i = 0; i < fileSize / 1024; i++) {
                            byte[] data = new byte[1024];
                            rand.nextBytes(data);
                            out.write(data);
                            out.flush();
                        }
                        if (fileSize % 1024 != 0) {
                            byte[] data = new byte[(int) (fileSize % 1024)];
                            rand.nextBytes(data);
                            out.write(data);
                            out.flush();
                        }
                        return Result.success(true);
                    }
                } catch (IOException ex) {
                    LOG.warn("{}could not create file:{}", logPrefix, ex.getMessage());
                    return Result.externalUnsafeFailure(new HDFSException("hdfs file createWithLength", ex));
                }
            }
        });
        LOG.trace("{}op completed", new Object[] { logPrefix });
        return result;
    } catch (IOException | InterruptedException ex) {
        LOG.error("{}unexpected exception:{}", logPrefix, ex);
        return Result.externalUnsafeFailure(new HDFSException("hdfs file createWithLength", ex));
    }
}

From source file:se.sics.nstream.hops.hdfs.HDFSHelper.java

License:Open Source License

public static Result<byte[]> read(UserGroupInformation ugi, final HDFSEndpoint hdfsEndpoint,
        HDFSResource resource, final KRange readRange) {
    final String filePath = resource.dirPath + Path.SEPARATOR + resource.fileName;
    LOG.debug("{}reading from file:{}", new Object[] { logPrefix, filePath });
    try {
        Result<byte[]> result = ugi.doAs(new PrivilegedExceptionAction<Result<byte[]>>() {
            @Override
            public Result<byte[]> run() {
                try (DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(hdfsEndpoint.hdfsConfig);
                        FSDataInputStream in = fs.open(new Path(filePath))) {
                    int readLength = (int) (readRange.upperAbsEndpoint() - readRange.lowerAbsEndpoint() + 1);
                    byte[] byte_read = new byte[readLength];
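                    // positioned read: fill byte_read starting at the range's absolute lower offset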
                    in.readFully(readRange.lowerAbsEndpoint(), byte_read);
                    return Result.success(byte_read);
                } catch (IOException ex) {
                    LOG.warn("{}could not read file:{} ex:{}",
                            new Object[] { logPrefix, filePath, ex.getMessage() });
                    return Result.externalSafeFailure(new HDFSException("hdfs file read", ex));
                }
            }
        });
        LOG.trace("{}op completed", new Object[] { logPrefix });
        return result;
    } catch (IOException | InterruptedException ex) {
        LOG.error("{}unexpected exception:{}", logPrefix, ex);
        return Result.externalSafeFailure(new HDFSException("hdfs file read", ex));
    }
}

From source file:se.sics.nstream.hops.hdfs.HDFSHelper.java

License:Open Source License

public static Result<Boolean> append(UserGroupInformation ugi, final HDFSEndpoint hdfsEndpoint,
        HDFSResource resource, final byte[] data) {
    final String filePath = resource.dirPath + Path.SEPARATOR + resource.fileName;
    LOG.debug("{}appending to file:{}", new Object[] { logPrefix, filePath });
    try {
        Result<Boolean> result = ugi.doAs(new PrivilegedExceptionAction<Result<Boolean>>() {
            @Override
            public Result<Boolean> run() {
                try (DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(hdfsEndpoint.hdfsConfig);
                        FSDataOutputStream out = fs.append(new Path(filePath))) {
                    out.write(data);
                    out.flush();
                    return Result.success(true);
                } catch (IOException ex) {
                    LOG.warn("{}could not append to file:{} ex:{}",
                            new Object[] { logPrefix, filePath, ex.getMessage() });
                    return Result.externalUnsafeFailure(new HDFSException("hdfs file append", ex));
                }
            }
        });
        LOG.trace("{}op completed", new Object[] { logPrefix });
        return result;
    } catch (IOException | InterruptedException ex) {
        LOG.error("{}unexpected exception:{}", logPrefix, ex);
        return Result.externalUnsafeFailure(new HDFSException("hdfs file append", ex));
    }
}

From source file:se.sics.nstream.hops.hdfs.HDFSHelper.java

License:Open Source License

public static Result<ManifestJSON> readManifest(UserGroupInformation ugi, final HDFSEndpoint hdfsEndpoint,
        HDFSResource hdfsResource) {
    final String filePath = hdfsResource.dirPath + Path.SEPARATOR + hdfsResource.fileName;
    LOG.debug("{}reading manifest:{}", new Object[] { logPrefix, filePath });
    try {
        Result<ManifestJSON> result = ugi.doAs(new PrivilegedExceptionAction<Result<ManifestJSON>>() {
            @Override
            public Result<ManifestJSON> run() {
                try (DistributedFileSystem fs = (DistributedFileSystem) FileSystem
                        .get(hdfsEndpoint.hdfsConfig)) {
                    if (!fs.isFile(new Path(filePath))) {
                        LOG.warn("{}file does not exist", new Object[] { logPrefix, filePath });
                        return Result.externalSafeFailure(new HDFSException("hdfs file read"));
                    }
                    try (FSDataInputStream in = fs.open(new Path(filePath))) {
                        long manifestLength = fs.getLength(new Path(filePath));
                        byte[] manifestByte = new byte[(int) manifestLength];
                        in.readFully(manifestByte);
                        ManifestJSON manifest = ManifestHelper.getManifestJSON(manifestByte);
                        return Result.success(manifest);
                    }
                } catch (IOException ex) {
                    LOG.warn("{}could not read file:{} ex:{}",
                            new Object[] { logPrefix, filePath, ex.getMessage() });
                    return Result.externalSafeFailure(new HDFSException("hdfs file read", ex));
                }
            }
        });
        LOG.trace("{}op completed", new Object[] { logPrefix });
        return result;
    } catch (IOException | InterruptedException ex) {
        LOG.error("{}unexpected exception:{}", logPrefix, ex);
        return Result.externalSafeFailure(new HDFSException("hdfs file read", ex));
    }
}

From source file:se.sics.nstream.hops.hdfs.HDFSHelper.java

License:Open Source License

public static Result<Boolean> writeManifest(UserGroupInformation ugi, final HDFSEndpoint hdfsEndpoint,
        final HDFSResource hdfsResource, final ManifestJSON manifest) {
    final String filePath = hdfsResource.dirPath + Path.SEPARATOR + hdfsResource.fileName;
    LOG.debug("{}writing manifest:{}", new Object[] { logPrefix, filePath });
    try {
        Result<Boolean> result = ugi.doAs(new PrivilegedExceptionAction<Result<Boolean>>() {
            @Override
            public Result<Boolean> run() {
                try (FileSystem fs = FileSystem.get(hdfsEndpoint.hdfsConfig)) {
                    if (!fs.isDirectory(new Path(hdfsResource.dirPath))) {
                        fs.mkdirs(new Path(hdfsResource.dirPath));
                    }
                    if (fs.isFile(new Path(filePath))) {
                        return Result.success(false);
                    }
                    try (FSDataOutputStream out = fs.create(new Path(filePath))) {
                        byte[] manifestByte = ManifestHelper.getManifestByte(manifest);
                        out.write(manifestByte);
                        out.flush();
                        return Result.success(true);
                    }
                } catch (IOException ex) {
                    LOG.warn("{}could not create file:{}", logPrefix, ex.getMessage());
                    return Result.externalUnsafeFailure(new HDFSException("hdfs file writeManifest", ex));
                }
            }
        });
        LOG.trace("{}op completed", new Object[] { logPrefix });
        return result;
    } catch (IOException | InterruptedException ex) {
        LOG.error("{}unexpected exception:{}", logPrefix, ex);
        return Result.externalUnsafeFailure(new HDFSException("hdfs file writeManifest", ex));
    }
}

From source file:stroom.pipeline.server.writer.HDFSFileAppender.java

License:Apache License

public static void runOnHDFS(final UserGroupInformation userGroupInformation, final Configuration conf,
        final Consumer<FileSystem> func) {
    try {
        userGroupInformation.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                final FileSystem hdfs = getHDFS(conf);

                // run the passed lambda
                func.accept(hdfs);

                return null;
            }
        });
    } catch (final InterruptedException e) {
        Thread.currentThread().interrupt();
    } catch (final IOException ioe) {
        throw new RuntimeException(ioe);
    }
}
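
A possible call site for runOnHDFS (illustrative only: the Configuration settings, user name and path below are assumptions, not taken from the Stroom source):

Configuration conf = new Configuration();
conf.set("fs.defaultFS", "hdfs://namenode:8020"); // assumed NameNode endpoint
UserGroupInformation ugi = UserGroupInformation.createRemoteUser("stroom"); // illustrative user

runOnHDFS(ugi, conf, fs -> {
    try {
        // any FileSystem work can go inside the lambda
        fs.mkdirs(new Path("/tmp/example"));
    } catch (IOException e) {
        // Consumer cannot throw checked exceptions, so wrap
        throw new UncheckedIOException(e);
    }
});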

From source file:stroom.pipeline.server.writer.HDFSFileAppender.java

License:Apache License

private HDFSLockedOutputStream getHDFSLockedOutputStream(final Path filePath) throws IOException {
    final UserGroupInformation ugi = getUserGroupInformation();
    HDFSLockedOutputStream hdfsLockedOutputStream = null;

    try {
        hdfsLockedOutputStream = ugi.doAs(new PrivilegedExceptionAction<HDFSLockedOutputStream>() {
            @Override
            public HDFSLockedOutputStream run() throws Exception {
                final FileSystem hdfs = getHDFS();
                final Path dir = filePath.getParent();

                // Create the directory if it doesn't exist
                if (!hdfs.exists(dir)) {
                    hdfs.mkdirs(dir);
                }

                final Path lockFile = createCleanPath(filePath + LOCK_EXTENSION);
                final Path outFile = filePath;

                // Make sure we can create both output files without
                // overwriting another file.
                if (hdfs.exists(lockFile)) {
                    throw new ProcessException("Output file \"" + lockFile.toString() + "\" already exists");
                }

                if (hdfs.exists(outFile)) {
                    throw new ProcessException("Output file \"" + outFile.toString() + "\" already exists");
                }

                // Get a writer for the new lock file.
                final OutputStream outputStream = new BufferedOutputStream(hdfs.create(lockFile));

                return new HDFSLockedOutputStream(outputStream, lockFile, outFile, hdfs);
            }
        });
    } catch (final InterruptedException e) {
        Thread.currentThread().interrupt();
    }

    if (hdfsLockedOutputStream == null) {
        throw new RuntimeException(String.format(
                "Something went wrong creating the HDFSLockedOutputStream, filePath %s, hdfs uri %s",
                filePath, hdfsUri));
    }

    return hdfsLockedOutputStream;
}