Example usage for org.apache.hadoop.security UserGroupInformation doAs

Introduction

On this page you can find example usages of org.apache.hadoop.security.UserGroupInformation.doAs.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public <T> T doAs(PrivilegedExceptionAction<T> action) throws IOException, InterruptedException 

Document

Run the given action as the user, potentially throwing an exception.
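
All of the examples below follow the same basic pattern: obtain a UserGroupInformation instance (for the current user, a remote user name, or a user decoded from a delegation token) and wrap the privileged work in a PrivilegedExceptionAction passed to doAs. The following minimal sketch illustrates that pattern; the user name "someuser" and the path /tmp are placeholders, and the default Configuration is assumed to point at a reachable file system.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsExample {
    public static void main(String[] args) throws Exception {
        // Placeholder user name: createRemoteUser builds a UGI with no credentials attached.
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser("someuser");

        // doAs declares IOException and InterruptedException, matching the prototype above.
        Boolean exists = ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
            @Override
            public Boolean run() throws Exception {
                try (FileSystem fs = FileSystem.get(new Configuration())) {
                    return fs.exists(new Path("/tmp")); // placeholder path
                }
            }
        });

        System.out.println("/tmp exists: " + exists);
    }
}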

Usage

From source file:org.apache.ignite.internal.processors.hadoop.impl.v2.HadoopV2TaskContext.java

License:Apache License

/** {@inheritDoc} */
@Override
public <T> T runAsJobOwner(final Callable<T> c) throws IgniteCheckedException {
    String user = job.info().user();

    user = IgfsUtils.fixUserName(user);

    assert user != null;

    String ugiUser;

    try {
        UserGroupInformation currUser = UserGroupInformation.getCurrentUser();

        assert currUser != null;

        ugiUser = currUser.getShortUserName();
    } catch (IOException ioe) {
        throw new IgniteCheckedException(ioe);
    }

    try {
        if (F.eq(user, ugiUser))
            // if current UGI context user is the same, do direct call:
            return c.call();
        else {
            UserGroupInformation ugi = UserGroupInformation.getBestUGI(null, user);

            return ugi.doAs(new PrivilegedExceptionAction<T>() {
                @Override
                public T run() throws Exception {
                    return c.call();
                }
            });
        }
    } catch (Exception e) {
        throw new IgniteCheckedException(e);
    }
}

From source file:org.apache.ivory.cluster.util.EmbeddedCluster.java

License:Apache License

public static EmbeddedCluster newCluster(final String name, final boolean withMR, final String user)
        throws Exception {

    UserGroupInformation hdfsUser = UserGroupInformation.createRemoteUser(user);
    return hdfsUser.doAs(new PrivilegedExceptionAction<EmbeddedCluster>() {
        @Override
        public EmbeddedCluster run() throws Exception {
            return createClusterAsUser(name, withMR);
        }
    });
}

From source file:org.apache.lens.server.auth.DelegationTokenAuthenticationFilter.java

License:Apache License

@Override
public void filter(ContainerRequestContext requestContext) throws IOException {
    Principal userPrincipal = requestContext.getSecurityContext().getUserPrincipal();
    if (userPrincipal != null) {
        log.info("Authentication already done for principal {}, skipping this filter...",
                userPrincipal.getName());
        return;
    }
    // only authenticate when @Authenticate is present on resource
    if (resourceInfo.getResourceClass() == null || resourceInfo.getResourceMethod() == null) {
        return;
    }
    if (!(resourceInfo.getResourceClass().isAnnotationPresent(Authenticate.class)
            || resourceInfo.getResourceMethod().isAnnotationPresent(Authenticate.class))) {
        return;
    }

    String delegationToken = requestContext.getHeaderString(HDFS_DELEGATION_TKN_HEADER);
    if (StringUtils.isBlank(delegationToken)) {
        return;
    }

    Token<AbstractDelegationTokenIdentifier> dt = new Token<>();
    dt.decodeFromUrlString(delegationToken);
    UserGroupInformation user = dt.decodeIdentifier().getUser();
    user.addToken(dt);

    log.info("Received delegation token for user: {}", user.getUserName());

    try {
        user.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws IOException {
                try (FileSystem fs = FileSystem.get(new Configuration())) {
                    fs.exists(PATH_TO_CHECK); // dummy hdfs call
                    requestContext.setSecurityContext(createSecurityContext(user.getUserName(), AUTH_SCHEME));
                    return null;
                }
            }
        });
    } catch (InterruptedException | IOException e) {
        log.error("Error while doing HDFS op: ", e);
        throw new NotAuthorizedException(Response.status(401).entity("Invalid HDFS delegation token").build());
    }
}

From source file:org.apache.metron.maas.service.yarn.YarnUtils.java

License:Apache License

public void publishContainerStartEvent(final TimelineClient timelineClient, Container container,
        String domainId, UserGroupInformation ugi) {
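    // Build a timeline entity for the container-start event and publish it to the Timeline Server as the given user.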
    final TimelineEntity entity = new TimelineEntity();
    entity.setEntityId("" + container.getId());
    entity.setEntityType(ApplicationMaster.DSEntity.DS_CONTAINER.toString());
    entity.setDomainId(domainId);
    entity.addPrimaryFilter("user", ugi.getShortUserName());
    TimelineEvent event = new TimelineEvent();
    event.setTimestamp(System.currentTimeMillis());
    event.setEventType(ContainerEvents.CONTAINER_START.toString());
    event.addEventInfo("Node", container.getNodeId().toString());
    event.addEventInfo("Resources", container.getResource().toString());
    entity.addEvent(event);

    try {
        ugi.doAs(new PrivilegedExceptionAction<TimelinePutResponse>() {
            @Override
            public TimelinePutResponse run() throws Exception {
                return timelineClient.putEntities(entity);
            }
        });
    } catch (Exception e) {
        LOG.error("Container start event could not be published for " + container.getId().toString(),
                e instanceof UndeclaredThrowableException ? e.getCause() : e);
    }
}

From source file:org.apache.nifi.processors.hadoop.AbstractFetchHDFSRecord.java

License:Apache License

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    // do this before getting a flow file so that we always get a chance to attempt Kerberos relogin
    final FileSystem fileSystem = getFileSystem();
    final Configuration configuration = getConfiguration();
    final UserGroupInformation ugi = getUserGroupInformation();

    if (configuration == null || fileSystem == null || ugi == null) {
        getLogger().error(
                "Processor not configured properly because Configuration, FileSystem, or UserGroupInformation was null");
        context.yield();
        return;
    }

    final FlowFile originalFlowFile = session.get();
    if (originalFlowFile == null) {
        context.yield();
        return;
    }

    ugi.doAs((PrivilegedAction<Object>) () -> {
        FlowFile child = null;
        final String filenameValue = context.getProperty(FILENAME)
                .evaluateAttributeExpressions(originalFlowFile).getValue();
        try {
            final Path path = new Path(filenameValue);
            final AtomicReference<Throwable> exceptionHolder = new AtomicReference<>(null);
            final AtomicReference<WriteResult> writeResult = new AtomicReference<>();

            final RecordSetWriterFactory recordSetWriterFactory = context.getProperty(RECORD_WRITER)
                    .asControllerService(RecordSetWriterFactory.class);

            final StopWatch stopWatch = new StopWatch(true);

            // use a child FlowFile so that if any error occurs we can route the original untouched FlowFile to retry/failure
            child = session.create(originalFlowFile);

            final AtomicReference<String> mimeTypeRef = new AtomicReference<>();
            child = session.write(child, (final OutputStream rawOut) -> {
                try (final BufferedOutputStream out = new BufferedOutputStream(rawOut);
                        final HDFSRecordReader recordReader = createHDFSRecordReader(context, originalFlowFile,
                                configuration, path)) {

                    Record record = recordReader.nextRecord();
                    final RecordSchema schema = recordSetWriterFactory.getSchema(
                            originalFlowFile.getAttributes(), record == null ? null : record.getSchema());

                    try (final RecordSetWriter recordSetWriter = recordSetWriterFactory
                            .createWriter(getLogger(), schema, out)) {
                        recordSetWriter.beginRecordSet();
                        if (record != null) {
                            recordSetWriter.write(record);
                        }

                        while ((record = recordReader.nextRecord()) != null) {
                            recordSetWriter.write(record);
                        }

                        writeResult.set(recordSetWriter.finishRecordSet());
                        mimeTypeRef.set(recordSetWriter.getMimeType());
                    }
                } catch (Exception e) {
                    exceptionHolder.set(e);
                }
            });

            stopWatch.stop();

            // if any errors happened within the session.write then throw the exception so we jump
            // into one of the appropriate catch blocks below
            if (exceptionHolder.get() != null) {
                throw exceptionHolder.get();
            }

            FlowFile successFlowFile = postProcess(context, session, child, path);

            final Map<String, String> attributes = new HashMap<>(writeResult.get().getAttributes());
            attributes.put(RECORD_COUNT_ATTR, String.valueOf(writeResult.get().getRecordCount()));
            attributes.put(CoreAttributes.MIME_TYPE.key(), mimeTypeRef.get());
            successFlowFile = session.putAllAttributes(successFlowFile, attributes);

            final Path qualifiedPath = path.makeQualified(fileSystem.getUri(),
                    fileSystem.getWorkingDirectory());
            getLogger().info("Successfully received content from {} for {} in {} milliseconds",
                    new Object[] { qualifiedPath, successFlowFile, stopWatch.getDuration() });
            session.getProvenanceReporter().fetch(successFlowFile, qualifiedPath.toString(),
                    stopWatch.getDuration(TimeUnit.MILLISECONDS));
            session.transfer(successFlowFile, REL_SUCCESS);
            session.remove(originalFlowFile);
            return null;

        } catch (final FileNotFoundException | AccessControlException e) {
            getLogger().error("Failed to retrieve content from {} for {} due to {}; routing to failure",
                    new Object[] { filenameValue, originalFlowFile, e });
            final FlowFile failureFlowFile = session.putAttribute(originalFlowFile, FETCH_FAILURE_REASON_ATTR,
                    e.getMessage() == null ? e.toString() : e.getMessage());
            session.transfer(failureFlowFile, REL_FAILURE);
        } catch (final IOException | FlowFileAccessException e) {
            getLogger().error("Failed to retrieve content from {} for {} due to {}; routing to retry",
                    new Object[] { filenameValue, originalFlowFile, e });
            session.transfer(session.penalize(originalFlowFile), REL_RETRY);
            context.yield();
        } catch (final Throwable t) {
            getLogger().error("Failed to retrieve content from {} for {} due to {}; routing to failure",
                    new Object[] { filenameValue, originalFlowFile, t });
            final FlowFile failureFlowFile = session.putAttribute(originalFlowFile, FETCH_FAILURE_REASON_ATTR,
                    t.getMessage() == null ? t.toString() : t.getMessage());
            session.transfer(failureFlowFile, REL_FAILURE);
        }

        // if we got this far then we weren't successful so we need to clean up the child flow file if it got initialized
        if (child != null) {
            session.remove(child);
        }

        return null;
    });

}

From source file:org.apache.nifi.processors.hadoop.AbstractPutHDFSRecord.java

License:Apache License

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    // do this before getting a flow file so that we always get a chance to attempt Kerberos relogin
    final FileSystem fileSystem = getFileSystem();
    final Configuration configuration = getConfiguration();
    final UserGroupInformation ugi = getUserGroupInformation();

    if (configuration == null || fileSystem == null || ugi == null) {
        getLogger().error(
                "Processor not configured properly because Configuration, FileSystem, or UserGroupInformation was null");
        context.yield();
        return;
    }

    final FlowFile flowFile = session.get();
    if (flowFile == null) {
        context.yield();
        return;
    }

    ugi.doAs((PrivilegedAction<Object>) () -> {
        Path tempDotCopyFile = null;
        FlowFile putFlowFile = flowFile;
        try {
            final String filenameValue = putFlowFile.getAttribute(CoreAttributes.FILENAME.key()); // TODO codec extension
            final String directoryValue = context.getProperty(DIRECTORY)
                    .evaluateAttributeExpressions(putFlowFile).getValue();

            // create the directory if it doesn't exist
            final Path directoryPath = new Path(directoryValue);
            createDirectory(fileSystem, directoryPath, remoteOwner, remoteGroup);

            // write to tempFile first and on success rename to destFile
            final Path tempFile = new Path(directoryPath, "." + filenameValue);
            final Path destFile = new Path(directoryPath, filenameValue);

            final boolean destinationExists = fileSystem.exists(destFile) || fileSystem.exists(tempFile);
            final boolean shouldOverwrite = context.getProperty(OVERWRITE).asBoolean();

            // if the tempFile or destFile already exist, and overwrite is set to false, then transfer to failure
            if (destinationExists && !shouldOverwrite) {
                session.transfer(session.penalize(putFlowFile), REL_FAILURE);
                getLogger().warn(
                        "penalizing {} and routing to failure because file with same name already exists",
                        new Object[] { putFlowFile });
                return null;
            }

            final AtomicReference<Throwable> exceptionHolder = new AtomicReference<>(null);
            final AtomicReference<WriteResult> writeResult = new AtomicReference<>();
            final RecordReaderFactory recordReaderFactory = context.getProperty(RECORD_READER)
                    .asControllerService(RecordReaderFactory.class);

            final FlowFile flowFileIn = putFlowFile;
            final StopWatch stopWatch = new StopWatch(true);

            // Read records from the incoming FlowFile and write them the tempFile
            session.read(putFlowFile, (final InputStream rawIn) -> {
                RecordReader recordReader = null;
                HDFSRecordWriter recordWriter = null;

                try (final BufferedInputStream in = new BufferedInputStream(rawIn)) {

                    // if we fail to create the RecordReader then we want to route to failure, so we need to
                    // handle this separately from the other IOExceptions which normally route to retry
                    try {
                        recordReader = recordReaderFactory.createRecordReader(flowFileIn, in, getLogger());
                    } catch (Exception e) {
                        final RecordReaderFactoryException rrfe = new RecordReaderFactoryException(
                                "Unable to create RecordReader", e);
                        exceptionHolder.set(rrfe);
                        return;
                    }

                    final RecordSet recordSet = recordReader.createRecordSet();

                    recordWriter = createHDFSRecordWriter(context, flowFile, configuration, tempFile,
                            recordReader.getSchema());
                    writeResult.set(recordWriter.write(recordSet));
                } catch (Exception e) {
                    exceptionHolder.set(e);
                } finally {
                    IOUtils.closeQuietly(recordReader);
                    IOUtils.closeQuietly(recordWriter);
                }
            });
            stopWatch.stop();

            final String dataRate = stopWatch.calculateDataRate(putFlowFile.getSize());
            final long millis = stopWatch.getDuration(TimeUnit.MILLISECONDS);
            tempDotCopyFile = tempFile;

            // if any errors happened within the session.read then throw the exception so we jump
            // into one of the appropriate catch blocks below
            if (exceptionHolder.get() != null) {
                throw exceptionHolder.get();
            }

            // Attempt to rename from the tempFile to destFile, and change owner if successfully renamed
            rename(fileSystem, tempFile, destFile);
            changeOwner(fileSystem, destFile, remoteOwner, remoteGroup);

            getLogger().info("Wrote {} to {} in {} milliseconds at a rate of {}",
                    new Object[] { putFlowFile, destFile, millis, dataRate });

            putFlowFile = postProcess(context, session, putFlowFile, destFile);

            final String newFilename = destFile.getName();
            final String hdfsPath = destFile.getParent().toString();

            // Update the filename and absolute path attributes
            final Map<String, String> attributes = new HashMap<>(writeResult.get().getAttributes());
            attributes.put(CoreAttributes.FILENAME.key(), newFilename);
            attributes.put(ABSOLUTE_HDFS_PATH_ATTRIBUTE, hdfsPath);
            attributes.put(RECORD_COUNT_ATTR, String.valueOf(writeResult.get().getRecordCount()));
            putFlowFile = session.putAllAttributes(putFlowFile, attributes);

            // Send a provenance event and transfer to success
            final Path qualifiedPath = destFile.makeQualified(fileSystem.getUri(),
                    fileSystem.getWorkingDirectory());
            session.getProvenanceReporter().send(putFlowFile, qualifiedPath.toString());
            session.transfer(putFlowFile, REL_SUCCESS);

        } catch (IOException | FlowFileAccessException e) {
            deleteQuietly(fileSystem, tempDotCopyFile);
            getLogger().error("Failed to write due to {}", new Object[] { e });
            session.transfer(session.penalize(putFlowFile), REL_RETRY);
            context.yield();
        } catch (Throwable t) {
            deleteQuietly(fileSystem, tempDotCopyFile);
            getLogger().error("Failed to write due to {}", new Object[] { t });
            session.transfer(putFlowFile, REL_FAILURE);
        }

        return null;
    });
}

From source file:org.apache.nifi.processors.hadoop.FetchHDFS.java

License:Apache License

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final FileSystem hdfs = getFileSystem();
    final UserGroupInformation ugi = getUserGroupInformation();
    final String filenameValue = context.getProperty(FILENAME).evaluateAttributeExpressions(flowFile)
            .getValue();

    final Path path;
    try {
        path = new Path(filenameValue);
    } catch (IllegalArgumentException e) {
        getLogger().error("Failed to retrieve content from {} for {} due to {}; routing to failure",
                new Object[] { filenameValue, flowFile, e });
        flowFile = session.putAttribute(flowFile, "hdfs.failure.reason", e.getMessage());
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    final StopWatch stopWatch = new StopWatch(true);
    final FlowFile finalFlowFile = flowFile;

    ugi.doAs(new PrivilegedAction<Object>() {
        @Override
        public Object run() {
            InputStream stream = null;
            CompressionCodec codec = null;
            Configuration conf = getConfiguration();
            final CompressionCodecFactory compressionCodecFactory = new CompressionCodecFactory(conf);
            final CompressionType compressionType = CompressionType
                    .valueOf(context.getProperty(COMPRESSION_CODEC).toString());
            final boolean inferCompressionCodec = compressionType == CompressionType.AUTOMATIC;

            if (inferCompressionCodec) {
                codec = compressionCodecFactory.getCodec(path);
            } else if (compressionType != CompressionType.NONE) {
                codec = getCompressionCodec(context, getConfiguration());
            }

            FlowFile flowFile = finalFlowFile;
            final Path qualifiedPath = path.makeQualified(hdfs.getUri(), hdfs.getWorkingDirectory());
            try {
                final String outputFilename;
                final String originalFilename = path.getName();
                stream = hdfs.open(path, 16384);

                // Check if compression codec is defined (inferred or otherwise)
                if (codec != null) {
                    stream = codec.createInputStream(stream);
                    outputFilename = StringUtils.removeEnd(originalFilename, codec.getDefaultExtension());
                } else {
                    outputFilename = originalFilename;
                }

                flowFile = session.importFrom(stream, finalFlowFile);
                flowFile = session.putAttribute(flowFile, CoreAttributes.FILENAME.key(), outputFilename);

                stopWatch.stop();
                getLogger().info("Successfully received content from {} for {} in {}",
                        new Object[] { qualifiedPath, flowFile, stopWatch.getDuration() });
                session.getProvenanceReporter().fetch(flowFile, qualifiedPath.toString(),
                        stopWatch.getDuration(TimeUnit.MILLISECONDS));
                session.transfer(flowFile, REL_SUCCESS);
            } catch (final FileNotFoundException | AccessControlException e) {
                getLogger().error("Failed to retrieve content from {} for {} due to {}; routing to failure",
                        new Object[] { qualifiedPath, flowFile, e });
                flowFile = session.putAttribute(flowFile, "hdfs.failure.reason", e.getMessage());
                flowFile = session.penalize(flowFile);
                session.transfer(flowFile, REL_FAILURE);
            } catch (final IOException e) {
                getLogger().error(
                        "Failed to retrieve content from {} for {} due to {}; routing to comms.failure",
                        new Object[] { qualifiedPath, flowFile, e });
                flowFile = session.penalize(flowFile);
                session.transfer(flowFile, REL_COMMS_FAILURE);
            } finally {
                IOUtils.closeQuietly(stream);
            }

            return null;
        }
    });

}

From source file:org.apache.nifi.processors.hadoop.GetHDFSFileInfo.java

License:Apache License

protected HDFSObjectInfoDetails walkHDFSTree(final ProcessContext context, final ProcessSession session,
        FlowFile origFF, final FileSystem hdfs, final UserGroupInformation ugi, final HDFSFileInfoRequest req,
        HDFSObjectInfoDetails parent, final boolean statsOnly) throws Exception {

    final HDFSObjectInfoDetails p = parent;

    if (!ugi.doAs((PrivilegedExceptionAction<Boolean>) () -> hdfs
            .exists(p != null ? p.getPath() : new Path(req.fullPath)))) {
        return null;
    }

    if (parent == null) {
        parent = new HDFSObjectInfoDetails(ugi.doAs(
                (PrivilegedExceptionAction<FileStatus>) () -> hdfs.getFileStatus(new Path(req.fullPath))));
    }
    if (parent.isFile() && p == null) {
        //single file path requested and found, lets send to output:
        processHDFSObject(context, session, origFF, req, parent, true);
        return parent;
    }

    final Path path = parent.getPath();

    FileStatus[] listFSt = null;
    try {
        listFSt = ugi.doAs((PrivilegedExceptionAction<FileStatus[]>) () -> hdfs.listStatus(path));
    } catch (IOException e) {
        parent.error = "Couldn't list directory: " + e;
        processHDFSObject(context, session, origFF, req, parent, p == null);
        return parent; //File not found exception, or access denied - don't interrupt, just don't list
    }
    if (listFSt != null) {
        for (FileStatus f : listFSt) {
            HDFSObjectInfoDetails o = new HDFSObjectInfoDetails(f);
            HDFSObjectInfoDetails vo = validateMatchingPatterns(o, req);
            if (o.isDirectory() && !o.isSymlink() && req.isRecursive) {
                o = walkHDFSTree(context, session, origFF, hdfs, ugi, req, o, vo == null || statsOnly);
                parent.countDirs += o.countDirs;
                parent.totalLen += o.totalLen;
                parent.countFiles += o.countFiles;
            } else if (o.isDirectory() && o.isSymlink()) {
                parent.countDirs += 1;
            } else if (o.isFile() && !o.isSymlink()) {
                parent.countFiles += 1;
                parent.totalLen += o.getLen();
            } else if (o.isFile() && o.isSymlink()) {
                parent.countFiles += 1; // do not add length of the symlink, as it doesn't consume space under THIS directory, but count files, as it is still an object.
            }

            // Decide what to do with child: if requested FF per object or per dir - just emit new FF with info in 'o' object
            if (vo != null && !statsOnly) {
                parent.addChild(vo);
                if (vo.isFile() && !vo.isSymlink()) {
                    processHDFSObject(context, session, origFF, req, vo, false);
                }
            }
        }
        if (!statsOnly) {
            processHDFSObject(context, session, origFF, req, parent, p == null);
        }
        if (req.groupping != Groupping.ALL) {
            parent.setChildren(null); //we need children in full tree only when single output requested.
        }
    }

    return parent;
}

From source file:org.apache.nifi.processors.hadoop.GetHDFSSequenceFile.java

License:Apache License

protected Set<FlowFile> getFlowFiles(final Configuration conf, final FileSystem hdfs,
        final SequenceFileReader<Set<FlowFile>> reader, final Path file) throws Exception {
    PrivilegedExceptionAction<Set<FlowFile>> privilegedExceptionAction = new PrivilegedExceptionAction<Set<FlowFile>>() {
        @Override
        public Set<FlowFile> run() throws Exception {
            return reader.readSequenceFile(file, conf, hdfs);
        }
    };
    UserGroupInformation userGroupInformation = getUserGroupInformation();
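    // If no UGI is available (e.g. security is not configured), run the action directly; otherwise run it as that user.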
    if (userGroupInformation == null) {
        return privilegedExceptionAction.run();
    } else {
        return userGroupInformation.doAs(privilegedExceptionAction);
    }
}

From source file:org.apache.nifi.processors.hadoop.MoveHDFS.java

License:Apache License

protected void processBatchOfFiles(final List<Path> files, final ProcessContext context,
        final ProcessSession session, FlowFile parentFlowFile) {
    Preconditions.checkState(parentFlowFile != null, "No parent flowfile for this batch was provided");

    // process the batch of files
    final Configuration conf = getConfiguration();
    final FileSystem hdfs = getFileSystem();
    final UserGroupInformation ugi = getUserGroupInformation();

    if (conf == null || ugi == null) {
        getLogger().error("Configuration or UserGroupInformation not configured properly");
        session.transfer(parentFlowFile, REL_FAILURE);
        context.yield();
        return;
    }

    for (final Path file : files) {

        ugi.doAs(new PrivilegedAction<Object>() {
            @Override
            public Object run() {
                FlowFile flowFile = session.create(parentFlowFile);
                try {
                    final String originalFilename = file.getName();
                    final Path configuredRootOutputDirPath = processorConfig.getOutputDirectory();
                    final Path newFile = new Path(configuredRootOutputDirPath, originalFilename);
                    final boolean destinationExists = hdfs.exists(newFile);
                    // If destination file already exists, resolve that
                    // based on processor configuration
                    if (destinationExists) {
                        switch (processorConfig.getConflictResolution()) {
                        case REPLACE_RESOLUTION:
                            if (hdfs.delete(file, false)) {
                                getLogger().info("deleted {} in order to replace with the contents of {}",
                                        new Object[] { file, flowFile });
                            }
                            break;
                        case IGNORE_RESOLUTION:
                            session.transfer(flowFile, REL_SUCCESS);
                            getLogger().info(
                                    "transferring {} to success because file with same name already exists",
                                    new Object[] { flowFile });
                            return null;
                        case FAIL_RESOLUTION:
                            session.transfer(session.penalize(flowFile), REL_FAILURE);
                            getLogger().warn(
                                    "penalizing {} and routing to failure because file with same name already exists",
                                    new Object[] { flowFile });
                            return null;
                        default:
                            break;
                        }
                    }

                    // Create destination directory if it does not exist
                    try {
                        if (!hdfs.getFileStatus(configuredRootOutputDirPath).isDirectory()) {
                            throw new IOException(configuredRootOutputDirPath.toString()
                                    + " already exists and is not a directory");
                        }
                    } catch (FileNotFoundException fe) {
                        if (!hdfs.mkdirs(configuredRootOutputDirPath)) {
                            throw new IOException(
                                    configuredRootOutputDirPath.toString() + " could not be created");
                        }
                        changeOwner(context, hdfs, configuredRootOutputDirPath);
                    }

                    boolean moved = false;
                    for (int i = 0; i < 10; i++) { // try to rename multiple
                        // times.
                        if (processorConfig.getOperation().equals("move")) {
                            if (hdfs.rename(file, newFile)) {
                                moved = true;
                                break;// rename was successful
                            }
                        } else {
                            if (FileUtil.copy(hdfs, file, hdfs, newFile, false, conf)) {
                                moved = true;
                                break;// copy was successful
                            }
                        }
                        Thread.sleep(200L);// try waiting to let whatever might cause rename failure to resolve
                    }
                    if (!moved) {
                        throw new ProcessException("Could not move file " + file + " to its final filename");
                    }

                    changeOwner(context, hdfs, newFile);
                    final String outputPath = newFile.toString();
                    final String newFilename = newFile.getName();
                    final String hdfsPath = newFile.getParent().toString();
                    flowFile = session.putAttribute(flowFile, CoreAttributes.FILENAME.key(), newFilename);
                    flowFile = session.putAttribute(flowFile, ABSOLUTE_HDFS_PATH_ATTRIBUTE, hdfsPath);
                    final String transitUri = (outputPath.startsWith("/")) ? "hdfs:/" + outputPath
                            : "hdfs://" + outputPath;
                    session.getProvenanceReporter().send(flowFile, transitUri);
                    session.transfer(flowFile, REL_SUCCESS);

                } catch (final Throwable t) {
                    getLogger().error("Failed to rename on HDFS due to {}", new Object[] { t });
                    session.transfer(session.penalize(flowFile), REL_FAILURE);
                    context.yield();
                }
                return null;
            }
        });
    }
}