Example usage for java.util.concurrent.atomic AtomicReference set

Introduction

On this page you can find example usage for java.util.concurrent.atomic.AtomicReference.set.

Prototype

public final void set(V newValue) 

Document

Sets the value to newValue, with memory effects as specified by VarHandle#setVolatile.
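
As a quick illustration (a minimal sketch, not taken from any of the projects listed under Usage; the class and variable names are invented), set publishes a value that another thread can then observe:

import java.util.concurrent.atomic.AtomicReference;

public class AtomicReferenceSetExample {
    public static void main(String[] args) throws InterruptedException {
        AtomicReference<String> status = new AtomicReference<>("PENDING");

        // A writer thread publishes a new value; set has volatile write semantics,
        // so the update is visible to any thread that subsequently reads the reference.
        Thread writer = new Thread(() -> status.set("DONE"));
        writer.start();
        writer.join();

        System.out.println(status.get()); // prints DONE
    }
}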

Usage

From source file: org.apache.solr.servlet.SolrDispatchFilter.java

private boolean authenticateRequest(ServletRequest request, ServletResponse response,
        final AtomicReference<ServletRequest> wrappedRequest) throws IOException {
    boolean requestContinues = false;
    final AtomicBoolean isAuthenticated = new AtomicBoolean(false);
    AuthenticationPlugin authenticationPlugin = cores.getAuthenticationPlugin();
    if (authenticationPlugin == null) {
        return true;
    } else {
        // /admin/info/key must be always open. see SOLR-9188
        // tests work only w/ getPathInfo
        //otherwise it's just enough to have getServletPath()
        if (PKIAuthenticationPlugin.PATH.equals(((HttpServletRequest) request).getServletPath())
                || PKIAuthenticationPlugin.PATH.equals(((HttpServletRequest) request).getPathInfo()))
            return true;
        String header = ((HttpServletRequest) request).getHeader(PKIAuthenticationPlugin.HEADER);
        if (header != null && cores.getPkiAuthenticationPlugin() != null)
            authenticationPlugin = cores.getPkiAuthenticationPlugin();
        try {
            log.debug("Request to authenticate: {}, domain: {}, port: {}", request, request.getLocalName(),
                    request.getLocalPort());
            // upon successful authentication, this should call the chain's next filter.
            requestContinues = authenticationPlugin.doAuthenticate(request, response, (req, rsp) -> {
                isAuthenticated.set(true);
                wrappedRequest.set(req);
            });
        } catch (Exception e) {
            log.info("Error authenticating", e);
            throw new SolrException(ErrorCode.SERVER_ERROR, "Error during request authentication, ", e);
        }
    }
    // requestContinues is an optional short circuit, thus we still need to check isAuthenticated.
    // This is because the AuthenticationPlugin doesn't always have enough information to determine if
    // it should short circuit, e.g. the Kerberos Authentication Filter will send an error and not
    // call later filters in chain, but doesn't throw an exception.  We could force each Plugin
    // to implement isAuthenticated to simplify the check here, but that just moves the complexity to
    // multiple code paths.
    if (!requestContinues || !isAuthenticated.get()) {
        response.flushBuffer();
        return false;
    }
    return true;
}
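
A note on the pattern above: local variables captured by a Java lambda must be effectively final, so the Solr code hands the authentication callback an AtomicReference and writes the wrapped request into it with set, treating the reference as an "out parameter". A minimal sketch of that idiom follows; the names lookup and OutParameterSketch are hypothetical, not part of Solr:

import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;

public class OutParameterSketch {
    // Hypothetical callback-style API that delivers its result to a consumer.
    static void lookup(String key, Consumer<String> onResult) {
        onResult.accept("value-for-" + key);
    }

    public static void main(String[] args) {
        // Locals captured by a lambda must be effectively final, so a mutable
        // holder such as AtomicReference is used to carry the result back out.
        AtomicReference<String> result = new AtomicReference<>();
        lookup("example", result::set);
        System.out.println(result.get()); // prints value-for-example
    }
}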

From source file: com.microsoft.tfs.core.clients.versioncontrol.Workstation.java

/**
 * Loads into the cache the workspaces residing in the specified source
 * control repository and having the specified owner. Returns the workspaces
 * from the QueryWorkspaces call made by this method.
 *
 * @param client
 *        the client (must not be <code>null</code>)
 * @param ownerName
 *        the owner of the workspaces (must not be <code>null</code> or
 *        empty)
 * @param workspaces
 *        the holder for the workspaces (must not be <code>null</code>)
 */
public void updateWorkspaceInfoCache(final VersionControlClient client, final String ownerName,
        final AtomicReference<Workspace[]> workspaces) {
    Check.notNull(client, "client"); //$NON-NLS-1$
    Check.notNullOrEmpty(ownerName, "ownerName"); //$NON-NLS-1$
    Check.notNull(workspaces, "workspaces"); //$NON-NLS-1$

    // It is okay to use the passed in ownerName here to construct the key
    // because all valid forms of ownerName are in the table.
    final String key = createLoadWorkspacesTableKey(client, ownerName);
    workspaces.set(updateWorkspaceInfoCache(key, client, ownerName));
}

From source file: com.microsoft.tfs.client.common.ui.teamexplorer.internal.pendingchanges.PendingChangesViewModel.java

public PolicyEvaluatorState evaluateCheckinPolicies(final PendingCheckin pendingCheckin,
        final AtomicReference<PolicyFailure[]> outFailures) {
    if (policyEvaluator == null) {
        return PolicyEvaluatorState.UNEVALUATED;
    }

    Check.notNull(policyContext, "policyContext"); //$NON-NLS-1$

    PolicyFailure[] failures;

    try {
        policyEvaluator.setPendingCheckin(pendingCheckin);
        failures = policyEvaluator.evaluate(policyContext);
    } catch (final PolicyEvaluationCancelledException e) {
        return PolicyEvaluatorState.CANCELLED;
    }
    policyWarnings = PolicyFailureData.fromPolicyFailures(failures, policyContext);
    firePolicyWarningsChangedEvent();

    if (outFailures != null) {
        outFailures.set(failures);
    }

    return policyEvaluator.getPolicyEvaluatorState();
}

From source file: io.pravega.segmentstore.server.writer.SegmentAggregatorTests.java

/**
 * Tests the flush() method with Append and StreamSegmentSealOperations when there are Storage errors.
 */
@Test
public void testSealWithStorageErrors() throws Exception {
    // Add some appends and seal, and then flush together. Verify that everything got flushed in one go.
    final int appendCount = 1000;
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L).with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L).build();

    @Cleanup
    TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT, executorService()).join();

    @Cleanup
    ByteArrayOutputStream writtenData = new ByteArrayOutputStream();

    // Part 1: flush triggered by accumulated size.
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length (not bothering with flushing here; testFlushSeal() covers that).
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
    }

    // Generate and add a Seal Operation.
    StorageOperation sealOp = generateSealAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(sealOp);

    // Have the writes fail every few attempts with a well known exception.
    AtomicBoolean generateSyncException = new AtomicBoolean(true);
    AtomicBoolean generateAsyncException = new AtomicBoolean(true);
    AtomicReference<IntentionalException> setException = new AtomicReference<>();
    Supplier<Exception> exceptionSupplier = () -> {
        IntentionalException ex = new IntentionalException(Long.toString(context.timer.getElapsedMillis()));
        setException.set(ex);
        return ex;
    };
    context.storage.setSealSyncErrorInjector(
            new ErrorInjector<>(count -> generateSyncException.getAndSet(false), exceptionSupplier));
    context.storage.setSealAsyncErrorInjector(
            new ErrorInjector<>(count -> generateAsyncException.getAndSet(false), exceptionSupplier));

    // Call flush and verify that the entire Aggregator got flushed and the Seal got persisted to Storage.
    int attemptCount = 4;
    for (int i = 0; i < attemptCount; i++) {
        // Repeat a number of times, at least once should work.
        setException.set(null);
        try {
            FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT, executorService()).join();
            Assert.assertNull("An exception was expected, but none was thrown.", setException.get());
            Assert.assertNotNull("No FlushResult provided.", flushResult);
        } catch (Exception ex) {
            if (setException.get() != null) {
                Assert.assertEquals("Unexpected exception thrown.", setException.get(),
                        ExceptionHelpers.getRealException(ex));
            } else {
                // Not expecting any exception this time.
                throw ex;
            }
        }

        if (!generateAsyncException.get() && !generateSyncException.get() && setException.get() == null) {
            // We are done. We got at least one through.
            break;
        }
    }

    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    SegmentProperties storageInfo = context.storage
            .getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length,
            storageInfo.getLength());
    Assert.assertTrue("Segment is not sealed in storage post flush.", storageInfo.isSealed());
    Assert.assertTrue("Segment is not marked in metadata as sealed in storage post flush.",
            context.segmentAggregator.getMetadata().isSealedInStorage());
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0,
            actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}

From source file: de.hybris.platform.test.TransactionTest.java

@Test
public void testIneffectiveRollbackWithActivateFalse() throws Exception {
    try {
        final Transaction tx = Transaction.current();
        // simulate config setting with threadlocal flag -> has the same semantics!
        Transaction.enableUserTransactionForThread(false);

        final AtomicReference<PK> titlePKref = new AtomicReference<PK>();
        try {
            tx.execute(new TransactionBody() {
                @Override
                public Object execute() throws Exception {
                    titlePKref.set(UserManager.getInstance().createTitle("TTT").getPK());
                    throw new RuntimeException("rollback please");
                }
            });
            fail("RuntimeException expected");
        } catch (final RuntimeException e) {
            assertEquals("rollback please", e.getMessage());
        }

        assertNotSame(tx, Transaction.current());
        assertNotNull(titlePKref.get());
        final Title title = jaloSession.getItem(titlePKref.get());
        assertNotNull(title);
        assertTrue(title.isAlive());
    } finally {
        Transaction.enableUserTransactionForThread(true);
    }
}

From source file: io.pravega.segmentstore.server.writer.SegmentAggregatorTests.java

/**
 * Tests the behavior of flush() with appends and storage errors (on the write() method).
 */
@Test
public void testFlushAppendWithStorageErrors() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int appendCount = config.getFlushThresholdBytes() * 10;
    final int failSyncEvery = 2;
    final int failAsyncEvery = 3;

    @Cleanup
    TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT, executorService()).join();

    // Have the writes fail every few attempts with a well known exception.
    AtomicReference<IntentionalException> setException = new AtomicReference<>();
    Supplier<Exception> exceptionSupplier = () -> {
        IntentionalException ex = new IntentionalException(Long.toString(context.timer.getElapsedMillis()));
        setException.set(ex);
        return ex;
    };
    context.storage.setWriteSyncErrorInjector(
            new ErrorInjector<>(count -> count % failSyncEvery == 0, exceptionSupplier));
    context.storage.setWriteAsyncErrorInjector(
            new ErrorInjector<>(count -> count % failAsyncEvery == 0, exceptionSupplier));

    @Cleanup
    ByteArrayOutputStream writtenData = new ByteArrayOutputStream();

    // Part 1: flush triggered by accumulated size.
    int exceptionCount = 0;
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);

        // Call flush() and inspect the result.
        setException.set(null);
        context.increaseTime(config.getFlushThresholdTime().toMillis() + 1); // Force a flush by incrementing the time by a lot.
        FlushResult flushResult = null;

        try {
            flushResult = context.segmentAggregator.flush(TIMEOUT, executorService()).join();
            Assert.assertNull("An exception was expected, but none was thrown.", setException.get());
            Assert.assertNotNull("No FlushResult provided.", flushResult);
        } catch (Exception ex) {
            if (setException.get() != null) {
                Assert.assertEquals("Unexpected exception thrown.", setException.get(),
                        ExceptionHelpers.getRealException(ex));
                exceptionCount++;
            } else {
                // Not expecting any exception this time.
                throw ex;
            }
        }

        // Check flush result.
        if (flushResult != null) {
            AssertExtensions.assertGreaterThan("Not enough bytes were flushed (time threshold).", 0,
                    flushResult.getFlushedBytes());
            Assert.assertEquals("Not expecting any merged bytes in this test.", 0,
                    flushResult.getMergedBytes());
        }
    }

    // Do one last flush at the end to make sure we clear out all the buffers, if there's anything else left.
    context.increaseTime(config.getFlushThresholdTime().toMillis() + 1); // Force a flush by incrementing the time by a lot.
    context.storage.setWriteSyncErrorInjector(null);
    context.storage.setWriteAsyncErrorInjector(null);
    context.segmentAggregator.flush(TIMEOUT, executorService()).join();

    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    long storageLength = context.storage
            .getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join()
            .getLength();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageLength);
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0,
            actualData.length, TIMEOUT).join();

    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
    AssertExtensions.assertGreaterThan("Not enough errors injected.", 0, exceptionCount);
}

From source file: org.apache.nifi.processors.hadoop.AbstractPutHDFSRecord.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    // do this before getting a flow file so that we always get a chance to attempt Kerberos relogin
    final FileSystem fileSystem = getFileSystem();
    final Configuration configuration = getConfiguration();
    final UserGroupInformation ugi = getUserGroupInformation();

    if (configuration == null || fileSystem == null || ugi == null) {
        getLogger().error(
                "Processor not configured properly because Configuration, FileSystem, or UserGroupInformation was null");
        context.yield();
        return;
    }

    final FlowFile flowFile = session.get();
    if (flowFile == null) {
        context.yield();
        return;
    }

    ugi.doAs((PrivilegedAction<Object>) () -> {
        Path tempDotCopyFile = null;
        FlowFile putFlowFile = flowFile;
        try {
            final String filenameValue = putFlowFile.getAttribute(CoreAttributes.FILENAME.key()); // TODO codec extension
            final String directoryValue = context.getProperty(DIRECTORY)
                    .evaluateAttributeExpressions(putFlowFile).getValue();

            // create the directory if it doesn't exist
            final Path directoryPath = new Path(directoryValue);
            createDirectory(fileSystem, directoryPath, remoteOwner, remoteGroup);

            // write to tempFile first and on success rename to destFile
            final Path tempFile = new Path(directoryPath, "." + filenameValue);
            final Path destFile = new Path(directoryPath, filenameValue);

            final boolean destinationExists = fileSystem.exists(destFile) || fileSystem.exists(tempFile);
            final boolean shouldOverwrite = context.getProperty(OVERWRITE).asBoolean();

            // if the tempFile or destFile already exist, and overwrite is set to false, then transfer to failure
            if (destinationExists && !shouldOverwrite) {
                session.transfer(session.penalize(putFlowFile), REL_FAILURE);
                getLogger().warn(
                        "penalizing {} and routing to failure because file with same name already exists",
                        new Object[] { putFlowFile });
                return null;
            }

            final AtomicReference<Throwable> exceptionHolder = new AtomicReference<>(null);
            final AtomicReference<WriteResult> writeResult = new AtomicReference<>();
            final RecordReaderFactory recordReaderFactory = context.getProperty(RECORD_READER)
                    .asControllerService(RecordReaderFactory.class);

            final FlowFile flowFileIn = putFlowFile;
            final StopWatch stopWatch = new StopWatch(true);

            // Read records from the incoming FlowFile and write them the tempFile
            session.read(putFlowFile, (final InputStream rawIn) -> {
                RecordReader recordReader = null;
                HDFSRecordWriter recordWriter = null;

                try (final BufferedInputStream in = new BufferedInputStream(rawIn)) {

                    // if we fail to create the RecordReader then we want to route to failure, so we need to
                    // handle this separately from the other IOExceptions which normally route to retry
                    try {
                        recordReader = recordReaderFactory.createRecordReader(flowFileIn, in, getLogger());
                    } catch (Exception e) {
                        final RecordReaderFactoryException rrfe = new RecordReaderFactoryException(
                                "Unable to create RecordReader", e);
                        exceptionHolder.set(rrfe);
                        return;
                    }

                    final RecordSet recordSet = recordReader.createRecordSet();

                    recordWriter = createHDFSRecordWriter(context, flowFile, configuration, tempFile,
                            recordReader.getSchema());
                    writeResult.set(recordWriter.write(recordSet));
                } catch (Exception e) {
                    exceptionHolder.set(e);
                } finally {
                    IOUtils.closeQuietly(recordReader);
                    IOUtils.closeQuietly(recordWriter);
                }
            });
            stopWatch.stop();

            final String dataRate = stopWatch.calculateDataRate(putFlowFile.getSize());
            final long millis = stopWatch.getDuration(TimeUnit.MILLISECONDS);
            tempDotCopyFile = tempFile;

            // if any errors happened within the session.read then throw the exception so we jump
            // into one of the appropriate catch blocks below
            if (exceptionHolder.get() != null) {
                throw exceptionHolder.get();
            }

            // Attempt to rename from the tempFile to destFile, and change owner if successfully renamed
            rename(fileSystem, tempFile, destFile);
            changeOwner(fileSystem, destFile, remoteOwner, remoteGroup);

            getLogger().info("Wrote {} to {} in {} milliseconds at a rate of {}",
                    new Object[] { putFlowFile, destFile, millis, dataRate });

            putFlowFile = postProcess(context, session, putFlowFile, destFile);

            final String newFilename = destFile.getName();
            final String hdfsPath = destFile.getParent().toString();

            // Update the filename and absolute path attributes
            final Map<String, String> attributes = new HashMap<>(writeResult.get().getAttributes());
            attributes.put(CoreAttributes.FILENAME.key(), newFilename);
            attributes.put(ABSOLUTE_HDFS_PATH_ATTRIBUTE, hdfsPath);
            attributes.put(RECORD_COUNT_ATTR, String.valueOf(writeResult.get().getRecordCount()));
            putFlowFile = session.putAllAttributes(putFlowFile, attributes);

            // Send a provenance event and transfer to success
            final Path qualifiedPath = destFile.makeQualified(fileSystem.getUri(),
                    fileSystem.getWorkingDirectory());
            session.getProvenanceReporter().send(putFlowFile, qualifiedPath.toString());
            session.transfer(putFlowFile, REL_SUCCESS);

        } catch (IOException | FlowFileAccessException e) {
            deleteQuietly(fileSystem, tempDotCopyFile);
            getLogger().error("Failed to write due to {}", new Object[] { e });
            session.transfer(session.penalize(putFlowFile), REL_RETRY);
            context.yield();
        } catch (Throwable t) {
            deleteQuietly(fileSystem, tempDotCopyFile);
            getLogger().error("Failed to write due to {}", new Object[] { t });
            session.transfer(putFlowFile, REL_FAILURE);
        }

        return null;
    });
}
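
A note on the exceptionHolder idiom above: set records any failure raised inside the session.read callback so that the surrounding try block can rethrow it and route the FlowFile accordingly. A minimal sketch of the general idea follows, assuming a callback whose functional interface does not declare checked exceptions; the names withResource and ExceptionHolderSketch are hypothetical:

import java.io.IOException;
import java.util.concurrent.atomic.AtomicReference;

public class ExceptionHolderSketch {
    // Hypothetical callback-based API whose lambda cannot throw checked exceptions.
    static void withResource(Runnable body) {
        body.run();
    }

    public static void main(String[] args) throws Exception {
        final AtomicReference<Throwable> exceptionHolder = new AtomicReference<>(null);

        withResource(() -> {
            try {
                throw new IOException("simulated failure");
            } catch (Exception e) {
                // Record the failure instead of letting it escape the lambda.
                exceptionHolder.set(e);
            }
        });

        // Rethrow outside the callback so the caller's normal error handling applies.
        if (exceptionHolder.get() != null) {
            throw new Exception("operation failed", exceptionHolder.get());
        }
    }
}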

From source file: io.pravega.segmentstore.server.writer.SegmentAggregatorTests.java

/**
 * Tests the flush() method with Append and MergeTransactionOperations.
 */
@Test
public void testMergeWithStorageErrors() throws Exception {
    // Storage Errors
    final int appendCount = 100; // This is number of appends per Segment/Transaction - there will be a lot of appends here.
    final int failSyncEvery = 2;
    final int failAsyncEvery = 3;
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L).with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L).build();

    @Cleanup
    TestContext context = new TestContext(config);

    // Create and initialize all segments.
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT, executorService()).join();
    for (SegmentAggregator a : context.transactionAggregators) {
        context.storage.create(a.getMetadata().getName(), TIMEOUT).join();
        a.initialize(TIMEOUT, executorService()).join();
    }

    // Store written data by segment - so we can check it later.
    HashMap<Long, ByteArrayOutputStream> dataBySegment = new HashMap<>();

    // Add a few appends to each Transaction aggregator and to the parent aggregator and seal all Transactions.
    for (int i = 0; i < context.transactionAggregators.length; i++) {
        SegmentAggregator transactionAggregator = context.transactionAggregators[i];
        long transactionId = transactionAggregator.getMetadata().getId();
        ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
        dataBySegment.put(transactionId, writtenData);

        for (int appendId = 0; appendId < appendCount; appendId++) {
            StorageOperation appendOp = generateAppendAndUpdateMetadata(appendId, transactionId, context);
            transactionAggregator.add(appendOp);
            getAppendData(appendOp, writtenData, context);
        }

        transactionAggregator.add(generateSealAndUpdateMetadata(transactionId, context));
    }

    // Merge all the Transactions in the parent Segment.
    @Cleanup
    ByteArrayOutputStream parentData = new ByteArrayOutputStream();
    for (int transIndex = 0; transIndex < context.transactionAggregators.length; transIndex++) {
        // Merge this Transaction into the parent & record its data in the final parent data array.
        long transactionId = context.transactionAggregators[transIndex].getMetadata().getId();
        context.segmentAggregator.add(generateMergeTransactionAndUpdateMetadata(transactionId, context));

        ByteArrayOutputStream transactionData = dataBySegment.get(transactionId);
        parentData.write(transactionData.toByteArray());
        transactionData.close();
    }

    // Have the writes fail every few attempts with a well known exception.
    AtomicReference<IntentionalException> setException = new AtomicReference<>();
    Supplier<Exception> exceptionSupplier = () -> {
        IntentionalException ex = new IntentionalException(Long.toString(context.timer.getElapsedMillis()));
        setException.set(ex);
        return ex;
    };
    context.storage.setConcatSyncErrorInjector(
            new ErrorInjector<>(count -> count % failSyncEvery == 0, exceptionSupplier));
    context.storage.setConcatAsyncErrorInjector(
            new ErrorInjector<>(count -> count % failAsyncEvery == 0, exceptionSupplier));

    // Flush all the Aggregators, while checking that the right errors get handled and can be recovered from.
    tryFlushAllSegments(context, () -> setException.set(null), setException::get);

    // Verify that all Transactions are now fully merged.
    for (SegmentAggregator transactionAggregator : context.transactionAggregators) {
        SegmentMetadata transactionMetadata = transactionAggregator.getMetadata();
        Assert.assertTrue("Merged Transaction was not marked as deleted in metadata.",
                transactionMetadata.isDeleted());
        Assert.assertFalse("Merged Transaction still exists in storage.",
                context.storage.exists(transactionMetadata.getName(), TIMEOUT).join());
    }

    // Verify that in the end, the contents of the parents is as expected.
    byte[] expectedData = parentData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    long storageLength = context.storage
            .getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join()
            .getLength();
    Assert.assertEquals("Unexpected number of bytes flushed/merged to Storage.", expectedData.length,
            storageLength);
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0,
            actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}

From source file: org.apache.nifi.processors.hadoop.AbstractFetchHDFSRecord.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    // do this before getting a flow file so that we always get a chance to attempt Kerberos relogin
    final FileSystem fileSystem = getFileSystem();
    final Configuration configuration = getConfiguration();
    final UserGroupInformation ugi = getUserGroupInformation();

    if (configuration == null || fileSystem == null || ugi == null) {
        getLogger().error(
                "Processor not configured properly because Configuration, FileSystem, or UserGroupInformation was null");
        context.yield();
        return;
    }

    final FlowFile originalFlowFile = session.get();
    if (originalFlowFile == null) {
        context.yield();
        return;
    }

    ugi.doAs((PrivilegedAction<Object>) () -> {
        FlowFile child = null;
        final String filenameValue = context.getProperty(FILENAME)
                .evaluateAttributeExpressions(originalFlowFile).getValue();
        try {
            final Path path = new Path(filenameValue);
            final AtomicReference<Throwable> exceptionHolder = new AtomicReference<>(null);
            final AtomicReference<WriteResult> writeResult = new AtomicReference<>();

            final RecordSetWriterFactory recordSetWriterFactory = context.getProperty(RECORD_WRITER)
                    .asControllerService(RecordSetWriterFactory.class);
            final RecordSetWriter recordSetWriter = recordSetWriterFactory.createWriter(getLogger(),
                    originalFlowFile, new NullInputStream(0));

            final StopWatch stopWatch = new StopWatch(true);

            // use a child FlowFile so that if any error occurs we can route the original untouched FlowFile to retry/failure
            child = session.create(originalFlowFile);
            child = session.write(child, (final OutputStream rawOut) -> {
                try (final BufferedOutputStream out = new BufferedOutputStream(rawOut);
                        final HDFSRecordReader recordReader = createHDFSRecordReader(context, originalFlowFile,
                                configuration, path)) {

                    final RecordSchema emptySchema = new SimpleRecordSchema(Collections.emptyList());

                    final RecordSet recordSet = new RecordSet() {
                        @Override
                        public RecordSchema getSchema() throws IOException {
                            return emptySchema;
                        }

                        @Override
                        public Record next() throws IOException {
                            return recordReader.nextRecord();
                        }
                    };

                    writeResult.set(recordSetWriter.write(recordSet, out));
                } catch (Exception e) {
                    exceptionHolder.set(e);
                }
            });

            stopWatch.stop();

            // if any errors happened within the session.write then throw the exception so we jump
            // into one of the appropriate catch blocks below
            if (exceptionHolder.get() != null) {
                throw exceptionHolder.get();
            }

            FlowFile successFlowFile = postProcess(context, session, child, path);

            final Map<String, String> attributes = new HashMap<>(writeResult.get().getAttributes());
            attributes.put(RECORD_COUNT_ATTR, String.valueOf(writeResult.get().getRecordCount()));
            attributes.put(CoreAttributes.MIME_TYPE.key(), recordSetWriter.getMimeType());
            successFlowFile = session.putAllAttributes(successFlowFile, attributes);

            final URI uri = path.toUri();
            getLogger().info("Successfully received content from {} for {} in {} milliseconds",
                    new Object[] { uri, successFlowFile, stopWatch.getDuration() });
            session.getProvenanceReporter().fetch(successFlowFile, uri.toString(),
                    stopWatch.getDuration(TimeUnit.MILLISECONDS));
            session.transfer(successFlowFile, REL_SUCCESS);
            session.remove(originalFlowFile);
            return null;

        } catch (final FileNotFoundException | AccessControlException e) {
            getLogger().error("Failed to retrieve content from {} for {} due to {}; routing to failure",
                    new Object[] { filenameValue, originalFlowFile, e });
            final FlowFile failureFlowFile = session.putAttribute(originalFlowFile, FETCH_FAILURE_REASON_ATTR,
                    e.getMessage());
            session.transfer(failureFlowFile, REL_FAILURE);
        } catch (final IOException | FlowFileAccessException e) {
            getLogger().error("Failed to retrieve content from {} for {} due to {}; routing to retry",
                    new Object[] { filenameValue, originalFlowFile, e });
            session.transfer(session.penalize(originalFlowFile), REL_RETRY);
            context.yield();
        } catch (final Throwable t) {
            getLogger().error("Failed to retrieve content from {} for {} due to {}; routing to failure",
                    new Object[] { filenameValue, originalFlowFile, t });
            final FlowFile failureFlowFile = session.putAttribute(originalFlowFile, FETCH_FAILURE_REASON_ATTR,
                    t.getMessage());
            session.transfer(failureFlowFile, REL_FAILURE);
        }

        // if we got this far then we weren't successful so we need to clean up the child flow file if it got initialized
        if (child != null) {
            session.remove(child);
        }

        return null;
    });

}

From source file: com.dgtlrepublic.anitomyj.Parser.java

/** Search for anime keywords. */
private void searchForKeywords() {
    for (int i = 0; i < tokens.size(); i++) {
        Token token = tokens.get(i);
        if (token.getCategory() != kUnknown)
            continue;

        String word = token.getContent();
        word = StringHelper.trimAny(word, " -");
        if (word.isEmpty())
            continue;

        // Don't bother if the word is a number that cannot be CRC
        if (word.length() != 8 && StringHelper.isNumericString(word))
            continue;

        String keyword = KeywordManager.normalzie(word);
        AtomicReference<ElementCategory> category = new AtomicReference<>(kElementUnknown);
        AtomicReference<KeywordOptions> options = new AtomicReference<>(new KeywordOptions());

        if (KeywordManager.getInstance().findAndSet(keyword, category, options)) {
            if (!this.options.parseReleaseGroup && category.get() == kElementReleaseGroup)
                continue;
            if (!ParserHelper.isElementCategorySearchable(category.get()) || !options.get().isSearchable())
                continue;
            if (ParserHelper.isElementCategorySingular(category.get()) && !empty(category.get()))
                continue;
            if (category.get() == kElementAnimeSeasonPrefix) {
                parserHelper.checkAndSetAnimeSeasonKeyword(token, i);
                continue;
            } else if (category.get() == kElementEpisodePrefix) {
                if (options.get().isValid()) {
                    parserHelper.checkExtentKeyword(kElementEpisodeNumber, i, token);
                    continue;
                }
            } else if (category.get() == kElementReleaseVersion) {
                word = StringUtils.substring(word, 1);
            } else if (category.get() == kElementVolumePrefix) {
                parserHelper.checkExtentKeyword(kElementVolumeNumber, i, token);
                continue;
            }
        } else {
            if (empty(kElementFileChecksum) && ParserHelper.isCrc32(word)) {
                category.set(kElementFileChecksum);
            } else if (empty(kElementVideoResolution) && ParserHelper.isResolution(word)) {
                category.set(kElementVideoResolution);
            }
        }

        if (category.get() != kElementUnknown) {
            elements.add(new Element(category.get(), word));
            if (options.get() != null && options.get().isIdentifiable()) {
                token.setCategory(kIdentifier);
            }
        }
    }
}