Example usage for java.util.concurrent.atomic AtomicBoolean set

List of usage examples for java.util.concurrent.atomic AtomicBoolean set

Introduction

On this page you can find usage examples for java.util.concurrent.atomic.AtomicBoolean#set.

Prototype

public final void set(boolean newValue) 

Document

Sets the value to newValue, with memory effects as specified by VarHandle#setVolatile.

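A minimal, self-contained sketch of the visibility guarantee this provides (class and variable names here are illustrative, not from the examples below): a set on one thread is a volatile write that a get on another thread will observe.

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanSetExample {
    public static void main(String[] args) throws InterruptedException {
        AtomicBoolean done = new AtomicBoolean(false);
        Thread worker = new Thread(() -> {
            // ... do some work ...
            done.set(true); // volatile write: becomes visible to other threads
        });
        worker.start();
        while (!done.get()) { // volatile read pairs with the write above
            Thread.sleep(1);
        }
        System.out.println("worker finished");
    }
}
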
Usage

From source file:org.apache.hadoop.hdfs.client.impl.TestBlockReaderFactory.java

/**
 * When an InterruptedException is sent to a thread calling
 * FileChannel#read, the FileChannel is immediately closed and the
 * thread gets an exception.  This effectively means that we might have
 * someone asynchronously calling close() on the file descriptors we use
 * in BlockReaderLocal.  So when unreferencing a ShortCircuitReplica in
 * ShortCircuitCache#unref, we should check if the FileChannel objects
 * are still open.  If not, we should purge the replica to avoid giving
 * it out to any future readers.
 *
 * This is a regression test for HDFS-6227: Short circuit read failed
 * due to ClosedChannelException.
 *
 * Note that you may still get ClosedChannelException errors if two threads
 * are reading from the same replica and an InterruptedException is delivered
 * to one of them.
 */
@Test(timeout = 120000)
public void testPurgingClosedReplicas() throws Exception {
    BlockReaderTestUtil.enableBlockReaderFactoryTracing();
    final AtomicInteger replicasCreated = new AtomicInteger(0);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {
        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            replicasCreated.incrementAndGet();
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testPurgingClosedReplicas", sockDir);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4095;
    final int SEED = 0xFADE0;
    final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(cluster.getURI(0), conf);
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);

    final Semaphore sem = new Semaphore(0);
    final List<LocatedBlock> locatedBlocks = cluster.getNameNode().getRpcServer()
            .getBlockLocations(TEST_FILE, 0, TEST_FILE_LEN).getLocatedBlocks();
    final LocatedBlock lblock = locatedBlocks.get(0); // first block
    final byte[] buf = new byte[TEST_FILE_LEN];
    Runnable readerRunnable = new Runnable() {
        @Override
        public void run() {
            try {
                while (true) {
                    BlockReader blockReader = null;
                    try {
                        blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0,
                                TEST_FILE_LEN);
                        sem.release();
                        try {
                            blockReader.readAll(buf, 0, TEST_FILE_LEN);
                        } finally {
                            sem.acquireUninterruptibly();
                        }
                    } catch (ClosedByInterruptException e) {
                        LOG.info("got the expected ClosedByInterruptException", e);
                        sem.release();
                        break;
                    } finally {
                        if (blockReader != null)
                            blockReader.close();
                    }
                    LOG.info("read another " + TEST_FILE_LEN + " bytes.");
                }
            } catch (Throwable t) {
                LOG.error("getBlockReader failure", t);
                testFailed.set(true);
                sem.release();
            }
        }
    };
    Thread thread = new Thread(readerRunnable);
    thread.start();

    // While the thread is reading, send it interrupts.
    // These should trigger a ClosedChannelException.
    while (thread.isAlive()) {
        sem.acquireUninterruptibly();
        thread.interrupt();
        sem.release();
    }
    Assert.assertFalse(testFailed.get());

    // We should be able to read from the file without
    // getting a ClosedChannelException.
    BlockReader blockReader = null;
    try {
        blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN);
        blockReader.readFully(buf, 0, TEST_FILE_LEN);
    } finally {
        if (blockReader != null)
            blockReader.close();
    }
    byte expected[] = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(buf, expected));

    // Another ShortCircuitReplica object should have been created.
    Assert.assertEquals(2, replicasCreated.get());

    dfs.close();
    cluster.shutdown();
    sockDir.close();
}
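
In the test above, testFailed is a cross-thread failure flag: the reader thread calls set(true) on any unexpected Throwable, and the main thread asserts the flag once the reader is done. A minimal sketch of that pattern (names are illustrative, not from HDFS):

import java.util.concurrent.atomic.AtomicBoolean;

public class FailureFlagSketch {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean testFailed = new AtomicBoolean(false);
        Thread reader = new Thread(() -> {
            try {
                // exercise the code under test here
            } catch (Throwable t) {
                testFailed.set(true); // record the failure for the asserting thread
            }
        });
        reader.start();
        reader.join();
        if (testFailed.get()) {
            throw new AssertionError("reader thread reported a failure");
        }
    }
}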

From source file:org.apache.tez.dag.app.rm.TestContainerReuse.java

@Test(timeout = 10000l)
public void testSimpleReuse() throws IOException, InterruptedException, ExecutionException {
    Configuration tezConf = new Configuration(new YarnConfiguration());
    tezConf.setBoolean(TezConfiguration.TEZ_AM_CONTAINER_REUSE_ENABLED, true);
    tezConf.setBoolean(TezConfiguration.TEZ_AM_CONTAINER_REUSE_RACK_FALLBACK_ENABLED, true);
    tezConf.setLong(TezConfiguration.TEZ_AM_CONTAINER_REUSE_LOCALITY_DELAY_ALLOCATION_MILLIS, 0);
    tezConf.setLong(TezConfiguration.TEZ_AM_CONTAINER_IDLE_RELEASE_TIMEOUT_MIN_MILLIS, 0);
    RackResolver.init(tezConf);
    TaskSchedulerAppCallback mockApp = mock(TaskSchedulerAppCallback.class);

    CapturingEventHandler eventHandler = new CapturingEventHandler();
    TezDAGID dagID = TezDAGID.getInstance("0", 0, 0);

    AMRMClient<CookieContainerRequest> rmClientCore = new AMRMClientForTest();
    TezAMRMClientAsync<CookieContainerRequest> rmClient = spy(new AMRMClientAsyncForTest(rmClientCore, 100));
    String appUrl = "url";
    String appMsg = "success";
    AppFinalStatus finalStatus = new AppFinalStatus(FinalApplicationStatus.SUCCEEDED, appMsg, appUrl);

    doReturn(finalStatus).when(mockApp).getFinalAppStatus();

    AppContext appContext = mock(AppContext.class);
    AMContainerMap amContainerMap = new AMContainerMap(mock(ContainerHeartbeatHandler.class),
            mock(TaskAttemptListener.class), new ContainerContextMatcher(), appContext);
    AMNodeTracker amNodeTracker = new AMNodeTracker(eventHandler, appContext);
    doReturn(amContainerMap).when(appContext).getAllContainers();
    doReturn(amNodeTracker).when(appContext).getNodeTracker();
    doReturn(DAGAppMasterState.RUNNING).when(appContext).getAMState();
    doReturn(dagID).when(appContext).getCurrentDAGID();
    doReturn(mock(ClusterInfo.class)).when(appContext).getClusterInfo();

    TaskSchedulerEventHandler taskSchedulerEventHandlerReal = new TaskSchedulerEventHandlerForTest(appContext,
            eventHandler, rmClient, new AlwaysMatchesContainerMatcher());
    TaskSchedulerEventHandler taskSchedulerEventHandler = spy(taskSchedulerEventHandlerReal);
    taskSchedulerEventHandler.init(tezConf);
    taskSchedulerEventHandler.start();

    TaskSchedulerWithDrainableAppCallback taskScheduler = (TaskSchedulerWithDrainableAppCallback) ((TaskSchedulerEventHandlerForTest) taskSchedulerEventHandler)
            .getSpyTaskScheduler();
    TaskSchedulerAppCallbackDrainable drainableAppCallback = taskScheduler.getDrainableAppCallback();
    AtomicBoolean drainNotifier = new AtomicBoolean(false);
    taskScheduler.delayedContainerManager.drainedDelayedContainersForTest = drainNotifier;

    Resource resource1 = Resource.newInstance(1024, 1);
    String[] host1 = { "host1" };
    String[] host2 = { "host2" };

    String[] racks = { "/default-rack" };
    Priority priority1 = Priority.newInstance(1);

    TezVertexID vertexID1 = TezVertexID.getInstance(dagID, 1);

    //Vertex 1, Task 1, Attempt 1, host1
    TezTaskAttemptID taID11 = TezTaskAttemptID.getInstance(TezTaskID.getInstance(vertexID1, 1), 1);
    TaskAttempt ta11 = mock(TaskAttempt.class);
    AMSchedulerEventTALaunchRequest lrEvent1 = createLaunchRequestEvent(taID11, ta11, resource1, host1, racks,
            priority1);

    //Vertex 1, Task 2, Attempt 1, host1
    TezTaskAttemptID taID12 = TezTaskAttemptID.getInstance(TezTaskID.getInstance(vertexID1, 2), 1);
    TaskAttempt ta12 = mock(TaskAttempt.class);
    AMSchedulerEventTALaunchRequest lrEvent2 = createLaunchRequestEvent(taID12, ta12, resource1, host1, racks,
            priority1);

    //Vertex 1, Task 3, Attempt 1, host2
    TezTaskAttemptID taID13 = TezTaskAttemptID.getInstance(TezTaskID.getInstance(vertexID1, 3), 1);
    TaskAttempt ta13 = mock(TaskAttempt.class);
    AMSchedulerEventTALaunchRequest lrEvent3 = createLaunchRequestEvent(taID13, ta13, resource1, host2, racks,
            priority1);

    //Vertex 1, Task 4, Attempt 1, host2
    TezTaskAttemptID taID14 = TezTaskAttemptID.getInstance(TezTaskID.getInstance(vertexID1, 4), 1);
    TaskAttempt ta14 = mock(TaskAttempt.class);
    AMSchedulerEventTALaunchRequest lrEvent4 = createLaunchRequestEvent(taID14, ta14, resource1, host2, racks,
            priority1);

    taskSchedulerEventHandler.handleEvent(lrEvent1);
    taskSchedulerEventHandler.handleEvent(lrEvent2);
    taskSchedulerEventHandler.handleEvent(lrEvent3);
    taskSchedulerEventHandler.handleEvent(lrEvent4);

    Container container1 = createContainer(1, "host1", resource1, priority1);

    // One container allocated.
    drainNotifier.set(false);
    taskScheduler.onContainersAllocated(Collections.singletonList(container1));
    TestTaskSchedulerHelpers.waitForDelayedDrainNotify(drainNotifier);
    drainableAppCallback.drain();
    verify(taskSchedulerEventHandler).taskAllocated(eq(ta11), any(Object.class), eq(container1));

    // Task assigned to container completed successfully. Container should be re-used.
    taskSchedulerEventHandler
            .handleEvent(new AMSchedulerEventTAEnded(ta11, container1.getId(), TaskAttemptState.SUCCEEDED));
    drainableAppCallback.drain();
    verify(taskScheduler).deallocateTask(eq(ta11), eq(true));
    verify(taskSchedulerEventHandler).taskAllocated(eq(ta12), any(Object.class), eq(container1));
    verify(rmClient, times(0)).releaseAssignedContainer(eq(container1.getId()));
    eventHandler.verifyNoInvocations(AMContainerEventStopRequest.class);
    eventHandler.reset();

    // Task assigned to container completed successfully.
    // Verify reuse across hosts.
    taskSchedulerEventHandler
            .handleEvent(new AMSchedulerEventTAEnded(ta12, container1.getId(), TaskAttemptState.SUCCEEDED));
    drainableAppCallback.drain();
    verify(taskScheduler).deallocateTask(eq(ta12), eq(true));
    verify(taskSchedulerEventHandler).taskAllocated(eq(ta13), any(Object.class), eq(container1));
    verify(rmClient, times(0)).releaseAssignedContainer(eq(container1.getId()));
    eventHandler.verifyNoInvocations(AMContainerEventStopRequest.class);
    eventHandler.reset();

    // Verify no re-use if a previous task fails.
    taskSchedulerEventHandler
            .handleEvent(new AMSchedulerEventTAEnded(ta13, container1.getId(), TaskAttemptState.FAILED));
    drainableAppCallback.drain();
    verify(taskSchedulerEventHandler, times(0)).taskAllocated(eq(ta14), any(Object.class), eq(container1));
    verify(taskScheduler).deallocateTask(eq(ta13), eq(false));
    verify(rmClient).releaseAssignedContainer(eq(container1.getId()));
    eventHandler.verifyInvocation(AMContainerEventStopRequest.class);
    eventHandler.reset();

    Container container2 = createContainer(2, "host2", resource1, priority1);

    // Second container allocated. Should be allocated to the last task.
    drainNotifier.set(false);
    taskScheduler.onContainersAllocated(Collections.singletonList(container2));
    TestTaskSchedulerHelpers.waitForDelayedDrainNotify(drainNotifier);
    drainableAppCallback.drain();
    verify(taskSchedulerEventHandler).taskAllocated(eq(ta14), any(Object.class), eq(container2));

    // Task assigned to container completed successfully. No pending requests. Container should be released.
    taskSchedulerEventHandler
            .handleEvent(new AMSchedulerEventTAEnded(ta14, container2.getId(), TaskAttemptState.SUCCEEDED));
    drainableAppCallback.drain();
    verify(taskScheduler).deallocateTask(eq(ta14), eq(true));
    verify(rmClient).releaseAssignedContainer(eq(container2.getId()));
    eventHandler.verifyInvocation(AMContainerEventStopRequest.class);
    eventHandler.reset();

    taskScheduler.close();
    taskSchedulerEventHandler.close();
}
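
Here set(false) re-arms drainNotifier before each allocation is triggered, and TestTaskSchedulerHelpers.waitForDelayedDrainNotify blocks until the scheduler's drain thread flips it back to true. A plausible minimal version of that re-arm-and-wait handshake, using the AtomicBoolean itself as the monitor (class and method names are illustrative):

import java.util.concurrent.atomic.AtomicBoolean;

public class DrainNotifierSketch {
    static final AtomicBoolean drainNotifier = new AtomicBoolean(false);

    // called by the worker once its delayed work has drained
    static void notifyDrained() {
        synchronized (drainNotifier) {
            drainNotifier.set(true);
            drainNotifier.notifyAll();
        }
    }

    // called by the test: re-arm the flag, trigger the work, then wait
    static void triggerAndWait(Runnable trigger) throws InterruptedException {
        drainNotifier.set(false); // re-arm before triggering
        trigger.run();
        synchronized (drainNotifier) {
            while (!drainNotifier.get()) {
                drainNotifier.wait();
            }
        }
    }

    public static void main(String[] args) throws InterruptedException {
        Thread drainThread = new Thread(DrainNotifierSketch::notifyDrained);
        triggerAndWait(drainThread::start);
        System.out.println("drain observed");
    }
}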

From source file:org.apache.camel.processor.MulticastProcessor.java

protected void doProcessParallel(final Exchange original, final AtomicExchange result,
        final Iterable<ProcessorExchangePair> pairs, final boolean streaming, final AsyncCallback callback)
        throws Exception {

    ObjectHelper.notNull(executorService, "ExecutorService", this);
    ObjectHelper.notNull(aggregateExecutorService, "AggregateExecutorService", this);

    final CompletionService<Exchange> completion;
    if (streaming) {
        // execute tasks in parallel+streaming and aggregate in the order they are finished (out of order sequence)
        completion = new ExecutorCompletionService<Exchange>(executorService);
    } else {
        // execute tasks in parallel and aggregate in the order the tasks are submitted (in order sequence)
        completion = new SubmitOrderedCompletionService<Exchange>(executorService);
    }

    // when parallel then aggregate on the fly
    final AtomicBoolean running = new AtomicBoolean(true);
    final AtomicInteger total = new AtomicInteger(0);
    final AtomicBoolean allTasksSubmitted = new AtomicBoolean();
    final CountDownLatch aggregationOnTheFlyDone = new CountDownLatch(1);
    final AtomicException executionException = new AtomicException();

    final Iterator<ProcessorExchangePair> it = pairs.iterator();

    if (it.hasNext()) {
        // issue task to execute in separate thread so it can aggregate on-the-fly
        // while we submit new tasks, and those tasks complete concurrently
        // this allows us to optimize work and reduce memory consumption
        AggregateOnTheFlyTask task = new AggregateOnTheFlyTask(result, original, total, completion, running,
                aggregationOnTheFlyDone, allTasksSubmitted, executionException);

        // and start the aggregation task so we can aggregate on-the-fly
        aggregateExecutorService.submit(task);
    }

    LOG.trace("Starting to submit parallel tasks");

    while (it.hasNext()) {
        final ProcessorExchangePair pair = it.next();
        final Exchange subExchange = pair.getExchange();
        updateNewExchange(subExchange, total.intValue(), pairs, it);

        completion.submit(new Callable<Exchange>() {
            public Exchange call() throws Exception {
                if (!running.get()) {
                    // do not start processing the task if we are not running
                    return subExchange;
                }

                try {
                    doProcessParallel(pair);
                } catch (Throwable e) {
                    subExchange.setException(e);
                }

                // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                Integer number = getExchangeIndex(subExchange);
                boolean continueProcessing = PipelineHelper.continueProcessing(subExchange,
                        "Parallel processing failed for number " + number, LOG);
                if (stopOnException && !continueProcessing) {
                    // signal to stop running
                    running.set(false);
                    // throw caused exception
                    if (subExchange.getException() != null) {
                        // wrap in exception to explain where it failed
                        throw new CamelExchangeException("Parallel processing failed for number " + number,
                                subExchange, subExchange.getException());
                    }
                }

                if (LOG.isTraceEnabled()) {
                    LOG.trace("Parallel processing complete for exchange: " + subExchange);
                }
                return subExchange;
            }
        });

        total.incrementAndGet();
    }

    // signal that all tasks have been submitted
    if (LOG.isTraceEnabled()) {
        LOG.trace("Signaling that all " + total.get() + " tasks have been submitted.");
    }
    allTasksSubmitted.set(true);

    // it's too hard to do parallel async routing, so we let the caller thread run synchronously
    // and have it pick up the replies and do the aggregation (we use a latch to wait)
    // wait for aggregation to be done
    if (LOG.isDebugEnabled()) {
        LOG.debug("Waiting for on-the-fly aggregation to complete aggregating " + total.get() + " responses.");
    }
    aggregationOnTheFlyDone.await();

    // if we failed for whatever reason, throw the exception that caused it
    if (executionException.get() != null) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Parallel processing failed due to " + executionException.get().getMessage());
        }
        throw executionException.get();
    }

    // now everything is okay, so we are done
    if (LOG.isDebugEnabled()) {
        LOG.debug("Done parallel processing " + total + " exchanges");
    }
}
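
running.set(false) is a cooperative stop signal: tasks that have not started yet observe the flag and return immediately instead of processing. A distilled sketch of the same idea (names are illustrative, not Camel API):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class StopFlagSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        AtomicBoolean running = new AtomicBoolean(true);
        for (int i = 0; i < 100; i++) {
            final int n = i;
            pool.submit(() -> {
                if (!running.get()) {
                    return; // stopped: skip tasks that have not started yet
                }
                if (n == 5) {
                    running.set(false); // e.g. a task failed: signal the rest to stop
                }
            });
        }
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }
}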

From source file:org.apache.hadoop.hbase.regionserver.wal.TestHLogSplit.java

/**
 * Simulates splitting a WAL out from under a regionserver that is still trying to write it.  Ensures we do not
 * lose edits.
 * @throws IOException
 * @throws InterruptedException
 */
@Test(timeout = 300000)
public void testLogCannotBeWrittenOnceParsed() throws IOException, InterruptedException {
    final AtomicLong counter = new AtomicLong(0);
    AtomicBoolean stop = new AtomicBoolean(false);
    // Region we'll write edits to, and then later examine to make sure they all made it in.
    final String region = REGIONS.get(0);
    Thread zombie = new ZombieLastLogWriterRegionServer(this.conf, counter, stop, region);
    try {
        long startCount = counter.get();
        zombie.start();
        // Wait till writer starts going.
        while (startCount == counter.get())
            Threads.sleep(1);
        // Give it a second to write a few appends.
        Threads.sleep(1000);
        final Configuration conf2 = HBaseConfiguration.create(this.conf);
        final User robber = User.createUserForTesting(conf2, ROBBER, GROUP);
        int count = robber.runAs(new PrivilegedExceptionAction<Integer>() {
            @Override
            public Integer run() throws Exception {
                FileSystem fs = FileSystem.get(conf2);
                int expectedFiles = fs.listStatus(HLOGDIR).length;
                HLogSplitter.split(HBASEDIR, HLOGDIR, OLDLOGDIR, fs, conf2);
                Path[] logfiles = getLogForRegion(HBASEDIR, TABLE_NAME, region);
                assertEquals(expectedFiles, logfiles.length);
                int count = 0;
                for (Path logfile : logfiles) {
                    count += countHLog(logfile, fs, conf2);
                }
                return count;
            }
        });
        LOG.info("zombie=" + counter.get() + ", robber=" + count);
        assertTrue(
                "The log file could have at most 1 extra log entry, but can't have less. Zombie could write "
                        + counter.get() + " and logfile had only " + count,
                counter.get() == count || counter.get() + 1 == count);
    } finally {
        stop.set(true);
        zombie.interrupt();
        Threads.threadDumpingIsAlive(zombie);
    }
}

From source file:com.microsoft.tfs.core.clients.versioncontrol.internal.localworkspace.LocalDataAccessLayer.java

private static GetOperation[] sendToServer(final AtomicReference<Failure[]> failures,
        final AtomicBoolean onlineOperationRequired) {
    failures.set(new Failure[0]);
    onlineOperationRequired.set(true);
    return null;
}
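
Because Java has no out-parameters, this method reports results through mutable holders: the caller's AtomicReference and AtomicBoolean are filled in via set. A hypothetical caller/callee pair showing the pattern (names are illustrative):

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

public class OutParameterSketch {
    // the callee reports extra results through the mutable holders
    static void lookup(AtomicReference<String[]> failures, AtomicBoolean onlineRequired) {
        failures.set(new String[0]);
        onlineRequired.set(true);
    }

    public static void main(String[] args) {
        AtomicReference<String[]> failures = new AtomicReference<>();
        AtomicBoolean onlineRequired = new AtomicBoolean();
        lookup(failures, onlineRequired);
        System.out.println("failures=" + failures.get().length + ", online=" + onlineRequired.get());
    }
}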

From source file:com.dragoniade.deviantart.favorites.FavoritesDownloader.java

private File getFile(Deviation da, String url, String filename, AtomicBoolean download,
        YesNoAllDialog matureMoveDialog, YesNoAllDialog overwriteDialog, YesNoAllDialog overwriteNewerDialog,
        YesNoAllDialog deleteEmptyDialog) {

    progress.setText("Downloading file '" + filename + "' from " + da.getArtist());

    String title = filename + " by " + da.getArtist();
    long timestamp = da.getTimestamp().getTime();
    File artPG = LocationHelper.getFile(destination, userId, da, filename);
    File artMature = LocationHelper.getFile(destinationMature, userId, da, filename);
    File art = null;

    if (da.isMature()) {
        if (artPG.exists()) {
            int resMove = matureMoveDialog.displayDialog(owner, title,
                    "This deviation labeled as mature already exists in the main download path.\n Do you want to move the current file to the mature path?");
            if (resMove == YesNoAllDialog.CANCEL) {
                return null;
            }
            if (resMove == YesNoAllDialog.YES) {
                File parent = artMature.getParentFile();
                if (!parent.mkdirs()) {
                    showMessageDialog(owner, "Unable to create '" + parent.getPath() + "'.", "Error",
                            JOptionPane.ERROR_MESSAGE);
                    return null;
                }

                if (artMature.exists()) {
                    int resOv = overwriteDialog.displayDialog(owner, "File already exists",
                            "The file '" + artMature.getPath() + "' already exists. Overwrite?");
                    if (resOv == YesNoAllDialog.YES) {
                        if (!artMature.delete()) {
                            showMessageDialog(owner, "Unable to delete '" + artMature.getPath() + "'.", "Error",
                                    JOptionPane.ERROR_MESSAGE);
                            return null;
                        }
                    } else {
                        return null;
                    }
                }
                if (!artPG.renameTo(artMature)) {
                    showMessageDialog(owner,
                            "Unable to move '" + artPG.getPath() + "' to '" + artMature.getPath() + "'.",
                            "Error", JOptionPane.ERROR_MESSAGE);
                    return null;
                }

                int resEmpty = deleteEmptyDialog.displayDialog(owner, "Delete", "Delete empty folders?");
                if (resEmpty == YesNoAllDialog.YES) {
                    deleteEmptyFolders(artPG);
                }

                if (resEmpty == YesNoAllDialog.CANCEL) {
                    return null;
                }

                download.set(false);
            }

            if (resMove == YesNoAllDialog.NO) {
                download.set(false);
            }
        }
        art = artMature;
    } else {
        art = artPG;
    }

    if (art.exists()) {
        if (timestamp > art.lastModified()) {
            int resOver = overwriteNewerDialog.displayDialog(owner, title,
                    "This deviation already exist but a newer version is available. Replace?");
            if (resOver == YesNoAllDialog.CANCEL) {
                return null;
            }

            if (resOver == YesNoAllDialog.NO) {
                download.set(false);
            } else {
                download.set(false);
            }
        } else {
            download.set(false);
        }
    }

    return art;
}

From source file:com.microsoft.tfs.core.clients.versioncontrol.soapextensions.PendingChange.java

/**
 * Acquires base content if the pending change is in a local workspace. If
 * the file is in the baseline folder, copies it from there. If not, downloads
 * it from the server.
 *
 * @param client
 *        the {@link VersionControlClient} to use (must not be
 *        <code>null</code>)
 * @param localFileName
 *        the local file name to copy the baseline to (must not be
 *        <code>null</code>)
 * @return true if this pending change is in a local workspace, false if it
 *         is in a server workspace.
 * @throws VersionControlException
 *         if this is a local workspace but we failed to acquire the content
 *         (e.g. the baseline was deleted and the connection to the server failed).
 */
private boolean copyLocalBaseline(final VersionControlClient client, final String localFileName) {
    Check.notNull(client, "client"); //$NON-NLS-1$
    Check.notNull(localFileName, "localFileName"); //$NON-NLS-1$

    final AtomicBoolean handled = new AtomicBoolean(false);

    Check.isTrue(
            pendingSetName != null && pendingSetName.length() > 0 && pendingSetOwner != null
                    && pendingSetOwner.length() > 0,
            MessageFormat.format("PendingSetName or PendingSetOwner were not populated for pending change {0}", //$NON-NLS-1$
                    toString()));

    if (inShelveset || pendingSetName == null || pendingSetName.length() == 0 || pendingSetOwner == null
            || pendingSetOwner.length() == 0) {
        return handled.get();
    }

    final Workspace workspace = client.getRuntimeWorkspaceCache().tryGetWorkspace(pendingSetName,
            pendingSetOwner);
    if (workspace != null && workspace.getLocation() == WorkspaceLocation.LOCAL) {
        if (isAdd()) {
            throw new VersionControlException(
                    MessageFormat.format(Messages.getString("PendingChange.NoBaseFileForPendingChangeFormat"), //$NON-NLS-1$
                            getLocalOrServerItem()));
        }

        final LocalWorkspaceTransaction transaction = new LocalWorkspaceTransaction(workspace);
        try {
            transaction.execute(new WorkspacePropertiesLocalVersionTransaction() {
                @Override
                public void invoke(final LocalWorkspaceProperties wp, final WorkspaceVersionTable lv) {
                    final WorkspaceLocalItem lvEntry = lv.getByPendingChange(PendingChange.this);

                    if (null != lvEntry && null != lvEntry.getBaselineFileGUID()) {
                        try {
                            final boolean symlink = PropertyConstants.IS_SYMLINK
                                    .equals(PropertyUtils.selectMatching(lvEntry.getPropertyValues(),
                                            PropertyConstants.SYMBOLIC_KEY));

                            wp.copyBaselineToTarget(lvEntry.getBaselineFileGUID(), localFileName,
                                    lvEntry.getLength(), lvEntry.getHashValue(), symlink);
                            handled.set(true);
                        } catch (final Exception e) {
                            /* Could not copy the local baseline */
                        }
                    }
                }
            });
        } finally {
            try {
                transaction.close();
            } catch (final IOException e) {
                throw new VersionControlException(e);
            }
        }

        if (!handled.get()) {
            // we don't have the baseline which we should have, so let's
            // hit the server to get it

            // TODO we can compress and store it as a baseline
            final String serverItem = (getSourceServerItem() == null || getSourceServerItem().length() == 0)
                    ? getServerItem()
                    : getSourceServerItem();
            final int versionToDownload = isBranch() ? getSourceVersionFrom() : getVersion();

            // for pending branch we use SourceServerItem (source of
            // branch), but SourceVersionFrom (version of the source)
            // instead of Version since it's not committed

            final Item item = client.getItem(serverItem, new ChangesetVersionSpec(versionToDownload),
                    getDeletionID(), true);

            client.downloadFile(new DownloadSpec(item.getDownloadURL()), new File(localFileName), true);

            handled.set(true);
        }
    }

    return handled.get();
}
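
The handled flag exists largely because the anonymous transaction callback cannot assign to a captured local boolean (captured locals must be effectively final); AtomicBoolean gives a final reference whose contents the callback may change. A minimal sketch of that capture pattern (interface and method names are illustrative):

import java.util.concurrent.atomic.AtomicBoolean;

public class CapturedFlagSketch {
    interface Transaction {
        void execute(Runnable body);
    }

    static boolean runAndReport(Transaction tx) {
        final AtomicBoolean handled = new AtomicBoolean(false);
        tx.execute(new Runnable() {
            @Override
            public void run() {
                // a plain local boolean could not be assigned here;
                // the captured reference is final, but its value is mutable
                handled.set(true);
            }
        });
        return handled.get();
    }

    public static void main(String[] args) {
        System.out.println(runAndReport(body -> body.run())); // prints true
    }
}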

From source file:org.apache.bookkeeper.mledger.impl.ManagedCursorImpl.java

@Override
public void delete(Iterable<Position> positions) throws InterruptedException, ManagedLedgerException {
    checkNotNull(positions);

    class Result {
        ManagedLedgerException exception = null;
    }

    final Result result = new Result();
    final CountDownLatch counter = new CountDownLatch(1);
    final AtomicBoolean timeout = new AtomicBoolean(false);

    asyncDelete(positions, new AsyncCallbacks.DeleteCallback() {
        @Override
        public void deleteComplete(Object ctx) {
            if (timeout.get()) {
                log.warn("[{}] [{}] Delete operation timeout. Callback deleteComplete at position {}",
                        ledger.getName(), name, positions);
            }

            counter.countDown();
        }

        @Override
        public void deleteFailed(ManagedLedgerException exception, Object ctx) {
            result.exception = exception;

            if (timeout.get()) {
                log.warn("[{}] [{}] Delete operation timeout. Callback deleteFailed at position {}",
                        ledger.getName(), name, positions);
            }

            counter.countDown();
        }
    }, null);

    if (!counter.await(ManagedLedgerImpl.AsyncOperationTimeoutSeconds, TimeUnit.SECONDS)) {
        timeout.set(true);
        log.warn("[{}] [{}] Delete operation timeout. No callback was triggered at position {}",
                ledger.getName(), name, positions);
        throw new ManagedLedgerException("Timeout during delete operation");
    }

    if (result.exception != null) {
        throw result.exception;
    }
}
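
The timeout flag is set only after the latch wait expires, so a callback that arrives late can detect, and log, that the caller has already given up. A reduced sketch of that latch-plus-flag handshake (names and delays are illustrative):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class TimeoutFlagSketch {
    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch counter = new CountDownLatch(1);
        final AtomicBoolean timeout = new AtomicBoolean(false);

        Thread callback = new Thread(() -> {
            try {
                Thread.sleep(200); // simulate a slow asynchronous operation
            } catch (InterruptedException ignored) {
            }
            if (timeout.get()) {
                System.out.println("callback fired after the caller timed out");
            }
            counter.countDown();
        });
        callback.start();

        if (!counter.await(100, TimeUnit.MILLISECONDS)) {
            timeout.set(true); // mark so the late callback can tell
            System.out.println("timed out waiting for the callback");
        }
    }
}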

From source file:com.datamelt.nifi.processors.ExecuteRuleEngine.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    // map used to store the attribute name and its value from the content of the flow file
    final Map<String, String> propertyMap = new HashMap<>();

    // get a logger instance
    final ComponentLog logger = getLogger();

    // a header from the content if present
    final AtomicReference<HeaderRow> header = new AtomicReference<>();

    AtomicBoolean error = new AtomicBoolean();

    // get the flow file
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    // list of rows from splitting the original flow file content
    ArrayList<RuleEngineRow> flowFileRows = new ArrayList<RuleEngineRow>();

    // list of rows containing the detailed results of the ruleengine
    ArrayList<RuleEngineRow> flowFileDetails = new ArrayList<RuleEngineRow>();

    boolean headerPresent = context.getProperty(ATTRIBUTE_HEADER_PRESENT).getValue().equals("true");

    // put the name of the ruleengine zip file in the list of properties
    propertyMap.put(PROPERTY_RULEENGINE_ZIPFILE_NAME,
            context.getProperty(ATTRIBUTE_RULEENGINE_ZIPFILE).getValue());

    final int batchSize = Integer.parseInt(context.getProperty(BATCH_SIZE_NAME).getValue());

    // read flow file into input stream
    session.read(flowFile, new InputStreamCallback() {
        public void process(InputStream in) throws IOException {
            try {
                // iterator over the lines from the input stream
                LineIterator iterator = IOUtils.lineIterator(in, "utf-8");

                // check if configuration indicates that a header row is present in the flow file content
                if (headerPresent) {
                    logger.debug("configuration indicates a header row is present in flow file content");

                    // if there is at least one row of data and the header is not defined yet
                    if (iterator.hasNext() && header.get() == null) {
                        // set the header from the content
                        header.set(new HeaderRow(iterator.nextLine(), separator));
                    }
                }
                // if no header row is present in the flow file content
                else {
                    logger.debug("configuration indicates no header row is present in flow file content");

                    // use the header from the field names
                    header.set(headerFromFieldNames);
                }

                // loop over all rows of data
                while (iterator.hasNext()) {
                    // we handle the error per row of data
                    error.set(false);

                    // get a row to process
                    String row = iterator.nextLine();

                    // check that we have data
                    if (row != null && !row.trim().equals("")) {
                        RowFieldCollection rowFieldCollection = null;
                        try {
                            rowFieldCollection = getRowFieldCollection(row, header.get());

                            logger.debug("RowFieldCollection header contains: "
                                    + rowFieldCollection.getHeader().getNumberOfFields() + " fields");
                            logger.debug("RowFieldCollection contains: "
                                    + rowFieldCollection.getNumberOfFields() + " fields");

                            // run the ruleengine with the given data from the flow file
                            logger.debug("running business ruleengine...");

                            // run the business logic/rules against the data
                            ruleEngine.run("flowfile", rowFieldCollection);

                            // add some debugging output that might be useful
                            logger.debug("number of rulegroups: " + ruleEngine.getNumberOfGroups());
                            logger.debug(
                                    "number of rulegroups passed: " + ruleEngine.getNumberOfGroupsPassed());
                            logger.debug(
                                    "number of rulegroups failed: " + ruleEngine.getNumberOfGroupsFailed());
                            logger.debug(
                                    "number of rulegroups skipped: " + ruleEngine.getNumberOfGroupsSkipped());
                            logger.debug("number of rules: " + ruleEngine.getNumberOfRules());
                            logger.debug("number of rules passed: " + ruleEngine.getNumberOfRulesPassed());
                            logger.debug("number of rules failed: " + ruleEngine.getNumberOfRulesFailed());
                            logger.debug("number of actions: " + ruleEngine.getNumberOfActions());

                            // add some properties of the ruleengine execution to the map
                            addRuleEngineProperties(propertyMap);
                        } catch (Exception ex) {
                            error.set(true);
                            logger.error(ex.getMessage(), ex);
                        }

                        // if no error occurred, we save the data for the later creation of the flow files
                        if (!error.get()) {
                            // process only if the collection of fields was changed by
                            // a ruleengine action. this means the data was updated so
                            // we will have to re-write/re-create the flow file content.
                            if (rowFieldCollection.isCollectionUpdated()) {
                                // put an indicator that the data was modified by the ruleengine
                                propertyMap.put(PROPERTY_RULEENGINE_CONTENT_MODIFIED, "true");

                                logger.debug(
                                        "data was modified - updating flow file content with ruleengine results");

                                // the RuleEngineRow instance will contain the row of data and the map of properties
                                // and will later be used when the flow files are created
                                flowFileRows
                                        .add(new RuleEngineRow(getResultRow(rowFieldCollection), propertyMap));
                            } else {
                                // put an indicator that the data was NOT modified by the ruleengine
                                propertyMap.put(PROPERTY_RULEENGINE_CONTENT_MODIFIED, "false");

                                logger.debug("data was not modified - using original content");

                                // the RuleEngineRow instance will contain the row of data and the map of properties
                                // and will later be used when the flow files are created
                                flowFileRows.add(new RuleEngineRow(row, propertyMap));
                            }

                            if (flowFileRows.size() >= batchSize) {
                                // generate flow files from the individual rows
                                List<FlowFile> splitFlowFiles = generateFlowFileSplits(context, session,
                                        flowFileRows, header.get(), headerPresent);
                                // transfer all individual rows to success relationship
                                if (splitFlowFiles.size() > 0) {
                                    session.transfer(splitFlowFiles, SUCCESS);
                                }
                            }

                            // if the user configured detailed results 
                            if (context.getProperty(ATTRIBUTE_OUTPUT_DETAILED_RESULTS).getValue()
                                    .equals("true")) {
                                // get the configured output type
                                String outputType = context.getProperty(ATTRIBUTE_OUTPUT_DETAILED_RESULTS_TYPE)
                                        .getValue();
                                logger.debug("configuration set to output detailed results with type ["
                                        + outputType + "]");

                                // we only need to create a flow file if the ruleengine results match the output type settings
                                if (outputType.equals(OUTPUT_TYPE_ALL_GROUPS_ALL_RULES)
                                        || (outputType.equals(OUTPUT_TYPE_FAILED_GROUPS_ALL_RULES)
                                                && ruleEngine.getNumberOfGroupsFailed() > 0)
                                        || (outputType.equals(OUTPUT_TYPE_FAILED_GROUPS_FAILED_RULES)
                                                && ruleEngine.getNumberOfGroupsFailed() > 0)
                                        || (outputType.equals(OUTPUT_TYPE_FAILED_GROUPS_PASSED_RULES)
                                                && ruleEngine.getNumberOfGroupsFailed() > 0)
                                        || (outputType.equals(OUTPUT_TYPE_PASSED_GROUPS_ALL_RULES)
                                                && ruleEngine.getNumberOfGroupsPassed() > 0)
                                        || (outputType.equals(OUTPUT_TYPE_PASSED_GROUPS_FAILED_RULES)
                                                && ruleEngine.getNumberOfGroupsPassed() > 0
                                                || (outputType.equals(OUTPUT_TYPE_PASSED_GROUPS_PASSED_RULES)
                                                        && ruleEngine.getNumberOfGroupsPassed() > 0))) {
                                    // create the content for the flow file
                                    String content = getFlowFileRuleEngineDetailsContent(header.get(),
                                            headerPresent, outputType, row);

                                    // add results to the list
                                    flowFileDetails.add(new RuleEngineRow(content, propertyMap));

                                    if (flowFileDetails.size() >= batchSize) {
                                        List<FlowFile> detailsFlowFiles = generateFlowFilesRuleEngineDetails(
                                                context, session, flowFileDetails, header.get(), headerPresent);
                                        // transfer all individual rows to detailed relationship
                                        if (detailsFlowFiles.size() > 0) {
                                            session.transfer(detailsFlowFiles, DETAILED_RESULTS);
                                        }
                                    }
                                }
                            }
                            // clear the collections of ruleengine results
                            ruleEngine.getRuleExecutionCollection().clear();
                        }
                        // if we have an error, we create a flow file from the current row of data and send it to the failure relationship
                        else {
                            FlowFile failureFlowFile = generateFailureFlowFile(context, session, row,
                                    header.get(), headerPresent);
                            session.transfer(failureFlowFile, FAILURE);
                        }
                    }
                }

                LineIterator.closeQuietly(iterator);
            } catch (Exception ex) {
                ex.printStackTrace();
                logger.error("error running the business ruleengine", ex);
            }
        }
    });

    // generate flow files from the individual rows
    List<FlowFile> splitFlowFiles = generateFlowFileSplits(context, session, flowFileRows, header.get(),
            headerPresent);

    // generate flow files with the detailed ruleengine results
    List<FlowFile> detailsFlowFiles = generateFlowFilesRuleEngineDetails(context, session, flowFileDetails,
            header.get(), headerPresent);

    // transfer the original flow file
    session.transfer(flowFile, ORIGINAL);

    // transfer all individual rows to success relationship
    if (splitFlowFiles.size() > 0) {
        session.transfer(splitFlowFiles, SUCCESS);
    }

    // transfer all detailed result rows to the detailed results relationship
    if (detailsFlowFiles.size() > 0) {
        session.transfer(detailsFlowFiles, DETAILED_RESULTS);
    }
}

From source file:com.parse.ParseObject.java

/**
 * This saves all of the objects and files reachable from the given object. It does its work in
 * multiple waves, saving as many as possible in each wave. If there's ever an error, it just
 * gives up, sets error, and returns NO.
 */
private static Task<Void> deepSaveAsync(final Object object, final String sessionToken) {
    Set<ParseObject> objects = new HashSet<>();
    Set<ParseFile> files = new HashSet<>();
    collectDirtyChildren(object, objects, files);

    // This has to happen separately from everything else because ParseUser.save() is
    // special-cased to work for lazy users, but new users can't be created by
    // ParseMultiCommand's regular save.
    Set<ParseUser> users = new HashSet<>();
    for (ParseObject o : objects) {
        if (o instanceof ParseUser) {
            ParseUser user = (ParseUser) o;
            if (user.isLazy()) {
                users.add((ParseUser) o);
            }
        }
    }
    objects.removeAll(users);

    // objects will need to wait for files to be complete since they may be nested children.
    final AtomicBoolean filesComplete = new AtomicBoolean(false);
    List<Task<Void>> tasks = new ArrayList<>();
    for (ParseFile file : files) {
        tasks.add(file.saveAsync(sessionToken, null, null));
    }
    Task<Void> filesTask = Task.whenAll(tasks).continueWith(new Continuation<Void, Void>() {
        @Override
        public Void then(Task<Void> task) throws Exception {
            filesComplete.set(true);
            return null;
        }
    });

    // objects will need to wait for users to be complete since they may be nested children.
    final AtomicBoolean usersComplete = new AtomicBoolean(false);
    tasks = new ArrayList<>();
    for (final ParseUser user : users) {
        tasks.add(user.saveAsync(sessionToken));
    }
    Task<Void> usersTask = Task.whenAll(tasks).continueWith(new Continuation<Void, Void>() {
        @Override
        public Void then(Task<Void> task) throws Exception {
            usersComplete.set(true);
            return null;
        }
    });

    final Capture<Set<ParseObject>> remaining = new Capture<>(objects);
    Task<Void> objectsTask = Task.forResult(null).continueWhile(new Callable<Boolean>() {
        @Override
        public Boolean call() throws Exception {
            return remaining.get().size() > 0;
        }
    }, new Continuation<Void, Task<Void>>() {
        @Override
        public Task<Void> then(Task<Void> task) throws Exception {
            // Partition the objects into two sets: those that can be saved immediately,
            // and those that rely on other objects to be created first.
            final List<ParseObject> current = new ArrayList<>();
            final Set<ParseObject> nextBatch = new HashSet<>();
            for (ParseObject obj : remaining.get()) {
                if (obj.canBeSerialized()) {
                    current.add(obj);
                } else {
                    nextBatch.add(obj);
                }
            }
            remaining.set(nextBatch);

            if (current.size() == 0 && filesComplete.get() && usersComplete.get()) {
                // We do cycle-detection when building the list of objects passed to this function, so
                // this should never get called. But we should check for it anyway, so that we get an
                // exception instead of an infinite loop.
                throw new RuntimeException("Unable to save a ParseObject with a relation to a cycle.");
            }

            // Package all save commands together
            if (current.size() == 0) {
                return Task.forResult(null);
            }

            return enqueueForAll(current, new Continuation<Void, Task<Void>>() {
                @Override
                public Task<Void> then(Task<Void> toAwait) throws Exception {
                    return saveAllAsync(current, sessionToken, toAwait);
                }
            });
        }
    });

    return Task.whenAll(Arrays.asList(filesTask, usersTask, objectsTask));
}
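
filesComplete and usersComplete record, via set(true), that the file and user waves have finished, letting the object loop distinguish "still waiting on prerequisites" from a genuine dependency cycle. The same flag-on-completion pattern, shown with java.util.concurrent.CompletableFuture rather than the Parse/bolts Task API (illustrative only):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;

public class CompletionFlagSketch {
    public static void main(String[] args) {
        AtomicBoolean filesComplete = new AtomicBoolean(false);
        CompletableFuture<Void> files = CompletableFuture.allOf(
                CompletableFuture.runAsync(() -> { /* upload file 1 */ }),
                CompletableFuture.runAsync(() -> { /* upload file 2 */ }));
        // flip the flag once all uploads have finished
        CompletableFuture<Void> flagged = files.thenRun(() -> filesComplete.set(true));
        flagged.join();
        System.out.println("files complete: " + filesComplete.get()); // true
    }
}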