Example usage for java.util.concurrent.atomic AtomicBoolean get

Introduction

This page lists example usages of java.util.concurrent.atomic.AtomicBoolean.get(), drawn from several open-source projects.

Prototype

public final boolean get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
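
Before the project examples, here is a minimal, self-contained sketch (not drawn from any of the sources below; the class and variable names are illustrative) that uses get() as a stop flag shared between two threads:

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanGetExample {
    public static void main(String[] args) throws InterruptedException {
        // Shared flag; get() reads it with volatile semantics, so the worker
        // thread is guaranteed to observe the write made by the main thread.
        final AtomicBoolean running = new AtomicBoolean(true);

        Thread worker = new Thread(() -> {
            while (running.get()) {
                Thread.yield(); // spin until the flag is cleared
            }
            System.out.println("worker observed running == false, exiting");
        });

        worker.start();
        Thread.sleep(100);   // let the worker spin briefly
        running.set(false);  // becomes visible to the worker through get()
        worker.join();
    }
}

Because get() carries volatile read semantics, the set(false) performed by the main thread is guaranteed to become visible to the worker's loop condition.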

Usage

From source file:com.turbospaces.model.BO.java

/**
 * create business object over actual basic persistent entity
 *
 * @param delegate
 *            the actual persistent entity meta-data provider
 * @throws NoSuchMethodException
 *             re-throw cglib exception
 * @throws SecurityException
 *             re-throw cglib exception
 * @throws IntrospectionException
 *             re-throw exceptions
 */
public BO(final BasicPersistentEntity delegate)
        throws SecurityException, NoSuchMethodException, IntrospectionException {
    this.delegate = delegate;
    this.fastConstructor = FastClass.create(delegate.getType())
            .getConstructor(delegate.getType().getConstructor());

    // find optimistic lock version/routing fields
    {
        final Collection<PersistentProperty> versionCandidates = Lists.newLinkedList();
        final Collection<PersistentProperty> routingCandidates = Lists.newLinkedList();
        delegate.doWithProperties(new PropertyHandler() {
            @Override
            public void doWithPersistentProperty(final PersistentProperty persistentProperty) {
                PropertyDescriptor propertyDescriptor = persistentProperty.getPropertyDescriptor();
                Field field = persistentProperty.getField();

                if (hasAnnotation(propertyDescriptor, field, Version.class))
                    versionCandidates.add(persistentProperty);
                if (hasAnnotation(propertyDescriptor, field, Routing.class))
                    routingCandidates.add(persistentProperty);
            }

            private boolean hasAnnotation(final PropertyDescriptor descriptor, final Field field,
                    final Class annotation) {
                if (descriptor != null && descriptor.getReadMethod() != null
                        && descriptor.getReadMethod().getAnnotation(annotation) != null)
                    return true;
                if (field != null && field.getAnnotation(annotation) != null)
                    return true;
                return false;
            }
        });
        Preconditions.checkArgument(versionCandidates.size() <= 1,
                "too many fields marked with @Version annotation, candidates = "
                        + versionCandidates.toString());
        Preconditions.checkArgument(routingCandidates.size() <= 1,
                "too many fields marked with @Routing annotation, candidates = "
                        + routingCandidates.toString());

        if (!versionCandidates.isEmpty())
            optimisticLockVersionProperty = versionCandidates.iterator().next();
        if (!routingCandidates.isEmpty())
            routingProperty = routingCandidates.iterator().next();
    }

    {
        // Java Beans convention marker
        AtomicBoolean propertyAccess = new AtomicBoolean(true);

        List<String> setters = Lists.newLinkedList();
        List<String> getters = Lists.newLinkedList();
        List<Class<?>> types = Lists.newLinkedList();

        for (PersistentProperty<?> persistentProperty : getOrderedProperties()) {
            PropertyDescriptor propertyDescriptor = persistentProperty.getPropertyDescriptor();
            if (propertyDescriptor != null) {
                if (propertyDescriptor.getReadMethod() != null && propertyDescriptor.getWriteMethod() != null) {
                    setters.add(propertyDescriptor.getWriteMethod().getName());
                    getters.add(propertyDescriptor.getReadMethod().getName());
                    types.add(persistentProperty.getType());
                }
            } else {
                propertyAccess.set(false);
                brokenProperties.add(persistentProperty);
            }
        }

        if (propertyAccess.get())
            // create properties extract for all persistent properties
            bulkBean = BulkBean.create(delegate.getType(), getters.toArray(new String[getters.size()]),
                    setters.toArray(new String[setters.size()]), types.toArray(new Class[types.size()]));
        else
            Log.warn(String.format(
                    "PropetiesSerializer-%s unable to use getters-setters access optimization. Suspected/Corrupted properties = %s",
                    delegate.getType().getSimpleName(), getBrokenProperties()));

        boolean canOptimizeIdProperty = hasReadWriteMethods(delegate.getIdProperty());
        boolean canOptimizeVersionProperty = hasReadWriteMethods(getOptimisticLockVersionProperty());
        boolean canOptimizeRoutingProperty = hasReadWriteMethods(getRoutingProperty());

        // create id/version/routing bulk fields extractor
        if (canOptimizeIdProperty && canOptimizeVersionProperty && canOptimizeRoutingProperty) {
            String[] g = new String[] {
                    delegate.getIdProperty().getPropertyDescriptor().getReadMethod().getName(),
                    getOptimisticLockVersionProperty().getPropertyDescriptor().getReadMethod().getName(),
                    getRoutingProperty().getPropertyDescriptor().getReadMethod().getName() };
            String[] s = new String[] {
                    delegate.getIdProperty().getPropertyDescriptor().getWriteMethod().getName(),
                    getOptimisticLockVersionProperty().getPropertyDescriptor().getWriteMethod().getName(),
                    getRoutingProperty().getPropertyDescriptor().getWriteMethod().getName() };
            Class<?>[] c = new Class[] { delegate.getIdProperty().getType(),
                    getOptimisticLockVersionProperty().getType(), getRoutingProperty().getType() };

            idVersionRoutingBulkBean = BulkBean.create(delegate.getType(), g, s, c);
        }
    }
}

From source file:org.apache.hadoop.hbase.master.cleaner.TestLogsCleaner.java

@Test
public void testZooKeeperAbortDuringGetListOfReplicators() throws Exception {
    ReplicationLogCleaner cleaner = new ReplicationLogCleaner();

    List<FileStatus> dummyFiles = Arrays.asList(
            new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path("log1")),
            new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path("log2")));

    FaultyZooKeeperWatcher faultyZK = new FaultyZooKeeperWatcher(conf, "testZooKeeperAbort-faulty", null);
    final AtomicBoolean getListOfReplicatorsFailed = new AtomicBoolean(false);

    try {
        faultyZK.init();
        ReplicationQueueStorage queueStorage = spy(
                ReplicationStorageFactory.getReplicationQueueStorage(faultyZK, conf));
        doAnswer(new Answer<Object>() {
            @Override
            public Object answer(InvocationOnMock invocation) throws Throwable {
                try {
                    return invocation.callRealMethod();
                } catch (ReplicationException e) {
                    LOG.debug("Caught Exception", e);
                    getListOfReplicatorsFailed.set(true);
                    throw e;
                }
            }
        }).when(queueStorage).getAllWALs();

        cleaner.setConf(conf, faultyZK, queueStorage);
        // should keep all files due to a ConnectionLossException getting the queues znodes
        cleaner.preClean();
        Iterable<FileStatus> toDelete = cleaner.getDeletableFiles(dummyFiles);

        assertTrue(getListOfReplicatorsFailed.get());
        assertFalse(toDelete.iterator().hasNext());
        assertFalse(cleaner.isStopped());
    } finally {
        faultyZK.close();
    }
}

From source file:com.drextended.actionhandler.action.CompositeAction.java

/**
 * Prepares popup menu to show given menu items
 *
 * @param context    The Context, which generally get from view by {@link View#getContext()}
 * @param view       The View, which can be used for prepare any visual effect (like animation),
 *                   Generally it is that view which was clicked and initiated action to fire.
 * @param actionType The action type
 * @param model      The model which should be handled by the action.
 * @param menuItems  list of items which will be shown in a menu
 * @return popup menu to show given menu items
 */
protected PopupMenu buildPopupMenu(final Context context, final View view, final String actionType,
        final M model, final List<ActionItem> menuItems) {
    final PopupMenu popupMenu = new PopupMenu(context, view);
    final Menu menu = popupMenu.getMenu();
    int count = menuItems.size();
    for (int index = 0; index < count; index++) {
        final ActionItem item = menuItems.get(index);
        //noinspection unchecked
        menu.add(0, index, 0, item.titleProvider.getTitle(context, model));
        if (mShowNonAcceptedActions) {
            menu.getItem(index).setEnabled(item.action.isModelAccepted(model));
        }
    }
    final AtomicBoolean activated = new AtomicBoolean(false);
    popupMenu.setOnMenuItemClickListener(new PopupMenu.OnMenuItemClickListener() {
        @Override
        public boolean onMenuItemClick(MenuItem item) {
            activated.set(true);
            final ActionItem actionItem = menuItems.get(item.getItemId());
            if (item.isEnabled()) {
                fireActionItem(context, view, actionItem.actionType, model, actionItem);
            } else {
                notifyOnActionDismiss("The model is not accepted for selected action", view, actionType, model);
            }
            return true;
        }
    });
    popupMenu.setOnDismissListener(new PopupMenu.OnDismissListener() {
        @Override
        public void onDismiss(PopupMenu menu) {
            if (!activated.get()) {
                notifyOnActionDismiss("CompositeAction menu dismissed", view, actionType, model);
            }
        }
    });
    return popupMenu;
}

From source file:com.streamsets.pipeline.stage.bigquery.destination.BigQueryTarget.java

@Override
public void write(Batch batch) throws StageException {
    Map<TableId, List<Record>> tableIdToRecords = new LinkedHashMap<>();
    Map<Long, Record> requestIndexToRecords = new LinkedHashMap<>();

    if (batch.getRecords().hasNext()) {
        ELVars elVars = getContext().createELVars();
        batch.getRecords().forEachRemaining(record -> {
            RecordEL.setRecordInContext(elVars, record);
            try {
                String datasetName = dataSetEval.eval(elVars, conf.datasetEL, String.class);
                String tableName = tableNameELEval.eval(elVars, conf.tableNameEL, String.class);
                TableId tableId = TableId.of(datasetName, tableName);
                if (tableIdExistsCache.get(tableId)) {
                    List<Record> tableIdRecords = tableIdToRecords.computeIfAbsent(tableId,
                            t -> new ArrayList<>());
                    tableIdRecords.add(record);
                } else {
                    getContext().toError(record, Errors.BIGQUERY_17, datasetName, tableName,
                            conf.credentials.projectId);
                }
            } catch (ELEvalException e) {
                LOG.error("Error evaluating DataSet/TableName EL", e);
                getContext().toError(record, Errors.BIGQUERY_10, e);
            } catch (ExecutionException e) {
                LOG.error("Error when checking exists for tableId, Reason : {}", e);
                Throwable rootCause = Throwables.getRootCause(e);
                getContext().toError(record, Errors.BIGQUERY_13, rootCause);
            }
        });

        tableIdToRecords.forEach((tableId, records) -> {
            final AtomicLong index = new AtomicLong(0);
            final AtomicBoolean areThereRecordsToWrite = new AtomicBoolean(false);
            InsertAllRequest.Builder insertAllRequestBuilder = InsertAllRequest.newBuilder(tableId);
            records.forEach(record -> {
                try {
                    String insertId = getInsertIdForRecord(elVars, record);
                    Map<String, ?> rowContent = convertToRowObjectFromRecord(record);
                    if (rowContent.isEmpty()) {
                        throw new OnRecordErrorException(record, Errors.BIGQUERY_14);
                    }
                    insertAllRequestBuilder.addRow(insertId, rowContent);
                    areThereRecordsToWrite.set(true);
                    requestIndexToRecords.put(index.getAndIncrement(), record);
                } catch (OnRecordErrorException e) {
                    LOG.error("Error when converting record {} to row, Reason : {} ",
                            record.getHeader().getSourceId(), e.getMessage());
                    getContext().toError(record, e.getErrorCode(), e.getParams());
                }
            });

            if (areThereRecordsToWrite.get()) {
                insertAllRequestBuilder.setIgnoreUnknownValues(conf.ignoreInvalidColumn);
                insertAllRequestBuilder.setSkipInvalidRows(false);

                InsertAllRequest request = insertAllRequestBuilder.build();

                if (!request.getRows().isEmpty()) {
                    try {
                        InsertAllResponse response = bigQuery.insertAll(request);
                        if (response.hasErrors()) {
                            response.getInsertErrors().forEach((requestIdx, errors) -> {
                                Record record = requestIndexToRecords.get(requestIdx);
                                String messages = COMMA_JOINER.join(errors.stream()
                                        .map(BigQueryError::getMessage).collect(Collectors.toList()));
                                String reasons = COMMA_JOINER.join(errors.stream().map(BigQueryError::getReason)
                                        .collect(Collectors.toList()));
                                LOG.error("Error when inserting record {}, Reasons : {}, Messages : {}",
                                        record.getHeader().getSourceId(), reasons, messages);
                                getContext().toError(record, Errors.BIGQUERY_11, reasons, messages);
                            });
                        }
                    } catch (BigQueryException e) {
                        LOG.error(Errors.BIGQUERY_13.getMessage(), e);
                        //Put all records to error.
                        for (long i = 0; i < request.getRows().size(); i++) {
                            Record record = requestIndexToRecords.get(i);
                            getContext().toError(record, Errors.BIGQUERY_13, e);
                        }
                    }
                }
            }
        });
    }
}

From source file:org.apache.tinkerpop.gremlin.structure.io.IoCustomTest.java

@Test
@FeatureRequirement(featureClass = Graph.Features.EdgeFeatures.class, feature = Graph.Features.EdgeFeatures.FEATURE_ADD_EDGES)
@FeatureRequirement(featureClass = Graph.Features.VertexFeatures.class, feature = Graph.Features.VertexFeatures.FEATURE_ADD_VERTICES)
@FeatureRequirement(featureClass = Graph.Features.EdgePropertyFeatures.class, feature = Graph.Features.EdgePropertyFeatures.FEATURE_SERIALIZABLE_VALUES)
public void shouldSupportUUID() throws Exception {
    final UUID id = UUID.randomUUID();
    final Vertex v1 = graph.addVertex(T.label, "person");
    final Vertex v2 = graph.addVertex(T.label, "person");
    final Edge e = v1.addEdge("friend", v2, "uuid", id);

    try (final ByteArrayOutputStream os = new ByteArrayOutputStream()) {
        final GraphWriter writer = writerMaker.apply(graph);
        writer.writeEdge(os, e);

        final AtomicBoolean called = new AtomicBoolean(false);
        final GraphReader reader = readerMaker.apply(graph);
        try (final ByteArrayInputStream bais = new ByteArrayInputStream(os.toByteArray())) {
            reader.readEdge(bais, edge -> {
                final Edge detachedEdge = (Edge) edge;
                assertEquals(e.id(), assertIdDirectly ? detachedEdge.id()
                        : graph.edges(detachedEdge.id().toString()).next().id());
                assertEquals(v1.id(), assertIdDirectly ? detachedEdge.outVertex().id()
                        : graph.vertices(detachedEdge.outVertex().id().toString()).next().id());
                assertEquals(v2.id(), assertIdDirectly ? detachedEdge.inVertex().id()
                        : graph.vertices(detachedEdge.inVertex().id().toString()).next().id());
                assertEquals(v1.label(), detachedEdge.outVertex().label());
                assertEquals(v2.label(), detachedEdge.inVertex().label());
                assertEquals(e.label(), detachedEdge.label());
                assertEquals(e.keys().size(), IteratorUtils.count(detachedEdge.properties()));
                assertEquals(id, detachedEdge.value("uuid"));

                called.set(true);

                return null;
            });
        }

        assertTrue(called.get());
    }
}

From source file:org.apache.tinkerpop.gremlin.groovy.engine.GremlinExecutorTest.java

@Test
public void shouldTimeoutSleepingScript() throws Exception {
    final AtomicBoolean successCalled = new AtomicBoolean(false);
    final AtomicBoolean failureCalled = new AtomicBoolean(false);

    final CountDownLatch timeOutCount = new CountDownLatch(1);

    final GremlinExecutor gremlinExecutor = GremlinExecutor.build().scriptEvaluationTimeout(250)
            .afterFailure((b, e) -> failureCalled.set(true)).afterSuccess((b) -> successCalled.set(true))
            .afterTimeout((b) -> timeOutCount.countDown()).create();
    try {
        gremlinExecutor.eval("Thread.sleep(1000);10").get();
        fail("This script should have timed out with an exception");
    } catch (Exception ex) {
        assertEquals(TimeoutException.class, ex.getCause().getClass());
    }

    assertTrue(timeOutCount.await(2000, TimeUnit.MILLISECONDS));

    assertFalse(successCalled.get());
    assertFalse(failureCalled.get());
    assertEquals(0, timeOutCount.getCount());
    gremlinExecutor.close();
}

From source file:com.microsoft.tfs.core.ws.runtime.client.SOAPService.java

/**
 * Execute a SOAP request that was built via
 * {@link #createSOAPRequest(String, SOAPMethodRequestWriter)}
 *
 * @param request
 *        the request to execute (not null).
 * @param responseName
 *        the name of the SOAP response message for this request (not null)
 * @param responseReader
 *        the response reader that will do the work of reading the response
 *        (except the SOAP envelope). If null, no response stream reader is
 *        invoked (no response data is read except for the SOAP envelope and
 *        body elements).
 * @throws SOAPFault
 *         if a SOAP fault was returned by the server.
 * @throws UnauthorizedException
 *         if the client could not contact the server because of an
 *         authorization error (HTTP 401).
 * @throws ProxyUnauthorizedException
 *         if the client could not authenticate to the HTTP proxy
 * @throws FederatedAuthException
 *         if the client could not contact the server because it lacks the
 *         proper federated authentication (ACS) cookies and the federated
 *         authentication handler (set by
 *         {@link #setTransportAuthHandler(TransportAuthHandler)} ) did not
 *         handle the exception. The caller is expected to obtain the
 *         cookies and resubmit.
 * @throws InvalidServerResponseException
 *         if the server returned data that could not be parsed as XML or
 *         SOAP.
 * @throws EndpointNotFoundException
 *         if the server returned HTTP 404 when the request was executed.
 * @throws TransportException
 *         if some other an IO error occurred.
 * @throws TransportRequestHandlerCanceledException
 *         if the user cancelled the prompt for credentials
 */
protected void executeSOAPRequest(final SOAPRequest request, final String responseName,
        final SOAPMethodResponseReader responseReader) throws SOAPFault, UnauthorizedException,
        ProxyUnauthorizedException, FederatedAuthException, InvalidServerResponseException,
        EndpointNotFoundException, TransportException, TransportRequestHandlerCanceledException {
    /*
     * Duplicate the transport request handler map so we needn't keep a lock
     * and so that we have a consistent set throughout execution.
     */
    final List<TransportRequestHandler> requestHandlers = new ArrayList<TransportRequestHandler>();

    synchronized (transportRequestHandlers) {
        requestHandlers.addAll(transportRequestHandlers);
    }

    /*
     * Allow the transport authentication handler to process initial
     * credentials. This can happen if we're lazily authenticating and we do
     * not yet have a full set of credentials.
     */
    final AtomicBoolean cancel = new AtomicBoolean(false);

    for (final TransportRequestHandler requestHandler : requestHandlers) {
        // cancel doesn't stop us from invoking handlers
        if (requestHandler.prepareRequest(this, request, cancel) == Status.COMPLETE) {
            break;
        }
    }

    if (cancel.get()) {
        throw new TransportRequestHandlerCanceledException();
    }

    /*
     * Execute this method in a retry loop. On exceptions, we can delegate
     * to a user configured exception handler, which may modify the method
     * and allow us to resubmit.
     *
     * The typical use case for this is ACS authentication - it can expire
     * in the middle of a call and we want to prompt the user to
     * reauthenticate.
     */

    RuntimeException failure = null;
    do {
        try {
            executeSOAPRequestInternal(request, responseName, responseReader);
            break;
        } catch (final RuntimeException e) {
            // Give the handlers a chance to handle/correct/cancel this
            // exception

            boolean exceptionHandled = false;
            cancel.set(false);

            for (final TransportRequestHandler requestHandler : requestHandlers) {
                // cancel doesn't stop us from invoking handlers
                if (requestHandler.handleException(this, request, e, cancel) == Status.COMPLETE) {
                    /*
                     * This handler handled the exception - defer all others
                     * from attempting to handle it and reset the auth
                     * state.
                     */
                    request.getPostMethod().getHostAuthState().invalidate();

                    failure = null;
                    exceptionHandled = true;
                    break;
                }

                // Status was CONTINUE, continue with next handler
            }

            // Wasn't handled, prepare to throw it
            if (!exceptionHandled) {
                // The user wants to cancel, convert to a cancel
                if (cancel.get()) {
                    failure = new TransportRequestHandlerCanceledException();
                } else {
                    failure = e;
                }
                break;
            }

            // Exception handled, loop to retry
        }
    } while (true);

    if (failure != null) {
        throw failure;
    }

    for (final TransportRequestHandler requestHandler : requestHandlers) {
        requestHandler.handleSuccess(this, request);
    }
}

From source file:io.pravega.client.stream.impl.ReaderGroupStateManager.java

private Map<Segment, Long> acquireSegment(long timeLag) throws ReinitializationRequiredException {
    AtomicReference<Map<Segment, Long>> result = new AtomicReference<>();
    AtomicBoolean reinitRequired = new AtomicBoolean(false);
    sync.updateState(state -> {
        if (!state.isReaderOnline(readerId)) {
            reinitRequired.set(true);
            return null;
        }
        int toAcquire = calculateNumSegmentsToAcquire(state);
        if (toAcquire == 0) {
            result.set(Collections.emptyMap());
            return null;
        }
        Map<Segment, Long> unassignedSegments = state.getUnassignedSegments();
        Map<Segment, Long> acquired = new HashMap<>(toAcquire);
        List<ReaderGroupStateUpdate> updates = new ArrayList<>(toAcquire);
        Iterator<Entry<Segment, Long>> iter = unassignedSegments.entrySet().iterator();
        for (int i = 0; i < toAcquire; i++) {
            assert iter.hasNext();
            Entry<Segment, Long> segment = iter.next();
            acquired.put(segment.getKey(), segment.getValue());
            updates.add(new AcquireSegment(readerId, segment.getKey()));
        }
        updates.add(new UpdateDistanceToTail(readerId, timeLag));
        result.set(acquired);
        return updates;
    });
    if (reinitRequired.get()) {
        throw new ReinitializationRequiredException();
    }
    acquireTimer.reset(calculateAcquireTime(sync.getState()));
    return result.get();
}

From source file:org.apache.tinkerpop.gremlin.server.GremlinDriverIntegrateTest.java

@Test
public void shouldProcessSessionRequestsInOrder() throws Exception {
    final Cluster cluster = Cluster.open();
    final Client client = cluster.connect(name.getMethodName());

    final ResultSet rsFive = client.submit("Thread.sleep(5000);'five'");
    final ResultSet rsZero = client.submit("'zero'");

    final CompletableFuture<List<Result>> futureFive = rsFive.all();
    final CompletableFuture<List<Result>> futureZero = rsZero.all();

    final AtomicBoolean hit = new AtomicBoolean(false);
    while (!futureFive.isDone()) {
        // futureZero can't finish before futureFive - racy business here?
        assertThat(futureZero.isDone(), is(false));
        hit.set(true);
    }

    // should have entered the loop at least once and thus proven that futureZero didn't return ahead of
    // futureFive
    assertThat(hit.get(), is(true));

    assertEquals("zero", futureZero.get().get(0).getString());
    assertEquals("five", futureFive.get(10, TimeUnit.SECONDS).get(0).getString());
}

From source file:org.apache.flume.channel.kafka.TestKafkaChannel.java

private List<Event> pullEvents(final KafkaChannel channel, ExecutorCompletionService<Void> submitterSvc,
        final int total, final boolean testRollbacks, final boolean retryAfterRollback) {
    final List<Event> eventsPulled = Collections.synchronizedList(new ArrayList<Event>(50));
    final CyclicBarrier barrier = new CyclicBarrier(5);
    final AtomicInteger counter = new AtomicInteger(0);
    final AtomicInteger rolledBackCount = new AtomicInteger(0);
    final AtomicBoolean startedGettingEvents = new AtomicBoolean(false);
    final AtomicBoolean rolledBack = new AtomicBoolean(false);
    for (int k = 0; k < 5; k++) {
        final int index = k;
        submitterSvc.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                Transaction tx = null;
                final List<Event> eventsLocal = Lists.newLinkedList();
                int takenByThisThread = 0;
                channel.registerThread();
                Thread.sleep(1000);
                barrier.await();
                while (counter.get() < (total - rolledBackCount.get())) {
                    if (tx == null) {
                        tx = channel.getTransaction();
                        tx.begin();
                    }
                    try {
                        Event e = channel.take();
                        if (e != null) {
                            startedGettingEvents.set(true);
                            eventsLocal.add(e);
                        } else {
                            if (testRollbacks && index == 4 && (!rolledBack.get())
                                    && startedGettingEvents.get()) {
                                tx.rollback();
                                tx.close();
                                tx = null;
                                rolledBack.set(true);
                                final int eventsLocalSize = eventsLocal.size();
                                eventsLocal.clear();
                                if (!retryAfterRollback) {
                                    rolledBackCount.set(eventsLocalSize);
                                    return null;
                                }
                            } else {
                                tx.commit();
                                tx.close();
                                tx = null;
                                eventsPulled.addAll(eventsLocal);
                                counter.getAndAdd(eventsLocal.size());
                                eventsLocal.clear();
                            }
                        }
                    } catch (Exception ex) {
                        eventsLocal.clear();
                        if (tx != null) {
                            tx.rollback();
                            tx.close();
                        }
                        tx = null;
                        ex.printStackTrace();
                    }
                }
                // Close txn.
                return null;
            }
        });
    }
    return eventsPulled;
}