Example usage for java.util.concurrent.atomic AtomicInteger incrementAndGet

Introduction

This page lists example usages of java.util.concurrent.atomic.AtomicInteger.incrementAndGet, drawn from open source projects.

Prototype

public final int incrementAndGet() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd.
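
Before the examples, a minimal self-contained sketch (not taken from any of the sources on this page; the class name is illustrative) of the return-value contract: incrementAndGet returns the updated value, whereas getAndIncrement returns the previous one.

import java.util.concurrent.atomic.AtomicInteger;

public class IncrementAndGetDemo {
    public static void main(String[] args) {
        AtomicInteger counter = new AtomicInteger(0);

        // incrementAndGet atomically adds 1 and returns the NEW value.
        int updated = counter.incrementAndGet();  // counter is 1, updated is 1

        // getAndIncrement atomically adds 1 but returns the OLD value.
        int previous = counter.getAndIncrement(); // counter is 2, previous is 1

        System.out.println(updated + " " + previous + " " + counter.get()); // 1 1 2
    }
}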

Usage

From source file: de.hybris.platform.test.TransactionTest.java

@Test
public void testLocking() throws Exception {

    if (Config.isHSQLDBUsed()) {
        LOG.warn("HDSQLDB doesnt seem to support SELECT FOR UPDATE properly so we don't test it any more");
        return;/* w w  w.  j  a v a 2  s  . c o  m*/
    }

    final ProductManager productManager = ProductManager.getInstance();

    final Currency curr = C2LManager.getInstance().createCurrency("TestCurr");

    /** Verify that we can begin a transaction, lock an entity, then commit without an exception occurring. */
    {
        final Transaction transaction = Transaction.current();
        try {
            assertNotNull("Transaction object is null", transaction);
            assertFalse("A previous transaction is already running.", transaction.isRunning());
            transaction.begin();
            final Product productForTest1 = productManager.createProduct("transactionLockingTest1");
            transaction.commit();
            transaction.begin();
            transaction.lock(productForTest1);
            transaction.commit();
        } catch (final Exception e) {
            transaction.rollback();
            throw e;
        }
    }

    {
        /** Verify that an IllegalStateException is thrown if we attempt to lock outside of a transaction. */
        final Transaction transaction = Transaction.current();
        try {
            assertNotNull("Transaction object is null", transaction);
            assertFalse("A previous transaction is already running.", transaction.isRunning());
            final Product productForTest2 = productManager.createProduct("transactionLockingTest2");
            transaction.lock(productForTest2);
            fail("Expected IllegalStateException to occur when attempting to lock an item outside of a transaction.");
        }
        // An IllegalStateException is expected for this test to pass.
        catch (final IllegalStateException e) {
            //
        }
    }

    /**
     * Verify that if we attempt to acquire a lock on the same entity multiple times from the same transaction, that
     * no errors occur.
     */
    {
        final Transaction transaction = Transaction.current();
        try {
            assertNotNull("Transaction object is null", transaction);
            assertFalse("A previous transaction is already running.", transaction.isRunning());
            final Product productForTest3 = productManager.createProduct("transactionLockingTest3");
            transaction.begin();
            for (int i = 0; i < 10; i++) {
                transaction.lock(productForTest3);
            }
            transaction.commit();
        } catch (final Exception e) {
            transaction.rollback();
            throw e;
        }
    }

    /**
     * Verify that if we begin a transaction, lock an entity, then commit multiple times that a lock can be acquired
     * each time.
     */
    {
        final Transaction transaction = Transaction.current();
        try {
            final Product productForTest4 = productManager.createProduct("transactionLockingTest4");
            for (int i = 0; i < 10; i++) {
                assertNotNull("Transaction object is null", transaction);
                assertFalse("A previous transaction is already running.", transaction.isRunning());
                transaction.begin();
                transaction.lock(productForTest4);
                transaction.commit();
            }
        } catch (final Exception e) {
            transaction.rollback();
            throw e;
        }
    }

    /**
     * Verify that if we begin a transaction, lock an entity, then rollback multiple times that a lock can be acquired
     * each time.
     */
    {
        final Transaction transaction = Transaction.current();
        try {
            final Product productForTest5 = productManager.createProduct("transactionLockingTest5");
            for (int i = 0; i < 10; i++) {
                assertNotNull("Transaction object is null", transaction);
                assertFalse("A previous transaction is already running.", transaction.isRunning());
                transaction.begin();
                transaction.lock(productForTest5);
                transaction.rollback();
            }
        } catch (final Exception e) {
            transaction.rollback();
            throw e;
        }
    }

    /**
     * Verify that we can not lock after a transaction has been committed.
     */
    {
        final Transaction transaction = Transaction.current();
        try {
            final Product productForTest6 = productManager.createProduct("transactionLockingTest6");
            assertNotNull("Transaction object is null", transaction);
            assertFalse("A previous transaction is already running.", transaction.isRunning());
            transaction.begin();
            transaction.commit();
            transaction.lock(productForTest6);
            fail("A lock was acquired after the transaction has been committed.");
        }
        // An IllegalStateException is expected for the test to pass
        catch (final IllegalStateException e) {
            //
        }
    }

    /**
     * Verify that we can not lock after a transaction has been rolled back.
     */
    {
        final Transaction transaction = Transaction.current();
        try {
            final Product productForTest7 = productManager.createProduct("transactionLockingTest7");
            assertNotNull("Transaction object is null", transaction);
            assertFalse("A previous transaction is already running.", transaction.isRunning());
            transaction.begin();
            transaction.rollback();
            transaction.lock(productForTest7);
            fail("A lock was acquired after the transaction has been rolled back.");
        }
        // An IllegalStateException is expected for the test to pass
        catch (final IllegalStateException e) {
            //
        }
    }

    /**
     * Verify multiple threads attempting to lock the same object and the behavior that occurs.
     */
    try {
        final Order lockedOrder = OrderManager.getInstance().createOrder(//
                "lockedOrder", //
                JaloSession.getCurrentSession().getUser(), //
                curr, //
                Calendar.getInstance().getTime(), //
                true);
        lockedOrder.setTotal(0.0d);

        final ComposedType composedType = lockedOrder.getComposedType();

        final String checkQuery = "SELECT "
                + composedType.getAttributeDescriptorIncludingPrivate(Order.TOTAL).getDatabaseColumn()
                + " FROM " + composedType.getTable() + " WHERE PK = ?";

        final int THREADS = 16;

        // Create an executor service that uses 16 threads to test
        // the transaction locking
        final ExecutorService executor = Executors.newFixedThreadPool(//
                THREADS, //
                new ThreadFactory() {
                    final Tenant threadFactoryTenant = Registry.getCurrentTenant();

                    @Override
                    public Thread newThread(final Runnable runnable) {
                        return new Thread() {
                            protected void prepareThread() {
                                Registry.setCurrentTenant(threadFactoryTenant);
                            }

                            protected void unprepareThread() {
                                JaloSession.deactivate();
                                Registry.unsetCurrentTenant();
                            }

                            @Override
                            public void run() {
                                try {
                                    prepareThread();
                                    runnable.run();
                                } finally {
                                    unprepareThread();
                                }
                            }
                        };
                    }
                });

        // Create 16 (THREADS) callables that will concurrently
        // attempt to lock the same object.
        final AtomicInteger stackCounter = new AtomicInteger();
        final List<Callable<Object>> callables = new ArrayList<Callable<Object>>();
        for (int j = 0; j < THREADS; j++) {
            callables.add(new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                    final PK pk = lockedOrder.getPK();
                    if (pk == null) {
                        throw new IllegalStateException();
                    }

                    for (int k = 0; k < 100; k++) {
                        final Transaction transaction = Transaction.current();

                        assertNotNull("Transaction object is null", transaction);

                        PreparedStatement statement = null;
                        ResultSet resultSet = null;
                        try {
                            transaction.begin();
                            transaction.setTransactionIsolationLevel(Connection.TRANSACTION_READ_COMMITTED);
                            transaction.lock(lockedOrder);
                            final int stack = stackCounter.incrementAndGet();
                            if (stack > 1) {
                                stackCounter.decrementAndGet();
                                throw new IllegalStateException("Got " + stack + " threads in protected area!");
                            }

                            statement = transaction.getTXBoundConnection().prepareStatement(checkQuery);
                            statement.setLong(1, lockedOrder.getPK().getLongValue());
                            resultSet = statement.executeQuery();
                            if (!resultSet.next()) {
                                throw new IllegalStateException("Expected result set");
                            }
                            final double dbValue = resultSet.getDouble(1);
                            final double jaloValue = lockedOrder.getTotal();
                            if (Math.abs(dbValue - jaloValue) >= 1d) {
                                throw new IllegalStateException(
                                        "Jalo value differs from db value : " + jaloValue + "<>" + dbValue);
                            }

                            lockedOrder.setTotal(jaloValue + 1.0d);

                            stackCounter.decrementAndGet();
                            transaction.commit();
                        } catch (final Exception e) {
                            e.printStackTrace();
                            transaction.rollback();
                            throw e;
                        } finally {
                            Utilities.tryToCloseJDBC(null, statement, resultSet, true);
                        }
                    }
                    return null;
                }
            });
        }
        // Get the value of each future to determine if an exception was thrown.
        for (final Future<Object> future : executor.invokeAll(callables)) {
            future.get();
        }
        final double expected = THREADS * 100;
        assertEquals(//
                "Total value of order after all transaction differs", //
                expected, //
                ((Order) JaloSession.getCurrentSession().getItem(lockedOrder.getPK())).getTotal(), 0.000001);
    } catch (final IllegalStateException e) {
        e.printStackTrace();
        throw e;
    }

    /**
     * Verify changes to a value on a lock
     */

    // TODO:

    /**
     * Tests related to caching
     */

    // TODO:
}
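
The stackCounter idiom in the multi-threaded part of this test is a reusable probe for asserting that at most one thread is inside a critical section. A stripped-down sketch of just that pattern (class and method names are illustrative, not from the test):

import java.util.concurrent.atomic.AtomicInteger;

public class ExclusionProbe {
    private final AtomicInteger occupancy = new AtomicInteger(0);

    // Call on entering the section that is supposed to be mutually exclusive.
    void enter() {
        int inside = occupancy.incrementAndGet();
        if (inside > 1) {
            occupancy.decrementAndGet();
            throw new IllegalStateException("Got " + inside + " threads in protected area!");
        }
    }

    // Call on leaving the section, before releasing the outer lock.
    void exit() {
        occupancy.decrementAndGet();
    }

    public static void main(String[] args) {
        ExclusionProbe probe = new ExclusionProbe();
        probe.enter(); // single thread: passes
        probe.exit();
    }
}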

From source file: alfio.manager.support.CustomMessageManager.java

public void sendMessages(String eventName, Optional<Integer> categoryId, List<MessageModification> input,
        String username) {

    Event event = eventManager.getSingleEvent(eventName, username);
    preview(event, input, username);//dry run for checking the syntax
    Organization organization = eventManager.loadOrganizer(event, username);
    AtomicInteger counter = new AtomicInteger();
    Map<String, List<MessageModification>> byLanguage = input.stream()
            .collect(Collectors.groupingBy(m -> m.getLocale().getLanguage()));

    sendMessagesExecutor.execute(() -> {
        categoryId.map(id -> ticketRepository.findConfirmedByCategoryId(event.getId(), id))
                .orElseGet(() -> ticketRepository.findAllConfirmed(event.getId())).stream()
                .filter(t -> isNotBlank(t.getFullName()) && isNotBlank(t.getEmail())).parallel().map(t -> {
                    Model model = new ExtendedModelMap();
                    model.addAttribute("eventName", eventName);
                    model.addAttribute("fullName", t.getFullName());
                    model.addAttribute("organizationName", organization.getName());
                    model.addAttribute("organizationEmail", organization.getEmail());
                    model.addAttribute("reservationURL",
                            ticketReservationManager.reservationUrl(t.getTicketsReservationId(), event));
                    model.addAttribute("reservationID",
                            ticketReservationManager.getShortReservationID(event, t.getTicketsReservationId()));
                    model.addAttribute("ticketURL",
                            ticketReservationManager.ticketUpdateUrl(event, t.getUuid()));
                    return Triple.of(t, t.getEmail(), model);
                }).forEach(triple -> {
                    Ticket ticket = triple.getLeft();
                    MessageModification m = Optional.ofNullable(byLanguage.get(ticket.getUserLanguage()))
                            .orElseGet(() -> byLanguage.get(byLanguage.keySet().stream().findFirst()
                                    .orElseThrow(IllegalStateException::new)))
                            .get(0);
                    Model model = triple.getRight();
                    String subject = renderResource(m.getSubject(), model, m.getLocale(), templateManager);
                    String text = renderResource(m.getText(), model, m.getLocale(), templateManager);
                    List<Mailer.Attachment> attachments = new ArrayList<>();
                    if (m.isAttachTicket()) {
                        ticketReservationManager.findById(ticket.getTicketsReservationId())
                                .ifPresent(reservation -> {
                                    ticketCategoryRepository.getByIdAndActive(ticket.getCategoryId())
                                            .ifPresent(ticketCategory -> {
                                                attachments.add(generateTicketAttachment(ticket, reservation,
                                                        ticketCategory, organization));
                                            });
                                });
                    }
                    counter.incrementAndGet();
                    notificationManager.sendSimpleEmail(event, triple.getMiddle(), subject, () -> text,
                            attachments);
                });
    });

}
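
counter here is the standard workaround for counting inside a lambda: a captured local must be effectively final, so a plain int cannot be incremented, but the reference to an AtomicInteger can stay final while its value changes. A minimal sketch of the same idea, independent of the alf.io classes (names are illustrative):

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class ParallelCounterDemo {
    public static void main(String[] args) {
        List<String> recipients = List.of("a@example.org", "b@example.org", "c@example.org");
        AtomicInteger sent = new AtomicInteger();

        recipients.parallelStream().forEach(recipient -> {
            // Imagine the actual email send happening here.
            sent.incrementAndGet(); // safe even when worker threads run concurrently
        });

        System.out.println("sent " + sent.get() + " messages"); // sent 3 messages
    }
}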

From source file: com.linkedin.pinot.requestHandler.BrokerRequestHandler.java

private Object getDataTableFromBrokerRequestList(final BrokerRequest federatedBrokerRequest,
        final ReduceService reduceService, final List<BrokerRequest> requests,
        BucketingSelection overriddenSelection, final ScatterGatherStats scatterGatherStats,
        final long requestId) throws InterruptedException {
    // Step 1
    long scatterGatherStartTime = System.nanoTime();
    long queryRoutingTime = 0;
    Map<BrokerRequest, Pair<CompositeFuture<ServerInstance, ByteBuf>, ScatterGatherStats>> responseFuturesList = new HashMap<BrokerRequest, Pair<CompositeFuture<ServerInstance, ByteBuf>, ScatterGatherStats>>();
    for (BrokerRequest request : requests) {
        final long routingStartTime = System.nanoTime();
        RoutingTableLookupRequest rtRequest = new RoutingTableLookupRequest(
                request.getQuerySource().getTableName());
        Map<ServerInstance, SegmentIdSet> segmentServices = _routingTable.findServers(rtRequest);
        if (segmentServices == null || segmentServices.isEmpty()) {
            LOGGER.info("Not found ServerInstances to Segments Mapping for Table - {}",
                    rtRequest.getTableName());
            continue;
        }
        LOGGER.debug("Find ServerInstances to Segments Mapping for table - {}", rtRequest.getTableName());
        for (ServerInstance serverInstance : segmentServices.keySet()) {
            LOGGER.debug("{} : {}", serverInstance, segmentServices.get(serverInstance));
        }
        queryRoutingTime += System.nanoTime() - routingStartTime;
        ScatterGatherStats respStats = new ScatterGatherStats();

        // Step 2-4
        scatterGatherStartTime = System.nanoTime();
        ScatterGatherRequestImpl scatterRequest = new ScatterGatherRequestImpl(request, segmentServices,
                _replicaSelection, ReplicaSelectionGranularity.SEGMENT_ID_SET, request.getBucketHashKey(), 0,
                //TODO: Speculative Requests not yet supported
                overriddenSelection, requestId, _brokerTimeOutMs, _brokerId);
        responseFuturesList.put(request, Pair.of(
                _scatterGatherer.scatterGather(scatterRequest, scatterGatherStats, _brokerMetrics), respStats));
    }
    _brokerMetrics.addPhaseTiming(federatedBrokerRequest, BrokerQueryPhase.QUERY_ROUTING, queryRoutingTime);

    long scatterGatherTime = 0;
    long deserializationTime = 0;
    //Step 5 - Deserialize Responses and build instance response map
    final Map<ServerInstance, DataTable> instanceResponseMap = new HashMap<ServerInstance, DataTable>();
    final AtomicInteger responseSeq = new AtomicInteger(-1);
    {
        for (BrokerRequest request : responseFuturesList.keySet()) {
            CompositeFuture<ServerInstance, ByteBuf> compositeFuture = responseFuturesList.get(request)
                    .getKey();
            ScatterGatherStats respStats = responseFuturesList.get(request).getValue();

            Map<ServerInstance, ByteBuf> responseMap = null;
            try {
                responseMap = compositeFuture.get();
                // The 'get' call above waits for all the responses to come in before returning.
                // compositeFuture has the individual response times of each underlying future.
                // We get a map of server to the response time of the server here.
                Map<String, Long> responseTimeMap = compositeFuture.getResponseTimes();
                respStats.setResponseTimeMillis(responseTimeMap);
                scatterGatherStats.merge(respStats);
            } catch (ExecutionException e) {
                LOGGER.warn("Caught exception while fetching response", e);
                _brokerMetrics.addMeteredQueryValue(federatedBrokerRequest,
                        BrokerMeter.REQUEST_FETCH_EXCEPTIONS, 1);
            }

            scatterGatherTime += System.nanoTime() - scatterGatherStartTime;

            final long deserializationStartTime = System.nanoTime();

            Map<ServerInstance, Throwable> errors = compositeFuture.getError();

            if (null != responseMap) {
                for (Entry<ServerInstance, ByteBuf> responseEntry : responseMap.entrySet()) {
                    try {
                        ByteBuf b = responseEntry.getValue();
                        byte[] b2 = new byte[b.readableBytes()];
                        if (b2.length == 0) { // b2 was just allocated, so it can never be null
                            continue;
                        }
                        b.readBytes(b2);
                        DataTable r2 = new DataTable(b2);
                        // Hybrid requests may get response from same instance, so we need to distinguish them.
                        ServerInstance decoratedServerInstance = new ServerInstance(
                                responseEntry.getKey().getHostname(), responseEntry.getKey().getPort(),
                                responseSeq.incrementAndGet());
                        if (errors != null && errors.containsKey(responseEntry.getKey())) {
                            Throwable throwable = errors.get(responseEntry.getKey());
                            if (throwable != null) {
                                r2.getMetadata().put("exception",
                                        new RequestProcessingException(throwable).toString());
                                _brokerMetrics.addMeteredQueryValue(federatedBrokerRequest,
                                        BrokerMeter.REQUEST_FETCH_EXCEPTIONS, 1);
                            }
                        }
                        instanceResponseMap.put(decoratedServerInstance, r2);
                    } catch (Exception ex) {
                        LOGGER.error("Got exceptions in collect query result for instance "
                                + responseEntry.getKey() + ", error: " + ex.getMessage(), ex);
                        _brokerMetrics.addMeteredQueryValue(federatedBrokerRequest,
                                BrokerMeter.REQUEST_DESERIALIZATION_EXCEPTIONS, 1);
                    }
                }
            }
            deserializationTime += System.nanoTime() - deserializationStartTime;
        }
    }
    _brokerMetrics.addPhaseTiming(federatedBrokerRequest, BrokerQueryPhase.SCATTER_GATHER, scatterGatherTime);
    _brokerMetrics.addPhaseTiming(federatedBrokerRequest, BrokerQueryPhase.DESERIALIZATION,
            deserializationTime);

    // Step 6 : Do the reduce and return
    try {
        return _brokerMetrics.timeQueryPhase(federatedBrokerRequest, BrokerQueryPhase.REDUCE,
                new Callable<BrokerResponse>() {
                    @Override
                    public BrokerResponse call() {
                        BrokerResponse returnValue = reduceService.reduceOnDataTable(federatedBrokerRequest,
                                instanceResponseMap);
                        _brokerMetrics.addMeteredQueryValue(federatedBrokerRequest,
                                BrokerMeter.DOCUMENTS_SCANNED, returnValue.getNumDocsScanned());
                        return returnValue;
                    }
                });
    } catch (Exception e) {
        // Shouldn't happen, this is only here because timeQueryPhase() can throw a checked exception, even though the nested callable can't.
        LOGGER.error("Caught exception while processing query", e);
        Utils.rethrowException(e);
        throw new AssertionError("Should not reach this");
    }
}
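
responseSeq starts at -1 and is bumped with incrementAndGet purely to mint distinct keys: as the comment in the code notes, hybrid requests can return two responses from the same host and port, which would otherwise collide in instanceResponseMap. The same idea in isolation (hypothetical key format and names):

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;

public class SequenceKeyDemo {
    // Starts at -1 so the first incrementAndGet() hands out 0, as in the broker code above.
    private static final AtomicInteger SEQ = new AtomicInteger(-1);

    public static void main(String[] args) {
        Map<String, String> responses = new HashMap<>();
        // Two responses from the same host:port would collide on a plain key;
        // the sequence number keeps them distinct.
        responses.put("host1:8080#" + SEQ.incrementAndGet(), "offline response");
        responses.put("host1:8080#" + SEQ.incrementAndGet(), "realtime response");
        System.out.println(responses.size()); // 2
    }
}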

From source file: org.apache.hadoop.yarn.server.nodemanager.TestNodeStatusUpdater.java

@Test
public void testRMVersionLessThanMinimum() throws InterruptedException, IOException {
    final AtomicInteger numCleanups = new AtomicInteger(0);
    YarnConfiguration conf = createNMConfig();
    conf.set(YarnConfiguration.NM_RESOURCEMANAGER_MINIMUM_VERSION, "3.0.0");
    nm = new NodeManager() {
        @Override
        protected NodeStatusUpdater createNodeStatusUpdater(Context context, Dispatcher dispatcher,
                NodeHealthCheckerService healthChecker) {
            MyNodeStatusUpdater myNodeStatusUpdater = new MyNodeStatusUpdater(context, dispatcher,
                    healthChecker, metrics);
            MyResourceTracker2 myResourceTracker2 = new MyResourceTracker2();
            myResourceTracker2.heartBeatNodeAction = NodeAction.NORMAL;
            myResourceTracker2.rmVersion = "3.0.0";
            myNodeStatusUpdater.resourceTracker = myResourceTracker2;
            return myNodeStatusUpdater;
        }

        @Override
        protected ContainerManagerImpl createContainerManager(Context context, ContainerExecutor exec,
                DeletionService del, NodeStatusUpdater nodeStatusUpdater, ApplicationACLsManager aclsManager,
                LocalDirsHandlerService dirsHandler) {
            return new ContainerManagerImpl(context, exec, del, nodeStatusUpdater, metrics, dirsHandler) {

                @Override
                public void cleanUpApplicationsOnNMShutDown() {
                    super.cleanUpApplicationsOnNMShutDown();
                    numCleanups.incrementAndGet();
                }
            };
        }
    };

    nm.init(conf);
    nm.start();

    // NM takes a while to reach the STARTED state.
    int waitCount = 0;
    while (nm.getServiceState() != STATE.STARTED && waitCount++ != 20) {
        LOG.info("Waiting for NM to stop..");
        Thread.sleep(1000);
    }
    Assert.assertTrue(nm.getServiceState() == STATE.STARTED);
    nm.stop();
}
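
numCleanups shows a common test idiom: override a lifecycle hook, count its invocations with an AtomicInteger, and assert on the count afterwards. A condensed, hypothetical sketch of the idiom (plain Java; run with -ea so the assert fires):

import java.util.concurrent.atomic.AtomicInteger;

public class HookCountDemo {
    static class Service {
        void shutdown() { /* real cleanup would happen here */ }
    }

    public static void main(String[] args) {
        AtomicInteger cleanups = new AtomicInteger(0);

        // Subclass the hook so every invocation is counted.
        Service service = new Service() {
            @Override
            void shutdown() {
                super.shutdown();
                cleanups.incrementAndGet();
            }
        };

        service.shutdown();
        service.shutdown();
        assert cleanups.get() == 2 : "expected exactly two cleanups";
    }
}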

From source file: com.streamsets.pipeline.stage.origin.jdbc.cdc.oracle.OracleCDCSource.java

private void discardOldUncommitted(LocalDateTime startTime) {
    if (!useLocalBuffering) {
        return;
    }
    bufferedRecordsLock.lock();
    try {
        AtomicInteger txnDiscarded = new AtomicInteger(0);
        AtomicInteger recordsDiscarded = new AtomicInteger(0);
        LOG.info("Removing expired transactions.");
        Iterator<Map.Entry<TransactionIdKey, HashQueue<RecordSequence>>> iter = bufferedRecords.entrySet()
                .iterator();
        while (iter.hasNext()) {
            Map.Entry<TransactionIdKey, HashQueue<RecordSequence>> entry = iter.next();
            if (expired(entry, startTime)) {
                LOG.info("Removing transaction with id: " + entry.getKey().txnId);
                if (!configBean.discardExpired) {
                    for (RecordSequence x : entry.getValue()) {
                        try {
                            Record record = generateRecord(x.sqlString, x.headers, x.opCode);
                            if (record != null) {
                                expiredRecords.offer(new RecordTxnInfo(record, entry.getKey().txnId,
                                        entry.getKey().txnStartTime));
                            }
                        } catch (UnparseableSQLException ex) {
                            unparseable.offer(x.sqlString);
                        } catch (Exception ex) {
                            LOG.error("Error while generating expired record from SQL: " + x.sqlString);
                        }
                        recordsDiscarded.incrementAndGet();
                    }
                }
                txnDiscarded.incrementAndGet();
                iter.remove();
            }
        }
        LOG.info(Utils.format("Removed {} transactions and a total of {} records.", txnDiscarded.get(),
                recordsDiscarded.get()));
    } finally {
        bufferedRecordsLock.unlock();
    }
}

From source file: com.twitter.distributedlog.BKLogHandler.java

private void asyncGetLedgerListInternal(final Comparator<LogSegmentMetadata> comparator,
        final LogSegmentFilter segmentFilter, final Watcher watcher,
        final GenericCallback<List<LogSegmentMetadata>> finalCallback, final AtomicInteger numAttemptsLeft,
        final AtomicLong backoffMillis) {
    final Stopwatch stopwatch = Stopwatch.createStarted();
    try {
        if (LOG.isTraceEnabled()) {
            LOG.trace("Async getting ledger list for {}.", getFullyQualifiedName());
        }
        final GenericCallback<List<LogSegmentMetadata>> callback = new GenericCallback<List<LogSegmentMetadata>>() {
            @Override
            public void operationComplete(int rc, List<LogSegmentMetadata> result) {
                long elapsedMicros = stopwatch.stop().elapsed(TimeUnit.MICROSECONDS);
                if (KeeperException.Code.OK.intValue() != rc) {
                    getListStat.registerFailedEvent(elapsedMicros);
                } else {
                    if (LogSegmentFilter.DEFAULT_FILTER == segmentFilter) {
                        isFullListFetched.set(true);
                    }
                    getListStat.registerSuccessfulEvent(elapsedMicros);
                }
                finalCallback.operationComplete(rc, result);
            }
        };
        zooKeeperClient.get().getChildren(logMetadata.getLogSegmentsPath(), watcher,
                new AsyncCallback.Children2Callback() {
                    @Override
                    public void processResult(final int rc, final String path, final Object ctx,
                            final List<String> children, final Stat stat) {
                        if (KeeperException.Code.OK.intValue() != rc) {

                            if ((KeeperException.Code.CONNECTIONLOSS.intValue() == rc
                                    || KeeperException.Code.SESSIONEXPIRED.intValue() == rc
                                    || KeeperException.Code.SESSIONMOVED.intValue() == rc)
                                    && numAttemptsLeft.decrementAndGet() > 0) {
                                long backoffMs = backoffMillis.get();
                                backoffMillis.set(Math.min(conf.getZKRetryBackoffMaxMillis(), 2 * backoffMs));
                                scheduler.schedule(new Runnable() {
                                    @Override
                                    public void run() {
                                        asyncGetLedgerListInternal(comparator, segmentFilter, watcher,
                                                finalCallback, numAttemptsLeft, backoffMillis);
                                    }
                                }, backoffMs, TimeUnit.MILLISECONDS);
                                return;
                            }
                            callback.operationComplete(rc, null);
                            return;
                        }

                        if (LOG.isTraceEnabled()) {
                            LOG.trace("Got ledger list from {} : {}", logMetadata.getLogSegmentsPath(),
                                    children);
                        }

                        ledgerListWatchSet.set(true);
                        Set<String> segmentsReceived = new HashSet<String>();
                        segmentsReceived.addAll(segmentFilter.filter(children));
                        Set<String> segmentsAdded;
                        final Set<String> removedSegments = Collections.synchronizedSet(new HashSet<String>());
                        final Map<String, LogSegmentMetadata> addedSegments = Collections
                                .synchronizedMap(new HashMap<String, LogSegmentMetadata>());
                        Pair<Set<String>, Set<String>> segmentChanges = logSegmentCache.diff(segmentsReceived);
                        segmentsAdded = segmentChanges.getLeft();
                        removedSegments.addAll(segmentChanges.getRight());

                        if (segmentsAdded.isEmpty()) {
                            if (LOG.isTraceEnabled()) {
                                LOG.trace("No segments added for {}.", getFullyQualifiedName());
                            }

                            // update the cache before fetch
                            logSegmentCache.update(removedSegments, addedSegments);

                            List<LogSegmentMetadata> segmentList;
                            try {
                                segmentList = getCachedLogSegments(comparator);
                            } catch (UnexpectedException e) {
                                callback.operationComplete(KeeperException.Code.DATAINCONSISTENCY.intValue(),
                                        null);
                                return;
                            }
                            callback.operationComplete(KeeperException.Code.OK.intValue(), segmentList);
                            notifyUpdatedLogSegments(segmentList);
                            if (!removedSegments.isEmpty()) {
                                notifyOnOperationComplete();
                            }
                            return;
                        }

                        final AtomicInteger numChildren = new AtomicInteger(segmentsAdded.size());
                        final AtomicInteger numFailures = new AtomicInteger(0);
                        for (final String segment : segmentsAdded) {
                            metadataStore.getLogSegment(logMetadata.getLogSegmentPath(segment))
                                    .addEventListener(new FutureEventListener<LogSegmentMetadata>() {

                                        @Override
                                        public void onSuccess(LogSegmentMetadata result) {
                                            addedSegments.put(segment, result);
                                            complete();
                                        }

                                        @Override
                                        public void onFailure(Throwable cause) {
                                            // NONODE exception is possible in two cases
                                            // 1. A log segment was deleted by truncation between the call to getChildren and read
                                            // attempt on the znode corresponding to the segment
                                            // 2. In progress segment has been completed => inprogress ZNode does not exist
                                            if (cause instanceof KeeperException
                                                    && KeeperException.Code.NONODE == ((KeeperException) cause)
                                                            .code()) {
                                                removedSegments.add(segment);
                                                complete();
                                            } else {
                                                // fail fast
                                                if (1 == numFailures.incrementAndGet()) {
                                                    int rcToReturn = KeeperException.Code.SYSTEMERROR
                                                            .intValue();
                                                    if (cause instanceof KeeperException) {
                                                        rcToReturn = ((KeeperException) cause).code()
                                                                .intValue();
                                                    } else if (cause instanceof ZKException) {
                                                        rcToReturn = ((ZKException) cause)
                                                                .getKeeperExceptionCode().intValue();
                                                    }
                                                    // TODO: ideally we'd return a DistributedLog-specific response code here.
                                                    callback.operationComplete(rcToReturn, null);
                                                    return;
                                                }
                                            }
                                        }

                                        private void complete() {
                                            if (0 == numChildren.decrementAndGet() && numFailures.get() == 0) {
                                                // update the cache only when fetch completed
                                                logSegmentCache.update(removedSegments, addedSegments);
                                                List<LogSegmentMetadata> segmentList;
                                                try {
                                                    segmentList = getCachedLogSegments(comparator);
                                                } catch (UnexpectedException e) {
                                                    callback.operationComplete(
                                                            KeeperException.Code.DATAINCONSISTENCY.intValue(),
                                                            null);
                                                    return;
                                                }
                                                callback.operationComplete(KeeperException.Code.OK.intValue(),
                                                        segmentList);
                                                notifyUpdatedLogSegments(segmentList);
                                                notifyOnOperationComplete();
                                            }
                                        }
                                    });
                        }
                    }
                }, null);
    } catch (ZooKeeperClient.ZooKeeperConnectionException e) {
        getListStat.registerFailedEvent(stopwatch.stop().elapsed(TimeUnit.MICROSECONDS));
        finalCallback.operationComplete(KeeperException.Code.CONNECTIONLOSS.intValue(), null);
    } catch (InterruptedException e) {
        getListStat.registerFailedEvent(stopwatch.stop().elapsed(TimeUnit.MICROSECONDS));
        finalCallback.operationComplete(KeeperException.Code.CONNECTIONLOSS.intValue(), null);
    }
}
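
The two counters in this handler implement an async fan-out: numChildren counts down as each child fetch completes and the last one in fires the final callback, while numFailures.incrementAndGet() == 1 guarantees the error path runs exactly once, however many children fail. A toy sketch of that completion logic using CompletableFuture rather than the DistributedLog/ZooKeeper APIs:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;

public class FanOutDemo {
    public static void main(String[] args) throws InterruptedException {
        final int tasks = 5;
        AtomicInteger remaining = new AtomicInteger(tasks);
        AtomicInteger failures = new AtomicInteger(0);

        for (int i = 0; i < tasks; i++) {
            CompletableFuture.runAsync(() -> { /* fetch one segment */ })
                    .whenComplete((ok, err) -> {
                        if (err != null) {
                            // Only the FIRST failure reports; later ones are swallowed.
                            if (failures.incrementAndGet() == 1) {
                                System.err.println("fan-out failed: " + err);
                            }
                        } else if (remaining.decrementAndGet() == 0 && failures.get() == 0) {
                            // The last successful child completes the whole operation.
                            System.out.println("all " + tasks + " children fetched");
                        }
                    });
        }
        Thread.sleep(200); // crude wait for the async tasks in this toy example
    }
}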

From source file: org.nd4j.linalg.api.test.NDArrayTests.java

@Test
public void testVectorDimensionMulti() {
    INDArray arr = Nd4j.create(Nd4j.linspace(1, 24, 24).data(), new int[] { 4, 3, 2 });
    final AtomicInteger count = new AtomicInteger(0);

    arr.iterateOverDimension(arr.shape().length - 1, new SliceOp() {

        /**
         * Operates on an ndarray slice
         *
         * @param nd the result to operate on
         */
        @Override
        public void operate(INDArray nd) {
            INDArray test = nd;
            if (count.get() == 0) {
                INDArray answer = Nd4j.create(new float[] { 1, 2 }, new int[] { 2 });
                assertEquals(answer, test);
            } else if (count.get() == 1) {
                INDArray answer = Nd4j.create(new float[] { 3, 4 }, new int[] { 2 });
                assertEquals(answer, test);
            } else if (count.get() == 2) {
                INDArray answer = Nd4j.create(new float[] { 5, 6 }, new int[] { 2 });
                assertEquals(answer, test);
            } else if (count.get() == 3) {
                INDArray answer = Nd4j.create(new float[] { 7, 8 }, new int[] { 2 });
                assertEquals(answer, test);
                answer.data().destroy();
            } else if (count.get() == 4) {
                INDArray answer = Nd4j.create(new float[] { 9, 10 }, new int[] { 2 });
                assertEquals(answer, test);
                answer.data().destroy();
            } else if (count.get() == 5) {
                INDArray answer = Nd4j.create(new float[] { 11, 12 }, new int[] { 2 });
                assertEquals(answer, test);
                answer.data().destroy();
            }

            count.incrementAndGet();
        }
    }, false);
}

From source file: au.org.ala.biocache.dao.SearchDAOImpl.java

private int processQueryResults(Map<String, Integer> uidStats, String[] fields, String[] qaFields,
        RecordWriter rw, QueryResponse qr, DownloadDetailsDTO dd, boolean checkLimit,
        AtomicInteger resultsCount) {
    int count = 0;
    for (SolrDocument sd : qr.getResults()) {
        if (sd.getFieldValue("data_resource_uid") != null
                && (!checkLimit || resultsCount.intValue() < MAX_DOWNLOAD_SIZE)) {

            count++;
            // AtomicInteger is already thread-safe; no synchronized block is needed.
            resultsCount.incrementAndGet();

            //add the record
            String[] values = new String[fields.length + qaFields.length];

            //get all the "single" values from the index
            for (int j = 0; j < fields.length; j++) {
                Object value = sd.getFirstValue(fields[j]);
                if (value instanceof Date)
                    values[j] = value == null ? ""
                            : org.apache.commons.lang.time.DateFormatUtils.format((Date) value, "yyyy-MM-dd");
                else
                    values[j] = value == null ? "" : value.toString();
            }

            //now handle the assertions
            java.util.Collection<Object> assertions = sd.getFieldValues("assertions");

            //Handle the case where there a no assertions against a record
            if (assertions == null) {
                assertions = Collections.emptyList();
            }

            for (int k = 0; k < qaFields.length; k++) {
                values[fields.length + k] = Boolean.toString(assertions.contains(qaFields[k]));
            }

            rw.write(values);

            //increment the counters....
            incrementCount(uidStats, sd.getFieldValue("institution_uid"));
            incrementCount(uidStats, sd.getFieldValue("collection_uid"));
            incrementCount(uidStats, sd.getFieldValue("data_provider_uid"));
            incrementCount(uidStats, sd.getFieldValue("data_resource_uid"));
        }
    }
    dd.updateCounts(count);
    return count;
}
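
One caveat in this example: resultsCount is compared against MAX_DOWNLOAD_SIZE in one step and incremented in another, which is a check-then-act race if several threads share the counter. A race-free cap can be derived from incrementAndGet's return value alone; a hedged sketch with illustrative names:

import java.util.concurrent.atomic.AtomicInteger;

public class BoundedCounterDemo {
    private static final int MAX = 1000;
    private static final AtomicInteger written = new AtomicInteger(0);

    // Returns true if this caller won one of the MAX slots, atomically.
    static boolean tryReserveSlot() {
        int claimed = written.incrementAndGet(); // atomic; no external lock needed
        if (claimed > MAX) {
            written.decrementAndGet();           // roll back the over-claim
            return false;
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println(tryReserveSlot()); // true while under the cap
    }
}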

From source file: com.splout.db.integration.TestMultiThreadedQueryAndDeploy.java

@Test
@Ignore // Causes some non-deterministic problems, to be analyzed
public void test() throws Throwable {
    FileUtils.deleteDirectory(new File(TMP_FOLDER));
    new File(TMP_FOLDER).mkdirs();

    createSploutEnsemble(N_QNODES, N_DNODES);
    String[] qNodeAddresses = new String[N_QNODES];
    for (int i = 0; i < N_QNODES; i++) {
        qNodeAddresses[i] = getqNodes().get(i).getAddress();
    }

    final SploutClient client = new SploutClient(qNodeAddresses);
    final Tablespace testTablespace = createTestTablespace(N_DNODES);
    final Random random = new Random(SEED);
    final AtomicBoolean failed = new AtomicBoolean(false);
    final AtomicInteger iteration = new AtomicInteger(0);
    final Set<Integer> iterationsSeen = new HashSet<Integer>();

    deployIteration(0, random, client, testTablespace);

    for (QNode qnode : getqNodes()) {
        // Make sure all QNodes are aware of the first deploy
        // There might be some delay as they have to receive notifications via Hazelcast etc
        long waitedSoFar = 0;
        QueryStatus status = null;
        SploutClient perQNodeClient = new SploutClient(qnode.getAddress());
        do {
            status = perQNodeClient.query(TABLESPACE, "0", "SELECT * FROM " + TABLE + ";", null);
            Thread.sleep(100);
            waitedSoFar += 100;
            if (waitedSoFar > 5000) {
                throw new AssertionError("Waiting too much on a test condition");
            }
        } while (status == null || status.getError() != null);
        log.info("QNode [" + qnode.getAddress() + "] is ready to serve deploy 0.");
    }

    try {
        // Business logic here
        ExecutorService service = Executors.newFixedThreadPool(N_THREADS);

        // These threads will continuously perform queries and check that the results are consistent.
        // They will also count how many deploys have happened since the beginning.
        for (int i = 0; i < N_THREADS; i++) {
            service.submit(new Runnable() {
                @Override
                public void run() {
                    try {
                        while (true) {
                            int randomDNode = random.nextInt(N_DNODES); // uniform, without the Math.abs(Integer.MIN_VALUE) pitfall
                            QueryStatus status = client.query(TABLESPACE, (randomDNode * 10) + "",
                                    "SELECT * FROM " + TABLE + ";", null);
                            log.info("Query status -> " + status);
                            assertEquals(1, status.getResult().size());
                            Map<String, Object> jsonResult = (Map<String, Object>) status.getResult().get(0);
                            Integer seenIteration = (Integer) jsonResult.get("iteration");
                            synchronized (iterationsSeen) {
                                iterationsSeen.add(seenIteration);
                            }
                            assertTrue(seenIteration <= iteration.get());
                            assertEquals(randomDNode, jsonResult.get("dnode"));
                            Thread.sleep(100);
                        }
                    } catch (InterruptedException ie) {
                        // Bye bye
                        log.info("Bye bye!");
                    } catch (Throwable e) {
                        e.printStackTrace();
                        failed.set(true);
                    }
                }
            });
        }

        final SploutConfiguration config = SploutConfiguration.getTestConfig();
        final int iterationsToPerform = config.getInt(QNodeProperties.VERSIONS_PER_TABLESPACE) + 5;
        for (int i = 0; i < iterationsToPerform; i++) {
            iteration.incrementAndGet();
            log.info("Deploy iteration: " + iteration.get());
            deployIteration(iteration.get(), random, client, testTablespace);

            new TestUtils.NotWaitingForeverCondition() {
                @Override
                public boolean endCondition() {
                    synchronized (iterationsSeen) {
                        return iterationsSeen.size() == (iteration.get() + 1);
                    }
                }
            }.waitAtMost(5000);
        }

        assertEquals(false, failed.get());

        service.shutdownNow(); // will interrupt all threads
        while (!service.isTerminated()) {
            Thread.sleep(100);
        }

        CoordinationStructures coord = TestUtils.getCoordinationStructures(config);
        assertNotNull(coord.getCopyVersionsBeingServed().get(TABLESPACE));

        // Assert that there is only MAX_VERSIONS versions of the tablespace (due to old version cleanup)
        new TestUtils.NotWaitingForeverCondition() {

            @Override
            public boolean endCondition() {
                QNodeHandler handler = (QNodeHandler) qNodes.get(0).getHandler();
                int seenVersions = 0;
                for (Map.Entry<TablespaceVersion, Tablespace> tablespaceVersion : handler.getContext()
                        .getTablespaceVersionsMap().entrySet()) {
                    if (tablespaceVersion.getKey().getTablespace().equals(TABLESPACE)) {
                        seenVersions++;
                    }
                }
                return seenVersions <= config.getInt(QNodeProperties.VERSIONS_PER_TABLESPACE);
            }
        }.waitAtMost(5000);
    } finally {
        closeSploutEnsemble();
        FileUtils.deleteDirectory(new File(TMP_FOLDER));
    }
}
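
iteration acts here as a monotonically increasing version number: the test bumps it with incrementAndGet before each deploy, and query threads may observe a stale value but never one from the future. A condensed sketch of that invariant (hypothetical names, no Splout involved):

import java.util.concurrent.atomic.AtomicInteger;

public class VersionCounterDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger deployed = new AtomicInteger(0);

        Thread reader = new Thread(() -> {
            for (int i = 0; i < 1_000_000; i++) {
                int seen = deployed.get();
                // A reader may lag behind the writer, but can never be ahead,
                // because the counter only ever grows.
                if (seen > deployed.get()) {
                    throw new AssertionError("observed a version from the future: " + seen);
                }
            }
        });
        reader.start();

        for (int v = 0; v < 100; v++) {
            deployed.incrementAndGet(); // publish a new version
        }
        reader.join();
        System.out.println("final version: " + deployed.get()); // final version: 100
    }
}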

From source file: org.apache.camel.processor.MulticastProcessor.java

protected boolean doProcessSequential(Exchange original, AtomicExchange result,
        Iterable<ProcessorExchangePair> pairs, AsyncCallback callback) throws Exception {
    AtomicInteger total = new AtomicInteger();
    Iterator<ProcessorExchangePair> it = pairs.iterator();

    while (it.hasNext()) {
        ProcessorExchangePair pair = it.next();
        Exchange subExchange = pair.getExchange();
        updateNewExchange(subExchange, total.get(), pairs, it);

        boolean sync = doProcessSequential(original, result, pairs, it, pair, callback, total);
        if (!sync) {
            if (LOG.isTraceEnabled()) {
                LOG.trace("Processing exchangeId: " + pair.getExchange().getExchangeId()
                        + " is continued being processed asynchronously");
            }/*w w  w  .ja  va2 s  . com*/
            // the remainder of the multicast will be completed async
            // so we break out now, then the callback will be invoked which then continue routing from where we left here
            return false;
        }

        if (LOG.isTraceEnabled()) {
            LOG.trace("Processing exchangeId: " + pair.getExchange().getExchangeId()
                    + " is continued being processed synchronously");
        }

        // Decide whether to continue with the multicast or not; similar logic to the Pipeline
        // remember to test for stop on exception and aggregate before copying back results
        boolean continueProcessing = PipelineHelper.continueProcessing(subExchange,
                "Sequential processing failed for number " + total.get(), LOG);
        if (stopOnException && !continueProcessing) {
            if (subExchange.getException() != null) {
                // wrap in exception to explain where it failed
                throw new CamelExchangeException("Sequential processing failed for number " + total.get(),
                        subExchange, subExchange.getException());
            } else {
                // we want to stop on exception, and the exception was handled by the error handler
                // this is similar to what the pipeline does, so we should do the same to not surprise end users
                // so we should set the failed exchange as the result and be done
                result.set(subExchange);
                return true;
            }
        }

        if (LOG.isTraceEnabled()) {
            LOG.trace("Sequential processing complete for number " + total + " exchange: " + subExchange);
        }

        doAggregate(getAggregationStrategy(subExchange), result, subExchange);
        total.incrementAndGet();
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Done sequential processing " + total + " exchanges");
    }

    return true;
}