Example usage for java.util.concurrent.atomic.AtomicInteger.intValue()

Introduction

This page lists usage examples for java.util.concurrent.atomic.AtomicInteger.intValue().

Prototype

public int intValue() 

Document

Returns the current value of this AtomicInteger as an int, with memory effects as specified by VarHandle#getVolatile.
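
For orientation, here is a minimal, self-contained sketch of the call before the real-world examples below. The class and variable names are illustrative only and do not come from any of the projects listed under Usage.

import java.util.concurrent.atomic.AtomicInteger;

public class IntValueDemo {
    public static void main(String[] args) {
        AtomicInteger counter = new AtomicInteger(41); // illustrative starting value
        counter.incrementAndGet();          // atomically bump the value to 42
        int snapshot = counter.intValue();  // volatile read of the current value
        System.out.println(snapshot);       // prints 42
    }
}

For AtomicInteger, intValue() is equivalent to get(); it exists so the class fulfills the java.lang.Number contract alongside longValue(), floatValue(), and doubleValue().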

Usage

From source file:com.couchbase.lite.store.SQLiteViewStore.java

/**
 * Updates the indexes of one or more views in parallel.
 *
 * @param inputViews An array of ViewStore instances, always including the receiver.
 * @return Status OK if updated or NOT_MODIFIED if already up-to-date.
 * @throws CouchbaseLiteException
 */
@Override
@InterfaceAudience.Private
public Status updateIndexes(List<ViewStore> inputViews) throws CouchbaseLiteException {
    Log.v(Log.TAG_VIEW, "Re-indexing view: %s", name);
    if (getViewID() <= 0) {
        String msg = "getViewID() < 0";
        throw new CouchbaseLiteException(msg, new Status(Status.NOT_FOUND));
    }

    store.beginTransaction();
    boolean success = false;
    Cursor cursor = null;
    try {
        // If the view this update is for doesn't need any update, don't do anything:
        final long dbMaxSequence = store.getLastSequence();
        final long forViewLastSequence = getLastSequenceIndexed();
        if (forViewLastSequence >= dbMaxSequence) {
            success = true;
            return new Status(Status.NOT_MODIFIED);
        }

        // Check whether we need to update at all,
        // and remove obsolete emitted results from the 'maps' table:
        long minLastSequence = dbMaxSequence;
        final long[] viewLastSequence = new long[inputViews.size()];
        int deletedCount = 0;
        int i = 0;
        final HashSet<String> docTypes = new HashSet<String>();
        HashMap<String, String> viewDocTypes = null;
        boolean allDocTypes = false;
        final HashMap<Integer, Integer> viewTotalRows = new HashMap<Integer, Integer>();
        final ArrayList<SQLiteViewStore> views = new ArrayList<SQLiteViewStore>();
        final ArrayList<Mapper> mapBlocks = new ArrayList<Mapper>();

        for (ViewStore v : inputViews) {
            assert (v != null);
            SQLiteViewStore view = (SQLiteViewStore) v;
            ViewStoreDelegate delegate = view.getDelegate();
            Mapper map = delegate != null ? delegate.getMap() : null;
            if (map == null) {
                if (view == this) {
                    String msg = String.format(Locale.ENGLISH,
                            "Cannot index view %s: " + "no map block registered", view.getName());
                    Log.e(Log.TAG_VIEW, msg);
                    throw new CouchbaseLiteException(msg, new Status(Status.BAD_REQUEST));
                }
                Log.v(Log.TAG_VIEW, "    %s has no map block; skipping it", view.getName());
                continue;
            }

            views.add(view);
            mapBlocks.add(map);

            int viewID = view.getViewID();
            if (viewID <= 0) {
                String message = String.format(Locale.ENGLISH, "View '%s' not found in database",
                        view.getName());
                Log.e(Log.TAG_VIEW, message);
                throw new CouchbaseLiteException(message, new Status(Status.NOT_FOUND));
            }

            int totalRows = view.getTotalRows();
            viewTotalRows.put(viewID, totalRows);

            long last = view == this ? forViewLastSequence : view.getLastSequenceIndexed();
            viewLastSequence[i++] = last;
            if (last < 0) {
                String msg = String.format(Locale.ENGLISH, "last < 0 (%d)", last);
                throw new CouchbaseLiteException(msg, new Status(Status.INTERNAL_SERVER_ERROR));
            } else if (last < dbMaxSequence) {
                if (last == 0)
                    view.createIndex();
                minLastSequence = Math.min(minLastSequence, last);
                Log.v(Log.TAG_VIEW, "    %s last indexed at #%d", view.getName(), last);

                String docType = delegate.getDocumentType();
                if (docType != null) {
                    docTypes.add(docType);
                    if (viewDocTypes == null)
                        viewDocTypes = new HashMap<String, String>();
                    viewDocTypes.put(view.getName(), docType);
                } else {
                    allDocTypes = true;
                }

                int changes = 0;
                if (last == 0) {
                    changes = store.getStorageEngine().delete(view.queryString("maps_#"), null, null);
                } else {
                    store.optimizeSQLIndexes();
                    String[] args = { Long.toString(last), Long.toString(last) };
                    changes = store.getStorageEngine().delete(view.queryString("maps_#"),
                            "sequence IN (SELECT parent FROM revs "
                                    + "WHERE sequence>? AND +parent>0 AND +parent<=?)",
                            args);
                }

                // Update #deleted rows:
                deletedCount += changes;

                // Only count these deletes as changes if this isn't a view reset to 0
                if (last != 0) {
                    int newTotalRows = viewTotalRows.get(viewID) - changes;
                    viewTotalRows.put(viewID, newTotalRows);
                }
            }
        }

        if (minLastSequence == dbMaxSequence) {
            Log.v(Log.TAG_VIEW, "minLastSequence (%d) == dbMaxSequence (%d), nothing to do", minLastSequence,
                    dbMaxSequence);
            success = true;
            return new Status(Status.NOT_MODIFIED);
        }

        Log.v(Log.TAG_VIEW, "Updating indexes of (%s) from #%d to #%d ...", viewNames(views), minLastSequence,
                dbMaxSequence);

        // This is the emit() block, which gets called from within the user-defined map() block
        // that's called down below.
        final AtomicInteger insertedCount = new AtomicInteger(0);
        AbstractMapEmitBlock emitBlock = new AbstractMapEmitBlock() {
            @Override
            public void emit(Object key, Object value) {
                if (key == null) {
                    Log.w(Log.TAG_VIEW, "emit() called with nil key; ignoring");
                    return;
                }
                try {
                    curView.emit(key, value, this.sequence); // emit block's sequence
                    int curViewID = curView.getViewID();
                    viewTotalRows.put(curViewID, viewTotalRows.get(curViewID) + 1);
                } catch (Exception e) {
                    Log.e(Log.TAG_VIEW, "Error emitting", e);
                    throw new RuntimeException(e);
                }
            }
        };

        // Now scan every revision added since the last time the view was indexed:

        // NOTE: Below is the original query. When the query result uses a lot of memory,
        //       Android's SQLiteDatabase can return a null column value, which leads to
        //       missing index data because the logic below skips a result row whose column
        //       is null. To avoid the issue, retrieval of the json field is isolated from
        //       the original query, since the json field can be large (up to 2MB).
        // StringBuffer sql = new StringBuffer( "SELECT revs.doc_id, sequence, docid, revid,
        // json, no_attachments, deleted FROM revs, docs WHERE sequence>? AND current!=0 ");

        boolean checkDocTypes = docTypes.size() > 1 || (allDocTypes && docTypes.size() > 0);
        StringBuilder sql = new StringBuilder(
                "SELECT revs.doc_id, sequence, docid, revid, no_attachments, deleted ");
        if (checkDocTypes)
            sql.append(", doc_type ");
        sql.append("FROM revs, docs WHERE sequence>? AND current!=0 ");
        if (minLastSequence == 0) {
            sql.append("AND deleted=0 ");
        }
        if (!allDocTypes && docTypes.size() > 0) {
            String docTypesString = getJoinedSQLQuotedStrings(docTypes.toArray(new String[docTypes.size()]));
            sql.append("AND doc_type IN (").append(docTypesString).append(") ");
        }
        // order results by deleted ASC so that if multiple revs are returned, the non-deleted ones come first
        // NOTE: Views broken with concurrent update and delete
        // https://github.com/couchbase/couchbase-lite-java-core/issues/952
        sql.append("AND revs.doc_id = docs.doc_id ORDER BY revs.doc_id, deleted ASC, revid DESC");
        String[] selectArgs = { Long.toString(minLastSequence) };
        cursor = store.getStorageEngine().rawQuery(sql.toString(), selectArgs);

        boolean keepGoing = cursor.moveToNext(); // Go to first result row
        while (keepGoing) {
            // NOTE: skip row if 1st column is null
            // https://github.com/couchbase/couchbase-lite-java-core/issues/497
            if (cursor.isNull(0)) {
                keepGoing = cursor.moveToNext();
                continue;
            }

            long docID = cursor.getLong(0);

            // Reconstitute the document as a dictionary:
            long sequence = cursor.getLong(1);
            String docId = cursor.getString(2);
            if (docId.startsWith("_design/")) { // design docs don't get indexed!
                keepGoing = cursor.moveToNext();
                continue;
            }
            String revID = cursor.getString(3);
            boolean deleted = cursor.getInt(5) > 0;
            String docType = checkDocTypes ? cursor.getString(6) : null;

            // Skip rows with the same doc_id -- these are losing conflicts.
            // NOTE: also skip rows whose 1st column is null
            // https://github.com/couchbase/couchbase-lite-java-core/issues/497
            ArrayList<String> conflicts = null;
            boolean isNull;
            while ((keepGoing = cursor.moveToNext())
                    && ((isNull = cursor.isNull(0)) || cursor.getLong(0) == docID)) {
                if (isNull)
                    continue;
                if (!deleted) {
                    if (conflicts == null)
                        conflicts = new ArrayList<String>();
                    conflicts.add(cursor.getString(3));
                }
            }

            long realSequence = sequence; // because sequence may be changed, below
            if (minLastSequence > 0) {
                // Find conflicts with documents from previous indexings.
                Cursor cursor2 = null;
                try {
                    String[] selectArgs2 = { Long.toString(docID), Long.toString(minLastSequence) };
                    cursor2 = store.getStorageEngine()
                            .rawQuery("SELECT revid, sequence FROM revs "
                                    + "WHERE doc_id=? AND sequence<=? AND current!=0 AND deleted=0 "
                                    + "ORDER BY revID DESC ", selectArgs2);

                    if (cursor2.moveToNext()) {
                        String oldRevID = cursor2.getString(0);
                        // This is the revision that used to be the 'winner'.
                        // Remove its emitted rows:
                        long oldSequence = cursor2.getLong(1);
                        String[] args = { Long.toString(oldSequence) };
                        for (SQLiteViewStore view : views) {
                            int changes = view.store.getStorageEngine().delete(view.queryString("maps_#"),
                                    "sequence=?", args);
                            deletedCount += changes;
                            int thisViewID = view.getViewID();
                            int newTotalRows = viewTotalRows.get(thisViewID) - changes;
                            viewTotalRows.put(thisViewID, newTotalRows);
                        }

                        String conflictRevID = oldRevID;
                        if (deleted || RevisionInternal.CBLCompareRevIDs(oldRevID, revID) > 0) {
                            // It still 'wins' the conflict, so it's the one that
                            // should be mapped [again], not the current revision!
                            conflictRevID = revID;
                            revID = oldRevID;
                            deleted = false;
                            sequence = oldSequence;
                        }

                        if (!deleted) {
                            // Conflict revisions:
                            if (conflicts == null)
                                conflicts = new ArrayList<String>();
                            conflicts.add(conflictRevID);
                            while (cursor2.moveToNext()) {
                                conflicts.add(cursor2.getString(0));
                            }
                        }
                    }
                } finally {
                    if (cursor2 != null) {
                        cursor2.close();
                    }
                }
            }

            if (deleted)
                continue;

            // Get json blob:
            String[] selectArgs3 = { Long.toString(sequence) };
            byte[] json = SQLiteUtils.byteArrayResultForQuery(store.getStorageEngine(),
                    "SELECT json FROM revs WHERE sequence=?", selectArgs3);

            // Get the document properties, to pass to the map function:
            Map<String, Object> curDoc = store.documentPropertiesFromJSON(json, docId, revID, false, sequence);

            if (curDoc == null) {
                Log.w(Log.TAG_VIEW, "Failed to parse JSON of doc %s rev %s", docID, revID);
                continue;
            }
            curDoc.put("_local_seq", sequence);

            if (conflicts != null)
                curDoc.put("_conflicts", conflicts);

            // Call the user-defined map() to emit new key/value pairs from this revision:
            i = -1;
            for (SQLiteViewStore view : views) {
                curView = view;
                ++i;
                if (viewLastSequence[i] < realSequence) {
                    if (checkDocTypes) {
                        String viewDocType = viewDocTypes.get(view.getName());
                        if (viewDocType != null && !viewDocType.equals(docType))
                            continue; // skip; view's documentType doesn't match this doc
                    }
                    Log.v(Log.TAG_VIEW, "#%d: map '%s' for view %s...", sequence, docID, view.getName());
                    try {
                        emitBlock.setSequence(sequence);
                        mapBlocks.get(i).map(curDoc, emitBlock);
                    } catch (Throwable e) {
                        String msg = String.format(Locale.ENGLISH, "Error when calling map block of view '%s'",
                                view.getName());
                        Log.e(Log.TAG_VIEW, msg, e);
                        throw new CouchbaseLiteException(msg, e, new Status(Status.CALLBACK_ERROR));
                    }
                }
            }
        }

        // Finally, record the last revision sequence number that was indexed and update #rows:
        for (SQLiteViewStore view : views) {
            view.finishCreatingIndex();
            int newTotalRows = viewTotalRows.get(view.getViewID());
            ContentValues updateValues = new ContentValues();
            updateValues.put("lastSequence", dbMaxSequence);
            updateValues.put("total_docs", newTotalRows);
            String[] whereArgs = { Integer.toString(view.getViewID()) };
            store.getStorageEngine().update("views", updateValues, "view_id=?", whereArgs);
        }
        Log.v(Log.TAG_VIEW, "...Finished re-indexing (%s) to #%d (deleted %d, added %d)", viewNames(views),
                dbMaxSequence, deletedCount, insertedCount.intValue());

        success = true;
        return new Status(Status.OK);
    } catch (SQLException ex) {
        throw new CouchbaseLiteException(ex, new Status(Status.DB_ERROR));
    } finally {
        curView = null;
        if (cursor != null)
            cursor.close();
        if (store != null)
            store.endTransaction(success);
    }
}

From source file:org.alfresco.bm.report.XLSXReporter.java

private void createEventSheets(final XSSFWorkbook workbook) {
    // Create the fonts we need
    Font fontBold = workbook.createFont();
    fontBold.setBoldweight(Font.BOLDWEIGHT_BOLD);

    // Create the styles we need
    CreationHelper helper = workbook.getCreationHelper();
    final XSSFCellStyle dataStyle = workbook.createCellStyle();
    dataStyle.setAlignment(HorizontalAlignment.RIGHT);
    final XSSFCellStyle headerStyle = workbook.createCellStyle();
    headerStyle.setAlignment(HorizontalAlignment.RIGHT);
    headerStyle.setFont(fontBold);
    final XSSFCellStyle dateStyle = workbook.createCellStyle();
    dateStyle.setDataFormat(helper.createDataFormat().getFormat("HH:mm:ss"));

    // Calculate a good window size
    ResultService resultService = getResultService();
    EventRecord firstResult = resultService.getFirstResult();
    EventRecord lastResult = resultService.getLastResult();
    if (firstResult == null || lastResult == null) {
        return;
    }
    long start = firstResult.getStartTime();
    long end = lastResult.getStartTime();
    long windowSize = AbstractEventReporter.getWindowSize(start, end, 100); // Well-known window sizes

    // Keep track of sheets by event name. Note that XLSX truncates sheet names to 31 chars, so use 28 chars plus a ~01, ~02 suffix
    final Map<String, String> sheetNames = new HashMap<String, String>(31);
    final Map<String, XSSFSheet> sheets = new HashMap<String, XSSFSheet>(31);
    final Map<String, AtomicInteger> rowNums = new HashMap<String, AtomicInteger>(31);

    ResultHandler handler = new ResultHandler() {
        @Override
        public boolean processResult(long fromTime, long toTime,
                Map<String, DescriptiveStatistics> statsByEventName, Map<String, Integer> failuresByEventName)
                throws Throwable {
            // Get or create a sheet for each event
            for (String eventName : statsByEventName.keySet()) {
                // What sheet name do we use?
                String sheetName = sheetNames.get(eventName);
                if (sheetName == null) {
                    sheetName = eventName;
                    if (eventName.length() > 28) {
                        int counter = 1;
                        // Find a sheet name not in use
                        while (true) {
                            sheetName = eventName.substring(0, 28);
                            sheetName = String.format("%s~%02d", sheetName, counter);
                            // Have we used this, yet?
                            if (sheets.containsKey(sheetName)) {
                                // Yes, we have used it.
                                counter++;
                                continue;
                            }
                            // This is unique
                            break;
                        }
                    }
                    sheetNames.put(eventName, sheetName);
                }
                // Get and create the sheet, if necessary
                XSSFSheet sheet = sheets.get(sheetName);
                if (sheet == null) {
                    // Create
                    try {
                        sheet = workbook.createSheet(sheetName);
                        sheets.put(sheetName, sheet);
                        sheet.getHeader().setCenter(title + " - " + eventName);
                        sheet.getPrintSetup().setFitWidth((short) 1);
                        sheet.getPrintSetup().setLandscape(true);
                    } catch (Exception e) {
                        logger.error("Unable to create workbook sheet for event: " + eventName, e);
                        continue;
                    }
                    // Intro
                    XSSFCell cell = sheet.createRow(0).createCell(0);
                    cell.setCellValue(title + " - " + eventName + ":");
                    cell.setCellStyle(headerStyle);
                    // Headings
                    XSSFRow row = sheet.createRow(1);
                    cell = row.createCell(0);
                    cell.setCellStyle(headerStyle);
                    cell.setCellValue("time");
                    cell = row.createCell(1);
                    cell.setCellStyle(headerStyle);
                    cell.setCellValue("mean");
                    cell = row.createCell(2);
                    cell.setCellStyle(headerStyle);
                    cell.setCellValue("min");
                    cell = row.createCell(3);
                    cell.setCellStyle(headerStyle);
                    cell.setCellValue("max");
                    cell = row.createCell(4);
                    cell.setCellStyle(headerStyle);
                    cell.setCellValue("stdDev");
                    cell = row.createCell(5);
                    cell.setCellStyle(headerStyle);
                    cell.setCellValue("num");
                    cell = row.createCell(6);
                    cell.setCellStyle(headerStyle);
                    cell.setCellValue("numPerSec");
                    cell = row.createCell(7);
                    cell.setCellStyle(headerStyle);
                    cell.setCellValue("fail");
                    cell = row.createCell(8);
                    cell.setCellStyle(headerStyle);
                    cell.setCellValue("failPerSec");
                    // Size the columns
                    sheet.autoSizeColumn(0);
                    sheet.autoSizeColumn(1);
                    sheet.autoSizeColumn(2);
                    sheet.autoSizeColumn(3);
                    sheet.autoSizeColumn(4);
                    sheet.autoSizeColumn(5);
                    sheet.autoSizeColumn(6);
                    sheet.autoSizeColumn(7);
                    sheet.autoSizeColumn(8);
                }
                AtomicInteger rowNum = rowNums.get(eventName);
                if (rowNum == null) {
                    rowNum = new AtomicInteger(2);
                    rowNums.put(eventName, rowNum);
                }

                DescriptiveStatistics stats = statsByEventName.get(eventName);
                Integer failures = failuresByEventName.get(eventName);

                double numPerSec = (double) stats.getN() / ((double) (toTime - fromTime) / 1000.0);
                double failuresPerSec = (double) failures / ((double) (toTime - fromTime) / 1000.0);

                XSSFRow row = sheet.createRow(rowNum.getAndIncrement());
                XSSFCell cell;
                cell = row.createCell(0, Cell.CELL_TYPE_NUMERIC);
                cell.setCellStyle(dateStyle);
                cell.setCellValue(new Date(toTime));
                cell = row.createCell(5, Cell.CELL_TYPE_NUMERIC);
                cell.setCellValue(stats.getN());
                cell = row.createCell(6, Cell.CELL_TYPE_NUMERIC);
                cell.setCellValue(numPerSec);
                cell = row.createCell(7, Cell.CELL_TYPE_NUMERIC);
                cell.setCellValue(failures);
                cell = row.createCell(8, Cell.CELL_TYPE_NUMERIC);
                cell.setCellValue(failuresPerSec);
                // Leave out values if there is no mean
                if (Double.isNaN(stats.getMean())) {
                    continue;
                }
                cell = row.createCell(1, Cell.CELL_TYPE_NUMERIC);
                cell.setCellValue(stats.getMean());
                cell = row.createCell(2, Cell.CELL_TYPE_NUMERIC);
                cell.setCellValue(stats.getMin());
                cell = row.createCell(3, Cell.CELL_TYPE_NUMERIC);
                cell.setCellValue(stats.getMax());
                cell = row.createCell(4, Cell.CELL_TYPE_NUMERIC);
                cell.setCellValue(stats.getStandardDeviation());
            }
            return true;
        }
    };
    resultService.getResults(handler, start, windowSize, windowSize, false);

    // Create charts in the sheets
    for (String eventName : sheetNames.keySet()) {
        // Get the sheet name
        String sheetName = sheetNames.get(eventName);
        if (sheetName == null) {
            logger.error("Did not find sheet for event: " + eventName);
            continue;
        }
        // Get the sheet
        XSSFSheet sheet = sheets.get(sheetName);
        if (sheet == null) {
            logger.error("Did not find sheet for name: " + sheetName);
            continue;
        }
        // What row did we get up to
        AtomicInteger rowNum = rowNums.get(eventName);
        if (rowNum == null) {
            logger.error("Did not find row number for event: " + sheetName);
            continue;
        }

        // This axis is common to both charts
        ChartDataSource<Number> xTime = DataSources.fromNumericCellRange(sheet,
                new CellRangeAddress(1, rowNum.intValue() - 1, 0, 0));

        // Graph of event times
        XSSFDrawing drawingTimes = sheet.createDrawingPatriarch();
        ClientAnchor anchorTimes = drawingTimes.createAnchor(0, 0, 0, 0, 0, 5, 15, 25);
        Chart chartTimes = drawingTimes.createChart(anchorTimes);
        ChartLegend legendTimes = chartTimes.getOrCreateLegend();
        legendTimes.setPosition(LegendPosition.BOTTOM);

        LineChartData chartDataTimes = chartTimes.getChartDataFactory().createLineChartData();

        ChartAxis bottomAxisTimes = chartTimes.getChartAxisFactory().createCategoryAxis(AxisPosition.BOTTOM);
        bottomAxisTimes.setNumberFormat("#,##0;-#,##0");
        ValueAxis leftAxisTimes = chartTimes.getChartAxisFactory().createValueAxis(AxisPosition.LEFT);

        // Mean
        ChartDataSource<Number> yMean = DataSources.fromNumericCellRange(sheet,
                new CellRangeAddress(1, rowNum.intValue() - 1, 1, 1));
        LineChartSeries yMeanSerie = chartDataTimes.addSeries(xTime, yMean);
        yMeanSerie.setTitle(title + " - " + eventName + ": Mean (ms)");

        // Std Dev
        ChartDataSource<Number> yStdDev = DataSources.fromNumericCellRange(sheet,
                new CellRangeAddress(1, rowNum.intValue() - 1, 4, 4));
        LineChartSeries yStdDevSerie = chartDataTimes.addSeries(xTime, yStdDev);
        yStdDevSerie.setTitle(title + " - " + eventName + ": Standard Deviation (ms)");

        // Plot event times
        chartTimes.plot(chartDataTimes, bottomAxisTimes, leftAxisTimes);

        // Graph of event volumes
        XSSFDrawing drawingVolumes = sheet.createDrawingPatriarch();
        ClientAnchor anchorVolumes = drawingVolumes.createAnchor(0, 0, 0, 0, 0, 25, 15, 35);
        Chart chartVolumes = drawingVolumes.createChart(anchorVolumes);
        ChartLegend legendVolumes = chartVolumes.getOrCreateLegend();
        legendVolumes.setPosition(LegendPosition.BOTTOM);

        LineChartData chartDataVolumes = chartVolumes.getChartDataFactory().createLineChartData();

        ChartAxis bottomAxisVolumes = chartVolumes.getChartAxisFactory()
                .createCategoryAxis(AxisPosition.BOTTOM);
        bottomAxisVolumes.setNumberFormat("#,##0;-#,##0");
        ValueAxis leftAxisVolumes = chartVolumes.getChartAxisFactory().createValueAxis(AxisPosition.LEFT);

        // Number per second
        ChartDataSource<Number> yNumPerSec = DataSources.fromNumericCellRange(sheet,
                new CellRangeAddress(1, rowNum.intValue() - 1, 6, 6));
        LineChartSeries yNumPerSecSerie = chartDataVolumes.addSeries(xTime, yNumPerSec);
        yNumPerSecSerie.setTitle(title + " - " + eventName + ": Events per Second");

        // Failures per second
        ChartDataSource<Number> yFailPerSec = DataSources.fromNumericCellRange(sheet,
                new CellRangeAddress(1, rowNum.intValue() - 1, 8, 8));
        LineChartSeries yFailPerSecSerie = chartDataVolumes.addSeries(xTime, yFailPerSec);
        yFailPerSecSerie.setTitle(title + " - " + eventName + ": Failures per Second");

        // Plot volumes
        chartVolumes.plot(chartDataVolumes, bottomAxisVolumes, leftAxisVolumes);
    }
}

From source file:org.apache.camel.processor.MulticastProcessor.java

protected void doProcessParallel(final Exchange original, final AtomicExchange result,
        final Iterable<ProcessorExchangePair> pairs, final boolean streaming, final AsyncCallback callback)
        throws Exception {

    ObjectHelper.notNull(executorService, "ExecutorService", this);
    ObjectHelper.notNull(aggregateExecutorService, "AggregateExecutorService", this);

    final CompletionService<Exchange> completion;
    if (streaming) {
        // execute tasks in parallel+streaming and aggregate in the order they are finished (out of order sequence)
        completion = new ExecutorCompletionService<Exchange>(executorService);
    } else {
        // execute tasks in parallel and aggregate in the order the tasks are submitted (in order sequence)
        completion = new SubmitOrderedCompletionService<Exchange>(executorService);
    }

    // when parallel then aggregate on the fly
    final AtomicBoolean running = new AtomicBoolean(true);
    final AtomicInteger total = new AtomicInteger(0);
    final AtomicBoolean allTasksSubmitted = new AtomicBoolean();
    final CountDownLatch aggregationOnTheFlyDone = new CountDownLatch(1);
    final AtomicException executionException = new AtomicException();

    final Iterator<ProcessorExchangePair> it = pairs.iterator();

    if (it.hasNext()) {
        // issue task to execute in separate thread so it can aggregate on-the-fly
        // while we submit new tasks, and those tasks complete concurrently
        // this allows us to optimize work and reduce memory consumption
        AggregateOnTheFlyTask task = new AggregateOnTheFlyTask(result, original, total, completion, running,
                aggregationOnTheFlyDone, allTasksSubmitted, executionException);

        // and start the aggregation task so we can aggregate on-the-fly
        aggregateExecutorService.submit(task);
    }

    LOG.trace("Starting to submit parallel tasks");

    while (it.hasNext()) {
        final ProcessorExchangePair pair = it.next();
        final Exchange subExchange = pair.getExchange();
        updateNewExchange(subExchange, total.intValue(), pairs, it);

        completion.submit(new Callable<Exchange>() {
            public Exchange call() throws Exception {
                if (!running.get()) {
                    // do not start processing the task if we are not running
                    return subExchange;
                }

                try {
                    doProcessParallel(pair);
                } catch (Throwable e) {
                    subExchange.setException(e);
                }

                // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                Integer number = getExchangeIndex(subExchange);
                boolean continueProcessing = PipelineHelper.continueProcessing(subExchange,
                        "Parallel processing failed for number " + number, LOG);
                if (stopOnException && !continueProcessing) {
                    // signal to stop running
                    running.set(false);
                    // throw caused exception
                    if (subExchange.getException() != null) {
                        // wrap in exception to explain where it failed
                        throw new CamelExchangeException("Parallel processing failed for number " + number,
                                subExchange, subExchange.getException());
                    }
                }

                if (LOG.isTraceEnabled()) {
                    LOG.trace("Parallel processing complete for exchange: " + subExchange);
                }
                return subExchange;
            }
        });

        total.incrementAndGet();
    }

    // signal that all tasks have been submitted
    if (LOG.isTraceEnabled()) {
        LOG.trace("Signaling that all " + total.get() + " tasks has been submitted.");
    }
    allTasksSubmitted.set(true);

    // it's too hard to do parallel async routing, so we let the caller thread be synchronous
    // and have it pick up the replies and do the aggregation (e.g. we use a latch to wait)
    // wait for aggregation to be done
    if (LOG.isDebugEnabled()) {
        LOG.debug("Waiting for on-the-fly aggregation to complete aggregating " + total.get() + " responses.");
    }
    aggregationOnTheFlyDone.await();

    // did we fail for whatever reason? if so, throw the caused exception
    if (executionException.get() != null) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Parallel processing failed due " + executionException.get().getMessage());
        }
        throw executionException.get();
    }

    // now everything is okay, so we are done
    if (LOG.isDebugEnabled()) {
        LOG.debug("Done parallel processing " + total + " exchanges");
    }
}

From source file:org.apache.hadoop.hbase.master.TestRegionPlacement.java

/**
 * Check whether regions are assigned to servers consistent with the explicit
 * hints that are persisted in the hbase:meta table.
 * Also keep track of the number of regions assigned to the
 * primary region server.
 * @return the number of regions assigned to the primary region server
 * @throws IOException
 */
private int getNumRegionisOnPrimaryRS() throws IOException {
    final AtomicInteger regionOnPrimaryNum = new AtomicInteger(0);
    final AtomicInteger totalRegionNum = new AtomicInteger(0);
    LOG.info("The start of region placement verification");
    MetaScannerVisitor visitor = new MetaScannerVisitor() {
        public boolean processRow(Result result) throws IOException {
            try {
                HRegionInfo info = MetaScanner.getHRegionInfo(result);
                if (info.getTable().getNamespaceAsString()
                        .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) {
                    return true;
                }
                byte[] server = result.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
                byte[] favoredNodes = result.getValue(HConstants.CATALOG_FAMILY,
                        FavoredNodeAssignmentHelper.FAVOREDNODES_QUALIFIER);
                // Add the favored nodes into assignment plan
                ServerName[] favoredServerList = FavoredNodeAssignmentHelper.getFavoredNodesList(favoredNodes);
                favoredNodesAssignmentPlan.put(info, favoredServerList);

                Position[] positions = Position.values();
                if (info != null) {
                    totalRegionNum.incrementAndGet();
                    if (server != null) {
                        ServerName serverName = ServerName.valueOf(Bytes.toString(server), -1);
                        if (favoredNodes != null) {
                            String placement = "[NOT FAVORED NODE]";
                            for (int i = 0; i < favoredServerList.length; i++) {
                                if (favoredServerList[i].equals(serverName)) {
                                    placement = positions[i].toString();
                                    if (i == Position.PRIMARY.ordinal()) {
                                        regionOnPrimaryNum.incrementAndGet();
                                    }
                                    break;
                                }
                            }
                            LOG.info(info.getRegionNameAsString() + " on " + serverName + " " + placement);
                        } else {
                            LOG.info(info.getRegionNameAsString() + " running on " + serverName
                                    + " but there is no favored region server");
                        }
                    } else {
                        LOG.info(info.getRegionNameAsString() + " not assigned to any server");
                    }
                }
                return true;
            } catch (RuntimeException e) {
                LOG.error("Result=" + result);
                throw e;
            }
        }

        @Override
        public void close() throws IOException {
        }
    };
    MetaScanner.metaScan(TEST_UTIL.getConfiguration(), visitor);
    LOG.info("There are " + regionOnPrimaryNum.intValue() + " out of " + totalRegionNum.intValue()
            + " regions running on the primary" + " region servers");
    return regionOnPrimaryNum.intValue();
}

From source file:org.apache.nifi.controller.FlowController.java

/**
 * Updates the number of threads that can be simultaneously used for
 * executing processors.
 *
 * @param maxThreadCount the new maximum thread count; note that this method
 * must be called while holding the write lock!
 */
private void setMaxThreadCount(final int maxThreadCount, final FlowEngine engine,
        final AtomicInteger maxThreads) {
    if (maxThreadCount < 1) {
        throw new IllegalArgumentException();
    }

    maxThreads.getAndSet(maxThreadCount);
    if (null != engine && engine.getCorePoolSize() < maxThreadCount) {
        engine.setCorePoolSize(maxThreads.intValue());
    }
}

From source file:org.apache.qpid.systest.management.jmx.QueueManagementTest.java

public void testMoveMessagesBetweenQueuesWithActiveConsumerOnSourceQueue() throws Exception {
    setTestClientSystemProperty(ClientProperties.MAX_PREFETCH_PROP_NAME, new Integer(1).toString());
    Connection asyncConnection = getConnection();
    asyncConnection.start();

    final int numberOfMessagesToSend = 50;
    sendMessage(_session, _sourceQueue, numberOfMessagesToSend);
    syncSession(_session);
    assertEquals("Unexpected queue depth after send", numberOfMessagesToSend,
            _managedSourceQueue.getMessageCount().intValue());

    List<Long> amqMessagesIds = getAMQMessageIdsOn(_managedSourceQueue, 1, numberOfMessagesToSend);

    long fromMessageId = amqMessagesIds.get(0);
    long toMessageId = amqMessagesIds.get(numberOfMessagesToSend - 1);

    CountDownLatch consumerReadToHalfwayLatch = new CountDownLatch(numberOfMessagesToSend / 2);
    AtomicInteger totalConsumed = new AtomicInteger(0);
    startAsyncConsumerOn(_sourceQueue, asyncConnection, consumerReadToHalfwayLatch, totalConsumed);

    boolean halfwayPointReached = consumerReadToHalfwayLatch.await(5000, TimeUnit.MILLISECONDS);
    assertTrue("Did not read half of messages within time allowed", halfwayPointReached);

    _managedSourceQueue.moveMessages(fromMessageId, toMessageId, _destinationQueueName);

    asyncConnection.stop();

    // The exact number of messages moved will be non-deterministic, as the number of messages processed
    // by the consumer cannot be predicted.  There is also the possibility that a message can remain
    // on the source queue.  This situation will arise if a message has been acquired by the consumer, but not
    // yet delivered to the client application (i.e. MessageListener#onMessage()) when the Connection#stop() occurs.
    //
    // The number of messages moved + the number consumed + any messages remaining on source should
    // *always* be equal to the number we originally sent.

    int numberOfMessagesReadByConsumer = totalConsumed.intValue();
    int numberOfMessagesOnDestinationQueue = _managedDestinationQueue.getMessageCount().intValue();
    int numberOfMessagesRemainingOnSourceQueue = _managedSourceQueue.getMessageCount().intValue();

    LOGGER.debug("Async consumer read : " + numberOfMessagesReadByConsumer
            + " Number of messages moved to destination : " + numberOfMessagesOnDestinationQueue
            + " Number of messages remaining on source : " + numberOfMessagesRemainingOnSourceQueue);
    assertEquals("Unexpected number of messages after move", numberOfMessagesToSend,
            numberOfMessagesReadByConsumer + numberOfMessagesOnDestinationQueue
                    + numberOfMessagesRemainingOnSourceQueue);
}

From source file:org.apache.qpid.systest.management.jmx.QueueManagementTest.java

public void testMoveMessagesBetweenQueuesWithActiveConsumerOnDestinationQueue() throws Exception {
    setTestClientSystemProperty(ClientProperties.MAX_PREFETCH_PROP_NAME, new Integer(1).toString());
    Connection asyncConnection = getConnection();
    asyncConnection.start();

    final int numberOfMessagesToSend = 50;
    sendMessage(_session, _sourceQueue, numberOfMessagesToSend);
    syncSession(_session);
    assertEquals("Unexpected queue depth after send", numberOfMessagesToSend,
            _managedSourceQueue.getMessageCount().intValue());

    List<Long> amqMessagesIds = getAMQMessageIdsOn(_managedSourceQueue, 1, numberOfMessagesToSend);
    long fromMessageId = amqMessagesIds.get(0);
    long toMessageId = amqMessagesIds.get(numberOfMessagesToSend - 1);

    AtomicInteger totalConsumed = new AtomicInteger(0);
    CountDownLatch allMessagesConsumedLatch = new CountDownLatch(numberOfMessagesToSend);
    startAsyncConsumerOn(_destinationQueue, asyncConnection, allMessagesConsumedLatch, totalConsumed);

    _managedSourceQueue.moveMessages(fromMessageId, toMessageId, _destinationQueueName);

    allMessagesConsumedLatch.await(5000, TimeUnit.MILLISECONDS);
    assertEquals("Did not consume all messages from destination queue", numberOfMessagesToSend,
            totalConsumed.intValue());
}

From source file:org.apache.tinkerpop.gremlin.groovy.engine.GremlinExecutorTest.java

@Test
public void shouldNotExhaustThreads() throws Exception {
    final ScheduledExecutorService executorService = Executors.newScheduledThreadPool(2, testingThreadFactory);
    final GremlinExecutor gremlinExecutor = GremlinExecutor.build().executorService(executorService)
            .scheduledExecutorService(executorService).create();

    final AtomicInteger count = new AtomicInteger(0);
    assertTrue(IntStream.range(0, 1000).mapToObj(i -> gremlinExecutor.eval("1+1")).allMatch(f -> {
        try {
            return (Integer) f.get() == 2;
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        } finally {
            count.incrementAndGet();
        }
    }));

    assertEquals(1000, count.intValue());

    executorService.shutdown();
    executorService.awaitTermination(30000, TimeUnit.MILLISECONDS);
}

From source file:org.apache.tinkerpop.gremlin.groovy.engine.GremlinExecutorTest.java

@Test
public void shouldFailUntilImportExecutes() throws Exception {
    final GremlinExecutor gremlinExecutor = GremlinExecutor.build().create();

    final Set<String> imports = new HashSet<String>() {
        {
            add("import java.awt.Color");
        }
    };

    final AtomicInteger successes = new AtomicInteger(0);
    final AtomicInteger failures = new AtomicInteger(0);

    // Issue 1000 scripts in one thread using a class that isn't imported; this will result in failure.
    // While that thread is running, start a new thread that issues an addImports to include that class.
    // This should block further evals in the first thread until the import is complete, at which point
    // evals in the first thread will resume and start to succeed.
    final Thread t1 = new Thread(
            () -> IntStream.range(0, 1000).mapToObj(i -> gremlinExecutor.eval("Color.BLACK")).forEach(f -> {
                f.exceptionally(t -> failures.incrementAndGet()).join();
                if (!f.isCompletedExceptionally())
                    successes.incrementAndGet();
            }));

    final Thread t2 = new Thread(() -> {
        while (failures.get() < 500) {
        }
        gremlinExecutor.getScriptEngines().addImports(imports);
    });

    t1.start();
    t2.start();

    t1.join();
    t2.join();

    assertTrue(successes.intValue() > 0);
    assertTrue(failures.intValue() >= 500);

    gremlinExecutor.close();
}

From source file:org.apache.tinkerpop.gremlin.groovy.engine.ScriptEnginesTest.java

@Test
public void shouldFailUntilImportExecutes() throws Exception {
    final ScriptEngines engines = new ScriptEngines(se -> {
    });
    engines.reload("gremlin-groovy", Collections.<String>emptySet(), Collections.<String>emptySet(),
            Collections.emptyMap());

    final Set<String> imports = new HashSet<String>() {
        {
            add("import java.awt.Color");
        }
    };

    final AtomicInteger successes = new AtomicInteger(0);
    final AtomicInteger failures = new AtomicInteger(0);

    final Thread threadImport = new Thread(() -> {
        engines.addImports(imports);
    });

    // Issue 1000 scripts in one thread using a class that isn't imported; this will result in failure.
    // While that thread is running, start a new thread that issues an addImports to include that class.
    // This should block further evals in the first thread until the import is complete, at which point
    // evals in the first thread will resume and start to succeed.
    final Thread threadEvalAndTriggerImport = new Thread(() -> IntStream.range(0, 1000).forEach(i -> {
        try {
            engines.eval("Color.BLACK", new SimpleBindings(), "gremlin-groovy");
            successes.incrementAndGet();
        } catch (Exception ex) {
            if (failures.incrementAndGet() == 500)
                threadImport.start();
            Thread.yield();
        }
    }));

    threadEvalAndTriggerImport.start();

    threadEvalAndTriggerImport.join();
    threadImport.join();

    assertTrue("Success: " + successes.intValue() + " - Failures: " + failures.intValue(),
            successes.intValue() > 0);
    assertTrue("Success: " + successes.intValue() + " - Failures: " + failures.intValue(),
            failures.intValue() >= 500);

    engines.close();
}