Example usage for java.util.concurrent.atomic AtomicInteger incrementAndGet

List of usage examples for java.util.concurrent.atomic AtomicInteger incrementAndGet

Introduction

On this page you can find example usages of java.util.concurrent.atomic.AtomicInteger.incrementAndGet.

Prototype

public final int incrementAndGet() 

Source Link

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd.

Usage

From source file:org.apache.tajo.storage.FileStorageManager.java

/**
 * Recursively collects non-empty data files under {@code path} into {@code result},
 * skipping the first {@code startFileIndex} qualifying files and stopping once
 * {@code numResultFiles} files have been gathered.
 *
 * @param fs The file system holding the table data
 * @param path The table path
 * @param result The final result files to be used
 * @param startFileIndex Index (over all qualifying files) at which collection starts
 * @param numResultFiles Maximum number of files to place into {@code result}
 * @param currentFileIndex Running count of qualifying files seen so far; shared across
 *                         recursive calls so the skip/collect window spans the whole tree
 * @param partitioned A flag to indicate if this table is partitioned
 * @param currentDepth Current visiting depth of partition directories
 * @param maxDepth The partition depth of this table
 * @throws IOException if listing or stat-ing a path fails
 */
private void getNonZeroLengthDataFiles(FileSystem fs, Path path, List<FileStatus> result, int startFileIndex,
        int numResultFiles, AtomicInteger currentFileIndex, boolean partitioned, int currentDepth, int maxDepth)
        throws IOException {
    // Intermediate directory
    if (fs.isDirectory(path)) {

        // hiddenFileFilter presumably excludes hidden/underscore entries — see StorageManager.
        FileStatus[] files = fs.listStatus(path, StorageManager.hiddenFileFilter);

        if (files != null && files.length > 0) {

            for (FileStatus eachFile : files) {

                // checking if the enough number of files are found
                if (result.size() >= numResultFiles) {
                    return;
                }
                if (eachFile.isDirectory()) {

                    getNonZeroLengthDataFiles(fs, eachFile.getPath(), result, startFileIndex, numResultFiles,
                            currentFileIndex, partitioned, currentDepth + 1, // increment a visiting depth
                            maxDepth);

                    // if partitioned table, we should ignore files located in the intermediate directory.
                    // we can ensure that this file is in leaf directory if currentDepth == maxDepth.
                } else if (eachFile.isFile() && eachFile.getLen() > 0
                        && (!partitioned || currentDepth == maxDepth)) {
                    // Count every qualifying file, but only collect those at or past the start index.
                    if (currentFileIndex.get() >= startFileIndex) {
                        result.add(eachFile);
                    }
                    currentFileIndex.incrementAndGet();
                }
            }
        }

        // Files located in leaf directory
    } else {
        // path refers to a single file rather than a directory.
        FileStatus fileStatus = fs.getFileStatus(path);
        if (fileStatus != null && fileStatus.getLen() > 0) {
            if (currentFileIndex.get() >= startFileIndex) {
                result.add(fileStatus);
            }
            currentFileIndex.incrementAndGet();
            if (result.size() >= numResultFiles) {
                return;
            }
        }
    }
}

From source file:org.dasein.cloud.azure.tests.network.AzureVlanSupportTest.java

/**
 * Verifies that removing a subnet performs exactly one PUT of the network
 * configuration (with the subnet stripped out) against the expected URL.
 */
@Test
public void removeSubnetShouldPostCorrectRequest() throws CloudException, InternalException {
    // Counts how many PUT requests (network-config updates) the client issues.
    final AtomicInteger putCount = new AtomicInteger(0);
    new MockUp<CloseableHttpClient>() {
        @Mock
        public CloseableHttpResponse execute(Invocation inv, HttpUriRequest request) throws IOException {
            final String method = request.getMethod();
            final String uri = request.getURI().toString();

            if ("GET".equals(method) && VIRTUAL_NETWORK_SITES_URL.equals(uri)) {
                // Serve the virtual network sites containing the subnet under test.
                DaseinObjectToXmlEntity<VirtualNetworkSitesModel> sitesEntity = new DaseinObjectToXmlEntity<VirtualNetworkSitesModel>(
                        createVirtualNetworkSitesModelWithSubnet(ID, NAME, REGION, CIDR, "Created", SUBNET_NAME,
                                SUBNET_CIDR));
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_OK), sitesEntity,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            }

            if ("GET".equals(method) && NETWORK_CONFIG_URL.equals(uri)) {
                // Serve the current network configuration, still including the subnet.
                DaseinObjectToXmlEntity<NetworkConfigurationModel> configEntity = new DaseinObjectToXmlEntity<NetworkConfigurationModel>(
                        createNetworkConfigurationModelWithSubnet(NAME, REGION, CIDR, SUBNET_NAME,
                                SUBNET_CIDR));
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_OK), configEntity,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            }

            if ("PUT".equals(method)) {
                // The removal itself: a PUT of the configuration with the subnet removed.
                putCount.incrementAndGet();
                NetworkConfigurationModel expectedModel = createNetworkConfigurationModelWithSubnet(
                        NAME, REGION, CIDR, null, null);
                assertPut(request, NETWORK_CONFIG_URL,
                        new Header[] { new BasicHeader("x-ms-version", "2012-03-01") },
                        expectedModel);
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_OK), null,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            }

            throw new IOException("Request is not mocked");
        }
    };
    vlanSupport.removeSubnet(SUBNET_ID);
    assertEquals("removeVlan PUT network config should perform only 1 times", 1, putCount.get());
}

From source file:org.apache.hadoop.hive.ql.exec.tez.TestHostAffinitySplitLocationProvider.java

/**
 * Exercises {@code HostAffinitySplitLocationProvider.determineLocation} over all splits
 * against a mocked location list of size {@code locs}, then returns the coefficient of
 * variation (stdev/avg) of the per-location hit counts. Increments {@code errorCount}
 * when the distribution looks skewed: min/max outside 2.5 stdev of the mean, or cv > 0.22
 * (eyeballed thresholds — see comment below).
 */
private double testHashDistribution(int locs, final int missCount, FileSplit[] splits,
        AtomicInteger errorCount) {
    // This relies heavily on what method determineSplits ... calls and doesn't.
    // We could do a wrapper with only size() and get() methods instead of List, to be sure.
    @SuppressWarnings("unchecked")
    List<String> partLocs = (List<String>) Mockito.mock(List.class);
    Mockito.when(partLocs.size()).thenReturn(locs);
    // state counts get() invocations: only the missCount-th lookup returns a non-null
    // location; every other lookup is a "miss" (null). Reset to 0 before each split.
    final AtomicInteger state = new AtomicInteger(0);
    Mockito.when(partLocs.get(Mockito.anyInt())).thenAnswer(new Answer<String>() {
        @Override
        public String answer(InvocationOnMock invocation) throws Throwable {
            return (state.getAndIncrement() == missCount) ? "not-null" : null;
        }
    });
    int[] hitCounts = new int[locs];
    for (int splitIx = 0; splitIx < splits.length; ++splitIx) {
        state.set(0);
        int index = HostAffinitySplitLocationProvider.determineLocation(partLocs,
                splits[splitIx].getPath().toString(), splits[splitIx].getStart(), null);
        ++hitCounts[index];
    }
    SummaryStatistics ss = new SummaryStatistics();
    for (int hitCount : hitCounts) {
        ss.addValue(hitCount);
    }
    // All of this is completely bogus and mostly captures the following function:
    // f(output) = I-eyeballed-the(output) == they-look-ok.
    // It's pretty much a golden file... 
    // The fact that stdev doesn't increase with increasing missCount is captured outside.
    double avg = ss.getSum() / ss.getN(), stdev = ss.getStandardDeviation(), cv = stdev / avg;
    double allowedMin = avg - 2.5 * stdev, allowedMax = avg + 2.5 * stdev;
    if (allowedMin > ss.getMin() || allowedMax < ss.getMax() || cv > 0.22) {
        LOG.info("The distribution for " + locs + " locations, " + missCount + " misses isn't to "
                + "our liking: avg " + avg + ", stdev " + stdev + ", cv " + cv + ", min " + ss.getMin()
                + ", max " + ss.getMax());
        errorCount.incrementAndGet();
    }
    return cv;
}

From source file:org.nd4j.linalg.api.test.NDArrayTests.java

/**
 * Iterates over the 2x2 matrix [[1,2],[3,4]] along dimension 1 (expects row slices
 * {1,2} then {3,4}) and along dimension 0 (expects column slices {1,3} then {2,4}),
 * asserting each slice in the callback. {@code count} tracks which slice is being seen.
 */
@Test
public void testVectorDimension() {
    INDArray test = Nd4j.create(Nd4j.linspace(1, 4, 4).data(), new int[] { 2, 2 });
    final AtomicInteger count = new AtomicInteger(0);
    // row wise
    test.iterateOverDimension(1, new SliceOp() {

        /**
         * Operates on an ndarray slice
         *
         * @param nd the result to operate on
         */
        @Override
        public void operate(INDArray nd) {
            INDArray test = nd;
            if (count.get() == 0) {
                INDArray firstDimension = Nd4j.create(new float[] { 1, 2 }, new int[] { 2 });
                assertEquals(firstDimension, test);
            } else {
                INDArray firstDimension = Nd4j.create(new float[] { 3, 4 }, new int[] { 2 });
                assertEquals(firstDimension, test);

            }

            count.incrementAndGet();
        }

    }, false);

    // reset before iterating along the other dimension
    count.set(0);

    //columnwise
    test.iterateOverDimension(0, new SliceOp() {

        /**
         * Operates on an ndarray slice
         *
         * @param nd the result to operate on
         */
        @Override
        public void operate(INDArray nd) {
            log.info("Operator " + nd);
            INDArray test = nd;
            if (count.get() == 0) {
                INDArray firstDimension = Nd4j.create(new float[] { 1, 3 }, new int[] { 2 });
                assertEquals(firstDimension, test);
            } else {
                INDArray firstDimension = Nd4j.create(new float[] { 2, 4 }, new int[] { 2 });
                assertEquals(firstDimension, test);
                // NOTE(review): destroy() is called only for this second slice, not the first
                // or in the row-wise pass — looks inconsistent; confirm whether intentional.
                firstDimension.data().destroy();

            }

            count.incrementAndGet();
        }

    }, false);

    test.data().destroy();

}

From source file:org.dasein.cloud.azurepack.tests.compute.AzurePackVirtualMachineSupportTest.java

/**
 * Verifies that launching a VM from a VHD issues exactly one POST to the VM resources
 * endpoint carrying the expected {@code WAPVirtualMachineModel} payload (name, cloud and
 * stamp ids, VHD id, hardware profile, and the network adapter), and that the returned
 * {@code VirtualMachine} is mapped correctly.
 */
@Test
public void lauchVhdVMShouldSendCorrectRequest() throws CloudException, InternalException {
    final AtomicInteger postCount = new AtomicInteger(0);
    new StartOrStopVirtualMachinesRequestExecutorMockUp("Start") {
        @Mock
        public void $init(CloudProvider provider, HttpClientBuilder clientBuilder, HttpUriRequest request,
                ResponseHandler handler) {
            String requestUri = request.getURI().toString();
            if (request.getMethod().equals("POST")
                    && requestUri.equals(String.format(LIST_VM_RESOURCES, ENDPOINT, ACCOUNT_NO))) {
                requestResourceType = 21;
                // Build the payload the launch call is expected to POST.
                WAPVirtualMachineModel wapVirtualMachineModel = new WAPVirtualMachineModel();
                wapVirtualMachineModel.setName(VM_1_NAME);
                wapVirtualMachineModel.setCloudId(REGION);
                wapVirtualMachineModel.setStampId(DATACENTER_ID);
                wapVirtualMachineModel.setVirtualHardDiskId(VHD_1_ID);
                wapVirtualMachineModel.setHardwareProfileId(HWP_1_ID);

                List<WAPNewAdapterModel> adapters = new ArrayList<>();
                WAPNewAdapterModel newAdapterModel = new WAPNewAdapterModel();
                newAdapterModel.setVmNetworkName(VM_1_NETWORK_NAME);
                adapters.add(newAdapterModel);
                wapVirtualMachineModel.setNewVirtualNetworkAdapterInput(adapters);

                assertPost(request, String.format(LIST_VM_RESOURCES, ENDPOINT, ACCOUNT_NO), new Header[0],
                        wapVirtualMachineModel);
            } else {
                super.$init(provider, clientBuilder, request, handler);
            }
            responseHandler = handler;
        }

        @Mock
        public Object execute() {
            if (requestResourceType == 21) {
                postCount.incrementAndGet();
                return mapFromModel(this.responseHandler, createWAPVirtualMachineModel());
            } else {
                return super.execute();
            }
        }
    };

    VMLaunchOptions vmLaunchOptions = VMLaunchOptions.getInstance(HWP_1_ID, VHD_1_ID, VM_1_NAME,
            VM_1_DESCRIPTION);
    vmLaunchOptions.inVlan(null, DATACENTER_ID, VM_1_NETWORK_ID);
    VirtualMachine virtualMachine = azurePackVirtualMachineSupport.launch(vmLaunchOptions);
    // Fixed assertion message: this test counts launch POSTs, not terminate DELETEs
    // (the old text was copy-pasted from an unrelated terminate test).
    assertEquals("launch should send exactly 1 POST request", 1, postCount.get());
    assertVirtualMachine(virtualMachine);
}

From source file:org.apache.tez.dag.app.rm.YarnTaskSchedulerService.java

/**
 * Recomputes {@code sessionMinHeldContainers}: the container ids to keep holding for the
 * session. At most {@code sessionNumMinHeldContainers} containers are selected, spread
 * evenly first across racks (computing a per-rack quota) and then across nodes without
 * exceeding each rack's quota.
 */
synchronized void determineMinHeldContainers() {
    sessionMinHeldContainers.clear(); // rebuild the selection from scratch each time
    if (sessionNumMinHeldContainers <= 0) {
        return;
    }

    // NOTE(review): when everything fits, all held containers are added here but the
    // distribution logic below still runs — appears redundant; confirm intent.
    if (heldContainers.size() <= sessionNumMinHeldContainers) {
        sessionMinHeldContainers.addAll(heldContainers.keySet());
    }

    // rackHeldNumber: how many held containers exist per rack.
    // nodeHeldContainers: held containers grouped by node.
    Map<String, AtomicInteger> rackHeldNumber = Maps.newHashMap();
    Map<String, List<HeldContainer>> nodeHeldContainers = Maps.newHashMap();
    for (HeldContainer heldContainer : heldContainers.values()) {
        AtomicInteger count = rackHeldNumber.get(heldContainer.getRack());
        if (count == null) {
            count = new AtomicInteger(0);
            rackHeldNumber.put(heldContainer.getRack(), count);
        }
        count.incrementAndGet();
        List<HeldContainer> nodeContainers = nodeHeldContainers.get(heldContainer.getNode());
        if (nodeContainers == null) {
            nodeContainers = Lists.newLinkedList();
            nodeHeldContainers.put(heldContainer.getNode(), nodeContainers);
        }
        nodeContainers.add(heldContainer);
    }
    // rackToHoldNumber: per-rack quota of containers we intend to keep.
    Map<String, AtomicInteger> rackToHoldNumber = Maps.newHashMap();
    for (String rack : rackHeldNumber.keySet()) {
        rackToHoldNumber.put(rack, new AtomicInteger(0));
    }

    // distribute evenly across nodes
    // the loop assigns 1 container per rack over all racks
    int containerCount = 0;
    while (containerCount < sessionNumMinHeldContainers && !rackHeldNumber.isEmpty()) {
        Iterator<Entry<String, AtomicInteger>> iter = rackHeldNumber.entrySet().iterator();
        while (containerCount < sessionNumMinHeldContainers && iter.hasNext()) {
            Entry<String, AtomicInteger> entry = iter.next();
            if (entry.getValue().decrementAndGet() >= 0) {
                containerCount++;
                rackToHoldNumber.get(entry.getKey()).incrementAndGet();
            } else {
                // rack has no more held containers to give; stop considering it
                iter.remove();
            }
        }
    }

    // distribute containers evenly across nodes while not exceeding rack limit
    // the loop assigns 1 container per node over all nodes
    containerCount = 0;
    while (containerCount < sessionNumMinHeldContainers && !nodeHeldContainers.isEmpty()) {
        Iterator<Entry<String, List<HeldContainer>>> iter = nodeHeldContainers.entrySet().iterator();
        while (containerCount < sessionNumMinHeldContainers && iter.hasNext()) {
            List<HeldContainer> nodeContainers = iter.next().getValue();
            if (nodeContainers.isEmpty()) {
                // node is empty. remove it.
                iter.remove();
                continue;
            }
            HeldContainer heldContainer = nodeContainers.remove(nodeContainers.size() - 1);
            if (rackToHoldNumber.get(heldContainer.getRack()).decrementAndGet() >= 0) {
                // rack can hold a container
                containerCount++;
                sessionMinHeldContainers.add(heldContainer.getContainer().getId());
            } else {
                // rack limit reached. remove node.
                iter.remove();
            }
        }
    }

    LOG.info("Holding on to " + sessionMinHeldContainers.size() + " containers"
            + " out of total held containers: " + heldContainers.size());
}

From source file:org.apache.bookkeeper.client.LedgerHandle.java

/**
 * Runs one iteration of the ensemble-change protocol: replaces the failed bookies in the
 * ledger's last ensemble via a compare-and-set metadata update loop, then reacts to the
 * outcome (success, ledger closed by another client, ledger in recovery, or unrecoverable
 * error). If further write failures were queued while the update ran, loops again with
 * those bookies; otherwise re-sends pending writes against the new ensemble.
 *
 * @param origEnsemble  the ensemble as it was when the failures were observed
 * @param failedBookies map from ensemble index to the bookie that failed at that index
 */
void ensembleChangeLoop(List<BookieSocketAddress> origEnsemble,
        Map<Integer, BookieSocketAddress> failedBookies) {
    int ensembleChangeId = numEnsembleChanges.incrementAndGet();
    String logContext = String.format("[EnsembleChange(ledger:%d, change-id:%010d)]", ledgerId,
            ensembleChangeId);

    // when the ensemble changes are too frequent, close handle
    if (ensembleChangeId > clientCtx.getConf().maxAllowedEnsembleChanges) {
        LOG.info("{} reaches max allowed ensemble change number {}", logContext,
                clientCtx.getConf().maxAllowedEnsembleChanges);
        handleUnrecoverableErrorDuringAdd(WriteException);
        return;
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("{} Replacing {} in {}", logContext, failedBookies, origEnsemble);
    }

    // attempts counts how many times the transform lambda below is retried by the loop.
    AtomicInteger attempts = new AtomicInteger(0);
    // Precondition lambda: only proceed while the ledger is OPEN and at least one of the
    // failed bookies still occupies its slot in the last ensemble (i.e. work remains).
    new MetadataUpdateLoop(clientCtx.getLedgerManager(), getId(), this::getVersionedLedgerMetadata,
            (metadata) -> metadata.getState() == LedgerMetadata.State.OPEN
                    && failedBookies.entrySet().stream().anyMatch(e -> LedgerMetadataUtils
                            .getLastEnsembleValue(metadata).get(e.getKey()).equals(e.getValue())),
            (metadata) -> {
                attempts.incrementAndGet();

                List<BookieSocketAddress> currentEnsemble = getCurrentEnsemble();
                List<BookieSocketAddress> newEnsemble = EnsembleUtils.replaceBookiesInEnsemble(
                        clientCtx.getBookieWatcher(), metadata, currentEnsemble, failedBookies, logContext);
                Long lastEnsembleKey = LedgerMetadataUtils.getLastEnsembleKey(metadata);
                LedgerMetadataBuilder builder = LedgerMetadataBuilder.from(metadata);
                long newEnsembleStartEntry = getLastAddConfirmed() + 1;
                checkState(lastEnsembleKey <= newEnsembleStartEntry,
                        "New ensemble must either replace the last ensemble, or add a new one");
                if (LOG.isDebugEnabled()) {
                    LOG.debug("{}[attempt:{}] changing ensemble from: {} to: {} starting at entry: {}",
                            logContext, attempts.get(), currentEnsemble, newEnsemble, newEnsembleStartEntry);
                }

                // Replace the last ensemble entry in place when it starts at the same entry id;
                // otherwise append a new ensemble entry starting after the last confirmed add.
                if (lastEnsembleKey.equals(newEnsembleStartEntry)) {
                    return builder.replaceEnsembleEntry(newEnsembleStartEntry, newEnsemble).build();
                } else {
                    return builder.newEnsembleEntry(newEnsembleStartEntry, newEnsemble).build();
                }
            }, this::setLedgerMetadata).run().whenCompleteAsync((metadata, ex) -> {
                if (ex != null) {
                    LOG.warn("{}[attempt:{}] Exception changing ensemble", logContext, attempts.get(), ex);
                    handleUnrecoverableErrorDuringAdd(BKException.getExceptionCode(ex, WriteException));
                } else if (metadata.getValue().isClosed()) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug(
                                "{}[attempt:{}] Metadata closed during attempt to replace bookie."
                                        + " Another client must have recovered the ledger.",
                                logContext, attempts.get());
                    }
                    handleUnrecoverableErrorDuringAdd(BKException.Code.LedgerClosedException);
                } else if (metadata.getValue().getState() == LedgerMetadata.State.IN_RECOVERY) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug(
                                "{}[attempt:{}] Metadata marked as in-recovery during attempt to replace bookie."
                                        + " Another client must be recovering the ledger.",
                                logContext, attempts.get());
                    }

                    handleUnrecoverableErrorDuringAdd(BKException.Code.LedgerFencedException);
                } else {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("{}[attempt:{}] Success updating metadata.", logContext, attempts.get());
                    }

                    List<BookieSocketAddress> newEnsemble = null;
                    Set<Integer> replaced = null;
                    synchronized (metadataLock) {
                        if (!delayedWriteFailedBookies.isEmpty()) {
                            // More failures accumulated while we were updating: go around again.
                            Map<Integer, BookieSocketAddress> toReplace = new HashMap<>(
                                    delayedWriteFailedBookies);
                            delayedWriteFailedBookies.clear();

                            ensembleChangeLoop(origEnsemble, toReplace);
                        } else {
                            newEnsemble = getCurrentEnsemble();
                            replaced = EnsembleUtils.diffEnsemble(origEnsemble, newEnsemble);
                            LOG.info("New Ensemble: {} for ledger: {}", newEnsemble, ledgerId);

                            changingEnsemble = false;
                        }
                    }
                    if (newEnsemble != null) { // unsetSuccess outside of lock
                        unsetSuccessAndSendWriteRequest(newEnsemble, replaced);
                    }
                }
            }, clientCtx.getMainWorkerPool().chooseThread(ledgerId));
}

From source file:de.unihannover.se.processSimulation.interactive.ServerMain.java

/**
 * Runs the experiment described by {@code f} and {@code s} and streams an HTML report to
 * {@code w}: summary figures first, then a per-run details table comparing the three
 * review modes (no review / pre-commit / post-commit) on story points, cycle time, and
 * bugs found by customers.
 *
 * @param w         writer receiving the generated HTML
 * @param f         parameter factory for the experiment
 * @param s         experiment run settings
 * @param requestId id used to build the per-run detail links in the table
 */
private void simulateAndPrintOutput(PrintWriter w, BulkParameterFactory f, ExperimentRunSettings s,
        int requestId) {
    w.println("<h2>Simulation output</h2>");

    // The details table is accumulated by the per-run callback below and only emitted
    // after the summary, so it is buffered in a StringBuilder rather than written directly.
    final StringBuilder detailsTable = new StringBuilder();
    // 1-based run counter; also used to build each run's detail link.
    final AtomicInteger count = new AtomicInteger(1);
    detailsTable.append("<table border=\"1\">");
    detailsTable.append(
            "<tr><th>#</th><th colspan=\"3\">Story points</th><th colspan=\"3\">Cycle time</th><th colspan=\"3\">Bugs found by customer</th></tr>\n");
    detailsTable.append(
            "<tr><th></th><th>no</th><th>pre</th><th>post</th><th>no</th><th>pre</th><th>post</th><th>no</th><th>pre</th><th>post</th></tr>\n");
    final SingleRunCallback detailsCallback = new SingleRunCallback() {
        @Override
        public void handleResult(ExperimentResult no, ExperimentResult pre, ExperimentResult post) {
            // "no" may be null (no-review mode skipped); the cells are left empty then.
            System.err.println("run " + count + " finished");
            detailsTable.append("<tr>");
            detailsTable.append("<td><a href=\"details/").append(requestId).append("/").append(count)
                    .append("/overview\" target=\"_blank\">").append(count).append("</a></td>");
            detailsTable.append("<td>").append(no == null ? "" : no.getFinishedStoryPoints()).append("</td>");
            detailsTable.append("<td>").append(pre.getFinishedStoryPoints()).append("</td>");
            detailsTable.append("<td>").append(post.getFinishedStoryPoints()).append("</td>");
            detailsTable.append("<td>").append(no == null ? "" : no.getStoryCycleTimeMean()).append("</td>");
            detailsTable.append("<td>").append(pre.getStoryCycleTimeMean()).append("</td>");
            detailsTable.append("<td>").append(post.getStoryCycleTimeMean()).append("</td>");
            detailsTable.append("<td>").append(no == null ? "" : no.getIssueCountFoundByCustomers())
                    .append("</td>");
            detailsTable.append("<td>").append(pre.getIssueCountFoundByCustomers()).append("</td>");
            detailsTable.append("<td>").append(post.getIssueCountFoundByCustomers()).append("</td>");
            detailsTable.append("</tr>");
            count.incrementAndGet();
        }
    };

    final ExperimentRun result;
    try {
        result = ExperimentRun.perform(s, DataGenerator::runExperiment, f, detailsCallback);
    } catch (final RuntimeException e) {
        w.println("An exception occured during simulation: " + e.getMessage());
        return;
    }

    final ExperimentRunSummary summary = result.getSummary();
    w.println("Summary result - Story points: " + summary.getStoryPointsResult() + "<br/>");
    w.println("Summary result - Bugs found by customer: " + summary.getIssuesResult() + "<br/>");
    w.println("Summary result - Cycle time: " + summary.getCycleTimeResult() + "<br/>");
    if (!result.isSummaryStatisticallySignificant()) {
        w.println("Summary result not statistically significant<br/>");
    }
    w.println("Median finished stories (best alternative): " + result.getFinishedStoryMedian().toHtml()
            + "<br/>");
    w.println("Median share of productive work: " + result.getShareProductiveWork().toHtmlPercent() + "<br/>");
    w.println("Median share no review/review story points: " + result.getFactorNoReview().toHtmlPercent()
            + "<br/>");
    w.println("Median difference pre/post story points: "
            + this.formatDiff(result.getFactorStoryPoints(), "pre", "post") + "; "
            + result.getMinMaxFactorStoryPoints() + "<br/>");
    w.println("Median difference pre/post issues found by customer/story point: "
            + this.formatDiff(result.getFactorIssues(), "post", "pre") + "; " + result.getMinMaxFactorIssues()
            + "<br/>");
    w.println("Median difference pre/post cycle time: "
            + this.formatDiff(result.getFactorCycleTime(), "post", "pre") + "; "
            + result.getMinMaxFactorCycleTime() + "<br/>");
    w.println("<br/>");

    // Closing row of the details table: per-column medians across all runs.
    detailsTable.append("<tr>");
    detailsTable.append("<td></td>");
    detailsTable.append("<td>").append(result.getFinishedStoryPointsMedian(ReviewMode.NO_REVIEW).toHtml())
            .append("</td>");
    detailsTable.append("<td>").append(result.getFinishedStoryPointsMedian(ReviewMode.PRE_COMMIT).toHtml())
            .append("</td>");
    detailsTable.append("<td>").append(result.getFinishedStoryPointsMedian(ReviewMode.POST_COMMIT).toHtml())
            .append("</td>");
    detailsTable.append("<td>").append(result.getStoryCycleTimeMeanMedian(ReviewMode.NO_REVIEW).toHtml())
            .append("</td>");
    detailsTable.append("<td>").append(result.getStoryCycleTimeMeanMedian(ReviewMode.PRE_COMMIT).toHtml())
            .append("</td>");
    detailsTable.append("<td>").append(result.getStoryCycleTimeMeanMedian(ReviewMode.POST_COMMIT).toHtml())
            .append("</td>");
    detailsTable.append("<td>").append(result.getIssueCountMedian(ReviewMode.NO_REVIEW).toHtml())
            .append("</td>");
    detailsTable.append("<td>").append(result.getIssueCountMedian(ReviewMode.PRE_COMMIT).toHtml())
            .append("</td>");
    detailsTable.append("<td>").append(result.getIssueCountMedian(ReviewMode.POST_COMMIT).toHtml())
            .append("</td>");
    detailsTable.append("</tr>");
    detailsTable.append("</table>");
    w.println(detailsTable);
}

From source file:com.igormaznitsa.jute.JuteMojo.java

/**
 * Executes the next batch of tests from {@code testContainers} beginning at
 * {@code startIndex}. The batch is either a leading run of unordered tests (order &lt; 0),
 * executed sequentially, or a group of tests sharing the same order value, executed
 * concurrently on {@code CACHED_EXECUTOR} and awaited through a {@link CountDownLatch}.
 *
 * @param logStrings        shared buffer receiving per-test result lines (may be null)
 * @param maxTestNameLength column width for formatting test names in the log
 * @param testClassPath     classpath handed to each executed test
 * @param testContainers    all tests, in execution order
 * @param startIndex        index of the first test of this batch
 * @param startedCounter    incremented once per test that starts executing
 * @param errorCounter      incremented for ERROR and TIMEOUT results
 * @param skippedCounter    incremented for SKIPPED results
 * @return the number of tests consumed from the list by this call
 * @throws Exception if batch execution fails
 */
private int executeNextTestsFromList(final List<String> logStrings, final int maxTestNameLength,
        final String testClassPath, final List<TestContainer> testContainers, final int startIndex,
        final AtomicInteger startedCounter, final AtomicInteger errorCounter,
        final AtomicInteger skippedCounter) throws Exception {
    final List<TestContainer> toExecute = new ArrayList<TestContainer>();

    // detectedOrder < 0 means we are (so far) collecting unordered tests.
    int detectedOrder = -1;

    // Collect either consecutive unordered tests, or one group with the same order value.
    for (int i = startIndex; i < testContainers.size(); i++) {
        final TestContainer c = testContainers.get(i);
        if (detectedOrder < 0) {
            if (c.getOrder() < 0) {
                toExecute.add(c);
            } else {
                if (toExecute.isEmpty()) {
                    detectedOrder = c.getOrder();
                    toExecute.add(c);
                } else {
                    break;
                }
            }
        } else {
            if (c.getOrder() == detectedOrder) {
                toExecute.add(c);
            } else {
                break;
            }
        }
    }
    final CountDownLatch counterDown;

    // A latch is only needed for an ordered group of more than one test (parallel path);
    // counterDown == null signals sequential execution below.
    if (detectedOrder >= 0 && toExecute.size() > 1) {
        counterDown = new CountDownLatch(toExecute.size());
    } else {
        counterDown = null;
    }

    final List<Throwable> thrownErrors = Collections.synchronizedList(new ArrayList<Throwable>());

    for (final TestContainer container : toExecute) {
        final Runnable run = new Runnable() {
            @Override
            public void run() {
                final long startTime = System.currentTimeMillis();
                try {
                    getLog().debug("Start execution: " + container.toString());
                    startedCounter.incrementAndGet();
                    final TestResult result = container.executeTest(getLog(), onlyAnnotated, maxTestNameLength,
                            testClassPath, javaProperties, env);
                    final long endTime = System.currentTimeMillis();
                    switch (result) {
                    case ERROR:
                    case TIMEOUT: {
                        errorCounter.incrementAndGet();
                    }
                        break;
                    case SKIPPED: {
                        skippedCounter.incrementAndGet();
                    }
                        break;
                    }

                    if (logStrings != null) {
                        // Console output is attached only for failures or when explicitly requested.
                        final boolean printConsoleLog = result != TestResult.OK || container.isPrintConsole();
                        synchronized (logStrings) {
                            logStrings.addAll(makeTestResultReference(counterDown == null, container,
                                    endTime - startTime, maxTestNameLength, result,
                                    (printConsoleLog ? container.getLastTerminalOut() : null)));
                        }
                    }
                } catch (Throwable thr) {
                    getLog().debug("Error during execution " + container.toString(), thr);
                    thrownErrors.add(thr);
                } finally {
                    getLog().debug("End execution: " + container.toString());
                    if (counterDown != null) {
                        counterDown.countDown();
                    }
                }
            }
        };
        if (counterDown == null) {
            getLog().debug("Sync.execution: " + container.toString());
            run.run();
        } else {
            getLog().debug("Async.execution: " + container.toString());
            CACHED_EXECUTOR.execute(run);
        }
    }
    if (counterDown != null) {
        try {
            counterDown.await();
        } catch (InterruptedException ex) {
            getLog().error(ex);
        }
    }
    if (!thrownErrors.isEmpty()) {
        for (final Throwable thr : thrownErrors) {
            getLog().error(thr);
        }
    }

    return toExecute.size();
}

From source file:org.dasein.cloud.azurepack.tests.compute.AzurePackVirtualMachineSupportTest.java

/**
 * Verifies that launching a VM from a template issues exactly one POST to the VM resources
 * endpoint carrying the expected {@code WAPVirtualMachineModel} payload (name, cloud and
 * stamp ids, template id, Windows product key, local admin credentials, and the network
 * adapter), and that the returned {@code VirtualMachine} is mapped correctly.
 */
@Test
public void lauchTemplateVMShouldSendCorrectRequest() throws CloudException, InternalException {
    final AtomicInteger postCount = new AtomicInteger(0);
    new StartOrStopVirtualMachinesRequestExecutorMockUp("Start") {
        @Mock
        public void $init(CloudProvider provider, HttpClientBuilder clientBuilder, HttpUriRequest request,
                ResponseHandler handler) {
            String requestUri = request.getURI().toString();
            if (request.getMethod().equals("POST")
                    && requestUri.equals(String.format(LIST_VM_RESOURCES, ENDPOINT, ACCOUNT_NO))) {
                requestResourceType = 21;
                // Build the payload the launch call is expected to POST.
                WAPVirtualMachineModel wapVirtualMachineModel = new WAPVirtualMachineModel();
                wapVirtualMachineModel.setName(VM_1_NAME);
                wapVirtualMachineModel.setCloudId(REGION);
                wapVirtualMachineModel.setStampId(DATACENTER_ID);
                wapVirtualMachineModel.setVmTemplateId(TPL_1_ID);
                wapVirtualMachineModel.setProductKey(VM_1_WINDOWS_SERIAL_NUMBER);
                wapVirtualMachineModel.setLocalAdminUserName(VM_1_BOOTSTRAP_USER);
                wapVirtualMachineModel.setLocalAdminPassword(VM_1_BOOTSTRAP_PASSWORD);
                List<WAPNewAdapterModel> adapters = new ArrayList<>();
                WAPNewAdapterModel newAdapterModel = new WAPNewAdapterModel();
                newAdapterModel.setVmNetworkName(VM_1_NETWORK_NAME);
                adapters.add(newAdapterModel);
                wapVirtualMachineModel.setNewVirtualNetworkAdapterInput(adapters);

                assertPost(request, String.format(LIST_VM_RESOURCES, ENDPOINT, ACCOUNT_NO), new Header[0],
                        wapVirtualMachineModel);
            } else {
                super.$init(provider, clientBuilder, request, handler);
            }
            responseHandler = handler;
        }

        @Mock
        public Object execute() {
            if (requestResourceType == 21) {
                postCount.incrementAndGet();
                return mapFromModel(this.responseHandler, createWAPVirtualMachineModel());
            } else {
                return super.execute();
            }
        }
    };

    VMLaunchOptions vmLaunchOptions = VMLaunchOptions.getInstance(HWP_1_ID, TPL_1_ID, VM_1_NAME,
            VM_1_DESCRIPTION);
    vmLaunchOptions.inVlan(null, DATACENTER_ID, VM_1_NETWORK_ID);
    vmLaunchOptions.withWinProductSerialNum(VM_1_WINDOWS_SERIAL_NUMBER);
    vmLaunchOptions.withBootstrapUser("dummy-user-name-to-be-replaced", VM_1_BOOTSTRAP_PASSWORD);
    VirtualMachine virtualMachine = azurePackVirtualMachineSupport.launch(vmLaunchOptions);
    // Fixed assertion message: this test counts launch POSTs, not terminate DELETEs
    // (the old text was copy-pasted from an unrelated terminate test).
    assertEquals("launch should send exactly 1 POST request", 1, postCount.get());
    assertVirtualMachine(virtualMachine);
}