Example usage for java.util.concurrent.atomic AtomicInteger get

Introduction

On this page you can find example usages of java.util.concurrent.atomic.AtomicInteger.get().

Prototype

public final int get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
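
In other words, get() is a plain volatile read: it never modifies the value. A minimal, self-contained sketch (class and variable names are illustrative, not taken from any of the projects below) contrasting it with an atomic read-modify-write:

import java.util.concurrent.atomic.AtomicInteger;

public class GetDemo {
    public static void main(String[] args) {
        AtomicInteger counter = new AtomicInteger(5);
        int snapshot = counter.get();             // volatile read, no modification
        int previous = counter.getAndIncrement(); // atomic read-modify-write
        System.out.println(snapshot);             // 5
        System.out.println(previous);             // 5 (value before the increment)
        System.out.println(counter.get());        // 6
    }
}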

Usage

From source file:interactivespaces.activity.component.ActivityComponentContextTest.java

/**
 * Make a couple of threads start running and see if they properly stop
 * running when the context signals startup successful.
 */
@Test
public void testStartupWaitWithTwoThreadsSuccess() throws Exception {
    final CountDownLatch startLatch = new CountDownLatch(2);
    final CountDownLatch stopLatch = new CountDownLatch(2);
    final AtomicInteger countAllowedHandlers = new AtomicInteger(0);
    Runnable runnable = new Runnable() {
        @Override
        public void run() {
            startLatch.countDown();

            if (context.canHandlerRun()) {
                countAllowedHandlers.incrementAndGet();
            }

            stopLatch.countDown();
        }
    };

    executor.execute(runnable);
    executor.execute(runnable);

    // Make sure they have both entered before starting the wait.
    Assert.assertTrue(startLatch.await(500, TimeUnit.MILLISECONDS));

    context.endStartupPhase(true);

    // Make sure they have both finished before checking the count.
    Assert.assertTrue(stopLatch.await(500, TimeUnit.MILLISECONDS));

    // All handlers should have been allowed.
    Assert.assertEquals(2, countAllowedHandlers.get());
}

From source file:interactivespaces.activity.component.ActivityComponentContextTest.java

/**
 * Make a couple of threads start running and see if they properly stop
 * running when the context signals startup failure.
 */
@Test
public void testStartupWaitWithTwoThreadsFailure() throws Exception {
    final CountDownLatch startLatch = new CountDownLatch(2);
    final CountDownLatch stopLatch = new CountDownLatch(2);
    final AtomicInteger countAllowedHandlers = new AtomicInteger(0);

    Runnable runnable = new Runnable() {
        @Override
        public void run() {
            startLatch.countDown();

            if (context.canHandlerRun()) {
                countAllowedHandlers.incrementAndGet();
            }

            stopLatch.countDown();
        }
    };

    executor.execute(runnable);
    executor.execute(runnable);

    // Make sure they have both entered before starting the wait.
    Assert.assertTrue(startLatch.await(500, TimeUnit.MILLISECONDS));

    context.endStartupPhase(false);

    // Make sure they have both finished before checking the count.
    Assert.assertTrue(stopLatch.await(500, TimeUnit.MILLISECONDS));

    // No handlers should have been allowed.
    Assert.assertEquals(0, countAllowedHandlers.get());
}
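
Both tests above use the same pattern: CountDownLatch gates thread entry and exit, an AtomicInteger tallies results across worker threads, and get() is only read after the latch has proven that every increment completed, so the read is race-free. A stripped-down sketch of that pattern, assuming nothing beyond the JDK (names are illustrative):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class LatchCounterSketch {
    public static void main(String[] args) throws InterruptedException {
        final int threads = 2;
        final CountDownLatch done = new CountDownLatch(threads);
        final AtomicInteger allowed = new AtomicInteger(0);
        ExecutorService executor = Executors.newFixedThreadPool(threads);
        for (int i = 0; i < threads; i++) {
            executor.execute(() -> {
                allowed.incrementAndGet();
                done.countDown();
            });
        }
        // await() establishes the happens-before edge that makes get() safe here
        if (done.await(500, TimeUnit.MILLISECONDS)) {
            System.out.println(allowed.get()); // 2
        }
        executor.shutdown();
    }
}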

From source file:dk.statsbiblioteket.util.xml.XMLStepperTest.java

public void testMultipleInner() throws XMLStreamException {
    final String XML = "<foo><bar><zoo>z1</zoo><zoo>z2</zoo></bar></foo>";
    for (final boolean step : new boolean[] { false, true }) {
        XMLStreamReader xml = xmlFactory.createXMLStreamReader(new StringReader(XML));
        XMLStepper.findTagStart(xml, "zoo");
        final AtomicInteger zooCount = new AtomicInteger(0);
        XMLStepper.iterateTags(xml, new XMLStepper.Callback() {
            @Override
            public boolean elementStart(XMLStreamReader xml, List<String> tags, String current)
                    throws XMLStreamException {
                if ("zoo".equals(current)) {
                    zooCount.incrementAndGet();
                }
                if (step) {
                    xml.next();
                    return true;
                }
                return false;
            }
        });
        assertEquals(
                "After iteration with step==" + step
                        + ", the stepper should have encountered 'zoo' the right number of times",
                2, zooCount.get());
        assertEquals(
                "After iteration with step==" + step
                        + ", the reader should be positioned at the correct end tag",
                "bar", xml.getLocalName());
    }
}
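
The AtomicInteger here is not about threads at all: anonymous inner classes (and lambdas) can only capture final or effectively-final locals, so a plain int counter cannot be mutated from inside the callback. Wrapping it in a final AtomicInteger is the standard workaround. A minimal sketch of just that capture idiom (names illustrative):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class CaptureSketch {
    public static void main(String[] args) {
        final AtomicInteger hits = new AtomicInteger(0);
        List<String> tags = Arrays.asList("foo", "zoo", "zoo", "bar");
        tags.forEach(tag -> {
            if ("zoo".equals(tag)) {
                hits.incrementAndGet(); // would not compile with a plain local int
            }
        });
        System.out.println(hits.get()); // 2
    }
}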

From source file:org.dasein.cloud.azure.tests.network.AzureLoadBalancerSupportWithMockHttpClientTest.java

@Test
public void modifyHealthCheckShouldPostCorrectRequest() throws CloudException, InternalException {
    final int portChangeTo = 8080;
    final AtomicInteger getCount = new AtomicInteger(0);
    final AtomicInteger postCount = new AtomicInteger(0);

    new MockUp<CloseableHttpClient>() {
        @Mock
        public CloseableHttpResponse execute(Invocation inv, HttpUriRequest request) throws IOException {
            if ("GET".equals(request.getMethod()) && DEFINITION_URL.equals(request.getURI().toString())) {
                getCount.incrementAndGet();
                assertGet(request, DEFINITION_URL,
                        new Header[] { new BasicHeader("x-ms-version", "2012-03-01") });

                if (getCount.get() == 1) {
                    DaseinObjectToXmlEntity<DefinitionModel> daseinEntity = new DaseinObjectToXmlEntity<DefinitionModel>(
                            createDefinitionModel("Failover", "Enabled", HC_PORT));
                    return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_OK), daseinEntity,
                            new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
                } else {
                    DaseinObjectToXmlEntity<DefinitionModel> daseinEntity = new DaseinObjectToXmlEntity<DefinitionModel>(
                            createDefinitionModel("Failover", "Enabled", portChangeTo));
                    return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_OK), daseinEntity,
                            new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
                }
            } else if ("POST".equals(request.getMethod())
                    && DEFINITIONS_URL.equals(request.getURI().toString())) {
                postCount.incrementAndGet();
                assertPost(request, DEFINITIONS_URL,
                        new Header[] { new BasicHeader("x-ms-version", "2012-03-01") },
                        createDefinitionModel("Failover", "Enabled", portChangeTo));

                DefinitionModel definitionModel = new DefinitionModel();
                definitionModel.setVersion("2");
                DaseinObjectToXmlEntity<DefinitionModel> daseinEntity = new DaseinObjectToXmlEntity<DefinitionModel>(
                        definitionModel);
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_OK), daseinEntity,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            } else {
                throw new IOException("Request is not mocked");
            }
        }
    };

    LoadBalancerHealthCheck loadBalancerHealthCheck = loadBalancerSupport.modifyHealthCheck(LB_NAME,
            HealthCheckOptions.getInstance(LB_NAME, HC_DESCRIPTION, LB_NAME, null, HC_PROTOCOL, 8080, HC_PATH,
                    9, 9, 9, 9));
    assertEquals("LoadBalancerSupport.modifyHealthCheck() post count doesn't match", 1, postCount.get());
    assertLoadBalancerHealthCheck(loadBalancerHealthCheck, portChangeTo);
}
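
This test shows a common mocking idiom: the counter records how many times the mocked endpoint was hit, branches on get() to return a different canned response for the Nth call, and get() verifies the expected call count afterwards. A hedged sketch of that idiom without the Azure/JMockit specifics (names illustrative):

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;

public class CallCountSketch {
    public static void main(String[] args) {
        final AtomicInteger getCount = new AtomicInteger(0);
        // First call sees the original state, later calls see the updated one
        Supplier<String> fakeEndpoint = () ->
                getCount.incrementAndGet() == 1 ? "original definition"
                                                : "updated definition";
        System.out.println(fakeEndpoint.get()); // original definition
        System.out.println(fakeEndpoint.get()); // updated definition
        System.out.println(getCount.get());     // 2 (assert the call count)
    }
}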

From source file:org.opennms.ng.services.poller.Poller.java

private int scheduleMatchingServices(String criteria) {
    String sql = "SELECT ifServices.nodeId AS nodeId, node.nodeLabel AS nodeLabel, ifServices.ipAddr AS ipAddr, "
            + "ifServices.serviceId AS serviceId, service.serviceName AS serviceName, ifServices.status as status, "
            + "outages.svcLostEventId AS svcLostEventId, events.eventUei AS svcLostEventUei, "
            + "outages.ifLostService AS ifLostService, outages.ifRegainedService AS ifRegainedService "
            + "FROM ifServices " + "JOIN node ON ifServices.nodeId = node.nodeId "
            + "JOIN service ON ifServices.serviceId = service.serviceId " + "LEFT OUTER JOIN outages ON "
            + "ifServices.nodeId = outages.nodeId AND " + "ifServices.ipAddr = outages.ipAddr AND "
            + "ifServices.serviceId = outages.serviceId AND " + "ifRegainedService IS NULL "
            + "LEFT OUTER JOIN events ON outages.svcLostEventId = events.eventid "
            + "WHERE ifServices.status in ('A','N')" + (criteria == null ? "" : " AND " + criteria);

    final AtomicInteger count = new AtomicInteger(0);

    Querier querier = new Querier(m_dataSource, sql) {
        @Override
        public void processRow(ResultSet rs) throws SQLException {
            if (scheduleService(rs.getInt("nodeId"), rs.getString("nodeLabel"), rs.getString("ipAddr"),
                    rs.getString("serviceName"), "A".equals(rs.getString("status")),
                    (Number) rs.getObject("svcLostEventId"), rs.getTimestamp("ifLostService"),
                    rs.getString("svcLostEventUei"))) {
                count.incrementAndGet();
            }
        }
    };
    querier.execute();

    return count.get();
}
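
Here the AtomicInteger accumulates inside a row callback and get() returns the final tally. Querier is OpenNMS-specific, so the sketch below stands in a plain callback over an in-memory list; the shape of the idiom is the same (names illustrative):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;

public class RowCountSketch {
    static int scheduleMatching(List<String> rows, Predicate<String> schedule) {
        final AtomicInteger count = new AtomicInteger(0);
        rows.forEach(row -> {
            if (schedule.test(row)) {   // only successfully scheduled rows count
                count.incrementAndGet();
            }
        });
        return count.get();             // safe: all increments ran on this thread
    }

    public static void main(String[] args) {
        List<String> rows = Arrays.asList("A", "N", "A");
        System.out.println(scheduleMatching(rows, "A"::equals)); // 2
    }
}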

From source file:com.streamsets.pipeline.stage.origin.spooldir.TestSpoolDirSource.java

@Test
public void testAdvanceToNextSpoolFile() throws Exception {
    TSpoolDirSource source = createSource(null);
    PushSourceRunner runner = new PushSourceRunner.Builder(TSpoolDirSource.class, source).addOutputLane("lane")
            .build();

    File file1 = new File(source.spoolDir, "file-0.log").getAbsoluteFile();
    Files.createFile(file1.toPath());
    File file2 = new File(source.spoolDir, "file-1.log").getAbsoluteFile();
    Files.createFile(file2.toPath());

    source.file = file1;
    source.offset = 0;
    source.maxBatchSize = 10;

    AtomicInteger batchCount = new AtomicInteger(0);
    runner.runInit();

    try {
        runner.runProduce(ImmutableMap.of(Source.POLL_SOURCE_OFFSET_KEY, "file-0.log::0"), 10, output -> {
            batchCount.incrementAndGet();

            TSpoolDirRunnable runnable = source.getTSpoolDirRunnable();
            if (batchCount.get() == 1) {
                Assert.assertEquals("file-0.log", output.getOffsetEntity());
                Assert.assertEquals("{\"POS\":\"0\"}", output.getNewOffset());
                Assert.assertTrue(runnable.produceCalled);

                Assert.assertEquals(1, runner.getEventRecords().size());
                Assert.assertEquals("new-file", runner.getEventRecords().get(0).getEventType());

                runnable.produceCalled = false;
                runnable.offsetIncrement = -1;
            } else if (batchCount.get() == 2) {
                Assert.assertEquals("file-0.log", output.getOffsetEntity());
                Assert.assertEquals("{\"POS\":\"-1\"}", output.getNewOffset());
                Assert.assertTrue(runnable.produceCalled);
                Assert.assertEquals(2, runner.getEventRecords().size());
                Assert.assertEquals("new-file", runner.getEventRecords().get(0).getEventType());

                Assert.assertEquals("finished-file", runner.getEventRecords().get(1).getEventType());
                Assert.assertEquals(0, runner.getEventRecords().get(1).get("/error-count").getValueAsInteger());
                Assert.assertEquals(0,
                        runner.getEventRecords().get(1).get("/record-count").getValueAsInteger());

                runnable.file = file2;

            } else if (batchCount.get() == 4) {
                runnable.produceCalled = false;
                runnable.offset = 0;
                runnable.offsetIncrement = 0;
                runner.setStop();
            } else if (batchCount.get() > 4) {
                runner.setStop();
            }
        });

        runner.waitOnProduce();

        Assert.assertEquals(4, batchCount.get());

        TestOffsetUtil.compare("file-1.log::-1", runner.getOffsets());
        Assert.assertFalse(source.produceCalled);

        // 2 each of new-file and finished-file and 1 no-more-data
        Assert.assertEquals(5, runner.getEventRecords().size());

        // check for LineageEvents.
        List<LineageEvent> events = runner.getLineageEvents();
        Assert.assertEquals(2, events.size());
        Assert.assertEquals(LineageEventType.ENTITY_READ, events.get(0).getEventType());
        Assert.assertEquals(LineageEventType.ENTITY_READ, events.get(1).getEventType());

        Assert.assertTrue(events.get(0).getSpecificAttribute(LineageSpecificAttribute.ENTITY_NAME)
                .contains("file-0.log"));
        Assert.assertTrue(events.get(1).getSpecificAttribute(LineageSpecificAttribute.ENTITY_NAME)
                .contains("file-1.log"));
    } finally {
        runner.runDestroy();
    }
}
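
The spool-dir test drives its assertions by batch number: each callback invocation bumps batchCount, branches on get() to check per-batch state, and stops the runner after a fixed count. A hedged sketch of that control pattern, with a plain loop standing in for the runner (names illustrative):

import java.util.concurrent.atomic.AtomicInteger;

public class BatchCountSketch {
    public static void main(String[] args) {
        final AtomicInteger batchCount = new AtomicInteger(0);
        boolean stopped = false;
        while (!stopped) {
            batchCount.incrementAndGet();        // one "batch" produced
            if (batchCount.get() == 1) {
                System.out.println("first batch: assert initial offset");
            } else if (batchCount.get() >= 4) {
                stopped = true;                  // stand-in for runner.setStop()
            }
        }
        System.out.println(batchCount.get());    // 4
    }
}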

From source file:com.microsoft.onedrive.apiexplorer.ItemFragment.java

/**
 * Creates a link on this item
 * @param item The item to create a link for
 */
private void createLink(final Item item) {
    final CharSequence[] items = { "view", "edit" };
    final int nothingSelected = -1;
    final AtomicInteger selection = new AtomicInteger(nothingSelected);
    final AlertDialog alertDialog = new AlertDialog.Builder(getActivity()).setTitle(R.string.create_link)
            .setIcon(android.R.drawable.ic_menu_share)
            .setPositiveButton(R.string.create_link, new DialogInterface.OnClickListener() {
                @Override
                public void onClick(final DialogInterface dialog, final int which) {
                    if (selection.get() == nothingSelected) {
                        return;
                    }

                    final BaseApplication application = (BaseApplication) getActivity().getApplication();
                    application.getOneDriveClient().getDrive().getItems(item.id)
                            .getCreateLink(items[selection.get()].toString()).buildRequest()
                            .create(new DefaultCallback<Permission>(getActivity()) {
                                @Override
                                public void success(final Permission permission) {
                                    final ClipboardManager cm = (ClipboardManager) getActivity()
                                            .getSystemService(Context.CLIPBOARD_SERVICE);
                                    final ClipData data = ClipData.newPlainText("Link Url",
                                            permission.link.webUrl);
                                    cm.setPrimaryClip(data);
                                    Toast.makeText(getActivity(), application.getString(R.string.created_link),
                                            Toast.LENGTH_LONG).show();
                                    getActivity().onBackPressed();
                                }
                            });
                }
            }).setSingleChoiceItems(items, 0, new DialogInterface.OnClickListener() {
                @Override
                public void onClick(final DialogInterface dialog, final int which) {
                    selection.set(which);
                }
            }).setNegativeButton(R.string.cancel, new DialogInterface.OnClickListener() {
                @Override
                public void onClick(final DialogInterface dialog, final int which) {
                    dialog.cancel();
                }
            }).create();
    alertDialog.show();
}
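
In this example AtomicInteger is not a counter at all: it is a final, mutable int "box" shared between two listeners, written with set(which) in the choice listener and read with get() in the confirm handler. A minimal sketch of that holder use, assuming only the JDK (names illustrative):

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.IntConsumer;

public class SelectionHolderSketch {
    public static void main(String[] args) {
        final int nothingSelected = -1;
        final AtomicInteger selection = new AtomicInteger(nothingSelected);
        IntConsumer onItemPicked = selection::set;   // e.g. a single-choice listener
        Runnable onConfirm = () -> {                 // e.g. the positive button
            if (selection.get() == nothingSelected) {
                System.out.println("nothing selected");
            } else {
                System.out.println("selected index " + selection.get());
            }
        };
        onItemPicked.accept(1);
        onConfirm.run(); // selected index 1
    }
}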

From source file:com.streamsets.pipeline.stage.cloudstorage.destination.GoogleCloudStorageTarget.java

@Override
public void write(Batch batch) throws StageException {
    String pathExpression = GcsUtil.normalizePrefix(gcsTargetConfig.commonPrefix)
            + gcsTargetConfig.partitionTemplate;
    if (gcsTargetConfig.dataFormat == DataFormat.WHOLE_FILE) {
        handleWholeFileFormat(batch, elVars);
    } else {
        Multimap<String, Record> pathToRecordMap = ELUtils.partitionBatchByExpression(partitionEval, elVars,
                pathExpression, timeDriverElEval, elVars, gcsTargetConfig.timeDriverTemplate,
                Calendar.getInstance(TimeZone.getTimeZone(ZoneId.of(gcsTargetConfig.timeZoneID))), batch);

        pathToRecordMap.keySet().forEach(path -> {
            Collection<Record> records = pathToRecordMap.get(path);
            String fileName = GcsUtil.normalizePrefix(path) + gcsTargetConfig.fileNamePrefix + '_'
                    + UUID.randomUUID();
            if (StringUtils.isNotEmpty(gcsTargetConfig.fileNameSuffix)) {
                fileName = fileName + "." + gcsTargetConfig.fileNameSuffix;
            }
            try {
                ByteArrayOutputStream bOut = new ByteArrayOutputStream();
                OutputStream os = bOut;
                if (gcsTargetConfig.compress) {
                    fileName = fileName + ".gz";
                    os = new GZIPOutputStream(bOut);
                }
                BlobId blobId = BlobId.of(gcsTargetConfig.bucketTemplate, fileName);
                BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType(getContentType()).build();
                final AtomicInteger recordsWithoutErrors = new AtomicInteger(0);
                try (DataGenerator dg = gcsTargetConfig.dataGeneratorFormatConfig.getDataGeneratorFactory()
                        .getGenerator(os)) {
                    records.forEach(record -> {
                        try {
                            dg.write(record);
                            recordsWithoutErrors.incrementAndGet();
                        } catch (DataGeneratorException | IOException e) {
                            LOG.error("Error writing record {}. Reason {}", record.getHeader().getSourceId(),
                                    e);
                            getContext().toError(record, Errors.GCS_02, record.getHeader().getSourceId(), e);
                        }
                    });
                } catch (IOException e) {
                    LOG.error("Error happened when creating Output stream. Reason {}", e);
                    records.forEach(record -> getContext().toError(record, e));
                }

                try {
                    if (recordsWithoutErrors.get() > 0) {
                        Blob blob = storage.create(blobInfo, bOut.toByteArray());
                        GCSEvents.GCS_OBJECT_WRITTEN.create(getContext())
                                .with(GCSEvents.BUCKET, blob.getBucket())
                                .with(GCSEvents.OBJECT_KEY, blob.getName())
                                .with(GCSEvents.RECORD_COUNT, recordsWithoutErrors.longValue()).createAndSend();
                    }
                } catch (StorageException e) {
                    LOG.error("Error happened when writing to Output stream. Reason {}", e);
                    records.forEach(record -> getContext().toError(record, e));
                }
            } catch (IOException e) {
                LOG.error("Error happened when creating Output stream. Reason {}", e);
                records.forEach(record -> getContext().toError(record, e));
            }
        });
    }
}
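
Note the guard: the blob is only uploaded if recordsWithoutErrors.get() > 0, and longValue() widens the count for the event payload. A hedged sketch of that "count successes, act only if any" idiom, with printing standing in for the GCS upload (names illustrative):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class SuccessGuardSketch {
    public static void main(String[] args) {
        final AtomicInteger recordsWithoutErrors = new AtomicInteger(0);
        List<String> records = Arrays.asList("ok", "bad", "ok");
        records.forEach(record -> {
            if (!"bad".equals(record)) {      // stand-in for a write that may throw
                recordsWithoutErrors.incrementAndGet();
            }
        });
        if (recordsWithoutErrors.get() > 0) { // skip the upload for an empty batch
            System.out.println("uploading " + recordsWithoutErrors.longValue() + " records");
        }
    }
}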

From source file:org.apache.hadoop.hbase.mapreduce.TestLoadIncrementalHFilesSplitRecovery.java

/**
 * This test splits a table and attempts to bulk load.  The bulk import files
 * should be split before atomically importing.
 */
@Test
public void testGroupOrSplitPresplit() throws Exception {
    final String table = "groupOrSplitPresplit";
    setupTable(table, 10);
    populateTable(table, 1);
    assertExpectedTable(table, ROWCOUNT, 1);
    forceSplit(table);

    final AtomicInteger countedLqis = new AtomicInteger();
    LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration()) {
        protected List<LoadQueueItem> groupOrSplit(Multimap<ByteBuffer, LoadQueueItem> regionGroups,
                final LoadQueueItem item, final HTable htable, final Pair<byte[][], byte[][]> startEndKeys)
                throws IOException {
            List<LoadQueueItem> lqis = super.groupOrSplit(regionGroups, item, htable, startEndKeys);
            if (lqis != null) {
                countedLqis.addAndGet(lqis.size());
            }
            return lqis;
        }
    };

    // create HFiles for different column families
    Path bulk = buildBulkFiles(table, 2);
    HTable ht = new HTable(util.getConfiguration(), Bytes.toBytes(table));
    lih.doBulkLoad(bulk, ht);

    assertExpectedTable(table, ROWCOUNT, 2);
    assertEquals(20, countedLqis.get());
}
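
Unlike the earlier examples, this test tallies with addAndGet(n) rather than incrementAndGet(), adding the size of each returned batch in a single atomic step before get() checks the total. A minimal sketch (names illustrative):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class AddAndGetSketch {
    public static void main(String[] args) {
        final AtomicInteger counted = new AtomicInteger();
        List<List<String>> batches = Arrays.asList(
                Arrays.asList("a", "b"), Arrays.asList("c"));
        batches.forEach(batch -> counted.addAndGet(batch.size())); // one atomic add per batch
        System.out.println(counted.get()); // 3
    }
}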

From source file:org.apache.hadoop.hbase.client.HBaseFsck.java

/**
 * Contacts the master and prints out cluster-wide information
 * @throws IOException if a remote or network exception occurs
 * @return 0 on success, non-zero on failure
 */
int doWork() throws IOException, InterruptedException {

    if (initAndScanRootMeta() == -1) {
        return -1;
    }

    // get a list of all tables that have not changed recently.
    AtomicInteger numSkipped = new AtomicInteger(0);
    HTableDescriptor[] allTables = getTables(numSkipped);
    errors.print("Number of Tables: " + allTables.length);
    if (details) {
        if (numSkipped.get() > 0) {
            errors.detail("\n Number of Tables in flux: " + numSkipped.get());
        }
        for (HTableDescriptor td : allTables) {
            String tableName = td.getNameAsString();
            errors.detail("\t Table: " + tableName + "\t" + (td.isReadOnly() ? "ro" : "rw") + "\t"
                    + (td.isRootRegion() ? "ROOT" : (td.isMetaRegion() ? "META" : "    ")) + "\t" + " families:"
                    + td.getFamilies().size());
        }
    }

    // From the master, get a list of all known live region servers
    Collection<HServerInfo> regionServers = status.getServerInfo();
    errors.print("Number of live region servers:" + regionServers.size());
    if (details) {
        for (HServerInfo rsinfo : regionServers) {
            errors.detail("\t RegionServer:" + rsinfo.getServerName());
        }
    }

    // From the master, get a list of all dead region servers
    Collection<String> deadRegionServers = status.getDeadServerNames();
    errors.print("Number of dead region servers:" + deadRegionServers.size());
    if (details) {
        for (String name : deadRegionServers) {
            errors.detail("\t RegionServer(dead):" + name);
        }
    }

    // Determine what's deployed
    scanRegionServers(regionServers);

    // Determine what's on HDFS
    scanHdfs();

    // finish all async tasks before analyzing what we have
    finishAsyncWork();

    // Check consistency
    checkConsistency();

    // Check integrity
    checkIntegrity();

    // Check if information in .regioninfo and .META. is consistent
    if (checkRegionInfo) {
        checkRegionInfo();
    }

    // Print table summary
    printTableSummary();

    return errors.summarize();
}
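
Here doWork() uses AtomicInteger as a mutable out-parameter: getTables(numSkipped) both returns its result and reports a secondary count through the argument, which the caller then reads with get(). A hedged sketch of the same shape, assuming only the JDK (names and the "-influx" filter are illustrative):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;

public class OutParamSketch {
    static List<String> stableTables(List<String> all, AtomicInteger numSkipped) {
        List<String> stable = all.stream()
                .filter(t -> !t.endsWith("-influx"))
                .collect(Collectors.toList());
        numSkipped.set(all.size() - stable.size()); // report via the out-parameter
        return stable;
    }

    public static void main(String[] args) {
        AtomicInteger numSkipped = new AtomicInteger(0);
        List<String> tables = stableTables(
                Arrays.asList("users", "orders-influx", "logs"), numSkipped);
        System.out.println(tables.size());                     // 2
        if (numSkipped.get() > 0) {
            System.out.println("skipped " + numSkipped.get()); // skipped 1
        }
    }
}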