Example usage for java.util.concurrent.atomic AtomicInteger get

Introduction

On this page you can find example usages of java.util.concurrent.atomic.AtomicInteger.get(), collected from open-source projects.

Prototype

public final int get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
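
A minimal, self-contained sketch of the method in action (the class name and thread count below are illustrative, not taken from any of the projects listed under Usage): worker threads update the counter with incrementAndGet(), and get() reads the current value with volatile memory semantics.

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicIntegerGetExample {
    public static void main(String[] args) throws InterruptedException {
        AtomicInteger counter = new AtomicInteger(0);

        // Two threads each perform 1000 increments.
        Runnable increments = () -> {
            for (int i = 0; i < 1000; i++) {
                counter.incrementAndGet();
            }
        };
        Thread t1 = new Thread(increments);
        Thread t2 = new Thread(increments);
        t1.start();
        t2.start();
        t1.join();
        t2.join();

        // get() returns the current value; every increment happens-before the joins, so this prints 2000.
        System.out.println("counter.get() = " + counter.get());
    }
}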

Usage

From source file: com.netflix.conductor.core.execution.TestWorkflowExecutor.java

@Test
public void test() throws Exception {

    AtomicBoolean httpTaskExecuted = new AtomicBoolean(false);
    AtomicBoolean http2TaskExecuted = new AtomicBoolean(false);

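    // Creating these instances registers each task type by name (WorkflowSystemTask
    // registers itself in its constructor), so the executor can resolve the WAIT,
    // HTTP and HTTP2 handlers for the tasks scheduled below.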
    new Wait();
    new WorkflowSystemTask("HTTP") {
        @Override
        public boolean isAsync() {
            return true;
        }

        @Override
        public void start(Workflow workflow, Task task, WorkflowExecutor executor) throws Exception {
            httpTaskExecuted.set(true);
            task.setStatus(Status.COMPLETED);
            super.start(workflow, task, executor);
        }

    };

    new WorkflowSystemTask("HTTP2") {

        @Override
        public void start(Workflow workflow, Task task, WorkflowExecutor executor) throws Exception {
            http2TaskExecuted.set(true);
            task.setStatus(Status.COMPLETED);
            super.start(workflow, task, executor);
        }

    };

    Workflow workflow = new Workflow();
    workflow.setWorkflowId("1");

    TestConfiguration config = new TestConfiguration();
    MetadataDAO metadata = mock(MetadataDAO.class);
    ExecutionDAO edao = mock(ExecutionDAO.class);
    QueueDAO queue = mock(QueueDAO.class);
    ObjectMapper om = new ObjectMapper();

    WorkflowExecutor executor = new WorkflowExecutor(metadata, edao, queue, om, config);
    List<Task> tasks = new LinkedList<>();

    WorkflowTask taskToSchedule = new WorkflowTask();
    taskToSchedule.setWorkflowTaskType(Type.USER_DEFINED);
    taskToSchedule.setType("HTTP");

    WorkflowTask taskToSchedule2 = new WorkflowTask();
    taskToSchedule2.setWorkflowTaskType(Type.USER_DEFINED);
    taskToSchedule2.setType("HTTP2");

    WorkflowTask wait = new WorkflowTask();
    wait.setWorkflowTaskType(Type.WAIT);
    wait.setType("WAIT");
    wait.setTaskReferenceName("wait");

    Task task1 = SystemTask.userDefined(workflow, IDGenerator.generate(), taskToSchedule, new HashMap<>(), null,
            0);
    Task task2 = SystemTask.waitTask(workflow, IDGenerator.generate(), wait, new HashMap<>());
    Task task3 = SystemTask.userDefined(workflow, IDGenerator.generate(), taskToSchedule2, new HashMap<>(),
            null, 0);

    tasks.add(task1);
    tasks.add(task2);
    tasks.add(task3);

    when(edao.createTasks(tasks)).thenReturn(tasks);
    AtomicInteger startedTaskCount = new AtomicInteger(0);
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            startedTaskCount.incrementAndGet();
            return null;
        }
    }).when(edao).updateTask(any());

    AtomicInteger queuedTaskCount = new AtomicInteger(0);
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            String queueName = invocation.getArgumentAt(0, String.class);
            System.out.println(queueName);
            queuedTaskCount.incrementAndGet();
            return null;
        }
    }).when(queue).push(any(), any(), anyInt());

    boolean stateChanged = executor.scheduleTask(workflow, tasks);
    assertEquals(2, startedTaskCount.get());
    assertEquals(1, queuedTaskCount.get());
    assertTrue(stateChanged);
    assertFalse(httpTaskExecuted.get());
    assertTrue(http2TaskExecuted.get());
}

From source file: com.github.gfx.android.orma.example.fragment.BenchmarkFragment.java

Single<Result> startSelectAllWithRealm() {
    return Single.fromCallable(() -> {
        long result = runWithBenchmark(() -> {
            AtomicInteger count = new AtomicInteger();
            Realm realm = Realm.getDefaultInstance();
            RealmResults<RealmTodo> results = realm.where(RealmTodo.class).findAllSorted("createdTime",
                    Sort.ASCENDING);
            for (RealmTodo todo : results) {
                @SuppressWarnings("unused")
                String title = todo.getTitle();
                @SuppressWarnings("unused")
                String content = todo.getContent();
                @SuppressWarnings("unused")
                Date createdTime = todo.getCreatedTime();

                count.incrementAndGet();
            }
            if (results.size() != count.get()) {
                throw new AssertionError("unexpected get: " + count.get());
            }
            realm.close();

            Log.d(TAG, "Realm/forEachAll count: " + count);
        });
        return new Result("Realm/forEachAll", result);
    });
}

From source file: org.apache.cayenne.access.dbsync.BaseSchemaUpdateStrategy_ConcurrencyTest.java

@Test
public void testUpdateSchema_Concurrency() throws InterruptedException, ExecutionException, TimeoutException {

    final AtomicInteger counter = new AtomicInteger();
    final AtomicBoolean errors = new AtomicBoolean(false);

    final BaseSchemaUpdateStrategy strategy = new BaseSchemaUpdateStrategy() {
        @Override
        protected void processSchemaUpdate(DataNode dataNode) throws SQLException {
            counter.incrementAndGet();
        }
    };

    Collection<Future<?>> tasks = new ArrayList<>();

    for (int i = 0; i < 20; i++) {
        tasks.add(threadPool.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    strategy.updateSchema(dataNode);
                } catch (Throwable e) {
                    LOGGER.error("error in test", e);
                    errors.set(true);
                }
            }
        }));
    }

    for (Future<?> f : tasks) {
        f.get(1, TimeUnit.SECONDS);
    }

    assertFalse(errors.get());
    assertEquals(1, counter.get());
}

From source file: io.fabric8.msg.jnatsd.TestProtocol.java

@Test
public void testSendAndRecv() throws Exception {
    try (Connection c = connectionFactory.createConnection()) {
        assertFalse(c.isClosed());
        final AtomicInteger received = new AtomicInteger();
        int count = 1000;
        try (AsyncSubscription s = c.subscribeAsync("foo", new MessageHandler() {
            public void onMessage(Message msg) {
                received.incrementAndGet();
            }
        })) {
            // s.start();
            assertFalse(c.isClosed());
            for (int i = 0; i < count; i++) {
                c.publish("foo", null);
            }
            c.flush();

            try {
                Thread.sleep(500);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore the interrupt flag instead of swallowing it
            }

            assertTrue(String.format("Received (%s) != count (%s)", received, count), received.get() == count);
        }
    }
}

From source file: org.jtheque.file.FileServiceTest.java

@Test
public void exporters() {
    final AtomicInteger counter = new AtomicInteger(0);

    final Collection<String> datas = Arrays.asList("data1", "data2", "data3");

    fileService.registerExporter("no-module", new Exporter<String>() {
        @Override
        public boolean canExportTo(String fileType) {
            return "xml".equals(fileType);
        }

        @Override
        public void export(String path, Collection<String> exportedDatas) throws FileException {
            counter.incrementAndGet();

            assertEquals("path", path);

            assertEquals(datas, exportedDatas);
        }
    });

    try {
        fileService.exportDatas("no-module", "xml", "path", datas);
    } catch (FileException e) {
        fail("Exception during the export");
    }

    assertEquals(1, counter.get());
}

From source file: com.squarespace.template.HardSoftCodeLimiterTest.java

@Test
public void testBothLimits() throws CodeException {
    final AtomicInteger softCount = new AtomicInteger();
    final AtomicInteger hardCount = new AtomicInteger();
    HardSoftCodeLimiter.Handler handler = new HardSoftCodeLimiter.Handler() {

        @Override
        public void onLimit(Limit limit, HardSoftCodeLimiter limiter) throws CodeExecuteException {
            if (limit.equals(Limit.SOFT)) {
                softCount.incrementAndGet();
            }
            if (limit.equals(Limit.HARD)) {
                hardCount.incrementAndGet();
            }
        }
    };

    CodeLimiter codeLimiter = HardSoftCodeLimiter.builder().setSoftLimit(5).setHardLimit(10).setResolution(1)
            .setHandler(handler).build();

    compiler().newExecutor().template("{.repeated section @}{.even?}{@}{.or}#{.end}{.end}")
            .json("[0,1,2,3,4,5,6,7,8,9]").codeLimiter(codeLimiter).execute();

    assertEquals(softCount.get(), 1);
    assertEquals(hardCount.get(), 1);
}

From source file: com.webtide.jetty.load.generator.jenkins.LoadGeneratorBuilder.java

protected void runProcess(TaskListener taskListener, FilePath workspace, Run<?, ?> run, Launcher launcher,
        Resource resource) throws Exception {

    // -------------------------
    // listeners to get data files
    // -------------------------
    List<Resource.NodeListener> nodeListeners = new ArrayList<>();

    Path resultFilePath = Paths.get(launcher.getChannel() //
            .call(new LoadGeneratorProcessFactory.RemoteTmpFileCreate()));

    ValuesFileWriter valuesFileWriter = new ValuesFileWriter(resultFilePath);
    nodeListeners.add(valuesFileWriter);

    List<LoadGenerator.Listener> loadGeneratorListeners = new ArrayList<>();
    loadGeneratorListeners.add(valuesFileWriter);

    Path statsResultFilePath = Paths.get(launcher.getChannel() //
            .call(new LoadGeneratorProcessFactory.RemoteTmpFileCreate()));

    ArgumentListBuilder args = getArgsProcess(resource, launcher.getComputer(), taskListener, //
            run, statsResultFilePath.toString());

    String monitorUrl = getMonitorUrl(taskListener, run);

    String alpnBootVersion = getAlpnVersion();
    // well a quick marker to say we do not need alpn
    if (getTransport() == LoadGeneratorStarterArgs.Transport.HTTP //
            || getTransport() == LoadGeneratorStarterArgs.Transport.HTTPS) {
        alpnBootVersion = "N/A";
    }

    LOGGER.info("load generator args:" + args.toString());

    new LoadGeneratorProcessRunner().runProcess(taskListener, workspace, launcher, //
            this.jdkName, getCurrentNode(launcher.getComputer()), //
            nodeListeners, loadGeneratorListeners, //
            args.toList(), getJvmExtraArgs(), //
            alpnBootVersion, //
            AlpnBootVersions.getInstance().getJdkVersionAlpnBootVersion());

    String stats = workspace.child(statsResultFilePath.toString()).readToString();

    TimePerPathListener timePerPathListener = new TimePerPathListener(false);
    GlobalSummaryListener globalSummaryListener = new GlobalSummaryListener();
    // this one will use some memory for a long load test!!
    // FIXME find a way to flush that somewhere!!
    DetailledTimeReportListener detailledTimeReportListener = new DetailledTimeReportListener();

    // -----------------------------
    // handle response time reports
    // -----------------------------

    ResponsePerStatus responsePerStatus = new ResponsePerStatus();

    ResponseNumberPerPath responseNumberPerPath = new ResponseNumberPerPath();

    nodeListeners.clear();
    if (this.nodeListeners != null) {
        nodeListeners.addAll(this.nodeListeners);
    }
    nodeListeners.add(responseNumberPerPath);
    nodeListeners.add(timePerPathListener);
    nodeListeners.add(globalSummaryListener);
    nodeListeners.add(detailledTimeReportListener);
    nodeListeners.add(responsePerStatus);

    LOGGER.info("LoadGenerator parsing response result files");

    //-------------------------------------------------
    // time values
    //-------------------------------------------------
    parseTimeValues(workspace, resultFilePath, nodeListeners);

    //-------------------------------------------------
    // Monitor values
    //-------------------------------------------------
    String monitorJson = getMonitorValues(monitorUrl, taskListener);

    taskListener.getLogger().print("monitorJson: " + monitorJson);

    Map<String, Object> monitoringResultMap = null;

    try {
        monitoringResultMap = new ObjectMapper() //
                .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES) //
                .readValue(monitorJson, Map.class);
    } catch (Exception e) {
        LOGGER.warn("skip error parsing json monitoring result");
    }
    // manage results

    SummaryReport summaryReport = new SummaryReport(run.getId());

    timePerPathListener.getResponseTimePerPath().entrySet().stream().forEach(entry -> {
        String path = entry.getKey();
        Histogram histogram = entry.getValue();

        AtomicInteger number = responseNumberPerPath.getResponseNumberPerPath().get(path);
        LOGGER.debug("responseTimePerPath: {} - mean: {}ms - number: {}", //
                path, //
                TimeUnit.NANOSECONDS.toMillis(Math.round(histogram.getMean())), //
                number.get());
        summaryReport.addResponseTimeInformations(path, new CollectorInformations(histogram, //
                TimeUnit.NANOSECONDS, TimeUnit.MILLISECONDS));
    });

    timePerPathListener.getLatencyTimePerPath().entrySet().stream().forEach(entry -> {
        String path = entry.getKey();
        Histogram histogram = entry.getValue();

        AtomicInteger number = responseNumberPerPath.getResponseNumberPerPath().get(path);
        LOGGER.debug("responseTimePerPath: {} - mean: {}ms - number: {}", //
                path, //
                TimeUnit.NANOSECONDS.toMillis(Math.round(histogram.getMean())), //
                number.get());
        summaryReport.addLatencyTimeInformations(path, new CollectorInformations(histogram, //
                TimeUnit.NANOSECONDS, TimeUnit.MILLISECONDS));
    });

    // FIXME calculate score from previous build
    HealthReport healthReport = new HealthReport(30, "text");

    Map<String, List<ResponseTimeInfo>> allResponseInfoTimePerPath = new HashMap<>();

    detailledTimeReportListener.getDetailledLatencyTimeValuesReport().getEntries().stream().forEach(entry -> {
        List<ResponseTimeInfo> responseTimeInfos = allResponseInfoTimePerPath.get(entry.getPath());
        if (responseTimeInfos == null) {
            responseTimeInfos = new ArrayList<>();
            allResponseInfoTimePerPath.put(entry.getPath(), responseTimeInfos);
        }
        responseTimeInfos.add(new ResponseTimeInfo(entry.getTimeStamp(), //
                TimeUnit.NANOSECONDS.toMillis(entry.getTime()), //
                entry.getHttpStatus()));
    });

    run.addAction(new LoadGeneratorBuildAction(healthReport, //
            summaryReport, //
            new CollectorInformations(globalSummaryListener.getResponseTimeHistogram().getIntervalHistogram(),
                    //
                    TimeUnit.NANOSECONDS, TimeUnit.MILLISECONDS), //
            new CollectorInformations(globalSummaryListener.getLatencyTimeHistogram().getIntervalHistogram(),
                    //
                    TimeUnit.NANOSECONDS, TimeUnit.MILLISECONDS), //
            allResponseInfoTimePerPath, run, monitoringResultMap, stats));

    // cleanup

    getCurrentNode(launcher.getComputer()) //
            .getChannel() //
            .call(new LoadGeneratorProcessFactory.DeleteTmpFile(resultFilePath.toString()));

    LOGGER.info("LoadGenerator end");
}

From source file: com.linkedin.pinot.perf.QueryRunner.java

/**
 * Use multiple threads to run queries as fast as possible.
 *
 * Start {numThreads} worker threads to send queries (blocking call) back to back, and use the main thread to collect
 * the statistics and log them periodically.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numThreads number of threads sending queries.
 * @throws Exception
 */
@SuppressWarnings("InfiniteLoopStatement")
public static void multiThreadedsQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        final int numThreads) throws Exception {
    final long randomSeed = 123456789L;
    final Random random = new Random(randomSeed);
    final int reportIntervalMillis = 3000;

    final List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    final int numQueries = queries.size();
    final PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    final AtomicInteger counter = new AtomicInteger(0);
    final AtomicLong totalResponseTime = new AtomicLong(0L);
    final ExecutorService executorService = Executors.newFixedThreadPool(numThreads);

    final DescriptiveStatistics stats = new DescriptiveStatistics();
    final CountDownLatch latch = new CountDownLatch(numThreads);

    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    for (int j = 0; j < numQueries; j++) {
                        String query = queries.get(random.nextInt(numQueries));
                        long startTime = System.currentTimeMillis();
                        try {
                            driver.postQuery(query);
                            long clientTime = System.currentTimeMillis() - startTime;
                            synchronized (stats) {
                                stats.addValue(clientTime);
                            }

                            counter.getAndIncrement();
                            totalResponseTime.getAndAdd(clientTime);
                        } catch (Exception e) {
                            LOGGER.error("Caught exception while running query: {}", query, e);
                            return;
                        }
                    }
                } finally {
                    // Count down in finally so the reporting loop below terminates even if a query fails.
                    latch.countDown();
                }
            }
        });
    }

    executorService.shutdown();

    int iter = 0;
    long startTime = System.currentTimeMillis();
    while (latch.getCount() > 0) {
        Thread.sleep(reportIntervalMillis);
        double timePassedSeconds = ((double) (System.currentTimeMillis() - startTime)) / MILLIS_PER_SECOND;
        int count = counter.get();
        double avgResponseTime = ((double) totalResponseTime.get()) / count;
        LOGGER.info("Time Passed: {}s, Query Executed: {}, QPS: {}, Avg Response Time: {}ms", timePassedSeconds,
                count, count / timePassedSeconds, avgResponseTime);

        iter++;
        if (iter % 10 == 0) {
            printStats(stats);
        }
    }

    printStats(stats);
}

From source file: com.inqool.dcap.office.indexer.indexer.SolrBulkIndexer.java

protected SolrInputDocument modelToSolrInputDoc(ZdoModel model) {
    logger.debug("Constructing new SolrInputDocument...");

    final Map<String, SolrInputField> fields = new HashMap<>();

    //Add all Dublin Core terms
    for (String property : DCTools.getDcTermList()) {
        SolrInputField field = new SolrInputField(property);
        List<String> values = model.getAll(new PropertyImpl("http://purl.org/dc/terms/" + property));
        if (values.isEmpty())
            continue;
        //Skip fields that were not ticked to be published
        String visible = model.get(new PropertyImpl("http://purl.org/dc/terms/" + property + "_visibility"));
        if ("false".equals(visible) || "0".equals(visible)) { //0 should not occur any more
            continue;
        }
        if ("isPartOf".equals(property)) { //remove ip address from isPartOf
            values.set(0, store.getOnlyIdFromUrl(values.get(0)));
        }
        if ("".equals(values.get(0))) {
            values.set(0, "unknown");
        }

        field.addValue(values, INDEX_TIME_BOOST);
        fields.put(property, field);

        //Suggester data
        if ("title".equals(property) || "creator".equals(property)) {
            SolrInputDocument suggesterDoc = new SolrInputDocument();
            String suggestVal = values.get(0).trim();
            if (!suggestVal.isEmpty() && !suggestVal.equals("unknown")) {
                suggesterDoc.addField("suggesterData", values.get(0).trim());
                dataForSuggester.add(suggesterDoc);
            }
        }
    }

    //Add system fields
    SolrInputField field = new SolrInputField("id");
    field.addValue(store.getOnlyIdFromUrl(model.getUrl()), INDEX_TIME_BOOST);
    fields.put("id", field);

    addSolrFieldFromFedoraProperty("inventoryId", ZdoTerms.inventoryId, model, fields);

    addSolrFieldFromFedoraProperty("zdoType", ZdoTerms.zdoType, model, fields);
    addSolrFieldFromFedoraProperty("zdoGroup", ZdoTerms.group, model, fields);
    addSolrFieldFromFedoraProperty("orgIdmId", ZdoTerms.organization, model, fields);
    addSolrFieldFromFedoraProperty("allowContentPublicly", ZdoTerms.allowContentPublicly, model, fields);
    addSolrFieldFromFedoraProperty("allowPdfExport", ZdoTerms.allowPdfExport, model, fields);
    addSolrFieldFromFedoraProperty("allowEpubExport", ZdoTerms.allowEpubExport, model, fields);
    addSolrFieldFromFedoraProperty("watermark", ZdoTerms.watermark, model, fields);
    addSolrFieldFromFedoraProperty("watermarkPosition", ZdoTerms.watermarkPosition, model, fields);
    addSolrFieldFromFedoraProperty("imgThumb", ZdoTerms.imgThumb, model, fields);
    addSolrFieldFromFedoraProperty("imgNormal", ZdoTerms.imgNormal, model, fields);

    String publishFromStr = model.get(ZdoTerms.publishFrom);
    if (publishFromStr != null) {
        String publishFromUtc = ZonedDateTime
                .ofInstant(Instant.ofEpochSecond(Long.valueOf(publishFromStr)), ZoneId.systemDefault())
                .withZoneSameInstant(ZoneOffset.UTC).format(DateTimeFormatter.ISO_OFFSET_DATE_TIME);
        addSolrField("publishFrom", publishFromUtc, fields);
    }
    String publishToStr = model.get(ZdoTerms.publishTo);
    if (publishToStr != null) {
        String publishToUtc = ZonedDateTime
                .ofInstant(Instant.ofEpochSecond(Long.valueOf(publishToStr)), ZoneId.systemDefault())
                .withZoneSameInstant(ZoneOffset.UTC).format(DateTimeFormatter.ISO_OFFSET_DATE_TIME);
        addSolrField("publishTo", publishToUtc, fields);
    }

    String created = model.get(DCTerms.created);
    if (created != null) {
        AtomicInteger yearStart = new AtomicInteger();
        AtomicInteger yearEnd = new AtomicInteger();
        AtomicBoolean startValid = new AtomicBoolean();
        AtomicBoolean endValid = new AtomicBoolean();
        YearNormalizer.normalizeCreatedYear(created, yearStart, startValid, yearEnd, endValid);
        if (startValid.get()) {
            addSolrField("yearStart", yearStart.get(), fields);
        } else {
            logger.warn("Year could not be normalized for input string " + created);
        }
        if (endValid.get()) {
            addSolrField("yearEnd", yearEnd.get(), fields);
        }
    }

    String orgName = orgNameMapping.get(model.get(ZdoTerms.organization));
    if (orgName == null) {
        orgName = "Neznm";
    }
    addSolrField("organization", orgName, fields);

    String documentTypeId = model.get(ZdoTerms.documentType); //type and subtype names must be found for id
    String documentSubTypeId = model.get(ZdoTerms.documentSubType);
    if (documentTypeId != null) {
        addSolrField("documentType", documentTypeAccess.getTypeNameForId(Integer.valueOf(documentTypeId)),
                fields);
    }
    if (documentSubTypeId != null) {
        addSolrField("documentSubType",
                documentTypeAccess.getSubTypeNameForId(Integer.valueOf(documentSubTypeId)), fields);
    }

    //Add customFields
    int fieldIndex = 0; //we actually start from 1
    do {
        fieldIndex++;
        String fieldName = model
                .get(new PropertyImpl("http://inqool.cz/zdo/1.0/customField_" + fieldIndex + "_name"));
        if (fieldName == null)
            break;
        fieldName = "customField_" + fieldName;
        String visible = model
                .get(new PropertyImpl("http://inqool.cz/zdo/1.0/customField_" + fieldIndex + "_visibility"));
        if ("false".equals(visible) || "0".equals(visible))
            continue;
        List<String> fieldValues = model
                .getAll(new PropertyImpl("http://inqool.cz/zdo/1.0/customField_" + fieldIndex));
        if ("".equals(fieldValues.get(0))) {
            fieldValues.set(0, "unknown");
        }
        SolrInputField customField = new SolrInputField(fieldName);
        customField.addValue(fieldValues, INDEX_TIME_BOOST);
        fields.put(fieldName, customField);
    } while (true);

    SolrInputDocument solrInputDocument = new SolrInputDocument(fields);
    return solrInputDocument;
}

From source file: org.apache.activemq.bugs.AMQ6131Test.java

@Test(timeout = 300000)
public void testDurableWithNoMessageAfterRestartAndIndexRecovery() throws Exception {
    final File persistentDir = getPersistentDir();

    broker.getBroker().addDestination(broker.getAdminConnectionContext(), new ActiveMQTopic("durable.sub"),
            false);

    ActiveMQConnectionFactory connectionFactory = new ActiveMQConnectionFactory(this.brokerConnectURI);
    ActiveMQConnection connection = (ActiveMQConnection) connectionFactory.createConnection();
    connection.setClientID("myId");
    connection.start();
    final Session jmsSession = connection.createSession(false, javax.jms.Session.AUTO_ACKNOWLEDGE);

    TopicSubscriber durable = jmsSession.createDurableSubscriber(new ActiveMQTopic("durable.sub"), "sub");
    final MessageProducer producer = jmsSession.createProducer(new ActiveMQTopic("durable.sub"));

    final int original = new ArrayList<File>(
            FileUtils.listFiles(persistentDir, new WildcardFileFilter("*.log"), TrueFileFilter.INSTANCE))
                    .size();

    // 100k messages
    final byte[] data = new byte[100000];
    final Random random = new Random();
    random.nextBytes(data);

    // run test with enough messages to create a second journal file
    final AtomicInteger messageCount = new AtomicInteger();
    assertTrue("Should have added a journal file", Wait.waitFor(new Condition() {

        @Override
        public boolean isSatisified() throws Exception {
            final ActiveMQBytesMessage message = new ActiveMQBytesMessage();
            message.setContent(new ByteSequence(data));

            for (int i = 0; i < 100; i++) {
                producer.send(message);
                messageCount.getAndIncrement();
            }

            return new ArrayList<File>(FileUtils.listFiles(persistentDir, new WildcardFileFilter("*.log"),
                    TrueFileFilter.INSTANCE)).size() > original;
        }
    }));

    // Consume all messages
    for (int i = 0; i < messageCount.get(); i++) {
        durable.receive();
    }

    durable.close();

    assertTrue("Subscription should go inactive", Wait.waitFor(new Condition() {
        @Override
        public boolean isSatisified() throws Exception {
            return broker.getAdminView().getInactiveDurableTopicSubscribers().length == 1;
        }
    }));

    // force a GC of unneeded journal files
    getBroker().getPersistenceAdapter().checkpoint(true);

    // wait until a journal file has been GC'd after receiving messages
    assertTrue("Should have garbage collected", Wait.waitFor(new Wait.Condition() {

        @Override
        public boolean isSatisified() throws Exception {
            return new ArrayList<File>(FileUtils.listFiles(persistentDir, new WildcardFileFilter("*.log"),
                    TrueFileFilter.INSTANCE)).size() == original;
        }
    }));

    // stop the broker so we can blow away the index
    getBroker().stop();
    getBroker().waitUntilStopped();

    // delete the index so that the durables are gone from the index
    // The test passes if you take out this delete section
    for (File index : FileUtils.listFiles(persistentDir, new WildcardFileFilter("db.*"),
            TrueFileFilter.INSTANCE)) {
        FileUtils.deleteQuietly(index);
    }

    stopBroker();
    setUpBroker(false);

    assertEquals(1, broker.getAdminView().getInactiveDurableTopicSubscribers().length);
    assertEquals(0, broker.getAdminView().getDurableTopicSubscribers().length);

    ActiveMQConnectionFactory connectionFactory2 = new ActiveMQConnectionFactory(this.brokerConnectURI);
    ActiveMQConnection connection2 = (ActiveMQConnection) connectionFactory2.createConnection();
    connection2.setClientID("myId");
    connection2.start();
    final Session jmsSession2 = connection2.createSession(false, javax.jms.Session.AUTO_ACKNOWLEDGE);

    TopicSubscriber durable2 = jmsSession2.createDurableSubscriber(new ActiveMQTopic("durable.sub"), "sub");

    assertEquals(0, broker.getAdminView().getInactiveDurableTopicSubscribers().length);
    assertEquals(1, broker.getAdminView().getDurableTopicSubscribers().length);

    assertNull(durable2.receive(500));
}