Example usage for java.util.concurrent.atomic AtomicInteger incrementAndGet

Introduction

This page lists example usages of java.util.concurrent.atomic.AtomicInteger.incrementAndGet().

Prototype

public final int incrementAndGet() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd, and returns the updated value. Equivalent to addAndGet(1).
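
As a quick, self-contained sketch (not taken from the usage examples below; class and variable names are illustrative), incrementAndGet() is typically used as a thread-safe shared counter: each call atomically bumps the value and returns the result, so concurrent threads never lose an update.

import java.util.concurrent.atomic.AtomicInteger;

public class IncrementAndGetExample {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger(0);

        // Each thread increments the shared counter 1000 times.
        // incrementAndGet() is atomic, so no explicit locking is needed.
        Runnable task = () -> {
            for (int i = 0; i < 1000; i++) {
                counter.incrementAndGet(); // returns the updated value
            }
        };

        Thread t1 = new Thread(task);
        Thread t2 = new Thread(task);
        t1.start();
        t2.start();
        t1.join();
        t2.join();

        System.out.println(counter.get()); // prints 2000
    }
}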

Usage

From source file:com.zaubersoftware.gnip4j.http.ReconnectionTest.java

/** test */
@Test //(timeout = 10000)
public void testReConnection() throws Exception {
    // ignore framework warnings
    final Logger root = Logger.getRootLogger();
    root.setLevel(Level.OFF);

    final AtomicInteger count = new AtomicInteger(0);
    final DefaultGnipStream stream = new DefaultGnipStream(new MockRemoteResourceProvider(), "test", 1,
            new MockExecutorService());
    final StringBuilder out = new StringBuilder();
    final StreamNotification n = new StreamNotification() {
        @Override
        public void notifyReConnectionError(final GnipException e) {
            out.append(String.format("ReConnectionError: %s\n", e.getMessage()));
        }

        @Override
        public void notifyReConnectionAttempt(final int attempt, final long waitTime) {
            out.append(String.format("Connection attempt %d wait time %d\n", attempt, waitTime));
        }

        @Override
        public void notifyConnectionError(final TransportGnipException e) {
            out.append(String.format("ConnectionError: %s\n", e.getMessage()));
        }

        @Override
        public void notify(final Activity activity, final GnipStream stream) {
            out.append(activity.getBody() + "\n");
            if (count.incrementAndGet() >= 4) {
                stream.close();
            }
        }
    };
    stream.open(n);
    stream.await();
    final String s = out.toString();
    final String expected = IOUtils
            .toString(getClass().getClassLoader().getResourceAsStream("reconnectlog.txt"));
    Assert.assertEquals(expected, s);
    Assert.assertEquals(
            "transferedBytes = 8000\ntransferedActivities = 4\n"
                    + "numberOfSucessfulReconnections = 1\nnumberOfReconnections = 4",
            stream.getStreamStats().toString());
}

From source file:org.apache.tinkerpop.gremlin.groovy.engine.GremlinExecutorTest.java

@Test
public void shouldFailUntilImportExecutes() throws Exception {
    final GremlinExecutor gremlinExecutor = GremlinExecutor.build().create();

    final Set<String> imports = new HashSet<String>() {
        {
            add("import java.awt.Color");
        }
    };

    final AtomicInteger successes = new AtomicInteger(0);
    final AtomicInteger failures = new AtomicInteger(0);

    // issue 1000 scripts in one thread using a class that isn't imported.  this will result in failure.
    // while that thread is running start a new thread that issues an addImports to include that class.
    // this should block further evals in the first thread until the import is complete at which point
    // evals in the first thread will resume and start to succeed
    final Thread t1 = new Thread(
            () -> IntStream.range(0, 1000).mapToObj(i -> gremlinExecutor.eval("Color.BLACK")).forEach(f -> {
                f.exceptionally(t -> failures.incrementAndGet()).join();
                if (!f.isCompletedExceptionally())
                    successes.incrementAndGet();
            }));

    final Thread t2 = new Thread(() -> {
        while (failures.get() < 500) {
        }
        gremlinExecutor.getScriptEngines().addImports(imports);
    });

    t1.start();
    t2.start();

    t1.join();
    t2.join();

    assertTrue(successes.intValue() > 0);
    assertTrue(failures.intValue() >= 500);

    gremlinExecutor.close();
}

From source file:com.streamsets.pipeline.stage.origin.spooldir.TestSpoolDirSource.java

@Test
public void testWithMultipleThreadsInitialOffsets() throws Exception {
    // set up multiple test files
    File f = new File("target", UUID.randomUUID().toString());
    Assert.assertTrue(f.mkdirs());

    final int numFiles = 10;
    for (int i = 0; i < numFiles; i++) {
        FileOutputStream outputStream = new FileOutputStream(
                new File(f.getAbsolutePath(), "file-" + i + ".log"));
        // each file has 5 lines
        IOUtils.writeLines(ImmutableList.of("1", "2", "3", "4", "5"), "\n", outputStream);
        outputStream.close();
    }

    // Simulate that file-0.log and file-3.log were fully processed, file-2.log had 1 line
    // processed (offset 2), and file-1.log will be skipped because it sorts before file-3.log
    Map<String, String> lastSourceOffsetMap = ImmutableMap.of(SpoolDirSource.OFFSET_VERSION, OFFSET_VERSION_ONE,
            "file-0.log", "{\"POS\":\"-1\"}", "file-2.log", "{\"POS\":\"2\"}", "file-3.log",
            "{\"POS\":\"-1\"}");

    SpoolDirConfigBean conf = new SpoolDirConfigBean();
    conf.dataFormat = DataFormat.TEXT;
    conf.spoolDir = f.getAbsolutePath();
    conf.batchSize = 10;
    conf.overrunLimit = 100;
    conf.poolingTimeoutSecs = 1;
    conf.filePattern = "file-[0-9].log";
    conf.pathMatcherMode = PathMatcherMode.GLOB;
    conf.maxSpoolFiles = 10;
    conf.initialFileToProcess = null;
    conf.dataFormatConfig.compression = Compression.NONE;
    conf.dataFormatConfig.filePatternInArchive = "*";
    conf.errorArchiveDir = null;
    conf.postProcessing = PostProcessingOptions.NONE;
    conf.retentionTimeMins = 10;
    conf.dataFormatConfig.textMaxLineLen = 10;
    conf.dataFormatConfig.onParseError = OnParseError.ERROR;
    conf.dataFormatConfig.maxStackTraceLines = 0;
    conf.allowLateDirectory = false;
    conf.numberOfThreads = 10;

    SpoolDirSource source = new SpoolDirSource(conf);
    PushSourceRunner runner = new PushSourceRunner.Builder(SpoolDirDSource.class, source).addOutputLane("lane")
            .build();

    AtomicInteger batchCount = new AtomicInteger(0);
    final List<Record> records = Collections.synchronizedList(new ArrayList<>(10));
    runner.runInit();

    final int maxBatchSize = 10;

    try {
        runner.runProduce(lastSourceOffsetMap, maxBatchSize, output -> {
            batchCount.incrementAndGet();

            synchronized (records) {
                records.addAll(output.getRecords().get("lane"));
            }

            if (records.size() == 34 || batchCount.get() > 10) {
                runner.setStop();
            }
        });

        runner.waitOnProduce();
        Assert.assertTrue(batchCount.get() > 1);
        TestOffsetUtil.compare("file-9.log::-1", runner.getOffsets());
        Assert.assertEquals(34, records.size());
    } finally {
        runner.runDestroy();
    }

}

From source file:org.apache.hadoop.hbase.regionserver.wal.TestWALReplay.java

/**
 * Test writing edits into an HRegion, closing it, splitting logs, opening
 * Region again.  Verify seqids.
 * @throws IOException
 * @throws IllegalAccessException
 * @throws NoSuchFieldException
 * @throws IllegalArgumentException
 * @throws SecurityException
 */
@Test
public void testReplayEditsWrittenViaHRegion() throws IOException, SecurityException, IllegalArgumentException,
        NoSuchFieldException, IllegalAccessException, InterruptedException {
    final TableName tableName = TableName.valueOf("testReplayEditsWrittenViaHRegion");
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName);
    deleteDir(basedir);
    final byte[] rowName = tableName.getName();
    final int countPerFamily = 10;
    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region3 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
    HRegion.closeHRegion(region3);
    // Write countPerFamily edits into the three families.  Do a flush on one
    // of the families during the load of edits so its seqid is not the same as
    // the others, to test that we do the right thing when seqids differ.
    HLog wal = createWAL(this.conf);
    HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal);
    long seqid = region.getOpenSeqNum();
    boolean first = true;
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
        if (first) {
            // Flush after the first family so at least one family has a different seqid from the rest.
            region.flushcache();
            first = false;
        }
    }
    // Now assert edits made it in.
    final Get g = new Get(rowName);
    Result result = region.get(g);
    assertEquals(countPerFamily * htd.getFamilies().size(), result.size());
    // Now close the region (without flush), split the log, reopen the region and assert that
    // replay of log has the correct effect, that our seqids are calculated correctly so
    // all edits in logs are seen as 'stale'/old.
    region.close(true);
    wal.close();
    runWALSplit(this.conf);
    HLog wal2 = createWAL(this.conf);
    HRegion region2 = HRegion.openHRegion(conf, this.fs, hbaseRootDir, hri, htd, wal2);
    long seqid2 = region2.getOpenSeqNum();
    assertTrue(seqid + result.size() < seqid2);
    final Result result1b = region2.get(g);
    assertEquals(result.size(), result1b.size());

    // Next test.  Add more edits, then 'crash' this region by stealing its wal
    // out from under it and assert that replay of the log adds the edits back
    // correctly when region is opened again.
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region2, "y");
    }
    // Get count of edits.
    final Result result2 = region2.get(g);
    assertEquals(2 * result.size(), result2.size());
    wal2.sync();
    // Lower the maximum recovery error count so the dfsclient doesn't linger retrying something
    // long gone.
    HBaseTestingUtility.setMaxRecoveryErrorCount(((FSHLog) wal2).getOutputStream(), 1);
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtility.getDifferentUser(newConf, tableName.getNameAsString());
    user.runAs(new PrivilegedExceptionAction() {
        public Object run() throws Exception {
            runWALSplit(newConf);
            FileSystem newFS = FileSystem.get(newConf);
            // Make a new wal for new region open.
            HLog wal3 = createWAL(newConf);
            final AtomicInteger countOfRestoredEdits = new AtomicInteger(0);
            HRegion region3 = new HRegion(basedir, wal3, newFS, newConf, hri, htd, null) {
                @Override
                protected boolean restoreEdit(Store s, KeyValue kv) {
                    boolean b = super.restoreEdit(s, kv);
                    countOfRestoredEdits.incrementAndGet();
                    return b;
                }
            };
            long seqid3 = region3.initialize();
            Result result3 = region3.get(g);
            // Assert that count of cells is same as before crash.
            assertEquals(result2.size(), result3.size());
            assertEquals(htd.getFamilies().size() * countPerFamily, countOfRestoredEdits.get());

            // We can't close wal1; it was appropriated when we split.
            region3.close();
            wal3.closeAndDelete();
            return null;
        }
    });
}

From source file:com.sixt.service.framework.kafka.messaging.KafkaFailoverIntegrationTest.java

@Test
public void manualKafkaTest() throws InterruptedException {

    ServiceProperties serviceProperties = fillServiceProperties();

    // Topics are created with 3 partitions - see docker-compose-kafkafailover-integrationtest.yml
    Topic ping = new Topic("ping");
    Topic pong = new Topic("pong");

    AtomicInteger sentMessages = new AtomicInteger(0);
    AtomicInteger sendFailures = new AtomicInteger(0);
    AtomicInteger receivedMessages = new AtomicInteger(0);

    Producer producer = new ProducerFactory(serviceProperties).createProducer();

    final AtomicBoolean produceMessages = new AtomicBoolean(true);

    // Produce messages until test tells producer to stop.
    ExecutorService producerExecutor = Executors.newSingleThreadExecutor();
    producerExecutor.submit(new Runnable() {
        @Override
        public void run() {
            OrangeContext context = new OrangeContext();
            Sleeper sleeper = new Sleeper();

            while (produceMessages.get()) {
                try {

                    String key = RandomStringUtils.randomAscii(5);
                    SayHelloToCmd payload = SayHelloToCmd.newBuilder().setName(key).build();

                    Message request = Messages.requestFor(ping, pong, key, payload, context);
                    producer.send(request);
                    sentMessages.incrementAndGet();

                    sleeper.sleepNoException(1000);
                } catch (Throwable t) {
                    sendFailures.incrementAndGet();
                    logger.error("Caught exception in producer loop", t);
                }
            }
        }
    });

    Consumer consumer = consumerFactoryWithHandler(serviceProperties, SayHelloToCmd.class,
            new MessageHandler<SayHelloToCmd>() {
                @Override
                public void onMessage(Message<SayHelloToCmd> message, OrangeContext context) {
                    receivedMessages.incrementAndGet();
                }
            }).consumerForTopic(ping, new DiscardFailedMessages());

    // Wait to allow manual fiddling with Kafka. Sync with global test timeout above.
    Thread.sleep(2 * 60 * 1000);

    produceMessages.set(false);
    producer.shutdown();

    Thread.sleep(10_000);

    consumer.shutdown();

    logger.info("sentMessages: " + sentMessages.get());
    logger.info("sendFailures: " + sendFailures.get());
    logger.info("recievedMessages: " + recievedMessages.get());
}

From source file:org.apache.hadoop.hbase.tool.TestLoadIncrementalHFilesSplitRecovery.java

/**
 * Test that shows that an exception thrown from the RS side will result in an exception on the
 * LIHFile client.
 */
@Test(expected = IOException.class, timeout = 120000)
public void testBulkLoadPhaseFailure() throws Exception {
    final TableName table = TableName.valueOf(name.getMethodName());
    final AtomicInteger attemptedCalls = new AtomicInteger();
    final AtomicInteger failedCalls = new AtomicInteger();
    util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
        setupTable(connection, table, 10);
        LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration()) {
            @Override
            protected List<LoadQueueItem> tryAtomicRegionLoad(ClientServiceCallable<byte[]> serviceCallable,
                    TableName tableName, final byte[] first, Collection<LoadQueueItem> lqis)
                    throws IOException {
                int i = attemptedCalls.incrementAndGet();
                if (i == 1) {
                    Connection errConn;
                    try {
                        errConn = getMockedConnection(util.getConfiguration());
                        serviceCallable = this.buildClientServiceCallable(errConn, table, first, lqis, true);
                    } catch (Exception e) {
                        LOG.fatal("mocking cruft, should never happen", e);
                        throw new RuntimeException("mocking cruft, should never happen");
                    }
                    failedCalls.incrementAndGet();
                    return super.tryAtomicRegionLoad(serviceCallable, tableName, first, lqis);
                }

                return super.tryAtomicRegionLoad(serviceCallable, tableName, first, lqis);
            }
        };
        try {
            // create HFiles for different column families
            Path dir = buildBulkFiles(table, 1);
            try (Table t = connection.getTable(table);
                    RegionLocator locator = connection.getRegionLocator(table);
                    Admin admin = connection.getAdmin()) {
                lih.doBulkLoad(dir, admin, t, locator);
            }
        } finally {
            util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
                    HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
        }
        fail("doBulkLoad should have thrown an exception");
    }
}

From source file:org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryParsing.java

private void checkHistoryParsing(final int numMaps, final int numReduces, final int numSuccessfulMaps)
        throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.USER_NAME, System.getProperty("user.name"));
    long amStartTimeEst = System.currentTimeMillis();
    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, MyResolver.class, DNSToSwitchMapping.class);
    RackResolver.init(conf);
    MRApp app = new MRAppWithHistory(numMaps, numReduces, true, this.getClass().getName(), true);
    app.submit(conf);
    Job job = app.getContext().getAllJobs().values().iterator().next();
    JobId jobId = job.getID();
    LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
    app.waitForState(job, JobState.SUCCEEDED);

    // make sure all events are flushed
    app.waitForState(Service.STATE.STOPPED);

    String jobhistoryDir = JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf);

    FileContext fc = null;
    try {
        fc = FileContext.getFileContext(conf);
    } catch (IOException ioe) {
        LOG.info("Can not get FileContext", ioe);
        throw (new Exception("Can not get File Context"));
    }

    if (numMaps == numSuccessfulMaps) {
        String summaryFileName = JobHistoryUtils.getIntermediateSummaryFileName(jobId);
        Path summaryFile = new Path(jobhistoryDir, summaryFileName);
        String jobSummaryString = getJobSummary(fc, summaryFile);
        Assert.assertNotNull(jobSummaryString);
        Assert.assertTrue(jobSummaryString.contains("resourcesPerMap=100"));
        Assert.assertTrue(jobSummaryString.contains("resourcesPerReduce=100"));

        Map<String, String> jobSummaryElements = new HashMap<String, String>();
        StringTokenizer strToken = new StringTokenizer(jobSummaryString, ",");
        while (strToken.hasMoreTokens()) {
            String keypair = strToken.nextToken();
            jobSummaryElements.put(keypair.split("=")[0], keypair.split("=")[1]);
        }

        Assert.assertEquals("JobId does not match", jobId.toString(), jobSummaryElements.get("jobId"));
        Assert.assertEquals("JobName does not match", "test", jobSummaryElements.get("jobName"));
        Assert.assertTrue("submitTime should not be 0",
                Long.parseLong(jobSummaryElements.get("submitTime")) != 0);
        Assert.assertTrue("launchTime should not be 0",
                Long.parseLong(jobSummaryElements.get("launchTime")) != 0);
        Assert.assertTrue("firstMapTaskLaunchTime should not be 0",
                Long.parseLong(jobSummaryElements.get("firstMapTaskLaunchTime")) != 0);
        Assert.assertTrue("firstReduceTaskLaunchTime should not be 0",
                Long.parseLong(jobSummaryElements.get("firstReduceTaskLaunchTime")) != 0);
        Assert.assertTrue("finishTime should not be 0",
                Long.parseLong(jobSummaryElements.get("finishTime")) != 0);
        Assert.assertEquals("Mismatch in num map slots", numSuccessfulMaps,
                Integer.parseInt(jobSummaryElements.get("numMaps")));
        Assert.assertEquals("Mismatch in num reduce slots", numReduces,
                Integer.parseInt(jobSummaryElements.get("numReduces")));
        Assert.assertEquals("User does not match", System.getProperty("user.name"),
                jobSummaryElements.get("user"));
        Assert.assertEquals("Queue does not match", "default", jobSummaryElements.get("queue"));
        Assert.assertEquals("Status does not match", "SUCCEEDED", jobSummaryElements.get("status"));
    }

    JobHistory jobHistory = new JobHistory();
    jobHistory.init(conf);
    HistoryFileInfo fileInfo = jobHistory.getJobFileInfo(jobId);
    JobInfo jobInfo;
    long numFinishedMaps;

    synchronized (fileInfo) {
        Path historyFilePath = fileInfo.getHistoryFile();
        FSDataInputStream in = null;
        LOG.info("JobHistoryFile is: " + historyFilePath);
        try {
            in = fc.open(fc.makeQualified(historyFilePath));
        } catch (IOException ioe) {
            LOG.info("Can not open history file: " + historyFilePath, ioe);
            throw (new Exception("Can not open History File"));
        }

        JobHistoryParser parser = new JobHistoryParser(in);
        final EventReader realReader = new EventReader(in);
        EventReader reader = Mockito.mock(EventReader.class);
        if (numMaps == numSuccessfulMaps) {
            reader = realReader;
        } else {
            final AtomicInteger numFinishedEvents = new AtomicInteger(0); // Hack!
            Mockito.when(reader.getNextEvent()).thenAnswer(new Answer<HistoryEvent>() {
                public HistoryEvent answer(InvocationOnMock invocation) throws IOException {
                    HistoryEvent event = realReader.getNextEvent();
                    if (event instanceof TaskFinishedEvent) {
                        numFinishedEvents.incrementAndGet();
                    }

                    if (numFinishedEvents.get() <= numSuccessfulMaps) {
                        return event;
                    } else {
                        throw new IOException("test");
                    }
                }
            });
        }

        jobInfo = parser.parse(reader);

        numFinishedMaps = computeFinishedMaps(jobInfo, numMaps, numSuccessfulMaps);

        if (numFinishedMaps != numMaps) {
            Exception parseException = parser.getParseException();
            Assert.assertNotNull("Didn't get expected parse exception", parseException);
        }
    }

    Assert.assertEquals("Incorrect username ", System.getProperty("user.name"), jobInfo.getUsername());
    Assert.assertEquals("Incorrect jobName ", "test", jobInfo.getJobname());
    Assert.assertEquals("Incorrect queuename ", "default", jobInfo.getJobQueueName());
    Assert.assertEquals("incorrect conf path", "test", jobInfo.getJobConfPath());
    Assert.assertEquals("incorrect finishedMap ", numSuccessfulMaps, numFinishedMaps);
    Assert.assertEquals("incorrect finishedReduces ", numReduces, jobInfo.getFinishedReduces());
    Assert.assertEquals("incorrect uberized ", job.isUber(), jobInfo.getUberized());
    Map<TaskID, TaskInfo> allTasks = jobInfo.getAllTasks();
    int totalTasks = allTasks.size();
    Assert.assertEquals("total number of tasks is incorrect  ", (numMaps + numReduces), totalTasks);

    // Verify aminfo
    Assert.assertEquals(1, jobInfo.getAMInfos().size());
    Assert.assertEquals(MRApp.NM_HOST, jobInfo.getAMInfos().get(0).getNodeManagerHost());
    AMInfo amInfo = jobInfo.getAMInfos().get(0);
    Assert.assertEquals(MRApp.NM_PORT, amInfo.getNodeManagerPort());
    Assert.assertEquals(MRApp.NM_HTTP_PORT, amInfo.getNodeManagerHttpPort());
    Assert.assertEquals(1, amInfo.getAppAttemptId().getAttemptId());
    Assert.assertEquals(amInfo.getAppAttemptId(), amInfo.getContainerId().getApplicationAttemptId());
    Assert.assertTrue(
            amInfo.getStartTime() <= System.currentTimeMillis() && amInfo.getStartTime() >= amStartTimeEst);

    ContainerId fakeCid = MRApp.newContainerId(-1, -1, -1, -1);
    // Assert at taskAttempt level
    for (TaskInfo taskInfo : allTasks.values()) {
        int taskAttemptCount = taskInfo.getAllTaskAttempts().size();
        Assert.assertEquals("total number of task attempts ", 1, taskAttemptCount);
        TaskAttemptInfo taInfo = taskInfo.getAllTaskAttempts().values().iterator().next();
        Assert.assertNotNull(taInfo.getContainerId());
        // Verify the wrong ctor is not being used. Remove after mrv1 is removed.
        Assert.assertFalse(taInfo.getContainerId().equals(fakeCid));
    }

    // Deep compare Job and JobInfo
    for (Task task : job.getTasks().values()) {
        TaskInfo taskInfo = allTasks.get(TypeConverter.fromYarn(task.getID()));
        Assert.assertNotNull("TaskInfo not found", taskInfo);
        for (TaskAttempt taskAttempt : task.getAttempts().values()) {
            TaskAttemptInfo taskAttemptInfo = taskInfo.getAllTaskAttempts()
                    .get(TypeConverter.fromYarn((taskAttempt.getID())));
            Assert.assertNotNull("TaskAttemptInfo not found", taskAttemptInfo);
            Assert.assertEquals("Incorrect shuffle port for task attempt", taskAttempt.getShufflePort(),
                    taskAttemptInfo.getShufflePort());
            if (numMaps == numSuccessfulMaps) {
                Assert.assertEquals(MRApp.NM_HOST, taskAttemptInfo.getHostname());
                Assert.assertEquals(MRApp.NM_PORT, taskAttemptInfo.getPort());

                // Verify rack-name
                Assert.assertEquals("rack-name is incorrect", taskAttemptInfo.getRackname(), RACK_NAME);
            }
        }
    }

    // test output for HistoryViewer
    PrintStream stdps = System.out;
    try {
        System.setOut(new PrintStream(outContent));
        HistoryViewer viewer;
        synchronized (fileInfo) {
            viewer = new HistoryViewer(fc.makeQualified(fileInfo.getHistoryFile()).toString(), conf, true);
        }
        viewer.print();

        for (TaskInfo taskInfo : allTasks.values()) {

            String test = (taskInfo.getTaskStatus() == null ? "" : taskInfo.getTaskStatus()) + " "
                    + taskInfo.getTaskType() + " task list for " + taskInfo.getTaskId().getJobID();
            Assert.assertTrue(outContent.toString().indexOf(test) > 0);
            Assert.assertTrue(outContent.toString().indexOf(taskInfo.getTaskId().toString()) > 0);
        }
    } finally {
        System.setOut(stdps);

    }
}

From source file:com.facebook.BatchRequestTests.java

@LargeTest
public void testBatchOnProgressCallbackIsCalled() {
    final AtomicInteger count = new AtomicInteger();

    final AccessToken accessToken = getAccessTokenForSharedUser();

    String appId = getApplicationId();
    GraphRequest.setDefaultBatchApplicationId(appId);

    GraphRequest request1 = GraphRequest.newGraphPathRequest(accessToken, "4", null);
    assertNotNull(request1);
    GraphRequest request2 = GraphRequest.newGraphPathRequest(accessToken, "4", null);
    assertNotNull(request2);

    GraphRequestBatch batch = new GraphRequestBatch(request1, request2);
    batch.addCallback(new GraphRequestBatch.OnProgressCallback() {
        @Override
        public void onBatchCompleted(GraphRequestBatch batch) {
        }

        @Override
        public void onBatchProgress(GraphRequestBatch batch, long current, long max) {
            count.incrementAndGet();
        }
    });

    batch.executeAndWait();
    assertEquals(1, count.get());
}

From source file:org.apache.tinkerpop.gremlin.server.GremlinDriverIntegrateTest.java

@Test
public void shouldWaitForAllResultsToArrive() throws Exception {
    final Cluster cluster = Cluster.open();
    final Client client = cluster.connect();

    final AtomicInteger checked = new AtomicInteger(0);
    final ResultSet results = client.submit("[1,2,3,4,5,6,7,8,9]");
    while (!results.allItemsAvailable()) {
        assertTrue(results.getAvailableItemCount() < 10);
        checked.incrementAndGet();
        Thread.sleep(100);
    }

    assertTrue(checked.get() > 0);
    assertEquals(9, results.getAvailableItemCount());
    cluster.close();
}

From source file:org.apache.tinkerpop.gremlin.server.GremlinDriverIntegrateTest.java

@Test
public void shouldWorkOverNioTransport() throws Exception {
    final Cluster cluster = Cluster.build().channelizer(Channelizer.NioChannelizer.class.getName()).create();
    final Client client = cluster.connect();

    final AtomicInteger checked = new AtomicInteger(0);
    final ResultSet results = client.submit("[1,2,3,4,5,6,7,8,9]");
    while (!results.allItemsAvailable()) {
        assertTrue(results.getAvailableItemCount() < 10);
        checked.incrementAndGet();
        Thread.sleep(100);
    }

    assertTrue(checked.get() > 0);
    assertEquals(9, results.getAvailableItemCount());
    cluster.close();
}