Example usage for java.util.concurrent.atomic AtomicInteger incrementAndGet

Introduction

This page collects example usages of java.util.concurrent.atomic.AtomicInteger#incrementAndGet.

Prototype

public final int incrementAndGet() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd.
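
Before the real-world examples, here is a minimal, self-contained sketch (the class name CounterDemo and the thread and iteration counts are illustrative, not drawn from the examples below). incrementAndGet() performs an atomic read-modify-write, so concurrent increments are never lost the way they can be with a plain int++.

import java.util.concurrent.atomic.AtomicInteger;

public class CounterDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger(0);

        // Ten threads each increment the shared counter 1,000 times.
        Thread[] threads = new Thread[10];
        for (int i = 0; i < threads.length; i++) {
            threads[i] = new Thread(new Runnable() {
                @Override
                public void run() {
                    for (int j = 0; j < 1000; j++) {
                        counter.incrementAndGet(); // atomic; returns the updated value
                    }
                }
            });
            threads[i].start();
        }
        for (Thread t : threads) {
            t.join();
        }

        // Always prints 10000; a plain int++ could lose updates under contention.
        System.out.println(counter.get());
    }
}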

Usage

From source file:org.apache.cassandra.repair.RepairRunnable.java
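
In this example, incrementAndGet() advances a shared progress counter each time a phase of the repair command completes: calculating neighbors, validating column families, preparing for repair, and one increment per repaired range, the last reported from asynchronous session callbacks.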

protected void runMayThrow() throws Exception {
    final TraceState traceState;

    final String tag = "repair:" + cmd;

    final AtomicInteger progress = new AtomicInteger();
    final int totalProgress = 3 + options.getRanges().size(); // calculate neighbors, validation, prepare for repair + number of ranges to repair

    String[] columnFamilies = options.getColumnFamilies()
            .toArray(new String[options.getColumnFamilies().size()]);
    Iterable<ColumnFamilyStore> validColumnFamilies = storageService.getValidColumnFamilies(false, false,
            keyspace, columnFamilies);

    final long startTime = System.currentTimeMillis();
    String message = String.format("Starting repair command #%d, repairing keyspace %s with %s", cmd, keyspace,
            options);
    logger.info(message);
    fireProgressEvent(tag, new ProgressEvent(ProgressEventType.START, 0, 100, message));
    if (options.isTraced()) {
        StringBuilder cfsb = new StringBuilder();
        for (ColumnFamilyStore cfs : validColumnFamilies)
            cfsb.append(", ").append(cfs.keyspace.getName()).append(".").append(cfs.name);

        UUID sessionId = Tracing.instance.newSession(Tracing.TraceType.REPAIR);
        traceState = Tracing.instance.begin("repair",
                ImmutableMap.of("keyspace", keyspace, "columnFamilies", cfsb.substring(2)));
        Tracing.traceRepair(message);
        traceState.enableActivityNotification(tag);
        for (ProgressListener listener : listeners)
            traceState.addProgressListener(listener);
        Thread queryThread = createQueryThread(cmd, sessionId);
        queryThread.setName("RepairTracePolling");
        queryThread.start();
    } else {
        traceState = null;
    }

    final Set<InetAddress> allNeighbors = new HashSet<>();
    Map<Range<Token>, Set<InetAddress>> rangeToNeighbors = new HashMap<>();
    try {
        for (Range<Token> range : options.getRanges()) {
            Set<InetAddress> neighbors = ActiveRepairService.getNeighbors(keyspace, range,
                    options.getDataCenters(), options.getHosts());
            rangeToNeighbors.put(range, neighbors);
            allNeighbors.addAll(neighbors);
        }
        progress.incrementAndGet();
    } catch (IllegalArgumentException e) {
        logger.error("Repair failed:", e);
        fireErrorAndComplete(tag, progress.get(), totalProgress, e.getMessage());
        return;
    }

    // Validate columnfamilies
    List<ColumnFamilyStore> columnFamilyStores = new ArrayList<>();
    try {
        Iterables.addAll(columnFamilyStores, validColumnFamilies);
        progress.incrementAndGet();
    } catch (IllegalArgumentException e) {
        fireErrorAndComplete(tag, progress.get(), totalProgress, e.getMessage());
        return;
    }

    String[] cfnames = new String[columnFamilyStores.size()];
    for (int i = 0; i < columnFamilyStores.size(); i++) {
        cfnames[i] = columnFamilyStores.get(i).name;
    }

    final UUID parentSession = UUIDGen.getTimeUUID();
    SystemDistributedKeyspace.startParentRepair(parentSession, keyspace, cfnames, options.getRanges());
    long repairedAt;
    try {
        ActiveRepairService.instance.prepareForRepair(parentSession, allNeighbors, options, columnFamilyStores);
        repairedAt = ActiveRepairService.instance.getParentRepairSession(parentSession).getRepairedAt();
        progress.incrementAndGet();
    } catch (Throwable t) {
        SystemDistributedKeyspace.failParentRepair(parentSession, t);
        fireErrorAndComplete(tag, progress.get(), totalProgress, t.getMessage());
        return;
    }

    // Set up RepairJob executor for this repair command.
    final ListeningExecutorService executor = MoreExecutors.listeningDecorator(
            new JMXConfigurableThreadPoolExecutor(options.getJobThreads(), Integer.MAX_VALUE, TimeUnit.SECONDS,
                    new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("Repair#" + cmd), "internal"));

    List<ListenableFuture<RepairSessionResult>> futures = new ArrayList<>(options.getRanges().size());
    for (Range<Token> range : options.getRanges()) {
        final RepairSession session = ActiveRepairService.instance.submitRepairSession(parentSession, range,
                keyspace, options.getParallelism(), rangeToNeighbors.get(range), repairedAt, executor, cfnames);
        if (session == null)
            continue;
        // After the repair session completes, notify the client of its result
        Futures.addCallback(session, new FutureCallback<RepairSessionResult>() {
            public void onSuccess(RepairSessionResult result) {
                String message = String.format("Repair session %s for range %s finished", session.getId(),
                        session.getRange().toString());
                logger.info(message);
                fireProgressEvent(tag, new ProgressEvent(ProgressEventType.PROGRESS, progress.incrementAndGet(),
                        totalProgress, message));
            }

            public void onFailure(Throwable t) {
                String message = String.format("Repair session %s for range %s failed with error %s",
                        session.getId(), session.getRange().toString(), t.getMessage());
                logger.error(message, t);
                fireProgressEvent(tag, new ProgressEvent(ProgressEventType.PROGRESS, progress.incrementAndGet(),
                        totalProgress, message));
            }
        });
        futures.add(session);
    }

    // After all repair sessions complete (successful or not),
    // run anticompaction if necessary and send a finish notice back to the client
    final Collection<Range<Token>> successfulRanges = new ArrayList<>();
    final AtomicBoolean hasFailure = new AtomicBoolean();
    final ListenableFuture<List<RepairSessionResult>> allSessions = Futures.successfulAsList(futures);
    ListenableFuture anticompactionResult = Futures.transform(allSessions,
            new AsyncFunction<List<RepairSessionResult>, Object>() {
                @SuppressWarnings("unchecked")
                public ListenableFuture apply(List<RepairSessionResult> results) throws Exception {
                    // Filter out null (= failed) results and collect the successful ranges
                    for (RepairSessionResult sessionResult : results) {
                        if (sessionResult != null) {
                            successfulRanges.add(sessionResult.range);
                        } else {
                            hasFailure.compareAndSet(false, true);
                        }
                    }
                    return ActiveRepairService.instance.finishParentSession(parentSession, allNeighbors,
                            successfulRanges);
                }
            });
    Futures.addCallback(anticompactionResult, new FutureCallback<Object>() {
        public void onSuccess(Object result) {
            SystemDistributedKeyspace.successfulParentRepair(parentSession, successfulRanges);
            if (hasFailure.get()) {
                fireProgressEvent(tag, new ProgressEvent(ProgressEventType.ERROR, progress.get(), totalProgress,
                        "Some repair failed"));
            } else {
                fireProgressEvent(tag, new ProgressEvent(ProgressEventType.SUCCESS, progress.get(),
                        totalProgress, "Repair completed successfully"));
            }
            repairComplete();
        }

        public void onFailure(Throwable t) {
            fireProgressEvent(tag,
                    new ProgressEvent(ProgressEventType.ERROR, progress.get(), totalProgress, t.getMessage()));
            SystemDistributedKeyspace.failParentRepair(parentSession, t);
            repairComplete();
        }

        private void repairComplete() {
            String duration = DurationFormatUtils.formatDurationWords(System.currentTimeMillis() - startTime,
                    true, true);
            String message = String.format("Repair command #%d finished in %s", cmd, duration);
            fireProgressEvent(tag,
                    new ProgressEvent(ProgressEventType.COMPLETE, progress.get(), totalProgress, message));
            logger.info(message);
            if (options.isTraced() && traceState != null) {
                for (ProgressListener listener : listeners)
                    traceState.removeProgressListener(listener);
                // Because DebuggableThreadPoolExecutor#afterExecute and this callback
                // run in a nondeterministic order (within the same thread), the
                // TraceState may have been nulled out at this point. The TraceState
                // should be traceState, so just set it without bothering to check if it
                // actually was nulled out.
                Tracing.instance.set(traceState);
                Tracing.traceRepair(message);
                Tracing.instance.stopSession();
            }
            executor.shutdownNow();
        }
    });
}

From source file:com.revetkn.ios.analyzer.ArtworkAnalyzer.java
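
Here incrementAndGet() produces a running count of image files processed by concurrent worker tasks, which is passed to a progress callback together with the total number of files.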

/** Modifies the passed-in {@code applicationArtwork} instance to include image reference data. */
protected void detectImageReferences(File projectRootDirectory, final ApplicationArtwork applicationArtwork,
        final ArtworkExtractionProgressCallback progressCallback) throws Exception {
    final Map<File, String> contentsOfReferencingFiles = extractContentsOfReferencingFiles(
            projectRootDirectory);
    final SortedSet<File> unreferencedImageFiles = synchronizedSortedSet(new TreeSet<File>());
    final SortedSet<File> onlyProjectFileReferencedImageFiles = synchronizedSortedSet(new TreeSet<File>());
    final SortedMap<File, SortedSet<File>> allImageFilesAndReferencingFiles = synchronizedSortedMap(
            new TreeMap<File, SortedSet<File>>());

    final AtomicInteger imageFilesProcessed = new AtomicInteger(0);

    Set<Callable<Object>> imageReferenceProcessingTasks = new HashSet<Callable<Object>>();

    for (final File imageFile : applicationArtwork.getAllImageFiles()) {
        imageReferenceProcessingTasks.add(new Callable<Object>() {
            @Override
            public Object call() throws Exception {
                String imageFilename = imageFile.getName();
                SortedSet<File> filesWhereImageIsReferenced = new TreeSet<File>();
                Set<String> imageFilenameVariants = imageFilenameVariants(imageFilename);

                for (Entry<File, String> entry : contentsOfReferencingFiles.entrySet()) {
                    String fileContents = entry.getValue();

                    for (String imageFilenameVariant : imageFilenameVariants) {
                        // Quoted references, e.g. "aboutBackground"
                        if (fileContents.indexOf(format("\"%s\"", imageFilenameVariant)) != -1)
                            filesWhereImageIsReferenced.add(entry.getKey());

                        // Nib files, e.g. <string key="NSResourceName">aboutBackground~ipad.png</string>
                        else if (fileContents.indexOf(format(">%s<", imageFilenameVariant)) != -1)
                            filesWhereImageIsReferenced.add(entry.getKey());
                    }
                }

                if (filesWhereImageIsReferenced.size() == 1 && "project.pbxproj"
                        .equals(filesWhereImageIsReferenced.first().getName().toLowerCase()))
                    onlyProjectFileReferencedImageFiles.add(imageFile);

                if (filesWhereImageIsReferenced.size() == 0) {
                    unreferencedImageFiles.add(imageFile);
                } else {
                    allImageFilesAndReferencingFiles.put(imageFile, filesWhereImageIsReferenced);
                }

                progressCallback.onProcessedImageReferences(imageFile, filesWhereImageIsReferenced,
                        imageFilesProcessed.incrementAndGet(), applicationArtwork.getAllImageFiles().size());

                return null;
            }
        });
    }

    for (Future<Object> future : getExecutorService().invokeAll(imageReferenceProcessingTasks))
        future.get();

    applicationArtwork.setAllImageFilesAndReferencingFiles(allImageFilesAndReferencingFiles);
    applicationArtwork.setUnreferencedImageFiles(unreferencedImageFiles);
    applicationArtwork.setOnlyProjectFileReferencedImageFiles(onlyProjectFileReferencedImageFiles);
}

From source file:com.dumontierlab.pdb2rdf.Pdb2Rdf.java
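
Here incrementAndGet() tracks how many inputs a thread pool has converted to RDF so far, and the running count drives a progress monitor.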

private static void printRdf(final CommandLine cmd, final Map<String, Double> stats) {
    final File outDir = getOutputDirectory(cmd);
    final RDFWriter writer = getWriter(cmd);
    final ProgressMonitor monitor = getProgressMonitor();
    Pdb2RdfInputIterator i = processInput(cmd);
    final int inputSize = i.size();
    final AtomicInteger progressCount = new AtomicInteger();
    ExecutorService pool = null;
    if (outDir != null) {
        pool = getThreadPool(cmd);
    } else {
        // If output is going to STDOUT then we need to process it in
        // sequential mode.
        pool = Executors.newSingleThreadExecutor();
    }

    final Object lock = new Object();

    while (i.hasNext()) {
        final InputSource input = i.next();
        pool.execute(new Runnable() {
            @Override
            public void run() {
                OutputStream out = System.out;
                PdbXmlParser parser = new PdbXmlParser();
                PdbRdfModel model = null;
                try {
                    if (cmd.hasOption("detailLevel")) {
                        try {
                            DetailLevel detailLevel = Enum.valueOf(DetailLevel.class,
                                    cmd.getOptionValue("detailLevel"));
                            model = parser.parse(input, new PdbRdfModel(), detailLevel);
                        } catch (IllegalArgumentException e) {
                            LOG.fatal("Invalid argument value for detailLevel option", e);
                            System.exit(1);
                        }
                    } else {
                        model = parser.parse(input, new PdbRdfModel());
                    }
                    // add the input file information
                    model.addInputFileInformation();
                    // add the output file information
                    model.addRDFFileInformation();
                    if (outDir != null) {
                        File directory = new File(outDir, model.getPdbId().substring(1, 3));
                        synchronized (lock) {
                            if (!directory.exists()) {
                                directory.mkdir();
                            }
                        }
                        File file = new File(directory, model.getPdbId() + ".rdf.gz");
                        out = new GZIPOutputStream(new FileOutputStream(file));
                    }
                    if (cmd.hasOption("format")) {
                        if (cmd.getOptionValue("format").equalsIgnoreCase("NQUADs")) {
                            Dataset ds = TDBFactory.createDataset();
                            ds.addNamedModel(model.getDatasetResource().toString(), model);
                            StringWriter sw = new StringWriter();
                            RDFDataMgr.write(sw, ds, Lang.NQUADS);

                            out.write(sw.toString().getBytes(Charset.forName("UTF-8")));
                            ds.close();

                        }
                    }

                    writer.write(model, out, null);

                    if (stats != null) {
                        updateStats(stats, model);
                    }
                    if (monitor != null) {
                        monitor.setProgress(progressCount.incrementAndGet(), inputSize);
                    }

                } catch (Exception e) {
                    String id = null;
                    if (model != null) {
                        id = model.getPdbId();
                    }
                    LOG.error("Unable to parse input for PDB: " + id, e);
                } finally {
                    try {
                        out.close();
                    } catch (IOException e) {
                        LOG.error("Unable to close output stream", e);
                    }
                }
            }
        });
    }
    pool.shutdown();
    while (!pool.isTerminated()) {
        try {
            pool.awaitTermination(1, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            break;
        }
    }
}

From source file:com.alibaba.wasp.master.FMaster.java
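
Here incrementAndGet() counts the entity groups of a table from within a meta-scan visitor callback; the table is considered available only if at least one assigned entity group was counted.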

private boolean isAvailable(final byte[] rootTableName) throws IOException {
    final AtomicBoolean available = new AtomicBoolean(true);
    final AtomicInteger entityGroupCount = new AtomicInteger(0);
    MetaScannerVisitorBase visitor = new MetaScannerVisitorBase() {
        @Override
        public boolean processRow(Result rowResult) throws IOException {
            byte[] value = rowResult.getValue(FConstants.CATALOG_FAMILY, FConstants.EGINFO);
            EntityGroupInfo eginfo = EntityGroupInfo.parseFromOrNull(value);
            if (eginfo != null) {
                if (Bytes.equals(eginfo.getTableName(), rootTableName)) {
                    value = rowResult.getValue(FConstants.CATALOG_FAMILY, FConstants.EGLOCATION);
                    if (value == null) {
                        available.set(false);
                        return false;
                    }
                    entityGroupCount.incrementAndGet();
                }
            }
            // Returning true means "keep scanning"
            return true;
        }
    };
    FMetaScanner.metaScan(conf, visitor, rootTableName);
    return available.get() && (entityGroupCount.get() > 0);
}

From source file:org.apache.hadoop.hbase.regionserver.TestAtomicOperation.java
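
This test shares an AtomicInteger failure counter (and an AtomicLong timestamp source) across ten mutator threads; incrementAndGet() records each failure, and the test asserts the counter is still zero at the end.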

/**
 * Test multi-threaded row mutations.
 */
@Test
public void testRowMutationMultiThreads() throws IOException {

    LOG.info("Starting test testRowMutationMultiThreads");
    initHRegion(tableName, name.getMethodName(), fam1);

    // create 10 threads, each will alternate between adding and
    // removing a column
    int numThreads = 10;
    int opsPerThread = 500;
    AtomicOperation[] all = new AtomicOperation[numThreads];

    AtomicLong timeStamps = new AtomicLong(0);
    AtomicInteger failures = new AtomicInteger(0);
    // create all threads
    for (int i = 0; i < numThreads; i++) {
        all[i] = new AtomicOperation(region, opsPerThread, timeStamps, failures) {
            @Override
            public void run() {
                boolean op = true;
                for (int i = 0; i < numOps; i++) {
                    try {
                        // throw in some flushes
                        if (i % 10 == 0) {
                            synchronized (region) {
                                LOG.debug("flushing");
                                region.flushcache();
                                if (i % 100 == 0) {
                                    region.compactStores();
                                }
                            }
                        }
                        long ts = timeStamps.incrementAndGet();
                        RowMutations rm = new RowMutations(row);
                        if (op) {
                            Put p = new Put(row, ts);
                            p.add(fam1, qual1, value1);
                            rm.add(p);
                            Delete d = new Delete(row);
                            d.deleteColumns(fam1, qual2, ts);
                            rm.add(d);
                        } else {
                            Delete d = new Delete(row);
                            d.deleteColumns(fam1, qual1, ts);
                            rm.add(d);
                            Put p = new Put(row, ts);
                            p.add(fam1, qual2, value2);
                            rm.add(p);
                        }
                        region.mutateRow(rm);
                        op ^= true;
                        // check: should always see exactly one column
                        Get g = new Get(row);
                        Result r = region.get(g);
                        if (r.size() != 1) {
                            LOG.debug(r);
                            failures.incrementAndGet();
                            fail();
                        }
                    } catch (IOException e) {
                        e.printStackTrace();
                        failures.incrementAndGet();
                        fail();
                    }
                }
            }
        };
    }

    // run all threads
    for (int i = 0; i < numThreads; i++) {
        all[i].start();
    }

    // wait for all threads to finish
    for (int i = 0; i < numThreads; i++) {
        try {
            all[i].join();
        } catch (InterruptedException e) {
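            // Interruption is ignored here; keep joining the remaining threads.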
        }
    }
    assertEquals(0, failures.get());
}

From source file:com.alibaba.wasp.master.FMaster.java
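
Here incrementAndGet() tallies the entity groups of a table that are actually assigned to a server, again from within a meta-scan visitor, and the final count is returned in the RPC response.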

/**
 * @see com.alibaba.wasp.protobuf.generated.MasterAdminProtos.MasterAdminService.BlockingInterface#fetchEntityGroupSize(com.google.protobuf.RpcController,
 *      com.alibaba.wasp.protobuf.generated.MasterAdminProtos.FetchEntityGroupSizeRequest)
 */
@Override
public FetchEntityGroupSizeResponse fetchEntityGroupSize(RpcController controller,
        FetchEntityGroupSizeRequest request) throws ServiceException {
    final byte[] tableNameBytes = request.getTableName().toByteArray();
    final AtomicInteger actualEgCount = new AtomicInteger(0);
    MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
        @Override
        public boolean processRow(org.apache.hadoop.hbase.client.Result rowResult) throws IOException {
            EntityGroupInfo info = EntityGroupInfo.getEntityGroupInfo(rowResult);
            if (info == null) {
                LOG.warn("No serialized EntityGroupInfo in " + rowResult);
                return true;
            }
            if (!(Bytes.equals(info.getTableName(), tableNameBytes))) {
                return false;
            }
            ServerName serverName = EntityGroupInfo.getServerName(rowResult);
            // Make sure that entityGroups are assigned to server
            if (!(info.isOffline() || info.isSplit()) && serverName != null
                    && serverName.getHostAndPort() != null) {
                actualEgCount.incrementAndGet();
            }
            return true;
        }
    };
    try {
        FMetaScanner.metaScan(conf, visitor, tableNameBytes);
    } catch (IOException e) {
        LOG.error("Failed fetchEntityGroupSize.", e);
        throw new ServiceException(e);
    }
    FetchEntityGroupSizeResponse.Builder res = FetchEntityGroupSizeResponse.newBuilder();
    res.setEgSize(actualEgCount.get());
    return res.build();
}

From source file:com.atlassian.jira.bc.group.TestDefaultGroupService.java
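
In this test, incrementAndGet() counts invocations of an overridden validation method so the test can assert it was called exactly once.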

@Test
public void testValidateRemoveUsersFromGroupsRemoveFromAll() {
    final AtomicInteger validateCanRemoveUserCount = new AtomicInteger(0);
    final DefaultGroupService defaultGroupService = new DefaultGroupService(null, null, null, null, null, null,
            null, null, null, null, null, null, null, null) {
        @Override
        boolean isExternalUserManagementEnabled() {
            return false;
        }

        @Override
        User getUser(final String userName) {
            return null;
        }

        @Override
        boolean isUserNull(final User user) {
            return false;
        }

        @Override
        boolean validateCanRemoveUserFromGroups(final JiraServiceContext jiraServiceContext,
                final User userToRemove, final List allSelectedGroups, final List groupsToLeave,
                final boolean isAll) {
            validateCanRemoveUserCount.incrementAndGet();
            return false;
        }

        @Override
        boolean userHasAdminPermission(final User user) {
            return true;
        }
    };

    final SimpleErrorCollection errors = new SimpleErrorCollection();
    final JiraServiceContext jiraServiceContext = getContext(errors);

    final GroupRemoveChildMapper mapper = new GroupRemoveChildMapper(EasyList.build("Group1"));
    mapper.register(null);
    assertFalse(defaultGroupService.validateRemoveUsersFromGroups(jiraServiceContext, mapper));
    assertEquals(1, validateCanRemoveUserCount.get());
}

From source file:com.atlassian.jira.bc.group.TestDefaultGroupService.java
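
This variant registers the user against two groups, so the same invocation counter is expected to reach 2.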

@Test
public void testValidateRemoveUsersFromGroupsRemoveFromGroups() {
    final AtomicInteger validateCanRemoveUserCount = new AtomicInteger(0);
    final DefaultGroupService defaultGroupService = new DefaultGroupService(null, null, null, null, null, null,
            null, null, null, null, null, null, null, null) {
        @Override
        boolean isExternalUserManagementEnabled() {
            return false;
        }

        @Override
        User getUser(final String userName) {
            return null;
        }

        @Override
        boolean isUserNull(final User user) {
            return false;
        }

        @Override
        boolean validateCanRemoveUserFromGroups(final JiraServiceContext jiraServiceContext,
                final User userToRemove, final List allSelectedGroups, final List groupsToLeave,
                final boolean isAll) {
            validateCanRemoveUserCount.incrementAndGet();
            return false;
        }

        @Override
        boolean userHasAdminPermission(final User user) {
            return true;
        }
    };

    final SimpleErrorCollection errors = new SimpleErrorCollection();
    final JiraServiceContext jiraServiceContext = getContext(errors);

    final GroupRemoveChildMapper mapper = new GroupRemoveChildMapper(EasyList.build("Group1"));
    mapper.register(null, EasyList.build("Group1", "Group2"));
    assertFalse(defaultGroupService.validateRemoveUsersFromGroups(jiraServiceContext, mapper));
    assertEquals(2, validateCanRemoveUserCount.get());

}

From source file:org.apache.hadoop.yarn.server.nodemanager.TestNodeStatusUpdater.java
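
Here incrementAndGet() counts application clean-ups performed during node manager shutdown, letting the test assert that a re-entrant stop performs the clean-up exactly once.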

@Test
public void testStopReentrant() throws Exception {
    final AtomicInteger numCleanups = new AtomicInteger(0);
    nm = new NodeManager() {
        @Override
        protected NodeStatusUpdater createNodeStatusUpdater(Context context, Dispatcher dispatcher,
                NodeHealthCheckerService healthChecker) {
            MyNodeStatusUpdater myNodeStatusUpdater = new MyNodeStatusUpdater(context, dispatcher,
                    healthChecker, metrics);
            MyResourceTracker2 myResourceTracker2 = new MyResourceTracker2();
            myResourceTracker2.heartBeatNodeAction = NodeAction.SHUTDOWN;
            myNodeStatusUpdater.resourceTracker = myResourceTracker2;
            return myNodeStatusUpdater;
        }

        @Override
        protected ContainerManagerImpl createContainerManager(Context context, ContainerExecutor exec,
                DeletionService del, NodeStatusUpdater nodeStatusUpdater, ApplicationACLsManager aclsManager,
                LocalDirsHandlerService dirsHandler) {
            return new ContainerManagerImpl(context, exec, del, nodeStatusUpdater, metrics, dirsHandler) {

                @Override
                public void cleanUpApplicationsOnNMShutDown() {
                    super.cleanUpApplicationsOnNMShutDown();
                    numCleanups.incrementAndGet();
                }
            };
        }
    };

    YarnConfiguration conf = createNMConfig();
    nm.init(conf);
    nm.start();

    int waitCount = 0;
    while (heartBeatID < 1 && waitCount++ != 200) {
        Thread.sleep(500);
    }
    Assert.assertFalse(heartBeatID < 1);

    // Meanwhile call stop directly as the shutdown hook would
    nm.stop();

    // NM takes a while to reach the STOPPED state.
    waitCount = 0;
    while (nm.getServiceState() != STATE.STOPPED && waitCount++ != 20) {
        LOG.info("Waiting for NM to stop..");
        Thread.sleep(1000);
    }

    Assert.assertEquals(STATE.STOPPED, nm.getServiceState());

    // It takes a while longer after the NM has reached the STOPPED state.
    waitCount = 0;
    while (numCleanups.get() == 0 && waitCount++ != 20) {
        LOG.info("Waiting for NM shutdown..");
        Thread.sleep(1000);
    }
    Assert.assertEquals(1, numCleanups.get());
}

From source file:org.apache.hadoop.hbase.regionserver.TestAtomicOperation.java
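
Like the earlier row-mutation test, this one records failures with incrementAndGet() across ten threads, this time exercising multi-row mutations under row locks.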

/**
 * Test multi-threaded region mutations.
 */
@Test
public void testMultiRowMutationMultiThreads() throws IOException {

    LOG.info("Starting test testMultiRowMutationMultiThreads");
    initHRegion(tableName, name.getMethodName(), fam1);

    // create 10 threads, each will alternate between adding and
    // removing a column
    int numThreads = 10;
    int opsPerThread = 500;
    AtomicOperation[] all = new AtomicOperation[numThreads];

    AtomicLong timeStamps = new AtomicLong(0);
    AtomicInteger failures = new AtomicInteger(0);
    final List<byte[]> rowsToLock = Arrays.asList(row, row2);
    // create all threads
    for (int i = 0; i < numThreads; i++) {
        all[i] = new AtomicOperation(region, opsPerThread, timeStamps, failures) {
            @Override
            public void run() {
                boolean op = true;
                for (int i = 0; i < numOps; i++) {
                    try {
                        // throw in some flushes
                        if (i % 10 == 0) {
                            synchronized (region) {
                                LOG.debug("flushing");
                                region.flushcache();
                                if (i % 100 == 0) {
                                    region.compactStores();
                                }
                            }
                        }
                        long ts = timeStamps.incrementAndGet();
                        List<Mutation> mrm = new ArrayList<Mutation>();
                        if (op) {
                            Put p = new Put(row2, ts);
                            p.add(fam1, qual1, value1);
                            mrm.add(p);
                            Delete d = new Delete(row);
                            d.deleteColumns(fam1, qual1, ts);
                            mrm.add(d);
                        } else {
                            Delete d = new Delete(row2);
                            d.deleteColumns(fam1, qual1, ts);
                            mrm.add(d);
                            Put p = new Put(row, ts);
                            p.add(fam1, qual1, value2);
                            mrm.add(p);
                        }
                        region.mutateRowsWithLocks(mrm, rowsToLock);
                        op ^= true;
                        // check: should always see exactly one column
                        Scan s = new Scan(row);
                        RegionScanner rs = region.getScanner(s);
                        List<Cell> r = new ArrayList<Cell>();
                        while (rs.next(r))
                            ;
                        rs.close();
                        if (r.size() != 1) {
                            LOG.debug(r);
                            failures.incrementAndGet();
                            fail();
                        }
                    } catch (IOException e) {
                        e.printStackTrace();
                        failures.incrementAndGet();
                        fail();
                    }
                }
            }
        };
    }

    // run all threads
    for (int i = 0; i < numThreads; i++) {
        all[i].start();
    }

    // wait for all threads to finish
    for (int i = 0; i < numThreads; i++) {
        try {
            all[i].join();
        } catch (InterruptedException e) {
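            // Interruption is ignored here; keep joining the remaining threads.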
        }
    }
    assertEquals(0, failures.get());
}