Example usage for java.util.concurrent.atomic AtomicInteger incrementAndGet

List of usage examples for java.util.concurrent.atomic AtomicInteger incrementAndGet

Introduction

On this page you can find example usage for java.util.concurrent.atomic AtomicInteger incrementAndGet.

Prototype

public final int incrementAndGet() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd.
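
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing what the method guarantees: each call atomically adds one to the current value and returns the updated value, so concurrent increments are never lost.

import java.util.concurrent.atomic.AtomicInteger;

public class IncrementAndGetSketch {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger(0);

        // Two threads increment the shared counter; incrementAndGet() is atomic,
        // so the final value is exactly 2000 without any synchronized block.
        Runnable work = () -> {
            for (int i = 0; i < 1000; i++) {
                counter.incrementAndGet();
            }
        };
        Thread t1 = new Thread(work);
        Thread t2 = new Thread(work);
        t1.start();
        t2.start();
        t1.join();
        t2.join();

        System.out.println(counter.get()); // prints 2000
    }
}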

Usage

From source file:org.apache.hadoop.raid.RaidShell.java

/**
 * checks the raided file system, prints a list of corrupt files to
 * this.out and returns the number of corrupt files.
 * Also prints out the total number of files with at least one missing block.
 * When called with '-retNumStrpsMissingBlks', also prints out the number of stripes
 * with a given number of missing blocks for files using the 'RS' codec.
 */
public void fsck(String cmd, String[] args, int startIndex) throws IOException {
    final int numFsckArgs = args.length - startIndex;
    int numThreads = 16;
    String path = "/";
    boolean argsOk = false;
    boolean countOnly = false;
    boolean cntMissingBlksPerStrp = false;
    boolean listRecoverableFile = false;
    if (numFsckArgs >= 1) {
        argsOk = true;
        path = args[startIndex];
    }
    for (int i = startIndex + 1; i < args.length; i++) {
        if (args[i].equals("-threads")) {
            numThreads = Integer.parseInt(args[++i]);
        } else if (args[i].equals("-count")) {
            countOnly = true;
        } else if (args[i].equals("-retNumStrpsMissingBlks")) {
            cntMissingBlksPerStrp = true;
        } else if (args[i].equals("-listrecoverablefiles")) {
            listRecoverableFile = true;
        }
    }
    if (!argsOk) {
        printUsage(cmd);
        return;
    }
    final String dateString = dateFormat.format(new Date());
    System.err
            .println("Running RAID FSCK with " + numThreads + " threads on " + path + " at time " + dateString);

    FileSystem fs = (new Path(path)).getFileSystem(conf);

    // if we got a raid fs, get the underlying fs 
    if (fs instanceof DistributedRaidFileSystem) {
        fs = ((DistributedRaidFileSystem) fs).getFileSystem();
    }

    // check that we have a distributed fs
    if (!(fs instanceof DistributedFileSystem)) {
        throw new IOException("expected DistributedFileSystem but got " + fs.getClass().getName());
    }
    final DistributedFileSystem dfs = (DistributedFileSystem) fs;

    // get a list of corrupted files (not considering parity blocks just yet)
    // from the name node
    // these are the only files we need to consider:
    // if a file has no corrupted data blocks, it is OK even if some
    // of its parity blocks are corrupted, so no further checking is
    // necessary
    System.err.println("Querying NameNode for list of corrupt files under " + path);
    final String[] files = DFSUtil.getCorruptFiles(dfs, path);
    final List<String> corruptFileCandidates = new LinkedList<String>();
    for (final String f : files) {
        // if this file is a parity file
        // or if it does not start with the specified path,
        // ignore it
        boolean matched = false;
        for (Codec c : Codec.getCodecs()) {
            if (f.startsWith(c.getParityPrefix())) {
                matched = true;
            }
        }
        if (!matched) {
            corruptFileCandidates.add(f);
        }
    }
    // filter files marked for deletion
    RaidUtils.filterTrash(conf, corruptFileCandidates);

    //clear numStrpMissingBlks if missing blocks per stripe is to be counted
    if (cntMissingBlksPerStrp) {
        for (AtomicLongArray numStrpMissingBlks : numStrpMissingBlksMap.values()) {
            for (int i = 0; i < numStrpMissingBlks.length(); i++) {
                numStrpMissingBlks.set(i, 0);
            }
        }
    }
    System.err.println("Processing " + corruptFileCandidates.size() + " possibly corrupt files using "
            + numThreads + " threads");
    ExecutorService executor = null;
    ThreadFactory factory = new ThreadFactory() {
        final AtomicInteger tnum = new AtomicInteger();

        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName("Raidfsck-" + dateString + "-" + tnum.incrementAndGet());
            return t;
        }
    };
    if (numThreads > 1) {
        executor = Executors.newFixedThreadPool(numThreads, factory);
    } else {
        numThreads = 1;
    }
    final List<String> unRecoverableFiles = Collections.synchronizedList(new LinkedList<String>());
    final List<String> recoverableFiles = Collections.synchronizedList(new LinkedList<String>());
    final boolean finalCountOnly = countOnly;
    final boolean finalMissingBlksPerStrpCnt = cntMissingBlksPerStrp;
    final boolean finalListRecoverableFile = listRecoverableFile;
    final int step = numThreads;
    final AtomicInteger finishNum = new AtomicInteger(0);
    for (int i = 0; i < numThreads; i++) {
        if (!dfs.getClient().isOpen()) {
            throw new IOException("Filesystem closed.");
        }
        final int startIdx = i;
        Runnable work = new Runnable() {
            public void run() {
                try {
                    for (int idx = startIdx; idx < corruptFileCandidates.size(); idx += step) {
                        String corruptFileCandidate = corruptFileCandidates.get(idx);
                        boolean corrupt = false;
                        try {
                            FileStatus corruptStat;
                            try {
                                corruptStat = dfs.getFileStatus(new Path(corruptFileCandidate));
                            } catch (FileNotFoundException fnfe) {
                                continue;
                            }
                            if (!dfs.getClient().isOpen()) {
                                LOG.warn("Filesystem closed.");
                                return;
                            }
                            corrupt = isFileCorrupt(dfs, corruptStat, finalMissingBlksPerStrpCnt);
                            if (corrupt) {
                                incrCorruptCount();
                                if (!finalCountOnly && !finalListRecoverableFile) {
                                    unRecoverableFiles.add(corruptFileCandidate);
                                }
                            } else {
                                if (!finalCountOnly && finalListRecoverableFile) {
                                    recoverableFiles.add(corruptFileCandidate);
                                }
                            }
                        } catch (Throwable e) {
                            LOG.error("Error in processing " + corruptFileCandidate, e);
                        }
                    }
                } finally {
                    finishNum.incrementAndGet();
                }
            }
        };
        if (executor != null) {
            executor.execute(work);
        } else {
            work.run();
        }
    }
    if (executor != null) {
        try {
            while (finishNum.get() < numThreads) {
                try {
                    Thread.sleep(2000);
                } catch (InterruptedException ie) {
                    LOG.warn("Raidfsck get exception ", ie);
                    throw new IOException(ie);
                }
            }
        } finally {
            executor.shutdown(); // Waits for submitted tasks to finish.
        }
    }

    // If client is closed, fail the fsck check.
    if (!dfs.getClient().isOpen()) {
        throw new IOException("Filesystem closed.");
    }

    if (countOnly) {
        //Number of corrupt files (which cannot be fixed by Raid)
        out.println(getCorruptCount());
        LOG.info("Nubmer of corrupt files:" + getCorruptCount());
        //Number of files with at least one missing block
        out.println(corruptFileCandidates.size());
        LOG.info("Number of files with at least one block missing/corrupt: " + corruptFileCandidates.size());
    } else {
        if (listRecoverableFile) {
            for (String file : recoverableFiles) {
                out.println(file);
            }
        } else {
            for (String file : unRecoverableFiles) {
                out.println(file);
            }
        }
    }

    /*Number of stripes with missing blocks array, separated by each code id:
     * Number of missing blocks found from non-raided files.
     * codeId1
     * index 0: Number of stripes found with one block missing in this fsck
     * index 1: Number of stripes found with two block missing in this fsck
     * and so on
     * codeId2
     * index 0: Number of stripes found with one block missing in this fsck
     * index 1: Number of stripes found with two block missing in this fsck
     * and so on
     */
    if (cntMissingBlksPerStrp) {
        out.println(this.numNonRaidedMissingBlks);
        for (String codecId : numStrpMissingBlksMap.keySet()) {
            out.println(codecId);
            AtomicLongArray numStrpMissingBlks = numStrpMissingBlksMap.get(codecId);
            for (int j = 0; j < numStrpMissingBlks.length(); j++) {
                long temp = numStrpMissingBlks.get(j);
                out.println(temp);
                LOG.info("Number of stripes with missing blocks at index " + j + " is " + temp);
            }
        }
    }
}
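
The ThreadFactory in the example above uses incrementAndGet() to give each worker thread a unique, increasing suffix, and finishNum as a completion counter bumped in a finally block. A stripped-down sketch of the same naming pattern (class name and "worker-" prefix are illustrative, not from the Hadoop source):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

public class NamedThreadFactorySketch {
    public static void main(String[] args) {
        ThreadFactory factory = new ThreadFactory() {
            private final AtomicInteger tnum = new AtomicInteger();

            @Override
            public Thread newThread(Runnable r) {
                // incrementAndGet() hands out a distinct number to every thread,
                // even when the pool creates several threads concurrently.
                return new Thread(r, "worker-" + tnum.incrementAndGet());
            }
        };
        ExecutorService pool = Executors.newFixedThreadPool(4, factory);
        pool.execute(() -> System.out.println(Thread.currentThread().getName()));
        pool.shutdown();
    }
}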

From source file:de.qaware.chronix.importer.csv.FileImporter.java

/**
 * Reads the given file / folder and calls the bi consumer with the extracted points
 *
 * @param points    map that is filled with the first/last instant of each imported time series, keyed by its attributes
 * @param folder    a single csv/gz file, or a folder that is scanned recursively for such files
 * @param databases consumers that receive the parsed points and attributes of each time series
 * @return a pair of (number of imported time series, number of imported points)
 */
public Pair<Integer, Integer> importPoints(Map<Attributes, Pair<Instant, Instant>> points, File folder,
        BiConsumer<List<ImportPoint>, Attributes>... databases) {

    final AtomicInteger pointCounter = new AtomicInteger(0);
    final AtomicInteger tsCounter = new AtomicInteger(0);
    final File metricsFile = new File(METRICS_FILE_PATH);

    LOGGER.info("Writing imported metrics to {}", metricsFile);
    LOGGER.info("Import supports csv files as well as gz compressed csv files.");

    try {
        final FileWriter metricsFileWriter = new FileWriter(metricsFile);

        Collection<File> files = new ArrayList<>();
        if (folder.isFile()) {
            files.add(folder);
        } else {
            files.addAll(FileUtils.listFiles(folder, new String[] { "gz", "csv" }, true));
        }

        AtomicInteger counter = new AtomicInteger(0);

        files.parallelStream().forEach(file -> {
            SimpleDateFormat sdf = new SimpleDateFormat(dateFormat);
            NumberFormat nf = DecimalFormat.getInstance(numberLocal);

            InputStream inputStream = null;
            BufferedReader reader = null;
            try {
                inputStream = new FileInputStream(file);

                if (file.getName().endsWith("gz")) {
                    inputStream = new GZIPInputStream(inputStream);
                }
                reader = new BufferedReader(new InputStreamReader(inputStream));

                //Read the first line
                String headerLine = reader.readLine();

                if (headerLine == null || headerLine.isEmpty()) {
                    boolean deleted = deleteFile(file, inputStream, reader);
                    LOGGER.debug("File is empty {}. File {} removed {}", file.getName(), deleted);
                    return;
                }

                //Extract the attributes from the file name
                //E.g. first_second_third_attribute.csv
                String[] fileNameMetaData = file.getName().split("_");

                String[] metrics = headerLine.split(csvDelimiter);

                Map<Integer, Attributes> attributesPerTimeSeries = new HashMap<>(metrics.length);

                for (int i = 1; i < metrics.length; i++) {
                    String metric = metrics[i];
                    String metricOnlyAscii = Normalizer.normalize(metric, Normalizer.Form.NFD);
                    metricOnlyAscii = metricOnlyAscii.replaceAll("[^\\x00-\\x7F]", "");
                    Attributes attributes = new Attributes(metricOnlyAscii, fileNameMetaData);

                    //Check if meta data is completely set
                    if (isEmpty(attributes)) {
                        boolean deleted = deleteFile(file, inputStream, reader);
                        LOGGER.info("Attributes contains empty values {}. File {} deleted {}", attributes,
                                file.getName(), deleted);
                        continue;
                    }

                    if (attributes.getMetric().equals(".*")) {
                        boolean deleted = deleteFile(file, inputStream, reader);
                        LOGGER.info("Attributes metric{}. File {} deleted {}", attributes.getMetric(),
                                file.getName(), deleted);
                        continue;
                    }
                    attributesPerTimeSeries.put(i, attributes);
                    tsCounter.incrementAndGet();

                }

                Map<Integer, List<ImportPoint>> dataPoints = new HashMap<>();

                String line;
                while ((line = reader.readLine()) != null) {
                    String[] splits = line.split(csvDelimiter);
                    String date = splits[0];

                    Instant dateObject;
                    if (instantDate) {
                        dateObject = Instant.parse(date);
                    } else if (sdfDate) {
                        dateObject = sdf.parse(date).toInstant();
                    } else {
                        dateObject = Instant.ofEpochMilli(Long.valueOf(date));
                    }

                    for (int column = 1; column < splits.length; column++) {

                        String value = splits[column];
                        double numericValue = nf.parse(value).doubleValue();

                        ImportPoint point = new ImportPoint(dateObject, numericValue);

                        if (!dataPoints.containsKey(column)) {
                            dataPoints.put(column, new ArrayList<>());
                        }
                        dataPoints.get(column).add(point);
                        pointCounter.incrementAndGet();
                    }

                }

                dataPoints.values().forEach(Collections::sort);

                IOUtils.closeQuietly(reader);
                IOUtils.closeQuietly(inputStream);

                dataPoints.forEach((key, importPoints) -> {
                    for (BiConsumer<List<ImportPoint>, Attributes> database : databases) {
                        database.accept(importPoints, attributesPerTimeSeries.get(key));
                    }
                    points.put(attributesPerTimeSeries.get(key), Pair.of(importPoints.get(0).getDate(),
                            importPoints.get(importPoints.size() - 1).getDate()));
                    //write the stats to the file
                    Instant start = importPoints.get(0).getDate();
                    Instant end = importPoints.get(importPoints.size() - 1).getDate();

                    try {
                        writeStatsLine(metricsFileWriter, attributesPerTimeSeries.get(key), start, end);
                    } catch (IOException e) {
                        LOGGER.error("Could not write stats line", e);
                    }
                    LOGGER.info("{} of {} time series imported", counter.incrementAndGet(), tsCounter.get());
                });

            } catch (Exception e) {
                LOGGER.info("Exception while reading points.", e);
            } finally {
                //close all streams
                IOUtils.closeQuietly(reader);
                IOUtils.closeQuietly(inputStream);
            }

        });
    } catch (Exception e) {
        LOGGER.error("Exception occurred during reading points.");
    }
    return Pair.of(tsCounter.get(), pointCounter.get());
}
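
The importer counts time series and points with AtomicInteger because the work runs inside files.parallelStream(), where a plain int or long would race (and could not be captured by the lambda anyway). A minimal sketch of that idea, using made-up in-memory lines instead of CSV files and Java 9's List.of for brevity:

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class ParallelCountSketch {
    public static void main(String[] args) {
        List<String> lines = List.of("a;1", "b;2", "c;3", "d;4");
        AtomicInteger processed = new AtomicInteger(0);

        // Each parallel worker bumps the shared counter atomically; a non-atomic
        // mutable holder captured here could lose updates under contention.
        lines.parallelStream().forEach(line -> {
            // ... parse and store the line ...
            processed.incrementAndGet();
        });

        System.out.println("processed " + processed.get() + " lines");
    }
}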

From source file:org.alfresco.repo.activities.feed.FeedNotifierImpl.java

private void executeInternal(final int repeatIntervalMins) {
    final String emailTemplateRef = getEmailTemplateRef();

    if (emailTemplateRef == null) {
        return;
    }

    final String shareUrl = UrlUtil.getShareUrl(sysAdminParams);

    if (logger.isDebugEnabled()) {
        logger.debug("Share URL configured as: " + shareUrl);
    }

    final AtomicInteger userCnt = new AtomicInteger(0);
    final AtomicInteger feedEntryCnt = new AtomicInteger(0);

    final long startTime = System.currentTimeMillis();

    // local cache for this execution
    final Map<String, String> siteNames = new ConcurrentHashMap<String, String>(10);

    try {
        final String currentUser = AuthenticationUtil.getRunAsUser();
        final String tenantDomain = TenantUtil.getCurrentDomain();

        // process the feeds using the batch processor {@link BatchProcessor}
        BatchProcessor.BatchProcessWorker<PersonInfo> worker = new BatchProcessor.BatchProcessWorker<PersonInfo>() {
            public String getIdentifier(final PersonInfo person) {
                StringBuilder sb = new StringBuilder("Person ");
                sb.append(person.getUserName());
                return sb.toString();
            }

            public void beforeProcess() throws Throwable {
                AuthenticationUtil.pushAuthentication();
                AuthenticationUtil.setFullyAuthenticatedUser(currentUser);
            }

            public void afterProcess() throws Throwable {
                AuthenticationUtil.popAuthentication();
            }

            public void process(final PersonInfo person) throws Throwable {
                final RetryingTransactionHelper txHelper = transactionService.getRetryingTransactionHelper();
                txHelper.setMaxRetries(0);

                TenantUtil.runAsTenant(new TenantRunAsWork<Void>() {
                    @Override
                    public Void doWork() throws Exception {
                        txHelper.doInTransaction(new RetryingTransactionCallback<Void>() {
                            public Void execute() throws Throwable {
                                processInternal(person);
                                return null;
                            }
                        }, false, true);
                        return null;
                    }
                }, tenantDomain);
            }

            private void processInternal(final PersonInfo person) throws Exception {
                final NodeRef personNodeRef = person.getNodeRef();
                try {
                    Pair<Integer, Long> result = userNotifier.notifyUser(personNodeRef, MSG_EMAIL_SUBJECT,
                            new Object[] { ModelUtil.getProductName(repoAdminService) }, siteNames, shareUrl,
                            repeatIntervalMins, emailTemplateRef);
                    if (result != null) {
                        int entryCnt = result.getFirst();
                        final long maxFeedId = result.getSecond();

                        Long currentMaxFeedId = (Long) nodeService.getProperty(personNodeRef,
                                ContentModel.PROP_EMAIL_FEED_ID);
                        if ((currentMaxFeedId == null) || (currentMaxFeedId < maxFeedId)) {
                            nodeService.setProperty(personNodeRef, ContentModel.PROP_EMAIL_FEED_ID, maxFeedId);
                        }

                        userCnt.incrementAndGet();
                        feedEntryCnt.addAndGet(entryCnt);
                    }
                } catch (InvalidNodeRefException inre) {
                    // skip this person - eg. no longer exists ?
                    logger.warn(
                            "Skip feed notification for user (" + personNodeRef + "): " + inre.getMessage());
                }
            }
        };

        // grab people for the batch processor in chunks of size batchSize
        BatchProcessWorkProvider<PersonInfo> provider = new BatchProcessWorkProvider<PersonInfo>() {
            private int skip = 0;
            private int maxItems = batchSize;
            private boolean hasMore = true;

            @Override
            public int getTotalEstimatedWorkSize() {
                return personService.countPeople();
            }

            @Override
            public Collection<PersonInfo> getNextWork() {
                if (!hasMore) {
                    return Collections.emptyList();
                }
                PagingResults<PersonInfo> people = personService.getPeople(null, null, null,
                        new PagingRequest(skip, maxItems));
                List<PersonInfo> page = people.getPage();
                skip += page.size();
                hasMore = people.hasMoreItems();
                return page;
            }
        };

        final RetryingTransactionHelper txHelper = transactionService.getRetryingTransactionHelper();
        txHelper.setMaxRetries(0);

        new BatchProcessor<PersonInfo>("FeedNotifier", txHelper, provider, numThreads, batchSize,
                applicationContext, logger, 100).process(worker, true);
    } catch (Throwable e) {
        // If the VM is shutting down, then ignore
        if (vmShutdownListener.isVmShuttingDown()) {
            // Ignore
        } else {
            logger.error("Exception during notification of feeds", e);
        }
    } finally {
        int count = userCnt.get();
        int entryCount = feedEntryCnt.get();

        // assume sends are synchronous - hence bump up to last max feed id
        if (count > 0) {
            if (logger.isInfoEnabled()) {
                // TODO i18n of info message
                StringBuilder sb = new StringBuilder();
                sb.append("Notified ").append(userCnt).append(" user").append(count != 1 ? "s" : "");
                sb.append(" of ").append(feedEntryCnt).append(" activity feed entr")
                        .append(entryCount != 1 ? "ies" : "y");
                sb.append(" (in ").append(System.currentTimeMillis() - startTime).append(" msecs)");
                logger.info(sb.toString());
            }
        } else {
            if (logger.isTraceEnabled()) {
                logger.trace("Nothing to send since no new user activities found");
            }
        }
    }
}
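
Note how the worker above combines incrementAndGet() for the per-user count with addAndGet(entryCnt) for the per-entry total. A small illustrative sketch of the two variants side by side (names are made up, not from the Alfresco source):

import java.util.concurrent.atomic.AtomicInteger;

public class CounterSketch {
    private final AtomicInteger users = new AtomicInteger(0);
    private final AtomicInteger entries = new AtomicInteger(0);

    // Called once per notified user, potentially from several batch-processor threads.
    void recordNotification(int entryCount) {
        users.incrementAndGet();       // always adds exactly one
        entries.addAndGet(entryCount); // adds an arbitrary delta atomically
    }

    public static void main(String[] args) {
        CounterSketch c = new CounterSketch();
        c.recordNotification(3);
        c.recordNotification(5);
        System.out.println(c.users.get() + " users, " + c.entries.get() + " entries");
    }
}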

From source file:org.apache.hadoop.hbase.master.AssignmentManager.java

/**
 * Handles various states an unassigned node can be in.
 * <p>
 * Method is called when a state change is suspected for an unassigned node.
 * <p>
 * This deals with skipped transitions (we got a CLOSED but didn't see CLOSING
 * yet).
 * @param rt
 * @param expectedVersion
 */
void handleRegion(final RegionTransition rt, int expectedVersion) {
    if (rt == null) {
        LOG.warn("Unexpected NULL input for RegionTransition rt");
        return;
    }
    final ServerName sn = rt.getServerName();
    // Check if this is a special HBCK transition
    if (sn.equals(HBCK_CODE_SERVERNAME)) {
        handleHBCK(rt);
        return;
    }
    final long createTime = rt.getCreateTime();
    final byte[] regionName = rt.getRegionName();
    String encodedName = HRegionInfo.encodeRegionName(regionName);
    String prettyPrintedRegionName = HRegionInfo.prettyPrint(encodedName);
    // Verify this is a known server
    if (!serverManager.isServerOnline(sn) && !ignoreStatesRSOffline.contains(rt.getEventType())) {
        LOG.warn("Attempted to handle region transition for server but " + "it is not online: "
                + prettyPrintedRegionName + ", " + rt);
        return;
    }

    RegionState regionState = regionStates.getRegionState(encodedName);
    long startTime = System.currentTimeMillis();
    if (LOG.isDebugEnabled()) {
        boolean lateEvent = createTime < (startTime - 15000);
        LOG.debug("Handling " + rt.getEventType() + ", server=" + sn + ", region="
                + (prettyPrintedRegionName == null ? "null" : prettyPrintedRegionName)
                + (lateEvent ? ", which is more than 15 seconds late" : "") + ", current_state=" + regionState);
    }
    // We don't do anything for this event,
    // so separate it out, no need to lock/unlock anything
    if (rt.getEventType() == EventType.M_ZK_REGION_OFFLINE) {
        return;
    }

    // We need a lock on the region as we could update it
    Lock lock = locker.acquireLock(encodedName);
    try {
        RegionState latestState = regionStates.getRegionState(encodedName);
        if ((regionState == null && latestState != null) || (regionState != null && latestState == null)
                || (regionState != null && latestState != null
                        && latestState.getState() != regionState.getState())) {
            LOG.warn("Region state changed from " + regionState + " to " + latestState
                    + ", while acquiring lock");
        }
        long waitedTime = System.currentTimeMillis() - startTime;
        if (waitedTime > 5000) {
            LOG.warn("Took " + waitedTime + "ms to acquire the lock");
        }
        regionState = latestState;
        switch (rt.getEventType()) {
        case RS_ZK_REQUEST_REGION_SPLIT:
        case RS_ZK_REGION_SPLITTING:
        case RS_ZK_REGION_SPLIT:
            if (!handleRegionSplitting(rt, encodedName, prettyPrintedRegionName, sn)) {
                deleteSplittingNode(encodedName, sn);
            }
            break;

        case RS_ZK_REQUEST_REGION_MERGE:
        case RS_ZK_REGION_MERGING:
        case RS_ZK_REGION_MERGED:
            // Merged region is a new region, we can't find it in the region states now.
            // However, the two merging regions are not new. They should be in state for merging.
            if (!handleRegionMerging(rt, encodedName, prettyPrintedRegionName, sn)) {
                deleteMergingNode(encodedName, sn);
            }
            break;

        case M_ZK_REGION_CLOSING:
            // Should see CLOSING after we have asked it to CLOSE or additional
            // times after already being in state of CLOSING
            if (regionState == null || !regionState.isPendingCloseOrClosingOnServer(sn)) {
                LOG.warn("Received CLOSING for " + prettyPrintedRegionName + " from " + sn
                        + " but the region isn't PENDING_CLOSE/CLOSING here: "
                        + regionStates.getRegionState(encodedName));
                return;
            }
            // Transition to CLOSING (or update stamp if already CLOSING)
            regionStates.updateRegionState(rt, State.CLOSING);
            break;

        case RS_ZK_REGION_CLOSED:
            // Should see CLOSED after CLOSING but possible after PENDING_CLOSE
            if (regionState == null || !regionState.isPendingCloseOrClosingOnServer(sn)) {
                LOG.warn("Received CLOSED for " + prettyPrintedRegionName + " from " + sn
                        + " but the region isn't PENDING_CLOSE/CLOSING here: "
                        + regionStates.getRegionState(encodedName));
                return;
            }
            // Handle CLOSED by assigning elsewhere or stopping if a disable
            // If we got here all is good.  Need to update RegionState -- else
            // what follows will fail because not in expected state.
            new ClosedRegionHandler(server, this, regionState.getRegion()).process();
            updateClosedRegionHandlerTracker(regionState.getRegion());
            break;

        case RS_ZK_REGION_FAILED_OPEN:
            if (regionState == null || !regionState.isPendingOpenOrOpeningOnServer(sn)) {
                LOG.warn("Received FAILED_OPEN for " + prettyPrintedRegionName + " from " + sn
                        + " but the region isn't PENDING_OPEN/OPENING here: "
                        + regionStates.getRegionState(encodedName));
                return;
            }
            AtomicInteger failedOpenCount = failedOpenTracker.get(encodedName);
            if (failedOpenCount == null) {
                failedOpenCount = new AtomicInteger();
                // No need to use putIfAbsent, or extra synchronization since
                // this whole handleRegion block is locked on the encoded region
                // name, and failedOpenTracker is updated only in this block
                failedOpenTracker.put(encodedName, failedOpenCount);
            }
            if (failedOpenCount.incrementAndGet() >= maximumAttempts) {
                regionStates.updateRegionState(rt, State.FAILED_OPEN);
                // remove the tracking info to save memory, also reset
                // the count for next open initiative
                failedOpenTracker.remove(encodedName);
            } else {
                // Handle this the same as if it were opened and then closed.
                regionState = regionStates.updateRegionState(rt, State.CLOSED);
                if (regionState != null) {
                    // When there are more than one region server a new RS is selected as the
                    // destination and the same is updated in the regionplan. (HBASE-5546)
                    try {
                        getRegionPlan(regionState.getRegion(), sn, true);
                        new ClosedRegionHandler(server, this, regionState.getRegion()).process();
                    } catch (HBaseIOException e) {
                        LOG.warn("Failed to get region plan", e);
                    }
                }
            }
            break;

        case RS_ZK_REGION_OPENING:
            // Should see OPENING after we have asked it to OPEN or additional
            // times after already being in state of OPENING
            if (regionState == null || !regionState.isPendingOpenOrOpeningOnServer(sn)) {
                LOG.warn("Received OPENING for " + prettyPrintedRegionName + " from " + sn
                        + " but the region isn't PENDING_OPEN/OPENING here: "
                        + regionStates.getRegionState(encodedName));
                return;
            }
            // Transition to OPENING (or update stamp if already OPENING)
            regionStates.updateRegionState(rt, State.OPENING);
            break;

        case RS_ZK_REGION_OPENED:
            // Should see OPENED after OPENING but possible after PENDING_OPEN.
            if (regionState == null || !regionState.isPendingOpenOrOpeningOnServer(sn)) {
                LOG.warn("Received OPENED for " + prettyPrintedRegionName + " from " + sn
                        + " but the region isn't PENDING_OPEN/OPENING here: "
                        + regionStates.getRegionState(encodedName));

                if (regionState != null) {
                    // Close it without updating the internal region states,
                    // so as not to create double assignments in unlucky scenarios
                    // mentioned in OpenRegionHandler#process
                    unassign(regionState.getRegion(), null, -1, null, false, sn);
                }
                return;
            }
            // Handle OPENED by removing from transition and deleted zk node
            regionState = regionStates.updateRegionState(rt, State.OPEN);
            if (regionState != null) {
                failedOpenTracker.remove(encodedName); // reset the count, if any
                new OpenedRegionHandler(server, this, regionState.getRegion(), sn, expectedVersion).process();
                updateOpenedRegionHandlerTracker(regionState.getRegion());
            }
            break;

        default:
            throw new IllegalStateException("Received event is not valid.");
        }
    } finally {
        lock.unlock();
    }
}
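
In the RS_ZK_REGION_FAILED_OPEN branch, failedOpenTracker keeps one AtomicInteger per region so that incrementAndGet() both bumps and reads the failure count in a single step before comparing it against maximumAttempts. A simplified, hypothetical sketch of that retry-budget pattern using a ConcurrentHashMap:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

public class RetryTrackerSketch {
    private static final int MAX_ATTEMPTS = 3;
    private final ConcurrentMap<String, AtomicInteger> failures = new ConcurrentHashMap<>();

    // Returns true when the key has exhausted its retry budget.
    boolean recordFailure(String key) {
        AtomicInteger count = failures.computeIfAbsent(key, k -> new AtomicInteger());
        if (count.incrementAndGet() >= MAX_ATTEMPTS) {
            failures.remove(key); // reset so a later attempt starts with a fresh count
            return true;
        }
        return false;
    }

    public static void main(String[] args) {
        RetryTrackerSketch tracker = new RetryTrackerSketch();
        System.out.println(tracker.recordFailure("region-1")); // false (1st failure)
        System.out.println(tracker.recordFailure("region-1")); // false (2nd failure)
        System.out.println(tracker.recordFailure("region-1")); // true  (3rd failure, budget exhausted)
    }
}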

From source file:org.apache.lens.server.query.QueryExecutionServiceImpl.java

private void startEstimatePool() {
    int minPoolSize = conf.getInt(ESTIMATE_POOL_MIN_THREADS, DEFAULT_ESTIMATE_POOL_MIN_THREADS);
    int maxPoolSize = conf.getInt(ESTIMATE_POOL_MAX_THREADS, DEFAULT_ESTIMATE_POOL_MAX_THREADS);
    int keepAlive = conf.getInt(ESTIMATE_POOL_KEEP_ALIVE_MILLIS, DEFAULT_ESTIMATE_POOL_KEEP_ALIVE_MILLIS);

    final ThreadFactory defaultFactory = Executors.defaultThreadFactory();
    final AtomicInteger thId = new AtomicInteger();
    // We are creating our own thread factory, just so that we can override thread name for easy debugging
    ThreadFactory threadFactory = new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread th = defaultFactory.newThread(r);
            th.setName("estimate-" + thId.incrementAndGet());
            return th;
        }
    };

    log.debug("starting estimate pool");

    ThreadPoolExecutor estimatePool = new ThreadPoolExecutor(minPoolSize, maxPoolSize, keepAlive,
            TimeUnit.MILLISECONDS, new SynchronousQueue<Runnable>(), threadFactory);
    estimatePool.allowCoreThreadTimeOut(false);
    estimatePool.prestartCoreThread();
    this.estimatePool = estimatePool;
}

From source file:org.apache.lens.server.query.QueryExecutionServiceImpl.java

private void startLauncherPool() {
    int minPoolSize = conf.getInt(LAUNCHER_POOL_MIN_THREADS, DEFAULT_LAUNCHER_POOL_MIN_THREADS);
    int maxPoolSize = conf.getInt(LAUNCHER_POOL_MAX_THREADS, DEFAULT_LAUNCHER_POOL_MAX_THREADS);
    int keepAlive = conf.getInt(LAUNCHER_POOL_KEEP_ALIVE_MILLIS, DEFAULT_LAUNCHER_POOL_KEEP_ALIVE_MILLIS);

    final ThreadFactory defaultFactory = Executors.defaultThreadFactory();
    final AtomicInteger thId = new AtomicInteger();
    // We are creating our own thread factory, just so that we can override thread name for easy debugging
    ThreadFactory threadFactory = new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread th = defaultFactory.newThread(r);
            th.setName("launcher-" + thId.incrementAndGet());
            return th;
        }
    };

    log.debug("starting query launcher pool");

    ThreadPoolExecutor launcherPool = new ThreadPoolExecutor(minPoolSize, maxPoolSize, keepAlive,
            TimeUnit.MILLISECONDS, new SynchronousQueue<Runnable>(), threadFactory);
    launcherPool.allowCoreThreadTimeOut(false);
    launcherPool.prestartCoreThread();
    this.queryLauncherPool = launcherPool;
}

From source file:org.apache.camel.processor.MulticastProcessor.java

protected void doProcessParallel(final Exchange original, final AtomicExchange result,
        final Iterable<ProcessorExchangePair> pairs, final boolean streaming, final AsyncCallback callback)
        throws Exception {

    ObjectHelper.notNull(executorService, "ExecutorService", this);
    ObjectHelper.notNull(aggregateExecutorService, "AggregateExecutorService", this);

    final CompletionService<Exchange> completion;
    if (streaming) {
        // execute tasks in parallel+streaming and aggregate in the order they are finished (out of order sequence)
        completion = new ExecutorCompletionService<Exchange>(executorService);
    } else {
        // execute tasks in parallel and aggregate in the order the tasks are submitted (in order sequence)
        completion = new SubmitOrderedCompletionService<Exchange>(executorService);
    }

    // when parallel then aggregate on the fly
    final AtomicBoolean running = new AtomicBoolean(true);
    final AtomicInteger total = new AtomicInteger(0);
    final AtomicBoolean allTasksSubmitted = new AtomicBoolean();
    final CountDownLatch aggregationOnTheFlyDone = new CountDownLatch(1);
    final AtomicException executionException = new AtomicException();

    final Iterator<ProcessorExchangePair> it = pairs.iterator();

    if (it.hasNext()) {
        // issue task to execute in separate thread so it can aggregate on-the-fly
        // while we submit new tasks, and those tasks complete concurrently
        // this allows us to optimize work and reduce memory consumption
        AggregateOnTheFlyTask task = new AggregateOnTheFlyTask(result, original, total, completion, running,
                aggregationOnTheFlyDone, allTasksSubmitted, executionException);

        // and start the aggregation task so we can aggregate on-the-fly
        aggregateExecutorService.submit(task);
    }

    LOG.trace("Starting to submit parallel tasks");

    while (it.hasNext()) {
        final ProcessorExchangePair pair = it.next();
        final Exchange subExchange = pair.getExchange();
        updateNewExchange(subExchange, total.intValue(), pairs, it);

        completion.submit(new Callable<Exchange>() {
            public Exchange call() throws Exception {
                if (!running.get()) {
                    // do not start processing the task if we are not running
                    return subExchange;
                }

                try {
                    doProcessParallel(pair);
                } catch (Throwable e) {
                    subExchange.setException(e);
                }

                // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                Integer number = getExchangeIndex(subExchange);
                boolean continueProcessing = PipelineHelper.continueProcessing(subExchange,
                        "Parallel processing failed for number " + number, LOG);
                if (stopOnException && !continueProcessing) {
                    // signal to stop running
                    running.set(false);
                    // throw caused exception
                    if (subExchange.getException() != null) {
                        // wrap in exception to explain where it failed
                        throw new CamelExchangeException("Parallel processing failed for number " + number,
                                subExchange, subExchange.getException());
                    }
                }

                if (LOG.isTraceEnabled()) {
                    LOG.trace("Parallel processing complete for exchange: " + subExchange);
                }
                return subExchange;
            }
        });

        total.incrementAndGet();
    }

    // signal that all tasks have been submitted
    if (LOG.isTraceEnabled()) {
        LOG.trace("Signaling that all " + total.get() + " tasks have been submitted.");
    }
    }
    allTasksSubmitted.set(true);

    // it's too hard to do parallel async routing, so we let the caller thread be synchronous
    // and have it pick up the replies and do the aggregation (e.g. we use a latch to wait)
    // wait for aggregation to be done
    if (LOG.isDebugEnabled()) {
        LOG.debug("Waiting for on-the-fly aggregation to complete aggregating " + total.get() + " responses.");
    }
    aggregationOnTheFlyDone.await();

    // did we fail for whatever reason, if so throw that caused exception
    if (executionException.get() != null) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Parallel processing failed due " + executionException.get().getMessage());
        }
        throw executionException.get();
    }

    // now everything is okay, so we are done
    if (LOG.isDebugEnabled()) {
        LOG.debug("Done parallel processing " + total + " exchanges");
    }
}

From source file:org.apache.hadoop.hbase.client.HBaseAdmin.java

/**
 * Creates a new table with an initial set of empty regions defined by the
 * specified split keys.  The total number of regions created will be the
 * number of split keys plus one. Synchronous operation.
 * Note : Avoid passing empty split key.
 *
 * @param desc table descriptor for table
 * @param splitKeys array of split keys for the initial regions of the table
 *
 * @throws IllegalArgumentException if the table name is reserved, if the split keys
 * are repeated, or if a split key is an empty byte array.
 * @throws MasterNotRunningException if master is not running
 * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
 * threads, the table may have been created between test-for-existence
 * and attempt-at-creation).
 * @throws IOException
 */
public void createTable(final HTableDescriptor desc, byte[][] splitKeys) throws IOException {
    try {
        createTableAsync(desc, splitKeys);
    } catch (SocketTimeoutException ste) {
        LOG.warn("Creating " + desc.getTableName() + " took too long", ste);
    }
    int numRegs = splitKeys == null ? 1 : splitKeys.length + 1;
    int prevRegCount = 0;
    boolean doneWithMetaScan = false;
    for (int tries = 0; tries < this.numRetries * this.retryLongerMultiplier; ++tries) {
        if (!doneWithMetaScan) {
            // Wait for new table to come on-line
            final AtomicInteger actualRegCount = new AtomicInteger(0);
            MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
                @Override
                public boolean processRow(Result rowResult) throws IOException {
                    HRegionInfo info = HRegionInfo.getHRegionInfo(rowResult);
                    if (info == null) {
                        LOG.warn("No serialized HRegionInfo in " + rowResult);
                        return true;
                    }
                    if (!info.getTable().equals(desc.getTableName())) {
                        return false;
                    }
                    ServerName serverName = HRegionInfo.getServerName(rowResult);
                    // Make sure that regions are assigned to server
                    if (!(info.isOffline() || info.isSplit()) && serverName != null
                            && serverName.getHostAndPort() != null) {
                        actualRegCount.incrementAndGet();
                    }
                    return true;
                }
            };
            MetaScanner.metaScan(conf, connection, visitor, desc.getTableName());
            if (actualRegCount.get() < numRegs) {
                if (tries == this.numRetries * this.retryLongerMultiplier - 1) {
                    throw new RegionOfflineException("Only " + actualRegCount.get() + " of " + numRegs
                            + " regions are online; retries exhausted.");
                }
                try { // Sleep
                    Thread.sleep(getPauseTime(tries));
                } catch (InterruptedException e) {
                    throw new InterruptedIOException("Interrupted when opening" + " regions; "
                            + actualRegCount.get() + " of " + numRegs + " regions processed so far");
                }
                if (actualRegCount.get() > prevRegCount) { // Making progress
                    prevRegCount = actualRegCount.get();
                    tries = -1;
                }
            } else {
                doneWithMetaScan = true;
                tries = -1;
            }
        } else if (isTableEnabled(desc.getTableName())) {
            return;
        } else {
            try { // Sleep
                Thread.sleep(getPauseTime(tries));
            } catch (InterruptedException e) {
                throw new InterruptedIOException(
                        "Interrupted when waiting" + " for table to be enabled; meta scan was done");
            }
        }
    }
    throw new TableNotEnabledException(
            "Retries exhausted while still waiting for table: " + desc.getTableName() + " to be enabled");
}
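
Here the AtomicInteger mainly serves as a mutable counter that the anonymous MetaScannerVisitor can update from its callback, since a local variable captured by an inner class must be (effectively) final. A minimal sketch of that idiom with a hypothetical visitor interface:

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class VisitorCountSketch {
    interface RowVisitor {
        boolean processRow(String row);
    }

    static void scan(List<String> rows, RowVisitor visitor) {
        for (String row : rows) {
            if (!visitor.processRow(row)) {
                break;
            }
        }
    }

    public static void main(String[] args) {
        final AtomicInteger matches = new AtomicInteger(0);
        // The callback cannot reassign a captured int, but it can mutate
        // the AtomicInteger it captures.
        scan(List.of("a", "b", "a"), row -> {
            if (row.equals("a")) {
                matches.incrementAndGet();
            }
            return true;
        });
        System.out.println(matches.get() + " matches"); // 2 matches
    }
}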

From source file:com.github.podd.api.test.AbstractPoddArtifactManagerTest.java

@Ignore("TODO: Enable periodically to debug concurrency issues")
@Test
public final void testLoadArtifactConcurrency() throws Exception {
    // prepare:
    this.loadVersion1SchemaOntologies();

    // load test artifact
    final InputStream inputStream4Artifact = this.getClass()
            .getResourceAsStream(TestConstants.TEST_ARTIFACT_IMPORT_PSCIENCEv1);

    Assert.assertNotNull("Could not find test resource: " + TestConstants.TEST_ARTIFACT_IMPORT_PSCIENCEv1,
            inputStream4Artifact);

    final String nextTestArtifact = IOUtils.toString(inputStream4Artifact);

    final AtomicInteger count = new AtomicInteger(0);
    final CountDownLatch openLatch = new CountDownLatch(1);
    final int threadCount = 15;
    final CountDownLatch closeLatch = new CountDownLatch(threadCount);
    for (int i = 0; i < threadCount; i++) {
        final int number = i;
        final Runnable runner = new Runnable() {
            @Override
            public void run() {
                try {
                    openLatch.await();
                    for (int j = 0; j < 5; j++) {
                        final ByteArrayInputStream inputStream = new ByteArrayInputStream(
                                nextTestArtifact.getBytes(StandardCharsets.UTF_8));
                        final InferredOWLOntologyID artifactId = AbstractPoddArtifactManagerTest.this.testArtifactManager
                                .loadArtifact(inputStream, RDFFormat.RDFXML);
                    }
                    count.incrementAndGet();
                } catch (OpenRDFException | PoddException | IOException | OWLException e) {
                    e.printStackTrace();
                    Assert.fail("Failed in test: " + number);
                } catch (final InterruptedException ie) {
                    ie.printStackTrace();
                    Assert.fail("Failed in test: " + number);
                } finally {
                    closeLatch.countDown();
                }
            }
        };
        new Thread(runner, "TestThread" + number).start();
    }
    // all threads are waiting on the latch.
    openLatch.countDown(); // release the latch
    // all threads are now running concurrently.
    closeLatch.await();
    // Verify that there were no failures: count is only incremented on success, while
    // closeLatch is always counted down in the finally block, even on failure
    Assert.assertEquals(threadCount, count.get());

}
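
The test uses count purely as a success tally: every thread counts down closeLatch in its finally block, but only threads that finish without an exception reach count.incrementAndGet(), so the final assertion catches silent failures. A condensed sketch of that pattern with a placeholder thread body:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

public class ConcurrencyTestSketch {
    public static void main(String[] args) throws InterruptedException {
        final int threadCount = 8;
        final AtomicInteger successes = new AtomicInteger(0);
        final CountDownLatch start = new CountDownLatch(1);
        final CountDownLatch done = new CountDownLatch(threadCount);

        for (int i = 0; i < threadCount; i++) {
            new Thread(() -> {
                try {
                    start.await();
                    // ... perform the work under test ...
                    successes.incrementAndGet(); // only reached on success
                } catch (Exception e) {
                    e.printStackTrace();
                } finally {
                    done.countDown(); // always reached, success or failure
                }
            }).start();
        }

        start.countDown(); // release all threads at once
        done.await();      // wait for every thread to finish

        if (successes.get() != threadCount) {
            throw new AssertionError("only " + successes.get() + " of " + threadCount + " threads succeeded");
        }
    }
}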