Example usage for java.util.concurrent.atomic AtomicInteger incrementAndGet

Introduction

On this page you can find example usages of java.util.concurrent.atomic AtomicInteger incrementAndGet.

Prototype

public final int incrementAndGet() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd.
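
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing what incrementAndGet() guarantees: each call atomically adds one and returns the updated value, so a counter shared by several threads ends up exactly at the number of increments.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class IncrementAndGetDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger(0);
        ExecutorService pool = Executors.newFixedThreadPool(4);

        // 4 threads x 1000 increments each; incrementAndGet() is atomic, so no locking is needed.
        for (int t = 0; t < 4; t++) {
            pool.execute(() -> {
                for (int i = 0; i < 1000; i++) {
                    // The return value is the count *after* this increment.
                    counter.incrementAndGet();
                }
            });
        }
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.MINUTES);

        System.out.println(counter.get()); // always prints 4000
    }
}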

Usage

From source file:com.joyent.manta.client.MantaClient.java

/**
 * Recursively deletes an object in Manta.
 *
 * @param path The fully qualified path of the Manta object.
 * @throws IOException If an IO exception has occurred.
 * @throws MantaClientHttpResponseException If a http status code {@literal > 300} is returned.
 */
public void deleteRecursive(final String path) throws IOException {
    LOG.debug("DELETE {} [recursive]", path);

    /* We repeatedly run the find() -> delete() stream operation and then
     * check the directory targeted for deletion by attempting to delete it.
     * This deals with unpredictable changes to the directory contents that
     * are a result of concurrent modifications to the contents of the
     * directory path to be deleted. */
    int loops = 0;

    /* We record the number of request timeouts where we were unable to get
     * an HTTP connection from the pool in order to provide feedback to the
     * consumer of the SDK so that they can better tune their settings. */
    final AtomicInteger responseTimeouts = new AtomicInteger(0);

    while (true) {
        loops++;

        /* Initially, we delete only the file objects returned from the
         * stream because we don't care what order they are in. */
        Stream<MantaObject> toDelete = find(path).map(obj -> {
            if (obj.isDirectory()) {
                return obj;
            }

            try {
                delete(obj.getPath());
            } catch (MantaClientHttpResponseException e) {
                if (!e.getServerCode().equals(MantaErrorCode.RESOURCE_NOT_FOUND_ERROR)) {
                    throw new UncheckedIOException(e);
                }
                /* This exception can be thrown if the parallelism value
                 * isn't tuned for the findForkJoinPool in relation to
                 * the amount of bandwidth available. Essentially, the
                 * processing thread is waiting too long for a new
                 * connection from the pool. If this is thrown too often,
                 * the maximum number of connections can be increased,
                 * the ConnectionRequestTimeout can be increased, or
                 * the fork join pool parallelism value can be
                 * decreased.
                 * Below we cope with this problem, by skipping the
                 * deletion of the object and letting it
                 * get deleted later in the loop when there is less
                 * contention on the connection pool.
                 */
            } catch (ConnectionPoolTimeoutException e) {
                responseTimeouts.incrementAndGet();
                LOG.debug("{} for deleting object {}", e.getMessage(), obj.getPath());
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }

            obj.getHttpHeaders().put("deleted", true);

            return obj;
        })
                /* We then sort the directories (and remaining files) with
                 * the deepest paths in the filesystem hierarchy first, so
                 * that we can delete subdirectories and files before
                 * the parent directories.*/
                .sorted(MantaObjectDepthComparator.INSTANCE);

        /* We go through every remaining directory and file and attempt to
         * delete it, even though that operation may not be immediately
         * successful. */
        toDelete.forEachOrdered(obj -> {
            for (int i = 0; i < config.getRetries(); i++) {
                try {
                    /* Don't bother deleting the file if it was marked as
                     * deleted from the map step. */
                    if (obj.getHttpHeaders().containsKey("deleted")) {
                        break;
                    }

                    /* If a file snuck in, we will delete it here. Typically
                     * this should be an empty directory. */
                    delete(obj.getPath());

                    LOG.trace("Finished deleting path {}", obj.getPath());

                    break;
                } catch (MantaClientHttpResponseException e) {
                    // If the directory has already gone, we are good to go
                    if (e.getServerCode().equals(MantaErrorCode.RESOURCE_NOT_FOUND_ERROR)) {
                        break;
                    }

                    /* If we get a directory not empty error we try again
                     * hoping that the next iteration will clean up any
                     * remaining files. */
                    if (e.getServerCode().equals(MantaErrorCode.DIRECTORY_NOT_EMPTY_ERROR)) {
                        continue;
                    }

                    throw new UncheckedIOException(e);
                } catch (ConnectionPoolTimeoutException e) {
                    responseTimeouts.incrementAndGet();
                    LOG.debug("{} for deleting object {}", e.getMessage(), obj.getPath());
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            }
        });

        /* For each iteration of this loop, we attempt to delete the parent
         * path. If all subdirectories and files have been deleted, then
         * this operation will succeed.
         */
        try {
            delete(path);
            break;
        } catch (MantaClientHttpResponseException e) {
            // Somehow our current path has been deleted, so our work is done
            if (e.getServerCode().equals(MantaErrorCode.RESOURCE_NOT_FOUND_ERROR)) {
                break;
            } else if (e.getServerCode().equals(MantaErrorCode.DIRECTORY_NOT_EMPTY_ERROR)) {
                continue;
            }

            MantaIOException mioe = new MantaIOException("Unable to delete path", e);
            mioe.setContextValue("path", path);

            throw mioe;
        } catch (ConnectionPoolTimeoutException e) {
            responseTimeouts.incrementAndGet();
            LOG.debug("{} for deleting root object {}", e.getMessage(), path);
        }
    }

    LOG.debug("Finished deleting path {}. It took {} loops to delete recursively", path, loops);

    if (responseTimeouts.get() > 0) {
        LOG.info("Request timeouts were hit [{}] times when attempting to delete "
                + "recursively. You may want to adjust the Manta SDK request "
                + "timeout config setting, the Manta SDK maximum connections "
                + "setting, or the Java system property "
                + "[java.util.concurrent.ForkJoinPool.common.parallelism].", responseTimeouts.get());
    }
}
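
One reason the Manta example keeps responseTimeouts in an AtomicInteger rather than a plain int is that the count is bumped inside lambdas passed to the stream, and a local variable captured by a lambda must be effectively final. The sketch below illustrates that pattern in isolation; tryDelete and the path list are hypothetical stand-ins, not Manta API.

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class LambdaCounterSketch {
    public static void main(String[] args) {
        List<String> paths = List.of("/a", "/b", "/c");

        // A plain int could not be mutated from inside the lambda below,
        // because captured locals must be effectively final.
        final AtomicInteger failures = new AtomicInteger(0);

        paths.forEach(path -> {
            boolean ok = tryDelete(path);   // hypothetical operation that may fail transiently
            if (!ok) {
                failures.incrementAndGet(); // safe even if the traversal were parallel
            }
        });

        System.out.println("Transient failures: " + failures.get());
    }

    private static boolean tryDelete(String path) {
        // Stand-in for a remote call; pretend some paths fail.
        return path.hashCode() % 2 == 0;
    }
}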

From source file:gdsc.smlm.ij.plugins.CreateData.java

/**
 * Check if the localisation, or its neighbours, reach the SNR thresholds. The intensity and noise are after EM-gain
 * has been applied.
 * 
 * @param localisationSet the localisation set (including any neighbours) to check
 * @param intensity the localisation intensity (after EM-gain)
 * @param noise the noise estimate (after EM-gain)
 * @return true if the localisation fails to reach the SNR threshold
 */
public boolean badLocalisation(LocalisationModelSet localisationSet, double intensity, double noise) {
    // Set the minimum SNR for either a single spot or for a spot next to a brighter neighbour
    double minSNR = settings.minSNRt1;
    AtomicInteger counter = t1Removed;

    if (localisationSet.hasNeighbour()) {
        double nextIntensity = getIntensity(localisationSet.getNext());
        double previousIntensity = getIntensity(localisationSet.getPrevious());

        // Check if either neighbour is above the t1 threshold
        if ((nextIntensity / noise > settings.minSNRt1) || (previousIntensity / noise > settings.minSNRt1)) {
            // If neighbours are bright then use a more lenient threshold
            minSNR = settings.minSNRtN;
            counter = tNRemoved;
        }
    }

    if (intensity / noise < minSNR) {
        counter.incrementAndGet();
        return true;
    }
    return false;
}
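
Note how the example assigns one of two pre-existing AtomicInteger fields (t1Removed or tNRemoved) to a local reference and then increments whichever was chosen; since AtomicInteger is a mutable object, the increment lands on the shared field. A tiny sketch of that selection pattern with hypothetical counters:

import java.util.concurrent.atomic.AtomicInteger;

public class CounterSelectionSketch {
    // Two shared tallies, analogous to the plugin's t1Removed / tNRemoved fields (names here are illustrative).
    private static final AtomicInteger strictRejected = new AtomicInteger(0);
    private static final AtomicInteger lenientRejected = new AtomicInteger(0);

    static boolean reject(double snr, boolean hasBrightNeighbour) {
        double minSnr = 10.0;
        AtomicInteger counter = strictRejected;
        if (hasBrightNeighbour) {
            // A bright neighbour gets a more lenient threshold and its own tally.
            minSnr = 5.0;
            counter = lenientRejected;
        }
        if (snr < minSnr) {
            counter.incrementAndGet(); // updates whichever shared counter was selected
            return true;
        }
        return false;
    }

    public static void main(String[] args) {
        reject(4.0, false);
        reject(4.0, true);
        reject(8.0, true);
        System.out.println(strictRejected.get() + " / " + lenientRejected.get()); // 1 / 1
    }
}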

From source file:org.apache.hadoop.hbase.util.HBaseFsck.java

/**
 * Return a list of user-space table names whose metadata have not been
 * modified in the last few milliseconds specified by timelag.
 * If none of the REGIONINFO_QUALIFIER, SERVER_QUALIFIER, STARTCODE_QUALIFIER,
 * SPLITA_QUALIFIER or SPLITB_QUALIFIER columns have changed in the last
 * milliseconds specified by timelag, then the table is a candidate to be returned.
 * @param numSkipped incremented once for each table skipped because it is still in flux
 * @return tables that have not been modified recently
 */
HTableDescriptor[] getTables(AtomicInteger numSkipped) {
    List<TableName> tableNames = new ArrayList<TableName>();
    long now = System.currentTimeMillis();

    for (HbckInfo hbi : regionInfoMap.values()) {
        MetaEntry info = hbi.metaEntry;

        // if the start key is zero, then we have found the first region of a table.
        // pick only those tables that were not modified in the last few milliseconds.
        if (info != null && info.getStartKey().length == 0 && !info.isMetaRegion()) {
            if (info.modTime + timelag < now) {
                tableNames.add(info.getTable());
            } else {
                numSkipped.incrementAndGet(); // one more in-flux table
            }
        }
    }
    return getHTableDescriptors(tableNames);
}
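
In this HBaseFsck method the AtomicInteger acts as an "out parameter": the return value carries the descriptors while the caller-supplied counter reports how many in-flux tables were skipped. A stripped-down sketch of that pattern, with hypothetical names and no HBase types, looks like this:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;

public class OutParamCounterSketch {

    // Returns the items considered stable; counts the rejected (in-flux) ones in numSkipped.
    static List<String> selectStable(List<String> items, Map<String, Long> modTimes,
                                     long now, long timelag, AtomicInteger numSkipped) {
        List<String> result = new ArrayList<>();
        for (String item : items) {
            long modTime = modTimes.getOrDefault(item, 0L);
            if (modTime + timelag < now) {
                result.add(item);
            } else {
                numSkipped.incrementAndGet(); // one more in-flux item
            }
        }
        return result;
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        AtomicInteger skipped = new AtomicInteger(0);
        List<String> stable = selectStable(
                List.of("t1", "t2"), Map.of("t1", 0L, "t2", now), now, 5000L, skipped);
        System.out.println(stable + ", skipped=" + skipped.get()); // [t1], skipped=1
    }
}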

From source file:org.apache.camel.processor.MulticastProcessor.java

private boolean doProcessSequential(final Exchange original, final AtomicExchange result,
        final Iterable<ProcessorExchangePair> pairs, final Iterator<ProcessorExchangePair> it,
        final ProcessorExchangePair pair, final AsyncCallback callback, final AtomicInteger total) {
    boolean sync = true;

    final Exchange exchange = pair.getExchange();
    Processor processor = pair.getProcessor();
    Producer producer = pair.getProducer();

    TracedRouteNodes traced = exchange.getUnitOfWork() != null ? exchange.getUnitOfWork().getTracedRouteNodes()
            : null;

    // compute time taken if sending to another endpoint
    StopWatch watch = null;
    if (producer != null) {
        watch = new StopWatch();
    }

    try {
        // prepare tracing starting from a new block
        if (traced != null) {
            traced.pushBlock();
        }

        // let the prepared process it, remember to begin the exchange pair
        AsyncProcessor async = AsyncProcessorTypeConverter.convert(processor);
        pair.begin();
        sync = AsyncProcessorHelper.process(async, exchange, new AsyncCallback() {
            public void done(boolean doneSync) {
                // we are done with the exchange pair
                pair.done();

                // we only have to handle async completion of the multicast
                if (doneSync) {
                    return;
                }

                // continue processing the multicast asynchronously
                Exchange subExchange = exchange;

                // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                // remember to test for stop on exception and aggregate before copying back results
                boolean continueProcessing = PipelineHelper.continueProcessing(subExchange,
                        "Sequential processing failed for number " + total.get(), LOG);
                if (stopOnException && !continueProcessing) {
                    if (subExchange.getException() != null) {
                        // wrap in exception to explain where it failed
                        subExchange.setException(
                                new CamelExchangeException("Sequential processing failed for number " + total,
                                        subExchange, subExchange.getException()));
                    } else {
                        // we want to stop on exception, and the exception was handled by the error handler
                        // this is similar to what the pipeline does, so we should do the same to not surprise end users
                        // so we should set the failed exchange as the result and be done
                        result.set(subExchange);
                    }
                    // and do the done work
                    doDone(original, subExchange, callback, false, true);
                    return;
                }

                try {
                    doAggregate(getAggregationStrategy(subExchange), result, subExchange);
                } catch (Throwable e) {
                    // wrap in exception to explain where it failed
                    subExchange.setException(new CamelExchangeException(
                            "Sequential processing failed for number " + total, subExchange, e));
                    // and do the done work
                    doDone(original, subExchange, callback, false, true);
                    return;
                }

                total.incrementAndGet();

                // maybe there are more processors to multicast
                while (it.hasNext()) {

                    // prepare and run the next
                    ProcessorExchangePair pair = it.next();
                    subExchange = pair.getExchange();
                    updateNewExchange(subExchange, total.get(), pairs, it);
                    boolean sync = doProcessSequential(original, result, pairs, it, pair, callback, total);

                    if (!sync) {
                        if (LOG.isTraceEnabled()) {
                            LOG.trace("Processing exchangeId: " + original.getExchangeId()
                                    + " is continued being processed asynchronously");
                        }
                        return;
                    }

                    // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                    // remember to test for stop on exception and aggregate before copying back results
                    continueProcessing = PipelineHelper.continueProcessing(subExchange,
                            "Sequential processing failed for number " + total.get(), LOG);
                    if (stopOnException && !continueProcessing) {
                        if (subExchange.getException() != null) {
                            // wrap in exception to explain where it failed
                            subExchange.setException(new CamelExchangeException(
                                    "Sequential processing failed for number " + total, subExchange,
                                    subExchange.getException()));
                        } else {
                            // we want to stop on exception, and the exception was handled by the error handler
                            // this is similar to what the pipeline does, so we should do the same to not surprise end users
                            // so we should set the failed exchange as the result and be done
                            result.set(subExchange);
                        }
                        // and do the done work
                        doDone(original, subExchange, callback, false, true);
                        return;
                    }

                    try {
                        doAggregate(getAggregationStrategy(subExchange), result, subExchange);
                    } catch (Throwable e) {
                        // wrap in exception to explain where it failed
                        subExchange.setException(new CamelExchangeException(
                                "Sequential processing failed for number " + total, subExchange, e));
                        // and do the done work
                        doDone(original, subExchange, callback, false, true);
                        return;
                    }

                    total.incrementAndGet();
                }

                // do the done work
                subExchange = result.get() != null ? result.get() : null;
                doDone(original, subExchange, callback, false, true);
            }
        });
    } finally {
        // pop the block so by next round we have the same starting point and thus the tracing looks accurate
        if (traced != null) {
            traced.popBlock();
        }
        if (producer != null) {
            long timeTaken = watch.stop();
            Endpoint endpoint = producer.getEndpoint();
            // emit event that the exchange was sent to the endpoint
            EventHelper.notifyExchangeSent(exchange.getContext(), exchange, endpoint, timeTaken);
        }
    }

    return sync;
}
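
In the Camel processor above, total.incrementAndGet() keeps a running count of exchange pairs as processing continues across asynchronous callbacks. A closely related idiom, shown in the generic sketch below (no Camel types involved), compares the value returned by incrementAndGet() with an expected total so that exactly one callback detects it was the last to finish.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;

public class LastCallbackSketch {
    public static void main(String[] args) throws InterruptedException {
        final int expected = 5;
        final AtomicInteger completed = new AtomicInteger(0);
        final CountDownLatch allDone = new CountDownLatch(1);

        ExecutorService pool = Executors.newFixedThreadPool(3);
        for (int i = 0; i < expected; i++) {
            final int taskId = i;
            pool.execute(() -> {
                // ... per-task work would go here ...

                // incrementAndGet() returns the new value, so exactly one task
                // observes the count reaching 'expected' and runs the completion step.
                if (completed.incrementAndGet() == expected) {
                    System.out.println("task " + taskId + " was the last to finish");
                    allDone.countDown();
                }
            });
        }

        allDone.await();
        pool.shutdown();
    }
}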

From source file:org.alfresco.repo.model.filefolder.FileFolderLoader.java

private int createFiles(final NodeRef folderNodeRef, final int fileCount, final int filesPerTxn,
        final long minFileSize, final long maxFileSize, final long maxUniqueDocuments,
        final boolean forceBinaryStorage, final int descriptionCount, final long descriptionSize) {
    final String nameBase = UUID.randomUUID().toString();

    final AtomicInteger count = new AtomicInteger(0);
    RetryingTransactionCallback<Void> createFilesWork = new RetryingTransactionCallback<Void>() {
        @Override
        public Void execute() throws Throwable {
            // Disable timestamp propagation to the parent by disabling cm:auditable
            policyBehaviourFilter.disableBehaviour(folderNodeRef, ContentModel.ASPECT_AUDITABLE);

            for (int i = 0; i < filesPerTxn; i++) {
                // Only create files while we still need to; we may need to do fewer in the last txn
                if (count.get() >= fileCount) {
                    break;
                }
                // Each load has its own base name
                String name = String.format("%s-%6d.txt", nameBase, count.get());
                // Create a file
                FileInfo fileInfo = fileFolderService.create(folderNodeRef, name, ContentModel.TYPE_CONTENT,
                        ContentModel.ASSOC_CONTAINS);
                NodeRef fileNodeRef = fileInfo.getNodeRef();
                // Spoofed document
                Locale locale = Locale.ENGLISH;
                long seed = (long) (Math.random() * maxUniqueDocuments);
                long size = normalDistribution.getValue(minFileSize, maxFileSize);
                String contentUrl = SpoofedTextContentReader.createContentUrl(locale, seed, size);
                SpoofedTextContentReader reader = new SpoofedTextContentReader(contentUrl);
                if (forceBinaryStorage) {
                    // Stream the text into the real storage
                    ContentWriter writer = contentService.getWriter(fileNodeRef, ContentModel.PROP_CONTENT,
                            true);
                    writer.setEncoding("UTF-8");
                    writer.setMimetype(MimetypeMap.MIMETYPE_TEXT_PLAIN);
                    writer.putContent(reader);
                } else {
                    // Just use the URL
                    ContentData contentData = reader.getContentData();
                    nodeService.setProperty(fileNodeRef, ContentModel.PROP_CONTENT, contentData);
                }
                // Store the description, if required
                if (descriptionCount > 0) {
                    // Add the cm:description additional properties
                    boolean wasMLAware = MLPropertyInterceptor.setMLAware(true);
                    try {
                        MLText descriptions = new MLText();
                        String[] languages = Locale.getISOLanguages();
                        String defaultLanguage = Locale.getDefault().getLanguage();
                        // Create cm:description translations
                        for (int descriptionNum = -1; descriptionNum < (descriptionCount
                                - 1); descriptionNum++) {
                            String language = null;
                            // Use the default language for the first description
                            if (descriptionNum == -1) {
                                language = defaultLanguage;
                            } else if (languages[descriptionNum].equals(defaultLanguage)) {
                                // Skip the default language, if we hit it
                                continue;
                            } else {
                                language = languages[descriptionNum];
                            }
                            Locale languageLocale = new Locale(language);
                            // For the cm:description, create new reader with a seed that changes each time
                            String descriptionUrl = SpoofedTextContentReader.createContentUrl(locale,
                                    seed + descriptionNum, descriptionSize);
                            SpoofedTextContentReader readerDescription = new SpoofedTextContentReader(
                                    descriptionUrl);
                            String description = readerDescription.getContentString();
                            descriptions.put(languageLocale, description);
                        }
                        nodeService.setProperty(fileNodeRef, ContentModel.PROP_DESCRIPTION, descriptions);
                    } finally {
                        MLPropertyInterceptor.setMLAware(wasMLAware);
                    }
                }
                // Success
                count.incrementAndGet();
            }
            return null;
        }
    };
    // Batches
    RetryingTransactionHelper txnHelper = transactionService.getRetryingTransactionHelper();
    int txnCount = (int) Math.ceil((double) fileCount / (double) filesPerTxn);
    for (int i = 0; i < txnCount; i++) {
        txnHelper.doInTransaction(createFilesWork, false, true);
    }
    // Done
    return count.get();
}
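
The loader above shares one counter across many transactional batches: each iteration checks count.get() against fileCount before creating a file and only calls count.incrementAndGet() after the creation succeeds, so the final batch cannot overshoot the target. A stripped-down sketch of that guard, with hypothetical names and no Alfresco types:

import java.util.concurrent.atomic.AtomicInteger;

public class BoundedBatchSketch {
    public static void main(String[] args) {
        final int targetCount = 10;
        final int itemsPerBatch = 4;
        final AtomicInteger created = new AtomicInteger(0);

        int batches = (int) Math.ceil((double) targetCount / itemsPerBatch);
        for (int b = 0; b < batches; b++) {
            for (int i = 0; i < itemsPerBatch; i++) {
                // Stop early in the last batch so we never exceed the target.
                if (created.get() >= targetCount) {
                    break;
                }
                createItem(created.get());     // hypothetical unit of work
                created.incrementAndGet();     // count only after the work succeeded
            }
        }
        System.out.println("created " + created.get() + " items"); // prints 10
    }

    private static void createItem(int index) {
        // Stand-in for the real creation step.
    }
}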

From source file:org.apache.hadoop.hive.ql.metadata.Hive.java

/**
 * Given a source directory name of the load path, load all dynamically generated partitions
 * into the specified table and return a map of the full partition specifications to the
 * loaded partitions.
 * @param loadPath source directory containing the dynamically generated partition data
 * @param tableName name of the table to load the partitions into
 * @param partSpec static part of the partition specification
 * @param replace whether existing partition data should be replaced
 * @param numDP number of dynamic partitions
 * @param listBucketingEnabled whether list bucketing is enabled for the table
 * @param isAcid true if this is an ACID operation
 * @param txnId txnId, can be 0 unless isAcid == true
 * @return partition map details (PartitionSpec and Partition)
 * @throws HiveException
 */
public Map<Map<String, String>, Partition> loadDynamicPartitions(final Path loadPath, final String tableName,
        final Map<String, String> partSpec, final boolean replace, final int numDP,
        final boolean listBucketingEnabled, final boolean isAcid, final long txnId,
        final boolean hasFollowingStatsTask, final AcidUtils.Operation operation) throws HiveException {

    final Map<Map<String, String>, Partition> partitionsMap = Collections
            .synchronizedMap(new LinkedHashMap<Map<String, String>, Partition>());

    int poolSize = conf.getInt(ConfVars.HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT.varname, 1);
    final ExecutorService pool = Executors.newFixedThreadPool(poolSize,
            new ThreadFactoryBuilder().setDaemon(true).setNameFormat("load-dynamic-partitions-%d").build());

    // Get all valid partition paths and existing partitions for them (if any)
    final Table tbl = getTable(tableName);
    final Set<Path> validPartitions = getValidPartitionsInPath(numDP, loadPath);

    final int partsToLoad = validPartitions.size();
    final AtomicInteger partitionsLoaded = new AtomicInteger(0);

    final boolean inPlaceEligible = conf.getLong("fs.trash.interval", 0) <= 0
            && InPlaceUpdate.canRenderInPlace(conf) && !SessionState.getConsole().getIsSilent();
    final PrintStream ps = (inPlaceEligible) ? SessionState.getConsole().getInfoStream() : null;
    final SessionState parentSession = SessionState.get();

    final List<Future<Void>> futures = Lists.newLinkedList();
    try {
        // for each dynamically created DP directory, construct a full partition spec
        // and load the partition based on that
        final Map<Long, RawStore> rawStoreMap = new HashMap<Long, RawStore>();
        for (final Path partPath : validPartitions) {
            // generate a full partition specification
            final LinkedHashMap<String, String> fullPartSpec = Maps.newLinkedHashMap(partSpec);
            Warehouse.makeSpecFromName(fullPartSpec, partPath);
            futures.add(pool.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    try {
                        // move file would require session details (needCopy() invokes SessionState.get)
                        SessionState.setCurrentSessionState(parentSession);
                        LOG.info("New loading path = " + partPath + " with partSpec " + fullPartSpec);

                        // load the partition
                        Partition newPartition = loadPartition(partPath, tbl, fullPartSpec, replace, true,
                                listBucketingEnabled, false, isAcid, hasFollowingStatsTask);
                        partitionsMap.put(fullPartSpec, newPartition);

                        if (inPlaceEligible) {
                            synchronized (ps) {
                                InPlaceUpdate.rePositionCursor(ps);
                                partitionsLoaded.incrementAndGet();
                                InPlaceUpdate.reprintLine(ps, "Loaded : " + partitionsLoaded.get() + "/"
                                        + partsToLoad + " partitions.");
                            }
                        }
                        // Add embedded rawstore, so we can cleanup later to avoid memory leak
                        if (getMSC().isLocalMetaStore()) {
                            if (!rawStoreMap.containsKey(Thread.currentThread().getId())) {
                                rawStoreMap.put(Thread.currentThread().getId(),
                                        HiveMetaStore.HMSHandler.getRawStore());
                            }
                        }
                        return null;
                    } catch (Exception t) {
                        LOG.error("Exception when loading partition with parameters " + " partPath=" + partPath
                                + ", " + " table=" + tbl.getTableName() + ", " + " partSpec=" + fullPartSpec
                                + ", " + " replace=" + replace + ", " + " listBucketingEnabled="
                                + listBucketingEnabled + ", " + " isAcid=" + isAcid + ", "
                                + " hasFollowingStatsTask=" + hasFollowingStatsTask, t);
                        throw t;
                    }
                }
            }));
        }
        pool.shutdown();
        LOG.debug("Number of partitions to be added is " + futures.size());

        for (Future future : futures) {
            future.get();
        }

        for (RawStore rs : rawStoreMap.values()) {
            rs.shutdown();
        }
    } catch (InterruptedException | ExecutionException e) {
        LOG.debug("Cancelling " + futures.size() + " dynamic loading tasks");
        //cancel other futures
        for (Future future : futures) {
            future.cancel(true);
        }
        throw new HiveException("Exception when loading " + partsToLoad + " in table " + tbl.getTableName()
                + " with loadPath=" + loadPath, e);
    }

    try {
        if (isAcid) {
            List<String> partNames = new ArrayList<>(partitionsMap.size());
            for (Partition p : partitionsMap.values()) {
                partNames.add(p.getName());
            }
            getMSC().addDynamicPartitions(txnId, tbl.getDbName(), tbl.getTableName(), partNames,
                    AcidUtils.toDataOperationType(operation));
        }
        LOG.info("Loaded " + partitionsMap.size() + " partitions");
        return partitionsMap;
    } catch (TException te) {
        throw new HiveException("Exception updating metastore for acid table " + tableName + " with partitions "
                + partitionsMap.values(), te);
    }
}

From source file:com.indeed.imhotep.builder.tsv.TsvConverter.java

public void run() {
    final List<FileToIndex> files = findNewFilesToIndex();

    boolean first = true;
    final Map<FileToIndex, Future<Boolean>> futures = Maps.newHashMap();
    final int toProcessCount = files.size();
    final AtomicInteger processedCount = new AtomicInteger(0);
    for (final FileToIndex fileToIndex : files) {
        if (DEBUG_BUILD_ONE && !first) {
            break;
        }
        first = false;

        futures.put(fileToIndex, executor.submit(new Callable<Boolean>() {
            @Override
            public Boolean call() throws Exception {
                final ShardInfo.DateTimeRange fileTimeRange = getTimeRangeFromFileName(fileToIndex.name);

                final String indexName = fileToIndex.index;
                final File localShardParentDir = localBuildDir;

                final String shardName = getShardName(fileTimeRange);

                final File localShardDir = new File(localShardParentDir, shardName);
                final String[] commandToRun = new String[] { "-o", localShardParentDir.getAbsolutePath(),
                        "--start", fileTimeRange.start.toString().replace("T", " ").substring(0, 19), "--end",
                        fileTimeRange.end.toString().replace("T", " ").substring(0, 19), "--overwrite",
                        "--extra", fileToIndex.fsPath.toString().replaceFirst("hdfs://[^/]*", "hdfs:") };

                final EasyIndexBuilderOptions builderOptions = new EasyIndexBuilderOptions();
                builderOptions.baseDir = localShardParentDir.getAbsolutePath();
                builderOptions.start = fileTimeRange.start.getMillis();
                builderOptions.end = fileTimeRange.end.getMillis();
                builderOptions.overwrite = true;
                builderOptions.extra = fileToIndex.fsPath.toString().replaceFirst("hdfs://[^/]*", "hdfs:");

                log.info(StringUtils.join(commandToRun, " "));
                final EasyIndexBuilderFromTSV builder = new EasyIndexBuilderFromTSV();

                builder.setOptions(builderOptions);
                final int buildExitCode = builder.build();

                // alternatively run in a separate process instead of in the same thread
                //                    final String bashCommand = "\"" + org.apache.commons.lang.StringUtils.join(commandToRun, "\" \"") + "\" 2>&1";
                //                    log.info(bashCommand);
                //                    final Process runningBuild = Runtime.getRuntime().exec(new String[] {"bash", "-c", bashCommand});
                //                    BufferedReader output = new BufferedReader(new InputStreamReader(runningBuild.getInputStream()));
                //                    for(String line = output.readLine(); line != null; line = output.readLine()) {
                //                        log.info(line); // TODO
                //                    }
                //                    final int buildExitCode = runningBuild.waitFor();

                if (buildExitCode != 0) {
                    throw new RuntimeException("Build exited with a non 0 code: " + buildExitCode);
                }
                if (!localShardDir.exists() || !localShardDir.isDirectory()) {
                    throw new RuntimeException(
                            "Build completed but a shard directory is not found at " + localShardDir);
                }

                // add build timestamp (version)
                final File localShardDirWithTimestamp = new File(localShardParentDir,
                        localShardDir.getName() + "." + yyyymmddhhmmss.print(DateTime.now()));

                if (!localShardDir.renameTo(localShardDirWithTimestamp)) {
                    throw new RuntimeException(
                            "Failed to append timestamp to completed shard dir: " + localShardDir);
                }

                final String scheme = finalFS.getUri().getScheme();
                if (scheme.equals("hdfs") || scheme.equals("s3n")) {
                    final boolean uploadSucceeded = uploadShard(localShardDirWithTimestamp.getParent(),
                            localShardDirWithTimestamp.getName(), indexName, finalShardPath, finalFS, qaMode);
                    if (uploadSucceeded) {
                        try {
                            PosixFileOperations.rmrf(localShardDirWithTimestamp);
                        } catch (IOException e) {
                            log.warn("Failed to delete temp dir: " + localShardDirWithTimestamp, e);
                        }
                    } else {
                        log.error(
                                "Shard upload failed. Local shard copy left in: " + localShardDirWithTimestamp);
                        throw new RuntimeException("Failed to upload the built shard to " + scheme);
                    }
                } else {
                    final Path localShardLoc;
                    final Path finalIndexPath;
                    final Path finalShardLoc;

                    localShardLoc = new Path("file:" + localShardDirWithTimestamp.getAbsolutePath());
                    finalIndexPath = new Path(finalShardPath, indexName);
                    finalFS.mkdirs(finalIndexPath);
                    finalShardLoc = new Path(finalIndexPath, localShardDirWithTimestamp.getName());

                    if (!finalFS.rename(localShardLoc, finalShardLoc)) {
                        throw new RuntimeException("Failed to move the completed shard dir " + localShardDir
                                + " to the final location at " + finalIndexPath);
                    }
                }

                moveToProcessed(fileToIndex, processedSuccessPath);

                log.info("Progress: " + (processedCount.incrementAndGet() * 100 / toProcessCount) + "%");

                return true;
            }
        }));
    }
    for (FileToIndex fileToIndex : files) {
        final Future<Boolean> future = futures.get(fileToIndex);
        if (future == null) {
            log.warn("Build failed for: " + fileToIndex.fsPath);
            continue;
        }
        boolean success;
        try {
            success = future.get();
        } catch (Exception e) {
            log.warn("Failed to build " + fileToIndex.fsPath, e);
            success = false;
            try {
                writeExceptionLog(fileToIndex, processedFailedPath, e);
            } catch (Exception e2) {
                log.error("Failed to write the error log to HDFS", e2);
            }
        }
        if (!success) {
            // mark the file as failed
            moveToProcessed(fileToIndex, processedFailedPath);
        }
    }
}
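
Here processedCount.incrementAndGet() doubles as a progress indicator: the returned value already includes the file that just finished, so the percentage can be computed in the same expression from whichever worker thread completes. A small generic sketch of that reporting pattern:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class ProgressSketch {
    public static void main(String[] args) throws InterruptedException {
        final int toProcess = 8;
        final AtomicInteger processed = new AtomicInteger(0);

        ExecutorService executor = Executors.newFixedThreadPool(4);
        for (int i = 0; i < toProcess; i++) {
            executor.submit(() -> {
                // ... build/convert one file here ...

                // incrementAndGet() returns the post-increment count, so the
                // percentage reflects this task as already finished.
                System.out.println("Progress: "
                        + (processed.incrementAndGet() * 100 / toProcess) + "%");
            });
        }
        executor.shutdown();
        executor.awaitTermination(1, TimeUnit.MINUTES);
    }
}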

From source file:org.apache.hadoop.raid.RaidShell.java

private long estimateSaving(final Codec codec, final List<Path> files, final int targetReplication,
        final int numThreads, final boolean isDebug) throws IOException {
    final AtomicLong totalSavingSize = new AtomicLong(0);
    ExecutorService executor = Executors.newFixedThreadPool(numThreads);
    LOG.info("Processing " + files.size() + " files/dirs for " + codec.id + " in " + numThreads + " threads");
    if (isDebug) {
        System.out.println("oldDiskSize | oldParitySize | newDiskSize | newParitySize"
                + "| savingSize | totalSavingSize | path ");
    }
    final AtomicInteger finishNum = new AtomicInteger(0);
    for (int i = 0; i < numThreads; i++) {
        final int startIdx = i;
        Runnable work = new Runnable() {
            public void run() {
                try {
                    for (int idx = startIdx; idx < files.size(); idx += numThreads) {
                        try {
                            Path p = files.get(idx);
                            FileSystem fs = FileSystem.get(conf);
                            p = fs.makeQualified(p);
                            FileStatus stat = null;
                            try {
                                stat = fs.getFileStatus(p);
                            } catch (FileNotFoundException e) {
                                LOG.warn("Path " + p + " does not exist", e);
                            }
                            if (stat == null) {
                                continue;
                            }
                            short repl = 0;
                            List<FileStatus> lfs = null;
                            if (codec.isDirRaid) {
                                if (!stat.isDir()) {
                                    continue;
                                }
                                lfs = RaidNode.listDirectoryRaidFileStatus(conf, fs, p);
                                if (lfs == null) {
                                    continue;
                                }
                                repl = DirectoryStripeReader.getReplication(lfs);
                            } else {
                                repl = stat.getReplication();
                            }

                            // if should not raid, will not put the file into the write list.
                            if (!RaidNode.shouldRaid(conf, fs, stat, codec, lfs)) {
                                LOG.info("Should not raid file: " + p);
                                continue;
                            }
                            // check the replication.
                            boolean add = false;
                            if (repl > targetReplication) {
                                add = true;
                            } else if (repl == targetReplication
                                    && !ParityFilePair.parityExists(stat, codec, conf)) {
                                add = true;
                            }
                            if (add) {
                                long oldDiskSize = 0L;
                                long newDiskSize = 0L;
                                long numBlocks = 0L;
                                long parityBlockSize = 0L;
                                if (codec.isDirRaid) {
                                    for (FileStatus fsStat : lfs) {
                                        oldDiskSize += fsStat.getLen() * (fsStat.getReplication());
                                        newDiskSize += fsStat.getLen() * targetReplication;
                                    }
                                    numBlocks = DirectoryStripeReader.getBlockNum(lfs);
                                    parityBlockSize = DirectoryStripeReader.getParityBlockSize(conf, lfs);
                                } else {
                                    oldDiskSize = stat.getLen() * stat.getReplication();
                                    newDiskSize = stat.getLen() * targetReplication;
                                    numBlocks = RaidNode.getNumBlocks(stat);
                                    parityBlockSize = stat.getBlockSize();
                                }

                                long numStripes = RaidNode.numStripes(numBlocks, codec.stripeLength);
                                long newParitySize = numStripes * codec.parityLength * parityBlockSize
                                        * targetReplication;
                                long oldParitySize = 0L;
                                for (Codec other : Codec.getCodecs()) {
                                    if (other.priority < codec.priority) {
                                        Path parityPath = new Path(other.parityDirectory,
                                                RaidNode.makeRelative(stat.getPath()));
                                        long logicalSize = 0;
                                        try {
                                            logicalSize = fs.getContentSummary(parityPath).getSpaceConsumed();
                                        } catch (IOException ioe) {
                                            // doesn't exist
                                            continue;
                                        }
                                        oldParitySize += logicalSize;
                                    }
                                }
                                long savingSize = oldDiskSize + oldParitySize - newDiskSize - newParitySize;
                                totalSavingSize.addAndGet(savingSize);
                                if (isDebug) {
                                    System.out.println(oldDiskSize + " " + oldParitySize + " " + newDiskSize
                                            + " " + newParitySize + " " + savingSize + " "
                                            + totalSavingSize.get() + " " + stat.getPath());
                                }
                            }
                        } catch (IOException ioe) {
                            LOG.warn("Get IOException", ioe);
                        }
                    }
                } finally {
                    finishNum.incrementAndGet();
                }
            }
        };
        if (executor != null) {
            executor.execute(work);
        }
    }
    if (executor != null) {
        try {
            while (finishNum.get() < numThreads) {
                try {
                    Thread.sleep(2000);
                } catch (InterruptedException ie) {
                    LOG.warn("EstimateSaving get exception ", ie);
                    throw new IOException(ie);
                }
            }
        } finally {
            executor.shutdown(); // Waits for submitted tasks to finish.
        }
    }
    return totalSavingSize.get();
}
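
estimateSaving uses finishNum purely as a completion count: each worker increments it in a finally block and the main thread polls until the count reaches numThreads. A CountDownLatch would express the same intent more directly, but the AtomicInteger form looks roughly like the generic sketch below.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;

public class CompletionPollSketch {
    public static void main(String[] args) throws InterruptedException {
        final int numThreads = 4;
        final AtomicInteger finished = new AtomicInteger(0);

        ExecutorService executor = Executors.newFixedThreadPool(numThreads);
        for (int i = 0; i < numThreads; i++) {
            executor.execute(() -> {
                try {
                    // ... this worker's share of the estimation ...
                } finally {
                    // The finally block guarantees the count advances even if the work throws.
                    finished.incrementAndGet();
                }
            });
        }

        // Poll until every worker has reported completion.
        while (finished.get() < numThreads) {
            Thread.sleep(200);
        }
        executor.shutdown();
        System.out.println("all workers finished");
    }
}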

From source file:com.milaboratory.core.alignment.KAlignerTest.java

@Test
public void testRandomCorrectnessConcurrent() throws Exception {
    KAlignerParameters p = gParams.clone().setMapperKValue(6).setAlignmentStopPenalty(Integer.MIN_VALUE)
            .setMapperAbsoluteMinScore(2.1f).setMapperMinSeedsDistance(4);
    p.setScoring(new LinearGapAlignmentScoring(NucleotideSequence.ALPHABET,
            ScoringUtils.getSymmetricMatrix(4, -4, 4), -5)).setMaxAdjacentIndels(2);

    KAlignerParameters[] params = new KAlignerParameters[] { p.clone(), p.clone().setFloatingLeftBound(true),
            p.clone().setFloatingRightBound(true),
            p.clone().setFloatingLeftBound(true).setFloatingRightBound(true) };

    RandomDataGenerator rdi = new RandomDataGenerator(new Well19937c(127368647891L));
    final int baseSize = its(400, 2000);
    final int total = its(3000, 30000);
    final int threadCount = 20;
    int i, id;

    final NucleotideMutationModel mutationModel = MutationModels.getEmpiricalNucleotideMutationModel()
            .multiplyProbabilities(2.0);
    mutationModel.reseed(12343L);

    for (final KAlignerParameters parameters : params) {
        final KAligner aligner = new KAligner(parameters);

        final AtomicInteger correct = new AtomicInteger(0), incorrect = new AtomicInteger(0),
                miss = new AtomicInteger(0), scoreError = new AtomicInteger(0), random = new AtomicInteger(0);

        final List<NucleotideSequence> ncs = new ArrayList<>(baseSize);
        for (i = 0; i < baseSize; ++i) {
            NucleotideSequence reference = randomSequence(NucleotideSequence.ALPHABET, rdi, 100, 300);
            ncs.add(reference);
            aligner.addReference(reference);
        }

        final AtomicInteger counter = new AtomicInteger(total);

        Thread[] threads = new Thread[threadCount];

        final AtomicLong time = new AtomicLong(0L);

        final AtomicLong seedCounter = new AtomicLong(1273L);
        for (i = 0; i < threadCount; ++i) {
            threads[i] = new Thread() {
                @Override
                public void run() {
                    long timestamp;
                    //Different seed for different thread.
                    RandomDataGenerator rdi = new RandomDataGenerator(
                            new Well19937c(seedCounter.addAndGet(117L)));
                    while (counter.decrementAndGet() >= 0) {
                        int id = rdi.nextInt(0, baseSize - 1);
                        NucleotideSequence ref = ncs.get(id);
                        int trimRight, trimLeft;
                        boolean addLeft, addRight;

                        if (parameters.isFloatingLeftBound()) {
                            trimLeft = rdi.nextInt(0, ref.size() / 3);
                            addLeft = true;
                        } else {
                            if (rdi.nextInt(0, 1) == 0) {
                                trimLeft = 0;
                                addLeft = true;
                            } else {
                                trimLeft = rdi.nextInt(0, ref.size() / 3);
                                addLeft = false;
                            }
                        }

                        if (parameters.isFloatingRightBound()) {
                            trimRight = rdi.nextInt(0, ref.size() / 3);
                            addRight = true;
                        } else {
                            if (rdi.nextInt(0, 1) == 0) {
                                trimRight = 0;
                                addRight = true;
                            } else {
                                trimRight = rdi.nextInt(0, ref.size() / 3);
                                addRight = false;
                            }
                        }

                        NucleotideSequence subSeq = ref.getRange(trimLeft, ref.size() - trimRight);
                        NucleotideSequence left = addLeft
                                ? randomSequence(NucleotideSequence.ALPHABET, rdi, 10, 30)
                                : EMPTY;
                        NucleotideSequence right = addRight
                                ? randomSequence(NucleotideSequence.ALPHABET, rdi, 10, 30)
                                : EMPTY;

                        int[] subSeqMutations;
                        Mutations<NucleotideSequence> mmutations;
                        synchronized (mutationModel) {
                            mmutations = generateMutations(subSeq, mutationModel);
                            subSeqMutations = mmutations.getAllMutations();
                        }
                        float actionScore = AlignmentUtils.calculateScore(parameters.getScoring(),
                                subSeq.size(), mmutations);

                        int indels = 0;
                        for (int mut : subSeqMutations)
                            if (isDeletion(mut) || isInsertion(mut))
                                ++indels;

                        NucleotideSequence target = left.concatenate(mutate(subSeq, subSeqMutations))
                                .concatenate(right);

                        timestamp = System.nanoTime();
                        KAlignmentResult result = aligner.align(target);
                        time.addAndGet(System.nanoTime() - timestamp);

                        boolean found = false;
                        for (KAlignmentHit hit : result.hits) {
                            if (hit.getId() == id) {
                                //System.out.println(hit.getAlignmentScore());
                                found = true;
                                if (!parameters.isFloatingLeftBound())
                                    Assert.assertTrue(hit.getAlignment().getSequence1Range().getFrom() == 0
                                            || hit.getAlignment().getSequence2Range().getFrom() == 0);
                                if (!parameters.isFloatingRightBound())
                                    Assert.assertTrue(hit.getAlignment().getSequence1Range().getTo() == ref
                                            .size()
                                            || hit.getAlignment().getSequence2Range().getTo() == target.size());
                                if (hit.getAlignment().getScore() < actionScore
                                        && indels <= parameters.getMaxAdjacentIndels()) {
                                    scoreError.incrementAndGet();
                                    //System.out.println(target);
                                    //System.out.println(left);
                                    //printAlignment(subSeq, subSeqMutations);
                                    //System.out.println(right);
                                    //printHitAlignment(hit);
                                    ////printAlignment(ncs.get(hit.getId()).getRange(hit.getAlignment().getSequence1Range()),
                                    ////        hit.getAlignment().getMutations());
                                    //found = true;
                                }
                            } else {
                                //printHitAlignment(hit);
                                //System.out.println(hit.getAlignmentScore());
                                incorrect.incrementAndGet();
                            }
                        }

                        if (found)
                            correct.incrementAndGet();
                        else {
                            if (indels <= parameters.getMaxAdjacentIndels()) {
                                miss.incrementAndGet();
                                //System.out.println(target);
                                //System.out.println(left);
                                //printAlignment(subSeq, subSeqMutations);
                                //System.out.println(right);
                            }
                        }

                        NucleotideSequence randomSequence = randomSequence(NucleotideSequence.ALPHABET, rdi,
                                target.size() - 1, target.size());
                        for (KAlignmentHit hit : aligner.align(randomSequence).hits) {
                            hit.calculateAlignmnet();
                            if (hit.getAlignment().getScore() >= 110.0)
                                random.incrementAndGet();
                        }
                    }
                }
            };
        }

        for (i = 0; i < threadCount; ++i)
            threads[i].start();

        for (i = 0; i < threadCount; ++i)
            threads[i].join();

        System.out.println("C=" + correct.get() + ";I=" + incorrect.get() + ";M=" + miss.get() + ";ScE="
                + scoreError.get() + ";R=" + (1.0 * random.get() / baseSize / total) + " AlignmentTime = "
                + time(time.get() / total));
        Assert.assertEquals(1.0, 1.0 * correct.get() / total, 0.01);
        Assert.assertEquals(0.0, 1.0 * incorrect.get() / total, 0.001);
        Assert.assertEquals(0.0, 1.0 * miss.get() / total, 0.001);
        Assert.assertEquals(0.0, 1.0 * scoreError.get() / total, 0.001);
        Assert.assertEquals(0.0, 1.0 * random.get() / total / baseSize, 5E-6);
    }
}

From source file:org.helios.netty.jmx.MetricCollector.java

/**
 * Collects the number of threads in each thread state
 * @return an EnumMap with Thread states as the key and the number of threads in that state as the value
 */
public EnumMap<Thread.State, AtomicInteger> getThreadStates() {
    EnumMap<Thread.State, AtomicInteger> map = new EnumMap<State, AtomicInteger>(Thread.State.class);
    for (ThreadInfo ti : threadMxBean.getThreadInfo(threadMxBean.getAllThreadIds())) {
        State st = ti.getThreadState();
        AtomicInteger ai = map.get(st);
        if (ai == null) {
            ai = new AtomicInteger(0);
            map.put(st, ai);
        }
        ai.incrementAndGet();
    }
    return map;
}
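
The pattern above, storing an AtomicInteger as the map value and incrementing it once per occurrence, is a common way to count by key. On Java 8+ the explicit null check can be replaced with computeIfAbsent, as in this short sketch:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;

public class CountByKeySketch {
    public static void main(String[] args) {
        String[] words = { "a", "b", "a", "c", "a", "b" };

        Map<String, AtomicInteger> counts = new HashMap<>();
        for (String word : words) {
            // computeIfAbsent creates the counter on first sight of the key,
            // then incrementAndGet bumps it for every occurrence.
            counts.computeIfAbsent(word, k -> new AtomicInteger(0)).incrementAndGet();
        }

        System.out.println(counts); // e.g. {a=3, b=2, c=1}
    }
}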