Example usage for java.util.concurrent ForkJoinPool ForkJoinPool

List of usage examples for java.util.concurrent ForkJoinPool ForkJoinPool

Introduction

On this page you can find example usage for the java.util.concurrent ForkJoinPool constructor ForkJoinPool.

Prototype

public ForkJoinPool(int parallelism)

Document

Creates a ForkJoinPool with the indicated parallelism level, the default thread factory, no UncaughtExceptionHandler, and non-async LIFO processing mode.

Usage

From source file:Main.java

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ForkJoinPool;

public static void main(String[] args) {
    List<Integer> list = new ArrayList<>();
    long expectedSum = 0;
    // Fill the list with 10,000 random integers in [1, 100], tracking the expected total.
    for (int i = 0; i < 10000; i++) {
        int random = 1 + (int) (Math.random() * ((100 - 1) + 1));
        list.add(random);
        expectedSum += random;
    }
    System.out.println("expected sum: " + expectedSum);
    // One worker thread per available processor.
    ForkJoinPool forkJoinPool = new ForkJoinPool(Runtime.getRuntime().availableProcessors());
    // RecursiveSum is a ForkJoinTask<Long> defined elsewhere in Main.java (see the sketch below).
    RecursiveSum recursiveSum = new RecursiveSum(list, 0, list.size());
    long recSum = forkJoinPool.invoke(recursiveSum);
    System.out.println("recursive-sum: " + recSum);
}
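
The RecursiveSum class is defined elsewhere in Main.java and is not shown on this page. A minimal sketch of what such a divide-and-conquer task might look like, assuming it extends RecursiveTask<Long> and sums the sublist [from, to):

import java.util.List;
import java.util.concurrent.RecursiveTask;

// Hypothetical reconstruction of the RecursiveSum task used above: splits the
// range in half until it is small enough to sum sequentially.
class RecursiveSum extends RecursiveTask<Long> {
    private static final int THRESHOLD = 1_000; // assumed cutoff for sequential summing
    private final List<Integer> list;
    private final int from;
    private final int to;

    RecursiveSum(List<Integer> list, int from, int to) {
        this.list = list;
        this.from = from;
        this.to = to;
    }

    @Override
    protected Long compute() {
        if (to - from <= THRESHOLD) {
            long sum = 0;
            for (int i = from; i < to; i++) {
                sum += list.get(i);
            }
            return sum;
        }
        int mid = (from + to) >>> 1;
        RecursiveSum left = new RecursiveSum(list, from, mid);
        RecursiveSum right = new RecursiveSum(list, mid, to);
        left.fork();                     // compute the left half asynchronously
        long rightSum = right.compute(); // compute the right half in this thread
        return left.join() + rightSum;   // combine both halves
    }
}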

From source file:Main.java

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ForkJoinPool;
import java.util.stream.IntStream;

public static void main(String[] arg) throws Exception {
    Runnable parallelCode = () -> {
        // Record every thread that executes the filter; a concurrent set is
        // required because the parallel stream calls it from many threads.
        Set<String> allThreads = ConcurrentHashMap.newKeySet();
        IntStream.range(0, 1_000_000).parallel().filter(i -> {
            allThreads.add(Thread.currentThread().getName());
            return false;
        }).min();
        System.out.println("executed by " + allThreads);
    };
    System.out.println("default behavior: ");
    parallelCode.run(); // runs in the common pool (plus the calling thread)
    System.out.println("specialized pool:");
    ForkJoinPool pool = new ForkJoinPool(2);
    pool.submit(parallelCode).get(); // runs on the two-thread custom pool
    pool.shutdown();
}
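
Note that this example relies on an implementation detail of parallel streams: a parallel stream executes its tasks in the ForkJoinPool whose worker thread invokes the terminal operation, so submitting the stream code to a custom pool steers the work away from the common pool. This behavior is widely relied upon but is not part of the documented stream API.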

From source file:org.diorite.utils.concurrent.ParallelUtils.java

public static void realParallelStream(final Runnable streamAction, final int parallelism, final boolean await) {
    final ForkJoinPool pool = new ForkJoinPool(parallelism);
    if (await) {
        // Block until the wrapped action has completed.
        pool.invoke(createSimpleTask(streamAction));
    } else {
        // Fire and forget: the action runs asynchronously in the pool.
        pool.submit(streamAction);
    }
}
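
The createSimpleTask helper is not shown on this page. A minimal sketch, assuming it simply adapts the Runnable into a ForkJoinTask so that it can be passed to invoke:

import java.util.concurrent.ForkJoinTask;

// Hypothetical helper: wraps a Runnable in a ForkJoinTask for ForkJoinPool.invoke.
private static ForkJoinTask<?> createSimpleTask(final Runnable runnable) {
    return ForkJoinTask.adapt(runnable);
}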

From source file:com.joyent.manta.client.FindForkJoinPoolFactory.java

/**
 * Returns a new instance of {@link ForkJoinPool} configured with the
 * correct parallelism value.
 *
 * @param config Manta configuration context object
 * @return configured instance
 */
static ForkJoinPool getInstance(final ConfigContext config) {
    Validate.notNull(config, "Configuration context is null");
    final int maximumConnections = Validate.notNull(config.getMaximumConnections(),
            "Maximum connections setting is null");

    Validate.isTrue(maximumConnections > 0, "Maximum connections is not greater than zero");

    final int parallelism = calculateParallelism(maximumConnections);

    return new ForkJoinPool(parallelism);
}
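
The calculateParallelism method is not shown here. A plausible sketch, assuming the intent is to cap the pool's parallelism at both the connection limit and the number of available cores (the actual Manta implementation may differ):

// Hypothetical policy: use no more workers than connections or processors allow.
private static int calculateParallelism(final int maximumConnections) {
    return Math.min(maximumConnections, Runtime.getRuntime().availableProcessors());
}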

From source file:org.diorite.utils.concurrent.ParallelUtils.java

public static <T> T realParallelStream(final Supplier<T> streamAction, final int parallelism) {
    final ForkJoinPool pool = new ForkJoinPool(parallelism);
    return pool.invoke(createTask(streamAction));
}
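
As with createSimpleTask above, the createTask helper is not shown. A minimal sketch, assuming it adapts the Supplier into a value-returning ForkJoinTask:

import java.util.concurrent.ForkJoinTask;
import java.util.function.Supplier;

// Hypothetical helper: adapts a Supplier<T> to a ForkJoinTask<T> via a Callable.
private static <T> ForkJoinTask<T> createTask(final Supplier<T> supplier) {
    return ForkJoinTask.adapt(supplier::get);
}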

From source file:cz.cuni.mff.d3s.spl.interpretation.DistributionLearningInterpretationParallel.java

public DistributionLearningInterpretationParallel() {
    this(new ForkJoinPool(1));
}
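
A parallelism of 1 yields a pool that executes its tasks one at a time, so this no-argument constructor provides an effectively serial default; the constructor it delegates to evidently accepts any caller-supplied ForkJoinPool when real parallelism is wanted.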

From source file:org.hyperledger.accountimport.KeyPairGenerator.java

/**
 * Fully initializes the object by allocating the extra ForkJoin thread pool and related resources.
 *
 * @param threadCount  the number of threads to be used as a maximum
 *                     (depending on the algorithm being used it might
 *                     be less but not more)
 * @param outputStream the stream to where the component should write its output
 */
public KeyPairGenerator(final int threadCount, final OutputStream outputStream) {
    bufferedWriter = new BufferedWriter(new OutputStreamWriter(outputStream));
    executorService = new ForkJoinPool(threadCount);
    generationBlockSize = threadCount * PER_THREAD_BLOCK_SIZE;
}

From source file:com.linkedin.gradle.python.tasks.ParallelWheelGenerationTask.java

@TaskAction
public void buildWheels() {

    ProgressLoggerFactory progressLoggerFactory = getServices().get(ProgressLoggerFactory.class);
    ProgressLogger progressLogger = progressLoggerFactory.newOperation(ParallelWheelGenerationTask.class);
    progressLogger.setDescription("Building wheels");

    progressLogger.started();

    TaskTimer taskTimer = new TaskTimer();

    // This way we don't try to over-allocate the system too much. We'll use slightly over half of the
    // machine to build the wheels in parallel, allowing other operations to continue.
    ForkJoinPool forkJoinPool = new ForkJoinPool(Runtime.getRuntime().availableProcessors() / 2 + 1);

    Set<File> files = getFilesToConvert().getFiles();
    int totalSize = files.size();

    try {
        forkJoinPool.submit(() -> {
            files.stream().parallel().forEach(file -> {

                PackageInfo packageInfo = PackageInfo.fromPath(file);
                currentPackages.add(packageInfo.getName());
                counter.incrementAndGet();
                updateStatusLine(progressLogger, totalSize, counter.get());
                TaskTimer.TickingClock clock = taskTimer
                        .start(packageInfo.getName() + "-" + packageInfo.getVersion());
                if (!packageSettings.requiresSourceBuild(packageInfo)) {
                    makeWheelFromSdist(packageInfo);
                }
                currentPackages.remove(packageInfo.getName());
                updateStatusLine(progressLogger, totalSize, counter.get());
                clock.stop();
            });
        }).get();
    } catch (InterruptedException | ExecutionException e) {
        logger.warn("Unable to pre-build some dependencies");
    } finally {
        forkJoinPool.shutdown();
    }

    try {
        FileUtils.write(getBuildReport(), taskTimer.buildReport());
    } catch (IOException ignore) {
        // Don't fail if there are issues writing the timing report.
    }
    progressLogger.completed();
}
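
This task uses the same technique as the Main.java stream example above: because the parallel stream is run inside forkJoinPool.submit(...), its work executes on that pool's workers rather than the common pool, here sized to roughly half the available processors.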

From source file:com.itemanalysis.psychometrics.irt.estimation.MarginalMaximumLikelihoodEstimation.java

public MarginalMaximumLikelihoodEstimation(ItemResponseVector[] responseVector, ItemResponseModel[] irm,
        DistributionApproximation latentDistribution) {
    this.responseVector = responseVector;
    this.irm = irm;
    this.latentDistribution = latentDistribution;
    nPoints = latentDistribution.getNumberOfPoints();
    nItems = irm.length;
    nResponseVectors = responseVector.length;
    pool = new ForkJoinPool(PROCESSORS);
}

From source file:com.chingo247.structureapi.schematic.SchematicManager.java

public synchronized void load(File directory) {
    Preconditions.checkArgument(directory.isDirectory());

    Iterator<File> fit = FileUtils.iterateFiles(directory, new String[] { "schematic" }, true);

    if (fit.hasNext()) {
        ForkJoinPool pool = new ForkJoinPool(Runtime.getRuntime().availableProcessors()); // only create the pool if we have schematics
        Map<Long, SchematicDataNode> alreadyHere = Maps.newHashMap();
        Map<Long, SchematicDataNode> needsUpdating = Maps.newHashMap();

        List<SchematicProcessor> tasks = Lists.newArrayList();
        List<Schematic> alreadyDone = Lists.newArrayList();
        XXHasher hasher = new XXHasher();

        try (Transaction tx = graph.beginTx()) {
            Collection<SchematicDataNode> schematicNodes = schematicRepository
                    .findAfterDate(System.currentTimeMillis() - TWO_DAYS);
            for (SchematicDataNode node : schematicNodes) {
                if (!node.hasRotation()) {
                    needsUpdating.put(node.getXXHash64(), node);
                    continue;
                }
                alreadyHere.put(node.getXXHash64(), node);
            }

            // Process the schematics that need to be loaded
            while (fit.hasNext()) {
                File schematicFile = fit.next();
                try {
                    long checksum = hasher.hash64(schematicFile);
                    // Only load schematic data that wasn't yet loaded...
                    SchematicDataNode existingData = alreadyHere.get(checksum);
                    if (existingData != null) {
                        Schematic s = new DefaultSchematic(schematicFile, existingData.getWidth(),
                                existingData.getHeight(), existingData.getLength(),
                                existingData.getAxisOffset());
                        alreadyDone.add(s);
                    } else if (getSchematic(checksum) == null) {
                        SchematicProcessor processor = new SchematicProcessor(schematicFile);
                        tasks.add(processor);
                        pool.execute(processor);
                    }
                } catch (IOException ex) {
                    Logger.getLogger(SchematicManager.class.getName()).log(Level.SEVERE, null, ex);
                }
            }
            tx.success();
        }

        // Wait for the processors to finish and queue their results for bulk insertion
        List<Schematic> newSchematics = Lists.newArrayList();
        try {
            for (SchematicProcessor sp : tasks) {
                Schematic schematic = sp.get();
                if (schematic != null) {
                    newSchematics.add(schematic);
                }
            }
        } catch (Exception ex) {
            Logger.getLogger(SchematicManager.class.getName()).log(Level.SEVERE, null, ex);
        }

        // Close the pool!
        pool.shutdown();

        int updated = 0;
        // Update the database
        try (Transaction tx = graph.beginTx()) {
            for (Schematic data : alreadyDone) {
                SchematicDataNode sdn = schematicRepository.findByHash(data.getHash());
                sdn.setLastImport(System.currentTimeMillis());
            }
            for (Schematic newData : newSchematics) {
                if (needsUpdating.get(newData.getHash()) != null) {
                    SchematicDataNode dataNode = schematicRepository.findByHash(newData.getHash());
                    dataNode.setRotation(newData.getAxisOffset());
                    updated++;
                    continue;
                }
                String name = newData.getFile().getName();
                long xxhash = newData.getHash();
                int width = newData.getWidth();
                int height = newData.getHeight();
                int length = newData.getLength();
                int axisOffset = newData.getAxisOffset();
                schematicRepository.addSchematic(name, xxhash, width, height, length, axisOffset,
                        System.currentTimeMillis());
            }

            // Delete unused
            int removed = 0;
            for (SchematicDataNode sdn : schematicRepository
                    .findBeforeDate(System.currentTimeMillis() - TWO_DAYS)) {
                sdn.delete();
                removed++;
            }
            if (removed > 0) {
                System.out.println("[SettlerCraft]: Deleted " + removed + " schematic(s) from cache");
            }

            if (updated > 0) {
                System.out.println("[SettlerCraft]: Updated " + updated + " schematic(s) from cache");
            }

            tx.success();
        }

        synchronized (schematics) {
            for (Schematic schematic : newSchematics) {
                schematics.put(schematic.getHash(), schematic);
            }
            for (Schematic schematic : alreadyDone) {
                schematics.put(schematic.getHash(), schematic);
            }
        }

    }

}
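
The SchematicProcessor class is not shown on this page. Given that instances are handed to pool.execute(...) and later queried with get(), it is presumably a ForkJoinTask<Schematic> (for example a RecursiveTask<Schematic>) that parses a single schematic file.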