Example usage for java.util.concurrent ForkJoinPool commonPool

Introduction

This page collects example usages of java.util.concurrent.ForkJoinPool.commonPool() drawn from open-source projects.

Prototype

public static ForkJoinPool commonPool() 

Document

Returns the common pool instance.
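
A minimal, self-contained sketch of the call itself (the class name and the task are illustrative only, not taken from the sources below):

import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.ForkJoinTask;

public class CommonPoolDemo {
    public static void main(String[] args) {
        // Obtain the JVM-wide shared pool; by default its parallelism is one
        // less than the number of available processors.
        ForkJoinPool pool = ForkJoinPool.commonPool();

        // Submit an illustrative task and block until its result is ready.
        ForkJoinTask<Integer> task = pool.submit(() -> 21 + 21);
        System.out.println(task.join()); // prints 42

        // No shutdown needed: the common pool is managed by the JVM.
    }
}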

Usage

From source file:net.openhft.chronicle.timeseries.Columns.java

public static void generateBrownian(DoubleColumn col, double start, double end, double sd) {
    long length = col.length();
    double sd2 = sd / Math.sqrt(length);
    NormalDistribution nd = new NormalDistribution(0, sd2 * CHUNK_SIZE);
    int trendLength = Math.toIntExact((length - 1) / CHUNK_SIZE + 2);
    BytesStore trend = NativeBytesStore.lazyNativeBytesStoreWithFixedCapacity(trendLength * 8L);
    double x = start;
    RandomGenerator rand = new MersenneTwister();
    for (int i = 0; i < trendLength - 1; i++) {
        float f = rand.nextFloat();
        trend.writeDouble((long) i << 3, x);
        x += nd.inverseCumulativeProbability(f);
    }
    trend.writeDouble((long) (trendLength - 1) << 3, x);
    double diff = end - x;
    double gradient = diff / (trendLength - 1);
    for (int i = 0; i < trendLength; i++) {
        trend.addAndGetDoubleNotAtomic((long) i << 3, i * gradient);
    }
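    // Fan the per-chunk Brownian generation out over the common pool,
    // one task per available processor.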
    int procs = Runtime.getRuntime().availableProcessors();
    int chunksPerTask = (trendLength - 1) / procs + 1;
    ForkJoinPool fjp = ForkJoinPool.commonPool();
    List<ForkJoinTask<?>> tasks = new ArrayList<>(procs);
    for (int i = 0; i < procs; i++) {
        int si = i * chunksPerTask;
        int ei = Math.min(trendLength, si + chunksPerTask);
        tasks.add(fjp.submit(() -> {
            NormalDistribution nd2 = new NormalDistribution(0, sd2);
            RandomGenerator rand2 = new MersenneTwister();
            for (int j = si; j < ei; j++) {
                generateBrownian(col, (long) j * CHUNK_SIZE, trend.readDouble((long) j << 3),
                        trend.readDouble((long) (j + 1) << 3), nd2, rand2);
            }
        }));
    }
    for (ForkJoinTask<?> task : tasks) {
        task.join();
    }
    trend.release();
}

From source file:ws.salient.aws.RecordProcessor.java

@Override
public void initialize(InitializationInput input) {
    AmazonClientProvider provider = new AmazonClientProvider();
    sessions = new Sessions(new AmazonS3Repository(provider.getAmazonS3()),
            new DynamoDBProfiles(provider.getDynamoDB(), provider.getAWSKMS(), json),
            new DynamoDBStore(provider.getDynamoDB(), provider.getAWSKMS(), json,
                    Executors.newSingleThreadExecutor()),
            Guice.createInjector(provider), Executors.newSingleThreadExecutor(), ForkJoinPool.commonPool());
}

From source file:com.xylocore.cassandra.query.PagedQueryBuilder.java

/**
 * Sets the executor used to run query work. Passing null selects the JDK
 * common pool.
 * 
 * @param       aExecutor       the executor to use, or null for ForkJoinPool.commonPool()
 * 
 * @return      this builder, for method chaining
 */
public PagedQueryBuilder<T> executor(Executor aExecutor) {
    if (aExecutor == null) {
        aExecutor = ForkJoinPool.commonPool();
    }

    executor = aExecutor;

    return this;
}

From source file:com.xylocore.cassandra.query.TableScanQueryBuilder.java

/**
 * Sets the executor used to run query work. Passing null selects the JDK
 * common pool.
 * 
 * @param       aExecutor       the executor to use, or null for ForkJoinPool.commonPool()
 * 
 * @return      this builder, for method chaining
 */
public TableScanQueryBuilder<T> executor(Executor aExecutor) {
    if (aExecutor == null) {
        aExecutor = ForkJoinPool.commonPool();
    }

    executor = aExecutor;

    return this;
}

From source file:net.openhft.chronicle.timeseries.Columns.java

public static <T> void setAll(LongColumn col, Supplier<T> perThread,
        LongColumnIndexObjectConsumer<T> consumer) {
    long length = col.length();
    int chunks = Math.toIntExact((length - 1) / CHUNK_SIZE + 1);
    ForkJoinPool fjp = ForkJoinPool.commonPool();
    int procs = Runtime.getRuntime().availableProcessors();
    List<ForkJoinTask<?>> tasks = new ArrayList<>(procs);
    int chunksPerTask = (chunks - 1) / procs + 1;
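    // One task per processor; each task fills a contiguous range of chunks.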
    for (int i = 0; i < procs; i++) {
        int si = i * chunksPerTask;
        int ei = Math.min(chunks, si + chunksPerTask);
        tasks.add(fjp.submit(() -> {
            T t = perThread.get();
            long first = (long) si * CHUNK_SIZE;
            int max = (int) Math.min((ei - si) * CHUNK_SIZE, length - first);
            for (int j = 0; j < max; j++) {
                consumer.apply(col, first + j, t);
            }
        }));
    }
    for (ForkJoinTask<?> task : tasks) {
        task.join();
    }
}

From source file:io.kamax.mxisd.lookup.provider.DnsLookupProvider.java

@Override
public List<ThreePidMapping> populate(List<ThreePidMapping> mappings) {
    Map<String, List<ThreePidMapping>> domains = new HashMap<>();

    for (ThreePidMapping mapping : mappings) {
        if (!ThreePidMedium.Email.is(mapping.getMedium())) {
            log.info("Skipping unsupported type {} for {}", mapping.getMedium(), mapping.getValue());
            continue;
        }

        Optional<String> domainOpt = getDomain(mapping.getValue());
        if (!domainOpt.isPresent()) {
            log.warn("No domain for 3PID {}", mapping.getValue());
            continue;
        }

        String domain = domainOpt.get();
        List<ThreePidMapping> domainMappings = domains.computeIfAbsent(domain, s -> new ArrayList<>());
        domainMappings.add(mapping);
    }

    log.info("Looking mappings across {} domains", domains.keySet().size());
    ForkJoinPool pool = ForkJoinPool.commonPool();
    RecursiveTask<List<ThreePidMapping>> task = new RecursiveTask<List<ThreePidMapping>>() {

        @Override
        protected List<ThreePidMapping> compute() {
            List<ThreePidMapping> mappingsFound = new ArrayList<>();
            List<DomainBulkLookupTask> tasks = new ArrayList<>();

            for (String domain : domains.keySet()) {
                DomainBulkLookupTask domainTask = new DomainBulkLookupTask(domain, domains.get(domain));
                domainTask.fork();
                tasks.add(domainTask);
            }

            for (DomainBulkLookupTask task : tasks) {
                mappingsFound.addAll(task.join());
            }

            return mappingsFound;
        }
    };
    pool.submit(task);
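    // Note: shutdown() has no effect on the common pool, which runs until JVM exit.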
    pool.shutdown();

    List<ThreePidMapping> mappingsFound = task.join();
    log.info("Found {} mappings overall", mappingsFound.size());
    return mappingsFound;
}

From source file:com.ge.research.semtk.sparqlX.parallel.SparqlParallelQueries.java

public void runQueries() {
    // Use fork-join technique
    // https://docs.oracle.com/javase/tutorial/essential/concurrency/forkjoin.html
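    // invoke() runs this task in the common pool and blocks until it completes.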
    ForkJoinPool.commonPool().invoke(this);
}

From source file:com.xylocore.cassandra.query.PagedQuery.java

/**
 * Creates a paged query. Passing a null executor selects the JDK common pool.
 * 
 * @param       aSession            the Cassandra session to execute against
 * @param       aExecutor           the executor to use, or null for ForkJoinPool.commonPool()
 * @param       aFirstQuery         the prepared statement for the first page
 * @param       aNextQueries        the prepared statements for subsequent pages
 * @param       aKeyColumnNames     the key column name mappings
 */
PagedQuery(Session aSession, Executor aExecutor, PreparedStatement aFirstQuery,
        List<PreparedStatement> aNextQueries, Map<String, String> aKeyColumnNames) {
    Validate.notNull(aSession);
    Validate.notNull(aFirstQuery);
    Validate.notEmpty(aNextQueries);
    Validate.noNullElements(aNextQueries);
    Validate.notEmpty(aKeyColumnNames);

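    // Fall back to the JDK common pool when no executor is supplied.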
    if (aExecutor == null) {
        aExecutor = ForkJoinPool.commonPool();
    }

    session = aSession;
    executor = aExecutor;
    firstQuery = aFirstQuery;
    nextQueries = new ArrayList<>(aNextQueries);
    keyColumnNames = Collections.emptyMap();

    if (aKeyColumnNames != null && !aKeyColumnNames.isEmpty()) {
        keyColumnNames = new HashMap<>(aKeyColumnNames);
    }
}

From source file:com.github.benmanes.caffeine.cache.Stresser.java

private void status() {
    local.evictionLock.lock();
    int pendingWrites = local.writeBuffer().size();
    int drainStatus = local.drainStatus();
    local.evictionLock.unlock();

    LocalTime elapsedTime = LocalTime.ofSecondOfDay(stopwatch.elapsed(TimeUnit.SECONDS));
    System.out.printf("---------- %s ----------%n", elapsedTime);
    System.out.printf("Pending reads: %,d; writes: %,d%n", local.readBuffer.size(), pendingWrites);
    System.out.printf("Drain status = %s (%s)%n", STATUS[drainStatus], drainStatus);
    System.out.printf("Evictions = %,d%n", cache.stats().evictionCount());
    System.out.printf("Size = %,d (max: %,d)%n", local.data.mappingCount(), operation.maxEntries);
    System.out.printf("Lock = [%s%n", StringUtils.substringAfter(local.evictionLock.toString(), "["));
    System.out.printf("Pending tasks = %,d%n", ForkJoinPool.commonPool().getQueuedSubmissionCount());

    long maxMemory = Runtime.getRuntime().maxMemory();
    long freeMemory = Runtime.getRuntime().freeMemory();
    long allocatedMemory = Runtime.getRuntime().totalMemory();
    System.out.printf("Max Memory = %,d bytes%n", maxMemory);
    System.out.printf("Free Memory = %,d bytes%n", freeMemory);
    System.out.printf("Allocated Memory = %,d bytes%n", allocatedMemory);

    System.out.println();
}

From source file:com.newtranx.util.mysql.fabric.SpringQueryAllShardsAspect.java

@Around("@annotation(com.newtranx.util.mysql.fabric.QueryAllShards)")
public Object union(ProceedingJoinPoint pjp) throws Throwable {
    Method method = AspectJUtils.getMethod(pjp);
    QueryAllShards annotation = method.getAnnotation(QueryAllShards.class);
    String table = annotation.table();
    log.debug("Table=" + table);
    Set<String> groups = groupsCache.get(cacheKey);
    log.debug("ServerGroups=" + groups);
    List<Object> list;
    boolean readOnly = annotation.readOnly();
    Pattern excludePattern;
    String excludeRegex = annotation.excludeShardsPatternRegex();
    if (!StringUtils.isEmpty(excludeRegex)) {
        excludePattern = Pattern.compile(excludeRegex);
    } else {
        excludePattern = null;
    }

    Function<Boolean, List<Object>> computeFunction = (par) -> {
        Stream<String> stream = groups.stream();
        if (par)
            stream = stream.parallel();
        return stream.filter(gp -> {
            boolean exclude = excludePattern != null && excludePattern.matcher(gp).matches();
            if (exclude) {
                log.debug("Skipping group:" + gp);
            }
            return !exclude;
        }).map(gp -> {
            log.debug("Querying group: " + gp);
            ds.whenNewConnection().doInit(conn -> conn.setServerGroupName(gp))
                    .doInit(conn -> conn.setReadOnly(readOnly));
            try {
                return pjp.proceed();
            } catch (Throwable t) {
                throw Exceptions.propagate(t);
            } finally {
                ds.clearInitOps();
            }
        }).collect(Collectors.toList());
    };

    if (StringUtils.isEmpty(annotation.parallelPool())) {
        list = computeFunction.apply(false);
    } else {
        ForkJoinPool pool;
        if ("!jdkCommon".equals(annotation.parallelPool()))
            pool = ForkJoinPool.commonPool();
        else
            pool = applicationContext.getBean(annotation.parallelPool(), ForkJoinPool.class);
        log.debug("Executing queries in parallel, pool=" + pool);
        list = pool.submit(() -> {
            return computeFunction.apply(true);
        }).get();
    }
    Aggregator aggregator;
    try {
        aggregator = (Aggregator) annotation.aggregator().getDeclaredMethod("getInstance", EMPTY_PARAM)
                .invoke(null, EMPTY_ARGS);
    } catch (Exception e) {
        log.warn("Can not get singleton for class " + annotation.aggregator().getName()
                + ", creating new instance");
        aggregator = annotation.aggregator().newInstance();
    }
    return aggregator.apply(list);
}