List of usage examples for java.util.concurrent ForkJoinPool submit
@SuppressWarnings("unchecked") public ForkJoinTask<?> submit(Runnable task)
From source file:MSUmpire.SpectrumParser.mzXMLParser.java
private List<MzXMLthreadUnit> ParseScans(final BitSet IncludedScans) {
    List<MzXMLthreadUnit> ScanList = new ArrayList<>();
    ArrayList<ForkJoinTask<?>> futures = new ArrayList<>();
    final ForkJoinPool fjp = new ForkJoinPool(NoCPUs);
    Iterator<Entry<Integer, Long>> iter = ScanIndex.entrySet().iterator();
    Entry<Integer, Long> ent = iter.next();
    long currentIdx = ent.getValue();
    int nextScanNo = ent.getKey();
    final RandomAccessFile fileHandler;
    try {
        fileHandler = new RandomAccessFile(filename, "r");
    } catch (FileNotFoundException e) {
        throw new RuntimeException(e);
    }
    byte[] buffer = new byte[1 << 10];
    if (step == -1)
        step = fjp.getParallelism() * 32;
    while (iter.hasNext()) {
        ent = iter.next();
        long startposition = currentIdx;
        long nexposition = ent.getValue();
        int currentScanNo = nextScanNo;
        nextScanNo = ent.getKey();
        currentIdx = nexposition;
        if (IncludedScans.get(currentScanNo)) {
            try {
                final int bufsize = (int) (nexposition - startposition);
                if (buffer.length < bufsize)
                    buffer = new byte[Math.max(bufsize, buffer.length << 1)];
                // byte[] buffer = new byte[bufsize];
                // RandomAccessFile fileHandler = new RandomAccessFile(filename, "r");
                fileHandler.seek(startposition);
                fileHandler.read(buffer, 0, bufsize);
                // fileHandler.close();
                // String xmltext = new String(buffer);
                String xmltext = new String(buffer, 0, bufsize, StandardCharsets.ISO_8859_1);
                if (ent.getKey() == Integer.MAX_VALUE) {
                    xmltext = xmltext.replaceAll("</msRun>", "");
                }
                boolean ReadPeak = true;
                final MzXMLthreadUnit unit = new MzXMLthreadUnit(xmltext, parameter, datatype, ReadPeak);
                futures.add(fjp.submit(unit));
                ScanList.add(unit);
                if ((ScanList.size() % step) == 0) {
                    futures.get(futures.size() - step).get();
                    if (iter.hasNext() && fjp.getActiveThreadCount() < fjp.getParallelism()) {
                        step *= 2;
                        // System.out.println("MzXMLthreadUnit: fjp.getActiveThreadCount()\t" + fjp.getActiveThreadCount() + "\t" + step);
                    }
                }
            } catch (Exception ex) {
                Logger.getRootLogger().error(ExceptionUtils.getStackTrace(ex));
            }
        }
    }
    try {
        fileHandler.close();
    } catch (IOException ex) {
        throw new RuntimeException(ex);
    }
    fjp.shutdown();
    try {
        fjp.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    } catch (InterruptedException ex) {
        throw new RuntimeException(ex);
    }
    // for (MzXMLthreadUnit unit : ScanList) {
    //     executorPool.execute(unit);
    // }
    // executorPool.shutdown();
    //
    // try {
    //     executorPool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    // } catch (InterruptedException e) {
    //     Logger.getRootLogger().info("interrupted..");
    // }
    return ScanList;
}
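The example above throttles submission: it keeps every returned ForkJoinTask, joins a task submitted `step` submissions earlier whenever the list size hits a multiple of `step`, and doubles `step` while the pool still has idle workers. A stripped-down sketch of that pattern, assuming a hypothetical parse(int) method and an arbitrary chunk count in place of the mzXML-specific work:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.ForkJoinTask;
import java.util.concurrent.TimeUnit;

public class ThrottledSubmitSketch {
    static String parse(int i) { return "chunk-" + i; }   // stand-in for the real per-chunk work

    public static void main(String[] args) throws Exception {
        ForkJoinPool fjp = new ForkJoinPool(Runtime.getRuntime().availableProcessors());
        int step = fjp.getParallelism() * 32;
        List<ForkJoinTask<String>> futures = new ArrayList<>();
        for (int i = 0; i < 10_000; i++) {
            final int chunk = i;
            futures.add(fjp.submit(() -> parse(chunk)));
            if (futures.size() % step == 0) {
                // join a task submitted 'step' submissions ago to bound the backlog
                futures.get(futures.size() - step).get();
                if (fjp.getActiveThreadCount() < fjp.getParallelism()) {
                    step *= 2;   // workers are idle, so allow a larger backlog
                }
            }
        }
        fjp.shutdown();
        fjp.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    }
}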
From source file:MSUmpire.PeptidePeakClusterDetection.PDHandlerBase.java
protected void FindAllMzTracePeakCurves(ScanCollection scanCollection) throws IOException {
    // final HashSet<String> IncludedHashMap = new HashSet<>();
    // Logger.getRootLogger().info("Processing all scans to detect possible m/z peak curves....");
    Logger.getRootLogger().info("Processing all scans to detect possible m/z peak curves and");
    Logger.getRootLogger().info("Smoothing detected signals......");
    float preRT = 0f;
    //Loop for each scan in the ScanCollection
    final ArrayList<ForkJoinTask<ArrayList<PeakCurve>>> ftemp = new ArrayList<>();
    final ForkJoinPool fjp = new ForkJoinPool(NoCPUs);
    final int idx_end = scanCollection.GetScanNoArray(MSlevel).size();
    final int[] ia = new int[idx_end + 1];
    ia[0] = 0;
    for (int idx = 0; idx < idx_end; idx++) {
        final int scanNO = scanCollection.GetScanNoArray(MSlevel).get(idx);
        final ScanData sd = scanCollection.GetScan(scanNO);
        ia[idx + 1] = sd.Data.size() + ia[idx];
    }
    final boolean[] included = new boolean[ia[ia.length - 1]];
    if (step == -1)
        step = fjp.getParallelism() * 32;
    long peakCurvesCount = 0;
    for (int idx = 0; idx < idx_end; idx++) {
        int scanNO = scanCollection.GetScanNoArray(MSlevel).get(idx);
        ScanData scanData = scanCollection.GetScan(scanNO);
        //If we are doing targeted peak detection and the RT of current scan is not in the range of targeted list, jump to the next scan
        if (TargetedOnly && !FoundInInclusionRTList(scanData.RetentionTime)) {
            continue;
        }
        if (idx == 0) {
            preRT = scanData.RetentionTime - 0.01f;
        }
        for (int i = 0; i < scanData.PointCount(); i++) {
            XYData peak = scanData.Data.get(i);
            //If we are doing targeted peak detection and the RT and m/z of current peak is not in the range of targeted list, jump to the next peak
            if (TargetedOnly && !FoundInInclusionMZList(scanData.RetentionTime, peak.getX())) {
                continue;
            }
            if (peak.getX() < parameter.MinMZ) {
                continue;
            }
            //Check if the current peak has been included in previously developed peak curves
            // if (!IncludedHashMap.contains(scanNO + "_" + peak.getX())) {//The peak hasn't been included
            final int id_scanNO_peak = int_id(ia, idx, i);
            if (!included[id_scanNO_peak]) {//The peak hasn't been included
                //The current peak will be the starting peak of a new peak curve
                //Add it to the hash table
                // IncludedHashMap.add(scanNO + "_" + peak.getX());
                included[id_scanNO_peak] = true;
                float startmz = peak.getX();
                float startint = peak.getY();
                //Find the maximum peak within PPM window as the starting peak
                for (int j = i + 1; j < scanData.PointCount(); j++) {
                    XYData currentpeak = scanData.Data.get(j);
                    final int id_scanNO_currentpeak = int_id(ia, idx, j);
                    if (!included[id_scanNO_currentpeak]) {
                        // if (!IncludedHashMap.contains(scanNO + "_" + currentpeak.getX())) {
                        if (InstrumentParameter.CalcPPM(currentpeak.getX(), startmz) <= PPM) {
                            included[id_scanNO_currentpeak] = true;
                            // IncludedHashMap.add(scanNO + "_" + currentpeak.getX());
                            if (currentpeak.getY() >= startint) {
                                startmz = currentpeak.getX();
                                startint = currentpeak.getY();
                            }
                        } else {
                            break;
                        }
                    }
                }
                //Initialize a new peak curve
                PeakCurve Peakcurve = new PeakCurve(parameter);
                //Add a background peak
                Peakcurve.AddPeak(preRT, startmz, scanData.background);
                //Add the starting peak
                Peakcurve.AddPeak(scanData.RetentionTime, startmz, startint);
                Peakcurve.StartScan = scanNO;
                int missedScan = 0;
                float endrt = scanData.RetentionTime;
                int endScan = scanData.ScanNum;
                float bk = 0f;
                //Starting from the next scan, find the following peaks given the starting peak
                for (int idx2 = idx + 1; idx2 < scanCollection.GetScanNoArray(MSlevel).size()
                        && (missedScan < parameter.NoMissedScan /*|| (TargetedOnly && Peakcurve.RTWidth()<parameter.MaxCurveRTRange)*/); idx2++) {
                    int scanNO2 = scanCollection.GetScanNoArray(MSlevel).get(idx2);
                    ScanData scanData2 = scanCollection.GetScan(scanNO2);
                    endrt = scanData2.RetentionTime;
                    endScan = scanData2.ScanNum;
                    bk = scanData2.background;
                    float currentmz = 0f;
                    float currentint = 0f;
                    //If the scan is empty
                    if (scanData2.PointCount() == 0) {
                        if (parameter.FillGapByBK) {
                            Peakcurve.AddPeak(scanData2.RetentionTime, Peakcurve.TargetMz, scanData2.background);
                        }
                        missedScan++;
                        continue;
                    }
                    //Find the m/z index
                    int mzidx = scanData2.GetLowerIndexOfX(Peakcurve.TargetMz);
                    for (int pkidx = mzidx; pkidx < scanData2.Data.size(); pkidx++) {
                        XYData currentpeak = scanData2.Data.get(pkidx);
                        if (currentpeak.getX() < parameter.MinMZ) {
                            continue;
                        }
                        //Check if the peak has been included or not
                        final int int_id_scanNO2_currentpeak = int_id(ia, idx2, pkidx);
                        // if (!included.get(int_id_scanNO2_currentpeak)) {
                        if (!included[int_id_scanNO2_currentpeak]) {
                            if (InstrumentParameter.CalcPPM(currentpeak.getX(), Peakcurve.TargetMz) > PPM) {
                                if (currentpeak.getX() > Peakcurve.TargetMz) {
                                    break;
                                }
                            } else {
                                //////////The peak is in the ppm window, select the highest peak
                                included[int_id_scanNO2_currentpeak] = true;
                                // IncludedHashMap.add(scanNO2 + "_" + currentpeak.getX());
                                if (currentint < currentpeak.getY()) {
                                    currentmz = currentpeak.getX();
                                    currentint = currentpeak.getY();
                                }
                            }
                        }
                    }
                    //No peak in the PPM window has been found
                    if (currentmz == 0f) {
                        if (parameter.FillGapByBK) {
                            Peakcurve.AddPeak(scanData2.RetentionTime, Peakcurve.TargetMz, scanData2.background);
                        }
                        missedScan++;
                    } else {
                        missedScan = 0;
                        Peakcurve.AddPeak(scanData2.RetentionTime, currentmz, currentint);
                    }
                }
                Peakcurve.AddPeak(endrt, Peakcurve.TargetMz, bk);
                Peakcurve.EndScan = endScan;
                //First check if the peak curve is in targeted list
                if (FoundInInclusionList(Peakcurve.TargetMz, Peakcurve.StartRT(), Peakcurve.EndRT())) {
                    // LCMSPeakBase.UnSortedPeakCurves.add(Peakcurve);
                    ++peakCurvesCount;
                    ftemp.add(fjp.submit(new PeakCurveSmoothingUnit(Peakcurve, parameter)));
                    //Then check if the peak curve passes the criteria
                } else if (Peakcurve.GetRawSNR() > LCMSPeakBase.SNR
                        && Peakcurve.GetPeakList().size() >= parameter.MinPeakPerPeakCurve + 2) {
                    // LCMSPeakBase.UnSortedPeakCurves.add(Peakcurve);
                    ++peakCurvesCount;
                    ftemp.add(fjp.submit(new PeakCurveSmoothingUnit(Peakcurve, parameter)));
                } else {
                    Peakcurve = null;
                }
            }
        }
        preRT = scanData.RetentionTime;
        if (ReleaseScans) {
            scanData.dispose();
        }
        /** the if statement below does PeakCurveSmoothing() and ClearRawPeaks() */
        final boolean last_iter = idx + 1 == idx_end;
        if (ftemp.size() == step || last_iter) {
            final List<ForkJoinTask<ArrayList<PeakCurve>>> ftemp_sublist_view = last_iter
                    ? ftemp
                    : ftemp.subList(0, step / 2);
            for (final Future<ArrayList<PeakCurve>> f : ftemp_sublist_view) {
                try {
                    LCMSPeakBase.UnSortedPeakCurves.addAll(f.get());
                } catch (InterruptedException | ExecutionException e) {
                    throw new RuntimeException(e);
                }
            }
            ftemp_sublist_view.clear();
            if (!last_iter && fjp.getActiveThreadCount() < fjp.getParallelism()) {
                // System.out.println("PeakCurveSmoothingUnit: fjp.getActiveThreadCount()\t" + fjp.getActiveThreadCount() + "\t" + step);
                step *= 2;
            }
        }
    }
    assert ftemp.isEmpty();
    //System.out.print("PSM removed (PeakCurve generation):" + PSMRemoved );
    int i = 1;
    //Assign peak curve index
    for (PeakCurve peakCurve : LCMSPeakBase.UnSortedPeakCurves) {
        peakCurve.Index = i++;
    }
    System.gc();
    // Logger.getRootLogger().info(LCMSPeakBase.UnSortedPeakCurves.size() + " Peak curves found (Memory usage:" + Math.round((Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()) / 1048576) + "MB)");
    Logger.getRootLogger()
            .info(peakCurvesCount + " Peak curves found (Memory usage:"
                    + Math.round((Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()) / 1048576)
                    + "MB)");
}
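This variant drains completed work in batches: once the pending list reaches `step` tasks, either all of it (on the last iteration) or its oldest half is joined through a subList view, and clear() on that view removes the joined tasks from the backing list in place. A minimal sketch of just that draining step, with a hypothetical smooth(int) callable standing in for PeakCurveSmoothingUnit:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.ForkJoinTask;

public class BatchDrainSketch {
    static List<Integer> smooth(int i) { return List.of(i); }   // stand-in for the real smoothing task

    public static void main(String[] args) throws Exception {
        ForkJoinPool fjp = new ForkJoinPool();
        List<ForkJoinTask<List<Integer>>> pending = new ArrayList<>();
        List<Integer> results = new ArrayList<>();
        int step = fjp.getParallelism() * 32;
        int total = 5_000;
        for (int idx = 0; idx < total; idx++) {
            final int unit = idx;
            pending.add(fjp.submit(() -> smooth(unit)));
            boolean last = idx + 1 == total;
            if (pending.size() == step || last) {
                // join either everything (last pass) or the oldest half of the backlog
                List<ForkJoinTask<List<Integer>>> view = last ? pending : pending.subList(0, step / 2);
                for (ForkJoinTask<List<Integer>> f : view) {
                    results.addAll(f.get());
                }
                view.clear();   // removes the joined tasks from 'pending' as well
            }
        }
        fjp.shutdown();
        System.out.println(results.size() + " results collected");
    }
}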
From source file:org.apache.metron.stellar.dsl.functions.HashFunctionsTest.java
@Test
public void tlsh_multithread() throws Exception {
    //we want to ensure that everything is threadsafe, so we'll spin up some random data
    //generate some hashes and then do it all in parallel and make sure it all matches.
    Map<Map.Entry<byte[], Map<String, Object>>, String> hashes = new HashMap<>();
    Random r = new Random(0);
    for (int i = 0; i < 20; ++i) {
        byte[] d = new byte[256];
        r.nextBytes(d);
        Map<String, Object> config = new HashMap<String, Object>() {
            {
                put(TLSHHasher.Config.BUCKET_SIZE.key, r.nextBoolean() ? 128 : 256);
                put(TLSHHasher.Config.CHECKSUM.key, r.nextBoolean() ? 1 : 3);
            }
        };
        String hash = (String) run("HASH(data, 'tlsh', config)", ImmutableMap.of("config", config, "data", d));
        Assert.assertNotNull(hash);
        hashes.put(new AbstractMap.SimpleEntry<>(d, config), hash);
    }
    ForkJoinPool forkJoinPool = new ForkJoinPool(5);
    forkJoinPool.submit(() -> hashes.entrySet().parallelStream().forEach(kv -> {
        Map<String, Object> config = kv.getKey().getValue();
        byte[] data = kv.getKey().getKey();
        String hash = (String) run("HASH(data, 'tlsh', config)",
                ImmutableMap.of("config", config, "data", data));
        Assert.assertEquals(hash, kv.getValue());
    }));
}
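Submitting a parallel stream to a dedicated ForkJoinPool is a common way to keep its work off the shared common pool: the terminal operation is invoked from one of the custom pool's worker threads, so the stream's subtasks run there (a widely relied-upon behavior, though not formally specified). Note that submit is asynchronous; unless the returned ForkJoinTask is joined with get(), the caller does not wait for the stream and any exception thrown inside it stays recorded in the task instead of propagating. A minimal sketch of the pattern with placeholder data:

import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ForkJoinPool;

public class ParallelStreamInPoolSketch {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        List<Integer> data = List.of(1, 2, 3, 4, 5, 6, 7, 8);
        ForkJoinPool pool = new ForkJoinPool(5);
        // The stream's parallel work runs in 'pool' because the terminal
        // operation is invoked from one of its worker threads.
        int sum = pool.submit(() -> data.parallelStream().mapToInt(Integer::intValue).sum()).get();
        System.out.println("sum = " + sum);
        pool.shutdown();
    }
}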
From source file:org.csanchez.jenkins.plugins.kubernetes.KubernetesTestUtil.java
/**
 * Delete pods with matching labels
 *
 * @param client
 * @param labels
 * @param wait
 *            wait some time for pods to finish
 * @return whether any pod was deleted
 * @throws Exception
 */
public static boolean deletePods(KubernetesClient client, Map<String, String> labels, boolean wait)
        throws Exception {
    if (client != null) {
        // wait for 90 seconds for all pods to be terminated
        if (wait) {
            LOGGER.log(INFO, "Waiting for pods to terminate");
            ForkJoinPool forkJoinPool = new ForkJoinPool(1);
            try {
                forkJoinPool.submit(() -> IntStream.range(1, 1_000_000).anyMatch(i -> {
                    try {
                        FilterWatchListDeletable<Pod, PodList, Boolean, Watch, Watcher<Pod>> pods = client
                                .pods().withLabels(labels);
                        LOGGER.log(INFO, "Still waiting for pods to terminate: {0}", print(pods));
                        boolean allTerminated = pods.list().getItems().isEmpty();
                        if (allTerminated) {
                            LOGGER.log(INFO, "All pods are terminated: {0}", print(pods));
                        } else {
                            LOGGER.log(INFO, "Still waiting for pods to terminate: {0}", print(pods));
                            Thread.sleep(5000);
                        }
                        return allTerminated;
                    } catch (InterruptedException e) {
                        LOGGER.log(INFO, "Waiting for pods to terminate - interrupted");
                        return true;
                    }
                })).get(90, TimeUnit.SECONDS);
            } catch (TimeoutException e) {
                LOGGER.log(INFO, "Waiting for pods to terminate - timed out");
                // job not done in interval
            }
        }
        FilterWatchListDeletable<Pod, PodList, Boolean, Watch, Watcher<Pod>> pods = client.pods()
                .withLabels(labels);
        if (!pods.list().getItems().isEmpty()) {
            LOGGER.log(WARNING, "Deleting leftover pods: {0}", print(pods));
            if (Boolean.TRUE.equals(pods.delete())) {
                return true;
            }
        }
    }
    return false;
}
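Here submit is used to put a hard deadline around a polling loop: the loop runs inside a single-threaded pool and the caller bounds it with get(90, TimeUnit.SECONDS), treating a TimeoutException as "not finished in time". A condensed sketch of that shape, with the Kubernetes check replaced by a hypothetical conditionMet() probe and a shorter deadline:

import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.stream.IntStream;

public class PollWithTimeoutSketch {
    static boolean conditionMet() {               // stand-in for the real readiness check
        return System.nanoTime() % 7 == 0;
    }

    public static void main(String[] args) throws Exception {
        ForkJoinPool pool = new ForkJoinPool(1);
        try {
            pool.submit(() -> IntStream.range(1, 1_000_000).anyMatch(i -> {
                try {
                    if (conditionMet()) {
                        return true;              // stop polling
                    }
                    Thread.sleep(500);            // wait before the next probe
                    return false;
                } catch (InterruptedException e) {
                    return true;                  // treat interruption as "stop waiting"
                }
            })).get(10, TimeUnit.SECONDS);        // overall deadline for the whole loop
        } catch (TimeoutException e) {
            System.out.println("condition not met before the deadline");
        } finally {
            pool.shutdown();                      // note: does not cancel a still-running poll
        }
    }
}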
From source file:org.diorite.impl.world.tick.TickGroups.java
@Override
public synchronized void doTick(final int tps) {
    if (this.groups.isEmpty()) {
        if (!CoreMain.isClient()) {
            SpammyError.err(
                    "There is no tick groups, server don't have anything to do. Do you have any worlds?",
                    10, key);
        }
        return;
    }
    if (this.groups.size() == 1) {
        /**
         * TODO count time of execution and split if needed.
         */
        try {
            this.groups.iterator().next().doTick(tps);
        } catch (final Throwable throwable) {
            throwable.printStackTrace();
        }
        return;
    }
    final AtomicInteger i = new AtomicInteger(0);
    final ForkJoinPool pool = new ForkJoinPool(this.groups.size(),
            p -> new NamedForkJoinWorkerThread(p, i.getAndIncrement()), (t, e) -> {
                // TODO: maybe add some pretty error priting
                System.err.println("Error in tick thread: " + t.getName());
                e.printStackTrace();
            }, false);
    /**
     * TODO count time of execution for all groups.
     * if any group is creating lags, try split it. (should not count single-time lags?)
     * if two grups can be join, try join them.
     */
    final CountDownLatch latch = new CountDownLatch(this.groups.size());
    for (final Iterator<TickGroupImpl> it = this.groups.iterator(); it.hasNext();) {
        final TickGroupImpl tickGroup = it.next();
        if (tickGroup.isEmpty()) {
            it.remove();
            latch.countDown();
            continue;
        }
        pool.submit(() -> {
            try {
                tickGroup.doTick(tps);
                this.core.runScheduler(false);
                this.core.runSync();
            } finally {
                latch.countDown();
            }
        });
    }
    try {
        latch.await();
    } catch (final InterruptedException e) {
        e.printStackTrace();
    }
}
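This example uses the four-argument ForkJoinPool constructor to supply a custom worker-thread factory and an UncaughtExceptionHandler, then submits one task per tick group and waits on a CountDownLatch that is counted down in a finally block. A minimal sketch of the same wiring, with the JDK's default thread factory (renamed threads) standing in for NamedForkJoinWorkerThread and trivial simulated work:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.ForkJoinWorkerThread;
import java.util.concurrent.atomic.AtomicInteger;

public class LatchedSubmitSketch {
    public static void main(String[] args) throws InterruptedException {
        final int groups = 4;
        final AtomicInteger counter = new AtomicInteger(0);
        ForkJoinPool pool = new ForkJoinPool(groups,
                p -> {
                    ForkJoinWorkerThread t = ForkJoinPool.defaultForkJoinWorkerThreadFactory.newThread(p);
                    t.setName("tick-worker-" + counter.getAndIncrement());
                    return t;
                },
                (t, e) -> System.err.println("Error in tick thread " + t.getName() + ": " + e),
                false);
        CountDownLatch latch = new CountDownLatch(groups);
        for (int g = 0; g < groups; g++) {
            final int group = g;
            pool.submit(() -> {
                try {
                    System.out.println("ticking group " + group + " on " + Thread.currentThread().getName());
                } finally {
                    latch.countDown();   // always release the latch, even if the tick fails
                }
            });
        }
        latch.await();                   // block until every group has ticked once
        pool.shutdown();
    }
}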
From source file:org.diorite.utils.concurrent.ParallelUtils.java
public static void realParallelStream(final Runnable streamAction, final int parallelism, final boolean await) {
    final ForkJoinPool pool = new ForkJoinPool(parallelism);
    if (await) {
        pool.invoke(createSimpleTask(streamAction));
    } else {
        pool.submit(streamAction);
    }
}
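The two branches differ only in blocking behavior: invoke runs the task and waits for it to finish, while submit schedules it and returns immediately with a ForkJoinTask<?> handle. A tiny sketch of that contrast, where ForkJoinTask.adapt stands in for the createSimpleTask helper used above and the Runnable is a throwaway:

import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.ForkJoinTask;

public class InvokeVsSubmitSketch {
    public static void main(String[] args) throws Exception {
        ForkJoinPool pool = new ForkJoinPool(2);
        Runnable action = () -> System.out.println("work on " + Thread.currentThread().getName());

        // Blocking: returns only after 'action' has finished.
        pool.invoke(ForkJoinTask.adapt(action));

        // Non-blocking: returns a handle immediately; join it later if completion matters.
        ForkJoinTask<?> handle = pool.submit(action);
        handle.get();

        pool.shutdown();
    }
}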
From source file:org.diorite.utils.concurrent.ParallelUtils.java
public static void realParallelStream(final Runnable streamAction, final int parallelism, final boolean await,
        final String name) {
    final ForkJoinPool pool = new ForkJoinPool(parallelism, new NamedForkJoinWorkerFactory(name), null, false);
    if (await) {
        pool.invoke(createSimpleTask(streamAction));
    } else {
        pool.submit(streamAction);
    }
}
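In both overloads the non-awaiting branch is fire-and-forget: the ForkJoinTask returned by submit is discarded, so the caller cannot observe completion or failures of the stream action. Worth remembering in this style of use is that ForkJoinPool worker threads are daemon threads, so such a pool will not keep the JVM alive on its own; any work still running at JVM exit is simply abandoned.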