Example usage for java.util.concurrent ExecutorService awaitTermination

List of usage examples for java.util.concurrent ExecutorService awaitTermination

Introduction

On this page you can find example usage for java.util.concurrent ExecutorService awaitTermination.

Prototype

boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException;

Document

Blocks until all tasks have completed execution after a shutdown request, or the timeout occurs, or the current thread is interrupted, whichever happens first.
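
For orientation, here is a minimal, self-contained sketch of the usual shutdown-then-await idiom, modeled on the pattern recommended by the ExecutorService javadoc (the pool size, timeout, and task body below are illustrative assumptions, not taken from any of the examples that follow):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AwaitTerminationExample {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 10; i++) {
            final int taskId = i;
            pool.submit(() -> System.out.println("task " + taskId + " ran"));
        }
        pool.shutdown(); // stop accepting new tasks; already-submitted tasks still run
        try {
            // block for up to 30 seconds while queued tasks finish
            if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
                pool.shutdownNow(); // timed out: interrupt whatever is still running
            }
        } catch (InterruptedException ie) {
            pool.shutdownNow();
            Thread.currentThread().interrupt(); // preserve the interrupt status
        }
    }
}

Note that awaitTermination does not itself shut the pool down; without a prior call to shutdown() it can only time out.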

Usage

From source file:edu.lternet.pasta.dml.download.DocumentHandler.java

public InputStream downloadDocument() throws Exception {

    log.info("starting the download");

    boolean success = true;

    final String id = docId;
    final EcogridEndPointInterface endpoint = ecogridEndPointInterface;

    ExecutorService service = Executors.newSingleThreadExecutor();
    service.execute(new Runnable() {
        public void run() {
            long startTime = System.currentTimeMillis();

            //get from the ecogrid
            try {
                if (ecogridEndPointInterface instanceof AuthenticatedEcogridEndPointInterface) {
                    AuthenticatedQueryServiceGetToStreamClient authenticatedEcogridClient = new AuthenticatedQueryServiceGetToStreamClient(
                            new URL(((AuthenticatedEcogridEndPointInterface) endpoint)
                                    .getMetacatAuthenticatedEcogridEndPoint()));
                    authenticatedEcogridClient.get(id,
                            ((AuthenticatedEcogridEndPointInterface) endpoint).getSessionId(), outputStream);
                } else {
                    QueryServiceGetToStreamClient ecogridClient = new QueryServiceGetToStreamClient(
                            new URL(endpoint.getMetacatEcogridEndPoint()));
                    ecogridClient.get(id, outputStream);
                }
                outputStream.close();
                long endTime = System.currentTimeMillis();
                log.debug((endTime - startTime) + " ms to download: " + docId);
                log.debug("Done downloading id=" + id);

            } catch (Exception e) {
                log.error("Error getting document from ecogrid: " + e.getMessage());
                e.printStackTrace();
            }

        }
    });

    //shut down the executor; note that with a zero timeout awaitTermination
    //returns immediately rather than blocking, so the download keeps running
    //in the background while the caller reads from the returned stream
    service.shutdown();
    service.awaitTermination(0, TimeUnit.SECONDS);

    return inputStream;
}

From source file:org.orekit.utils.GenericTimeStampedCacheTest.java

private int checkDatesMultiThread(final List<AbsoluteDate> centralDates,
        final GenericTimeStampedCache<AbsoluteDate> cache, final int threadPoolSize)
        throws TimeStampedCacheException {

    final int n = cache.getNeighborsSize();
    final double step = ((Generator) cache.getGenerator()).getStep();
    final AtomicReference<AbsoluteDate[]> failedDates = new AtomicReference<AbsoluteDate[]>();
    final AtomicReference<TimeStampedCacheException> caught = new AtomicReference<TimeStampedCacheException>();
    ExecutorService executorService = Executors.newFixedThreadPool(threadPoolSize);

    for (final AbsoluteDate central : centralDates) {
        executorService.execute(new Runnable() {
            public void run() {
                try {
                    final List<AbsoluteDate> neighbors = cache.getNeighbors(central);
                    Assert.assertEquals(n, neighbors.size());
                    for (final AbsoluteDate date : neighbors) {
                        if (date.durationFrom(central) < -(n + 1) * step
                                || date.durationFrom(central) > n * step) {
                            AbsoluteDate[] dates = new AbsoluteDate[n + 1];
                            dates[0] = central;
                            // neighbors is a List, so convert it to an array before
                            // handing it to System.arraycopy (passing the List itself
                            // compiles but fails at runtime with an ArrayStoreException)
                            System.arraycopy(neighbors.toArray(new AbsoluteDate[0]), 0, dates, 1, n);
                            failedDates.set(dates);
                        }
                    }
                } catch (TimeStampedCacheException tce) {
                    caught.set(tce);
                }
            }
        });
    }

    try {
        executorService.shutdown();
        Assert.assertTrue("Not enough time for all threads to complete, try increasing the timeout",
                executorService.awaitTermination(10, TimeUnit.MINUTES));
    } catch (InterruptedException ie) {
        Assert.fail(ie.getLocalizedMessage());
    }

    if (caught.get() != null) {
        throw caught.get();
    }

    if (failedDates.get() != null) {
        AbsoluteDate[] dates = failedDates.get();
        StringBuilder builder = new StringBuilder();
        String eol = System.getProperty("line.separator");
        builder.append("central = ").append(dates[0]).append(eol);
        builder.append("step = ").append(step).append(eol);
        builder.append("neighbors =").append(eol);
        for (int i = 1; i < dates.length; ++i) {
            builder.append("    ").append(dates[i]).append(eol);
        }
        Assert.fail(builder.toString());
    }

    return centralDates.size();

}

From source file:org.deeplearning4j.models.word2vec.Word2Vec.java

/**
 * Train the model
 */
public void fit() throws IOException {
    boolean loaded = buildVocab();
    //save vocab after building
    if (!loaded && saveVocab)
        vocab().saveVocab();
    if (stopWords == null)
        readStopWords();

    log.info("Training word2vec multithreaded");

    if (sentenceIter != null)
        sentenceIter.reset();
    if (docIter != null)
        docIter.reset();

    int[] docs = vectorizer.index().allDocs();

    if (docs.length < 1) {
        vectorizer.fit();
    }

    docs = vectorizer.index().allDocs();
    if (docs.length < 1) {
        throw new IllegalStateException("No documents found");
    }

    totalWords = vectorizer.numWordsEncountered();
    if (totalWords < 1)
        throw new IllegalStateException("Unable to train, total words less than 1");

    totalWords *= numIterations;

    log.info("Processing sentences...");

    AtomicLong numWordsSoFar = new AtomicLong(0);
    final AtomicLong nextRandom = new AtomicLong(5);
    ExecutorService exec = new ThreadPoolExecutor(Runtime.getRuntime().availableProcessors(),
            Runtime.getRuntime().availableProcessors(), 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(), new RejectedExecutionHandler() {
                @Override
                public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    }
                    executor.submit(r);
                }
            });

    final Queue<List<VocabWord>> batch2 = new ConcurrentLinkedDeque<>();
    vectorizer.index().eachDoc(new Function<List<VocabWord>, Void>() {
        @Override
        public Void apply(List<VocabWord> input) {
            List<VocabWord> batch = new ArrayList<>();
            addWords(input, nextRandom, batch);
            if (!batch.isEmpty()) {
                batch2.add(batch);
            }

            return null;
        }
    }, exec);

    exec.shutdown();
    try {
        exec.awaitTermination(1, TimeUnit.DAYS);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    ActorSystem actorSystem = ActorSystem.create();

    for (int i = 0; i < numIterations; i++)
        doIteration(batch2, numWordsSoFar, nextRandom, actorSystem);
    actorSystem.shutdown();

}

From source file:kafka.deploy.utils.command.CommandRemoteOperation.java

/**
 * Executes the given commands in parallel on the remote hosts and
 * aggregates the results for the caller.
 *
 * @param hostNameCommandLineMap Map whose key is the external host name
 *        and whose value is the command line to execute remotely on that host
 * 
 * @return List of result types as dictated by the subclass
 * 
 * @throws RemoteOperationException Thrown on error invoking the command on
 *         one or more clients.
 */
protected void execute(Map<String, String> hostNameCommandLineMap) throws RemoteOperationException {

    ExecutorService threadPool = Executors.newFixedThreadPool(hostNameCommandLineMap.size());
    List<Future<?>> futures = new ArrayList<Future<?>>();

    for (Map.Entry<String, String> entry : hostNameCommandLineMap.entrySet()) {
        String hostName = entry.getKey();
        String commandLine = entry.getValue();

        if (logger.isDebugEnabled())
            logger.debug("Command to execute: " + commandLine);

        List<String> commandArgs = parse(commandLine);
        UnixCommand command = new UnixCommand(hostName, commandArgs);
        Callable<?> callable = getCallable(command);
        Future<?> future = threadPool.submit(callable);
        futures.add(future);
    }

    // Build up a list of all the results and/or capture the errors as they
    // occur.
    try {
        StringBuilder errors = new StringBuilder();

        for (Future<?> future : futures) {
            Throwable t = null;

            try {
                future.get();
            } catch (ExecutionException ex) {
                t = ex.getCause();
            } catch (Exception e) {
                t = e;
            }

            if (t != null) {
                if (logger.isWarnEnabled())
                    logger.warn(t, t);

                if (errors.length() > 0)
                    errors.append("; ");

                errors.append(t.getMessage());
            }
        }

        if (errors.length() > 0)
            throw new RemoteOperationException(errors.toString());
    } finally {
        threadPool.shutdown();

        try {
            threadPool.awaitTermination(60, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            if (logger.isWarnEnabled())
                logger.warn(e, e);
        }
    }
}

From source file:org.elasticsearch.client.sniff.SnifferTests.java

/**
 * Test behaviour when a bunch of onFailure sniffing rounds are triggered in parallel. Each run will always
 * schedule a subsequent afterFailure round. Also, for each onFailure round that starts, the next scheduled round
 * (either afterFailure or ordinary) gets cancelled.
 */
public void testSniffOnFailure() throws Exception {
    RestClient restClient = mock(RestClient.class);
    CountingHostsSniffer hostsSniffer = new CountingHostsSniffer();
    final AtomicBoolean initializing = new AtomicBoolean(true);
    final long sniffInterval = randomLongBetween(1, Long.MAX_VALUE);
    final long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE);
    int minNumOnFailureRounds = randomIntBetween(5, 10);
    final CountDownLatch initializingLatch = new CountDownLatch(1);
    final Set<Sniffer.ScheduledTask> ordinaryRoundsTasks = new CopyOnWriteArraySet<>();
    final AtomicReference<Future<?>> initializingFuture = new AtomicReference<>();
    final Set<Sniffer.ScheduledTask> onFailureTasks = new CopyOnWriteArraySet<>();
    final Set<Sniffer.ScheduledTask> afterFailureTasks = new CopyOnWriteArraySet<>();
    final AtomicBoolean onFailureCompleted = new AtomicBoolean(false);
    final CountDownLatch completionLatch = new CountDownLatch(1);
    final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    try {
        Scheduler scheduler = new Scheduler() {
            @Override
            public Future<?> schedule(final Sniffer.Task task, long delayMillis) {
                if (initializing.compareAndSet(true, false)) {
                    assertEquals(0L, delayMillis);
                    Future<?> future = executor.submit(new Runnable() {
                        @Override
                        public void run() {
                            try {
                                task.run();
                            } finally {
                                //we need to make sure that the sniffer is initialized, so the sniffOnFailure
                                //call does what it needs to do. Otherwise nothing happens until initialized.
                                initializingLatch.countDown();
                            }
                        }
                    });
                    assertTrue(initializingFuture.compareAndSet(null, future));
                    return future;
                }
                if (delayMillis == 0L) {
                    Future<?> future = executor.submit(task);
                    onFailureTasks.add(new Sniffer.ScheduledTask(task, future));
                    return future;
                }
                if (delayMillis == sniffAfterFailureDelay) {
                    Future<?> future = scheduleOrSubmit(task);
                    afterFailureTasks.add(new Sniffer.ScheduledTask(task, future));
                    return future;
                }

                assertEquals(sniffInterval, delayMillis);
                assertEquals(sniffInterval, task.nextTaskDelay);

                if (onFailureCompleted.get() && onFailureTasks.size() == afterFailureTasks.size()) {
                    completionLatch.countDown();
                    return mock(Future.class);
                }

                Future<?> future = scheduleOrSubmit(task);
                ordinaryRoundsTasks.add(new Sniffer.ScheduledTask(task, future));
                return future;
            }

            private Future<?> scheduleOrSubmit(Sniffer.Task task) {
                if (randomBoolean()) {
                    return executor.schedule(task, randomLongBetween(0L, 200L), TimeUnit.MILLISECONDS);
                } else {
                    return executor.submit(task);
                }
            }

            @Override
            public void shutdown() {
            }
        };
        final Sniffer sniffer = new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval,
                sniffAfterFailureDelay);
        assertTrue("timeout waiting for sniffer to get initialized",
                initializingLatch.await(1000, TimeUnit.MILLISECONDS));

        ExecutorService onFailureExecutor = Executors.newFixedThreadPool(randomIntBetween(5, 20));
        Set<Future<?>> onFailureFutures = new CopyOnWriteArraySet<>();
        try {
            //with tasks executing quickly one after each other, it is very likely that the onFailure round gets skipped
            //as another round is already running. We retry till enough runs get through as that's what we want to test.
            while (onFailureTasks.size() < minNumOnFailureRounds) {
                onFailureFutures.add(onFailureExecutor.submit(new Runnable() {
                    @Override
                    public void run() {
                        sniffer.sniffOnFailure();
                    }
                }));
            }
            assertThat(onFailureFutures.size(), greaterThanOrEqualTo(minNumOnFailureRounds));
            for (Future<?> onFailureFuture : onFailureFutures) {
                assertNull(onFailureFuture.get());
            }
            onFailureCompleted.set(true);
        } finally {
            onFailureExecutor.shutdown();
            onFailureExecutor.awaitTermination(1000, TimeUnit.MILLISECONDS);
        }

        assertFalse(initializingFuture.get().isCancelled());
        assertTrue(initializingFuture.get().isDone());
        assertNull(initializingFuture.get().get());

        assertTrue("timeout waiting for sniffing rounds to be completed",
                completionLatch.await(1000, TimeUnit.MILLISECONDS));
        assertThat(onFailureTasks.size(), greaterThanOrEqualTo(minNumOnFailureRounds));
        assertEquals(onFailureTasks.size(), afterFailureTasks.size());

        for (Sniffer.ScheduledTask onFailureTask : onFailureTasks) {
            assertFalse(onFailureTask.future.isCancelled());
            assertTrue(onFailureTask.future.isDone());
            assertNull(onFailureTask.future.get());
            assertTrue(onFailureTask.task.hasStarted());
            assertFalse(onFailureTask.task.isSkipped());
        }

        int cancelledTasks = 0;
        int completedTasks = onFailureTasks.size() + 1;
        for (Sniffer.ScheduledTask afterFailureTask : afterFailureTasks) {
            if (assertTaskCancelledOrCompleted(afterFailureTask)) {
                completedTasks++;
            } else {
                cancelledTasks++;
            }
        }

        assertThat(ordinaryRoundsTasks.size(), greaterThan(0));
        for (Sniffer.ScheduledTask task : ordinaryRoundsTasks) {
            if (assertTaskCancelledOrCompleted(task)) {
                completedTasks++;
            } else {
                cancelledTasks++;
            }
        }
        assertEquals(onFailureTasks.size(), cancelledTasks);

        assertEquals(completedTasks, hostsSniffer.runs.get());
        int setHostsRuns = hostsSniffer.runs.get() - hostsSniffer.failures.get() - hostsSniffer.emptyList.get();
        verify(restClient, times(setHostsRuns)).setHosts(Matchers.<HttpHost>anyVararg());
        verifyNoMoreInteractions(restClient);
    } finally {
        executor.shutdown();
        executor.awaitTermination(1000L, TimeUnit.MILLISECONDS);
    }
}

From source file:org.apache.tinkerpop.gremlin.groovy.engine.GremlinExecutorTest.java

@Test
public void shouldAllowVariableReuseAcrossThreads() throws Exception {
    final ExecutorService service = Executors.newFixedThreadPool(8, testingThreadFactory);
    final GremlinExecutor gremlinExecutor = GremlinExecutor.build().create();

    final AtomicBoolean failed = new AtomicBoolean(false);
    final int max = 512;
    final List<Pair<Integer, List<Integer>>> futures = Collections.synchronizedList(new ArrayList<>(max));
    IntStream.range(0, max).forEach(i -> {
        final int yValue = i * 2;
        final Bindings b = new SimpleBindings();
        b.put("x", i);
        b.put("y", yValue);
        final int zValue = i * -1;

        final String script = "z=" + zValue + ";[x,y,z]";
        try {
            service.submit(() -> {
                try {
                    final List<Integer> result = (List<Integer>) gremlinExecutor.eval(script, b).get();
                    futures.add(Pair.with(i, result));
                } catch (Exception ex) {
                    failed.set(true);
                }
            });
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        }
    });

    service.shutdown();
    assertThat(service.awaitTermination(60000, TimeUnit.MILLISECONDS), is(true));

    // likely a concurrency exception if it occurs - and if it does then we've messed up, because that's what this
    // test is partially designed to protect against.
    assertThat(failed.get(), is(false));

    assertEquals(max, futures.size());
    futures.forEach(t -> {
        assertEquals(t.getValue0(), t.getValue1().get(0));
        assertEquals(t.getValue0() * 2, t.getValue1().get(1).intValue());
        assertEquals(t.getValue0() * -1, t.getValue1().get(2).intValue());
    });
}

From source file:org.apache.hadoop.yarn.util.TestFSDownload.java

private void downloadWithFileType(TEST_FILE_TYPE fileType)
        throws IOException, URISyntaxException, InterruptedException {
    Configuration conf = new Configuration();
    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
    FileContext files = FileContext.getLocalFSFileContext(conf);
    final Path basedir = files.makeQualified(new Path("target", TestFSDownload.class.getSimpleName()));
    files.mkdir(basedir, null, true);
    conf.setStrings(TestFSDownload.class.getName(), basedir.toString());

    Random rand = new Random();
    long sharedSeed = rand.nextLong();
    rand.setSeed(sharedSeed);
    System.out.println("SEED: " + sharedSeed);

    Map<LocalResource, Future<Path>> pending = new HashMap<LocalResource, Future<Path>>();
    ExecutorService exec = Executors.newSingleThreadExecutor();
    LocalDirAllocator dirs = new LocalDirAllocator(TestFSDownload.class.getName());

    int size = rand.nextInt(512) + 512;
    LocalResourceVisibility vis = LocalResourceVisibility.PRIVATE;
    Path p = new Path(basedir, "" + 1);
    String strFileName = "";
    LocalResource rsrc = null;
    switch (fileType) {
    case TAR:
        rsrc = createTarFile(files, p, size, rand, vis);
        break;
    case JAR:
        rsrc = createJarFile(files, p, size, rand, vis);
        rsrc.setType(LocalResourceType.PATTERN);
        break;
    case ZIP:
        rsrc = createZipFile(files, p, size, rand, vis);
        strFileName = p.getName() + ".ZIP";
        break;
    case TGZ:
        rsrc = createTgzFile(files, p, size, rand, vis);
        break;
    }
    Path destPath = dirs.getLocalPathForWrite(basedir.toString(), size, conf);
    destPath = new Path(destPath, Long.toString(uniqueNumberGenerator.incrementAndGet()));
    FSDownload fsd = new FSDownload(files, UserGroupInformation.getCurrentUser(), conf, destPath, rsrc);
    pending.put(rsrc, exec.submit(fsd));
    exec.shutdown();
    // poll until the single download task has run to completion
    while (!exec.awaitTermination(1000, TimeUnit.MILLISECONDS))
        ;
    try {
        pending.get(rsrc).get(); // see if there was an Exception during download
        FileStatus[] filesstatus = files.getDefaultFileSystem().listStatus(basedir);
        for (FileStatus filestatus : filesstatus) {
            if (filestatus.isDirectory()) {
                FileStatus[] childFiles = files.getDefaultFileSystem().listStatus(filestatus.getPath());
                for (FileStatus childfile : childFiles) {
                    if (strFileName.endsWith(".ZIP") && childfile.getPath().getName().equals(strFileName)
                            && !childfile.isDirectory()) {
                        Assert.fail("Failure...After unzip, there should have been a"
                                + " directory formed with zip file name but found a file. "
                                + childfile.getPath());
                    }
                    if (childfile.getPath().getName().startsWith("tmp")) {
                        Assert.fail("Tmp File should not have been there " + childfile.getPath());
                    }
                }
            }
        }
    } catch (Exception e) {
        throw new IOException("Failed exec", e);
    }
}

From source file:MSUmpire.LCMSPeakStructure.LCMSPeakDIAMS2.java

private void PrepareMGF_MS1Cluster(LCMSPeakMS1 ms1lcms) throws IOException {

    ArrayList<PseudoMSMSProcessing> ScanList = new ArrayList<>();
    ExecutorService executorPool = Executors.newFixedThreadPool(NoCPUs);
    for (PeakCluster ms1cluster : ms1lcms.PeakClusters) {
        final ArrayList<PrecursorFragmentPairEdge> frags = FragmentsClu2Cur.get(ms1cluster.Index);
        if (frags != null && DIA_MZ_Range.getX() <= ms1cluster.GetMaxMz()
                && DIA_MZ_Range.getY() >= ms1cluster.TargetMz()) {
            //            if (DIA_MZ_Range.getX() <= ms1cluster.GetMaxMz() && DIA_MZ_Range.getY() >= ms1cluster.TargetMz() && FragmentsClu2Cur.containsKey(ms1cluster.Index)) {
            //                ArrayList<PrecursorFragmentPairEdge> frags = FragmentsClu2Cur.get(ms1cluster.Index);
            ms1cluster.GroupedFragmentPeaks.addAll(frags);
            if (Last_MZ_Range == null || Last_MZ_Range.getY() < ms1cluster.TargetMz()) {
                PseudoMSMSProcessing mSMSProcessing = new PseudoMSMSProcessing(ms1cluster, parameter);
                ScanList.add(mSMSProcessing);
            }
        }
    }

    for (PseudoMSMSProcessing proc : ScanList) {
        executorPool.execute(proc);
    }
    executorPool.shutdown();

    try {
        executorPool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    } catch (InterruptedException e) {
        Logger.getRootLogger().info("interrupted..");
    }

    String mgffile = FilenameUtils.getFullPath(ParentmzXMLName) + GetQ1Name() + ".mgf.temp";
    String mgffile2 = FilenameUtils.getFullPath(ParentmzXMLName) + GetQ2Name() + ".mgf.temp";
    //        FileWriter mapwriter = new FileWriter(FilenameUtils.getFullPath(ParentmzXMLName) + FilenameUtils.getBaseName(ParentmzXMLName) + ".ScanClusterMapping_Q1", true);
    //        FileWriter mapwriter2 = new FileWriter(FilenameUtils.getFullPath(ParentmzXMLName) + FilenameUtils.getBaseName(ParentmzXMLName) + ".ScanClusterMapping_Q2", true);

    //        FileWriter mgfWriter = new FileWriter(mgffile, true);
    //        FileWriter mgfWriter2 = new FileWriter(mgffile2, true);
    final BufferedWriter mapwriter = DIAPack.get_file(DIAPack.OutputFile.ScanClusterMapping_Q1,
            FilenameUtils.getFullPath(ParentmzXMLName) + FilenameUtils.getBaseName(ParentmzXMLName)
                    + ".ScanClusterMapping_Q1"),
            mapwriter2 = DIAPack.get_file(DIAPack.OutputFile.ScanClusterMapping_Q2,
                    FilenameUtils.getFullPath(ParentmzXMLName) + FilenameUtils.getBaseName(ParentmzXMLName)
                            + ".ScanClusterMapping_Q2"),
            mgfWriter = DIAPack.get_file(DIAPack.OutputFile.Mgf_Q1, mgffile),
            mgfWriter2 = DIAPack.get_file(DIAPack.OutputFile.Mgf_Q2, mgffile2);

    for (PseudoMSMSProcessing mSMSProcessing : ScanList) {
        if (MatchedFragmentMap.size() > 0) {
            mSMSProcessing.RemoveMatchedFrag(MatchedFragmentMap);
        }

        XYPointCollection Scan = mSMSProcessing.GetScan();

        if (Scan != null && Scan.PointCount() > parameter.MinFrag) {
            //                StringBuilder mgfString = new StringBuilder();

            if (mSMSProcessing.Precursorcluster.IsotopeComplete(3)) {
                final BufferedWriter mgfString = mgfWriter;
                parentDIA.Q1Scan++;
                mgfString.append("BEGIN IONS\n");
                mgfString.append("PEPMASS=").append(String.valueOf(mSMSProcessing.Precursorcluster.TargetMz()))
                        .append("\n");
                mgfString.append("CHARGE=").append(String.valueOf(mSMSProcessing.Precursorcluster.Charge))
                        .append("+\n");
                mgfString.append("RTINSECONDS=")
                        .append(String.valueOf(mSMSProcessing.Precursorcluster.PeakHeightRT[0] * 60f))
                        .append("\n");
                mgfString.append("TITLE=").append(GetQ1Name()).append(".")
                        .append(String.valueOf(parentDIA.Q1Scan)).append(".")
                        .append(String.valueOf(parentDIA.Q1Scan)).append(".")
                        .append(String.valueOf(mSMSProcessing.Precursorcluster.Charge)).append("\n");
                for (int i = 0; i < Scan.PointCount(); i++) {
                    mgfString.append(String.valueOf(Scan.Data.get(i).getX())).append(" ")
                            .append(String.valueOf(Scan.Data.get(i).getY())).append("\n");
                }
                mgfString.append("END IONS\n\n");
                mapwriter.write(parentDIA.Q1Scan + "_" + mSMSProcessing.Precursorcluster.Index + "\n");
                //                    mgfWriter.write(mgfString.toString());
                //} else if (mSMSProcessing.Precursorcluster.IsotopeComplete(2)) {
            } else {
                final BufferedWriter mgfString = mgfWriter2;
                parentDIA.Q2Scan++;
                mgfString.append("BEGIN IONS\n");
                mgfString.append("PEPMASS=").append(String.valueOf(mSMSProcessing.Precursorcluster.TargetMz()))
                        .append("\n");
                mgfString.append("CHARGE=").append(String.valueOf(mSMSProcessing.Precursorcluster.Charge))
                        .append("+\n");
                mgfString.append("RTINSECONDS=")
                        .append(String.valueOf(mSMSProcessing.Precursorcluster.PeakHeightRT[0] * 60f))
                        .append("\n");
                mgfString.append("TITLE=").append(GetQ2Name()).append(".")
                        .append(String.valueOf(parentDIA.Q2Scan)).append(".")
                        .append(String.valueOf(parentDIA.Q2Scan)).append(".")
                        .append(String.valueOf(mSMSProcessing.Precursorcluster.Charge)).append("\n");
                for (int i = 0; i < Scan.PointCount(); i++) {
                    mgfString.append(String.valueOf(Scan.Data.get(i).getX())).append(" ")
                            .append(String.valueOf(Scan.Data.get(i).getY())).append("\n");
                }
                mgfString.append("END IONS\n\n");
                mapwriter2.write(parentDIA.Q2Scan + "_" + mSMSProcessing.Precursorcluster.Index + "\n");
                //                    mgfWriter2.write(mgfString.toString());
            }
        }
        mSMSProcessing.Precursorcluster.GroupedFragmentPeaks.clear();
    }
    //        mgfWriter2.close();
    //        mgfWriter.close();
    //        mapwriter.close();
    //        mapwriter2.close();
}

From source file:com.google.cloud.hadoop.gcsio.GoogleCloudStorageIntegrationHelper.java

/**
 * Creates objects with the given names in the given bucket.
 */
private void createObjects(final String bucketName, String[] objectNames) throws IOException {

    final ExecutorService threadPool = Executors.newCachedThreadPool();
    final CountDownLatch counter = new CountDownLatch(objectNames.length);
    List<Future<?>> futures = new ArrayList<>();
    // Do each creation asynchronously.
    for (final String objectName : objectNames) {
        Future<?> future = threadPool.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    if (objectName.endsWith(GoogleCloudStorage.PATH_DELIMITER)) {
                        mkdir(bucketName, objectName);
                    } else {
                        // Just use objectName as file contents.
                        writeTextFile(bucketName, objectName, objectName);
                    }
                } catch (Throwable ioe) {
                    throw new RuntimeException(
                            String.format("Exception creating %s/%s", bucketName, objectName), ioe);
                } finally {
                    counter.countDown();
                }
            }
        });
        futures.add(future);
    }

    try {
        counter.await();
    } catch (InterruptedException ie) {
        throw new IOException("Interrupted while awaiting object creation!", ie);
    } finally {
        threadPool.shutdown();
        try {
            if (!threadPool.awaitTermination(10L, TimeUnit.SECONDS)) {
                System.err.println("Failed to awaitTermination! Forcing executor shutdown.");
                threadPool.shutdownNow();
            }
        } catch (InterruptedException ie) {
            throw new IOException("Interrupted while shutting down threadpool!", ie);
        }
    }

    for (Future<?> future : futures) {
        try {
            // We should already be done.
            future.get(10, TimeUnit.MILLISECONDS);
        } catch (Exception e) {
            throw new IOException("Creation of file failed with exception", e);
        }
    }
}

From source file:org.apache.hadoop.mapred.TestJvmManager.java

/**
 * Create a bunch of tasks and use a special hash map to detect
 * racy access to the various internal data structures of JvmManager.
 * (Regression test for MAPREDUCE-2224)
 */
@Test
public void testForRaces() throws Exception {
    JvmManagerForType mapJvmManager = jvmManager.getJvmManagerForType(TaskType.MAP);

    // Sub out the HashMaps for maps that will detect racy access.
    mapJvmManager.jvmToRunningTask = new RaceHashMap<JVMId, TaskRunner>();
    mapJvmManager.runningTaskToJvm = new RaceHashMap<TaskRunner, JVMId>();
    mapJvmManager.jvmIdToRunner = new RaceHashMap<JVMId, JvmRunner>();

    // Launch a bunch of JVMs, but only allow MAP_SLOTS to run at once.
    final ExecutorService exec = Executors.newFixedThreadPool(MAP_SLOTS);
    final AtomicReference<Throwable> failed = new AtomicReference<Throwable>();

    for (int i = 0; i < MAP_SLOTS * 5; i++) {
        JobConf taskConf = new JobConf(ttConf);
        TaskAttemptID attemptID = new TaskAttemptID("test", 0, TaskType.MAP, i, 0);
        Task task = new MapTask(null, attemptID, i, null, 1);
        task.setConf(taskConf);
        TaskInProgress tip = tt.new TaskInProgress(task, taskConf);
        File pidFile = new File(TEST_DIR, "pid_" + i);
        final TaskRunner taskRunner = task.createRunner(tt, tip);
        // launch a jvm that runs the generated script (here just "echo hi")
        final Vector<String> vargs = new Vector<String>(2);
        vargs.add(writeScript("script_" + i, "echo hi\n", pidFile).getAbsolutePath());
        final File workDir = new File(TEST_DIR, "work_" + i);
        workDir.mkdir();
        final File stdout = new File(TEST_DIR, "stdout_" + i);
        final File stderr = new File(TEST_DIR, "stderr_" + i);

        // launch the process and wait in a thread, till it finishes
        Runnable launcher = new Runnable() {
            public void run() {
                try {
                    taskRunner.launchJvmAndWait(null, vargs, stdout, stderr, 100, workDir, null);
                } catch (Throwable t) {
                    failed.compareAndSet(null, t);
                    exec.shutdownNow();
                    return;
                }
            }
        };
        exec.submit(launcher);
    }

    exec.shutdown();
    exec.awaitTermination(3, TimeUnit.MINUTES);
    if (failed.get() != null) {
        throw new RuntimeException(failed.get());
    }
}