Example usage for com.google.common.util.concurrent ListeningExecutorService awaitTermination

Introduction

This page collects usage examples for the com.google.common.util.concurrent.ListeningExecutorService method awaitTermination.

Prototype

boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException;

Documentation

Blocks until all tasks have completed execution after a shutdown request, or the timeout occurs, or the current thread is interrupted, whichever happens first. Returns true if this executor terminated, and false if the timeout elapsed before termination.
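
A minimal, self-contained sketch of the typical call pattern, assuming a small fixed thread pool and an arbitrary 30-second timeout (neither value is prescribed by Guava):

import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

public class AwaitTerminationSketch {
    public static void main(String[] args) throws InterruptedException {
        ListeningExecutorService executor = MoreExecutors
                .listeningDecorator(Executors.newFixedThreadPool(4));

        executor.submit(() -> System.out.println("task running"));

        // Stop accepting new tasks; already-submitted tasks keep running.
        executor.shutdown();

        // Block until all tasks finish, the timeout elapses, or this
        // thread is interrupted, whichever happens first.
        if (!executor.awaitTermination(30, TimeUnit.SECONDS)) {
            // Timed out before termination: interrupt the remaining tasks.
            executor.shutdownNow();
        }
    }
}

Note that awaitTermination does not itself initiate shutdown: without a prior call to shutdown() or shutdownNow(), it simply waits out the timeout and returns false.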

Usage

From source file: org.n52.lod.triplestore.AbstractWorkerTripleSink.java

@Override
protected int addRecordsToModel(Map<String, GetRecordByIdResponseDocument> records, final Model m,
        final Report report) {
    final MutableInt counter = new MutableInt(0);
    final long modelSizeBefore = m.size();

    ListeningExecutorService executorService = MoreExecutors
            .listeningDecorator(Executors.newFixedThreadPool(NUM_THREADS));

    for (Entry<String, GetRecordByIdResponseDocument> entry : records.entrySet()) {

        final String id = entry.getKey();
        log.debug("Adding {} to the model", id);

        CallableMapper c = new CallableMapper(this.mapper.replicate(), entry.getValue());
        ListenableFuture<Model> modelFuture = executorService.submit(c);

        Futures.addCallback(modelFuture, new FutureCallback<Model>() {

            @Override
            public void onFailure(Throwable t) {
                log.error("Error mapping xml to model", t);
                report.issues.put(id, "Error while adding to model: " + t.getMessage());
            }

            @Override
            public void onSuccess(Model result) {
                log.trace("Adding result to model: {}", result);
                m.add(result);
                log.trace("ADDED result to mode.");

                counter.increment();
                report.added++;
                report.addedIds.add(id);
            }

        });
    }

    executorService.shutdown();
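    // Wait for every mapping task to finish; Long.MAX_VALUE nanoseconds makes the wait effectively unbounded.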
    while (!executorService.isTerminated()) {
        try {
            executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            log.error("Could not await termination", e);
        }
    }

    log.debug("Added {} of {} records to model {}, which now has size {} (before {})", counter, records.size(),
            m.getClass(), m.size(), modelSizeBefore);
    return counter.intValue();
}

From source file: com.github.rinde.rinsim.experiment.LocalComputer.java

@Override
public ExperimentResults compute(Builder builder, Set<SimArgs> inputs) {
    final ImmutableList.Builder<ExperimentRunner> runnerBuilder = ImmutableList.builder();
    for (final SimArgs args : inputs) {
        runnerBuilder.add(new ExperimentRunner(args));
    }

    final List<ExperimentRunner> runners = runnerBuilder.build();

    final int threads = Math.min(builder.numThreads, runners.size());
    final ListeningExecutorService executor;
    if (builder.showGui) {
        executor = MoreExecutors.newDirectExecutorService();
    } else {
        executor = MoreExecutors
                .listeningDecorator(Executors.newFixedThreadPool(threads, new LocalThreadFactory()));
    }

    final List<SimulationResult> results = Collections.synchronizedList(new ArrayList<SimulationResult>());
    final ResultCollector resultCollector = new ResultCollector(executor, results, builder.resultListeners);

    try {
        for (final ExperimentRunner r : runners) {
            checkForError(executor, resultCollector);
            final ListenableFuture<SimulationResult> f = executor.submit(r);
            Futures.addCallback(f, resultCollector);
        }
        while (results.size() < inputs.size() && !resultCollector.hasError()) {
            Thread.sleep(THREAD_SLEEP_TIME_MS);
        }
        checkForError(executor, resultCollector);
    } catch (final InterruptedException e) {
        LOGGER.trace("Interrupt, shutting down the executor.");
        executor.shutdownNow();
        LOGGER.trace("Waiting for executor to shutdown.");
        try {
            final boolean executorStopped = executor.awaitTermination(MAX_WAIT_FOR_SHUTDOWN_S,
                    TimeUnit.SECONDS);
            if (executorStopped) {
                LOGGER.trace("Executor is shutdown.");
            } else {
                LOGGER.warn("Executor did not stop, timed out after {} seconds.", MAX_WAIT_FOR_SHUTDOWN_S);
            }
        } catch (final InterruptedException e1) {
            LOGGER.warn("Waiting for executor to shutdown is interrupted.");
        }
        return ExperimentResults.create(builder, ImmutableSet.<SimulationResult>of());
    }

    checkForError(executor, resultCollector);
    executor.shutdown();

    final ExperimentResults er = ExperimentResults.create(builder, ImmutableSet.copyOf(results));
    for (final ResultListener rl : builder.resultListeners) {
        rl.doneComputing(er);
    }
    return er;
}

From source file: de.softwareforge.kafka.LoadCommand.java

@Override
public void execute() throws Exception {
    Logging logging = Logging.initialize();
    logging.configure(new LoggingConfiguration());
    new LoggingMBean().setLevel("kafka", "ERROR");

    String tableNames = loaderOptions.tables;
    final Map<String, TpchTable<?>> allTables = ImmutableMap
            .copyOf(Maps.uniqueIndex(TpchTable.getTables(), new Function<TpchTable<?>, String>() {
                @Override
                public String apply(@Nonnull TpchTable<?> input) {
                    return input.getTableName();
                }
            }));

    List<String> tables;
    if (tableNames == null) {
        tables = ImmutableList.copyOf(allTables.keySet());
    } else {
        ImmutableList.Builder<String> builder = ImmutableList.builder();
        for (String tableName : Splitter.on(",").omitEmptyStrings().trimResults().split(tableNames)) {
            checkState(allTables.keySet().contains(tableName), "Table %s is unknown", tableName);
            builder.add(tableName);
        }
        tables = builder.build();
    }

    LOG.info("Processing tables: %s", tables);

    Properties props = new Properties();
    props.put("metadata.broker.list", loaderOptions.brokers);
    props.put("serializer.class", StringEncoder.class.getName());
    props.put("key.serializer.class", LongEncoder.class.getName());
    props.put("partitioner.class", LongPartitioner.class.getName());
    props.put("serializer.encoding", "UTF8");
    props.put("request.required.acks", "1");
    ProducerConfig producerConfig = new ProducerConfig(props);

    final ObjectMapper mapper = objectMapperProvider.get();
    mapper.enable(MapperFeature.AUTO_DETECT_GETTERS);

    final Producer<Long, String> producer = new Producer<>(producerConfig);

    ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool());

    ImmutableList.Builder<ListenableFuture<Long>> futureBuilder = ImmutableList.builder();

    for (final String table : tables) {
        ListenableFuture<Long> future = executor.submit(new Callable<Long>() {
            @Override
            public Long call() throws Exception {
                TpchTable<?> tpchTable = allTables.get(table);
                LOG.info("Loading table '%s' into topic '%s%s'...", table, loaderOptions.prefix, table);
                long count = 0;

                for (List<? extends TpchEntity> partition : Iterables.partition(
                        tpchTable.createGenerator(loaderOptions.tpchType.getScaleFactor(), 1, 1), 100)) {
                    ImmutableList.Builder<KeyedMessage<Long, String>> builder = ImmutableList.builder();
                    for (TpchEntity o : partition) {
                        builder.add(new KeyedMessage<>(loaderOptions.prefix + table, count++,
                                mapper.writeValueAsString(o)));
                    }
                    producer.send(builder.build());
                }
                LOG.info("Generated %d rows for table '%s'.", count, table);
                return count;
            }
        });
        futureBuilder.add(future);
    }

    Futures.allAsList(futureBuilder.build()).get();
    executor.shutdown();
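    // All load tasks have already completed (allAsList().get() above); allow up to a day for the pool threads to exit.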
    executor.awaitTermination(1, TimeUnit.DAYS);
    producer.close();
}

From source file: org.n52.lod.csw.CSWLoDEnabler.java

protected Map<String, GetRecordByIdResponseDocument> retrieveRecordsThreaded(int startPos, int maxRecords,
        long recordsInTotal) {
    log.info("Retrieve {} records, starting from {} of {}", maxRecords, startPos, recordsInTotal);

    // one thread for getting ids
    List<String> recordIdList = getRecordIds(startPos, maxRecords);

    // many threads getting records descriptions
    final Map<String, GetRecordByIdResponseDocument> recordDescriptions = Maps.newConcurrentMap();

    ListeningExecutorService executorService = MoreExecutors
            .listeningDecorator(Executors.newFixedThreadPool(maxRecords));

    for (String id : recordIdList) {
        final String recordId = id;
        log.debug("Adding {} to the model", recordId);

        CallableRecordDescription c = new CallableRecordDescription(id, csw);
        ListenableFuture<GetRecordByIdResponseDocument> responseFuture = executorService.submit(c);

        Futures.addCallback(responseFuture, new FutureCallback<GetRecordByIdResponseDocument>() {

            private final Logger logger = LoggerFactory.getLogger("Record Downloader");

            @Override
            public void onFailure(Throwable t) {
                logger.error("Error retrieving and parsing record {}", t);
                report.retrievalIssues.put(recordId, t);
            }

            @Override
            public void onSuccess(GetRecordByIdResponseDocument result) {
                logger.trace("SUCCESS with {}", result);
                recordDescriptions.put(recordId, result);

                report.added++;
                report.addedIds.add(recordId);
            }

        });
    }

    executorService.shutdown();
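    // Wait for every record-download task to finish; the huge timeout makes the wait effectively unbounded.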
    while (!executorService.isTerminated()) {
        try {
            executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            log.error("Could not await termination", e);
        }
    }

    log.info("Done with requests and parsing, have {} GetRecordById documents.", recordDescriptions.size());
    return recordDescriptions;
}

From source file: edu.dirla.app.ws.rest.services.LogsRestService.java

@ResponseBody
@RequestMapping(method = RequestMethod.POST)
public DataTrafficResult addLogs(@RequestBody CheckTrafficRep checkTrafficRep) {

    List<LogData> results = null;
    long t1 = Calendar.getInstance().getTimeInMillis();

    final List<String> filesToUpload = checkTrafficRep.getFilesToUpload();

    ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(3));
    for (final String fileName : filesToUpload) {

        Callable<Integer> job = new Callable<Integer>() {

            @Override
            public Integer call() throws Exception {
                List<LogData> lines = new ArrayList<LogData>();
                try {
                    lines.addAll(readFile(fileName));
                } catch (IOException e) {
                    e.printStackTrace();
                }

                Map<String, Long> data = new HashMap<String, Long>();

                for (LogData res : lines) {
                    String key = res.getDomain();
                    long value = res.getSize();
                    Long oldValue = data.get(key);
                    data.put(key, value + (oldValue != null ? oldValue : 0));
                }

                logsService.pushLogs("accessLogs." + fileName, data);

                return 0;
            }
        }; // create the job here
        ListenableFuture<Integer> completion = executor.submit(job);
        Futures.addCallback(completion, new FutureCallback<Integer>() {

            @Override
            public void onFailure(Throwable t) {
                // log error
            }

            @Override
            public void onSuccess(Integer result) {
                // do something with the result
            }

        });
    }
    executor.shutdown();
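    // Block until all file-processing jobs are done before computing the traffic result below.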
    while (!executor.isTerminated()) {
        try {
            executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }

    UserDataTrafficResult userTrafficData = logsService.checkDataTraffic(checkTrafficRep.getClientAddress());

    long t2 = Calendar.getInstance().getTimeInMillis();

    DataTrafficResult dtr = new DataTrafficResult();
    dtr.setCheckTrafficRequest(checkTrafficRep);
    dtr.setTrafficValue(userTrafficData);
    dtr.setTime(t2 - t1);
    return dtr;
}

From source file: org.glowroot.central.RollupService.java

@Override
public void run() {
    Session.setInRollupThread(true);
    int counter = 0;
    int numWorkerThreads = INITIAL_WORKER_THREADS;
    ListeningExecutorService workerExecutor = newWorkerExecutor(numWorkerThreads);
    while (!closed) {
        try {
            MILLISECONDS.sleep(millisUntilNextRollup(clock.currentTimeMillis()));
            // perform larger sweep approx every 100 minutes
            long lastXMillis = counter++ % 100 == 0 ? DAYS.toMillis(7) : MINUTES.toMillis(30);
            Stopwatch stopwatch = Stopwatch.createStarted();
            List<AgentRollup> agentRollups = activeAgentDao.readRecentlyActiveAgentRollups(lastXMillis);
            runInternal(agentRollups, workerExecutor);
            long elapsedInSeconds = stopwatch.elapsed(SECONDS);
            int oldNumWorkerThreads = numWorkerThreads;
            if (elapsedInSeconds > 300) {
                if (numWorkerThreads < MAX_WORKER_THREADS) {
                    numWorkerThreads++;
                } else {
                    logger.warn(
                            "rolling up data across {} agent rollup took {} seconds (using" + " {} threads)",
                            count(agentRollups), elapsedInSeconds, numWorkerThreads);
                }
            } else if (elapsedInSeconds < 60 && numWorkerThreads > MIN_WORKER_THREADS) {
                numWorkerThreads--;
            }
            if (numWorkerThreads != oldNumWorkerThreads) {
                ExecutorService oldWorkerExecutor = workerExecutor;
                workerExecutor = newWorkerExecutor(numWorkerThreads);
                oldWorkerExecutor.shutdown();
                if (!oldWorkerExecutor.awaitTermination(10, SECONDS)) {
                    logger.error("timed out waiting for old worker rollup thread to terminate");
                }
            }
        } catch (InterruptedException e) {
            // probably shutdown requested (see close method below)
            logger.debug(e.getMessage(), e);
            continue;
        } catch (Throwable t) {
            // this probably should never happen since runInternal catches and logs exceptions
            logger.error(t.getMessage(), t);
        }
    }
    // shutdownNow() is needed here to send interrupt to worker rollup thread
    workerExecutor.shutdownNow();
    try {
        if (!workerExecutor.awaitTermination(10, SECONDS)) {
            throw new IllegalStateException("Timed out waiting for worker rollup thread to terminate");
        }
    } catch (InterruptedException e) {
        // this is unexpected (but not harmful since already closing)
        logger.error(e.getMessage(), e);
    }
}

From source file: com.github.rinde.datgen.pdptw.DatasetGenerator.java

Dataset<GeneratedScenario> doGenerate() {

    final ListeningExecutorService service = MoreExecutors
            .listeningDecorator(Executors.newFixedThreadPool(builder.numThreads));
    final Dataset<GeneratedScenario> dataset = Dataset.naturalOrder();

    final List<ScenarioCreator> jobs = new ArrayList<>();

    final RandomGenerator rng = new MersenneTwister(builder.randomSeed);
    final Map<GeneratorSettings, IdSeedGenerator> rngMap = new LinkedHashMap<>();

    for (final Long urgency : builder.urgencyLevels) {
        for (final Double scale : builder.scaleLevels) {
            for (final Entry<TimeSeriesType, Collection<Range<Double>>> dynLevel : builder.dynamismLevels
                    .asMap().entrySet()) {

                final int reps = builder.numInstances * dynLevel.getValue().size();

                final long urg = urgency * 60 * 1000L;
                // The office hours is the period in which new orders are accepted,
                // it is defined as [0,officeHoursLength).
                final long officeHoursLength;
                if (urg < halfDiagTT) {
                    officeHoursLength = builder.scenarioLengthMs - twoDiagTT - PICKUP_DURATION
                            - DELIVERY_DURATION;
                } else {
                    officeHoursLength = builder.scenarioLengthMs - urg - oneAndHalfDiagTT - PICKUP_DURATION
                            - DELIVERY_DURATION;
                }

                final int numOrders = DoubleMath.roundToInt(scale * numOrdersPerScale,
                        RoundingMode.UNNECESSARY);

                final ImmutableMap.Builder<String, String> props = ImmutableMap.builder();

                props.put("expected_num_orders", Integer.toString(numOrders));
                props.put("pickup_duration", Long.toString(PICKUP_DURATION));
                props.put("delivery_duration", Long.toString(DELIVERY_DURATION));
                props.put("width_height", String.format("%1.1fx%1.1f", AREA_WIDTH, AREA_WIDTH));

                // TODO store this in TimeSeriesType?
                final RangeSet<Double> rset = TreeRangeSet.create();
                for (final Range<Double> r : dynLevel.getValue()) {
                    rset.add(r);
                }

                // createTimeSeriesGenerator(dynLevel.getKey(), officeHoursLength,
                // numOrders, numOrdersPerScale, props);

                final GeneratorSettings set = GeneratorSettings.builder().setDayLength(builder.scenarioLengthMs)
                        .setOfficeHours(officeHoursLength).setTimeSeriesType(dynLevel.getKey())
                        .setDynamismRangeCenters(builder.dynamismRangeMap.subRangeMap(rset.span()))
                        .setUrgency(urg).setScale(scale).setNumOrders(numOrders).setProperties(props.build())
                        .build();

                final IdSeedGenerator isg = new IdSeedGenerator(rng.nextLong());
                rngMap.put(set, isg);

                for (int i = 0; i < reps; i++) {
                    final LocationGenerator lg = Locations.builder().min(0d).max(AREA_WIDTH).buildUniform();

                    final TimeSeriesGenerator tsg2 = createTimeSeriesGenerator(dynLevel.getKey(),
                            officeHoursLength, numOrders, numOrdersPerScale,
                            ImmutableMap.<String, String>builder());
                    final ScenarioGenerator gen = createGenerator(officeHoursLength, urg, scale, tsg2,
                            set.getDynamismRangeCenters(), lg, builder, numOrdersPerScale);

                    jobs.add(ScenarioCreator.create(isg.next(), set, gen));
                }
            }
        }
    }

    final AtomicLong currentJobs = new AtomicLong(0L);
    final AtomicLong datasetSize = new AtomicLong(0L);

    LOGGER.info(" - Submitting " + jobs.size() + " Jobs");
    for (final ScenarioCreator job : jobs) {
        submitJob(currentJobs, service, job, builder.numInstances, dataset, rngMap, datasetSize);
    }

    final long targetSize = builder.numInstances * builder.dynamismLevels.values().size()
            * builder.scaleLevels.size() * builder.urgencyLevels.size();
    while (datasetSize.get() < targetSize || dataset.size() < targetSize) {
        try {
            // LOGGER.info(" - Waiting, current size ==" + dataset.size());
            Thread.sleep(THREAD_SLEEP_DURATION);
        } catch (final InterruptedException e) {
            throw new IllegalStateException(e);
        }
    }

    LOGGER.info(" - Shutdown Service, Awaiting Termination");
    service.shutdown();
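    // The dataset is complete at this point (see the polling loop above); allow up to an hour for worker threads to terminate.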
    try {
        service.awaitTermination(1L, TimeUnit.HOURS);
    } catch (final InterruptedException e) {
        throw new IllegalStateException(e);
    }

    LOGGER.info(" - Returning dataset");

    return dataset;
}