Example usage for java.util.concurrent ExecutorCompletionService ExecutorCompletionService

Introduction

This page collects example usages of the java.util.concurrent ExecutorCompletionService(Executor) constructor.

Prototype

public ExecutorCompletionService(Executor executor) 

Document

Creates an ExecutorCompletionService using the supplied executor for base task execution and a LinkedBlockingQueue as a completion queue.

Usage

From source file:com.netflix.curator.framework.recipes.locks.TestReaper.java

private void testSimulationWithLocks(String namespace) throws Exception {
    final int LOCK_CLIENTS = 10;
    final int ITERATIONS = 250;
    final int MAX_WAIT_MS = 10;

    ExecutorService service = Executors.newFixedThreadPool(LOCK_CLIENTS);
    ExecutorCompletionService<Object> completionService = new ExecutorCompletionService<Object>(service);

    Timing timing = new Timing();
    Reaper reaper = null;
    final CuratorFramework client = makeClient(timing, namespace);
    try {
        client.start();

        reaper = new Reaper(client, MAX_WAIT_MS / 2);
        reaper.start();
        reaper.addPath("/a/b");

        for (int i = 0; i < LOCK_CLIENTS; ++i) {
            completionService.submit(new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                    final InterProcessMutex lock = new InterProcessMutex(client, "/a/b");
                    for (int i = 0; i < ITERATIONS; ++i) {
                        lock.acquire();
                        try {
                            Thread.sleep((int) (Math.random() * MAX_WAIT_MS));
                        } finally {
                            lock.release();
                        }
                    }
                    return null;
                }
            });
        }

        for (int i = 0; i < LOCK_CLIENTS; ++i) {
            completionService.take().get();
        }

        Thread.sleep(timing.session());
        timing.sleepABit();

        Stat stat = client.checkExists().forPath("/a/b");
        Assert.assertNull("Child qty: " + ((stat != null) ? stat.getNumChildren() : 0), stat);
    } finally {
        service.shutdownNow();
        IOUtils.closeQuietly(reaper);
        IOUtils.closeQuietly(client);
    }
}

From source file:nl.umcg.westrah.binarymetaanalyzer.BinaryMetaAnalysis.java

public void run() throws IOException {
    initialize();
    loadProbeAnnotation();

    String outdir = settings.getOutput();
    if (usetmp) {
        outdir = tempDir;
    }

    System.out.println("Placing output here: " + outdir);
    outdir = Gpio.formatAsDirectory(outdir);
    Gpio.createDir(outdir);

    System.out.println(
            "Permutations: " + settings.getStartPermutations() + " until " + settings.getNrPermutations());

    String zscoretableheader = null;
    if (settings.isMakezscoretable()) {
        StringBuilder builder = new StringBuilder();
        builder.append("SNP\tAlleles\tAlleleAssessed");
        for (int t = 0; t < traitList.length; t++) {
            builder.append("\t").append(traitList[t].getMetaTraitName()).append("_")
                    .append(traitList[t].getAnnotation());
        }
        zscoretableheader = builder.toString();
    }

    int availableProcessors = Runtime.getRuntime().availableProcessors();
    int cores = settings.getNrThreads();
    if (cores < 1) {
        cores = 1;
    } else if (cores > availableProcessors) {
        cores = availableProcessors;
    }

    System.out.println("Will try to make use of " + cores + " CPU cores");
    System.out.println();

    HashSet<QTLPair> prevSet = null;
    for (int permutation = settings.getStartPermutations(); permutation <= settings
            .getNrPermutations(); permutation++) {
        // load probe annotation and index
        // this particular probe annotation can take multiple probes for a single location into account.

        HashSet<QTLPair> set = new HashSet<>();

        Descriptives.initializeZScoreToPValue();

        // re-initialize for each permutation, just to be sure
        if (permutation > settings.getStartPermutations()) {
            initialize();
            System.out.println("Loading probe annotation from: " + settings.getProbetranslationfile());
            loadProbeAnnotation();
            if (traitList.length == 0) {
                System.err.println("Error: no annotation loaded.");
                System.exit(-1);
            }
        }
        //         clearResultsBuffer();

        // create dataset objects
        System.out.println("Running permutation " + permutation);
        datasets = new BinaryMetaAnalysisDataset[settings.getDatasetlocations().size()];

        System.out.println("Loading datasets");
        for (int d = 0; d < datasets.length; d++) {
            datasets[d] = new BinaryMetaAnalysisDataset(settings.getDatasetlocations().get(d),
                    settings.getDatasetnames().get(d), settings.getDatasetPrefix().get(d), permutation,
                    settings.getDatasetannotations().get(d), probeAnnotation,
                    settings.getFeatureOccuranceScaleMaps().get(d));
        }

        System.out.println("Loaded " + datasets.length + " datasets");

        // create meta-analysis SNP index. have to recreate this every permutation,
        // since the order of SNPs is generated at random.
        System.out.println("Creating SNP index");
        createSNPIndex(outdir);
        System.out.println("Total of " + snpIndex.length + " SNPs");

        System.out.println("Creating probe index");
        createProbeIndex(outdir);
        System.out.println("Total of " + probeIndex.length + " probes");

        // make index of snp/probe combinations, if any specified
        createSNPProbeCombos(outdir);

        // load SNP annotation for SNPs present in dataset
        //         if (snpChr == null) {
        System.out.println("Loading SNP annotation from " + settings.getSNPAnnotationFile());
        loadSNPAnnotation();
        //         }

        // run analysis
        System.out.println("Type of analysis: " + settings.getAnalysisType());
        System.out.println("Cis-window: " + settings.getCisdistance());
        System.out.println("Trans-window: " + settings.getTransdistance());

        TextFile zscoreTableTf = null;
        TextFile zscoreTableTfNrSamples = null;

        if (settings.isMakezscoretable()) {

            String tableoutfile = outdir + "ZScoreMatrix-Permutation" + permutation + ".txt.gz";
            String tableoutfileNrSamples = outdir + "ZScoreMatrixNrSamples-Permutation" + permutation
                    + ".txt.gz";
            if (permutation == 0) {
                tableoutfile = outdir + "ZScoreMatrix.txt.gz";
                tableoutfileNrSamples = outdir + "ZScoreMatrixNrSamples.txt.gz";
            }
            System.out.println("Writing z-score table: " + tableoutfile);
            zscoreTableTf = new TextFile(tableoutfile, TextFile.W, 10 * 1048576);
            zscoreTableTfNrSamples = new TextFile(tableoutfileNrSamples, TextFile.W, 10 * 1048576);

            // write header
            zscoreTableTf.writeln(zscoretableheader);
            zscoreTableTfNrSamples.writeln(zscoretableheader);
        }

        ExecutorService threadPool = Executors.newFixedThreadPool(cores);
        CompletionService<Triple<ArrayList<QTL>, String, String>> pool = new ExecutorCompletionService<Triple<ArrayList<QTL>, String, String>>(
                threadPool);

        maxSavedPvalue = -Double.MAX_VALUE;
        locationToStoreResult = 0;
        bufferHasOverFlown = false;
        System.out.println("Max P: " + maxSavedPvalue + "\tLocationToStoreResult: " + locationToStoreResult);

        System.out.println("Starting meta-analysis");
        ProgressBar pb = new ProgressBar(snpList.length);
        int returned = 0;
        ArrayList<Future> futures = new ArrayList<>();
        for (int snp = 0; snp < snpList.length; snp++) {
            // this can go in different threads..
            boolean outputallzscores = true;
            if (permutation > 0) {
                outputallzscores = fullpermutationoutput;
            }
            BinaryMetaAnalysisTask t = new BinaryMetaAnalysisTask(settings, probeAnnotation, datasets, snpIndex,
                    snpList, snpChr, snpPositions, probeIndex, snpprobeCombos, traitMap, traitList, snp, DEBUG,
                    outputallzscores);
            futures.add(pool.submit(t));
        }

        // give the threadpool the signal to shutdown
        threadPool.shutdown();

        int addcalled = 0;
        while (returned < snpList.length) {
            try {
                Future<Triple<ArrayList<QTL>, String, String>> threadfuture = pool.take();
                if (threadfuture != null) {
                    Triple<ArrayList<QTL>, String, String> result = threadfuture.get();

                    for (QTL q : result.getLeft()) {
                        if (!DEBUG) {
                            addEQTL(q);
                        } else {

                            //                        int snpid = q.getSNPId();
                            //                        MetaQTL4MetaTrait trait = q.getMetaTrait();

                            //                        QTLPair combo = new QTLPair();
                            //                        combo.snpid = snpid;
                            //                        combo.trait = trait;
                            //                        set.add(combo);

                        }

                        addcalled++;
                    }
                    if (settings.isMakezscoretable()) {
                        zscoreTableTf.writeln(result.getMiddle());

                        zscoreTableTfNrSamples.writeln(result.getRight());
                    }
                    result = null;
                    returned++;
                    pb.iterate();
                }
                threadfuture = null;
            } catch (InterruptedException e) {
                e.printStackTrace();
            } catch (ExecutionException e) {
                e.printStackTrace();
            }
        }
        pb.close();

        if (DEBUG) {
            if (prevSet != null) {
                // compare sets
                TextFile tf = new TextFile(outdir + "debug-p" + permutation + ".txt", TextFile.W);
                for (QTLPair p : prevSet) {
                    if (!set.contains(p)) {
                        tf.writeln(snpList[p.snpid] + "\t" + p.trait.getMetaTraitName());
                    }
                }
                tf.close();
            }
            prevSet = set;
        }

        System.out.println("Snps returned: " + returned + "\tNr of snps submitted: " + snpList.length
                + "\tNr of eQTLs evaluated: " + addcalled);
        System.out.println("Max P: " + maxSavedPvalue + "\tLocationToStoreResult: " + locationToStoreResult);

        if (settings.isMakezscoretable()) {
            zscoreTableTf.close();
            zscoreTableTfNrSamples.close();

            if (usetmp) {

                String filename = "ZScoreMatrix-Permutation" + permutation + ".txt.gz";
                if (permutation == 0) {
                    filename = "ZScoreMatrix.txt.gz";
                }
                File source = new File(tempDir + filename);
                File dest = new File(settings.getOutput() + filename);
                if (dest.exists()) {
                    System.out.println(
                            "Destination file: " + dest.getAbsolutePath() + " exists already.. Deleting!");
                    dest.delete();
                }
                System.out.println(
                        "Moving file: " + tempDir + filename + " --> " + settings.getOutput() + filename);
                FileUtils.moveFile(source, dest);

                filename = "ZScoreMatrixNrSamples-Permutation" + permutation + ".txt.gz";
                if (permutation == 0) {
                    filename = "ZScoreMatrixNrSamples.txt.gz";
                }
                source = new File(tempDir + filename);
                dest = new File(settings.getOutput() + filename);
                if (dest.exists()) {
                    System.out.println(
                            "Destination file: " + dest.getAbsolutePath() + " exists already.. Deleting!");
                    dest.delete();
                }
                System.out.println(
                        "Moving file: " + tempDir + filename + " --> " + settings.getOutput() + filename);
                FileUtils.moveFile(source, dest);
            }
        }

        for (BinaryMetaAnalysisDataset dataset : datasets) {
            dataset.close();
        }

        if (!DEBUG) {
            writeBuffer(outdir, permutation);

        }
    }
    if (usetmp) {
        // move remaining contents of tmp dir to final directory
        File source = new File(tempDir);
        File dest = new File(settings.getOutput());
        FileUtils.copyDirectory(source, dest);
        FileUtils.cleanDirectory(source);
    }
}

From source file:org.apache.nifi.cluster.manager.impl.HttpRequestReplicatorImpl.java

private Set<NodeResponse> replicateHelper(final Set<NodeIdentifier> nodeIds, final String method,
        final String scheme, final String path, final Map<String, List<String>> parameters, final Object entity,
        final Map<String, String> headers) throws UriConstructionException {

    if (nodeIds.isEmpty()) {
        return new HashSet<>(); // return quickly for trivial case
    }

    final CompletionService<NodeResponse> completionService = new ExecutorCompletionService<>(executorService);

    // keeps track of future requests so that failed requests can be tied back to the failing node
    final Collection<NodeHttpRequestFutureWrapper> futureNodeHttpRequests = new ArrayList<>();

    // construct the URIs for the nodes
    final Map<NodeIdentifier, URI> uriMap = new HashMap<>();
    try {
        for (final NodeIdentifier nodeId : nodeIds) {
            final URI nodeUri = new URI(scheme, null, nodeId.getApiAddress(), nodeId.getApiPort(), path,
                    /* query */ null, /* fragment */ null);
            uriMap.put(nodeId, nodeUri);
        }
    } catch (final URISyntaxException use) {
        throw new UriConstructionException(use);
    }

    // submit the requests to the nodes
    final String requestId = UUID.randomUUID().toString();
    headers.put(WebClusterManager.REQUEST_ID_HEADER, requestId);
    for (final Map.Entry<NodeIdentifier, URI> entry : uriMap.entrySet()) {
        final NodeIdentifier nodeId = entry.getKey();
        final URI nodeUri = entry.getValue();
        final NodeHttpRequestCallable callable = (entity == null)
                ? new NodeHttpRequestCallable(nodeId, method, nodeUri, parameters, headers)
                : new NodeHttpRequestCallable(nodeId, method, nodeUri, entity, headers);
        futureNodeHttpRequests.add(
                new NodeHttpRequestFutureWrapper(nodeId, method, nodeUri, completionService.submit(callable)));
    }

    // get the node responses
    final Set<NodeResponse> result = new HashSet<>();
    for (int i = 0; i < nodeIds.size(); i++) {

        // keeps track of the original request information in case we receive an exception
        NodeHttpRequestFutureWrapper futureNodeHttpRequest = null;
        try {

            // get the future resource response for the node
            final Future<NodeResponse> futureNodeResourceResponse = completionService.take();

            // find the original request by comparing the submitted future with the future returned by the completion service
            for (final NodeHttpRequestFutureWrapper futureNodeHttpRequestElem : futureNodeHttpRequests) {
                if (futureNodeHttpRequestElem.getFuture() == futureNodeResourceResponse) {
                    futureNodeHttpRequest = futureNodeHttpRequestElem;
                }
            }

            // try to retrieve the node response and add to result
            final NodeResponse nodeResponse = futureNodeResourceResponse.get();
            result.add(nodeResponse);

        } catch (final InterruptedException | ExecutionException ex) {

            logger.warn(
                    "Node request for " + futureNodeHttpRequest.getNodeId() + " encountered exception: " + ex,
                    ex);

            // create node response with the thrown exception and add to result
            final NodeResponse nodeResponse = new NodeResponse(futureNodeHttpRequest.getNodeId(),
                    futureNodeHttpRequest.getHttpMethod(), futureNodeHttpRequest.getRequestUri(), ex);
            result.add(nodeResponse);

        }
    }

    if (logger.isDebugEnabled()) {
        NodeResponse min = null;
        NodeResponse max = null;
        long nanosSum = 0L;
        int nanosAdded = 0;

        for (final NodeResponse response : result) {
            final long requestNanos = response.getRequestDuration(TimeUnit.NANOSECONDS);
            final long minNanos = (min == null) ? -1 : min.getRequestDuration(TimeUnit.NANOSECONDS);
            final long maxNanos = (max == null) ? -1 : max.getRequestDuration(TimeUnit.NANOSECONDS);

            if (requestNanos < minNanos || minNanos < 0L) {
                min = response;
            }

            if (requestNanos > maxNanos || maxNanos < 0L) {
                max = response;
            }

            if (requestNanos >= 0L) {
                nanosSum += requestNanos;
                nanosAdded++;
            }
        }

        final StringBuilder sb = new StringBuilder();
        sb.append("Node Responses for ").append(method).append(" ").append(path).append(" (Request ID ")
                .append(requestId).append("):\n");
        for (final NodeResponse response : result) {
            sb.append(response).append("\n");
        }

        final long averageNanos = (nanosAdded == 0) ? -1L : nanosSum / nanosAdded;
        final long averageMillis = (averageNanos < 0) ? averageNanos
                : TimeUnit.MILLISECONDS.convert(averageNanos, TimeUnit.NANOSECONDS);
        logger.debug("For {} {} (Request ID {}), minimum response time = {}, max = {}, average = {} ms", method,
                path, requestId, min, max, averageMillis);
        logger.debug(sb.toString());
    }

    return result;
}

From source file:com.baidu.rigel.biplatform.tesseract.isservice.search.service.impl.CallbackSearchServiceImpl.java

/**
 * Executes a callback query.
 *
 * @param context the query context
 * @param query the query request
 * @return the search result set
 * @throws IndexAndSearchException if an exception occurs while executing the query
 */
public SearchIndexResultSet query(QueryContext context, QueryRequest query) throws IndexAndSearchException {
    LOGGER.info(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_BEGIN, "callbackquery",
            "[callbackquery:" + query + "]"));
    if (query == null || context == null || StringUtils.isEmpty(query.getCubeId())) {
        LOGGER.error(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_EXCEPTION, "callbackquery",
                "[callbackquery:" + query + "]"));
        throw new IndexAndSearchException(
                TesseractExceptionUtils.getExceptionMessage(IndexAndSearchException.QUERYEXCEPTION_MESSAGE,
                        IndexAndSearchExceptionType.ILLEGALARGUMENT_EXCEPTION),
                IndexAndSearchExceptionType.ILLEGALARGUMENT_EXCEPTION);
    }
    // TODO
    if (query.getGroupBy() == null || query.getSelect() == null) {
        return null;
    }
    Map<String, String> requestParams = ((QueryContextAdapter) context).getQuestionModel().getRequestParams();
    // Build query target map
    Map<String, List<MiniCubeMeasure>> callbackMeasures = context.getQueryMeasures().stream()
            .filter(m -> m.getType().equals(MeasureType.CALLBACK)).map(m -> {
                CallbackMeasure tmp = (CallbackMeasure) m;
                for (Map.Entry<String, String> entry : tmp.getCallbackParams().entrySet()) {
                    if (requestParams.containsKey(entry.getKey())) {
                        tmp.getCallbackParams().put(entry.getKey(), requestParams.get(entry.getKey()));
                    }
                }
                return m;
            }).collect(Collectors.groupingBy(c -> ((CallbackMeasure) c).getCallbackUrl(), Collectors.toList()));
    if (callbackMeasures == null || callbackMeasures.isEmpty()) {
        LOGGER.error(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_EXCEPTION, "Empty callback measure",
                "[callbackquery:" + query + "]"));
        throw new IndexAndSearchException(
                TesseractExceptionUtils.getExceptionMessage(IndexAndSearchException.QUERYEXCEPTION_MESSAGE,
                        IndexAndSearchExceptionType.ILLEGALARGUMENT_EXCEPTION),
                IndexAndSearchExceptionType.ILLEGALARGUMENT_EXCEPTION);
    }
    LOGGER.info("Find callback targets " + callbackMeasures);

    // Keep group-by sequence.
    List<String> groupby = new ArrayList<String>(query.getGroupBy().getGroups());
    LinkedHashMap<String, List<String>> groupbyParams = new LinkedHashMap<String, List<String>>(groupby.size());
    for (String g : groupby) {
        groupbyParams.put(g, new ArrayList<String>());
    }

    LinkedHashMap<String, List<String>> whereParams = new LinkedHashMap<String, List<String>>();
    for (Expression e : query.getWhere().getAndList()) {
        List<String> l = e.getQueryValues().stream().filter(v -> !StringUtils.isEmpty(v.getValue()))
                .map(v -> v.getValue()).collect(Collectors.toList());
        if (groupbyParams.containsKey(e.getProperties())) {
            // if not contains SUMMARY_KEY, add it into group by list
            if (!l.contains(TesseractConstant.SUMMARY_KEY)) {
                l.add(TesseractConstant.SUMMARY_KEY);
            }
            // Put it into group by field
            groupbyParams.get(e.getProperties()).addAll(l);
        } else {
            // Put it into filter field
            if (CollectionUtils.isEmpty(l)) {
                List<Set<String>> tmp = e.getQueryValues().stream().map(v -> v.getLeafValues())
                        .collect(Collectors.toList());
                List<String> values = Lists.newArrayList();
                tmp.forEach(t -> values.addAll(t));
                whereParams.put(e.getProperties(), values);
            } else {
                whereParams.put(e.getProperties(), new ArrayList<String>(l));
            }
        }
    }

    // Prepare query tools
    //        CountDownLatch latch = new CountDownLatch(response.size());
    //        List<Future<CallbackResponse>> results = Lists.newArrayList();
    Map<CallbackExecutor, Future<CallbackResponse>> results = Maps.newHashMap();
    ExecutorCompletionService<CallbackResponse> service = new ExecutorCompletionService<CallbackResponse>(
            taskExecutor);
    StringBuilder callbackMeasureNames = new StringBuilder();
    for (Entry<String, List<MiniCubeMeasure>> e : callbackMeasures.entrySet()) {
        CallbackExecutor ce = new CallbackExecutor(e, groupbyParams, whereParams);
        results.put(ce, service.submit(ce));
        e.getValue().forEach(m -> {
            callbackMeasureNames.append(" " + m.getCaption() + " ");
        });
    }
    //        }
    Map<CallbackExecutor, CallbackResponse> response = new ConcurrentHashMap<CallbackExecutor, CallbackResponse>(
            callbackMeasures.size());
    StringBuffer sb = new StringBuffer();
    results.forEach((k, v) -> {
        try {
            response.put(k, v.get());
        } catch (Exception e1) {
            LOGGER.error(e1.getMessage(), e1);
            sb.append(": " + callbackMeasureNames.toString()
                    + " ??, ?");
        }
    });
    if (!StringUtils.isEmpty(sb.toString())) {
        if (ThreadLocalPlaceholder.getProperty(ThreadLocalPlaceholder.ERROR_MSG_KEY) != null) {
            ThreadLocalPlaceholder.unbindProperty(ThreadLocalPlaceholder.ERROR_MSG_KEY);
        }
        ThreadLocalPlaceholder.bindProperty(ThreadLocalPlaceholder.ERROR_MSG_KEY, sb.toString());
    }
    // Package result
    SqlQuery sqlQuery = QueryRequestUtil.transQueryRequest2SqlQuery(query);
    SearchIndexResultSet result = null;
    if (!response.isEmpty()) {
        result = packageResultRecords(query, sqlQuery, response);
    } else {
        result = new SearchIndexResultSet(new Meta(query.getGroupBy().getGroups().toArray(new String[0])), 0);
    }

    LOGGER.info(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_END, "query", "[query:" + query + "]"));
    return result;
}

From source file:com.ibm.jaggr.core.impl.deps.DepTree.java

/**
 * Object constructor. Attempts to de-serialize the cached dependency lists
 * from disk and then validates the dependency lists based on last-modified
 * dates, looking for any new or removed files. If the cached dependency
 * list data cannot be de-serialized, new lists are constructed. Once the
 * dependency lists have been validated, the list data is serialized back
 * out to disk.
 *
 * @param paths
 *            Collection of URIs which specify the target resources
 *            to be scanned for javascript files.
 * @param aggregator
 *            The servlet instance for this object
 * @param stamp
 *            timestamp associated with external override/customization
 *            resources that are checked on every server restart
 * @param clean
 *            If true, then the dependency lists are generated from scratch
 *            rather than by de-serializing and then validating the cached
 *            dependency lists.
 * @param validateDeps
 *            If true, then validate existing cached dependencies using
 *            file last-modified times.
 * @throws IOException
 */
public DepTree(Collection<URI> paths, IAggregator aggregator, long stamp, boolean clean, boolean validateDeps)
        throws IOException {
    final String sourceMethod = "<ctor>"; //$NON-NLS-1$
    boolean isTraceLogging = log.isLoggable(Level.FINER);
    if (isTraceLogging) {
        log.entering(DepTree.class.getName(), sourceMethod,
                new Object[] { paths, aggregator, stamp, clean, validateDeps });
    }
    this.stamp = stamp;
    IConfig config = aggregator.getConfig();
    rawConfig = config.toString();
    cacheBust = AggregatorUtil.getCacheBust(aggregator);

    File cacheDir = new File(aggregator.getWorkingDirectory(), DEPCACHE_DIRNAME);
    File cacheFile = new File(cacheDir, CACHE_FILE);

    /*
     * The de-serialized dependency map. If we have a cached dependency map,
     * then it will be validated against the last-modified dates of the
     * current files and only the files that have changed will need to be
     * re-parsed to update the dependency lists.
     */
    DepTree cached = null;

    if (!clean) {
        // If we're not starting clean, try to de-serialize the map from
        // cache
        try {
            ObjectInputStream is = new ObjectInputStream(new FileInputStream(cacheFile));
            try {
                if (isTraceLogging) {
                    log.finer("Attempting to read cached dependencies from " + cacheFile.toString()); //$NON-NLS-1$
                }
                cached = (DepTree) is.readObject();
            } finally {
                try {
                    is.close();
                } catch (Exception ignore) {
                }
            }
        } catch (FileNotFoundException e) {
            /*
             * Not an error. Just means that the cache file hasn't been
             * written yet or else it's been deleted.
             */
            if (log.isLoggable(Level.INFO))
                log.log(Level.INFO, Messages.DepTree_1);
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
        }
    }

    // If the cacheBust config param has changed, then do a clean build
    // of the dependencies.
    if (cached != null) {
        if (stamp == 0) {
            // no init stamp provided.  Preserve the cached one.
            stamp = cached.stamp;
        }
        if (stamp > cached.stamp) {
            // init stamp has been updated.  Validate dependencies.
            validateDeps = true;
        }
        if (!StringUtils.equals(cacheBust, cached.cacheBust)) {
            if (isTraceLogging) {
                log.finer("Current cacheBust = " + cacheBust + ", cached cacheBust = " + cached.cacheBust); //$NON-NLS-1$//$NON-NLS-2$
            }
            if (log.isLoggable(Level.INFO)) {
                log.info(Messages.DepTree_2);
            }
            cached = null;
        }
        if (cached != null && !StringUtils.equals(rawConfig, cached.rawConfig)) {
            if (isTraceLogging) {
                log.finer("Current config = " + rawConfig); //$NON-NLS-1$
                log.finer("Cached config = " + cached.rawConfig); //$NON-NLS-1$
            }
            validateDeps = true;
        }
    }

    /*
     * If we de-serialized a previously saved dependency map, then go with
     * that.
     */
    if (cached != null && !validateDeps && !clean) {
        depMap = cached.depMap;
        fromCache = true;
        return;
    } else if (isTraceLogging) {
        log.finer("Building/validating deps: cached = " + cached + ", validateDeps = " + validateDeps //$NON-NLS-1$//$NON-NLS-2$
                + ", clean = " + clean); //$NON-NLS-1$
    }

    // Initialize the dependency map
    depMap = new ConcurrentHashMap<URI, DepTreeNode>();

    // This can take a while, so print something to the console
    String msg = MessageFormat.format(Messages.DepTree_3, new Object[] { aggregator.getName() });

    ConsoleService cs = new ConsoleService();
    cs.println(msg);

    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }
    // Make sure that all the paths are unique and orthogonal
    paths = DepUtils.removeRedundantPaths(paths);

    /*
     * Create the thread pools, one for the tree builders and one for the
     * parsers. Since a tree builder thread will wait for all the outstanding
     * parser threads started by that builder to complete, we need to use two
     * independent thread pools to guard against the possibility of deadlock
     * caused by all the threads in the pool being consumed by tree builders
     * and leaving none available to service the parsers.
     */
    final ThreadGroup treeBuilderTG = new ThreadGroup(TREEBUILDER_TGNAME),
            parserTG = new ThreadGroup(JSPARSER_TGNAME);
    ExecutorService treeBuilderExc = Executors.newFixedThreadPool(10, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(treeBuilderTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { treeBuilderTG.getName(), treeBuilderTG.activeCount() }));
        }
    }), parserExc = Executors.newFixedThreadPool(20, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(parserTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { parserTG.getName(), parserTG.activeCount() }));
        }
    });

    // Counter to keep track of number of tree builder threads started
    AtomicInteger treeBuilderCount = new AtomicInteger(0);

    // The completion services for the thread pools
    final CompletionService<URI> parserCs = new ExecutorCompletionService<URI>(parserExc);
    CompletionService<DepTreeBuilder.Result> treeBuilderCs = new ExecutorCompletionService<DepTreeBuilder.Result>(
            treeBuilderExc);

    Set<String> nonJSExtensions = Collections.unmodifiableSet(getNonJSExtensions(aggregator));
    // Start the tree builder threads to process the paths
    for (final URI path : paths) {
        /*
         * Create or get from cache the root node for this path and
         * add it to the new map.
         */
        DepTreeNode root = new DepTreeNode("", path); //$NON-NLS-1$
        DepTreeNode cachedNode = null;
        if (cached != null) {
            cachedNode = cached.depMap.get(path);
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_4, new Object[] { path }));
            }
        } else {
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_5, new Object[] { path }));
            }
        }
        depMap.put(path, root);

        treeBuilderCount.incrementAndGet();
        treeBuilderCs.submit(new DepTreeBuilder(aggregator, parserCs, path, root, cachedNode, nonJSExtensions));
    }

    // List of parser exceptions
    LinkedList<Exception> parserExceptions = new LinkedList<Exception>();

    /*
     * Pull the completed tree builder tasks from the completion queue until
     * all the paths have been processed
     */
    while (treeBuilderCount.decrementAndGet() >= 0) {
        try {
            DepTreeBuilder.Result result = treeBuilderCs.take().get();
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_6,
                        new Object[] { result.parseCount, result.dirName }));
            }
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
            parserExceptions.add(e);
        }
    }

    // shutdown the thread pools now that we're done with them
    parserExc.shutdown();
    treeBuilderExc.shutdown();

    // If parser exceptions occurred, then rethrow the first one
    if (parserExceptions.size() > 0) {
        throw new RuntimeException(parserExceptions.get(0));
    }

    // Prune dead nodes (folder nodes with no children)
    for (Map.Entry<URI, DepTreeNode> entry : depMap.entrySet()) {
        entry.getValue().prune();
    }

    /*
     * Make sure the cache directory exists before we try to serialize the
     * dependency map.
     */
    if (!cacheDir.exists())
        if (!cacheDir.mkdirs()) {
            throw new IOException(
                    MessageFormat.format(Messages.DepTree_0, new Object[] { cacheDir.getAbsolutePath() }));
        }

    // Serialize the map to the cache directory
    ObjectOutputStream os;
    os = new ObjectOutputStream(new FileOutputStream(cacheFile));
    try {
        if (isTraceLogging) {
            log.finer("Writing cached dependencies to " + cacheFile.toString()); //$NON-NLS-1$
        }
        os.writeObject(this);
    } finally {
        try {
            os.close();
        } catch (Exception ignore) {
        }
    }
    msg = MessageFormat.format(Messages.DepTree_7, new Object[] { aggregator.getName() });

    // Output that we're done.
    cs.println(msg);
    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }
    if (isTraceLogging) {
        log.exiting(DepTree.class.getName(), sourceMethod);
    }
}

From source file:org.geowebcache.layer.wms.WMSLayerTest.java

private List<ConveyorTile> getTiles(StorageBroker storageBroker, TileRange tr, final WMSLayer tl)
        throws Exception {
    final String layerName = tl.getName();
    // set the meta tile size to 1,1 so we hit all the tiles
    final TileRangeIterator trIter = new TileRangeIterator(tr, new int[] { 1, 1 });

    long[] gridLoc = trIter.nextMetaGridLocation(new long[3]);

    // six concurrent requests max
    ExecutorService requests = Executors.newFixedThreadPool(6);
    ExecutorCompletionService completer = new ExecutorCompletionService(requests);

    List<Future<ConveyorTile>> futures = new ArrayList<Future<ConveyorTile>>();
    while (gridLoc != null) {
        Map<String, String> fullParameters = tr.getParameters();

        final ConveyorTile tile = new ConveyorTile(storageBroker, layerName, tr.getGridSetId(), gridLoc,
                tr.getMimeType(), fullParameters, null, null);
        futures.add(completer.submit(new Callable<ConveyorTile>() {

            public ConveyorTile call() throws Exception {
                try {
                    return tl.getTile(tile);
                } catch (OutsideCoverageException oce) {
                    return null;
                }
            }
        }));

        gridLoc = trIter.nextMetaGridLocation(gridLoc);
    }

    // these assertions could be externalized
    List<ConveyorTile> results = new ArrayList<ConveyorTile>();
    for (int i = 0; i < futures.size(); i++) {
        ConveyorTile get = futures.get(i).get();
        if (get != null) {
            results.add(get);
        }
    }

    requests.shutdown();

    return results;
}

From source file:org.m2x.rssreader.service.FetcherService.java

private int refreshFeeds() {
    ContentResolver cr = getContentResolver();
    final Cursor cursor = cr.query(FeedColumns.CONTENT_URI, FeedColumns.PROJECTION_ID, null, null, null);
    int nbFeed = cursor.getCount();

    ExecutorService executor = Executors.newFixedThreadPool(THREAD_NUMBER, new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setPriority(Thread.MIN_PRIORITY);
            return t;
        }
    });

    CompletionService<Integer> completionService = new ExecutorCompletionService<Integer>(executor);
    while (cursor.moveToNext()) {
        final String feedId = cursor.getString(0);
        completionService.submit(new Callable<Integer>() {
            @Override
            public Integer call() {
                int result = 0;
                try {
                    result = refreshFeed(feedId);
                } catch (Exception ignored) {
                }
                return result;
            }
        });
    }
    cursor.close();

    int globalResult = 0;
    for (int i = 0; i < nbFeed; i++) {
        try {
            Future<Integer> f = completionService.take();
            globalResult += f.get();
        } catch (Exception ignored) {
        }
    }

    executor.shutdownNow(); // To purge all threads

    return globalResult;
}

From source file:com.clustercontrol.monitor.run.factory.RunMonitor.java

/**
 * Runs the monitoring process.
 * <p>
 * <ol>
 * <li>Sets the basic monitor information ({@link #setMonitorInfo(String, String)})</li>
 * <li>Sets the judgement information ({@link #setJudgementInfo()})</li>
 * <li>Sets the check condition information ({@link #setCheckInfo()})</li>
 * <li>Collects the monitored values ({@link #collect(String)})</li>
 * <li>Gets the check results ({@link #getCheckResult(boolean)})</li>
 * <li>Determines the priority from the check results ({@link #getPriority(int)})</li>
 * <li>Sends notifications ({@link #notify(boolean, String, int, Date)})</li>
 * </ol>
 *
 * @return <code>true</code> if the monitoring completed successfully
 * @throws FacilityNotFound
 * @throws MonitorNotFound
 * @throws InvalidRole
 * @throws EntityExistsException
 * @throws HinemosUnknown
 *
 * @see #setMonitorInfo(String, String)
 * @see #setJudgementInfo()
 * @see #setCheckInfo()
 * @see #collect(String)
 * @see #getCheckResult(boolean)
 * @see #getPriority(int)
 * @see #notify(boolean, String, int, Date)
 */
protected boolean runMonitorInfo()
        throws FacilityNotFound, MonitorNotFound, InvalidRole, EntityExistsException, HinemosUnknown {

    m_now = HinemosTime.getDateInstance();

    m_priorityMap = new HashMap<Integer, ArrayList<String>>();
    m_priorityMap.put(Integer.valueOf(PriorityConstant.TYPE_INFO), new ArrayList<String>());
    m_priorityMap.put(Integer.valueOf(PriorityConstant.TYPE_WARNING), new ArrayList<String>());
    m_priorityMap.put(Integer.valueOf(PriorityConstant.TYPE_CRITICAL), new ArrayList<String>());
    m_priorityMap.put(Integer.valueOf(PriorityConstant.TYPE_UNKNOWN), new ArrayList<String>());
    List<Sample> sampleList = new ArrayList<Sample>();
    List<StringSample> collectedSamples = new ArrayList<>();

    try {
        // set the basic monitor information
        boolean run = this.setMonitorInfo(m_monitorTypeId, m_monitorId);
        if (!run) {
            // monitoring is not enabled; nothing to do
            return true;
        }

        // set the judgement information
        setJudgementInfo();

        // set the check condition information
        setCheckInfo();

        ArrayList<String> facilityList = null;
        ExecutorCompletionService<MonitorRunResultInfo> ecs = new ExecutorCompletionService<MonitorRunResultInfo>(
                ParallelExecution.instance().getExecutorService());
        int taskCount = 0;

        if (!m_isMonitorJob) {
            // not running as a monitor job:
            // resolve the target facility ID list under m_facilityId
            // (only facility IDs whose valid flag is true are included)
            facilityList = new RepositoryControllerBean().getExecTargetFacilityIdList(m_facilityId,
                    m_monitor.getOwnerRoleId());
            if (facilityList.size() == 0) {
                return true;
            }

            m_isNode = new RepositoryControllerBean().isNode(m_facilityId);

            // cache node information for each target facility
            nodeInfo = new HashMap<String, NodeInfo>();
            for (String facilityId : facilityList) {
                try {
                    synchronized (this) {
                        nodeInfo.put(facilityId, new RepositoryControllerBean().getNode(facilityId));
                    }
                } catch (FacilityNotFound e) {
                    // ignore facilities that cannot be found
                }
            }

            m_log.debug("monitor start : monitorTypeId : " + m_monitorTypeId + ", monitorId : " + m_monitorId);

            /**
             * Run the checks, one task per facility.
             */
            // iterate over the target facility IDs
            Iterator<String> itr = facilityList.iterator();
            while (itr.hasNext()) {
                String facilityId = itr.next();
                if (facilityId != null && !"".equals(facilityId)) {

                    // create a fresh RunMonitor instance for this task so that
                    // instance state is not shared between concurrent executions
                    RunMonitor runMonitor = this.createMonitorInstance();

                    // copy the settings into the new instance
                    runMonitor.m_monitorTypeId = this.m_monitorTypeId;
                    runMonitor.m_monitorId = this.m_monitorId;
                    runMonitor.m_now = this.m_now;
                    runMonitor.m_priorityMap = this.m_priorityMap;
                    runMonitor.setMonitorInfo(runMonitor.m_monitorTypeId, runMonitor.m_monitorId);
                    runMonitor.setJudgementInfo();
                    runMonitor.setCheckInfo();
                    runMonitor.nodeInfo = this.nodeInfo;

                    ecs.submit(new MonitorExecuteTask(runMonitor, facilityId));
                    taskCount++;

                    if (m_log.isDebugEnabled()) {
                        m_log.debug("starting monitor result : monitorId = " + m_monitorId + ", facilityId = "
                                + facilityId);
                    }
                } else {
                    facilityList.remove(facilityId);
                }
            }

        } else {
            // running as a monitor job:
            // resolve the target facility ID list under m_facilityId
            // (only facility IDs whose valid flag is true are included)
            facilityList = new RepositoryControllerBean().getExecTargetFacilityIdList(m_facilityId,
                    m_monitor.getOwnerRoleId());
            if (facilityList.size() != 1 || !facilityList.get(0).equals(m_facilityId)) {
                return true;
            }

            m_isNode = true;

            // cache node information for the target facility
            nodeInfo = new HashMap<String, NodeInfo>();
            try {
                synchronized (this) {
                    nodeInfo.put(m_facilityId, new RepositoryControllerBean().getNode(m_facilityId));
                }
            } catch (FacilityNotFound e) {
                // ignore facilities that cannot be found
            }
            m_log.debug("monitor start : monitorTypeId : " + m_monitorTypeId + ", monitorId : " + m_monitorId);

            /**
             * Run the check.
             */
            // create a fresh RunMonitor instance for this task so that
            // instance state is not shared between concurrent executions
            RunMonitor runMonitor = this.createMonitorInstance();

            // copy the settings into the new instance
            runMonitor.m_isMonitorJob = this.m_isMonitorJob;
            runMonitor.m_monitorTypeId = this.m_monitorTypeId;
            runMonitor.m_monitorId = this.m_monitorId;
            runMonitor.m_now = this.m_now;
            runMonitor.m_priorityMap = this.m_priorityMap;
            runMonitor.setMonitorInfo(runMonitor.m_monitorTypeId, runMonitor.m_monitorId);
            runMonitor.setJudgementInfo();
            runMonitor.setCheckInfo();
            runMonitor.nodeInfo = this.nodeInfo;
            runMonitor.m_prvData = this.m_prvData;

            ecs.submit(new MonitorExecuteTask(runMonitor, m_facilityId));
            taskCount++;

            if (m_log.isDebugEnabled()) {
                m_log.debug("starting monitor result : monitorId = " + m_monitorId + ", facilityId = "
                        + m_facilityId);
            }
        }

        /**
         * Collect the results.
         */
        MonitorRunResultInfo result = new MonitorRunResultInfo();

        m_log.debug("total start : monitorTypeId : " + m_monitorTypeId + ", monitorId : " + m_monitorId);

        // prepare samples for collected data
        StringSample strSample = null;
        Sample sample = null;
        if (m_monitor.getCollectorFlg()) {
            // string-type or trap monitors collect string samples
            if (m_monitor.getMonitorType() == MonitorTypeConstant.TYPE_STRING
                    || m_monitor.getMonitorType() == MonitorTypeConstant.TYPE_TRAP) {
                strSample = new StringSample(HinemosTime.getDateInstance(), m_monitor.getMonitorId());
            }
            // other monitor types collect numeric samples
            else {
                sample = new Sample(HinemosTime.getDateInstance(), m_monitor.getMonitorId());
            }
        }

        for (int i = 0; i < taskCount; i++) {
            Future<MonitorRunResultInfo> future = ecs.take();
            result = future.get(); // wait for each task to complete

            String facilityId = result.getFacilityId();
            m_nodeDate = result.getNodeDate();

            if (m_log.isDebugEnabled()) {
                m_log.debug("finished monitor : monitorId = " + m_monitorId + ", facilityId = " + facilityId);
            }

            // for string-type and trap monitors, record the original message
            if (m_monitor.getMonitorType() == MonitorTypeConstant.TYPE_STRING
                    || m_monitor.getMonitorType() == MonitorTypeConstant.TYPE_TRAP) {
                if (strSample != null) {
                    strSample.set(facilityId, m_monitor.getMonitorTypeId(), result.getMessageOrg());
                }
            }

            if (!m_isMonitorJob) {
                // process the result only when its process type flag is set
                if (result.getProcessType().booleanValue()) {
                    // send notification
                    notify(true, facilityId, result.getCheckResult(), new Date(m_nodeDate), result);
                    // record the collected value
                    if (sample != null) {
                        int errorType = -1;
                        if (result.isCollectorResult()) {
                            errorType = CollectedDataErrorTypeConstant.NOT_ERROR;
                        } else {
                            errorType = CollectedDataErrorTypeConstant.UNKNOWN;
                        }
                        sample.set(facilityId, m_monitor.getItemName(), result.getValue(), errorType);
                    }
                }
            } else {
                m_monitorRunResultInfo = new MonitorRunResultInfo();
                m_monitorRunResultInfo.setPriority(result.getPriority());
                m_monitorRunResultInfo.setCheckResult(result.getCheckResult());
                m_monitorRunResultInfo.setNodeDate(m_nodeDate);
                m_monitorRunResultInfo
                        .setMessageOrg(makeJobOrgMessage(result.getMessageOrg(), result.getMessage()));
                m_monitorRunResultInfo.setCurData(result.getCurData());
            }
        }

        // persist the collected samples
        if (m_monitor.getMonitorType() == MonitorTypeConstant.TYPE_STRING
                || m_monitor.getMonitorType() == MonitorTypeConstant.TYPE_TRAP) {
            // string-type or trap monitors: store the string samples
            if (strSample != null) {
                collectedSamples.add(strSample);
            }
            if (!collectedSamples.isEmpty()) {
                CollectStringDataUtil.store(collectedSamples);
            }
        } else {
            if (sample != null) {
                sampleList.add(sample);
            }
            if (!sampleList.isEmpty()) {
                CollectDataUtil.put(sampleList);
            }
        }

        m_log.debug("monitor end : monitorTypeId : " + m_monitorTypeId + ", monitorId : " + m_monitorId);

        return true;

    } catch (FacilityNotFound e) {
        throw e;
    } catch (InterruptedException e) {
        m_log.info("runMonitorInfo() monitorTypeId = " + m_monitorTypeId + ", monitorId  = " + m_monitorId
                + " : " + e.getClass().getSimpleName() + ", " + e.getMessage());
        return false;
    } catch (ExecutionException e) {
        m_log.info("runMonitorInfo() monitorTypeId = " + m_monitorTypeId + ", monitorId  = " + m_monitorId
                + " : " + e.getClass().getSimpleName() + ", " + e.getMessage());
        return false;
    }
}

From source file:org.codice.ddf.catalog.sourcepoller.Poller.java

/**
 * @throws IllegalStateException if unable to wait for polls
 * @throws InterruptedException if the current thread was interrupted
 * @throws CancellationException if the task to wait for the loader {@link Callable<V>} to be
 *     complete was cancelled
 * @throws ExecutionException if the task to wait for the loader {@link Callable<V>} threw an
 *     exception
 * @throws PollerException if unable to commit the value for any of the {@code itemsToPoll}
 */
private void doPollItems(long timeout, TimeUnit timeoutTimeUnit, ImmutableMap<K, Callable<V>> itemsToPoll)
        throws InterruptedException, ExecutionException, PollerException {
    removeNoncurrentKeysFromTheCache(itemsToPoll.keySet());

    if (itemsToPoll.isEmpty()) {
        LOGGER.debug("itemsToPoll is empty. Nothing to poll");
        return;
    }

    // Gather any exceptions while loading or committing new values
    final Map<K, Throwable> exceptions = new HashMap<>();
    final CompletionService<Pair<K, Commitable>> completionService = new ExecutorCompletionService<>(
            pollTimeoutWatcherThreadPool);
    final int startedLoadsCount = startLoads(timeout, timeoutTimeUnit, itemsToPoll, completionService,
            exceptions);

    boolean interrupted = false;
    try {
        for (int i = 0; i < startedLoadsCount; i++) {
            // Use CompletionService#poll(long, TimeUnit) instead of CompletionService#take() even
            // though the timeout has already been accounted for in #load(K, Callable<V>, long,
            // TimeUnit) to prevent blocking forever
            // @throws InterruptedException if interrupted while waiting
            final Future<Pair<K, Commitable>> nextCompletedLoadFuture = completionService.poll(timeout,
                    timeoutTimeUnit);
            if (nextCompletedLoadFuture == null) {
                final String message = String.format("Unable to wait for polls to finish within %d %s", timeout,
                        timeoutTimeUnit);
                LOGGER.debug(message);
                throw new IllegalStateException(message);
            }

            // @throws CancellationException if the computation was cancelled
            // @throws ExecutionException if the computation threw an exception
            // @throws InterruptedException if the current thread was interrupted
            final Pair<K, Commitable> nextCompletedLoad = nextCompletedLoadFuture.get();

            try {
                attemptToCommitLoadedValue(nextCompletedLoad.getKey(), nextCompletedLoad.getValue(),
                        exceptions);
            } catch (InterruptedException e) {
                interrupted = true;
            }
        }
    } finally {
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
    }

    if (!exceptions.isEmpty()) {
        throw new PollerException(exceptions);
    }
}

From source file:com.rapid7.diskstorage.dynamodb.DynamoDBDelegate.java

public void parallelMutate(List<MutateWorker> workers) throws BackendException {
    CompletionService<Void> completion = new ExecutorCompletionService<>(clientThreadPool);
    List<Future<Void>> futures = Lists.newLinkedList();
    for (MutateWorker worker : workers) {
        futures.add(completion.submit(worker));
    }

    // Block until all futures complete or throw, rather than using a latch, since future status must be checked anyway.
    boolean interrupted = false;
    try {
        for (int i = 0; i < workers.size(); i++) {
            try {
                completion.take().get(); //Void
            } catch (InterruptedException e) {
                interrupted = true;
                // fail out because Titan does not poll this thread for interruption anywhere
                throw new BackendRuntimeException("was interrupted during parallelMutate");
            } catch (ExecutionException e) {
                throw unwrapExecutionException(e, MUTATE_ITEM);
            }
        }
    } finally {
        for (Future<Void> future : futures) {
            if (!future.isDone()) {
                future.cancel(interrupted /* mayInterruptIfRunning */);
            }
        }
        if (interrupted) {
            // set interrupted on this thread
            Thread.currentThread().interrupt();
        }
    }
}