Example usage for com.google.common.collect Maps newConcurrentMap

Introduction

On this page you can find example usage for com.google.common.collect Maps newConcurrentMap.

Prototype

public static <K, V> ConcurrentMap<K, V> newConcurrentMap() 

Document

Returns a general-purpose instance of ConcurrentMap, which supports all optional operations of the ConcurrentMap interface.
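
Before the real-world examples below, here is a minimal, self-contained sketch of the factory in isolation. The class name and map contents are illustrative only and are not taken from any of the projects listed.

import com.google.common.collect.Maps;

import java.util.concurrent.ConcurrentMap;

public class NewConcurrentMapSketch {
    public static void main(String[] args) {
        // Maps.newConcurrentMap() returns a general-purpose ConcurrentMap
        // (a ConcurrentHashMap in current Guava versions), with the type
        // parameters inferred from the assignment target.
        ConcurrentMap<String, Integer> counters = Maps.newConcurrentMap();

        // All optional ConcurrentMap operations are supported, e.g. the
        // atomic putIfAbsent and conditional replace.
        counters.putIfAbsent("requests", 0);
        counters.replace("requests", 0, 1);

        System.out.println(counters); // prints {requests=1}
    }
}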

Usage

From source file: org.glowroot.agent.impl.AsyncComponents.java

AsyncQueryData getOrCreateAsyncServiceCallData(String serviceCallType, String serviceCallText,
        boolean bypassLimit) {
    Map<String, AsyncQueryData> serviceCallsForType = asyncServiceCalls.get(serviceCallType);
    if (serviceCallsForType == null) {
        serviceCallsForType = Maps.newConcurrentMap();
        asyncServiceCalls.put(serviceCallType, serviceCallsForType);
    }
    AsyncQueryData serviceCallData = serviceCallsForType.get(serviceCallText);
    if (serviceCallData == null) {
        serviceCallData = createServiceCallData(serviceCallsForType, serviceCallText, bypassLimit);
        serviceCallsForType.put(serviceCallText, serviceCallData);
    }
    return serviceCallData;
}
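
Note that the get-then-put sequence above is only race-free if calls for the same key are otherwise serialized by the caller. Where callers may race, the ConcurrentMap returned by Maps.newConcurrentMap() also supports an atomic get-or-create via computeIfAbsent (Java 8+). Below is a minimal sketch; the QueryData class and method names are placeholders for illustration, not Glowroot APIs.

import com.google.common.collect.Maps;

import java.util.concurrent.ConcurrentMap;

class GetOrCreateSketch {

    // Placeholder value type for illustration only.
    static final class QueryData {
        final String queryText;

        QueryData(String queryText) {
            this.queryText = queryText;
        }
    }

    private final ConcurrentMap<String, QueryData> queries = Maps.newConcurrentMap();

    QueryData getOrCreate(String queryText) {
        // computeIfAbsent creates the value at most once per key; the call is
        // atomic on the ConcurrentHashMap that Maps.newConcurrentMap() returns.
        return queries.computeIfAbsent(queryText, QueryData::new);
    }
}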

From source file: com.ebay.pulsar.analytics.datasource.DataSourceMetaRepo.java

private Map<String, DataSourceProvider> createMultiDBInstances(List<String> dataSourceNames) {
    if (dataSourceNames == null || dataSourceNames.size() == 0) {
        return null;
    }

    final Map<String, DataSourceProvider> tableMetasMap = Maps.newConcurrentMap();
    final CountDownLatch countDownLatch = new CountDownLatch(dataSourceNames.size());
    for (final String dbNameSpace : dataSourceNames) {
        EXECUTOR.submit(new Runnable() {
            @Override
            public void run() {
                DataSourceProvider dataBase = createDBInstance(dbNameSpace);
                if (dataBase != null) {
                    tableMetasMap.put(dbNameSpace, dataBase);
                }
                countDownLatch.countDown();
            }
        });
    }
    try {
        countDownLatch.await();
    } catch (InterruptedException e) {
        // restore the interrupt status rather than silently swallowing it
        Thread.currentThread().interrupt();
    }

    return tableMetasMap;
}

From source file: com.quancheng.saluki.registry.consul.ConsulRegistry.java

private Map<String, List<GrpcURL>> lookupServiceUpdate(String group) {
    Long lastConsulIndexId = lookupGroupServices.get(group) == null ? 0L : lookupGroupServices.get(group);
    String serviceName = GrpcURLUtils.toServiceName(group);
    ConsulServiceResp consulResp = client.lookupHealthService(serviceName, lastConsulIndexId);
    if (consulResp != null) {
        List<ConsulService> consulServcies = consulResp.getSalukiConsulServices();
        boolean updated = consulServcies != null && !consulServcies.isEmpty()
                && consulResp.getConsulIndex() > lastConsulIndexId;
        if (updated) {
            Map<String, List<GrpcURL>> groupProviderUrls = Maps.newConcurrentMap();
            for (ConsulService service : consulServcies) {
                GrpcURL providerUrl = buildURL(service);
                String serviceKey = providerUrl.getServiceKey();
                List<GrpcURL> urlList = groupProviderUrls.get(serviceKey);
                if (urlList == null) {
                    urlList = Lists.newArrayList();
                    groupProviderUrls.put(serviceKey, urlList);
                }
                urlList.add(providerUrl);
            }
            lookupGroupServices.put(group, consulResp.getConsulIndex());
            return groupProviderUrls;
        }
    }
    return null;
}

From source file: org.jclouds.ec2.compute.config.EC2ComputeServiceDependenciesModule.java

@Provides
@Singleton
protected ConcurrentMap<RegionAndName, KeyPair> keypairMap(Injector i) {
    return Maps.newConcurrentMap();
}

From source file: org.glowroot.agent.impl.PreloadSomeSuperTypesCache.java

private static LoadFromFileResult loadFromFile(File file) {
    ConcurrentMap<String, CacheValue> cache = Maps.newConcurrentMap();
    if (file.exists()) {
        try {
            return Files.readLines(file, UTF_8, new LoadFromFile(file));
        } catch (IOException e) {
            logger.error("error reading {}: {}", file.getAbsolutePath(), e.getMessage(), e);
        }
    }
    return ImmutableLoadFromFileResult.builder().cache(cache).linesInFile(0).trackAccessTimes(false).build();
}

From source file: qa.qcri.nadeef.core.utils.ConsistencyManager.java

public Set<Integer> findNewViolations(Cell updatedCell, ExecutionContext context) throws Exception {
    // check if this new cell creates a new violation
    Set<Integer> affectedCells = Sets.newHashSet();

    NonBlockingCollectionIterator<Violation> outputIterator = new NonBlockingCollectionIterator<>();
    Collection<Collection<Fix>> newRepairs = Lists.newArrayList();

    for (Rule rule : ruleList) {

        String tableName = updatedCell.getColumn().getTableName();

        // create a single block from the whole table
        List<Table> tableList = new ArrayList<>();
        tableList.add(new SQLTable(tableName, context.getConnectionPool()));

        // generate the newly added tuple list, consisting of a single tuple: the last updated one
        ConcurrentMap<String, HashSet<Integer>> newTuples = Maps.newConcurrentMap();
        newTuples.put(tableName, Sets.newHashSet(updatedCell.getTid()));

        DirectIteratorResultHandler directIteratorResultHandler = new DirectIteratorResultHandler(rule,
                outputIterator);

        // call the rule iterator on whole table block and single new tuple list
        rule.iterator(tableList, newTuples, directIteratorResultHandler);

        // now outputIterator contains the newly detected violations; we just need to serialize them into the database

    }

    Connection conn = null;
    PreparedStatement stat = null;
    DBConnectionPool connectionPool = context.getConnectionPool();
    try {
        int vid = Violations.generateViolationId(connectionPool);

        conn = connectionPool.getNadeefConnection();
        stat = conn.prepareStatement("INSERT INTO VIOLATION VALUES (?, ?, ?, ?, ?, ?)");
        int count = 0;
        while (outputIterator.hasNext()) {
            Violation violation = outputIterator.next();
            violation.setVid(vid);
            count++;
            Collection<Cell> cells = violation.getCells();
            for (Cell cell : cells) {
                // skip the tuple id
                if (cell.hasColumnName("tid")) {
                    continue;
                }
                stat.setInt(1, vid);
                stat.setString(2, violation.getRuleId());
                stat.setString(3, cell.getColumn().getTableName());
                stat.setInt(4, cell.getTid());
                stat.setString(5, cell.getColumn().getColumnName());
                Object value = cell.getValue();
                if (value == null) {
                    stat.setString(6, null);
                } else {
                    stat.setString(6, value.toString());
                }
                stat.addBatch();
            }

            if (count % 4096 == 0) {
                stat.executeBatch();
            }

            // generate fixes for this violation
            Rule rule = ruleMap.get(violation.getRuleId());

            Collection fixes = rule.repair(violation);
            newRepairs.add(fixes);

            vid++;
        }
        stat.executeBatch();
        conn.commit();

    } finally {
        if (stat != null) {
            stat.close();
        }
        if (conn != null) {
            conn.close();
        }
    }

    // now insert newRepairs into repair table
    Statement statement = null;
    try {
        conn = connectionPool.getNadeefConnection();
        statement = conn.createStatement();
        int id = Fixes.generateFixId(connectionPool);
        for (Collection<Fix> fixes : newRepairs) {
            for (Fix fix : fixes) {
                String sql = FixExport.getSQLInsert(id, fix);
                statement.addBatch(sql);

                // add tupleid to affected cells
                if (fix.getLeft().getColumn().equals(updatedCell.getColumn())
                        && fix.getRight().getColumn().equals(updatedCell.getColumn())) {
                    affectedCells.add(fix.getLeft().getTid());
                    affectedCells.add(fix.getRight().getTid());
                }
            }
            id++;
        }
        statement.executeBatch();
        conn.commit();

    } finally {
        if (statement != null) {
            statement.close();
        }
        if (conn != null) {
            conn.close();
        }
    }

    // remove the original tupleid
    affectedCells.remove(updatedCell.getTid());
    return affectedCells;

}

From source file: org.locationtech.geogig.repository.impl.WorkingTreeInsertHelper.java

public Map<NodeRef, RevTree> buildTrees() {

    final Map<NodeRef, RevTree> result = Maps.newConcurrentMap();

    List<AsyncBuildTree> tasks = Lists.newArrayList();

    for (Entry<String, CanonicalTreeBuilder> builderEntry : treeBuilders.entrySet()) {
        final String treePath = builderEntry.getKey();
        final RevTreeBuilder builder = builderEntry.getValue();
        final RevFeatureType revFeatureType = revFeatureTypes.get(treePath);
        final ObjectId metadataId = revFeatureType.getId();
        tasks.add(new AsyncBuildTree(treePath, builder, metadataId, result));
    }
    try {
        executorService.invokeAll(tasks);
    } catch (InterruptedException e) {
        throw Throwables.propagate(e);
    }
    db.putAll(result.values().iterator());
    return result;
}

From source file: com.metamx.druid.realtime.plumber.RealtimePlumberSchool.java

@Override
public Plumber findPlumber(final Schema schema, final FireDepartmentMetrics metrics) {
    verifyState();

    final RejectionPolicy rejectionPolicy = rejectionPolicyFactory.create(windowPeriod);
    log.info("Creating plumber using rejectionPolicy[%s]", rejectionPolicy);

    return new Plumber() {
        private volatile boolean stopped = false;
        private volatile ExecutorService persistExecutor = null;
        private volatile ScheduledExecutorService scheduledExecutor = null;

        private final Map<Long, Sink> sinks = Maps.newConcurrentMap();
        private final VersionedIntervalTimeline<String, Sink> sinkTimeline = new VersionedIntervalTimeline<String, Sink>(
                String.CASE_INSENSITIVE_ORDER);

        @Override
        public void startJob() {
            computeBaseDir(schema).mkdirs();
            initializeExecutors();
            bootstrapSinksFromDisk();
            registerServerViewCallback();
            startPersistThread();
        }

        @Override
        public Sink getSink(long timestamp) {
            if (!rejectionPolicy.accept(timestamp)) {
                return null;
            }

            final long truncatedTime = segmentGranularity.truncate(timestamp);

            Sink retVal = sinks.get(truncatedTime);

            if (retVal == null) {
                final Interval sinkInterval = new Interval(new DateTime(truncatedTime),
                        segmentGranularity.increment(new DateTime(truncatedTime)));

                retVal = new Sink(sinkInterval, schema, versioningPolicy.getVersion(sinkInterval));

                try {
                    segmentAnnouncer.announceSegment(retVal.getSegment());
                    sinks.put(truncatedTime, retVal);
                    sinkTimeline.add(retVal.getInterval(), retVal.getVersion(),
                            new SingleElementPartitionChunk<Sink>(retVal));
                } catch (IOException e) {
                    log.makeAlert(e, "Failed to announce new segment[%s]", schema.getDataSource())
                            .addData("interval", retVal.getInterval()).emit();
                }
            }

            return retVal;
        }

        @Override
        public <T> QueryRunner<T> getQueryRunner(final Query<T> query) {
            final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
            final QueryToolChest<T, Query<T>> toolchest = factory.getToolchest();

            final Function<Query<T>, ServiceMetricEvent.Builder> builderFn = new Function<Query<T>, ServiceMetricEvent.Builder>() {

                @Override
                public ServiceMetricEvent.Builder apply(@Nullable Query<T> input) {
                    return toolchest.makeMetricBuilder(query);
                }
            };

            List<TimelineObjectHolder<String, Sink>> querySinks = Lists.newArrayList();
            for (Interval interval : query.getIntervals()) {
                querySinks.addAll(sinkTimeline.lookup(interval));
            }

            return toolchest.mergeResults(factory.mergeRunners(EXEC, FunctionalIterable.create(querySinks)
                    .transform(new Function<TimelineObjectHolder<String, Sink>, QueryRunner<T>>() {
                        @Override
                        public QueryRunner<T> apply(TimelineObjectHolder<String, Sink> holder) {
                            final Sink theSink = holder.getObject().getChunk(0).getObject();
                            return new SpecificSegmentQueryRunner<T>(new MetricsEmittingQueryRunner<T>(emitter,
                                    builderFn, factory.mergeRunners(EXEC, Iterables.transform(theSink,
                                            new Function<FireHydrant, QueryRunner<T>>() {
                                                @Override
                                                public QueryRunner<T> apply(FireHydrant input) {
                                                    return factory.createRunner(input.getSegment());
                                                }
                                            }))),
                                    new SpecificSegmentSpec(new SegmentDescriptor(holder.getInterval(),
                                            theSink.getSegment().getVersion(),
                                            theSink.getSegment().getShardSpec().getPartitionNum())));
                        }
                    })));
        }

        @Override
        public void persist(final Runnable commitRunnable) {
            final List<Pair<FireHydrant, Interval>> indexesToPersist = Lists.newArrayList();
            for (Sink sink : sinks.values()) {
                if (sink.swappable()) {
                    indexesToPersist.add(Pair.of(sink.swap(), sink.getInterval()));
                }
            }

            log.info("Submitting persist runnable for dataSource[%s]", schema.getDataSource());

            persistExecutor.execute(new ThreadRenamingRunnable(
                    String.format("%s-incremental-persist", schema.getDataSource())) {
                @Override
                public void doRun() {
                    for (Pair<FireHydrant, Interval> pair : indexesToPersist) {
                        metrics.incrementRowOutputCount(persistHydrant(pair.lhs, schema, pair.rhs));
                    }
                    commitRunnable.run();
                }
            });
        }

        // Submits persist-n-merge task for a Sink to the persistExecutor
        private void persistAndMerge(final long truncatedTime, final Sink sink) {
            final String threadName = String.format("%s-%s-persist-n-merge", schema.getDataSource(),
                    new DateTime(truncatedTime));
            persistExecutor.execute(new ThreadRenamingRunnable(threadName) {
                @Override
                public void doRun() {
                    final Interval interval = sink.getInterval();

                    for (FireHydrant hydrant : sink) {
                        if (!hydrant.hasSwapped()) {
                            log.info("Hydrant[%s] hasn't swapped yet, swapping. Sink[%s]", hydrant, sink);
                            final int rowCount = persistHydrant(hydrant, schema, interval);
                            metrics.incrementRowOutputCount(rowCount);
                        }
                    }

                    final File mergedTarget = new File(computePersistDir(schema, interval), "merged");
                    if (mergedTarget.exists()) {
                        log.info("Skipping already-merged sink: %s", sink);
                        return;
                    }

                    File mergedFile = null;
                    try {
                        List<QueryableIndex> indexes = Lists.newArrayList();
                        for (FireHydrant fireHydrant : sink) {
                            Segment segment = fireHydrant.getSegment();
                            final QueryableIndex queryableIndex = segment.asQueryableIndex();
                            log.info("Adding hydrant[%s]", fireHydrant);
                            indexes.add(queryableIndex);
                        }

                        mergedFile = IndexMerger.mergeQueryableIndex(indexes, schema.getAggregators(),
                                mergedTarget);

                        QueryableIndex index = IndexIO.loadIndex(mergedFile);

                        DataSegment segment = dataSegmentPusher.push(mergedFile, sink.getSegment()
                                .withDimensions(Lists.newArrayList(index.getAvailableDimensions())));

                        segmentPublisher.publishSegment(segment);
                    } catch (IOException e) {
                        log.makeAlert(e, "Failed to persist merged index[%s]", schema.getDataSource())
                                .addData("interval", interval).emit();
                    }

                    if (mergedFile != null) {
                        try {
                            log.info("Deleting Index File[%s]", mergedFile);
                            FileUtils.deleteDirectory(mergedFile);
                        } catch (IOException e) {
                            log.warn(e, "Error deleting directory[%s]", mergedFile);
                        }
                    }
                }
            });
        }

        @Override
        public void finishJob() {
            log.info("Shutting down...");

            for (final Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                persistAndMerge(entry.getKey(), entry.getValue());
            }

            while (!sinks.isEmpty()) {
                try {
                    log.info("Cannot shut down yet! Sinks remaining: %s", Joiner.on(", ")
                            .join(Iterables.transform(sinks.values(), new Function<Sink, String>() {
                                @Override
                                public String apply(Sink input) {
                                    return input.getSegment().getIdentifier();
                                }
                            })));

                    synchronized (handoffCondition) {
                        while (!sinks.isEmpty()) {
                            handoffCondition.wait();
                        }
                    }
                } catch (InterruptedException e) {
                    throw Throwables.propagate(e);
                }
            }

            // scheduledExecutor is shutdown here, but persistExecutor is shutdown when the
            // ServerView sends it a new segment callback
            if (scheduledExecutor != null) {
                scheduledExecutor.shutdown();
            }

            stopped = true;
        }

        private void initializeExecutors() {
            if (persistExecutor == null) {
                persistExecutor = Executors.newFixedThreadPool(1,
                        new ThreadFactoryBuilder().setDaemon(true).setNameFormat("plumber_persist_%d").build());
            }
            if (scheduledExecutor == null) {
                scheduledExecutor = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
                        .setDaemon(true).setNameFormat("plumber_scheduled_%d").build());
            }
        }

        private void bootstrapSinksFromDisk() {
            for (File sinkDir : computeBaseDir(schema).listFiles()) {
                Interval sinkInterval = new Interval(sinkDir.getName().replace("_", "/"));

                //final File[] sinkFiles = sinkDir.listFiles();
                // To avoid reading and listing of "merged" dir
                final File[] sinkFiles = sinkDir.listFiles(new FilenameFilter() {
                    @Override
                    public boolean accept(File dir, String fileName) {
                        return !(Ints.tryParse(fileName) == null);
                    }
                });
                Arrays.sort(sinkFiles, new Comparator<File>() {
                    @Override
                    public int compare(File o1, File o2) {
                        try {
                            return Ints.compare(Integer.parseInt(o1.getName()), Integer.parseInt(o2.getName()));
                        } catch (NumberFormatException e) {
                            log.error(e, "Couldn't compare as numbers? [%s][%s]", o1, o2);
                            return o1.compareTo(o2);
                        }
                    }
                });

                try {
                    List<FireHydrant> hydrants = Lists.newArrayList();
                    for (File segmentDir : sinkFiles) {
                        log.info("Loading previously persisted segment at [%s]", segmentDir);

                        // The filename filter above should already exclude the "merged" dir;
                        // this is just a double-check to keep it out of the hydrants list.
                        // If the filter is guaranteed, this check can be removed.
                        if (Ints.tryParse(segmentDir.getName()) == null) {
                            continue;
                        }

                        hydrants.add(
                                new FireHydrant(new QueryableIndexSegment(null, IndexIO.loadIndex(segmentDir)),
                                        Integer.parseInt(segmentDir.getName())));
                    }

                    Sink currSink = new Sink(sinkInterval, schema, versioningPolicy.getVersion(sinkInterval),
                            hydrants);
                    sinks.put(sinkInterval.getStartMillis(), currSink);
                    sinkTimeline.add(currSink.getInterval(), currSink.getVersion(),
                            new SingleElementPartitionChunk<Sink>(currSink));

                    segmentAnnouncer.announceSegment(currSink.getSegment());
                } catch (IOException e) {
                    log.makeAlert(e, "Problem loading sink[%s] from disk.", schema.getDataSource())
                            .addData("interval", sinkInterval).emit();
                }
            }
        }

        private void registerServerViewCallback() {
            serverView.registerSegmentCallback(persistExecutor, new ServerView.BaseSegmentCallback() {
                @Override
                public ServerView.CallbackAction segmentAdded(DruidServer server, DataSegment segment) {
                    if (stopped) {
                        log.info("Unregistering ServerViewCallback");
                        persistExecutor.shutdown();
                        return ServerView.CallbackAction.UNREGISTER;
                    }

                    if ("realtime".equals(server.getType())) {
                        return ServerView.CallbackAction.CONTINUE;
                    }

                    log.debug("Checking segment[%s] on server[%s]", segment, server);
                    if (schema.getDataSource().equals(segment.getDataSource())) {
                        final Interval interval = segment.getInterval();
                        for (Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                            final Long sinkKey = entry.getKey();
                            if (interval.contains(sinkKey)) {
                                final Sink sink = entry.getValue();
                                log.info("Segment[%s] matches sink[%s] on server[%s]", segment, sink, server);

                                final String segmentVersion = segment.getVersion();
                                final String sinkVersion = sink.getSegment().getVersion();
                                if (segmentVersion.compareTo(sinkVersion) >= 0) {
                                    log.info("Segment version[%s] >= sink version[%s]", segmentVersion,
                                            sinkVersion);
                                    try {
                                        segmentAnnouncer.unannounceSegment(sink.getSegment());
                                        FileUtils
                                                .deleteDirectory(computePersistDir(schema, sink.getInterval()));
                                        log.info("Removing sinkKey %d for segment %s", sinkKey,
                                                sink.getSegment().getIdentifier());
                                        sinks.remove(sinkKey);
                                        sinkTimeline.remove(sink.getInterval(), sink.getVersion(),
                                                new SingleElementPartitionChunk<Sink>(sink));

                                        synchronized (handoffCondition) {
                                            handoffCondition.notifyAll();
                                        }
                                    } catch (IOException e) {
                                        log.makeAlert(e, "Unable to delete old segment for dataSource[%s].",
                                                schema.getDataSource()).addData("interval", sink.getInterval())
                                                .emit();
                                    }
                                }
                            }
                        }
                    }

                    return ServerView.CallbackAction.CONTINUE;
                }
            });
        }

        private void startPersistThread() {
            final long truncatedNow = segmentGranularity.truncate(new DateTime()).getMillis();
            final long windowMillis = windowPeriod.toStandardDuration().getMillis();

            log.info("Expect to run at [%s]", new DateTime().plus(new Duration(System.currentTimeMillis(),
                    segmentGranularity.increment(truncatedNow) + windowMillis)));

            ScheduledExecutors.scheduleAtFixedRate(scheduledExecutor,
                    new Duration(System.currentTimeMillis(),
                            segmentGranularity.increment(truncatedNow) + windowMillis),
                    new Duration(truncatedNow, segmentGranularity.increment(truncatedNow)),
                    new ThreadRenamingCallable<ScheduledExecutors.Signal>(String.format("%s-overseer-%d",
                            schema.getDataSource(), schema.getShardSpec().getPartitionNum())) {
                        @Override
                        public ScheduledExecutors.Signal doCall() {
                            if (stopped) {
                                log.info("Stopping merge-n-push overseer thread");
                                return ScheduledExecutors.Signal.STOP;
                            }

                            log.info("Starting merge and push.");

                            long minTimestamp = segmentGranularity
                                    .truncate(rejectionPolicy.getCurrMaxTime().minus(windowMillis)).getMillis();

                            List<Map.Entry<Long, Sink>> sinksToPush = Lists.newArrayList();
                            for (Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                                final Long intervalStart = entry.getKey();
                                if (intervalStart < minTimestamp) {
                                    log.info("Adding entry[%s] for merge and push.", entry);
                                    sinksToPush.add(entry);
                                }
                            }

                            for (final Map.Entry<Long, Sink> entry : sinksToPush) {
                                persistAndMerge(entry.getKey(), entry.getValue());
                            }

                            if (stopped) {
                                log.info("Stopping merge-n-push overseer thread");
                                return ScheduledExecutors.Signal.STOP;
                            } else {
                                return ScheduledExecutors.Signal.REPEAT;
                            }
                        }
                    });
        }
    };
}

From source file: com.mattc.argus2.concurrent.Decompressors.java

/**
 * Updates our Decompressors. This reloads the Reflections object via
 * {@link #reloadReflector()} and updates our Decompressor list to include
 * subtypes of DecompressProcess that are annotated with {@literal @Decompressor}.
 * <br />
 * {@link #reloadReflector()} is SLOW, so this method should be called sparingly.
 */
private static void update() {
    // Reload org.reflections Reflector
    long delay;
    final long start = System.currentTimeMillis();
    final long reloadDelay = Decompressors.reloadReflector();

    final Set<Class<? extends DecompressProcess>> processes = Sets
            .newConcurrentHashSet(reflector.getSubTypesOf(DecompressProcess.class));
    final Map<String[], Class<? extends DecompressProcess>> formats = Maps.newConcurrentMap();

    for (final Class<? extends DecompressProcess> clazz : processes) {
        if (clazz.getAnnotation(Decompressor.class) == null) {
            processes.remove(clazz);
        } else {
            try {
                final String[] key = clazz.getAnnotation(Decompressor.class).value();
                formats.put(key, clazz);
                Console.info("Registered " + clazz.getName() + " as Decompressor with format suffixes: "
                        + Arrays.toString(key) + "...");
            } catch (final IncompleteAnnotationException e) {
                Console.exception(new InvalidDecompressorException(clazz,
                        " No Formats specified in @Decompressor Annotation! Check Plugin Version...", e));
            }
        }
    }

    decompressorFormats = ImmutableMap.copyOf(formats);
    Console.debug(
            String.format("Updated Decompressors in %,d ms, Reloaded Reflector in %,d ms (%.02f%% of Delay)",
                    (delay = System.currentTimeMillis() - start), reloadDelay,
                    ((float) reloadDelay / (float) delay) * 100.0f));
}

From source file: com.google.idea.blaze.cpp.BlazeConfigurationResolver.java

private ImmutableMap<TargetKey, BlazeResolveConfiguration> buildBlazeConfigurationMap(
        BlazeContext parentContext, BlazeProjectData blazeProjectData,
        ImmutableMap<TargetKey, CToolchainIdeInfo> toolchainLookupMap,
        ImmutableMap<File, VirtualFile> headerRoots) {
    // Type specification needed to avoid incorrect type inference during command line build.
    return Scope.push(parentContext,
            (ScopedFunction<ImmutableMap<TargetKey, BlazeResolveConfiguration>>) context -> {
                context.push(new TimingScope("Build C configuration map"));

                ConcurrentMap<CToolchainIdeInfo, File> compilerWrapperCache = Maps.newConcurrentMap();
                List<ListenableFuture<MapEntry>> mapEntryFutures = Lists.newArrayList();

                for (TargetIdeInfo target : blazeProjectData.targetMap.targets()) {
                    if (target.kind.getLanguageClass() == LanguageClass.C) {
                        ListenableFuture<MapEntry> future = submit(() -> createResolveConfiguration(target,
                                toolchainLookupMap, headerRoots, compilerWrapperCache, blazeProjectData));
                        mapEntryFutures.add(future);
                    }
                }

                ImmutableMap.Builder<TargetKey, BlazeResolveConfiguration> newResolveConfigurations = ImmutableMap
                        .builder();
                List<MapEntry> mapEntries;
                try {
                    mapEntries = Futures.allAsList(mapEntryFutures).get();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    context.setCancelled();
                    return ImmutableMap.of();
                } catch (ExecutionException e) {
                    IssueOutput.error("Could not build C resolve configurations: " + e).submit(context);
                    LOG.error("Could not build C resolve configurations", e);
                    return ImmutableMap.of();
                }

                for (MapEntry mapEntry : mapEntries) {
                    // Skip over labels that don't have C configuration data.
                    if (mapEntry != null) {
                        newResolveConfigurations.put(mapEntry.targetKey, mapEntry.configuration);
                    }
                }
                return newResolveConfigurations.build();
            });
}