Example usage for com.google.common.collect Maps newConcurrentMap

Introduction

On this page you can find example usage for com.google.common.collect Maps newConcurrentMap.

Prototype

public static <K, V> ConcurrentMap<K, V> newConcurrentMap() 

Document

Returns a general-purpose instance of ConcurrentMap, which supports all optional operations of the ConcurrentMap interface.
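
A minimal sketch of typical usage (the variable name and type parameters below are illustrative only); in current Guava versions the returned map is backed by a ConcurrentHashMap:

import com.google.common.collect.Maps;
import java.util.concurrent.ConcurrentMap;

ConcurrentMap<String, Integer> counts = Maps.newConcurrentMap();
// All atomic operations of the ConcurrentMap interface are supported.
counts.putIfAbsent("requests", 0);
counts.replace("requests", 0, 1);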

Usage

From source file: com.turbospaces.serialization.DecoratedKryo.java

@Override
public RegisteredClass register(final Class type, final Serializer serializer) {
    RegisteredClass regClass = super.register(type, serializer);
    if (serializers == null)
        serializers = Maps.newConcurrentMap();
    serializers.put(type, regClass);
    return regClass;
}
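
Note that the null check on serializers above is not thread-safe by itself: two threads registering concurrently could each create a fresh map, and one registration could be lost. A hedged alternative, assuming the field can be initialized eagerly, is a one-line sketch like:

private final ConcurrentMap<Class, RegisteredClass> serializers = Maps.newConcurrentMap();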

From source file: com.datatorrent.contrib.dimensions.AppDataSingleSchemaDimensionStoreHDHT.java

@Override
public void setup(OperatorContext context) {
    boolean initializeSeenEnumValues = seenEnumValues == null;

    if (initializeSeenEnumValues) {
        seenEnumValues = Maps.newConcurrentMap();
    }

    super.setup(context);

    this.buckets = Sets.newHashSet(bucketID);

    if (!dimensionalSchema.isPredefinedFromTo()) {
        if (getMinTimestamp() != null) {
            dimensionalSchema.setFrom(getMinTimestamp());
        }

        if (getMaxTimestamp() != null) {
            dimensionalSchema.setTo(getMaxTimestamp());
        }
    }

    if (initializeSeenEnumValues) {
        Map<String, List<Object>> keysToEnumValuesList = this.configurationSchema.getKeysToEnumValuesList();

        for (String key : configurationSchema.getKeyDescriptor().getFieldList()) {
            if (DimensionsDescriptor.RESERVED_DIMENSION_NAMES.contains(key)) {
                continue;
            }

            @SuppressWarnings("rawtypes")
            Set<Comparable> enumValuesSet = new ConcurrentSkipListSet<>();
            @SuppressWarnings({ "unchecked", "rawtypes" })
            List<Comparable> enumValuesList = (List) keysToEnumValuesList.get(key);
            enumValuesSet.addAll(enumValuesList);
            seenEnumValues.put(key, enumValuesSet);
        }
    }
}

From source file: org.aliuge.crawler.pendingqueue.AbstractsPendingQueue.java

/**
 * Returns the current status of this pending queue.
 *
 * @return a map containing the queue size, the queued URLs, and the success/failure counters
 */
public Map<String, Object> pendingStatus() {
    Map<String, Object> stat = Maps.newConcurrentMap();
    stat.put("count", new Long(Queue.size()));
    stat.put("urls", Queue.toString());
    stat.put("success", success.get());
    stat.put("failure", failure.get());
    return stat;
}
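
In this example the map is built and returned by a single thread as a one-shot status snapshot, so the concurrent variant mainly guards against callers sharing the returned map across threads; a plain HashMap would otherwise suffice.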

From source file: org.apache.drill.exec.expr.fn.registry.FunctionRegistryHolder.java

/**
 * Adds jars to the function registry.
 * If a jar with the same name already exists, it and its functions will be removed.
 * Then the jar will be added to {@link #jars}
 * and each function will be added using {@link #addFunctions(Map, List)}.
 * The function registry version is incremented by 1 if at least one jar was added, not once per jar.
 * This is a write operation, so only one caller at a time can perform it;
 * others wait until the first caller completes.
 *
 * @param newJars jars and list of their function holders, each contains function name, signature and holder
 */
public void addJars(Map<String, List<FunctionHolder>> newJars) {
    try (AutoCloseableLock lock = writeLock.open()) {
        for (Map.Entry<String, List<FunctionHolder>> newJar : newJars.entrySet()) {
            String jarName = newJar.getKey();
            removeAllByJar(jarName);
            Map<String, Queue<String>> jar = Maps.newConcurrentMap();
            jars.put(jarName, jar);
            addFunctions(jar, newJar.getValue());
        }
        if (!newJars.isEmpty()) {
            version++;
        }
    }
}

From source file: co.cask.cdap.internal.app.runtime.schedule.StreamSizeScheduler.java

@Inject
public StreamSizeScheduler(CConfiguration cConf, NotificationService notificationService,
        MetricStore metricStore, Provider<Store> storeProvider, ProgramLifecycleService lifecycleService,
        PropertiesResolver propertiesResolver, DatasetBasedStreamSizeScheduleStore scheduleStore) {
    this.pollingDelay = TimeUnit.SECONDS
            .toMillis(cConf.getLong(Constants.Notification.Stream.STREAM_SIZE_SCHEDULE_POLLING_DELAY));
    this.notificationService = notificationService;
    this.metricStore = metricStore;
    this.storeProvider = storeProvider;
    this.lifecycleService = lifecycleService;
    this.propertiesResolver = propertiesResolver;
    this.scheduleStore = scheduleStore;
    this.streamSubscribers = Maps.newConcurrentMap();
    this.scheduleSubscribers = new ConcurrentSkipListMap<>();
    this.schedulerStarted = false;
}
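
Note the contrast between the two subscriber fields: streamSubscribers presumably needs only thread-safe point lookups, so the hash-based map from Maps.newConcurrentMap() suffices, while scheduleSubscribers is a ConcurrentSkipListMap, which additionally keeps its keys in sorted order at the cost of log-time operations.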

From source file: org.glowroot.agent.impl.AsyncComponents.java

AsyncQueryData getOrCreateAsyncQueryData(String queryType, String queryText, boolean bypassLimit) {
    Map<String, AsyncQueryData> queriesForType = asyncQueries.get(queryType);
    if (queriesForType == null) {
        queriesForType = Maps.newConcurrentMap();
        asyncQueries.put(queryType, queriesForType);
    }
    AsyncQueryData queryData = queriesForType.get(queryText);
    if (queryData == null) {
        queryData = createQueryData(queriesForType, queryText, bypassLimit);
        queriesForType.put(queryText, queryData);
    }
    return queryData;
}
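
The get-then-put sequences above are not atomic: under contention, two threads can both observe null and install separate inner maps or query data objects. On Java 8+, computeIfAbsent expresses the same lookup atomically; a minimal sketch, assuming asyncQueries is a ConcurrentMap:

Map<String, AsyncQueryData> queriesForType =
        asyncQueries.computeIfAbsent(queryType, k -> Maps.newConcurrentMap());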

From source file: gobblin.source.extractor.extract.kafka.KafkaSource.java

@Override
public List<WorkUnit> getWorkunits(SourceState state) {
    Map<String, List<WorkUnit>> workUnits = Maps.newConcurrentMap();
    if (state.getPropAsBoolean(KafkaSource.GOBBLIN_KAFKA_EXTRACT_ALLOW_TABLE_TYPE_NAMESPACE_CUSTOMIZATION)) {
        String tableTypeStr = state.getProp(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY,
                KafkaSource.DEFAULT_TABLE_TYPE.toString());
        tableType = Extract.TableType.valueOf(tableTypeStr);
        extractNameSpace = state.getProp(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY,
                KafkaSource.DEFAULT_NAMESPACE_NAME);
    } else {
        // For backward compatibility, ignore the table type and namespace configuration keys,
        // as in the previous implementation.
        tableType = KafkaSource.DEFAULT_TABLE_TYPE;
        extractNameSpace = KafkaSource.DEFAULT_NAMESPACE_NAME;
    }
    isFullExtract = state.getPropAsBoolean(ConfigurationKeys.EXTRACT_IS_FULL_KEY);

    try {
        this.kafkaConsumerClient = kafkaConsumerClientResolver
                .resolveClass(state.getProp(GOBBLIN_KAFKA_CONSUMER_CLIENT_FACTORY_CLASS,
                        DEFAULT_GOBBLIN_KAFKA_CONSUMER_CLIENT_FACTORY_CLASS))
                .newInstance().create(ConfigUtils.propertiesToConfig(state.getProperties()));

        List<KafkaTopic> topics = getFilteredTopics(state);

        for (KafkaTopic topic : topics) {
            LOG.info("Discovered topic " + topic.getName());
        }
        Map<String, State> topicSpecificStateMap = DatasetUtils
                .getDatasetSpecificProps(Iterables.transform(topics, new Function<KafkaTopic, String>() {

                    @Override
                    public String apply(KafkaTopic topic) {
                        return topic.getName();
                    }
                }), state);

        int numOfThreads = state.getPropAsInt(ConfigurationKeys.KAFKA_SOURCE_WORK_UNITS_CREATION_THREADS,
                ConfigurationKeys.KAFKA_SOURCE_WORK_UNITS_CREATION_DEFAULT_THREAD_COUNT);
        ExecutorService threadPool = Executors.newFixedThreadPool(numOfThreads,
                ExecutorsUtils.newThreadFactory(Optional.of(LOG)));

        Stopwatch createWorkUnitStopwatch = Stopwatch.createStarted();

        for (KafkaTopic topic : topics) {
            threadPool.submit(new WorkUnitCreator(topic, state,
                    Optional.fromNullable(topicSpecificStateMap.get(topic.getName())), workUnits));
        }

        ExecutorsUtils.shutdownExecutorService(threadPool, Optional.of(LOG), 1L, TimeUnit.HOURS);
        LOG.info(String.format("Created workunits for %d topics in %d seconds", workUnits.size(),
                createWorkUnitStopwatch.elapsed(TimeUnit.SECONDS)));

        // Create empty WorkUnits for skipped partitions (i.e., partitions that have previous offsets,
        // but aren't processed).
        createEmptyWorkUnitsForSkippedPartitions(workUnits, topicSpecificStateMap, state);

        int numOfMultiWorkunits = state.getPropAsInt(ConfigurationKeys.MR_JOB_MAX_MAPPERS_KEY,
                ConfigurationKeys.DEFAULT_MR_JOB_MAX_MAPPERS);
        List<WorkUnit> workUnitList = KafkaWorkUnitPacker.getInstance(this, state).pack(workUnits,
                numOfMultiWorkunits);
        addTopicSpecificPropsToWorkUnits(workUnitList, topicSpecificStateMap);
        setLimiterReportKeyListToWorkUnits(workUnitList, getLimiterExtractorReportKeys());
        return workUnitList;
    } catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
        throw new RuntimeException(e);
    } finally {
        try {
            if (this.kafkaConsumerClient != null) {
                this.kafkaConsumerClient.close();
            }
        } catch (IOException e) {
            throw new RuntimeException("Exception closing kafkaConsumerClient");
        }
    }
}
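
The concurrent map matters in this example: workUnits is handed to the WorkUnitCreator tasks submitted to the thread pool, which populate it in parallel; a plain HashMap would not be safe for those concurrent writes.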

From source file: org.apache.spark.network.shuffle.ExternalShuffleBlockResolver.java

@VisibleForTesting
ExternalShuffleBlockResolver(TransportConf conf, File registeredExecutorFile, Executor directoryCleaner)
        throws IOException {
    this.conf = conf;
    this.registeredExecutorFile = registeredExecutorFile;
    if (registeredExecutorFile != null) {
        Options options = new Options();
        options.createIfMissing(false);
        options.logger(new LevelDBLogger());
        DB tmpDb;
        try {
            tmpDb = JniDBFactory.factory.open(registeredExecutorFile, options);
        } catch (NativeDB.DBException e) {
            if (e.isNotFound() || e.getMessage().contains(" does not exist ")) {
                logger.info("Creating state database at " + registeredExecutorFile);
                options.createIfMissing(true);
                try {
                    tmpDb = JniDBFactory.factory.open(registeredExecutorFile, options);
                } catch (NativeDB.DBException dbExc) {
                    throw new IOException("Unable to create state store", dbExc);
                }
            } else {
                // The LevelDB file appears to be corrupt somehow. Let's just blow it away and
                // create a new one, so we can keep processing new apps.
                logger.error("error opening leveldb file {}.  Creating new file, will not be able to "
                        + "recover state for existing applications", registeredExecutorFile, e);
                if (registeredExecutorFile.isDirectory()) {
                    for (File f : registeredExecutorFile.listFiles()) {
                        if (!f.delete()) {
                            logger.warn("error deleting {}", f.getPath());
                        }
                    }
                }
                if (!registeredExecutorFile.delete()) {
                    logger.warn("error deleting {}", registeredExecutorFile.getPath());
                }
                options.createIfMissing(true);
                try {
                    tmpDb = JniDBFactory.factory.open(registeredExecutorFile, options);
                } catch (NativeDB.DBException dbExc) {
                    throw new IOException("Unable to create state store", dbExc);
                }

            }
        }
        // if there is a version mismatch, we throw an exception, which means the service is unusable
        checkVersion(tmpDb);
        executors = reloadRegisteredExecutors(tmpDb);
        db = tmpDb;
    } else {
        db = null;
        executors = Maps.newConcurrentMap();
    }
    this.directoryCleaner = directoryCleaner;
}
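
When registeredExecutorFile is null, executor registrations live only in the in-memory map from Maps.newConcurrentMap() and are lost on restart; the LevelDB-backed branch exists so they can be reloaded via reloadRegisteredExecutors after the shuffle service restarts.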

From source file: com.metamx.druid.realtime.RealtimePlumberSchool.java

@Override
public Plumber findPlumber(final Schema schema, final FireDepartmentMetrics metrics) {
    verifyState();
    initializeExecutors();

    computeBaseDir(schema).mkdirs();

    final Map<Long, Sink> sinks = Maps.newConcurrentMap();

    for (File sinkDir : computeBaseDir(schema).listFiles()) {
        Interval sinkInterval = new Interval(sinkDir.getName().replace("_", "/"));

        final File[] sinkFiles = sinkDir.listFiles();
        Arrays.sort(sinkFiles, new Comparator<File>() {
            @Override
            public int compare(File o1, File o2) {
                try {
                    return Ints.compare(Integer.parseInt(o1.getName()), Integer.parseInt(o2.getName()));
                } catch (NumberFormatException e) {
                    log.error(e, "Couldn't compare as numbers? [%s][%s]", o1, o2);
                    return o1.compareTo(o2);
                }
            }
        });

        try {
            List<FireHydrant> hydrants = Lists.newArrayList();
            for (File segmentDir : sinkFiles) {
                log.info("Loading previously persisted segment at [%s]", segmentDir);
                hydrants.add(new FireHydrant(new QueryableIndexSegment(null, IndexIO.loadIndex(segmentDir)),
                        Integer.parseInt(segmentDir.getName())));
            }

            Sink currSink = new Sink(sinkInterval, schema, hydrants);
            sinks.put(sinkInterval.getStartMillis(), currSink);

            metadataUpdater.announceSegment(currSink.getSegment());
        } catch (IOException e) {
            log.makeAlert(e, "Problem loading sink[%s] from disk.", schema.getDataSource())
                    .addData("interval", sinkInterval).emit();
        }
    }

    serverView.registerSegmentCallback(persistExecutor, new ServerView.BaseSegmentCallback() {
        @Override
        public ServerView.CallbackAction segmentAdded(DruidServer server, DataSegment segment) {
            if ("realtime".equals(server.getType())) {
                return ServerView.CallbackAction.CONTINUE;
            }

            log.debug("Checking segment[%s] on server[%s]", segment, server);
            if (schema.getDataSource().equals(segment.getDataSource())) {
                final Interval interval = segment.getInterval();
                for (Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                    final Long sinkKey = entry.getKey();
                    if (interval.contains(sinkKey)) {
                        final Sink sink = entry.getValue();
                        log.info("Segment matches sink[%s]", sink);

                        if (segment.getVersion().compareTo(sink.getSegment().getVersion()) >= 0) {
                            try {
                                metadataUpdater.unannounceSegment(sink.getSegment());
                                FileUtils.deleteDirectory(computePersistDir(schema, sink.getInterval()));
                                sinks.remove(sinkKey);
                            } catch (IOException e) {
                                log.makeAlert(e, "Unable to delete old segment for dataSource[%s].",
                                        schema.getDataSource()).addData("interval", sink.getInterval()).emit();
                            }
                        }
                    }
                }
            }

            return ServerView.CallbackAction.CONTINUE;
        }
    });

    final long truncatedNow = segmentGranularity.truncate(new DateTime()).getMillis();
    final long windowMillis = windowPeriod.toStandardDuration().getMillis();
    final RejectionPolicy rejectionPolicy = rejectionPolicyFactory.create(windowPeriod);
    log.info("Creating plumber using rejectionPolicy[%s]", rejectionPolicy);

    log.info("Expect to run at [%s]", new DateTime().plus(new Duration(System.currentTimeMillis(),
            segmentGranularity.increment(truncatedNow) + windowMillis)));

    ScheduledExecutors.scheduleAtFixedRate(scheduledExecutor,
            new Duration(System.currentTimeMillis(), segmentGranularity.increment(truncatedNow) + windowMillis),
            new Duration(truncatedNow, segmentGranularity.increment(truncatedNow)),
            new ThreadRenamingRunnable(String.format("%s-overseer", schema.getDataSource())) {
                @Override
                public void doRun() {
                    log.info("Starting merge and push.");

                    long minTimestamp = segmentGranularity.truncate(rejectionPolicy.getCurrMaxTime())
                            .getMillis() - windowMillis;

                    List<Map.Entry<Long, Sink>> sinksToPush = Lists.newArrayList();
                    for (Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                        final Long intervalStart = entry.getKey();
                        if (intervalStart < minTimestamp) {
                            log.info("Adding entry[%s] for merge and push.", entry);
                            sinksToPush.add(entry);
                        }
                    }

                    for (final Map.Entry<Long, Sink> entry : sinksToPush) {
                        final Sink sink = entry.getValue();

                        final String threadName = String.format("%s-%s-persist-n-merge", schema.getDataSource(),
                                new DateTime(entry.getKey()));
                        persistExecutor.execute(new ThreadRenamingRunnable(threadName) {
                            @Override
                            public void doRun() {
                                final Interval interval = sink.getInterval();

                                for (FireHydrant hydrant : sink) {
                                    if (!hydrant.hasSwapped()) {
                                        log.info("Hydrant[%s] hasn't swapped yet, swapping. Sink[%s]", hydrant,
                                                sink);
                                        final int rowCount = persistHydrant(hydrant, schema, interval);
                                        metrics.incrementRowOutputCount(rowCount);
                                    }
                                }

                                final File mergedFile;
                                try {
                                    List<QueryableIndex> indexes = Lists.newArrayList();
                                    for (FireHydrant fireHydrant : sink) {
                                        Segment segment = fireHydrant.getSegment();
                                        final QueryableIndex queryableIndex = segment.asQueryableIndex();
                                        log.info("Adding hydrant[%s]", fireHydrant);
                                        indexes.add(queryableIndex);
                                    }

                                    mergedFile = IndexMerger.mergeQueryableIndex(indexes,
                                            schema.getAggregators(),
                                            new File(computePersistDir(schema, interval), "merged"));

                                    QueryableIndex index = IndexIO.loadIndex(mergedFile);

                                    DataSegment segment = segmentPusher.push(mergedFile,
                                            sink.getSegment().withDimensions(
                                                    Lists.newArrayList(index.getAvailableDimensions())));

                                    metadataUpdater.publishSegment(segment);
                                } catch (IOException e) {
                                    log.makeAlert(e, "Failed to persist merged index[%s]",
                                            schema.getDataSource()).addData("interval", interval).emit();
                                }
                            }
                        });
                    }
                }
            });

    return new Plumber() {
        @Override
        public Sink getSink(long timestamp) {
            if (!rejectionPolicy.accept(timestamp)) {
                return null;
            }

            final long truncatedTime = segmentGranularity.truncate(timestamp);

            Sink retVal = sinks.get(truncatedTime);

            if (retVal == null) {
                retVal = new Sink(new Interval(new DateTime(truncatedTime),
                        segmentGranularity.increment(new DateTime(truncatedTime))), schema);

                try {
                    metadataUpdater.announceSegment(retVal.getSegment());

                    sinks.put(truncatedTime, retVal);
                } catch (IOException e) {
                    log.makeAlert(e, "Failed to announce new segment[%s]", schema.getDataSource())
                            .addData("interval", retVal.getInterval()).emit();
                }
            }

            return retVal;
        }

        @Override
        public <T> QueryRunner<T> getQueryRunner(final Query<T> query) {
            final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
            final Function<Query<T>, ServiceMetricEvent.Builder> builderFn = new Function<Query<T>, ServiceMetricEvent.Builder>() {
                private final QueryToolChest<T, Query<T>> toolchest = factory.getToolchest();

                @Override
                public ServiceMetricEvent.Builder apply(@Nullable Query<T> input) {
                    return toolchest.makeMetricBuilder(query);
                }
            };

            return factory.mergeRunners(EXEC,
                    FunctionalIterable.create(sinks.values()).transform(new Function<Sink, QueryRunner<T>>() {
                        @Override
                        public QueryRunner<T> apply(@Nullable Sink input) {
                            return new MetricsEmittingQueryRunner<T>(emitter, builderFn, factory.mergeRunners(
                                    EXEC,
                                    Iterables.transform(input, new Function<FireHydrant, QueryRunner<T>>() {
                                        @Override
                                        public QueryRunner<T> apply(@Nullable FireHydrant input) {
                                            return factory.createRunner(input.getSegment());
                                        }
                                    })));
                        }
                    }));
        }

        @Override
        public void persist(final Runnable commitRunnable) {
            final List<Pair<FireHydrant, Interval>> indexesToPersist = Lists.newArrayList();
            for (Sink sink : sinks.values()) {
                if (sink.swappable()) {
                    indexesToPersist.add(Pair.of(sink.swap(), sink.getInterval()));
                }
            }

            log.info("Submitting persist runnable for dataSource[%s]", schema.getDataSource());

            persistExecutor.execute(new ThreadRenamingRunnable(
                    String.format("%s-incremental-persist", schema.getDataSource())) {
                @Override
                public void doRun() {
                    for (Pair<FireHydrant, Interval> pair : indexesToPersist) {
                        metrics.incrementRowOutputCount(persistHydrant(pair.lhs, schema, pair.rhs));
                    }
                    commitRunnable.run();
                }
            });
        }

        @Override
        public void finishJob() {
            throw new UnsupportedOperationException();
        }
    };
}
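
In this example the sinks map is shared by several threads: the segment callback registered on serverView removes entries after handoff, the scheduled overseer task iterates the map and pushes old sinks, and getSink and getQueryRunner read it on the ingest and query paths. A concurrent map, whose iterators are weakly consistent rather than fail-fast, makes this safe without wrapping every access in a lock.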

From source file: org.locationtech.geogig.repository.WorkingTreeInsertHelper.java

public Map<NodeRef, RevTree> buildTrees() {

    final Map<NodeRef, RevTree> result = Maps.newConcurrentMap();

    List<AsyncBuildTree> tasks = Lists.newArrayList();

    for (Entry<String, RevTreeBuilder2> builderEntry : treeBuilders.entrySet()) {
        final String treePath = builderEntry.getKey();
        final RevTreeBuilder2 builder = builderEntry.getValue();
        tasks.add(new AsyncBuildTree(treePath, builder, result));
    }
    try {
        executorService.invokeAll(tasks);
    } catch (InterruptedException e) {
        throw Throwables.propagate(e);
    }
    db.putAll(result.values().iterator());
    return result;
}
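
Here the concurrent map acts as a shared result collector: each AsyncBuildTree task is constructed with a reference to result and, presumably, writes its finished tree into it while executorService.invokeAll runs the tasks in parallel.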