Example usage for com.google.common.collect.Maps#newConcurrentMap

Introduction

On this page you can find example usages of com.google.common.collect.Maps#newConcurrentMap.

Prototype

public static <K, V> ConcurrentMap<K, V> newConcurrentMap() 

Documentation

Returns a general-purpose instance of ConcurrentMap, which supports all optional operations of the ConcurrentMap interface.
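
Before the project examples below, here is a minimal, self-contained sketch of the typical pattern (the HitCounter class and its key names are invented for illustration): create the map once, then let multiple threads read and update it without external locking. Guava currently implements newConcurrentMap by returning a ConcurrentHashMap, so the map rejects null keys and values and supports atomic operations such as merge.

import java.util.concurrent.ConcurrentMap;

import com.google.common.collect.Maps;

public class HitCounter {

    // One shared, thread-safe map; no synchronized blocks are needed around it.
    private final ConcurrentMap<String, Integer> hits = Maps.newConcurrentMap();

    public void record(String key) {
        // merge() is atomic on a ConcurrentMap, so concurrent callers cannot lose updates.
        hits.merge(key, 1, Integer::sum);
    }

    public int count(String key) {
        return hits.getOrDefault(key, 0);
    }

    public static void main(String[] args) {
        HitCounter counter = new HitCounter();
        counter.record("page");
        counter.record("page");
        System.out.println(counter.count("page")); // prints 2
    }
}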

Usage

From source file: com.datatorrent.lib.bucket.BucketManagerImpl.java (the constructor keeps its dirty buckets in a concurrent map)

public BucketManagerImpl() {
    eventQueue = new LinkedBlockingQueue<Long>();
    evictionCandidates = Sets.newHashSet();
    dirtyBuckets = Maps.newConcurrentMap();
    bucketHeap = MinMaxPriorityQueue.orderedBy(new Comparator<Bucket<T>>() {
        @Override
        public int compare(Bucket<T> bucket1, Bucket<T> bucket2) {
            if (bucket1.lastUpdateTime() < bucket2.lastUpdateTime()) {
                return -1;
            }
            if (bucket1.lastUpdateTime() > bucket2.lastUpdateTime()) {
                return 1;
            }
            return 0;
        }

    }).create();
    lock = new Lock();
    committedWindow = -1;

    noOfBuckets = DEF_NUM_BUCKETS;
    noOfBucketsInMemory = DEF_NUM_BUCKETS_MEM;
    maxNoOfBucketsInMemory = DEF_NUM_BUCKETS_MEM + 100;
    millisPreventingBucketEviction = DEF_MILLIS_PREVENTING_EVICTION;
    writeEventKeysOnly = true;
}

From source file: io.macgyver.core.scheduler.ScheduledTaskManager.java (loads scheduled script tasks from Neo4j into a concurrent map)

protected Map<String, JsonNode> loadScheduledScriptTasks() {

    Map<String, JsonNode> m = Maps.newConcurrentMap();
    String cypher = "match (s:ScheduledTask) where length(s.script)>0 return s";
    neo4j.execCypher(cypher).forEach(it -> {
        String name = it.path(SCHEDULED_BY_SCRIPT).asText();
        if (!Strings.isNullOrEmpty(name)) {
            m.put(name, it);
        }
    });
    return m;

}

From source file: org.onosproject.cfg.impl.ComponentConfigManager.java (registers component property definitions in a fresh concurrent map)

@Override
public void registerProperties(Class<?> componentClass) {
    checkPermission(CONFIG_WRITE);

    String componentName = componentClass.getName();
    String resourceName = componentClass.getSimpleName() + RESOURCE_EXT;
    try (InputStream ris = componentClass.getResourceAsStream(resourceName)) {
        checkArgument(ris != null, "Property definitions not found at resource %s", resourceName);

        // Read the definitions
        Set<ConfigProperty> defs = ConfigPropertyDefinitions.read(ris);

        // Produce a new map of the properties and register it.
        Map<String, ConfigProperty> map = Maps.newConcurrentMap();
        defs.forEach(p -> map.put(p.name(), p));

        properties.put(componentName, map);
        loadExistingValues(componentName);
    } catch (IOException e) {
        log.error("Unable to read property definitions from resource " + resourceName, e);
    }
}

From source file: org.apache.hive.spark.client.SparkClientImpl.java (the client constructor uses a concurrent map as its job registry)

SparkClientImpl(RpcServer rpcServer, Map<String, String> conf, HiveConf hiveConf)
        throws IOException, SparkException {
    this.conf = conf;
    this.hiveConf = hiveConf;
    this.jobs = Maps.newConcurrentMap();

    String clientId = UUID.randomUUID().toString();
    String secret = rpcServer.createSecret();
    this.driverThread = startDriver(rpcServer, clientId, secret);
    this.protocol = new ClientProtocol();

    try {
        // The RPC server will take care of timeouts here.
        this.driverRpc = rpcServer.registerClient(clientId, secret, protocol).get();
    } catch (Throwable e) {
        String errorMsg = null;
        if (e.getCause() instanceof TimeoutException) {
            errorMsg = "Timed out waiting for client to connect.\nPossible reasons include network "
                    + "issues, errors in remote driver or the cluster has no available resources, etc."
                    + "\nPlease check YARN or Spark driver's logs for further information.";
        } else if (e.getCause() instanceof InterruptedException) {
            errorMsg = "Interruption occurred while waiting for client to connect.\nPossibly the Spark session is closed "
                    + "such as in case of query cancellation."
                    + "\nPlease refer to HiveServer2 logs for further information.";
        } else {
            errorMsg = "Error while waiting for client to connect.";
        }
        LOG.error(errorMsg, e);
        driverThread.interrupt();
        try {
            driverThread.join();
        } catch (InterruptedException ie) {
            // Give up.
            LOG.warn("Interrupted before driver thread was finished.", ie);
        }
        throw Throwables.propagate(e);
    }

    driverRpc.addListener(new Rpc.Listener() {
        @Override
        public void rpcClosed(Rpc rpc) {
            if (isAlive) {
                LOG.warn("Client RPC channel closed unexpectedly.");
                isAlive = false;
            }
        }
    });
    isAlive = true;
}

From source file: org.apache.drill.exec.memory.Accountor.java (creates a concurrent map for buffer accounting only when ENABLE_ACCOUNTING is set)

public Accountor(DrillConfig config, boolean errorOnLeak, FragmentContext context, Accountor parent, long max,
        long preAllocated, boolean applyFragLimit) {
    // TODO: fix preallocation stuff
    this.errorOnLeak = errorOnLeak;
    AtomicRemainder parentRemainder = parent != null ? parent.remainder : null;
    this.parent = parent;

    boolean enableFragmentLimit;
    double fragmentMemOvercommitFactor;

    try {
        enableFragmentLimit = config.getBoolean(ExecConstants.ENABLE_FRAGMENT_MEMORY_LIMIT);
        fragmentMemOvercommitFactor = config.getDouble(ExecConstants.FRAGMENT_MEM_OVERCOMMIT_FACTOR);
    } catch (Exception e) {
        enableFragmentLimit = DEFAULT_ENABLE_FRAGMENT_LIMIT;
        fragmentMemOvercommitFactor = DEFAULT_FRAGMENT_MEM_OVERCOMMIT_FACTOR;
    }
    this.enableFragmentLimit = enableFragmentLimit;
    this.fragmentMemOvercommitFactor = fragmentMemOvercommitFactor;

    this.applyFragmentLimit = applyFragLimit;

    this.remainder = new AtomicRemainder(errorOnLeak, parentRemainder, max, preAllocated, applyFragmentLimit);
    this.total = max;
    this.fragmentContext = context;
    this.handle = (context != null) ? context.getHandle() : null;
    this.fragmentStr = (handle != null) ? (handle.getMajorFragmentId() + ":" + handle.getMinorFragmentId())
            : "0:0";
    this.fragmentLimit = this.total; // Allow as much as possible to start with;
    if (ENABLE_ACCOUNTING) {
        buffers = Maps.newConcurrentMap();
    } else {
        buffers = null;
    }
    this.fragmentContexts = new ArrayList<FragmentContext>();
    if (parent != null && parent.parent == null) { // Only add the fragment context to the fragment level accountor
        synchronized (this) {
            addFragmentContext(this.fragmentContext);
        }
    }
}

From source file: tajo.engine.cluster.ClusterManager.java (initializes three concurrent maps for cluster bookkeeping)

public ClusterManager(WorkerCommunicator wc, final TajoConf conf, LeafServerTracker tracker)
        throws IOException {
    this.wc = wc;
    this.conf = conf;
    this.tracker = tracker;
    this.DNSNameToHostsMap = Maps.newConcurrentMap();
    this.servingInfoMap = Maps.newConcurrentMap();
    this.resourcePool = Maps.newConcurrentMap();
    this.failedWorkers = Sets.newHashSet();
    this.clusterSize = 0;
}

From source file: io.macgyver.server.Startup.java (uses a concurrent map as the binding passed to a build.gradle template)

@Override
public void onApplicationEvent(ApplicationReadyEvent event) {

    try {
        copyPluginsJson();
        readPlugins();
        Resource resource = applicationContext.getResource("classpath:templates/build.gradle.template");

        File buildGradleFile = new File(tempDir, "build.gradle");
        SimpleTemplateEngine ste = new SimpleTemplateEngine();
        Template template = ste.createTemplate(new InputStreamReader(resource.getInputStream()));
        FileWriter fw = new FileWriter(buildGradleFile);

        Map<String, Object> x = Maps.newConcurrentMap();
        x.put("dependencies", dependencies);
        template.make(x).writeTo(fw);

        fw.close();

        logger.info("resource: {}", resource);

        ProjectConnection connect = GradleConnector.newConnector().forProjectDirectory(tempDir).connect();

        logger.info("{}", connect);
        BuildLauncher launcher = connect.newBuild();
        launcher.setStandardOutput(System.out);
        launcher.forTasks("fetch").run();

        targetLibDir.mkdirs();

        Arrays.asList(targetLibDir.listFiles()).forEach(it -> {
            it.delete();
        });

        Arrays.asList(new File(tempDir, "jars").listFiles()).forEach(it -> {
            try {
                File targetFile = new File(targetLibDir, it.getName());
                logger.info("using {}", targetFile.getName());
                Files.copy(it, targetFile);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });

        connect.close();

        File logbackFile = new File(targetDir, "config/logback.xml");

        if (!logbackFile.exists()) {
            logbackFile.getParentFile().mkdirs();
            try (FileOutputStream fos = new FileOutputStream(logbackFile)) {
                ByteStreams.copy(
                        applicationContext.getResource("classpath:templates/logback.xml").getInputStream(),
                        fos);
            }
        }

        File targetFile = new File(targetDir, "bin/macgyverctl");
        targetFile.getParentFile().mkdirs();

        try (FileOutputStream fos = new FileOutputStream(targetFile)) {
            ByteStreams.copy(applicationContext.getResource("classpath:templates/macgyverctl").getInputStream(),
                    fos);
        }

        targetFile.setExecutable(true);

        String[] env = new String[] { "JAVA_HOME=" + System.getProperty("java.home") };

        Process p = Runtime.getRuntime().exec(targetFile.getAbsolutePath(), env);

        int rc = p.waitFor();
        logger.info("return code from start script: {}", rc);

    } catch (IOException | InterruptedException e) {
        logger.error("problem", e);
    }

}

From source file: com.spotify.helios.agent.TaskHistoryWriter.java (supplies a fresh concurrent map as the default backing store for a persistent reference)

public TaskHistoryWriter(final String hostname, final ZooKeeperClient client, final Path backingFile)
        throws IOException, InterruptedException {
    this.hostname = hostname;
    this.client = client;
    this.backingStore = PersistentAtomicReference.create(backingFile,
            new TypeReference<ConcurrentMap<JobId, Deque<TaskStatusEvent>>>() {
            }, new Supplier<ConcurrentMap<JobId, Deque<TaskStatusEvent>>>() {
                @Override
                public ConcurrentMap<JobId, Deque<TaskStatusEvent>> get() {
                    return Maps.newConcurrentMap();
                }
            });
    this.items = backingStore.get();

    // Clean out any errant null values.  Normally shouldn't have any, but we did have a few
    // where it happened, and this will make sure we can get out of a bad state if we get into it.
    final ImmutableSet<JobId> curKeys = ImmutableSet.copyOf(this.items.keySet());
    for (final JobId key : curKeys) {
        if (this.items.get(key) == null) {
            this.items.remove(key);
        }
    }

    int itemCount = 0;
    for (final Deque<TaskStatusEvent> deque : items.values()) {
        itemCount += deque.size();
    }
    this.count = new AtomicInteger(itemCount);
}

From source file: com.spotify.helios.agent.QueueingHistoryWriter.java (the same idiom as the previous example: a concurrent map as the default backing store)

public QueueingHistoryWriter(final String hostname, final ZooKeeperClient client, final Path backingFile)
        throws IOException, InterruptedException {
    this.hostname = hostname;
    this.client = client;
    this.backingStore = PersistentAtomicReference.create(backingFile,
            new TypeReference<ConcurrentMap<JobId, Deque<TaskStatusEvent>>>() {
            }, new Supplier<ConcurrentMap<JobId, Deque<TaskStatusEvent>>>() {
                @Override
                public ConcurrentMap<JobId, Deque<TaskStatusEvent>> get() {
                    return Maps.newConcurrentMap();
                }
            });
    this.items = backingStore.get();

    // Clean out any errant null values.  Normally shouldn't have any, but we did have a few
    // where it happened, and this will make sure we can get out of a bad state if we get into it.
    final ImmutableSet<JobId> curKeys = ImmutableSet.copyOf(this.items.keySet());
    for (JobId key : curKeys) {
        if (this.items.get(key) == null) {
            this.items.remove(key);
        }
    }

    int itemCount = 0;
    for (Deque<TaskStatusEvent> deque : items.values()) {
        itemCount += deque.size();
    }
    this.count = new AtomicInteger(itemCount);
}
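
Both Helios writers above use the same idiom: Maps.newConcurrentMap() wrapped in a Guava Supplier, so a fresh, empty map can be created on demand as a default value. A minimal sketch of that idiom, with invented class and key names:

import java.util.concurrent.ConcurrentMap;

import com.google.common.base.Supplier;
import com.google.common.collect.Maps;

public class FreshMapSupplier {

    // A Supplier that hands out a new, empty concurrent map each time it is
    // asked, e.g. as the default for a lazily initialized backing store.
    static final Supplier<ConcurrentMap<String, Long>> FRESH = Maps::newConcurrentMap;

    public static void main(String[] args) {
        ConcurrentMap<String, Long> store = FRESH.get();
        store.put("events", 1L);
        System.out.println(store); // prints {events=1}
    }
}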

From source file: com.github.hilcode.versionator.ListExecutor.java (groups listing results in concurrent maps keyed by POM and by group/artifact)

public final void execute() {
    final File rootDir_ = this.commandList.rootDir.getAbsoluteFile();
    final File rootDir = rootDir_.getPath().endsWith(".") ? rootDir_.getParentFile() : rootDir_;
    final ImmutableList<Pom> poms = this.pomFinder.findAllPoms(rootDir);
    final Set<PomAndGav> pomAndGavs = Sets.newConcurrentHashSet();
    for (final Pom pom : poms) {
        if (pom.parent.isPresent()) {
            final Pom parent = pom.parent.get();
            boolean includeGav = this.commandList.patterns.get(0).startsWith("!");
            for (final String pattern : this.commandList.patterns) {
                final boolean exclusion = pattern.startsWith("!");
                final Glob glob = exclusion ? Globs.create(pattern.substring(1)) : Globs.create(pattern);
                if (exclusion) {
                    if (glob.match(parent.gav.toText())) {
                        includeGav = false;
                    }
                } else {
                    if (glob.match(parent.gav.toText())) {
                        includeGav = true;
                    }
                }
            }
            if (includeGav) {
                pomAndGavs.add(new PomAndGav(pom, parent.gav));
            }
        }
        final Document pomDocument = this.pomParser.toDocument(pom.file);
        final ImmutableList<Dependency> dependencies = this.pomParser.findDependencies(pomDocument);
        for (final Dependency dependency : dependencies) {
            boolean includeGav = this.commandList.patterns.get(0).startsWith("!");
            for (final String pattern : this.commandList.patterns) {
                final boolean exclusion = pattern.startsWith("!");
                final Glob glob = exclusion ? Globs.create(pattern.substring(1)) : Globs.create(pattern);
                if (exclusion) {
                    if (glob.match(dependency.gav.toText())) {
                        includeGav = false;
                    }
                } else {
                    if (glob.match(dependency.gav.toText())) {
                        includeGav = true;
                    }
                }
            }
            if (includeGav) {
                pomAndGavs.add(new PomAndGav(pom, dependency.gav));
            }
        }
    }
    final List<PomAndGav> pomAndGavs_ = Lists.newArrayList(pomAndGavs);
    Collections.sort(pomAndGavs_);
    if (this.commandList.grouping == Command.Grouping.BY_POM) {
        final List<Pom> poms_ = Lists.newArrayList();
        final Map<Pom, List<Gav>> map = Maps.newConcurrentMap();
        for (final PomAndGav pomAndGav : pomAndGavs_) {
            if (!map.containsKey(pomAndGav.pom)) {
                poms_.add(pomAndGav.pom);
                map.put(pomAndGav.pom, Lists.<Gav>newArrayList());
            }
            map.get(pomAndGav.pom).add(pomAndGav.gav);
        }
        Collections.sort(poms_);
        final String pomMask = "%" + (1 + (int) Math.floor(Math.log10(poms_.size()))) + "d) %s (%s)";
        int pomIndex = 0;
        for (final Pom pom : poms_) {
            if (pomIndex != 0) {
                System.out.println();
            }
            pomIndex++;
            final File relativePomFile = new File(
                    pom.file.getPath().substring(rootDir.getAbsolutePath().length() + 1));
            System.out.println(
                    String.format(pomMask, Integer.valueOf(pomIndex), pom.gav.toText(), relativePomFile));
            final List<Gav> gavs = map.get(pom);
            final String gavMask = "    %" + (1 + (int) Math.floor(Math.log10(gavs.size()))) + "d) %s";
            int gavIndex = 0;
            for (final Gav gav : gavs) {
                gavIndex++;
                System.out.println(String.format(gavMask, Integer.valueOf(gavIndex), gav.toText()));
            }
        }
    } else {
        if (this.commandList.verbosity == Command.Verbosity.VERBOSE) {
            final List<GroupArtifact> groupArtifacts = Lists.newArrayList();
            final Map<GroupArtifact, Set<Pom>> map = Maps.newConcurrentMap();
            int maxLength = 0;
            int maxGroupSize = 0;
            for (final PomAndGav gavAndPom : pomAndGavs_) {
                final GroupArtifact groupArtifact = gavAndPom.gav.groupArtifact;
                if (!map.containsKey(groupArtifact)) {
                    groupArtifacts.add(groupArtifact);
                    map.put(groupArtifact, Sets.<Pom>newConcurrentHashSet());
                }
                map.get(groupArtifact).add(gavAndPom.pom);
                final int groupSize = map.get(groupArtifact).size();
                if (groupSize > maxGroupSize) {
                    maxGroupSize = groupSize;
                }
                final int length = gavAndPom.pom.gav.groupArtifact.toText().length();
                if (length > maxLength) {
                    maxLength = length;
                }
            }
            final int maxGroupLength = 1 + (int) Math.floor(Math.log10(maxGroupSize));
            Collections.sort(groupArtifacts);
            final String groupArtifactMask = "%" + (1 + (int) Math.floor(Math.log10(groupArtifacts.size())))
                    + "d) %s";
            int groupArtifactIndex = 0;
            final String pomMask = "    %" + maxGroupLength + "d) %-" + maxLength + "s   %s";
            for (final GroupArtifact groupArtifact : groupArtifacts) {
                if (groupArtifactIndex != 0) {
                    System.out.println();
                }
                groupArtifactIndex++;
                System.out.println(String.format(groupArtifactMask, Integer.valueOf(groupArtifactIndex),
                        groupArtifact.toText()));
                final List<Pom> poms_ = Lists.newArrayList(map.get(groupArtifact));
                Collections.sort(poms_);
                int pomIndex = 0;
                for (final Pom pom : poms_) {
                    pomIndex++;
                    final File relativePomFile = new File(
                            pom.file.getPath().substring(rootDir.getAbsolutePath().length() + 1));
                    System.out.println(String.format(pomMask, Integer.valueOf(pomIndex),
                            pom.gav.groupArtifact.toText(), relativePomFile));
                }
            }
        } else {
            final List<Gav> gavs = Lists.newArrayList();
            final Set<Gav> gavsSeen = Sets.newConcurrentHashSet();
            for (final PomAndGav gavAndPom : pomAndGavs_) {
                final Gav gav = gavAndPom.gav;
                if (!gavsSeen.contains(gav)) {
                    gavs.add(gav);
                    gavsSeen.add(gav);
                }
            }
            Collections.sort(gavs);
            final String gavMask = "%" + (1 + (int) Math.floor(Math.log10(gavs.size()))) + "d) %s";
            int gavIndex = 0;
            for (final Gav gav : gavs) {
                gavIndex++;
                System.out.println(String.format(gavMask, Integer.valueOf(gavIndex), gav.toText()));
            }
        }
    }
}