Example usage for java.util.concurrent.atomic AtomicReference AtomicReference

Introduction

On this page you can find usage examples for the java.util.concurrent.atomic AtomicReference constructor AtomicReference(V initialValue), drawn from several open-source projects.

Prototype

public AtomicReference(V initialValue) 

Document

Creates a new AtomicReference with the given initial value.
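A minimal sketch of the constructor in isolation (the class and variable names below are illustrative and not taken from the examples that follow):

import java.util.concurrent.atomic.AtomicReference;

public class AtomicReferenceDemo {
    public static void main(String[] args) {
        // Seed the reference with an initial value via the constructor shown above.
        AtomicReference<String> status = new AtomicReference<>("PENDING");

        // compareAndSet atomically replaces the value only if it still equals the expected one.
        boolean updated = status.compareAndSet("PENDING", "RUNNING");
        System.out.println(updated + " -> " + status.get()); // prints: true -> RUNNING
    }
}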

Usage

From source file:de.acosix.alfresco.utility.repo.subsystems.SubsystemChildApplicationContextManager.java

/**
 * Determines the instance ID for a specific child application context from within all the child application contexts managed by this
 * instance.
 *
 * @param childApplicationContext
 *            the child application context
 * @return the ID of the child application instance or {@code null} if none of the currently active child application contexts match the
 *         provided one
 */
public String determineInstanceId(final ApplicationContext childApplicationContext) {
    this.lock.readLock().lock();
    try {
        final SubsystemApplicationContextManagerState state = (SubsystemApplicationContextManagerState) this
                .getState(false);

        final Collection<String> instanceIds = state.getInstanceIds();
        final AtomicReference<String> matchingInstanceId = new AtomicReference<>(null);

        for (final String id : instanceIds) {
            if (matchingInstanceId.get() == null) {
                final SubsystemChildApplicationContextFactory applicationContextFactory = state
                        .getApplicationContextFactory(id);
                final ApplicationContext readOnlyApplicationContext = applicationContextFactory
                        .getReadOnlyApplicationContext();

                if (readOnlyApplicationContext == childApplicationContext) {
                    matchingInstanceId.set(id);
                }
            }
        }

        return matchingInstanceId.get();
    } finally {
        this.lock.readLock().unlock();
    }
}

From source file:eu.eidas.auth.commons.EIDASUtil.java

/**
 * Private constructor. Prevents the class from being instantiated.
 */
EIDASUtil() {
    propertiesRef = new AtomicReference<ImmutableMap<String, String>>(ImmutableMap.<String, String>of());
}

From source file:io.druid.client.cache.MemcachedCache.java

public static MemcachedCache create(final MemcachedCacheConfig config) {
    final ConcurrentMap<String, AtomicLong> counters = new ConcurrentHashMap<>();
    final ConcurrentMap<String, AtomicLong> meters = new ConcurrentHashMap<>();
    final AbstractMonitor monitor = new AbstractMonitor() {
        final AtomicReference<Map<String, Long>> priorValues = new AtomicReference<Map<String, Long>>(
                new HashMap<String, Long>());

        @Override
        public boolean doMonitor(ServiceEmitter emitter) {
            final Map<String, Long> priorValues = this.priorValues.get();
            final Map<String, Long> currentValues = getCurrentValues();
            final ServiceMetricEvent.Builder builder = ServiceMetricEvent.builder();
            for (Map.Entry<String, Long> entry : currentValues.entrySet()) {
                emitter.emit(builder.setDimension("memcached metric", entry.getKey())
                        .build("query/cache/memcached/total", entry.getValue()));
                final Long prior = priorValues.get(entry.getKey());
                if (prior != null) {
                    emitter.emit(builder.setDimension("memcached metric", entry.getKey())
                            .build("query/cache/memcached/delta", entry.getValue() - prior));
                }
            }

            if (!this.priorValues.compareAndSet(priorValues, currentValues)) {
                log.error("Prior value changed while I was reporting! updating anyways");
                this.priorValues.set(currentValues);
            }
            return true;
        }

        private Map<String, Long> getCurrentValues() {
            final ImmutableMap.Builder<String, Long> builder = ImmutableMap.builder();
            for (Map.Entry<String, AtomicLong> entry : counters.entrySet()) {
                builder.put(entry.getKey(), entry.getValue().get());
            }
            for (Map.Entry<String, AtomicLong> entry : meters.entrySet()) {
                builder.put(entry.getKey(), entry.getValue().get());
            }
            return builder.build();
        }
    };
    try {
        LZ4Transcoder transcoder = new LZ4Transcoder(config.getMaxObjectSize());

        // always use compression
        transcoder.setCompressionThreshold(0);

        OperationQueueFactory opQueueFactory;
        long maxQueueBytes = config.getMaxOperationQueueSize();
        if (maxQueueBytes > 0) {
            opQueueFactory = new MemcachedOperationQueueFactory(maxQueueBytes);
        } else {
            opQueueFactory = new LinkedOperationQueueFactory();
        }

        final Predicate<String> interesting = new Predicate<String>() {
            // See net.spy.memcached.MemcachedConnection.registerMetrics()
            private final Set<String> interestingMetrics = ImmutableSet.of(
                    "[MEM] Reconnecting Nodes (ReconnectQueue)",
                    //"[MEM] Shutting Down Nodes (NodesToShutdown)", // Busted
                    "[MEM] Request Rate: All", "[MEM] Average Bytes written to OS per write",
                    "[MEM] Average Bytes read from OS per read",
                    "[MEM] Average Time on wire for operations (s)",
                    "[MEM] Response Rate: All (Failure + Success + Retry)", "[MEM] Response Rate: Retry",
                    "[MEM] Response Rate: Failure", "[MEM] Response Rate: Success");

            @Override
            public boolean apply(@Nullable String input) {
                return input != null && interestingMetrics.contains(input);
            }
        };

        final MetricCollector metricCollector = new MetricCollector() {
            @Override
            public void addCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                counters.putIfAbsent(name, new AtomicLong(0L));

                if (log.isDebugEnabled()) {
                    log.debug("Add Counter [%s]", name);
                }
            }

            @Override
            public void removeCounter(String name) {
                if (log.isDebugEnabled()) {
                    log.debug("Ignoring request to remove [%s]", name);
                }
            }

            @Override
            public void incrementCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.incrementAndGet();

                if (log.isDebugEnabled()) {
                    log.debug("Increment [%s]", name);
                }
            }

            @Override
            public void incrementCounter(String name, int amount) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.addAndGet(amount);

                if (log.isDebugEnabled()) {
                    log.debug("Increment [%s] %d", name, amount);
                }
            }

            @Override
            public void decrementCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.decrementAndGet();

                if (log.isDebugEnabled()) {
                    log.debug("Decrement [%s]", name);
                }
            }

            @Override
            public void decrementCounter(String name, int amount) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0L));
                    counter = counters.get(name);
                }
                counter.addAndGet(-amount);

                if (log.isDebugEnabled()) {
                    log.debug("Decrement [%s] %d", name, amount);
                }
            }

            @Override
            public void addMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                meters.putIfAbsent(name, new AtomicLong(0L));
                if (log.isDebugEnabled()) {
                    log.debug("Adding meter [%s]", name);
                }
            }

            @Override
            public void removeMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                if (log.isDebugEnabled()) {
                    log.debug("Ignoring request to remove meter [%s]", name);
                }
            }

            @Override
            public void markMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong meter = meters.get(name);
                if (meter == null) {
                    meters.putIfAbsent(name, new AtomicLong(0L));
                    meter = meters.get(name);
                }
                meter.incrementAndGet();

                if (log.isDebugEnabled()) {
                    log.debug("Increment counter [%s]", name);
                }
            }

            @Override
            public void addHistogram(String name) {
                log.debug("Ignoring add histogram [%s]", name);
            }

            @Override
            public void removeHistogram(String name) {
                log.debug("Ignoring remove histogram [%s]", name);
            }

            @Override
            public void updateHistogram(String name, int amount) {
                log.debug("Ignoring update histogram [%s]: %d", name, amount);
            }
        };

        final ConnectionFactory connectionFactory = new MemcachedCustomConnectionFactoryBuilder()
                // 1000 repetitions gives us good distribution with murmur3_128
                // (approx < 5% difference in counts across nodes, with 5 cache nodes)
                .setKetamaNodeRepetitions(1000).setHashAlg(MURMUR3_128)
                .setProtocol(ConnectionFactoryBuilder.Protocol.BINARY)
                .setLocatorType(ConnectionFactoryBuilder.Locator.CONSISTENT).setDaemon(true)
                .setFailureMode(FailureMode.Cancel).setTranscoder(transcoder).setShouldOptimize(true)
                .setOpQueueMaxBlockTime(config.getTimeout()).setOpTimeout(config.getTimeout())
                .setReadBufferSize(config.getReadBufferSize()).setOpQueueFactory(opQueueFactory)
                .setMetricCollector(metricCollector).setEnableMetrics(MetricType.DEBUG) // Not as scary as it sounds
                .build();

        final List<InetSocketAddress> hosts = AddrUtil.getAddresses(config.getHosts());

        final Supplier<ResourceHolder<MemcachedClientIF>> clientSupplier;

        if (config.getNumConnections() > 1) {
            clientSupplier = new LoadBalancingPool<MemcachedClientIF>(config.getNumConnections(),
                    new Supplier<MemcachedClientIF>() {
                        @Override
                        public MemcachedClientIF get() {
                            try {
                                return new MemcachedClient(connectionFactory, hosts);
                            } catch (IOException e) {
                                log.error(e, "Unable to create memcached client");
                                throw Throwables.propagate(e);
                            }
                        }
                    });
        } else {
            clientSupplier = Suppliers.<ResourceHolder<MemcachedClientIF>>ofInstance(StupidResourceHolder
                    .<MemcachedClientIF>create(new MemcachedClient(connectionFactory, hosts)));
        }

        return new MemcachedCache(clientSupplier, config, monitor);
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
}

From source file:com.sysunite.nifi.XmlSplit.java

@Override
protected void init(final ProcessorInitializationContext context) {
    // position 0
    final List<PropertyDescriptor> properties = new ArrayList<>();
    //properties.add(SPLIT);
    this.properties = Collections.unmodifiableList(properties);

    final Set<Relationship> set = new HashSet<>();
    set.add(ORIGINAL);
    relationships = new AtomicReference<>(set);
}

From source file:com.sysunite.nifi.StringSplit.java

@Override
protected void init(final ProcessorInitializationContext context) {
    // position 0
    final List<PropertyDescriptor> properties = new ArrayList<>();
    properties.add(SPLIT);
    this.properties = Collections.unmodifiableList(properties);

    final Set<Relationship> set = new HashSet<>();
    set.add(ORIGINAL);
    this.relationships = new AtomicReference<>(set);
}

From source file:com.alibaba.jstorm.cluster.StormZkClusterState.java

public StormZkClusterState(Object cluster_state_spec) throws Exception {

    if (cluster_state_spec instanceof ClusterState) {
        solo = false;
        cluster_state = (ClusterState) cluster_state_spec;
    } else {

        solo = true;
        cluster_state = new DistributedClusterState((Map) cluster_state_spec);
    }

    assignment_info_callback = new ConcurrentHashMap<String, RunnableCallback>();
    supervisors_callback = new AtomicReference<RunnableCallback>(null);
    assignments_callback = new AtomicReference<RunnableCallback>(null);
    storm_base_callback = new ConcurrentHashMap<String, RunnableCallback>();
    master_callback = new AtomicReference<RunnableCallback>(null);

    state_id = cluster_state.register(new ClusterStateCallback() {

        public <T> Object execute(T... args) {
            if (args == null) {
                LOG.warn("Input args is null");
                return null;
            } else if (args.length < 2) {
                LOG.warn("Input args is invalid, args length:" + args.length);
                return null;
            }

            EventType zkEventTypes = (EventType) args[0];
            String path = (String) args[1];

            List<String> toks = PathUtils.tokenize_path(path);
            int size = toks.size();
            if (size >= 1) {
                String params = null;
                String root = toks.get(0);
                RunnableCallback fn = null;
                if (root.equals(Cluster.ASSIGNMENTS_ROOT)) {
                    if (size == 1) {
                        // set null and get the old value
                        fn = assignments_callback.getAndSet(null);
                    } else {
                        params = toks.get(1);
                        fn = assignment_info_callback.remove(params);
                    }

                } else if (root.equals(Cluster.SUPERVISORS_ROOT)) {
                    fn = supervisors_callback.getAndSet(null);
                } else if (root.equals(Cluster.STORMS_ROOT) && size > 1) {
                    params = toks.get(1);
                    fn = storm_base_callback.remove(params);
                } else if (root.equals(Cluster.MASTER_ROOT)) {
                    fn = master_callback.getAndSet(null);
                } else {
                    LOG.error("Unknown callback for subtree " + path);
                }

                if (fn != null) {
                    // FIXME How to set the args
                    // fn.setArgs(params, zkEventTypes, path);
                    fn.run();
                }
            }

            return null;
        }

    });

    String[] pathlist = JStormUtils.mk_arr(Cluster.SUPERVISORS_SUBTREE, Cluster.STORMS_SUBTREE,
            Cluster.ASSIGNMENTS_SUBTREE, Cluster.ASSIGNMENTS_BAK_SUBTREE, Cluster.TASKS_SUBTREE,
            Cluster.TASKBEATS_SUBTREE, Cluster.TASKERRORS_SUBTREE, Cluster.METRIC_SUBTREE,
            Cluster.BACKPRESSURE_SUBTREE);
    for (String path : pathlist) {
        cluster_state.mkdirs(path);
    }

}

From source file:com.jivesoftware.os.upena.uba.service.Nanny.java

public Nanny(PasswordStore passwordStore, UpenaClient upenaClient, RepositoryProvider repositoryProvider,
        InstanceDescriptor instanceDescriptor, InstancePath instancePath,
        DeployableValidator deployableValidator, DeployLog deployLog, HealthLog healthLog,
        DeployableScriptInvoker invokeScript, UbaLog ubaLog,
        Cache<String, Boolean> haveRunConfigExtractionCache) {

    this.passwordStore = passwordStore;
    this.upenaClient = upenaClient;
    this.repositoryProvider = repositoryProvider;
    this.instanceDescriptor = new AtomicReference<>(instanceDescriptor);
    this.instancePath = instancePath;
    this.deployableValidator = deployableValidator;
    this.deployLog = deployLog;
    this.healthLog = healthLog;
    this.invokeScript = invokeScript;
    this.ubaLog = ubaLog;
    linkedBlockingQueue = new LinkedBlockingQueue<>(10);
    threadPoolExecutor = new ThreadPoolExecutor(1, 1, 1000, TimeUnit.MILLISECONDS, linkedBlockingQueue);
    boolean exists = instancePath.deployLog().exists();
    LOG.info("Stats script for {} exists == {}", instanceDescriptor, exists);
    redeploy = new AtomicBoolean(!exists);
    destroyed = new AtomicBoolean(false);
    this.haveRunConfigExtractionCache = haveRunConfigExtractionCache;
}

From source file:com.globocom.grou.report.ReportService.java

public void send(Test test) throws Exception {
    final AtomicReference<List<Throwable>> exceptions = new AtomicReference<>(new ArrayList<>());
    final Map<String, Double> report = getReport(test);
    final HashMap<String, Double> reportSanitized = sanitizeKeyName(report);
    test.setResult(reportSanitized);
    testRepository.save(test);
    test.getNotify().forEach(notify -> {
        try {
            if (VALID_EMAIL_ADDRESS_REGEX.matcher(notify).matches()) {
                notifyByMail(test, notify.replaceAll("^mailto:[/]{0,2}", ""), report);
            } else if (VALID_HTTP_ADDRESS_REGEX.matcher(notify).matches()) {
                notifyByHttp(test, notify);
            } else {
                throw new UnsupportedOperationException("notify destination unsupported: " + notify);
            }
        } catch (Exception e) {
            exceptions.get().add(e);
        }
    });
    String exceptionsStr = exceptions.get().stream().map(Throwable::getMessage)
            .collect(Collectors.joining(" "));
    if (!exceptionsStr.isEmpty()) {
        throw new IllegalStateException(exceptionsStr);
    }
}

From source file:com.netflix.curator.framework.recipes.leader.TestLeaderSelectorCluster.java

@Test
public void testLostRestart() throws Exception {
    final Timing timing = new Timing();

    CuratorFramework client = null;
    TestingCluster cluster = new TestingCluster(3);
    cluster.start();
    try {
        client = CuratorFrameworkFactory.newClient(cluster.getConnectString(), timing.session(),
                timing.connection(), new RetryOneTime(1));
        client.start();
        client.sync("/", null);

        final AtomicReference<Exception> error = new AtomicReference<Exception>(null);
        final AtomicReference<String> lockNode = new AtomicReference<String>(null);
        final Semaphore semaphore = new Semaphore(0);
        final CountDownLatch lostLatch = new CountDownLatch(1);
        final CountDownLatch internalLostLatch = new CountDownLatch(1);
        LeaderSelectorListener listener = new LeaderSelectorListener() {
            @Override
            public void takeLeadership(CuratorFramework client) throws Exception {
                try {
                    List<String> names = client.getChildren().forPath("/leader");
                    if (names.size() != 1) {
                        semaphore.release();
                        Exception exception = new Exception("Names size isn't 1: " + names.size());
                        error.set(exception);
                        return;
                    }
                    lockNode.set(names.get(0));

                    semaphore.release();
                    if (!timing.multiple(4).awaitLatch(internalLostLatch)) {
                        error.set(new Exception("internalLostLatch await failed"));
                    }
                } finally {
                    lostLatch.countDown();
                }
            }

            @Override
            public void stateChanged(CuratorFramework client, ConnectionState newState) {
                if (newState == ConnectionState.LOST) {
                    internalLostLatch.countDown();
                }
            }
        };
        LeaderSelector selector = new LeaderSelector(client, "/leader", listener);
        selector.start();
        Assert.assertTrue(timing.multiple(4).acquireSemaphore(semaphore));
        if (error.get() != null) {
            throw new AssertionError(error.get());
        }

        Collection<InstanceSpec> instances = cluster.getInstances();
        cluster.stop();

        Assert.assertTrue(timing.multiple(4).awaitLatch(lostLatch));
        timing.sleepABit();
        Assert.assertFalse(selector.hasLeadership());

        Assert.assertNotNull(lockNode.get());

        cluster = new TestingCluster(instances.toArray(new InstanceSpec[instances.size()]));
        cluster.start();

        try {
            client.delete().forPath(ZKPaths.makePath("/leader", lockNode.get())); // simulate the lock deleting due to session expiration
        } catch (Exception ignore) {
            // ignore
        }

        Assert.assertTrue(semaphore.availablePermits() == 0);
        Assert.assertFalse(selector.hasLeadership());

        selector.requeue();
        Assert.assertTrue(timing.multiple(4).acquireSemaphore(semaphore));
    } finally {
        IOUtils.closeQuietly(client);
        IOUtils.closeQuietly(cluster);
    }
}

From source file:io.fabric8.kubernetes.client.dsl.internal.WatchConnectionManager.java

public WatchConnectionManager(final OkHttpClient client, final BaseOperation<T, L, ?, ?> baseOperation,
        final String version, final Watcher<T> watcher, final int reconnectInterval, final int reconnectLimit)
        throws InterruptedException, ExecutionException, MalformedURLException {
    if (version == null) {
        KubernetesResourceList currentList = baseOperation.list();
        this.resourceVersion = new AtomicReference<>(currentList.getMetadata().getResourceVersion());
    } else {
        this.resourceVersion = new AtomicReference<>(version);
    }
    this.clonedClient = client.newBuilder().readTimeout(1200, TimeUnit.MILLISECONDS).build();
    this.baseOperation = baseOperation;
    this.watcher = watcher;
    this.reconnectInterval = reconnectInterval;
    this.reconnectLimit = reconnectLimit;

    runWatch();
}