Example usage for java.util.concurrent.atomic.AtomicReference.AtomicReference(V initialValue)

Introduction

On this page you can find examples showing how the java.util.concurrent.atomic.AtomicReference(V initialValue) constructor is used in real-world projects.

Prototype

public AtomicReference(V initialValue) 

Document

Creates a new AtomicReference with the given initial value.
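
Before the project examples below, here is a minimal, self-contained sketch of the constructor in isolation (class and variable names are illustrative):

import java.util.concurrent.atomic.AtomicReference;

public class AtomicReferenceExample {
    public static void main(String[] args) {
        // Create the reference with an initial value.
        AtomicReference<String> ref = new AtomicReference<>("initial");

        // Read and replace the value atomically.
        String old = ref.getAndSet("updated");
        System.out.println(old + " -> " + ref.get()); // prints: initial -> updated

        // Conditional swap: succeeds only if the current value matches the expected one.
        boolean swapped = ref.compareAndSet("updated", "final");
        System.out.println("swapped=" + swapped + ", value=" + ref.get());
    }
}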

Usage

From source file:com.dgtlrepublic.anitomyj.ParserNumber.java

/**
 * Match type and episode. e.g. "2x01", "S01E03", "S01-02xE001-150".
 *
 * @param word  the word
 * @param token the token
 * @return true if the token matched
 */
public boolean matchTypeAndEpisodePattern(String word, Token token) {
    int numberBegin = ParserHelper.indexOfFirstDigit(word);
    String prefix = StringUtils.substring(word, 0, numberBegin);

    AtomicReference<ElementCategory> category = new AtomicReference<>(kElementAnimeType);
    AtomicReference<KeywordOptions> options = new AtomicReference<>();

    if (KeywordManager.getInstance().findAndSet(KeywordManager.normalzie(prefix), category, options)) {
        parser.getElements().add(new Element(kElementAnimeType, prefix));
        String number = StringUtils.substring(word, numberBegin);
        if (matchEpisodePatterns(number, token) || setEpisodeNumber(number, token, true)) {
            int foundIdx = parser.getTokens().indexOf(token);
            if (foundIdx != -1) {
                token.setContent(number);
                parser.getTokens().add(foundIdx, new Token(
                        options.get().isIdentifiable() ? kIdentifier : kUnknown, prefix, token.isEnclosed()));
            }

            return true;
        }
    }

    return false;
}
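
In the example above, the two AtomicReference instances act as mutable out-parameters: findAndSet writes the detected category and keyword options back into them for the caller to read. A minimal sketch of that holder pattern, with a hypothetical lookup method standing in for KeywordManager:

import java.util.concurrent.atomic.AtomicReference;

public class OutParameterSketch {
    // Hypothetical lookup that reports its results through the supplied holders.
    static boolean find(String keyword, AtomicReference<String> category, AtomicReference<Integer> options) {
        if ("OVA".equalsIgnoreCase(keyword)) {
            category.set("anime_type");
            options.set(1);
            return true;
        }
        return false;
    }

    public static void main(String[] args) {
        AtomicReference<String> category = new AtomicReference<>("unknown"); // pre-seeded default
        AtomicReference<Integer> options = new AtomicReference<>();          // starts out null

        if (find("OVA", category, options)) {
            System.out.println(category.get() + " / options=" + options.get());
        }
    }
}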

From source file:org.zodiark.service.subscriber.SubscriberServiceImpl.java

public PublisherEndpoint retrieve(final String uuid) {
    // TODO: This won't work asynchronously
    final AtomicReference<PublisherEndpoint> publisher = new AtomicReference<>(null);
    eventBus.message(RETRIEVE_PUBLISHER, uuid, new Reply<PublisherEndpoint, String>() {
        @Override
        public void ok(PublisherEndpoint p) {
            publisher.set(p);
        }

        @Override
        public void fail(ReplyException replyException) {
            logger.error("Unable to retrieve publisher {}", uuid);
        }
    });
    return publisher.get();
}
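
As the TODO points out, retrieve() returns before an asynchronous reply has a chance to call publisher.set(p), so the caller may see null. One common way to make such a lookup block until the callback fires is to pair the AtomicReference with a CountDownLatch; the sketch below uses a hypothetical callback interface in place of the project's event bus API:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public class BlockingLookupSketch {
    interface Reply<T> {
        void ok(T value);
        void fail(Exception e);
    }

    // Hypothetical asynchronous lookup, standing in for eventBus.message(...).
    static void lookupAsync(String uuid, Reply<String> reply) {
        new Thread(() -> reply.ok("publisher-for-" + uuid)).start();
    }

    static String retrieve(String uuid) throws InterruptedException {
        final AtomicReference<String> result = new AtomicReference<>(null);
        final CountDownLatch done = new CountDownLatch(1);

        lookupAsync(uuid, new Reply<String>() {
            @Override
            public void ok(String value) {
                result.set(value);
                done.countDown();
            }

            @Override
            public void fail(Exception e) {
                done.countDown();
            }
        });

        // Wait (bounded) for the callback before reading the reference.
        done.await(5, TimeUnit.SECONDS);
        return result.get();
    }

    public static void main(String[] args) throws InterruptedException {
        System.out.println(retrieve("42"));
    }
}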

From source file:de.codesourcery.eve.skills.market.impl.EveCentralMarketDataProvider.java

@Override
public Map<InventoryType, PriceInfoQueryResult> getPriceInfos(final MarketFilter filter,
        final IPriceQueryCallback callback, final InventoryType... items) throws PriceInfoUnavailableException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("getPriceInfos(): filter = " + filter + ", items = " + items);
    }

    if (ArrayUtils.isEmpty(items)) {
        return Collections.emptyMap();
    }

    final AtomicReference<Map<InventoryType, PriceInfoQueryResult>> resultHolder = new AtomicReference<Map<InventoryType, PriceInfoQueryResult>>(
            new ConcurrentHashMap<InventoryType, PriceInfoQueryResult>());

    final IUpdateStrategy updateStrategy = createUpdateStrategy(filter.getUpdateMode(), filter.getOrderType());

    final Vector<NameValuePair> params = new Vector<NameValuePair>();

    /*
     * NEEDS to be run on the EDT since Hibernate
     * lazy-fetching might kick in and
     * the Hibernate session is confined to the EDT.
     */
    runOnEDT(new Runnable() {

        @Override
        public void run() {
            if (LOG.isDebugEnabled()) {
                LOG.debug("getPriceInfos(): update_strategy = " + updateStrategy);
            }

            for (InventoryType t : items) {

                // make sure we don't query over and over
                // for prices that are unavailable anyway
                if (isPriceMissingOnEveCentral(filter, t)) {
                    if (!mayQueryAgainForMissingPrice(filter, t)) {
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("getPriceInfos(): " + "Price for " + t + " "
                                    + "unavailable on eve-central , filter " + filter);
                        }
                        continue;
                    }

                    if (LOG.isDebugEnabled()) {
                        LOG.debug("getPriceInfos(): [ retrying ] " + "Price for " + t + " "
                                + "unavailable on eve-central , filter " + filter);
                    }
                }

                final PriceInfoQueryResult cached = getCachedEntry(filter, t);

                resultHolder.get().put(t, cached);

                if (LOG.isDebugEnabled()) {

                    if (cached.isEmpty()) {
                        LOG.debug("getPriceInfos(): [ NOT CACHED ] type = " + t.getId() + " , name = "
                                + t.getName());
                    } else {
                        LOG.debug("getPriceInfos(): [ CACHE HIT ] " + cached);
                    }
                }

                final boolean requiresUpdate;
                switch (filter.getOrderType()) {
                case BUY:
                    requiresUpdate = updateStrategy.requiresUpdate(t,
                            cached.hasBuyPrice() ? cached.buyPrice() : null);
                    break;
                case SELL:
                    requiresUpdate = updateStrategy.requiresUpdate(t,
                            cached.hasSellPrice() ? cached.sellPrice() : null);
                    break;
                case ANY:
                    requiresUpdate = (updateStrategy.requiresUpdate(t,
                            cached.hasBuyPrice() ? cached.buyPrice() : null)
                            || updateStrategy.requiresUpdate(t,
                                    cached.hasSellPrice() ? cached.sellPrice() : null));
                    break;
                default:
                    throw new RuntimeException("Unhandled switch/case: " + filter.getOrderType());
                }

                if (LOG.isDebugEnabled()) {
                    LOG.debug("getPriceInfos(): [ " + updateStrategy + "] requires_update => " + requiresUpdate
                            + " , type=" + t.getName());
                }

                if (requiresUpdate) {
                    params.add(new BasicNameValuePair("typeid", t.getId().toString()));
                }
            }
        }
    });

    if (params.isEmpty() || isOfflineMode()) { // all entries served from cache
        return resultHolder.get();
    }

    addFilterToRequest(params, filter);

    /*
     * Query data from eve central
     */
    final String responseXmlFromServer = eveCentralClient.sendRequestToServer(params);
    final AtomicReference<String> xml = new AtomicReference<String>(responseXmlFromServer);

    /*
     * NEEDS to be run on the EDT since Hibernate
     * lazy-fetching might kick in and
     * the Hibernate session is confined to the EDT.
     */
    return runOnEventThread(new PriceCallable() {

        public Map<InventoryType, PriceInfoQueryResult> call() throws PriceInfoUnavailableException {
            final Map<InventoryType, PriceInfoQueryResult> realResult = resultHolder.get();

            final Map<Long, List<PriceInfo>> result = parsePriceInfo(filter, xml.get());

            // group prices by item types

            List<PriceInfo> updated = new ArrayList<>();
            try {
                for (InventoryType type : items) {
                    List<PriceInfo> info = result.get(type.getId());

                    if (info == null || info.isEmpty()) {
                        // failed to fetch data, query user 
                        rememberPriceMissingOnEveCentral(filter, type);
                        info = queryPriceFromUser(filter, callback, type);
                    }

                    forgetPriceMissingOnEveCentral(filter, type);

                    for (PriceInfo dataFromServer : info) {
                        dataFromServer.setRegion(filter.getRegion());
                        dataFromServer.setTimestamp(new EveDate(systemClock));
                        dataFromServer.setInventoryType(type);

                        final PriceInfoQueryResult cachedResult = realResult.get(type);

                        if (LOG.isDebugEnabled()) {
                            LOG.debug("getPriceInfos(): from server: " + dataFromServer + " , cached="
                                    + cachedResult);
                        }

                        PriceInfo existing;
                        switch (filter.getOrderType()) {
                        case BUY:
                            existing = cachedResult.hasBuyPrice() ? cachedResult.buyPrice() : null;
                            if (updateStrategy.requiresUpdate(type, existing)) {
                                LOG.debug("getPriceInfos(): merging buy price.");
                                realResult.put(type, cachedResult.merge(filter.getOrderType(), dataFromServer));
                                storeCacheEntry(dataFromServer);
                                updated.add(dataFromServer);
                            }
                            break;
                        case SELL:
                            existing = cachedResult.hasSellPrice() ? cachedResult.sellPrice() : null;
                            if (updateStrategy.requiresUpdate(type, existing)) {
                                LOG.debug("getPriceInfos(): merging sell price.");
                                realResult.put(type, cachedResult.merge(filter.getOrderType(), dataFromServer));
                                storeCacheEntry(dataFromServer);
                                updated.add(dataFromServer);
                            }
                            break;
                        case ANY:
                            existing = cachedResult.hasBuyPrice() ? cachedResult.buyPrice() : null;
                            if (updateStrategy.requiresUpdate(type, existing)) {
                                LOG.debug("getPriceInfos(): merging buy price.");
                                realResult.put(type, cachedResult.merge(PriceInfo.Type.BUY, dataFromServer));
                                storeCacheEntry(dataFromServer);
                                updated.add(dataFromServer);
                            }
                            existing = cachedResult.hasSellPrice() ? cachedResult.sellPrice() : null;
                            if (updateStrategy.requiresUpdate(type, existing)) {
                                LOG.debug("getPriceInfos(): merging sell price.");
                                realResult.put(type, cachedResult.merge(PriceInfo.Type.SELL, dataFromServer));
                                storeCacheEntry(dataFromServer);
                                updated.add(dataFromServer);
                            }
                            break;
                        default:
                            throw new RuntimeException("Unhandled switch/case: " + filter.getOrderType());
                        }
                    }
                }
            } finally {
                fireItemPriceChanged(updated);
            }
            return realResult;
        }
    });
}

From source file:com.netflix.curator.framework.recipes.queue.TestDistributedQueue.java

@Test
public void testSafetyWithCrash() throws Exception {
    final int itemQty = 100;

    DistributedQueue<TestQueueItem> producerQueue = null;
    DistributedQueue<TestQueueItem> consumerQueue1 = null;
    DistributedQueue<TestQueueItem> consumerQueue2 = null;

    CuratorFramework producerClient = CuratorFrameworkFactory.newClient(server.getConnectString(),
            new RetryOneTime(1));
    CuratorFramework consumerClient1 = CuratorFrameworkFactory.newClient(server.getConnectString(),
            new RetryOneTime(1));
    CuratorFramework consumerClient2 = CuratorFrameworkFactory.newClient(server.getConnectString(),
            new RetryOneTime(1));
    try {
        producerClient.start();
        consumerClient1.start();
        consumerClient2.start();

        ExecutorService service = Executors.newCachedThreadPool();

        // make the producer queue
        {
            producerQueue = QueueBuilder.builder(producerClient, null, serializer, QUEUE_PATH).buildQueue();
            producerQueue.start();
            QueueTestProducer producer = new QueueTestProducer(producerQueue, itemQty, 0);
            service.submit(producer);
        }

        final Set<TestQueueItem> takenItems = Sets.newTreeSet();
        final Set<TestQueueItem> takenItemsForConsumer1 = Sets.newTreeSet();
        final Set<TestQueueItem> takenItemsForConsumer2 = Sets.newTreeSet();
        final AtomicReference<TestQueueItem> thrownItemFromConsumer1 = new AtomicReference<TestQueueItem>(null);

        // make the first consumer queue
        {
            final QueueConsumer<TestQueueItem> ourQueue = new QueueConsumer<TestQueueItem>() {
                @Override
                public void consumeMessage(TestQueueItem message) throws Exception {
                    synchronized (takenItems) {
                        if (takenItems.size() > 10) {
                            thrownItemFromConsumer1.set(message);
                            throw new Exception("dummy"); // simulate a crash
                        }
                    }

                    addToTakenItems(message, takenItems, itemQty);
                    synchronized (takenItemsForConsumer1) {
                        takenItemsForConsumer1.add(message);
                    }

                    Thread.sleep((long) (Math.random() * 5));
                }

                @Override
                public void stateChanged(CuratorFramework client, ConnectionState newState) {
                }
            };
            consumerQueue1 = QueueBuilder.builder(consumerClient1, ourQueue, serializer, QUEUE_PATH)
                    .lockPath("/a/locks").buildQueue();
            consumerQueue1.start();
        }

        // make the second consumer queue
        {
            final QueueConsumer<TestQueueItem> ourQueue = new QueueConsumer<TestQueueItem>() {
                @Override
                public void consumeMessage(TestQueueItem message) throws Exception {
                    addToTakenItems(message, takenItems, itemQty);
                    synchronized (takenItemsForConsumer2) {
                        takenItemsForConsumer2.add(message);
                    }
                    Thread.sleep((long) (Math.random() * 5));
                }

                @Override
                public void stateChanged(CuratorFramework client, ConnectionState newState) {
                }
            };
            consumerQueue2 = QueueBuilder.builder(consumerClient2, ourQueue, serializer, QUEUE_PATH)
                    .lockPath("/a/locks").buildQueue();
            consumerQueue2.start();
        }

        synchronized (takenItems) {
            while (takenItems.size() < itemQty) {
                takenItems.wait(1000);
            }
        }

        int i = 0;
        for (TestQueueItem item : takenItems) {
            Assert.assertEquals(item.str, Integer.toString(i++));
        }

        Assert.assertNotNull(thrownItemFromConsumer1.get());
        Assert.assertTrue((takenItemsForConsumer2.contains(thrownItemFromConsumer1.get())));
        Assert.assertTrue(Sets.intersection(takenItemsForConsumer1, takenItemsForConsumer2).size() == 0);
    } finally {
        IOUtils.closeQuietly(producerQueue);
        IOUtils.closeQuietly(consumerQueue1);
        IOUtils.closeQuietly(consumerQueue2);

        IOUtils.closeQuietly(producerClient);
        IOUtils.closeQuietly(consumerClient1);
        IOUtils.closeQuietly(consumerClient2);
    }
}

From source file:com.hardincoding.sonar.subsonic.service.SubsonicMusicService.java

private HttpResponse executeWithRetry(Context context, String url, String originalUrl, HttpParams requestParams,
        List<String> parameterNames, List<Object> parameterValues, List<Header> headers,
        ProgressListener progressListener, CancellableTask task) throws IOException {
    Log.i(TAG, "Using URL " + url);

    final AtomicReference<Boolean> cancelled = new AtomicReference<Boolean>(false);
    int attempts = 0;
    while (true) {
        attempts++;
        HttpContext httpContext = new BasicHttpContext();
        final HttpPost request = new HttpPost(url);

        if (task != null) {
            // Attempt to abort the HTTP request if the task is cancelled.
            task.setOnCancelListener(new CancellableTask.OnCancelListener() {
                @Override
                public void onCancel() {
                    cancelled.set(true);
                    request.abort();
                }
            });
        }

        if (parameterNames != null) {
            List<NameValuePair> params = new ArrayList<NameValuePair>();
            for (int i = 0; i < parameterNames.size(); i++) {
                params.add(
                        new BasicNameValuePair(parameterNames.get(i), String.valueOf(parameterValues.get(i))));
            }
            request.setEntity(new UrlEncodedFormEntity(params, Util.UTF_8));
        }

        if (requestParams != null) {
            request.setParams(requestParams);
            Log.d(TAG, "Socket read timeout: " + HttpConnectionParams.getSoTimeout(requestParams) + " ms.");
        }

        if (headers != null) {
            for (Header header : headers) {
                request.addHeader(header);
            }
        }

        try {
            HttpResponse response = mHttpClient.execute(request, httpContext);
            detectRedirect(originalUrl, context, httpContext);
            return response;
        } catch (IOException x) {
            request.abort();
            if (attempts >= HTTP_REQUEST_MAX_ATTEMPTS || cancelled.get()) {
                throw x;
            }
            if (progressListener != null) {
                String msg = context.getResources().getString(R.string.music_service_retry, attempts,
                        HTTP_REQUEST_MAX_ATTEMPTS - 1);
                progressListener.updateProgress(msg);
            }
            Log.w(TAG, "Got IOException (" + attempts + "), will retry", x);
            increaseTimeouts(requestParams);
            Util.sleepQuietly(2000L);
        }
    }
}
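
In this example the AtomicReference<Boolean> serves only as a shared, mutable cancellation flag; java.util.concurrent.atomic.AtomicBoolean covers the same need with a primitive-friendly API. A minimal sketch of that alternative (names are illustrative):

import java.util.concurrent.atomic.AtomicBoolean;

public class CancellationFlagSketch {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean cancelled = new AtomicBoolean(false);

        Thread worker = new Thread(() -> {
            while (!cancelled.get()) {
                // ... perform one retryable unit of work ...
            }
            System.out.println("worker observed cancellation");
        });
        worker.start();

        cancelled.set(true); // equivalent to cancelled.set(Boolean.TRUE) on an AtomicReference<Boolean>
        worker.join();
    }
}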

From source file:com.yahoo.pulsar.broker.loadbalance.SimpleLoadManagerImplTest.java

@Test(enabled = false)
public void testPrimarySecondary() throws Exception {
    LocalZooKeeperCache mockCache = mock(LocalZooKeeperCache.class);
    ZooKeeperChildrenCache zooKeeperChildrenCache = mock(ZooKeeperChildrenCache.class);

    Set<String> activeBrokers = Sets.newHashSet("prod2-broker7.messaging.use.example.com:8080",
            "prod2-broker8.messaging.use.example.com:8080", "prod2-broker9.messaging.use.example.com:8080");
    when(mockCache.getChildren(SimpleLoadManagerImpl.LOADBALANCE_BROKERS_ROOT)).thenReturn(activeBrokers);
    when(zooKeeperChildrenCache.get()).thenReturn(activeBrokers);
    when(zooKeeperChildrenCache.get(SimpleLoadManagerImpl.LOADBALANCE_BROKERS_ROOT)).thenReturn(activeBrokers);

    Field zkCacheField = PulsarService.class.getDeclaredField("localZkCache");
    zkCacheField.setAccessible(true);

    LocalZooKeeperCache originalLZK1 = (LocalZooKeeperCache) zkCacheField.get(pulsar1);
    LocalZooKeeperCache originalLZK2 = (LocalZooKeeperCache) zkCacheField.get(pulsar2);
    System.out.println("lzk are " + originalLZK1.getChildren(SimpleLoadManagerImpl.LOADBALANCE_BROKERS_ROOT)
            + " 2: " + originalLZK2.getChildren(SimpleLoadManagerImpl.LOADBALANCE_BROKERS_ROOT));
    zkCacheField.set(pulsar1, mockCache);

    LocalZooKeeperCache newZk = (LocalZooKeeperCache) pulsar1.getLocalZkCache();
    System.out.println("lzk mocked are " + newZk.getChildren(SimpleLoadManagerImpl.LOADBALANCE_BROKERS_ROOT));

    ZooKeeperChildrenCache availableActiveBrokers = new ZooKeeperChildrenCache(pulsar1.getLocalZkCache(),
            SimpleLoadManagerImpl.LOADBALANCE_BROKERS_ROOT);

    System.out.println("lzk mocked active brokers are "
            + availableActiveBrokers.get(SimpleLoadManagerImpl.LOADBALANCE_BROKERS_ROOT));

    LoadManager loadManager = new SimpleLoadManagerImpl(pulsar1);

    PulsarResourceDescription rd = new PulsarResourceDescription();
    rd.put("memory", new ResourceUsage(1024, 4096));
    rd.put("cpu", new ResourceUsage(10, 100));
    rd.put("bandwidthIn", new ResourceUsage(250 * 1024, 1024 * 1024));
    rd.put("bandwidthOut", new ResourceUsage(550 * 1024, 1024 * 1024));

    ResourceUnit ru1 = new SimpleResourceUnit("http://prod2-broker7.messaging.usw.example.com:8080", rd);
    Set<ResourceUnit> rus = new HashSet<ResourceUnit>();
    rus.add(ru1);
    LoadRanker lr = new ResourceAvailabilityRanker();
    AtomicReference<Map<Long, Set<ResourceUnit>>> sortedRankingsInstance = new AtomicReference<>(
            Maps.newTreeMap());
    sortedRankingsInstance.get().put(lr.getRank(rd), rus);

    Field sortedRankings = SimpleLoadManagerImpl.class.getDeclaredField("sortedRankings");
    sortedRankings.setAccessible(true);
    sortedRankings.set(loadManager, sortedRankingsInstance);

    ResourceUnit found = ((SimpleLoadManagerImpl) loadManager)
            .getLeastLoaded(new NamespaceName("pulsar/use/primary-ns.10"));
    assertEquals(found.getResourceId(), ru1.getResourceId());

    zkCacheField.set(pulsar1, originalLZK1);
}

From source file:com.datatorrent.contrib.kafka.SimpleKafkaConsumer.java

@Override
public void start() {
    monitorException = new AtomicReference<Throwable>(null);
    monitorExceptionCount = new AtomicInteger(0);
    super.start();

    // thread to consume the kafka data
    kafkaConsumerExecutor = Executors.newCachedThreadPool(
            new ThreadFactoryBuilder().setNameFormat("kafka-consumer-" + topic + "-%d").build());

    if (metadataRefreshInterval <= 0 || CollectionUtils.isEmpty(kps)) {
        return;
    }

    // background thread to monitor the kafka metadata change
    metadataRefreshExecutor = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
            .setNameFormat("kafka-consumer-monitor-" + topic + "-%d").setDaemon(true).build());

    // start one monitor thread to monitor the leader broker change and trigger some action
    metadataRefreshExecutor.scheduleAtFixedRate(new MetaDataMonitorTask(this), 0, metadataRefreshInterval,
            TimeUnit.MILLISECONDS);
}

From source file:de.hybris.platform.test.HJMPTest.java

private AtomicReference<Boolean> startPKLookupInOtherThread(final PK pk) {
    final Tenant tenant = Registry.getCurrentTenantNoFallback();

    final AtomicReference<Boolean> success = new AtomicReference<Boolean>(null);

    final Thread thread = new Thread() {
        @Override
        public void run() {
            Registry.setCurrentTenant(tenant);
            try {
                assertNotNull(SystemEJB.getInstance().findRemoteObjectByPK(pk));
                success.set(Boolean.TRUE);
            } catch (final EJBItemNotFoundException e) {
                success.set(Boolean.FALSE);
            } catch (final Exception e) {
                success.set(Boolean.FALSE);
                fail(e.getMessage());
            }
        }
    };
    thread.start();

    return success;
}
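
The returned AtomicReference only carries a result once the background thread has finished, so a caller typically joins the thread or polls until the value is non-null before reading it. A minimal sketch of that consumer side, with a placeholder task instead of the PK lookup:

import java.util.concurrent.atomic.AtomicReference;

public class ThreadResultSketch {
    static AtomicReference<Boolean> startLookup() {
        final AtomicReference<Boolean> success = new AtomicReference<>(null); // null = not finished yet
        Thread worker = new Thread(() -> {
            // ... perform the lookup; here we simply pretend it succeeded ...
            success.set(Boolean.TRUE);
        });
        worker.start();
        return success;
    }

    public static void main(String[] args) throws InterruptedException {
        AtomicReference<Boolean> success = startLookup();

        // Poll until the worker publishes its result (joining the thread would also work).
        while (success.get() == null) {
            Thread.sleep(10);
        }
        System.out.println("lookup succeeded: " + success.get());
    }
}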

From source file:info.archinnov.achilles.test.integration.tests.LWTOperationsIT.java

@Test
public void should_notify_listener_on_LWT_update_failure() throws Exception {
    //Given
    final AtomicReference<LWTResultListener.LWTResult> atomicLWTResult = new AtomicReference<>(null);
    LWTResultListener listener = new LWTResultListener() {
        @Override
        public void onSuccess() {
        }

        @Override
        public void onError(LWTResult lwtResult) {
            atomicLWTResult.compareAndSet(null, lwtResult);
        }
    };
    Map<String, Object> expectedCurrentValues = ImmutableMap.<String, Object>of("[applied]", false, "name",
            "John");

    CompleteBean entity = builder().randomId().name("John").addFollowers("Paul", "Andrew").buid();
    final CompleteBean managed = manager.insert(entity);
    managed.getFollowers().add("Helen");

    //When
    manager.update(managed, ifEqualCondition("name", "Helen").lwtResultListener(listener));

    //Then
    final LWTResultListener.LWTResult LWTResult = atomicLWTResult.get();
    assertThat(LWTResult).isNotNull();
    assertThat(LWTResult.operation()).isEqualTo(UPDATE);
    assertThat(LWTResult.currentValues()).isEqualTo(expectedCurrentValues);

}
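
The listener above calls compareAndSet(null, lwtResult) so that only the first failure is recorded even if onError fires more than once. A minimal sketch of that capture-first-value idiom in isolation:

import java.util.concurrent.atomic.AtomicReference;

public class FirstValueSketch {
    public static void main(String[] args) {
        AtomicReference<String> firstError = new AtomicReference<>(null);

        // Only the first call wins; later calls see a non-null value and leave it untouched.
        firstError.compareAndSet(null, "error #1");
        firstError.compareAndSet(null, "error #2");

        System.out.println(firstError.get()); // prints: error #1
    }
}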