Example usage for java.util.concurrent.atomic AtomicReference get

List of usage examples for java.util.concurrent.atomic AtomicReference get

Introduction

On this page you can find example usage for java.util.concurrent.atomic AtomicReference get.

Prototype

public final V get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
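
Before the usage examples below, here is a minimal, self-contained sketch of get() on its own; the class name and values are illustrative and not taken from any of the source files that follow.

import java.util.concurrent.atomic.AtomicReference;

public class AtomicReferenceGetExample {
    public static void main(String[] args) {
        // Hold an initial value in the reference.
        AtomicReference<String> ref = new AtomicReference<>("initial");

        // get() performs a volatile read and returns the current value.
        String current = ref.get();
        System.out.println(current); // prints "initial"

        // After an update, get() observes the new value.
        ref.set("updated");
        System.out.println(ref.get()); // prints "updated"
    }
}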

Usage

From source file:com.coinblesk.client.MainActivity.java

private void initSwitch(Menu menu) {
    MenuItem item = menu.findItem(R.id.myswitch);
    View view = item.getActionView();
    final Switch mySwitch = (Switch) view.findViewById(R.id.switchAB);
    final AtomicReference<CountDownTimer> ref = new AtomicReference<>();
    mySwitch.setOnCheckedChangeListener(new CompoundButton.OnCheckedChangeListener() {
        @Override
        public void onCheckedChanged(CompoundButton compoundButton, boolean isChecked) {
            if (isChecked) {
                //enable BT
                ref.set(new CountDownTimer(30000, 1000) {
                    int i = 0;

                    public void onTick(final long millisUntilFinished) {
                        mySwitch.setButtonDrawable(
                                (i++ % 2) == 0 ? R.drawable.bluetooth_onon : R.drawable.bluetooth_on);
                        mySwitch.setTextOn("" + millisUntilFinished / 1000);
                    }

                    public void onFinish() {
                        mySwitch.setButtonDrawable(R.drawable.bluetooth_on);
                        mySwitch.setChecked(false);
                    }

                });
                ref.get().start();
                LocalBroadcastManager.getInstance(MainActivity.this)
                        .sendBroadcast(new Intent(Constants.START_CLIENTS_ACTION));

            } else {
                //mySwitch.setShowText(false);
                CountDownTimer tmp;
                if ((tmp = ref.getAndSet(null)) != null) {
                    tmp.cancel();
                }
                LocalBroadcastManager.getInstance(MainActivity.this)
                        .sendBroadcast(new Intent(Constants.STOP_CLIENTS_ACTION));
            }
        }
    });
}

From source file:io.atomix.protocols.gossip.map.AntiEntropyMapDelegate.java

@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> recomputeFunction) {
    checkState(!closed, destroyedMessage);
    checkNotNull(key, ERROR_NULL_KEY);
    checkNotNull(recomputeFunction, "Recompute function cannot be null");

    String encodedKey = encodeKey(key);
    AtomicReference<MapDelegateEvent.Type> update = new AtomicReference<>();
    AtomicReference<MapValue> previousValue = new AtomicReference<>();
    MapValue computedValue = items.compute(encodedKey, (k, mv) -> {
        previousValue.set(mv);
        V newRawValue = recomputeFunction.apply(key, mv == null ? null : mv.get(this::decodeValue));
        byte[] newEncodedValue = encodeValue(newRawValue);
        if (mv != null && Arrays.equals(newEncodedValue, mv.get())) {
            // value was not updated
            return mv;
        }
        MapValue newValue = new MapValue(newEncodedValue,
                timestampProvider.get(Maps.immutableEntry(key, newRawValue)));
        if (mv == null) {
            update.set(INSERT);
            return newValue;
        } else if (newValue.isNewerThan(mv)) {
            update.set(UPDATE);
            return newValue;
        } else {
            return mv;
        }
    });
    if (update.get() != null) {
        notifyPeers(new UpdateEntry(encodedKey, computedValue), peerUpdateFunction
                .select(Maps.immutableEntry(key, computedValue.get(this::decodeValue)), membershipService));
        MapDelegateEvent.Type updateType = computedValue.isTombstone() ? REMOVE : update.get();
        V value = computedValue.isTombstone()
                ? previousValue.get() == null ? null : previousValue.get().get(this::decodeValue)
                : computedValue.get(this::decodeValue);
        if (value != null) {
            notifyListeners(new MapDelegateEvent<>(updateType, key, value));
        }
        return value;
    }
    return computedValue.get(this::decodeValue);
}

From source file:com.todoroo.astrid.activity.TaskEditFragment.java

@SuppressWarnings("nls")
private void attachImage(Bitmap bitmap) {

    AtomicReference<String> nameRef = new AtomicReference<String>();
    String path = FileUtilities.getNewImageAttachmentPath(getActivity(), nameRef);

    try {
        FileOutputStream fos = new FileOutputStream(path);
        bitmap.compress(Bitmap.CompressFormat.PNG, 100, fos);
        fos.flush();
        fos.close();

        createNewFileAttachment(path, nameRef.get(), TaskAttachment.FILE_TYPE_IMAGE + "png");
    } catch (Exception e) {
        Toast.makeText(getActivity(), R.string.file_err_copy, Toast.LENGTH_LONG).show();
    }
}

From source file:io.cloudslang.lang.tools.build.SlangBuilderTest.java

@Test
public void testProcessRunTestsParallel() {
    final Map<String, SlangTestCase> testCases = new LinkedHashMap<>();
    final SlangTestCase testCase1 = new SlangTestCase("test1", "testFlowPath", "desc", asList("abc", "new"),
            "mock", null, null, false, "SUCCESS");
    final SlangTestCase testCase2 = new SlangTestCase("test2", "testFlowPath", "desc", asList("efg", "new"),
            "mock", null, null, false, "SUCCESS");
    final SlangTestCase testCase3 = new SlangTestCase("test3", "testFlowPath", "desc", asList("new", "new2"),
            "mock", null, null, false, "SUCCESS");
    final SlangTestCase testCase4 = new SlangTestCase("test4", "testFlowPath", "desc", asList("jjj", "new2"),
            "mock", null, null, false, "SUCCESS");
    final SlangTestCase testCase5 = new SlangTestCase("test5", "testFlowPath", "desc",
            asList("hhh", "jjj", "abc"), "mock", null, null, false, "SUCCESS");

    testCases.put("test1", testCase1);
    testCases.put("test2", testCase2);
    testCases.put("test3", testCase3);
    testCases.put("test4", testCase4);
    testCases.put("test5", testCase5);

    final List<String> testSuites = newArrayList("abc");
    final Map<String, CompilationArtifact> compiledFlows = new HashMap<>();
    final String projectPath = "aaa";

    final AtomicReference<IRunTestResults> capturedArgument = new AtomicReference<>();
    doAnswer(getAnswer(capturedArgument)).when(slangTestRunner).splitTestCasesByRunState(any(BulkRunMode.class),
            anyMap(), anyList(), any(IRunTestResults.class), any(BuildModeConfig.class));
    doNothing().when(slangTestRunner).runTestsParallel(anyString(), anyMap(), anyMap(),
            any(ThreadSafeRunTestResults.class));

    // Tested call
    slangBuilder.processRunTests(projectPath, testSuites, ALL_PARALLEL, compiledFlows, testCases,
            buildModeConfig);

    InOrder inOrder = Mockito.inOrder(slangTestRunner);
    inOrder.verify(slangTestRunner).splitTestCasesByRunState(eq(ALL_PARALLEL), eq(testCases), eq(testSuites),
            isA(ThreadSafeRunTestResults.class), any(BuildModeConfig.class));
    inOrder.verify(slangTestRunner).runTestsParallel(eq(projectPath), anyMap(), eq(compiledFlows),
            eq((ThreadSafeRunTestResults) capturedArgument.get()));
    verifyNoMoreInteractions(slangTestRunner);
    verify(slangTestRunner, never()).runTestsSequential(anyString(), anyMap(), anyMap(),
            any(IRunTestResults.class));
}

From source file:io.fabric8.maven.core.service.openshift.OpenshiftBuildService.java

private void waitForOpenShiftBuildToComplete(OpenShiftClient client, Build build)
        throws MojoExecutionException, InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final CountDownLatch logTerminateLatch = new CountDownLatch(1);
    final String buildName = KubernetesHelper.getName(build);

    final AtomicReference<Build> buildHolder = new AtomicReference<>();

    // Don't query for logs directly, Watch over the build pod:
    waitUntilPodIsReady(buildName + "-build", 20, log);
    log.info("Waiting for build " + buildName + " to complete...");
    try (LogWatch logWatch = client.pods().withName(buildName + "-build").watchLog()) {
        KubernetesClientUtil.printLogsAsync(logWatch, "Failed to tail build log", logTerminateLatch, log);
        Watcher<Build> buildWatcher = getBuildWatcher(latch, buildName, buildHolder);
        try (Watch watcher = client.builds().withName(buildName).watch(buildWatcher)) {
            // Check if the build is already finished to avoid waiting indefinitely
            Build lastBuild = client.builds().withName(buildName).get();
            if (Builds.isFinished(KubernetesResourceUtil.getBuildStatusPhase(lastBuild))) {
                log.debug("Build %s is already finished", buildName);
                buildHolder.set(lastBuild);
                latch.countDown();
            }

            waitUntilBuildFinished(latch);
            logTerminateLatch.countDown();

            build = buildHolder.get();
            if (build == null) {
                log.debug("Build watcher on %s was closed prematurely", buildName);
                build = client.builds().withName(buildName).get();
            }
            String status = KubernetesResourceUtil.getBuildStatusPhase(build);
            if (Builds.isFailed(status) || Builds.isCancelled(status)) {
                throw new MojoExecutionException("OpenShift Build " + buildName + " error: "
                        + KubernetesResourceUtil.getBuildStatusReason(build));
            }

            if (!Builds.isFinished(status)) {
                log.warn(
                        "Could not wait for the completion of build %s. It may be  may be still running (status=%s)",
                        buildName, status);
            } else {
                log.info("Build %s in status %s", buildName, status);
            }
        }
    }
}

From source file:com.quartercode.eventbridge.test.def.bridge.module.DefaultStandardHandlerModuleTest.java

@SuppressWarnings("unchecked")
@Test
public void testCallHandler() {

    final BridgeConnector source = context.mock(BridgeConnector.class);

    final EmptyEvent1 regularEvent = new EmptyEvent1();
    final EmptyEvent2 otherEvent = new EmptyEvent2();

    final EventHandler<Event> handler = context.mock(EventHandler.class, "handler");
    final EventPredicate<Event> predicate = context.mock(EventPredicate.class, "predicate");

    final StandardHandleInterceptor interceptor = context.mock(StandardHandleInterceptor.class);
    module.getChannel().addInterceptor(new DummyStandardHandleInterceptor(interceptor), 1);

    final AtomicReference<LowLevelHandler> lowLevelHandler = new AtomicReference<>();

    // @formatter:off
    context.checking(new Expectations() {
        {

            allowing(predicate).test(regularEvent);
            will(returnValue(true));
            allowing(predicate).test(otherEvent);
            will(returnValue(false));

            oneOf(lowLevelHandlerModule).addHandler(with(aLowLevelHandlerWithThePredicate(predicate)));
            will(storeArgument(0).in(lowLevelHandler));

            final Sequence handleChain = context.sequence("handleChain");
            // Regular event
            oneOf(interceptor).handle(with(any(ChannelInvocation.class)), with(regularEvent), with(source),
                    with(handler));
            inSequence(handleChain);
            oneOf(handler).handle(regularEvent);
            inSequence(handleChain);
            // Other event
            // Expect the unwanted event to be invoked since the predicate is not tested by the StandardHandlerModule
            // In fact, the predicate is tested by the LowLevelHandlerModule
            oneOf(interceptor).handle(with(any(ChannelInvocation.class)), with(otherEvent), with(source),
                    with(handler));
            inSequence(handleChain);
            oneOf(handler).handle(otherEvent);
            inSequence(handleChain);

        }
    });
    // @formatter:on

    module.addHandler(handler, predicate);

    lowLevelHandler.get().handle(regularEvent, source);
    lowLevelHandler.get().handle(otherEvent, source);
}

From source file:io.cloudslang.lang.tools.build.SlangBuilderTest.java

@Test
public void testProcessRunTestsSequential() {
    final Map<String, SlangTestCase> testCases = new LinkedHashMap<>();
    final SlangTestCase testCase1 = new SlangTestCase("test1", "testFlowPath", "desc", asList("abc", "new"),
            "mock", null, null, false, "SUCCESS");
    final SlangTestCase testCase2 = new SlangTestCase("test2", "testFlowPath", "desc", asList("efg", "new"),
            "mock", null, null, false, "SUCCESS");
    final SlangTestCase testCase3 = new SlangTestCase("test3", "testFlowPath", "desc", asList("new", "new2"),
            "mock", null, null, false, "SUCCESS");

    testCases.put("test1", testCase1);
    testCases.put("test2", testCase2);
    testCases.put("test3", testCase3);

    final List<String> testSuites = newArrayList("abc");
    final Map<String, CompilationArtifact> compiledFlows = new HashMap<>();
    final String projectPath = "aaa";

    final AtomicReference<IRunTestResults> theCapturedArgument = new AtomicReference<>();
    doAnswer(getAnswer(theCapturedArgument)).when(slangTestRunner).splitTestCasesByRunState(
            any(BulkRunMode.class), anyMap(), anyList(), any(IRunTestResults.class),
            any(BuildModeConfig.class));
    doNothing().when(slangTestRunner).runTestsSequential(anyString(), anyMap(), anyMap(),
            any(IRunTestResults.class));

    BuildModeConfig basic = BuildModeConfig.createBasicBuildModeConfig();
    // Tested call
    slangBuilder.processRunTests(projectPath, testSuites, ALL_SEQUENTIAL, compiledFlows, testCases, basic);

    InOrder inOrder = Mockito.inOrder(slangTestRunner);
    inOrder.verify(slangTestRunner).splitTestCasesByRunState(eq(ALL_SEQUENTIAL), eq(testCases), eq(testSuites),
            isA(RunTestsResults.class), eq(basic));
    inOrder.verify(slangTestRunner).runTestsSequential(eq(projectPath), anyMap(), eq(compiledFlows),
            eq((RunTestsResults) theCapturedArgument.get()));
    inOrder.verify(slangTestRunner, never()).runTestsParallel(anyString(), anyMap(), anyMap(),
            any(ThreadSafeRunTestResults.class));
    verifyNoMoreInteractions(slangTestRunner);
}

From source file:com.microsoft.tfs.core.clients.versioncontrol.Workstation.java

/**
 * Internal method to get the {@link InternalCache}.
 */
public InternalCache getCache() {
    boolean cacheReloaded = false;
    InternalCache ret;

    final AtomicReference<InternalWorkspaceConflictInfo[]> outConflictingWorkspaces = new AtomicReference<InternalWorkspaceConflictInfo[]>();

    synchronized (cacheMutex) {
        cacheReloaded = ensureCacheLoaded(outConflictingWorkspaces);
        ret = workspaceCache;
    }

    if (cacheReloaded) {
        onNonFatalError(outConflictingWorkspaces.get());

        // Let the listeners (Client objects) know.
        onCacheFileReloaded();
    }

    return ret;
}

From source file:com.networknt.openapi.ValidatorHandlerTest.java

@Test
public void testInvalidPost() throws Exception {
    final AtomicReference<ClientResponse> reference = new AtomicReference<>();
    final Http2Client client = Http2Client.getInstance();
    final CountDownLatch latch = new CountDownLatch(1);
    final ClientConnection connection;
    try {
        connection = client.connect(new URI("http://localhost:8080"), Http2Client.WORKER, Http2Client.SSL,
                Http2Client.POOL, OptionMap.EMPTY).get();
    } catch (Exception e) {
        throw new ClientException(e);
    }

    try {
        String post = "{\"name\":\"Pinky\", \"photoUrl\": \"http://www.photo.com/1.jpg\"}";
        connection.getIoThread().execute(new Runnable() {
            @Override
            public void run() {
                final ClientRequest request = new ClientRequest().setMethod(Methods.POST).setPath("/post");
                request.getRequestHeaders().put(Headers.HOST, "localhost");
                request.getRequestHeaders().put(Headers.CONTENT_TYPE, "application/json");
                request.getRequestHeaders().put(Headers.TRANSFER_ENCODING, "chunked");
                connection.sendRequest(request, client.createClientCallback(reference, latch, post));
            }
        });

        latch.await(10, TimeUnit.SECONDS);
    } catch (Exception e) {
        logger.error("IOException: ", e);
        throw new ClientException(e);
    } finally {
        IoUtils.safeClose(connection);
    }
    int statusCode = reference.get().getResponseCode();
    String body = reference.get().getAttachment(Http2Client.RESPONSE_BODY);
    Assert.assertEquals(404, statusCode);
    if (statusCode == 404) {
        Status status = Config.getInstance().getMapper().readValue(body, Status.class);
        Assert.assertNotNull(status);
        Assert.assertEquals("ERR10007", status.getCode());
    }
}

From source file:com.microsoft.tfs.core.clients.versioncontrol.Workstation.java

private Workspace[] updateWorkspaceInfoCache(final String key, final VersionControlClient client,
        String ownerName) {
    // Do not allow null for owner name. We do not want to put the workspaces
    // for every owner into the cache. The machine may be a server with many
    // different users having workspaces on it.
    Check.notNullOrEmpty(ownerName, "ownerName"); //$NON-NLS-1$

    // Make sure we have the fully qualified owner name.
    ownerName = client.resolveUserUniqueName(ownerName);

    // We do this *before* removing the workspaces from cache in case it
    // throws.
    Workspace[] workspaces = null;

    if (client.isAuthorizedUser(ownerName)) {
        // Only add the permissions filter when ownerName is the
        // AuthenticatedUser.
        workspaces = client.queryWorkspaces(null, ownerName, getName(),
                WorkspacePermissions.READ.combine(WorkspacePermissions.USE));
    } else {
        workspaces = client.queryWorkspaces(null, ownerName, getName());
    }

    // Refresh the server GUID in case it changed somehow
    client.refreshServerGUID();

    final List<InternalWorkspaceConflictInfo> warningList = new ArrayList<InternalWorkspaceConflictInfo>();
    final List<KeyValuePair<Exception, Workspace>> errorList = new ArrayList<KeyValuePair<Exception, Workspace>>();

    final Set<String> keysUpdated = new TreeSet<String>(String.CASE_INSENSITIVE_ORDER);

    // Make sure to add the key passed in. This way if no workspaces are
    // found at least we can record that nothing was found for this key.
    keysUpdated.add(key);

    synchronized (cacheMutex) {
        // Remove all workspaces for the specified repository.
        final List<WorkspaceInfo> infoRemovedList = removeCachedWorkspaceInfo(client, workspaces, false);
        infoRemovedList.addAll(removeCachedWorkspaceInfo(client, ownerName, false));

        // Add all workspaces that are local and are owned by the specified
        // owner.
        for (final Workspace workspace : workspaces) {
            try {
                final AtomicReference<InternalWorkspaceConflictInfo[]> cws1 = new AtomicReference<InternalWorkspaceConflictInfo[]>();

                final WorkspaceInfo info = getCache().insertWorkspace(workspace, cws1);

                for (final InternalWorkspaceConflictInfo iwci : cws1.get()) {
                    warningList.add(iwci);
                }

                // For each workspace info that was removed, we want to copy
                // its local metadata (e.g., LastSavedCheckin) to the new
                // object.
                copyLocalMetadata(info, infoRemovedList);

                keysUpdated.addAll(createLoadWorkspacesTableKeys(client, workspace));
            } catch (final VersionControlException exception) {
                // Skip the workspace if there's a mapping conflict, etc.
                errorList.add(new KeyValuePair<Exception, Workspace>(exception, workspace));
            }
        }

        final AtomicReference<InternalWorkspaceConflictInfo[]> cws2 = new AtomicReference<InternalWorkspaceConflictInfo[]>();

        InternalCacheLoader.saveConfigIfDirty(getCache(), cws2, cacheMutex, workspaceCacheFile);

        for (final InternalWorkspaceConflictInfo iwci : cws2.get()) {
            warningList.add(iwci);
        }

        // Record for EnsureUpdateWorkspaceInfoCache() the fact that we have
        // already queried the server for the user's workspaces.
        final Long now = System.currentTimeMillis();
        for (final String updatedKey : keysUpdated) {
            workspacesLoadedTable.put(updatedKey, now);
        }
    }

    // Raise all of the error events after releasing the lock.
    for (final InternalWorkspaceConflictInfo iwci : warningList) {
        onNonFatalError(new WorkstationNonFatalErrorEvent(EventSource.newFromHere(), iwci));
    }
    for (final KeyValuePair<Exception, Workspace> error : errorList) {
        onNonFatalError(
                new WorkstationNonFatalErrorEvent(EventSource.newFromHere(), error.getKey(), error.getValue()));
    }

    return workspaces;
}