Example usage for java.util.concurrent.atomic.AtomicBoolean AtomicBoolean(boolean)

List of usage examples for the java.util.concurrent.atomic.AtomicBoolean constructor AtomicBoolean(boolean)

Introduction

On this page you can find example usage for the java.util.concurrent.atomic.AtomicBoolean constructor AtomicBoolean(boolean).

Prototype

public AtomicBoolean(boolean initialValue) 

Document

Creates a new AtomicBoolean with the given initial value.
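
Before the project examples below, here is a minimal, self-contained sketch of the constructor together with the operations most of those examples rely on (get, set, and compareAndSet); the class and variable names are illustrative only:

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanBasics {
    public static void main(String[] args) {
        // Create flags with explicit initial values.
        AtomicBoolean started = new AtomicBoolean(false);
        AtomicBoolean alive = new AtomicBoolean(true);

        // compareAndSet atomically flips false -> true exactly once,
        // even if several threads race to do it.
        if (started.compareAndSet(false, true)) {
            System.out.println("first caller wins: " + started.get());
        }

        // Plain set/get provide volatile read/write semantics for
        // simple visibility between threads.
        alive.set(false);
        System.out.println("alive = " + alive.get());
    }
}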

Usage

From source file:com.vmware.admiral.request.ContainerControlLoopServiceTest.java

@SuppressWarnings("unchecked")
@Test
public void redeploymentOfSingleContainers() throws Throwable {
    containerDescription2 = createContainerDescription(false);

    // provision 3 single containers, 2 of them in ERROR state
    ContainerState state = null;
    for (int i = 0; i < SINGLE_CONTAINERS_TO_BE_PROVISIONED; i++) {
        state = provisionContainer(containerDescription2.documentSelfLink);

        if (i < SINGLE_CONTAINERS_TO_BE_PROVISIONED - 1) {
            state.powerState = PowerState.ERROR;
            doPut(state);
        }
    }

    Map<String, List<String>> containersPerContextId = new HashMap<>();

    retrieveContainerStates(containerDescription2.documentSelfLink).thenAccept(containerStates -> {
        containerStates.stream().forEach(cs -> {
            containersPerContextId.put(cs.customProperties.get(RequestUtils.FIELD_NAME_CONTEXT_ID_KEY),
                    Arrays.asList(cs.documentSelfLink));
        });
    });

    doOperation(new ContainerControlLoopState(),
            UriUtils.buildUri(host, ContainerControlLoopService.CONTROL_LOOP_INFO_LINK), false,
            Service.Action.PATCH);

    Map<String, List<String>> redeployedContainersPerContextId = new HashMap<>();
    AtomicBoolean containerFromDesc2Redeployed = new AtomicBoolean(false);

    waitFor(() -> {
        // get all containers from containerDescription2
        retrieveContainerStates(containerDescription2.documentSelfLink).thenAccept(containerStates -> {
            long healthyContainers = containerStates.stream()
                    .filter(cs -> PowerState.RUNNING.equals(cs.powerState)).count();
            host.log("Healthy containers from %s : %d", containerDescription2.documentSelfLink,
                    healthyContainers);
            containerFromDesc2Redeployed.set(SINGLE_CONTAINERS_TO_BE_PROVISIONED == healthyContainers
                    && SINGLE_CONTAINERS_TO_BE_PROVISIONED == containerStates.size());

            containerStates.stream().forEach(cs -> {
                redeployedContainersPerContextId.put(
                        cs.customProperties.get(RequestUtils.FIELD_NAME_CONTEXT_ID_KEY),
                        Arrays.asList(cs.documentSelfLink));
            });
        });

        if (containerFromDesc2Redeployed.get()) {
            containersPerContextId.entrySet().stream().forEach(m -> {
                String contextId = m.getKey();
                List<String> redeployedContainers = redeployedContainersPerContextId.get(contextId);
                host.log("Redeployed container: %s -> %s", StringUtils.join(m.getValue()),
                        StringUtils.join(redeployedContainers));
            });
        }

        return containerFromDesc2Redeployed.get();
    });
}
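
The test above relies on a common idiom: a lambda cannot assign to a local boolean (locals captured by lambdas must be effectively final), so an AtomicBoolean holds the result that the waitFor polling loop reads. A stripped-down sketch of the same idiom, using a hypothetical async computation rather than the Admiral APIs:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;

public class LambdaResultFlag {
    public static void main(String[] args) throws Exception {
        AtomicBoolean done = new AtomicBoolean(false);

        // The lambda cannot write to a plain local boolean,
        // but it can mutate the AtomicBoolean it captures.
        CompletableFuture.supplyAsync(() -> 42)
                .thenAccept(value -> done.set(value == 42));

        // Poll until the callback has run (a test-style wait loop).
        while (!done.get()) {
            Thread.sleep(10);
        }
        System.out.println("condition observed: " + done.get());
    }
}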

From source file:com.nridge.core.app.mgr.AppMgr.java

/**
 * Constructor suitable for use within Servlet Container where the default
 * paths for the application are difficult to derive automatically.
 *
 * @param aInsPathName Installation path name.
 * @param aCfgPathName Configuration path name.
 * @param aLogPathName Log file output path name.
 * @param aDSPathName Data source path name.
 * @param anRDBMSPathName RDBMS path name.
 * @param aGraphPathName Graph path name.
 */
public AppMgr(String aInsPathName, String aCfgPathName, String aLogPathName, String aDSPathName,
        String anRDBMSPathName, String aGraphPathName) {
    mIsPathsExplicit = true;
    mIsAlive = new AtomicBoolean(true);
    mPropertyMap = new HashMap<String, Object>();
    if (StringUtils.isNotEmpty(aInsPathName))
        mInsPathName = aInsPathName;
    else
        mInsPathName = System.getProperty("user.dir");
    if (StringUtils.isNotEmpty(aCfgPathName))
        mCfgPathName = aCfgPathName;
    else
        mCfgPathName = String.format("%s%ccfg", mInsPathName, File.separatorChar);
    if (StringUtils.isNotEmpty(aLogPathName))
        mLogPathName = aLogPathName;
    else
        mLogPathName = String.format("%s%clog", mInsPathName, File.separatorChar);
    if (StringUtils.isNotEmpty(aDSPathName))
        mDSPathName = aDSPathName;
    else
        mDSPathName = String.format("%s%cds", mInsPathName, File.separatorChar);
    if (StringUtils.isNotEmpty(anRDBMSPathName))
        mRDBMSPathName = anRDBMSPathName;
    else
        mRDBMSPathName = String.format("%s%crdb", mInsPathName, File.separatorChar);
    if (StringUtils.isNotEmpty(aGraphPathName))
        mGraphPathName = aGraphPathName;
    else
        mGraphPathName = String.format("%s%cgdb", mInsPathName, File.separatorChar);
}

From source file:com.addthis.hydra.kafka.consumer.KafkaSource.java

@Override
public void init() {
    try {
        if (ignoreMarkDir) {
            File md = new File(markDir);
            if (md.exists()) {
                FileUtils.deleteDirectory(md);
                log.info("Deleted marks directory : {}", md);
            }
        }
        this.bundleQueue = new LinkedBlockingQueue<>(queueSize);
        this.markDb = new PageDB<>(LessFiles.initDirectory(markDir), SimpleMark.class, 100, 100);
        // move to init method
        this.fetchExecutor = new ThreadPoolExecutor(fetchThreads, fetchThreads, 0L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(),
                new ThreadFactoryBuilder().setNameFormat("source-kafka-fetch-%d").setDaemon(true).build());
        this.decodeExecutor = new ThreadPoolExecutor(decodeThreads, decodeThreads, 0L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(),
                new ThreadFactoryBuilder().setNameFormat("source-kafka-decode-%d").setDaemon(true).build());
        this.running = new AtomicBoolean(true);
        final DateTime startTime = (startDate != null) ? DateUtil.getDateTime(dateFormat, startDate) : null;

        zkClient = ZkUtil.makeStandardClient(zookeeper, false);
        TopicMetadata metadata = null;
        int metadataAttempt = 0;
        while (metadata == null && metadataAttempt < metadataRetries) {
            try {
                metadata = ConsumerUtils.getTopicMetadata(zkClient, seedBrokers, topic);
            } catch (Exception e) {
                log.error(
                        "failed to get kafka metadata (attempt {} / {}) for topic: {}, using brokers: {}, error: {}",
                        metadataAttempt, metadataRetries, topic, seedBrokers, e);
                Thread.sleep(metadataBackoff);
            }
            metadataAttempt++;
        }

        final Integer[] shards = config.calcShardList(metadata.partitionsMetadata().size());
        final ListBundleFormat bundleFormat = new ListBundleFormat();
        final CountDownLatch decodeLatch = new CountDownLatch(shards.length);
        for (final int shard : shards) {
            LinkedBlockingQueue<MessageWrapper> messageQueue = new LinkedBlockingQueue<>(this.queueSize);
            final PartitionMetadata partition = metadata.partitionsMetadata().get(shard);
            FetchTask fetcher = new FetchTask(this, topic, partition, startTime, messageQueue);
            fetchExecutor.execute(fetcher);
            Runnable decoder = new DecodeTask(decodeLatch, format, bundleFormat, running, messageQueue,
                    bundleQueue);
            decodeExecutor.execute(decoder);
        }
        decodeExecutor.submit(new MarkEndTask<>(decodeLatch, running, bundleQueue, bundleQueueEndMarker));
    } catch (Exception ex) {
        log.error("Error initializing kafka source: ", ex);
        throw new RuntimeException(ex);
    }
}

From source file:io.druid.segment.realtime.plumber.RealtimePlumberSchoolTest.java

private void testPersist(final Object commitMetadata) throws Exception {
    final AtomicBoolean committed = new AtomicBoolean(false);
    plumber.getSinks().put(0L, new Sink(new Interval(0, TimeUnit.HOURS.toMillis(1)), schema, tuningConfig,
            new DateTime("2014-12-01T12:34:56.789").toString()));
    Assert.assertNull(plumber.startJob());

    final InputRow row = EasyMock.createNiceMock(InputRow.class);
    EasyMock.expect(row.getTimestampFromEpoch()).andReturn(0L);
    EasyMock.expect(row.getDimensions()).andReturn(new ArrayList<String>());
    EasyMock.replay(row);/* www. j  a v  a2 s.  co m*/
    final Committer committer = new Committer() {
        @Override
        public Object getMetadata() {
            return commitMetadata;
        }

        @Override
        public void run() {
            committed.set(true);
        }
    };
    plumber.add(row, Suppliers.ofInstance(committer));
    plumber.persist(committer);

    while (!committed.get()) {
        Thread.sleep(100);
    }
    plumber.getSinks().clear();
    plumber.finishJob();
}
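
The busy-wait on committed.get() works because the Committer's run() fires asynchronously once the persist completes (presumably on the plumber's background persist thread), and AtomicBoolean's volatile read/write semantics guarantee the polling thread observes that write; a plain boolean field would offer no such visibility guarantee.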

From source file:com.alibaba.wasp.fserver.handler.OpenEntityGroupHandler.java

/**
 * Update ZK, ROOT or META. This can take a while if, for example, .META. is
 * not available -- if the server hosting .META. crashed and we are waiting on it
 * to come back -- so run in a thread and keep updating the znode state meantime
 * so the master doesn't time out our entityGroup-in-transition. The caller must
 * clean up the entityGroup if this fails.
 */
boolean updateMeta(final EntityGroup entityGroup) {
    if (this.server.isStopped() || this.fsServices.isStopping()) {
        return false;
    }
    // Object we do wait/notify on. Make it boolean. If set, we're done.
    // Else, wait.
    final AtomicBoolean signaller = new AtomicBoolean(false);
    PostOpenDeployTasksThread t = new PostOpenDeployTasksThread(entityGroup, this.server, this.fsServices,
            signaller);
    t.start();
    int assignmentTimeout = this.server.getConfiguration()
            .getInt("wasp.master.assignment.timeoutmonitor.period", 10000);
    // Total timeout for meta edit. If we fail adding the edit then close out
    // the entityGroup and let it be assigned elsewhere.
    long timeout = assignmentTimeout * 10;
    long now = System.currentTimeMillis();
    long endTime = now + timeout;
    // Let the period at which we update the OPENING state be 1/3 of the
    // entityGroups-in-transition timeout period.
    long period = Math.max(1, assignmentTimeout / 3);
    long lastUpdate = now;
    boolean tickleOpening = true;
    while (!signaller.get() && t.isAlive() && !this.server.isStopped() && !this.fsServices.isStopping()
            && (endTime > now)) {
        long elapsed = now - lastUpdate;
        if (elapsed > period) {
            // Only tickle OPENING if postOpenDeployTasks is taking some time.
            lastUpdate = now;
            tickleOpening = tickleOpening("post_open_deploy");
        }
        synchronized (signaller) {
            try {
                signaller.wait(period);
            } catch (InterruptedException e) {
                // Go to the loop check.
            }
        }
        now = System.currentTimeMillis();
    }
    // Is the thread still alive? We may have left the above loop because the server
    // is stopping or we timed out the edit. If so, interrupt it.
    if (t.isAlive()) {
        if (!signaller.get()) {
            // Thread still running; interrupt
            LOG.debug("Interrupting thread " + t);
            t.interrupt();
        }
        try {
            t.join();
        } catch (InterruptedException ie) {
            LOG.warn("Interrupted joining " + entityGroup.getEntityGroupInfo().getEntityGroupNameAsString(),
                    ie);
            Thread.currentThread().interrupt();
        }
    }

    // Was there an exception opening the entityGroup? This should trigger on
    // InterruptedException too. If so, we failed. Even if only the tickle of
    // OPENING fails, it is a failure.
    return ((!Thread.interrupted() && t.getException() == null) && tickleOpening);
}
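
The signaller above does double duty as a flag and as a monitor: the PostOpenDeployTasksThread sets it and notifies on it, while this method waits on it in bounded slices so it can periodically refresh the OPENING state. A minimal sketch of that dual use, with illustrative names in place of the Wasp classes:

import java.util.concurrent.atomic.AtomicBoolean;

public class SignallerSketch {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean signaller = new AtomicBoolean(false);

        Thread worker = new Thread(() -> {
            try {
                Thread.sleep(500); // simulate the deploy tasks
            } catch (InterruptedException ignored) {
            }
            signaller.set(true);           // publish the result...
            synchronized (signaller) {
                signaller.notifyAll();     // ...and wake the waiter
            }
        });
        worker.start();

        // Wait in bounded slices so periodic keep-alive work can run
        // between checks; the timeout also covers a missed notify.
        while (!signaller.get() && worker.isAlive()) {
            synchronized (signaller) {
                signaller.wait(100);
            }
            // periodic keep-alive work would go here
        }
        System.out.println("signalled: " + signaller.get());
    }
}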

From source file:com.facebook.AccessTokenManager.java

private void refreshCurrentAccessTokenImpl() {
    final AccessToken accessToken = currentAccessToken;
    if (accessToken == null) {
        return;
    }
    if (!tokenRefreshInProgress.compareAndSet(false, true)) {
        return;
    }

    Validate.runningOnUiThread();

    lastAttemptedTokenExtendDate = new Date();

    final Set<String> permissions = new HashSet<>();
    final Set<String> declinedPermissions = new HashSet<>();
    final AtomicBoolean permissionsCallSucceeded = new AtomicBoolean(false);
    final RefreshResult refreshResult = new RefreshResult();

    GraphRequestBatch batch = new GraphRequestBatch(
            createGrantedPermissionsRequest(accessToken, new GraphRequest.Callback() {
                @Override
                public void onCompleted(GraphResponse response) {
                    JSONObject result = response.getJSONObject();
                    if (result == null) {
                        return;
                    }
                    JSONArray permissionsArray = result.optJSONArray("data");
                    if (permissionsArray == null) {
                        return;
                    }
                    permissionsCallSucceeded.set(true);
                    for (int i = 0; i < permissionsArray.length(); i++) {
                        JSONObject permissionEntry = permissionsArray.optJSONObject(i);
                        if (permissionEntry == null) {
                            continue;
                        }
                        String permission = permissionEntry.optString("permission");
                        String status = permissionEntry.optString("status");
                        if (!Utility.isNullOrEmpty(permission) && !Utility.isNullOrEmpty(status)) {
                            status = status.toLowerCase(Locale.US);
                            if (status.equals("granted")) {
                                permissions.add(permission);
                            } else if (status.equals("declined")) {
                                declinedPermissions.add(permission);
                            } else {
                                Log.w(TAG, "Unexpected status: " + status);
                            }
                        }
                    }
                }
            }), createExtendAccessTokenRequest(accessToken, new GraphRequest.Callback() {
                @Override
                public void onCompleted(GraphResponse response) {
                    JSONObject data = response.getJSONObject();
                    if (data == null) {
                        return;
                    }
                    refreshResult.accessToken = data.optString("access_token");
                    refreshResult.expiresAt = data.optInt("expires_at");
                }
            }));

    batch.addCallback(new GraphRequestBatch.Callback() {
        @Override
        public void onBatchCompleted(GraphRequestBatch batch) {
            // Compare user IDs with equals(); == on Strings checks reference identity.
            if (getInstance().getCurrentAccessToken() == null
                    || !getInstance().getCurrentAccessToken().getUserId()
                            .equals(accessToken.getUserId())) {
                return;
            }
            try {
                if (!permissionsCallSucceeded.get() && refreshResult.accessToken == null
                        && refreshResult.expiresAt == 0) {
                    return;
                }
                AccessToken newAccessToken = new AccessToken(
                        refreshResult.accessToken != null ? refreshResult.accessToken : accessToken.getToken(),
                        accessToken.getApplicationId(), accessToken.getUserId(),
                        permissionsCallSucceeded.get() ? permissions : accessToken.getPermissions(),
                        permissionsCallSucceeded.get() ? declinedPermissions
                                : accessToken.getDeclinedPermissions(),
                        accessToken.getSource(),
                        refreshResult.expiresAt != 0 ? new Date(refreshResult.expiresAt * 1000L)
                                : accessToken.getExpires(),
                        new Date());
                getInstance().setCurrentAccessToken(newAccessToken);
            } finally {
                tokenRefreshInProgress.set(false);
            }
        }
    });
    batch.executeAsync();
}
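
The compareAndSet(false, true) at the top of refreshCurrentAccessTokenImpl() is the classic non-blocking guard: exactly one caller wins the false-to-true transition and proceeds, while any concurrent caller returns immediately instead of blocking; the flag is released in the batch callback's finally block once the refresh settles. The guard reduced to its core, with hypothetical names and the release done inline for brevity:

import java.util.concurrent.atomic.AtomicBoolean;

public class RefreshGuard {
    private final AtomicBoolean inProgress = new AtomicBoolean(false);

    public void refresh() {
        // Only one thread can flip false -> true; others bail out.
        if (!inProgress.compareAndSet(false, true)) {
            return;
        }
        try {
            System.out.println("refreshing on " + Thread.currentThread().getName());
        } finally {
            inProgress.set(false); // always release the guard
        }
    }

    public static void main(String[] args) {
        RefreshGuard guard = new RefreshGuard();
        guard.refresh();
        guard.refresh(); // succeeds again once released
    }
}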

From source file:eu.stratosphere.pact.runtime.task.ReduceTaskTest.java

@Test
public void testCancelReduceTaskWhileSorting() {
    addInputComparator(this.comparator);
    setOutput(new NirvanaOutputList());
    getTaskConfig().setDriverStrategy(DriverStrategy.SORTED_GROUP_REDUCE);

    final GroupReduceDriver<Record, Record> testTask = new GroupReduceDriver<Record, Record>();

    try {
        addInputSorted(new DelayingInfinitiveInputIterator(100), this.comparator.duplicate());
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail();
    }

    final AtomicBoolean success = new AtomicBoolean(false);

    Thread taskRunner = new Thread() {
        @Override
        public void run() {
            try {
                testDriver(testTask, MockReduceStub.class);
                success.set(true);
            } catch (Exception ie) {
                ie.printStackTrace();
            }
        }
    };
    taskRunner.start();

    TaskCancelThread tct = new TaskCancelThread(1, taskRunner, this);
    tct.start();

    try {
        tct.join();
        taskRunner.join();
    } catch (InterruptedException ie) {
        Assert.fail("Joining threads failed");
    }

    Assert.assertTrue("Test threw an exception even though it was properly canceled.", success.get());
}
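
As with the lambda examples earlier, the anonymous Thread subclass cannot assign to a captured local variable, so the AtomicBoolean carries the worker's result back to the test thread; taskRunner.join() additionally establishes a happens-before edge, so success.get() is guaranteed to see the worker's write.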

From source file:com.mirth.connect.connectors.tcp.TcpReceiver.java

@Override
public void onDeploy() throws ConnectorTaskException {
    connectorProperties = (TcpReceiverProperties) getConnectorProperties();

    if (connectorProperties.isDataTypeBinary() && isProcessBatch()) {
        throw new ConnectorTaskException("Batch processing is not supported for binary data.");
    }

    // load the default configuration
    String configurationClass = configurationController.getProperty(connectorProperties.getProtocol(),
            "tcpConfigurationClass");

    try {
        configuration = (TcpConfiguration) Class.forName(configurationClass).newInstance();
    } catch (Throwable t) {
        logger.trace("could not find custom configuration class, using default");
        configuration = new DefaultTcpConfiguration();
    }

    try {
        configuration.configureConnectorDeploy(this);
    } catch (Exception e) {
        throw new ConnectorTaskException(e);
    }

    maxConnections = NumberUtils.toInt(connectorProperties.getMaxConnections());
    timeout = NumberUtils.toInt(connectorProperties.getReceiveTimeout());
    bufferSize = NumberUtils.toInt(connectorProperties.getBufferSize());
    reconnectInterval = NumberUtils.toInt(connectorProperties.getReconnectInterval());

    ExtensionController extensionController = ControllerFactory.getFactory().createExtensionController();

    String pluginPointName = (String) connectorProperties.getTransmissionModeProperties().getPluginPointName();
    if (pluginPointName.equals("Basic")) {
        transmissionModeProvider = new BasicModeProvider();
    } else {
        transmissionModeProvider = (TransmissionModeProvider) extensionController.getTransmissionModeProviders()
                .get(pluginPointName);
    }

    if (transmissionModeProvider == null) {
        throw new ConnectorTaskException("Unable to find transmission mode plugin: " + pluginPointName);
    }

    dataTypeServerPlugin = extensionController.getDataTypePlugins().get(getInboundDataType().getType());

    if (dataTypeServerPlugin == null) {
        throw new ConnectorTaskException("Unable to find data type plugin: " + getInboundDataType().getType());
    }

    disposing = new AtomicBoolean(false);

    eventController.dispatchEvent(new ConnectorCountEvent(getChannelId(), getMetaDataId(), getSourceName(),
            ConnectionStatusEventType.IDLE, null, maxConnections));
}

From source file:co.cask.cdap.internal.app.runtime.distributed.AbstractDistributedProgramRunner.java

/**
 * Adds a listener to the given TwillController to delete local temp files when the program has
 * started or terminated. The local temp files can be removed once the program is started, since
 * Twill keeps the files in HDFS and no longer needs the local copies.
 *
 * @return The same TwillController instance.
 */
private TwillController addCleanupListener(TwillController controller, final File hConfFile,
        final File cConfFile, final Program program, final File programDir) {

    final AtomicBoolean deleted = new AtomicBoolean(false);
    controller.addListener(new ServiceListenerAdapter() {
        @Override
        public void running() {
            cleanup();
        }

        @Override
        public void terminated(Service.State from) {
            cleanup();
        }

        @Override
        public void failed(Service.State from, Throwable failure) {
            cleanup();
        }

        private void cleanup() {
            if (deleted.compareAndSet(false, true)) {
                LOG.debug("Cleanup tmp files for {}: {} {} {}", program.getName(), hConfFile, cConfFile,
                        program.getJarLocation().toURI());
                hConfFile.delete();
                cConfFile.delete();
                try {
                    program.getJarLocation().delete();
                } catch (IOException e) {
                    LOG.warn("Failed to delete program jar {}", program.getJarLocation().toURI(), e);
                }
                try {
                    FileUtils.deleteDirectory(programDir);
                } catch (IOException e) {
                    LOG.warn("Failed to delete program directory {}", programDir, e);
                }
            }
        }
    }, Threads.SAME_THREAD_EXECUTOR);
    return controller;
}
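
The deleted.compareAndSet(false, true) check makes cleanup() idempotent: the listener may fire for running(), terminated(), and failed(), but the deletions execute at most once. The run-once idiom in isolation, with illustrative names:

import java.util.concurrent.atomic.AtomicBoolean;

public class RunOnceCleanup {
    private final AtomicBoolean deleted = new AtomicBoolean(false);

    public void cleanup() {
        // First caller flips the flag and does the work; later calls are no-ops.
        if (deleted.compareAndSet(false, true)) {
            System.out.println("cleaning up temp files");
        }
    }

    public static void main(String[] args) {
        RunOnceCleanup c = new RunOnceCleanup();
        c.cleanup(); // performs cleanup
        c.cleanup(); // silently skipped
    }
}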

From source file:org.dataconservancy.packaging.tool.integration.PackageGenerationTest.java

@Test
public void verifyRemediationTest() throws Exception {

    PackageState state = initializer.initialize(DCS_PROFILE);

    Set<URI> originalFileLocations = new HashSet<>();

    ipm2rdf.transformToNode(state.getPackageTree())
            .walk(node -> originalFileLocations.add(node.getFileInfo().getLocation()));

    // The package should contain two files:
    // - READMX
    // - READMÉ
    //
    // The file with the acute E will be remediated to a resource named 'READMX', which will collide with
    // an existing resource of the same name.

    // assert that our sample problem files are in the content to be packaged
    assertTrue(originalFileLocations.stream().anyMatch(uri -> uri.getPath().endsWith("READMX")));
    // 0x0301 is the UTF-16 encoding of the 'COMBINING ACUTE ACCENT' combining diacritic
    // 0x00c9 is the UTF-16 encoding of 'LATIN CAPITAL LETTER E WITH ACUTE'
    assertTrue(originalFileLocations.stream().anyMatch(uri -> (uri.getPath().endsWith("README" + '\u0301'))
            || (uri.getPath().endsWith("READM" + '\u00c9'))));

    OpenedPackage opened = packager.createPackage(state, folder.getRoot());

    AtomicBoolean foundIllegal = new AtomicBoolean(Boolean.FALSE);
    AtomicBoolean foundRemediated = new AtomicBoolean(Boolean.FALSE);
    AtomicReference<String> remediatedFilename = new AtomicReference<>();
    AtomicBoolean foundCollision = new AtomicBoolean(Boolean.FALSE);
    AtomicReference<String> collidingFilename = new AtomicReference<>();

    // Walk the generated package, and make sure that
    // 1. That a resource with illegal characters does not exist
    // 2. That a resource named 'READMX' does exist
    // 3. That a resource named after the SHA-1 hex of its identifier exists
    // 4. That those two resources originate from two different files in the original package content
    opened.getPackageTree().walk(node -> {
        if (node.getFileInfo() == null || !node.getFileInfo().isFile()) {
            return;
        }

        System.err.println(node.getFileInfo().getName());
        System.err.println("  " + node.getFileInfo().getLocation().toString());

        // this should not happen, because a file name with invalid characters should have
        // been remediated prior to being inserted into the package
        if (node.getFileInfo().getLocation().getPath().endsWith("README" + '\u0301')
                || node.getFileInfo().getLocation().getPath().endsWith("READM" + '\u00c9')) {
            foundIllegal.set(Boolean.TRUE);
        }

        if (node.getFileInfo().getLocation().getPath().endsWith(shaHex(node.getIdentifier().toString()))) {
            foundRemediated.set(Boolean.TRUE);
            remediatedFilename.set(node.getFileInfo().getName());
            // short circuit
            return;
        }

        if (node.getFileInfo().getName().equals("READMX") || node.getFileInfo().getName().equals("READM")) {
            foundCollision.set(Boolean.TRUE);
            collidingFilename.set(node.getFileInfo().getName());
        }
    });

    assertFalse(foundIllegal.get());
    assertTrue(foundCollision.get());
    assertTrue(foundRemediated.get());

    assertNotNull(remediatedFilename.get());
    assertNotNull(collidingFilename.get());
    assertNotEquals(remediatedFilename.get(), collidingFilename.get());

}
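
The same capture trick extends beyond booleans: the AtomicReference instances let the tree-walk lambda record which filenames it saw, since a lambda can read, but never reassign, the locals it closes over.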