Example usage for java.util.concurrent.atomic AtomicBoolean set

List of usage examples for java.util.concurrent.atomic AtomicBoolean set

Introduction

On this page you can find example usage for java.util.concurrent.atomic AtomicBoolean set.

Prototype

public final void set(boolean newValue) 

Document

Sets the value to newValue, with memory effects as specified by VarHandle#setVolatile.
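
Before the project examples below, here is a minimal, self-contained sketch of set in action (a hypothetical demo class, not taken from any of the projects listed): one thread uses an AtomicBoolean as a shutdown flag, and set(true) makes the new value visible to a polling worker thread thanks to the volatile memory effects described above.

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanSetDemo {
    public static void main(String[] args) throws InterruptedException {
        // Shared flag; set(true) publishes the new value with volatile
        // memory effects, so the worker is guaranteed to observe it.
        final AtomicBoolean shutdown = new AtomicBoolean(false);

        Thread worker = new Thread(() -> {
            while (!shutdown.get()) {
                // ... do some work ...
            }
            System.out.println("worker observed the shutdown flag");
        });
        worker.start();

        Thread.sleep(100);   // let the worker run briefly
        shutdown.set(true);  // request shutdown; visible to the worker
        worker.join();
    }
}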

Usage

From source file:io.nats.client.ITClusterTest.java

@Test
public void testHotSpotReconnect() throws InterruptedException {
    int numClients = 100;
    ExecutorService executor = Executors.newFixedThreadPool(numClients,
            new NatsThreadFactory("testhotspotreconnect"));

    final BlockingQueue<String> rch = new LinkedBlockingQueue<String>();
    final BlockingQueue<Integer> dch = new LinkedBlockingQueue<Integer>();
    final AtomicBoolean shutdown = new AtomicBoolean(false);
    try (NatsServer s1 = runServerOnPort(1222)) {
        try (NatsServer s2 = runServerOnPort(1224)) {
            try (NatsServer s3 = runServerOnPort(1226)) {

                final class NATSClient implements Runnable {
                    Connection nc = null;
                    final AtomicInteger numReconnects = new AtomicInteger(0);
                    final AtomicInteger numDisconnects = new AtomicInteger(0);
                    String currentUrl = null;
                    final AtomicInteger instance = new AtomicInteger(-1);

                    final Options opts;

                    NATSClient(int inst) {
                        this.instance.set(inst);
                        opts = defaultOptions();
                        opts.servers = Nats.processUrlArray(testServers);

                        opts.disconnectedCb = new DisconnectedCallback() {
                            public void onDisconnect(ConnectionEvent event) {
                                numDisconnects.incrementAndGet();
                                try {
                                    dch.put(instance.get());
                                } catch (InterruptedException e) {
                                    e.printStackTrace();
                                }
                                nc.setDisconnectedCallback(null);
                            }
                        };
                        opts.reconnectedCb = new ReconnectedCallback() {
                            public void onReconnect(ConnectionEvent event) {
                                numReconnects.incrementAndGet();
                                currentUrl = nc.getConnectedUrl();
                                try {
                                    rch.put(currentUrl);
                                } catch (InterruptedException e) {
                                    e.printStackTrace();
                                }
                            }
                        };
                    }

                    @Override
                    public void run() {
                        try {
                            nc = opts.connect();
                            assertFalse(nc.isClosed());
                            assertNotNull(nc.getConnectedUrl());
                            currentUrl = nc.getConnectedUrl();
                            // System.err.println("Instance " + instance + " connected to " +
                            // currentUrl);
                            while (!shutdown.get()) {
                                sleep(10);
                            }
                            nc.close();
                        } catch (IOException e) {
                            e.printStackTrace();
                        }
                    }

                    public synchronized boolean isConnected() {
                        return (nc != null && !nc.isClosed());
                    }

                    public void shutdown() {
                        shutdown.set(true);
                    }
                }

                List<NATSClient> tasks = new ArrayList<NATSClient>(numClients);
                for (int i = 0; i < numClients; i++) {
                    NATSClient task = new NATSClient(i);
                    tasks.add(task);
                    executor.submit(task);
                }

                Map<String, Integer> cs = new HashMap<String, Integer>();

                int numReady = 0;
                while (numReady < numClients) {
                    numReady = 0;
                    for (NATSClient cli : tasks) {
                        if (cli.isConnected()) {
                            numReady++;
                        }
                    }
                    sleep(100);
                }

                s1.shutdown();
                sleep(1000);

                int disconnected = 0;
                // wait for disconnects
                while (dch.size() > 0 && disconnected < numClients) {
                    Integer instance = dch.poll(5, TimeUnit.SECONDS);
                    assertNotNull("timed out waiting for disconnect signal", instance);
                    disconnected++;
                }
                assertTrue(disconnected > 0);

                int reconnected = 0;
                // wait for reconnects
                for (int i = 0; i < disconnected; i++) {
                    String url = null;
                    while (rch.size() == 0) {
                        sleep(50);
                    }
                    url = rch.poll(5, TimeUnit.SECONDS);
                    assertNotNull("timed out waiting for reconnect signal", url);
                    reconnected++;
                    Integer count = cs.get(url);
                    if (count != null) {
                        cs.put(url, ++count);
                    } else {
                        cs.put(url, 1);
                    }
                }

                for (NATSClient client : tasks) {
                    client.shutdown();
                }
                executor.shutdownNow();
                assertTrue(executor.awaitTermination(2, TimeUnit.SECONDS));

                assertEquals(disconnected, reconnected);

                int numServers = 2;

                assertEquals(numServers, cs.size());

                int expected = numClients / numServers;
                // We expect a 40 percent variance
                int var = (int) ((float) expected * 0.40);

                int delta = Math.abs(cs.get(testServers[2]) - cs.get(testServers[4]));
                // System.err.printf("var = %d, delta = %d\n", var, delta);
                if (delta > var) {
                    String str = String.format("Connected clients to servers out of range: %d/%d", delta, var);
                    fail(str);
                }
            }
        }
    }
}

From source file:org.apache.tinkerpop.gremlin.structure.IoTest.java

@Test
@FeatureRequirement(featureClass = Graph.Features.EdgeFeatures.class, feature = Graph.Features.EdgeFeatures.FEATURE_ADD_EDGES)
@FeatureRequirement(featureClass = Graph.Features.VertexFeatures.class, feature = Graph.Features.VertexFeatures.FEATURE_ADD_VERTICES)
@FeatureRequirement(featureClass = EdgePropertyFeatures.class, feature = EdgePropertyFeatures.FEATURE_SERIALIZABLE_VALUES)
public void shouldSupportUUIDInGraphSON() throws Exception {
    final UUID id = UUID.randomUUID();
    final Vertex v1 = g.addVertex(T.label, "person");
    final Vertex v2 = g.addVertex(T.label, "person");
    final Edge e = v1.addEdge("friend", v2, "uuid", id);

    try (final ByteArrayOutputStream os = new ByteArrayOutputStream()) {
        final GraphSONWriter writer = g.io().graphSONWriter()
                .mapper(g.io().graphSONMapper().embedTypes(true).create()).create();
        writer.writeEdge(os, e);

        final AtomicBoolean called = new AtomicBoolean(false);
        final GraphSONReader reader = g.io().graphSONReader()
                .mapper(g.io().graphSONMapper().embedTypes(true).create()).create();
        try (final ByteArrayInputStream bais = new ByteArrayInputStream(os.toByteArray())) {
            reader.readEdge(bais, detachedEdge -> {
                assertEquals(e.id(),
                        graphProvider.reconstituteGraphSONIdentifier(Edge.class, detachedEdge.id()));
                assertEquals(v1.id(), graphProvider.reconstituteGraphSONIdentifier(Vertex.class,
                        detachedEdge.iterators().vertexIterator(Direction.OUT).next().id()));
                assertEquals(v2.id(), graphProvider.reconstituteGraphSONIdentifier(Vertex.class,
                        detachedEdge.iterators().vertexIterator(Direction.IN).next().id()));
                assertEquals(v1.label(), detachedEdge.iterators().vertexIterator(Direction.OUT).next().label());
                assertEquals(v2.label(), detachedEdge.iterators().vertexIterator(Direction.IN).next().label());
                assertEquals(e.label(), detachedEdge.label());
                assertEquals(e.keys().size(),
                        StreamFactory.stream(detachedEdge.iterators().propertyIterator()).count());
                assertEquals(id, detachedEdge.value("uuid"));

                called.set(true);

                return null;
            });
        }

        assertTrue(called.get());
    }
}

From source file:org.apache.hadoop.hdfs.client.impl.TestBlockReaderFactory.java

/**
 * Test the case where a short circuit read fails at first and then,
 * later on, succeeds.
 * Any thread waiting on a cache load should receive the failure (if it
 * occurs); however, the failure result should not be cached.  We want
 * to be able to retry later and succeed.
 */
@Test(timeout = 60000)
public void testShortCircuitCacheTemporaryFailure() throws Exception {
    BlockReaderTestUtil.enableBlockReaderFactoryTracing();
    final AtomicBoolean replicaCreationShouldFail = new AtomicBoolean(true);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {
        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            if (replicaCreationShouldFail.get()) {
                // Insert a short delay to increase the chance that one client
                // thread waits for the other client thread's failure via
                // a condition variable.
                Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS);
                return new ShortCircuitReplicaInfo();
            }
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testShortCircuitCacheTemporaryFailure", sockDir);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int NUM_THREADS = 2;
    final int SEED = 0xFADED;
    final CountDownLatch gotFailureLatch = new CountDownLatch(NUM_THREADS);
    final CountDownLatch shouldRetryLatch = new CountDownLatch(1);
    DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    Runnable readerRunnable = new Runnable() {
        @Override
        public void run() {
            try {
                // First time should fail.
                List<LocatedBlock> locatedBlocks = cluster.getNameNode().getRpcServer()
                        .getBlockLocations(TEST_FILE, 0, TEST_FILE_LEN).getLocatedBlocks();
                LocatedBlock lblock = locatedBlocks.get(0); // first block
                BlockReader blockReader = null;
                try {
                    blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0,
                            TEST_FILE_LEN);
                    Assert.fail("expected getBlockReader to fail the first time.");
                } catch (Throwable t) {
                    Assert.assertTrue(
                            "expected to see 'TCP reads were disabled " + "for testing' in exception " + t,
                            t.getMessage().contains("TCP reads were disabled for testing"));
                } finally {
                    if (blockReader != null)
                        blockReader.close(); // keep findbugs happy
                }
                gotFailureLatch.countDown();
                shouldRetryLatch.await();

                // Second time should succeed.
                try {
                    blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0,
                            TEST_FILE_LEN);
                } catch (Throwable t) {
                    LOG.error("error trying to retrieve a block reader " + "the second time.", t);
                    throw t;
                } finally {
                    if (blockReader != null)
                        blockReader.close();
                }
            } catch (Throwable t) {
                LOG.error("getBlockReader failure", t);
                testFailed.set(true);
            }
        }
    };
    Thread[] threads = new Thread[NUM_THREADS];
    for (int i = 0; i < NUM_THREADS; i++) {
        threads[i] = new Thread(readerRunnable);
        threads[i].start();
    }
    gotFailureLatch.await();
    replicaCreationShouldFail.set(false);
    shouldRetryLatch.countDown();
    for (int i = 0; i < NUM_THREADS; i++) {
        Uninterruptibles.joinUninterruptibly(threads[i]);
    }
    cluster.shutdown();
    sockDir.close();
    Assert.assertFalse(testFailed.get());
}

From source file:com.microsoft.tfs.core.clients.versioncontrol.engines.internal.workers.GetDownloadWorker.java

/**
 * Moves a temp file to the operation's target local item (or possibly
 * creates a symbolic link instead), but doesn't set any attributes on the
 * target item.
 *
 * @param tempFile
 *        the temp file to move into the operation's target local item (must
 *        not be <code>null</code>)
 * @param targetSymLink
 *        if not <code>null</code>, the method sets <code>true</code> on this
 *        holder if the target is a symbolic link, <code>false</code> if it
 *        is a normal file
 * @param targetSymLinkDestinationUnmapped
 *        if not <code>null</code>, the method sets <code>true</code> on
 *        this holder if the target was supposed to be a symbolic link but
 *        was not created because the destination was not mapped, sets
 *        <code>false</code> if it was a successful symbolic link or a normal
 *        (non-link) target item
 * @throws VersionControlException
 *         if the temp file could not be moved to the target file
 */
private void moveTempFileToTargetFile(final File tempFile, final AtomicBoolean targetSymLink,
        final AtomicBoolean targetSymLinkDestinationUnmapped) {
    Check.notNull(tempFile, "tempFile"); //$NON-NLS-1$

    if (!tempFile.exists()) {
        throw new VersionControlException(MessageFormat.format(
                Messages.getString("GetEngineDownloadWorker.TempFileIsMissingCantCompleteTheDownloadFormat"), //$NON-NLS-1$
                tempFile));
    }

    /*
     * Load any symbolic link attributes first. An empty string returned
     * means the file is supposed to be a symbolic link but the destination
     * was unresolvable.
     */
    final String targetSymLinkDestination = getEngine.getSymbolicLinkDestination(tempFile, operation,
            asyncOp.getWorkspace());

    /*
     * If the target file exists, delete it so we can move the temp file
     * there. Overwrite during rename/move not supported on all systems.
     */
    final File targetFile = new File(operation.getTargetLocalItem());

    try {
        /*
         * The temp file may be subject to line ending conversion or other
         * content changes.
         */
        if (targetSymLinkDestination == null) {
            getEngine.applyFileAttributesToTempFile(operation.getTargetServerItem(),
                    operation.getTargetLocalItem(), operation.getEncoding(), tempFile, operation);
        }

        final FileSystemAttributes originalTargetAttrs = FileSystemUtils.getInstance()
                .getAttributes(targetFile);

        // rely on FileSystemAttributes to check file/symlink exists or not
        if (originalTargetAttrs.exists()) {
            log.trace(MessageFormat.format("target file {0} exists, deleting", targetFile)); //$NON-NLS-1$

            /*
             * Mac OS X requires us to make the original file writeable
             * before we can delete it (this clears the immutable bit, which
             * would also block us). Skip if the target is a symbolic link
             * so we don't set the link destination file's attributes.
             */

            if (targetSymLinkDestination == null && originalTargetAttrs.isReadOnly()) {
                log.trace(MessageFormat.format("setting target file {0} writable before delete", targetFile)); //$NON-NLS-1$
                originalTargetAttrs.setReadOnly(false);

                if (FileSystemUtils.getInstance().setAttributes(targetFile, originalTargetAttrs)) {
                    log.trace(MessageFormat.format("target file {0} now writable before delete", targetFile)); //$NON-NLS-1$
                } else {
                    log.warn(MessageFormat.format(
                            "error setting file {0} writable before delete, expect trouble finishing the get", //$NON-NLS-1$
                            targetFile));
                }
            }

            if (!targetFile.delete()) {
                // Delete the temp file; we never got to use it.
                tempFile.delete();

                throw new VersionControlException(MessageFormat.format(
                        //@formatter:off
                        Messages.getString(
                                "GetEngineDownloadWorker.DeleteOfTargetFileFailedMakeSureNotInUseFormat"), //$NON-NLS-1$
                        //@formatter:on
                        targetFile));
            }
        }

        if (targetSymLink != null) {
            targetSymLink.set(false);
        }
        if (targetSymLinkDestinationUnmapped != null) {
            targetSymLinkDestinationUnmapped.set(false);
        }

        if (targetSymLinkDestination != null) {
            if (targetSymLink != null) {
                targetSymLink.set(true);
            }

            log.trace(MessageFormat.format("target {0} should be a symbolic link", //$NON-NLS-1$
                    operation.getTargetLocalItem()));

            if (targetSymLinkDestination.length() > 0) {
                log.trace(MessageFormat.format("creating link to mapped path {0}", targetSymLinkDestination)); //$NON-NLS-1$

                FileSystemUtils.getInstance().createSymbolicLink(targetSymLinkDestination,
                        operation.getTargetLocalItem());
            } else {
                if (targetSymLinkDestinationUnmapped != null) {
                    targetSymLinkDestinationUnmapped.set(true);
                }

                log.info(MessageFormat.format(
                        "Symbolic link at {0} would point to unmapped item, not creating link", //$NON-NLS-1$
                        operation.getTargetLocalItem()));
            }

            // Delete the temp file because the contents are unused for
            // symlinks
            TempStorageService.getInstance().deleteItem(tempFile);

            log.trace(MessageFormat.format("link at {0} created", operation.getTargetLocalItem())); //$NON-NLS-1$
        } else {
            // Rename the temp file to the real file name.
            log.trace(MessageFormat.format("renaming temp file {0} to target {1}", //$NON-NLS-1$
                    tempFile, operation.getTargetLocalItem()));

            TempStorageService.getInstance().renameItem(tempFile, targetFile);
        }
    } finally {
        // Ensure we don't leave the one passed in to us
        if (tempFile.exists()) {
            TempStorageService.getInstance().deleteItem(tempFile);
        }
    }
}

From source file:com.microsoft.tfs.client.common.ui.config.UITransportRequestHandler.java

/**
 * {@inheritDoc}
 */
@Override
public Status handleException(final SOAPService service, final SOAPRequest request, final Exception exception,
        final AtomicBoolean cancel) {
    final ConnectionInstanceData connectionInstanceData = getConnectionInstanceData();

    log.info("Authentication requested: ", exception); //$NON-NLS-1$

    /*
     * Super method handles FederatedAuthException with service credentials
     * if credentials were specified. Try that first.
     */
    if (super.handleException(service, request, exception, cancel) == Status.COMPLETE) {
        log.debug("DefaultTransportAuthHandler handled auth exception for us"); //$NON-NLS-1$
        return Status.COMPLETE;
    }

    log.debug("DefaultTransportAuthHandler did not handle auth exception"); //$NON-NLS-1$

    final UITransportAuthRunnable dialogRunnable;

    /*
     * For a federated authentication exception, always raise the login to
     * ACS or OAuth credentials dialog.
     */
    if (exception instanceof FederatedAuthException) {
        log.debug(" FederatedAuthException has been raised."); //$NON-NLS-1$

        cleanupSavedCredentials(service.getClient());

        if (EnvironmentVariables.getBoolean(EnvironmentVariables.USE_OAUTH_LIBRARY, true)) {
            dialogRunnable = new UITransportOAuthRunnable(connectionInstanceData.getServerURI());
        } else {
            dialogRunnable = new UITransportFederatedFallbackAuthRunnable(connectionInstanceData.getServerURI(),
                    connectionInstanceData.getCredentials(), (FederatedAuthException) exception);
        }
    }
    /*
     * For failed username/password or PAT credentials, raise the UI dialog
     * if the service recommends prompting. The SharePoint and Reports
     * services seem to be the only ones that do not.
     */
    else if (exception instanceof UnauthorizedException && service.isPromptForCredentials()) {
        log.debug(" UnauthorizedException has been raised."); //$NON-NLS-1$

        if (EnvironmentVariables.getBoolean(EnvironmentVariables.USE_OAUTH_LIBRARY, true)
                && isPatCredentials(connectionInstanceData.getCredentials())) {
            // PAT token is probably expired. Remove it from the Eclipse
            // secure storage and retry.
            final CredentialsManager credentialsManager = EclipseCredentialsManagerFactory
                    .getGitCredentialsManager();
            credentialsManager.removeCredentials(connectionInstanceData.getServerURI());
            dialogRunnable = new UITransportOAuthRunnable(connectionInstanceData.getServerURI());
        } else {
            dialogRunnable = new UITransportUsernamePasswordAuthRunnable(connectionInstanceData.getServerURI(),
                    connectionInstanceData.getCredentials(), (UnauthorizedException) exception);
        }
    }
    /*
     * The Cookie Credentials used are incorrect. They are either corrupted
     * in Eclipse secure storage or expired. Clean up the storage and retry
     * from scratch.
     */
    else if (exception instanceof FederatedAuthFailedException) {
        cleanupSavedCredentials(service.getClient());
        return Status.CONTINUE;
    } else {
        log.debug(" Unknown authentication type or shouldn't prompt for this service."); //$NON-NLS-1$
        return Status.CONTINUE;
    }

    log.debug(" Prompt for credentials"); //$NON-NLS-1$
    final Credentials credentials = getCredentials(dialogRunnable);

    log.debug(" The dialog returned credentials: " //$NON-NLS-1$
            + (credentials == null ? "null" : credentials.getClass().getName())); //$NON-NLS-1$

    if (credentials == null) {
        log.info(" Credentials dialog has been cancelled by the user."); //$NON-NLS-1$
        cancel.set(true);
        return Status.CONTINUE;
    }

    // Apply the credentials data to the existing client.
    log.debug("Apply the new credentials to the existing client."); //$NON-NLS-1$
    connectionInstanceData.setCredentials(credentials);

    log.debug(" Save the new credentials to the existing Client Factory for future clients in this session."); //$NON-NLS-1$
    getClientFactory().configureClientCredentials(service.getClient(), service.getClient().getState(),
            connectionInstanceData);

    return Status.COMPLETE;
}

From source file:io.realm.RealmTests.java

@Test
public void processLocalListenersAfterRefresh() throws InterruptedException {
    // Used to validate the result
    final AtomicBoolean listenerWasCalled = new AtomicBoolean(false);
    final AtomicBoolean typeListenerWasCalled = new AtomicBoolean(false);

    // Used by the background thread to wait for the main thread to do the write operation
    final CountDownLatch bgThreadLatch = new CountDownLatch(1);
    final CountDownLatch bgClosedLatch = new CountDownLatch(1);
    final CountDownLatch bgThreadReadyLatch = new CountDownLatch(1);

    Thread backgroundThread = new Thread() {
        @Override
        public void run() {
            // Preparing the Looper allows us to register a listener.
            // We don't start looping, so the callback cannot be invoked via
            // the handler mechanism; the purpose of this test is to make sure
            // refresh() calls the listeners.
            Looper.prepare();

            Realm bgRealm = Realm.getInstance(realmConfig);
            RealmResults<Dog> dogs = bgRealm.where(Dog.class).findAll();
            try {
                bgRealm.addChangeListener(new RealmChangeListener() {
                    @Override
                    public void onChange() {
                        listenerWasCalled.set(true);
                    }
                });
                dogs.addChangeListener(new RealmChangeListener() {
                    @Override
                    public void onChange() {
                        typeListenerWasCalled.set(true);
                    }
                });

                bgThreadReadyLatch.countDown();
                bgThreadLatch.await(); // Wait for the main thread to do a write operation
                bgRealm.refresh(); // This should call the listener
                assertTrue(listenerWasCalled.get());
                assertTrue(typeListenerWasCalled.get());
                bgRealm.close();
                bgRealm = null;
                // DON'T count down in the finally block! The test will fail silently!!!
                bgClosedLatch.countDown();
            } catch (InterruptedException e) {
                fail(e.getMessage());
            } finally {
                if (bgRealm != null) {
                    bgRealm.close();
                }
            }
        }
    };
    backgroundThread.start();

    // Wait until bgThread finishes adding listener to the RealmResults. Otherwise same TableView version won't
    // trigger the listener.
    bgThreadReadyLatch.await();
    realm.beginTransaction();
    realm.createObject(Dog.class);
    realm.commitTransaction();
    bgThreadLatch.countDown();
    bgClosedLatch.await();
}

From source file:it.anyplace.sync.bep.BlockPusher.java

public FileUploadObserver pushFile(final DataSource dataSource, @Nullable FileInfo fileInfo,
        final String folder, final String path) {
    checkArgument(connectionHandler.hasFolder(folder),
            "supplied connection handler %s will not share folder %s", connectionHandler, folder);
    checkArgument(fileInfo == null || equal(fileInfo.getFolder(), folder));
    checkArgument(fileInfo == null || equal(fileInfo.getPath(), path));
    try {
        final ExecutorService monitoringProcessExecutorService = Executors.newCachedThreadPool();
        final long fileSize = dataSource.getSize();
        final Set<String> sentBlocks = Sets.newConcurrentHashSet();
        final AtomicReference<Exception> uploadError = new AtomicReference<>();
        final AtomicBoolean isCompleted = new AtomicBoolean(false);
        final Object updateLock = new Object();
        final Object listener = new Object() {
            @Subscribe
            public void handleRequestMessageReceivedEvent(RequestMessageReceivedEvent event) {
                BlockExchageProtos.Request request = event.getMessage();
                if (equal(request.getFolder(), folder) && equal(request.getName(), path)) {
                    try {
                        final String hash = BaseEncoding.base16().encode(request.getHash().toByteArray());
                        logger.debug("handling block request = {}:{}-{} ({})", request.getName(),
                                request.getOffset(), request.getSize(), hash);
                        byte[] data = dataSource.getBlock(request.getOffset(), request.getSize(), hash);
                        checkNotNull(data, "data not found for hash = %s", hash);
                        final Future future = connectionHandler.sendMessage(
                                Response.newBuilder().setCode(BlockExchageProtos.ErrorCode.NO_ERROR)
                                        .setData(ByteString.copyFrom(data)).setId(request.getId()).build());
                        monitoringProcessExecutorService.submit(new Runnable() {
                            @Override
                            public void run() {
                                try {
                                    future.get();
                                    sentBlocks.add(hash);
                                    synchronized (updateLock) {
                                        updateLock.notifyAll();
                                    }
                                    //TODO retry on error, register error and throw on watcher
                                } catch (InterruptedException ex) {
                                    //return and do nothing
                                } catch (ExecutionException ex) {
                                    uploadError.set(ex);
                                    synchronized (updateLock) {
                                        updateLock.notifyAll();
                                    }
                                }
                            }
                        });
                    } catch (Exception ex) {
                        logger.error("error handling block request", ex);
                        connectionHandler.sendMessage(Response.newBuilder()
                                .setCode(BlockExchageProtos.ErrorCode.GENERIC).setId(request.getId()).build());
                        uploadError.set(ex);
                        synchronized (updateLock) {
                            updateLock.notifyAll();
                        }
                    }
                }
            }
        };
        connectionHandler.getEventBus().register(listener);
        logger.debug("send index update for file = {}", path);
        final Object indexListener = new Object() {

            @Subscribe
            public void handleIndexRecordAquiredEvent(IndexHandler.IndexRecordAquiredEvent event) {
                if (equal(event.getFolder(), folder)) {
                    for (FileInfo fileInfo : event.getNewRecords()) {
                        if (equal(fileInfo.getPath(), path)
                                && equal(fileInfo.getHash(), dataSource.getHash())) { //TODO check not invalid
                            //                                sentBlocks.addAll(dataSource.getHashes());
                            isCompleted.set(true);
                            synchronized (updateLock) {
                                updateLock.notifyAll();
                            }
                        }
                    }
                }
            }
        };
        if (indexHandler != null) {
            indexHandler.getEventBus().register(indexListener);
        }
        final IndexUpdate indexUpdate = sendIndexUpdate(folder,
                BlockExchageProtos.FileInfo.newBuilder().setName(path).setSize(fileSize)
                        .setType(BlockExchageProtos.FileInfoType.FILE).addAllBlocks(dataSource.getBlocks()),
                fileInfo == null ? null : fileInfo.getVersionList()).getRight();
        final FileUploadObserver messageUploadObserver = new FileUploadObserver() {
            @Override
            public void close() {
                logger.debug("closing upload process");
                try {
                    connectionHandler.getEventBus().unregister(listener);
                    monitoringProcessExecutorService.shutdown();
                    if (indexHandler != null) {
                        indexHandler.getEventBus().unregister(indexListener);
                    }
                } catch (Exception ex) {
                }
                if (closeConnection && connectionHandler != null) {
                    connectionHandler.close();
                }
                if (indexHandler != null) {
                    FileInfo fileInfo = indexHandler.pushRecord(indexUpdate.getFolder(),
                            Iterables.getOnlyElement(indexUpdate.getFilesList()));
                    logger.info("sent file info record = {}", fileInfo);
                }
            }

            @Override
            public double getProgress() {
                return isCompleted() ? 1d : sentBlocks.size() / ((double) dataSource.getHashes().size());
            }

            @Override
            public String getProgressMessage() {
                return (Math.round(getProgress() * 1000d) / 10d) + "% " + sentBlocks.size() + "/"
                        + dataSource.getHashes().size();
            }

            @Override
            public boolean isCompleted() {
                //                    return sentBlocks.size() == dataSource.getHashes().size();
                return isCompleted.get();
            }

            @Override
            public double waitForProgressUpdate() throws InterruptedException {
                synchronized (updateLock) {
                    updateLock.wait();
                }
                if (uploadError.get() != null) {
                    throw new RuntimeException(uploadError.get());
                }
                return getProgress();
            }

            @Override
            public DataSource getDataSource() {
                return dataSource;
            }

        };
        return messageUploadObserver;
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
}

From source file:org.apache.geode.internal.cache.OplogJUnitTest.java

/**
 * Tests that directory stats are correctly updated in the case of a single
 * directory (for bug 37531).
 */
@Test
public void testPersist1DirStats() {
    final AtomicBoolean freezeRoller = new AtomicBoolean();
    CacheObserver old = CacheObserverHolder.setInstance(new CacheObserverAdapter() {
        private volatile boolean didBeforeCall = false;

        @Override
        public void beforeGoingToCompact() {
            this.didBeforeCall = true;
            synchronized (freezeRoller) {
                if (!assertDone) {
                    try {
                        // Here, we are not allowing the Roller thread to roll the old oplog into htree
                        while (!freezeRoller.get()) {
                            freezeRoller.wait();
                        }
                        freezeRoller.set(false);
                    } catch (InterruptedException e) {
                        fail("interrupted");
                    }
                }
            }
        }

        @Override
        public void afterHavingCompacted() {
            if (this.didBeforeCall) {
                this.didBeforeCall = false;
                LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
                // assertTrue("Assert failure for DSpaceUsage in afterHavingCompacted ",
                // diskSpaceUsageStats() == calculatedDiskSpaceUsageStats());
                // what is the point of this assert?
                checkDiskStats();
            }
        }
    });
    try {
        final int MAX_OPLOG_SIZE = 500;
        diskProps.setMaxOplogSize(MAX_OPLOG_SIZE);
        diskProps.setPersistBackup(true);
        diskProps.setRolling(true);
        diskProps.setSynchronous(true);
        diskProps.setOverflow(false);
        diskProps.setDiskDirsAndSizes(new File[] { dirs[0] }, new int[] { 4000 });
        final byte[] val = new byte[200];
        region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
        LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = true;
        region.put("key1", val);
        // Disk space should have changed due to 1 put
        // assertTrue("stats did not increase after put 1 ", diskSpaceUsageStats() ==
        // calculatedDiskSpaceUsageStats());
        checkDiskStats();
        region.put("key2", val);
        // assertTrue("stats did not increase after put 2", diskSpaceUsageStats() ==
        // calculatedDiskSpaceUsageStats());
        checkDiskStats();
        // This put will cause a switch as max-oplog size (500) will be exceeded (600)
        region.put("key3", val);
        synchronized (freezeRoller) {
            // assertTrue("current disk space usage with Roller thread in wait and put key3 done is
            // incorrect " + diskSpaceUsageStats() + " " + calculatedDiskSpaceUsageStats(),
            // diskSpaceUsageStats()== calculatedDiskSpaceUsageStats());
            checkDiskStats();
            assertDone = true;
            freezeRoller.set(true);
            freezeRoller.notifyAll();
        }

        region.close();
        closeDown();
        // Stop rolling to get accurate estimates:
        diskProps.setRolling(false);

        region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);

        // On recreating the region after closing, old Oplog file gets rolled into htree
        // "Disk space usage zero when region recreated"
        checkDiskStats();
        region.put("key4", val);
        // assertTrue("stats did not increase after put 4", diskSpaceUsageStats() ==
        // calculatedDiskSpaceUsageStats());
        checkDiskStats();
        region.put("key5", val);
        // assertTrue("stats did not increase after put 5", diskSpaceUsageStats() ==
        // calculatedDiskSpaceUsageStats());
        checkDiskStats();
        assertDone = false;
        LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = true;
        region.put("key6", val);
        // again we expect a switch in oplog here
        synchronized (freezeRoller) {
            // assertTrue("current disk space usage with Roller thread in wait and put key6 done is
            // incorrect", diskSpaceUsageStats()== calculatedDiskSpaceUsageStats());
            checkDiskStats();
            assertDone = true;
            freezeRoller.set(true);
            freezeRoller.notifyAll();
        }
        region.close();
    } catch (Exception e) {
        e.printStackTrace();
        fail("Test failed due to exception" + e);
    } finally {
        LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
        CacheObserverHolder.setInstance(old);
        synchronized (freezeRoller) {
            assertDone = true;
            freezeRoller.set(true);
            freezeRoller.notifyAll();
        }
    }
}

From source file:com.gemstone.gemfire.internal.cache.OplogJUnitTest.java

/**
 * Tests that directory stats are correctly updated in the case of a single
 * directory (for bug 37531).
 */
@Test
public void testPersist1DirStats() {
    final AtomicBoolean freezeRoller = new AtomicBoolean();
    CacheObserver old = CacheObserverHolder.setInstance(new CacheObserverAdapter() {
        private volatile boolean didBeforeCall = false;

        @Override
        public void beforeGoingToCompact() {
            this.didBeforeCall = true;
            synchronized (freezeRoller) {
                if (!assertDone) {
                    try {
                        // Here, we are not allowing the Roller thread to roll the old oplog into htree
                        while (!freezeRoller.get()) {
                            freezeRoller.wait();
                        }
                        freezeRoller.set(false);
                    } catch (InterruptedException e) {
                        fail("interrupted");
                    }
                }
            }
        }

        @Override
        public void afterHavingCompacted() {
            if (this.didBeforeCall) {
                this.didBeforeCall = false;
                LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
                //assertTrue("Assert failure for DSpaceUsage in afterHavingCompacted ", diskSpaceUsageStats() == calculatedDiskSpaceUsageStats());
                // what is the point of this assert?
                checkDiskStats();
            }
        }
    });
    try {
        final int MAX_OPLOG_SIZE = 500;
        diskProps.setMaxOplogSize(MAX_OPLOG_SIZE);
        diskProps.setPersistBackup(true);
        diskProps.setRolling(true);
        diskProps.setSynchronous(true);
        diskProps.setOverflow(false);
        diskProps.setDiskDirsAndSizes(new File[] { dirs[0] }, new int[] { 4000 });
        final byte[] val = new byte[200];
        region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
        LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = true;
        region.put("key1", val);
        // Disk space should have changed due to 1 put
        //assertTrue("stats did not increase after put 1 ", diskSpaceUsageStats() == calculatedDiskSpaceUsageStats());
        checkDiskStats();
        region.put("key2", val);
        //assertTrue("stats did not increase after put 2", diskSpaceUsageStats() == calculatedDiskSpaceUsageStats());
        checkDiskStats();
        // This put will cause a switch as max-oplog size (500) will be exceeded (600)
        region.put("key3", val);
        synchronized (freezeRoller) {
            //assertTrue("current disk space usage with Roller thread in wait and put key3 done is incorrect " +  diskSpaceUsageStats() + " " + calculatedDiskSpaceUsageStats(), diskSpaceUsageStats()== calculatedDiskSpaceUsageStats());
            checkDiskStats();
            assertDone = true;
            freezeRoller.set(true);
            freezeRoller.notifyAll();
        }

        region.close();
        closeDown();
        // Stop rolling to get accurate estimates:
        diskProps.setRolling(false);

        region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);

        // On recreating the region after closing, old Oplog file gets rolled into htree
        // "Disk space usage zero when region recreated"
        checkDiskStats();
        region.put("key4", val);
        //assertTrue("stats did not increase after put 4", diskSpaceUsageStats() == calculatedDiskSpaceUsageStats());
        checkDiskStats();
        region.put("key5", val);
        //assertTrue("stats did not increase after put 5", diskSpaceUsageStats() == calculatedDiskSpaceUsageStats());
        checkDiskStats();
        assertDone = false;
        LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = true;
        region.put("key6", val);
        // again we expect a switch in oplog here
        synchronized (freezeRoller) {
            //assertTrue("current disk space usage with Roller thread in wait and put key6 done is incorrect", diskSpaceUsageStats()== calculatedDiskSpaceUsageStats());
            checkDiskStats();
            assertDone = true;
            freezeRoller.set(true);
            freezeRoller.notifyAll();
        }
        region.close();
    } catch (Exception e) {
        e.printStackTrace();
        fail("Test failed due to exception" + e);
    } finally {
        LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
        CacheObserverHolder.setInstance(old);
        synchronized (freezeRoller) {
            assertDone = true;
            freezeRoller.set(true);
            freezeRoller.notifyAll();
        }
    }
}

From source file:org.apache.hadoop.hbase.regionserver.TestRegionReplicas.java

@Test(timeout = 300000)
public void testFlushAndCompactionsInPrimary() throws Exception {

    long runtime = 30 * 1000;
    // enable store file refreshing
    final int refreshPeriod = 100; // 100ms refresh is a lot
    HTU.getConfiguration().setInt("hbase.hstore.compactionThreshold", 3);
    HTU.getConfiguration().setInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, refreshPeriod);
    // restart the region server so that it starts the refresher chore
    restartRegionServer();
    final int startKey = 0, endKey = 1000;

    try {
        openRegion(HTU, getRS(), hriSecondary);

        // load some data to the primary so that the reader won't fail
        HTU.loadNumericRows(table, f, startKey, endKey);
        TestRegionServerNoMaster.flushRegion(HTU, hriPrimary);
        // ensure that chore is run
        Threads.sleep(2 * refreshPeriod);

        final AtomicBoolean running = new AtomicBoolean(true);
        @SuppressWarnings("unchecked")
        final AtomicReference<Exception>[] exceptions = new AtomicReference[3];
        for (int i = 0; i < exceptions.length; i++) {
            exceptions[i] = new AtomicReference<Exception>();
        }

        Runnable writer = new Runnable() {
            int key = startKey;

            @Override
            public void run() {
                try {
                    while (running.get()) {
                        byte[] data = Bytes.toBytes(String.valueOf(key));
                        Put put = new Put(data);
                        put.add(f, null, data);
                        table.put(put);
                        key++;
                        if (key == endKey)
                            key = startKey;
                    }
                } catch (Exception ex) {
                    LOG.warn(ex);
                    exceptions[0].compareAndSet(null, ex);
                }
            }
        };

        Runnable flusherCompactor = new Runnable() {
            Random random = new Random();

            @Override
            public void run() {
                try {
                    while (running.get()) {
                        // flush or compact
                        if (random.nextBoolean()) {
                            TestRegionServerNoMaster.flushRegion(HTU, hriPrimary);
                        } else {
                            HTU.compact(table.getName(), random.nextBoolean());
                        }
                    }
                } catch (Exception ex) {
                    LOG.warn(ex);
                    exceptions[1].compareAndSet(null, ex);
                }
            }
        };

        Runnable reader = new Runnable() {
            Random random = new Random();

            @Override
            public void run() {
                try {
                    while (running.get()) {
                        // whether to do a close and open
                        if (random.nextInt(10) == 0) {
                            try {
                                closeRegion(HTU, getRS(), hriSecondary);
                            } catch (Exception ex) {
                                LOG.warn("Failed closing the region " + hriSecondary + " "
                                        + StringUtils.stringifyException(ex));
                                exceptions[2].compareAndSet(null, ex);
                            }
                            try {
                                openRegion(HTU, getRS(), hriSecondary);
                            } catch (Exception ex) {
                                LOG.warn("Failed opening the region " + hriSecondary + " "
                                        + StringUtils.stringifyException(ex));
                                exceptions[2].compareAndSet(null, ex);
                            }
                        }

                        int key = random.nextInt(endKey - startKey) + startKey;
                        assertGetRpc(hriSecondary, key, true);
                    }
                } catch (Exception ex) {
                    LOG.warn("Failed getting the value in the region " + hriSecondary + " "
                            + StringUtils.stringifyException(ex));
                    exceptions[2].compareAndSet(null, ex);
                }
            }
        };

        LOG.info("Starting writer and reader");
        ExecutorService executor = Executors.newFixedThreadPool(3);
        executor.submit(writer);
        executor.submit(flusherCompactor);
        executor.submit(reader);

        // wait for threads
        Threads.sleep(runtime);
        running.set(false);
        executor.shutdown();
        executor.awaitTermination(30, TimeUnit.SECONDS);

        for (AtomicReference<Exception> exRef : exceptions) {
            Assert.assertNull(exRef.get());
        }
    } finally {
        HTU.deleteNumericRows(table, HConstants.CATALOG_FAMILY, startKey, endKey);
        closeRegion(HTU, getRS(), hriSecondary);
    }
}