Example usage for java.util.concurrent.atomic AtomicReference set

Introduction

On this page you can find usage examples for java.util.concurrent.atomic.AtomicReference.set.

Prototype

public final void set(V newValue) 

Document

Sets the value to newValue, with memory effects as specified by VarHandle#setVolatile.
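
A minimal sketch of what this means in practice (the class and values below are illustrative, not taken from the examples on this page): set performs a volatile write, so once one thread calls set, any thread that subsequently calls get is guaranteed to observe the new value.

import java.util.concurrent.atomic.AtomicReference;

public class AtomicReferenceSetDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicReference<String> ref = new AtomicReference<>("initial");

        // Writer thread publishes a new value; set() is a volatile write
        Thread writer = new Thread(() -> ref.set("updated"));
        writer.start();
        writer.join();

        // The volatile write (and the join) guarantee visibility here
        System.out.println(ref.get()); // prints "updated"
    }
}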

Usage

From source file:com.netflix.curator.ensemble.exhibitor.TestExhibitorEnsembleProvider.java

@Test
public void testExhibitorFailures() throws Exception {
    final AtomicReference<String> backupConnectionString = new AtomicReference<String>("backup1:1");
    final AtomicReference<String> connectionString = new AtomicReference<String>(
            "count=1&port=2&server0=localhost");
    Exhibitors exhibitors = new Exhibitors(Lists.newArrayList("foo", "bar"), 1000,
            new Exhibitors.BackupConnectionStringProvider() {
                @Override
                public String getBackupConnectionString() {
                    return backupConnectionString.get();
                }
            });
    ExhibitorRestClient mockRestClient = new ExhibitorRestClient() {
        @Override
        public String getRaw(String hostname, int port, String uriPath, String mimeType) throws Exception {
            String localConnectionString = connectionString.get();
            if (localConnectionString == null) {
                throw new IOException();
            }
            return localConnectionString;
        }
    };

    final Semaphore semaphore = new Semaphore(0);
    ExhibitorEnsembleProvider provider = new ExhibitorEnsembleProvider(exhibitors, mockRestClient, "/foo", 10,
            new RetryOneTime(1)) {
        @Override
        protected void poll() {
            super.poll();
            semaphore.release();
        }
    };
    provider.pollForInitialEnsemble();
    try {
        provider.start();

        Assert.assertEquals(provider.getConnectionString(), "localhost:2");

        connectionString.set(null);
        semaphore.drainPermits();
        semaphore.acquire(); // wait for next poll
        Assert.assertEquals(provider.getConnectionString(), "backup1:1");

        backupConnectionString.set("backup2:2");
        semaphore.drainPermits();
        semaphore.acquire(); // wait for next poll
        Assert.assertEquals(provider.getConnectionString(), "backup2:2");

        connectionString.set("count=1&port=3&server0=localhost3");
        semaphore.drainPermits();
        semaphore.acquire(); // wait for next poll
        Assert.assertEquals(provider.getConnectionString(), "localhost3:3");
    } finally {
        IOUtils.closeQuietly(provider);
    }
}

From source file:com.netflix.curator.ensemble.exhibitor.TestExhibitorEnsembleProvider.java

@Test
public void testChanging() throws Exception {
    TestingServer secondServer = new TestingServer();
    try {
        String mainConnectionString = "count=1&port=" + server.getPort() + "&server0=localhost";
        String secondConnectionString = "count=1&port=" + secondServer.getPort() + "&server0=localhost";

        final Semaphore semaphore = new Semaphore(0);
        final AtomicReference<String> connectionString = new AtomicReference<String>(mainConnectionString);
        Exhibitors exhibitors = new Exhibitors(Lists.newArrayList("foo", "bar"), 1000,
                dummyConnectionStringProvider);
        ExhibitorRestClient mockRestClient = new ExhibitorRestClient() {
            @Override
            public String getRaw(String hostname, int port, String uriPath, String mimeType) throws Exception {
                semaphore.release();
                return connectionString.get();
            }
        };
        ExhibitorEnsembleProvider provider = new ExhibitorEnsembleProvider(exhibitors, mockRestClient, "/foo",
                10, new RetryOneTime(1));
        provider.pollForInitialEnsemble();

        Timing timing = new Timing().multiple(4);
        final CuratorZookeeperClient client = new CuratorZookeeperClient(provider, timing.session(),
                timing.connection(), null, new RetryOneTime(2));
        client.start();
        try {
            RetryLoop.callWithRetry(client, new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                    client.getZooKeeper().create("/test", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE,
                            CreateMode.PERSISTENT);
                    return null;
                }
            });

            connectionString.set(secondConnectionString);
            semaphore.drainPermits();
            semaphore.acquire();

            server.stop(); // create a situation where the current ZooKeeper session gets disconnected

            Stat stat = RetryLoop.callWithRetry(client, new Callable<Stat>() {
                @Override
                public Stat call() throws Exception {
                    return client.getZooKeeper().exists("/test", false);
                }
            });
            Assert.assertNull(stat); // it's a different server so should be null
        } finally {
            client.close();
        }
    } finally {
        IOUtils.closeQuietly(secondServer);
    }
}

From source file:org.appverse.web.framework.backend.frontfacade.websocket.IntegrationWebsocketTest.java

@Test
public void getPositions() throws Exception {

    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicReference<Throwable> failure = new AtomicReference<>();

    StompSessionHandler handler = new AbstractTestSessionHandler(failure) {

        @Override
        public void afterConnected(final StompSession session, StompHeaders connectedHeaders) {
            session.subscribe("/app/positions", new StompFrameHandler() {
                @Override
                public Type getPayloadType(StompHeaders headers) {
                    return byte[].class;
                }

                @Override
                public void handleFrame(StompHeaders headers, Object payload) {
                    String json = new String((byte[]) payload);
                    logger.debug("Got " + json);
                    try {
                        new JsonPathExpectationsHelper("$[0].company").assertValue(json,
                                "Citrix Systems, Inc.");
                        new JsonPathExpectationsHelper("$[1].company").assertValue(json, "Dell Inc.");
                        new JsonPathExpectationsHelper("$[2].company").assertValue(json, "Microsoft");
                        new JsonPathExpectationsHelper("$[3].company").assertValue(json, "Oracle");
                    } catch (Throwable t) {
                        failure.set(t);
                    } finally {
                        session.disconnect();
                        latch.countDown();
                    }
                }
            });
        }
    };

    WebSocketStompClient stompClient = new WebSocketStompClient(sockJsClient);
    stompClient.connect("http://localhost:{port}/services/websocket", this.headers, handler, port);

    // Wait for the async handler to run before inspecting the failure reference
    if (!latch.await(5, TimeUnit.SECONDS)) {
        fail("Portfolio positions not received");
    }

    if (failure.get() != null) {
        throw new AssertionError("", failure.get());
    }
}

From source file:org.apache.hadoop.hdfs.TestFileAppend4.java

/**
 * Test case that stops a writer after finalizing a block but
 * before calling completeFile, and then tries to recover
 * the lease.
 */
public void testRecoverFinalizedBlock() throws Throwable {
    cluster = new MiniDFSCluster(conf, 3, true, null);

    try {
        cluster.waitActive();
        NameNode preSpyNN = cluster.getNameNode();
        NameNode spyNN = spy(preSpyNN);

        // Delay completeFile
        DelayAnswer delayer = new DelayAnswer();
        doAnswer(delayer).when(spyNN).complete(anyString(), anyString());

        DFSClient client = new DFSClient(null, spyNN, conf, null);
        file1 = new Path("/testRecoverFinalized");
        final OutputStream stm = client.create("/testRecoverFinalized", true);

        // write 1/2 block
        AppendTestUtil.write(stm, 0, 4096);
        final AtomicReference<Throwable> err = new AtomicReference<Throwable>();
        Thread t = new Thread() {
            public void run() {
                try {
                    stm.close();
                } catch (Throwable t) {
                    err.set(t);
                }
            }
        };
        t.start();
        LOG.info("Waiting for close to get to latch...");
        delayer.waitForCall();

        // At this point, the block is finalized on the DNs, but the file
        // has not been completed in the NN.
        // Lose the leases
        LOG.info("Killing lease checker");
        client.leasechecker.interruptAndJoin();

        FileSystem fs1 = cluster.getFileSystem();
        FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(fs1.getConf());

        LOG.info("Recovering file");
        recoverFile(fs2);

        LOG.info("Telling close to proceed.");
        delayer.proceed();
        LOG.info("Waiting for close to finish.");
        t.join();
        LOG.info("Close finished.");

        // We expect that close will get a "Could not complete file"
        // error.
        Throwable thrownByClose = err.get();
        assertNotNull(thrownByClose);
        assertTrue(thrownByClose instanceof IOException);
        if (!thrownByClose.getMessage().contains("does not have any open files")) {
            throw thrownByClose;
        }
    } finally {
        cluster.shutdown();
    }
}

From source file:org.apache.hadoop.hdfs.TestFileAppend4.java

/**
 * Test case that stops a writer after finalizing a block but
 * before calling completeFile, recovers a file from another writer,
 * starts writing from that writer, and then has the old lease holder
 * call completeFile.
 */
public void testCompleteOtherLeaseHoldersFile() throws Throwable {
    cluster = new MiniDFSCluster(conf, 3, true, null);

    try {
        cluster.waitActive();
        NameNode preSpyNN = cluster.getNameNode();
        NameNode spyNN = spy(preSpyNN);

        // Delay completeFile
        DelayAnswer delayer = new DelayAnswer();
        doAnswer(delayer).when(spyNN).complete(anyString(), anyString());

        DFSClient client = new DFSClient(null, spyNN, conf, null);
        file1 = new Path("/testRecoverFinalized");
        final OutputStream stm = client.create("/testRecoverFinalized", true);

        // write 1/2 block
        AppendTestUtil.write(stm, 0, 4096);
        final AtomicReference<Throwable> err = new AtomicReference<Throwable>();
        Thread t = new Thread() {
            public void run() {
                try {
                    stm.close();
                } catch (Throwable t) {
                    err.set(t);
                }
            }
        };
        t.start();
        LOG.info("Waiting for close to get to latch...");
        delayer.waitForCall();

        // At this point, the block is finalized on the DNs, but the file
        // has not been completed in the NN.
        // Lose the leases
        LOG.info("Killing lease checker");
        client.leasechecker.interruptAndJoin();

        FileSystem fs1 = cluster.getFileSystem();
        FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(fs1.getConf());

        LOG.info("Recovering file");
        recoverFile(fs2);

        LOG.info("Opening file for append from new fs");
        FSDataOutputStream appenderStream = fs2.append(file1);

        LOG.info("Writing some data from new appender");
        AppendTestUtil.write(appenderStream, 0, 4096);

        LOG.info("Telling old close to proceed.");
        delayer.proceed();
        LOG.info("Waiting for close to finish.");
        t.join();
        LOG.info("Close finished.");

        // We expect that close will get a "Lease mismatch"
        // error.
        Throwable thrownByClose = err.get();
        assertNotNull(thrownByClose);
        assertTrue(thrownByClose instanceof IOException);
        if (!thrownByClose.getMessage().contains("Lease mismatch"))
            throw thrownByClose;

        // The appender should be able to close properly
        appenderStream.close();
    } finally {
        cluster.shutdown();
    }
}

From source file:org.apache.hadoop.hdfs.TestFileAppend4.java

/**
 * Test for a race in appendFile where the file might get removed in between
 * the two synchronized sections.
 */
public void testAppendFileRace() throws Throwable {
    LOG.info("START");
    cluster = new MiniDFSCluster(conf, 1, true, null);
    final FileSystem fs1 = cluster.getFileSystem();

    try {
        createFile(fs1, "/testAppendFileRace", 1, BBW_SIZE);
        stm.close();

        NameNode nn = cluster.getNameNode();
        FSEditLog editLogSpy = FSImageAdapter.injectEditLogSpy(nn.getNamesystem());
        DelayAnswer delayer = new DelayAnswer();
        doAnswer(delayer).when(editLogSpy).logSync();

        final AtomicReference<Throwable> err = new AtomicReference<Throwable>();
        Thread appender = new Thread() {
            public void run() {
                try {
                    stm = fs1.append(file1);
                } catch (Throwable t) {
                    err.set(t);
                }
            }
        };
        LOG.info("Triggering append in other thread");
        appender.start();

        LOG.info("Waiting for logsync");
        delayer.waitForCall();

        LOG.info("Resetting spy");
        reset(editLogSpy);

        LOG.info("Deleting file");
        fs1.delete(file1, true);

        LOG.info("Allowing append to proceed");
        delayer.proceed();

        LOG.info("Waiting for append to finish");

        appender.join();

        if (err.get() != null) {
            if (err.get().getMessage().contains("File does not exist.")) {
                LOG.info("Got expected exception", err.get());
            } else {
                throw err.get();
            }
        }
        LOG.info("Closing stream");
        stm.close();
    } finally {
        fs1.close();
        cluster.shutdown();
    }
}

From source file:org.apache.hadoop.hdfs.TestFileAppend4.java

/**
 * Test case where recovery starts on one node, but it's very slow
 * (delayed right after nextGenerationStamp). A second recovery attempt
 * completes while this one is being slow. Then we should reject the
 * recovery from the first one, since it has a lower gen stamp.
 */
public void testSimultaneousRecoveries() throws Exception {
    LOG.info("START");
    cluster = new MiniDFSCluster(conf, 3, true, null);
    FileSystem fs1 = cluster.getFileSystem();
    final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(fs1.getConf());
    try {
        createFile(fs1, "/testSimultaneousRecoveries", 3, BBW_SIZE);
        stm.sync();
        loseLeases(fs1);

        // Make the first nextGenerationStamp call get delayed
        DelayAnswer delayer = new DelayAnswer(false);

        NameNode nn = cluster.getNameNode();
        nn.namesystem = spy(nn.namesystem);
        NameNodeAdapter.callNextGenerationStampForBlock(doAnswer(delayer).when(nn.namesystem),
                (Block) anyObject(), anyBoolean());

        final AtomicReference<Throwable> err = new AtomicReference<Throwable>();
        Thread recoverThread = new Thread("Recovery thread") {
            public void run() {
                try {
                    recoverFile(fs2);
                } catch (Throwable t) {
                    err.set(t);
                }
            }
        };
        recoverThread.start();

        LOG.info("Waiting for first nextGenerationStamp to return");
        delayer.waitForCall();

        LOG.info("Allowing recovery time to try again");
        Thread.sleep(10000);

        LOG.info("Proceeding first recovery with old GS");
        delayer.proceed();

        LOG.info("Joining on recovery thread");
        recoverThread.join();

        LOG.info("Waiting a few seconds for blocks to get corrupted");
        Thread.sleep(5000);

        // close() should persist the recovered bbw (block-being-written) data to the HDFS block
        assertFileSize(fs2, BBW_SIZE);
        checkFile(fs2, BBW_SIZE);
    } finally {
        fs2.close();
        fs1.close();
        cluster.shutdown();
    }
    LOG.info("STOP");
}

From source file:org.apache.hadoop.hdfs.TestFileAppend4.java

/**
 * Test for the following sequence:
 * 1. Client finishes writing a block, but does not allocate next one
 * 2. Client loses lease
 * 3. Recovery process starts, but commitBlockSynchronization not called yet
 * 4. Client calls addBlock and continues writing
 * 5. commitBlockSynchronization proceeds
 * 6. Original client tries to write/close
 */
public void testRecoveryOnBlockBoundary() throws Throwable {
    LOG.info("START");
    cluster = new MiniDFSCluster(conf, 1, true, null);
    FileSystem fs1 = cluster.getFileSystem();
    final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(fs1.getConf());

    // Allow us to delay commitBlockSynchronization
    DelayAnswer delayer = new DelayAnswer();
    NameNode nn = cluster.getNameNode();
    nn.namesystem = spy(nn.namesystem);
    doAnswer(delayer).when(nn.namesystem).commitBlockSynchronization((Block) anyObject(), anyInt(), anyInt(),
            anyBoolean(), anyBoolean(), (DatanodeID[]) anyObject());

    try {
        file1 = new Path("/testWritingDuringRecovery.test");
        stm = fs1.create(file1, true, (int) BLOCK_SIZE * 2, (short) 3, BLOCK_SIZE);
        AppendTestUtil.write(stm, 0, (int) (BLOCK_SIZE));
        stm.sync();

        LOG.info("Losing lease");
        loseLeases(fs1);

        LOG.info("Triggering recovery in another thread");

        final AtomicReference<Throwable> err = new AtomicReference<Throwable>();
        Thread recoverThread = new Thread() {
            public void run() {
                try {
                    recoverFile(fs2);
                } catch (Throwable t) {
                    err.set(t);
                }
            }
        };
        recoverThread.start();

        LOG.info("Waiting for recovery about to call commitBlockSynchronization");
        delayer.waitForCall();

        LOG.info("Continuing to write to stream");
        AppendTestUtil.write(stm, 0, (int) (BLOCK_SIZE));
        try {
            stm.sync();
            fail("Sync was allowed after recovery started");
        } catch (IOException ioe) {
            LOG.info("Got expected IOE trying to write to a file from the writer " + "that lost its lease",
                    ioe);
        }

        LOG.info("Written more to stream, allowing commit to proceed");
        delayer.proceed();

        LOG.info("Joining on recovery thread");
        recoverThread.join();
        if (err.get() != null) {
            throw err.get();
        }

        LOG.info("Now that recovery has finished, still expect further writes to fail.");
        try {
            AppendTestUtil.write(stm, 0, (int) (BLOCK_SIZE));
            stm.sync();
            fail("Further writes after recovery finished did not fail!");
        } catch (IOException ioe) {
            LOG.info("Got expected exception", ioe);
        }

        LOG.info("Checking that file looks good");

        // After recovery, only the first successful writes should remain in the file
        assertFileSize(fs2, BLOCK_SIZE);
        checkFile(fs2, BLOCK_SIZE);
    } finally {
        try {
            fs2.close();
            fs1.close();
            cluster.shutdown();
        } catch (Throwable t) {
            LOG.warn("Didn't close down cleanly", t);
        }
    }
    LOG.info("STOP");
}

From source file:com.amazon.carbonado.repo.replicated.ReplicatedRepositoryBuilder.java

public Repository build(AtomicReference<Repository> rootRef) throws RepositoryException {
    assertReady();

    Repository replica, master;

    {
        boolean originalOption = mReplicaRepositoryBuilder.isMaster();
        try {
            mReplicaRepositoryBuilder.setMaster(false);
            for (TriggerFactory factory : getTriggerFactories()) {
                mReplicaRepositoryBuilder.addTriggerFactory(factory);
            }
            replica = mReplicaRepositoryBuilder.build(rootRef);
        } finally {
            mReplicaRepositoryBuilder.setMaster(originalOption);
        }
    }

    {
        // Create master using BelatedRepositoryCreator such that we can
        // start up and read from replica even if master is down.

        final boolean originalOption = mMasterRepositoryBuilder.isMaster();
        mMasterRepositoryBuilder.setMaster(mIsMaster);

        Log log = LogFactory.getLog(ReplicatedRepositoryBuilder.class);
        BelatedRepositoryCreator creator = new BelatedRepositoryCreator(log, mMasterRepositoryBuilder, rootRef,
                DEFAULT_RETRY_MILLIS) {

            @Override
            protected void createdNotification(Repository repo) {
                // Don't need builder any more so restore it.
                mMasterRepositoryBuilder.setMaster(originalOption);
            }
        };

        master = creator.get(DEFAULT_MASTER_TIMEOUT_MILLIS);
    }

    Repository repo = new ReplicatedRepository(getName(), replica, master);
    rootRef.set(repo);
    return repo;
}

From source file:com.jeremydyer.processors.salesforce.GenerateSOQL.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final AtomicReference<String> query_url = new AtomicReference<>();

    session.read(flowFile, new InputStreamCallback() {
        @Override
        public void process(InputStream inputStream) throws IOException {
            String jsonString = IOUtils.toString(inputStream);
            JSONObject json = new JSONObject(jsonString);

            JSONArray fields = json.getJSONArray("fields");

            StringBuffer buffer = new StringBuffer();
            buffer.append(context.getProperty(SALESFORCE_SERVER_INSTANCE).evaluateAttributeExpressions(flowFile)
                    .getValue());
            buffer.append("/services/data/v36.0/queryAll/?q=");
            buffer.append("SELECT ");

            //Loops through the fields and builds the SOQL
            for (int i = 0; i < fields.length() - 1; i++) {
                buffer.append(fields.getJSONObject(i).getString("name"));
                buffer.append(",");
            }

            //Append the last field name
            buffer.append(fields.getJSONObject(fields.length() - 1).getString("name"));

            buffer.append(" FROM " + TABLE_NAME);
            buffer.append(" WHERE SYSTEMMODSTAMP > ");
            buffer.append(
                    context.getProperty(LAST_SYNC_TIME).evaluateAttributeExpressions(flowFile).getValue());
            buffer.append(" order by SYSTEMMODSTAMP asc");

            String soql = buffer.toString();
            //Replace all spaces with + as required by Salesforce
            soql = soql.replace(" ", "+");

            query_url.set(soql);
        }
    });

    FlowFile ff = session.putAttribute(flowFile, "SOQL_QUERY_URL", query_url.get());

    session.transfer(ff, REL_SUCCESS);
}