Example usage for org.apache.zookeeper ZooKeeper getState

List of usage examples for org.apache.zookeeper ZooKeeper getState

Introduction

In this page you can find the example usage for org.apache.zookeeper ZooKeeper getState.

Prototype

public States getState() 

Source Link

Usage

From source file:co.cask.tephra.distributed.ThriftTransactionServerTest.java

License: Apache License

/**
 * Expires the ZooKeeper session underlying the given client service.
 * <p>
 * This works by opening a second ZooKeeper connection that re-uses the same
 * session id and password; when that duplicate connects, the server considers
 * the session taken over, and closing the duplicate expires the original.
 *
 * @param zkClientService
 *            the client service whose underlying session should be expired
 * @throws Exception
 *             if the duplicate session does not connect within 30 seconds
 */
private void expireZkSession(ZKClientService zkClientService) throws Exception {
    ZooKeeper zooKeeper = zkClientService.getZooKeeperSupplier().get();
    final SettableFuture<?> connectFuture = SettableFuture.create();
    Watcher watcher = new Watcher() {
        @Override
        public void process(WatchedEvent event) {
            // Signal once the duplicate session reaches SyncConnected.
            if (event.getState() == Event.KeeperState.SyncConnected) {
                connectFuture.set(null);
            }
        }
    };

    // Create another Zookeeper session with the same sessionId so that the original one expires.
    final ZooKeeper dupZookeeper = new ZooKeeper(zkClientService.getConnectString(),
            zooKeeper.getSessionTimeout(), watcher, zooKeeper.getSessionId(), zooKeeper.getSessionPasswd());
    try {
        connectFuture.get(30, TimeUnit.SECONDS);
        // JUnit convention: expected value first, actual value second.
        Assert.assertEquals("Failed to re-create current session", ZooKeeper.States.CONNECTED,
                dupZookeeper.getState());
    } finally {
        // Always close the duplicate client, even if the await/assertion fails,
        // so the test does not leak a live connection.
        dupZookeeper.close();
    }
}

From source file:com.bigdata.rdf.sail.webapp.HAStatusServletUtil.java

License: Open Source License

/**
 * Show the interesting things about the quorum.
 * <ol>
 * <li>QuorumState</li>
 * <li>Who is the leader, who is a follower.</li>
 * <li>What is the SPARQL end point for each leader and follower.</li>
 * <li>Dump of the zookeeper state related to the quorum.</li>
 * <li>listServices (into pre element).</li>
 * </ol>
 * 
 * @param req
 *            The request. Read for the DIGESTS, SNAPSHOT, REBUILD, and
 *            ERROR parameters.
 * @param resp
 *            The response. The zookeeper dump (if any) is written directly
 *            onto its output stream.
 * @param current
 *            The XML/HTML node onto which the status sections are painted.
 * 
 * @throws IOException
 */
public void doGet(final HttpServletRequest req, final HttpServletResponse resp, final XMLBuilder.Node current)
        throws IOException {

    // Nothing to report unless this is an HA journal.
    if (!(indexManager instanceof HAJournal))
        return;

    final HAJournal journal = (HAJournal) indexManager;

    final ZKQuorumImpl<HAGlue, ZKQuorumClient<HAGlue>> quorum = (ZKQuorumImpl) journal.getQuorum();

    // The current token.
    final long quorumToken = quorum.token();

    // The last valid token.
    final long lastValidToken = quorum.lastValidToken();

    // This token is a bit different. It is sensitive to the journal role in
    // the quorum (joined or not).
    final long haReadyToken = journal.getHAReady();

    final int njoined = quorum.getJoined().length;

    /*
     * Note: This is the *local* HAGlueService.
     * 
     * This page must be robust to some new failure modes. The ZooKeeper
     * client can now be associated with an expired session, River discovery
     * can now be disabled, and the HAQuorumService might not be available
     * from quorum.getClient(). All of those things can happen if there is a
     * zookeeper session expiration that forces us to terminate the
     * HAQuorumService. This condition will be cured automatically (unless
     * the service is being shutdown), but only limited status information
     * can be provided while the HAQuorumService is not running.
     */
    final QuorumService<HAGlue> quorumService;
    {
        QuorumService<HAGlue> t;
        try {
            t = (QuorumService) quorum.getClient();
        } catch (IllegalStateException ex) {
            // Note: Not available (quorum.start() not called).
            t = null;
        }
        quorumService = t;
    }

    // Parse the optional DIGESTS request parameter. null => no digests;
    // empty value => the default digest selection; otherwise an enum name.
    final DigestEnum digestEnum;
    {
        final String str = req.getParameter(StatusServlet.DIGESTS);
        if (str == null) {
            digestEnum = null;
        } else {
            if (str.trim().isEmpty()) {
                digestEnum = StatusServlet.DEFAULT_DIGESTS;
            } else {
                digestEnum = DigestEnum.valueOf(str.trim());
            }
        }
    }

    current.node("h1", "High Availability");

    {

        final XMLBuilder.Node p = current.node("p");

        // The quorum state
        if (quorumService == null) {

            p.text("The local quorum service is not running.").node("br").close();

        } else {

            p.text("The quorum is " + (quorum.isQuorumMet() ? "" : "not") + " met.").node("br").close();

            p.text("" + njoined + " out of " + quorum.replicationFactor() + " services are joined.").node("br")
                    .close();

            p.text("quorumToken=" + quorumToken + ", lastValidToken=" + lastValidToken).node("br").close();

            p.text("logicalServiceZPath=" + quorumService.getLogicalServiceZPath()).node("br").close();

            p.text("PlatformStatsPlugIn="
                    + (journal.getPlatformStatisticsCollector() == null ? "N/A" : "Running")).node("br")
                    .close();

            p.text("GangliaPlugIn=" + (journal.getGangliaService() == null ? "N/A" : "Running")).node("br")
                    .close();

            // Note: This is the *local* value of getHAStatus().
            // Note: The HAReady token reflects whether or not the service
            // is joined.
            p.text("HAStatus: " + quorumService.getService().getHAStatus() + ", HAReadyToken=" + haReadyToken)
                    .node("br").close();

            /*
             * Report on the Service.
             */
            {
                p.text("Service: serviceId=" + quorumService.getServiceId()).node("br").close();
                p.text("Service: pid=" + quorumService.getPID()).node("br").close();
                p.text("Service: path=" + quorumService.getServiceDir()).node("br").close();
                p.text("Service: proxy=" + journal.getHAJournalServer().getProxy()).node("br").close();

            }

        }

        /*
         * Report on the HA backup status (snapshot and restore policy).
         * 
         * Note: The age and commit counter for the available snapshots
         * are provided in another section (below).
         */
        {

            // snapshot policy.
            {
                final SnapshotManager mgr = journal.getSnapshotManager();
                //                    final IRootBlockView lastSnapshotRB = mgr
                //                            .getNewestSnapshot();
                //                    final long sinceCommitCounter = lastSnapshotRB == null ? -1L
                //                            : lastSnapshotRB.getCommitCounter();
                //                    final long haLogBytesOnDiskSinceLastSnapshot = mgr
                //                            .getHALogFileBytesSinceCommitCounter(sinceCommitCounter);
                final ISnapshotPolicy snapshotPolicy = mgr.getSnapshotPolicy();
                final boolean takeSnapshot = mgr.isReadyToSnapshot(snapshotPolicy.newSnapshotRequest());
                p.text("Service"//
                        + ": snapshotPolicy=" + snapshotPolicy//
                        + ", shouldSnapshot=" + takeSnapshot//
                //                            + ", lastSnapshotCommitCounter="
                //                            + sinceCommitCounter//
                //                            + ", HALogFileBytesOnDiskSinceLastSnapshot="
                //                            + haLogBytesOnDiskSinceLastSnapshot//
                ).node("br").close();
            }
            // restore policy.
            p.text("Service: restorePolicy=" + journal.getSnapshotManager().getRestorePolicy()).node("br")
                    .close();

            // HA Load Balancer.
            {

                p.text("Service: LBSPolicy=" + HALoadBalancerServlet.toString(req.getServletContext()))
                        .node("br").close();
            }
            //                if(true) {
            //                    /*
            //                     * HABackup: disable this code block. It is for
            //                     * debug purposes only.
            //                     */
            //                    p.text("Service: getEarliestRestorableCommitPoint()="
            //                            + journal.getSnapshotManager().getRestorePolicy().getEarliestRestorableCommitPoint(journal))
            //                            .node("br").close();
            //                }

        }

        /*
         * Report on the Journal.
         */
        {
            final File file = journal.getFile();
            if (file != null) {
                String digestStr = null;
                if (digestEnum != null && (digestEnum == DigestEnum.All || digestEnum == DigestEnum.Journal)) {
                    try {
                        final MessageDigest digest = MessageDigest.getInstance("MD5");
                        journal.getBufferStrategy().computeDigest(null/* snapshot */, digest);
                        digestStr = new BigInteger(1, digest.digest()).toString(16);
                    } catch (NoSuchAlgorithmException ex) {
                        // ignore - digest is reported only when computable.
                    } catch (DigestException ex) {
                        // ignore - digest is reported only when computable.
                    }
                }
                final IRootBlockView rb = journal.getRootBlockView();
                final long commitCounter = rb.getCommitCounter();
                //                    // Move this stuff to a TXS Status section?
                //                    long releaseTime = -1;
                //                    try {
                //                        // Note: Can throw exception if quorum is not met.
                //                        releaseTime = journal.getTransactionService()
                //                                .getReleaseTime();
                //                    } catch (QuorumException ex) {
                //                        // Ignore.
                //                    }
                final long fileSize = file == null ? 0L : file.length();
                p.text("HAJournal: file=" + file //
                        + ", commitCounter=" + commitCounter //
                        + ", nbytes=" + fileSize//
                        + (digestStr == null ? "" : ", md5=" + digestStr)//
                //                            + (releaseTime != -1L ? ", releaseTime="
                //                                    + RootBlockView.toString(releaseTime)//
                //                            : "")//
                ).node("br").close();
                // Show the current root block.
                if (debug)
                    current.node("pre", rb.toString());
            }
        }

        /**
         * Report #of files and bytes in the HALog directory.
         * 
         * @see <a
         *      href="https://sourceforge.net/apps/trac/bigdata/ticket/670">
         *      Accumulating HALog files cause latency for HA commit</a>
         */
        {
            final HALogNexus nexus = journal.getHALogNexus();
            {
                /*
                 * Use efficient index to compute the #of bytes (scan with
                 * sum) and the #of files.
                 */
                int nfiles = 0;
                long nbytes = 0L;
                final Iterator<IHALogRecord> itr = nexus.getHALogs();
                IHALogRecord r = null;
                while (itr.hasNext()) {
                    r = itr.next();
                    nbytes += r.sizeOnDisk();
                    nfiles++;
                }
                /*
                 * Add in the current HALog file (if any).
                 */
                final File currentFile = nexus.getHALogWriter().getFile();
                if (currentFile != null) {
                    nbytes += currentFile.length();
                    nfiles++;
                }
                final String compressorKey = journal.getProperties().getProperty(
                        com.bigdata.journal.Options.HALOG_COMPRESSOR,
                        com.bigdata.journal.Options.DEFAULT_HALOG_COMPRESSOR);
                p.text("HALogDir: nfiles=" + nfiles + ", nbytes=" + nbytes + ", path=" + nexus.getHALogDir()
                        + ", compressorKey=" + compressorKey + ", lastHALogClosed="
                        + (r == null ? "N/A" : CommitCounterUtility.getCommitCounterStr(r.getCommitCounter()))
                        + ", liveLog=" + (currentFile == null ? "N/A" : currentFile.getName())).node("br")
                        .close();
            }
            if (digestEnum != null && (digestEnum == DigestEnum.All || digestEnum == DigestEnum.HALogs)) {
                /*
                 * List each historical HALog file together with its digest.
                 * 
                 * Note: This can be VERY expensive.
                 */
                final Iterator<IHALogRecord> itr = nexus.getHALogs();
                while (itr.hasNext()) {
                    final IHALogRecord rec = itr.next();
                    final long nbytes = rec.sizeOnDisk();
                    final long closingCommitCounter = rec.getRootBlock().getCommitCounter();
                    String digestStr = null;
                    final File file = nexus.getHALogFile(closingCommitCounter);
                    final IHALogReader r = nexus.getHALogWriter().getReader(closingCommitCounter);
                    try {
                        if (!r.isEmpty()) {
                            try {
                                final MessageDigest digest = MessageDigest.getInstance("MD5");
                                r.computeDigest(digest);
                                digestStr = new BigInteger(1, digest.digest()).toString(16);
                            } catch (NoSuchAlgorithmException ex) {
                                // ignore - digest is reported only when computable.
                            } catch (DigestException ex) {
                                // ignore - digest is reported only when computable.
                            }
                        }
                    } finally {
                        // Always release the HALog reader.
                        r.close();
                    }
                    p.text("HALogFile: closingCommitCounter=" + closingCommitCounter//
                            + ", file=" + file//
                            + ", nbytes=" + nbytes//
                            + (digestStr == null ? "" : ", md5=" + digestStr)).node("br").close();
                }
            }
        }

        /*
         * Report #of files and bytes in the snapshot directory.
         * 
         * Note: This uses the in-memory index rather than scanning the
         * directory in order to reduce latency associated with the file
         * system.
         */
        {
            {
                //                    final File snapshotDir = journal
                //                            .getSnapshotManager().getSnapshotDir();
                //                    final File[] a = snapshotDir.listFiles(new FilenameFilter() {
                //                        @Override
                //                        public boolean accept(File dir, String name) {
                //                            return name.endsWith(SnapshotManager.SNAPSHOT_EXT);
                //                        }
                //                    });
                //                    for (File file : a) {
                //                        nbytes += file.length();
                //                        nfiles++;
                //                    }
                /*
                 * List the available snapshots (in order by increasing
                 * commitTime).
                 */
                final Iterator<ISnapshotRecord> itr = journal.getSnapshotManager().getSnapshots();
                int nfiles = 0;
                long nbytes = 0L;
                while (itr.hasNext()) {
                    final ISnapshotRecord sr = itr.next();
                    nbytes += sr.sizeOnDisk();
                    nfiles++;
                }
                p.text("SnapshotDir: nfiles=" + nfiles + ", nbytes=" + nbytes + ", path="
                        + journal.getSnapshotManager().getSnapshotDir()).node("br").close();
            }
            if (true) {

                /*
                 * List the available snapshots (in order by increasing
                 * commitTime).
                 */
                final Iterator<ISnapshotRecord> itr = journal.getSnapshotManager().getSnapshots();

                while (itr.hasNext()) {
                    final ISnapshotRecord r = itr.next();
                    final IRootBlockView rb = r.getRootBlock();
                    final long nbytes = r.sizeOnDisk();
                    //                        final File file = journal.getSnapshotManager()
                    //                                .getSnapshotFile(rb.getCommitCounter());
                    String digestStr = null;
                    if (digestEnum != null
                            && (digestEnum == DigestEnum.All || digestEnum == DigestEnum.Snapshots)) {
                        try {
                            final MessageDigest digest = MessageDigest.getInstance("MD5");
                            journal.getSnapshotManager().getDigest(rb.getCommitCounter(), digest);
                            digestStr = new BigInteger(1, digest.digest()).toString(16);
                        } catch (NoSuchAlgorithmException ex) {
                            // ignore - digest is reported only when computable.
                        } catch (DigestException ex) {
                            // ignore - digest is reported only when computable.
                        }
                    }

                    p.text("SnapshotFile: commitTime=" + RootBlockView.toString(rb.getLastCommitTime())
                            + ", commitCounter=" + rb.getCommitCounter() + ", nbytes=" + nbytes
                            + (digestStr == null ? "" : ", md5=" + digestStr)).node("br").close();

                }

            }

        }

        /*
         * If requested, conditionally start a snapshot.
         */
        {
            final String val = req.getParameter(StatusServlet.SNAPSHOT);

            if (val != null) {

                /*
                 * Attempt to interpret the parameter as a percentage
                 * (expressed as an integer).
                 * 
                 * Note: The default threshold will trigger a snapshot
                 * regardless of the size of the journal and the #of HALog
                 * files. A non-default value of 100 will trigger the
                 * snapshot if the HALog files occupy as much space on the
                 * disk as the Journal. Other values may be used as
                 * appropriate.
                 */
                int percentLogSize = 0;
                try {
                    percentLogSize = Integer.parseInt(val);
                } catch (NumberFormatException ex) {
                    // ignore - fall back to the default threshold (0).
                }

                journal.getSnapshotManager().takeSnapshot(new HASnapshotRequest(percentLogSize));

            }

        }

        /*
         * Report if a snapshot is currently running.
         */
        if (journal.getSnapshotManager().getSnapshotFuture() != null) {

            p.text("Snapshot running.").node("br").close();

        }

        p.close();

        if (debug)
            current.node("pre", quorum.toString());

    }

    /**
     * If requested, conditionally REBUILD the service from the leader
     * (disaster recover).
     * 
     * FIXME This should only be triggered by a POST (it is modestly safe
     * since a REBUILD can not be triggered if the service is joined, at the
     * same commit point as the leader, or already running, but it is not so
     * safe that you should be able to use a GET to demand a REBUILD).
     */
    if (quorumService != null) {

        final String val = req.getParameter(HAStatusServletUtil.REBUILD);

        if (val != null) {

            // Local HAGlue interface for this service (not proxy).
            final HAGlue haGlue = quorumService.getService();

            // Request RESTORE.
            if (haGlue.rebuildFromLeader(new HARemoteRebuildRequest()) != null) {

                current.node("h2", "Running Disaster Recovery for this service (REBUILD).");

            }

        }

    }

    if (quorumService != null) {

        /*
         * Force the service into the "ERROR" state. It will automatically
         * attempt to recover.
         */

        final String val = req.getParameter(HAStatusServletUtil.ERROR);

        if (val != null) {

            quorumService.enterErrorState();

        }

    }

    /*
     * Display the NSS port, host, and leader/follower/not-joined
     * status for each service in the quorum.
     */
    if (quorumService != null) {

        current.node("h2", "Quorum Services");

        {

            final XMLBuilder.Node p = current.node("p");

            final UUID[] joined = quorum.getJoined();

            final UUID[] pipeline = quorum.getPipeline();

            // In pipeline order.
            for (UUID serviceId : pipeline) {

                final HAGlue remoteService;
                try {

                    remoteService = quorumService.getService(serviceId);

                } catch (RuntimeException ex) {

                    /*
                     * Ignore. Might not be an HAGlue instance.
                     */

                    if (log.isInfoEnabled())
                        log.info(ex, ex);

                    continue;

                }

                /*
                 * Do all RMIs to the remote service in a try/catch. This
                 * allows us to catch problems with communications to the
                 * remote service and continue to paint the page.
                 */
                final String hostname;
                final int nssPort;
                final InetSocketAddress writePipelineAddr;
                final String extendedRunState;
                try {

                    hostname = remoteService.getHostname();

                    /*
                     * TODO When there are multiple ethernet interfaces, is
                     * not necessarily reporting the interface(s) that the
                     * port is exposed to.
                     */
                    nssPort = remoteService.getNSSPort();

                    // address where the downstream service will listen.
                    writePipelineAddr = remoteService.getWritePipelineAddr();

                    // The AbstractServer and HAQuorumService run states.
                    extendedRunState = remoteService.getExtendedRunState();

                } catch (IOException ex) {

                    /*
                     * Note error and continue with the next service.
                     */

                    p.text("Unable to reach service: " + remoteService).close();

                    log.error(ex, ex);

                    continue;

                }

                final boolean isLeader = serviceId.equals(quorum.getLeaderId());

                final boolean isFollower = indexOf(serviceId, joined) > 0;

                final boolean isSelf = serviceId.equals(quorumService.getServiceId());

                final int pipelineIndex = indexOf(serviceId, pipeline);

                /*
                 * TODO This assumes that the context path at the remote
                 * service is the same as the context path for the local
                 * service.
                 */
                final String nssUrl = "http://" + hostname + ":" + nssPort + BigdataStatics.getContextPath();

                // hyper link to NSS service.
                p.node("a").attr("href", nssUrl).text(nssUrl).close();

                // plus the other metadata.
                p.text(" : "//
                        + (isLeader ? "leader" : (isFollower ? "follower" : " is not joined"))//
                        + ", pipelineOrder=" + (pipelineIndex == -1 ? "N/A" : pipelineIndex)//
                        + ", writePipelineAddr=" + writePipelineAddr//
                        + ", service=" + (isSelf ? "self" : "other")//
                        + ", extendedRunState=" + extendedRunState//
                ).node("br").close();
            }

            p.close();

        }

        // DumpZookeeper
        {

            current.node("h2", "Zookeeper");

            ZooKeeper zk;
            try {
                zk = quorum.getZookeeper();
            } catch (InterruptedException e1) {
                // Continue, but ignore zookeeper.
                zk = null;
            }

            if (zk == null || !zk.getState().isAlive()) {

                final XMLBuilder.Node p = current.node("p");

                p.text("ZooKeeper is not available.").close();

            } else {

                // final XMLBuilder.Node section = current.node("pre");
                // flush writer before writing on PrintStream.
                current.getBuilder().getWriter().flush();

                // dump onto the response.
                final PrintWriter out = new PrintWriter(resp.getOutputStream(), true/* autoFlush */);

                out.print("<pre>\n");

                try {

                    final DumpZookeeper dump = new DumpZookeeper(zk);

                    dump.dump(out, true/* showDatatrue */, quorumService.getLogicalServiceZPath()/* zpath */,
                            0/* depth */);

                } catch (InterruptedException e) {

                    e.printStackTrace(out);

                } catch (KeeperException e) {

                    e.printStackTrace(out);

                }

                // close section.
                out.print("\n</pre>");

                // flush PrintWriter before resuming writes on Writer.
                out.flush();

            }

        }

    }

}

From source file:com.bigdata.zookeeper.AbstractZooTestCase.java

License: Open Source License

/**
 * Returns a new {@link ZooKeeper} client that is connected to the same
 * zookeeper ensemble as the given client and shares the same session, but
 * is nevertheless a distinct instance.
 * <p>
 * Note: Some unit tests use this to force the given {@link ZooKeeper} to
 * report a {@link SessionExpiredException} by closing the returned
 * instance.
 * 
 * @param zookeeper1
 *            An existing zookeeper client.
 * 
 * @return A distinct client associated with the same session.
 * 
 * @throws IOException
 * @throws InterruptedException
 */
protected ZooKeeper getDistinctZooKeeperForSameSession(final ZooKeeper zookeeper1)
        throws IOException, InterruptedException {

    // Open a second client re-using the session id/password of the first.
    final ZooKeeper zk = new ZooKeeper(zookeeperAccessor.hosts, zookeeperAccessor.sessionTimeout,
            new Watcher() {
                public void process(WatchedEvent e) {
                    // No-op: events on this helper client are ignored.
                }
            }, zookeeper1.getSessionId(), zookeeper1.getSessionPasswd());

    // Poll (with a fixed budget) until the new client is connected.
    final long begin = System.nanoTime();

    final long timeout = TimeUnit.MILLISECONDS.toNanos(1000/* ms */);

    while (zk.getState().isAlive() && zk.getState() != ZooKeeper.States.CONNECTED) {

        final long elapsed = System.nanoTime() - begin;

        if (elapsed > timeout) {
            fail("ZooKeeper session did not connect? elapsed=" + TimeUnit.NANOSECONDS.toMillis(elapsed));
        }

        if (log.isInfoEnabled()) {
            log.info("Awaiting connected.");
        }

        Thread.sleep(100/* ms */);

    }

    // The loop also exits if the client dies before connecting.
    if (!zk.getState().isAlive()) {
        fail("Zookeeper died?");
    }

    if (log.isInfoEnabled())
        log.info("Zookeeper connected.");

    return zk;

}

From source file:com.bigdata.zookeeper.AbstractZooTestCase.java

License: Open Source License

/**
 * Expires the session associated with the {@link Zookeeper} client
 * instance./*from w  w  w.ja va2 s  .  c  o  m*/
 * 
 * @param zookeeper
 * 
 * @throws IOException
 * @throws InterruptedException
 */
protected void expireSession(ZooKeeper zookeeper) throws IOException, InterruptedException {

    /*
     * Obtain a distinct ZooKeeper instance associated with the _same_
     * session.
     */
    final ZooKeeper zookeeper2 = getDistinctZooKeeperForSameSession(zookeeper);

    /*
     * Close this instance, forcing the original instance to report a
     * SessionExpiredException. Note that this is not synchronous so we need
     * to wait until the original ZooKeeper instance notices that its
     * session is expired.
     */
    zookeeper2.close();

    /*
     * Wait up to the session timeout and then wait some more so that the
     * events triggered by that timeout have time to propagate.
     */
    final long timeout = TimeUnit.MILLISECONDS.toNanos(sessionTimeout * 2);

    final long begin = System.nanoTime();

    while (zookeeper.getState().isAlive()) {

        final long elapsed = System.nanoTime() - begin;

        if (elapsed > timeout) {

            fail("ZooKeeper session did not expire? elapsed=" + TimeUnit.NANOSECONDS.toMillis(elapsed)
                    + ", sessionTimeout=" + sessionTimeout);

        }

        if (log.isInfoEnabled()) {

            log.info("Awaiting session expired.");

        }

        Thread.sleep(500/* ms */);

    }

    if (log.isInfoEnabled()) {

        final long elapsed = System.nanoTime() - begin;

        log.info("Session was expired: elapsed=" + TimeUnit.NANOSECONDS.toMillis(elapsed) + ", sessionTimeout="
                + sessionTimeout);

    }

}

From source file:com.bigdata.zookeeper.TestZookeeperAccessor.java

License: Open Source License

/**
 * Verifies that the accessor hands out a fresh, live session after the
 * previously obtained ZooKeeper client has been closed.
 */
public void test_handleExpiredSession() throws InterruptedException {

    final ZooKeeperAccessor accessor = new ZooKeeperAccessor("localhost:" + clientPort, sessionTimeout);

    assertTrue(accessor.awaitZookeeperConnected(4000, TimeUnit.MILLISECONDS));

    ZooKeeper zk = accessor.getZookeeper();

    assertTrue(zk.getState().isAlive());

    // Kill the current session by closing the client directly.
    zk.close();

    assertFalse(zk.getState().isAlive());

    /*
     * The accessor should transparently establish a new session.
     */

    assertTrue(accessor.awaitZookeeperConnected(4000, TimeUnit.MILLISECONDS));

    zk = accessor.getZookeeper();

    assertTrue(zk.getState().isAlive());

}

From source file:com.bigdata.zookeeper.TestZookeeperSessionSemantics.java

License: Open Source License

/**
 * Explores ZooKeeper client session semantics end to end: connecting,
 * establishing znode watches, and observing which Watcher events are (and
 * are not) delivered when the server goes away.
 *
 * NOTE(review): this test is interactive — it logs "KILL ZOOKEEPER." and
 * sleeps, expecting the operator to kill the local ZooKeeper server at
 * that point. It will not pass unattended.
 *
 * Key observations encoded below:
 * <ul>
 * <li>The negotiated session timeout is only available (non-zero) once the
 * client reaches the CONNECTED state.</li>
 * <li>No event with KeeperState == SessionExpired is ever delivered to the
 * default watcher (see the assertion near the end).</li>
 * </ul>
 */
public void test_handleExpiredSession() throws InterruptedException, KeeperException, IOException {

    final String hosts = "localhost:" + clientPort;

    // Lock + conditions hand Watcher-thread events back to the test thread.
    final Lock lock = new ReentrantLock();
    final Condition expireCond = lock.newCondition();
    final Condition connectCond = lock.newCondition();
    final Condition disconnectCond = lock.newCondition();
    final AtomicBoolean didExpire = new AtomicBoolean(false);
    final AtomicBoolean didDisconnect = new AtomicBoolean(false);

    /*
     * Start an instance and run until it gets an assigned sessionId, then
     * close it. This is a sanity check that the server is reachable before
     * the client under test is created.
     */
    {
        final ZooKeeper zk1a = new ZooKeeper(hosts, requestedSessionTimeout, new Watcher() {
            @Override
            public void process(WatchedEvent event) {
                log.warn(event);
            }
        });
        // Poll up to ~5s (10 x 500ms) for the CONNECTED state.
        int i = 0;
        while (i < 10) {
            boolean done = false;
            if (zk1a.getState() == ZooKeeper.States.CONNECTED) {
                done = true;
            }
            log.info("zk.getState()=" + zk1a.getState() + ", zk.getSessionId()=" + zk1a.getSessionId());
            if (done)
                break;
            Thread.sleep(500);
            i++;
        }
        if (zk1a.getState() != ZooKeeper.States.CONNECTED) {
            fail("Did not connect.");
        }
        zk1a.close();
    }

    // The client under test; its watcher records state transitions above.
    final ZooKeeper zk1 = new ZooKeeper(hosts, requestedSessionTimeout, new Watcher() {
        /**
         * Note: The default watcher will not receive any events
         * after a session expire. A {@link Zookeeper#close()}
         * causes an immediate session expire. Thus, no events
         * (include the session expire) will be received after a
         * close().
         */
        @Override
        public void process(final WatchedEvent event) {
            log.warn(event);
            switch (event.getState()) {
            case AuthFailed:
                break;
            case Disconnected:
                // Record and signal the disconnect for the test thread.
                lock.lock();
                try {
                    didDisconnect.set(true);
                    disconnectCond.signalAll();
                } finally {
                    lock.unlock();
                }
                break;
            case Expired:
                // Record and signal the (never observed) session expire.
                lock.lock();
                try {
                    didExpire.set(true);
                    expireCond.signalAll();
                } finally {
                    lock.unlock();
                }
                break;
            //                        case ConnectedReadOnly: // not in 3.3.3
            //                            break;
            //                        case NoSyncConnected: // not in 3.3.3
            //                            break;
            //                        case SaslAuthenticated: // not in 3.3.3
            //                            break;
            case SyncConnected:
                lock.lock();
                try {
                    connectCond.signalAll();
                } finally {
                    lock.unlock();
                }
                break;
            case Unknown:
                break;
            }

        }
    });

    /*
     * Note: You can not obtain the negotiated session timeout until the
     * zookeeper client has connected to a zookeeper service (or rather,
     * it will return ZERO until it is connected).
     */
    final int negotiatedSessionTimeout;
    lock.lock();
    try {
        log.info("Waiting zk connected.");
        connectCond.await(10, TimeUnit.SECONDS);
        negotiatedSessionTimeout = zk1.getSessionTimeout();
        if (log.isInfoEnabled())
            log.info("Negotiated sessionTimeout=" + negotiatedSessionTimeout);
        assertNotSame(0, negotiatedSessionTimeout);
        assertTrue(negotiatedSessionTimeout > 0);
    } finally {
        lock.unlock();
    }

    // The session is live and no disconnect/expire has been seen yet.
    assertTrue(zk1.getState().isAlive());

    assertFalse(didDisconnect.get());

    assertFalse(didExpire.get());

    // clear out test znodes.
    destroyZNodes(zk1, "/test");

    // ensure root /test znode exists.
    try {
        zk1.create("/test", new byte[] {}, acl, CreateMode.PERSISTENT);
    } catch (KeeperException.NodeExistsException ex) {
        log.warn("Ignoring: " + ex);
    }

    // look at that znode, establishing a watcher.
    zk1.getData("/test", true/* watch */, null/* stat */);

    // update the znode's data.
    zk1.setData("/test", new byte[] { 1 }, -1/* version */);

    // create an ephemeral sequential znode that is a child of /test.
    final String foozpath = zk1.create("/test/foo", new byte[] {}, acl, CreateMode.EPHEMERAL_SEQUENTIAL);

    // create a 2nd ephemeral sequential znode that is a child of /test.
    final String foozpath2 = zk1.create("/test/foo", new byte[] {}, acl, CreateMode.EPHEMERAL_SEQUENTIAL);

    /*
     * Look at that znode, establishing a watcher.
     * 
     * Note: We appear to see node deleted events for the ephemeral znodes
     * if the client connection is closed, but the state is still reported
     * as SyncConnected rather than SessionExpired.
     * 
     * Note: If we do not establish a watcher for an ephemeral znode, then
     * we DO NOT see an node deleted event when the client is closed!
     */
    zk1.getData(foozpath, true/* watch */, null/* stat */);
    //        zk1.getData(foozpath2, true/* watch */, null/* stat */);

    ////      close the existing instance.
    //        log.info("Closing ZK client");
    //        zk1.close();

    //        log.fatal("killing local zookeeper service.");
    //        killZKServer();
    //        Thread.sleep(5000);
    //        fail("done");

    // Disabled variant: spin until the client reports !isAlive().
    if (false) {
        log.info("Spin loop awaiting !isAlive() for client.");
        final long begin = System.currentTimeMillis();
        while (zk1.getState().isAlive()) {
            log.info("zk.getState()=" + zk1.getState() + ", zk.getSessionId()=" + zk1.getSessionId());
            final long elapsed = System.currentTimeMillis() - begin;
            if (elapsed > 60000 * 2)
                fail("Client still isAlive().");
            Thread.sleep(1000);
        }
        log.info("Continuing");
    }

    // Active variant: the OPERATOR must kill the ZooKeeper server here.
    if (true) {
        log.error("KILL ZOOKEEPER.");
        Thread.sleep(5000);
        log.info("Spin loop on ephemeral znode getData() for client.");
        // Retry getData() until the (restarted) server answers again.
        while (true) {
            try {
                zk1.getData(foozpath, true/* watch */, null/* stat */);
            } catch (KeeperException ex) {
                log.error(ex, ex);
                Thread.sleep(1000);
                continue;
            }
            log.info("zk.getState()=" + zk1.getState() + ", zk.getSessionId()=" + zk1.getSessionId());
            break;
            //                final long elapsed = System.currentTimeMillis() - begin;
            //                if (elapsed > 60000 * 2)
            //                    fail("Client still isAlive().");
            //                Thread.sleep(1000);
        }
        log.info("Continuing");
        // The /test data written above must survive the server restart.
        // NOTE(review): this reads foozpath (created empty) yet expects {1},
        // which was written to "/test" — confirm intent.
        final byte[] a = zk1.getData(foozpath, true/* watch */, null/* stat */);
        assertTrue("Expected " + Arrays.toString(new byte[] { 1 }) + ", not " + Arrays.toString(a),
                BytesUtil.bytesEqual(new byte[] { 1 }, a));
    }

    // // The disconnect event should be immediate.
    // lock.lock();
    // try {
    // disconnectCond.await(100, TimeUnit.MILLISECONDS);
    // } finally {
    // lock.unlock();
    // }
    //
    // assertTrue(didDisconnect.get());

    assertFalse(didDisconnect.get());
    assertFalse(didExpire.get());

    // After the kill/expire sequence the client session is dead.
    assertFalse(zk1.getState().isAlive());

    /*
     * Wait up to a little more than the negotiated session timeout for the
     * session to be expired.
     */
    lock.lock();
    try {
        // Attempt to get the znode again.
        new Thread(new Runnable() {
            public void run() {
                try {
                    final byte[] tmp = zk1.getData("/test", true/* watch */, null/* stat */);
                } catch (KeeperException e) {
                    log.error(e, e);
                } catch (InterruptedException e) {
                    log.error(e, e);
                }
            }
        }).start();
        expireCond.await(negotiatedSessionTimeout + 10000, TimeUnit.MILLISECONDS);
        /*
         * Note: No events are ever delivered to the watcher with
         * KeeperStates:=SessionExpired. This appears to be a design
         * decision.
         */
        assertFalse(didExpire.get());
    } finally {
        lock.unlock();
    }

    /*
     * Now obtain a new session.
     */
    {
        log.warn("Starting new ZK connection");
        final ZooKeeper zk2 = new ZooKeeper(hosts, requestedSessionTimeout, new Watcher() {

            @Override
            public void process(WatchedEvent event) {
                log.warn(event);
            }
        });

        assertTrue(zk2.getState().isAlive());

    }

}

From source file:com.deem.zkui.utils.ServletUtil.java

License:Open Source License

/**
 * Returns a connected {@link ZooKeeper} handle for the given server, caching
 * it in the HTTP session under the attribute {@code "zk"}. A cached handle is
 * reused only while its state is {@code CONNECTED}; otherwise a new connection
 * is attempted.
 *
 * @param request the current HTTP request; its session caches the handle.
 * @param response unused; retained for interface compatibility.
 * @param zkServer host:port connect string for the ZooKeeper ensemble.
 * @param globalProps must supply "zkSessionTimeout" (in seconds) and
 *            "defaultAcl".
 * @return the (possibly newly created) ZooKeeper handle, or {@code null} if
 *         the connection attempt threw. NOTE(review): a handle that was
 *         created but never reached CONNECTED is still returned (only the
 *         session cache is cleared) — callers must check its state.
 */
public ZooKeeper getZookeeper(HttpServletRequest request, HttpServletResponse response, String zkServer,
        Properties globalProps) {
    try {
        HttpSession session = request.getSession();
        ZooKeeper zk = (ZooKeeper) session.getAttribute("zk");
        if (zk == null || zk.getState() != ZooKeeper.States.CONNECTED) {
            Integer zkSessionTimeout = Integer.parseInt(globalProps.getProperty("zkSessionTimeout"));
            // Converting seconds to ms.
            zkSessionTimeout = zkSessionTimeout * 1000;
            zk = ZooKeeperUtil.INSTANCE.createZKConnection(zkServer, zkSessionTimeout);
            ZooKeeperUtil.INSTANCE.setDefaultAcl(globalProps.getProperty("defaultAcl"));
            // Cache only a fully connected handle; never cache a dead one.
            if (zk.getState() != ZooKeeper.States.CONNECTED) {
                session.setAttribute("zk", null);
            } else {
                session.setAttribute("zk", zk);
            }

        }
        return zk;
    } catch (IOException ex) {
        // Log the full exception (message + cause), not just the raw
        // stack-trace array, which discarded the failure reason.
        logger.error("Failed to connect to ZooKeeper at " + zkServer, ex);
    } catch (InterruptedException ex) {
        // Restore the interrupt status so callers up the stack can observe it.
        Thread.currentThread().interrupt();
        logger.error("Interrupted while connecting to ZooKeeper at " + zkServer, ex);
    }
    return null;
}

From source file:com.deem.zkui.utils.ZooKeeperUtil.java

License:Open Source License

/**
 * Opens a ZooKeeper client connection to {@code url} and polls briefly for it
 * to reach the CONNECTED state.
 *
 * @param url host:port connect string for the ZooKeeper ensemble.
 * @param zkSessionTimeout session timeout in milliseconds.
 * @return the client handle; it may still be in a non-CONNECTED state if the
 *         connection was not established within MAX_CONNECT_ATTEMPT polls —
 *         callers are expected to check {@code getState()}.
 */
public ZooKeeper createZKConnection(String url, Integer zkSessionTimeout)
        throws IOException, InterruptedException {
    // The watcher is effectively a no-op; connection progress is polled below.
    final ZooKeeper zk = new ZooKeeper(url, zkSessionTimeout, new Watcher() {
        @Override
        public void process(WatchedEvent event) {
            logger.trace("Connecting to ZK.");
        }
    });
    // Poll every 30ms until CONNECTED, giving up after MAX_CONNECT_ATTEMPT tries.
    for (int attempt = 0; attempt < MAX_CONNECT_ATTEMPT
            && zk.getState() != ZooKeeper.States.CONNECTED; attempt++) {
        Thread.sleep(30);
    }
    return zk;

}

From source file:com.doctor.zookeeper.api.Practice.java

License:Apache License

/**
 * Connects to a three-member local ZooKeeper ensemble and blocks until the
 * watcher (this {@code Practice} instance) releases {@code countDownLatch}.
 *
 * @param args unused.
 * @throws IOException if the ZooKeeper client cannot be constructed.
 */
public static void main(String[] args) throws IOException {
    // Connect string lists every member of the local ensemble.
    String connectString = "127.0.0.1:2181,127.0.0.1:2182,127.0.0.1:2183";
    // The Practice instance acts as the default Watcher; 5s session timeout.
    ZooKeeper zooKeeper = new ZooKeeper(connectString, 5000, new Practice());

    // Likely prints CONNECTING: the constructor returns before the session
    // handshake completes.
    System.out.println("zooKeeper connected state " + zooKeeper.getState());
    try {
        // Block until the watcher counts the latch down.
        countDownLatch.await();
    } catch (InterruptedException e) {
        // Restore the interrupt status instead of silently swallowing it
        // (the original only printed the stack trace).
        Thread.currentThread().interrupt();
        e.printStackTrace();
    }
}

From source file:com.github.mosuka.zookeeper.nicli.util.ZooKeeperConnectionTest.java

License:Apache License

@Test
public void testGetZooKeeper() {
    // The connection under test must hand back a live
    // org.apache.zookeeper.ZooKeeper instance.
    final ZooKeeper zooKeeper = zkConnection.getZooKeeper();

    assertEquals("org.apache.zookeeper.ZooKeeper", zooKeeper.getClass().getName());
    assertEquals(true, zooKeeper.getState().isAlive());
}