Example usage for org.apache.zookeeper KeeperException.NoNodeException code

List of usage examples for org.apache.zookeeper KeeperException.NoNodeException code

Introduction

On this page you can find example usage for org.apache.zookeeper KeeperException.NoNodeException code.

Prototype

public Code code()

To view the source code for org.apache.zookeeper KeeperException.NoNodeException code, use the Source Link below.

Click Source Link

Usage

From source file:org.apache.solr.cloud.ZkController.java

License:Apache License

/**
 * Registers this node as live in ZooKeeper by creating an ephemeral znode
 * under {@code ZkStateReader.LIVE_NODES_ZKNODE}.
 *
 * <p>A stale live node may linger after an ungraceful shutdown until the old
 * session's expiration timeout, which would block re-creation here. We
 * therefore delete any existing node first, then create a fresh ephemeral one.
 *
 * @throws KeeperException      for ZooKeeper errors other than NODEEXISTS
 * @throws InterruptedException if interrupted while communicating with ZooKeeper
 */
private void createEphemeralLiveNode() throws KeeperException, InterruptedException {
    String nodeName = getNodeName();
    String nodePath = ZkStateReader.LIVE_NODES_ZKNODE + "/" + nodeName;
    log.info("Register node as live in ZooKeeper:" + nodePath);

    try {
        boolean staleNodeRemoved;
        try {
            // Attempt a delete to cover the quick-bounce case: without a
            // graceful shutdown the old ephemeral node may still exist until
            // its session expires, which would make the create below fail.
            zkClient.delete(nodePath, -1, true);
            staleNodeRemoved = true;
        } catch (KeeperException.NoNodeException e) {
            // Nothing to delete - that is the normal, clean-shutdown case.
            // TODO: annoying that ZK logs a warning on us
            staleNodeRemoved = false;
        }
        if (staleNodeRemoved) {
            log.info("Found a previous node that still exists while trying to register a new live node "
                    + nodePath + " - removing existing node to create another.");
        }
        zkClient.makePath(nodePath, CreateMode.EPHEMERAL, true);
    } catch (KeeperException e) {
        // A concurrent creator may have won the race; NODEEXISTS is benign,
        // anything else is propagated to the caller.
        if (e.code() != KeeperException.Code.NODEEXISTS) {
            throw e;
        }
    }
}

From source file:org.springframework.integration.x.kafka.KafkaPartitionAllocator.java

License:Apache License

/**
 * On application-context shutdown, reads the stream's deployment status from
 * ZooKeeper and, if the stream is undeployed (or undeploying), cleans up the
 * partition-allocation data and the stored offsets for every partition.
 *
 * <p>Fix applied: the inner catch for {@link KeeperException.NoNodeException}
 * previously tested {@code Code.NONODE.equals(e.code())} with an error-logging
 * {@code else} branch - but a NoNodeException always carries {@code NONODE},
 * so that branch was unreachable dead code. The exception is now simply
 * ignored, which was the only reachable behavior; other ZooKeeper errors still
 * propagate to the outer catch and are logged there.
 *
 * @param event the context-closed event that triggers the cleanup (unused)
 */
@Override
public void onApplicationEvent(ContextClosedEvent event) {
    CuratorFrameworkState state = client.getState();
    if (state.equals(STARTED)) {
        try {
            byte[] streamStatusData;
            try {
                streamStatusData = client.getData().forPath(String.format(STREAM_PATH_PATTERN, streamName));
            } catch (KeeperException.NoNodeException e) {
                // The stream path does not exist - it may have been removed
                // already - so behave as if we received no data.
                streamStatusData = null;
            }
            // Use the values stored in the ZooKeeper directory so that we do
            // not have a dependency on spring-xd-dirt.
            String deploymentStatus;
            if (streamStatusData == null || streamStatusData.length == 0) {
                deploymentStatus = STREAM_STATUS_UNDEPLOYED;
            } else {
                Map<String, String> statusDataAsMap = objectMapper.reader(Map.class)
                        .readValue(streamStatusData);
                deploymentStatus = statusDataAsMap.get("state");
            }
            if (STREAM_STATUS_UNDEPLOYED.equals(deploymentStatus)
                    || STREAM_STATUS_UNDEPLOYING.equals(deploymentStatus)) {
                // Remove the partition-allocation data (stored in ZooKeeper).
                if (client.checkExists().forPath(getDataPath()) != null) {
                    try {
                        client.delete().deletingChildrenIfNeeded().forPath(getDataPath());
                    } catch (KeeperException.NoNodeException ignored) {
                        // Most likely someone else has deleted the path already
                        // between the existence check and the delete.
                    }
                }
                // Also remove the stored offset for each partition.
                for (Partition partition : partitions) {
                    if (log.isDebugEnabled()) {
                        log.debug("Deleting offsets for " + partition.toString());
                    }
                    offsetManager.deleteOffset(partition);
                    try {
                        offsetManager.flush();
                    } catch (IOException e) {
                        log.error("Error while trying to flush offsets: " + e);
                    }
                }

            }
        } catch (Exception e) {
            // Best-effort cleanup during shutdown: log and continue.
            log.error(e);
        }
    } else {
        log.warn("Could not check the stream state and perform cleanup, client state was " + state);
    }
}