Example usage for org.apache.solr.common.cloud ZkCoreNodeProps ZkCoreNodeProps

List of usage examples for org.apache.solr.common.cloud ZkCoreNodeProps ZkCoreNodeProps

Introduction

In this page you can find the example usage for org.apache.solr.common.cloud ZkCoreNodeProps ZkCoreNodeProps.

Prototype

public ZkCoreNodeProps(ZkNodeProps nodeProps) 

Source Link

Usage

From source file:de.qaware.chronix.storage.solr.ChronixSolrCloudStorage.java

License: Apache License

/**
 * Returns the list of shard URLs for the given collection (or collection alias).
 * <p>
 * For every slice of the collection one active replica hosted on a live node is
 * chosen; when a slice has several candidates one is picked pseudo-randomly with
 * a fixed seed, so the selection is reproducible across calls.
 *
 * @param zkHost            ZooKeeper connect string
 * @param chronixCollection Solr collection (or alias) holding chronix time series data
 * @return one replica core URL per shard of the collection
 * @throws IOException              if closing the Solr client fails
 * @throws IllegalArgumentException if neither a collection nor an alias with the given name exists
 * @throws IllegalStateException    if a shard has no active replica on a live node
 */
public List<String> getShardList(String zkHost, String chronixCollection) throws IOException {

    List<String> shards = new ArrayList<>();

    // try-with-resources guarantees the client is closed even if shard lookup fails
    try (CloudSolrClient cloudSolrClient = new CloudSolrClient(zkHost)) {
        cloudSolrClient.connect();

        ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
        ClusterState clusterState = zkStateReader.getClusterState();

        String[] collections;
        if (clusterState.hasCollection(chronixCollection)) {
            collections = new String[] { chronixCollection };
        } else {
            // the name might refer to a collection alias rather than a concrete collection
            Aliases aliases = zkStateReader.getAliases();
            String aliasedCollections = aliases.getCollectionAlias(chronixCollection);
            if (aliasedCollections == null) {
                throw new IllegalArgumentException("Collection " + chronixCollection + " not found!");
            }
            collections = aliasedCollections.split(",");
        }

        Set<String> liveNodes = clusterState.getLiveNodes();
        // fixed seed keeps the replica choice deterministic between invocations
        Random random = new Random(5150);

        for (String coll : collections) {
            for (Slice slice : clusterState.getSlices(coll)) {
                // collect the core URLs of all active replicas hosted on live nodes
                List<String> replicas = new ArrayList<>();
                for (Replica r : slice.getReplicas()) {
                    if (r.getState().equals(Replica.State.ACTIVE)) {
                        ZkCoreNodeProps replicaCoreProps = new ZkCoreNodeProps(r);
                        if (liveNodes.contains(replicaCoreProps.getNodeName())) {
                            replicas.add(replicaCoreProps.getCoreUrl());
                        }
                    }
                }
                int numReplicas = replicas.size();
                if (numReplicas == 0) {
                    throw new IllegalStateException("Shard " + slice.getName() + " in collection " + coll
                            + " does not have any active replicas!");
                }

                String replicaUrl = (numReplicas == 1) ? replicas.get(0)
                        : replicas.get(random.nextInt(numReplicas));
                shards.add(replicaUrl);
            }
        }
    }

    return shards;
}

From source file:org.opencommercesearch.CloudSearchServer.java

License: Apache License

/**
 * Reloads the given collection's cores on every live node that hosts an active replica.
 * <p>
 * A socket timeout against a single node is logged and skipped so the remaining
 * nodes still get reloaded; any other failure aborts with a
 * {@code SearchServerException}.
 *
 * @param collectionName the collection whose cores should be reloaded
 * @param locale         the locale used to resolve the backing Solr server
 * @throws SearchServerException if an error other than a socket timeout occurs
 *                               while reloading a core
 */
public void reloadCollection(String collectionName, Locale locale) throws SearchServerException {
    CoreAdminRequest adminRequest = new CoreAdminRequest();
    adminRequest.setCoreName(collectionName);
    adminRequest.setAction(CoreAdminAction.RELOAD);

    CloudSolrServer server = getSolrServer(collectionName, locale);
    ZkStateReader zkStateReader = server.getZkStateReader();
    if (zkStateReader == null) {
        // a null reader means we have not connected to this collection yet
        server.connect();
        zkStateReader = server.getZkStateReader();
    }

    ClusterState clusterState = zkStateReader.getClusterState();
    Set<String> liveNodes = clusterState.getLiveNodes();

    if (liveNodes == null || liveNodes.size() == 0) {
        if (isLoggingInfo()) {
            logInfo("No live nodes found, 0 cores were reloaded");
        }
        return;
    }

    Map<String, Slice> slices = clusterState.getSlicesMap(collectionName);
    if (slices == null || slices.size() == 0) {
        if (isLoggingInfo()) {
            logInfo("No slices found, 0 cores were reloaded");
        }
        // nothing to iterate over; bail out instead of falling through to an empty loop
        return;
    }

    for (Slice slice : slices.values()) {
        for (ZkNodeProps nodeProps : slice.getReplicas()) {
            ZkCoreNodeProps coreNodeProps = new ZkCoreNodeProps(nodeProps);
            String node = coreNodeProps.getNodeName();
            // skip replicas that are on a dead node or not in the ACTIVE state
            if (!liveNodes.contains(node)
                    || !coreNodeProps.getState().equals(ZkStateReader.ACTIVE)) {
                if (isLoggingInfo()) {
                    logInfo("Node " + node + " is not live, unable to reload core " + collectionName);
                }
                continue;
            }

            if (isLoggingInfo()) {
                logInfo("Reloading core " + collectionName + " on " + node);
            }
            // reuse the load balancer's HttpClient so connection settings stay consistent
            HttpClient httpClient = server.getLbServer().getHttpClient();
            HttpSolrServer nodeServer = new HttpSolrServer(coreNodeProps.getBaseUrl(), httpClient,
                    getResponseParser());
            try {
                CoreAdminResponse adminResponse = adminRequest.process(nodeServer);
                if (isLoggingInfo()) {
                    logInfo("Reloaded core " + collectionName + ", current status is "
                            + adminResponse.getCoreStatus());
                }
            } catch (SolrServerException ex) {
                if (ex.getCause() instanceof SocketTimeoutException) {
                    // a socket timeout on one node should not abort reloading the remaining nodes
                    if (isLoggingError()) {
                        logError("Reloading core failed due to socket timeout for node [" + node
                                + "] and collection [" + collectionName + "]");
                    }
                } else {
                    throw create(CORE_RELOAD_EXCEPTION, ex);
                }
            } catch (IOException ex) {
                throw create(CORE_RELOAD_EXCEPTION, ex);
            }
        }
    }
}

From source file:uk.bl.wa.apache.solr.hadoop.ZooKeeperInspector.java

License: Apache License

/**
 * Builds, for each shard (slice) of the given collection, the list of core URLs
 * of that shard's replicas.
 *
 * @param zkHost     ZooKeeper connect string
 * @param collection the collection to inspect
 * @return one inner list of replica core URLs per slice, in sorted slice order
 */
public List<List<String>> extractShardUrls(String zkHost, String collection) {
    DocCollection docCollection = extractDocCollection(zkHost, collection);
    List<Slice> sortedSlices = getSortedSlices(docCollection.getSlices());

    List<List<String>> shardUrls = new ArrayList<List<String>>(sortedSlices.size());
    for (Slice shard : sortedSlices) {
        // a missing leader indicates the shard has not fully registered in ZooKeeper
        if (shard.getLeader() == null) {
            throw new IllegalArgumentException("Cannot find SolrCloud slice leader. "
                    + "It looks like not all of your shards are registered in ZooKeeper yet");
        }
        Collection<Replica> shardReplicas = shard.getReplicas();
        List<String> replicaUrls = new ArrayList<String>(shardReplicas.size());
        for (Replica replica : shardReplicas) {
            replicaUrls.add(new ZkCoreNodeProps(replica).getCoreUrl());
        }
        shardUrls.add(replicaUrls);
    }
    return shardUrls;
}