Example usage for org.apache.solr.common.cloud ZkStateReader getClusterState

List of usage examples for org.apache.solr.common.cloud ZkStateReader getClusterState

Introduction

On this page you can find example usage for org.apache.solr.common.cloud ZkStateReader getClusterState.

Prototype

public ClusterState getClusterState() 

Source Link

Usage

From source file:com.doculibre.constellio.solr.context.SolrCoreContext.java

License:Open Source License

public static synchronized void initCores() {
    try {
        // Read the cluster state directly from ZooKeeper
        // (CoreAdminRequest is deliberately not used here).
        mainSolrServer.connect();
        ZkStateReader reader = mainSolrServer.getZkStateReader();
        ClusterState state = reader.getClusterState();

        // Build the set of user-visible collections.
        Set<String> visibleCollections = new HashSet<String>();
        for (String name : state.getCollections()) {
            // Names starting with "_" are reserved for system collections,
            // except for the default collection itself.
            if (!name.startsWith("_") || DEFAULT_COLLECTION_NAME.equals(name)) {
                visibleCollections.add(name);
            }
        }

        // Prefer non-system aliases over the collections they point to.
        Map<String, String> aliasMap = reader.getAliases().getCollectionAliasMap();
        if (aliasMap != null) {
            for (Map.Entry<String, String> alias : aliasMap.entrySet()) {
                if (!alias.getKey().startsWith("_")) {
                    // "_xxx" is for system only, not for users
                    visibleCollections.remove(alias.getValue());
                    visibleCollections.add(alias.getKey());
                }
            }
        }

        // Register an HTTP server for any collection we do not track yet.
        for (String name : visibleCollections) {
            if (!coreServers.containsKey(name)) {
                setHttpSolrServer(name, ConstellioSpringUtils.getSolrServerAddress());
            }
        }

        // Drop servers for collections that disappeared from the cloud, and
        // rebuild the user core name list (everything but the default core).
        userCoreNames.clear();
        Iterator<Map.Entry<String, SolrServer>> it = coreServers.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry<String, SolrServer> entry = it.next();
            String name = entry.getKey();
            if (!visibleCollections.contains(name)) {
                entry.getValue().shutdown();
                it.remove();
            } else if (!DEFAULT_COLLECTION_NAME.equals(name)) {
                userCoreNames.add(name);
            }
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

From source file:com.doculibre.constellio.solr.context.SolrLogContext.java

License:Open Source License

private static synchronized void initCores() {
    // Read the cluster state directly from ZooKeeper
    // (CoreAdminRequest is deliberately not used here).
    mainSolrServer.connect();
    ZkStateReader reader = mainSolrServer.getZkStateReader();
    ClusterState state = reader.getClusterState();

    // Log collections are only read from this context, so no synchronization
    // with existing core servers is needed: just make sure every log
    // collection exists in the cloud and has a registered HTTP server.
    Set<String> existingCollections = state.getCollections();
    for (String logCollection : LOG_CORE_NAMES) {
        if (!existingCollections.contains(logCollection)) {
            int replicationFactor = ConstellioSpringUtils.getSolrReplicationFactor();
            // A single shard is sufficient for a log collection.
            SolrServicesImpl.createCollectionInCloud(logCollection, logCollection, 1, replicationFactor);
        }
        setHttpSolrServer(logCollection, ConstellioSpringUtils.getSolrServerAddress());
    }
}

From source file:com.shaie.solr.SolrCloudUtils.java

License:Apache License

/** Returns the names of all collections that were created with the given configuration name. */
@SuppressWarnings("resource")
public static List<String> getCollectionsCreatedWithConfig(CloudSolrClient solrClient, String configName) {
    final ZkStateReader stateReader = solrClient.getZkStateReader();
    final List<String> matches = Lists.newArrayList();
    for (final String collectionName : stateReader.getClusterState().getCollectionsMap().keySet()) {
        // Keep only collections whose stored config name matches.
        if (configName.equals(getCollectionConfigName(stateReader, collectionName))) {
            matches.add(collectionName);
        }
    }
    return matches;
}

From source file:com.thinkaurelius.titan.diskstorage.solr.Solr5Index.java

License:Apache License

@Override
public void clearStorage() throws BackendException {
    try {
        if (mode != Mode.CLOUD) {
            // NOTE: this unchecked throw is caught by the catch (Exception e)
            // below and surfaces as a PermanentBackendException.
            throw new UnsupportedOperationException("Operation only supported for SolrCloud");
        }
        logger.debug("Clearing storage from Solr: {}", solrClient);
        ZkStateReader stateReader = ((CloudSolrClient) solrClient).getZkStateReader();
        // Refresh before reading so we see the current collection list.
        stateReader.updateClusterState(true);
        for (String collection : stateReader.getClusterState().getCollections()) {
            logger.debug("Clearing collection [{}] in Solr", collection);
            // Delete every document in the collection.
            UpdateRequest wipeAll = newUpdateRequest();
            wipeAll.deleteByQuery("*:*");
            solrClient.request(wipeAll, collection);
        }

    } catch (SolrServerException e) {
        logger.error("Unable to clear storage from index due to server error on Solr.", e);
        throw new PermanentBackendException(e);
    } catch (IOException e) {
        logger.error("Unable to clear storage from index due to low-level I/O error.", e);
        throw new PermanentBackendException(e);
    } catch (Exception e) {
        logger.error("Unable to clear storage from index due to general error.", e);
        throw new PermanentBackendException(e);
    }
}

From source file:com.thinkaurelius.titan.diskstorage.solr.Solr5Index.java

License:Apache License

/**
 * Checks whether the given collection already exists in the Solr cloud.
 */
private static boolean checkIfCollectionExists(CloudSolrClient server, String collection)
        throws KeeperException, InterruptedException {
    ZkStateReader stateReader = server.getZkStateReader();
    // Refresh from ZooKeeper so a just-created collection is visible.
    stateReader.updateClusterState(true);
    return stateReader.getClusterState().getCollectionOrNull(collection) != null;
}

From source file:com.thinkaurelius.titan.diskstorage.solr.Solr5Index.java

License:Apache License

/**
 * Blocks until no replica of the given collection on a live node is still
 * recovering (i.e. all collection shards are ready). Polls the cluster
 * state once per second; there is no upper bound on the wait.
 *
 * @param server     cloud client whose ZkStateReader is polled
 * @param collection collection whose shards are awaited
 * @throws KeeperException      on ZooKeeper errors while refreshing state
 * @throws InterruptedException if interrupted while sleeping between polls
 */
private static void waitForRecoveriesToFinish(CloudSolrClient server, String collection)
        throws KeeperException, InterruptedException {
    ZkStateReader zkStateReader = server.getZkStateReader();
    try {
        boolean cont = true;

        while (cont) {
            boolean sawLiveRecovering = false;
            // Force a refresh so each poll sees the current replica states.
            zkStateReader.updateClusterState(true);
            ClusterState clusterState = zkStateReader.getClusterState();
            Map<String, Slice> slices = clusterState.getSlicesMap(collection);
            // FIX: the arguments were reversed (the never-null message string
            // was checked instead of the map), so a missing collection slipped
            // past the check and caused a bare NPE in the loop below.
            Preconditions.checkNotNull(slices, "Could not find collection:" + collection);

            for (Map.Entry<String, Slice> entry : slices.entrySet()) {
                Map<String, Replica> shards = entry.getValue().getReplicasMap();
                for (Map.Entry<String, Replica> shard : shards.entrySet()) {
                    String state = shard.getValue().getStr(ZkStateReader.STATE_PROP);
                    // A replica only counts as "recovering" when its node is
                    // live; a dead node cannot make progress.
                    if ((state.equals(ZkStateReader.RECOVERING) || state.equals(ZkStateReader.SYNC)
                            || state.equals(ZkStateReader.DOWN))
                            && clusterState
                                    .liveNodesContain(shard.getValue().getStr(ZkStateReader.NODE_NAME_PROP))) {
                        sawLiveRecovering = true;
                    }
                }
            }
            if (!sawLiveRecovering) {
                cont = false;
            } else {
                Thread.sleep(1000);
            }
        }
    } finally {
        logger.info("Exiting solr wait");
    }
}

From source file:de.qaware.chronix.storage.solr.ChronixSolrCloudStorage.java

License:Apache License

/**
 * Resolves the given collection (or collection alias) and returns one replica
 * core URL per shard, chosen among the active replicas hosted on live nodes.
 *
 * @param zkHost            ZooKeeper URL
 * @param chronixCollection Solr collection name for chronix time series data
 * @return one replica URL per shard of the resolved collection(s)
 */
public List<String> getShardList(String zkHost, String chronixCollection) throws IOException {

    CloudSolrClient client = new CloudSolrClient(zkHost);
    List<String> shardUrls = new ArrayList<>();

    try {
        client.connect();

        ZkStateReader stateReader = client.getZkStateReader();
        ClusterState clusterState = stateReader.getClusterState();

        // Resolve the name: either a concrete collection, or an alias that
        // may expand to several comma-separated collections.
        String[] targetCollections;
        if (clusterState.hasCollection(chronixCollection)) {
            targetCollections = new String[] { chronixCollection };
        } else {
            Aliases aliases = stateReader.getAliases();
            String aliased = aliases.getCollectionAlias(chronixCollection);
            if (aliased == null)
                throw new IllegalArgumentException("Collection " + chronixCollection + " not found!");
            targetCollections = aliased.split(",");
        }

        Set<String> liveNodes = clusterState.getLiveNodes();
        // Fixed seed keeps replica selection deterministic across calls.
        Random random = new Random(5150);

        for (String target : targetCollections) {
            for (Slice slice : clusterState.getSlices(target)) {
                // Gather the core URLs of active replicas on live nodes.
                List<String> candidateUrls = new ArrayList<>();
                for (Replica replica : slice.getReplicas()) {
                    if (replica.getState().equals(Replica.State.ACTIVE)) {
                        ZkCoreNodeProps coreProps = new ZkCoreNodeProps(replica);
                        if (liveNodes.contains(coreProps.getNodeName()))
                            candidateUrls.add(coreProps.getCoreUrl());
                    }
                }
                int candidateCount = candidateUrls.size();
                if (candidateCount == 0)
                    throw new IllegalStateException("Shard " + slice.getName() + " in collection " + target
                            + " does not have any active replicas!");

                String chosenUrl = (candidateCount == 1) ? candidateUrls.get(0)
                        : candidateUrls.get(random.nextInt(candidateUrls.size()));
                shardUrls.add(chosenUrl);
            }
        }
    } finally {
        client.close();
    }

    return shardUrls;
}

From source file:org.apache.hadoop.hive.solr.SolrInputFormat.java

License:Apache License

@Override
public InputSplit[] getSplits(JobConf job, int numSplits) throws MalformedURLException {

    // One split per collection shard; each split points at the shard leader.
    Path[] result = FileInputFormat.getInputPaths(job);
    Path path = result[0];
    String zooKeeperAddress = job.get(ExternalTableProperties.ZOOKEEPER_SERVICE_URL);
    CloudSolrServer cloudServer = new CloudSolrServer(zooKeeperAddress);
    try {
        cloudServer.setDefaultCollection(job.get(ExternalTableProperties.COLLECTION_NAME));
        cloudServer.connect();
        ZkStateReader stateReader = cloudServer.getZkStateReader();
        ClusterState cs = stateReader.getClusterState();
        Collection<Slice> slices = cs.getSlices(job.get(ExternalTableProperties.COLLECTION_NAME));
        InputSplit[] inputSplits = new SolrFileSplit[slices.size()];
        int i = 0;
        for (Slice slice : slices) {
            // The leader's base_url + core identify the shard endpoint.
            Replica leader = slice.getLeader();
            SolrInputSplit split = new SolrInputSplit(leader.getProperties().get("base_url").toString(),
                    leader.getProperties().get("core").toString(),
                    job.get(ExternalTableProperties.COLLECTION_NAME));
            inputSplits[i] = new SolrFileSplit(split, path);
            i++;
        }
        LOG.debug("solr splits size = " + inputSplits.length);
        return inputSplits;
    } finally {
        // FIX: the CloudSolrServer (and its ZooKeeper connection) was leaked —
        // previously only the state reader was closed, and not on exception
        // paths. shutdown() also releases the ZkStateReader obtained above.
        cloudServer.shutdown();
    }
}

From source file:org.apache.hadoop.hive.solr.SolrOutputFormat.java

License:Apache License

@Override
public RecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
        final Class<? extends Writable> valueClass, boolean isCompressed, Properties tableProperties,
        Progressable progress) throws IOException {

    // Need to figure out how to improve the degree of parallelism.
    // For now we will just have 1 shard insert all the documents.
    String zooKeeperAddress = jc.get(ExternalTableProperties.ZOOKEEPER_SERVICE_URL);
    CloudSolrServer cloudServer;
    try {
        cloudServer = new CloudSolrServer(zooKeeperAddress);
    } catch (MalformedURLException ex) {
        LOG.log(Level.ERROR, "Exception occured while connecting to CloudSolrServer", ex);
        // FIX: the exception was previously only logged, leaving cloudServer
        // null and guaranteeing a NullPointerException on connect(); fail
        // fast instead, preserving the cause.
        throw new IOException("Invalid ZooKeeper address: " + zooKeeperAddress, ex);
    }
    cloudServer.connect();
    ZkStateReader stateReader = cloudServer.getZkStateReader();
    ClusterState cs = stateReader.getClusterState();
    // All writes go through the leader of "shard1".
    Slice shard = cs.getSlice(jc.get(ExternalTableProperties.COLLECTION_NAME), "shard1");
    Replica leader = shard.getLeader();
    String baseURL = leader.getProperties().get("base_url").toString();
    String shardName = leader.getProperties().get("core").toString();
    String collectionName = jc.get(ExternalTableProperties.COLLECTION_NAME);
    SolrDAO solrDAO = new SolrDAO(baseURL, shardName, collectionName, null);
    return new SolrRecordWriter(solrDAO);
}

From source file:org.apache.sentry.tests.e2e.solr.AbstractSolrSentryTestBase.java

License:Apache License

/**
 * Polls the cluster state once per second until no replica of {@code collection}
 * hosted on a live node is still recovering, or roughly {@code timeoutSeconds}
 * polls have elapsed.
 *
 * @param collection     collection whose shards are awaited
 * @param solrServer     cloud client whose ZkStateReader is polled
 * @param verbose        if true, logs per-replica state on each iteration
 * @param failOnTimeout  if true, fail() the test when the timeout is reached
 *                       while replicas are still recovering
 * @param timeoutSeconds approximate maximum number of 1-second polls
 */
protected static void waitForRecoveriesToFinish(String collection, CloudSolrServer solrServer, boolean verbose,
        boolean failOnTimeout, int timeoutSeconds) throws Exception {
    LOG.info("Entering solr wait with timeout " + timeoutSeconds);
    ZkStateReader zkStateReader = solrServer.getZkStateReader();
    try {
        boolean cont = true;
        int cnt = 0;

        while (cont) {
            if (verbose)
                LOG.debug("-");
            boolean sawLiveRecovering = false;
            // Force a refresh so each poll sees the current replica states.
            zkStateReader.updateClusterState(true);
            ClusterState clusterState = zkStateReader.getClusterState();
            Map<String, Slice> slices = clusterState.getSlicesMap(collection);
            assertNotNull("Could not find collection:" + collection, slices);
            for (Map.Entry<String, Slice> entry : slices.entrySet()) {
                Map<String, Replica> shards = entry.getValue().getReplicasMap();
                for (Map.Entry<String, Replica> shard : shards.entrySet()) {
                    if (verbose)
                        LOG.debug("rstate:" + shard.getValue().getStr(ZkStateReader.STATE_PROP) + " live:"
                                + clusterState.liveNodesContain(shard.getValue().getNodeName()));
                    String state = shard.getValue().getStr(ZkStateReader.STATE_PROP);
                    // A replica only counts as "recovering" when its node is
                    // live; a dead node cannot make progress.
                    if ((state.equals(ZkStateReader.RECOVERING) || state.equals(ZkStateReader.SYNC)
                            || state.equals(ZkStateReader.DOWN))
                            && clusterState
                                    .liveNodesContain(shard.getValue().getStr(ZkStateReader.NODE_NAME_PROP))) {
                        sawLiveRecovering = true;
                    }
                }
            }
            // Exit when nothing is recovering, or when the poll count reaches
            // the timeout (cnt advances once per ~1-second iteration).
            if (!sawLiveRecovering || cnt == timeoutSeconds) {
                if (!sawLiveRecovering) {
                    if (verbose)
                        LOG.debug("no one is recovering");
                } else {
                    if (verbose)
                        LOG.debug("Gave up waiting for recovery to finish..");
                    if (failOnTimeout) {
                        fail("There are still nodes recovering - waited for " + timeoutSeconds + " seconds");
                        // won't get here
                        return;
                    }
                }
                cont = false;
            } else {
                Thread.sleep(1000);
            }
            cnt++;
        }
    } finally {
        LOG.info("Exiting solr wait");
    }
}