Example usage for org.apache.solr.common.cloud Slice getLeader

List of usage examples for org.apache.solr.common.cloud Slice getLeader

Introduction

In this page you can find the example usage for org.apache.solr.common.cloud Slice getLeader.

Prototype

public Replica getLeader() 

Source Link

Usage

From source file:org.apache.hadoop.hive.solr.SolrInputFormat.java

License:Apache License

/**
 * Builds one input split per shard (slice) of the configured Solr collection,
 * each split targeting that shard's current leader replica.
 *
 * @param job       job configuration carrying the ZooKeeper service URL and collection name
 * @param numSplits split-count hint from the framework; ignored — the split count
 *                  always equals the collection's shard count
 * @return one {@code SolrFileSplit} per shard, built from the leader's
 *         {@code base_url} and {@code core} properties
 * @throws MalformedURLException if the ZooKeeper service URL is malformed
 * @throws IllegalStateException if a shard currently has no known leader
 */
@Override
public InputSplit[] getSplits(JobConf job, int numSplits) throws MalformedURLException {

    Path[] result = FileInputFormat.getInputPaths(job);
    Path path = result[0];
    String collectionName = job.get(ExternalTableProperties.COLLECTION_NAME);
    String zooKeeperAddress = job.get(ExternalTableProperties.ZOOKEEPER_SERVICE_URL);
    CloudSolrServer cloudServer = new CloudSolrServer(zooKeeperAddress);
    cloudServer.setDefaultCollection(collectionName);
    cloudServer.connect();
    ZkStateReader stateReader = cloudServer.getZkStateReader();
    ClusterState cs = stateReader.getClusterState();
    Collection<Slice> slices = cs.getSlices(collectionName);
    InputSplit[] inputSplits = new SolrFileSplit[slices.size()];
    int i = 0;
    for (Slice slice : slices) {
        Replica leader = slice.getLeader();
        // A shard can momentarily lack a leader (e.g. mid-election); fail fast
        // with a clear message instead of an NPE on the property lookups below.
        if (leader == null) {
            throw new IllegalStateException(
                    "No leader found for slice " + slice.getName() + " of collection " + collectionName);
        }
        SolrInputSplit split = new SolrInputSplit(leader.getProperties().get("base_url").toString(),
                leader.getProperties().get("core").toString(), collectionName);
        inputSplits[i] = new SolrFileSplit(split, path);
        i++;
    }
    LOG.debug("solr splits size = " + inputSplits.length);
    stateReader.close();
    return inputSplits;
}

From source file:org.apache.hadoop.hive.solr.SolrOutputFormat.java

License:Apache License

/**
 * Creates a Hive record writer that sends every document to the leader of
 * the single shard {@code "shard1"} of the configured collection.
 *
 * @param jc              job configuration carrying the ZooKeeper URL and collection name
 * @param finalOutPath    unused; Solr, not HDFS, is the destination
 * @param valueClass      unused
 * @param isCompressed    unused
 * @param tableProperties unused
 * @param progress        unused
 * @return a {@code SolrRecordWriter} backed by the shard1 leader
 * @throws IOException if the ZooKeeper service URL is malformed
 */
@Override
public RecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
        final Class<? extends Writable> valueClass, boolean isCompressed, Properties tableProperties,
        Progressable progress) throws IOException {

    // Need to figure out how to improve the degree of parallelism.
    // For now we will just have 1 shard insert all the documents.
    CloudSolrServer cloudServer;
    String zooKeeperAddress = jc.get(ExternalTableProperties.ZOOKEEPER_SERVICE_URL);
    try {
        cloudServer = new CloudSolrServer(zooKeeperAddress);
    } catch (MalformedURLException ex) {
        LOG.log(Level.ERROR, "Exception occured while connecting to CloudSolrServer", ex);
        // Propagate instead of continuing with a null server, which previously
        // guaranteed a NullPointerException at connect() below.
        throw new IOException("Invalid ZooKeeper service URL: " + zooKeeperAddress, ex);
    }
    cloudServer.connect();
    ZkStateReader stateReader = cloudServer.getZkStateReader();
    ClusterState cs = stateReader.getClusterState();
    String collectionName = jc.get(ExternalTableProperties.COLLECTION_NAME);
    Slice s = cs.getSlice(collectionName, "shard1");
    Replica r = s.getLeader();
    String baseURL = r.getProperties().get("base_url").toString();
    String shardName = r.getProperties().get("core").toString();
    SolrDAO solrDAO = new SolrDAO(baseURL, shardName, collectionName, null);
    return new SolrRecordWriter(solrDAO);
}

From source file:org.apache.sentry.tests.e2e.solr.TestUpdateOperations.java

License:Apache License

/**
 * Posts an update for a synthetic document directly to the leader node of the
 * shard that owns the document's hash, optionally tagging the request with an
 * {@code update.distrib} phase, and asserts that Sentry denies the user.
 *
 * @param server         cloud client whose ZK state is used to locate the leader
 * @param collectionName collection the update targets
 * @param userName       user expected to be denied by Sentry
 * @param distribPhase   distributed-update phase to set, or {@code null} for none
 * @throws Exception on HTTP or ZooKeeper failures
 */
private void checkUpdateDistribPhase(CloudSolrServer server, String collectionName, String userName,
        DistribPhase distribPhase) throws Exception {
    String path = "/" + collectionName + "/update?commit=true";
    String updateDistribParam = "";
    if (distribPhase != null) {
        updateDistribParam = distribPhase.toString();
        path += "&update.distrib=" + updateDistribParam;
    }
    String docId = "testUpdateDistribDoc" + updateDistribParam;
    String body = "<add><doc><field name=\"id\">" + docId + "</field></doc></add>";

    String node = null;
    ClusterState clusterState = server.getZkStateReader().getClusterState();
    for (Slice slice : clusterState.getActiveSlices(collectionName)) {
        if (slice.getRange().includes(docId.hashCode())) {
            // Solr uses _nodeName_solr; the HTTP context path is /solr.
            node = slice.getLeader().getNodeName().replace("_solr", "/solr");
            // Slice hash ranges are disjoint, so the owning slice is unique.
            break;
        }
    }
    assertNotNull("Expected to find leader node for document", node);

    String ret = makeHttpRequest(server, node, "POST", path, body.getBytes("UTF-8"), "text/xml");
    assertTrue("Expected sentry exception", ret.contains("SentrySolrAuthorizationException: " + "User "
            + userName + " does not have privileges for " + collectionName));
}

From source file:uk.bl.wa.apache.solr.hadoop.ZooKeeperInspector.java

License:Apache License

/**
 * Collects, for every shard of the given collection, the core URLs of all of
 * its replicas. Shards are visited in sorted order, and each inner list holds
 * one URL per replica of that shard.
 *
 * @param zkHost     ZooKeeper connect string used to read cluster state
 * @param collection name of the SolrCloud collection to inspect
 * @return a list of per-shard replica core-URL lists
 * @throws IllegalArgumentException if any shard has no registered leader yet
 */
public List<List<String>> extractShardUrls(String zkHost, String collection) {

    DocCollection docCollection = extractDocCollection(zkHost, collection);
    List<Slice> sortedSlices = getSortedSlices(docCollection.getSlices());
    List<List<String>> solrUrls = new ArrayList<List<String>>(sortedSlices.size());
    for (Slice shard : sortedSlices) {
        // A missing leader means the shard has not fully registered in ZK.
        if (shard.getLeader() == null) {
            throw new IllegalArgumentException("Cannot find SolrCloud slice leader. "
                    + "It looks like not all of your shards are registered in ZooKeeper yet");
        }
        Collection<Replica> shardReplicas = shard.getReplicas();
        List<String> replicaUrls = new ArrayList<String>(shardReplicas.size());
        for (Replica replica : shardReplicas) {
            replicaUrls.add(new ZkCoreNodeProps(replica).getCoreUrl());
        }
        solrUrls.add(replicaUrls);
    }
    return solrUrls;
}