Example usage for org.apache.solr.client.solrj.impl CloudSolrClient close

List of usage examples for org.apache.solr.client.solrj.impl CloudSolrClient close

Introduction

On this page you can find example usages of org.apache.solr.client.solrj.impl CloudSolrClient close.

Prototype

@Override
    public void close() throws IOException 

Source Link

Usage

From source file:com.databasepreservation.visualization.utils.SolrUtils.java

/**
 * Extracts the bundled Solr configsets (database, table, saved-searches) from the
 * application jar into temporary directories and uploads them to SolrCloud via ZooKeeper.
 * <p>
 * When the code is running from a directory (i.e. from an IDE) extraction is skipped,
 * assuming the setup was already done. Temporary directories are always removed, both on
 * success and on extraction failure. Errors are logged, never propagated.
 *
 * @param zkHost ZooKeeper connection string used to reach the SolrCloud cluster
 */
public static void setupSolrCloudConfigsets(String zkHost) {
    // before anything else, try to get a zookeeper client
    CloudSolrClient zkClient = new CloudSolrClient(zkHost);

    // get resources and copy them to a temporary directory
    Path databaseDir = null;
    Path tableDir = null;
    Path savedSearchesDir = null;
    try {
        final File jarFile = new File(
                SolrManager.class.getProtectionDomain().getCodeSource().getLocation().toURI());

        // if it is a directory the application is being run from an IDE;
        // in that case do not setup (assuming setup is done)
        if (!jarFile.isDirectory()) {
            databaseDir = Files.createTempDirectory("dbv_db_");
            tableDir = Files.createTempDirectory("dbv_tab_");
            // fix: the original reused the "dbv_tab_" prefix here (copy-paste);
            // a distinct prefix makes leftover temp dirs identifiable
            savedSearchesDir = Files.createTempDirectory("dbv_ss_");
            // try-with-resources guarantees the jar is closed even if a copy fails
            try (JarFile jar = new JarFile(jarFile)) {
                final Enumeration<JarEntry> entries = jar.entries();

                while (entries.hasMoreElements()) {
                    JarEntry entry = entries.nextElement();
                    String name = entry.getName();

                    // route each jar entry to the configset directory it belongs to,
                    // stripping the resource-folder prefix from its path
                    String nameWithoutOriginPart = null;
                    Path destination = null;
                    if (name.startsWith(ViewerSafeConstants.SOLR_CONFIGSET_DATABASE_RESOURCE + "/")) {
                        nameWithoutOriginPart = name
                                .substring(ViewerSafeConstants.SOLR_CONFIGSET_DATABASE_RESOURCE.length() + 1);
                        destination = databaseDir;
                    } else if (name.startsWith(ViewerSafeConstants.SOLR_CONFIGSET_TABLE_RESOURCE + "/")) {
                        nameWithoutOriginPart = name
                                .substring(ViewerSafeConstants.SOLR_CONFIGSET_TABLE_RESOURCE.length() + 1);
                        destination = tableDir;
                    } else if (name.startsWith(ViewerSafeConstants.SOLR_CONFIGSET_SEARCHES_RESOURCE + "/")) {
                        nameWithoutOriginPart = name
                                .substring(ViewerSafeConstants.SOLR_CONFIGSET_SEARCHES_RESOURCE.length() + 1);
                        destination = savedSearchesDir;
                    } else {
                        continue;
                    }

                    Path output = destination.resolve(nameWithoutOriginPart);
                    if (name.endsWith("/")) {
                        Files.createDirectories(output);
                    } else {
                        output = Files.createFile(output);
                        // try-with-resources closes both streams even when the copy fails
                        try (InputStream inputStream = SolrManager.class.getResourceAsStream("/" + name);
                                OutputStream outputStream = Files.newOutputStream(output,
                                        StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
                            IOUtils.copy(inputStream, outputStream);
                        }
                    }
                }
            }
        }
    } catch (IOException | URISyntaxException e) {
        LOGGER.error("Could not extract Solr configset", e);
        deleteTempDirQuietly(databaseDir);
        deleteTempDirQuietly(tableDir);
        // fix: the original leaked savedSearchesDir on extraction failure
        deleteTempDirQuietly(savedSearchesDir);
        databaseDir = null;
        tableDir = null;
        savedSearchesDir = null;
    }

    // copy configurations to solr (only when extraction produced all three dirs)
    if (databaseDir != null && tableDir != null && savedSearchesDir != null) {
        try {
            zkClient.uploadConfig(databaseDir, ViewerSafeConstants.SOLR_CONFIGSET_DATABASE);
        } catch (IOException e) {
            LOGGER.debug("IO error uploading database config to solr cloud", e);
        }
        try {
            zkClient.uploadConfig(tableDir, ViewerSafeConstants.SOLR_CONFIGSET_TABLE);
        } catch (IOException e) {
            LOGGER.debug("IO error uploading table config to solr cloud", e);
        }
        try {
            zkClient.uploadConfig(savedSearchesDir, ViewerSafeConstants.SOLR_CONFIGSET_SEARCHES);
        } catch (IOException e) {
            LOGGER.debug("IO error uploading saved searches config to solr cloud", e);
        }

        deleteTempDirQuietly(databaseDir);
        deleteTempDirQuietly(tableDir);
        deleteTempDirQuietly(savedSearchesDir);
    }

    try {
        zkClient.close();
    } catch (IOException e) {
        LOGGER.debug("IO error closing connection to solr cloud", e);
    }
}

/**
 * Recursively deletes a temporary directory, logging (but never propagating) IO errors.
 * A {@code null} argument is a no-op, so callers can pass directories that were never created.
 *
 * @param dir the directory to delete, may be {@code null}
 */
private static void deleteTempDirQuietly(Path dir) {
    if (dir == null) {
        return;
    }
    try {
        FileUtils.deleteDirectoryRecursive(dir);
    } catch (IOException e) {
        LOGGER.debug("IO error deleting temporary folder: " + dir, e);
    }
}

From source file:de.qaware.chronix.storage.solr.ChronixSolrCloudStorage.java

License:Apache License

/**
 * Returns the list of shard replica URLs of the given collection (or collection alias).
 * <p>
 * For every slice of every resolved collection, one active replica on a live node is
 * picked (at random, with a fixed seed for reproducibility) and its core URL is returned.
 *
 * @param zkHost            ZooKeeper URL
 * @param chronixCollection Solr collection name for chronix time series data
 * @return the list of shards of the default collection
 * @throws IOException              if the cloud client cannot be closed
 * @throws IllegalArgumentException if the collection (or alias) does not exist
 * @throws IllegalStateException    if a shard has no active replica
 */
public List<String> getShardList(String zkHost, String chronixCollection) throws IOException {

    List<String> shards = new ArrayList<>();

    // try-with-resources replaces the original try/finally close
    try (CloudSolrClient cloudSolrClient = new CloudSolrClient(zkHost)) {
        cloudSolrClient.connect();

        ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();

        ClusterState clusterState = zkStateReader.getClusterState();

        String[] collections;
        if (clusterState.hasCollection(chronixCollection)) {
            collections = new String[] { chronixCollection };
        } else {
            // might be a collection alias?
            Aliases aliases = zkStateReader.getAliases();
            String aliasedCollections = aliases.getCollectionAlias(chronixCollection);
            if (aliasedCollections == null)
                throw new IllegalArgumentException("Collection " + chronixCollection + " not found!");
            collections = aliasedCollections.split(",");
        }

        Set<String> liveNodes = clusterState.getLiveNodes();
        // fixed seed so repeated calls pick the same replicas (reproducible shard lists)
        Random random = new Random(5150);

        for (String coll : collections) {
            for (Slice slice : clusterState.getSlices(coll)) {
                // collect only active replicas hosted on live nodes
                List<String> replicas = new ArrayList<>();
                for (Replica r : slice.getReplicas()) {
                    if (r.getState().equals(Replica.State.ACTIVE)) {
                        ZkCoreNodeProps replicaCoreProps = new ZkCoreNodeProps(r);
                        if (liveNodes.contains(replicaCoreProps.getNodeName()))
                            replicas.add(replicaCoreProps.getCoreUrl());
                    }
                }
                int numReplicas = replicas.size();
                if (numReplicas == 0)
                    throw new IllegalStateException("Shard " + slice.getName() + " in collection " + coll
                            + " does not have any active replicas!");

                String replicaUrl = (numReplicas == 1) ? replicas.get(0)
                        : replicas.get(random.nextInt(replicas.size()));
                shards.add(replicaUrl);
            }
        }
    }

    return shards;
}

From source file:fr.jetoile.hadoopunit.component.SolrCloudBootstrapTest.java

License:Apache License

@Test
public void solrCloudShouldStart() throws IOException, SolrServerException, KeeperException,
        InterruptedException, NotFoundServiceException {

    // collection under test comes from configuration, not a hard-coded name
    String collectionName = configuration.getString(SolrCloudBootstrap.SOLR_COLLECTION_NAME);

    // client is owned by the bootstrap component (not constructed from a zk host string)
    CloudSolrClient client = ((SolrCloudBootstrap) HadoopBootstrap.INSTANCE.getService(Component.SOLRCLOUD))
            .getClient();

    // index 1000 small documents into the configured collection
    for (int i = 0; i < 1000; ++i) {
        SolrInputDocument doc = new SolrInputDocument();
        doc.addField("cat", "book");
        doc.addField("id", "book-" + i);
        doc.addField("name", "The Legend of the Hobbit part " + i);
        client.add(collectionName, doc);
        if (i % 100 == 0)
            client.commit(collectionName); // periodically flush
    }
    // fix: the original committed the hard-coded "collection1" here, which only flushes
    // the indexed documents when the configured collection happens to be named that
    client.commit(collectionName);

    SolrDocument collection1 = client.getById(collectionName, "book-1");

    assertNotNull(collection1);

    assertThat(collection1.getFieldValue("name")).isEqualTo("The Legend of the Hobbit part 1");

    client.close();
}

From source file:fr.jetoile.hadoopunit.integrationtest.IntegrationBootstrapTest.java

License:Apache License

@Test
public void solrCloudShouldStart()
        throws IOException, SolrServerException, KeeperException, InterruptedException {

    String collectionName = configuration.getString(SolrCloudBootstrap.SOLR_COLLECTION_NAME);

    String zkHostString = configuration.getString(Config.ZOOKEEPER_HOST_KEY) + ":"
            + configuration.getInt(Config.ZOOKEEPER_PORT_KEY);

    // try-with-resources closes the client even when an add/commit/assert throws
    // (the original leaked it on any failure before the final close())
    try (CloudSolrClient client = new CloudSolrClient(zkHostString)) {

        // index 1000 small documents into the configured collection
        for (int i = 0; i < 1000; ++i) {
            SolrInputDocument doc = new SolrInputDocument();
            doc.addField("cat", "book");
            doc.addField("id", "book-" + i);
            doc.addField("name", "The Legend of the Hobbit part " + i);
            client.add(collectionName, doc);
            if (i % 100 == 0)
                client.commit(collectionName); // periodically flush
        }
        // fix: the original committed the hard-coded "collection1", which only flushes
        // the indexed documents when the configured collection happens to be named that
        client.commit(collectionName);

        SolrDocument collection1 = client.getById(collectionName, "book-1");

        assertNotNull(collection1);

        assertThat(collection1.getFieldValue("name")).isEqualTo("The Legend of the Hobbit part 1");
    }
}

From source file:fr.jetoile.hadoopunit.integrationtest.SparkSolrIntegrationTest.java

License:Apache License

@Test
public void spark_should_read_parquet_file_and_index_into_solr() throws IOException, SolrServerException {
    //given//from  ww w.  j  av  a 2s.c o m
    SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("test");

    JavaSparkContext context = new JavaSparkContext(conf);

    //read hive-site from classpath
    HiveContext hiveContext = new HiveContext(context.sc());

    DataFrame sql = hiveContext.sql("SELECT * FROM default.test");
    sql.write().parquet("hdfs://localhost:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY)
            + "/khanh/test_parquet/file.parquet");

    FileSystem fileSystem = HdfsUtils.INSTANCE.getFileSystem();
    assertThat(fileSystem.exists(new Path("hdfs://localhost:"
            + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY) + "/khanh/test_parquet/file.parquet")))
                    .isTrue();

    context.close();

    //when
    context = new JavaSparkContext(conf);
    SQLContext sqlContext = new SQLContext(context);

    DataFrame file = sqlContext.read().parquet("hdfs://localhost:"
            + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY) + "/khanh/test_parquet/file.parquet");
    DataFrame select = file.select("id", "value");

    JavaRDD<SolrInputDocument> solrInputDocument = select.toJavaRDD().map(r -> {
        SolrInputDocument solr = new SolrInputDocument();
        solr.setField("id", r.getInt(0));
        solr.setField("value_s", r.getString(1));
        return solr;
    });

    String collectionName = configuration.getString(SolrCloudBootstrap.SOLR_COLLECTION_NAME);
    String zkHostString = configuration.getString(Config.ZOOKEEPER_HOST_KEY) + ":"
            + configuration.getInt(Config.ZOOKEEPER_PORT_KEY);
    SolrSupport.indexDocs(zkHostString, collectionName, 1000, solrInputDocument);

    //then
    CloudSolrClient client = new CloudSolrClient(zkHostString);
    SolrDocument collection1 = client.getById(collectionName, "1");

    assertNotNull(collection1);
    assertThat(collection1.getFieldValue("value_s")).isEqualTo("value1");

    client.close();

    context.close();

}

From source file:fr.jetoile.hadoopunit.sample.ParquetToSolrJobIntegrationTest.java

License:Apache License

@Test
public void spark_should_read_parquet_file_and_index_into_solr() throws IOException, SolrServerException {
    //given//from ww w.j  a v  a2 s.  c  om
    SparkSession sqlContext = SparkSession.builder().appName("test").master("local[*]").getOrCreate();

    Dataset<Row> df = sqlContext.read().format("com.databricks.spark.csv").option("header", "true") // Use first line of all files as header
            .option("inferSchema", "true") // Automatically infer data types
            .load("hdfs://localhost:" + configuration.getInt(HadoopUnitClientConfig.HDFS_NAMENODE_PORT_KEY)
                    + "/khanh/test/test.csv");

    df.write().parquet("hdfs://localhost:" + configuration.getInt(HadoopUnitClientConfig.HDFS_NAMENODE_PORT_KEY)
            + "/khanh/test_parquet/file.parquet");

    FileSystem fileSystem = HdfsUtils.INSTANCE.getFileSystem();
    assertThat(fileSystem.exists(
            new Path("hdfs://localhost:" + configuration.getInt(HadoopUnitClientConfig.HDFS_NAMENODE_PORT_KEY)
                    + "/khanh/test_parquet/file.parquet"))).isTrue();

    sqlContext.close();

    //when
    sqlContext = SparkSession.builder().appName("test").master("local[*]").getOrCreate();

    ParquetToSolrJob parquetToSolrJob = new ParquetToSolrJob(sqlContext);
    parquetToSolrJob.run();

    String zkHostString = configuration.getString("zookeeper.host") + ":"
            + configuration.getInt("zookeeper.port");

    //then
    CloudSolrClient client = new CloudSolrClient(zkHostString);
    SolrDocument collection1 = client.getById("collection1", "1");

    assertNotNull(collection1);
    assertThat(collection1.getFieldValue("value_s")).isEqualTo("value1");

    client.close();

    sqlContext.close();

}

From source file:fr.jetoile.hadoopunit.sample.ParquetToSolrJobTest.java

License:Apache License

@Test
public void spark_should_read_parquet_file_and_index_into_solr() throws IOException, SolrServerException {
    //given/* ww w .j a  v  a 2s. co m*/
    SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("test");

    JavaSparkContext context = new JavaSparkContext(conf);

    //read hive-site from classpath
    HiveContext hiveContext = new HiveContext(context.sc());

    DataFrame sql = hiveContext.sql("SELECT * FROM default.test");
    sql.write().parquet("hdfs://localhost:" + configuration.getInt(HadoopUnitConfig.HDFS_NAMENODE_PORT_KEY)
            + "/khanh/test_parquet/file.parquet");

    FileSystem fileSystem = HdfsUtils.INSTANCE.getFileSystem();
    assertThat(fileSystem
            .exists(new Path("hdfs://localhost:" + configuration.getInt(HadoopUnitConfig.HDFS_NAMENODE_PORT_KEY)
                    + "/khanh/test_parquet/file.parquet"))).isTrue();

    context.close();

    //when
    context = new JavaSparkContext(conf);

    ParquetToSolrJob parquetToSolrJob = new ParquetToSolrJob(context);
    parquetToSolrJob.run();

    String zkHostString = configuration.getString(HadoopUnitConfig.ZOOKEEPER_HOST_KEY) + ":"
            + configuration.getInt(HadoopUnitConfig.ZOOKEEPER_PORT_KEY);

    //then
    CloudSolrClient client = new CloudSolrClient(zkHostString);
    SolrDocument collection1 = client.getById("collection1", "1");

    assertNotNull(collection1);
    assertThat(collection1.getFieldValue("value_s")).isEqualTo("value1");

    client.close();

    context.close();

}

From source file:org.apache.nifi.processors.solr.QuerySolrIT.java

License:Apache License

@AfterClass
public static void teardown() {
    try {//from  w  ww  .  j av a  2 s.c o  m
        CloudSolrClient solrClient = createSolrClient();
        CollectionAdminRequest.Delete deleteCollection = CollectionAdminRequest
                .deleteCollection(SOLR_COLLECTION);
        deleteCollection.process(solrClient);
        solrClient.close();
    } catch (Exception e) {
    }
}

From source file:org.apache.ranger.patch.cliutil.DbToSolrMigrationUtil.java

License:Apache License

/**
 * Builds a Solr client for audit migration: a {@link CloudSolrClient} when ZooKeeper
 * hosts are configured, otherwise an {@link HttpSolrClient} against a configured URL.
 * <p>
 * The caller owns the returned client and is responsible for closing it.
 *
 * @return an open, usable Solr client, or {@code null} when neither ZK hosts nor a URL
 *         are configured (a fatal message is logged in that case)
 * @throws Exception if the client cannot be instantiated
 */
private SolrClient createSolrClient() throws Exception {
    SolrClient solrClient = null;

    registerSolrClientJAAS();
    // ZK hosts: current property first, then two legacy property names
    String zkHosts = PropertiesUtil.getProperty(SOLR_ZK_HOSTS);
    if (zkHosts == null) {
        zkHosts = PropertiesUtil.getProperty("ranger.audit.solr.zookeeper");
    }
    if (zkHosts == null) {
        zkHosts = PropertiesUtil.getProperty("ranger.solr.zookeeper");
    }

    String solrURL = PropertiesUtil.getProperty(SOLR_URLS_PROP);
    if (solrURL == null) {
        // Try with url
        solrURL = PropertiesUtil.getProperty("ranger.audit.solr.url");
    }
    if (solrURL == null) {
        // Let's try older property name
        solrURL = PropertiesUtil.getProperty("ranger.solr.url");
    }

    if (zkHosts != null && !zkHosts.trim().equals("") && !zkHosts.trim().equalsIgnoreCase("none")) {
        zkHosts = zkHosts.trim();
        String collectionName = PropertiesUtil.getProperty(SOLR_COLLECTION_NAME);
        if (collectionName == null || collectionName.equalsIgnoreCase("none")) {
            collectionName = DEFAULT_COLLECTION_NAME;
        }

        logger.info("Solr zkHosts=" + zkHosts + ", collectionName=" + collectionName);

        try {
            // Instantiate
            HttpClientUtil.setConfigurer(new Krb5HttpClientConfigurer());
            CloudSolrClient solrCloudClient = new CloudSolrClient(zkHosts);
            solrCloudClient.setDefaultCollection(collectionName);
            // fix: the original called solrCloudClient.close() here and then returned
            // the closed (unusable) client; the caller must close it instead
            solrClient = solrCloudClient;
        } catch (Exception e) {
            logger.fatal(
                    "Can't connect to Solr server. ZooKeepers=" + zkHosts + ", collection=" + collectionName,
                    e);
            throw e;
        }
    } else {
        if (solrURL == null || solrURL.isEmpty() || solrURL.equalsIgnoreCase("none")) {
            logger.fatal("Solr ZKHosts and URL for Audit are empty. Please set property " + SOLR_ZK_HOSTS
                    + " or " + SOLR_URLS_PROP);
        } else {
            try {
                HttpClientUtil.setConfigurer(new Krb5HttpClientConfigurer());
                // configure the HTTP client directly; the original's
                // "instanceof HttpSolrClient" check on a freshly constructed
                // HttpSolrClient was always true
                HttpSolrClient httpSolrClient = new HttpSolrClient(solrURL);
                httpSolrClient.setAllowCompression(true);
                httpSolrClient.setConnectionTimeout(1000);
                httpSolrClient.setMaxRetries(1);
                httpSolrClient.setRequestWriter(new BinaryRequestWriter());
                solrClient = httpSolrClient;
            } catch (Exception e) {
                logger.fatal("Can't connect to Solr server. URL=" + solrURL, e);
                throw e;
            }
        }
    }
    return solrClient;
}