Example usage for org.apache.solr.core SolrCore getLatestSchema

List of usage examples for org.apache.solr.core SolrCore getLatestSchema

Introduction

On this page you can find example usages of org.apache.solr.core.SolrCore#getLatestSchema.

Prototype

public IndexSchema getLatestSchema() 

Source Link

Usage

From source file:alba.components.FilteredShowFileRequestHandler.java

License:Apache License

/**
 * Decides whether the requested file must not be exposed through this handler.
 *
 * A file is hidden when: it (or the wildcard "*") appears in {@code hiddenFiles},
 * its name attempts path traversal ("..") or starts with ".", or it is the
 * managed-schema resource of a core using a {@link ManagedIndexSchema}.
 *
 * @param req         current request; used to reach the core's latest schema
 * @param rsp         response; receives a FORBIDDEN SolrException when reportError is true
 * @param fnameIn     requested file name as supplied by the client
 * @param reportError whether to log and attach an exception when access is denied
 * @param hiddenFiles upper-cased file names to hide; "*" hides everything
 * @return true when access to the file must be denied
 */
public static boolean isHiddenFile(SolrQueryRequest req, SolrQueryResponse rsp, String fnameIn,
        boolean reportError, Set<String> hiddenFiles) {
    String fname = fnameIn.toUpperCase(Locale.ROOT);
    if (hiddenFiles.contains(fname) || hiddenFiles.contains("*")) {
        if (reportError) {
            log.error("Cannot access {}", fname);
            rsp.setException(
                    new SolrException(SolrException.ErrorCode.FORBIDDEN, "Can not access: " + fnameIn));
        }
        return true;
    }

    // This is slightly off: a valid path could look like ./schema.xml, but it is not
    // worth the effort to handle every such form here.
    if (fname.contains("..") || fname.startsWith(".")) {
        if (reportError) {
            log.error("Invalid path: {}", fname);
            rsp.setException(new SolrException(SolrException.ErrorCode.FORBIDDEN, "Invalid path: " + fnameIn));
        }
        return true;
    }

    // Make sure that if the schema is managed, we don't allow editing. Don't really want to put
    // this in the init since we're not entirely sure when the managed schema will get initialized
    // relative to this handler.
    SolrCore core = req.getCore();
    IndexSchema schema = core.getLatestSchema();
    if (schema instanceof ManagedIndexSchema) {
        String managed = schema.getResourceName();

        // case-insensitive: fname was upper-cased above
        if (fname.equalsIgnoreCase(managed)) {
            return true;
        }
    }
    return false;
}

From source file:com.billiger.solr.handler.component.QLTBComponent.java

License:Apache License

/**
 * Inform component of core reload./*from  w w  w.ja  v  a 2 s. c  o  m*/
 *
 * This will both set the analyzer according to the configured
 * queryFieldType, and load the QLTB data. Data source can be (in this
 * order) ZooKeeper, the conf/ directory or the data/ directory.
 */
@Override
public final void inform(final SolrCore core) {
    // load analyzer
    String queryFieldType = initArgs.get(FIELD_TYPE);
    if (queryFieldType != null) {
        FieldType ft = core.getLatestSchema().getFieldTypes().get(queryFieldType);
        if (ft == null) {
            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                    "unknown FieldType \"" + queryFieldType + "\" used in QLTBComponent");
        }
        analyzer = ft.getQueryAnalyzer();
    } else {
        analyzer = null;
    }
    synchronized (qltbCache) {
        qltbCache.clear();
        try {
            // retrieve QLTB data filename
            String qltbFile = initArgs.get(QLTB_FILE);
            if (qltbFile == null) {
                throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                        "QLTBComponent must specify argument: \"" + QLTB_FILE + "\" - path to QLTB data");
            }
            boolean exists = false;
            // check ZooKeeper
            ZkController zkController = core.getCoreDescriptor().getCoreContainer().getZkController();
            if (zkController != null) {
                exists = zkController.configFileExists(zkController.readConfigName(
                        core.getCoreDescriptor().getCloudDescriptor().getCollectionName()), qltbFile);
            } else {
                // no ZooKeeper, check conf/ and data/ directories
                File fConf = new File(core.getResourceLoader().getConfigDir(), qltbFile);
                File fData = new File(core.getDataDir(), qltbFile);
                if (fConf.exists() == fData.exists()) {
                    // both or neither exist
                    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                            "QLTBComponent missing config file: \"" + qltbFile + "\": either "
                                    + fConf.getAbsolutePath() + " or " + fData.getAbsolutePath()
                                    + " must exist, but not both");
                }
                if (fConf.exists()) {
                    // conf/ found, load it
                    exists = true;
                    log.info("QLTB source conf/: " + fConf.getAbsolutePath());
                    Config cfg = new Config(core.getResourceLoader(), qltbFile);
                    qltbCache.put(null, loadQLTBMap(cfg, core));
                }
            }
            if (!exists) {
                // Neither ZooKeeper nor conf/, so must be in data/
                // We need an IndexReader and the normal
                RefCounted<SolrIndexSearcher> searcher = null;
                try {
                    searcher = core.getNewestSearcher(false);
                    IndexReader reader = searcher.get().getIndexReader();
                    getQLTBMap(reader, core);
                } finally {
                    if (searcher != null) {
                        searcher.decref();
                    }
                }
            }
        } catch (Exception ex) {
            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error initializing QltbComponent.",
                    ex);
        }
    }
}

From source file:com.billiger.solr.handler.component.QLTBComponent.java

License:Apache License

/**
 * Load the QLTB map from a Config./*from ww w . j  a v  a  2s . c o  m*/
 *
 * Read and process the "boosts/query" XPath nodes from the given
 * Config, and build them into a QLTB map. The XML format is described
 * in the class documentation.
 *
 * The result of this function is a map of (analyzed) query strings
 * with their respective lists of boosted query terms. These are
 * ConstantScoreQuery instances for each term with the corresponding
 * boost factor. (Invalid - i.e. non-numerical - boost factors are
 * logged as warnings).
 *
 * The SOLR core that is passed into this function is necessary for
 * determinating the FieldType of the boosted fields. Only with the
 * correct field type is it possible to boost non-string fields, as
 * these non-string values need to be ft.readableToIndexed().
 *
 * @param cfg
 *            Config object to read the XML QLTB from
 * @param core
 *            SOLR Core the query is performed on
 * @return QLTB map
 *
 * @throws IOException
 *             If the query could not be analysed
 */
private Map<String, List<Query>> loadQLTBMap(final Config cfg, final SolrCore core) throws IOException {
    Map<String, List<Query>> map = new HashMap<String, List<Query>>();
    NodeList nodes = (NodeList) cfg.evaluate("boosts/query", XPathConstants.NODESET);
    for (int i = 0; i < nodes.getLength(); i++) {
        Node node = nodes.item(i);
        String qstr = DOMUtil.getAttr(node, "text", "missing query 'text'");
        qstr = getAnalyzedQuery(qstr);
        NodeList children = node.getChildNodes();
        List<Query> termBoosts = new ArrayList<Query>();
        for (int j = 0; j < children.getLength(); j++) {
            Node child = children.item(j);
            if (!child.getNodeName().equals("term")) {
                continue;
            }
            String field = DOMUtil.getAttr(child, "field", "missing 'field'");
            String value = DOMUtil.getAttr(child, "value", "missing 'value'");
            String boost = DOMUtil.getAttr(child, "boost", "missing 'boost'");
            float termBoost = 1;
            try {
                termBoost = Float.parseFloat(boost);
            } catch (NumberFormatException e) {
                log.warn("invalid boost " + boost + " for query \"" + qstr + "\", term: \"" + field + ":"
                        + value + "\": " + e.getMessage());
                continue;
            }
            // without readableToIndexed QLTB boosting would only work
            // for string field types
            FieldType ft = core.getLatestSchema().getField(field).getType();
            value = ft.readableToIndexed(value);
            Term t = new Term(field, value);
            TermQuery tq = new TermQuery(t);
            ConstantScoreQuery csq = new ConstantScoreQuery(tq);
            csq.setBoost(termBoost);
            termBoosts.add(csq);
        }
        map.put(qstr, termBoosts);
    }
    return map;
}

From source file:com.ifactory.press.db.solr.processor.FieldMergingProcessorFactory.java

License:Apache License

@Override
public void inform(SolrCore core) {
    // Capture the core's current schema, then run the factory's deferred initialization.
    schema = core.getLatestSchema();
    doInit();
}

From source file:com.searchbox.engine.solr.EmbeddedSolr.java

License:Apache License

/**
 * Registers the given copy-field destinations for {@code field} on the named
 * collection's core, skipping destinations the schema already copies to.
 *
 * @param collection collection whose core's schema is updated
 * @param field      source field of the copy directives
 * @param copyFields destination field names; entries already present in the
 *                   schema are removed from this set before registration
 * @return always true
 */
private boolean addCopyFields(Collection collection, Field field, Set<String> copyFields) {
    SolrCore core = coreContainer.getCore(collection.getName());
    try {
        IndexSchema schema = core.getLatestSchema();

        // Drop destinations the schema already copies this field to.
        for (CopyField copyField : schema.getCopyFieldsList(field.getKey())) {
            copyFields.remove(copyField.getDestination().getName());
        }

        Map<String, java.util.Collection<String>> copyFieldsMap = new HashMap<String, java.util.Collection<String>>();
        copyFieldsMap.put(field.getKey(), copyFields);
        schema = schema.addCopyFields(copyFieldsMap);

        core.setLatestSchema(schema);

        return true;
    } finally {
        // CoreContainer.getCore() increments the core's reference count;
        // it must be released or the core can never be closed.
        core.close();
    }
}

From source file:com.sindicetech.siren.solr.facet.SirenFacetProcessor.java

License:Open Source License

/**
 * Extracts facet entries from the incoming document, adds any missing
 * docValues facet fields to the (mutable) schema, stores the facet values on
 * the document, then delegates to the next processor in the chain.
 *
 * @param cmd the add command carrying the request and the input document
 * @throws IOException   propagated from the downstream processor chain
 * @throws SolrException with BAD_REQUEST when the core's schema is not mutable
 */
@Override
public void processAdd(AddUpdateCommand cmd) throws IOException {
    SolrCore core = cmd.getReq().getCore();
    IndexSchema schema = core.getLatestSchema();

    if (!schema.isMutable()) {
        throw new SolrException(BAD_REQUEST,
                String.format("This IndexSchema, of core %s, is not mutable.", core.getName()));
    }

    SolrInputDocument doc = cmd.getSolrInputDocument();

    extractor.setSchema(schema);
    List<SirenFacetEntry> entries = extractor.extractFacets(doc);

    // update schema
    // use Sets so that we add a fieldname only once even if it is generated multiple times (for
    // multiple paths)
    Set<SchemaField> newFields = new HashSet<SchemaField>();
    for (SirenFacetEntry entry : entries) {
        // skip entry if the field is already in the schema
        if (schema.getFieldOrNull(entry.toFieldName()) != null) {
            continue;
        }

        TypeMapping typeMapping = getTypeMappingValueClass(entry.datatype.xsdDatatype);

        // skip facet values that are too large altogether - they don't make sense for faceting
        if (isTooLargeForFaceting(entry, typeMapping)) {
            continue;
        }

        Map<String, Boolean> options = new HashMap<String, Boolean>();
        // see FieldProperties.propertyNames[]
        options.put("indexed", false);
        options.put("stored", false);
        options.put("docValues", true);
        options.put("multiValued", true);

        // reuse typeMapping rather than resolving the datatype a second time
        newFields.add(schema.newField(entry.toFieldName(), typeMapping.fieldType, options));
    }

    if (!newFields.isEmpty()) {
        IndexSchema newSchema = schema.addFields(newFields);
        core.setLatestSchema(newSchema);
        cmd.getReq().updateSchemaToLatest();
        logger.debug("Successfully added field(s) to the schema.");
    }

    // update document
    for (SirenFacetEntry entry : entries) {
        TypeMapping typeMapping = getTypeMappingValueClass(entry.datatype.xsdDatatype);

        // skip facet values that are too large altogether - they don't make sense for faceting
        if (isTooLargeForFaceting(entry, typeMapping)) {
            continue;
        }

        doc.addField(entry.toFieldName(), entry.value);
    }

    // call the next one in chain
    super.processAdd(cmd);
}

/**
 * Returns true when the entry holds a String value longer than the type
 * mapping's maximum (or the default maximum when none is configured).
 */
private boolean isTooLargeForFaceting(SirenFacetEntry entry, TypeMapping typeMapping) {
    if (!(entry.value instanceof String)) {
        return false;
    }
    int maxLength = typeMapping.maxFieldSize != null ? typeMapping.maxFieldSize
            : DEFAULT_MAX_FACET_VALUE_LENGTH;
    return ((String) entry.value).length() > maxLength;
}

From source file:lux.solr.SolrIndexConfig.java

License:Mozilla Public License

/**
 * Wires the Lux index configuration into the core's latest schema: registers
 * the XML text field types, informs each configured field, adds optional XPath
 * fields, and checks the schema's unique key against the configured URI field.
 */
public void inform(SolrCore core) {
    schema = core.getLatestSchema();
    // XML_STORE is not listed explicitly by the indexer
    informField(indexConfig.getField(FieldRole.XML_STORE), core);
    // This must be run before informField() registers default analyzers with the Schema
    // NOTE(review): informField() is already invoked once above for XML_STORE —
    // confirm the intended ordering constraint only concerns the loop below.
    registerXmlTextFields();
    for (FieldDefinition xmlField : indexConfig.getFields()) {
        informField(xmlField, core);
    }
    if (xpathFieldConfig != null) {
        addXPathFields();
    }
    // Validate the unique-key configuration; mismatches are logged, not fatal.
    SchemaField uniqueKeyField = schema.getUniqueKeyField();
    if (uniqueKeyField == null) {
        logger.error("{} schema does not define any unique field", core.getName());
    } else if (!uniqueKeyField.getName().equals(indexConfig.getFieldName(FieldRole.URI))) {
        logger.error(
                "{} schema defines a different unique field than the uri field declared in lux configuration",
                core.getName());
    }
    // must call this after making changes to the field map:
    schema.refreshAnalyzers();
}

From source file:net.yacy.search.index.SingleDocumentMatcher.java

License:Open Source License

/**
 * Check a given Solr document against a Solr query, without requesting a Solr
 * index, but using instead in-memory Lucene utility. This lets checking if a
 * single document matches some criterias, before adding it to a Solr index.
 *
 * @param solrDoc
 *            the Solr document to check
 * @param query
 *            a standard Solr query string
 * @param core
 *            the Solr index core holding the Solr schema of the document
 * @return true when the document matches the given Solr query
 * @throws SyntaxError
 *             when the query String syntax is not valid
 * @throws SolrException when a query required element is missing, or when a problem occurred when accessing the target core
 * @throws IllegalArgumentException
 *             when a parameter is null, or when the core has no schema.
 * @see <a href=
 *      "http://lucene.apache.org/solr/guide/6_6/the-standard-query-parser.html">The
 *      Solr Standard Query Parser</a>
 */
public static boolean matches(final SolrInputDocument solrDoc, final String query, final SolrCore core)
        throws SyntaxError, IllegalArgumentException {
    if (solrDoc == null || query == null || core == null) {
        throw new IllegalArgumentException("All parameters must be non null");
    }
    final IndexSchema schema = core.getLatestSchema();
    if (schema == null) {
        // distinct message: this is a core state problem, not a null argument
        throw new IllegalArgumentException("The core's latest schema must not be null");
    }

    final org.apache.lucene.document.Document luceneDoc = DocumentBuilder.toDocument(solrDoc, schema);

    final Analyzer indexAnalyzer = schema.getIndexAnalyzer();

    /*
     * Using the Lucene RAMDirectory could be an alternative, but it is slower with
     * a larger memory footprint
     */
    final MemoryIndex index = MemoryIndex.fromDocument(luceneDoc, indexAnalyzer);

    final Query luceneQuery = toLuceneQuery(query, core);

    final float score = index.search(luceneQuery);

    // MemoryIndex.search returns a relevance score; any positive score is a match
    return score > 0.0f;
}

From source file:org.alfresco.solr.component.EnsureModelsComponent.java

License:Open Source License

/**
 * Builds a ModelTracker for the given core, registers it with the admin
 * handler's tracker registry, and schedules it for periodic execution.
 */
private ModelTracker registerModelTracker(SolrCore core, AlfrescoCoreAdminHandler adminHandler) {
    String home = core.getCoreDescriptor().getCoreContainer().getSolrHome();
    Properties coreProps = new CoreDescriptorDecorator(core.getCoreDescriptor()).getCoreProperties();

    // Build the repository client from the core's resource loader and the shared data model.
    SolrResourceLoader resourceLoader = core.getLatestSchema().getResourceLoader();
    SolrKeyResourceLoader keyLoader = new SolrKeyResourceLoader(resourceLoader);
    SOLRAPIClient client = new SOLRAPIClientFactory().getSOLRAPIClient(coreProps, keyLoader,
            AlfrescoSolrDataModel.getInstance().getDictionaryService(CMISStrictDictionaryService.DEFAULT),
            AlfrescoSolrDataModel.getInstance().getNamespaceDAO());

    // Information server backed by the content store under this Solr home.
    SolrContentStore contentStore = new SolrContentStore(CoreWatcherJob.locateContentHome(home));
    SolrInformationServer infoServer = new SolrInformationServer(adminHandler, core, client, contentStore);

    ModelTracker modelTracker = new ModelTracker(home, coreProps, client, core.getName(), infoServer);
    adminHandler.getTrackerRegistry().setModelTracker(modelTracker);
    adminHandler.getScheduler().schedule(modelTracker, core.getName(), coreProps);
    return modelTracker;
}

From source file:org.alfresco.solr.lifecycle.SolrCoreLoadRegistration.java

License:Open Source License

/**
 * Registers with the admin handler the information server and the trackers.
 *
 * @param adminHandler  admin handler holding the tracker registry and information servers
 * @param coreContainer container owning the core; supplies the Solr home
 * @param core          the core to register trackers for
 * @param coreName      the name under which trackers and servers are registered
 */
public static void registerForCore(AlfrescoCoreAdminHandler adminHandler, CoreContainer coreContainer,
        SolrCore core, String coreName) {

    TrackerRegistry trackerRegistry = adminHandler.getTrackerRegistry();
    Properties props = new CoreDescriptorDecorator(core.getCoreDescriptor()).getProperties();
    // Prepare cores
    SolrResourceLoader loader = core.getLatestSchema().getResourceLoader();
    SolrKeyResourceLoader keyResourceLoader = new SolrKeyResourceLoader(loader);
    SOLRAPIClientFactory clientFactory = new SOLRAPIClientFactory();
    SOLRAPIClient repositoryClient = clientFactory.getSOLRAPIClient(props, keyResourceLoader,
            AlfrescoSolrDataModel.getInstance().getDictionaryService(CMISStrictDictionaryService.DEFAULT),
            AlfrescoSolrDataModel.getInstance().getNamespaceDAO());
    // Start content store
    SolrContentStore contentStore = new SolrContentStore(coreContainer.getSolrHome());
    SolrInformationServer srv = new SolrInformationServer(adminHandler, core, repositoryClient, contentStore);
    props.putAll(srv.getProps());
    adminHandler.getInformationServers().put(coreName, srv);

    SolrTrackerScheduler scheduler = adminHandler.getScheduler();

    // Create model tracker and load all the persisted models.
    // NOTE(review): the original comment claimed this prevents concurrent ModelTracker
    // registration, but no synchronization is visible here — confirm createModelTracker
    // performs its own locking.
    createModelTracker(coreName, trackerRegistry, props, coreContainer.getSolrHome(), repositoryClient, srv,
            scheduler);

    log.info("Starting to track {}", coreName);
    if (Boolean.parseBoolean(props.getProperty("enable.alfresco.tracking", "false"))) {

        // Replace any previously registered trackers for this core.
        if (trackerRegistry.hasTrackersForCore(coreName)) {
            log.info("Trackers for {} is already registered, shutting them down.", coreName);
            shutdownTrackers(coreName, trackerRegistry.getTrackersForCore(coreName), scheduler);
            trackerRegistry.removeTrackersForCore(coreName);
            adminHandler.getInformationServers().remove(coreName);
        }

        List<Tracker> trackers = createCoreTrackers(coreName, trackerRegistry, props, scheduler,
                repositoryClient, srv);

        CommitTracker commitTracker = new CommitTracker(props, repositoryClient, coreName, srv, trackers);
        trackerRegistry.register(coreName, commitTracker);
        scheduler.schedule(commitTracker, coreName, props);
        log.info("The Trackers are now scheduled to run");
        trackers.add(commitTracker); // Add the commitTracker to the list of scheduled trackers that can be shutdown

        // Ensure all trackers are stopped when the core closes.
        core.addCloseHook(new CloseHook() {
            @Override
            public void preClose(SolrCore core) {
                log.info("Shutting down {}", core.getName());
                SolrCoreLoadRegistration.shutdownTrackers(core.getName(), trackers, scheduler);
            }

            @Override
            public void postClose(SolrCore core) {
                // Nothing to be done here
            }
        });
    }
}