Example usage for org.apache.lucene.util Version toString

List of usage examples for org.apache.lucene.util Version toString

Introduction

On this page you can find example usage for org.apache.lucene.util Version toString.

Prototype

@Override
    public String toString() 

Source Link

Usage

From source file:com.grantingersoll.opengrok.analysis.BaseTokenStreamFactoryTestCase.java

License:Apache License

/**
 * Instantiates an {@link AbstractAnalysisFactory} of the given class via its
 * {@code Map}-argument constructor.
 *
 * @param clazz         factory class to instantiate
 * @param matchVersion  if non-null, passed through as the "luceneMatchVersion" arg
 * @param loader        resource loader handed to {@link ResourceLoaderAware} factories
 * @param keysAndValues alternating key/value argument pairs; must have even length
 * @return the initialized factory
 * @throws IllegalArgumentException if the pair list has odd length, or if the
 *         factory constructor rejected its arguments
 * @throws Exception any other reflective instantiation failure
 */
private AbstractAnalysisFactory analysisFactory(Class<? extends AbstractAnalysisFactory> clazz,
        Version matchVersion, ResourceLoader loader, String... keysAndValues) throws Exception {
    if (keysAndValues.length % 2 != 0) {
        throw new IllegalArgumentException("invalid keysAndValues map");
    }
    Map<String, String> args = new HashMap<>();
    for (int i = 0; i < keysAndValues.length; i += 2) {
        // put() returns the previous mapping; a non-null result means a duplicate key
        assertNull("duplicate values for key: " + keysAndValues[i],
                args.put(keysAndValues[i], keysAndValues[i + 1]));
    }
    if (matchVersion != null) {
        assertNull("duplicate values for key: luceneMatchVersion",
                args.put("luceneMatchVersion", matchVersion.toString()));
    }
    AbstractAnalysisFactory factory;
    try {
        factory = clazz.getConstructor(Map.class).newInstance(args);
    } catch (InvocationTargetException e) {
        // to simplify tests that check for illegal parameters
        Throwable cause = e.getCause();
        if (cause instanceof IllegalArgumentException) {
            throw (IllegalArgumentException) cause;
        }
        throw e;
    }
    if (factory instanceof ResourceLoaderAware) {
        ((ResourceLoaderAware) factory).inform(loader);
    }
    return factory;
}

From source file:gov.nih.nlm.ncbi.seqr.tokenizer.BaseTokenStreamFactoryTestCase.java

License:Apache License

/**
 * Instantiates an {@link AbstractAnalysisFactory} of the given class via its
 * {@code Map}-argument constructor.
 *
 * @param clazz         factory class to instantiate
 * @param matchVersion  if non-null, passed through as the "luceneMatchVersion" arg
 * @param loader        resource loader handed to {@link ResourceLoaderAware} factories
 * @param keysAndValues alternating key/value argument pairs; must have even length
 * @return the initialized factory
 * @throws IllegalArgumentException if the pair list has odd length, or if the
 *         factory constructor rejected its arguments
 * @throws Exception any other reflective instantiation failure
 */
private AbstractAnalysisFactory analysisFactory(Class<? extends AbstractAnalysisFactory> clazz,
        Version matchVersion, ResourceLoader loader, String... keysAndValues) throws Exception {
    if (keysAndValues.length % 2 == 1) {
        throw new IllegalArgumentException("invalid keysAndValues map");
    }
    // Diamond operator: the element types are fixed by the declaration.
    Map<String, String> args = new HashMap<>();
    for (int i = 0; i < keysAndValues.length; i += 2) {
        // put() returns the previous mapping; non-null means a duplicate key
        String previous = args.put(keysAndValues[i], keysAndValues[i + 1]);
        assertNull("duplicate values for key: " + keysAndValues[i], previous);
    }
    if (matchVersion != null) {
        String previous = args.put("luceneMatchVersion", matchVersion.toString());
        assertNull("duplicate values for key: luceneMatchVersion", previous);
    }
    AbstractAnalysisFactory factory = null;
    try {
        factory = clazz.getConstructor(Map.class).newInstance(args);
    } catch (InvocationTargetException e) {
        // to simplify tests that check for illegal parameters
        if (e.getCause() instanceof IllegalArgumentException) {
            throw (IllegalArgumentException) e.getCause();
        } else {
            throw e;
        }
    }
    if (factory instanceof ResourceLoaderAware) {
        ((ResourceLoaderAware) factory).inform(loader);
    }
    return factory;
}

From source file:org.apache.derby.optional.lucene.LuceneQueryVTI.java

License:Apache License

/**
 * <p>/*from   w  ww  .j a v  a  2s .  c o m*/
 * Make sure that the index wasn't created with a Lucene version from
 * the future.
 * </p>
 */
private void vetLuceneVersion(String indexVersionString) throws SQLException {
    Version currentVersion = LuceneUtils.currentVersion();
    Version indexVersion = null;

    try {
        indexVersion = Version.parseLeniently(indexVersionString);
    } catch (Exception e) {
    }

    if ((indexVersion == null) || !currentVersion.onOrAfter(indexVersion)) {
        throw LuceneSupport.newSQLException(SQLState.LUCENE_BAD_VERSION, currentVersion.toString(),
                indexVersionString);
    }
}

From source file:org.apache.derby.optional.lucene.LuceneSupport.java

License:Apache License

/**
 * Create or re-create a Lucene index on the specified column.
 *
 * @param conn Connection to the database that owns the indexed table
 * @param schema The schema of the column to index
 * @param table The table of the column to index
 * @param textcol The column to create the Lucene index on
 * @param indexDescriptorMaker name of static method which instantiates the index configuration. may be null.
 * @param create True if the index is to be created, false if it is to be recreated
 * @param keyColumns optional explicit key columns; only consulted when creating
 * @throws SQLException
 * @throws IOException
 */
private static void createOrRecreateIndex(Connection conn, String schema, String table, String textcol,
        String indexDescriptorMaker, boolean create, String... keyColumns)
        throws SQLException, IOException, PrivilegedActionException {
    VTITemplate.ColumnDescriptor[] primaryKeys = new VTITemplate.ColumnDescriptor[0];

    // can't override keys when the index is updated
    if (!create) {
        primaryKeys = getKeys(conn, schema, table, textcol);
    }
    // use the supplied keys if possible
    else if ((keyColumns != null) && (keyColumns.length > 0)) {
        primaryKeys = getKeys(conn, schema, table, keyColumns);
    } else {
        primaryKeys = getPrimaryKeys(conn, schema, table);
    }

    // can't create an index without specifying keys for joining it back to Derby data
    if (primaryKeys.length == 0) {
        throw newSQLException(SQLState.LUCENE_NO_PRIMARY_KEY);
    }

    // don't let the user create a table function with duplicate column names
    vetColumnName(textcol);
    for (VTITemplate.ColumnDescriptor key : primaryKeys) {
        vetColumnName(key.columnName);
    }

    int keyCount = 0;
    StorageFile propertiesFile = getIndexPropertiesFile(conn, schema, table, textcol);

    //
    // Drop the old index directory if we're recreating the index.
    // We do this after verifying that the key exists.
    //
    if (!create) {
        dropIndexDirectories(schema, table, textcol);
    }

    Version luceneVersion = LuceneUtils.currentVersion();

    // create the new directory
    DerbyLuceneDir derbyLuceneDir = getDerbyLuceneDir(conn, schema, table, textcol);

    // get the Analyzer and the field names. use the default if the user didn't specify an override
    if (indexDescriptorMaker == null) {
        indexDescriptorMaker = LuceneUtils.class.getName() + ".defaultIndexDescriptor";
    }
    LuceneIndexDescriptor indexDescriptor = getIndexDescriptor(indexDescriptorMaker);
    String[] fieldNames = indexDescriptor.getFieldNames();
    Analyzer analyzer = indexDescriptor.getAnalyzer();

    // make sure the field names don't overlap with the key names
    sortAndVetFieldNames(fieldNames, primaryKeys);

    // record enough metadata (Lucene version, timestamp, descriptor maker,
    // analyzer class) to recreate or validate the index later
    Properties indexProperties = new Properties();
    indexProperties.setProperty(LUCENE_VERSION, luceneVersion.toString());
    indexProperties.setProperty(UPDATE_TIMESTAMP, Long.toString(System.currentTimeMillis()));
    indexProperties.setProperty(INDEX_DESCRIPTOR_MAKER, indexDescriptorMaker);
    indexProperties.setProperty(ANALYZER, analyzer.getClass().getName());

    // start building the CREATE FUNCTION ddl for the table function that
    // will expose query results; the key columns are appended in the loop below
    StringBuilder tableFunction = new StringBuilder();
    tableFunction.append("create function " + makeTableFunctionName(schema, table, textcol) + "\n");
    tableFunction.append("( query varchar( 32672 ), windowSize int, scoreCeiling real )\n");
    tableFunction.append("returns table\n(");

    writeIndexProperties(propertiesFile, indexProperties);

    PreparedStatement ps = null;
    ResultSet rs = null;
    IndexWriter iw = null;
    try {
        iw = getIndexWriter(luceneVersion, analyzer, derbyLuceneDir);

        // select all keys and the textcol from this column, add to lucene index
        StringBuilder query = new StringBuilder("select ");

        // build the SELECT column list and the table-function result columns
        // in lock step, so their ordering stays identical
        for (VTITemplate.ColumnDescriptor keyDesc : primaryKeys) {
            String keyName = derbyIdentifier(keyDesc.columnName);
            if (keyCount > 0) {
                query.append(", ");
            }
            query.append(keyName);

            String keyType = mapType(keyDesc);

            if (keyCount > 0) {
                tableFunction.append(",");
            }
            tableFunction.append("\n\t" + keyName + " " + keyType);
            keyCount++;
        }
        tableFunction.append(",\n\t" + DOCUMENT_ID + " int");
        tableFunction.append(",\n\t" + SCORE + " real");
        tableFunction.append("\n)\nlanguage java parameter style derby_jdbc_result_set contains sql\n");
        tableFunction.append("external name '" + LuceneSupport.class.getName() + ".luceneQuery'");

        // now create the table function for this text column
        if (create) {
            conn.prepareStatement(tableFunction.toString()).execute();
        }

        query.append(", ");
        query.append(derbyIdentifier(textcol));
        query.append(" from " + makeTableName(schema, table));

        ps = conn.prepareStatement(query.toString());
        rs = ps.executeQuery();

        // one Lucene Document per row: key columns first, then the text
        // column tokenized into each configured field
        while (rs.next()) {
            Document doc = new Document();

            for (int i = 0; i < keyCount; i++) {
                VTITemplate.ColumnDescriptor keyDescriptor = primaryKeys[i];
                addValue(doc, keyDescriptor, rs, i + 1);
            }

            // NULL text values are skipped; the row still gets a document with its keys
            String textcolValue = rs.getString(keyCount + 1);
            if (textcolValue != null) {
                for (String fieldName : fieldNames) {
                    doc.add(new TextField(fieldName, textcolValue, Store.NO));
                }
            }
            addDocument(iw, doc);
        }
    } finally {
        // nested finally blocks guarantee each resource gets a close attempt
        // even if closing an earlier one throws
        try {
            if (iw != null) {
                close(iw);
            }
        } finally {
            try {
                if (rs != null) {
                    rs.close();
                }
            } finally {
                if (ps != null) {
                    ps.close();
                }
            }
        }
    }
}

From source file:org.apache.solr.schema.SubIndexSchema.java

License:Apache License

/**
 * Builds an {@link Analyzer} from a schema.xml {@code <analyzer>} DOM node.
 * If the node carries a {@code class} attribute, that analyzer class is
 * instantiated directly (preferring a {@code (Version)} constructor, falling
 * back to the no-arg constructor). Otherwise the charFilter/tokenizer/filter
 * children are loaded and composed into a {@link TokenizerChain}.
 *
 * @param node the {@code <analyzer>} element, or null
 * @return the configured analyzer, or null if {@code node} is null
 * @throws XPathExpressionException if the child-element XPath queries fail
 * @throws SolrException on any configuration or instantiation error
 */
private Analyzer readAnalyzer(final Node node) throws XPathExpressionException {
    // parent node used to be passed in as "fieldtype"
    // if (!fieldtype.hasChildNodes()) return null;
    // Node node = DOMUtil.getChild(fieldtype,"analyzer");

    if (node == null)
        return null;
    final NamedNodeMap attrs = node.getAttributes();
    final String analyzerName = DOMUtil.getAttr(attrs, "class");
    if (analyzerName != null) {
        // No need to be core-aware as Analyzers are not in the core-aware list
        final Class<? extends Analyzer> clazz = loader.findClass(analyzerName).asSubclass(Analyzer.class);
        try {
            try {
                // first try to use a ctor with version parameter (needed for many new Analyzers that have no default one anymore)
                final Constructor<? extends Analyzer> cnstr = clazz.getConstructor(Version.class);
                final String matchVersionStr = DOMUtil.getAttr(attrs, LUCENE_MATCH_VERSION_PARAM);
                final Version luceneMatchVersion = (matchVersionStr == null) ? this.luceneMatchVersion
                        : Config.parseLuceneVersionString(matchVersionStr);
                if (luceneMatchVersion == null) {
                    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                            "Configuration Error: Analyzer '" + clazz.getName()
                                    + "' needs a 'luceneMatchVersion' parameter");
                }
                return cnstr.newInstance(luceneMatchVersion);
            } catch (final NoSuchMethodException nsme) {
                // otherwise use default ctor
                return clazz.newInstance();
            }
        } catch (final Exception e) {
            // preserve the underlying failure as the cause rather than discarding it
            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                    "Cannot load analyzer: " + analyzerName, e);
        }
    }

    final XPath xpath = XPathFactory.newInstance().newXPath();

    // Load the CharFilters
    // --------------------------------------------------------------------------------
    final ArrayList<CharFilterFactory> charFilters = new ArrayList<CharFilterFactory>();
    final AbstractPluginLoader<CharFilterFactory> charFilterLoader = new AbstractPluginLoader<CharFilterFactory>(
            "[schema.xml] analyzer/charFilter", false, false) {
        @Override
        protected void init(final CharFilterFactory plugin, final Node node) throws Exception {
            if (plugin != null) {
                final Map<String, String> params = DOMUtil.toMapExcept(node.getAttributes(), "class");
                // copy the luceneMatchVersion from config, if not set
                if (!params.containsKey(LUCENE_MATCH_VERSION_PARAM))
                    params.put(LUCENE_MATCH_VERSION_PARAM, luceneMatchVersion.toString());
                plugin.init(params);
                charFilters.add(plugin);
            }
        }

        @Override
        protected CharFilterFactory register(final String name, final CharFilterFactory plugin)
                throws Exception {
            return null; // used for map registration
        }
    };
    charFilterLoader.load(loader, (NodeList) xpath.evaluate("./charFilter", node, XPathConstants.NODESET));

    // Load the Tokenizer
    // Although an analyzer only allows a single Tokenizer, we load a list to make sure
    // the configuration is ok
    // --------------------------------------------------------------------------------
    final ArrayList<TokenizerFactory> tokenizers = new ArrayList<TokenizerFactory>(1);
    final AbstractPluginLoader<TokenizerFactory> tokenizerLoader = new AbstractPluginLoader<TokenizerFactory>(
            "[schema.xml] analyzer/tokenizer", false, false) {
        @Override
        protected void init(final TokenizerFactory plugin, final Node node) throws Exception {
            if (!tokenizers.isEmpty()) {
                throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                        "The schema defines multiple tokenizers for: " + node);
            }
            final Map<String, String> params = DOMUtil.toMapExcept(node.getAttributes(), "class");
            // copy the luceneMatchVersion from config, if not set
            if (!params.containsKey(LUCENE_MATCH_VERSION_PARAM))
                params.put(LUCENE_MATCH_VERSION_PARAM, luceneMatchVersion.toString());
            plugin.init(params);
            tokenizers.add(plugin);
        }

        @Override
        protected TokenizerFactory register(final String name, final TokenizerFactory plugin) throws Exception {
            return null; // used for map registration
        }
    };
    tokenizerLoader.load(loader, (NodeList) xpath.evaluate("./tokenizer", node, XPathConstants.NODESET));

    // Make sure something was loaded
    if (tokenizers.isEmpty()) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                "analyzer without class or tokenizer & filter list");
    }

    // Load the Filters
    // --------------------------------------------------------------------------------
    final ArrayList<TokenFilterFactory> filters = new ArrayList<TokenFilterFactory>();
    final AbstractPluginLoader<TokenFilterFactory> filterLoader = new AbstractPluginLoader<TokenFilterFactory>(
            "[schema.xml] analyzer/filter", false, false) {
        @Override
        protected void init(final TokenFilterFactory plugin, final Node node) throws Exception {
            if (plugin != null) {
                final Map<String, String> params = DOMUtil.toMapExcept(node.getAttributes(), "class");
                // copy the luceneMatchVersion from config, if not set
                if (!params.containsKey(LUCENE_MATCH_VERSION_PARAM))
                    params.put(LUCENE_MATCH_VERSION_PARAM, luceneMatchVersion.toString());
                plugin.init(params);
                filters.add(plugin);
            }
        }

        @Override
        protected TokenFilterFactory register(final String name, final TokenFilterFactory plugin)
                throws Exception {
            return null; // used for map registration
        }
    };
    filterLoader.load(loader, (NodeList) xpath.evaluate("./filter", node, XPathConstants.NODESET));

    return new TokenizerChain(charFilters.toArray(new CharFilterFactory[charFilters.size()]), tokenizers.get(0),
            filters.toArray(new TokenFilterFactory[filters.size()]));
}

From source file:org.dspace.app.util.IndexVersion.java

License:BSD License

/**
 * Determine the version of Solr/Lucene which was used to create a given index directory.
 *
 * @param indexDirPath
 *          Full path of the Solr/Lucene index directory
 * @return version as a string (e.g. "4.4"), empty string ("") if index directory is empty,
 *         or null if directory doesn't exist.
 * @throws IOException if the segment files cannot be read or parsed
 */
public static String getIndexVersion(String indexDirPath) throws IOException {
    String indexVersion = null;

    // Make sure this directory exists
    File dir = new File(indexDirPath);
    if (dir.exists() && dir.isDirectory()) {
        // Check if this index directory has any contents
        String[] dirContents = dir.list();
        // If this directory is empty, return an empty string.
        // It is a valid directory, but it's an empty index.
        if (dirContents != null && dirContents.length == 0) {
            return "";
        }

        // Open this index directory in Lucene. try-with-resources ensures the
        // Directory handle is closed even when segment parsing fails.
        try (Directory indexDir = FSDirectory.open(dir)) {
            // Get info on the Lucene segment file(s) in index directory
            SegmentInfos sis = new SegmentInfos();
            try {
                sis.read(indexDir);
            } catch (IOException ie) {
                // Wrap default IOException, providing more info about which directory cannot be read
                throw new IOException("Could not read Lucene segments files in " + dir.getAbsolutePath(), ie);
            }

            // If we have a valid Solr index dir, but it has no existing segments
            // then just return an empty string. It's a valid but empty index.
            if (sis.size() == 0) {
                return "";
            }

            // Loop through our Lucene segment files to locate the OLDEST
            // version. It is possible for individual segment files to be
            // created by different versions of Lucene. So, we just need
            // to find the oldest version of Lucene which created these
            // index segment files.
            // This logic borrowed from Lucene v.4.10 CheckIndex class:
            // https://github.com/apache/lucene-solr/blob/lucene_solr_4_10/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java#L426
            // WARNING: It MAY require updating whenever we upgrade the
            // "lucene.version" in our DSpace Parent POM
            Version oldest = null;
            Version oldSegment = null;
            for (SegmentCommitInfo si : sis) {
                // Get the version of Lucene which created this segment file
                Version version = si.info.getVersion();
                if (version == null) {
                    // If null, then this is a pre-3.1 segment file.
                    // For our purposes, we will just assume it is "3.0",
                    // This lets us know we will need to upgrade it to 3.5
                    // before upgrading to Solr/Lucene 4.x or above
                    try {
                        oldSegment = Version.parse("3.0");
                    } catch (ParseException pe) {
                        throw new IOException(pe);
                    }
                }
                // else if this segment is older than our oldest thus far
                else if (oldest == null || version.onOrAfter(oldest) == false) {
                    // We have a new oldest segment version
                    oldest = version;
                }
            }

            // If we found a really old (pre-3.1) segment, compare it to the
            // oldest versioned one. Guard against oldest == null: if EVERY
            // segment was pre-3.1, "oldest" was never assigned and calling
            // onOrAfter(null) would throw a NullPointerException.
            if (oldSegment != null && (oldest == null || oldSegment.onOrAfter(oldest) == false)) {
                oldest = oldSegment;
            }

            // At this point, we should know what version of Lucene created our
            // oldest segment file. We will return this as the Index version
            // as it's the oldest segment we will need to upgrade.
            if (oldest != null) {
                indexVersion = oldest.toString();
            }
        }
    }

    return indexVersion;
}

From source file:org.hibernate.search.analyzer.impl.LuceneAnalyzerBuilder.java

License:LGPL

/**
 * Converts annotation parameters into a mutable map and stamps it with the
 * Lucene match version the analyzer factory expects.
 */
private static Map<String, String> getMapOfParameters(Parameter[] params, Version luceneMatchVersion) {
    // Start from a mutable copy of the declared parameters, then tag it with
    // the Lucene compatibility version under the conventional key.
    final Map<String, String> result = ParameterAnnotationsReader.toNewMutableMap(params);
    result.put(LUCENE_VERSION_PARAM, luceneMatchVersion.toString());
    return result;
}

From source file:org.hibernate.search.engine.impl.SolrAnalyzerBuilder.java

License:LGPL

/**
 * Copies annotation parameters into a map and adds the Lucene match version
 * under the Solr-conventional key.
 */
private static Map<String, String> getMapOfParameters(Parameter[] params, Version luceneMatchVersion) {
    final Map<String, String> result = new HashMap<String, String>(params.length);
    for (final Parameter parameter : params) {
        result.put(parameter.name(), parameter.value());
    }
    result.put(SOLR_LUCENE_VERSION_PARAM, luceneMatchVersion.toString());
    return result;
}

From source file:org.hibernate.search.impl.ConfigContext.java

License:Open Source License

/**
 * Resolves the Lucene compatibility {@code Version} from configuration.
 * Falls back to {@code DEFAULT_LUCENE_MATCH_VERSION} (with a log
 * recommendation) when the property is unset.
 *
 * @param cfg the search configuration to read the property from
 * @return the resolved Lucene match version
 * @throws SearchException if the configured value names no known version
 */
private Version getLuceneMatchVersion(SearchConfiguration cfg) {
    Version version;
    String tmp = cfg.getProperty(Environment.LUCENE_MATCH_VERSION);
    if (StringHelper.isEmpty(tmp)) {
        log.recommendConfiguringLuceneVersion();
        version = DEFAULT_LUCENE_MATCH_VERSION;
    } else {
        try {
            version = Version.valueOf(tmp);
            if (log.isDebugEnabled()) {
                log.debug("Setting Lucene compatibility to Version " + version.name());
            }
        } catch (IllegalArgumentException e) {
            StringBuilder msg = new StringBuilder(tmp);
            msg.append(" is an invalid value for the Lucene match version. Possible values are: ");
            for (Version v : Version.values()) {
                msg.append(v.toString());
                msg.append(", ");
            }
            // drop the trailing ", " separator appended after the last value
            // (the old delete(lastIndexOf(","), length-1) removed only the
            // comma and left a dangling space)
            msg.setLength(msg.length() - 2);
            // keep the original failure as the cause for diagnosability
            throw new SearchException(msg.toString(), e);
        }
    }
    return version;
}

From source file:org.hibernate.search.impl.SolrAnalyzerBuilder.java

License:Open Source License

private static Map<String, String> getMapOfParameters(Parameter[] params, Version luceneMatchVersion) {
    Map<String, String> mapOfParams = new HashMap<String, String>(params.length);
    for (Parameter param : params) {
        mapOfParams.put(param.name(), param.value());
    }//from  w  ww.j  ava2s  .c o m
    mapOfParams.put(SOLR_LUCENE_VERSION_PARAM, luceneMatchVersion.toString());
    return Collections.unmodifiableMap(mapOfParams);
}