Example usage for org.apache.lucene.analysis.standard.StandardTokenizer#setMaxTokenLength

Introduction

On this page you can find example usages of org.apache.lucene.analysis.standard.StandardTokenizer#setMaxTokenLength.

Prototype

public void setMaxTokenLength(int length) 

Document

Set the max allowed token length.
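
Depending on the Lucene version, tokens longer than the configured limit are either skipped or chopped into multiple tokens, so check the javadoc of your release. Before the usage examples below, here is a minimal, self-contained sketch (a hypothetical demo class, assuming Lucene 5+, where StandardTokenizer has a no-argument constructor and receives its input via setReader):

import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class MaxTokenLengthDemo {
    public static void main(String[] args) throws IOException {
        try (StandardTokenizer tokenizer = new StandardTokenizer()) {
            // Tokens longer than 10 characters are skipped or split,
            // depending on the Lucene version in use.
            tokenizer.setMaxTokenLength(10);
            tokenizer.setReader(new StringReader("short supercalifragilisticexpialidocious words"));
            CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
            tokenizer.reset();
            while (tokenizer.incrementToken()) {
                System.out.println(term.toString());
            }
            tokenizer.end();
        }
    }
}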

Usage

From source file: MyStandardAnalyzer.java

License: Apache License

@Override
protected TokenStreamComponents createComponents(final String fieldName) {
    final StandardTokenizer src = new StandardTokenizer();
    src.setMaxTokenLength(maxTokenLength);
    TokenStream tok = new StandardFilter(src);
    // tok = new LowerCaseFilter(tok);
    tok = new StopFilter(tok, stopwords);
    return new TokenStreamComponents(src, tok) {
        @Override
        protected void setReader(final Reader reader) throws IOException {
            src.setMaxTokenLength(MyStandardAnalyzer.this.maxTokenLength);
            super.setReader(reader);
        }
    };
}
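
Note the setReader override above: Lucene caches and reuses TokenStreamComponents between documents, so mutable tokenizer state such as the max token length must be re-applied whenever a new reader is set. A hypothetical driver for an analyzer like this one (the field name, text, and no-argument constructor are assumptions, not taken from the source file):

try (Analyzer analyzer = new MyStandardAnalyzer();
        TokenStream ts = analyzer.tokenStream("body", "some sample text")) {
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
        System.out.println(term.toString());
    }
    ts.end();
}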

From source file: at.ac.univie.mminf.luceneSKOS.analysis.SNOMEDAnalyzer.java

License: Apache License

@Override
protected TokenStreamComponents createComponents(String fieldName, Reader reader) {

    final StandardTokenizer src = new StandardTokenizer(matchVersion, reader);
    src.setMaxTokenLength(maxTokenLength);
    TokenStream tok = new StandardFilter(matchVersion, src);
    // Prior to this we get the classic behavior; StandardFilter does it for us.
    tok = new SNOMEDFilter(tok, skosEngine, new StandardAnalyzer(matchVersion), bufferSize, types);
    tok = new LowerCaseFilter(matchVersion, tok);
    tok = new StopFilter(matchVersion, tok, stopwords);
    tok = new RemoveDuplicatesTokenFilter(tok);
    return new TokenStreamComponents(src, tok) {
        @Override
        protected void setReader(final Reader reader) throws IOException {
            src.setMaxTokenLength(maxTokenLength);
            super.setReader(reader);
        }
    };
}

From source file: ca.ubc.cs.reverb.indexer.ReverbLuceneAnalyzer.java

License: Apache License

@Override
protected TokenStreamComponents createComponents(final String fieldName, final Reader reader) {
    final StandardTokenizer src = new StandardTokenizer(matchVersion, reader);
    src.setMaxTokenLength(maxTokenLength);
    src.setReplaceInvalidAcronym(replaceInvalidAcronym);
    TokenStream tok = new StandardFilter(matchVersion, src);
    tok = new LowerCaseFilter(matchVersion, tok);
    tok = new StopFilter(matchVersion, tok, stopwords);
    tok = new MethodCallFilter(matchVersion, tok);
    return new TokenStreamComponents(src, tok) {
        @Override
        protected boolean reset(final Reader reader) throws IOException {
            src.setMaxTokenLength(ReverbLuceneAnalyzer.this.maxTokenLength);
            return super.reset(reader);
        }
    };
}

From source file: ca.ubc.cs.reverb.indexer.WebPageAnalyzer.java

License: Apache License

@Override
protected TokenStreamComponents createComponents(final String fieldName, final Reader reader) {
    final StandardTokenizer src = new StandardTokenizer(matchVersion, reader);
    src.setMaxTokenLength(maxTokenLength);
    src.setReplaceInvalidAcronym(replaceInvalidAcronym);
    TokenStream tok = new StandardFilter(matchVersion, src);
    tok = new LowerCaseFilter(matchVersion, tok);
    tok = new StopFilter(matchVersion, tok, stopwords);
    tok = new MethodCallFilter(matchVersion, tok);
    return new TokenStreamComponents(src, tok) {
        @Override
        protected boolean reset(final Reader reader) throws IOException {
            src.setMaxTokenLength(WebPageAnalyzer.this.maxTokenLength);
            return super.reset(reader);
        }
    };
}

From source file: cc.explain.lucene.StandardAnalyzerWithoutLowerCase.java

License: Apache License

/**
 * Constructs a {@link org.apache.lucene.analysis.standard.StandardTokenizer} filtered by a
 * {@link org.apache.lucene.analysis.standard.StandardFilter} and a {@link org.apache.lucene.analysis.StopFilter};
 * unlike the stock StandardAnalyzer, no LowerCaseFilter is applied.
 */
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
    StandardTokenizer tokenStream = new StandardTokenizer(matchVersion, reader);
    tokenStream.setMaxTokenLength(maxTokenLength);
    TokenStream result = new StandardFilter(tokenStream);
    result = new StopFilter(enableStopPositionIncrements, result, stopSet);
    return result;
}

From source file: com.dnikulin.vijil.lexer.PorterAnalyzer.java

License: Open Source License

@Override
protected TokenStreamComponents createComponents(final String fieldName, final Reader reader) {
    // Build standard tokenizer with configured version.
    StandardTokenizer src = new StandardTokenizer(VERSION, reader);
    src.setMaxTokenLength(MAX_TOKEN_LENGTH);

    // Build token stream pipeline.
    TokenStream tok = new StandardFilter(VERSION, src);
    tok = new LowerCaseFilter(VERSION, tok);
    tok = new PorterStemFilter(tok);
    return new TokenStreamComponents(src, tok);
}

From source file: com.google.ie.common.search.analyzer.IdeaExchangeQueryAnalyzer.java

License: Apache License

@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
    StandardTokenizer tokenStream = new StandardTokenizer(reader, false);
    tokenStream.setMaxTokenLength(maxTokenLength);
    TokenStream result = new StandardFilter(tokenStream);
    result = new LowerCaseFilter(result);
    result = new StopFilter(result, stopSet);
    fieldName = DEFAULT_LANGUAGE; // reuse the parameter as the Snowball stemmer language
    result = new SnowballFilter(result, fieldName);
    return result;
}

From source file: com.radialpoint.word2vec.lucene.SearchFiles.java

License: Open Source License

/** Simple command-line based search demo. */
public static void main(String[] args) throws Exception {
    String usage = "Usage:\tjava com.radialpoint.word2vec.lucene.SearchFiles [-index dir] [-vectors v] [-field f] [-repeat n] [-queries file] [-query string] [-raw] [-paging hitsPerPage]\n\nSee http://lucene.apache.org/core/4_1_0/demo/ for details.";
    if (args.length > 0 && ("-h".equals(args[0]) || "-help".equals(args[0]))) {
        System.out.println(usage);
        System.exit(0);
    }

    String index = "index";
    String field = "contents";
    String queries = null;
    String vectors = "vectors";
    int repeat = 0;
    boolean raw = false;
    String queryString = null;
    int hitsPerPage = 10;

    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            index = args[i + 1];
            i++;
        } else if ("-field".equals(args[i])) {
            field = args[i + 1];
            i++;
        } else if ("-vectors".equals(args[i])) {
            vectors = args[i + 1];
            i++;
        } else if ("-queries".equals(args[i])) {
            queries = args[i + 1];
            i++;
        } else if ("-query".equals(args[i])) {
            queryString = args[i + 1];
            i++;
        } else if ("-repeat".equals(args[i])) {
            repeat = Integer.parseInt(args[i + 1]);
            i++;
        } else if ("-raw".equals(args[i])) {
            raw = true;
        } else if ("-paging".equals(args[i])) {
            hitsPerPage = Integer.parseInt(args[i + 1]);
            if (hitsPerPage <= 0) {
                System.err.println("There must be at least 1 hit per page.");
                System.exit(1);
            }
            i++;
        }
    }

    IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    // Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);
    final File vectorsFile = new File(vectors);
    Analyzer analyzer = new Analyzer() {

        @SuppressWarnings("deprecation")
        @Override
        protected TokenStreamComponents createComponents(final String fieldName, final java.io.Reader reader) {
            final StandardTokenizer src = new StandardTokenizer(Version.LUCENE_40, reader);
            src.setMaxTokenLength(15);
            TokenStream tok = new StandardFilter(Version.LUCENE_40, src);
            tok = new LowerCaseFilter(Version.LUCENE_40, tok);
            tok = new StopFilter(Version.LUCENE_40, tok, StandardAnalyzer.STOP_WORDS_SET);
            TokenStream baseTok = tok;
            if (vectorsFile.exists()) {
                try {
                    tok = new Word2VecFilter(tok,
                            new QueryExpander(new Vectors(new FileInputStream(vectorsFile)), true,
                                    TermSelection.CUT_75_ABS),
                            3, false);
                } catch (IOException e) {
                    e.printStackTrace();
                    tok = baseTok;
                }
            }
            return new TokenStreamComponents(src, tok) {
                @Override
                protected void setReader(final java.io.Reader reader) throws IOException {
                    src.setMaxTokenLength(15);
                    super.setReader(reader);
                }
            };
        }
    };

    BufferedReader in = null;
    if (queries != null) {
        in = new BufferedReader(new InputStreamReader(new FileInputStream(queries), "UTF-8"));
    } else {
        in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
    }
    @SuppressWarnings("deprecation")
    QueryParser parser = new QueryParser(Version.LUCENE_40, field, analyzer);
    while (true) {
        if (queries == null && queryString == null) { // prompt the user
            System.out.println("Enter query: ");
        }

        String line = queryString != null ? queryString : in.readLine();

        if (line == null) {
            break;
        }

        line = line.trim();
        if (line.length() == 0) {
            break;
        }

        Query query = parser.parse(line);
        System.out.println("Searching for: " + query.toString(field));

        if (repeat > 0) { // repeat & time as benchmark
            Date start = new Date();
            for (int i = 0; i < repeat; i++) {
                searcher.search(query, null, 100);
            }
            Date end = new Date();
            System.out.println("Time: " + (end.getTime() - start.getTime()) + "ms");
        }

        doPagingSearch(in, searcher, query, hitsPerPage, raw, queries == null && queryString == null);

        if (queryString != null) {
            break;
        }
    }
    reader.close();
}

From source file: com.sismics.reader.core.dao.lucene.ReaderStandardAnalyzer.java

License: Apache License

@Override
protected TokenStreamComponents createComponents(final String fieldName, final Reader reader) {
    final StandardTokenizer src = new StandardTokenizer(matchVersion, reader);
    src.setMaxTokenLength(maxTokenLength);
    TokenStream tok = new StandardFilter(matchVersion, src);
    tok = new LowerCaseFilter(matchVersion, tok);
    tok = new StopFilter(matchVersion, tok, stopwords);
    return new TokenStreamComponents(src, tok) {
        @Override
        protected void setReader(final Reader reader) throws IOException {
            src.setMaxTokenLength(ReaderStandardAnalyzer.this.maxTokenLength);
            super.setReader(reader);
        }
    };
}

From source file: ddf.catalog.pubsub.criteria.contextual.CaseSensitiveStandardAnalyzer.java

License: Open Source License

/**
 * Constructs a {@link StandardTokenizer} filtered by a {@link StandardFilter} and a
 * {@link StopFilter}; the {@link LowerCaseFilter} is deliberately left out to keep tokens case-sensitive.
 */
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
    StandardTokenizer tokenStream = new StandardTokenizer(matchVersion, reader);
    tokenStream.setMaxTokenLength(maxTokenLength);
    TokenStream result = new StandardFilter(tokenStream);
    // HUGH result = new LowerCaseFilter( result );
    result = new StopFilter(enableStopPositionIncrements, result, stopSet);
    return result;
}