Example usage for org.apache.lucene.analysis TokenStream close

List of usage examples for org.apache.lucene.analysis TokenStream close

Introduction

On this page you can find example usages for org.apache.lucene.analysis TokenStream close.

Prototype

@Override
public void close() throws IOException 

Document

Releases resources associated with this stream.
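
All of the examples below follow the same TokenStream lifecycle: call reset() before the first incrementToken(), end() after the last token has been consumed, and close() to release the resources held by the stream. A minimal sketch of that pattern (assuming Lucene 5.x or later, where StandardAnalyzer has a no-argument constructor and TokenStream is Closeable, so try-with-resources calls close() automatically):

import java.io.IOException;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class TokenStreamCloseSketch {
    public static void main(String[] args) throws IOException {
        StandardAnalyzer analyzer = new StandardAnalyzer();
        // try-with-resources ensures close() runs even if incrementToken() throws
        try (TokenStream stream = analyzer.tokenStream("field", "some text to tokenize")) {
            CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
            stream.reset();                 // required before the first incrementToken()
            while (stream.incrementToken()) {
                System.out.println(term.toString());
            }
            stream.end();                   // records end-of-stream state (e.g. final offset)
        }                                   // close() releases resources associated with the stream
        analyzer.close();
    }
}

Several of the examples below predate try-with-resources and instead call close() explicitly, often in a finally block, which gives the same guarantee.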

Usage

From source file:org.cosmo.common.util.WordUtil.java

License:Apache License

public static void main(String[] args) throws Exception {

    StringReader reader = new StringReader(
            "CNN, CNN news, CNN.com, CNN TV, news, news online, breaking news, U.S. news, world news, weather, business, CNN Money, sports, politics, law, technology, entertainment, education, travel, health, special reports, autos, developing story, news video, CNN Intl");
    /*
    LetterTokenizer tokenizer = new LetterTokenizer(reader);
    AttributeSource filter = new StopFilter(true, tokenizer, StopAnalyzer.ENGLISH_STOP_WORDS_SET, true);
            
    while (filter.hasAttributes()) {
       Attribute attribute = filter.captureState().
       System.out.println(attribute);
    }
    */
    StopAnalyzer analyzer = new StopAnalyzer(Index.Version);
    Set<String> uniqueTerms = new HashSet<String>();
    TokenStream tokenStream = analyzer.reusableTokenStream("anyting", reader);
    tokenStream.reset();
    while (tokenStream.incrementToken()) {
        TermAttribute term = tokenStream.getAttribute(TermAttribute.class);
        uniqueTerms.add(term.term());
    }
    tokenStream.end();
    tokenStream.close();

    System.out.println(Arrays.toString(uniqueTerms.toArray()));

}
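
Note that the example above targets the Lucene 3.x API: reusableTokenStream(...) and TermAttribute were removed in Lucene 4. A hedged sketch of the same unique-term loop against the newer API (assuming a Lucene 5.x–7.x StopAnalyzer, whose no-argument constructor uses the default English stop set, and CharTermAttribute in place of TermAttribute):

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class UniqueTermsSketch {
    public static void main(String[] args) throws IOException {
        String text = "CNN, CNN news, breaking news, world news, weather, sports"; // shortened sample input
        StopAnalyzer analyzer = new StopAnalyzer();      // default English stop set in Lucene 5.x-7.x
        Set<String> uniqueTerms = new HashSet<String>();
        // tokenStream(field, text) replaces reusableTokenStream(field, reader);
        // try-with-resources replaces the explicit tokenStream.close() call
        try (TokenStream tokenStream = analyzer.tokenStream("anyting", text)) {
            CharTermAttribute term = tokenStream.addAttribute(CharTermAttribute.class);
            tokenStream.reset();
            while (tokenStream.incrementToken()) {
                uniqueTerms.add(term.toString());        // CharTermAttribute.toString() replaces TermAttribute.term()
            }
            tokenStream.end();
        }
        analyzer.close();
        System.out.println(uniqueTerms);
    }
}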

From source file:org.dbpedia.spotlight.lucene.analysis.NGramAnalyzer.java

License:Apache License

public static void main(String[] args) throws IOException {
    String myString = "cancer";
    Analyzer analyzer = new NGramAnalyzer(3, 3);
    System.out.println("Analyzing: \"" + myString + "\"");
    StringReader reader = new StringReader(myString);
    TokenStream stream = analyzer.tokenStream("field", reader);
    //        TokenStream stream = new NGramTokenizer(reader, EdgeNGramTokenizer.Side.BACK, 1,2);
    stream.reset();

    // print all tokens until stream is exhausted
    while (stream.incrementToken()) {
        System.out.println("token: " + stream);
    }

    stream.end();
    stream.close();
}

From source file:org.dbpedia.spotlight.lucene.analysis.PhoneticAnalyzer.java

License:Apache License

public static void main(String[] args) throws IOException {
    String myString = "cancer";
    Analyzer analyzer = new PhoneticAnalyzer(Version.LUCENE_36, SpotlightConfiguration.DEFAULT_STOPWORDS);
    System.out.println("Analyzing: \"" + myString + "\"");
    StringReader reader = new StringReader(myString);
    TokenStream stream = analyzer.tokenStream("field", reader);
    stream.reset();

    // print all tokens until stream is exhausted
    while (stream.incrementToken()) {
        System.out.println("token: " + stream);
    }

    stream.end();
    stream.close();
}

From source file:org.deals.lucene.highlight.Highlighter.java

License:Apache License

/**
 * Low level api to get the most relevant (formatted) sections of the document.
 * This method has been made public to allow visibility of score information held in TextFragment objects.
 * Thanks to Jason Calabrese for help in redefining the interface.
 * @param tokenStream
 * @param text
 * @param maxNumFragments
 * @param mergeContiguousFragments
 * @throws IOException
 */
public final TextFragment[] getBestTextFragments(TokenStream tokenStream, String text,
        boolean mergeContiguousFragments, int maxNumFragments) throws IOException {
    ArrayList docFrags = new ArrayList();
    StringBuffer newText = new StringBuffer();

    TextFragment currentFrag = new TextFragment(newText, newText.length(), docFrags.size());
    fragmentScorer.startFragment(currentFrag);
    docFrags.add(currentFrag);

    FragmentQueue fragQueue = new FragmentQueue(maxNumFragments);

    try {
        org.apache.lucene.analysis.Token token;
        String tokenText;
        int startOffset;
        int endOffset;
        int lastEndOffset = 0;
        textFragmenter.start(text);

        TokenGroup tokenGroup = new TokenGroup();
        token = tokenStream.next();
        while ((token != null) && (token.startOffset() < maxDocBytesToAnalyze)) {
            if ((tokenGroup.numTokens > 0) && (tokenGroup.isDistinct(token))) {
                //the current token is distinct from previous tokens -
                // markup the cached token group info
                startOffset = tokenGroup.matchStartOffset;
                endOffset = tokenGroup.matchEndOffset;
                tokenText = text.substring(startOffset, endOffset);
                String markedUpText = formatter.highlightTerm(encoder.encodeText(tokenText), tokenGroup);
                //store any whitespace etc from between this and last group
                if (startOffset > lastEndOffset)
                    newText.append(encoder.encodeText(text.substring(lastEndOffset, startOffset)));
                newText.append(markedUpText);
                lastEndOffset = Math.max(endOffset, lastEndOffset);
                tokenGroup.clear();

                //check if current token marks the start of a new fragment
                if (textFragmenter.isNewFragment(token)) {
                    currentFrag.setScore(fragmentScorer.getFragmentScore());
                    //record stats for a new fragment
                    currentFrag.textEndPos = newText.length();
                    currentFrag = new TextFragment(newText, newText.length(), docFrags.size());
                    fragmentScorer.startFragment(currentFrag);
                    docFrags.add(currentFrag);
                }
            }

            tokenGroup.addToken(token, fragmentScorer.getTokenScore(token));

            //            if(lastEndOffset>maxDocBytesToAnalyze)
            //            {
            //               break;
            //            }
            token = tokenStream.next();
        }
        currentFrag.setScore(fragmentScorer.getFragmentScore());

        if (tokenGroup.numTokens > 0) {
            //flush the accumulated text (same code as in above loop)
            startOffset = tokenGroup.matchStartOffset;
            endOffset = tokenGroup.matchEndOffset;
            tokenText = text.substring(startOffset, endOffset);
            String markedUpText = formatter.highlightTerm(encoder.encodeText(tokenText), tokenGroup);
            //store any whitespace etc from between this and last group
            if (startOffset > lastEndOffset)
                newText.append(encoder.encodeText(text.substring(lastEndOffset, startOffset)));
            newText.append(markedUpText);
            lastEndOffset = Math.max(lastEndOffset, endOffset);
        }

        //Test what remains of the original text beyond the point where we stopped analyzing 
        if (
        //               if there is text beyond the last token considered..
        (lastEndOffset < text.length()) &&
        //               and that text is not too large...
                (text.length() < maxDocBytesToAnalyze)) {
            //append it to the last fragment
            newText.append(encoder.encodeText(text.substring(lastEndOffset)));
        }

        currentFrag.textEndPos = newText.length();

        //sort the most relevant sections of the text
        for (Iterator i = docFrags.iterator(); i.hasNext();) {
            currentFrag = (TextFragment) i.next();

            //If you are running with a version of Lucene before 11th Sept 03
            // you do not have PriorityQueue.insert() - so uncomment the code below
            /*
                   if (currentFrag.getScore() >= minScore)
                   {
                      fragQueue.put(currentFrag);
                      if (fragQueue.size() > maxNumFragments)
                      { // if hit queue overfull
                         fragQueue.pop(); // remove lowest in hit queue
                         minScore = ((TextFragment) fragQueue.top()).getScore(); // reset minScore
                      }
                    
                    
                   }
            */
            //The above code caused a problem as a result of Christoph Goller's 11th Sept 03
            //fix to PriorityQueue. The correct method to use here is the new "insert" method
            // USE ABOVE CODE IF THIS DOES NOT COMPILE!
            fragQueue.insert(currentFrag);
        }

        //return the most relevant fragments
        TextFragment frag[] = new TextFragment[fragQueue.size()];
        for (int i = frag.length - 1; i >= 0; i--) {
            frag[i] = (TextFragment) fragQueue.pop();
        }

        //merge any contiguous fragments to improve readability
        if (mergeContiguousFragments) {
            mergeContiguousFragments(frag);
            ArrayList fragTexts = new ArrayList();
            for (int i = 0; i < frag.length; i++) {
                if ((frag[i] != null) && (frag[i].getScore() > 0)) {
                    fragTexts.add(frag[i]);
                }
            }
            frag = (TextFragment[]) fragTexts.toArray(new TextFragment[0]);
        }

        return frag;

    } finally {
        if (tokenStream != null) {
            try {
                tokenStream.close();
            } catch (Exception e) {
            }
        }
    }
}

From source file:org.drftpd.vfs.index.lucene.LuceneUtils.java

License:Open Source License

/**
 * Parses the name removing unwanted chars from it.
 *
 * @param field
 * @param term
 * @param name
 * @return Query
 */
public static Query analyze(String field, Term term, String name) {
    TokenStream ts = LuceneEngine.ANALYZER.tokenStream(field, new StringReader(name));

    BooleanQuery bQuery = new BooleanQuery();
    WildcardQuery wQuery;

    Set<String> tokens = new HashSet<String>(); // avoids repeated terms.

    // get the CharTermAttribute from the TokenStream
    CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);

    try {
        ts.reset();
        while (ts.incrementToken()) {
            tokens.add(termAtt.toString());
        }
        ts.end();
        ts.close();
    } catch (IOException e) {
        logger.error("IOException analyzing string", e);
    }

    for (String text : tokens) {
        wQuery = new WildcardQuery(term.createTerm(text));
        bQuery.add(wQuery, BooleanClause.Occur.MUST);
    }

    return bQuery;
}

From source file:org.eclipse.recommenders.test.codesearch.rcp.indexer.analyzer.AnalysisTestBase.java

License:Open Source License

private List<String> parseKeywords(Analyzer analyzer, String field, String keywords) {

    List<String> result = Lists.newArrayList();
    TokenStream stream = analyzer.tokenStream(field, new StringReader(keywords));

    try {
        while (stream.incrementToken()) {
            result.add(stream.getAttribute(TermAttribute.class).term());
        }
        stream.close();
    } catch (IOException e) {
        // not thrown b/c we're using a string reader...
    }

    return result;
}

From source file:org.eclipse.smila.search.lucene.index.IndexConnection.java

License:Open Source License

/**
 * Transform query.
 * 
 * @param dTerm
 *          the d term
 * @param boostFactor
 *          the boost factor
 * 
 * @return the query
 */
private Query transformQuery(final DTerm dTerm, final float boostFactor) {
    if (dTerm.getTerm() instanceof DOPN) {
        final BooleanQuery q = new BooleanQuery();
        if (_log.isDebugEnabled()) {
            _log.debug("<BooleanQuery>");
        }
        final DOPN op = dTerm.getOpN();
        final String operation = op.getOperation();
        for (int i = 0; i < op.getTermCount(); i++) {
            BooleanClause bc;
            // Lucene does not correctly execute BooleanQueries with only
            // prohibited elements. Therefore we must take the child term of
            // the NOT operation and move it up to the current query.
            // Consequence: "a OR b OR c OR NOT d" will be converted to
            // "(a OR b OR c) AND NOT d"
            if (op.getTerm(i).getTerm() instanceof DOP1
                    && ((DOP1) op.getTerm(i).getTerm()).getOperation().equals("NOT")) {
                if (_log.isDebugEnabled()) {
                    _log.debug("<BooleanClause boost=" + boostFactor + " prohibited>");
                }
                final Query tq = transformQuery(((DOP1) op.getTerm(i).getTerm()).getTerm(), boostFactor);
                tq.setBoost(boostFactor);
                bc = new BooleanClause(tq, BooleanClause.Occur.MUST_NOT);
            } else {
                if (_log.isDebugEnabled()) {
                    _log.debug("<BooleanClause boost=" + boostFactor + " "
                            + (operation.equals("AND") ? "required" : "") + ">");
                }
                final Query tq = transformQuery(op.getTerm(i), boostFactor);
                tq.setBoost(boostFactor);

                if (operation.equals("AND")) {
                    bc = new BooleanClause(tq, BooleanClause.Occur.MUST);
                } else {
                    bc = new BooleanClause(tq, BooleanClause.Occur.SHOULD);
                }
                if (_log.isDebugEnabled()) {
                    _log.debug("</BooleanClause>");
                }
            }
            q.add(bc);
        }
        if (_log.isDebugEnabled()) {
            _log.debug("</BooleanQuery>");
        }
        return q;
    } else if (dTerm.getTerm() instanceof DWMEAN) {
        final BooleanQuery q = new BooleanQuery();
        if (_log.isDebugEnabled()) {
            _log.debug("<BooleanQuery>");
        }
        final DWMEAN op = dTerm.getWMEAN();
        for (int i = 0; i < op.getTermCount(); i++) {
            BooleanClause bc;
            final String constraint = op.getConstraint(i);
            final float boost = op.getBoost(i) * boostFactor;
            // Lucene does not correctly execute BooleanQueries with only
            // prohibited elements. Therefore we must take the child term of
            // the NOT operation and move it up to the current query.
            // Consequence: "a OR b OR c OR NOT d" will be converted to
            // "(a OR b OR c) AND NOT d"
            if (op.getTerm(i).getTerm() instanceof DOP1
                    && ((DOP1) op.getTerm(i).getTerm()).getOperation().equals("NOT")) {
                if (_log.isDebugEnabled()) {
                    _log.debug("<BooleanClause boost=" + boost + " prohibited>");
                }
                final Query tq = transformQuery(((DOP1) op.getTerm(i).getTerm()).getTerm(), boost);
                tq.setBoost(boost);
                bc = new BooleanClause(tq, BooleanClause.Occur.MUST_NOT);
                if (_log.isDebugEnabled()) {
                    _log.debug("</BooleanClause>");
                }
            } else {
                if (_log.isDebugEnabled()) {
                    _log.debug("<BooleanClause boost=" + boost + " "
                            + (constraint.equals("required") ? "required " : "")
                            + (constraint.equals("prohibited") ? "prohibited" : "") + ">");
                }
                final Query tq = transformQuery(op.getTerm(i), boost);
                tq.setBoost(boost);

                if (constraint.equals("required")) {
                    bc = new BooleanClause(tq, BooleanClause.Occur.MUST);
                } else {
                    if (constraint.equals("prohibited")) {
                        bc = new BooleanClause(tq, BooleanClause.Occur.MUST_NOT);
                    } else {
                        bc = new BooleanClause(tq, BooleanClause.Occur.SHOULD);
                    }
                }

                if (_log.isDebugEnabled()) {
                    _log.debug("</BooleanClause>");
                }
            }
            q.add(bc);
        }
        if (_log.isDebugEnabled()) {
            _log.debug("</BooleanQuery>");
        }
        return q;
    } else if (dTerm.getTerm() instanceof DOP1) {
        final BooleanQuery q = new BooleanQuery();
        if (_log.isDebugEnabled()) {
            _log.debug("<BooleanQuery>");
        }
        final DOP1 op = dTerm.getOP1();
        if (op.getOperation().equals("NOT")) {
            if (_log.isDebugEnabled()) {
                _log.debug("<BooleanClause prohibited>");
            }
            q.add(transformQuery(op.getTerm(), boostFactor), BooleanClause.Occur.MUST_NOT);
            if (_log.isDebugEnabled()) {
                _log.debug("</BooleanClause>");
            }
        }
        if (_log.isDebugEnabled()) {
            _log.debug("</BooleanQuery>");
        }
        return q;
    } else if (dTerm.getTerm() instanceof DNumField) {
        final DNumField field = (DNumField) dTerm.getTerm();
        final Term lower = new Term(_index.getIndexStructure().getField(field.getFieldNo()).getName(),
                padNumField("" + field.getMin()));
        final Term upper = new Term(_index.getIndexStructure().getField(field.getFieldNo()).getName(),
                padNumField("" + field.getMax()));
        if (_log.isDebugEnabled()) {
            _log.debug("<RangeQuery inclusive>");
            _log.debug("<LowerTerm FieldName=" + lower.field() + " Text=" + lower.text() + "/>");
            _log.debug("<UpperTerm FieldName=" + upper.field() + " Text=" + upper.text() + "/>");
            _log.debug("</RangeQuery>");
        }
        final RangeQuery q = new RangeQuery(lower, upper, true);
        return q;
    } else if (dTerm.getTerm() instanceof DDateField) {
        final DDateField field = (DDateField) dTerm.getTerm();
        final SimpleDateFormat df = new SimpleDateFormat(DATE_FORMAT_PATTERN);
        Term lower = null;
        Term upper = null;
        if (_log.isDebugEnabled()) {
            _log.debug("<RangeQuery inclusive>");
        }
        if (field.getMin() != null) {
            lower = new Term(_index.getIndexStructure().getField(field.getFieldNo()).getName(),
                    df.format(field.getMin()));
            if (_log.isDebugEnabled()) {
                _log.debug("<LowerTerm FieldName=" + lower.field() + " Text=" + lower.text() + "/>");
            }
        }
        if (field.getMax() != null) {
            upper = new Term(_index.getIndexStructure().getField(field.getFieldNo()).getName(),
                    df.format(field.getMax()));
            if (_log.isDebugEnabled()) {
                _log.debug("<UpperTerm FieldName=" + upper.field() + " Text=" + upper.text() + "/>");
            }
        }

        if (_log.isDebugEnabled()) {
            _log.debug("</RangeQuery>");
        }
        final RangeQuery q = new RangeQuery(lower, upper, true);
        return q;
    } else {
        Query q;
        final DTextField tf = dTerm.getTextField();
        final Analyzer a = getAnalyzer();
        final String fieldName = _index.getIndexStructure().getField(tf.getFieldNo()).getName();
        final String fieldText = tf.getText().trim();
        final boolean isPhraseSearch = fieldText.indexOf(" ") > 0;
        boolean isWildcardSearch = false;
        boolean isFuzzySearch = false;

        final ArrayList<Term> terms = new ArrayList<Term>();
        final DIndexField indexField = (DIndexField) _index.getIndexStructure().getField(tf.getFieldNo());
        if (indexField.getTokenize()) {

            // wildcard and fuzzy search must not contain spaces
            isWildcardSearch = tf.getParseWildcards() && !isPhraseSearch && containsWildcards(fieldText);
            isFuzzySearch = tf.getFuzzy() && !isPhraseSearch && !isWildcardSearch;

            if (isWildcardSearch) {
                if (_log.isDebugEnabled()) {
                    _log.debug("fieldText=" + fieldText + ", token " + fieldText);
                }
                terms.add(new Term(fieldName, fieldText));
            } else {
                try {
                    Token t;
                    final TokenStream ts = a.tokenStream(fieldName, new StringReader(fieldText));
                    while ((t = ts.next()) != null) {
                        final String text = t.termText();
                        if (_log.isDebugEnabled()) {
                            _log.debug(
                                    "fieldText=" + fieldText + ", token " + text + " (type " + t.type() + ")");
                        }
                        terms.add(new Term(fieldName, text));
                    }
                    ts.close();
                } catch (final IOException ioe) {
                    if (_log.isErrorEnabled()) {
                        _log.error(ioe);
                    }
                }
            }

        } else {
            // wildcard and fuzzy search could contain spaces
            isWildcardSearch = tf.getParseWildcards() && containsWildcards(fieldText);
            isFuzzySearch = tf.getFuzzy() && !isWildcardSearch;

            if (_log.isDebugEnabled()) {
                _log.debug("fieldText=" + fieldText + ", token " + fieldText);
            }
            terms.add(new Term(fieldName, fieldText));
        }

        if (terms.size() == 0) {
            terms.add(new Term(fieldName, ""));
        }

        if (isFuzzySearch && terms.size() > 1) {
            q = new BooleanQuery();
            if (_log.isDebugEnabled()) {
                _log.debug("<BooleanQuery>");
            }
            for (int i = 0; i < terms.size(); i++) {
                final FuzzyQuery fq = new FuzzyQuery(new Term(fieldName, terms.get(i).text()));
                ((BooleanQuery) q).add(new BooleanClause(fq, BooleanClause.Occur.MUST));
                if (_log.isDebugEnabled()) {
                    _log.debug("<FuzzyQuery FieldName=" + terms.get(i).field() + " Text=" + terms.get(i).text()
                            + "/>");
                }
            }
            if (_log.isDebugEnabled()) {
                _log.debug("</BooleanQuery>");
            }
        } else if (terms.size() > 1) {
            // PhraseQuery if field contains at least two words
            // Checking done by whitespace. This is a problem with analyzers
            // that
            // don't treat
            // all whitespace as word separators!
            q = new PhraseQuery();
            if (_log.isDebugEnabled()) {
                _log.debug("<PhraseQuery Slop=" + tf.getSlop() + ">");
            }
            for (int i = 0; i < terms.size(); i++) {
                ((PhraseQuery) q).add(terms.get(i));
                if (_log.isDebugEnabled()) {
                    _log.debug(
                            "<Term FieldName=" + terms.get(i).field() + " Text=" + terms.get(i).text() + "/>");
                }
            }
            ((PhraseQuery) q).setSlop(tf.getSlop());
            if (_log.isDebugEnabled()) {
                _log.debug("</PhraseQuery>");
            }
        } else if (isWildcardSearch) {
            final String txt = indexField.getTokenize() ? fieldText.toLowerCase() : fieldText;
            if (_log.isDebugEnabled()) {
                _log.debug("<WildcardQuery FieldName=" + fieldName + " Text=" + txt + "/>");
            }
            q = new WildcardQuery(new Term(fieldName, txt));
        } else if (isFuzzySearch) {
            if (_log.isDebugEnabled()) {
                _log.debug("<FuzzyQuery FieldName=" + fieldName + " Text=" + terms.get(0).text() + "/>");
            }
            q = new FuzzyQuery(new Term(fieldName, terms.get(0).text()));
        } else {
            if (_log.isDebugEnabled()) {
                _log.debug("<TermQuery FieldName=" + fieldName + " Text=" + terms.get(0).text() + "/>");
            }
            q = new TermQuery(new Term(fieldName, terms.get(0).text()));
        }
        return q;
    }
}

From source file:org.elasticsearch.action.admin.indices.analyze.TransportAnalyzeAction.java

License:Apache License

@Override
protected AnalyzeResponse shardOperation(AnalyzeRequest request, int shardId) throws ElasticsearchException {
    IndexService indexService = null;
    if (request.index() != null) {
        indexService = indicesService.indexServiceSafe(request.index());
    }
    Analyzer analyzer = null;
    boolean closeAnalyzer = false;
    String field = null;
    if (request.field() != null) {
        if (indexService == null) {
            throw new ElasticsearchIllegalArgumentException(
                    "No index provided, and trying to analyzer based on a specific field which requires the index parameter");
        }
        FieldMapper<?> fieldMapper = indexService.mapperService().smartNameFieldMapper(request.field());
        if (fieldMapper != null) {
            if (fieldMapper.isNumeric()) {
                throw new ElasticsearchIllegalArgumentException("Can't process field [" + request.field()
                        + "], Analysis requests are not supported on numeric fields");
            }
            analyzer = fieldMapper.indexAnalyzer();
            field = fieldMapper.names().indexName();

        }
    }
    if (field == null) {
        if (indexService != null) {
            field = indexService.queryParserService().defaultField();
        } else {
            field = AllFieldMapper.NAME;
        }
    }
    if (analyzer == null && request.analyzer() != null) {
        if (indexService == null) {
            analyzer = indicesAnalysisService.analyzer(request.analyzer());
        } else {
            analyzer = indexService.analysisService().analyzer(request.analyzer());
        }
        if (analyzer == null) {
            throw new ElasticsearchIllegalArgumentException(
                    "failed to find analyzer [" + request.analyzer() + "]");
        }
    } else if (request.tokenizer() != null) {
        TokenizerFactory tokenizerFactory;
        if (indexService == null) {
            TokenizerFactoryFactory tokenizerFactoryFactory = indicesAnalysisService
                    .tokenizerFactoryFactory(request.tokenizer());
            if (tokenizerFactoryFactory == null) {
                throw new ElasticsearchIllegalArgumentException(
                        "failed to find global tokenizer under [" + request.tokenizer() + "]");
            }
            tokenizerFactory = tokenizerFactoryFactory.create(request.tokenizer(),
                    ImmutableSettings.Builder.EMPTY_SETTINGS);
        } else {
            tokenizerFactory = indexService.analysisService().tokenizer(request.tokenizer());
            if (tokenizerFactory == null) {
                throw new ElasticsearchIllegalArgumentException(
                        "failed to find tokenizer under [" + request.tokenizer() + "]");
            }
        }
        TokenFilterFactory[] tokenFilterFactories = new TokenFilterFactory[0];
        if (request.tokenFilters() != null && request.tokenFilters().length > 0) {
            tokenFilterFactories = new TokenFilterFactory[request.tokenFilters().length];
            for (int i = 0; i < request.tokenFilters().length; i++) {
                String tokenFilterName = request.tokenFilters()[i];
                if (indexService == null) {
                    TokenFilterFactoryFactory tokenFilterFactoryFactory = indicesAnalysisService
                            .tokenFilterFactoryFactory(tokenFilterName);
                    if (tokenFilterFactoryFactory == null) {
                        throw new ElasticsearchIllegalArgumentException(
                                "failed to find global token filter under [" + request.tokenizer() + "]");
                    }
                    tokenFilterFactories[i] = tokenFilterFactoryFactory.create(tokenFilterName,
                            ImmutableSettings.Builder.EMPTY_SETTINGS);
                } else {
                    tokenFilterFactories[i] = indexService.analysisService().tokenFilter(tokenFilterName);
                    if (tokenFilterFactories[i] == null) {
                        throw new ElasticsearchIllegalArgumentException(
                                "failed to find token filter under [" + request.tokenizer() + "]");
                    }
                }
                if (tokenFilterFactories[i] == null) {
                    throw new ElasticsearchIllegalArgumentException(
                            "failed to find token filter under [" + request.tokenizer() + "]");
                }
            }
        }
        analyzer = new CustomAnalyzer(tokenizerFactory, new CharFilterFactory[0], tokenFilterFactories);
        closeAnalyzer = true;
    } else if (analyzer == null) {
        if (indexService == null) {
            analyzer = Lucene.STANDARD_ANALYZER;
        } else {
            analyzer = indexService.analysisService().defaultIndexAnalyzer();
        }
    }
    if (analyzer == null) {
        throw new ElasticsearchIllegalArgumentException("failed to find analyzer");
    }

    List<AnalyzeResponse.AnalyzeToken> tokens = Lists.newArrayList();
    TokenStream stream = null;
    try {
        stream = analyzer.tokenStream(field, request.text());
        stream.reset();
        CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
        PositionIncrementAttribute posIncr = stream.addAttribute(PositionIncrementAttribute.class);
        OffsetAttribute offset = stream.addAttribute(OffsetAttribute.class);
        TypeAttribute type = stream.addAttribute(TypeAttribute.class);

        int position = 0;
        while (stream.incrementToken()) {
            int increment = posIncr.getPositionIncrement();
            if (increment > 0) {
                position = position + increment;
            }
            tokens.add(new AnalyzeResponse.AnalyzeToken(term.toString(), position, offset.startOffset(),
                    offset.endOffset(), type.type()));
        }
        stream.end();
    } catch (IOException e) {
        throw new ElasticsearchException("failed to analyze", e);
    } finally {
        if (stream != null) {
            try {
                stream.close();
            } catch (IOException e) {
                // ignore
            }
        }
        if (closeAnalyzer) {
            analyzer.close();
        }
    }

    return new AnalyzeResponse(tokens);
}

From source file:org.elasticsearch.docvalues.string.DVStringFieldMapper.java

License:Apache License

@Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
    // luckily this is single thread access and we dont need a thread local.
    hasDocValsNow = false;
    super.parseCreateField(context, fields);
    hasDocValsNow = true;
    String value = null;
    if (context.externalValueSet()) {
        value = (String) context.externalValue();
    } else {
        for (Field f : fields) {
            Class<?> fClass = f.getClass();
            if (fClass == Field.class || fClass == TextField.class || fClass == StringField.class) {
                value = f.stringValue();
                break;
            }
        }
    }
    if (value != null) {
        TokenStream stream = docValuesAnalyzer.analyzer().tokenStream(null, new StringReader(value));
        CharTermAttribute cattr = stream.addAttribute(CharTermAttribute.class);
        stream.reset();
        while (stream.incrementToken()) {
            String token = cattr.toString();
            // take the first token and make it a doc value
            fields.add(new SortedSetDocValuesField(names.indexName(), new BytesRef(token)));
            break;
        }
        stream.end();
        stream.close();
    }
}

From source file:org.elasticsearch.index.analysis.PaodingAnalysisTests.java

License:Apache License

public List getname(String param) throws IOException {

    System.setProperty("paoding.dic.home.config-first",
            "D:/Projects/Java Related/ElasticSearch/plugins/elasticsearch-analysis-paoding/config/paoding/dic");

    // build the Paoding (Chinese word segmentation) analyzer
    Analyzer ika = new PaodingAnalyzer();
    List<String> keys = new ArrayList<String>();
    TokenStream ts = null;

    try {
        Reader r = new StringReader(param);
        ts = ika.tokenStream("TestField", r);
        CharTermAttribute termAtt = (CharTermAttribute) ts.getAttribute(CharTermAttribute.class);
        TypeAttribute typeAtt = (TypeAttribute) ts.getAttribute(TypeAttribute.class);
        String key = null;
        while (ts.incrementToken()) {
            if ("word".equals(typeAtt.type())) {
                key = termAtt.toString();
                if (key.length() >= 2) {
                    keys.add(key);
                }
            }
        }
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        if (ts != null) {
            ts.close();
        }
    }

    Map<String, Integer> keyMap = new HashMap<String, Integer>();
    Integer $ = null;
    // count occurrences of each keyword
    for (String key : keys) {
        keyMap.put(key, ($ = keyMap.get(key)) == null ? 1 : $ + 1);
    }
    List<Map.Entry<String, Integer>> keyList = new ArrayList<Map.Entry<String, Integer>>(keyMap.entrySet());
    // sort keywords by occurrence count, descending
    Collections.sort(keyList, new Comparator<Map.Entry<String, Integer>>() {
        public int compare(Map.Entry<String, Integer> o1, Map.Entry<String, Integer> o2) {
            return (o2.getValue() - o1.getValue());
        }
    });
    // collect the keyword part of each sorted entry
    String id = null;
    String str = "";
    List list = new ArrayList();
    if (keyList.size() > 0) {
        for (int i = 0; i < keyList.size(); i++) {
            id = keyList.get(i).toString();
            String[] strs = id.split("\\=");
            str = strs[0];
            list.add(strs[0]);
            System.out.println("id:" + id);
        }
    }
    return list;
}