Example usage for org.apache.lucene.store RAMDirectory RAMDirectory

List of usage examples for org.apache.lucene.store RAMDirectory RAMDirectory

Introduction

On this page you can find example usage for the org.apache.lucene.store.RAMDirectory RAMDirectory() constructor.

Prototype

public RAMDirectory() 

Document

Constructs an empty Directory.
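
The constructor takes no arguments and produces a heap-resident Directory, which is handy for tests and for small, transient indexes. Below is a minimal sketch, assuming Lucene 4.x or later (where Directory.createOutput and openInput take an IOContext); the class name and file name are illustrative, not part of the Lucene API.

import java.util.Arrays;

import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;

public class RAMDirectoryBasics {
    public static void main(String[] args) throws Exception {
        // construct an empty, in-memory Directory
        RAMDirectory dir = new RAMDirectory();

        // write a small file into the directory
        IndexOutput out = dir.createOutput("demo.bin", IOContext.DEFAULT);
        out.writeString("hello, RAMDirectory");
        out.close();

        // read it back and list the directory contents
        IndexInput in = dir.openInput("demo.bin", IOContext.DEFAULT);
        System.out.println(in.readString());                  // hello, RAMDirectory
        in.close();
        System.out.println(Arrays.toString(dir.listAll()));   // [demo.bin]

        dir.close();
    }
}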

Usage

From source file:com.shaie.suggest.ContextSuggestDemo.java

License:Apache License

public ContextSuggestDemo() throws IOException {
    indexDir = new RAMDirectory();
    suggestDir = new RAMDirectory();
    analyzer = new SimpleAnalyzer();
    suggester = new AnalyzingInfixSuggester(suggestDir, analyzer, analyzer, 1, true);
    buildSearchIndex();
    buildSuggesterIndex();
}

From source file:com.shaie.UTF8Indexing.java

License:Apache License

@SuppressWarnings("resource")
public static void main(String[] args) throws Exception {
    final Directory dir = new RAMDirectory();
    final StandardAnalyzer analyzer = new StandardAnalyzer();
    final IndexWriterConfig conf = new IndexWriterConfig(analyzer);
    final IndexWriter writer = new IndexWriter(dir, conf);

    final Document doc = new Document();
    doc.add(new TextField("f", "Russia\u2013United States relations", Store.YES));
    writer.addDocument(doc);
    writer.close();

    final DirectoryReader reader = DirectoryReader.open(dir);
    final IndexSearcher searcher = new IndexSearcher(reader);
    final QueryParser qp = new QueryParser("f", analyzer);
    search(searcher, qp, "Russia United States relations");
    search(searcher, qp, "\"Russia United states relations\"");
    search(searcher, qp, "\"Russia-United states relations\"");
    search(searcher, qp, "\"Russia\u2013United states relations\"");
    reader.close();

    dir.close();
}

From source file:com.shmsoft.dmass.main.FileProcessor.java

License:Apache License

/**
 * Search metadata and file contents
 *
 * @param metadata
 * @return true if match is found else false
 */
private boolean isResponsive(Metadata metadata) {
    // set true if search finds a match
    boolean isResponsive = false;

    // get culling parameters
    String queryString = Project.getProject().getCullingAsTextBlock();

    // TODO parse important parameters to mappers and reducers individually, not globally
    IndexWriter writer = null;
    RAMDirectory idx = null;
    try {
        // construct a RAMDirectory to hold the in-memory representation of the index.
        idx = new RAMDirectory();

        // make a writer to create the index
        writer = new IndexWriter(idx, new StandardAnalyzer(Version.LUCENE_30), true,
                IndexWriter.MaxFieldLength.UNLIMITED);

        writer.addDocument(createDocument(metadata));

        // optimize and close the writer to finish building the index
        writer.optimize();
        writer.close();

        //adding the build index to FS
        if (Project.getProject().isLuceneFSIndexEnabled() && luceneIndex != null) {
            luceneIndex.addToIndex(idx);
        }

        SolrIndex.getInstance().addBatchData(metadata);

        if (queryString == null || queryString.trim().isEmpty()) {
            return true;
        }

        // build an IndexSearcher using the in-memory index
        Searcher searcher = new IndexSearcher(idx);
        // search directory
        isResponsive = search(searcher, queryString);

        searcher.close();
    } catch (Exception e) {
        // TODO handle this better
        // if anything happens - don't stop processing
        e.printStackTrace(System.out);
    } finally {
        try {
            if (writer != null) {
                writer.close();
            }
            if (idx != null) {
                idx.close();
            }
        } catch (Exception e) {
            // swallow exception, what else can you do now?
        }
    }
    return isResponsive;
}

From source file:com.slieer.app.lecene3x.LuceneIndexAndSearchDemo.java

License:Apache License

/**
 * Indexing and search demo using IKAnalyzer.
 * 
 * @param args
 */
public static void main(String[] args) {
    // name of the Lucene Document field to index
    String fieldName = "text";
    // sample texts to index
    String text = "IK Analyzer???????";
    String text1 = "? (Chinese Word Segmentation) ???????????";
    String text2 = "?????,,??,?";

    // build an IKAnalyzer in smart segmentation mode
    Analyzer analyzer = new IKAnalyzer(true);

    Directory directory = null;
    IndexWriter iwriter = null;
    IndexReader ireader = null;
    IndexSearcher isearcher = null;
    try {
        // create an in-memory index directory
        directory = new RAMDirectory();

        // configure the IndexWriter
        IndexWriterConfig iwConfig = new IndexWriterConfig(Version.LUCENE_4_9, analyzer);
        iwConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
        iwriter = new IndexWriter(directory, iwConfig);
        // build and add the documents
        Document doc = new Document();
        //document.add(new Field("content", content, Field.Store.YES, Field.Index.ANALYZED));
        Field strField = new StringField("ID", "10000", Field.Store.YES);
        Field textFild = new StringField(fieldName, text, Field.Store.YES);
        //textFild.setBoost(2);

        doc.add(strField);
        doc.add(textFild);
        iwriter.addDocument(doc);

        doc = new Document();
        strField = new StringField("ID", "10001", Field.Store.YES);
        textFild = new StringField(fieldName, text1, Field.Store.YES);
        //strField.setBoost(1);
        doc.add(strField);
        doc.add(textFild);
        iwriter.addDocument(doc);

        doc = new Document();
        strField = new StringField("ID", "10002", Field.Store.YES);
        //            textFild = new TextField(fieldName, text2, Field.Store.YES);
        textFild = new StringField(fieldName, text2, Field.Store.YES);
        //strField.setBoost(1);
        doc.add(strField);
        doc.add(textFild);
        iwriter.addDocument(doc);

        iwriter.close();

        // search **********************************
        // open a reader and searcher on the index
        ireader = DirectoryReader.open(directory);
        isearcher = new IndexSearcher(ireader);

        String keyword = "?";
        // build the Query with QueryParser
        QueryParser qp = new QueryParser(Version.LUCENE_4_9, fieldName, analyzer);
        qp.setDefaultOperator(QueryParser.AND_OPERATOR);
        Query query = qp.parse(keyword);
        System.out.println("Query = " + query);

        // retrieve the top 5 hits
        TopDocs topDocs = isearcher.search(query, 5);
        System.out.println("" + topDocs.totalHits);
        // 
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        for (int i = 0; i < topDocs.totalHits; i++) {
            Document targetDoc = isearcher.doc(scoreDocs[i].doc);
            System.out.println("" + targetDoc.toString());
        }

    } catch (CorruptIndexException e) {
        e.printStackTrace();
    } catch (LockObtainFailedException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    } catch (ParseException e) {
        e.printStackTrace();
    } finally {
        if (ireader != null) {
            try {
                ireader.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        if (directory != null) {
            try {
                directory.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}

From source file:com.spd.ukraine.lucenewebsearch1.web.IndexingController.java

@PostConstruct
public void init() {
    if (IS_DIRECTORY_IN_DISK) {
        String userDirectory = System.getProperty("user.dir");// + "/lucene"; 
        System.out.println("userDirectory " + userDirectory);
        Path userPath = Paths.get(userDirectory);
        Path rootPath = userPath.getRoot();
        String workingDirectory = rootPath.toString()
                .concat(System.getProperty("file.separator").equals("/")
                        ? userPath.subpath(0, 2).toString() + "/"
                        : "\\Users\\sf\\")
                .concat("luceneindex");
        System.out.println("workingDirectory " + workingDirectory);
        indexDir = new File(workingDirectory);
        try {
            Files.createDirectory(Paths.get(workingDirectory));
        } catch (FileAlreadyExistsException ex) {
            System.out.println("FileAlreadyExistsException");
        } catch (IOException ex) {
            //            System.out.println("IOException: " + ex.getMessage());
            ex.printStackTrace();
        }
        if (null == indexDir) {
            return;
        }
        try {
            directory = FSDirectory.open(indexDir);
        } catch (IOException ex) {
            System.out.println("IOException: " + ex.getMessage());
        }
    } else {
        directory = new RAMDirectory();
    }
    analyzer = new StandardAnalyzer(Version.LUCENE_43);//new StandardAnalyzer();
    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_43, analyzer);
    try {
        indexWriter = new IndexWriter(directory, config);
    } catch (IOException ex) {
        //            ex.printStackTrace();
        //            return;
    }
}

From source file:com.stratio.cassandra.lucene.index.RAMIndex.java

License:Apache License

/**
 * Builds a new {@link RAMIndex}.
 *
 * @param analyzer the index writer analyzer
 */
public RAMIndex(Analyzer analyzer) {
    try {
        directory = new RAMDirectory();
        indexWriter = new IndexWriter(directory, new IndexWriterConfig(analyzer));
    } catch (Exception e) {
        throw new IndexException(logger, e, "Error while creating index");
    }
}

From source file:com.sxc.lucene.analysis.codec.MetaphoneAnalyzerTest.java

License:Apache License

public void testKoolKat() throws Exception {
    RAMDirectory directory = new RAMDirectory();
    Analyzer analyzer = new MetaphoneReplacementAnalyzer();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(Version.LUCENE_47, analyzer);
    IndexWriter writer = new IndexWriter(directory, indexWriterConfig);
    Document doc = new Document();
    doc.add(new TextField("contents", "cool cat", Field.Store.YES));
    writer.addDocument(doc);
    writer.close();
    IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(directory));
    Query query = new QueryParser(Version.LUCENE_47, "contents", analyzer).parse("kool kat");
    TopDocs hits = searcher.search(query, 1);
    assertEquals(1, hits.totalHits);
    int docID = hits.scoreDocs[0].doc;
    doc = searcher.doc(docID);
    assertEquals("cool cat", doc.get("contents"));
    searcher.getIndexReader().close();
}

From source file:com.sxc.lucene.analysis.synonym.SynonymAnalyzerTest.java

License:Apache License

public void setUp() throws Exception {
    RAMDirectory directory = new RAMDirectory();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(Version.LUCENE_47, synonymAnalyzer);
    IndexWriter writer = new IndexWriter(directory, indexWriterConfig);
    Document doc = new Document();
    doc.add(new TextField("content", "The quick brown fox jumps over the lazy dog", Field.Store.YES)); //#2
    writer.addDocument(doc);

    writer.close();

    searcher = new IndexSearcher(DirectoryReader.open(directory));
}

From source file:com.tamingtext.frankenstein.Frankenstein.java

License:Apache License

/**
 * Index the content of Frankenstein
 *
 * @throws IOException
 */
private void index() throws IOException {
    System.out.println("Indexing Frankenstein");
    InputStream stream = getClass().getClassLoader().getResourceAsStream("frankenstein-gutenberg.txt");
    BufferedReader reader = new BufferedReader(new InputStreamReader(stream));
    //let's index paragraphs at a time
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_36, new StandardAnalyzer(Version.LUCENE_36));
    directory = new RAMDirectory();
    IndexWriter iw = new IndexWriter(directory, conf);
    String line;
    StringBuilder paraBuffer = new StringBuilder(2048);
    int lines = 0;
    int paragraphs = 0;
    int paragraphLines = 0;
    while ((line = reader.readLine()) != null) {
        if (line.contains("End of the Project Gutenberg")) {//we are in the license section at the end of the book
            break;
        }
        if (line.startsWith("#")) {//skip comments
            continue;
        }
        //if the line is blank, we have a paragraph, so let's index it
        if (line.matches("^\\s*$") && paraBuffer.length() > 0) {
            Document doc = new Document();
            //We can retrieve by paragraph number if we want
            String theString = paraBuffer.toString();
            theString = theString.trim();
            if (theString.length() > 0 && theString.matches("^\\s*$") == false) {
                addMetadata(doc, lines, paragraphs, paragraphLines);
                doc.add(new Field("paragraph", theString, Field.Store.YES, Field.Index.ANALYZED));//add the main content
                iw.addDocument(doc);//Index the document
                paragraphs++;
            }
            //reset some of our state
            paraBuffer.setLength(0);//we are done w/ this paragraph
            paragraphLines = 0;
        } else {
            paraBuffer.append(line).append(' ');
        }
        lines++;
        paragraphLines++;
    }
    System.out.println("Processed " + lines + " lines.  Paragraphs: " + paragraphs);
    iw.close();
}

From source file:com.tamingtext.fuzzy.OverlapMeasures.java

License:Apache License

public TopDocs cosine(String queryTerm, int n, String... terms) throws IOException, ParseException {
    Directory directory = new RAMDirectory();
    final Pattern pattern = Pattern.compile(".");
    Analyzer analyzer = new Analyzer() {
        @Override
        public TokenStream tokenStream(String fieldName, Reader reader) {
            TokenStream result = null;
            try {
                result = new PatternTokenizer(reader, pattern, 0);
            } catch (IOException e) {
            }
            return result;
        }
    };
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_36, analyzer);
    IndexWriter writer = new IndexWriter(directory, conf);
    for (String term : terms) {
        Document doc = new Document();
        doc.add(new Field("chars", term, Field.Store.YES, Field.Index.ANALYZED));
        writer.addDocument(doc);
    }
    writer.close();
    IndexReader reader = IndexReader.open(directory);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), terms.length);
    for (int i = 0; i < topDocs.scoreDocs.length; i++) {
        System.out.println("Id: " + topDocs.scoreDocs[i].doc + " Val: "
                + searcher.doc(topDocs.scoreDocs[i].doc).get("chars"));
    }
    QueryParser qp = new QueryParser(Version.LUCENE_36, "chars", analyzer);
    Query query = qp.parse(queryTerm);
    return searcher.search(query, n);
}