Example usage for org.apache.lucene.index IndexWriter close

List of usage examples for org.apache.lucene.index IndexWriter close

Introduction

In this page you can find the example usage for org.apache.lucene.index IndexWriter close.

Prototype

@Override
public void close() throws IOException 

Source Link

Document

Closes all open resources and releases the write lock.

Usage

From source file:com.revorg.goat.IndexManager.java

License:Open Source License

/**
 * deletes all of the documents in a collection. Causes the collection to be taken offline, preventing searches.
 *
 * @param indexPath Directory that contains the Lucene Collection
 * @throws Exception/*from   w  ww. j ava  2  s  .  c o  m*/
 * @return ActionResult
 */
public static String purgeIndex(String indexPath) {

    try {
        String indexExists = isIndexExistant(indexPath);
        if (indexExists.equalsIgnoreCase("Yes")) {
            //StandardAnalyzer new StandardAnalyzer() = new StandardAnalyzer();    //Initialize Class
            IndexWriter writer = new IndexWriter(indexPath, new StandardAnalyzer(), true,
                    IndexWriter.MaxFieldLength.LIMITED);
            writer.commit();
            writer.close();
            ActionResult = "Success";
            return ActionResult;
        } else {
            throw new Exception("Unable to open index");
        }
    } catch (Exception e) {
        ActionResultError = " caught a " + e.getClass() + " with message: " + e.getMessage();
        System.out.println("Failure to purge index: " + indexPath);
    }
    ActionResult = "Failure";
    return ActionResult + ActionResultError;
}

From source file:com.revorg.goat.IndexManager.java

License:Open Source License

/**
 * Counts the total number of documents in the index.
 *
 * @param indexPath directory that contains the Lucene collection
 * @return the document count as a string, otherwise "Failure" plus the error detail
 */
public static String getIndexCount(String indexPath) {

    try {
        // Open in append mode (create=false) so existing documents are preserved.
        IndexWriter countWriter = new IndexWriter(indexPath, new StandardAnalyzer(), false,
                IndexWriter.MaxFieldLength.LIMITED);
        ActionResult = String.valueOf(countWriter.maxDoc());
        countWriter.commit();
        countWriter.close();
        return ActionResult;
    } catch (Exception e) {
        ActionResultError = " caught a " + e.getClass() + " with message: " + e.getMessage();
        System.out.println("Failure to count index: " + indexPath);
    }
    ActionResult = "Failure";
    return ActionResult + ActionResultError;
}

From source file:com.revorg.goat.IndexManager.java

License:Open Source License

/**
 * Checks whether the index's write lock is currently held.
 *
 * @param indexPath directory that contains the Lucene collection
 * @return "Yes" if the index is locked, "No" if not, otherwise "Failure"
 *         plus the error detail
 */
public static String isIndexLocked(String indexPath) {
    try {
        // Query the lock state via the static helper. The original code opened an
        // IndexWriter first, which itself acquires the write lock — so a genuinely
        // locked index would throw LockObtainFailedException and be reported as
        // "Failure" instead of "Yes", and an unlocked index would always report
        // the lock *this* method just took.
        boolean indexLocked = IndexWriter.isLocked(indexPath);
        if (indexLocked) {
            ActionResult = "Yes";
        } else {
            ActionResult = "No";
        }
        return ActionResult;
    } catch (Exception e) {
        ActionResultError = " caught a " + e.getClass() + " with message: " + e.getMessage();
        // Original message said "optimize" — copy-paste error from another method.
        System.out.println("Failure to check lock on index: " + indexPath);
    }
    ActionResult = "Failure";
    return ActionResult + ActionResultError;
}

From source file:com.revorg.goat.IndexManager.java

License:Open Source License

/**
 * Merges two indexes together./*from   w  w  w  .j a  v  a  2s .  c o  m*/
 *
 * @param primaryIndex      The Primary Lucene Index
 * @param secondaryIndex    The Secondary Lucene Index that should be merged 
 * @throws Exception
 * @return ActionResult
 */
public static String mergeIndexes(String primaryIndex, String secondaryIndex) {
    try {

        //Writer Class
        IndexWriter writer = new IndexWriter(primaryIndex, new StandardAnalyzer(), false,
                IndexWriter.MaxFieldLength.LIMITED);
        //Merge Index #2 to Index #1
        writer.addIndexesNoOptimize(new Directory[] { FSDirectory.getDirectory(secondaryIndex) });

        writer.commit();
        writer.optimize();
        writer.close();
        ActionResult = "Success";
        return ActionResult;
    } catch (Exception e) {
        ActionResultError = " caught a " + e.getClass() + " with message: " + e.getMessage();
        System.out.println("Failure to merge index: " + primaryIndex);
        System.out.println(ActionResultError);
    }
    ActionResult = "Failure";
    return ActionResult + ActionResultError;
}

From source file:com.revorg.goat.IndexManager.java

License:Open Source License

/**
 * Runs a SQL query against a database and indexes each result row as a Lucene
 * document in a temporary working index, which is then merged into the main
 * index at {@code indexPath}. Column-to-field mapping comes from the
 * {@code dsschema.xml} config file under the index directory. If the schema
 * declares an update/delete trigger column and a primary key, the source rows
 * are flagged or removed after successful indexing.
 *
 * @param indexPath  directory that contains the Lucene collection
 * @param driverName JDBC driver class name
 * @param sourceURL  JDBC connection URL
 * @param dbUsername database username
 * @param dbPassword database password
 * @param execSQL    the SELECT statement whose rows are indexed
 * @return a success summary with the row count, or a failure message
 */
public static String indexDatabase(String indexPath, String driverName, String sourceURL, String dbUsername,
        String dbPassword, String execSQL) {
    // Normalize the FROM keyword so the table name can be located later.
    // NOTE(review): replace("from", "FROM") also uppercases "from" occurring
    // inside identifiers or string literals in the query — confirm callers
    // never pass such SQL.
    execSQL = execSQL.replace("from", "FROM");
    execSQL = execSQL.replace("FROM", " FROM ");
    String returnResult = "";
    String workPath = indexPath + dirSep + "working" + new Random().nextInt(100000) + dirSep;
    String configFile = indexPath + dirSep + "config" + dirSep + "dsschema.xml";
    String tempHTMLDir = tempDirectory + dirSep + "htmlIndexing" + dirSep + Utilities.CreateUUID() + dirSep;

    File workDir = new File(workPath);
    File schemaFile = new File(configFile);
    File htmlIndexing = new File(tempHTMLDir);

    // Declare the JDBC objects.
    // NOTE(review): con/stmt/rs (and the IndexWriter below) are only closed on
    // the success path — on any exception they leak. A finally block closing
    // all four would be safer.
    Connection con = null;
    Statement stmt = null;
    ResultSet rs = null;
    ResultSetMetaData rsMetaData = null;

    try {

        // The schema file is required to map columns to index fields.
        if (schemaFile.exists() == false) {

            ActionResultError = "DB Schema File (" + schemaFile + ")Does Not Exists";
            System.out.println(ActionResultError);
            ActionResult = "Failure ";
            return ActionResult + ActionResultError;
        }

        // The working directory must not already exist (avoid clobbering a
        // concurrent indexing run); otherwise create a fresh temporary index.
        if (workDir.exists()) {
            ActionResultError = "Failure to create index: " + workPath + " The index/directory already exists:";
            ActionResult = "Failure";
            return ActionResult + ActionResultError;
        } else {
            //Create Temporary Index
            IndexManager.createIndex(workPath);
        }

        //Load the driver class
        Class.forName(driverName);
        System.out.println("Driver Loaded");
        Properties prop = new Properties();
        prop.setProperty("user", dbUsername);
        prop.setProperty("password", dbPassword);

        DriverManager.setLoginTimeout(5); //Set Login Time
        con = DriverManager.getConnection(sourceURL, prop);

        // Create and execute an SQL statement that returns some data.
        String SQL = execSQL;
        stmt = con.createStatement();
        rs = stmt.executeQuery(SQL);
        rsMetaData = rs.getMetaData();
        int columns = rsMetaData.getColumnCount();
        String[] indexTypeArray = new String[columns];
        String[] columnNamesArray = new String[columns]; //Set array Length to Column Length from Meta Data
        String[] columnTypesArray = new String[columns]; //Set array Length to Column Length from Meta Data

        // Positions are 1-based (JDBC style); 0 means "not found".
        int primaryKeyPos = 0;
        int triggerPos = 0;
        String triggerType = "";
        boolean triggerExists = false;
        boolean primaryExists = false;

        XMLReader readerXML = new XMLReader(); //XML Reader Class
        // Cache the per-column schema settings so the XML is parsed only once.
        for (int i = 0; i < columns; i++) {
            indexTypeArray[i] = readerXML.getNodeValueByFile(configFile, i, "indextype");
            columnNamesArray[i] = readerXML.getNodeValueByFile(configFile, i, "columnname");
            columnTypesArray[i] = readerXML.getNodeValueByFile(configFile, i, "columntype");
            // Remember the primary-key column position (1-based).
            if (indexTypeArray[i].equalsIgnoreCase("PrimaryKey") == true) {
                primaryExists = true;
                primaryKeyPos = i + 1;
            }
            // Remember the update/delete trigger column, if declared.
            if (indexTypeArray[i].equalsIgnoreCase("triggerUpdate") == true
                    || indexTypeArray[i].equalsIgnoreCase("triggerDelete") == true) {
                triggerExists = true;
                triggerPos = i + 1;

                // Distinguish the trigger action to run after indexing.
                if (indexTypeArray[i].equalsIgnoreCase("triggerUpdate") == true) {
                    triggerType = "Update";
                } else {
                    triggerType = "Delete";
                }

            }
        }

        // Create the temporary HTML-indexing folder (used by createDocument).
        if (htmlIndexing.exists()) {
            ActionResultError = "Failure to create directory: " + htmlIndexing
                    + " The index/directory already exists:";
            ActionResult = "Failure";
            return ActionResult + ActionResultError;
        } else {
            //Create Temporary Index
            IndexManager.createIndex(tempHTMLDir);
        }

        Date start = new Date();
        // Open the working index in append mode; documents go here first and
        // are merged into the main index only after the whole query succeeds.
        IndexWriter writer = new IndexWriter(workPath, new StandardAnalyzer(), false,
                IndexWriter.MaxFieldLength.LIMITED);
        System.out.println("Indexing to directory '" + workPath + "'...");

        String dynamicSQL = "";
        int currentRow = 0; //Process Next Rows
        while (rs.next()) {
            // Accumulate a comma-separated list of primary-key values for the
            // post-indexing trigger statement.
            // NOTE(review): values come from rs.getInt so injection risk is low,
            // but the statement is still built by string concatenation.
            if (primaryKeyPos != 0) {
                if (currentRow > 0) {
                    dynamicSQL = dynamicSQL + ",";
                }
                dynamicSQL = dynamicSQL + rs.getInt(primaryKeyPos);
            }

            String docStatus = createDocument(writer, rs, columnNamesArray, indexTypeArray, tempHTMLDir);

            // On failure, discard both temporary indexes and abort.
            // NOTE(review): writer is still open when workPath is deleted here —
            // on Windows the deletion of locked files may fail.
            if (docStatus.substring(0, 4).equalsIgnoreCase("Fail")) {
                IndexManager.deleteIndex(tempHTMLDir);

                IndexManager.deleteIndex(workPath);
                return docStatus;
            }

            //Create Actual Document
            ++currentRow;
        }

        returnResult = "Successfully indexing of " + Integer.toString(currentRow) + " documents.";

        // Extract the table name from the query. Because " FROM " was inserted
        // above, splitting on a single space yields an empty token after FROM,
        // so the table name sits two positions past the FROM token.
        String updateTable = "";
        String[] words = execSQL.split(" ");
        int wordPos = 0;
        int tablePos = 0;
        for (String word : words) {
            ++wordPos;
            if (word.equalsIgnoreCase("FROM") == true) {
                tablePos = wordPos + 2;
            }
            if (tablePos == wordPos) {
                updateTable = word;
                break;
            }
        }

        // Run the trigger statement only when rows were indexed and both the
        // trigger column and primary key are known.
        if (triggerExists && primaryExists && updateTable.length() > 0 && currentRow != 0) {

            if (triggerType.equalsIgnoreCase("Update") == true) {
                dynamicSQL = "update " + updateTable + " set " + columnNamesArray[triggerPos - 1] + " =1 where "
                        + columnNamesArray[primaryKeyPos - 1] + " in (" + dynamicSQL + ");";
            } else {
                dynamicSQL = "delete from " + updateTable + " where " + columnNamesArray[primaryKeyPos - 1]
                        + " in (" + dynamicSQL + ");";
            }
            System.out.println(dynamicSQL);
            stmt.execute(dynamicSQL);
        }

        System.out.println("Optimizing..." + currentRow + " Documents");

        //Close Working Writer
        writer.close();

        // Merge the working index into the main one (optimize happens inside).
        IndexManager.mergeIndexes(indexPath, workPath);

        //Delete Working Folder
        IndexManager.deleteIndex(workPath);
        IndexManager.deleteIndex(tempHTMLDir);

        Date end = new Date();
        System.out.println(end.getTime() - start.getTime() + " total milliseconds: = "
                + ((end.getTime() - start.getTime()) / 1000) + " Seconds");

        ActionResult = returnResult;
        rs.close(); //Close Result Set
        con.close();
        return ActionResult;

    }

    catch (Exception e) {
        // Best-effort cleanup of the temporary folders before reporting failure.
        IndexManager.deleteIndex(workPath); //Delete Working Folder
        IndexManager.deleteIndex(tempHTMLDir);

        e.printStackTrace();
        ActionResult = "Failure: " + e + " caught a " + e.getClass() + " with message: " + e.getMessage();
        return ActionResult + ActionResultError;

    }
}

From source file:com.ricky.codelab.lucene.LuceneIndexAndSearchDemo.java

License:Apache License

/**
 * /* w  w w  . jav a 2  s  . c om*/
 * ???
 * @param args
 */
public static void main(String[] args) {
    //Lucene Document??
    String fieldName = "text";
    //
    String text = "IK Analyzer???????";

    //IKAnalyzer?
    Analyzer analyzer = new IKAnalyzer(true);

    Directory directory = null;
    IndexWriter iwriter = null;
    IndexReader ireader = null;
    IndexSearcher isearcher = null;
    try {
        //
        directory = new RAMDirectory();

        //?IndexWriterConfig
        IndexWriterConfig iwConfig = new IndexWriterConfig(analyzer);
        iwConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
        iwriter = new IndexWriter(directory, iwConfig);
        //
        Document doc = new Document();
        doc.add(new StringField("ID", "10000", Field.Store.YES));
        doc.add(new TextField(fieldName, text, Field.Store.YES));
        iwriter.addDocument(doc);
        iwriter.close();

        //?**********************************
        //?   
        ireader = DirectoryReader.open(directory);
        isearcher = new IndexSearcher(ireader);

        String keyword = "?";
        //QueryParser?Query
        QueryParser qp = new QueryParser(fieldName, analyzer);
        qp.setDefaultOperator(QueryParser.AND_OPERATOR);
        Query query = qp.parse(keyword);
        System.out.println("Query = " + query);

        //?5?
        TopDocs topDocs = isearcher.search(query, 5);
        System.out.println("" + topDocs.totalHits);
        //
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        for (int i = 0; i < topDocs.totalHits; i++) {
            Document targetDoc = isearcher.doc(scoreDocs[i].doc);
            System.out.println("" + targetDoc.toString());
        }

    } catch (CorruptIndexException e) {
        e.printStackTrace();
    } catch (LockObtainFailedException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    } catch (ParseException e) {
        e.printStackTrace();
    } finally {
        if (ireader != null) {
            try {
                ireader.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        if (directory != null) {
            try {
                directory.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}

From source file:com.rocana.lucene.codec.v1.TestBlockPostingsFormat.java

License:Apache License

/** Make sure the final sub-block(s) are not skipped. */
public void testFinalBlock() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
    for (int i = 0; i < 25; i++) {
        // Each document contributes one plain term and one z-prefixed term.
        String letter = Character.toString((char) (97 + i));
        Document doc = new Document();
        doc.add(newStringField("field", letter, Field.Store.NO));
        doc.add(newStringField("field", "z" + letter, Field.Store.NO));
        writer.addDocument(doc);
    }
    writer.forceMerge(1);

    DirectoryReader reader = DirectoryReader.open(writer);
    assertEquals(1, reader.leaves().size());
    RocanaFieldReader field = (RocanaFieldReader) reader.leaves().get(0).reader().fields().terms("field");
    // We should see exactly two blocks: one root block (prefix empty string) and one block for z* terms (prefix z):
    RocanaStats stats = field.getStats();
    assertEquals(0, stats.floorBlockCount);
    assertEquals(2, stats.nonFloorBlockCount);
    reader.close();
    writer.close();
    dir.close();
}

From source file:com.rocana.lucene.codec.v1.TestBlockPostingsFormat2.java

License:Apache License

@Override
public void tearDown() throws Exception {
    // Close the test's writer field before validating the index.
    iw.close();
    TestUtil.checkIndex(dir); // for some extra coverage, checkIndex before we forceMerge
    IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
    iwc.setCodec(TestUtil.alwaysPostingsFormat(new RocanaLucene50PostingsFormat()));
    iwc.setOpenMode(OpenMode.APPEND);
    // Distinct name: the original declared a local 'iw' that shadowed the
    // just-closed 'iw' field, which is confusing and error-prone.
    IndexWriter mergeWriter = new IndexWriter(dir, iwc);
    mergeWriter.forceMerge(1);
    mergeWriter.close();
    dir.close(); // just force a checkindex for now
    super.tearDown();
}

From source file:com.rocana.lucene.codec.v1.TestBlockPostingsFormat3.java

License:Apache License

/**
 * Indexes documents whose fields cover every index-option/payload combination,
 * verifies the index, force-merges to one segment, and verifies again.
 */
public void test() throws Exception {
    Directory dir = newDirectory();
    // Per-field analyzer: attach fixed- or variable-length payloads based on
    // the field name; all other fields get the plain tokenizer.
    Analyzer analyzer = new Analyzer(Analyzer.PER_FIELD_REUSE_STRATEGY) {
        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer tokenizer = new MockTokenizer();
            if (fieldName.contains("payloadsFixed")) {
                return new TokenStreamComponents(tokenizer,
                        new MockFixedLengthPayloadFilter(new Random(0), tokenizer, 1));
            }
            if (fieldName.contains("payloadsVariable")) {
                return new TokenStreamComponents(tokenizer,
                        new MockVariableLengthPayloadFilter(new Random(0), tokenizer));
            }
            return new TokenStreamComponents(tokenizer);
        }
    };
    IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
    iwc.setCodec(TestUtil.alwaysPostingsFormat(new RocanaLucene50PostingsFormat()));
    // TODO we could actually add more fields implemented with different PFs
    // or, just put this test into the usual rotation?
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
    Document doc = new Document();

    FieldType docsOnlyType = new FieldType(TextField.TYPE_NOT_STORED);
    // turn this on for a cross-check
    docsOnlyType.setStoreTermVectors(true);
    docsOnlyType.setIndexOptions(IndexOptions.DOCS);

    FieldType docsAndFreqsType = new FieldType(TextField.TYPE_NOT_STORED);
    // turn this on for a cross-check
    docsAndFreqsType.setStoreTermVectors(true);
    docsAndFreqsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS);

    FieldType positionsType = new FieldType(TextField.TYPE_NOT_STORED);
    // turn these on for a cross-check
    positionsType.setStoreTermVectors(true);
    positionsType.setStoreTermVectorPositions(true);
    positionsType.setStoreTermVectorOffsets(true);
    positionsType.setStoreTermVectorPayloads(true);

    FieldType offsetsType = new FieldType(positionsType);
    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);

    // One field per index-option/payload combination; all share the same value.
    Field[] fields = {
            new Field("field1docs", "", docsOnlyType),
            new Field("field2freqs", "", docsAndFreqsType),
            new Field("field3positions", "", positionsType),
            new Field("field4offsets", "", offsetsType),
            new Field("field5payloadsFixed", "", positionsType),
            new Field("field6payloadsVariable", "", positionsType),
            new Field("field7payloadsFixedOffsets", "", offsetsType),
            new Field("field8payloadsVariableOffsets", "", offsetsType) };
    for (Field field : fields) {
        doc.add(field);
    }

    for (int i = 0; i < MAXDOC; i++) {
        String stringValue = Integer.toString(i) + " verycommon " + English.intToEnglish(i).replace('-', ' ')
                + " " + TestUtil.randomSimpleString(random());
        for (Field field : fields) {
            field.setStringValue(stringValue);
        }
        iw.addDocument(doc);
    }
    iw.close();
    verify(dir);
    TestUtil.checkIndex(dir); // for some extra coverage, checkIndex before we forceMerge
    iwc = newIndexWriterConfig(analyzer);
    iwc.setCodec(TestUtil.alwaysPostingsFormat(new RocanaLucene50PostingsFormat()));
    iwc.setOpenMode(OpenMode.APPEND);
    IndexWriter mergeWriter = new IndexWriter(dir, iwc);
    mergeWriter.forceMerge(1);
    mergeWriter.close();
    verify(dir);
    dir.close();
}

From source file:com.rocana.lucene.codec.v1.TestRocanaPerFieldPostingsFormat2.java

License:Apache License

@Test
public void testMergeUnusedPerFieldCodec() throws IOException {
    Directory dir = newDirectory();
    // Fresh index using the mock per-field codec.
    IndexWriterConfig iwconf = newIndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE)
            .setCodec(new MockCodec());
    IndexWriter writer = newWriter(dir, iwconf);

    // Three committed batches of 10 documents each.
    addDocs(writer, 10);
    writer.commit();
    addDocs3(writer, 10);
    writer.commit();
    addDocs2(writer, 10);
    writer.commit();

    // Merging must preserve all 30 documents even though some per-field
    // formats were never used.
    assertEquals(30, writer.maxDoc());
    TestUtil.checkIndex(dir);
    writer.forceMerge(1);
    assertEquals(30, writer.maxDoc());
    writer.close();
    dir.close();
}