Example usage for org.apache.lucene.index IndexWriter updateDocument

List of usage examples for org.apache.lucene.index IndexWriter updateDocument

Introduction

In this page you can find the example usage for org.apache.lucene.index IndexWriter updateDocument.

Prototype

private long updateDocument(final DocumentsWriterDeleteQueue.Node<?> delNode,
            Iterable<? extends IndexableField> doc) throws IOException 

Note: the prototype above is IndexWriter's private internal overload. The examples
below call the public API, updateDocument(Term term, Iterable<? extends IndexableField> doc),
which atomically deletes any documents matching the term and adds the new document.

Source Link

Usage

From source file:com.codenvy.test.lucene.IndexFiles.java

License:Open Source License

/**
 * Indexes a single file as one Lucene document.
 *
 * <p>Fields added:
 * <ul>
 *   <li>{@code path} — the file path, indexed as a single untokenized term and
 *       stored; it doubles as the unique key for updates.</li>
 *   <li>{@code modified} — the last-modified timestamp (millisecond resolution),
 *       indexed for range filtering but not stored.</li>
 *   <li>{@code contents} — the file text, tokenized and indexed from a UTF-8
 *       reader, not stored.</li>
 * </ul>
 */
static void indexDoc(IndexWriter writer, Path file, long lastModified) throws IOException {
    try (InputStream in = Files.newInputStream(file)) {
        String path = file.toString();

        Document doc = new Document();
        doc.add(new StringField("path", path, Field.Store.YES));
        doc.add(new LongField("modified", lastModified, Field.Store.NO));
        // NOTE: the reader assumes the file is UTF-8; searches for special
        // characters will fail if it is not.
        BufferedReader contents = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8));
        doc.add(new TextField("contents", contents));

        boolean freshIndex = writer.getConfig().getOpenMode() == OpenMode.CREATE;
        if (freshIndex) {
            // A newly created index cannot contain an older copy, so a plain add suffices.
            System.out.println("adding " + file);
            writer.addDocument(doc);
        } else {
            // Replace any previously indexed copy keyed by the exact path, if present.
            System.out.println("updating " + file);
            writer.updateDocument(new Term("path", path), doc);
        }
    }
}

From source file:com.company.Indexer.java

License:Apache License

/**
 * Indexes a single HTML file as one Lucene document.
 *
 * <p>The base document comes from {@code JTidyHTMLHandler}, which parses the
 * HTML stream (presumably populating the body/content fields — confirm against
 * the handler). This method then attaches the {@code path} key field and the
 * {@code modified} timestamp field before writing.
 */
static void indexDoc(IndexWriter writer, Path file, long lastModified) throws IOException {
    try (InputStream in = Files.newInputStream(file)) {
        String path = file.toString();

        // Let the HTML handler build the initial document from the stream.
        Document doc = new JTidyHTMLHandler().getDocument(in);

        // "path": single untokenized, stored term — also the unique update key.
        doc.add(new StringField("path", path, Field.Store.YES));
        // "modified": millisecond timestamp, indexed for range filtering only.
        doc.add(new LongField("modified", lastModified, Field.Store.NO));

        if (writer.getConfig().getOpenMode() == IndexWriterConfig.OpenMode.CREATE) {
            // Fresh index: no previous copy can exist.
            System.out.println("adding " + file);
            writer.addDocument(doc);
        } else {
            // Replace any previously indexed copy keyed by the exact path.
            System.out.println("updating " + file);
            writer.updateDocument(new Term("path", path), doc);
        }
    }
}

From source file:com.company.IndexFiles.java

License:Apache License

/** Indexes a single file as one Lucene document keyed by its path. */
static void indexDoc(IndexWriter writer, Path file, long lastModified) throws IOException {
    try (InputStream stream = Files.newInputStream(file)) {
        // The path term serves as the unique key when replacing an old copy.
        Term pathTerm = new Term("path", file.toString());

        Document doc = new Document();
        // Path: indexed as a single untokenized term and stored for retrieval.
        doc.add(new StringField("path", file.toString(), Field.Store.YES));
        // Last-modified: millisecond-resolution long, indexed (e.g. for range
        // filtering) but not stored.
        doc.add(new LongField("modified", lastModified, Field.Store.NO));
        // Contents: tokenized and indexed from a UTF-8 reader, not stored.
        // Searches for special characters fail if the file is not UTF-8.
        doc.add(new TextField("contents",
                new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8))));

        boolean freshIndex = writer.getConfig().getOpenMode() == IndexWriterConfig.OpenMode.CREATE;
        if (freshIndex) {
            // No earlier copy can exist in a newly created index.
            System.out.println("adding " + file);
            writer.addDocument(doc);
        } else {
            // Replace any previously indexed copy with the same exact path.
            System.out.println("updating " + file);
            writer.updateDocument(pathTerm, doc);
        }
    }
}

From source file:com.czw.search.lucene.example.IndexFiles.java

License:Apache License

/**
 * Indexes a single document/*  ww  w. j a  v a 2  s.c  om*/
 */
static void indexDoc(IndexWriter writer, Path file, long lastModified) throws IOException {
    try (InputStream stream = Files.newInputStream(file)) {
        // make a new, empty document
        Document doc = new Document();

        // Add the path of the file as a field named "path".  Use a
        // field that is indexed (i.e. searchable), but don't tokenize
        // the field into separate words and don't index term frequency
        // or positional information:
        Field pathField = new StringField("path", file.toString(), Field.Store.YES);
        doc.add(pathField);

        // Add the last modified date of the file a field named "modified".
        // Use a LongPoint that is indexed (i.e. efficiently filterable with
        // PointRangeQuery).  This indexes to milli-second resolution, which
        // is often too fine.  You could instead create a number based on
        // year/month/day/hour/minutes/seconds, down the resolution you require.
        // For example the long value 2011021714 would mean
        // February 17, 2011, 2-3 PM.
        doc.add(new LongPoint("modified", lastModified));

        // Add the contents of the file to a field named "contents".  Specify a Reader,
        // so that the text of the file is tokenized and indexed, but not stored.
        // Note that FileReader expects the file to be in UTF-8 encoding.
        // If that's not the case searching for special characters will fail.
        doc.add(new TextField("contents",
                new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8))));

        if (writer.getConfig().getOpenMode() == OpenMode.CREATE) {
            // New index, so we just add the document (no old document can be there):
            System.out.println("adding " + file);
            writer.addDocument(doc);
        } else {
            // Existing index (an old copy of this document may have been indexed) so
            // we use updateDocument instead to replace the old one matching the exact
            // path, if present:
            System.out.println("updating " + file);
            writer.updateDocument(new Term("path", file.toString()), doc);
        }
    }
}

From source file:com.dreamerpartner.codereview.lucene.IndexHelper.java

License:Apache License

/**
 * ?/*from w w w  .j  a va2  s  .com*/
 * @param module ?
 * @param doc
 * @param isNew
 * @param delTerm del
 * @throws IOException 
 */
@SuppressWarnings("deprecation")
public static void add(String module, Document doc, boolean isNew, Term delTerm) throws IOException {
    long beginTime = System.currentTimeMillis();
    IndexWriter writer = null;
    try {
        Directory dir = FSDirectory.open(new File(LuceneUtil.getIndexPath(module)));
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_4_10_0);
        IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_4_10_0, analyzer);
        iwc.setMaxBufferedDocs(100);
        iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
        //         iwc.setRAMBufferSizeMB(256.0);// ?
        writer = new IndexWriter(dir, iwc);
        if (isNew) {
            writer.addDocument(doc);
        } else {
            writer.updateDocument(delTerm, doc);
        }
        //???
        writer.commit();
    } finally {
        long endTime = System.currentTimeMillis();
        logger.debug("isNew:" + isNew + ", add consume " + (endTime - beginTime) + " milliseconds.");
        if (writer != null)
            writer.close();
    }
}

From source file:com.eden.lucene.IndexFiles.java

License:Apache License

/**
 * Indexes the given file using the given writer, or if a directory is given,
 * recurses over files and directories found under the given directory.
 * /*from  w w  w.  j  av  a 2 s .c  om*/
 * NOTE: This method indexes one document per input file.  This is slow.  For good
 * throughput, put multiple documents into your input file(s).  An example of this is
 * in the benchmark module, which can create "line doc" files, one document per line,
 * using the
 * <a href="../../../../../contrib-benchmark/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTask.html"
 * >WriteLineDocTask</a>.
 *  
 * @param writer Writer to the index where the given file/dir info will be stored
 * @param file The file to index, or the directory to recurse into to find files to index
 * @throws IOException If there is a low-level I/O error
 */
static void indexDocs(IndexWriter writer, File file) throws IOException {
    // do not try to index files that cannot be read
    if (file.canRead()) {
        if (file.isDirectory()) {
            String[] files = file.list();
            // an IO error could occur
            if (files != null) {
                for (int i = 0; i < files.length; i++) {
                    indexDocs(writer, new File(file, files[i]));
                }
            }
        } else {

            FileInputStream fis;
            try {
                fis = new FileInputStream(file);
            } catch (FileNotFoundException fnfe) {
                // at least on windows, some temporary files raise this exception with an "access denied" message
                // checking if the file can be read doesn't help
                return;
            }

            try {

                // make a new, empty document
                Document doc = new Document();

                // Add the path of the file as a field named "path".  Use a
                // field that is indexed (i.e. searchable), but don't tokenize 
                // the field into separate words and don't index term frequency
                // or positional information:
                Field pathField = new StringField("path", file.getPath(), Field.Store.YES);
                doc.add(pathField);

                // Add the last modified date of the file a field named "modified".
                // Use a LongField that is indexed (i.e. efficiently filterable with
                // NumericRangeFilter).  This indexes to milli-second resolution, which
                // is often too fine.  You could instead create a number based on
                // year/month/day/hour/minutes/seconds, down the resolution you require.
                // For example the long value 4 would mean
                // February 17, 1, 2-3 PM.
                doc.add(new LongField("modified", file.lastModified(), Field.Store.NO));

                // Add the contents of the file to a field named "contents".  Specify a Reader,
                // so that the text of the file is tokenized and indexed, but not stored.
                // Note that FileReader expects the file to be in UTF-8 encoding.
                // If that's not the case searching for special characters will fail.
                doc.add(new TextField("contents", new BufferedReader(new InputStreamReader(fis, "UTF-8"))));

                if (writer.getConfig().getOpenMode() == OpenMode.CREATE) {
                    // New index, so we just add the document (no old document can be there):
                    System.out.println("adding " + file);
                    writer.addDocument(doc);
                } else {
                    // Existing index (an old copy of this document may have been indexed) so 
                    // we use updateDocument instead to replace the old one matching the exact 
                    // path, if present:
                    System.out.println("updating " + file);
                    writer.updateDocument(new Term("path", file.getPath()), doc);
                }

            } finally {
                fis.close();
            }
        }
    }
}

From source file:com.ekinoks.lucene.introduction.demos.IndexFiles.java

License:Apache License

/**
 * Indexes the given file using the given writer, or if a directory is
 * given, recurses over files and directories found under the given
 * directory./*w  w  w  . j  a v a2s  . co m*/
 * 
 * NOTE: This method indexes one document per input file. This is slow. For
 * good throughput, put multiple documents into your input file(s). An
 * example of this is in the benchmark module, which can create "line doc"
 * files, one document per line, using the <a href=
 * "../../../../../contrib-benchmark/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTask.html"
 * >WriteLineDocTask</a>.
 * 
 * @param writer
 *            Writer to the index where the given file/dir info will be
 *            stored
 * @param file
 *            The file to index, or the directory to recurse into to find
 *            files to index
 * @throws IOException
 */
static void indexDocs(IndexWriter writer, File file) throws IOException {
    // do not try to index files that cannot be read
    if (file.canRead()) {
        if (file.isDirectory()) {
            String[] files = file.list();
            // an IO error could occur
            if (files != null) {
                for (int i = 0; i < files.length; i++) {
                    indexDocs(writer, new File(file, files[i]));
                }
            }
        } else {

            FileInputStream fis;
            try {
                fis = new FileInputStream(file);
            } catch (FileNotFoundException fnfe) {
                // at least on windows, some temporary files raise this
                // exception with an "access denied" message
                // checking if the file can be read doesn't help
                return;
            }

            try {

                // make a new, empty document
                Document doc = new Document();

                // Add the path of the file as a field named "path". Use a
                // field that is indexed (i.e. searchable), but don't
                // tokenize
                // the field into separate words and don't index term
                // frequency
                // or positional information:
                Field pathField = new Field("path", file.getPath(), Field.Store.YES,
                        Field.Index.NOT_ANALYZED_NO_NORMS);
                pathField.setOmitTermFreqAndPositions(true);
                doc.add(pathField);

                // Add the last modified date of the file a field named
                // "modified".
                // Use a NumericField that is indexed (i.e. efficiently
                // filterable with
                // NumericRangeFilter). This indexes to milli-second
                // resolution, which
                // is often too fine. You could instead create a number
                // based on
                // year/month/day/hour/minutes/seconds, down the resolution
                // you require.
                // For example the long value 2011021714 would mean
                // February 17, 2011, 2-3 PM.
                NumericField modifiedField = new NumericField("modified");
                modifiedField.setLongValue(file.lastModified());
                doc.add(modifiedField);

                // Add the contents of the file to a field named "contents".
                // Specify a Reader,
                // so that the text of the file is tokenized and indexed,
                // but not stored.
                // Note that FileReader expects the file to be in UTF-8
                // encoding.
                // If that's not the case searching for special characters
                // will fail.
                doc.add(new Field("contents", new BufferedReader(new InputStreamReader(fis, "UTF-8"))));

                if (writer.getConfig().getOpenMode() == OpenMode.CREATE) {
                    // New index, so we just add the document (no old
                    // document can be there):
                    System.out.println("adding " + file);
                    writer.addDocument(doc);
                } else {
                    // Existing index (an old copy of this document may have
                    // been indexed) so
                    // we use updateDocument instead to replace the old one
                    // matching the exact
                    // path, if present:
                    System.out.println("updating " + file);
                    writer.updateDocument(new Term("path", file.getPath()), doc);
                }

            } finally {
                fis.close();
            }
        }
    }
}

From source file:com.epimorphics.server.indexers.LuceneIndex.java

License:Apache License

/**
 * Builds a Lucene document from an RDF resource's properties and writes it
 * to the index, either as a fresh add or as a replacement keyed by the
 * resource URI.
 *
 * <p>Property handling: label properties are indexed under their own URI
 * (stored) and aggregated into the shared label field; label-only properties
 * are indexed unstored; other configured (or, if {@code indexAll}, any
 * non-ignored) properties are indexed as URI terms or literal values.
 */
private void indexEntity(IndexWriter iwriter, boolean update, String graphname, Resource entity)
        throws IOException {
    // Blank nodes have no stable URI to key an index entry on.
    if (entity.isAnon()) {
        return;
    }

    Document doc = new Document();
    doc.add(new StringField(FIELD_URI, entity.getURI(), Field.Store.YES));
    doc.add(new StringField(FIELD_GRAPH, graphname, Field.Store.YES));

    for (StmtIterator it = entity.listProperties(); it.hasNext();) {
        Statement stmt = it.next();
        Property prop = stmt.getPredicate();
        RDFNode object = stmt.getObject();
        String text = asString(object);

        if (labelProps.contains(prop)) {
            doc.add(new TextField(prop.getURI(), text, Field.Store.YES));
            doc.add(new TextField(FIELD_LABEL, text, Field.Store.NO));
        } else if (labelOnlyProps.contains(prop)) {
            doc.add(new TextField(prop.getURI(), text, Field.Store.NO));
            doc.add(new TextField(FIELD_LABEL, text, Field.Store.NO));
        } else if (valueProps.contains(prop) || (indexAll && !ignoreProps.contains(prop))) {
            if (object.isURIResource()) {
                doc.add(new StringField(prop.getURI(), object.asResource().getURI(), Field.Store.YES));
                // An alternative would share storage of URIs via doc values,
                // but that only allows one field per document.
            } else if (object.isLiteral()) {
                Object javaValue = object.asLiteral().getValue();
                if (javaValue instanceof Long || javaValue instanceof Integer) {
                    // Integral literals get a numeric field for range queries.
                    doc.add(new LongField(prop.getURI(), ((Number) javaValue).longValue(), Field.Store.YES));
                } else {
                    doc.add(new TextField(prop.getURI(), text, Field.Store.YES));
                }
            }
        }
    }

    if (update) {
        // Replace the existing entry for this URI, if any.
        iwriter.updateDocument(new Term(FIELD_URI, entity.getURI()), doc);
    } else {
        iwriter.addDocument(doc);
    }
}

From source file:com.esri.gpt.catalog.lucene.LuceneIndexAdapter.java

License:Apache License

/**
 * Publishes (adds or replaces) a metadata document within the Lucene index.
 *
 * <p>Optionally forwards the publish to a remote indexer, then — when local
 * writing is enabled — builds a Lucene Document from the schema's storeable
 * properties, the document XML, ACL values, database fields (SITEUUID, TITLE),
 * collection membership, and CSW brief/summary transforms, and finally writes
 * it with {@code updateDocument} keyed on the UUID so any existing entry is
 * replaced.
 *
 * @param uuid the document UUID (must be non-empty after trimming)
 * @param updateDate the update timestamp stored with the document
 * @param schema the evaluated schema of the document being published
 * @param acl the ACL string used to build document-level access values
 * @throws CatalogIndexException if any exception occurs while indexing
 */
@Override
public void publishDocument(String uuid, Timestamp updateDate, Schema schema, String acl)
        throws CatalogIndexException {
    uuid = Val.chkStr(uuid);
    if (uuid.length() == 0) {
        throw new IllegalArgumentException("The supplied document UUID was empty.");
    }

    // Forward to the remote indexer first, if configured.
    if (this.useRemoteWriter) {
        RemoteIndexer remoteIndexer = new RemoteIndexer();
        remoteIndexer.send(this.getRequestContext(), "publish", uuid);
    }
    if (!this.useLocalWriter)
        return;

    IndexWriter writer = null;
    PreparedStatement st = null;
    PreparedStatement stCol = null;
    try {

        // determine if the XML should always be stored within the index
        StringAttributeMap params = this.getRequestContext().getCatalogConfiguration().getParameters();
        String s = Val.chkStr(params.getValue("lucene.alwaysStoreXmlInIndex"));
        boolean alwaysStoreXmlInIndex = !s.equalsIgnoreCase("false");

        // determine if collections are being used
        List<String[]> collections = null;
        CollectionDao colDao = new CollectionDao(this.getRequestContext());
        boolean hasCollections = false;
        boolean useCollections = colDao.getUseCollections();
        String sColMemberTable = colDao.getCollectionMemberTableName();
        String sqlCol = "SELECT COLUUID FROM " + sColMemberTable + " WHERE DOCUUID=?";
        if (useCollections) {
            collections = colDao.queryCollections();
            hasCollections = (collections.size() > 0);
        }

        // determine the storeables (prefer the schema's indexable context;
        // fall back to the schema meaning when no indexables are defined)
        Document document = new Document();
        Storeables storeables = null;
        PropertyMeanings meanings = null;
        Indexables indexables = schema.getIndexables();
        if ((indexables != null) && (indexables.getIndexableContext() != null)) {
            meanings = indexables.getIndexableContext().getPropertyMeanings();
            storeables = (Storeables) indexables.getIndexableContext().getStoreables();
        }
        if (storeables == null) {
            useCollections = false;
            meanings = schema.getMeaning().getPropertyMeanings();
            storeables = (Storeables) schema.getMeaning().getStoreables();
        }

        // resolve the thumbnail URL          
        if (Val.chkStr(schema.getMeaning().getThumbnailUrl()).length() == 0) {
            String thumbBinary = Val.chkStr(schema.getMeaning().getThumbnailBinary());
            if ((thumbBinary != null) && (thumbBinary.length() > 0)) {
                String thumbUrl = "/thumbnail?uuid=" + URLEncoder.encode(uuid, "UTF-8");
                //IStoreable storeable = schema.getMeaning().getStoreables().get(Meaning.MEANINGTYPE_THUMBNAIL_URL);
                IStoreable storeable = storeables.get(Meaning.MEANINGTYPE_THUMBNAIL_URL);
                if (storeable != null) {
                    storeable.setValue(thumbUrl);
                } else {
                    storeables.ensure(meanings, Meaning.MEANINGTYPE_THUMBNAIL_URL).setValue(thumbUrl);
                }
            }
        }

        // build the ACL property for the document
        acl = Val.chkStr(acl);
        MetadataAcl oAcl = new MetadataAcl(this.getRequestContext());
        String[] aclValues = oAcl.makeDocumentAcl(acl);
        AclProperty aclProp = new AclProperty(Storeables.FIELD_ACL);
        aclProp.setValues(aclValues);

        // build the document to store 
        storeables.ensure(meanings, Storeables.FIELD_UUID).setValue(uuid);
        storeables.ensure(meanings, Storeables.FIELD_DATEMODIFIED).setValue(updateDate);
        storeables.add(aclProp);

        String fldName = null;
        Field fld = null;

        // document XML (stored but not indexed; needed when the XML is always
        // stored or a CSW brief transform is configured)
        String xml = Val.chkStr(schema.getActiveDocumentXml());
        String testBrief = Val.chkStr(schema.getCswBriefXslt());
        if (alwaysStoreXmlInIndex || (testBrief.length() > 0)) {
            fldName = Storeables.FIELD_XML;
            LOGGER.log(Level.FINER, "Appending field: {0}", fldName);
            fld = new Field(fldName, xml, Field.Store.YES, Field.Index.NO, Field.TermVector.NO);
            document.add(fld);
        }

        // add additional indexable fields based upon the SQL database record
        boolean bReadDB = true;
        if (bReadDB) {
            CatalogConfiguration cfg = this.getRequestContext().getCatalogConfiguration();
            this.getRequestContext().getCatalogConfiguration().getResourceTableName();
            String sql = "SELECT SITEUUID, TITLE FROM " + cfg.getResourceTableName() + " WHERE DOCUUID=?";
            Connection con = this.returnConnection().getJdbcConnection();
            this.logExpression(sql);
            st = con.prepareStatement(sql);
            st.setString(1, uuid);
            ResultSet rs = st.executeQuery();
            if (rs.next()) {

                String dbVal = Val.chkStr(rs.getString("SITEUUID"));
                if (dbVal.length() > 0) {
                    //storeables.ensure(meanings,Storeables.FIELD_SITEUUID).setValue(dbVal);
                    fldName = Storeables.FIELD_SITEUUID;
                    LOGGER.log(Level.FINER, "Appending field: {0} ={1}", new Object[] { fldName, dbVal });
                    fld = new Field(fldName, dbVal, Field.Store.YES, Field.Index.NOT_ANALYZED,
                            Field.TermVector.NO);
                    document.add(fld);
                }

                dbVal = Val.chkStr(rs.getString("TITLE"));
                if (dbVal.length() > 0) {
                    // if the title is found and is different than that in the database
                    // it means that title from the database is typed by the user. In
                    // that case make 'title.org' element based on the current title.
                    IStoreable iTitle = storeables.get(Meaning.MEANINGTYPE_TITLE);
                    if (iTitle != null) {
                        Object[] values = iTitle.getValues();
                        if (values.length > 0 && values[0] instanceof String) {
                            String val = (String) values[0];
                            if (!val.equals(dbVal)) {
                                storeables.ensure(meanings, Meaning.MEANINGTYPE_TITLE_ORG).setValue(val);
                            }
                        }
                    }
                    // ensure the title from the database
                    storeables.ensure(meanings, Meaning.MEANINGTYPE_TITLE).setValue(dbVal);
                }

            }
            st.close();
            st = null;

            // determine collection membership
            if (useCollections && hasCollections) {
                ArrayList<String> alCol = new ArrayList<String>();
                stCol = con.prepareStatement(sqlCol);
                stCol.setString(1, uuid);
                ResultSet rsCol = stCol.executeQuery();
                while (rsCol.next()) {
                    String sCUuid = rsCol.getString(1);
                    for (String[] col : collections) {
                        if (sCUuid.equals(col[0])) {
                            alCol.add(col[1]);
                            break;
                        }
                    }
                }
                stCol.close();
                stCol = null;
                if (alCol.size() > 0) {
                    fldName = "isPartOf";
                    Storeable storeable = (Storeable) storeables.ensure(meanings, fldName);
                    if (storeable == null) {
                        // TODO: add a warning message to the log
                    } else {
                        indexables.getIndexableContext().addStorableValues(meanings.get(fldName),
                                alCol.toArray(new String[0]));
                    }
                }
            }
        }

        // append every storeable property to the Lucene document
        for (IStoreable ist : storeables.collection()) {
            Storeable storeable = (Storeable) ist;
            storeable.appendForWrite(document);
        }

        // schema key
        String schemaKey = Val.chkStr(schema.getKey());
        if (schemaKey.length() > 0) {
            fldName = Storeables.FIELD_SCHEMA_KEY;
            LOGGER.log(Level.FINER, "Appending field: {0} ={1}", new Object[] { fldName, schemaKey });
            fld = new Field(fldName, schemaKey, Field.Store.YES, Field.Index.NOT_ANALYZED, Field.TermVector.NO);
            document.add(fld);
        }

        // cswOutputSchema, cswBriefXml, cswSummaryXml
        String cswOutputSchema = Val.chkStr(schema.getCswOutputSchema());
        if (cswOutputSchema.length() > 0) {
            fldName = Storeables.FIELD_SCHEMA;
            LOGGER.log(Level.FINER, "Appending field: {0} ={1}", new Object[] { fldName, cswOutputSchema });
            fld = new Field(fldName, cswOutputSchema, Field.Store.YES, Field.Index.NOT_ANALYZED,
                    Field.TermVector.NO);
            document.add(fld);
        }
        String briefXslt = Val.chkStr(schema.getCswBriefXslt());
        if (briefXslt.length() > 0) {
            MetadataDocument mdDoc = new MetadataDocument();
            String briefXml = mdDoc.transform(xml, briefXslt);
            fldName = Storeables.FIELD_XML_BRIEF;
            LOGGER.log(Level.FINER, "Appending field: {0}", fldName);
            fld = new Field(fldName, briefXml, Field.Store.YES, Field.Index.NO, Field.TermVector.NO);
            document.add(fld);
        }
        String summaryXslt = Val.chkStr(schema.getCswSummaryXslt());
        if (summaryXslt.length() > 0) {
            MetadataDocument mdDoc = new MetadataDocument();
            String summaryXml = mdDoc.transform(xml, summaryXslt);
            fldName = Storeables.FIELD_XML_SUMMARY;
            LOGGER.log(Level.FINER, "Appending field: {0}", fldName);
            fld = new Field(fldName, summaryXml, Field.Store.YES, Field.Index.NO, Field.TermVector.NO);
            document.add(fld);
        }

        // check for to see if a batch IndexWriter has been placed within the RequestContext objectMap,
        // this is useful for batch processes where open/close/optimize on the index is costly
        IndexWriter batchWriter = null;
        Object o = this.getRequestContext().getObjectMap().get(LuceneIndexAdapter.BATCH_INDEXWRITER_KEY);
        if (o != null && (o instanceof IndexWriter)) {
            batchWriter = (IndexWriter) o;
        }

        // write the document (use update to replace an existing document),
        Term term = new Term(Storeables.FIELD_UUID, uuid);
        this.getObservers().onDocumentUpdate(document, uuid);
        if (batchWriter != null) {
            batchWriter.updateDocument(term, document);
        } else {
            writer = newWriter();
            writer.updateDocument(term, document);
        }
        this.getObservers().onDocumentUpdated(document, uuid);

    } catch (Exception e) {
        String sMsg = "Error indexing document:\n " + Val.chkStr(e.getMessage());
        throw new CatalogIndexException(sMsg, e);
    } finally {
        // best-effort cleanup; statement close failures are deliberately ignored
        try {
            if (st != null)
                st.close();
        } catch (Exception ef) {
        }
        try {
            if (stCol != null)
                stCol.close();
        } catch (Exception ef) {
        }
        // NOTE(review): when a single shared searcher is used, the writer is
        // only closed here if auto-commit is enabled — presumably it is closed
        // elsewhere otherwise; confirm against the adapter's writer lifecycle.
        if (this.useSingleSearcher) {
            if (this.getAutoCommitSingleWriter()) {
                closeWriter(writer);
            }
        } else {
            closeWriter(writer);
        }
    }
}

From source file:com.esri.gpt.server.assertion.index.AsnIndexAdapter.java

License:Apache License

/**
 * Writes (or replaces) the index entry for a single assertion.
 *
 * <p>{@code updateDocument} is keyed on the assertion's system id, so
 * re-indexing the same assertion replaces the prior entry rather than
 * duplicating it.
 *
 * @param context the assertion operation context
 * @param assertion the assertion to index
 * @throws CorruptIndexException if the index is corrupt
 * @throws LockObtainFailedException if a write lock cannot be obtained
 * @throws IOException if an I/O exception occurs
 */
public void index(AsnContext context, Assertion assertion)
        throws CorruptIndexException, LockObtainFailedException, IOException {
    IndexWriter writer = null;
    try {
        Document document = assertion.makeWritableDocument(context);
        Term idTerm = new Term(AsnConstants.FIELD_SYS_ASSERTIONID,
                assertion.getSystemPart().getAssertionId());
        writer = this.makeIndexWriter(this.newAnalyzer(context));
        writer.updateDocument(idTerm, document);
    } finally {
        // Always release the writer, even if document construction fails.
        this.closeWriter(writer);
    }
}