Example usage for org.apache.lucene.index IndexWriter updateDocument

List of usage examples for org.apache.lucene.index IndexWriter updateDocument

Introduction

In this page you can find the example usage for org.apache.lucene.index IndexWriter updateDocument.

Prototype

public long updateDocument(Term term,
            Iterable<? extends IndexableField> doc) throws IOException 

Source Link

Usage

From source file:org.silverpeas.core.index.indexing.model.IndexManager.java

License:Open Source License

/**
 * Indexes the given entry with the supplied writer, replacing any previously
 * indexed document that shares the same primary key.
 *
 * @param writer the Lucene writer the document is stored through
 * @param indexEntry the full index entry to (re)index
 */
private void index(IndexWriter writer, FullIndexEntry indexEntry) {
    try {
        // The entry's PK string is the unique key: updateDocument atomically
        // deletes any older document with this key and adds the new one.
        Term key = new Term(KEY, indexEntry.getPK().toString());
        writer.updateDocument(key, makeDocument(indexEntry));
    } catch (Exception e) {
        // Best-effort indexing: failures are logged and swallowed so a single
        // bad entry does not interrupt the caller.
        SilverLogger.getLogger(this).error(e.getMessage(), e);
    }
}

From source file:org.sonatype.nexus.index.context.DefaultIndexingContext.java

License:Open Source License

/**
 * Writes (or refreshes) the single descriptor document identifying this
 * index: an exact-match marker field plus a stored version/repository-id
 * payload.
 *
 * @throws IOException if the descriptor cannot be written
 */
private void storeDescriptor() throws IOException {
    Document descriptor = new Document();

    // Marker: stored and indexed as one untokenized token so the descriptor
    // can be located exactly via DESCRIPTOR_TERM.
    descriptor.add(
            new Field(FLD_DESCRIPTOR, FLD_DESCRIPTOR_CONTENTS, Field.Store.YES, Field.Index.UN_TOKENIZED));

    // Informational payload: index version + repository id, stored only.
    descriptor.add(new Field(FLD_IDXINFO, VERSION + ArtifactInfo.FS + getRepositoryId(), Field.Store.YES,
            Field.Index.NO));

    IndexWriter writer = getIndexWriter();

    // updateDocument replaces any earlier descriptor instead of duplicating it.
    writer.updateDocument(DESCRIPTOR_TERM, descriptor);
    writer.flush();
}

From source file:org.sonatype.nexus.index.context.IndexUtils.java

License:Open Source License

/**
 * Stores the group list for this context as a single document, keyed by
 * (groupField, groupFieldValue) so an existing list is replaced rather than
 * duplicated.
 *
 * @throws IOException on index write failure
 * @throws CorruptIndexException if the index is corrupt
 */
static void setGroups(IndexingContext context, Collection<String> groups, String groupField,
        String groupFieldValue, String groupListField) throws IOException, CorruptIndexException {
    IndexWriter writer = context.getIndexWriter();

    Term groupsKey = new Term(groupField, groupFieldValue);
    Document groupsDocument = createGroupsDocument(groups, groupField, groupFieldValue, groupListField);

    writer.updateDocument(groupsKey, groupsDocument);
    writer.flush();
}

From source file:org.sonatype.nexus.index.DefaultIndexerEngine.java

License:Open Source License

/**
 * Re-indexes a single artifact in the given context, replacing any stale
 * document for the same artifact, then flushes and refreshes the context
 * timestamp. Does nothing when the artifact yields no document.
 *
 * @throws IOException on index write failure
 */
public void update(IndexingContext context, ArtifactContext ac) throws IOException {
    Document document = ac.createDocument(context);

    if (document == null) {
        // Nothing indexable for this artifact.
        return;
    }

    IndexWriter writer = context.getIndexWriter();

    // UINFO uniquely identifies the artifact, so this replaces any old entry.
    Term uinfoKey = new Term(ArtifactInfo.UINFO, ac.getArtifactInfo().getUinfo());
    writer.updateDocument(uinfoKey, document);

    updateGroups(context, ac);

    writer.flush();

    context.updateTimestamp();
}

From source file:org.sonatype.nexus.ReindexIT.java

License:Open Source License

/**
 * Shifts every indexed artifact's LAST_MODIFIED value (and the context
 * timestamp) by the given number of days, to simulate an index of a
 * different age in tests. A zero shift is a no-op.
 *
 * @param ctx the indexing context whose documents are rewritten in place
 * @param shiftDays number of days to shift; may be negative
 * @throws IOException on index read/write failure
 */
protected void shiftContextInTime(IndexingContext ctx, int shiftDays) throws IOException {
    if (shiftDays != 0) {
        final IndexWriter iw = ctx.getIndexWriter();
        final IndexSearcher is = ctx.acquireIndexSearcher();
        try {
            final IndexReader ir = is.getIndexReader();
            // Walk every document slot, skipping deleted ones.
            for (int docNum = 0; docNum < ir.maxDoc(); docNum++) {
                if (!ir.isDeleted(docNum)) {
                    Document doc = ir.document(docNum);

                    String lastModified = doc.get(ArtifactInfo.LAST_MODIFIED);

                    if (lastModified != null) {
                        long lm = Long.parseLong(lastModified);

                        lm = lm + (shiftDays * A_DAY_MILLIS);

                        // Replace LAST_MODIFIED with the shifted value
                        // (stored only, not indexed).
                        doc.removeFields(ArtifactInfo.LAST_MODIFIED);

                        doc.add(new Field(ArtifactInfo.LAST_MODIFIED, Long.toString(lm), Field.Store.YES,
                                Field.Index.NO));

                        // UINFO uniquely keys the artifact, so the old
                        // document is replaced rather than duplicated.
                        iw.updateDocument(new Term(ArtifactInfo.UINFO, doc.get(ArtifactInfo.UINFO)), doc);
                    }
                }
            }

            ctx.optimize();

            ctx.commit();

            // shift timestamp too
            if (ctx.getTimestamp() != null) {
                ctx.updateTimestamp(true, new Date(ctx.getTimestamp().getTime() + (shiftDays * A_DAY_MILLIS)));
            } else {
                ctx.updateTimestamp(true, new Date(System.currentTimeMillis() + (shiftDays * A_DAY_MILLIS)));
            }
        } finally {
            // Always release the searcher acquired above.
            ctx.releaseIndexSearcher(is);
        }
    }
}

From source file:org.sonatype.nexus.ReindexLRTest.java

License:Open Source License

/**
 * Shifts every indexed artifact's LAST_MODIFIED value (and the context
 * timestamp) by the given number of days, so tests can simulate an index of
 * a different age. A zero shift is a no-op.
 *
 * @param ctx the indexing context whose documents are rewritten in place
 * @param shiftDays number of days to shift; may be negative
 * @throws IOException on index read/write failure
 */
protected void shiftContextInTime(IndexingContext ctx, int shiftDays) throws IOException {
    if (shiftDays != 0) {
        IndexWriter iw = ctx.getIndexWriter();

        // NOTE(review): ctx.getIndexReader() is called on every access rather
        // than hoisted to a local — presumably it returns a live/refreshed
        // reader; confirm before refactoring.
        for (int docNum = 0; docNum < ctx.getIndexReader().maxDoc(); docNum++) {
            if (!ctx.getIndexReader().isDeleted(docNum)) {
                Document doc = ctx.getIndexReader().document(docNum);

                String lastModified = doc.get(ArtifactInfo.LAST_MODIFIED);

                if (lastModified != null) {
                    long lm = Long.parseLong(lastModified);

                    lm = lm + (shiftDays * A_DAY_MILLIS);

                    // Replace LAST_MODIFIED with the shifted value
                    // (stored only, not indexed).
                    doc.removeFields(ArtifactInfo.LAST_MODIFIED);

                    doc.add(new Field(ArtifactInfo.LAST_MODIFIED, Long.toString(lm), Field.Store.YES,
                            Field.Index.NO));

                    // UINFO uniquely keys the artifact, so the old document
                    // is replaced rather than duplicated.
                    iw.updateDocument(new Term(ArtifactInfo.UINFO, doc.get(ArtifactInfo.UINFO)), doc);
                }
            }
        }

        ctx.optimize();

        ctx.commit();

        // shift timestamp too
        if (ctx.getTimestamp() != null) {
            ctx.updateTimestamp(true, new Date(ctx.getTimestamp().getTime() + (shiftDays * A_DAY_MILLIS)));
        } else {
            ctx.updateTimestamp(true, new Date(System.currentTimeMillis() + (shiftDays * A_DAY_MILLIS)));
        }
    }
}

From source file:org.wlr.lucene.learning.IndexFiles.java

License:Apache License

/**
 * Indexes a single file as one Lucene document with four fields: its path,
 * last-modified time, tokenized contents, and a fixed "ext" marker. Adds the
 * document when the writer is creating a fresh index, otherwise replaces any
 * existing document with the same path.
 *
 * @param writer the index writer to add/update through
 * @param file the file to index
 * @param lastModified the file's last-modified time in milliseconds
 * @throws IOException if the file cannot be read or the index written
 */
static void indexDoc(IndexWriter writer, Path file, long lastModified) throws IOException {
    try (InputStream stream = Files.newInputStream(file)) {
        Document document = new Document();

        // "path": indexed as a single exact token (not tokenized) and stored,
        // so it can serve as the document's lookup key.
        document.add(new StringField("path", file.toString(), Field.Store.YES));

        // "modified": a LongPoint, efficiently range-filterable via
        // PointRangeQuery. Millisecond resolution; coarsen (e.g. to
        // yyyymmddHH) if that is too fine for your queries.
        document.add(new LongPoint("modified", lastModified));

        // "contents": tokenized and indexed but not stored. The stream is
        // decoded as UTF-8; searches on special characters will miss if the
        // file is in another encoding.
        document.add(new TextField("contents",
                new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8))));

        // "ext": a fixed, stored, untokenized marker field.
        document.add(new StringField("ext", "synchronized", Field.Store.YES));

        boolean freshIndex = writer.getConfig().getOpenMode() == IndexWriterConfig.OpenMode.CREATE;
        if (freshIndex) {
            // A brand-new index cannot hold an older copy: plain add.
            System.out.println("adding " + file);
            writer.addDocument(document);
        } else {
            // Replace any previously indexed document with this exact path.
            System.out.println("updating " + file);
            writer.updateDocument(new Term("path", file.toString()), document);
        }
    }
}

From source file:org.wso2.carbon.analytics.dataservice.core.indexing.AnalyticsDataIndexer.java

License:Open Source License

/**
 * Re-indexes a batch of records into the local index of the given shard.
 * Each record is keyed by its id, so an existing document with the same id
 * is replaced rather than duplicated. Commits both the main and taxonomy
 * writers, then reports stats when enabled.
 *
 * @param shardIndex the local shard the batch belongs to
 * @param recordBatch records to (re)index; all from the same tenant/table
 * @param columns column definitions used to build the index documents
 * @throws AnalyticsIndexException wrapping any underlying I/O failure
 */
private void updateIndex(int shardIndex, List<Record> recordBatch, Map<String, ColumnDefinition> columns)
        throws AnalyticsIndexException {
    if (log.isDebugEnabled()) {
        log.debug("Updating data in local index [" + shardIndex + "]: " + recordBatch.size());
    }
    // All records in a batch share tenant and table; resolve the target
    // index from the first one.
    Record first = recordBatch.get(0);
    String tableId = this.generateTableId(first.getTenantId(), first.getTableName());
    IndexWriter writer = this.lookupIndexWriter(shardIndex, tableId);
    TaxonomyWriter taxonomy = this.lookupTaxonomyIndexWriter(shardIndex, tableId);
    try {
        for (Record record : recordBatch) {
            Term idTerm = new Term(INDEX_ID_INTERNAL_FIELD, record.getId());
            writer.updateDocument(idTerm, this.generateIndexDoc(record, columns, taxonomy).getFields());
        }
        writer.commit();
        taxonomy.commit();
        if (this.isIndexingStatsEnabled()) {
            this.statsCollector.processedRecords(recordBatch.size());
        }
    } catch (IOException e) {
        throw new AnalyticsIndexException("Error in updating index: " + e.getMessage(), e);
    }
}

From source file:org.wso2.carbon.analytics.dataservice.indexing.AnalyticsDataIndexer.java

License:Open Source License

/**
 * Re-indexes a batch of records into a per-shard table index. Each record is
 * keyed by its id so an existing document with the same id is replaced. The
 * writer is committed on success and always closed afterwards.
 *
 * @param tenantId owning tenant of the records
 * @param tableName table the records belong to
 * @param recordBatch records to (re)index
 * @param columns column-name to index-type mapping used to build documents
 * @param shardId shard the batch is written to
 * @throws AnalyticsIndexException wrapping any underlying I/O failure
 */
private void updateIndex(int tenantId, String tableName, List<Record> recordBatch,
        Map<String, IndexType> columns, String shardId) throws AnalyticsIndexException {
    String shardedTableId = this.generateShardedTableId(tenantId, tableName, shardId);
    IndexWriter writer = this.createIndexWriter(shardedTableId);
    try {
        for (Record record : recordBatch) {
            Term idTerm = new Term(INDEX_ID_INTERNAL_FIELD, record.getId());
            writer.updateDocument(idTerm, this.generateIndexDoc(record, columns).getFields());
        }
        writer.commit();
    } catch (IOException e) {
        throw new AnalyticsIndexException("Error in updating index: " + e.getMessage(), e);
    } finally {
        try {
            // Writer is created per call, so it must be closed here.
            writer.close();
        } catch (IOException e) {
            log.error("Error closing index writer: " + e.getMessage(), e);
        }
    }
}

From source file:org.wzy.ir.IndexFiles.java

License:Apache License

/**
 * Indexes a single file as one Lucene document, decoding its contents with
 * the supplied charset name. Adds the document when the writer is creating a
 * fresh index, otherwise replaces any existing document with the same path.
 *
 * @param writer the index writer to add/update through
 * @param file the file to index
 * @param lastModified the file's last-modified time in milliseconds
 * @param code charset name used to decode the file's contents
 * @throws IOException if the file cannot be read, the charset is
 *         unsupported, or the index cannot be written
 */
static void indexDoc(IndexWriter writer, Path file, long lastModified, String code) throws IOException {
    try (InputStream stream = Files.newInputStream(file)) {
        Document document = new Document();

        // "path": indexed as a single exact token (not tokenized) and stored,
        // so it can serve as the document's lookup key.
        document.add(new StringField("path", file.toString(), Field.Store.YES));

        // "modified": a LongPoint, efficiently range-filterable via
        // PointRangeQuery. Millisecond resolution; coarsen (e.g. to
        // yyyymmddHH) if that is too fine for your queries.
        document.add(new LongPoint("modified", lastModified));

        // "contents": tokenized and indexed but not stored; decoded with the
        // caller-supplied encoding instead of assuming UTF-8.
        document.add(new TextField("contents",
                new BufferedReader(new InputStreamReader(stream, code))));

        if (writer.getConfig().getOpenMode() == OpenMode.CREATE) {
            // A brand-new index cannot hold an older copy: plain add.
            System.out.println("adding " + file);
            writer.addDocument(document);
        } else {
            // Replace any previously indexed document with this exact path.
            System.out.println("updating " + file);
            writer.updateDocument(new Term("path", file.toString()), document);
        }
    }
}