Example usage for org.apache.lucene.document.StoredField: StoredField(String, byte[], int, int)

Introduction

This page lists example usages of the org.apache.lucene.document.StoredField constructor StoredField(String name, byte[] value, int offset, int length).

Prototype

public StoredField(String name, byte[] value, int offset, int length) 

Document

Create a stored-only field with the given binary value.
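
The offset/length overload stores only the length bytes of value starting at offset, which makes it convenient with reusable, oversized serialization buffers. A minimal, self-contained sketch (the field name and values are illustrative, not taken from any of the sources below):

import org.apache.lucene.document.Document;
import org.apache.lucene.document.StoredField;

public class StoredFieldSliceExample {
    public static void main(String[] args) {
        byte[] buffer = new byte[1024];   // oversized, reusable buffer
        buffer[0] = 1;
        buffer[1] = 2;
        buffer[2] = 3;
        int count = 3;                    // bytes actually written

        Document doc = new Document();
        // Stores only buffer[0..count); the field is stored-only,
        // neither indexed nor searchable.
        doc.add(new StoredField("payload", buffer, 0, count));
        System.out.println(doc.getField("payload").binaryValue().length); // 3
    }
}

Note that, per the Lucene javadoc, the provided byte array is not copied, so the buffer must not be modified until indexing of the document is complete.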

Usage

From source file: com.vmware.dcp.services.common.LuceneBlobIndexService.java

License: Open Source License

protected void handlePost(Operation post) {
    if (post.isRemote()) {
        post.fail(new IllegalStateException("Remote requests not allowed"));
        return;
    }

    Map<String, String> params = UriUtils.parseUriQueryParams(post.getUri());
    String key = params.get(URI_PARAM_NAME_KEY);
    if (key == null) {
        post.fail(new IllegalArgumentException("key query parameter is required"));
        return;
    }

    String updateTimeParam = params.get(URI_PARAM_NAME_UPDATE_TIME);

    if (updateTimeParam == null) {
        post.fail(new IllegalArgumentException("update time query parameter is required"));
        return;
    }

    long updateTime = Long.parseLong(updateTimeParam);
    IndexWriter wr = this.writer;
    if (wr == null) {
        post.fail(new CancellationException());
        return;
    }

    try {
        Object content = post.getBodyRaw();
        if (content == null) {
            post.fail(new IllegalArgumentException("service instance is required"));
            return;
        }
        byte[] binaryContent = new byte[this.maxBinaryContextSizeBytes];
        int count = Utils.toBytes(content, binaryContent, 0);
        Document doc = new Document();
        Field binaryContentField = new StoredField(LUCENE_FIELD_NAME_BINARY_CONTENT, binaryContent, 0, count);
        doc.add(binaryContentField);
        Field keyField = new StringField(URI_PARAM_NAME_KEY, key, Field.Store.NO);
        doc.add(keyField);

        Field updateTimeField = new LongField(URI_PARAM_NAME_UPDATE_TIME, updateTime, this.longStoredField);
        doc.add(updateTimeField);
        wr.addDocument(doc);
        this.indexUpdateTimeMicros = Utils.getNowMicrosUtc();
        post.setBody(null).complete();
    } catch (Throwable e) {
        logSevere(e);
        post.fail(e);
    }
}
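
The service above serializes into an oversized reusable buffer and stores only the first count bytes through the offset/length constructor. A minimal read-back sketch, not part of the original service: searcher is an assumed IndexSearcher over the same index, and Utils.fromBytes is assumed here as the inverse of Utils.toBytes:

TopDocs hits = searcher.search(new TermQuery(new Term(URI_PARAM_NAME_KEY, key)), 1);
if (hits.scoreDocs.length > 0) {
    Document hit = searcher.doc(hits.scoreDocs[0].doc);
    // The returned BytesRef covers exactly the count bytes stored at
    // indexing time, not the full oversized buffer.
    BytesRef stored = hit.getBinaryValue(LUCENE_FIELD_NAME_BINARY_CONTENT);
    Object content = Utils.fromBytes(stored.bytes, stored.offset, stored.length); // name assumed
}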

From source file: com.vmware.dcp.services.common.LuceneDocumentIndexService.java

License: Open Source License

private void addBinaryStateFieldToDocument(ServiceDocument s, ServiceDocumentDescription desc, Document doc) {
    try {
        byte[] content = Utils.getBuffer(desc.serializedStateSizeLimit);
        int count = Utils.toBytes(s, content, 0);
        Field bodyField = new StoredField(LUCENE_FIELD_NAME_BINARY_SERIALIZED_STATE, content, 0, count);
        doc.add(bodyField);
    } catch (KryoException ke) {
        throw new IllegalArgumentException(
                "Failure serializing state of service " + s.documentSelfLink + ", possibly due to size limit."
                        + " Service author should override getDocumentTemplate() and adjust"
                        + " ServiceDocumentDescription.serializedStateSizeLimit. Cause: " + ke.toString());
    }
}

From source file: com.vmware.xenon.services.common.LuceneBlobIndexService.java

License: Open Source License

public void handlePost(Operation post) {
    if (post.isRemote()) {
        post.fail(new IllegalStateException("Remote requests not allowed"));
        return;
    }

    Map<String, String> params = UriUtils.parseUriQueryParams(post.getUri());
    String key = params.get(URI_PARAM_NAME_KEY);
    if (key == null) {
        post.fail(new IllegalArgumentException("key query parameter is required"));
        return;
    }

    String updateTimeParam = params.get(URI_PARAM_NAME_UPDATE_TIME);

    if (updateTimeParam == null) {
        post.fail(new IllegalArgumentException("update time query parameter is required"));
        return;
    }

    long updateTime = Long.parseLong(updateTimeParam);
    IndexWriter wr = this.writer;
    if (wr == null) {
        post.fail(new CancellationException());
        return;
    }

    try {
        Object content = post.getBodyRaw();
        if (content == null) {
            post.fail(new IllegalArgumentException("service instance is required"));
            return;
        }

        byte[] binaryContent = getBuffer();
        int count = Utils.toBytes(content, binaryContent, 0);
        Document doc = new Document();
        Field binaryContentField = new StoredField(LUCENE_FIELD_NAME_BINARY_CONTENT, binaryContent, 0, count);
        doc.add(binaryContentField);
        Field keyField = new StringField(URI_PARAM_NAME_KEY, key, Field.Store.NO);
        doc.add(keyField);

        LuceneDocumentIndexService.addNumericField(doc, URI_PARAM_NAME_UPDATE_TIME, updateTime, true);

        wr.addDocument(doc);
        this.indexUpdateTimeMicros = Utils.getNowMicrosUtc();
        post.setBody(null).complete();
    } catch (Throwable e) {
        logSevere(e);
        post.fail(e);
    }
}

From source file: com.vmware.xenon.services.common.LuceneDocumentIndexService.java

License: Open Source License

private void addBinaryStateFieldToDocument(ServiceDocument s, byte[] serializedDocument,
        ServiceDocumentDescription desc, Document doc) {
    try {
        int count = 0;
        if (serializedDocument == null) {
            serializedDocument = KryoSerializers.getBuffer(desc.serializedStateSizeLimit);
            count = KryoSerializers.serializeObjectForIndexing(s, serializedDocument, 0);
        } else {
            count = serializedDocument.length;
        }
        Field bodyField = new StoredField(LUCENE_FIELD_NAME_BINARY_SERIALIZED_STATE, serializedDocument, 0,
                count);
        doc.add(bodyField);
    } catch (KryoException ke) {
        throw new IllegalArgumentException(
                "Failure serializing state of service " + s.documentSelfLink + ", possibly due to size limit."
                        + " Service author should override getDocumentTemplate() and adjust"
                        + " ServiceDocumentDescription.serializedStateSizeLimit. Cause: " + ke.toString());
    }
}

From source file: com.vmware.xenon.services.common.LuceneIndexDocumentHelper.java

License: Open Source License

public void addBinaryStateFieldToDocument(ServiceDocument s, byte[] serializedDocument,
        ServiceDocumentDescription desc) {
    try {
        int count = 0;
        if (serializedDocument == null) {
            Output o = KryoSerializers.serializeDocumentForIndexing(s, desc.serializedStateSizeLimit);
            count = o.position();
            serializedDocument = o.getBuffer();
        } else {
            count = serializedDocument.length;
        }
        Field bodyField = new StoredField(LuceneDocumentIndexService.LUCENE_FIELD_NAME_BINARY_SERIALIZED_STATE,
                serializedDocument, 0, count);
        this.doc.add(bodyField);
    } catch (KryoException ke) {
        throw new IllegalArgumentException(
                "Failure serializing state of service " + s.documentSelfLink + ", possibly due to size limit."
                        + " Service author should override getDocumentTemplate() and adjust"
                        + " ServiceDocumentDescription.serializedStateSizeLimit. Cause: " + ke.toString());
    }
}

From source file: org.elasticsearch.index.engine.EngineTestCase.java

License: Apache License

protected static ParsedDocument testParsedDocument(String id, String routing, ParseContext.Document document,
        BytesReference source, Mapping mappingUpdate) {
    Field uidField = new Field("_id", Uid.encodeId(id), IdFieldMapper.Defaults.FIELD_TYPE);
    Field versionField = new NumericDocValuesField("_version", 0);
    SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
    document.add(uidField);
    document.add(versionField);
    document.add(seqID.seqNo);
    document.add(seqID.seqNoDocValue);
    document.add(seqID.primaryTerm);
    BytesRef ref = source.toBytesRef();
    document.add(new StoredField(SourceFieldMapper.NAME, ref.bytes, ref.offset, ref.length));
    return new ParsedDocument(versionField, seqID, id, "test", routing, Arrays.asList(document), source,
            XContentType.JSON, mappingUpdate);
}
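
Here source.toBytesRef() may return a view over a slice of a larger shared array, which is why passing ref.offset and ref.length (rather than 0 and ref.bytes.length) is essential; the latter would store the surrounding bytes as well. A standalone sketch of the slice behavior, with illustrative values only:

import java.nio.charset.StandardCharsets;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.util.BytesRef;

public class BytesRefSliceExample {
    public static void main(String[] args) {
        byte[] backing = "xxxx{\"a\":1}yyyy".getBytes(StandardCharsets.UTF_8);
        // A BytesRef viewing only the 7-byte JSON slice of the backing array.
        BytesRef ref = new BytesRef(backing, 4, 7);

        Document doc = new Document();
        // Stores exactly the slice, not the padding around it.
        doc.add(new StoredField("_source", ref.bytes, ref.offset, ref.length));
        System.out.println(doc.getField("_source").binaryValue().utf8ToString()); // {"a":1}
    }
}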

From source file: org.elasticsearch.index.mapper.internal.SourceFieldMapper.java

License: Apache License

@Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
    if (!enabled) {
        return;
    }
    if (!fieldType.stored()) {
        return;
    }
    if (context.flyweight()) {
        return;
    }
    BytesReference source = context.source();

    boolean filtered = (includes != null && includes.length > 0) || (excludes != null && excludes.length > 0);
    if (filtered) {
        // we don't update the context source if we filter, we want to keep it as is...

        Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(source, true);
        Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), includes, excludes);
        BytesStreamOutput bStream = new BytesStreamOutput();
        StreamOutput streamOutput = bStream;
        if (compress != null && compress && (compressThreshold == -1 || source.length() > compressThreshold)) {
            streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream);
        }
        XContentType contentType = formatContentType;
        if (contentType == null) {
            contentType = mapTuple.v1();
        }
        XContentBuilder builder = XContentFactory.contentBuilder(contentType, streamOutput).map(filteredSource);
        builder.close();

        source = bStream.bytes();
    } else if (compress != null && compress && !CompressorFactory.isCompressed(source)) {
        if (compressThreshold == -1 || source.length() > compressThreshold) {
            BytesStreamOutput bStream = new BytesStreamOutput();
            XContentType contentType = XContentFactory.xContentType(source);
            if (formatContentType != null && formatContentType != contentType) {
                XContentBuilder builder = XContentFactory.contentBuilder(formatContentType,
                        CompressorFactory.defaultCompressor().streamOutput(bStream));
                builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(source));
                builder.close();
            } else {
                StreamOutput streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream);
                source.writeTo(streamOutput);
                streamOutput.close();
            }
            source = bStream.bytes();
            // update the data in the context, so it can be compressed and stored compressed outside...
            context.source(source);
        }
    } else if (formatContentType != null) {
        // see if we need to convert the content type
        Compressor compressor = CompressorFactory.compressor(source);
        if (compressor != null) {
            CompressedStreamInput compressedStreamInput = compressor.streamInput(source.streamInput());
            XContentType contentType = XContentFactory.xContentType(compressedStreamInput);
            compressedStreamInput.resetToBufferStart();
            if (contentType != formatContentType) {
                // we need to reread and store back, compressed....
                BytesStreamOutput bStream = new BytesStreamOutput();
                StreamOutput streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream);
                XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, streamOutput);
                builder.copyCurrentStructure(
                        XContentFactory.xContent(contentType).createParser(compressedStreamInput));
                builder.close();
                source = bStream.bytes();
                // update the data in the context, so we store it in the translog in this format
                context.source(source);
            } else {
                compressedStreamInput.close();
            }
        } else {
            XContentType contentType = XContentFactory.xContentType(source);
            if (contentType != formatContentType) {
                // we need to reread and store back
                BytesStreamOutput bStream = new BytesStreamOutput();
                XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, bStream);
                builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(source));
                builder.close();
                source = bStream.bytes();
                // update the data in the context, so we store it in the translog in this format
                context.source(source);
            }
        }
    }
    assert source.hasArray();
    fields.add(new StoredField(names().indexName(), source.array(), source.arrayOffset(), source.length()));
}
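
The assert on source.hasArray() only holds when the BytesReference is backed by a single array. A defensive variant, sketched here using the same conversion the newer SourceFieldMapper below applies, avoids that assumption:

// Sketch: BytesReference.toBytesRef() always yields a valid array slice,
// so the stored field can be built without requiring hasArray().
BytesRef ref = source.toBytesRef();
fields.add(new StoredField(names().indexName(), ref.bytes, ref.offset, ref.length));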

From source file: org.elasticsearch.index.mapper.SourceFieldMapper.java

License: Apache License

@Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
    if (!enabled) {
        return;
    }
    if (!fieldType().stored()) {
        return;
    }
    BytesReference source = context.sourceToParse().source();
    // Percolate and tv APIs may not set the source and that is ok, because these APIs will not index any data
    if (source == null) {
        return;
    }

    boolean filtered = (includes != null && includes.length > 0) || (excludes != null && excludes.length > 0);
    if (filtered) {
        // we don't update the context source if we filter, we want to keep it as is...

        Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(source, true);
        Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), includes, excludes);
        BytesStreamOutput bStream = new BytesStreamOutput();
        XContentType contentType = mapTuple.v1();
        XContentBuilder builder = XContentFactory.contentBuilder(contentType, bStream).map(filteredSource);
        builder.close();

        source = bStream.bytes();
    }
    BytesRef ref = source.toBytesRef();
    fields.add(new StoredField(fieldType().name(), ref.bytes, ref.offset, ref.length));
}

From source file: org.elasticsearch.index.percolator.PercolatorQueryCacheTests.java

License: Apache License

void storeQuery(String id, IndexWriter indexWriter, QueryBuilder queryBuilder, boolean typeField,
        boolean legacy) throws IOException {
    Document doc = new Document();
    doc.add(new StringField("id", id, Field.Store.NO));
    if (typeField) {
        if (legacy) {
            doc.add(new StringField(TypeFieldMapper.NAME, PercolatorFieldMapper.LEGACY_TYPE_NAME,
                    Field.Store.NO));
        } else {
            doc.add(new StringField(TypeFieldMapper.NAME, "query", Field.Store.NO));
        }
    }
    if (legacy) {
        BytesReference percolatorQuery = XContentFactory.jsonBuilder().startObject()
                .field("query", queryBuilder).endObject().bytes();
        doc.add(new StoredField(SourceFieldMapper.NAME, percolatorQuery.array(), percolatorQuery.arrayOffset(),
                percolatorQuery.length()));
    } else {
        BytesRef queryBuilderAsBytes = new BytesRef(
                XContentFactory.contentBuilder(PercolatorQueryCache.QUERY_BUILDER_CONTENT_TYPE)
                        .value(queryBuilder).bytes().toBytes());
        doc.add(new BinaryDocValuesField(PercolatorFieldMapper.QUERY_BUILDER_FIELD_NAME, queryBuilderAsBytes));
    }
    indexWriter.addDocument(doc);
}

From source file: org.elasticsearch.xpack.core.security.authz.accesscontrol.FieldSubsetReaderTests.java

License: Open Source License

/**
 * Test special handling for the _source field.
 */
public void testSourceFilteringIntegration() throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig iwc = new IndexWriterConfig(null);
    IndexWriter iw = new IndexWriter(dir, iwc);

    // add document with 2 fields
    Document doc = new Document();
    doc.add(new StringField("fieldA", "testA", Field.Store.NO));
    doc.add(new StringField("fieldB", "testB", Field.Store.NO));
    byte[] bytes = "{\"fieldA\":\"testA\", \"fieldB\":\"testB\"}".getBytes(StandardCharsets.UTF_8);
    doc.add(new StoredField(SourceFieldMapper.NAME, bytes, 0, bytes.length));
    iw.addDocument(doc);

    // open reader
    Automaton automaton = Automatons.patterns(Arrays.asList("fieldA", SourceFieldMapper.NAME));
    DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(automaton));

    // see only one field
    Document d2 = ir.document(0);
    assertEquals(1, d2.getFields().size());
    assertEquals("{\"fieldA\":\"testA\"}", d2.getBinaryValue(SourceFieldMapper.NAME).utf8ToString());

    TestUtil.checkReader(ir);
    IOUtils.close(ir, iw, dir);
}