Example usage for org.apache.solr.common SolrDocument put

List of usage examples for org.apache.solr.common SolrDocument put

Introduction

In this page you can find the example usage for org.apache.solr.common SolrDocument put.

Prototype

@Override
    public Object put(String key, Object value) 

Source Link

Usage

From source file:de.qaware.chronix.solr.query.analysis.collectors.AnalysisDocumentBuilder.java

License:Apache License

/**
 * Builds a solr document that is needed for the response from the aggregated time series
 *
 * @param timeSeries  - the time series/*w  w  w  . jav a 2 s .c o m*/
 * @param value       - the isAggregation value
 * @param aggregation - the isAggregation
 * @param key         - the join key
 * @return a solr document holding the attributes and the aggregated value
 */
private static SolrDocument buildDocument(MetricTimeSeries timeSeries, double value,
        Map.Entry<AnalysisType, String[]> aggregation, String key) {

    boolean highLevelAnalysis = AnalysisType.isHighLevel(aggregation.getKey());

    //-1 on high level analyses marks that the time series is ok and should not returned
    if (highLevelAnalysis && value < 0) {
        return null;
    }

    SolrDocument doc;

    if (highLevelAnalysis) {
        doc = convert(timeSeries, true);
    } else {
        doc = convert(timeSeries, false);
        doc.put("value", value);
    }

    //Add some information about the analysis
    doc.put("analysis", aggregation.getKey().name());
    doc.put("analysisParam", String.join("-", aggregation.getValue()));

    //add the join key
    doc.put("joinKey", key);

    return doc;
}

From source file:de.qaware.chronix.solr.query.analysis.SolrDocumentBuilder.java

License:Apache License

/**
 * Builds a solr document that is needed for the response.
 * If the functions contains only analyses an every analysis result is false the method returns null.
 * <p>/*from ww w .j  a  v a2 s .com*/
 * Transformations -> Return the time series
 * Aggregations -> Return the document
 * Analyses -> Return the document if a analysis result is positive
 *
 * @param timeSeries         the time series
 * @param functionValues     a map with executed analyses and values
 * @param key                the join key
 * @param dataShouldReturned true if the data should be returned, otherwise false
 * @param dataAsJson         if true, the data is returned as json
 * @return the time series as solr document
 */
public static SolrDocument buildDocument(MetricTimeSeries timeSeries, FunctionValueMap functionValues,
        String key, boolean dataShouldReturned, boolean dataAsJson) {

    //Convert the document
    SolrDocument doc = convert(timeSeries, dataShouldReturned, dataAsJson);
    //add the join key
    doc.put(ChronixQueryParams.JOIN_KEY, key);
    //Only add if we have function values
    if (functionValues != null) {
        //Add the function results
        addAnalysesAndResults(functionValues, doc);
    }

    return doc;
}

From source file:de.qaware.chronix.solr.query.analysis.SolrDocumentBuilder.java

License:Apache License

/**
 * Adds the executed functions and their results to the given solr document.
 * <p>
 * Every entry is prefixed with a running counter so that multiple functions
 * of the same type do not overwrite each other's fields.
 *
 * @param functionValueMap the function value map with the functions and the results
 * @param doc              the solr document to add the results to
 */
private static void addAnalysesAndResults(FunctionValueMap functionValueMap, SolrDocument doc) {

    //Running counter that makes every function field name unique
    int counter = 0;

    //Transformations: only the arguments are stored
    for (int i = 0; i < functionValueMap.sizeOfTransformations(); i++) {
        ChronixTransformation transformation = functionValueMap.getTransformation(i);
        String name = transformation.getType().name().toLowerCase();
        doc.put(counter + "_" + ChronixQueryParams.FUNCTION + "_" + name, transformation.getArguments());
        counter++;
    }

    //Aggregations: the numeric result, plus the arguments when present
    for (int i = 0; i < functionValueMap.sizeOfAggregations(); i++) {
        ChronixAggregation aggregation = functionValueMap.getAggregation(i);
        String name = aggregation.getType().name().toLowerCase();
        doc.put(counter + "_" + ChronixQueryParams.FUNCTION + "_" + name,
                functionValueMap.getAggregationValue(i));

        //Arguments are only stored when the aggregation has some
        if (aggregation.getArguments().length != 0) {
            doc.put(counter + "_" + ChronixQueryParams.FUNCTION_ARGUMENTS + "_" + name,
                    aggregation.getArguments());
        }
        counter++;
    }

    //Analyses: the boolean result, plus the arguments when present
    for (int i = 0; i < functionValueMap.sizeOfAnalyses(); i++) {
        ChronixAnalysis analysis = functionValueMap.getAnalysis(i);
        String identifier = functionValueMap.getAnalysisIdentifier(i);

        //An identifier (e.g. of a sub query) becomes part of the field name when present
        String suffix = Strings.isNullOrEmpty(identifier)
                ? "_" + analysis.getType().name().toLowerCase()
                : "_" + analysis.getType().name().toLowerCase() + "_" + identifier;

        doc.put(counter + "_" + ChronixQueryParams.FUNCTION + suffix,
                functionValueMap.getAnalysisValue(i));

        if (analysis.getArguments().length != 0) {
            doc.put(counter + "_" + ChronixQueryParams.FUNCTION_ARGUMENTS + suffix,
                    analysis.getArguments());
        }
        counter++;
    }
}

From source file:de.unidue.inf.is.ezdl.dlwrapper.wrappers.cs.DBLPSolrWrapperTest.java

License:Open Source License

/**
 * Creates a sample solr document that mirrors a DBLP conference paper entry.
 *
 * @return the prepared {@code SolrDocument} test fixture
 */
private SolrDocument getSolrDocument() {
    SolrDocument sd = new SolrDocument();
    sd.put("title", "Challenges for test and design for test.");
    sd.put("url", "db/conf/ddecs/ddecs2009.html#Chichkov09");
    sd.put("booktitle", "DDECS");
    sd.put("crossref", "conf/ddecs/2009");
    sd.put("ee", "http://dx.doi.org/10.1109/DDECS.2009.5012086");
    sd.put("key", "conf/ddecs/Chichkov09");
    sd.put("mdate", "2009-07-05T22:00:00Z");
    sd.put("pages", "3");
    // Integer.valueOf instead of the deprecated new Integer(...) constructor
    sd.put("year", Integer.valueOf(2009));
    PersonList authors = new PersonList();
    authors.add(new Person("Anton Chichkov"));
    sd.put("author", authors);
    return sd;
}

From source file:geocluster.GeoclusterComponent.java

License:Apache License

/**
 * Calculates the center point of a cluster from the bounding box of all its
 * clustered documents and stores it in the cluster document.
 * <p>
 * Documents without a lat/lon value are skipped. If no document contributes a
 * valid point, no center is set. (The previous version relied on an empty
 * catch block to swallow the unboxing NullPointerException in that case.)
 *
 * @param cluster the cluster document; read for {@code GEOCLUSTER_DOCS},
 *                updated with {@code GEOCLUSTER_CENTER}
 */
private void updateCluster(SolrDocument cluster) {
    // Calculate the bounding box over all clustered points.
    HashMap<Integer, SolrDocument> docs = (HashMap<Integer, SolrDocument>) cluster
            .getFieldValue(GEOCLUSTER_DOCS);
    Float latMin = null, latMax = null, lonMin = null, lonMax = null;
    for (Entry<Integer, SolrDocument> entry : docs.entrySet()) {
        SolrDocument doc = entry.getValue();
        String latlon = (String) doc.getFieldValue(this.latlonField);
        if (latlon == null) {
            continue;
        }
        // Stored as "lat,lon".
        String[] latlonSplit = latlon.split(",");
        float lat = Float.parseFloat(latlonSplit[0]);
        float lon = Float.parseFloat(latlonSplit[1]);
        latMin = latMin == null ? lat : Math.min(latMin, lat);
        latMax = latMax == null ? lat : Math.max(latMax, lat);
        lonMin = lonMin == null ? lon : Math.min(lonMin, lon);
        lonMax = lonMax == null ? lon : Math.max(lonMax, lon);
    }
    // Only set a center when at least one valid point was seen.
    if (latMin != null && lonMin != null) {
        LatLng latlonCenter = new FloatLatLng((latMin + latMax) / 2, (lonMin + lonMax) / 2);
        cluster.put(GEOCLUSTER_CENTER, latlonCenter);
    }
}

From source file:org.apache.nifi.processors.solr.TestPutSolrRecord.java

License:Apache License

/**
 * Indexes a single flat record via PutSolrRecord and verifies that the
 * resulting Solr document matches the record field-for-field.
 */
@Test
public void testPutSolrOnTriggerIndex() throws IOException, InitializationException, SolrServerException {
    final SolrClient solrClient = createEmbeddedSolrClient(DEFAULT_SOLR_CORE);
    TestableProcessor proc = new TestableProcessor(solrClient);

    TestRunner runner = createDefaultTestRunner(proc);
    MockRecordParser recordParser = new MockRecordParser();
    runner.addControllerService("parser", recordParser);

    runner.enableControllerService(recordParser);
    runner.setProperty(PutSolrRecord.RECORD_READER, "parser");

    runner.setProperty(PutSolrRecord.UPDATE_PATH, "/update");

    // Schema of the flat record that is sent to Solr
    recordParser.addSchemaField("id", RecordFieldType.INT);
    recordParser.addSchemaField("first", RecordFieldType.STRING);
    recordParser.addSchemaField("last", RecordFieldType.STRING);
    recordParser.addSchemaField("grade", RecordFieldType.INT);
    recordParser.addSchemaField("subject", RecordFieldType.STRING);
    recordParser.addSchemaField("test", RecordFieldType.STRING);
    recordParser.addSchemaField("marks", RecordFieldType.INT);

    // Expected document after indexing
    SolrDocument solrDocument = new SolrDocument();
    solrDocument.put("id", 1);
    solrDocument.put("first", "Abhinav");
    solrDocument.put("last", "R");
    solrDocument.put("grade", 8);
    solrDocument.put("subject", "Chemistry");
    solrDocument.put("test", "term1");
    solrDocument.put("marks", 98);

    recordParser.addRecord(1, "Abhinav", "R", 8, "Chemistry", "term1", 98);

    try {
        // singletonMap instead of the double-brace HashMap anti-pattern
        // (the anonymous subclass retains a reference to the test instance)
        runner.enqueue(new byte[0], Collections.singletonMap("id", "1"));
        runner.run(1, false);
        verifySolrDocuments(proc.getSolrClient(), Collections.singletonList(solrDocument));
        runner.assertTransferCount(PutSolrRecord.REL_FAILURE, 0);
        runner.assertTransferCount(PutSolrRecord.REL_CONNECTION_FAILURE, 0);
        runner.assertTransferCount(PutSolrRecord.REL_SUCCESS, 1);
    } finally {
        try {
            proc.getSolrClient().close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

From source file:org.apache.nifi.processors.solr.TestPutSolrRecord.java

License:Apache License

/**
 * Indexes a record containing a nested record via PutSolrRecord and verifies
 * that the nested fields are flattened with the parent field name as prefix
 * (e.g. {@code exam_subject}).
 */
@Test
public void testPutSolrOnTriggerIndexForANestedRecord()
        throws IOException, InitializationException, SolrServerException {
    final SolrClient solrClient = createEmbeddedSolrClient(DEFAULT_SOLR_CORE);
    TestableProcessor proc = new TestableProcessor(solrClient);

    TestRunner runner = createDefaultTestRunner(proc);
    MockRecordParser recordParser = new MockRecordParser();
    runner.addControllerService("parser", recordParser);

    runner.enableControllerService(recordParser);
    runner.setProperty(PutSolrRecord.RECORD_READER, "parser");

    runner.setProperty(PutSolrRecord.UPDATE_PATH, "/update");

    recordParser.addSchemaField("id", RecordFieldType.INT);
    recordParser.addSchemaField("first", RecordFieldType.STRING);
    recordParser.addSchemaField("last", RecordFieldType.STRING);
    recordParser.addSchemaField("grade", RecordFieldType.INT);
    recordParser.addSchemaField("exam", RecordFieldType.RECORD);

    // Schema of the nested "exam" record
    final List<RecordField> fields = new ArrayList<>();
    fields.add(new RecordField("subject", RecordFieldType.STRING.getDataType()));
    fields.add(new RecordField("test", RecordFieldType.STRING.getDataType()));
    fields.add(new RecordField("marks", RecordFieldType.INT.getDataType()));
    RecordSchema schema = new SimpleRecordSchema(fields);

    Map<String, Object> values = new HashMap<>();
    values.put("subject", "Chemistry");
    values.put("test", "term1");
    values.put("marks", 98);
    final Record record = new MapRecord(schema, values);

    recordParser.addRecord(1, "Abhinav", "R", 8, record);

    // Expected document: nested fields flattened with the "exam_" prefix
    SolrDocument solrDocument = new SolrDocument();
    solrDocument.put("id", 1);
    solrDocument.put("first", "Abhinav");
    solrDocument.put("last", "R");
    solrDocument.put("grade", 8);
    solrDocument.put("exam_subject", "Chemistry");
    solrDocument.put("exam_test", "term1");
    solrDocument.put("exam_marks", 98);

    try {
        // singletonMap instead of the double-brace HashMap anti-pattern
        // (the anonymous subclass retains a reference to the test instance)
        runner.enqueue(new byte[0], Collections.singletonMap("id", "1"));
        runner.run(1, false);
        runner.assertTransferCount(PutSolrRecord.REL_FAILURE, 0);
        runner.assertTransferCount(PutSolrRecord.REL_CONNECTION_FAILURE, 0);
        runner.assertTransferCount(PutSolrRecord.REL_SUCCESS, 1);
        verifySolrDocuments(proc.getSolrClient(), Collections.singletonList(solrDocument));
    } finally {
        try {
            proc.getSolrClient().close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

From source file:org.apache.nifi.processors.solr.TestPutSolrRecord.java

License:Apache License

/**
 * Indexes a record containing an array of nested records via PutSolrRecord and
 * verifies that each nested field is collected into a multi-valued, prefixed
 * Solr field (e.g. {@code exams_subject}).
 */
@Test
public void testPutSolrOnTriggerIndexForAnArrayOfNestedRecord()
        throws IOException, InitializationException, SolrServerException {
    final SolrClient solrClient = createEmbeddedSolrClient(DEFAULT_SOLR_CORE);
    TestableProcessor proc = new TestableProcessor(solrClient);

    TestRunner runner = createDefaultTestRunner(proc);
    MockRecordParser recordParser = new MockRecordParser();
    runner.addControllerService("parser", recordParser);

    runner.enableControllerService(recordParser);
    runner.setProperty(PutSolrRecord.RECORD_READER, "parser");

    runner.setProperty(PutSolrRecord.UPDATE_PATH, "/update");

    recordParser.addSchemaField("id", RecordFieldType.INT);
    recordParser.addSchemaField("first", RecordFieldType.STRING);
    recordParser.addSchemaField("last", RecordFieldType.STRING);
    recordParser.addSchemaField("grade", RecordFieldType.INT);
    recordParser.addSchemaField("exams", RecordFieldType.ARRAY);

    // Shared schema of the nested "exams" records
    final List<RecordField> fields = new ArrayList<>();
    fields.add(new RecordField("subject", RecordFieldType.STRING.getDataType()));
    fields.add(new RecordField("test", RecordFieldType.STRING.getDataType()));
    fields.add(new RecordField("marks", RecordFieldType.INT.getDataType()));
    RecordSchema schema = new SimpleRecordSchema(fields);

    Map<String, Object> values1 = new HashMap<>();
    values1.put("subject", "Chemistry");
    values1.put("test", "term1");
    values1.put("marks", 98);
    final Record record1 = new MapRecord(schema, values1);

    Map<String, Object> values2 = new HashMap<>();
    values2.put("subject", "Maths");
    values2.put("test", "term1");
    values2.put("marks", 98);
    final Record record2 = new MapRecord(schema, values2);

    recordParser.addRecord(1, "Abhinav", "R", 8, new Record[] { record1, record2 });

    // Expected document: one multi-valued field per nested attribute
    SolrDocument solrDocument = new SolrDocument();
    solrDocument.put("id", 1);
    solrDocument.put("first", "Abhinav");
    solrDocument.put("last", "R");
    solrDocument.put("grade", 8);
    solrDocument.put("exams_subject", Stream.of("Chemistry", "Maths").collect(Collectors.toList()));
    solrDocument.put("exams_test", Stream.of("term1", "term1").collect(Collectors.toList()));
    solrDocument.put("exams_marks", Stream.of(98, 98).collect(Collectors.toList()));

    // No catch (Exception) here: the previous version printed and swallowed
    // verification failures, which could turn a failing test green.
    try {
        // singletonMap instead of the double-brace HashMap anti-pattern
        runner.enqueue(new byte[0], Collections.singletonMap("id", "1"));
        runner.run(1, false);
        verifySolrDocuments(solrClient, Collections.singletonList(solrDocument));
        runner.assertTransferCount(PutSolrRecord.REL_FAILURE, 0);
        runner.assertTransferCount(PutSolrRecord.REL_CONNECTION_FAILURE, 0);
        runner.assertTransferCount(PutSolrRecord.REL_SUCCESS, 1);
    } finally {
        try {
            proc.getSolrClient().close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

From source file:org.zaizi.sensefy.api.service.SolrSmartAutoCompleteService.java

License:Open Source License

/**
 * this method prevent the document suggestion field to return more than one
 * value. In this way we would have only one suggestion, and containing the
 * value can be improved using highlight
 *
 * @param titleSuggestions/*  ww w.ja  v  a2 s  .c  om*/
 */
private void filterDocumentSuggestions(List<SolrDocument> titleSuggestions,
        Map<String, Map<String, List<String>>> highlightingSnippets) {
    for (SolrDocument doc : titleSuggestions) {
        String docId = (String) doc.get(ID_FIELD);
        doc.remove(DOCUMENT_SUGGESTION);
        if (highlightingSnippets != null) {
            Map<String, List<String>> field2snippet = highlightingSnippets.get(docId);
            if (field2snippet != null) {
                List<String> snippets = field2snippet.get(DOCUMENT_SUGGESTION);
                if (snippets.size() > 0)
                    doc.put(DOCUMENT_SUGGESTION, snippets.get(0));
            }
        }

    }
}