Example usage for org.apache.solr.client.solrj.request ContentStreamUpdateRequest addContentStream

List of usage examples for org.apache.solr.client.solrj.request ContentStreamUpdateRequest addContentStream

Introduction

In this page you can find the example usage for org.apache.solr.client.solrj.request ContentStreamUpdateRequest addContentStream.

Prototype

public void addContentStream(ContentStream contentStream) 

Source Link

Document

Adds an org.apache.solr.common.util.ContentStream to the list returned by #getContentStreams().

Usage

From source file:actors.SolrActor.java

License:Apache License

/**
 * Handles a Solr index event: performs a realtime GET to check whether the
 * document (by id/rev) is already indexed, and if not, streams the remote
 * file referenced by "literal.links" through Solr's /update/extract handler
 * with the document's metadata attached as literal.* parameters.
 */
public void indexUpdated(SolrIndexEvent msg) {
    try {
        System.out.println("SolrIndexEvent");
        SolrInputDocument inputDoc = msg.getDocuement();

        // Realtime GET: look the document up by id/rev before doing the
        // (expensive) extract request.
        System.out.println("GET");
        SolrQuery realtimeGet = new SolrQuery();
        realtimeGet.setRequestHandler("/get");
        String docId = inputDoc.getFieldValue("literal.id").toString();
        String docRev = inputDoc.getFieldValue("literal.rev").toString();
        realtimeGet.set("id", docId);
        realtimeGet.set("rev", docRev);

        QueryResponse queryResponse = server.query(realtimeGet);
        NamedList<Object> result = queryResponse.getResponse();

        // Document already present: nothing to index.
        if (result != null && result.get("doc") != null) {
            System.out.println("It's already update");
            return;
        }

        System.out.println("/update/extract");
        ContentStreamUpdateRequest extractReq = new ContentStreamUpdateRequest("/update/extract");
        // Stream the remote file (a Dropbox URL) directly into the extract handler.
        URL remoteUrl = new URL(inputDoc.getFieldValue("literal.links").toString());
        ContentStreamBase remoteStream = new ContentStreamBase.URLStream(remoteUrl);
        System.out.println("ContentStreamBase");
        extractReq.addContentStream(remoteStream);

        // Additional metadata: copy document fields onto the request as
        // literal.* params. Note "literal.url" is sourced from the
        // "literal.links" field; all others map one-to-one.
        String[][] literalParams = {
                { "literal.id", "literal.id" },
                { "literal.title", "literal.title" },
                { "literal.rev", "literal.rev" },
                { "literal.when", "literal.when" },
                { "literal.path", "literal.path" },
                { "literal.icon", "literal.icon" },
                { "literal.size", "literal.size" },
                { "literal.url", "literal.links" } };
        for (String[] mapping : literalParams) {
            extractReq.setParam(mapping[0], inputDoc.getFieldValue(mapping[1]).toString());
        }

        extractReq.setParam("uprefix", "attr_");
        extractReq.setParam("fmap.content", "attr_content");
        extractReq.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
        // Send the request to Solr (commit happens as part of the action above).
        result = server.request(extractReq);

    } catch (Exception e) {
        e.printStackTrace();
    }
}

From source file:at.kc.tugraz.ss.service.solr.impl.SSSolrImpl.java

License:Apache License

@Override
public void solrAddDoc(final SSServPar parA) throws Exception {

    //    according to Solr specification by adding a document with an ID already
    //     existing in the index will replace the document (eg. refer to 
    //     http://stackoverflow.com/questions/8494923/solr-block-updating-of-existing-document or
    //     http://lucene.apache.org/solr/api-4_0_0-ALPHA/doc-files/tutorial.html ) 

    try {//from   www .j a v a 2s.  com
        final SSSolrAddDocPar par = new SSSolrAddDocPar(parA);
        final ContentStreamUpdateRequest csur = new ContentStreamUpdateRequest("/update/extract");
        final NamedList<Object> response;

        csur.addContentStream(new ContentStreamBase.FileStream(new File(localWorkPath + par.id)));

        csur.setParam("literal.id", par.id);
        csur.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);

        response = solrUpdater.request(csur);

        SSLogU.info("document w/ id " + par.id + " added successfully. ");
    } catch (Exception error) {
        SSServErrReg.regErrThrow(error);
    }
}

From source file:at.tugraz.sss.servs.db.impl.SSDBNoSQLSolrImpl.java

License:Apache License

@Override
public void addDoc(final SSDBNoSQLAddDocPar par) throws SSErr {

    //    according to Solr specification by adding a document with an ID already
    //     existing in the index will replace the document (eg. refer to 
    //     http://stackoverflow.com/questions/8494923/solr-block-updating-of-existing-document or
    //     http://lucene.apache.org/solr/api-4_0_0-ALPHA/doc-files/tutorial.html ) 

    try {/*from w w  w.j  av a2s  .c o  m*/
        final ContentStreamUpdateRequest csur = new ContentStreamUpdateRequest("/update/extract");
        final NamedList<Object> response;

        csur.addContentStream(new ContentStreamBase.FileStream(new File(SSConf.getLocalWorkPath() + par.id)));

        csur.setParam("literal.id", par.id);
        //      csur.setParam  ("stream.type", "application/octet-stream");

        csur.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);

        response = solrServer.request(csur);

        SSLogU.info("document w/ id " + par.id + " added successfully. ");
    } catch (Exception error) {
        SSServErrReg.regErrThrow(error);
    }
}

From source file:com.hortonworks.streamline.streams.runtime.storm.bolt.solr.StreamlineSolrJsonMapper.java

License:Apache License

/**
 * Builds a content-stream update request carrying the given JSON payload,
 * targeted at the configured JSON update handler URL.
 */
private SolrRequest<UpdateResponse> createSolrRequest(String json) {
    // Wrap the JSON string in a content stream with the mapper's content type.
    final ContentStream payload = new ContentStreamBase.StringStream(json, CONTENT_TYPE);
    final ContentStreamUpdateRequest updateRequest = new ContentStreamUpdateRequest(jsonUpdateUrl);
    updateRequest.addContentStream(payload);
    LOG.debug("Request generated with JSON: {}", json);
    return updateRequest;
}

From source file:org.apache.nifi.processors.solr.PutSolrContentStream.java

License:Apache License

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();/*  ww  w.j  a  va 2  s . c  o m*/
    if (flowFile == null) {
        return;
    }

    final AtomicReference<Exception> error = new AtomicReference<>(null);
    final AtomicReference<Exception> connectionError = new AtomicReference<>(null);

    final boolean isSolrCloud = SOLR_TYPE_CLOUD.equals(context.getProperty(SOLR_TYPE).getValue());
    final String collection = context.getProperty(COLLECTION).evaluateAttributeExpressions(flowFile).getValue();
    final Long commitWithin = context.getProperty(COMMIT_WITHIN).evaluateAttributeExpressions(flowFile)
            .asLong();
    final String contentStreamPath = context.getProperty(CONTENT_STREAM_PATH)
            .evaluateAttributeExpressions(flowFile).getValue();
    final MultiMapSolrParams requestParams = new MultiMapSolrParams(getRequestParams(context, flowFile));

    StopWatch timer = new StopWatch(true);
    session.read(flowFile, new InputStreamCallback() {
        @Override
        public void process(final InputStream in) throws IOException {
            ContentStreamUpdateRequest request = new ContentStreamUpdateRequest(contentStreamPath);
            request.setParams(new ModifiableSolrParams());

            // add the extra params, don't use 'set' in case of repeating params
            Iterator<String> paramNames = requestParams.getParameterNamesIterator();
            while (paramNames.hasNext()) {
                String paramName = paramNames.next();
                for (String paramValue : requestParams.getParams(paramName)) {
                    request.getParams().add(paramName, paramValue);
                }
            }

            // specify the collection for SolrCloud
            if (isSolrCloud) {
                request.setParam(COLLECTION_PARAM_NAME, collection);
            }

            if (commitWithin != null && commitWithin > 0) {
                request.setParam(COMMIT_WITHIN_PARAM_NAME, commitWithin.toString());
            }

            // if a username and password were provided then pass them for basic auth
            if (isBasicAuthEnabled()) {
                request.setBasicAuthCredentials(getUsername(), getPassword());
            }

            try (final BufferedInputStream bufferedIn = new BufferedInputStream(in)) {
                // add the FlowFile's content on the UpdateRequest
                request.addContentStream(new ContentStreamBase() {
                    @Override
                    public InputStream getStream() throws IOException {
                        return bufferedIn;
                    }

                    @Override
                    public String getContentType() {
                        return context.getProperty(CONTENT_TYPE).evaluateAttributeExpressions().getValue();
                    }
                });

                UpdateResponse response = request.process(getSolrClient());
                getLogger().debug("Got {} response from Solr", new Object[] { response.getStatus() });
            } catch (SolrException e) {
                error.set(e);
            } catch (SolrServerException e) {
                if (causedByIOException(e)) {
                    connectionError.set(e);
                } else {
                    error.set(e);
                }
            } catch (IOException e) {
                connectionError.set(e);
            }
        }
    });
    timer.stop();

    if (error.get() != null) {
        getLogger().error("Failed to send {} to Solr due to {}; routing to failure",
                new Object[] { flowFile, error.get() });
        session.transfer(flowFile, REL_FAILURE);
    } else if (connectionError.get() != null) {
        getLogger().error("Failed to send {} to Solr due to {}; routing to connection_failure",
                new Object[] { flowFile, connectionError.get() });
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_CONNECTION_FAILURE);
    } else {
        StringBuilder transitUri = new StringBuilder("solr://");
        transitUri.append(getSolrLocation());
        if (isSolrCloud) {
            transitUri.append(":").append(collection);
        }

        final long duration = timer.getDuration(TimeUnit.MILLISECONDS);
        session.getProvenanceReporter().send(flowFile, transitUri.toString(), duration, true);
        getLogger().info("Successfully sent {} to Solr in {} millis", new Object[] { flowFile, duration });
        session.transfer(flowFile, REL_SUCCESS);
    }
}

From source file:org.apache.storm.solr.mapper.SolrJsonMapper.java

License:Apache License

/**
 * Builds a content-stream update request carrying the given JSON payload,
 * targeted at the configured JSON update handler URL.
 *
 * NOTE(review): the method name carries a typo ("createt") but is kept
 * as-is because callers reference it by this name.
 */
private SolrRequest createtSolrRequest(String json) {
    final ContentStreamUpdateRequest updateRequest = new ContentStreamUpdateRequest(jsonUpdateUrl);
    updateRequest.addContentStream(new ContentStreamBase.StringStream(json, CONTENT_TYPE));
    // Guard the debug call to avoid building the message string unnecessarily.
    if (logger.isDebugEnabled()) {
        logger.debug("Request generated with JSON: " + json);
    }
    return updateRequest;
}

From source file:org.dspace.discovery.SolrServiceImpl.java

License:BSD License

/**
 * Write the document to the index under the appropriate handle.
 *
 * @param doc the solr document to be written to the server
 * @param streams/*from  w  w  w . j ava 2  s .c  o m*/
 * @throws IOException IO exception
 */
protected void writeDocument(SolrInputDocument doc, List<BitstreamContentStream> streams) throws IOException {

    try {
        if (getSolr() != null) {
            if (CollectionUtils.isNotEmpty(streams)) {
                ContentStreamUpdateRequest req = new ContentStreamUpdateRequest("/update/extract");

                for (BitstreamContentStream bce : streams) {
                    req.addContentStream(bce);
                }

                ModifiableSolrParams params = new ModifiableSolrParams();

                //req.setParam(ExtractingParams.EXTRACT_ONLY, "true");
                for (String name : doc.getFieldNames()) {
                    for (Object val : doc.getFieldValues(name)) {
                        params.add(ExtractingParams.LITERALS_PREFIX + name, val.toString());
                    }
                }

                req.setParams(params);
                req.setParam(ExtractingParams.UNKNOWN_FIELD_PREFIX, "attr_");
                req.setParam(ExtractingParams.MAP_PREFIX + "content", "fulltext");
                req.setParam(ExtractingParams.EXTRACT_FORMAT, "text");
                req.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
                req.process(getSolr());
            } else {
                getSolr().add(doc);
            }
        }
    } catch (SolrServerException e) {
        log.error(e.getMessage(), e);
    }
}

From source file:org.opensextant.matching.DataLoader.java

License:Open Source License

/**
 * Loads gazetteer or vocabulary data into Solr from a CSV (or flattened
 * index) file via a content-stream update request.
 *
 * Args: scheme ("gazetteer" or other), inputForm ("index" or "csv"),
 * path to the input file, and an optional solr home directory.
 */
public static void main(String[] args) throws Exception {

    if (args.length < 3 || args.length > 4) {
        usage();
    }

    final String scheme = args[0];
    final String inputForm = args[1];
    String dataFilePath = args[2];
    // Optional fourth argument selects a specific solr home.
    final String solrHome = (args.length == 4) ? args[3] : "";

    // get a SolrServer with the proper core
    SolrServer solrServer = getSolrServer(scheme, solrHome);

    // Indexed input is first flattened to a temp file (could stream instead).
    if (inputForm.equalsIgnoreCase("index")) {
        dataFilePath = flatten(dataFilePath);
    }

    try {

        // Set the fieldnames param according to the selected schema.
        final ModifiableSolrParams params = new ModifiableSolrParams(loadParams);
        final String fieldNames = scheme.equalsIgnoreCase("gazetteer")
                ? MatcherFactory.getGazetteerFieldNamesLoader()
                : MatcherFactory.getVocabFieldNames();
        params.set("fieldnames", fieldNames);

        // Build the update request and attach the input file as a stream.
        final ContentStreamUpdateRequest updateRequest = new ContentStreamUpdateRequest(requestHandler);
        updateRequest.setParams(params);
        updateRequest.addContentStream(new ContentStreamBase.FileStream(new File(dataFilePath)));

        // Make the call and report what happened; load errors are printed
        // but do not prevent the server shutdown below.
        try {
            SolrResponseBase response = (SolrResponseBase) updateRequest.process(solrServer);
            printResponse(response);
        } catch (Exception e) {
            e.printStackTrace();
        }

    } finally {
        // cleanup
        solrServer.shutdown();
    }
}

From source file:org.si4t.solr.SolrIndexDispatcher.java

License:Apache License

/**
 * Pushes each binary in the given map through Solr's /update/extract
 * handler, attaching index metadata (id, publication id, url, file
 * size/type) as literal parameters, and commits after each binary.
 *
 * NOTE(review): the returned message reflects only the accumulated
 * responses of successfully processed binaries; entries whose input
 * stream could not be opened are logged and skipped.
 */
public String addBinaries(ConcurrentHashMap<String, BinaryIndexData> binaryAdds,
        SolrClientRequest clientRequest)
        throws IOException, SolrServerException, ParserConfigurationException, SAXException {

    SolrServer server = null;

    server = this.getSolrServer(clientRequest);

    if (server == null) {
        throw new SolrServerException("Solr server not instantiated.");
    }
    // Accumulates raw Solr responses across all binaries.
    StringBuilder rsp = new StringBuilder();
    String rspResponse = " path not found";

    for (Map.Entry<String, BinaryIndexData> entry : binaryAdds.entrySet()) {
        BinaryIndexData data = entry.getValue();

        log.debug("Dispatching binary content to Solr.");

        // Open the binary's content; null means the source path was not found.
        FileStream fs = this.getBinaryInputStream(data);

        if (fs != null) {

            String id = data.getUniqueIndexId();
            log.info("Indexing binary with Id: " + id + ", and URL Path:" + data.getIndexUrl());
            ContentStreamUpdateRequest up = new ContentStreamUpdateRequest("/update/extract");

            up.addContentStream(fs);

            // Index metadata passed through as literal fields.
            up.setParam("literal.id", id);
            up.setParam("literal.publicationid", data.getPublicationItemId());
            up.setParam("literal.pubdate", "NOW");
            // URL-encode spaces so the stored URL is valid.
            up.setParam("literal.url", data.getIndexUrl().replace(" ", "%20"));

            if (!Utils.StringIsNullOrEmpty(data.getFileSize())) {
                up.setParam("literal.fileSize", data.getFileSize());
            }
            if (!Utils.StringIsNullOrEmpty(data.getFileType())) {
                up.setParam("literal.fileType", data.getFileType());
            }
            // Extracted body text is mapped into the "binary_content" field.
            up.setParam("defaultField", "binary_content");

            UpdateResponse serverrsp;

            serverrsp = up.process(server);
            rsp.append(serverrsp.getResponse());

            // Commit per binary so each one becomes searchable immediately.
            log.info("Committing adding binaries.");
            rsp.append("\n");

            serverrsp = server.commit();
            rsp.append(serverrsp.getResponse());

            rspResponse = rsp.toString();
        } else {
            log.error("Could not process binary: " + data.getIndexUrl());
        }
    }
    return ("Adding binaries had the following response: " + rspResponse);
}

From source file:org.sleuthkit.autopsy.keywordsearch.Ingester.java

License:Open Source License

/**
 * Delegate method actually performing the indexing work for objects
 * implementing ContentStream. Builds an /update/extract request, submits it
 * on the request executor, and waits with a size-derived timeout.
 *
 * @param cs ContentStream to ingest
 * @param fields content specific fields
 * @param size size of the content - used to determine the Solr timeout, not
 * used to populate meta-data
 *
 * @throws IngesterException if there was an error processing a specific
 * content, but the Solr server is probably fine.
 */
private void ingestExtract(ContentStream cs, Map<String, String> fields, final long size)
        throws IngesterException {
    final ContentStreamUpdateRequest extractRequest = new ContentStreamUpdateRequest("/update/extract"); //NON-NLS
    extractRequest.addContentStream(cs);
    setFields(extractRequest, fields);
    extractRequest.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);

    // Forward the stream's content type when it carries a non-blank one.
    final String contentType = cs.getContentType();
    final boolean hasContentType = contentType != null && !contentType.trim().equals("");
    if (hasContentType) {
        extractRequest.setParam("stream.contentType", contentType); //NON-NLS
    }

    // Commits are batched elsewhere; suppress the per-request commit.
    extractRequest.setParam("commit", "false"); //NON-NLS

    // Submit asynchronously so the wait can be bounded by a size-based timeout.
    final Future<?> pending = upRequestExecutor.submit(new UpRequestTask(extractRequest));

    try {
        pending.get(getTimeout(size), TimeUnit.SECONDS);
    } catch (TimeoutException te) {
        logger.log(Level.WARNING, "Solr timeout encountered, trying to restart Solr"); //NON-NLS
        // A restart may be needed to recover from some error conditions.
        hardSolrRestart();
        throw new IngesterException(NbBundle.getMessage(this.getClass(),
                "Ingester.ingestExtract.exception.solrTimeout.msg", fields.get("id"), fields.get("file_name"))); //NON-NLS
    } catch (Exception e) {
        throw new IngesterException(
                NbBundle.getMessage(this.getClass(), "Ingester.ingestExtract.exception.probPostToSolr.msg",
                        fields.get("id"), fields.get("file_name")),
                e); //NON-NLS
    }
    // Only reached on success: mark that there is uncommitted work pending.
    uncommitedIngests = true;
}