Example usage for org.apache.lucene.search IndexSearcher getIndexReader

List of usage examples for org.apache.lucene.search IndexSearcher getIndexReader

Introduction

In this page you can find the example usage for org.apache.lucene.search IndexSearcher getIndexReader.

Prototype

public IndexReader getIndexReader() 

Source Link

Document

Return the IndexReader this searches.

Usage

From source file:com.esri.gpt.catalog.lucene.LuceneIndexAdapter.java

License:Apache License

/**
 * Reads the document UUIDs within the index.
 * @param maxUuids the maximum number to read
 * @param startIndex the index to begin reading
 * @return the set of UUIDs/*from w w  w  .j  a  v a  2s.  c  om*/
 * @throws CatalogIndexException if an exception occurs
 */
private StringSet readUuids(int startIndex, int maxUuids) throws CatalogIndexException {
    StringSet ssUuids = new StringSet();
    IndexSearcher searcher = null;
    TermEnum terms = null;
    try {
        String sField = Storeables.FIELD_UUID;
        searcher = newSearcher();
        terms = searcher.getIndexReader().terms(new Term(sField, ""));
        int nCount = 0;
        while (sField.equals(terms.term().field())) {
            if (nCount >= startIndex) {
                ssUuids.add(terms.term().text());
            }
            nCount++;
            if (nCount >= (startIndex + maxUuids))
                break;
            if (!terms.next())
                break;
        }

    } catch (Exception e) {
        String sMsg = "Error accessing index:\n " + Val.chkStr(e.getMessage());
        throw new CatalogIndexException(sMsg, e);
    } finally {
        try {
            if (terms != null)
                terms.close();
        } catch (Exception ef) {
        }
        closeSearcher(searcher);
    }
    return ssUuids;
}

From source file:com.esri.gpt.catalog.lucene.LuceneQueryAdapter.java

License:Apache License

/**
 * Executes a query against a Lucene index.
 * <p>Builds the root Lucene query from the supplied discovery filter, applies
 * access-control/isPartOf/schema filters, executes the search, then populates
 * the discovery result with the requested returnable fields (Dublin Core) or
 * with a stored XML response.
 * @param discoveryQuery the query to execute
 * @throws DiscoveryException if a discovery related exception occurs
 * @throws ParseException if a query parsing exception occurs
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException if an I/O exception occurs
 */
protected void executeQuery(DiscoveryQuery discoveryQuery)
        throws DiscoveryException, ParseException, CorruptIndexException, IOException {

    IndexSearcher searcher = null;
    try {

        // initialize
        searcher = getIndexAdapter().newSearcher();
        this.maxDoc = searcher.maxDoc();
        boolean bExecuteQuery = true;
        boolean bProcessHits = true;
        RequestContext reqContext = this.getIndexAdapter().getRequestContext();
        BooleanQuery rootQuery = new BooleanQuery();
        DiscoveryFilter discoveryFilter = discoveryQuery.getFilter();
        DiscoveryResult discoveryResult = discoveryQuery.getResult();
        Discoverables returnables = discoveryQuery.getReturnables();
        if ((returnables == null) || (returnables.size() == 0) || (discoveryFilter.getMaxRecords() <= 0)) {
            bProcessHits = false;
        }

        // CSW query provider options
        boolean isDublinCoreResponse = true;
        boolean isBriefResponse = false;
        boolean isSummaryResponse = false;
        QueryOptions cswQueryOptions = (QueryOptions) reqContext.getObjectMap()
                .get("com.esri.gpt.server.csw.provider.components.QueryOptions");

        // build the query (if no query was supplied, we'll query everything)
        LogicalClauseAdapter logicalAdapter = new LogicalClauseAdapter(this);
        LogicalClause rootClause = discoveryFilter.getRootClause();
        if ((rootClause == null) || (rootClause.getClauses().size() == 0)) {
            if (discoveryFilter.getMaxRecords() <= QUERYALL_THRESHOLD) {
                LOGGER.finer("No filter was supplied, querying all...");
                logicalAdapter.appendSelectAll(rootQuery);
            } else {
                LOGGER.finer("No filter was supplied, query will not be executed.");
                bExecuteQuery = false;
            }
        } else {
            logicalAdapter.adaptLogicalClause(rootQuery, rootClause);
            // Fix: the original condition ((clauses() == null) && (clauses().size() > 0))
            // could never be true (and would NPE if the first operand were true).
            // The intent is to skip execution when adaptation produced no clauses.
            if ((rootQuery.clauses() == null) || (rootQuery.clauses().size() == 0)) {
                bExecuteQuery = false;
            }
        }
        if (!bExecuteQuery)
            return;

        // execute the query and process the hits if required

        // set the sort option
        Sort sortOption = null;
        if (bProcessHits && (searcher.maxDoc() > 0)) {
            sortOption = makeSortOption(discoveryQuery);
        }

        // filters
        Filter filter = null;

        // make the access control filter
        MetadataAcl acl = new MetadataAcl(reqContext);
        AuthenticationStatus auth = reqContext.getUser().getAuthenticationStatus();
        boolean bAdmin = auth.getAuthenticatedRoles().hasRole("gptAdministrator");
        if (!bAdmin && !acl.isPolicyUnrestricted()) {
            String[] aclValues = acl.makeUserAcl();
            filter = new AclFilter(Storeables.FIELD_ACL, aclValues);
        }

        // isPartOf filter
        filter = IsPartOfFilter.make(reqContext, filter);

        // make the schema filter
        if (cswQueryOptions != null) {
            String schemaName = Val.chkStr(cswQueryOptions.getSchemaFilter());
            if (schemaName.length() > 0) {
                filter = new SchemaFilter(schemaName, filter);
                isDublinCoreResponse = cswQueryOptions.isDublinCoreResponse();
                if (!isDublinCoreResponse) {
                    String elementSetType = Val.chkStr(cswQueryOptions.getElementSetType());
                    if (elementSetType.equalsIgnoreCase("brief")) {
                        isBriefResponse = true;
                    } else if (elementSetType.equalsIgnoreCase("summary")) {
                        isSummaryResponse = true;
                    }
                }
            }
        }

        // determine the start/end positions (startRecord is 1-based externally)
        int startRecord = discoveryFilter.getStartRecord() - 1;
        int maxRecords = discoveryFilter.getMaxRecords();
        if (startRecord < 0)
            startRecord = 0;
        int recordsPerPage = maxRecords;
        if (recordsPerPage <= 0)
            recordsPerPage = 1;
        int hitsToReturn = startRecord + recordsPerPage;
        int nextRecord = 0;
        int numDocs = 0;

        // execute the query
        LOGGER.finer("Executing Lucene Query:\n" + rootQuery);
        TopDocs topDocs = null;
        if (filter != null) {
            if (sortOption != null) {
                topDocs = searcher.search(rootQuery, filter, hitsToReturn, sortOption);
            } else {
                topDocs = searcher.search(rootQuery, filter, hitsToReturn);
            }
        } else {
            if (sortOption != null) {
                // filter is null here; Lucene treats a null filter as "no filter"
                topDocs = searcher.search(rootQuery, filter, hitsToReturn, sortOption);
            } else {
                topDocs = searcher.search(rootQuery, hitsToReturn);
            }
        }

        // determine the hit count
        int totalHits = topDocs.totalHits;
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        if ((scoreDocs != null) && (scoreDocs.length) > 0) {
            numDocs = scoreDocs.length;
            if (totalHits > numDocs) {
                nextRecord = numDocs + 1;
            }
        }
        discoveryResult.setNumberOfHits(totalHits);
        LOGGER.finer("Total query hits: " + totalHits);

        if (startRecord > (totalHits - 1))
            bProcessHits = false;
        if (maxRecords <= 0)
            bProcessHits = false;
        int nTotal = numDocs - startRecord;
        if (!bProcessHits)
            return;

        // warn if many records were requested
        if (nTotal >= TOOMANY_WARNING_THRESHOLD) {
            LOGGER.warning("A request to process " + nTotal
                    + " discovery records was received and will be executed.\n" + discoveryQuery.toString());
        }

        // process the hits, build the results
        LOGGER.finer("Processing " + nTotal + " records from: " + (startRecord + 1) + " to: " + numDocs);
        Storeable storeable;
        DiscoveredRecords records = discoveryResult.getRecords();
        IndexReader reader = searcher.getIndexReader();
        for (int i = startRecord; i < numDocs; i++) {
            ScoreDoc scoreDoc = scoreDocs[i];
            Document document = reader.document(scoreDoc.doc);
            DiscoveredRecord record = new DiscoveredRecord();

            // Dublin Core based responses
            if (isDublinCoreResponse) {
                for (Discoverable target : returnables) {
                    ArrayList<Object> values = new ArrayList<Object>();
                    storeable = (Storeable) target.getStorable();

                    if (storeable instanceof AnyTextProperty) {
                        // anyText is never returned as a field value
                        values = null;

                    } else if (storeable instanceof GeometryProperty) {
                        GeometryProperty geom = (GeometryProperty) storeable;
                        values.add(geom.readEnvelope(document));

                    } else if (target.getMeaning().getMeaningType().equals(PropertyMeaningType.XMLURL)) {
                        String uuid = document.get(Storeables.FIELD_UUID);
                        uuid = URLEncoder.encode(uuid, "UTF-8");
                        values.add("?getxml=" + uuid);

                    } else {
                        DatastoreField retrievalField = storeable.getRetrievalField();
                        Field[] fields = document.getFields(retrievalField.getName());
                        if (fields != null) {
                            for (Field f : fields) {
                                Object value = retrievalField.makeValueToReturn(f.stringValue());
                                values.add(value);
                            }
                        }
                    }

                    if (values != null) {
                        // (the original guarded on values.size() >= 0, which is always true)
                        Object[] oValues = values.toArray();
                        record.addField(target, oValues);
                    }
                }

                // non Dublin Core based responses
            } else {
                String responseXml = null;
                if (isBriefResponse && (responseXml == null)) {
                    Field field = document.getField(Storeables.FIELD_XML_BRIEF);
                    if (field != null) {
                        responseXml = field.stringValue();
                    }
                } else if (isSummaryResponse && (responseXml == null)) {
                    Field field = document.getField(Storeables.FIELD_XML_SUMMARY);
                    if (field != null) {
                        responseXml = field.stringValue();
                    }
                } else if (responseXml == null) {
                    Field field = document.getField(Storeables.FIELD_XML);
                    if (field != null) {
                        responseXml = field.stringValue();
                    }
                }
                record.setResponseXml(responseXml);
            }

            records.add(record);
        }
        int nPopulated = records.size();
        LOGGER.finer("Populated " + nPopulated + " records.");

    } finally {
        getIndexAdapter().closeSearcher(searcher);
    }
}

From source file:com.esri.gpt.catalog.lucene.ReferencedSearcher.java

License:Apache License

/**
 * Releases a searcher obtained from this referenced searcher.
 * @param searcher the searcher to release
 * @throws IOException if an I/O exception occurs
 */
protected synchronized void release(IndexSearcher searcher) throws IOException {
    // drop one reference on the underlying reader
    IndexReader reader = searcher.getIndexReader();
    reader.decRef();
}

From source file:com.esri.gpt.catalog.lucene.TimeperiodClauseAdapter.java

License:Apache License

/**
 * Determines the index of the highest interval field within the Lucene index.
 * <br/>e.g. timeperiod.l.7
 * <br/>If the document with the most intervals has 7, then 7 is the max.
 * <p>Scans all field names for those starting with
 * {@code baseFieldName + ".l."} and records the largest numeric suffix
 * (or -1 if none are found) in {@code maxIntervalFieldName}.
 * @throws DiscoveryException if there is a problem accessing the index
 */
private void determineMaxIntervalFieldName() throws DiscoveryException {
    IndexSearcher searcher = null;
    try {
        searcher = this.getQueryAdapter().getIndexAdapter().newSearcher();
        IndexReader reader = searcher.getIndexReader();
        Collection<String> names = reader.getFieldNames(FieldOption.ALL);
        String sPfx = this.baseFieldName.toLowerCase() + ".l.";
        int nBeginSubstring = sPfx.length();
        int nMax = -1;
        for (String name : names) {
            String lc = name.toLowerCase();
            if (lc.startsWith(sPfx)) {
                LOGGER.finest("Found boundary field: " + name);
                String s = lc.substring(nBeginSubstring);
                try {
                    // parseInt avoids the needless boxing of Integer.valueOf
                    int n = Integer.parseInt(s);
                    if (n > nMax) {
                        nMax = n;
                    }
                } catch (NumberFormatException nfe) {
                    // non-numeric suffix: not an interval field, ignore
                }
            }
        }
        LOGGER.finest("MaxBndFieldIndex: " + nMax);
        this.maxIntervalFieldName = nMax;
    } catch (IOException e) {
        LOGGER.log(Level.SEVERE, "Index issue.", e);
        throw new DiscoveryException(e.toString(), e);
    } finally {
        this.getQueryAdapter().getIndexAdapter().closeSearcher(searcher);
    }
}

From source file:com.esri.gpt.control.cart.VolumeTryHandler.java

License:Apache License

/**
 * Evaluates whether each key can be added to the cart without exceeding
 * the configured maximum total volume.
 * @param request the HTTP request
 * @param response the HTTP response
 * @param context the request context
 * @param cart the current cart
 * @param keys the candidate record keys
 * @return a response mapping each key to whether it fits within the limit
 */
@Override
public TryResponse tryKeys(HttpServletRequest request, HttpServletResponse response, RequestContext context,
        Cart cart, List<String> keys) {
    TryResponse tryResponse = new TryResponse();

    String[] fieldNames = readFieldNames(context);
    double max = readMax(context);

    if (keys != null && fieldNames != null && fieldNames.length > 0 && max > 0) {

        LuceneIndexAdapter adapter = null;
        IndexSearcher searcher = null;
        TermDocs termDocs = null;

        try {
            adapter = new LuceneIndexAdapter(context);
            searcher = adapter.newSearcher();

            IndexReader reader = searcher.getIndexReader();
            termDocs = reader.termDocs();
            MapFieldSelector selector = new MapFieldSelector(fieldNames);

            VolumeReader volumeReader = new VolumeReader(reader, termDocs, selector, fieldNames);
            // weight already committed to the cart
            double already = volumeReader.sumWeights(cart.keySet());

            for (String uuid : keys) {
                double after = already + volumeReader.readWeight(uuid);
                tryResponse.add(uuid, after <= max);
            }

        } catch (Exception ex) {
            LOGGER.log(Level.WARNING, "Error opening searcher", ex);
        } finally {
            // Fix: termDocs was never closed (resource leak); close it
            // best-effort before releasing the searcher, matching the
            // pattern used elsewhere in this codebase.
            try {
                if (termDocs != null) {
                    termDocs.close();
                }
            } catch (Exception ef) {
            }
            try {
                if ((adapter != null) && (searcher != null)) {
                    adapter.closeSearcher(searcher);
                }
            } catch (Exception ef) {
            }
        }
    }

    return tryResponse;
}

From source file:com.esri.gpt.control.georss.FieldMetaLoader.java

/**
 * Loads Lucene index metadata./*from  w  w  w  .jav a2  s  . com*/
 * @param context request context
 * @param fields list of fields
 * @throws CatalogIndexException if accessing index fails
 */
public static void loadLuceneMeta(RequestContext context, List<IFeedRecords.FieldMeta> fields)
        throws CatalogIndexException {
    LuceneIndexAdapter indexAdapter = new LuceneIndexAdapter(context);
    IndexSearcher searcher = null;
    try {
        searcher = indexAdapter.newSearcher();
        IndexReader indexReader = searcher.getIndexReader();
        for (String fieldName : indexReader.getFieldNames(IndexReader.FieldOption.ALL)) {
            fields.add(new IFeedRecords.FieldMeta(IFeedRecord.STD_COLLECTION_INDEX + "." + fieldName,
                    "esriFieldTypeString", fieldName));
        }
    } catch (Exception e) {
        String sMsg = "Error accessing index:\n " + Val.chkStr(e.getMessage());
        throw new CatalogIndexException(sMsg, e);
    } finally {
        indexAdapter.closeSearcher(searcher);
    }
}

From source file:com.esri.gpt.control.georss.JsonSearchEngine.java

License:Apache License

/**
 * Loads Lucene index metadata./*from  w w w. j  av  a  2s  .  co m*/
 * @param context request context
 * @param fields list of fields
 * @throws CatalogIndexException if accessing index fails
 */
protected void loadLuceneMeta(RequestContext context, List<IFeedRecords.FieldMeta> fields)
        throws CatalogIndexException {
    if (!isLuceneMetaAllowed()) {
        return;
    }

    LuceneIndexAdapter indexAdapter = new LuceneIndexAdapter(context);
    IndexSearcher searcher = null;
    try {
        searcher = indexAdapter.newSearcher();
        IndexReader indexReader = searcher.getIndexReader();
        for (String fieldName : indexReader.getFieldNames(IndexReader.FieldOption.ALL)) {
            fields.add(new IFeedRecords.FieldMeta(IFeedRecord.STD_COLLECTION_INDEX + "." + fieldName,
                    "esriFieldTypeString", fieldName));
        }
    } catch (Exception e) {
        String sMsg = "Error accessing index:\n " + Val.chkStr(e.getMessage());
        throw new CatalogIndexException(sMsg, e);
    } finally {
        indexAdapter.closeSearcher(searcher);
    }
}

From source file:com.esri.gpt.control.rest.IndexStatsServlet.java

License:Apache License

/**
 * Processes the HTTP request.
 * <p>Writes index statistics (per-field or summary) to the response, as
 * HTML by default or plain text when {@code f=json} is supplied.
 * @param request the HTTP request.
 * @param response HTTP response.
 * @param context request context
 * @throws Exception if an exception occurs
 */
@Override
protected void execute(HttpServletRequest request, HttpServletResponse response, RequestContext context)
        throws Exception {

    String responseFormat = "html";
    if (Val.chkStr(this.getParameterValue(request, "f")).equalsIgnoreCase("json")) {
        responseFormat = "json";
    }
    if (responseFormat.equals("html")) {
        response.setContentType("text/html");
    } else {
        response.setContentType("text/plain");
    }

    PrintWriter writer = response.getWriter();
    LuceneIndexAdapter adapter = null;
    IndexSearcher searcher = null;
    try {
        // Fix: wrap with Val.chkStr (as done elsewhere in this method) so a
        // missing "field" parameter cannot NPE on field.length() below.
        String field = Val.chkStr(this.getParameterValue(request, "field"));
        // term and pathInfo are currently only referenced by the
        // commented-out stats branches below; kept for when those return.
        String term = this.getParameterValue(request, "term");
        String sortBy = this.getParameterValue(request, "sortBy");
        String pathInfo = "";
        if (request.getPathInfo() != null) {
            pathInfo = Val.chkStr(request.getPathInfo().toLowerCase());
            if (pathInfo.startsWith("/"))
                pathInfo = pathInfo.substring(1);
            if (pathInfo.endsWith("/"))
                pathInfo = pathInfo.substring(0, (pathInfo.length() - 1));
            //System.err.println(pathInfo);
        }

        adapter = new LuceneIndexAdapter(context);
        searcher = adapter.newSearcher();
        IndexReader reader = searcher.getIndexReader();
        StatsRequest statsRequest = new StatsRequest(context);
        statsRequest.setResponseWriter(writer);
        statsRequest.prepare(reader);
        statsRequest.setSortBy(sortBy);
        statsRequest.setResponseFormat(responseFormat);

    if (field.length() > 0) {
            // "max" wins over the legacy "maxrecords" parameter
            int maxRecs = Val.chkInt(this.getParameterValue(request, "max"), -2);
            if (maxRecs == -2) {
                maxRecs = Val.chkInt(this.getParameterValue(request, "maxrecords"), -1);
            }
            int minFreq = Val.chkInt(this.getParameterValue(request, "minFrequency"), -1);
            int maxFreq = Val.chkInt(this.getParameterValue(request, "maxFrequency"), -1);
            SingleFieldStats stats = new SingleFieldStats(field, maxRecs, minFreq, maxFreq);
            stats.collectStats(statsRequest, reader);
            //      } else if (term.length() > 0) {
            //        SingleTermStats stats = new SingleTermStats(term);
            //        stats.collectStats(statsRequest,reader);   
            //      } else if (pathInfo.equals("terms")){
            //        GlobalTermStats stats = new GlobalTermStats();
            //        stats.collectStats(statsRequest,reader);  
        } else {
            //GlobalFieldStats stats = new GlobalFieldStats();
            SummaryStats stats = new SummaryStats();
            stats.collectStats(statsRequest, reader);
        }

        // } catch (Exception ex) {

        //  response.setContentType("text/plain;charset=UTF-8");
        // writer.println("Error getting metadata: " + ex.getMessage());
        // LogUtil.getLogger().log(Level.SEVERE, "Error getting metadata", ex);

    } finally {
        // best-effort flush and release (stray empty statements removed)
        try {
            writer.flush();
        } catch (Exception ef) {
        }
        try {
            if ((adapter != null) && (searcher != null)) {
                adapter.closeSearcher(searcher);
            }
        } catch (Exception ef) {
        }
    }

}

From source file:com.esri.gpt.control.search.browse.TocIndexAdapter.java

License:Apache License

/**
 * Returns the field values associated with a document
 * @param context the operation context/*from   w  w w.  jav a2  s .  c  o m*/
 * @param fieldName the field name
 * @param uuid the document uuid
 * @return the field values (null if not found)
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException if an I/O exception occurs
 */
public String[] queryFieldByUuid(TocContext context, String fieldName, String uuid)
        throws CorruptIndexException, IOException {
    TermDocs termDocs = null;
    try {
        uuid = Val.chkStr(uuid);
        if (uuid.length() > 0) {
            IndexSearcher searcher = this.getSearcher(context);
            IndexReader reader = searcher.getIndexReader();
            MapFieldSelector selector = new MapFieldSelector(new String[] { fieldName });
            termDocs = reader.termDocs();
            termDocs.seek(new Term(Storeables.FIELD_UUID, uuid));
            if (termDocs.next()) {
                Document document = reader.document(termDocs.doc(), selector);
                return document.getValues(fieldName);
            }
        }
    } finally {
        try {
            if (termDocs != null)
                termDocs.close();
        } catch (Exception ef) {
        }
    }
    return null;
}

From source file:com.esri.gpt.server.assertion.index.AsnIndexAdapter.java

License:Apache License

/**
 * Loads the assertion previously cast for the active subject, predicate and user.
 * @param context the assertion operation context
 * @param searcher the index searcher/*from w w w .  j ava2s. com*/
 * @return the previously cast assertion (can be null)
 * @throws Exception if an exception occurs
 */
public Assertion loadPreviousUserAssertion(AsnContext context, IndexSearcher searcher) throws Exception {
    AsnOperation operation = context.getOperation();
    String userKey = Val.chkStr(operation.getUserPart().getKey());
    String username = Val.chkStr(operation.getUserPart().getName());
    boolean isAnonymous = username.equalsIgnoreCase(AsnConstants.ANONYMOUS_USERNAME);
    if (!isAnonymous && (userKey.length() > 0)) {
        AsnAssertionSet asnSet = operation.getAssertionSet();
        AsnValueType vType = asnSet.getValueType();
        String subject = operation.getSubject().getURN();
        String predicate = vType.getRdfPredicate();

        // build a query to match the subject/predicate/user triple
        BooleanQuery query = new BooleanQuery();
        Query qSubject = new TermQuery(new Term(AsnConstants.FIELD_RDF_SUBJECT, subject));
        Query qPredicate = new TermQuery(new Term(AsnConstants.FIELD_RDF_PREDICATE, predicate));
        Query qUserKey = new TermQuery(new Term(AsnConstants.FIELD_USER_KEY, userKey));
        query.add(qSubject, BooleanClause.Occur.MUST);
        query.add(qPredicate, BooleanClause.Occur.MUST);
        query.add(qUserKey, BooleanClause.Occur.MUST);

        // make the reader and searcher, execute the search, return the previous assertion
        TopDocs topDocs = searcher.search(query, 1);
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        if ((scoreDocs != null) && (scoreDocs.length) > 0) {
            Document document = searcher.getIndexReader().document(scoreDocs[0].doc);
            Assertion assertion = asnSet.newAssertion(context, false);
            assertion.load(document);
            return assertion;
        }
    }
    return null;
}