Example usage for com.mongodb DBCursor setReadPreference

Introduction

This page lists usage examples for com.mongodb.DBCursor.setReadPreference, collected from open-source projects.

Prototype

public DBCursor setReadPreference(final ReadPreference readPreference) 

Document

Sets the read preference for this cursor.
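
For orientation, here is a minimal, self-contained sketch of the basic call using the legacy MongoDB Java driver; the host, database, and collection names are placeholders:

import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.DBCursor;
import com.mongodb.MongoClient;
import com.mongodb.ReadPreference;

public class SetReadPreferenceExample {
    public static void main(String[] args) {
        // Placeholder connection, database, and collection names.
        MongoClient mongo = new MongoClient("localhost", 27017);
        try {
            DBCollection collection = mongo.getDB("test").getCollection("users");
            DBCursor cursor = collection.find(new BasicDBObject("active", true));
            // Prefer a secondary for this read; fall back to the primary if none is available.
            cursor.setReadPreference(ReadPreference.secondaryPreferred());
            try {
                while (cursor.hasNext()) {
                    System.out.println(cursor.next());
                }
            } finally {
                cursor.close();
            }
        } finally {
            mongo.close();
        }
    }
}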

Usage

From source file: com.querydsl.mongodb.AbstractMongodbQuery.java

License: Apache License

protected DBCursor createCursor(DBCollection collection, @Nullable Predicate where, Expression<?> projection,
        QueryModifiers modifiers, List<OrderSpecifier<?>> orderBy) {
    DBCursor cursor = collection.find(createQuery(where), createProjection(projection));
    Integer limit = modifiers.getLimitAsInteger();
    Integer offset = modifiers.getOffsetAsInteger();
    if (limit != null) {
        cursor.limit(limit);
    }
    if (offset != null) {
        cursor.skip(offset);
    }
    if (orderBy.size() > 0) {
        cursor.sort(serializer.toSort(orderBy));
    }
    if (readPreference != null) {
        cursor.setReadPreference(readPreference);
    }
    return cursor;
}

From source file: com.querydsl.mongodb.MongodbQuery.java

License: Apache License

protected DBCursor createCursor(DBCollection collection, @Nullable Predicate where, Expression<?> projection,
        QueryModifiers modifiers, List<OrderSpecifier<?>> orderBy) {
    DBCursor cursor = collection.find(createQuery(where), createProjection(projection));
    Integer limit = modifiers.getLimitAsInteger();
    Integer offset = modifiers.getOffsetAsInteger();
    if (limit != null) {
        cursor.limit(limit.intValue());
    }
    if (offset != null) {
        cursor.skip(offset.intValue());
    }
    if (orderBy.size() > 0) {
        cursor.sort(serializer.toSort(orderBy));
    }
    if (readPreference != null) {
        cursor.setReadPreference(readPreference);
    }
    return cursor;
}
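
The two Querydsl examples above are nearly identical (the second merely unboxes limit and offset explicitly); in both, readPreference is an instance field of the query class. A hedged sketch of the caller side, assuming the query class exposes a setter matching that field (verify against your Querydsl version):

// Hedged assumption: a setter mirroring the readPreference field used in createCursor.
query.setReadPreference(ReadPreference.secondaryPreferred());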

From source file: com.redhat.lightblue.mongo.crud.BasicDocFinder.java

License: Open Source License

@Override
public long find(CRUDOperationContext ctx, DBCollection coll, DBObject mongoQuery, DBObject mongoProjection,
        DBObject mongoSort, Long from, Long to) {
    LOGGER.debug("Submitting query {}", mongoQuery);

    long executionTime = System.currentTimeMillis();
    DBCursor cursor = null;
    boolean cursorInUse = false;
    try {
        cursor = coll.find(mongoQuery, mongoProjection);
        if (readPreference != null) {
            cursor.setReadPreference(readPreference);
        }

        if (ctx.isLimitQueryTime() && maxQueryTimeMS > 0) {
            cursor.maxTime(maxQueryTimeMS, TimeUnit.MILLISECONDS);
        }

        executionTime = System.currentTimeMillis() - executionTime;

        LOGGER.debug("Query evaluated");
        if (mongoSort != null) {
            cursor = cursor.sort(mongoSort);
            LOGGER.debug("Result set sorted");
        }

        LOGGER.debug("Applying limits: {} - {}", from, to);
        boolean retrieve = true;
        int nRetrieve = 0;
        int numMatched = 0;
        // f and t are from and to indexes, both inclusive
        int f = from == null ? 0 : from.intValue();
        if (f < 0) {
            f = 0;
        }
        cursor.skip(f);
        if (ctx.isComputeCounts()) {
            numMatched = cursor.count();
        }
        int t;
        if (to != null) {
            t = to.intValue();
            if (t < f) {
                retrieve = false;
            } else {
                cursor.limit(nRetrieve = t - f + 1);
            }
        } else {
            if (ctx.isComputeCounts()) {
                t = numMatched - 1;
                nRetrieve = numMatched - f;
            } else {
                t = Integer.MAX_VALUE;
            }
        }
        if (retrieve) {
            LOGGER.debug("Retrieving results");
            CursorStream stream = new CursorStream(cursor, translator, mongoQuery, executionTime, f, t);
            ctx.setDocumentStream(stream);
            cursorInUse = true;
        } else {
            ctx.setDocumentStream(new ListDocumentStream<DocCtx>(new ArrayList<>()));
        }
        if (RESULTSET_LOGGER.isDebugEnabled() && (executionTime > 100)) {
            RESULTSET_LOGGER.debug("execution_time={}, query={}, from={}, to={}", executionTime, mongoQuery, f,
                    t);
        }
        return numMatched;
    } finally {
        if (cursor != null && !cursorInUse) {
            cursor.close();
        }
    }
}
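
Note the cursorInUse flag above: the finally block closes the cursor only when it has not been handed off to the document stream. A distilled sketch of that ownership-transfer pattern (the consumer is illustrative, not part of the original source):

import com.mongodb.DBCollection;
import com.mongodb.DBCursor;
import com.mongodb.DBObject;
import com.mongodb.ReadPreference;

import java.util.function.Consumer;

final class CursorHandOff {
    // Close the cursor on every path unless a downstream consumer has taken
    // ownership of it; the consumer then becomes responsible for closing.
    static void findAndHandOff(DBCollection coll, DBObject query, ReadPreference readPreference,
            Consumer<DBCursor> consumer) {
        DBCursor cursor = null;
        boolean handedOff = false;
        try {
            cursor = coll.find(query);
            if (readPreference != null) {
                cursor.setReadPreference(readPreference);
            }
            consumer.accept(cursor);
            handedOff = true;
        } finally {
            if (cursor != null && !handedOff) {
                cursor.close();
            }
        }
    }
}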

From source file: com.redhat.lightblue.mongo.crud.IterateAndUpdate.java

License: Open Source License

@Override
public void update(CRUDOperationContext ctx, DBCollection collection, EntityMetadata md,
        CRUDUpdateResponse response, DBObject query) {
    LOGGER.debug("iterateUpdate: start");
    LOGGER.debug("Computing the result set for {}", query);
    Measure measure = new Measure();
    BatchUpdate sup = getUpdateProtocol(ctx, collection, query, md, measure);
    DBCursor cursor = null;
    int docIndex = 0;
    int numMatched = 0;
    int numUpdated = 0;
    int numFailed = 0;
    BsonMerge merge = new BsonMerge(md);
    List<DocCtx> docUpdateAttempts = new ArrayList<>();
    List<DocCtx> resultDocs = new ArrayList<>();
    ctx.setInputDocuments(resultDocs);
    try {
        ctx.getFactory().getInterceptors().callInterceptors(InterceptPoint.PRE_CRUD_UPDATE_RESULTSET, ctx);
        measure.begin("collection.find");
        cursor = collection.find(query, null);
        // Read from primary for read-for-update operations
        cursor.setReadPreference(ReadPreference.primary());
        measure.end("collection.find");
        LOGGER.debug("Found {} documents", cursor.count());
        // read-update-write
        measure.begin("iteration");
        int batchStartIndex = 0; // docUpdateAttempts[batchStartIndex] is the first doc in this batch
        // TODO: This code is very messy and probably has several logic bugs. I do not have time to fix it.
        // Things I noticed:
        // 1. numFailed is not updated consistently. Depending on where failure occurs, it may not be updated!
        // 2. resultDocs are not updated consistently. Depending on the branch, the document may not end up in the response.
        //    It is not clear from reading the code when it's expected to be in the response or not.
        //    I know from some failing tests in dependent services that at least some cases are bugged.
        // The amount of branching needs to be toned down, and low level state fiddling needs to be better abstracted
        // so it can be expressed in fewer places.
        while (cursor.hasNext()) {
            DBObject document = cursor.next();
            numMatched++;
            boolean hasErrors = false;
            LOGGER.debug("Retrieved doc {}", docIndex);
            measure.begin("ctx.addDocument");
            DocTranslator.TranslatedDoc translatedDoc = translator.toJson(document);
            DocCtx doc = new DocCtx(translatedDoc.doc, translatedDoc.rmd);
            doc.startModifications();
            measure.end("ctx.addDocument");
            // From now on: doc contains the working copy, and doc.originalDoc contains the original copy
            if (updateDoc(md, doc, measure)) {
                LOGGER.debug("Document {} modified, updating", docIndex);
                ctx.getFactory().getInterceptors()
                        .callInterceptors(InterceptPoint.PRE_CRUD_UPDATE_DOC_VALIDATION, ctx, doc);
                LOGGER.debug("Running constraint validations");
                measure.begin("validation");
                validator.clearErrors();
                validator.validateDoc(doc);
                measure.end("validation");
                List<Error> errors = validator.getErrors();
                if (errors != null && !errors.isEmpty()) {
                    ctx.addErrors(errors);
                    hasErrors = true;
                    LOGGER.debug("Doc has errors");
                }
                errors = validator.getDocErrors().get(doc);
                if (errors != null && !errors.isEmpty()) {
                    doc.addErrors(errors);
                    hasErrors = true;
                    LOGGER.debug("Doc has data errors");
                }
                if (!hasErrors) {
                    hasErrors = accessCheck(doc, measure);
                }
                if (!hasErrors) {
                    try {
                        ctx.getFactory().getInterceptors().callInterceptors(InterceptPoint.PRE_CRUD_UPDATE_DOC,
                                ctx, doc);
                        DocTranslator.TranslatedBsonDoc updatedObject = translate(md, doc, document, merge,
                                measure);

                        sup.addDoc(updatedObject.doc);
                        docUpdateAttempts.add(doc);
                        // update in batches
                        if (docUpdateAttempts.size() - batchStartIndex >= batchSize) {
                            preCommit();
                            measure.begin("bulkUpdate");
                            BatchUpdate.CommitInfo ci = sup.commit();
                            measure.end("bulkUpdate");
                            for (Map.Entry<Integer, Error> entry : ci.errors.entrySet()) {
                                docUpdateAttempts.get(entry.getKey() + batchStartIndex)
                                        .addError(entry.getValue());
                            }
                            numFailed += ci.errors.size();
                            numUpdated += docUpdateAttempts.size() - batchStartIndex - ci.errors.size()
                                    - ci.lostDocs.size();
                            numMatched -= ci.lostDocs.size();
                            batchStartIndex = docUpdateAttempts.size();
                            int di = 0;
                            // Only add the docs that were not lost
                            for (DocCtx d : docUpdateAttempts) {
                                if (!ci.lostDocs.contains(di)) {
                                    enforceMemoryLimit(d);
                                    resultDocs.add(d);
                                }
                                di++;
                            }
                        }
                        doc.setCRUDOperationPerformed(CRUDOperation.UPDATE);
                        doc.setUpdatedDocument(doc);
                    } catch (Error e) {
                        if (MongoCrudConstants.ERROR_RESULT_SIZE_TOO_LARGE.equals(e.getErrorCode())) {
                            throw e;
                        } else {
                            LOGGER.warn("Update exception for document {}: {}", docIndex, e);
                            doc.addError(Error.get(MongoCrudConstants.ERR_UPDATE_ERROR, e.toString()));
                            hasErrors = true;
                        }
                    } catch (Exception e) {
                        LOGGER.warn("Update exception for document {}: {}", docIndex, e);
                        doc.addError(Error.get(MongoCrudConstants.ERR_UPDATE_ERROR, e.toString()));
                        hasErrors = true;
                    }
                } else {
                    numFailed++;
                    resultDocs.add(doc);
                }
            } else {
                LOGGER.debug("Document {} was not modified", docIndex);
                resultDocs.add(doc);
            }
            if (hasErrors) {
                LOGGER.debug("Document {} has errors", docIndex);
                doc.setOutputDocument(errorProjector.project(doc, nodeFactory));
            } else if (projector != null) {
                LOGGER.debug("Projecting document {}", docIndex);
                doc.setOutputDocument(projector.project(doc, nodeFactory));
            }
            docIndex++;
        }
        measure.end("iteration");
        // if we have any remaining items to update
        if (docUpdateAttempts.size() > batchStartIndex) {
            preCommit();
            BatchUpdate.CommitInfo ci = sup.commit();
            for (Map.Entry<Integer, Error> entry : ci.errors.entrySet()) {
                docUpdateAttempts.get(entry.getKey() + batchStartIndex).addError(entry.getValue());
            }
            numFailed += ci.errors.size();
            numUpdated += docUpdateAttempts.size() - batchStartIndex - ci.errors.size() - ci.lostDocs.size();
            numMatched -= ci.lostDocs.size();
            int di = 0;
            for (DocCtx d : docUpdateAttempts) {
                if (!ci.lostDocs.contains(di)) {
                    enforceMemoryLimit(d);
                    resultDocs.add(d);
                }
                di++;
            }
        }
    } finally {
        if (cursor != null) {
            cursor.close();
        }
    }

    ctx.setDocumentStream(new ListDocumentStream<DocCtx>(resultDocs));

    response.setNumUpdated(numUpdated);
    response.setNumFailed(numFailed);
    response.setNumMatched(numMatched);
    METRICS.debug("IterateAndUpdate:\n{}", measure);
}
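
The call cursor.setReadPreference(ReadPreference.primary()) is the key detail in this example: documents about to be rewritten are read from the primary so the update operates on current data, even when the collection's default read preference routes reads to secondaries. A minimal sketch of that read-for-update idiom:

import com.mongodb.DBCollection;
import com.mongodb.DBCursor;
import com.mongodb.DBObject;
import com.mongodb.ReadPreference;

final class ReadForUpdate {
    // Force the read-modify-write cycle to start from the primary, avoiding
    // stale documents that a lagging secondary might return.
    static DBCursor findForUpdate(DBCollection collection, DBObject query) {
        DBCursor cursor = collection.find(query, null);
        cursor.setReadPreference(ReadPreference.primary());
        return cursor;
    }
}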

From source file: org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentStore.java

License: Apache License

@SuppressWarnings("unchecked")
@Nonnull
<T extends Document> List<T> queryInternal(Collection<T> collection, String fromKey, String toKey,
        String indexedProperty, long startValue, int limit, long maxQueryTime) {
    log("query", fromKey, toKey, indexedProperty, startValue, limit);
    DBCollection dbCollection = getDBCollection(collection);
    QueryBuilder queryBuilder = QueryBuilder.start(Document.ID);
    queryBuilder.greaterThan(fromKey);
    queryBuilder.lessThan(toKey);

    DBObject hint = new BasicDBObject(NodeDocument.ID, 1);

    if (indexedProperty != null) {
        if (NodeDocument.DELETED_ONCE.equals(indexedProperty)) {
            if (startValue != 1) {
                throw new DocumentStoreException("unsupported value for property " + NodeDocument.DELETED_ONCE);
            }
            queryBuilder.and(indexedProperty);
            queryBuilder.is(true);
        } else {
            queryBuilder.and(indexedProperty);
            queryBuilder.greaterThanEquals(startValue);

            if (NodeDocument.MODIFIED_IN_SECS.equals(indexedProperty) && canUseModifiedTimeIdx(startValue)) {
                hint = new BasicDBObject(NodeDocument.MODIFIED_IN_SECS, -1);
            }
        }
    }
    DBObject query = queryBuilder.get();
    String parentId = Utils.getParentIdFromLowerLimit(fromKey);
    long lockTime = -1;
    final Stopwatch watch = startWatch();

    boolean isSlaveOk = false;
    int resultSize = 0;
    CacheChangesTracker cacheChangesTracker = null;
    if (parentId != null && collection == Collection.NODES) {
        cacheChangesTracker = nodesCache.registerTracker(fromKey, toKey);
    }
    try {
        DBCursor cursor = dbCollection.find(query).sort(BY_ID_ASC);
        if (!disableIndexHint && !hasModifiedIdCompoundIndex) {
            cursor.hint(hint);
        }
        if (maxQueryTime > 0) {
            // OAK-2614: set maxTime if maxQueryTimeMS > 0
            cursor.maxTime(maxQueryTime, TimeUnit.MILLISECONDS);
        }
        ReadPreference readPreference = getMongoReadPreference(collection, parentId, null,
                getDefaultReadPreference(collection));

        if (readPreference.isSlaveOk()) {
            isSlaveOk = true;
            LOG.trace("Routing call to secondary for fetching children from [{}] to [{}]", fromKey, toKey);
        }

        cursor.setReadPreference(readPreference);

        List<T> list;
        try {
            list = new ArrayList<T>();
            for (int i = 0; i < limit && cursor.hasNext(); i++) {
                DBObject o = cursor.next();
                T doc = convertFromDBObject(collection, o);
                list.add(doc);
            }
            resultSize = list.size();
        } finally {
            cursor.close();
        }

        if (cacheChangesTracker != null) {
            nodesCache.putNonConflictingDocs(cacheChangesTracker, (List<NodeDocument>) list);
        }

        return list;
    } finally {
        if (cacheChangesTracker != null) {
            cacheChangesTracker.close();
        }
        stats.doneQuery(watch.elapsed(TimeUnit.NANOSECONDS), collection, fromKey, toKey,
                indexedProperty != null, resultSize, lockTime, isSlaveOk);
    }
}
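
The isSlaveOk check above is how the store records that a query may have been routed away from the primary: every non-primary read preference reports isSlaveOk() as true. A small illustration:

import com.mongodb.ReadPreference;

public class ReadPreferenceRouting {
    public static void main(String[] args) {
        // isSlaveOk() is false only for primary(); any other preference may
        // route the read to a secondary member of the replica set.
        System.out.println(ReadPreference.primary().isSlaveOk());            // false
        System.out.println(ReadPreference.secondaryPreferred().isSlaveOk()); // true
    }
}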

From source file: org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentStore.java

License: Apache License

/**
 * Returns the {@link Document#MOD_COUNT} and
 * {@link NodeDocument#MODIFIED_IN_SECS} values of the documents with the
 * given {@code keys}. The returned map will only contain entries for
 * existing documents. The default value is -1 if the document does not have
 * a modCount field. The same applies to the modified field.
 *
 * @param keys the keys of the documents.
 * @return map with key to modification stamp mapping.
 * @throws MongoException if the call fails
 */
@Nonnull
private Map<String, ModificationStamp> getModStamps(Iterable<String> keys) throws MongoException {
    QueryBuilder query = QueryBuilder.start(Document.ID).in(keys);
    // Fetch only the modCount and id
    final BasicDBObject fields = new BasicDBObject(Document.ID, 1);
    fields.put(Document.MOD_COUNT, 1);
    fields.put(NodeDocument.MODIFIED_IN_SECS, 1);

    DBCursor cursor = nodes.find(query.get(), fields);
    cursor.setReadPreference(ReadPreference.primary());

    Map<String, ModificationStamp> modCounts = Maps.newHashMap();
    for (DBObject obj : cursor) {
        String id = (String) obj.get(Document.ID);
        Long modCount = Utils.asLong((Number) obj.get(Document.MOD_COUNT));
        if (modCount == null) {
            modCount = -1L;
        }
        Long modified = Utils.asLong((Number) obj.get(NodeDocument.MODIFIED_IN_SECS));
        if (modified == null) {
            modified = -1L;
        }
        modCounts.put(id, new ModificationStamp(modCount, modified));
    }
    return modCounts;
}

From source file: org.eclipse.birt.data.oda.mongodb.internal.impl.MDbOperation.java

License: Open Source License

/**
 * Applies data set query properties and hints on DBCursor, except
 * for cursor limit.
 * @see #applyPropertiesToCursor(DBCursor,QueryProperties,boolean,boolean)
 */
static void applyPropertiesToCursor(DBCursor rowsCursor, QueryProperties queryProps, boolean includeSortExpr) {
    if (includeSortExpr) // normally done only when executing a query to get full result set
    {
        DBObject sortExprObj = null;
        try {
            sortExprObj = queryProps.getSortExprAsParsedObject();
        } catch (OdaException ex) {
            // log warning and ignore
            DriverUtil.getLogger().log(Level.WARNING,
                    Messages.bind("Unable to parse the user-defined Sort Expression: {0}", //$NON-NLS-1$
                            queryProps.getSortExpr()), ex);
        }

        if (sortExprObj != null)
            rowsCursor.sort(sortExprObj);
    }

    ReadPreference readPref = queryProps.getTaggableReadPreference();
    if (readPref != null)
        rowsCursor.setReadPreference(readPref);

    if (queryProps.getBatchSize() > 0)
        rowsCursor.batchSize(queryProps.getBatchSize());

    if (queryProps.getNumDocsToSkip() > 0)
        rowsCursor.skip(queryProps.getNumDocsToSkip());

    DBObject hintObj = queryProps.getIndexHintsAsParsedObject();
    if (hintObj != null)
        rowsCursor.hint(hintObj);
    else // try to pass the hint string value as is
    {
        String hintValue = queryProps.getIndexHints();
        if (!hintValue.isEmpty())
            rowsCursor.hint(hintValue);
    }

    if (queryProps.hasNoTimeOut())
        rowsCursor.addOption(Bytes.QUERYOPTION_NOTIMEOUT);
    if (queryProps.isPartialResultsOk())
        rowsCursor.addOption(Bytes.QUERYOPTION_PARTIAL);
}